import os
import sys
import logging
import paddle
import argparse
import functools
import math
import time
import numpy as np
import paddle.fluid as fluid
sys.path[0] = os.path.join(
os.path.dirname(__file__), os.path.pardir, os.path.pardir)
from paddleslim.common import get_logger
import models
from utility import add_arguments, print_arguments
import models.inception_v4 as Model
_logger = get_logger(__name__, level=logging.INFO)
parser = argparse.ArgumentParser(description=__doc__)
add_arg = functools.partial(add_arguments, argparser=parser)
# yapf: disable
add_arg('use_gpu', bool, True, "Whether to use GPU or not.")
add_arg('model', str, "MobileNet", "The target model.")
add_arg('pretrained_model', str, "../pretrained_model/MobileNetV1_pretained", "Path to the pretrained model.")
add_arg('data', str, "mnist", "Which data to use. 'mnist' or 'imagenet'")
add_arg('test_period', int, 10, "Test period in epochs.")
## reader_cv2 eval
add_arg('image_shape', str, "3,224,224", "Input image size")
add_arg('params_filename', str, None, "params file name")
add_arg('resize_short_size', int, 256, "Set resize short size")
## reader_cv2 train
add_arg('batch_size', int, 128, "Minibatch size.")
add_arg('total_images', int, 16235, "Training image number.")
add_arg('use_mixup', bool, False, "Whether to use mixup or not")
add_arg('mixup_alpha', float, 0.2, "Set the mixup_alpha parameter")
add_arg('l2_decay', float, 1e-4, "L2_decay parameter.")
add_arg('momentum_rate', float, 0.9, "momentum_rate.")
add_arg('use_label_smoothing', bool, False, "Whether to use label_smoothing or not")
add_arg('label_smoothing_epsilon', float, 0.2, "Set the label_smoothing_epsilon parameter")
add_arg('lower_scale', float, 0.08, "Set the lower_scale in random_crop")
add_arg('lower_ratio', float, 3./4., "Set the lower_ratio in random_crop")
add_arg('upper_ratio', float, 4./3., "Set the upper_ratio in random_crop")
add_arg('num_epochs', int, 50, "number of epochs.")
add_arg('class_dim', int, 23, "Class number.")
add_arg('lr', float, 0.1, "set learning rate.")
# yapf: enable
model_list = [m for m in dir(models) if "__" not in m]
def export_model(args):
if args.data == "mnist":
import paddle.dataset.mnist as reader
train_reader = reader.train()
val_reader = reader.test()
class_dim = 10
image_shape = "1,28,28"
elif args.data == "imagenet":
import imagenet_reader as reader
train_reader = reader.train()
val_reader = reader.val()
class_dim = 1000
image_shape = "3,224,224"
elif args.data == "fruit_veg":
import reader_cv2 as reader
train_reader = reader.train(settings=args)
val_reader = reader.val(settings=args)
class_dim = 23
image_shape = "3,224,224"
resize_short_size = 256
elif args.data == "yolov3-384":
import reader_cv2 as reader
train_reader = reader.train(settings=args)
val_reader = reader.val(settings=args)
class_dim = 80
image_shape = "3,384,384"
else:
raise ValueError("{} is not supported.".format(args.data))
image_shape = [int(m) for m in image_shape.split(",")]
image = fluid.data(
name='image', shape=[None] + image_shape, dtype='float32')
assert args.model in model_list, "{} is not in lists: {}".format(
args.model, model_list)
# model definition
# model = models.__dict__[args.model]()
model = Model.InceptionV4()
out = model.net(input=image, class_dim=class_dim)
val_program = fluid.default_main_program().clone(for_test=True)
place = fluid.CUDAPlace(0) if args.use_gpu else fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
if args.pretrained_model:
def if_exist(var):
return os.path.exists(
os.path.join(args.pretrained_model, var.name))
fluid.io.load_vars(exe, args.pretrained_model, predicate=if_exist)
else:
assert False, "args.pretrained_model must be set"
fluid.io.save_inference_model(
'./inference_model/' + args.model,
feeded_var_names=[image.name],
target_vars=[out],
executor=exe,
main_program=val_program,
model_filename='model',
params_filename='weights')
def main():
args = parser.parse_args()
print_arguments(args)
export_model(args)
if __name__ == '__main__':
main()
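# Hypothetical invocation of this export script (the script name, flag values and
# paths below are illustrative assumptions, not taken from the repo):
#
#   python export_model.py \
#       --model InceptionV4 \
#       --data fruit_veg \
#       --pretrained_model ../pretrained_model/MobileNetV1_pretained \
#       --use_gpu True
#
# On success, fluid.io.save_inference_model above writes the inference graph and
# weights to ./inference_model/<model>/{model,weights}.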
|
# Copyright 2019 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from collections import Counter
from unittest import mock
from tripleo_ansible.ansible_plugins.modules import lvm2_physical_devices_facts as lvm2
from tripleo_ansible.tests import base as tests_base
class TestLvm2PhysicalDevicesFacts(tests_base.TestCase):
def test_get_pvs(self):
mock_module = mock.Mock()
mock_module.run_command.return_value = (0, ' myvgname\n myvgname\n', '')
result = lvm2.get_vgs_with_active_lvs(mock_module)
self.assertEqual(['myvgname'], result)
mock_module.run_command.return_value = (0, ' /dev/sdb1\n /dev/sdb2\n', '')
result = lvm2.get_vgs_with_active_lvs(mock_module)
self.assertEqual(Counter(['/dev/sdb1', '/dev/sdb2']), Counter(result))
|
import dill
from sklearn.feature_selection import RFECV
from sklearn.model_selection import StratifiedKFold
from robotehr.pipelines.supporters.plots import plot_rfe
def recursive_feature_elimination(X, y, algorithm, step_size=50, create_figure=False, filename="", n_splits=5):
estimator = algorithm()
rfecv = RFECV(
estimator=estimator,
step=step_size,
cv=StratifiedKFold(n_splits=n_splits),
scoring=estimator.score_auroc,
verbose=1,
n_jobs=-1
)
rfecv.fit(X, y)
if create_figure:
fig = plot_rfe(rfecv, X, step_size, filename)
else:
fig = {}
return {
'n_features': rfecv.n_features_,
'X_supported': X[X.columns[rfecv.support_]],
'y': y,
'figure': fig,
'rfecv': rfecv,
}
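# Illustrative sketch (not part of the pipeline): how the underlying scikit-learn
# RFECV is typically driven with a standard estimator and a string scorer, in place
# of the custom `score_auroc` attribute expected by the helper above. The dataset
# and estimator below are assumptions for demonstration only.
def _example_rfecv_sketch():
    from sklearn.datasets import make_classification
    from sklearn.linear_model import LogisticRegression

    X, y = make_classification(n_samples=200, n_features=40, random_state=0)
    selector = RFECV(
        estimator=LogisticRegression(max_iter=1000),
        step=5,                              # drop 5 features per elimination round
        cv=StratifiedKFold(n_splits=5),
        scoring="roc_auc",
        n_jobs=-1,
    )
    selector.fit(X, y)
    # Boolean mask of the retained features and their count.
    return selector.support_, selector.n_features_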
class DataLoader:
def __init__(self, column_selector):
self.column_selector = column_selector
self.objects = {}
def transform(self, X, y):
raise NotImplementedError
def transform_training_data(self, X_train, y_train):
raise NotImplementedError
def transform_test_data(self, X_test, y_test):
raise NotImplementedError
def dump(self, path):
return dill.dump(self, open(path, "wb"))
@classmethod
def load(cls, path):
return dill.load(open(path, "rb"))
|
from onmt.modules.StochasticTransformer.Models import StochasticTransformerEncoder, StochasticTransformerDecoder
# For flake8 compatibility.
__all__ = ["StochasticTransformerEncoder", "StochasticTransformerDecoder"]
|
from flask_restplus import Namespace, Resource
from flask import request, jsonify, current_app
from flask_jwt_simple import jwt_required, get_jwt_identity
from kafka import KafkaProducer
from micro_utils.flask.jwt import resolve_jwt_identity
from micro_utils.messaging.adapters import TopicProducer
from micro_utils.proto.SimpleSong_pb2 import SimpleSong
from micro_utils.proto.Song_pb2 import Song
from micro_records.models import Track, Album, Artist
from micro_records.schemas import tracks_schema, track_schema
from micro_records.config import KAFKA_CONFIG
api = Namespace('', description='Tracks Lister')
@api.route('/album/<int:album>')
class Tracks(Resource):
@jwt_required
def get(self, album):
tracks = Track.query.filter(Track.album_id == album).order_by(Track.track_number)
schema = tracks_schema.dump(tracks).data
return {'tracks': schema}
@jwt_required
def post(self, album):
track, _, author = (Track.query
.join(Album, Album.id==Track.album_id)
.join(Artist, Artist.id==Album.artist_id)
.add_columns(Album.name, Artist.name)
.filter(Track.id == album).first())
self._send_song(track=track)
self._emit_song_played(track=track, author=author)
return {'Status': 'Track ' + track.name + ' added'}, 200
@staticmethod
def _send_song(track):
producer = KafkaProducer(**KAFKA_CONFIG)
song = Song(
name=track.name,
path=track.path,
user=resolve_jwt_identity(current_app)
)
producer.send(value=song.SerializeToString(), topic='playlist')
producer.flush()
@staticmethod
def _emit_song_played(track, author):
producer = KafkaProducer(**KAFKA_CONFIG)
print('#########')
print(track.genre)
print('#########')
song = SimpleSong(
song_id=track.id,
author=author,
genre=track.genre,
name=track.name)
producer.send(value=song.SerializeToString(), topic='track_selected')
producer.flush()
|
#test cases starting from 0
class Solution:
def fib(self, N):
if N==0: return 0
lastTwo = [1 , 1]
counter = 3
while counter <= N:
nextFib = lastTwo[0] + lastTwo[1]
lastTwo[0] = lastTwo[1]
lastTwo[1] = nextFib
counter += 1
return lastTwo[1] if N > 1 else lastTwo[0]
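# Quick sanity check for the iterative solution above: the sequence is
# 0, 1, 1, 2, 3, 5, 8, ..., so fib(0) == 0, fib(1) == 1 and fib(10) == 55.
if __name__ == "__main__":
    solver = Solution()
    assert [solver.fib(n) for n in range(7)] == [0, 1, 1, 2, 3, 5, 8]
    assert solver.fib(10) == 55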
|
# This relies on each of the submodules having an __all__ variable.
from .client import *
from .exceptions import *
from .protocol import *
from .server import *
from .uri import *
__all__ = (
client.__all__
+ exceptions.__all__
+ protocol.__all__
+ server.__all__
+ uri.__all__
)
from .version import version as __version__ # noqa
|
"""Unit tests for testing support
"""
import logging
import os
import unittest
import numpy
from astropy import units as u
from astropy.coordinates import SkyCoord
from rascil.data_models.polarisation import PolarisationFrame
from rascil.processing_components.image.operations import export_image_to_fits, scale_and_rotate_image
from rascil.processing_components.imaging.base import create_image_from_visibility
from rascil.processing_components.imaging.primary_beams import create_pb, create_vp, convert_azelvp_to_radec
from rascil.processing_components.simulation import create_named_configuration
from rascil.processing_components.visibility.base import create_visibility
log = logging.getLogger('logger')
log.setLevel(logging.WARNING)
class TestPrimaryBeams(unittest.TestCase):
def setUp(self):
from rascil.data_models.parameters import rascil_path
self.dir = rascil_path('test_results')
self.persist = os.getenv("RASCIL_PERSIST", False)
def createVis(self, config='MID', dec=-35.0, rmax=1e3, freq=1.3e9):
self.frequency = [freq]
self.channel_bandwidth = [1e6]
self.flux = numpy.array([[100.0]])
self.phasecentre = SkyCoord(ra=+15.0 * u.deg, dec=-35.0 * u.deg, frame='icrs', equinox='J2000')
self.config = create_named_configuration(config)
self.times = numpy.linspace(-300.0, 300.0, 3) * numpy.pi / 43200.0
nants = self.config.xyz.shape[0]
self.npixel = 1024
self.fov = 8
self.cellsize = numpy.pi * self.fov / (self.npixel * 180.0)
assert nants > 1
assert len(self.config.names) == nants
assert len(self.config.mount) == nants
self.config = create_named_configuration(config, rmax=rmax)
self.phasecentre = SkyCoord(ra=+15 * u.deg, dec=dec * u.deg, frame='icrs', equinox='J2000')
self.vis = create_visibility(self.config, self.times, self.frequency,
channel_bandwidth=self.channel_bandwidth,
phasecentre=self.phasecentre, weight=1.0,
polarisation_frame=PolarisationFrame('stokesI'))
def test_create_primary_beams_RADEC(self):
self.createVis()
for telescope in ['VLA', 'ASKAP', 'MID', 'LOW']:
model = create_image_from_visibility(self.vis, cellsize=self.cellsize, npixel=self.npixel,
override_cellsize=False)
beam = create_pb(model, telescope=telescope, use_local=False)
assert numpy.max(beam.data) > 0.0
if self.persist: export_image_to_fits(beam, "%s/test_primary_beam_RADEC_%s.fits" % (self.dir, telescope))
def test_create_primary_beams_AZELGEO(self):
self.createVis()
for telescope in ['VLA', 'ASKAP', 'MID', 'MID_GAUSS', 'MID_FEKO_B1', 'MID_FEKO_B2', 'MID_FEKO_Ku', 'MID_GRASP',
'LOW']:
model = create_image_from_visibility(self.vis, cellsize=self.cellsize, npixel=self.npixel,
override_cellsize=False)
beam = create_pb(model, telescope=telescope, use_local=True)
assert numpy.max(beam.data) > 0.0
if self.persist: export_image_to_fits(beam, "%s/test_primary_beam_AZELGEO_%s.fits" % (self.dir, telescope))
def test_create_voltage_patterns(self):
self.createVis()
for telescope in ['VLA', 'ASKAP', 'LOW']:
model = create_image_from_visibility(self.vis, cellsize=self.cellsize, npixel=self.npixel,
override_cellsize=False)
beam = create_vp(model, telescope=telescope)
assert numpy.max(numpy.abs(beam.data.real)) > 0.0
assert numpy.max(numpy.abs(beam.data.imag)) < 1e-15, numpy.max(numpy.abs(beam.data.imag))
def test_create_voltage_patterns_MID_GAUSS(self):
self.createVis()
model = create_image_from_visibility(self.vis, npixel=self.npixel, cellsize=self.cellsize,
override_cellsize=False)
for telescope in ['MID_GAUSS']:
beam = create_vp(model, telescope=telescope, padding=4)
beam_data = beam.data
beam.data = numpy.real(beam_data)
if self.persist: export_image_to_fits(beam, "%s/test_voltage_pattern_real_%s.fits" % (self.dir, telescope))
beam.data = numpy.imag(beam_data)
if self.persist: export_image_to_fits(beam, "%s/test_voltage_pattern_imag_%s.fits" % (self.dir, telescope))
def test_create_voltage_patterns_MID(self):
self.createVis(freq=1.4e9)
model = create_image_from_visibility(self.vis, npixel=self.npixel, cellsize=self.cellsize,
override_cellsize=False)
for telescope in ['MID', 'MID_FEKO_B1', 'MID_FEKO_B2', 'MID_FEKO_Ku']:
beam = create_vp(model, telescope=telescope, padding=4)
beam_data = beam.data
beam.data = numpy.real(beam_data)
beam.wcs.wcs.crval[0] = 0.0
beam.wcs.wcs.crval[1] = 90.0
if self.persist: export_image_to_fits(beam,
"%s/test_voltage_pattern_real_zenith_%s.fits" % (self.dir, telescope))
def test_create_voltage_patterns_MID_rotate(self):
self.createVis(freq=1.4e9)
model = create_image_from_visibility(self.vis, npixel=self.npixel, cellsize=self.cellsize,
polarisation_frame=PolarisationFrame("stokesIQUV"),
override_cellsize=False)
for telescope in ['MID_FEKO_B1', 'MID_FEKO_B2', 'MID_FEKO_Ku']:
beam = create_vp(telescope=telescope)
beam = scale_and_rotate_image(beam, scale=[1.2, 0.8])
self.persist = True
if self.persist: export_image_to_fits(beam,
"%s/test_voltage_pattern_real_prerotate_%s.fits" % (self.dir, telescope))
beam_radec = convert_azelvp_to_radec(beam, model, numpy.pi/4.0)
beam_data = beam_radec.data
beam_radec.data = numpy.real(beam_data)
if self.persist: export_image_to_fits(beam_radec,
"%s/test_voltage_pattern_real_rotate_%s.fits" % (self.dir, telescope))
if __name__ == '__main__':
unittest.main()
|
from csv import DictWriter
from itertools import groupby
import os
import re
from string import capwords
import xml.etree.ElementTree as et
import requests
STATE_PROP_PATTERN = re.compile(r"^Proposition (\d+) - (.+)$")
CITY_MEASURE_PATTERN = re.compile(r"^([A-Z]{1,2})-(.+)$")
def get_results(use_cache=True):
url = "https://www.livevoterturnout.com/Orange/LiveResults/summary_6.xml"
cache_path = 'metadata/.cache'
file_path = f"{cache_path}/{url.split('/')[-1]}"
if os.path.exists(file_path) and use_cache:
with open(file_path) as f:
results = f.read()
else:
r = requests.get(url, verify=False)
r.raise_for_status()
results = r.content
with open(file_path, 'wb') as f:
f.write(results)
return results
def parse_data(xml):
root = et.fromstring(xml)
data = set(
(e.find('RaceID').text, e.find('RaceName').text)
for e in root.findall('./Table')
)
assert len(data) == 181
return sorted(list(data), key=lambda x: x[0])
def has_line_chart(race_id):
return race_id in [
1120, 1130, 1140, 1170, 1180, 1200, 1240,
1320, 1340, 4001
]
def format_data(race_id, race_name):
state_prop_match = STATE_PROP_PATTERN.match(race_name)
city_measure_match = CITY_MEASURE_PATTERN.match(race_name)
if state_prop_match:
num, title = state_prop_match.groups()
formatted_name = f"STATE PROPOSITION {num}"
description = capwords(title)
elif city_measure_match:
identifier, desc = city_measure_match.groups()
formatted_name = f"City Ballot Measure {identifier}"
description = desc
elif "Governing Board Member" in race_name:
if "Governing Board Member," in race_name:
formatted_name = race_name \
.replace("Governing Board Member,", "—") \
.strip()
else:
formatted_name = race_name \
.replace("Governing Board Member", "") \
.strip()
description = "Governing Board Member"
else:
formatted_name = race_name
description = None
formatted_name = formatted_name \
.replace("Orange County", "OC") \
.replace("ORANGE COUNTY", "OC")
return {
'id': race_id,
'title': formatted_name,
'source_title': race_name,
'description': description,
'has_bar_chart': True,
'has_line_chart': has_line_chart(race_id),
}
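# Worked example of the mapping above (the race name is a made-up illustration,
# not actual election data):
#
#   format_data(1120, "Proposition 14 - School Bonds")
#   # -> {'id': 1120,
#   #     'title': 'STATE PROPOSITION 14',
#   #     'source_title': 'Proposition 14 - School Bonds',
#   #     'description': 'School Bonds',
#   #     'has_bar_chart': True,
#   #     'has_line_chart': True}   # 1120 appears in the has_line_chart() list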
def main():
results = get_results()
with open('metadata/races.csv', 'w') as f:
columns = [
'id', 'title', 'source_title', 'description',
'has_bar_chart', 'has_line_chart'
]
writer = DictWriter(f, columns)
writer.writeheader()
for race_id, race_name in parse_data(results):
row = format_data(race_id, race_name)
writer.writerow(row)
if __name__ == '__main__':
main()
|
""" PreAssembly Report.
See FALCON-pbsmrtpipe/pbfalcon/report_preassembly.py for XML version.
"""
# Copied from
# http://swarm/files/depot/branches/springfield/S2.3/software/smrtanalysis/bioinformatics/tools/pbreports/pbreports/report/preassembly.py
from .FastaReader import open_fasta_reader
from .io import capture as syscall
from . import functional
import collections
import glob
import itertools
import logging
import os
import pprint
import re
log = logging.getLogger(__name__)
__version__ = '0.1'
Stats = collections.namedtuple(
'FastaStats', ['nreads', 'total', 'n50', 'p95', 'esize'])
# Copied from pbreports/util.py
# We want to avoid a dependency on pbreports b/c it needs matplotlib.
def get_fasta_readlengths(fasta_file):
"""
Get a sorted list of contig lengths
:return: (tuple)
"""
lens = []
with open_fasta_reader(fasta_file) as f:
for record in f:
lens.append(len(record.sequence))
lens.sort()
return lens
def get_db_readlengths(fn):
"""Use DBdump on a DAZZ_DB.
If DBsplit was run, then we see the filtered reads only, since we do not provide '-u' to DBdump.
"""
call = 'DBdump -h {}'.format(fn)
return list(functional.parsed_readlengths_from_dbdump_output(syscall(call)))
class FastaContainer(object):
def __init__(self, nreads, total, file_name):
self.nreads = nreads
self.total = total
self.file_name = file_name
@staticmethod
def from_file(file_name):
# nreads, total = _compute_values(file_name)
read_lens = get_fasta_readlengths(file_name)
nreads = len(read_lens)
total = sum(read_lens)
return FastaContainer(nreads, total, file_name)
def __str__(self):
return "N {n} Total {t} File: {f}".format(n=self.nreads, t=self.total, f=self.file_name)
def _validate_file(file_name):
if os.path.isfile(file_name):
return os.path.abspath(file_name)
else:
msg = "Unable to find {f}".format(f=file_name)
log.error(msg)
raise IOError(msg)
def cutoff_reads(read_lens, min_read_len):
return [rl for rl in read_lens if rl >= min_read_len]
def read_len_above(read_lens, threshold):
subtotal = 0
# Reverse-order calculation is faster.
for irev, rl in enumerate(reversed(read_lens)):
subtotal += rl
if subtotal >= threshold:
return rl
def percentile(read_lens, p):
# TODO: Fix this when p=1.0
return read_lens[int(len(read_lens) * p)]
def stats_from_sorted_readlengths(read_lens):
nreads = len(read_lens)
total = sum(read_lens)
sum_squares = sum(r * r for r in read_lens)
n50 = read_len_above(read_lens, int(total * 0.50))
p95 = percentile(read_lens, 0.95)
esize = sum_squares / total
#alt_n50 = pbreports.util.compute_n50(read_lens)
# log.info('our n50=%s, pbreports=%s' %(n50, alt_n50)) # Ours is more correct when median is between 2 reads.
return Stats(nreads=nreads, total=total, n50=n50, p95=p95, esize=esize)
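# Small worked example of the statistics above, on toy (already sorted) read lengths.
# For read_lens = [100, 200, 300, 400, 500]:
#   nreads = 5, total = 1500
#   n50    = 400        (walking from the longest read: 500 + 400 >= 750 == total * 0.50)
#   p95    = 500        (read_lens[int(5 * 0.95)] == read_lens[4])
#   esize  = 550000 / 1500 ~= 366.67  (sum of squared lengths over total bases)
def _example_stats():
    """Illustrative only; returns Stats(nreads=5, total=1500, n50=400, p95=500, esize=366.66...)."""
    return stats_from_sorted_readlengths([100, 200, 300, 400, 500])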
def read_lens_from_fofn(fofn_fn):
"""Return sorted list.
"""
fns = [fn.strip() for fn in open(fofn_fn) if fn.strip()]
# get_fasta_readlengths() returns sorted, so sorting the chain is roughly linear.
return list(sorted(itertools.chain.from_iterable(get_fasta_readlengths(fn) for fn in fns)))
def read_lens_from_db(db_fn):
"""Return sorted read-lengths from a DAZZ_DB.
"""
return list(sorted(get_db_readlengths(db_fn)))
def abs_filenames(fofn_fn):
fofn_dir = os.path.dirname(fofn_fn)
def abs_fn(fn):
if os.path.isabs(fn):
return fn
return os.path.join(fofn_dir, fn)
fns = [abs_fn(fn.strip()) for fn in open(fofn_fn) if fn.strip()]
return fns
def metric_fragmentation(preads_fofn):
# https://jira.pacificbiosciences.com/browse/SAT-105
# sed -nr 's;>prolog/([0-9]*)[0-9]/.*;\1;p' %s/*.fasta | sort | uniq -c | awk '{print $1}' | sort | uniq -c
fastas = abs_filenames(preads_fofn)
assert fastas, 'No fasta found in {!r}'.format(preads_fofn)
call = r"""perl -e 'while (<>) { if ( m{>[^/]+/(\d+)\d/} ) { $id{$1}++; } }; while (my ($k, $v) = each %%id) { $counts{$v}++; }; while (my ($k, $v) = each %%counts) { print "$v $k\n"; };' %s""" % (' '.join(fastas))
counts = syscall(call)
return functional.calc_metric_fragmentation(counts)
def metric_truncation(db, preads_fofn):
# https://jira.pacificbiosciences.com/browse/SAT-105
fastas = abs_filenames(preads_fofn)
assert fastas, 'No fasta found in {!r}'.format(preads_fofn)
call = r"""perl -e 'while (<>) { if ( m{>[^/]+/0*(\d+)\d/(\d+)_(\d+)} ) { $lengths{(1 + $1)} += ($3 - $2); } }; while (my ($k, $v) = each %%lengths) { print "$k $v\n"; };' %s""" % (' '.join(fastas))
# The +1 is because DBdump read ids start at 1, but these start at 0.
length_pairs_output = syscall(call)
call = 'DBdump -rh {}'.format(db)
dbdump_output = syscall(call)
return functional.calc_metric_truncation(dbdump_output, length_pairs_output)
def stats_dict(stats_raw_reads, stats_seed_reads, stats_corrected_reads, genome_length, length_cutoff,
fragmentation, truncation):
"""All inputs are paths to fasta files.
genome_length and length_cutoff can be None.
"""
log.info('stats for raw reads: %s' % repr(stats_raw_reads))
log.info('stats for seed reads: %s' % repr(stats_seed_reads))
log.info('stats for corrected reads: %s' % repr(stats_corrected_reads))
kwds = {}
genome_length = -1 if not genome_length else genome_length
kwds['genome_length'] = genome_length
kwds['length_cutoff'] = 0 if length_cutoff is None else length_cutoff
kwds['raw_reads'] = stats_raw_reads.nreads
kwds['raw_bases'] = stats_raw_reads.total
kwds['raw_mean'] = stats_raw_reads.total / stats_raw_reads.nreads
kwds['raw_n50'] = stats_raw_reads.n50
kwds['raw_p95'] = stats_raw_reads.p95
kwds['raw_coverage'] = stats_raw_reads.total / genome_length
kwds['raw_esize'] = stats_raw_reads.esize
kwds['seed_reads'] = stats_seed_reads.nreads
kwds['seed_bases'] = stats_seed_reads.total
kwds['seed_mean'] = stats_seed_reads.total / stats_seed_reads.nreads
kwds['seed_n50'] = stats_seed_reads.n50
kwds['seed_p95'] = stats_seed_reads.p95
kwds['seed_coverage'] = stats_seed_reads.total / genome_length
kwds['seed_esize'] = stats_seed_reads.esize
kwds['preassembled_reads'] = stats_corrected_reads.nreads
kwds['preassembled_bases'] = stats_corrected_reads.total
kwds['preassembled_mean'] = stats_corrected_reads.total / \
stats_corrected_reads.nreads
kwds['preassembled_n50'] = stats_corrected_reads.n50
kwds['preassembled_p95'] = stats_corrected_reads.p95
kwds['preassembled_coverage'] = stats_corrected_reads.total / genome_length
kwds['preassembled_esize'] = stats_corrected_reads.esize
kwds['preassembled_yield'] = stats_corrected_reads.total / \
stats_seed_reads.total
kwds['preassembled_seed_fragmentation'] = fragmentation
kwds['preassembled_seed_truncation'] = truncation
def round_if_float(v):
return v if type(v) is not float else round(v, 3)
result = {k: round_if_float(v) for (k, v) in kwds.items()}
return result
# DEPRECATED
def make_dict(
i_preads_fofn_fn,
i_raw_reads_fofn_fn,
genome_length,
length_cutoff,
fragmentation=-1,
truncation=-1,
):
raw_reads = read_lens_from_fofn(i_raw_reads_fofn_fn)
stats_raw_reads = stats_from_sorted_readlengths(raw_reads)
seed_reads = cutoff_reads(raw_reads, length_cutoff)
stats_seed_reads = stats_from_sorted_readlengths(seed_reads)
preads = read_lens_from_fofn(i_preads_fofn_fn)
stats_preads = stats_from_sorted_readlengths(preads)
report_dict = stats_dict(
stats_raw_reads=stats_raw_reads,
stats_seed_reads=stats_seed_reads,
stats_corrected_reads=stats_preads,
genome_length=genome_length,
length_cutoff=length_cutoff,
fragmentation=fragmentation,
truncation=truncation,
)
return report_dict
def calc_dict(
i_preads_fofn_fn,
i_raw_reads_db_fn,
genome_length,
length_cutoff,
):
try:
frag = metric_fragmentation(i_preads_fofn_fn)
except:
frag = -1.0
log.exception('Using arbitrary fragmentation metric: {}'.format(frag))
try:
trunc = metric_truncation(i_raw_reads_db_fn, i_preads_fofn_fn)
except:
trunc = -1.0
log.exception('Using arbitrary truncation metric: {}'.format(trunc))
raw_reads = read_lens_from_db(i_raw_reads_db_fn)
stats_raw_reads = stats_from_sorted_readlengths(raw_reads)
seed_reads = cutoff_reads(raw_reads, length_cutoff)
stats_seed_reads = stats_from_sorted_readlengths(seed_reads)
preads = read_lens_from_fofn(i_preads_fofn_fn)
stats_preads = stats_from_sorted_readlengths(preads)
report_dict = stats_dict(
stats_raw_reads=stats_raw_reads,
stats_seed_reads=stats_seed_reads,
stats_corrected_reads=stats_preads,
genome_length=genome_length,
length_cutoff=length_cutoff,
fragmentation=frag,
truncation=trunc,
)
log.info('Calculated pre-assembly stats:\n{}'.format(
pprint.pformat(report_dict)))
return report_dict
|
#!/usr/bin/python
# helper functions for your analysis of data from Isobel Hawes, for Le Yan and the biohub
# import libraries
import numpy as np
import math
import scipy
# import pandas to work with data
import pandas as pd
# plotting
import matplotlib.pyplot as plt
import seaborn as sns
# import itertools, reduce to examine intersection of four different sampling dates
from itertools import chain
from itertools import combinations
from itertools import product
from functools import reduce
# processing data
from sklearn.preprocessing import StandardScaler
# import Biopython
from Bio import SeqIO
from Bio.Seq import Seq
from Bio.Alphabet import IUPAC
from Bio.Alphabet import generic_nucleotide
from Bio.SeqUtils import GC
from Bio import pairwise2
from Bio.pairwise2 import format_alignment
# File I/O
import os
# Helper functions
def powerset(iterable):
"powerset([1,2,3]) --> () (1,) (2,) (3,) (1,2) (1,3) (2,3) (1,2,3)"
s = list(iterable)
return chain.from_iterable(combinations(s, r) for r in range(1,len(s)+1))
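# Example of the helper above (note the empty subset is intentionally omitted):
#   list(powerset([1, 2, 3]))
#   # -> [(1,), (2,), (3,), (1, 2), (1, 3), (2, 3), (1, 2, 3)]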
# normalize the rows of a matrix
# note: this will NOT work for normalizing a single 1-D vector; it normalizes each row of a 2-D array
def normalize_rows(arr):
return np.array([x / np.linalg.norm(x) for x in arr])
# for given array (with rows normalized), given the cosine similarity matrix of the rows
def cos_sim_mat(arr):
mat = []
for x in arr:
row = []
for y in arr:
row.append(np.dot(x,y))
mat.append(row)
return np.array(mat)
# gives average cosine similarity for a matrix showing cosine similarities between each pair of vectors
# ignores diagonal of all ones
def avg_cos_sim_diag(arr):
n = len(arr)
return (arr.sum() - n) / (n ** 2 -n)
# gives average cosine similarity for a matrix showing cosine similarities between each pair of vectors
# includes diagonal b/c this is for blocks where no vector is being compared with itself
def avg_cos_sim_no_diag(arr):
r,c = arr.shape
return (arr.sum()) / (r * c)
def mat_cuts(lst):
return [0] + list(np.cumsum(lst))
# this function takes in cos_sim_mat, which is a matrix consisting of the cosine similarities between different vectors,
# and splits them according to the different patients (according to split_lst)
def cos_sim_block_mat(cos_sim_mat, split_lst):
cuts = mat_cuts(split_lst)
outer_lst = []
for i in range(len(split_lst)):
inner_lst = []
for j in range(len(split_lst)):
block_mat = cos_sim_mat[cuts[i]: cuts[i + 1], cuts[j]: cuts[j + 1]]
if i == j:
avg_cos_sim = avg_cos_sim_diag(block_mat)
else:
avg_cos_sim = avg_cos_sim_no_diag(block_mat)
inner_lst.append(avg_cos_sim)
outer_lst.append(inner_lst)
return outer_lst
#remove .csv ending from file names
def kill_csv(name):
return name[:-4]
#sanity check to perform after any operation on a dataframe
def sanity_check_df(df):
print(df.shape)
display(df.head())
display(df.tail())
# gives the amount of peptide sequence p measured in sample s
def extract_from_hiv_by_sample(s,p):
curr_df = hiv1_only_df.loc[s]
arr = np.array(curr_df.loc[curr_df['peptide_sequence'] == p]['reads_per_100K_(rpK)'])
if arr.size == 0:
return 0
else:
return arr[0]
# natural logarithm with minimum value set to 0, as no peptide with rpK less than 1 is reported in the data
def soft_log(x):
return np.log(x + 1)
# create dictionary where the key is the sample id and the value is the set of peptide names measured in that sample with start position s
def date_peptide_s_dictionary(s):
date_peptide_dictionary = {}
for x in file_names_no_csv:
curr_df = hiv1_only_df.loc[x]
date_peptide_dictionary[x] = set(curr_df.loc[curr_df['start'] == s]['peptide'])
return date_peptide_dictionary
# gives list of all peptide SEQUENCES in the data with given starting position s
def peptides_s(s):
return hiv1_only_df.loc[hiv1_only_df['start'] == s]['peptide_sequence'].unique()
# gives the similarity matrix between peptide sequences in the given list pep_lst
# each score is the number of aligned matches divided by the alignment length, so it lies between 0 and 1
# the scoring scheme can certainly be changed
# the formula should be made explicit and illustrated with examples (see the illustration after this function)
def pep_sim_mat(pep_lst):
outer_lst = []
for p1 in pep_lst:
inner_lst = []
for p2 in pep_lst:
alignments = pairwise2.align.globalxx(p1, p2)
inner_lst.append(alignments[0][2] / float(alignments[0][4] - alignments[0][3]))
outer_lst.append(inner_lst)
sim_mat_raw = np.array(outer_lst)
sim_mat_df = pd.DataFrame(sim_mat_raw, index = pep_lst)
sim_mat_clust = sns.clustermap(sim_mat_df, cmap = 'jet', vmin = -1, vmax = 1, cbar_kws = {'label' : 'peptide sequence similarity'})
reorder_inds_peps = sim_mat_clust.dendrogram_col.reordered_ind
reordered_peps = [pep_lst[x] for x in reorder_inds_peps]
reordered_sim_mat = [[sim_mat_raw[x][y] for y in reorder_inds_peps] for x in reorder_inds_peps]
reordered_sim_mat_df = pd.DataFrame(reordered_sim_mat, index = reordered_peps)
reordered_sim_mat_df.columns = reordered_peps
return reordered_sim_mat_df
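# Illustration of the similarity score used above (toy sequences, not real data):
# pairwise2.align.globalxx scores 1 per matched character with no mismatch or gap
# penalty, and each alignment tuple is (seqA, seqB, score, begin, end).
#   aln = pairwise2.align.globalxx("ACGT", "ACG")[0]
#   aln[2] / float(aln[4] - aln[3])   # 3 matches over alignment length 4 -> 0.75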
# for given eigenvectors, principal component start and end range, data, and patient splits, gives the enrichments, the cosine similarities between samples, and the average cosine similarity within and between patients
# data goes in as a dataframe
def component_exploration(evecs, start, stop, data_df, patient_splts):
# first we show the enrichment scores
unnormalized_enrichment = np.matmul(np.array([x for x in data_df.values]), evecs[:,start:stop])
normalized_enrichment = normalize_rows(unnormalized_enrichment)
ax = plt.axes()
y_axis_labels = file_names_no_csv # labels for y-axis
ax.set_title("Enrichment scores of principal components " + str(start) + " to " + str(stop - 1) + " (log rpK)")
sns.heatmap(normalized_enrichment, cmap = 'jet', yticklabels = y_axis_labels, ax = ax, cbar_kws = {'label' : 'enrichment score'})
ax.set_ylim(len(data_df),0) # hard code b/c heatmap no longer works with matplotlib
plt.yticks(rotation=0)
plt.xlabel('principal components')
plt.show()
plt.close()
# then we show the cosine similarity between patients
cos_sim_enriched = cos_similarity_patient_splits(normalized_enrichment, file_names_no_csv, patient_splts, "")  # no extra title suffix
# then we return the average similarity between patients
# should we show stdv as well?
return cos_sim_block_mat(cos_sim_enriched, patient_splts)
# gives the cosine similarity matrix, delineating the splits between patients
#sample_scores gives the vectors for each patient sample
#sample_names give the name of each patient sample
# title is the string that gives the title of each patient
def cos_similarity_patient_splits(sample_scores, sample_names, patient_splts, title):
cos_sim = cos_sim_mat(normalize_rows(sample_scores))
ax = plt.axes()
y_axis_labels = sample_names # sample names
x_axis_labels = sample_names
ax.set_title("Cos. sim. between patients" + title)
# mask = np.zeros_like(cos_sim)
# mask[np.diag_indices_from(mask)] = True
# sns.heatmap(cos_sim, mask = mask, cmap = 'jet', xticklabels = x_axis_labels, yticklabels = y_axis_labels, ax = ax, cbar_kws = {'label' : 'cosine similarity'})
sns.heatmap(cos_sim, cmap = 'jet', vmin = 0, vmax = 1, xticklabels = x_axis_labels, yticklabels = y_axis_labels, ax = ax, cbar_kws = {'label' : 'cosine similarity'})
ax.set_ylim(len(sample_scores),0)
plt.yticks(rotation=0)
ax.hlines(mat_cuts(patient_splts), *ax.get_xlim()) #lines to indicate patient grouping
ax.vlines(mat_cuts(patient_splts), *ax.get_ylim()) #lines to indicate patient grouping
plt.show()
plt.close()
return cos_sim
# reports the mean and standard deviation of each row in a matrix
# meant to be used for cos_sim_block_mat
def mean_and_std_by_row(arr):
mat = []
for r in arr:
m = np.mean(r)
s = np.std(r)
mat.append([m,s])
return mat
# produces a nicely labeled heatmap of data in dataframe (df)
# provide strings for"
# the title of the plot (title)
# what the colormap menas (cbar)
# x-axis and y-axies label (xlabel and label)
def label_heatmap(df, title, cbar, xlabel, ylabel):
ax = plt.axes()
# y_axis_labels = file_names_no_csv
ax.set_title(title)
sns.heatmap(df, cmap = 'jet', ax = ax, cbar_kws = {'label' : cbar})
ax.set_ylim(len(df),0) # hard code b/c heatmap no longer works with matplotlib
plt.yticks(rotation=0)
plt.xlabel(xlabel)
plt.ylabel(ylabel)
plt.show()
plt.close()
# gives heatmap of the average cosine similarity within and between patients
# takes in the title (a string)
# and the cos_sim_mat of all the patient samples with each other
def avg_cos_sim_heatmap(title, cos_sim_mat):
ax = plt.axes()
y_axis_labels = patient_lst
x_axis_labels = patient_lst
ax.set_title('Avg. cos. sim. between patients ' + title)
sns.heatmap(cos_sim_block_mat(cos_sim_mat, split_sample_lst), vmin = 0, vmax = 1, cmap = 'jet', annot=True, ax = ax, yticklabels = y_axis_labels, xticklabels = x_axis_labels, cbar_kws = {'label' : "avg cos sim"})
ax.set_ylim(len(patient_lst),0) # hard code b/c heatmap no longer works with matplotlib
plt.yticks(rotation=0)
plt.show()
plt.close()
|
import matplotlib.pyplot as plt
from matplotlib.path import Path
import matplotlib.patches as patches
verts = [
(0., 0.), # left, bottom
(0., 1.), # left, top
(1., 1.), # right, top
(1., 0.), # right, bottom
(0., 0.), # ignored
]
codes = [Path.MOVETO,
Path.LINETO,
Path.LINETO,
Path.LINETO,
Path.CLOSEPOLY,
]
path = Path(verts, codes)
fig = plt.figure()
ax = fig.add_subplot(111)
patch = patches.PathPatch(path, facecolor='orange', lw=2)
ax.add_patch(patch)
ax.set_xlim(-2,2)
ax.set_ylim(-2,2)
plt.show()
|
from __future__ import print_function
import pytest
from urbansim_templates.shared import CoreTemplateSettings
def test_property_persistence():
"""
Confirm CoreTemplateSettings properties persist through to_dict() and from_dict().
"""
obj = CoreTemplateSettings()
obj.name = 'name'
obj.tags = ['tag1', 'tag2']
obj.notes = 'notes'
obj.autorun = True
obj.template = 'CoolNewTemplate'
obj.template_version = '0.1.dev0'
d = obj.to_dict()
print(d)
obj2 = CoreTemplateSettings.from_dict(d)
assert(obj2.to_dict() == d)
|
"""
Operator to resample a timeseries based dataframe
"""
import pandas as pd
from tasrif.processing_pipeline import ProcessingOperator
from tasrif.processing_pipeline.validators import GroupbyCompatibleValidatorMixin
class ResampleOperator(GroupbyCompatibleValidatorMixin, ProcessingOperator):
"""
Resample and aggregate the rows of a time-indexed data frame.
This operator works on 2D data frames whose columns represent features and whose
index is a datetime index. The returned data frame contains the values aggregated
per resampling period according to the supplied aggregation definition.
Examples
--------
>>> import pandas as pd
>>> from tasrif.processing_pipeline.custom import ResampleOperator
>>> df = pd.DataFrame([
>>> [1, "2020-05-01 00:00:00", 1],
>>> [1, "2020-05-01 01:00:00", 1],
>>> [1, "2020-05-01 03:00:00", 2],
>>> [2, "2020-05-02 00:00:00", 1],
>>> [2, "2020-05-02 01:00:00", 1]],
>>> columns=['logId', 'timestamp', 'sleep_level'])
>>>
>>> df['timestamp'] = pd.to_datetime(df['timestamp'])
>>> df = df.set_index('timestamp')
>>> op = ResampleOperator('D', {'sleep_level': 'mean'})
>>> op.process(df)
[ sleep_level
timestamp
2020-05-01 1.333333
2020-05-02 1.000000]
"""
def __init__(self, rule, aggregation_definition, **resample_args):
"""Creates a new instance of ResampleOperator
Args:
rule (DateOffset, Timedelta, or str):
The offset string or object representing the target conversion.
aggregation_definition (dict or str):
Dictionary mapping features to aggregation functions, or the name of a single
function defining the aggregation behavior ('sum', 'mean', 'ffill', etc.)
**resample_args:
key word arguments passed to pandas DataFrame.resample method
"""
super().__init__()
self.rule = rule
self.aggregation_definition = aggregation_definition
self.resample_args = resample_args
def _process(self, *data_frames):
"""Processes the passed data frame as per the configuration define in the constructor.
Args:
*data_frames (list of pd.DataFrame):
Variable number of pandas dataframes to be processed
Returns:
pd.DataFrame -or- list[pd.DataFrame]
Processed dataframe(s) resulting from applying the operator
"""
processed = []
for data_frame in data_frames:
resampler = data_frame.resample(self.rule, **self.resample_args)
if isinstance(self.aggregation_definition, dict):
data_frame = resampler.agg(self.aggregation_definition)
else:
data_frame = getattr(resampler, self.aggregation_definition)()
if isinstance(data_frame, pd.Series):
data_frame = pd.DataFrame(data_frame)
processed.append(data_frame)
return processed
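# Sketch of the string-aggregation branch of _process above; the class docstring
# shows the dict form. With a plain string, every numeric column is aggregated
# (here both logId and sleep_level). This assumes ProcessingOperator.process
# forwards to _process as in the docstring example:
#
#   op = ResampleOperator('D', 'mean')
#   op.process(df)   # daily means of logId and sleep_level for the docstring df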
|
from setuptools import setup
setup(name='networkmonitor',
version='0.0.1',
description='Console application to let you monitor the status of devices.',
author='James Tombleson',
url='http://github.com/luther38/networkmonitor',
install_requires=[
'pipenv',
'requests',
'click'
],
packages=['networkmonitor'],
entry_points='''
[console_scripts]
networkmonitor=networkmonitor:init
'''
)
|
import numpy as np
from ..data import Dataset
def show_accuracy_loss(net, scaling="scaled", test_dataset_path="../data/processed/extended"):
"""Show performance on the test sets
Args:
net (Keras model): Keras compiled model
scaling (str, optional): dataset scaling suffix; test sets are expected to be named test_<fold>_<scaling>.csv. Defaults to "scaled".
test_dataset_path (str, optional): Dataset folder. Defaults to "../data/processed/extended".
Returns:
List: Accuracy and loss performance
"""
loss = []
accuracy = []
for fold in [5, 7, 8, 9, 10]:
td = Dataset(dataset_path=f"{test_dataset_path}/test_{fold}_{scaling}.csv", test_size=0)
x_test, y_test = td.get_splits()
results = net.evaluate(x_test, y_test, batch_size=128)
loss.append(results[0])
accuracy.append(results[1])
print("\nAccuracy:")
print(f"\tMean: {np.mean(accuracy)} \n\tStandard deviation: {np.std(accuracy)}")
print("\nLoss:")
print(f"\tMean: {np.mean(loss)} \n\tStandard deviation: {np.std(loss)}")
return accuracy, loss
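# Hypothetical usage sketch (the model variable and paths below are assumptions):
# given a compiled Keras classifier `net` and fold CSVs under the dataset folder,
#
#   acc, loss = show_accuracy_loss(net, scaling="scaled",
#                                  test_dataset_path="../data/processed/extended")
#
# `acc` and `loss` then hold one entry per held-out fold (5, 7, 8, 9, 10).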
|
import argparse
import json
import numpy as np
import subprocess as sp
import copy
from librw.loader import Loader
from librw.rw import Rewriter
from librw.analysis.register import RegisterAnalysis
from rwtools.asan.instrument import Instrument
from librw.analysis.stackframe import StackFrameAnalysis
def do_symbolization(input, outfile):
loader = Loader(input)
flist = loader.flist_from_symtab()
loader.load_functions(flist)
slist = loader.slist_from_symtab()
loader.load_data_sections(slist, lambda x: x in Rewriter.DATASECTIONS)
reloc_list = loader.reloc_list_from_symtab()
loader.load_relocations(reloc_list)
global_list = loader.global_data_list_from_symtab()
loader.load_globals_from_glist(global_list)
loader.container.attach_loader(loader)
rw = Rewriter(loader.container, outfile + ".s")
rw.symbolize()
StackFrameAnalysis.analyze(loader.container)
try:
with open(outfile + ".analysis_cache") as fd:
analysis = json.load(fd)
print("[*] Loading analysis cache")
for func, info in analysis.items():
for key, finfo in info.items():
loader.container.functions[int(func)].analysis[key] = dict()
for k, v in finfo.items():
try:
addr = int(k)
except ValueError:
addr = k
loader.container.functions[int(func)].analysis[key][addr] = v
except IOError:
print("[*] Analyzing free registers")
RegisterAnalysis.analyze(loader.container)
analysis = dict()
for addr, func in loader.container.functions.items():
analysis[addr] = dict()
for key, info in func.analysis.items():
analysis[addr][key] = dict()
for k, v in info.items():
analysis[addr][key][k] = list(v)
with open(outfile + ".analysis_cache", "w") as fd:
json.dump(analysis, fd)
return rw
def delta_debug(args):
excluded = set()
all_locations = set()
safe_set = set()
verified = False
err_locs = set()
rewriter = do_symbolization(args.binary, args.outfile)
instrumenter = Instrument(rewriter)
instrumenter.do_instrument()
for func, sites in instrumenter.memcheck_sites.items():
for site in sites:
addr = rewriter.container.functions[func].cache[site]
all_locations.add(addr.address)
while not verified:
instrument_sites = all_locations.difference(excluded).difference(safe_set)
while len(instrument_sites) > 1:
rewriter = do_symbolization(args.binary, args.outfile)
instrumenter = Instrument(rewriter)
# Create round skip
round_skip = set()
for site in instrument_sites:
randsk = np.random.randint(2, size=1)
if randsk:
round_skip.add(site)
# Exclude these sites
instrumenter.skip_instrument = round_skip.union(excluded)
instrumenter.do_instrument()
rewriter.dump()
# Test!
result = test_function(args)
# True case, test case passed. Therefore, something in round_skip is
# causing the error.
if result:
safe_set = safe_set.union(
all_locations.difference(instrumenter.skip_instrument))
excluded = all_locations.difference(round_skip)
else:
# Ok, we failed again, add round skip to excluded to safely exclude
excluded = excluded.union(round_skip)
print(len(all_locations), len(instrument_sites),
len(excluded), len(round_skip))
instrument_sites = all_locations.difference(excluded).difference(safe_set)
print("[*] Localized Error to:", instrument_sites)
# Verify the solution set
err_locs.update(instrument_sites)
excluded = copy.copy(err_locs)
rewriter = do_symbolization(args.binary, args.outfile)
instrumenter = Instrument(rewriter)
instrumenter.skip_instrument = excluded
print("[*] Verifying Solution:", excluded)
instrumenter.do_instrument()
rewriter.dump()
result = test_function(args)
if result:
verified = True
print("Error set:", [hex(x) for x in err_locs])
else:
print("[X] Verification Failed. Retrying.")
SPEC_LOC = "/home/number_four/projects/spec"
SPEC_CMD = "cd " + SPEC_LOC + " && source shrc && runspec --config asan.cfg --nobuild"
MAKE_CMD = "cd tests/SPECCPU2006 && make && make asan_install"
def test_function(args):
try:
print("[*] Building ...")
sp.check_call(MAKE_CMD, shell=True)
print("[*] Running SPEC CMD")
proc = sp.Popen(["/bin/bash", "-c", SPEC_CMD], stdout=sp.PIPE, stderr=sp.PIPE)
proc.wait()
output, err = proc.communicate()
except sp.CalledProcessError:
return False
err_str = "****************************************"
#err_str = "*** Miscompare of su3imp.out;"
output = output.decode('utf-8').split("\n")
print("\n".join(output))
if any([x.strip().startswith(err_str) for x in output]):
#if any([x.strip().startswith("*** Miscompare") for x in output]):
print("Miscompare found! Failed Test!")
return False
return True
if __name__ == "__main__":
argp = argparse.ArgumentParser()
argp.add_argument("binary", type=str, help="Input binary to instrument")
argp.add_argument("outfile", type=str, help="Input binary to instrument")
argp.add_argument(
"--compile", type=str, help="Compile command for the binary")
argp.add_argument(
"--gcc", action='store_true', help="Use gcc to compile the final binary")
argp.add_argument(
"--clang", action='store_true', help="Use clang to compile the final binary")
args = argp.parse_args()
delta_debug(args)
|
print("Hello world")
print("What is the score of india vs england match?:????")
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'ayarlar.ui'
#
# Created by: PyQt5 UI code generator 5.15.1
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Ayarlar(object):
def setupUi(self, Ayarlar):
Ayarlar.setObjectName("Ayarlar")
Ayarlar.resize(310, 190)
self.verticalLayout = QtWidgets.QVBoxLayout(Ayarlar)
self.verticalLayout.setContentsMargins(0, 0, 0, 0)
self.verticalLayout.setSpacing(0)
self.verticalLayout.setObjectName("verticalLayout")
self.frame = QtWidgets.QFrame(Ayarlar)
self.frame.setMaximumSize(QtCore.QSize(16777215, 45))
self.frame.setStyleSheet("#frame{\n"
"background-color:rgb(52,52,52);\n"
"border-radius:16px;\n"
"}")
self.frame.setFrameShape(QtWidgets.QFrame.NoFrame)
self.frame.setFrameShadow(QtWidgets.QFrame.Raised)
self.frame.setObjectName("frame")
self.verticalLayout_4 = QtWidgets.QVBoxLayout(self.frame)
self.verticalLayout_4.setContentsMargins(-1, 0, 5, 0)
self.verticalLayout_4.setSpacing(0)
self.verticalLayout_4.setObjectName("verticalLayout_4")
self.frame_4 = QtWidgets.QFrame(self.frame)
self.frame_4.setMinimumSize(QtCore.QSize(250, 35))
self.frame_4.setMaximumSize(QtCore.QSize(16777215, 35))
self.frame_4.setStyleSheet("#frame_4{\n"
"background-color:rgb(25, 25, 25);\n"
"border-radius:15px\n"
"}")
self.frame_4.setFrameShape(QtWidgets.QFrame.NoFrame)
self.frame_4.setFrameShadow(QtWidgets.QFrame.Raised)
self.frame_4.setObjectName("frame_4")
self.verticalLayout_3 = QtWidgets.QVBoxLayout(self.frame_4)
self.verticalLayout_3.setContentsMargins(0, 0, 0, 0)
self.verticalLayout_3.setSpacing(0)
self.verticalLayout_3.setObjectName("verticalLayout_3")
self.label = QtWidgets.QLabel(self.frame_4)
self.label.setStyleSheet("#label{\n"
" font: 15pt \"Ubuntu Mono\";\n"
" color: rgb(255, 255, 255);\n"
"}")
self.label.setAlignment(QtCore.Qt.AlignCenter)
self.label.setObjectName("label")
self.verticalLayout_3.addWidget(self.label)
self.verticalLayout_4.addWidget(self.frame_4)
self.verticalLayout.addWidget(self.frame)
self.frame_2 = QtWidgets.QFrame(Ayarlar)
self.frame_2.setStyleSheet("#frame_2{\n"
" background-color: rgb(52, 52, 52);\n"
"border-radius:15px\n"
"\n"
"}")
self.frame_2.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.frame_2.setFrameShadow(QtWidgets.QFrame.Raised)
self.frame_2.setObjectName("frame_2")
self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.frame_2)
self.verticalLayout_2.setObjectName("verticalLayout_2")
self.groupBox = QtWidgets.QGroupBox(self.frame_2)
self.groupBox.setStyleSheet("#groupBox{\n"
"color:white;\n"
"\n"
"}")
self.groupBox.setAlignment(QtCore.Qt.AlignCenter)
self.groupBox.setFlat(True)
self.groupBox.setObjectName("groupBox")
self.horizontalLayout = QtWidgets.QHBoxLayout(self.groupBox)
self.horizontalLayout.setContentsMargins(40, 0, 0, 0)
self.horizontalLayout.setSpacing(0)
self.horizontalLayout.setObjectName("horizontalLayout")
self.TRY = QtWidgets.QRadioButton(self.groupBox)
self.TRY.setStyleSheet("#TRY{\n"
"color:white;\n"
"}")
self.TRY.setChecked(True)
self.TRY.setObjectName("TRY")
self.horizontalLayout.addWidget(self.TRY)
self.USD = QtWidgets.QRadioButton(self.groupBox)
self.USD.setStyleSheet("#USD{\n"
"color:white;\n"
"}")
self.USD.setObjectName("USD")
self.horizontalLayout.addWidget(self.USD)
self.EUR = QtWidgets.QRadioButton(self.groupBox)
self.EUR.setStyleSheet("#EUR{\n"
"color:white;\n"
"}")
self.EUR.setObjectName("EUR")
self.horizontalLayout.addWidget(self.EUR)
self.verticalLayout_2.addWidget(self.groupBox)
self.frame_3 = QtWidgets.QFrame(self.frame_2)
self.frame_3.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.frame_3.setFrameShadow(QtWidgets.QFrame.Raised)
self.frame_3.setObjectName("frame_3")
self.gridLayout = QtWidgets.QGridLayout(self.frame_3)
self.gridLayout.setObjectName("gridLayout")
self.pushButton = QtWidgets.QPushButton(self.frame_3)
self.pushButton.setMinimumSize(QtCore.QSize(48, 48))
self.pushButton.setMaximumSize(QtCore.QSize(48, 48))
self.pushButton.setText("")
self.pushButton.setObjectName("pushButton")
self.gridLayout.addWidget(self.pushButton, 0, 0, 1, 1)
self.pushButton_2 = QtWidgets.QPushButton(self.frame_3)
self.pushButton_2.setMinimumSize(QtCore.QSize(48, 48))
self.pushButton_2.setMaximumSize(QtCore.QSize(48, 48))
self.pushButton_2.setText("")
self.pushButton_2.setObjectName("pushButton_2")
self.gridLayout.addWidget(self.pushButton_2, 0, 1, 1, 1)
self.verticalLayout_2.addWidget(self.frame_3)
self.verticalLayout.addWidget(self.frame_2)
self.retranslateUi(Ayarlar)
QtCore.QMetaObject.connectSlotsByName(Ayarlar)
def retranslateUi(self, Ayarlar):
_translate = QtCore.QCoreApplication.translate
Ayarlar.setWindowTitle(_translate("Ayarlar", "Ayarlar"))
self.label.setText(_translate("Ayarlar", "AYARLAR"))
self.groupBox.setTitle(_translate("Ayarlar", "Para Birimleri"))
self.TRY.setText(_translate("Ayarlar", "TRY"))
self.USD.setText(_translate("Ayarlar", "USD"))
self.EUR.setText(_translate("Ayarlar", "EUR"))
|
# -*- coding: utf-8 -*-
# Command line utilties and helper functions
from clint.textui import puts, colored, indent
from getpass import getpass
from blockcypher.utils import (is_valid_address_for_coinsymbol,
coin_symbol_from_mkey, format_output, UNIT_CHOICES)
from blockcypher.constants import COIN_SYMBOL_MAPPINGS, COIN_SYMBOL_LIST
from bitmerchant.wallet.keys import PrivateKey
import json
from datetime import datetime
DEFAULT_PROMPT = '฿'
BCWALLET_PRIVPIPE_EXPLANATION = "You can also pipe in your HD wallet (you could modify this to hide your HD wallet from your bash history and/or store it in an encrypted file):\n"
EXPLAINER_COPY = [
['Multi-Currency', 'Supports Bitcoin (and Testnet), Litecoin, Dogecoin, and BlockCypher Testnet.'],
['Nearly Trustless', 'Keys are generated from the seed and transactions are signed locally for trustless use.'],
['No Key Pool', 'The seed is not stored locally, the app is booted with the user supplying the master key so the filesystem is never used.'],
['Hard to Mess Up', "As long as you don't lose or share your master private key, everything else is simple."],
['Accurate Transaction Fees', 'Smart calculation lets user decide how long until their transaction will make it into a block.'],
['Airgap Usage', 'Can be booted with the public key in watch-only mode, which is great for fetching transaction info to sign offline with a more secure machine.'],
['Very Few LoC', 'Blockchain heavy lifting powered by BlockCypher, which leads to massive reduction in client-side code used for ease of auditing.'],
]
class DateTimeEncoder(json.JSONEncoder):
# http://stackoverflow.com/a/27058505/1754586
def default(self, o):
if isinstance(o, datetime):
return o.isoformat()
return json.JSONEncoder.default(self, o)
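# Example of the encoder above: datetimes serialize to ISO-8601 strings.
#   json.dumps({'now': datetime(2015, 1, 2, 3, 4, 5)}, cls=DateTimeEncoder)
#   # -> '{"now": "2015-01-02T03:04:05"}'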
def debug_print(to_print):
if type(to_print) is dict:
to_print = json.dumps(to_print, cls=DateTimeEncoder, indent=2)
puts(colored.yellow(str(to_print)))
def choice_prompt(user_prompt=DEFAULT_PROMPT, acceptable_responses=[],
default_input=None, show_default=True, quit_ok=False):
assert len(acceptable_responses) > 0, acceptable_responses
acceptable_responses = [str(x) for x in acceptable_responses]
if quit_ok:
acceptable_responses.extend(['q', 'Q'])
if default_input and show_default:
prompt_to_use = '%s [%s]: ' % (user_prompt, default_input)
else:
prompt_to_use = '%s: ' % user_prompt
user_input = raw_input(prompt_to_use).strip()
if not user_input and default_input in acceptable_responses:
return default_input
if user_input not in acceptable_responses:
puts(colored.red('Sorry, %s is not a valid entry. Please try again.' % user_input))
return choice_prompt(
user_prompt=user_prompt,
acceptable_responses=acceptable_responses,
default_input=default_input,
show_default=show_default,
quit_ok=quit_ok,
)
return user_input
def get_user_entropy(user_prompt=DEFAULT_PROMPT):
return getpass('%s: ' % user_prompt)
def get_crypto_qty(max_num, input_type, user_prompt=DEFAULT_PROMPT,
default_input=None, show_default=False, quit_ok=False):
assert input_type in UNIT_CHOICES, input_type
if default_input and show_default:
prompt_to_use = '%s [%s]: ' % (user_prompt, default_input)
else:
prompt_to_use = '%s: ' % user_prompt
user_input = raw_input(prompt_to_use).strip()
if default_input and not user_input:
return int(default_input)
if quit_ok and user_input in ['q', 'Q']:
return False
try:
user_input_cleaned = user_input.replace(',', '')
if user_input_cleaned == '-1':
# for sweeping
return -1
user_float = float(user_input_cleaned)
except ValueError:
if not user_input_cleaned:
puts(colored.red('No entry. Please enter something.'))
else:
puts(colored.red('%s is not an integer. Please try again.' % user_input))
return get_crypto_qty(
max_num=max_num,
input_type=input_type,
user_prompt=user_prompt,
default_input=default_input,
show_default=show_default,
quit_ok=quit_ok,
)
if user_float <= 0:
puts(colored.red('%s <= 0. Please try again.' % (
format_output(user_float, output_type=input_type),
)))
return get_crypto_qty(
max_num=max_num,
input_type=input_type,
user_prompt=user_prompt,
default_input=default_input,
show_default=show_default,
quit_ok=quit_ok,
)
if max_num is not None and user_float > max_num:
puts(colored.red('%s > %s. Please try again.' % (
format_output(user_float, output_type=input_type),
format_output(max_num, output_type=input_type),
)))
return get_crypto_qty(
max_num=max_num,
input_type=input_type,
user_prompt=user_prompt,
default_input=default_input,
show_default=show_default,
quit_ok=quit_ok,
)
return user_float
def get_int(max_int, min_int=1, user_prompt=DEFAULT_PROMPT, default_input=None,
show_default=False, quit_ok=False):
if default_input and show_default:
prompt_to_use = '%s [%s]: ' % (user_prompt, default_input)
else:
prompt_to_use = '%s: ' % user_prompt
user_input = raw_input(prompt_to_use).strip()
if default_input and not user_input:
return int(default_input)
if quit_ok and user_input in ['q', 'Q']:
return False
try:
user_int = int(user_input.replace(',', ''))
except ValueError:
puts(colored.red('%s is not an integer. Please try again.' % user_input))
return get_int(
max_int=max_int,
min_int=min_int,
user_prompt=user_prompt,
default_input=default_input,
show_default=show_default,
quit_ok=quit_ok,
)
if user_int < min_int:
puts(colored.red('%s < %s. Please try again.' % (
user_int,
min_int,
)))
return get_int(
max_int=max_int,
min_int=min_int,
user_prompt=user_prompt,
default_input=default_input,
show_default=show_default,
quit_ok=quit_ok,
)
if user_int > max_int:
puts(colored.red('%s > %s. Please try again.' % (
user_int,
max_int,
)))
return get_int(
max_int=max_int,
min_int=min_int,
user_prompt=user_prompt,
default_input=default_input,
show_default=show_default,
quit_ok=quit_ok,
)
return user_int
def get_crypto_address(coin_symbol, user_prompt=DEFAULT_PROMPT, quit_ok=False):
display_shortname = COIN_SYMBOL_MAPPINGS[coin_symbol]['display_shortname']
destination_address = raw_input('%s: ' % user_prompt).strip()
if not destination_address:
err_str = 'No entry, please enter something'
if quit_ok:
err_str += " (or Q to quit)"
puts(colored.red(err_str))
return get_crypto_address(
coin_symbol=coin_symbol,
user_prompt=user_prompt,
quit_ok=quit_ok,
)
if quit_ok and destination_address in ['q', 'Q']:
return False
if is_valid_address_for_coinsymbol(destination_address,
coin_symbol=coin_symbol):
return destination_address
else:
puts('Invalid %s address, try again' % display_shortname)
return get_crypto_address(
coin_symbol=coin_symbol,
user_prompt=user_prompt,
quit_ok=quit_ok,
)
def get_wif_obj(network, user_prompt=DEFAULT_PROMPT, quit_ok=False):
user_input = raw_input('%s: ' % user_prompt).strip()
if quit_ok and user_input in ['q', 'Q']:
return False
try:
return PrivateKey.from_wif(user_input, network=network)
except Exception:
puts(colored.red('Invalid WIF `%s`, Please Try Again' % user_input))
return get_wif_obj(network=network, user_prompt=user_prompt, quit_ok=quit_ok)
def coin_symbol_chooser(user_prompt=DEFAULT_PROMPT, quit_ok=True):
ACTIVE_COIN_SYMBOL_LIST = [x for x in COIN_SYMBOL_LIST if x != 'uro']
for cnt, coin_symbol_choice in enumerate(ACTIVE_COIN_SYMBOL_LIST):
with indent(2):
puts(colored.cyan('%s: %s' % (
cnt+1,
COIN_SYMBOL_MAPPINGS[coin_symbol_choice]['display_name'],
)))
if ACTIVE_COIN_SYMBOL_LIST[4] == 'bcy':
default_input = 5
show_default = True
else:
default_input = None
show_default = False
coin_symbol_int = get_int(
min_int=1,
user_prompt=user_prompt,
max_int=len(ACTIVE_COIN_SYMBOL_LIST),
default_input=default_input,
show_default=show_default,
quit_ok=quit_ok,
)
if not coin_symbol_int:
return False
else:
return ACTIVE_COIN_SYMBOL_LIST[coin_symbol_int-1]
def txn_preference_chooser(user_prompt=DEFAULT_PROMPT):
puts('How quickly do you want this transaction to confirm? The higher the miner preference, the higher the transaction fee.')
TXN_PREFERENCES = (
('high', '1-2 blocks to confirm'),
('medium', '3-6 blocks to confirm'),
('low', '7+ blocks to confirm'),
# ('zero', 'no fee, may not ever confirm (advanced users only)'),
)
for cnt, pref_desc in enumerate(TXN_PREFERENCES):
pref, desc = pref_desc
with indent(2):
puts(colored.cyan('%s (%s priority): %s' % (cnt+1, pref, desc)))
choice_int = choice_prompt(
user_prompt=user_prompt,
acceptable_responses=range(1, len(TXN_PREFERENCES)+1),
default_input='1', # high pref
show_default=True,
)
return TXN_PREFERENCES[int(choice_int)-1][0]
def confirm(user_prompt=DEFAULT_PROMPT, default=None):
if default is True:
prompt_to_use = user_prompt + ' [Y/n]: '
elif default is False:
prompt_to_use = user_prompt + ' [y/N]: '
elif default is None:
prompt_to_use = user_prompt + ': '
else:
raise Exception('Bad Default Value: %s' % default)
user_input = raw_input(prompt_to_use).strip()
if not user_input:
return default
elif user_input.lower() == 'y':
return True
elif user_input.lower() == 'n':
return False
else:
puts(colored.red('`%s` is not a valid entry. Please enter either Y or N.' % user_input))
return confirm(user_prompt=user_prompt, default=default)
def get_public_wallet_url(mpub):
    # subchain indices set at 0 and 1
return 'https://live.blockcypher.com/%s/xpub/%s/?subchain-indices=0-1' % (
coin_symbol_from_mkey(mpub),
mpub,
)
# TODO: move to blockcypher python library
def first4mprv_from_mpub(mpub):
coin_symbol = coin_symbol_from_mkey(mkey=mpub)
return COIN_SYMBOL_MAPPINGS[coin_symbol]['first4_mprv']
def print_bcwallet_basic_pub_opening(mpub):
puts("You've opened your HD wallet in PRIVATE key mode, so you CAN sign transactions.")
puts("If you like, you can always open your HD wallet in PUBLIC key mode like this:\n")
with indent(2):
puts(colored.magenta('$ bcwallet --wallet=%s\n' % mpub))
def print_pubwallet_notice(mpub):
puts("You've opened your HD wallet in PUBLIC key mode, so you CANNOT sign transactions.")
puts("To sign transactions, open your HD wallet in private key mode like this:\n")
priv_to_display = first4mprv_from_mpub(mpub=mpub) + '...'
print_bcwallet_basic_priv_opening(priv_to_display=priv_to_display)
def print_bcwallet_basic_priv_opening(priv_to_display):
with indent(4):
puts(colored.magenta('$ bcwallet --wallet=%s\n' % priv_to_display))
def print_bcwallet_piped_priv_opening(priv_to_display):
with indent(4):
puts(colored.magenta('$ echo %s | bcwallet\n' % priv_to_display))
def print_childprivkey_warning():
puts("\nNOTE:")
puts("Do not reveal your private keys to anyone!")
puts("One quirk of HD wallets is that if an attacker learns any of your non-hardened child private keys as well as your master public key then the attacker can derive all of your private keys and steal all of your funds.""")
def print_traversal_warning():
puts("\nNOTE:")
puts("There are over a billion keys (and corresponding addresses) that can easily be derived from your master key, but that doesn't mean BlockCypher will automatically detect a transaction sent to any one of them.")
puts("By default, BlockCypher will look 10 addresses ahead of the latest transaction on each subchain.")
puts("For example, if the transaction that has traversed furthest on the change address chain is at m/0/5, then BlockCypher will automatically detect any transactions sent to m/0/0-m/0/15.")
puts("For normal bcwallet users you never have to think about this, but if you're in this section manually traversing keys then it's essential to understand.")
puts("This feature should primarily be considered a last resource to migrate away from bcwallet if BlockCypher is down.")
|
@when(u'I submit my employment status as "{type}"')
def step_impl(context, type):
context.execute_steps(u'''
When I choose "%s" from "you_are"
And I press "Continue"
''' % type)
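# A feature file would trigger this step with a line such as (the status value
# here is only illustrative): When I submit my employment status as "Employed"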
@when(u'I submit my benefits as "{type}"')
def step_impl(context, type):
context.execute_steps(u'''
When I choose "%s" from "benefit_type"
And I choose "$pay_period" from "pay_period"
And I fill in "pay_amount" with "$benefit_amount"
And I press "Continue"
''' % type)
@when(u'I submit my home pay amount')
def step_impl(context):
context.execute_steps(u'''
When I choose "$pay_period" from "pay_period"
And I fill in "pay_amount" with "$pay_amount"
And I press "Continue"
''')
@when(u'I choose no hardship')
def step_impl(context):
context.execute_steps(u'When I choose "False" from "hardship"')
@then(u'I should see my calculated weekly income of "{amount}"')
def step_impl(context, amount):
context.execute_steps(u'''
Then the browser's URL should be "plea/your_income/"
And I should see "Total weekly income"
And I should see "%s"
''' % str(amount))
@then(u'I should see be asked to review my plea')
def step_impl(context):
context.execute_steps(u'''
Then the browser's URL should be "plea/review/"
And I should see "Review the information you've given before making your pleas."
''')
|
# -*- coding: utf-8 -*-
import logging
import logging.config
import time
import threading
from pynetdicom2 import uids
from . import ae
from . import config
from . import event_bus
class Server:
"""Server class itself.
Sets up event bus. Initializes all components and passes config to them.
:ivar config: server config
:ivar bus: event bus
:ivar ae: AE instance
:ivar components: all available components
"""
def __init__(self, _config: config.Config):
"""Initializes server
:param _config: server configuration
:type _config: config.Config
"""
self.config = _config
logging.config.dictConfig(self.config.log)
self.bus = event_bus.EventBus()
self.ae = None
        self.components = list(self.initialize_components())
def start(self):
"""Starts the server.
Broadcasts `ON_START` and `ON_STARTED` events.
"""
self.bus.broadcast(event_bus.DefaultChannels.ON_START)
self.ae = ae.AE(self.bus, self.config.ae)
threading.Thread(target=self.ae.serve_forever).start()
# TODO: Wait for actual AE to start
self.bus.broadcast(event_bus.DefaultChannels.ON_STARTED)
def start_with_block(self):
"""Starts the server and blocks current thread."""
self.start()
try:
while True:
time.sleep(0.1)
except KeyboardInterrupt:
            logging.info('Server exiting due to keyboard interrupt')
except SystemExit:
logging.info('Server exiting due to SystemExit')
finally:
self.exit()
def exit(self):
"""Handles server exit.
Broadcasts `ON_EXIT` event.
"""
self.bus.broadcast_nothrow(event_bus.DefaultChannels.ON_EXIT)
self.ae.quit()
    def initialize_components(self):
"""Component initialization
:yield: initializes components
:rtype: component.Component
"""
for component, _config in self.config.components.items():
is_on = _config.get('on', False)
if not is_on:
# Component is disabled
continue
factory = config.COMPONENT_REGISTRY.get(component)
            if factory is None:
                # TODO: add dynamic component loading
                # Skip unknown components rather than calling a None factory.
                continue
component = factory(self.bus, _config)
yield component
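# Usage sketch (how the Config instance is built is assumed to happen elsewhere):
#   server = Server(my_config)
#   server.start_with_block()  # blocks until Ctrl-C, then broadcasts ON_EXIT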
|
"""
AppConfig
"""
from django.apps import AppConfig
class GradesConfig(AppConfig):
"""
App config for this app
"""
name = "grades"
def ready(self):
"""
Ready handler. Import signals.
"""
import grades.signals # pylint: disable=unused-import
|
from io import TextIOBase
import os.path
import operator
from itertools import combinations, permutations
from functools import reduce, partial
from math import isfinite, prod
from collections import Counter
INPUT=os.path.join(os.path.dirname(__file__), "input.txt")
with open(INPUT) as f:
data = f.read()
test="""2199943210
3987894921
9856789892
8767896789
9899965678"""
def get_adj(m, j, i):
if i > 0:
yield (j,i-1),int(m[j][i-1])
if i < len(m[j])-1:
yield (j,i+1),int(m[j][i+1])
if j > 0:
yield (j-1,i),int(m[j-1][i])
if j < len(m)-1:
yield (j+1,i),int(m[j+1][i])
def get_low_points(m):
for j, row in enumerate(m):
for i, c in enumerate(row):
c = int(c)
if all(n > c for _,n in get_adj(m,j,i)):
yield (j,i),c
def part1(data):
return sum(1+v for _,v in get_low_points(data.splitlines()))
print(part1(test))
print(part1(data))
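# Part 2: flood-fill each basin with a BFS from its low point, skipping cells of
# height 9, then multiply together the sizes of the three largest basins.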
def part2(data):
m = data.splitlines()
low_points = [p for p,_ in get_low_points(m)]
visited = set()
all_basis=[]
for p in low_points:
assert p not in visited
visited.add(p)
q = [p]
basis = set([p])
while q:
p = q.pop(0)
for pn,n in get_adj(m, *p):
if n == 9:
continue
if pn in visited:
continue
visited.add(pn)
basis.add(pn)
q.append(pn)
all_basis.append(len(basis))
all_basis.sort()
return prod(all_basis[-3:])
print(part2(test))
print(part2(data))
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.11 on 2018-09-27 19:50
from __future__ import unicode_literals
import awx.main.fields
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('main', '0052_v340_remove_project_scm_delete_on_next_update'),
]
operations = [
migrations.AddField(
model_name='workflowjob',
name='char_prompts',
field=models.JSONField(blank=True, null=True, default=dict),
),
migrations.AddField(
model_name='workflowjob',
name='inventory',
field=models.ForeignKey(
blank=True, default=None, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='workflowjobs', to='main.Inventory'
),
),
migrations.AddField(
model_name='workflowjobtemplate',
name='ask_inventory_on_launch',
field=awx.main.fields.AskForField(blank=True, default=False),
),
migrations.AddField(
model_name='workflowjobtemplate',
name='inventory',
field=models.ForeignKey(
blank=True,
default=None,
help_text='Inventory applied to all job templates in workflow that prompt for inventory.',
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name='workflowjobtemplates',
to='main.Inventory',
),
),
]
|
# Copyright (c) 2019 Horizon Robotics. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import gin
import torch
import alf
from alf.environments import suite_mario, alf_environment
from alf.environments import thread_environment, parallel_environment
import alf.nest as nest
class SuiteMarioTest(alf.test.TestCase):
def setUp(self):
super().setUp()
if not suite_mario.is_available():
self.skipTest('suite_mario is not available.')
else:
gin.clear_config()
def tearDown(self):
super().tearDown()
self._env.close()
def test_process_env(self):
game = 'SuperMarioBros-Nes'
self._env = suite_mario.load(
game=game, state='Level1-1', wrap_with_process=True)
self.assertIsInstance(self._env, alf_environment.AlfEnvironment)
self.assertEqual(torch.uint8, self._env.observation_spec().dtype)
self.assertEqual((4, 84, 84), self._env.observation_spec().shape)
actions = self._env.action_spec().sample()
for _ in range(10):
time_step = self._env.step(actions)
def test_thread_env(self):
game = 'SuperMarioBros-Nes'
self._env = thread_environment.ThreadEnvironment(
lambda: suite_mario.load(
game=game, state='Level1-1', wrap_with_process=False))
self.assertIsInstance(self._env, alf_environment.AlfEnvironment)
self.assertEqual(torch.uint8, self._env.observation_spec().dtype)
self.assertEqual((4, 84, 84), self._env.observation_spec().shape)
actions = self._env.action_spec().sample()
for _ in range(10):
time_step = self._env.step(actions)
def test_parallel_env(self):
game = 'SuperMarioBros-Nes'
env_num = 8
def ctor(game, env_id=None):
return suite_mario.load(
game=game, state='Level1-1', wrap_with_process=False)
constructor = functools.partial(ctor, game)
self._env = parallel_environment.ParallelAlfEnvironment(
[constructor] * env_num)
self.assertTrue(self._env.batched)
self.assertEqual(self._env.batch_size, env_num)
self.assertEqual(torch.uint8, self._env.observation_spec().dtype)
self.assertEqual((4, 84, 84), self._env.observation_spec().shape)
actions = self._env.action_spec().sample(outer_dims=(env_num, ))
for _ in range(10):
time_step = self._env.step(actions)
if __name__ == '__main__':
alf.test.main()
|
"""
SQLAlchemy-JSONAPI Serializer.
Colton J. Provias - cj@coltonprovias.com
http://github.com/coltonprovias/sqlalchemy-jsonapi
Licensed with MIT License
"""
from functools import wraps
from sqlalchemy.orm.base import MANYTOONE, ONETOMANY
def as_relationship(to_many=False, linked_key=None, link_key=None,
columns=[]):
"""
Turn a method into a pseudo-relationship for serialization.
Arguments:
- to_many: Whether the relationship is to-many or to-one.
- linked_key: The key used in the linked section of the serialized data
- link_key: The key used in the link section in the model's serialization
- columns: Columns tied to this relationship
"""
def wrapper(f):
@wraps(f)
def wrapped(*args, **kwargs):
return f(*args, **kwargs)
if to_many:
wrapped.direction = ONETOMANY
else:
wrapped.direction = MANYTOONE
wrapped.key = link_key or wrapped.__name__
wrapped.linked_key = linked_key or wrapped.key
wrapped.local_columns = columns
return wrapped
return wrapper
class JSONAPIMixin:
""" Mixin that enables serialization of a model. """
# Columns to be excluded from serialization
jsonapi_columns_exclude = []
# Extra columns to be included with serialization
jsonapi_columns_include = []
# Hook for overriding column data
jsonapi_columns_override = {}
# Relationships to be excluded from serialization
jsonapi_relationships_exclude = []
# Extra relationships to be included with serialization
jsonapi_relationships_include = []
# Hook for overriding relationships
jsonapi_relationships_override = {}
def id(self):
""" JSON API recommends having an id for each resource. """
        raise NotImplementedError
def jsonapi_can_view(self):
""" Return True if this model can be serialized. """
return True
class SkipType(object):
""" Used for skipping types during conversion. """
pass
class JSONAPI:
""" The main JSONAPI serializer class. """
# A dictionary of converters for serialization
converters = {}
def __init__(self, model):
"""
Create a serializer object.
Arguments:
- model: Should be a SQLAlchemy model class.
"""
self.model = model
def inflector(self, to_inflect):
"""
Format text for use in keys in serialization.
Override this if you need to meet requirements on your front-end.
Arguments:
- to_inflect: The string to be inflected
Returns the altered string.
"""
return to_inflect
def convert(self, item, to_convert):
"""
Convert from Python objects to JSON-friendly values.
Arguments:
- item: A SQLAlchemy model instance
- to_convert: Python object to be converted
Returns either a string, int, float, bool, or SkipType.
"""
if to_convert is None:
return None
if isinstance(to_convert, (str, int, float, bool)):
return to_convert
if callable(to_convert):
return to_convert(item)
        converter = self.converters.get(type(to_convert).__name__)
        if converter is not None:
            return converter(to_convert)
return SkipType
def get_api_key(self, model):
"""
Generate a key for a model.
Arguments:
- model: SQLAlchemy model instance
Returns an inflected key that is generated from jsonapi_key or from
__tablename__.
"""
api_key = getattr(model, 'jsonapi_key', model.__tablename__)
return self.inflector(api_key)
def sort_query(self, model, query, sorts):
"""
Sort a query based upon provided sorts.
Arguments:
- model: SQLAlchemy model class
- query: Instance of Query or AppenderQuery
- sorts: A dictionary of sorts keyed by the api_key for each model
Returns a query with appropriate order_by appended.
"""
if sorts is None:
return query
        api_key = self.get_api_key(model)
        if api_key not in sorts:
            return query
        for sort in sorts[api_key]:
if sort.startswith('-'):
sort_by = getattr(model, sort[1:]).desc()
else:
sort_by = getattr(model, sort)
query = query.order_by(sort_by)
return query
def parse_include(self, include):
"""
Parse the include query parameter.
Arguments:
- include: A list of resources to be included by link_keys
Returns a dictionary of the parsed include list. A None value
signifies that the resource itself should be dumped.
"""
ret = {}
for item in include:
if '.' in item:
local, remote = item.split('.', maxsplit=1)
else:
local = item
remote = None
if local not in ret.keys():
ret[local] = []
ret[local].append(remote)
return ret
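    # For example, parse_include(['comments', 'author.posts']) returns
    # {'comments': [None], 'author': ['posts']}: a None entry means the resource
    # itself should be dumped, while strings are passed on as nested includes.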
def dump_column_data(self, item, fields):
"""
        Dump the data from the columns of a model instance.
Arguments:
- item: An SQLAlchemy model instance
- fields: A list of requested fields. If it is None, all available
fields will be returned.
Returns a dictionary representing the instance's data.
"""
obj = dict()
columns = list(item.__table__.columns)
column_data = dict()
api_key = self.get_api_key(item)
for column in columns:
if column.name in item.jsonapi_columns_exclude:
continue
column_data[column.name] = getattr(item, column.name)
for column in item.jsonapi_columns_include:
column_data[column] = getattr(item, column)
column_data.update(item.jsonapi_columns_override)
for name, value in column_data.items():
key = self.inflector(name)
if key != 'id' and fields is not None and \
api_key in fields.keys() and \
key not in fields[api_key]:
continue
converted = self.convert(item, value)
if converted != SkipType:
obj[key] = converted
return obj
def dump_relationship_data(self, item, obj, depth, fields, sort, include):
"""
Handle relationship dumping for a model.
Arguments:
- item: SQLAlchemy model instance
- obj: Column data for the model post-dump
- depth: How much deeper into the relationships do we have to go
captain?
- fields: A dictionary of fields to be parsed based on linked_keys.
- sort: A dictionary of fields to sort by
- include: A list of resources to be included by link_keys.
"""
relationships = dict(list(map((lambda x: (x.key, x)),
item.__mapper__.relationships)))
for key in item.jsonapi_relationships_exclude:
if key not in relationships.keys():
continue
del relationships[key]
for key in item.jsonapi_relationships_include:
relationships[key] = getattr(item, key)
        for key, value in item.jsonapi_relationships_override.items():
relationships[key] = getattr(item, value)
if include is not None:
include = self.parse_include(include)
obj['links'] = {}
linked = {}
for key, relationship in relationships.items():
dump_this = True
link_key = self.inflector(key)
if hasattr(relationship, 'mapper'):
mapper = relationship.mapper.class_
linked_key = self.inflector(getattr(mapper, 'jsonapi_key',
mapper.__tablename__))
else:
linked_key = self.inflector(relationship.linked_key)
if relationship.direction == MANYTOONE:
for column in relationship.local_columns:
if isinstance(column, str):
col_name = self.inflector(column)
else:
col_name = self.inflector(column.name)
if col_name in obj.keys():
obj['links'][link_key] = self.convert(item,
obj[col_name])
del obj[col_name]
if include is not None:
if link_key not in include.keys():
continue
local_include = include[link_key]
if None in include[link_key]:
local_include.remove(None)
else:
dump_this = False
else:
local_include = None
if depth > 0 or (include is not None and
local_include is not None):
if callable(relationship):
related = relationship()
else:
related = getattr(item, relationship.key)
if relationship.direction == MANYTOONE:
if isinstance(related, JSONAPIMixin):
if not related.jsonapi_can_view():
continue
if dump_this and linked_key not in linked.keys():
linked[linked_key] = {}
r_obj, r_lnk = self.dump_object(related, depth - 1,
fields, sort,
local_include)
linked.update(r_lnk)
if dump_this:
linked[linked_key][str(r_obj['id'])] = r_obj
else:
if sort is not None and linked_key in sort.keys():
related = self.sort_query(mapper, related, sort)
if link_key not in obj['links'].keys():
obj['links'][link_key] = []
for local_item in list(related):
if not isinstance(local_item, JSONAPIMixin):
continue
if not local_item.jsonapi_can_view():
continue
if dump_this and linked_key not in linked.keys():
linked[linked_key] = {}
obj['links'][link_key].append(str(local_item.id))
r_obj, r_lnk = self.dump_object(local_item, depth - 1,
fields, sort,
local_include)
linked.update(r_lnk)
if dump_this:
linked[linked_key][str(r_obj['id'])] = r_obj
return obj, linked
def dump_object(self, item, depth, fields, sort, include):
"""
Quick, simple way of coordinating a dump.
Arguments:
- item: Instance of a SQLAlchemy model
- depth: Integer of how deep relationships should be queried
- fields: Dictionary of fields to be returned, keyed by linked_keys
        - sort: Dictionary of fields to sort by, keyed by linked_keys
- include: List of resources to side-load by link_keys.
"""
obj = self.dump_column_data(item, fields)
return self.dump_relationship_data(item, obj, depth, fields, sort,
include)
def serialize(self, to_serialize, depth=1, fields=None, sort=None,
include=None):
"""
Perform the serialization to dictionary in JSON API format.
Arguments:
- to_serialize: The query, collection, or instance to serialize.
- depth: How deep to side-load relationships. If include is provided,
this will be overridden
- fields: Dictionary of fields to be returned keyed by linked_keys or
a list of fields for the current instance
- sort: Dictionary of fields to sort by keyed by linked_keys or a list
of fields to sort by for the current instance
- include: List of resources to side-load by link_keys.
"""
api_key = self.get_api_key(self.model)
to_return = {api_key: [], 'linked': {}, 'meta': {}}
linked = dict()
if isinstance(to_serialize, JSONAPIMixin):
is_single = True
to_serialize = [to_serialize]
else:
is_single = False
if isinstance(fields, list):
fields = {api_key: fields}
if isinstance(sort, list):
sort = {api_key: sort}
if not is_single:
to_serialize = self.sort_query(self.model, to_serialize, sort)
for item in to_serialize:
if not item.jsonapi_can_view():
continue
dumped = self.dump_object(item, depth, fields, sort, include)
if dumped is None:
continue
obj, new_linked = dumped
to_return[api_key].append(obj)
for key in new_linked.keys():
if key not in linked.keys():
linked[key] = dict()
linked[key].update(new_linked[key])
for key in linked.keys():
to_return['linked'][key] = list(linked[key].values())
if is_single:
to_return[api_key] = to_return[api_key][0]
return to_return
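# Usage sketch (the Post model and session are hypothetical):
#   serializer = JSONAPI(Post)
#   payload = serializer.serialize(session.query(Post), depth=1,
#                                  include=['comments'])
# This yields {'<api_key>': [...], 'linked': {...}, 'meta': {}}, assuming Post
# mixes in JSONAPIMixin and defines a 'comments' relationship.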
|
from .doctype import *
from .html import *
from .sys import *
from .date import *
from ._if import *
from ._for import *
from ._while import *
from ._import import *
from ._def import *
from .include import *
from .extends import *
from .entitize import *
from .constants import *
from .ieif import *
from .types import *
from .additions import *
from .markdown import *
from .builtins import *
|
#!/usr/bin/env python
import csv
with open('SeqTracking.csv', encoding='ISO-8859-1') as csvfile:
with open('Metadata_csv.csv', 'w') as outfile:
outfile.write('SeqID,Genus,Quality\n')
reader = csv.DictReader(csvfile)
for row in reader:
if row['Genus'] == '':
genus = 'NA'
else:
genus = row['Genus']
seqid = row['SEQID']
if row['CuratorFlag'] == 'REFERENCE':
quality = 'Reference'
elif row['CuratorFlag'] == 'PASS':
quality = 'Pass'
else:
quality = 'Fail'
outfile.write('{},{},{}\n'.format(seqid, genus, quality))
|
from typing import Callable, List, Optional, Sequence
from ebl.transliteration.domain.atf import Atf, WORD_SEPARATOR
from ebl.transliteration.domain.egyptian_metrical_feet_separator_token import (
EgyptianMetricalFeetSeparator,
)
from ebl.transliteration.domain.enclosure_tokens import (
AccidentalOmission,
BrokenAway,
DocumentOrientedGloss,
Erasure,
IntentionalOmission,
PerhapsBrokenAway,
Removal,
Emendation,
)
from ebl.transliteration.domain.side import Side
from ebl.transliteration.domain.sign_tokens import Divider
from ebl.transliteration.domain.tokens import (
CommentaryProtocol,
LanguageShift,
LineBreak,
Token,
TokenVisitor,
)
from ebl.transliteration.domain.word_tokens import Word
class AtfVisitorState:
def __init__(self, prefix: Optional[str]):
self.parts: List[str] = [] if prefix is None else [prefix]
self._force_separator: bool = prefix is not None
self._omit_separator: bool = prefix is None
def append_with_forced_separator(self, token: Token) -> None:
self.append_separator()
self.append_token(token)
self.set_force()
def append_with_separator(self, token: Token) -> None:
if self._force_separator or not self._omit_separator:
self.append_separator()
self.append_token(token)
self.set_omit(False)
def append_left_bracket(self, token: Token) -> None:
if not self._omit_separator:
self.append_separator()
self.append_token(token)
self.set_omit(True)
def append_right_bracket(self, token: Token) -> None:
if self._force_separator:
self.append_separator()
self.append_token(token)
self.set_omit(False)
def append_token(self, token: Token) -> None:
self.parts.append(token.value)
def append_separator(self) -> None:
self.parts.append(WORD_SEPARATOR)
def set_omit(self, omit: bool) -> None:
self._omit_separator = omit
self._force_separator = False
def set_force(self) -> None:
self._omit_separator = False
self._force_separator = True
class AtfVisitor(TokenVisitor):
def __init__(self, prefix: Optional[str]):
self._state = AtfVisitorState(prefix)
@property
def result(self) -> Atf:
return Atf("".join(self._state.parts).strip())
def visit(self, token: Token) -> None:
self._state.append_with_separator(token)
def visit_language_shift(self, shift: LanguageShift) -> None:
self._state.append_with_forced_separator(shift)
def visit_word(self, word: Word) -> None:
self._state.append_with_separator(word)
def visit_document_oriented_gloss(self, gloss: DocumentOrientedGloss) -> None:
self._side(gloss.side)(gloss)
def visit_broken_away(self, broken_away: BrokenAway) -> None:
self._side(broken_away.side)(broken_away)
def visit_perhaps_broken_away(self, broken_away: PerhapsBrokenAway) -> None:
self._side(broken_away.side)(broken_away)
def visit_omission(self, omission: AccidentalOmission) -> None:
self._side(omission.side)(omission)
def visit_accidental_omission(self, omission: IntentionalOmission) -> None:
self._side(omission.side)(omission)
def visit_removal(self, removal: Removal) -> None:
self._side(removal.side)(removal)
def visit_emendation(self, emendation: Emendation) -> None:
self._side(emendation.side)(emendation)
def _side(self, side: Side) -> Callable[[Token], None]:
return {
Side.LEFT: self._state.append_left_bracket,
Side.RIGHT: self._state.append_right_bracket,
}[side]
def visit_erasure(self, erasure: Erasure):
def left():
self._state.append_separator()
self._state.append_token(erasure)
self._state.set_omit(True)
def center():
self._state.append_token(erasure)
self._state.set_omit(True)
def right():
self._state.append_token(erasure)
self._state.set_force()
{Side.LEFT: left, Side.CENTER: center, Side.RIGHT: right}[erasure.side]()
def visit_divider(self, divider: Divider) -> None:
self._state.append_with_forced_separator(divider)
def visit_line_break(self, line_break: LineBreak) -> None:
self._state.append_with_forced_separator(line_break)
def visit_egyptian_metrical_feet_separator(
self, egyptian_metrical_feet_separator: EgyptianMetricalFeetSeparator
) -> None:
self._state.append_with_forced_separator(egyptian_metrical_feet_separator)
def visit_commentary_protocol(self, protocol: CommentaryProtocol) -> None:
self._state.append_with_forced_separator(protocol)
def convert_to_atf(prefix: Optional[str], tokens: Sequence[Token]) -> Atf:
visitor = AtfVisitor(prefix)
for token in tokens:
token.accept(visitor)
return visitor.result
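# Usage sketch: convert_to_atf(prefix, tokens) feeds every token to an AtfVisitor
# and returns the joined Atf line; passing prefix=None omits the leading prefix
# and the separator it would otherwise force before the first token.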
|
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Google Cloud simple wrapper.
"""
import os
from google.cloud import bigquery
from google.cloud.bigquery import LoadJob
from google.cloud.exceptions import Conflict, NotFound
from google.cloud.storage import Bucket
from google.oauth2 import service_account
import pandas as pd
from pandas import DataFrame
from config import ConfigFile
from core.BucketLocation import BucketLocation
from logger import pr_logger
from core.Context import Context
from core.GitHubSortingMethod import GitHubSortingMethod
_LOGGER = pr_logger.get_logger(os.path.basename(__file__))
_CURRENT_DATE = pd.to_datetime("today")
def load_job_config(config: ConfigFile):
"""
returns a job config instance
See: https://googleapis.dev/python/bigquery/latest/generated/google.cloud.bigquery.job.LoadJobConfig.html
"""
return bigquery.LoadJobConfig(
autodetect=True,
time_partitioning=bigquery.TimePartitioning(
type_=bigquery.TimePartitioningType.DAY,
field=config.partition_column_name, # name of column to use for partitioning
),
        clustering_fields=config.clustering_columns  # columns to use for clustering
)
def get_credentials(credentials: str):
"""
returns a credentials instance
See: https://google-auth.readthedocs.io/en/master/reference/google.oauth2.service_account.html
"""
return service_account.Credentials.from_service_account_file(
credentials,
)
def init_context(config: ConfigFile) -> Context:
"""
returns a Context instance
See: core.Context
"""
credentials = get_credentials(config.gcp_credentials_path)
return Context(project_id=config.project_id,
credentials=credentials,
git_hub_token=config.gh_app_token)
def create_dataset(context: Context, dataset_id: str) -> None:
"""
create a dataset in google cloud Bigquery
See: https://cloud.google.com/bigquery/docs/datasets
"""
# Construct a BigQuery client object.
client = context.big_query_client
# Construct a full Dataset object to send to the API.
dataset = bigquery.Dataset(dataset_id)
dataset.location = "US"
# Send the dataset to the API for creation, with an explicit timeout.
# Raises google.api_core.exceptions.Conflict if the Dataset already
# exists within the project.
try:
dataset = client.create_dataset(dataset, timeout=30) # Make an API request.
_LOGGER.info("Created dataset {}.{}".format(client.project, dataset.dataset_id))
except Exception as e:
_LOGGER.error("dataset creation {}".format(e.args[-1]))
raise e
def to_sql_std_format(name: str) -> str:
return name.replace('/', '_').replace('-', '_')
def delete_dataset(context: Context, dataset_id: str):
"""
delete a dataset in google cloud Bigquery
See: https://cloud.google.com/bigquery/docs/datasets
"""
# Construct a BigQuery client object.
client = context.big_query_client
# Construct a full Dataset object to send to the API.
dataset = bigquery.Dataset(dataset_id)
dataset.location = "US"
    # Send the delete request to the API, with an explicit timeout.
    # Raises google.api_core.exceptions.NotFound if the dataset does not
    # exist within the project.
try:
client.delete_dataset(dataset, timeout=30, delete_contents=True)
_LOGGER.info("Deleted dataset {}.{}".format(client.project, dataset.dataset_id))
except Exception as e:
_LOGGER.error("dataset deletion {}".format(e.args[-1]))
raise e
def extracting_data(context, git_repo):
"""
Extract pull requests data from the specified git repo
See: https://pygithub.readthedocs.io/en/latest/github_objects/Repository.html
"""
try:
_LOGGER.info("Extracting {repo} pull requests from Github API ...".format(repo=git_repo))
repo = context.github_client.get_repo(git_repo)
pulls = repo.get_pulls(sort=GitHubSortingMethod.LONG_RUNNING.value)
return [pull.raw_data for pull in pulls]
except Exception as e:
_LOGGER.error("pr extraction {}".format(e.args[-1]))
raise e
def transform_data(config: ConfigFile, paginated_data) -> DataFrame:
"""
Transform raw extracted data to pandas dataframe
"""
_LOGGER.info("Transforming raw paginated data to pandas dataframe ...")
pullRequestsDf = pd.DataFrame(paginated_data)
pullRequestsDf[config.partition_column_name] = _CURRENT_DATE
return pullRequestsDf
def ingest_data_to_bq(context, ingestion_job_config, table_id, data_frame: DataFrame) -> LoadJob:
"""
Ingest a pandas dataframe to BigQuery
"""
if data_frame.empty:
        _LOGGER.warning("dataframe is empty. Nothing to ingest: {}".format(table_id))
else:
try:
_LOGGER.info("Ingesting table {} ...".format(table_id))
return context.big_query_client.load_table_from_dataframe(
data_frame, table_id, job_config=ingestion_job_config
)
except Exception as e:
_LOGGER.error("table ingestion {}".format(e.args[-1]))
raise e
def get_ingestion_report(context, table_id):
"""
Display ingestion job report
"""
table = context.big_query_client.get_table(table_id) # Make an API request.
_LOGGER.info(
"Loaded {} rows and {} columns to {}".format(
table.num_rows, len(table.schema), table_id
)
)
return table.num_rows
def create_bucket(context, bucket_name, storage_class: str, location: BucketLocation) -> Bucket:
"""
Create a new bucket
"""
storage_client = context.storage_client
bucket = storage_client.bucket(bucket_name)
bucket.storage_class = storage_class
try:
new_bucket = storage_client.create_bucket(bucket, location=location.value)
_LOGGER.info(
"Created bucket {} in {} with storage class {}".format(
new_bucket.name, new_bucket.location, new_bucket.storage_class
)
)
return new_bucket
except Conflict as e:
_LOGGER.warning(e.args[-1])
return storage_client.get_bucket(bucket_name)
def delete_bucket(context, bucket_name):
"""Deletes a bucket. The bucket must be empty."""
storage_client = context.storage_client
bucket = storage_client.get_bucket(bucket_name)
bucket.delete()
_LOGGER.info("Bucket {} deleted".format(bucket.name))
def dataset_exist(big_query_client: bigquery.Client, dataset_id) -> bool:
try:
big_query_client.get_dataset(dataset_id) # Make an API request.
_LOGGER.warning("Dataset {} already exists".format(dataset_id))
return True
except NotFound:
_LOGGER.warning("Dataset {} is not found".format(dataset_id))
return False
def run_pr_ingestion_pipeline(config: ConfigFile, git_repo_name: str):
"""Run a local pipeline for the specified git_repo_name."""
context = init_context(config)
tableId = 'pr_github_dataset.{}'.format(to_sql_std_format(git_repo_name))
# Specifying job config
jobConfig = load_job_config(config)
_LOGGER.info("Loading job config {}".format(jobConfig.__str__()))
# Extracting data
paginatedRawData = extracting_data(context, git_repo_name)
# Preparing the dataset
pullRequestsDf = transform_data(config, paginatedRawData)
# Create dataset if not exist
dataset_id = "{}.pr_github_dataset".format(context.big_query_client.project)
if not dataset_exist(context.big_query_client, dataset_id=dataset_id):
create_dataset(dataset_id=dataset_id, context=context)
# Ingesting the dataset
return ingest_data_to_bq(context=context, ingestion_job_config=jobConfig,
table_id=tableId, data_frame=pullRequestsDf)
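# Example sketch (the repository name is hypothetical, and building the
# ConfigFile is assumed to happen elsewhere):
#   run_pr_ingestion_pipeline(my_config, "octocat/Hello-World")
# extracts the repo's pull requests, creates the pr_github_dataset dataset if it
# does not exist yet, and loads the dataframe into a table named after the repo.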
|
# "Stopwatch: The Game"
import simplegui
# define global variables
current_time = 0
time_display = "0:00.0"
attempts = 0
succeeds = 0
# define helper function format that converts time
# in tenths of seconds into formatted string A:BC.D
def time_format(t):
D = t % 10
    t //= 10
    BC = t % 60
    A = t // 60
if BC < 10:
return str(A) + ":0" + str(BC) + "." + str(D)
else:
return str(A) + ":" + str(BC) + "." + str(D)
def result_format(x, y):
return str(x) + "/" + str(y)
# define event handlers for buttons; "Start", "Stop", "Reset"
def start_click():
if not timer.is_running():
timer.start()
def stop_click():
global attempts, succeeds
if timer.is_running():
timer.stop()
attempts += 1
        if current_time % 10 == 0:
succeeds += 1
def reset_click():
global current_time, time_display, attempts, succeeds
if timer.is_running():
timer.stop()
current_time = 0
time_display = "0:00.0"
succeeds = 0
attempts = 0
# define event handler for timer with 0.1 sec interval
def timer_tick():
global current_time, time_display
current_time += 1
time_display = time_format(current_time)
# define draw handler
def draw_handler(canvas):
global time_display, attempts, succeeds
if time_display[-1] == '0' and not timer.is_running():
canvas.draw_text(time_display, [50,110], 50, "Green")
canvas.draw_text("Nice!", [70,175], 50, "White")
elif time_display[-1] != '0' and not timer.is_running():
canvas.draw_text(time_display, [50,110], 50, "Red")
canvas.draw_text("Oops!", [70,175], 50, "White")
else:
canvas.draw_text(time_display, [50,110], 50, "Blue")
canvas.draw_text(result_format(succeeds, attempts), [200,45], 40, "White")
# create frame
frame = simplegui.create_frame("Stopwatch", 300, 200)
# register event handlers
btn_start = frame.add_button("Start", start_click)
btn_stop = frame.add_button("Stop", stop_click)
btn_reset = frame.add_button("Reset", reset_click)
timer = simplegui.create_timer(100, timer_tick)
frame.set_draw_handler(draw_handler)
# start frame
frame.start()
|
# -*- coding: utf-8 -*-
# ---------------------------------------------------------------------
# Export model to CSV
# ---------------------------------------------------------------------
# Copyright (C) 2007-2019 The NOC Project
# See LICENSE for details
# ---------------------------------------------------------------------
# Python modules
from __future__ import print_function
import sys
import argparse
# Third-party modules
from django.apps import apps
# NOC modules
from noc.sa.models.managedobject import ManagedObject # noqa
from noc.core.management.base import BaseCommand
from noc.core.csvutils import csv_export
from noc.models import load_models
from noc.core.mongo.connection import connect
class Command(BaseCommand):
help = "Export model to CSV"
def add_arguments(self, parser):
parser.add_argument(
"-t",
"--template",
dest="template",
action="store_true",
default=False,
help="dump only header row",
        )
parser.add_argument("args", nargs=argparse.REMAINDER, help="List of extractor names")
def _usage(self):
print("Usage:")
print("%s csv-export [-t] <model>" % (sys.argv[0]))
print("Where <model> is one of:")
load_models()
for m in apps.get_models():
t = m._meta.db_table
app, model = t.split("_", 1)
print("%s.%s" % (app, model))
sys.exit(1)
def get_queryset(self, model, args):
if not args:
return model.objects.all()
q = {}
for a in args:
if "=" in a:
k, v = a.split("=", 1)
q[k] = v
return model.objects.filter(**q)
def handle(self, *args, **options):
connect()
if len(args) < 1:
self._usage()
r = args[0].split(".")
if len(r) != 2:
self._usage()
app, model = r
load_models()
m = apps.get_model(app, model)
if not m:
return self._usage()
print(
csv_export(
m, queryset=self.get_queryset(m, args[1:]), first_row_only=options.get("template")
)
)
if __name__ == "__main__":
Command().run()
|
#!/usr/bin/env python3
"""
Very simple HTTP server in python for logging requests
Usage::
./server.py [<port>]
"""
from http.server import BaseHTTPRequestHandler, HTTPServer
from urllib.parse import urlparse, parse_qs
import logging
import json
class Server(BaseHTTPRequestHandler):
_config = None
def _set_response(self):
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
def do_GET(self):
logging.info("GET request,\nPath: %s\nHeaders:\n%s\n", str(self.path), str(self.headers))
self._set_response()
query_components = parse_qs(urlparse(self.path).query)
device = query_components["device"]
data = self._config[device[0]]
json_string = json.dumps(data)
self.wfile.write("{}".format(json_string).encode('utf-8'))
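# Example request (the device name is hypothetical): GET /?device=sensor1
# responds with the JSON entry stored under "sensor1" in the loaded config file.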
def run(server_class=HTTPServer, handler_class=Server, port=8081, configFile="config.json"):
logging.basicConfig(level=logging.INFO)
server_address = ('', port)
    with open(configFile) as config_fh:
        handler_class._config = json.load(config_fh)
httpd = server_class(server_address, handler_class)
logging.info('Starting httpd...\n')
try:
httpd.serve_forever()
except KeyboardInterrupt:
pass
httpd.server_close()
logging.info('Stopping httpd...\n')
if __name__ == '__main__':
from sys import argv
if len(argv) == 3:
run(port=int(argv[1]), configFile=str(argv[2]))
else:
run()
|
import getopt
import sys
from libcloud.compute.types import NodeState
from lc import get_lc
from printer import Printer
def lister_main(what, resource=None,
extension=False, supports_location=False, **kwargs):
"""Shortcut for main() routine for lister
tools, e.g. lc-SOMETHING-list
@param what: what we are listing, e.g. 'nodes'
@param extension: is it an extension of core libcloud functionality?
@param kwargs: additional arguments for the call
@type what: C{string}
@param supports_location: tells that objects we
listing could be filtered by location
@type supports_location: C{bool}
"""
list_method = "%slist_%s" % ({True: 'ex_', False: ''}[extension], what)
profile = "default"
format = location = None
options = "f:p:"
if supports_location:
options += "l:"
try:
opts, args = getopt.getopt(sys.argv[1:], options)
    except getopt.GetoptError as err:
sys.stderr.write("%s\n" % str(err))
sys.exit(1)
for o, a in opts:
if o == "-f":
format = a
if o == "-p":
profile = a
if o == "-l":
location = a
try:
conn = get_lc(profile, resource=resource)
list_kwargs = kwargs
if supports_location and location is not None:
nodelocation = filter(lambda loc: str(loc.id) == location,
conn.list_locations())[0]
list_kwargs["location"] = nodelocation
for node in getattr(conn, list_method)(**list_kwargs):
Printer.do(node, format)
    except Exception as err:
sys.stderr.write("Error: %s\n" % str(err))
def save_image_main():
"""Shortcut for main() routine for provider
specific image save tools.
"""
def usage(progname):
sys.stdout.write("%s -i <node_id> -n <image_name> [-p <profile]\n\n" % progname)
profile = 'default'
name = node_id = None
try:
opts, args = getopt.getopt(sys.argv[1:], "i:n:p:")
    except getopt.GetoptError as err:
sys.stderr.write("%s\n" % str(err))
sys.exit(1)
for o, a in opts:
if o == "-i":
node_id = a
if o == "-n":
name = a
if o == "-p":
profile = a
if node_id is None or name is None:
usage(sys.argv[0])
sys.exit(1)
conn = get_lc(profile)
node = get_node_or_fail(conn, node_id, print_error_and_exit,
("Error: cannot find node with id '%s'." % node_id,))
Printer.do(conn.ex_save_image(node, name))
def get_node_or_fail(conn, node_id, coroutine=None, cargs=(), ckwargs={}):
"""Shortcut to get a single node by its id. In case when
such node could not be found, coroutine could be called
to handle such case. Typically coroutine will output an
error message and exit from application.
@param conn: libcloud connection handle
@param node_id: id of the node to search for
@param coroutine: a callable object to handle case
when node cannot be found
@param cargs: positional arguments for coroutine
    @param ckwargs: keyword arguments for coroutine
@return: node object if found, None otherwise"""
try:
node = [node for node in conn.list_nodes()
if str(node.id) == str(node_id)][0]
return node
except IndexError:
if callable(coroutine):
coroutine(*cargs, **ckwargs)
return None
def print_error_and_exit(message):
sys.stderr.write("%s\n" % message)
sys.exit(1)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from ... import TestUnitBase
class TestDESDerive(TestUnitBase):
def test_real_world_01(self):
self.assertEqual(self.load()(B''), B'\x01' * 8)
def test_real_world_02(self):
self.assertEqual(self.load()(B'pw0rdEXAMPLE.Cpianist'), bytes.fromhex('158961F132EC04BC'))
def test_real_world_03(self):
self.assertEqual(self.load()(B'ANND3133'), bytes.fromhex('BF79FDA76267E089'))
def test_real_world_04(self):
self.assertEqual(self.load()(B'NNNN6666FFFFAAAA'), bytes.fromhex('F71FC802192A0DD5'))
|
# -*- coding: utf-8 -*-
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base test case class for unit and integration tests."""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
from functools import wraps
import os.path
import random
import re
import shutil
import tempfile
import six
import boto
import gslib.tests.util as util
from gslib.tests.util import unittest
from gslib.utils.constants import UTF8
from gslib.utils.posix_util import NA_ID
from gslib.utils.posix_util import NA_MODE
MAX_BUCKET_LENGTH = 63
def NotParallelizable(func):
"""Wrapper function for cases that are not parallelizable."""
@wraps(func)
def ParallelAnnotatedFunc(*args, **kwargs):
return func(*args, **kwargs)
ParallelAnnotatedFunc.is_parallelizable = False
return ParallelAnnotatedFunc
def RequiresIsolation(func):
"""Wrapper function for cases that require running in a separate process."""
@wraps(func)
def RequiresIsolationFunc(*args, **kwargs):
return func(*args, **kwargs)
RequiresIsolationFunc.requires_isolation = True
return RequiresIsolationFunc
class GsUtilTestCase(unittest.TestCase):
"""Base test case class for unit and integration tests."""
def setUp(self):
if six.PY2:
self.assertRegex = self.assertRegexpMatches
self.assertNotRegex = self.assertNotRegexpMatches
if util.RUN_S3_TESTS:
self.test_api = 'XML'
self.default_provider = 's3'
self.provider_custom_meta = 'amz'
else:
self.test_api = boto.config.get('GSUtil', 'prefer_api', 'JSON').upper()
self.default_provider = 'gs'
self.provider_custom_meta = 'goog'
self.tempdirs = []
def tearDown(self):
while self.tempdirs:
tmpdir = self.tempdirs.pop()
shutil.rmtree(tmpdir, ignore_errors=True)
def assertNumLines(self, text, numlines):
self.assertEqual(text.count('\n'), numlines)
def GetTestMethodName(self):
return self._testMethodName
def MakeRandomTestString(self):
"""Creates a random string of hex characters 8 characters long."""
return '%08x' % random.randrange(256**4)
def MakeTempName(self, kind, prefix='', suffix=''):
"""Creates a temporary name that is most-likely unique.
Args:
kind (str): A string indicating what kind of test name this is.
prefix (str): Prefix prepended to the temporary name.
suffix (str): Suffix string appended to end of temporary name.
Returns:
(str) The temporary name. If `kind` was "bucket", the temporary name may
have coerced this string, including the supplied `prefix`, such that it
contains only characters that are valid across all supported storage
providers (e.g. replacing "_" with "-", converting uppercase letters to
lowercase, etc.).
"""
name = '{prefix}gsutil-test-{method}-{kind}'.format(
prefix=prefix, method=self.GetTestMethodName(), kind=kind)
name = name[:MAX_BUCKET_LENGTH - 9]
name = '{name}-{rand}'.format(name=name, rand=self.MakeRandomTestString())
total_name_len = len(name) + len(suffix)
if suffix:
if kind == 'bucket' and total_name_len > MAX_BUCKET_LENGTH:
self.fail(
            'Tried to create a pseudo-random bucket name with a specific '
'suffix, but the generated name was too long and there was not '
'enough room for the suffix. Please use shorter strings or perform '
'name randomization manually.\nRequested name: ' + name + suffix)
name += suffix
if kind == 'bucket':
name = util.MakeBucketNameValid(name)
return name
# TODO: Convert tests to use this for object names.
def MakeTempUnicodeName(self, kind, prefix=''):
return self.MakeTempName(kind, prefix=prefix) + '材'
def CreateTempDir(self, test_files=0, contents=None):
"""Creates a temporary directory on disk.
The directory and all of its contents will be deleted after the test.
Args:
test_files: The number of test files to place in the directory or a list
of test file names.
contents: The contents for each generated test file.
Returns:
The path to the new temporary directory.
"""
tmpdir = tempfile.mkdtemp(prefix=self.MakeTempName('directory'))
self.tempdirs.append(tmpdir)
try:
iter(test_files)
except TypeError:
test_files = [self.MakeTempName('file') for _ in range(test_files)]
for i, name in enumerate(test_files):
contents_file = contents
if contents_file is None:
contents_file = ('test %d' % i).encode('ascii')
self.CreateTempFile(tmpdir=tmpdir, file_name=name, contents=contents_file)
return tmpdir
def CreateTempFifo(self, tmpdir=None, file_name=None):
"""Creates a temporary fifo file on disk. Should not be used on Windows.
Args:
tmpdir: The temporary directory to place the file in. If not specified, a
new temporary directory is created.
file_name: The name to use for the file. If not specified, a temporary
test file name is constructed. This can also be a tuple, where
('dir', 'foo') means to create a file named 'foo' inside a
subdirectory named 'dir'.
Returns:
The path to the new temporary fifo.
"""
tmpdir = tmpdir or self.CreateTempDir()
file_name = file_name or self.MakeTempName('fifo')
if isinstance(file_name, six.string_types):
fpath = os.path.join(tmpdir, file_name)
else:
fpath = os.path.join(tmpdir, *file_name)
os.mkfifo(fpath)
return fpath
def CreateTempFile(self,
tmpdir=None,
contents=None,
file_name=None,
mtime=None,
mode=NA_MODE,
uid=NA_ID,
gid=NA_ID):
"""Creates a temporary file on disk.
Note: if mode, uid, or gid are present, they must be validated by
ValidateFilePermissionAccess and ValidatePOSIXMode before calling this
function.
Args:
tmpdir: The temporary directory to place the file in. If not specified, a
new temporary directory is created.
contents: The contents to write to the file. If not specified, a test
string is constructed and written to the file. Since the file
is opened 'wb', the contents must be bytes.
file_name: The name to use for the file. If not specified, a temporary
test file name is constructed. This can also be a tuple, where
('dir', 'foo') means to create a file named 'foo' inside a
subdirectory named 'dir'.
mtime: The modification time of the file in POSIX time (seconds since
UTC 1970-01-01). If not specified, this defaults to the current
system time.
mode: The POSIX mode for the file. Must be a base-8 3-digit integer
represented as a string.
uid: A POSIX user ID.
gid: A POSIX group ID.
Returns:
The path to the new temporary file.
"""
tmpdir = six.ensure_str(tmpdir or self.CreateTempDir())
file_name = file_name or self.MakeTempName(str('file'))
if isinstance(file_name, (six.text_type, six.binary_type)):
fpath = os.path.join(tmpdir, six.ensure_str(file_name))
else:
file_name = map(six.ensure_str, file_name)
fpath = os.path.join(tmpdir, *file_name)
if not os.path.isdir(os.path.dirname(fpath)):
os.makedirs(os.path.dirname(fpath))
if isinstance(fpath, six.binary_type):
fpath = fpath.decode(UTF8)
with open(fpath, 'wb') as f:
contents = (contents if contents is not None else self.MakeTempName(
str('contents')))
if isinstance(contents, bytearray):
contents = bytes(contents)
else:
contents = six.ensure_binary(contents)
f.write(contents)
if mtime is not None:
# Set the atime and mtime to be the same.
os.utime(fpath, (mtime, mtime))
if uid != NA_ID or int(gid) != NA_ID:
os.chown(fpath, uid, int(gid))
if int(mode) != NA_MODE:
os.chmod(fpath, int(mode, 8))
return fpath
def assertRegexpMatchesWithFlags(self, text, pattern, msg=None, flags=0):
"""Like assertRegexpMatches, but allows specifying additional re flags.
Args:
text: The text in which to search for pattern.
pattern: The pattern to search for; should be either a string or compiled
regex returned from re.compile().
msg: The message to be displayed if pattern is not found in text. The
values for pattern and text will be included after this message.
flags: Additional flags from the re module to be included when compiling
pattern. If pattern is a regex that was compiled with existing flags,
        these flags will be added via a bitwise-or.
"""
if isinstance(pattern, six.string_types):
pattern = re.compile(pattern, flags=flags)
else: # It's most likely an already-compiled pattern.
pattern = re.compile(pattern.pattern, flags=pattern.flags | flags)
if not pattern.search(text):
failure_msg = msg or 'Regex didn\'t match'
failure_msg = '%s: %r not found in %r' % (failure_msg, pattern.pattern,
text)
raise self.failureException(failure_msg)
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# Author: Donny You(youansheng@gmail.com)
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from utils.tools.logger import Logger as Log
class FocalLoss(nn.Module):
def __init__(self, configer):
super(FocalLoss, self).__init__()
self.num_classes = configer.get('data', 'num_classes')
    def _one_hot_embedding(self, labels):
        """Embedding labels to one-hot form.
Args:
labels(LongTensor): class labels
num_classes(int): number of classes
Returns:
encoded labels, sized[N, #classes]
"""
y = torch.eye(self.num_classes) # [D, D]
return y[labels] # [N, D]
def focal_loss(self, x, y):
"""Focal loss
Args:
x(tensor): size [N, D]
y(tensor): size [N, ]
Returns:
(tensor): focal loss
"""
alpha = 0.25
gamma = 2
        t = self._one_hot_embedding(y.data.cpu())
t = Variable(t).cuda() # [N, 20]
logit = F.softmax(x)
logit = logit.clamp(1e-7, 1.-1e-7)
conf_loss_tmp = -1 * t.float() * torch.log(logit)
conf_loss_tmp = alpha * conf_loss_tmp * (1-logit)**gamma
conf_loss = conf_loss_tmp.sum()
return conf_loss
def forward(self, loc_preds, loc_targets, cls_preds, cls_targets):
"""Compute loss between (loc_preds, loc_targets) and (cls_preds, cls_targets).
Args:
loc_preds(tensor): predicted locations, sized [batch_size, #anchors, 4].
loc_targets(tensor): encoded target locations, sized [batch_size, #anchors, 4].
cls_preds(tensor): predicted class confidences, sized [batch_size, #anchors, #classes].
cls_targets(tensor): encoded target labels, sized [batch_size, #anchors].
Returns:
(tensor) loss = SmoothL1Loss(loc_preds, loc_targets) + FocalLoss(cls_preds, cls_targets).
"""
pos = cls_targets > 0 # [N,#anchors]
num_pos = pos.data.long().sum()
# loc_loss = SmoothL1Loss(pos_loc_preds, pos_loc_targets)
mask = pos.unsqueeze(2).expand_as(loc_preds) # [N,#anchors,4]
masked_loc_preds = loc_preds[mask].view(-1, 4) # [#pos,4]
masked_loc_targets = loc_targets[mask].view(-1, 4) # [#pos,4]
loc_loss = F.smooth_l1_loss(masked_loc_preds, masked_loc_targets, size_average=False)
# cls_loss = FocalLoss(loc_preds, loc_targets)
pos_neg = cls_targets > -1 # exclude ignored anchors
# num_pos_neg = pos_neg.data.long().sum()
mask = pos_neg.unsqueeze(2).expand_as(cls_preds)
masked_cls_preds = cls_preds[mask].view(-1, self.num_classes)
cls_loss = self.focal_loss(masked_cls_preds, cls_targets[pos_neg])
num_pos = max(1.0, num_pos)
Log.debug('loc_loss: %.3f | cls_loss: %.3f' % (loc_loss.data[0] / num_pos, cls_loss.data[0] / num_pos))
loss = loc_loss / num_pos + cls_loss / num_pos
return loss
class MultiBoxLoss(nn.Module):
def __init__(self, configer):
super(MultiBoxLoss, self).__init__()
self.num_classes = configer.get('data', 'num_classes')
def _cross_entropy_loss(self, x, y):
"""Cross entropy loss w/o averaging across all samples.
Args:
x(tensor): sized [N,D]
y(tensor): sized [N,]
Returns:
(tensor): cross entropy loss, sized [N,]
"""
xmax = x.data.max()
log_sum_exp = torch.log(torch.sum(torch.exp(x - xmax), dim=1)) + xmax
return log_sum_exp.view(-1, 1) - x.gather(1, y.view(-1, 1))
def test_cross_entropy_loss(self):
a = Variable(torch.randn(10, 4))
b = Variable(torch.ones(10).long())
        loss = self._cross_entropy_loss(a, b)
print(loss.mean())
print(F.cross_entropy(a, b))
def _hard_negative_mining(self, conf_loss, pos):
"""Return negative indices that is 3x the number as positive indices.
Args:
conf_loss: (tensor) cross entropy loss between conf_preds and conf_targets, sized [N*8732,]
pos: (tensor) positive(matched) box indices, sized [N, 8732]
Returns:
(tensor): negative indices, sized [N, 8732]
"""
batch_size, num_boxes = pos.size()
conf_loss[pos] = 0 # set pos boxes = 0, the rest are neg conf_loss
conf_loss = conf_loss.view(batch_size, -1) # [N,8732]
_, idx = conf_loss.sort(1, descending=True) # sort by neg conf_loss
_, rank = idx.sort(1) # [N,8732]
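        # Sorting the losses in descending order and then sorting the resulting
        # indices gives each box's rank by loss; the top-ranked boxes (up to 3x
        # the number of positives) are kept as hard negatives.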
num_pos = pos.long().sum(1) # [N,1]
num_neg = torch.clamp(3 * num_pos, min=1, max=num_boxes-1) # [N,1]
neg = rank < num_neg.unsqueeze(1).expand_as(rank) # [N,8732]
return neg
def forward(self, loc_preds, loc_targets, conf_preds, conf_targets):
"""Compute loss between (loc_preds, loc_targets) and (conf_preds, conf_targets).
Args:
loc_preds(tensor): predicted locations, sized [batch_size, 8732, 4]
loc_targets(tensor): encoded target locations, sized [batch_size, 8732, 4]
conf_preds(tensor): predicted class confidences, sized [batch_size, 8732, num_classes]
conf_targets:(tensor): encoded target classes, sized [batch_size, 8732]
is_print: whether print loss
img: using for visualization
loss:
(tensor) loss = SmoothL1Loss(loc_preds, loc_targets) + CrossEntropyLoss(conf_preds, conf_targets)
loc_loss = SmoothL1Loss(pos_loc_preds, pos_loc_targets)
conf_loss = CrossEntropyLoss(pos_conf_preds, pos_conf_targets)
+ CrossEntropyLoss(neg_conf_preds, neg_conf_targets)
"""
batch_size, num_boxes, _ = loc_preds.size()
pos = conf_targets > 0 # [N,8732], pos means the box matched.
num_matched_boxes = pos.data.long().sum()
if num_matched_boxes == 0:
print("No matched boxes")
# loc_loss.
pos_mask = pos.unsqueeze(2).expand_as(loc_preds) # [N, 8732, 4]
pos_loc_preds = loc_preds[pos_mask].view(-1, 4) # [pos,4]
pos_loc_targets = loc_targets[pos_mask].view(-1, 4) # [pos,4]
loc_loss = F.smooth_l1_loss(pos_loc_preds, pos_loc_targets, size_average=False)
# conf_loss.
conf_loss = self._cross_entropy_loss(conf_preds.view(-1, self.num_classes), conf_targets.view(-1)) # [N*8732,]
neg = self._hard_negative_mining(conf_loss, pos) # [N,8732]
pos_mask = pos.unsqueeze(2).expand_as(conf_preds) # [N,8732,21]
neg_mask = neg.unsqueeze(2).expand_as(conf_preds) # [N,8732,21]
mask = (pos_mask + neg_mask).gt(0)
pos_and_neg = (pos + neg).gt(0)
preds = conf_preds[mask].view(-1, self.num_classes) # [pos + neg,21]
targets = conf_targets[pos_and_neg] # [pos + neg,]
conf_loss = F.cross_entropy(preds, targets, size_average=False)
if num_matched_boxes > 0:
loc_loss /= num_matched_boxes
conf_loss /= num_matched_boxes
else:
return conf_loss + loc_loss
Log.debug("loc_loss: %f, cls_loss: %f" % (float(loc_loss.data[0]), float(conf_loss.data[0])))
return loc_loss + conf_loss
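# Usage sketch (illustrative, hedged): one way to exercise MultiBoxLoss with random
# tensors under the old PyTorch 0.3-style API this file targets (Variable, .data[0]).
# _FakeConfiger and the 21-class value are assumptions standing in for the project's
# real configer object.
#
#   import torch
#   from torch.autograd import Variable
#
#   class _FakeConfiger(object):
#       def get(self, *keys):
#           return 21  # assumed num_classes (background = class 0)
#
#   criterion = MultiBoxLoss(_FakeConfiger())
#   loc_preds = Variable(torch.randn(2, 8732, 4))
#   loc_targets = Variable(torch.randn(2, 8732, 4))
#   conf_preds = Variable(torch.randn(2, 8732, 21))
#   conf_targets = Variable((torch.rand(2, 8732) * 21).long())  # 0 means unmatched
#   loss = criterion(loc_preds, loc_targets, conf_preds, conf_targets)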
|
#!/c/Python27/python
# wpadmin.py - Command line tool for WordPress
#
# The MIT License (MIT)
#
# Copyright (c) 2014 Raul Chacon <raulchacon@outlook.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
r"""
For usage and a list of options, try this:
$ python wpadmin.py -h
This program lives here:
https://github.com/raulchacon/wpadmin.py
"""
__version__ = '0.1.1'
import os
from functools import wraps
class NotInWordPressRootError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
def wp_root(f):
@wraps(f)
def decorated_function(*args, **kwargs):
if (os.path.isdir(os.path.join(args[0].project_root, 'wp-content',
'themes')) and
os.path.isdir(os.path.join(args[0].project_root, 'wp-content',
'plugins'))):
return f(*args, **kwargs)
        raise NotInWordPressRootError('You must run this script from '
                                      'the WordPress root folder.')
return decorated_function
@wp_root
def starttheme(args):
"""Creates theme folder with the following empty files/folders:
index.php, style.css, images/, css/ and js/. If with_timber is
True then it additionally creates views/base.twig
"""
# Create Theme folder
theme_root = os.path.join(args.project_root, 'wp-content', 'themes',
args.name)
os.makedirs(theme_root)
# Create files
theme_files = [
'404.php',
'functions.php',
'index.php',
'style.css',
'README.md',
'.gitignore'
]
for f in theme_files:
fh = open(os.path.join(theme_root, f), 'w')
if '.php' in f:
fh.write("<?php\n\n")
fh.close()
if args.classic:
static_dir = theme_root
else:
# Create a static sub directory
static_dir = os.path.join(theme_root, 'static')
os.makedirs(static_dir)
# Change default twig directory from "views" to "templates"
functionsphp = open(os.path.join(theme_root, 'functions.php'), 'a')
functionsphp.write("Timber::$dirname = 'templates';\n")
functionsphp.close()
twig_templates = os.path.join(theme_root, 'templates')
os.makedirs(os.path.join(twig_templates))
open(os.path.join(twig_templates, 'base.twig'), 'a').close()
# Create sub directories
for d in ['images', 'css', 'js']:
os.makedirs(os.path.join(static_dir, d))
@wp_root
def startplugin(args):
"""Creates plugin folder with a php file of the same name."""
plugin_root = os.path.join(
args.project_root,
'wp-content',
'plugins',
args.name
)
os.makedirs(plugin_root)
open(os.path.join(plugin_root, 'README.md'), 'a').close()
open(os.path.join(plugin_root, '.gitignore'), 'a').close()
with open(os.path.join(plugin_root, args.name + '.php'), 'w') as f:
f.write("<?php\n\n")
def _main():
"""Parse options and execute wpadmin commands"""
import argparse
# Create top level parser
    parser = argparse.ArgumentParser(
        description="Create WordPress theme/plugin skeleton")
subparsers = parser.add_subparsers()
# Create the parser for the "starttheme" command
parser_starttheme = subparsers.add_parser('starttheme')
parser_starttheme.add_argument('name')
parser_starttheme.add_argument("-c", "--classic", help="create classic theme \
skeleton", action="store_true")
parser_starttheme.set_defaults(project_root=os.getcwd())
parser_starttheme.set_defaults(func=starttheme)
# Create the parser for the "startplugin" command
parser_startplugin = subparsers.add_parser('startplugin')
parser_startplugin.add_argument('name')
parser_startplugin.set_defaults(project_root=os.getcwd())
parser_startplugin.set_defaults(func=startplugin)
args = parser.parse_args()
args.func(args)
if __name__ == "__main__":
_main()
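# Example invocations (illustrative), run from the WordPress root folder:
#   $ python wpadmin.py starttheme mytheme
#   $ python wpadmin.py starttheme mytheme --classic
#   $ python wpadmin.py startplugin myplugin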
|
"""Tools to cache time-series data.
"""
from collections import deque
from py2store.utils.affine_conversion import get_affine_converter_and_inverse
class RegularTimeseriesCache:
"""
A type that pretends to be a (possibly very large) list, but where contents of the list are populated as they are
needed. Further, the indexing of the list can be overwritten for the convenience of the user.
    The canonical application is where we have segments of continuous waveform indexed by UTC microsecond timestamps.
    It is convenient to be able to read segments of this waveform as if it were one big waveform (handling the
    discontinuities gracefully), and to have the choice of using (relative or absolute) integer indices or UTC indices.
"""
def __init__(self, data_rate=1, time_rate=1, maxlen=None):
self.buffer = deque(iterable=(), maxlen=maxlen)
self.data_rate = data_rate
self.time_rate = time_rate
self.time_per_data = self.time_rate / self.data_rate
self.data_per_time = self.data_rate / self.time_rate
self.bt = None
self.tt = None
def time_to_idx(self, t):
return (t - self.bt) * self.data_per_time
def idx_to_time(self, idx):
return idx * self.time_per_data + self.bt
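    # Worked example (illustrative numbers): with data_rate=1000 samples per
    # second and time_rate=1000000 (timestamps in microseconds),
    # time_per_data == 1000 us/sample and data_per_time == 0.001 samples/us.
    # If bt == 5000000, then time_to_idx(5250000) == 250000 * 0.001 == 250.0
    # and idx_to_time(250) == 250 * 1000 + 5000000 == 5250000.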
def update(self, bt):
pass
def __getitem__(self, item):
if isinstance(item, slice):
start = item.start
|
""" Defines the rule for building external library with CMake
"""
load(
"//tools/build_defs:framework.bzl",
"CC_EXTERNAL_RULE_ATTRIBUTES",
"cc_external_rule_impl",
"create_attrs",
)
load(
"//tools/build_defs:detect_root.bzl",
"detect_root",
)
load(
"//tools/build_defs:cc_toolchain_util.bzl",
"get_flags_info",
"get_tools_info",
"is_debug_mode",
)
load(":cmake_script.bzl", "create_cmake_script")
load("//tools/build_defs/shell_toolchain/toolchains:access.bzl", "create_context")
load("//tools/build_defs/native_tools:tool_access.bzl", "get_cmake_data", "get_ninja_data")
load("@rules_foreign_cc//tools/build_defs:shell_script_helper.bzl", "os_name")
def _cmake_external(ctx):
cmake_data = get_cmake_data(ctx)
tools_deps = ctx.attr.tools_deps + cmake_data.deps
ninja_data = get_ninja_data(ctx)
make_commands = ctx.attr.make_commands
if _uses_ninja(ctx.attr.make_commands):
tools_deps += ninja_data.deps
make_commands = [command.replace("ninja", ninja_data.path) for command in make_commands]
attrs = create_attrs(
ctx.attr,
configure_name = "CMake",
create_configure_script = _create_configure_script,
postfix_script = "##copy_dir_contents_to_dir## $$BUILD_TMPDIR$$/$$INSTALL_PREFIX$$ $$INSTALLDIR$$\n" + ctx.attr.postfix_script,
tools_deps = tools_deps,
cmake_path = cmake_data.path,
ninja_path = ninja_data.path,
make_commands = make_commands,
)
return cc_external_rule_impl(ctx, attrs)
def _uses_ninja(make_commands):
for command in make_commands:
(before, separator, after) = command.partition(" ")
if before == "ninja":
return True
return False
def _create_configure_script(configureParameters):
ctx = configureParameters.ctx
inputs = configureParameters.inputs
root = detect_root(ctx.attr.lib_source)
if len(ctx.attr.working_directory) > 0:
root = root + "/" + ctx.attr.working_directory
tools = get_tools_info(ctx)
# CMake will replace <TARGET> with the actual output file
flags = get_flags_info(ctx, "<TARGET>")
no_toolchain_file = ctx.attr.cache_entries.get("CMAKE_TOOLCHAIN_FILE") or not ctx.attr.generate_crosstool_file
define_install_prefix = "export INSTALL_PREFIX=\"" + _get_install_prefix(ctx) + "\"\n"
configure_script = create_cmake_script(
workspace_name = ctx.workspace_name,
# as default, pass execution OS as target OS
target_os = os_name(ctx),
cmake_path = configureParameters.attrs.cmake_path,
tools = tools,
flags = flags,
install_prefix = "$$INSTALL_PREFIX$$",
root = root,
no_toolchain_file = no_toolchain_file,
user_cache = dict(ctx.attr.cache_entries),
user_env = dict(ctx.attr.env_vars),
options = ctx.attr.cmake_options,
include_dirs = inputs.include_dirs,
is_debug_mode = is_debug_mode(ctx),
)
return define_install_prefix + configure_script
def _get_install_prefix(ctx):
if ctx.attr.install_prefix:
return ctx.attr.install_prefix
if ctx.attr.lib_name:
return ctx.attr.lib_name
return ctx.attr.name
def _attrs():
attrs = dict(CC_EXTERNAL_RULE_ATTRIBUTES)
attrs.update({
# Relative install prefix to be passed to CMake in -DCMAKE_INSTALL_PREFIX
"install_prefix": attr.string(mandatory = False),
# CMake cache entries to initialize (they will be passed with -Dkey=value)
# Values, defined by the toolchain, will be joined with the values, passed here.
# (Toolchain values come first)
"cache_entries": attr.string_dict(mandatory = False, default = {}),
# CMake environment variable values to join with toolchain-defined.
# For example, additional CXXFLAGS.
"env_vars": attr.string_dict(mandatory = False, default = {}),
# Other CMake options
"cmake_options": attr.string_list(mandatory = False, default = []),
# When True, CMake crosstool file will be generated from the toolchain values,
# provided cache-entries and env_vars (some values will still be passed as -Dkey=value
# and environment variables).
# If CMAKE_TOOLCHAIN_FILE cache entry is passed, specified crosstool file will be used
# When using this option, it makes sense to specify CMAKE_SYSTEM_NAME in the
# cache_entries - the rule makes only a poor guess about the target system,
# it is better to specify it manually.
"generate_crosstool_file": attr.bool(mandatory = False, default = False),
# Working directory, with the main CMakeLists.txt
# (otherwise, the top directory of the lib_source label files is used.)
"working_directory": attr.string(mandatory = False, default = ""),
})
return attrs
""" Rule for building external library with CMake.
Attributes:
See line comments in _attrs() method.
Other attributes are documented in framework.bzl:CC_EXTERNAL_RULE_ATTRIBUTES
"""
cmake_external = rule(
attrs = _attrs(),
fragments = ["cpp"],
output_to_genfiles = True,
implementation = _cmake_external,
toolchains = [
"@rules_foreign_cc//tools/build_defs:cmake_toolchain",
"@rules_foreign_cc//tools/build_defs:ninja_toolchain",
"@rules_foreign_cc//tools/build_defs/shell_toolchain/toolchains:shell_commands",
"@bazel_tools//tools/cpp:toolchain_type",
],
)
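# Usage sketch (illustrative, hedged): a hypothetical BUILD target exercising only
# attributes referenced above; the load path, target names and values are
# assumptions, not taken from this file.
#
#   load("//tools/build_defs:cmake.bzl", "cmake_external")
#
#   cmake_external(
#       name = "zlib",
#       lib_source = "@zlib//:all_srcs",
#       cache_entries = {"BUILD_SHARED_LIBS": "OFF"},
#       env_vars = {"CFLAGS": "-O2"},
#       make_commands = ["ninja", "ninja install"],
#   )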
|
# -*- coding: utf-8 -*-
import functools
import itertools as it
import json
import os
import re
import socket
from typing import NamedTuple
from urllib.parse import urlparse
import jsonschema
import pygtrie
import structlog
import ujson
from .errors import InvalidUpstreamHost
from .errors import InvalidUpstreamURL
logger = structlog.get_logger(__name__)
ACCOUNT_TRANSFER_PATTERN = re.compile(r'^\/?@([^\/\s]+)/transfers$')
# -------------------
# TTLS
# NO EXPIRE: 0
# NO CACHE: -1
# NO EXPIRE IF IRREVERSIBLE: -2
# -------------------
# TIMEOUTS
# NO TIMEOUT: 0
# -------------------
# RETRIES
# NO RETRIES: 0
# -------------------
UPSTREAM_SCHEMA_FILE = 'upstreams_schema.json'
with open(UPSTREAM_SCHEMA_FILE) as f:
UPSTREAM_SCHEMA = json.load(f)
jsonschema.Draft4Validator.check_schema(UPSTREAM_SCHEMA)
#CONFIG_VALIDATOR = jsonschema.Draft4Validator(UPSTREAM_SCHEMA)
class _Upstreams(object):
__NAMESPACES = None
__URLS = None
__TTLS = None
__TIMEOUTS = None
__TRANSLATE_TO_APPBASE = None
def __init__(self, config, validate=True):
upstream_config = config['upstreams']
# CONFIG_VALIDATOR.validate(upstream_config)
self.config = upstream_config
self.__hash = hash(ujson.dumps(self.config))
self.__NAMESPACES = frozenset(c['name'] for c in self.config)
for namespace in self.__NAMESPACES:
assert not namespace.endswith('_api'),\
f'Invalid namespace {namespace} : Namespaces cannot end with "_api"'
assert not namespace == 'jsonrpc',\
f'Invalid namespace {namespace} : Namespace "jsonrpc" is not allowed'
self.__URLS = self.__build_trie('urls')
self.__TTLS = self.__build_trie('ttls')
self.__TIMEOUTS = self.__build_trie('timeouts')
self.__TRANSLATE_TO_APPBASE = frozenset(
c['name'] for c in self.config if c.get('translate_to_appbase', False) is True)
if validate:
self.validate_urls()
def __build_trie(self, key):
trie = pygtrie.StringTrie(separator='.')
for item in it.chain.from_iterable(c[key] for c in self.config):
if isinstance(item, list):
prefix, value = item
else:
keys = list(item.keys())
prefix_key = 'prefix'
value_key = keys[keys.index(prefix_key) - 1]
prefix = item[prefix_key]
value = item[value_key]
trie[prefix] = value
return trie
@functools.lru_cache(8192)
def url(self, request_urn) -> str:
try:
if (request_urn.api == 'database_api' or request_urn.api == 'condenser_api') and ACCOUNT_TRANSFER_PATTERN.match(
request_urn.params[0]):
url = os.environ.get('JEFFERSON_ACCOUNT_TRANSFER_DPAYD_URL')
if url:
return url
except Exception:
pass
_, url = self.__URLS.longest_prefix(str(request_urn))
if not url:
raise InvalidUpstreamURL(
url=url, reason='No matching url found', urn=str(request_urn))
elif url.startswith('ws') or url.startswith('http'):
return url
raise InvalidUpstreamURL(url=url, reason='invalid format', urn=str(request_urn))
@functools.lru_cache(8192)
def ttl(self, request_urn) -> int:
_, ttl = self.__TTLS.longest_prefix(str(request_urn))
return ttl
@functools.lru_cache(8192)
def timeout(self, request_urn) -> int:
_, timeout = self.__TIMEOUTS.longest_prefix(str(request_urn))
        if timeout == 0:
timeout = None
return timeout
@property
def urls(self) -> frozenset:
return frozenset(u for u in self.__URLS.values())
@property
    def namespaces(self) -> frozenset:
return self.__NAMESPACES
def translate_to_appbase(self, request_urn) -> bool:
return request_urn.namespace in self.__TRANSLATE_TO_APPBASE
def validate_urls(self):
logger.info('testing upstream urls')
for url in self.urls:
try:
                parsed_url = urlparse(url)
                host = parsed_url.netloc
                logger.info('attempting to add upstream url', url=parsed_url)
socket.gethostbyname(host)
logger.info('added upstream url', url=parsed_url)
except socket.gaierror:
raise InvalidUpstreamHost(url=url)
except Exception as e:
raise InvalidUpstreamURL(url=url, reason=str(e))
def __hash__(self):
return self.__hash
class Upstream(NamedTuple):
url: str
ttl: int
timeout: int
@classmethod
@functools.lru_cache(4096)
def from_urn(cls, urn, upstreams: _Upstreams=None):
return Upstream(upstreams.url(urn),
upstreams.ttl(urn),
upstreams.timeout(urn))
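# Illustrative config shape (an assumption inferred from __init__ and
# __build_trie above, not from the original source): each upstream entry maps
# dotted URN prefixes to values, and url()/ttl()/timeout() resolve a request
# URN by longest-prefix match on those tries.
#
#   example_config = {'upstreams': [{
#       'name': 'dpayd',
#       'urls': [['dpayd', 'wss://node.example.com']],
#       'ttls': [['dpayd', 3], ['dpayd.database_api', -1]],
#       'timeouts': [['dpayd', 5]],
#   }]}
#   upstreams = _Upstreams(example_config, validate=False)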
|
'''
Train Neural RE Model
'''
__author__ = 'Maosen'
import os
import random
import torch
import logging
import argparse
import pickle
import numpy as np
from tqdm import tqdm
import utils
from model import Model
from utils import Dataset
torch.backends.cudnn.deterministic = True
def train(args):
model = Model(args, device, train_dset.rel2id, word_emb=emb_matrix)
logging.info('Model: %s, Parameter Number: %d' % (args.model, model.count_parameters()))
max_dev_f1 = 0.0
test_result_on_max_dev_f1 = (0.0, 0.0, 0.0)
for iter in range(niter):
loss = 0.0
if args.fix_bias:
model.set_bias(train_lp)
for idx, batch in enumerate(tqdm(train_dset.batched_data)):
loss_batch = model.update(batch)
loss += loss_batch
loss /= len(train_dset.batched_data)
valid_loss, (dev_prec, dev_recall, dev_f1) = model.eval(dev_dset)
logging.info('Iteration %d, Train loss %f' % (iter, loss))
logging.info(
'Dev loss: {:.4f}, P: {:.4f}, R: {:.4f}, F1: {:.4f}'.format(valid_loss, dev_prec, dev_recall,
dev_f1))
if args.fix_bias:
model.set_bias(test_lp)
            logging.warning('Currently test evaluation is using gold test label distribution, only for reference.')
test_loss, (test_prec, test_recall, test_f1) = model.eval(test_dset)
logging.info(
'Test loss: {:.4f}, P: {:.4f}, R: {:.4f}, F1: {:.4f}'.format(test_loss, test_prec, test_recall,
test_f1))
if dev_f1 > max_dev_f1:
max_dev_f1 = dev_f1
test_result_on_max_dev_f1 = (test_prec, test_recall, test_f1)
# the saved model should have train_lp as bias.
if args.fix_bias:
model.set_bias(train_lp)
save_filename = os.path.join(args.save_dir, '%s_%d.pkl' % (args.info, runid))
model.save(save_filename, iter)
model.update_lr(valid_loss)
logging.info('Max Dev F1: %.4f' % max_dev_f1)
test_p, test_r, test_f1 = test_result_on_max_dev_f1
logging.info('Test P, R, F1 on best epoch: {:.4f}, {:.4f}, {:.4f}'.format(test_p, test_r, test_f1))
logging.info('\n')
return max_dev_f1, test_result_on_max_dev_f1
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--data_dir', type=str, default='data/neural/KBP', help='specify dataset with directory')
parser.add_argument('--vocab_dir', type=str, default='data/neural/vocab', help='directory storing word2id file and word embeddings.')
# Model Specs
parser.add_argument('--model', type=str, default='bgru', help='model name, (cnn|pcnn|bgru|lstm|palstm)')
parser.add_argument('--emb_dim', type=int, default=300, help='Word embedding dimension.')
parser.add_argument('--ner_dim', type=int, default=30, help='NER embedding dimension.')
parser.add_argument('--pos_dim', type=int, default=30, help='POS embedding dimension.')
parser.add_argument('--attn_dim', type=int, default=200, help='Attention size.')
parser.add_argument('--position_dim', type=int, default=30, help='Position encoding dimension.')
parser.add_argument('--hidden', type=int, default=200, help='RNN hidden state size.')
parser.add_argument('--window_size', type=int, default=3, help='Convolution window size')
parser.add_argument('--num_layers', type=int, default=2, help='Num of RNN layers.')
parser.add_argument('--bidirectional', dest='bidirectional', action='store_true', help='Bidirectional RNN.' )
parser.set_defaults(bidirectional=True)
parser.add_argument('--bias', dest='bias', action='store_true', help='Whether Bias term is used for linear layer.')
parser.set_defaults(bias=True)
parser.add_argument('--fix_bias', dest='fix_bias', action='store_true', help='Train model with fix bias (not fixed by default).')
parser.set_defaults(fix_bias=False)
# Data Loading & Pre-processing
parser.add_argument('--mask_no_type', dest='mask_with_type', action='store_false')
parser.set_defaults(mask_with_type=True)
parser.add_argument('--lower', dest='lower', action='store_true', help='Lowercase all words.')
parser.add_argument('--no-lower', dest='lower', action='store_false')
parser.set_defaults(lower=False)
parser.add_argument('--batch_size', type=int, default=64)
# Optimization
parser.add_argument('--lr', type=float, default=1.0, help='Applies to SGD and Adagrad.')
parser.add_argument('--lr_decay', type=float, default=0.9)
parser.add_argument('--num_epoch', type=int, default=30)
parser.add_argument('--max_grad_norm', type=float, default=5.0, help='Gradient clipping.')
# Optimization - Dropout
parser.add_argument('--in_drop', type=float, default=0.6, help='Input dropout rate.')
parser.add_argument('--intra_drop', type=float, default=0.1, help='Intra-layer dropout rate.')
parser.add_argument('--state_drop', type=float, default=0.5, help='RNN state dropout rate.')
parser.add_argument('--out_drop', type=float, default=0.6, help='Output dropout rate.')
# Other options
parser.add_argument('--seed', type=int, default=7698)
parser.add_argument('--repeat', type=int, default=5, help='train the model for multiple times.')
parser.add_argument('--save_dir', type=str, default='./dumped_models', help='Root dir for saving models.')
parser.add_argument('--info', type=str, default='KBP_default', help='description, also used as filename to save model.')
parser.add_argument('--cuda', type=bool, default=torch.cuda.is_available())
parser.add_argument('--cpu', action='store_true', help='Ignore CUDA.')
args = parser.parse_args()
# Set random seed
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
logger = logging.getLogger()
logger.setLevel(logging.INFO)
# Load vocab file (id2word)
with open(args.vocab_dir + '/vocab.pkl', 'rb') as f:
vocab = pickle.load(f)
word2id = {}
for idx, word in enumerate(vocab):
word2id[word] = idx
# Load word embedding
emb_file = args.vocab_dir + '/embedding.npy'
emb_matrix = np.load(emb_file)
assert emb_matrix.shape[0] == len(vocab)
assert emb_matrix.shape[1] == args.emb_dim
args.vocab_size = len(vocab)
niter = args.num_epoch
if args.cpu:
args.cuda = False
device = torch.device("cuda:0" if args.cuda else "cpu")
print('Using device: %s' % device.type)
# Load data.
print('Reading data......')
rel2id = utils.load_rel2id('%s/relation2id.json' % args.data_dir)
train_filename = '%s/train.json' % args.data_dir
test_filename = '%s/test.json' % args.data_dir
dev_filename = '%s/dev.json' % args.data_dir
train_dset = Dataset(train_filename, args, word2id, device, rel2id=rel2id, shuffle=True, mask_with_type=args.mask_with_type)
dev_dset = Dataset(dev_filename, args, word2id, device, rel2id=rel2id, mask_with_type=args.mask_with_type)
test_dset = Dataset(test_filename, args, word2id, device, rel2id=rel2id, mask_with_type=args.mask_with_type)
# Get label distribution from train set. Used in fix_bias.
train_lp = torch.from_numpy(train_dset.log_prior).to(device)
test_lp = torch.from_numpy(test_dset.log_prior).to(device)
if not os.path.isdir(args.save_dir):
os.makedirs(args.save_dir)
for runid in range(1, args.repeat + 1):
logging.info('Run model #%d time......' % runid)
dev_f1, test_result = train(args)
logging.info('')
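# Example invocation (illustrative; the script name and paths are assumptions
# based on the argparse defaults above):
#   $ python train.py --data_dir data/neural/KBP --vocab_dir data/neural/vocab \
#       --model bgru --num_epoch 30 --repeat 5 --info KBP_bgru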
|
from __future__ import print_function
import shutil
import sys
import tempfile
from six import StringIO
import os
import yaml
from mako.lookup import TemplateLookup
from mock import patch
from nose.plugins.skip import Skip
from pyff.mdrepo import MDRepository, MetadataException
from pyff.pipes import plumbing, Plumbing, PipeException
from pyff.test import ExitException
from pyff.test import SignerTestCase
from pyff.utils import hash_id, parse_xml, resource_filename, root
from pyff.parse import ParserException
from pyff.fetch import ResourceException
__author__ = 'leifj'
class PipeLineTest(SignerTestCase):
def run_pipeline(self, pl_name, ctx=None, md=None):
if ctx is None:
ctx = dict()
if md is None:
md = MDRepository()
templates = TemplateLookup(directories=[os.path.join(self.datadir, 'simple-pipeline')])
pipeline = tempfile.NamedTemporaryFile('w').name
template = templates.get_template(pl_name)
with open(pipeline, "w") as fd:
fd.write(template.render(ctx=ctx))
res = plumbing(pipeline).process(md, state={'batch': True, 'stats': {}})
os.unlink(pipeline)
return res, md, ctx
def exec_pipeline(self, pstr):
md = MDRepository()
p = yaml.load(StringIO(pstr))
print(p)
res = Plumbing(p, pid="test").process(md, state={'batch': True, 'stats': {}})
return res, md
@classmethod
def setUpClass(cls):
SignerTestCase.setUpClass()
def setUp(self):
SignerTestCase.setUpClass()
print("setup called for PipeLineTest")
self.templates = TemplateLookup(directories=[os.path.join(self.datadir, 'simple-pipeline')])
class StreamCapturing(object):
def __init__(self, stream):
self.captured = []
self.stream = stream
def __getattr__(self, attr):
return getattr(self.stream, attr)
def write(self, data):
self.captured.append(data)
self.stream.write(data)
class ParseTest(PipeLineTest):
def parse_test(self):
self.output = tempfile.NamedTemporaryFile('w').name
with patch.multiple("sys", exit=self.sys_exit, stdout=StreamCapturing(sys.stdout),
stderr=StreamCapturing(sys.stderr)):
from testfixtures import LogCapture
with LogCapture() as l:
res, md = self.exec_pipeline("""
- load:
- %s/metadata
- select
- stats
""" % self.datadir)
print(sys.stdout.captured)
print(sys.stderr.captured)
eIDs = [e.get('entityID') for e in md.store]
assert ('https://idp.example.com/saml2/idp/metadata.php1' not in eIDs)
assert ('https://idp.example.com/saml2/idp/metadata.php' in eIDs)
assert ("removing 'https://idp.example.com/saml2/idp/metadata.php1': schema validation failed" in str(l))
# To run all LoadErrorTests: ./setup.py test -s pyff.test.test_pipeline.LoadErrorTest
# To run individual test: ./setup.py test -s pyff.test.test_pipeline.LoadErrorTest.test_fail_on_error_no_file
class LoadErrorTest(PipeLineTest):
# A File that does not exist must throw an error with fail_on_error=True
def test_fail_on_error_no_file(self):
self.output = tempfile.NamedTemporaryFile('w').name
with patch.multiple("sys", exit=self.sys_exit, stdout=StreamCapturing(sys.stdout),
stderr=StreamCapturing(sys.stderr)):
from testfixtures import LogCapture
with LogCapture() as l:
try:
res, md = self.exec_pipeline("""
- load fail_on_error True:
- %s/metadata/test01.xml
- %s/file_that_does_not_exist.xml
- select
- stats
""" % (self.datadir, self.datadir))
except (PipeException,ResourceException) as ex:
print(ex)
assert ("file_that_does_not_exist.xml" in str(ex))
return True
finally:
if os.path.isfile(self.output):
os.unlink(self.output)
print(sys.stdout.captured)
print(sys.stderr.captured)
assert "Expected PipeException or ResourceException" == False
# A File that does not exist must throw an error with fail_on_error=True
def test_fail_on_error_no_file_url(self):
self.output = tempfile.NamedTemporaryFile('w').name
with patch.multiple("sys", exit=self.sys_exit, stdout=StreamCapturing(sys.stdout),
stderr=StreamCapturing(sys.stderr)):
from testfixtures import LogCapture
with LogCapture() as l:
try:
res, md = self.exec_pipeline("""
- load fail_on_error True:
- %s/metadata/test01.xml
- file://%s/file_that_does_not_exist.xml
- select
- stats
""" % (self.datadir, self.datadir))
except ResourceException as ex:
print(ex)
assert ("file_that_does_not_exist.xml" in str(ex))
return True
finally:
if os.path.isfile(self.output):
os.unlink(self.output)
print(sys.stdout.captured)
print(sys.stderr.captured)
assert "Expected ResourceException" == False
# An URL that cannot be downloaded must throw an error with fail_on_error=True
# Note: Due to load_url retries it takes 20s to complete this test
def test_fail_on_error_no_url(self):
self.output = tempfile.NamedTemporaryFile('w').name
with patch.multiple("sys", exit=self.sys_exit, stdout=StreamCapturing(sys.stdout),
stderr=StreamCapturing(sys.stderr)):
from testfixtures import LogCapture
with LogCapture() as l:
try:
res, md = self.exec_pipeline("""
- load fail_on_error True:
- %s/metadata/test01.xml
- http://127.0.0.1/does_not_exist.xml
- select
- stats
""" % (self.datadir))
except Exception as ex:
print(ex)
assert ("does_not_exist.xml" in str(ex))
return True
finally:
if os.path.isfile(self.output):
os.unlink(self.output)
print(sys.stdout.captured)
print(sys.stderr.captured)
assert "Expected Exception" == False
# A file with invalid XML must throw an exception with fail_on_error True:
def test_fail_on_error_invalid_file(self):
self.output = tempfile.NamedTemporaryFile('w').name
with patch.multiple("sys", exit=self.sys_exit, stdout=StreamCapturing(sys.stdout),
stderr=StreamCapturing(sys.stderr)):
from testfixtures import LogCapture
with LogCapture() as l:
try:
res, md = self.exec_pipeline("""
- load fail_on_error True:
- %s/metadata/test01.xml
- %s/metadata/test02-invalid.xml
- select
- stats
""" % (self.datadir, self.datadir))
except (MetadataException, ParserException) as ex:
print(ex)
return True
finally:
if os.path.isfile(self.output):
os.unlink(self.output)
print(sys.stdout.captured)
print(sys.stderr.captured)
assert "Expected MetadataException or ParserException" == False
# A directory with a file with invalid metadata must throw an exception with fail_on_error True and filter_invalid False:
def test_fail_on_error_invalid_dir(self):
self.output = tempfile.NamedTemporaryFile('w').name
with patch.multiple("sys", exit=self.sys_exit, stdout=StreamCapturing(sys.stdout),
stderr=StreamCapturing(sys.stderr)):
from testfixtures import LogCapture
with LogCapture() as l:
try:
res, md = self.exec_pipeline("""
- load fail_on_error True filter_invalid False:
- %s/metadata/
- select
- stats
""" % (self.datadir))
except (MetadataException, ParserException) as ex:
print(ex)
return True
finally:
if os.path.isfile(self.output):
os.unlink(self.output)
print(sys.stdout.captured)
print(sys.stderr.captured)
assert "Expected MetadataException or ParserException" == False
# A file with invalid XML must not throw an exception by default (fail_on_error False):
def test_no_fail_on_error_invalid_file(self):
self.output = tempfile.NamedTemporaryFile('w').name
with patch.multiple("sys", exit=self.sys_exit, stdout=StreamCapturing(sys.stdout),
stderr=StreamCapturing(sys.stderr)):
from testfixtures import LogCapture
with LogCapture() as l:
res, md = self.exec_pipeline("""
- load:
- %s/metadata/test01.xml
- %s/metadata/test02-invalid.xml
- select
- stats
""" % (self.datadir, self.datadir))
print(sys.stdout.captured)
print(sys.stderr.captured)
if os.path.isfile(self.output):
os.unlink(self.output)
# Loading an xml file with an invalid entity must throw when filter_invalid False and fail_on_error True
def test_fail_on_error_invalid_entity(self):
self.output = tempfile.NamedTemporaryFile('w').name
with patch.multiple("sys", exit=self.sys_exit, stdout=StreamCapturing(sys.stdout),
stderr=StreamCapturing(sys.stderr)):
from testfixtures import LogCapture
with LogCapture() as l:
try:
res, md = self.exec_pipeline("""
- load fail_on_error True filter_invalid False:
- %s/metadata/test01.xml
- %s/metadata/test03-invalid.xml
- select
- stats
""" % (self.datadir, self.datadir))
except MetadataException as ex:
print(ex)
assert ("schema validation failed" in str(ex))
assert ("/metadata/test03-invalid.xml" in str(ex))
return True
finally:
if os.path.isfile(self.output):
os.unlink(self.output)
print(sys.stdout.captured)
print(sys.stderr.captured)
# Test default behaviour. Loading a file with an invalid entity must not raise an exception
def test_no_fail_on_error_invalid_entity(self):
self.output = tempfile.NamedTemporaryFile('w').name
with patch.multiple("sys", exit=self.sys_exit, stdout=StreamCapturing(sys.stdout),
stderr=StreamCapturing(sys.stderr)):
from testfixtures import LogCapture
with LogCapture() as l:
res, md = self.exec_pipeline("""
- load:
- %s/metadata/test01.xml
- %s/metadata/test03-invalid.xml
- select
- stats
""" % (self.datadir, self.datadir))
print(sys.stdout.captured)
print(sys.stderr.captured)
if os.path.isfile(self.output):
os.unlink(self.output)
# A directory with a file with invalid metadata must not throw by default:
def test_no_fail_on_error_invalid_dir(self):
self.output = tempfile.NamedTemporaryFile('w').name
with patch.multiple("sys", exit=self.sys_exit, stdout=StreamCapturing(sys.stdout),
stderr=StreamCapturing(sys.stderr)):
from testfixtures import LogCapture
with LogCapture() as l:
res, md = self.exec_pipeline("""
- load:
- %s/metadata/
- select
- stats
""" % (self.datadir))
if os.path.isfile(self.output):
os.unlink(self.output)
print(sys.stdout.captured)
print(sys.stderr.captured)
class SortTest(PipeLineTest):
EID1 = "https://idp.aco.net/idp/shibboleth"
EID2 = "https://idp.example.com/saml2/idp/metadata.php"
EID3 = "https://sharav.abes.fr/idp/shibboleth"
@staticmethod
def _run_sort_test(expected_order, sxp, res, l):
if sxp is not None:
# Verify expected warnings for missing sort values
for e in expected_order:
try:
if not isinstance(e[1], bool):
raise TypeError
if not e[1]:
keygen_fail_str = ("Sort pipe: unable to sort entity by '%s'. "
"Entity '%s' has no such value" % (sxp, e[0]))
try:
                            assert (keygen_fail_str in str(l))
                        except AssertionError:
                            print("Test failed on expecting missing sort value from: '%s'.\nCould not find string "
                                  "on the output: '%s'.\nOutput was:\n %s" % (e[0], keygen_fail_str, str(l)))
raise
except (IndexError, TypeError):
print("Test failed for: '%s' due to 'order_by' xpath supplied without proper expectation tuple." %
"".join(e))
raise
# Verify order
for i, me in enumerate(expected_order):
try:
assert res[i].attrib.get("entityID") == me[0]
except AssertionError:
print(("Test failed on verifying sort position %i.\nExpected: %s; Found: %s " %
(i, me[0], res[i].attrib.get("entityID"))))
raise
# Test sort by entityID only
def test_sort(self):
sxp = None
self.output = tempfile.NamedTemporaryFile('w').name
with patch.multiple("sys", exit=self.sys_exit, stdout=StreamCapturing(sys.stdout),
stderr=StreamCapturing(sys.stderr)):
from testfixtures import LogCapture
with LogCapture() as l:
res, md = self.exec_pipeline("""
- load:
- %s/metadata
- %s/simple-pipeline/idp.aco.net.xml
- select:
- "!//md:EntityDescriptor[md:IDPSSODescriptor]"
- sort
- stats
""" % (self.datadir, self.datadir))
print(sys.stdout.captured)
print(sys.stderr.captured)
# tuple format (entityID, has value for 'order_by' xpath)
expected_order = [(self.EID1, ), (self.EID2, ), (self.EID3, )]
self._run_sort_test(expected_order, sxp, res, l)
# Test sort entries first by registrationAuthority
def test_sort_by_ra(self):
sxp = ".//md:Extensions/mdrpi:RegistrationInfo/@registrationAuthority"
self.output = tempfile.NamedTemporaryFile('w').name
with patch.multiple("sys", exit=self.sys_exit, stdout=StreamCapturing(sys.stdout),
stderr=StreamCapturing(sys.stderr)):
from testfixtures import LogCapture
with LogCapture() as l:
res, md = self.exec_pipeline("""
- load:
- %s/metadata
- %s/simple-pipeline/idp.aco.net.xml
- select:
- "!//md:EntityDescriptor[md:IDPSSODescriptor]"
- sort order_by %s
- stats
""" % (self.datadir, self.datadir, sxp))
print(sys.stdout.captured)
print(sys.stderr.captured)
# tuple format (entityID, has value for 'order_by' xpath)
expected_order = [(self.EID3, True), (self.EID1, False), (self.EID2, False)]
self._run_sort_test(expected_order, sxp, res, l)
# Test group entries by specific NameIDFormat support
def test_sort_group(self):
sxp = ".//md:IDPSSODescriptor/md:NameIDFormat[./text()='urn:mace:shibboleth:1.0:nameIdentifier']"
self.output = tempfile.NamedTemporaryFile('w').name
with patch.multiple("sys", exit=self.sys_exit, stdout=StreamCapturing(sys.stdout),
stderr=StreamCapturing(sys.stderr)):
from testfixtures import LogCapture
with LogCapture() as l:
res, md = self.exec_pipeline("""
- load:
- %s/metadata
- %s/simple-pipeline/idp.aco.net.xml
- select:
- "!//md:EntityDescriptor[md:IDPSSODescriptor]"
- sort order_by %s
- stats
""" % (self.datadir, self.datadir, sxp))
print(sys.stdout.captured)
print(sys.stderr.captured)
# tuple format (entityID, has value for 'order_by' xpath)
expected_order = [(self.EID1, True), (self.EID3, True), (self.EID2, False)]
self._run_sort_test(expected_order, sxp, res, l)
# noinspection PyUnresolvedReferences
class SigningTest(PipeLineTest):
def test_signing(self):
self.output = tempfile.NamedTemporaryFile('w').name
res, md, ctx = self.run_pipeline("signer.fd", self)
eIDs = [e.get('entityID') for e in md.store]
assert ('https://idp.aco.net/idp/shibboleth' in eIDs)
assert ('https://skriptenforum.net/shibboleth' in eIDs)
os.unlink(self.output)
def test_signing_and_validation(self):
self.output = tempfile.NamedTemporaryFile('w').name
res_s, md_s, ctx_s = self.run_pipeline("signer.fd", self)
res_v, md_v, ctx_v = self.run_pipeline("validator.fd", self)
eIDs = [e.get('entityID') for e in md_v.store]
assert ('https://idp.aco.net/idp/shibboleth' in eIDs)
assert ('https://skriptenforum.net/shibboleth' in eIDs)
os.unlink(self.output)
def test_cert_report(self):
self.output = tempfile.NamedTemporaryFile('w').name
res, md, ctx = self.run_pipeline("certreport.fd", self)
eIDs = [e.get('entityID') for e in md.store]
assert ('https://idp.aco.net/idp/shibboleth' in eIDs)
assert ('https://skriptenforum.net/shibboleth' in eIDs)
with open(self.output, 'r') as fd:
lines = fd.readline()
assert (len(lines) > 0)
def test_cert_report_swamid(self):
self.output = tempfile.NamedTemporaryFile('w').name
res, md, ctx = self.run_pipeline("certreport-swamid.fd", self)
with open(self.output, 'r') as fd:
print(fd.read())
def test_info_and_dump(self):
with patch("sys.stdout", StreamCapturing(sys.stdout)) as ctx:
try:
self.exec_pipeline("""
- load:
- http://mds.swamid.se/md/swamid-2.0.xml
- select
- dump
- info
""")
assert ('https://idp.nordu.net/idp/shibboleth' in sys.stdout.captured)
except IOError:
raise Skip
def test_end_exit(self):
with patch.multiple("sys", exit=self.sys_exit, stdout=StreamCapturing(sys.stdout)):
try:
self.exec_pipeline("""
- end:
code: 22
message: "slartibartifast"
""")
assert False
except IOError:
raise Skip
except ExitException as ex:
assert ex.code == 22
assert "slartibartifast" in "".join(sys.stdout.captured)
def test_single_dump(self):
with patch.multiple("sys", exit=self.sys_exit, stdout=StreamCapturing(sys.stdout)):
try:
self.exec_pipeline("""
- dump
""")
assert '<EntitiesDescriptor xmlns="urn:oasis:names:tc:SAML:2.0:metadata"/>' \
in "".join(sys.stdout.captured)
except IOError:
raise Skip
def test_missing_select(self):
for stmt in ('publish', 'signcerts', 'info', 'sign', 'store', 'finalize',
'xslt', 'certreport', 'emit', 'finalize', 'first', 'setattr', 'stats'):
with patch.multiple("sys", exit=self.sys_exit, stdout=StreamCapturing(sys.stdout)):
try:
self.exec_pipeline("""
- %s
""" % stmt)
assert False
except PipeException:
pass
except IOError:
raise Skip
def test_first_select_as(self):
with patch.multiple("sys", exit=self.sys_exit, stdout=StreamCapturing(sys.stdout)):
tmpfile = tempfile.NamedTemporaryFile('w').name
try:
self.exec_pipeline("""
- load:
- file://%s/metadata/test01.xml
- select as FOO
- first
- publish: %s
""" % (self.datadir, tmpfile))
t1 = parse_xml(resource_filename("metadata/test01.xml", self.datadir))
assert t1 is not None
entity_id = 'https://idp.example.com/saml2/idp/metadata.php'
t2 = parse_xml(tmpfile)
assert t2 is not None
assert root(t1).get('entityID') == root(t2).get('entityID')
assert root(t2).get('entityID') == entity_id
except PipeException:
pass
except IOError:
raise Skip
finally:
try:
os.unlink(tmpfile)
except:
pass
def test_prune(self):
with patch.multiple("sys", exit=self.sys_exit, stdout=StreamCapturing(sys.stdout)):
tmpfile = tempfile.NamedTemporaryFile('w').name
try:
self.exec_pipeline("""
- load:
- file://%s/metadata/test01.xml
- select
- prune:
- .//{urn:oasis:names:tc:SAML:metadata:ui}UIInfo
- publish: %s
""" % (self.datadir, tmpfile))
t1 = parse_xml(resource_filename("metadata/test01.xml", self.datadir))
uiinfo = t1.find(".//{urn:oasis:names:tc:SAML:metadata:ui}UIInfo")
assert uiinfo is not None
t2 = parse_xml(tmpfile)
assert t2 is not None
gone = t2.find(".//{urn:oasis:names:tc:SAML:metadata:ui}UIInfo")
assert gone is None
except PipeException:
pass
except IOError:
raise Skip
finally:
try:
os.unlink(tmpfile)
except:
pass
def test_empty_store(self):
with patch.multiple("sys", exit=self.sys_exit, stdout=StreamCapturing(sys.stdout)):
try:
self.exec_pipeline("""
- store
""")
assert False
except PipeException:
pass
except IOError:
raise Skip
def test_empty_store2(self):
with patch.multiple("sys", exit=self.sys_exit, stdout=StreamCapturing(sys.stdout)):
try:
self.exec_pipeline("""
- store:
directory: /tmp
""")
assert False
except PipeException:
pass
except IOError:
raise Skip
def test_empty_dir_error(self):
with patch.multiple("sys", exit=self.sys_exit, stdout=StreamCapturing(sys.stdout)):
from testfixtures import LogCapture
with LogCapture() as l:
try:
self.exec_pipeline("""
- load:
- %s/empty
""" % self.datadir)
except IOError:
raise Skip
assert "no entities found in" in str(l)
def test_store_and_retrieve(self):
with patch.multiple("sys", exit=self.sys_exit, stdout=StreamCapturing(sys.stdout)):
tmpdir = tempfile.mkdtemp()
os.rmdir(tmpdir) # lets make sure 'store' can recreate it
try:
self.exec_pipeline("""
- load:
- file://%s/metadata/test01.xml
- select
- store:
directory: %s
""" % (self.datadir, tmpdir))
t1 = parse_xml(resource_filename("metadata/test01.xml", self.datadir))
assert t1 is not None
entity_id = 'https://idp.example.com/saml2/idp/metadata.php'
sha1id = hash_id(entity_id, prefix=False)
fn = "%s/%s.xml" % (tmpdir, sha1id)
assert os.path.exists(fn)
t2 = parse_xml(fn)
assert t2 is not None
assert root(t1).get('entityID') == root(t2).get('entityID')
assert root(t2).get('entityID') == entity_id
except IOError:
raise Skip
finally:
shutil.rmtree(tmpdir)
def test_empty_certreport(self):
with patch.multiple("sys", exit=self.sys_exit, stdout=StreamCapturing(sys.stdout)):
try:
self.exec_pipeline("""
- certreport
""")
assert False
except PipeException:
pass
except IOError:
raise Skip
def test_pick_invalid(self):
with patch.multiple("sys", exit=self.sys_exit, stdout=StreamCapturing(sys.stdout)):
tmpfile = tempfile.NamedTemporaryFile('w').name
try:
self.exec_pipeline("""
- load validate False:
- %s/metadata
- pick:
- https://idp.example.com/saml2/idp/metadata.php1
- publish: %s
""" % (self.datadir, tmpfile))
assert False
except PipeException as ex:
print("".join(sys.stdout.captured))
print(str(ex))
pass
except IOError:
raise Skip
finally:
try:
os.unlink(tmpfile)
except:
pass
def test_blacklist(self):
with patch.multiple("sys", exit=self.sys_exit, stdout=StreamCapturing(sys.stdout)):
tmpfile = tempfile.NamedTemporaryFile('w').name
try:
res, md = self.exec_pipeline("""
- when batch:
- load:
- %s/metadata via blacklist_example
- loadstats
- when blacklist_example:
- fork merge remove:
- filter:
- https://idp.example.com/saml2/idp/metadata.php
""" % self.datadir)
except IOError:
raise Skip
print(md.lookup('https://idp.example.com/saml2/idp/metadata.php'))
assert (not md.lookup('https://idp.example.com/saml2/idp/metadata.php'))
def test_bad_namespace(self):
with patch.multiple("sys", exit=self.sys_exit, stdout=StreamCapturing(sys.stdout)):
tmpfile = tempfile.NamedTemporaryFile('w').name
try:
res, md = self.exec_pipeline("""
- when batch:
- load:
- %s/bad_metadata cleanup bad
- loadstats
- when bad:
- check_xml_namespaces
""" % self.datadir)
except ValueError:
raise Skip
assert("Expected exception from bad namespace in")
|
from __future__ import print_function
import datetime
from googleapiclient.discovery import build
from httplib2 import Http
from oauth2client import file, client, tools
from pyrfc3339 import parse
from util.models import CalendarDTO
from util.models.AppointmentDTO import AppointmentDTO
class GoogleCalendar:
# If modifying these scopes, delete the file token.json.
SCOPES = 'https://www.googleapis.com/auth/calendar'
googleCalendarAPI = None
def __init__(self, userName):
self.googleCalendarAPI = self.__authenticate(userName)
def createAppointment(self, appointmentDTO: AppointmentDTO):
        self.__insertNewCalendarAppointment(self.__appointmentDTOTogoogleAppointment(appointmentDTO))
def get_calendarEntries(self, date):
todayStart = datetime.datetime(date.year, date.month, date.day, 0, 0, 0)
todayEnd = datetime.datetime(date.year, date.month, date.day, 23, 59, 59)
googleCalendar = self.__fetchCalendarAppointements(todayStart, todayEnd)
calendarDTO = CalendarDTO()
if googleCalendar.get('description'):
calendarDTO.calendarName = googleCalendar['description']
else:
calendarDTO.calendarName = googleCalendar['summary']
print("please add a description to your calendar")
appointmentDTOs = None
googleAppointments = googleCalendar.get('items', [])
if not googleAppointments:
print('No upcoming events found.')
else:
appointmentDTOs = self.__googleAppointmentsToDTO(googleAppointments)
calendarDTO.appointments = appointmentDTOs
return calendarDTO
def __authenticate(self, userName):
store = file.Storage('Configs/token_' + userName + '_prod.json')
creds = store.get()
if not creds or creds.invalid:
flow = client.flow_from_clientsecrets('Configs/credentials_calapi_prod.json', self.SCOPES)
creds = tools.run_flow(flow, store)
return build('calendar', 'v3', http=creds.authorize(Http()))
def __fetchCalendarAppointements(self, startDate: datetime, endDate: datetime):
start = startDate.isoformat() + 'Z' # 'Z' indicates UTC time
end = endDate.isoformat() + 'Z' # 'Z' indicates UTC time
events_result = self.googleCalendarAPI.events().list(calendarId='primary', timeMin=start, timeMax=end,
maxResults=100, singleEvents=True,
orderBy='startTime').execute()
return events_result
    def __insertNewCalendarAppointment(self, appointment):
event = self.googleCalendarAPI.events().insert(calendarId='primary', body=appointment).execute()
def __googleAppointmentsToDTO(self, googleAppointments):
appointmentDTO = []
for appointment in googleAppointments:
if appointment['start'].get('dateTime'):
start = parse(appointment['start'].get('dateTime'))
else:
start = datetime.datetime.strptime(appointment['start'].get('date'), "%Y-%m-%d")
if appointment['end'].get('dateTime'):
end = parse(appointment['end'].get('dateTime'))
else:
end = datetime.datetime.strptime(appointment['end'].get('date'), "%Y-%m-%d")
appointmentDTO.append(AppointmentDTO(appointment['id'], start, end, appointment['summary']))
return appointmentDTO
def __appointmentDTOTogoogleAppointment(self, appointmentDTO: AppointmentDTO):
json = {
"summary": appointmentDTO.description,
"start": {
                'dateTime': appointmentDTO.start.astimezone().isoformat()  # ISO 8601 with explicit UTC offset
},
"end": {
                'dateTime': appointmentDTO.end.astimezone().isoformat()  # ISO 8601 with explicit UTC offset
}
}
return json
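# Usage sketch (illustrative, hedged): assumes the OAuth files referenced in
# __authenticate (Configs/token_<userName>_prod.json and
# Configs/credentials_calapi_prod.json) exist; 'alice' is a made-up user name.
#
#   import datetime
#   calendar = GoogleCalendar('alice')
#   dto = calendar.get_calendarEntries(datetime.date.today())
#   for appointment in (dto.appointments or []):
#       print(appointment)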
|
import os
from dotenv import load_dotenv
from archivy import app
def main():
load_dotenv()
port = int(os.environ.get("ARCHIVY_PORT", 5000))
app.run(host='0.0.0.0', port=port)
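# The guard below is an assumption (the original module only defines main());
# it makes the module directly runnable as a script.
if __name__ == "__main__":
    main()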
|
from setuptools import find_packages, setup
VERSION = '0.8'
setup(
name = 'urlrap',
packages = find_packages(),
version = VERSION,
platforms=['any'],
    description = 'URL convenience functions.',
author = 'Bob Colner',
author_email = 'bcolner@gmail.com',
url = 'https://github.com/bobcolner/urlrap',
download_url = 'https://github.com/bobcolner/urlrap/tarball/{0}'.format(VERSION),
keywords = ['url', 'utility'], # arbitrary keywords
license = 'MIT',
classifiers = [ # See: https://pypi.python.org/pypi?%3Aaction=list_classifiers
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 4 - Beta',
# Indicate who your project is intended for
'Intended Audience :: Developers',
'Topic :: Internet :: WWW/HTTP',
# Pick your license as you wish (should match 'license' above)
'License :: OSI Approved :: MIT License',
# Specify the Python versions you support here.
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
],
install_requires = ['urltools >=0.3.2, <2.0', 'dateutil >=0.12, <2.0'],
test_suite = 'tests'
)
|
"""
.. module:: transducer_services_Simple
:platform: Unix, Windows
:synopsis: Defines a simple example implementation for IEEE1451.0
Transducer Services for ncaplite.
.. moduleauthor:: James Ethridge <jeethridge@gmail.com>
"""
from ncaplite.transducer_services_base import TransducerAccessBase
import ncaplite.ieee1451types as ieee1451
import logging
logger = logging.getLogger(__name__)
class TransducerAccessSimple(TransducerAccessBase):
def __init__(self):
self.transducer_interfaces = {
1: (0, 0), # comm_id0 : tim_id0, chanid0
2: (0, 1), # comm_id1 : tim_id0, chanid1
3: (1, 0), # comm_id2 : tim_id1, chanid0
}
self.out_data = {1: 0, 2: 0, 3: 0}
self.in_data = {1: 0, 2: 0, 3: 0}
def find_com_id(self, tim_id, channel_id):
""" Simple helper function to find trans_comm_id given
a tim_id and channel_id. Assumes 0 is not a valid trans_comm_id.
"""
        for comid, tim in self.transducer_interfaces.items():
if tim == (tim_id, channel_id):
return comid
return 0 # assumes 0 not valid comid
def open(self, tim_id, channel_id):
trans_comm_id = self.find_com_id(tim_id, channel_id)
logger.debug("TransducerAccessSimple.open: " + str(trans_comm_id))
error_code = ieee1451.Error(ieee1451.ErrorSource.ERROR_SOURCE_LOCAL_0,
ieee1451.ErrorCode.NO_ERROR)
return (error_code, trans_comm_id)
def open_qos(self, tim_id, channel_id, qos_params):
error_code = ieee1451.Error(ieee1451.ErrorSource.ERROR_SOURCE_LOCAL_0,
ieee1451.ErrorCode.NO_ERROR)
qos_params = ()
trans_comm_id = 0
return (error_code, qos_params, trans_comm_id)
def open_group(self, tim_ids, channel_ids):
error_code = ieee1451.Error(ieee1451.ErrorSource.ERROR_SOURCE_LOCAL_0,
ieee1451.ErrorCode.NO_ERROR)
trans_comm_id = 0
return (error_code, trans_comm_id)
def open_group_qos(self, tim_ids, channel_ids, qos_params):
error_code = ieee1451.Error(ieee1451.ErrorSource.ERROR_SOURCE_LOCAL_0,
ieee1451.ErrorCode.NO_ERROR)
qos_params = ()
trans_comm_id = 0
return (error_code, qos_params, trans_comm_id)
def close(self, trans_comm_id):
error_code = ieee1451.Error(ieee1451.ErrorSource.ERROR_SOURCE_LOCAL_0,
ieee1451.ErrorCode.NO_ERROR)
return error_code
def read_data(self, trans_comm_id, timeout, sampling_mode):
data = self.out_data[trans_comm_id]
tmp = (trans_comm_id, self.out_data[trans_comm_id])
logger.debug("TransducerAccessSimple.read_data: " + str(tmp))
data = data+1
self.out_data[trans_comm_id] = data
arg = ieee1451.Argument(ieee1451.TypeCode.UINT32_TC, data)
arg_array = ieee1451.ArgumentArray()
arg_array.put_by_index(0, arg)
error_code = ieee1451.Error(ieee1451.ErrorSource.ERROR_SOURCE_LOCAL_0,
ieee1451.ErrorCode.NO_ERROR)
return (error_code, arg_array)
def write_data(self, trans_comm_id, timeout, sampling_mode, value):
val = value.get_by_index(0).value
self.in_data[trans_comm_id] = val
tmp = (trans_comm_id, self.in_data[trans_comm_id])
print("TransducerAccessSimple.write data: " + str(tmp))
error_code = ieee1451.Error(ieee1451.ErrorSource.ERROR_SOURCE_LOCAL_0,
ieee1451.ErrorCode.NO_ERROR)
return error_code
def start_read_data(self, trans_comm_id, trigger_time, timeout,
sampling_mode, callback):
error_code = ieee1451.Error(ieee1451.ErrorSource.ERROR_SOURCE_LOCAL_0,
ieee1451.ErrorCode.NO_ERROR)
operation_id = 0
return (error_code, operation_id)
def start_write_data(self, trans_comm_id, trigger_time, timeout,
sampling_mode, value, callback):
error_code = ieee1451.Error(ieee1451.ErrorSource.ERROR_SOURCE_LOCAL_0,
ieee1451.ErrorCode.NO_ERROR)
operation_id = 0
return (error_code, operation_id)
def start_stream(self, trans_comm_id, callback, operation_id):
error_code = ieee1451.Error(ieee1451.ErrorSource.ERROR_SOURCE_LOCAL_0,
ieee1451.ErrorCode.NO_ERROR)
operation_id = 0
return (error_code, operation_id)
def cancel(self, operation_id):
error_code = ieee1451.Error(ieee1451.ErrorSource.ERROR_SOURCE_LOCAL_0,
ieee1451.ErrorCode.NO_ERROR)
return error_code
if __name__ == '__main__':
print('Subclass:', issubclass(TransducerAccessSimple,
TransducerAccessBase))
print('Instance:', isinstance(TransducerAccessSimple(),
TransducerAccessBase))
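# Illustrative open/read/write round trip (assumes ArgumentArray exposes
# get_by_index as the counterpart of put_by_index used above):
#   access = TransducerAccessSimple()
#   err, comm_id = access.open(tim_id=0, channel_id=1)   # maps to trans_comm_id 2
#   err, args = access.read_data(comm_id, timeout=0, sampling_mode=0)
#   value = args.get_by_index(0).value                   # incrementing counter
#   access.write_data(comm_id, timeout=0, sampling_mode=0, value=args)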
|
from mygrad.tensor_base import Tensor
from .ops import *
__all__ = ["reshape", "squeeze", "ravel", "expand_dims", "broadcast_to"]
def reshape(a, *newshape, constant=False):
""" Returns a tensor with a new shape, without changing its data.
This docstring was adapted from ``numpy.reshape``
Parameters
----------
a : array_like
The tensor to be reshaped
*newshape : Union[int, Tuple[int, ...]]
The new shape should be compatible with the original shape. If
an integer, then the result will be a 1-D tensor of that length.
One shape dimension can be -1. In this case, the value is
inferred from the length of the tensor and remaining dimensions.
constant : bool, optional(default=False)
If ``True``, the returned tensor is a constant (it
does not back-propagate a gradient)
Returns
-------
mygrad.Tensor
        ``a`` with its shape changed. A new tensor is returned.
Notes
-----
``reshape`` utilizes C-ordering, meaning that it reads & writes elements using
C-like index ordering; the last axis index changing fastest, and, proceeding
in reverse order, the first axis index changing slowest.
Examples
--------
>>> import mygrad as mg
>>> a = mg.Tensor([[1,2,3], [4,5,6]])
>>> mg.reshape(a, 6)
Tensor([1, 2, 3, 4, 5, 6])
>>> mg.reshape(a, (3,-1)) # the unspecified value is inferred to be 2
Tensor([[1, 2],
[3, 4],
[5, 6]])"""
if not newshape:
raise TypeError("reshape() takes at least 1 argument (0 given)")
if hasattr(newshape[0], "__iter__"):
if len(newshape) > 1:
raise TypeError("an integer is required")
newshape = newshape[0]
return Tensor._op(Reshape, a, op_args=(newshape,), constant=constant)
def squeeze(a, axis=None, constant=False):
"""
Remove single-dimensional entries from the shape of a tensor.
This docstring was adapted from ``numpy.squeeze``
Parameters
----------
a : array_like
The tensor to be reshaped
axis : Optional[int, Tuple[int, ...]]
Selects a subset of the single-dimensional entries in the
shape. If an axis is selected with shape entry greater than
one, an error is raised.
constant : bool, optional(default=False)
If ``True``, the returned tensor is a constant (it
does not back-propagate a gradient)
Returns
-------
mygrad.Tensor
Raises
------
ValueError
If ``axis`` is not ``None``, and an axis being squeezed is not of length 1
Examples
--------
>>> import mygrad as mg
>>> x = mg.Tensor([[[0], [1], [2]]])
>>> x.shape
(1, 3, 1)
>>> mg.squeeze(x).shape
(3,)
>>> mg.squeeze(x, axis=0).shape
(3, 1)
>>> mg.squeeze(x, axis=1).shape
Traceback (most recent call last):
...
ValueError: cannot select an axis to squeeze out which has size not equal to one
>>> mg.squeeze(x, axis=2).shape
(1, 3)"""
return Tensor._op(Squeeze, a, op_args=(axis,), constant=constant)
def ravel(a, constant=False):
"""
Flattens contents of a tensor into a contiguous 1-D array. A copy is made only if needed.
This docstring was adapted from ``numpy.ravel``.
Parameters
----------
a : array_like
The tensor to be flattened
constant : bool, optional(default=False)
If ``True``, the returned tensor is a constant (it
does not back-propagate a gradient)
Returns
-------
mygrad.Tensor
Notes
-----
``ravel`` utilizes C-ordering, meaning that it reads & writes elements using
C-like index ordering; the last axis index changing fastest, and, proceeding
in reverse order, the first axis index changing slowest.
Examples
--------
>>> import mygrad as mg
>>> x = mg.Tensor([[1, 2],
... [3, 4]])
>>> mg.ravel(x)
Tensor([1, 2, 3, 4])
"""
return Tensor._op(Ravel, a, constant=constant)
def expand_dims(a, axis, constant=False):
"""
Expand the dimensions of a tensor by adding a new axis.
This docstring was adapted from ``numpy.expand_dims``.
Parameters
----------
a : array_like
The tensor to be expanded
axis : int
The position of the new axis in the expanded array shape.
constant : bool, optional(default=False)
If ``True``, the returned tensor is a constant (it
does not back-propagate a gradient)
Returns
-------
mygrad.Tensor
Examples
--------
>>> import mygrad as mg
>>> x = mg.Tensor([1, 2])
>>> x.shape
(2,)
>>> y = mg.expand_dims(x, 1)
>>> y.shape
(2, 1)
>>> z = mg.expand_dims(y, 0)
>>> z.shape
(1, 2, 1)
"""
return Tensor._op(ExpandDims, a, op_args=(axis,), constant=constant)
def broadcast_to(a, shape, constant=False):
"""
Broadcast a tensor to a new shape.
This docstring was adapted from ``numpy.broadcast_to``.
Parameters
----------
a : array_like
The tensor to be broadcasted
shape: Tuple[int, ...]
The shape of the broadcasted tensor. This shape
should be broadcast-compatible with the original
shape.
constant : bool, optional(default=False)
If ``True``, the returned tensor is a constant (it
does not back-propagate a gradient)
Returns
-------
mygrad.Tensor
Raises
------
ValueError
If the array is not compatible with the new shape
according to Numpy's broadcasting rules.
Examples
--------
>>> import mygrad as mg
>>> x = mg.Tensor([1, 2, 3])
>>> mg.broadcast_to(x, (3,3))
Tensor([[1, 2, 3],
[1, 2, 3],
[1, 2, 3]])
>>> mg.broadcast_to(x, (4,4))
Traceback (most recent call last):
...
ValueError: operands could not be broadcast together with remapped
shapes [original->remapped]: (3,) and requested shape (4,4)
"""
return Tensor._op(BroadcastTo, a, op_args=(shape,), constant=constant)
|
# based on works of 2015 Matthias Groncki
# https://github.com/mgroncki/IPythonScripts
#
#Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
#
#- Redistributions of source code must retain the above copyright notice,
#this list of conditions and the following disclaimer.
#
#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#This disclaimer is taken from the QuantLib license
#
import numpy as np
import matplotlib.pyplot as plt
import QuantLib as ql
def calc_cva( hazard_rate=0.02, market_rate=0.02, ir_vol=0.0075, swap_term=5, notional=10000, N=2000, seed=1 ):
# Setting evaluation date
today = ql.Date(15,2,2018)
ql.Settings.instance().setEvaluationDate(today)
# Setup Marketdata
rate = ql.SimpleQuote( market_rate )
rate_handle = ql.QuoteHandle(rate)
dc = ql.Actual365Fixed()
yts = ql.FlatForward(today, rate_handle, dc)
yts.enableExtrapolation()
hyts = ql.RelinkableYieldTermStructureHandle(yts)
t0_curve = ql.YieldTermStructureHandle(yts)
euribor6m = ql.Euribor6M(hyts)
# IR vol
volas = [ql.QuoteHandle(ql.SimpleQuote(ir_vol)), ql.QuoteHandle(ql.SimpleQuote(ir_vol))]
meanRev = [ql.QuoteHandle(ql.SimpleQuote(0.02))]
model = ql.Gsr(t0_curve, [today+100], volas, meanRev, 16.)
# Setup a dummy portfolio with single swap
def makeSwap(start, maturity, nominal, fixedRate, index, typ=ql.VanillaSwap.Payer):
"""
creates a plain vanilla swap with fixedLegTenor 1Y
parameter:
start (ql.Date) : Start Date
maturity (ql.Period) : SwapTenor
nominal (float) : Nominal
fixedRate (float) : rate paid on fixed leg
index (ql.IborIndex) : Index
return: tuple(ql.Swap, list<Dates>) Swap and all fixing dates
"""
end = ql.TARGET().advance(start, maturity)
fixedLegTenor = ql.Period("1y")
fixedLegBDC = ql.ModifiedFollowing
fixedLegDC = ql.Thirty360(ql.Thirty360.BondBasis)
spread = 0.0
fixedSchedule = ql.Schedule(start,
end,
fixedLegTenor,
index.fixingCalendar(),
fixedLegBDC,
fixedLegBDC,
ql.DateGeneration.Backward,
False)
floatSchedule = ql.Schedule(start,
end,
index.tenor(),
index.fixingCalendar(),
index.businessDayConvention(),
index.businessDayConvention(),
ql.DateGeneration.Backward,
False)
swap = ql.VanillaSwap(typ,
nominal,
fixedSchedule,
fixedRate,
fixedLegDC,
floatSchedule,
index,
spread,
index.dayCounter())
return swap, [index.fixingDate(x) for x in floatSchedule][:-1]
portfolio = [makeSwap(today + ql.Period("2d"),
ql.Period( swap_term, ql.Years ),
notional,
0.02,
euribor6m),
]
# Setup pricing engine and calculate the npv
engine = ql.DiscountingSwapEngine(hyts)
for deal, fixingDates in portfolio:
deal.setPricingEngine(engine)
deal.NPV()
process = model.stateProcess()
# Define evaluation grid
date_grid = [today + ql.Period(i,ql.Months) for i in range(0,12*6)]
for deal in portfolio:
date_grid += deal[1]
date_grid = np.unique(np.sort(date_grid))
time_grid = np.vectorize(lambda x: ql.ActualActual().yearFraction(today, x))(date_grid)
dt = time_grid[1:] - time_grid[:-1]
# Random number generator
urng = ql.MersenneTwisterUniformRng(seed)
usrg = ql.MersenneTwisterUniformRsg(len(time_grid)-1,urng)
generator = ql.InvCumulativeMersenneTwisterGaussianRsg(usrg)
x = np.zeros((N, len(time_grid)))
y = np.zeros((N, len(time_grid)))
pillars = np.array([0.0, 0.5, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
zero_bonds = np.zeros((N, len(time_grid), 12))
for j in range(12):
zero_bonds[:, 0, j] = model.zerobond(pillars[j],
0,
0)
for n in range(0,N):
dWs = generator.nextSequence().value()
for i in range(1, len(time_grid)):
t0 = time_grid[i-1]
t1 = time_grid[i]
x[n,i] = process.expectation(t0,
x[n,i-1],
dt[i-1]) + dWs[i-1] * process.stdDeviation(t0,
x[n,i-1],
dt[i-1])
y[n,i] = (x[n,i] - process.expectation(0,0,t1)) / process.stdDeviation(0,0,t1)
for j in range(12):
zero_bonds[n, i, j] = model.zerobond(t1+pillars[j],
t1,
y[n, i])
discount_factors = np.vectorize(t0_curve.discount)(time_grid)
npv_cube = np.zeros((N,len(date_grid), len(portfolio)))
for p in range(0,N):
for t in range(0, len(date_grid)):
date = date_grid[t]
ql.Settings.instance().setEvaluationDate(date)
ycDates = [date,
date + ql.Period(6, ql.Months)]
ycDates += [date + ql.Period(i,ql.Years) for i in range(1,11)]
yc = ql.DiscountCurve(ycDates,
zero_bonds[p, t, :],
ql.Actual365Fixed())
yc.enableExtrapolation()
hyts.linkTo(yc)
if euribor6m.isValidFixingDate(date):
fixing = euribor6m.fixing(date)
euribor6m.addFixing(date, fixing)
for i in range(len(portfolio)):
npv_cube[p, t, i] = portfolio[i][0].NPV()
ql.IndexManager.instance().clearHistories()
ql.Settings.instance().setEvaluationDate(today)
hyts.linkTo(yts)
# Calculate the discounted npvs
discounted_cube = np.zeros(npv_cube.shape)
for i in range(npv_cube.shape[2]):
discounted_cube[:,:,i] = npv_cube[:,:,i] * discount_factors
# Calculate the portfolio npv by netting all NPV
portfolio_npv = np.sum(npv_cube,axis=2)
discounted_npv = np.sum(discounted_cube, axis=2)
# Setup Default Curve
pd_dates = [today + ql.Period(i, ql.Years) for i in range(11)]
hzrates = [ hazard_rate * i for i in range(11) ]
pd_curve = ql.HazardRateCurve(pd_dates,hzrates,ql.Actual365Fixed())
pd_curve.enableExtrapolation()
# Calculation of the default probs
defaultProb_vec = np.vectorize(pd_curve.defaultProbability)
dPD = defaultProb_vec(time_grid[:-1], time_grid[1:])
# calculate expected exposure
dE = discounted_npv.copy()
dE[dE<0] = 0
dEE = np.sum(dE, axis=0)/N
dEEstd = np.std( dE / N, axis=0 )
# Calculation of the CVA
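    # Unilateral CVA approximation: CVA ~ (1 - R) * sum_i EE(t_i) * dPD_i,
    # where EE is the average discounted positive exposure at each grid date,
    # dPD_i the default probability between grid dates, and R the recovery.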
recovery = 0.4
CVA = (1-recovery) * np.sum(dEE[1:] * dPD)
return CVA, dEE, dEEstd, dPD
def worker_calc_cva( a ):
# return only CVA
return calc_cva( **a )[0]
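# Illustrative driver (not part of the original script): evaluate the CVA for
# a few hazard-rate scenarios in parallel.  worker_calc_cva expects a dict of
# keyword arguments for calc_cva; the scenario values below are arbitrary.
if __name__ == "__main__":
    import multiprocessing
    scenarios = [{"hazard_rate": h, "N": 500, "seed": 1}
                 for h in (0.01, 0.02, 0.03)]
    pool = multiprocessing.Pool(processes=min(len(scenarios), 4))
    cvas = pool.map(worker_calc_cva, scenarios)
    pool.close()
    pool.join()
    for scen, cva in zip(scenarios, cvas):
        print("hazard_rate={:.2f}: CVA={:.2f}".format(scen["hazard_rate"], cva))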
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import multiprocessing
import time
import os
import random
import string
import sys
from core.targets import analysis
from core.attack import start_attack
from core.alert import info
from core.alert import warn
from core.alert import error
from core.alert import write
from core.alert import messages
from core.log import sort_logs
from core.load_modules import load_all_modules
from core.load_modules import load_all_graphs
from core.args_loader import load_all_args
from core.args_loader import check_all_required
from core.update import _check
from core.compatible import _version_info
from core._time import now
def load():
write("\n\n")
# load libs
from core.color import finish
# load all modules in lib/brute, lib/scan, lib/graph
module_names = load_all_modules()
graph_names = load_all_graphs()
# Parse ARGVs
try:
parser, options, startup_update_flag = load_all_args(module_names, graph_names)
except SystemExit:
finish()
sys.exit(1)
# Filling Options
check_ranges = options.check_ranges
check_subdomains = options.check_subdomains
targets = options.targets
targets_list = options.targets_list
thread_number = options.thread_number + 1
thread_number_host = options.thread_number_host
log_in_file = options.log_in_file
scan_method = options.scan_method
exclude_method = options.exclude_method
users = options.users
users_list = options.users_list
passwds = options.passwds
passwds_list = options.passwds_list
timeout_sec = options.timeout_sec
ports = options.ports
time_sleep = options.time_sleep
language = options.language
verbose_level = options.verbose_level
show_version = options.show_version
check_update = options.check_update
socks_proxy = options.socks_proxy
retries = options.retries
graph_flag = options.graph_flag
help_menu_flag = options.help_menu_flag
ping_flag = options.ping_flag
methods_args = options.methods_args
method_args_list = options.method_args_list
wizard_mode = options.wizard_mode
profile = options.profile
start_api = options.start_api
api_host = options.api_host
api_port = options.api_port
api_debug_mode = options.api_debug_mode
api_access_key = options.api_access_key
api_client_white_list = options.api_client_white_list
api_client_white_list_ips = options.api_client_white_list_ips
api_access_log = options.api_access_log
api_access_log_filename = options.api_access_log_filename
# Checking Requirements
(targets, targets_list, thread_number, thread_number_host,
log_in_file, scan_method, exclude_method, users, users_list,
passwds, passwds_list, timeout_sec, ports, parser, module_names, language, verbose_level, show_version,
check_update, socks_proxy, retries, graph_flag, help_menu_flag, methods_args, method_args_list, wizard_mode,
profile, start_api, api_host, api_port, api_debug_mode, api_access_key, api_client_white_list,
api_client_white_list_ips, api_access_log, api_access_log_filename) = \
check_all_required(
targets, targets_list, thread_number, thread_number_host,
log_in_file, scan_method, exclude_method, users, users_list,
passwds, passwds_list, timeout_sec, ports, parser, module_names, language, verbose_level, show_version,
check_update, socks_proxy, retries, graph_flag, help_menu_flag, methods_args, method_args_list, wizard_mode,
profile, start_api, api_host, api_port, api_debug_mode, api_access_key, api_client_white_list,
api_client_white_list_ips, api_access_log, api_access_log_filename
)
info(messages(language, 0))
# check for update
if startup_update_flag:
__version__, __code_name__ = _version_info()
_check(__version__, __code_name__, language, socks_proxy)
info(messages(language, 96).format(len(load_all_modules()) - 1 + len(load_all_graphs())))
suff = now(model="%Y_%m_%d_%H_%M_%S") + "".join(random.choice(string.ascii_lowercase) for x in
range(10))
subs_temp = "tmp/subs_temp_" + suff
range_temp = "tmp/ranges_" + suff
total_targets = -1
for total_targets, _ in enumerate(
analysis(targets, check_ranges, check_subdomains, subs_temp, range_temp, log_in_file, time_sleep,
language, verbose_level, retries, socks_proxy, True)):
pass
total_targets += 1
total_targets = total_targets * len(scan_method)
try:
os.remove(range_temp)
except:
pass
range_temp = "tmp/ranges_" + suff
targets = analysis(targets, check_ranges, check_subdomains, subs_temp, range_temp, log_in_file, time_sleep,
language, verbose_level, retries, socks_proxy, False)
trying = 0
scan_id = "".join(random.choice("0123456789abcdef") for x in range(32))
scan_cmd = " ".join(sys.argv)
for target in targets:
for sm in scan_method:
trying += 1
p = multiprocessing.Process(target=start_attack, args=(
str(target).rsplit()[0], trying, total_targets, sm, users, passwds, timeout_sec, thread_number,
ports, log_in_file, time_sleep, language, verbose_level, socks_proxy, retries, ping_flag, methods_args,
scan_id, scan_cmd))
p.name = str(target) + "->" + sm
p.start()
while 1:
n = 0
processes = multiprocessing.active_children()
for process in processes:
if process.is_alive():
n += 1
else:
processes.remove(process)
if n >= thread_number_host:
time.sleep(0.01)
else:
break
_waiting_for = 0
while 1:
try:
exitflag = True
            if len(multiprocessing.active_children()) != 0:
exitflag = False
_waiting_for += 1
if _waiting_for > 3000:
_waiting_for = 0
info(messages(language, 138).format(", ".join([p.name for p in multiprocessing.active_children()])))
time.sleep(0.01)
if exitflag:
break
except KeyboardInterrupt:
for process in multiprocessing.active_children():
process.terminate()
break
info(messages(language, 42))
os.remove(subs_temp)
os.remove(range_temp)
info(messages(language, 43))
sort_logs(log_in_file, language, graph_flag, scan_id, scan_cmd, verbose_level, 0, profile, scan_method, ports)
write("\n")
info(messages(language, 44))
write("\n\n")
finish()
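# Illustrative entry point (an assumption, not part of the original module):
# the framework's launcher is expected to import and call load(); invoking it
# here simply allows the loader to be run standalone for testing.
if __name__ == "__main__":
    load()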
|
from setuptools import setup, find_packages
with open("requirements.txt", 'r') as file:
libs = file.readlines()
setup(
name = "image_toolbox",
version = "0.0.1",
author = "Kareem Janou",
    description = ("Collection of useful tools that can make the development of neural networks easier."),
license = "BSD",
keywords = "ML tools AI CNN pytorch",
packages=find_packages(),
install_requires= libs,
classifiers=[
"Development Status :: 1 - Alpha",
"Topic :: Utilities",
"License :: OSI Approved :: BSD License",
'Operating System :: POSIX :: Linux',
'Programming Language :: Python :: 3.8',
],
)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#-----------------------------------------------------------------------------
# update_backscatter.py, Angeline G. Burrell (AGB), UoL
#
# Comments: Update the beam's groundscatter flag, calculate the virtual height
# and propagation path, determine the origin field-of-view, update
# the elevation.
#-----------------------------------------------------------------------------
"""
update_backscatter.py:
Routines to update the groundscatter and elevation angle, as well as determine
the virtual height, hop, and origin field-of-view for each backscatter point.
Functions
------------------------------------------------------------------------------
assign_region             ionosphere region based on virtual height
test_propagation          test propagation against reality
select_alt_groups         determine altitude limits for range gate
get_beam                  load beams from list or pointer
calc_distance             calculate slant range
select_beam_groundscatter filter to select groundscatter data
calc_frac_points          calculate percentage of groundscatter
update_bs_w_scan          update propagation parameters, more than one beam
update_beam_fit           update beam data
update_backscatter        update propagation parameters, one beam
beam_ut_struct_test       test for continuity in UT across beams
------------------------------------------------------------------------------
Author: Angeline G. Burrell (AGB)
Date: January 15, 2015
Inst: University of Leicester (UoL)
"""
import logging
import numpy as np
from scipy import constants as scicon
from scipy import stats as stats
from scipy import optimize as optimize
from scipy import signal as scisig
import datetime as dt
def assign_region(vheight, region_hmax={"D":115.0,"E":150.0,"F":900.0},
region_hmin={"D":75.0,"E":115.0,"F":150.0}, case="upper"):
region = ""
rpad = {"D":0.0, "E":0.0, "F":1.0}
for rr in region_hmax.keys():
if region_hmin[rr] <= vheight and vheight < region_hmax[rr] + rpad[rr]:
            region = rr.lower() if case == "lower" else rr
return region
#---------------------------------------------------------------------------
def test_propagation(hop, vheight, dist,
region_hmax={"D":115.0,"E":150.0,"F":900.0},
region_hmin={"D":75.0,"E":115.0,"F":150.0}):
good = True
    if "D" in region_hmax and vheight <= region_hmax["D"]:
        if hop > 0.5 or dist > 500.0: good = False
    elif "E" in region_hmax and vheight <= region_hmax["E"]:
        if hop < 1.5 and dist > 900.0: good = False
return good
#---------------------------------------------------------------------------
def select_alt_groups(gate, vheight, rmin, rmax, vh_box, min_pnts=3):
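    """Determine virtual height limits that group the backscatter observed
    near a range gate, fitting Gaussians to the virtual height distribution
    where possible.
    Parameters
    ------------
    gate : (list or np.array)
        Range gates of the observations
    vheight : (list or np.array)
        Virtual heights of the observations in km
    rmin : (float)
        Minimum allowable virtual height in km
    rmax : (float)
        Maximum allowable virtual height in km
    vh_box : (float)
        Suggested virtual height window width in km
    min_pnts : (int)
        Minimum number of points needed to identify a peak (default=3)
    Returns
    ---------
    vh_mins : (list)
        Minimum virtual height of each group
    vh_maxs : (list)
        Maximum virtual height of each group
    """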
def gaussian(x, *p):
A, mu, sigma = p
return A * np.exp(-(x - mu)**2 / (2. * sigma**2))
# Initialize output
vh_mins = list()
vh_maxs = list()
vh_peaks = list()
# Create a histogram of the number of observations at each virtual height
bnum = int((rmax-rmin) / (vh_box * 0.25))
hnum, hbin = np.histogram(vheight, bnum if bnum > 10 else 10, (rmin,rmax))
# Find the maxima in the histogram
hmax = scisig.argrelmax(hnum, order=2)[0]
    # Since the signal routine won't be able to identify a maximum if two bins
    # next to each other have the same value, use the global maximum if no
    # local maxima were identified
if len(hmax) == 0 and max(hnum) > min_pnts:
hmax = np.array([list(hnum).index(max(hnum))])
    # Consider each maximum separately or, if none could be found, set limits
    # using the suggested width.
tmin = np.nanmin(vheight)
tmax = np.nanmax(vheight)
if len(hmax) == 0:
if np.isnan(tmin) or np.isnan(tmax):
return vh_mins, vh_maxs
vnum = np.ceil((tmax - tmin) / vh_box)
vmin = (tmax - tmin) / vnum + tmin - vh_box
vh_mins = [vmin + n * vh_box for n in np.arange(vnum)]
vh_maxs = [n + vh_box for n in vh_mins]
else:
        # For each maximum, fit a Gaussian
param = [0.0, 0.0, vh_box * 0.5]
cbin = (hbin[:-1] + hbin[1:]) / 2.0
hpeak = {hnum[ih]:ih for ih in hmax}
for hh in sorted(hpeak.keys(), reverse=True):
ih = hpeak[hh]
param[0] = hh
param[1] = cbin[ih]
try:
coeff, var = optimize.curve_fit(gaussian, cbin, hnum, p0=param)
# Evaluate for np.nan in coefficients
try:
np.isnan(coeff).tolist().index(True)
except:
# Get the 3 sigma limits
vmin = coeff[1] - 3.0 * coeff[2]
if vmin < rmin:
vmin = rmin
vmax = coeff[1] + 3.0 * coeff[2]
if vmax > rmax:
vmax = rmax
# Get the 2 sigma limits
vlow = coeff[1] - 2.0 * coeff[2]
if vlow < rmin:
vlow = rmin
vhigh = coeff[1] + 2.0 * coeff[2]
if vhigh > rmax:
vhigh = rmax
# If the fitted curve does not include the detected peak
# within a 2 sigma limit, throw out this fit.
if cbin[ih] < vlow or cbin[ih] > vhigh:
coeff = list()
else:
# To allow secondary peaks to be fitted, remove this
# peak from consideration
hnum = [hnum[ic] if cc < vmin or cc >= vmax else 0
for ic,cc in enumerate(cbin)]
# Save the initial peak boundaries
vh_mins.append(vmin)
vh_maxs.append(vmax)
vh_peaks.append(coeff[1])
except:
pass
# Evaluate the current limits to see if they overlap other limits
# or to see if there are gaps. Re-order the limits to start at the
# lowest and end at the highest. If no limits were found, set them.
if len(vh_maxs) == 0:
vnum = np.ceil((tmax - tmin) / vh_box)
vmin = (tmax - tmin) / vnum + tmin - vh_box
vh_mins = [vmin + n * vh_box for n in np.arange(vnum)]
vh_maxs = [n + vh_box if n + vh_box < rmax else rmax
for n in vh_mins]
for n,vmin in enumerate(vh_mins):
if vmin < rmin:
vh_mins[n] = rmin
else:
break
else:
new_min = list()
new_max = list()
new_peak = list()
priority = list() # Low number means high priority to keep limits
# If there are points that fall below the lower limit, add more
# regions to include these points.
if min(vh_mins) > tmin:
vmax = min(vh_mins)
vnum = round((vmax - tmin) / vh_box)
if vnum == 0.0:
# The outlying points are close enough that the lower limit
# should be expanded
imin = vh_mins.index(min(vh_mins))
vh_mins[imin] = np.floor(tmin)
if vh_mins[imin] < rmin:
vh_mins[imin] = rmin
else:
vspan = (vmax - tmin) / vnum
for n in np.arange(vnum):
nmin = tmin + n * vspan
if nmin < rmin:
nmin = rmin
new_min.append(nmin)
new_max.append(tmin + (n + 1.0) * vspan)
new_peak.append(tmin + (n + 0.5) * vspan)
priority.append(len(vh_mins) + len(new_min))
# Sort the Gaussian limits by minimum virtual height and cycle
# through them.
for vmin in sorted(vh_mins):
iv = vh_mins.index(vmin)
if len(new_min) > 0:
# Test for overlaps or gaps with the previous height window
if new_max[-1] >= vh_peaks[iv] or vmin <= new_peak[-1]:
# There is a significant overlap between the two regions
if priority[-1] < iv:
# Adjust the current boundaries
vmin = new_max[-1]
else:
# Adjust the previous boundaries
new_max[-1] = vmin
# If this adjustment places the previous maximum
# at or below the previous minimum, remove that
# division
if new_max[-1] <= new_min[-1]:
new_max.pop()
new_min.pop()
new_peak.pop()
priority.pop()
elif new_max[-1] < vmin:
# There is a gap between the two windows. Construct
# bridging window(s) before adding the current max and
# min to the list.
bmin = new_max[-1]
bmax = vmin
vnum = round((bmax - bmin) / vh_box)
if vnum == 0.0:
# The outlying points are close enough that the
# last upper limit should be expanded
new_max[-1] = vmin
else:
vspan = (bmax - bmin) / vnum
for n in np.arange(vnum):
new_min.append(bmin + n * vspan)
new_max.append(bmin + (n + 1.0) * vspan)
new_peak.append(bmin + (n + 0.5) * vspan)
priority.append(len(vh_mins) + len(new_min))
# Now append the current window, if it is wide enough to
# be sensible
if vmin < vh_maxs[iv]:
new_min.append(vmin)
new_max.append(vh_maxs[iv])
new_peak.append(vh_peaks[iv])
priority.append(iv)
# If there are points that fall above the upper limit, add more
# regions to include these points.
if len(new_max) == 0 or max(new_max) < tmax:
vmin = max(new_max)
vnum = round((tmax - vmin) / vh_box)
if vnum == 0.0:
# The outlying points are close enough that the upper limit
# should be expanded
imax = new_max.index(max(new_max))
new_max[imax] = np.ceil(tmax)
if new_max[imax] > rmax:
new_max[imax] = rmax
else:
vspan = (tmax - vmin) / vnum
for n in np.arange(vnum):
nmax = vmin + (n + 1.0) * vspan
if nmax > rmax:
nmax = rmax
new_min.append(vmin + n * vspan)
                        new_max.append(nmax)
new_peak.append(vmin + (n + 0.5) * vspan)
priority.append(len(vh_mins) + len(new_min))
# Rename the output
vh_mins = new_min
vh_maxs = new_max
# Return the limits
return vh_mins, vh_maxs
#---------------------------------------------------------------------------
def get_beam(radar_beams, nbeams):
"""Define a routine to load the beams from either a list/np.array or
pointer
Parameters
------------
radar_beams : (list, numpy array, or class `sdio.radDataTypes.radDataPtr`)
Object containing the radar beam data
nbeams : (int)
Number of beams returned before this beam
Returns
--------
beam : (class `sdio.radDataTypes.radDataBeam` or NoneType)
Beam containing radar data or None, if no data is available
nbeams : (int)
Number of beams retrieved from radar_beams, including this beam
"""
import davitpy.pydarn.sdio as sdio
if((isinstance(radar_beams, list) or isinstance(radar_beams, np.ndarray))
and nbeams < len(radar_beams)):
beam = radar_beams[nbeams]
nbeams += 1
elif isinstance(radar_beams, sdio.radDataTypes.radDataPtr):
beam = radar_beams.readRec()
nbeams += 1
else:
beam = None
return beam, nbeams
#----------------------------------------------------------------------------
def calc_distance(beam, rg_attr="slist", dist_units="km", hop=.5):
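    """Calculate the slant distance from the radar to the reflection point.
    Parameters
    ------------
    beam : (class Beam)
        An object with radar data for a certain beam, channel, and radar
    rg_attr : (str)
        Attribute holding the range gate list (default="slist")
    dist_units : (str)
        Units of the returned distance, "km" or "m" (default="km")
    hop : (float)
        Number of hops along the propagation path (default=0.5)
    Returns
    ---------
    dist : (np.array or NoneType)
        Distance from the radar to the first reflection point for each range
        gate, or None if the input is invalid
    """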
from get_sd_data import Beam
#---------------------------------
# Check the input
estr = None
if not isinstance(beam, Beam):
estr = 'the beam must be a beamData class'
elif not isinstance(rg_attr, str) or not hasattr(beam.fit, rg_attr):
estr = 'no range gate attribute [{:}]'.format(rg_attr)
    elif dist_units != "km" and dist_units != "m":
estr = 'unknown units for distance [{:}]'.format(dist_units)
    elif not (isinstance(hop, float) and hop > 0.0 and hop % 0.5 == 0.0):
estr = 'unknown hop number [{:}]'.format(hop)
else:
# Load the range gate data
try:
            rg = getattr(beam.fit, rg_attr)
if not isinstance(rg, list) or len(rg) == 0:
estr = 'unable to load range gate'
except:
estr = 'unable to load range gate'
#---------------------------------------------------------
# Convert from range gates to distance or exit with error
if estr is None:
# Determine the number of reflection/refraction points along the
# propagation path
bounces = 2.0 * hop
# Determine the unit conversion
        units = 1000.0 if dist_units == "m" else 1.0
# Calculate the distance
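        # With lagfr and smsep assumed to be in microseconds (standard fit
        # parameters), 5.0e-10 * c converts the round-trip delay into a
        # one-way distance in km (~0.15 km per microsecond); `units` rescales
        # to metres and dividing by the number of bounces gives the distance
        # to the first reflection point.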
dist = 5.0e-10 * scicon.c * units * (np.array(rg) * beam.prm.smsep
+ beam.prm.lagfr) / bounces
else:
logging.error(estr)
dist = None
return dist
#---------------------------------------------------------------------------
def select_beam_groundscatter(beam, dist, min_rg=10, max_rg=76, rg_box=5,
max_p=5.0, max_v=30.0, max_w=90.0, gs_tol=.5,
nmin=5):
"""A routine to select groundscatter data. Currently uses a range gate
limit where all data beyond the maximum range gate is rejected, all
data with 0.5 hop distances closer than 160 km are rejected, and all points
closer than the minimum range gate that have a power greater than the
    specified power maximum are also rejected. Once these requirements have
been met, the data must have positive power, and have the groundscatter
flag set.
Parameters
------------
beam : (class beamData)
An object with radar data for a certain beam, channel, and radar
dist : (list or np.array)
List of slant path (radar to reflection point) distances in km
min_rg : (int)
Minimum range gate to look for groundscatter with any power level
(default=10)
max_rg : (int)
Maximum range gate to look for groundscatter with any power level
(default=76)
    rg_box : (int)
        Number of range gates to search above and below each selected range
        gate. (default=5)
max_p : (float)
Maximum power to allow at range gates closer than the minimum range
gate (default=5 dB)
max_v : (float)
Maximum velocity to allow at range gates closer than the minimum range
gate (default=30 m/s)
max_w : (float)
Maximum spectral width to allow at range gates closer than the minimum
        range gate (default=90 m/s)
gs_tol : (float)
Minimum fraction of points within a range gate box that should be
groundscatter if this point is to actually be considered groundscatter.
(default=0.5)
nmin : (int)
        Minimum number of points that must be present within a range gate box
        to consider the backscatter anything other than noise. (default=5)
Returns
------------
gnd_index : (list)
List of indices corresponding to selected groundscatter data in the
input beam (eg slist, p_l, etc.)
If there is an input error, exits with an exception
"""
from get_sd_data import Beam
#---------------------
# Check input
assert isinstance(beam, Beam), \
logging.error("beam is not a beamData object")
assert((isinstance(dist, list) or isinstance(dist, np.ndarray))
and len(dist) == len(beam.fit.slist)), \
logging.error("distance list does not match this beam")
if isinstance(min_rg, float):
min_rg = int(min_rg)
assert isinstance(min_rg, int), \
logging.error("min_rg is not an integer")
if isinstance(max_rg, float):
max_rg = int(max_rg)
assert isinstance(max_rg, int), \
logging.error("max_rg is not an integer")
if isinstance(rg_box, float):
rg_box = int(rg_box)
assert(isinstance(rg_box, int) and rg_box > 0), \
logging.error("rg_box is not a positive integer")
if isinstance(max_p, int):
max_p = float(max_p)
assert isinstance(max_p, float), \
logging.error("maximum power is not a float")
if isinstance(max_v, int):
max_v = float(max_v)
assert isinstance(max_v, float), \
logging.error("maximum velocity is not a float")
if isinstance(max_w, int):
max_w = float(max_w)
assert isinstance(max_w, float), \
logging.error("maximum spectral width is not a float")
assert(isinstance(gs_tol, float) and gs_tol >= 0.0 and gs_tol <= 1.0), \
logging.error("gs_tol is not a positive fraction")
if isinstance(nmin, float):
nmin = int(nmin)
assert(isinstance(nmin, int) and nmin > 0), \
logging.error("rg_box is not a positive integer")
#--------------------------------------------------------------------
# Identify all instances that are flagged as ground scatter and have
# appropriate power fits based on their location
def isgroundscatter(rg, dist, p_l, p_s, sd_gflg):
"""A routine to apply the logic that states whether or not a point is
groundscatter or not, rejecting groundscatter points that are
ambiguous
Parameters
-----------
rg : (int)
Range gate
dist : (float)
            Slant path distance from the radar to the reflection point (km)
p_l : (float)
Power determined using exponential fit (dB)
p_s : (float)
Power determined using Gaussian fit (dB)
sd_gflg : (int)
SuperDARN groundscatter flag
Returns
---------
gflg : (boolean)
New groundscatter flag
"""
gflg = False
# To be groundscatter, the point must have been identified by the
# SuperDARN routine (which uses velocity and spectral width to flag
# all points that are most likely not ionospheric scatter) and have
        # successful exponential and Gaussian power fits.  The distance
# must also be greater than 78 km from the radar, since this is the
# smallest imaginable distance that groundscatter could possibly occur
        # at (yields a virtual height of 110 km for an elevation angle of 45
# deg)
if sd_gflg == 1 and p_l >= 0.0 and p_s >= 0.0 and dist > 78.0:
# Test the nearby range gates to ensure the power is not too high.
# This will remove slow moving ionospheric scatter
if rg < min_rg:
if p_l <= max_p and p_s <= max_p:
gflg = True
else:
gflg = True
return gflg
# END isgroundscatter
gi = [i for i,s in enumerate(beam.fit.slist)
if(s <= max_rg and isgroundscatter(s, dist[i], beam.fit.p_l[i],
beam.fit.p_s[i],
beam.fit.gflg[i]))]
#--------------------------------------------------------------------------
# Ensure that the flagged groundscatter is not mislabeled by testing to see
# if it is an isolated point surrounded by ionospheric scatter or not.
gnd_index = list()
for i in gi:
gs_frac, npnts = calc_frac_points(beam, "slist", gi, i, box=rg_box,
dat_min=0, dat_max=beam.prm.nrang)
if gs_frac >= gs_tol and npnts >= nmin:
gnd_index.append(i)
return(gnd_index)
#----------------------------------------------------------------------
def calc_frac_points(beam, dat_attr, dat_index, central_index, box,
dat_min=None, dat_max=None):
"""Calculate the fraction of points within a certain distance about a
specified range gate are groundscatter.
Parameters
------------
beam : (class beamData)
An object with radar data for a certain beam, channel, and radar
dat_attr : (str)
Attribute of data type
dat_index : (list of int)
A list containing the indexes of acceptable data points within the
specified beam.fit attribute list
central_index : (int)
The index of the desired data point to search about.
box : (float or int)
Size of to data box to search above and below the central data value
specified by the central_index. This must be in units of the specified
data.
dat_min : (float or int)
Lowest possible value of the data (eg 0 for range gates). (default=None)
dat_max : (float or int)
Highest possible value of the data (eg 75 for range gates at han).
(default=None)
Returns
----------
frac : (float)
A number between 0.0 and 1.0, indicating the fraction of points in the
specified area that are acceptable according to the dat_index list.
npnts : (int)
Total number of observations in the specified box.
If there is an input error, exits with an exception
"""
from get_sd_data import Beam
#----------------
# Check input
assert isinstance(beam, Beam), \
logging.error("beam is not a beamData object")
assert isinstance(dat_attr, str) and hasattr(beam.fit, dat_attr), \
logging.error("beam does not contain attribute {:}".format(dat_attr))
assert isinstance(dat_index, list) and isinstance(dat_index[0], int), \
logging.error("dat_index is not a list of integers")
assert box > 0, logging.error("box is not positive")
assert isinstance(dat_min, type(box)) or dat_min is None, \
logging.error("dat_min is of a different type is suspect")
assert isinstance(dat_max, type(box)) or dat_max is None, \
logging.error("dat_max is of a different type is suspect")
# Get the data list and ensure there is a value to search about
    data = getattr(beam.fit, dat_attr)
assert isinstance(central_index, int) and central_index < len(data), \
logging.error("no value for central_index in {:s}".format(dat_attr))
#-------------------------------------------------------------------------
# Set evaluation variables, restraining range gate box to realistic values
dmin = data[central_index] - box
dmax = data[central_index] + box
if dat_min is not None and dmin < dat_min:
dmin = dat_min
if dat_max is not None and dmax > dat_max:
dinc = 1 if isinstance(dat_max, int) else 1.0
dmax = dat_max + dinc
#---------------------
# Initialize output
frac = 0.0
npnts = 0
#-----------------------------------------------------------------------
# Cycle through the range gates, updating the total number of points and
    # total number of groundscatter points
for i,d in enumerate(data):
if d >= dmin and d < dmax:
npnts += 1
try:
dat_index.index(i)
frac += 1.0
except Exception:
pass
if npnts > 0 and frac > 0.0:
frac /= float(npnts)
return(frac, npnts)
#---------------------------------------------------------------------------
def update_bs_w_scan(scan, hard, min_pnts=3,
region_hmax={"D":115.0,"E":150.0,"F":900.0},
region_hmin={"D":75.0,"E":115.0,"F":150.0},
rg_box=[2,5,10,20], rg_max=[5,25,40,76],
vh_box=[50.0,50.0,50.0,150.0], max_hop=3.0, tdiff=None,
tdiff_args=list(), tdiff_e=None, tdiff_e_args=list(),
ptest=True, strict_gs=False, bmaz_e=0.0, boresite_e=0.0,
ix_e=0.0, iy_e=0.0, iz_e=0.0, step=6):
"""Updates the propagation path, elevation, backscatter type, structure
flag, and origin field-of-view (FoV) for all backscatter observations in
each beam for a scan of data. A full scan is not necessary, but if the
number of beams is less than the specified minimum, a less rigerous
evaluation method is used.
Parameters
-------------
scan : (list or np.array)
A list of beamData class objects, representing a scan across the
radar's field-of-view (as performed in most common operational modes).
hard : (class `pydarn.radar.radStruct.site`)
Radar hardware data for this scan
min_pnts : (int)
The minimum number of points necessary to perform certain range gate
or beam specific evaluations. (default=3)
region_hmax : (dict)
Maximum virtual heights allowed in each ionospheric layer.
(default={"D":115.0,"E":150.0,"F":900.0})
region_hmin : (dict)
Minimum virtual heights allowed in each ionospheric layer.
(default={"D":75.0,"E":115.0,"F":150.0})
rg_box : (list or np.array of int)
The total number of range gates to include when examining the elevation
angle across all beams. (default=[2,5,10,20])
    rg_max : (list or np.array of int)
        The maximum range gate to consider for each set of range gate and
        virtual height boxes. (default=[5,25,40,76])
    vh_box : (list or np.array of float)
        The total width of the altitude box to consider when examining the
        elevation angle across all beams at a given range gate.
        (default=[50.0,50.0,50.0,150.0])
    max_hop : (float)
        The maximum hop to consider for the range gate and height criteria
        specified by each element of rg_box, rg_max, and vh_box.
        (default=3.0)
    tdiff : (function or NoneType)
        A function to retrieve tdiff values (in microsec) using the radar ID
        number, current datetime, and transmission frequency as input.
        Additional inputs may be specified using tdiff_args. Example:
        def get_tdiff(stid, time, tfreq, filename) { do things } return tdiff
        tdiff=get_tdiff, tdiff_args=["tdiff_file"]
        (default=None)
    tdiff_args : (list)
        A list specifying any arguments other than radar, time, and
        transmission frequency to run the specified tdiff function.
        (default=list())
    tdiff_e : (function or NoneType)
        A function to retrieve tdiff error values (in microsec) using the radar
        ID number, current datetime, and transmission frequency as input.
        Additional inputs may be specified using tdiff_e_args. Example:
        def get_tdiffe(stid, time, tfreq, filename) { do things } return tdiffe
        tdiff_e=get_tdiffe, tdiff_e_args=["tdiff_file"]
        (default=None)
    tdiff_e_args : (list)
        A list specifying any arguments other than radar, time, and
        transmission frequency to run the specified tdiff_e function.
        (default=list())
    ptest : (boolean)
        Perform test to see if propagation modes are realistic? (default=True)
    strict_gs : (boolean)
        Remove indeterminately flagged backscatter (default=False)
bmaz_e : (float)
Error in beam azimuth in degrees (default=0.0)
boresite_e : (float)
Error in the boresite location in degrees (default=0.0)
ix_e : (float)
Error in the interferometer x coordinate in meters (default=0.0)
iy_e : (float)
Error in the interferometer y coordinate in meters (default=0.0)
iz_e : (float)
Error in the interferometer z coordinate in meters (default=0.0)
step : (int)
Integer denoting the number of processing steps to perform. This should
always be set to 6 (or greater) unless one wishes to reproduce the
demonstration plots in Burrell et al (2015). (default=6) The step
numbers coincide with those indicated in the paper:
1 or 2: Examine the elevation structure across each scan
3: Add assignments for points with realistic heights in only one FoV
4: Add assignments using single-beam elevation angle variations
        5 or more: Test assignments for consistency along the scan.
Returns
---------
beams : (np.array)
An array of updated beamData class objects. These updated objects have
the following additional/updated attributes
beam.fit.fovelv : added : Accounts for adjusted tdiff and origin FoV
beam.fit.fovelv_e : added : elevation error
beam.fit.felv : added : Elevation angle assuming front FoV
beam.fit.felv_e : added : Elevation angle error assuming front FoV
beam.fit.belv : added : Elevation angle assuming rear FoV
        beam.fit.belv_e : added : Elevation angle error assuming rear FoV
beam.fit.vheight : added : virtual height of ionosphere in km
beam.fit.vheight_e : added : error in virtual height (km)
beam.fit.fvheight : added : virtual height assuming front FoV
beam.fit.fvheight_e : added : error in virtual height assuming front FoV
beam.fit.bvheight : added : virtual height assuming rear FoV
beam.fit.bvheight_e : added : error in virtual height assuming rear FoV
beam.fit.hop : added : Hop assuming the assigned origin FoV
beam.fit.fhop : added : Hop assuming the front FoV
beam.fit.bhop : added : Hop assuming the rear FoV
beam.fit.region : added : Region assuming the assigned origin FoV
beam.fit.fregion : added : Region assuming the front FoV
beam.fit.bregion : added : Region assuming the rear FoV
beam.fit.fovflg : added : Flag indicating origin FoV (1=front, -1=back,
0=indeterminate)
beam.fit.fovpast : added : Flag indicating past FoV assignments
beam.fit.gflg : updated : Flag indicating backscatter type
(1=ground, 0=ionospheric, -1=indeterminate)
beam.prm.tdiff : added : tdiff used in elevation (microsec)
beam.prm.tdiff_e : added : tdiff error (microsec)
"""
    from get_sd_data import Beam
    import davitpy.pydarn.sdio as sdio
    import davitpy.pydarn.radar as pyrad
max_std = 3.0 # This is the maximum standard deviation in degrees.
max_score = 3.0 # This is the maximum z-score. z = (x - mean(X)) / std(X)
fov_frac = 2.0 / 3.0
fov = {1:"front", -1:"back"}
near_rg = -1
#----------------------------------
# Test input
if(not ((isinstance(scan, list) or isinstance(scan, np.ndarray)) and
len(scan) > 0 and len(scan) <= hard.maxbeam and
isinstance(scan[0], Beam))
and not isinstance(scan, sdio.radDataTypes.radDataPtr)):
estr = 'need a list of beams or a radar data pointer with [1-'
estr = '{:s}{:d}] beams: length={:d}'.format(estr, hard.maxbeam,
len(scan))
logging.error(estr)
return None
if isinstance(min_pnts, float):
min_pnts = int(min_pnts)
if not isinstance(min_pnts, int) or min_pnts < 0:
logging.error('unknown point minimum [{:}]'.format(min_pnts))
return None
if not isinstance(region_hmin, dict) or min(region_hmin.values()) < 0.0:
estr = 'unknown minimum virtual heights [{:}]'.format(region_hmin)
logging.error(estr)
return None
if not isinstance(region_hmax, dict):
estr = 'unknown maximum virtual heights [{:}]'.format(region_hmax)
logging.error(estr)
return None
if((not isinstance(rg_box, list) and not isinstance(rg_box, np.ndarray))
or min(rg_box) < 1.0):
logging.error('bad FoV range gate box[{:}]'.format(rg_box))
return None
if((not isinstance(vh_box, list) and not isinstance(vh_box, np.ndarray))
or min(vh_box) < 0.0):
logging.error('bad FoV virtual height box [{:}]'.format(vh_box))
return None
#-------------------------------------------------------------------------
# Loading the beams into the output list, updating the distance,
    # groundscatter flag, virtual height, and propagation path
beams = np.empty(shape=(hard.maxbeam,), dtype='O')
elvs = {"front":[list() for bi in range(hard.maxbeam)],
"back":[list() for bi in range(hard.maxbeam)]}
elv_errs = {"front":[list() for bi in range(hard.maxbeam)],
"back":[list() for bi in range(hard.maxbeam)]}
vheights = {"front":[list() for bi in range(hard.maxbeam)],
"back":[list() for bi in range(hard.maxbeam)]}
vherrs = {"front":[list() for bi in range(hard.maxbeam)],
"back":[list() for bi in range(hard.maxbeam)]}
hops = {"front":[list() for bi in range(hard.maxbeam)],
"back":[list() for bi in range(hard.maxbeam)]}
regions = {"front":[list() for bi in range(hard.maxbeam)],
"back":[list() for bi in range(hard.maxbeam)]}
bnum = 0
snum = 0
while scan is not None:
# Load beams from scan, accounting for different input types
if isinstance(scan, list) or isinstance(scan, np.ndarray):
if snum < len(scan):
beams[bnum] = scan[snum]
snum += 1
else:
scan = None
else:
try:
beams[bnum] = scan.readRec()
except:
estr = "{:s} INFO: empty data pointer".format(rn)
logging.info(estr)
scan = None
bnum += 1
# If a new beam was loaded, update the beam
if bnum > len(beams):
bnum = len(beams)
elif beams[bnum-1] is None:
bnum -= 1
else:
# Update the beam parameters
if tdiff is None:
beams[bnum-1].prm.tdiff = None
else:
args = [beams[bnum-1].stid, beams[bnum-1].time,
beams[bnum-1].prm.tfreq]
args.extend(tdiff_args)
beams[bnum-1].prm.tdiff = tdiff(*args)
if tdiff_e is None:
beams[bnum-1].prm.tdiff_e = None
else:
args = [beams[bnum-1].stid, beams[bnum-1].time,
beams[bnum-1].prm.tfreq]
args.extend(tdiff_e_args)
beams[bnum-1].prm.tdiff_e = tdiff_e(*args)
# Update the beam fit values
(beams[bnum-1], e, eerr, vh, verr, hh, rr,
nhard) = update_beam_fit(beams[bnum-1], hard=hard,
region_hmax=region_hmax,
region_hmin=region_hmin, max_hop=max_hop,
ptest=ptest, strict_gs=strict_gs,
bmaz_e=bmaz_e, boresite_e=boresite_e,
ix_e=ix_e, iy_e=iy_e, iz_e=iz_e)
if e is None or nhard is None:
beams[bnum-1] = None
bnum -= 1
else:
if near_rg < 0:
near_rg = ((500.0 / (5.0e-10 * scicon.c) -
beams[bnum-1].prm.lagfr)
/ beams[bnum-1].prm.smsep)
for ff in e.keys():
elvs[ff][bnum-1] = e[ff]
elv_errs[ff][bnum-1] = eerr[ff]
vheights[ff][bnum-1] = vh[ff]
vherrs[ff][bnum-1] = verr[ff]
hops[ff][bnum-1] = hh[ff]
regions[ff][bnum-1] = rr[ff]
if bnum == 0:
logging.error("unable to update any beams in this scan")
return None
if bnum < len(beams):
beams.resize(bnum)
#-------------------------------------------------------------------------
# To determine the FoV, evaluate the elevation variations across all beams
# for a range gate and virtual height band, considering each propagation
    # path (region and hop) separately.
min_inc = 0.5 * min(rg_box)
min_rg = int(min_inc)
max_rg = hard.maxgate if hard.maxgate < max(rg_max) else max(rg_max)
max_rg = int(np.ceil(max_rg - min_inc))
fovbelong = [[{"out":0, "in":0, "mix":0} for r in beams[bi].fit.slist]
for bi in range(bnum)]
fovpast = [[0 for r in beams[bi].fit.slist] for bi in range(bnum)]
fovflg = [[0 for r in beams[bi].fit.slist] for bi in range(bnum)]
fovstd = [[100.0 + max_std for r in beams[bi].fit.slist]
for bi in range(bnum)]
fovslope = [[0.01 for r in beams[bi].fit.slist] for bi in range(bnum)]
fovscore = [[max_score + 100.0 for r in beams[bi].fit.slist]
for bi in range(bnum)]
for r in np.arange(min_rg, max_rg + 1):
rgnum = 0
rgelv = {"front":list(), "back":list()}
rgvh = {"front":list(), "back":list()}
rghop = {"front":list(), "back":list()}
rgreg = {"front":list(), "back":list()}
rgbi = list()
rgsi = list()
rgrg = list()
ilim = 0
while ilim < len(rg_max) and r >= rg_max[ilim]:
ilim += 1
if ilim >= len(rg_max):
estr = "range gate [{:d}] is above the allowed maximum [".format(r)
logging.info("{:s}{:d}]".format(estr, rg_max[-1]))
continue
width = np.floor(0.5 * rg_box[ilim])
rmin = r - int(width)
rmin = rmin if rmin >= 0 else 0
rmax = int(r + int(width) + (rg_box[ilim] % 2.0))
rmax = (rmax if rmax <= hard.maxgate else
(hard.maxgate if hard.maxgate < max(rg_max) else max(rg_max)))
# For each beam, load the data for this range gate window
for bi in range(bnum):
b = beams[bi]
for ir in np.arange(rmin, rmax):
try:
si = b.fit.slist.index(ir)
except:
si = -1
# Only load data if an elevation has been calculated for
# at least one field-of-view
if si >= 0 and (not np.isnan(elvs["front"][bi][si]) or
not np.isnan(elvs["back"][bi][si])):
# Save the data for determining FoV if this value falls
# within the desired range
if ir >= rmin and ir < rmax:
rgbi.append(bi)
rgsi.append(si)
rgrg.append(ir)
goodpath = False
for ff in fov.values():
rgelv[ff].append(elvs[ff][bi][si])
rgvh[ff].append(vheights[ff][bi][si])
rghop[ff].append(hops[ff][bi][si])
rgreg[ff].append(regions[ff][bi][si])
if(not np.isnan(hops[ff][bi][si]) and
len(regions[ff][bi][si]) == 1):
goodpath = True
if goodpath:
rgnum += 1
if rgnum < min_pnts:
continue
rgbi = np.array(rgbi)
rgsi = np.array(rgsi)
rgrg = np.array(rgrg)
        rgpath = set(["{:.1f}{:s}".format(rghop[ff][ii], reg)
                      for ff in fov.values()
                      for ii,reg in enumerate(rgreg[ff])
                      if len(reg) == 1 and not np.isnan(rghop[ff][ii])])
for ff in fov.values():
rgelv[ff] = np.array(rgelv[ff])
rgvh[ff] = np.array(rgvh[ff])
# Determine the standard deviation of the elevation for the observations
# at each virtual height at this range gate window and hop.
for pp in rgpath:
hop = float(pp[0:3])
reg = pp[3:4]
            # Separate this propagation path into virtual height groups and
# test the linear regression of the elevation angles
for ff in fov.keys():
itest = [it for it,fhop in enumerate(rghop[fov[ff]])
if fhop == hop and rgreg[fov[ff]][it] == reg]
if len(itest) < min_pnts:
estr = "insufficient points to determine virtual height "
estr = "{:s}limits in the {:s} field-".format(estr, fov[ff])
estr = "{:s}of-view for propagation path [".format(estr)
estr = "{:s}{:s}] at range gate [{:d}]".format(estr, pp, r)
logging.info(estr)
else:
# Establish the virtual height windows
vmins, vmaxs = select_alt_groups(rgrg[itest],
rgvh[fov[ff]][itest],
region_hmin[reg],
region_hmax[reg],
vh_box[ilim], min_pnts)
for iv,vmin in enumerate(vmins):
# Select the data for this height range
velv = list()
vbm = list()
vrg = list()
vih = list()
for ih,vh in enumerate(rgvh[fov[ff]][itest]):
if(not np.isnan(vh) and vh >= vmin and
vh < vmaxs[iv]):
velv.append(rgelv[fov[ff]][itest][ih])
vbm.append(rgbi[itest][ih])
vrg.append(rgrg[itest][ih])
vih.append(ih)
# See if there are enough beams at this height
if len(list(set(vbm))) < min_pnts:
estr = "insufficient beams to evaluate "
estr = "{:s}{:s} field-of-".format(estr, fov[ff])
estr = "{:s}view between [{:.0f}".format(estr, vmin)
estr = "{:s}-{:.0f} km] at ".format(estr, vmaxs[iv])
estr = "{:s}range gate {:d}".format(estr, r)
logging.info(estr)
else:
# Initialize evaluation statistics to bad values
line_std = max_std + 100.0
line_dev = [max_std + 100.0 for ee in velv]
# Get the linear regression of the elevation
# angles as a function of range gate. The slope
# of this line must be flat or negative.
# Aliasing will cause positive jumps, but these
# should not be present in all boxes, allowing
# data to be assigned at times when the aliasing
# jump is not present. A more robust method
# (such as RANSAC or Theil-Sen) was not used
# since the number of points available are small
try:
ecoeff = stats.linregress(vrg, velv)
except:
# If there are not enough points to
# perform a linear regression, assume a flat
# slope with an intercept given by the mean
ecoeff = [0.0, np.nanmean(velv)]
if not np.isnan(ecoeff[0]) and ecoeff[0] <= 0.0:
lval = np.array([ecoeff[1] + ecoeff[0]
* rr for rr in vrg])
ldev = lval - np.array(velv)
lstd = np.nanstd(ldev)
lscore = [abs(ss) for ss in stats.zscore(ldev)]
# Use the current and past z-scores to
# determine whether or not each point is
# well characterized by the linear
# regression
if lstd <= max_std:
for ih,bi in enumerate(vbm):
si = rgsi[itest][vih[ih]]
if(lscore[ih] <= max_score and
lstd <= max_std and
lscore[ih] < fovscore[bi][si]
and lstd <= fovstd[bi][si]):
# If the FoV is changing, record
# that this point also met the
# criteria for the other Fov
if fovflg[bi][si] != ff:
fovpast[bi][si] = fovflg[bi][si]
# Replace if the FoV criteria are
# better, regardless of the FoV
fovflg[bi][si] = ff
fovstd[bi][si] = lstd
fovslope[bi][si] = ecoeff[0]
fovscore[bi][si] = lscore[ih]
#--------------------------------------------------------------------------
# Assign FoV to points that have realistic elevation angles in only one
# FoV. Also evaluate points that don't have FoV flags due to insufficient
# data across the range gates. Evaluate elevation spread using a (possibly)
# expanded range gate window
inc_rg_box = 3.0
for bi in range(bnum):
if step < 3:
estr = "not testing backscatter unassigned after performing scan"
logging.info("{:s}evaluation".format(estr))
break
lelv = {"front":np.array(elvs["front"][bi]),
"back":np.array(elvs["back"][bi])}
lvh = {"front":np.array(vheights["front"][bi]),
"back":np.array(vheights["back"][bi])}
for si,ifov in enumerate(fovflg[bi]):
if np.isnan(lelv['front'][si]) and np.isnan(lelv['back'][si]):
continue
if ifov == 0:
rg = beams[bi].fit.slist[si]
# If this point is unassigned, there is only one realistic
# elevation, and aliasing is unlikely, assign the FoV with the
# realistic elevation
if(np.isnan(lelv['front'][si])
and not np.isnan(lelv['back'][si]) and rg < near_rg):
fovflg[bi][si] = -1
fovstd[bi][si] = 0.0
fovslope[bi][si] = 0.0
fovscore[bi][si] = 0.0
elif(not np.isnan(lelv['front'][si])
and np.isnan(lelv['back'][si]) and rg < near_rg):
fovflg[bi][si] = 1
fovstd[bi][si] = 0.0
fovslope[bi][si] = 0.0
fovscore[bi][si] = 0.0
else:
if step < 4:
estr = "not assigning backscatter by testing the single"
logging.info("{:s} beam variations".format(estr))
continue
# Examine the surrounding observations along the beam using
# an extended range gate window
#
# Differentiate by hop
ilim = 0
while(ilim < len(rg_max) and rg >= rg_max[ilim]):
ilim += 1
if ilim >= len(rg_max):
estr = "no guidelines provided for range gate "
logging.info("{:s}[{:d}]".format(estr, rg))
continue
rg_half = (0.5 * (rg_box[ilim] + inc_rg_box))
irg_half = int(np.floor(rg_half))
min_si = si - irg_half if si >= irg_half else 0
max_si = (si + irg_half if si + irg_half < hard.maxgate
else (hard.maxgate - 1
if hard.maxgate < max(rg_max)
else max(rg_max) - 1))
# Load the front and back elevations for this range gate
# and within the extended range gate window
for ff in fov.keys():
ihop = hops[fov[ff]][bi][si]
ireg = regions[fov[ff]][bi][si]
test_rg = beams[bi].fit.slist[min_si:max_si]
test_si = list()
ecoeff = list()
lstd = max_std + 100.0
lscore = max_score + 100.0
if not np.isnan(ihop) and len(ireg) == 1:
for ri,r in enumerate(test_rg):
rsi = min_si + ri
if(hops[fov[ff]][bi][rsi] == ihop and
regions[fov[ff]][bi][rsi] == ireg and
abs(rg - beams[bi].fit.slist[rsi])
<= rg_half):
test_si.append(rsi)
if len(test_si) < min_pnts:
# If there are not enough points to perform a
# comparison continue without assigning a FoV flag
if not np.isnan(ihop) and len(ireg) == 1:
estr = "not enough points to do single-beam "
estr = "{:s}test for the ".format(estr)
estr = "{:s}{:s} field-of".format(estr, fov[ff])
estr = "{:s}-view for hop [".format(estr)
estr = "{:s}{:.1f}{:s}".format(estr, ihop, ireg)
estr = "{:s}] beam [{:d}] ".format(estr, bi)
estr = "{:s}range gate [{:d}]".format(estr, rg)
logging.info(estr)
else:
test_rg = np.array(beams[bi].fit.slist)[test_si]
ri = test_si.index(si)
try:
ecoeff = stats.linregress(test_rg, \
lelv[fov[ff]][test_si])
except:
ecoeff = [0.0,
np.nanmean(lelv[fov[ff]][test_si])]
if ecoeff[0] <= 0.0:
lval = np.array([ecoeff[1] + ecoeff[0] * rr
for rr in test_rg])
ldev = lval - np.array(lelv[fov[ff]][test_si])
lstd = np.nanstd(ldev)
lscore = [abs(ss) for ss in stats.zscore(ldev)]
# Evaluate the standard deviations and the FoV
# of the surrounding points to determine the
# FoV for this point
if lstd <= max_std:
for ih,ti in enumerate(test_si):
if(lscore[ih] <= max_score and
lstd <= max_std and
lscore[ih] < fovscore[bi][si]
and lstd <= fovstd[bi][si]):
# If the FoV is changing, record
# that this point also met the
# criteria for the other Fov
if fovflg[bi][si] != ff:
fovpast[bi][si] = fovflg[bi][si]
# Replace if this new FoV
# criteria are better, regardless
# of whether or not the FoV changes
fovflg[bi][si] = ff
fovstd[bi][si] = lstd
fovslope[bi][si] = ecoeff[0]
fovscore[bi][si] = lscore[ih]
#--------------------------------------------------------------------------
# Evaluate the FoV flags, removing points that are surrounded by data
# assigned to the opposite FoV.
for r in np.arange(min_rg, max_rg + 1):
if step < 5:
estr = "not testing backscatter assignments with azimuthal "
logging.info("{:s}continuity".format(estr))
break
# Initialize the hop-dependent data
sihop = {ih:list() for ih in np.arange(0.5, max_hop + 0.5, 0.5)}
bihop = {ih:list() for ih in np.arange(0.5, max_hop + 0.5, 0.5)}
fovhop = {ih:list() for ih in np.arange(0.5, max_hop + 0.5, 0.5)}
reghop = {ih:list() for ih in np.arange(0.5, max_hop + 0.5, 0.5)}
min_range = hard.maxgate
max_range = 0
ilim = 0
# Calculate the range gate limits
while ilim < len(rg_max) and r >= rg_max[ilim]:
ilim += 1
width = np.floor(0.5 * (rg_box[ilim] + inc_rg_box))
rm = r - int(width)
rmin = rm if rm >= 0 else 0
if rmin < min_range:
min_range = rmin
rm = r + int(width + rg_box[ilim] % 2.0)
rmax = rm if rm <= hard.maxgate else hard.maxgate + 1
if rmax > max_range:
max_range = rmax
# For each beam in the maximum possible range gate window, gather the
# range gate, FoV flag, beam index, and range gate index for each hop
for bi in range(bnum):
b = beams[bi]
for ir in np.arange(min_range, max_range):
try:
si = b.fit.slist.index(ir)
except:
si = -1
# Save the data if a FoV flag has been found and the range
# gate limits are appropriate for the hop
if si >= 0 and fovflg[bi][si] != 0:
ihop = hops[fov[fovflg[bi][si]]][bi][si]
ireg = regions[fov[fovflg[bi][si]]][bi][si]
if(len(ireg) == 1 and not np.isnan(ihop) and ihop <= max_hop
and ir >= rmin and ir < rmax):
bihop[ihop].append(bi)
sihop[ihop].append(si)
fovhop[ihop].append(fovflg[bi][si])
reghop[ihop].append(ireg)
        # Determine the fraction of points in the front and back FoV within
        # azimuthal constraints (beam limits) added to the previous limits.
        # If there are an overwhelming number of points in one FoV, remove
        # all FoV flags from the points in the other FoV.
for ihop in fovhop.keys():
for ireg in set(reghop[ihop]):
rind = [ii for ii,rr in enumerate(reghop[ihop]) if rr == ireg]
# If there are sufficient points, evaluate the data at this hop
if len(rind) > min_pnts:
# Evaluate the data in an azimuthal box
for bi in set(np.array(bihop[ihop])[rind]):
# Determine the azimuthal limits
bmnum = beams[bi].bmnum
bwidth = int(min_pnts * 0.75)
bmin = bmnum - bwidth if bmnum >= min_pnts else 0
if bmnum <= hard.maxbeam - bwidth:
bmax = bmnum + bwidth
else:
bmax = hard.maxbeam
ibeam = [ii for ii in rind
if(beams[bihop[ihop][ii]].bmnum >= bmin and
beams[bihop[ihop][ii]].bmnum < bmax)]
bad_fov = 0
good_fov = False
if len(ibeam) > min_pnts:
# Sum the points in this box
fn = sum([1 for ff in np.array(fovhop[ihop])[ibeam]
if ff == 1])
bn = sum([1 for ff in np.array(fovhop[ihop])[ibeam]
if ff == -1])
else:
fn = 0
bn = 0
if fn + bn > 0:
ffrac = float(fn) / float(fn + bn)
if ffrac >= fov_frac and bn > 0:
bad_fov = -1
good_fov = True
elif (1.0 - ffrac) >= fov_frac and fn > 0:
bad_fov = 1
good_fov = True
# Tag all points whose FoV are or are not consistent
# with the observed structure at this hop
for ff,ifov in enumerate(np.array(fovhop[ihop])[ibeam]):
ii = ibeam[ff]
si = sihop[ihop][ii]
ci = bihop[ihop][ii]
if good_fov:
if ifov != bad_fov:
# This point is associated with a structure
# that is predominantly the same FoV
fovbelong[ci][si]["in"] += 1
else:
# If this point is not associated with a
# structure that is predominately the same
# FoV and this is not the only FoV capable
# of producing a realistic elevation angle,
# flag this point as an outlier
ir = beams[ci].fit.slist[si]
if(not (np.isnan(elvs[fov[-ifov]][ci][si])
and ir < near_rg)):
fovbelong[ci][si]["out"] += 1
else:
fovbelong[ci][si]["mix"] += 1
# If any points have been flagged as outliers, remove or change their FoV
for bi in range(bnum):
# Break this loop if no continuity tests are desired
if step < 5:
break
for si,bdict in enumerate(fovbelong[bi]):
if bdict["out"] > 0 and bdict["in"] < bdict["out"] + bdict["mix"]:
# This point is an outlier in a structure with the opposite FoV.
# If this point fit the criteria for the other FoV in the past,
# assign that FoV. Otherwise remove any FoV assignment.
if bdict['out'] > bdict['mix'] and bdict['out'] > bdict['in']:
fovflg[bi][si] = fovpast[bi][si]
else:
fovflg[bi][si] = 0
fovpast[bi][si] = 0
estr = "field-of-view is not consistent with the observed "
estr = "{:s}structure at hop [{:.1f}".format(estr, ihop)
estr = "{:s}{:s}] beam [".format(estr, ireg)
estr = "{:s}{:d}] range gate [".format(estr, beams[bi].bmnum)
estr = "{:s}{:d}]".format(estr, beams[bi].fit.slist[si])
logging.info(estr)
#--------------------------------------------------------------------------
# Assign the appropriate virtual heights and elevation angles to each
# point based on their FoV. Also assign initial regions based on virtual
# height
for bi in range(bnum):
snum = len(beams[bi].fit.slist)
beams[bi].fit.region = ["" for si in range(snum)]
beams[bi].fit.hop = [np.nan for si in range(snum)]
beams[bi].fit.vheight = [np.nan for si in range(snum)]
beams[bi].fit.vheight_e = [np.nan for si in range(snum)]
beams[bi].fit.fovelv = [np.nan for si in range(snum)]
beams[bi].fit.fovelv_e = [np.nan for si in range(snum)]
beams[bi].fit.fovflg = fovflg[bi]
for si,ifov in enumerate(beams[bi].fit.fovflg):
if ifov == 0 or np.isnan(ifov):
# Default to front FoV if none was found
beams[bi].fit.fovelv[si] = elvs["front"][bi][si]
beams[bi].fit.vheight[si] = vheights["front"][bi][si]
beams[bi].fit.hop[si] = hops["front"][bi][si]
beams[bi].fit.region[si] = regions["front"][bi][si]
else:
# Assign the appropriate FoV
beams[bi].fit.region[si] = regions[fov[ifov]][bi][si]
beams[bi].fit.hop[si] = hops[fov[ifov]][bi][si]
beams[bi].fit.vheight[si] = vheights[fov[ifov]][bi][si]
beams[bi].fit.vheight_e[si] = vherrs[fov[ifov]][bi][si]
beams[bi].fit.fovelv_e[si] = elv_errs[fov[ifov]][bi][si]
beams[bi].fit.fovelv[si] = elvs[fov[ifov]][bi][si]
# Additional values returned for use in analysis and UT continuity test
beams[bi].fit.felv = elvs["front"][bi]
beams[bi].fit.felv_e = elv_errs["front"][bi]
beams[bi].fit.belv = elvs["back"][bi]
beams[bi].fit.belv_e = elv_errs["back"][bi]
beams[bi].fit.fvheight = vheights["front"][bi]
beams[bi].fit.fvheight_e = vherrs["front"][bi]
beams[bi].fit.bvheight = vheights["back"][bi]
beams[bi].fit.bvheight_e = vherrs["back"][bi]
beams[bi].fit.fhop = hops["front"][bi]
beams[bi].fit.bhop = hops["back"][bi]
beams[bi].fit.fregion = regions["front"][bi]
beams[bi].fit.bregion = regions["back"][bi]
beams[bi].fit.pastfov = fovpast[bi]
return beams
#-------------------------------------------------------------------------
def update_beam_fit(beam, hard=None,
region_hmax={"D":115.0,"E":150.0,"F":900.0},
region_hmin={"D":75.0,"E":115.0,"F":150.0}, max_hop=3.0,
ptest=True, strict_gs=False, bmaz_e=0.0, boresite_e=0.0,
ix_e=0.0, iy_e=0.0, iz_e=0.0):
"""Update the beam.fit and beam.prm class, updating and adding attributes
needed for common data analysis.
Currently the earth radius error and slant distance error have no update
option through this routine and are identically zero.
Parameters
------------
beam : (class `sdio.radDataTypes.beamData`)
Radar data for a specific beam
hard : (class `pydarn.radar.radStruct.site` or NoneType)
Hardware information for this radar. Will load if not supplied.
(default=None)
region_hmax : (dict)
Maximum virtual heights allowed in each ionospheric layer.
(default={"D":115.0,"E":150.0,"F":400.0})
region_hmin : (dict)
Minimum virtual heights allowed in each ionospheric layer.
(default={"D":75.0,"E":115.0,"F":150.0})
max_hop : (float)
        The maximum allowable hop to be considered physical. (default=3.0)
    ptest : (boolean)
        Perform test to see if propagation modes are realistic? (default=True)
    strict_gs : (boolean)
Remove indeterminately flagged backscatter (default=False)
bmaz_e : (float)
Error in beam azimuth in degrees (default=0.0)
boresite_e : (float)
Error in the boresite location in degrees (default=0.0)
ix_e : (float)
Error in the interferometer x coordinate in meters (default=0.0)
iy_e : (float)
Error in the interferometer y coordinate in meters (default=0.0)
iz_e : (float)
Error in the interferometer z coordinate in meters (default=0.0)
Returns
---------
return beam, elvs, elv_errs, vheights, vherrs, hops, regions, hard
beam : (class beamData)
        Updated beamData class object. The beam has the following additional
or adjusted attributes:
beam.fit.gflg : updated : Flag indicating backscatter type
(1=ground, 0=ionospheric, -1=indeterminate)
beam.prm.tdiff : possibly updated : tdiff used in elevation (microsec)
beam.prm.tdiff_e : possibly updated : tdiff error (microsec)
elvs : (dict)
Elevation angles for the front "front" and rear "back" FoV
elv_errs : (dict)
Elevation angle errors for the front "front" and rear "back" FoV.
There is currently no method for calculating these errors from the
fit data, so np.nan will be returned in all cases.
vheights : (dict)
Virtual heights for the front "front" and rear "back" FoV
vherrs : (dict)
Virtual height errors for the front "front" and rear "back" FoV.
There is currently no method for calculating these errors from the
fit data, so np.nan will be returned in all cases.
hops : (dict)
Hops for the front "front" and rear "back" FoV
regions : (dict)
Ionospheric regions for the front "front" and rear "back" FoV
hard : (class `pydarn.radar.radStruct.site`)
Radar hardware data for this scan
"""
import davitpy.pydarn.sdio as sdio
import davitpy.pydarn.radar as pyrad
import davitpy.utils.geoPack as geo
import calc_elevation as ce
import calc_height as ch
#----------------------------------
# Test input
if not isinstance(region_hmin, dict) or min(region_hmin.values()) < 0.0:
estr = 'unknown minimum virtual heights [{:}]'.format(region_hmin)
logging.error(estr)
return beam, None, None, None, None, None, None, None
if not isinstance(region_hmax, dict):
estr = 'unknown maximum virtual heights [{:}]'.format(region_hmax)
logging.error(estr)
return beam, None, None, None, None, None, None, None
if isinstance(max_hop, int):
max_hop = float(max_hop)
if not isinstance(max_hop, float) or max_hop < 0.5:
        logging.error('maximum hop must be a float of at least 0.5')
return beam, None, None, None, None, None, None, None
if beam is None or beam.fit.slist is None or len(beam.fit.slist) <= 0:
logging.warning("no fit data in beam at {:}".format(beam.time))
return beam, None, None, None, None, None, None, None
#-----------------------------------
# Initialize FoV dependent values
    slist = getattr(beam.fit, "slist")
elvs_aliased = {"front":[np.nan for s in slist],
"back":[np.nan for s in slist]}
elva_errs = {"front":[np.nan for s in slist],
"back":[np.nan for s in slist]}
elvs = {"front":[np.nan for s in slist], "back":[np.nan for s in slist]}
elv_errs = {"front":[np.nan for s in slist], "back":[np.nan for s in slist]}
vheights = {"front":[np.nan for s in slist], "back":[np.nan for s in slist]}
vheights_aliased = {"front":[np.nan for s in slist],
"back":[np.nan for s in slist]}
vheighta_errs = {"front":[np.nan for s in slist],
"back":[np.nan for s in slist]}
vherrs = {"front":[np.nan for s in slist], "back":[np.nan for s in slist]}
hops = {"front":[0.5 for s in slist], "back":[0.5 for s in slist]}
regions = {"front":["" for s in slist], "back":["" for s in slist]}
# Initialize local constants
vmin = min(region_hmin.values())
vmax = max(region_hmax.values())
#------------------------------------------------------------------------
# Load the radar hardware data and calculate hardware specific variables,
# if it hasn't been done already
if hard is None:
try:
hard = pyrad.site(radId=beam.stid, dt=beam.time)
except:
estr = "unable to load hardware data for radar "
estr = "{:s}{:d} at {:}".format(estr, beam.stid, beam.time)
logging.warning(estr)
return beam, elvs, elv_errs, vheights, vherrs, None, None, None
# Use the geodetic/geocentric conversion to get the terrestrial radius at
# the radar location (same in both coordinate systems)
(lat, lon, radius) = geo.geodToGeoc(hard.geolat, hard.geolon, False)
# Calculate the 0.5 hop distance and initialize the hop list
dlist = calc_distance(beam)
dist = {'front':np.array(dlist), "back":np.array(dlist)}
# Update the groundscatter flag (both distances are the same)
gflg = select_beam_groundscatter(beam, dist['front'], max_rg=hard.maxgate)
for i,g in enumerate(beam.fit.gflg):
if g == 1:
try:
gflg.index(i)
# If this is groundscatter, update the distance and the hop
hops['front'][i] = 1.0
hops['back'][i] = 1.0
dist['front'][i] *= 0.5
dist['back'][i] *= 0.5
except:
# This point was found not to be groundscatter. It is probably
# slow moving ionospheric backscatter, so treat it like
# ionospheric backscatter but change the flag to let the user
# know that it was not flagged by the initial ionospheric
# backscatter test
beam.fit.gflg[i] = -1
if strict_gs:
hops['front'][i] = np.nan
hops['back'][i] = np.nan
dist['front'][i] = np.nan
dist['back'][i] = np.nan
# Remove backscatter with negative power estimates
if beam.fit.p_l[i] < 0.0 or beam.fit.p_s[i] < 0.0:
hops['front'][i] = np.nan
hops['back'][i] = np.nan
dist['front'][i] = np.nan
dist['back'][i] = np.nan
# Calculate the elevation angles for the front and rear FoV, after
# initializing the beam parameters with the supplied tdiff
if not hasattr(beam.prm, "tdiff") or beam.prm.tdiff is None:
beam.prm.tdiff = hard.tdiff
if not hasattr(beam.prm, "tdiff_e") or beam.prm.tdiff_e is None:
beam.prm.tdiff_e = np.nan
for ff in ["front", "back"]:
# Calculate the elevation
try:
(elvs[ff], elv_errs[ff], pamb,
hard) = ce.calc_elv_w_err(beam, hard=hard, bmaz_e=bmaz_e,
boresite_e=boresite_e, ix_e=ix_e,
iy_e=iy_e, iz_e=iz_e,
tdiff=beam.prm.tdiff,
tdiff_e=beam.prm.tdiff_e, fov=ff)
(elvs_aliased[ff], elva_errs[ff], pamb,
hard) = ce.calc_elv_w_err(beam, hard=hard, bmaz_e=bmaz_e,
boresite_e=boresite_e, ix_e=ix_e,
iy_e=iy_e, iz_e=iz_e,
tdiff=beam.prm.tdiff, alias=1.0, fov=ff)
except:
estr = "can't get elevation for beam {:d} at {:}".format(beam.bmnum,
beam.time)
logging.info(estr)
elvs[ff] = None
if elvs[ff] is not None:
# Get the virtual height and virtual height error
vheights[ff], vherrs[ff] = \
ch.calc_virtual_height_w_err(beam, radius, elv=elvs[ff],
elv_e=elv_errs[ff], dist=dist[ff],
dist_e=[0.0 for dd in dist[ff]],
dist_units="km")
vheights_aliased[ff], vheighta_errs[ff] = \
ch.calc_virtual_height_w_err(beam, radius, elv=elvs_aliased[ff],
elv_e=elva_errs[ff], dist=dist[ff],
dist_e=[0.0 for dd in dist[ff]],
dist_units="km")
# Test the virtual height
for i,vh in enumerate(vheights[ff]):
if not np.isnan(vh) and vh < vmin:
# This height is too low. Replace it with a value corrected
# with a 2 pi alias or remove it from consideration for
# this FoV
if vheights_aliased[ff][i] < vmin:
elvs[ff][i] = elvs_aliased[ff][i]
elv_errs[ff][i] = elva_errs[ff][i]
vheights[ff][i] = vheights_aliased[ff][i]
vherrs[ff][i] = vheighta_errs[ff][i]
else:
elvs[ff][i] = np.nan
vheights[ff][i] = np.nan
vh = vheights[ff][i]
vhe = vherrs[ff][i]
if not np.isnan(vh):
hop = hops[ff][i]
dd = dlist[i] * 0.5 / hop
ghop = True
while vh > vmax and hop <= max_hop:
# This height is too high. Increase the hop
                        # number to achieve a realistic value
hop += 1.0
dd = dlist[i] * 0.5 / hop
vout = ch.calc_virtual_height_w_err(beam, radius, \
elv=[elvs[ff][i]], elv_e=[elv_errs[ff][i]],\
dist=[dd], dist_e=[0.0], dist_units="km")
vh = vout[0][0]
vhe = vout[1][0]
# Test the distance and hop to ensure that this
# mode is realistic
if ptest:
ghop = test_propagation(hop, vh, dd,
region_hmax=region_hmax,
region_hmin=region_hmin)
if not ghop:
# If this is not a valid propagation path, attempt to
# use the elevation angle with a 2pi alias added
ea = elvs_aliased[ff][i]
ee = elva_errs[ff][i]
vh = vheights_aliased[ff][i]
vhe = vheighta_errs[ff][i]
hop = 1.0 if beam.fit.gflg[i] == 1 else 0.5
dd = dlist[i] * 0.5 / hop
while vh > vmax and hop <= max_hop:
# This height is too high. Increase the hop
                            # number to achieve a realistic value
hop += 1.0
dd = dlist[i] * 0.5 / hop
vout = ch.calc_virtual_height_w_err(beam, radius, \
elv=[ea],
elv_e=[ee], \
dist=[dd], dist_e=[0.0], dist_units="km")
vh = vout[0][0]
vhe = vout[1][0]
if vh >= vmin:
ghop = test_propagation(hop, vh, dd,
region_hmax=region_hmax,
region_hmin=region_hmin)
else:
ea = elvs[ff][i]
ee = elv_errs[ff][i]
if hop <= max_hop and ghop:
# Update the lists
hops[ff][i] = hop
dist[ff][i] = dd
vheights[ff][i] = vh
vherrs[ff][i] = vhe
elvs[ff][i] = ea
elv_errs[ff][i] = ee
regions[ff][i] = assign_region(vh,
region_hmax=region_hmax,
region_hmin=region_hmin)
else:
# Unable to calculate a realistic virtual
# height within a sane number of hops, even accounting
# for possible aliasing
hops[ff][i] = np.nan
elvs[ff][i] = np.nan
vheights[ff][i] = np.nan
else:
hops[ff] = [np.nan for r in slist]
elvs[ff] = [np.nan for r in slist]
vheights[ff] = [np.nan for r in slist]
return beam, elvs, elv_errs, vheights, vherrs, hops, regions, hard
#---------------------------------------------------------------------------
def update_backscatter(rad_bms, min_pnts=3,
region_hmax={"D":115.0,"E":150.0,"F":900.0},
region_hmin={"D":75.0,"E":115.0,"F":150.0},
rg_box=[2,5,10,20], vh_box=[50.0,50.0,50.0,150.0],
max_rg=[5,25,40,76], max_hop=3.0,
ut_box=dt.timedelta(minutes=20.0), tdiff=None,
tdiff_args=list(), tdiff_e=None, tdiff_e_args=list(),
ptest=True, strict_gs=False, bmaz_e=0.0, boresite_e=0.0,
ix_e=0.0, iy_e=0.0, iz_e=0.0, step=6):
"""Updates the propagation path, elevation, backscatter type, and origin
field-of-view (FoV) for all backscatter observations in each beam. Scans
    of data are used to determine the origin FoV; a full scan is not
    necessary, but if the number of beams is less than the specified minimum,
    a less rigorous evaluation method is used.
Parameters
-------------
rad_bms : (list or class `pydarn.sdio.radDataTypes.radDataPtr`)
A list of or pointer to beamData class objects
min_pnts : (int)
The minimum number of points necessary to perform certain range gate
or beam specific evaluations. (default=3)
region_hmax : (dict)
Maximum virtual heights allowed in each ionospheric layer.
(default={"D":115.0,"E":150.0,"F":900.0})
region_hmin : (dict)
Minimum virtual heights allowed in each ionospheric layer.
(default={"D":75.0,"E":115.0,"F":150.0})
rg_box : (list of int)
The total number of range gates to include when examining the elevation
angle across all beams. (default=[2,5,10,20])
vh_box : (list of float)
The total width of the altitude box to consider when examining the
elevation angle across all beams at a given range gate.
(default=[50.0,50.0,50.0,150.0])
    max_rg : (list of int)
        Maximum range gate to which each rg_box and vh_box value applies.
        (default=[5,25,40,76])
    max_hop : (float)
        The maximum allowable hop to be considered physical. (default=3.0)
ut_box : (class `dt.timedelta`)
Total width of universal time box to examine for backscatter FoV
continuity. (default=20.0 minutes)
tdiff : (function or NoneType)
        A function to retrieve tdiff values (in microsec) using the radar ID
        number, current datetime, and transmission frequency as input.
Additional inputs may be specified using tdiff_args. Example:
def get_tdiff(stid, time, tfreq, filename) { do things } return tdiff
tdiff=get_tdiff, tdiff_args=["tdiff_file"]
(default=None)
tdiff_args : (list)
        A list specifying any arguments other than radar, time, and
        transmission frequency to run the specified tdiff function.
(default=list())
    tdiff_e : (function or NoneType)
        A function to retrieve tdiff error values (in microsec) using the radar
        ID number, current datetime, and transmission frequency as input.
        Additional inputs may be specified using tdiff_e_args. Example:
        def get_tdiffe(stid, time, tfreq, filename) { do things } return tdiffe
tdiff_e=get_tdiffe, tdiff_e_args=["tdiff_file"]
(default=None)
tdiff_e_args : (list)
        A list specifying any arguments other than radar, time, and
transmission frequency to run the specified tdiff_e function.
(default=list())
    ptest : (boolean)
        Test to see if a propagation path is realistic (default=True)
    strict_gs : (boolean)
Remove indeterminately flagged backscatter (default=False)
bmaz_e : (float)
Error in beam azimuth in degrees (default=0.0)
boresite_e : (float)
Error in the boresite location in degrees (default=0.0)
ix_e : (float)
Error in the interferometer x coordinate in meters (default=0.0)
iy_e : (float)
Error in the interferometer y coordinate in meters (default=0.0)
iz_e : (float)
Error in the interferometer z coordinate in meters (default=0.0)
step : (int)
Integer denoting the number of processing steps to perform. This should
always be set to 6 (or greater) unless one wishes to reproduce the
demonstration plots in Burrell et al (2015). (default=6) The step
numbers coincide with those indicated in the paper:
1 or 2: Examine the elevation structure across each scan
3: Add assignments for points with realistic heights in only one FoV
4: Add assignments using single-beam elevation angle variations
        5 or more: Test assignments for consistency along the scan.
Returns
---------
    beams : (dict)
A dictionary of updated beamData class objects. The dictionary keys
correspond to the beam numbers, and contain np.arrays of beams sorted
by UT with the following additional/updated attributes
beam.fit.fovelv : added : Accounts for adjusted tdiff and origin FoV
beam.fit.fovelv_e : added : elevation error
beam.fit.felv : added : Elevation angle assuming front FoV
beam.fit.felv_e : added : Elevation angle error assuming front FoV
beam.fit.belv : added : Elevation angle assuming rear FoV
        beam.fit.belv_e : added : Elevation angle error assuming rear FoV
beam.fit.vheight : added : virtual height of ionosphere in km
beam.fit.vheight_e : added : error in virtual height (km)
beam.fit.fvheight : added : virtual height assuming front FoV
beam.fit.fvheight_e : added : error in virtual height assuming front FoV
beam.fit.bvheight : added : virtual height assuming rear FoV
beam.fit.bvheight_e : added : error in virtual height assuming rear FoV
beam.fit.hop : added : Hop assuming the assigned origin FoV
beam.fit.fhop : added : Hop assuming the front FoV
beam.fit.bhop : added : Hop assuming the rear FoV
beam.fit.region : added : Region assuming the assigned origin FoV
beam.fit.fregion : added : Region assuming the front FoV
beam.fit.bregion : added : Region assuming the rear FoV
beam.fit.fovflg : added : Flag indicating origin FoV (1=front, -1=back,
0=indeterminate)
beam.fit.pastfov : added : Flag indicating past FoV assignments
beam.fit.gflg : updated : Flag indicating backscatter type
(1=ground, 0=ionospheric, -1=indeterminate)
beam.prm.tdiff : added : tdiff used in elevation (microsec)
beam.prm.tdiff_e : possibly added : tdiff error (microsec)
If the input is incorrect, exits with an exception
"""
import davitpy.pydarn.sdio as sdio
import davitpy.pydarn.radar as pyrad
#----------------------------------
# Test input
assert(((isinstance(rad_bms, list) or isinstance(rad_bms, np.ndarray)) and
isinstance(rad_bms[0], sdio.radDataTypes.beamData)) or
isinstance(rad_bms, sdio.radDataTypes.radDataPtr)), \
logging.error('need a list/array of beams or a radar data pointer')
if isinstance(min_pnts, float):
min_pnts = int(min_pnts)
assert isinstance(min_pnts, int) and min_pnts >= 0, \
logging.error('unknown point minimum [{:}]'.format(min_pnts))
assert isinstance(region_hmin, dict) and min(region_hmin.values()) >= 0.0, \
logging.error("unknown minimum h' [{:}]".format(region_hmin))
assert isinstance(region_hmax, dict), \
logging.error("unknown maximum h' [{:}]".format(region_hmax))
assert((isinstance(rg_box, list) or isinstance(rg_box, np.ndarray))
and min(rg_box) >= 1.0), \
logging.error('range gate box is too small [{:}]'.format(rg_box))
assert((isinstance(vh_box, list) or isinstance(vh_box, np.ndarray))
and min(vh_box) >= 0.0), \
logging.error('virtual height box is too small [{:}]'.format(vh_box))
assert((isinstance(max_rg, list) or isinstance(max_rg, np.ndarray))
and min(max_rg) >= 0), \
logging.error('max range gate box is too small [{:}]'.format(max_rg))
if isinstance(max_hop, int):
max_hop = float(max_hop)
assert isinstance(max_hop, float) and max_hop >= 0.5, \
logging.error('hop limits are unrealistic [{:}]'.format(max_hop))
assert isinstance(ut_box, dt.timedelta) and ut_box.total_seconds() > 0.0, \
        logging.error('UT box must be a positive datetime.timedelta object')
if isinstance(step, float):
step = int(step)
assert isinstance(step, int), logging.error('step flag must be an int')
#-----------------------------------------------------------------------
# Cycle through all the beams
snum = 0
num = 0
bm, num = get_beam(rad_bms, num)
max_del_beam = 3
have_scan = False
# Load the hardware data for the first time
try:
hard = pyrad.site(radId=bm.stid, dt=bm.time)
except:
logging.error("no data available in input rad structure")
return None
#----------------------------------------------------------------
# Cycle through the data, updating the beams one scan at a time
scan = np.empty(shape=(hard.maxbeam,), dtype=type(bm))
beams = list()
while bm is not None:
# Load the beam into the current scan if the scan is empty or if
# the current beam is within a specified period of time considering
# the difference in beams
if snum == 0:
bm.scan_time = bm.time
scan[snum] = bm
snum += 1
bm_sign = 0
else:
del_time = (bm.time - scan[snum-1].time).total_seconds()
del_beam = bm.bmnum - scan[snum-1].bmnum
time_inc = bm.prm.inttsc + bm.prm.inttus * 1.0e-6
if(del_beam != 0 and bm.cp == scan[0].cp and
del_time <= 3.0 * abs(del_beam) * time_inc and
abs(del_beam) <= max_del_beam):
if bm_sign == 0 or bm_sign == np.sign(del_beam):
bm_sign = np.sign(del_beam)
bm.scan_time = scan[0].time
scan[snum] = bm
snum += 1
else:
have_scan = True
else:
have_scan = True
#-----------------------------------------------------------------
# If a scan has been loaded, update the backscatter data in the
# beams and load the current beam as the first element of a new scan
if have_scan:
if snum >= min_pnts:
st = scan[0].time
b = update_bs_w_scan(scan[0:snum], hard, min_pnts=min_pnts,
region_hmax=region_hmax,
region_hmin=region_hmin,
rg_box=rg_box, vh_box=vh_box,
rg_max=max_rg, max_hop=max_hop,
tdiff=tdiff, tdiff_args=tdiff_args,
tdiff_e=tdiff_e,
tdiff_e_args=tdiff_e_args, ptest=ptest,
strict_gs=strict_gs, bmaz_e=bmaz_e,
boresite_e=boresite_e, ix_e=ix_e,
iy_e=iy_e, iz_e=iz_e, step=step)
if b is not None:
beams.extend(list(b))
else:
logging.info("unable to update scan at {:}".format(st))
bm.scan_time = bm.time
scan[0] = bm
snum = 1
bm_sign = 0
have_scan = False
# Cycle to next beam
bm, num = get_beam(rad_bms, num)
#---------------------------------------------------------------------
# Once the scans have been loaded, beam-UT tests of the FoV flags can
# be performed
inc_rg_box = 3
beam_dict = beam_ut_struct_test(beams, frg_box=np.array(rg_box)+inc_rg_box,
max_rg=max_rg, ut_box=ut_box,
reg_attr="region", hop_attr="hop",
fov_attr="fovflg", step=step)
return(beam_dict)
def beam_ut_struct_test(rad_bms, min_frac=.10, frg_box=[5,8,13,23],
max_rg=[5,25,40,76], ut_box=dt.timedelta(minutes=20.0),
reg_attr="region", hop_attr="hop", fov_attr="fovflg",
restrict_attr=[], restrict_lim=[], step=6):
"""Routine to test for field-of-view (FoV) and structure continuity in UT
    across each beam. Hop (or groundscatter flag) will be used to separate
    structure types.
Parameters
-----------
rad_bms : (list or class `sdio.radDataTypes.radDataPtr`)
List of or pointer to beam data
min_frac : (float)
Minimum fraction of possible backscatter points needed in the RG/UT
box to perform the FoV calculation (default=.1)
    frg_box : (list, np.array)
        Total width of range gate box to examine for backscatter FoV
        continuity. (default=[5,8,13,23])
    max_rg : (list, np.array)
        Maximum range gate to which each frg_box value applies.
        (default=[5,25,40,76])
ut_box : (class `dt.timedelta`)
Total width of universal time box to examine for backscatter FoV
continuity. (default=20.0 minutes)
reg_attr : (string)
        beam.fit attribute name to separate different ionospheric regions.
        Can discard by entering nothing. (default="region")
    hop_attr : (string)
        beam.fit attribute name to separate different structure types. Designed
to use either the groundscatter flag or the hop data. (default="hop")
fov_attr : (string)
beam.fit attribute name of the FoV flag
restrict_attr : (list)
List containing strings with attribute names. Used to restrict the
consideration further, such as by virtual height or slant path distance
from the radar to the first ionospheric reflection point. An empty list
means no additional restrictions are desired. (default=[])
restrict_lim : (list)
List containing two-element lists with the minimum and maximum values
of the restriction limits for the attributes contained in restrict_attr.
(default=[])
step : (int)
Integer denoting the number of processing steps to perform. This should
always be set to 6 (or greater) unless one wishes to reproduce the
demonstration plots in Burrell et al (2015). (default=6) The step
numbers coincide with those indicated in the paper:
1-5: Examine the elevation structure and consistency along the scan
6: Test for temporal consistency
Returns
----------
beams : (dict)
        Dictionary containing lists of beams with updated FoV flags separated
by beam number. The beam numbers are the dictionary keys
"""
import davitpy.pydarn.sdio as sdio
import davitpy.pydarn.radar as pyrad
fov_frac = 2.0 / 3.0
near_rg = -1
#----------------------------
# Initialize the output
beams = dict()
#----------------------------------
# Test input
if(not isinstance(rad_bms, list) and
not isinstance(rad_bms, sdio.radDataTypes.radDataPtr)):
logging.error('need a list of beams or a radar data pointer')
return beams
if(isinstance(rad_bms, list) and
(len(rad_bms) <= 0 or not isinstance(rad_bms[0],
sdio.radDataTypes.beamData))):
logging.error('list must contain at least one beam')
return beams
if isinstance(min_frac, int):
min_frac = float(min_frac)
if not isinstance(min_frac, float) or min_frac <= 0.0 or min_frac > 1.0:
estr = 'unrealistic minimum FoV fraction [{:}]'.format(min_frac)
logging.error(estr)
return beams
if((not isinstance(frg_box, list) and not isinstance(frg_box, np.ndarray))
or len(frg_box) <= 0):
estr = 'unrealistic FoV range gate box [{:}]'.format(frg_box)
logging.error(estr)
return beams
if((not isinstance(max_rg, list) and not isinstance(max_rg, np.ndarray))
or len(max_rg) <= 0):
estr = 'unrealistic maximum range gate box [{:}]'.format(max_rg)
logging.error(estr)
return beams
if not isinstance(ut_box, dt.timedelta) or ut_box.total_seconds() <= 0.0:
logging.error('unrealistic UT box [{:}]'.format(ut_box))
return beams
if not isinstance(restrict_attr, list):
logging.error('provide more restricting attributes in a list')
return beams
if not isinstance(restrict_lim, list):
logging.error('provide more restricting limits in a list')
return beams
if isinstance(step, float):
step = int(step)
if not isinstance(step, int):
logging.error('unrealistic step flag [{:}]'.format(step))
return beams
    if not isinstance(reg_attr, str) or len(reg_attr) <= 0:
        logging.error('badly formatted region attribute [{:}]'.format(reg_attr))
        return beams
    if not isinstance(hop_attr, str) or len(hop_attr) <= 0:
        logging.error('badly formatted hop attribute [{:}]'.format(hop_attr))
        return beams
    if not isinstance(fov_attr, str) or len(fov_attr) <= 0:
        estr = 'badly formatted FoV flag attribute [{:}]'.format(fov_attr)
        logging.error(estr)
return beams
#-----------------------------------------------------------------------
# Load the first beam and initialize limits
num = 0
bm, num = get_beam(rad_bms, num)
rhalf = [int(r * 0.5) for r in frg_box]
try:
hard = pyrad.site(radId=bm.stid, dt=bm.time)
except:
logging.error("no data available in input rad structure")
return(beams)
while bm is not None:
bnum = bm.bmnum
if near_rg < 0:
near_rg = ((500.0 / (5.0e-10 * scicon.c) - bm.prm.lagfr)
/ bm.prm.smsep)
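            # Note: 5.0e-10 * c is numerically half the speed of light in
            # km per microsecond of lag time (~0.15), so near_rg is roughly
            # the range gate whose slant range reaches 500 km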
# Load the beams into the output dictionary
        if bnum in beams:
beams[bnum].append(bm)
else:
beams[bnum] = [bm]
# Cycle to the next beam
bm, num = get_beam(rad_bms, num)
#-----------------------------------------------------------------------
# Test the step flag and see if the temporal continuity test should be
# performed
if step < 6:
estr = "not testing backscatter assignments with temporal continuity"
logging.info(estr)
return(beams)
#-----------------------------------------------------------------------
# Cycle through all the beams, updating the FoV flag and structure flag
# once enough data has been loaded
for bnum in beams.keys():
bis = 0
fovbelong = [[{"out":0, "in":0, "mix":0}
for j in beams[bnum][i].fit.slist]
for i in np.arange(0, len(beams[bnum]))]
fovpast = [[j for j in beams[bnum][i].fit.pastfov]
for i in np.arange(0, len(beams[bnum]))]
for i in np.arange(0, len(beams[bnum])):
# See if there is enough data at this beam to begin the evaluation
while beams[bnum][i].time - beams[bnum][bis].time >= ut_box:
# Check the common program of each of the beams. For a UT
            # comparison, the cp must be the same for all beams
bicp = [bis + j for j,b in enumerate(beams[bnum][bis:i])
if(b.cp == beams[bnum][bis].cp and
b.time - beams[bnum][bis].time < ut_box)]
# Test to see if there is enough data to fill the time window
if beams[bnum][i].time - beams[bnum][bis].time < ut_box:
break
# Get the range gate, FoV flag, hop, beam index, and range
# gate index for all backscatter points at these beams
rgates = list()
fovflg = list()
onefov = list()
hops = list()
regions = list()
bi = list()
ri = list()
rdata = dict()
for rattr in restrict_attr:
rdata[rattr] = list()
for bb in bicp:
b = beams[bnum][bb]
# Load data from the beam, if it exists
if(b.fit is not None and hasattr(b.fit, "slist") and
hasattr(b.fit, fov_attr) and hasattr(b.fit, hop_attr)):
slist = getattr(b.fit, "slist")
rgates.extend(slist)
bi.extend([bb for j in slist])
ri.extend([j for j,r in enumerate(slist)])
fflg = getattr(b.fit, fov_attr)
fovflg.extend(fflg)
otherelv = [b.fit.felv[oe] if ff == -1 else
b.fit.belv[oe] for oe,ff in enumerate(fflg)]
onefov.extend([np.isnan(oe) if slist[j] < near_rg
else False
for j,oe in enumerate(otherelv)])
hops.extend(getattr(b.fit, hop_attr))
if len(reg_attr) > 0 and hasattr(b.fit, reg_attr):
regions.extend(getattr(b.fit, reg_attr))
for j,rattr in enumerate(restrict_attr):
if hasattr(b.fit, rattr):
rdata[rattr].extend(getattr(b.fit, rattr))
else:
rdata[rattr].extend([restrict_lim[j][0]
for r in slist])
if len(rgates) > 0:
# Cycle through range gate boxes
range_min = np.nanmin(rgates)
range_max = np.nanmax(rgates)
if range_max > max(max_rg):
range_max = max(max_rg)
rgates = np.array(rgates)
fovflg = np.array(fovflg)
onefov = np.array(onefov)
# Combine hop and region data (if available), to allow
# a comprehensive division by propagation path
if len(regions) == len(hops):
chops = ["{:.1f}{:s}".format(hops[ihop], reg)
if not np.isnan(hops[ihop]) and len(reg) > 0
else np.nan for ihop,reg in enumerate(regions)]
else:
chops = hops
for rattr in restrict_attr:
rdata[rattr] = np.array(rdata[rattr])
for r in np.arange(range_min, range_max + 1):
# Select the indexes for this range gate box
ilim = 0
while ilim < len(max_rg) and r >= max_rg[ilim]:
ilim += 1
rmin = r - rhalf[ilim]
rmax = r + rhalf[ilim]
# If the box size is even, then the testing
# conditions will put too many points in the box
# unless the size is reduced. Effectively sets:
# jr = np.where(rgates[ir] < rmax)[0]
if frg_box[ilim] % 2 == 0:
rmax -= 1
# Now that we know how big our window is, we can
# determine the maximum number of points
max_pnts = float(len(bicp) * frg_box[ilim])
ir = np.where(rgates >= rmin)[0]
jr = np.where(rgates[ir] <= rmax)[0]
# Find the hop numbers to consider
shop = set([chops[ihop] for ihop in ir[jr]
if isinstance(chops[ihop], str) or
not np.isnan(chops[ihop])])
for ihop in shop:
hr = [ih for ih in ir[jr] if chops[ih] == ihop]
# Test any additional restrictions
if float(len(hr)) / max_pnts >= min_frac:
for j,rattr in enumerate(restrict_attr):
if len(restrict_lim[j]) == 2:
hk = [hr[k] for k,rd in
enumerate(rdata[rattr][hr])
if(rd >= restrict_lim[j][0]
and rd < restrict_lim[j][1])]
hr = hk
# Quit testing if there aren't enough
# points to perform the UT structure
# evaluation
if float(len(hr)) / max_pnts < min_frac:
break
# Evaluate the temporal FoV structures
if float(len(hr)) / max_pnts < min_frac:
# There are not enough points in this range
# gate and UT box to evaluate the
# backscatter structures at this hop
estr = "unable to evaluate beam ["
estr = "{:s}{:d}] at [".format(estr, bnum)
estr = "{:s}{:}".format(estr,
beams[bnum][bis].time)
estr = "{:s}] gate [{:d}], ".format(estr, r)
estr = "{:s}insufficient ".format(estr)
estr = "{:s}backscatter [".format(estr)
estr = "{:s}{:d} < ".format(estr, len(hr))
estr = "{:s}{:.0f}".format(estr, max_pnts
* min_frac)
estr = "{:s}] at hop [{:s}]".format(estr, ihop)
logging.info(estr)
elif float(len(hr)) / max_pnts > 1.0:
estr = "maximum number of points exceeded for "
estr = "{:s}beam [{:d}] ".format(estr, bnum)
estr = "{:s}between range gates ".format(estr)
estr = "{:s}[{:d}-{:d}".format(estr, rmin, rmax)
estr = "{:s}] at [{:}".format(estr, \
beams[bnum][bis].time)
estr = "{:s} to {:}]".format(estr, \
beams[bnum][max(bicp)].time)
estr = "{:s}: {:d} > ".format(estr, len(hr))
estr = "{:s}{:f}".format(estr, max_pnts)
logging.error(estr)
else:
# Get the number of backscatter observations
# in each field-of-view
rr = dict()
rr[1] = np.where(fovflg[hr] == 1)[0]
rr[-1] = np.where(fovflg[hr] == -1)[0]
fn = float(len(rr[1]))
bn = float(len(rr[-1]))
tn = fn + bn
ffrac = fn / tn if tn > 0.0 else -1.0
bad_fov = 0
good_fov = False
if(ffrac > 0.0 and ffrac >= fov_frac and
bn > 0.0):
good_fov = True
bad_fov = -1
elif(ffrac >= 0.0 and 1.0-ffrac >= fov_frac
and fn > 0.0):
good_fov = True
bad_fov = 1
# Tag the FoV for being consistent or not and
                                # mixed or not, unless this backscatter point
                                # only has one valid FoV
if good_fov:
for irr in rr[bad_fov]:
if not onefov[hr[irr]]:
zz = bi[hr[irr]]
yy = ri[hr[irr]]
fovbelong[zz][yy]['out'] += 1
for irr in rr[-bad_fov]:
zz = bi[hr[irr]]
yy = ri[hr[irr]]
fovbelong[zz][yy]['in'] += 1
else:
for ih in hr:
if abs(fovflg[ih]) == 1:
zz = bi[ih]
yy = ri[ih]
fovbelong[zz][yy]['mix'] += 1
del rgates, fovflg, hops, bi, ri
bis += 1
# Update the fovflags
for i in np.arange(0, len(beams[bnum])):
for j,bdict in enumerate(fovbelong[i]):
if(bdict["out"] > 0 and
bdict["in"] < bdict["out"] + bdict["mix"]):
# Update the FoV flag and the structure flag, since a
# structure cannot be set without a FoV
if(bdict['out'] > bdict['mix'] and
bdict['out'] > bdict['in']):
beams[bnum][i].fit.fovflg[j] = fovpast[i][j]
else:
beams[bnum][i].fit.fovflg[j] = 0
if fovpast[i][j] != 0:
if fovpast[i][j] == -1:
nelv = beams[bnum][i].fit.belv[j]
nelve = beams[bnum][i].fit.belv_e[j]
nheight = beams[bnum][i].fit.bvheight[j]
nheighte = beams[bnum][i].fit.bvheight_e[j]
nhop = beams[bnum][i].fit.bhop[j]
nreg = beams[bnum][i].fit.bregion[j]
else:
nelv = beams[bnum][i].fit.felv[j]
nelve = beams[bnum][i].fit.felv_e[j]
nheight = beams[bnum][i].fit.fvheight[j]
nheighte = beams[bnum][i].fit.fvheight_e[j]
nhop = beams[bnum][i].fit.fhop[j]
nreg = beams[bnum][i].fit.fregion[j]
beams[bnum][i].fit.fovelv[j] = nelv
beams[bnum][i].fit.fovelv_e[j] = nelve
beams[bnum][i].fit.vheight[j] = nheight
beams[bnum][i].fit.vheight_e[j] = nheighte
beams[bnum][i].fit.hop[j] = nhop
beams[bnum][i].fit.region[j] = nreg
fovpast[i][j] = 0
return(beams)
|
__author__ = 'robson'
# convert D in nm^2/ps -> 10^-5 cm^2/s for 3D/2D/1D diffusion - same as GROMACS!
FACTOR = {
'msd': 1000.0 / 6.0,
'msd_xy': 1000.0 / 4.0,
'msd_yz': 1000.0 / 4.0,
'msd_xz': 1000.0 / 4.0,
'msd_x': 1000.0 / 2.0,
'msd_y': 1000.0 / 2.0,
'msd_z': 1000.0 / 2.0,
}
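# Hedged helper (added for illustration, not part of the original module):
# FACTOR folds the Einstein relation D = slope / (2 * n_dim) together with the
# nm^2/ps -> 10^-5 cm^2/s unit conversion (a factor of 1000), so a single
# multiplication converts an MSD fit slope into D; e.g. a 3D slope of
# 0.03 nm^2/ps gives 0.03 * FACTOR['msd'] = 5.0 (in 10^-5 cm^2/s).
def diffusion_coefficient(slope, key='msd'):
    """Convert a linear MSD fit slope (nm^2/ps) to D in 10^-5 cm^2/s."""
    return slope * FACTOR[key]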
class MSDException(Exception):
def __init__(self, *args, **kwargs):
super(MSDException, self).__init__(*args, **kwargs)
|
"""
Validation
Implemented by Peng Zhang
"""
import argparse
import os
import torch
from torch.utils.data import DataLoader
from torchvision import utils as v_utils
from tqdm import tqdm
from data_path import DATA_PATH
from dataset.augmentation import ValidFrameSampler, ValidAugmentation
from dataset.video_matte import VideoMatte240KDataset
from model import MattingBase
from model.utils import load_matched_state_dict
# --------------- Arguments ---------------
parser = argparse.ArgumentParser()
parser.add_argument('--dataset-name', type=str, default='videomatte8k', choices=DATA_PATH.keys())
parser.add_argument('--model-backbone', type=str, default='resnet50', choices=['resnet50'])
parser.add_argument('--model-checkpoint', type=str, default=r'<path to checkpoint>')
parser.add_argument('--output-path', type=str, default=r'<path to output>')
parser.add_argument('--seq-length', type=int, default=1)
parser.add_argument('--num-workers', type=int, default=0)
args = parser.parse_args()
# --------------- Loading ---------------
dataset_valid = VideoMatte240KDataset(
video_matte_path=DATA_PATH[args.dataset_name]['valid'],
background_image_path=DATA_PATH['backgrounds']['valid'],
seq_length=args.seq_length,
seq_sampler=ValidFrameSampler(),
transform=ValidAugmentation((224, 224)),
background_image_id=142
)
dataloader_valid = DataLoader(
dataset_valid,
pin_memory=False,
batch_size=1,
num_workers=args.num_workers
)
# Model
model = MattingBase(args.model_backbone).cuda()
load_matched_state_dict(model, torch.load(args.model_checkpoint))
model.eval()
# Validate
def save_img_tensor_list(t, start_index, output_dir):
output_path = os.path.join(args.output_path, output_dir)
os.makedirs(output_path, exist_ok=True)
index = start_index
for img in t[0]:
v_utils.save_image(img, os.path.join(output_path, f'{index:06d}.png'))
index += 1
os.makedirs(args.output_path, exist_ok=True)
with torch.no_grad():
for i, (fgr, pha, bgr) in enumerate(tqdm(dataset_valid)):
true_fgr = fgr.unsqueeze(0).cuda(non_blocking=True)
true_bgr = bgr.unsqueeze(0).cuda(non_blocking=True)
true_pha = pha.unsqueeze(0).cuda(non_blocking=True)
true_src = true_bgr.clone()
true_src = true_fgr * true_pha + true_src * (1 - true_pha)
pred_pha, pred_fgr, pred_err = model(true_src)[:3]
state = model.decoder.state3
index_start = i * args.seq_length
        for j in range(state[0][0].size()[1]):
            save_img_tensor_list(state[0][0][:, j, :, :].unsqueeze(0), index_start, f'state_h_{j}')
            save_img_tensor_list(state[0][1][:, j, :, :].unsqueeze(0), index_start, f'state_c_{j}')
save_img_tensor_list(pred_pha, index_start, 'pred_pha')
save_img_tensor_list(true_pha, index_start, 'true_pha')
save_img_tensor_list(pred_err, index_start, 'pred_err')
save_img_tensor_list(true_src, index_start, 'true_src')
|
class Student:
def __init__(self, first_name, last_name):
self.first_name = first_name
self.last_name = last_name
self.promoted = False
def __bool__(self):
return self.promoted
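    # Note (added): without __bool__ (and no __len__ fallback), instances
    # would always be truthy, so both `if student` checks below would print.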
def run_example():
student = Student(first_name="Mikołaj", last_name="Lewandowski")
print(bool(student))
student.promoted = True
print(bool(student))
if student:
print("If student")
student.promoted = False
if student:
print("If student")
if __name__ == '__main__':
run_example()
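# Expected output when run as a script (added note):
#     False
#     True
#     If student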
|
import csv
import io
from datetime import datetime
from django.contrib import messages
from django.db.models import Q
from django.http import JsonResponse, HttpResponseRedirect
from django.shortcuts import render
from django.urls import reverse
from django.views.generic import CreateView, UpdateView, ListView
from projeto.equipamento.actions.export_xlsx import export_xlsx
from projeto.equipamento.actions.import_xlsx import import_xlsx as action_import_xlsx
from .models import Equipamento
from .forms import EquipamentoForm
# ----------------- LIST ------------------------
def equipamento_list(request):
template_name = 'equipamento_list.html'
objects = Equipamento.objects.all()
search = request.GET.get('search')
if search:
objects = objects.filter(asset__icontains=search)
context = {'object_list': objects}
return render(request, template_name, context)
class EquipamentoList(ListView):
model = Equipamento
template_name = 'equipamento_list.html'
paginate_by = 10
def get_queryset(self):
queryset = super(EquipamentoList, self).get_queryset()
search = self.request.GET.get('search')
if search:
queryset = queryset.filter(
Q(asset__icontains=search) |
Q(n_serie__icontains=search)
)
return queryset
# ----------------- DETAIL ----------------------
def equipamento_detail(request, pk):
template_name = 'equipamento_detail.html'
obj = Equipamento.objects.get(pk=pk)
context = {'object': obj}
return render(request, template_name, context)
# ----------------- ADD/CREATE ------------------
def equipamento_add(request):
    template_name = 'equipamento_form.html'
    form = EquipamentoForm(request.POST or None)
    if request.method == 'POST':
        if form.is_valid():
            form.save()
            return HttpResponseRedirect(reverse('equipamento:equipamento_list'))
    context = {'form': form}
    return render(request, template_name, context)
class EquipamentoCreate(CreateView):
model = Equipamento
    template_name = 'equipamento_form.html'
form_class = EquipamentoForm
# ----------------- UPDATE -----------------------
class EquipamentoUpdate(UpdateView):
model = Equipamento
template_name = 'equipamento_form.html'
form_class = EquipamentoForm
# ----------------- IMPORT/EXPORT -----------------
def save_data(data):
    '''
    Save the imported rows to the database.
    '''
aux = []
for item in data:
estado = item.get('estado')
asset = item.get('asset')
n_serie = item.get('n_serie')
data_compra = item.get('data_compra')
garantia = item.get('garantia')
tipo = item.get('tipo')
marca = item.get('marca')
modelo = item.get('modelo')
cpu = item.get('cpu')
hd = item.get('hd')
memoria = item.get('memoria')
obs = item.get('obs')
obj = Equipamento(
estado=estado,
asset=asset,
n_serie=n_serie,
data_compra=data_compra,
garantia=garantia,
tipo=tipo,
marca=marca,
modelo=modelo,
cpu=cpu,
hd=hd,
memoria=memoria,
obs=obs,
)
aux.append(obj)
Equipamento.objects.bulk_create(aux)
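# A hedged note: save_data() expects each row dict to carry the keys read
# above, so e.g. a CSV fed to import_csv() below should use this header:
#
#     estado,asset,n_serie,data_compra,garantia,tipo,marca,modelo,cpu,hd,memoria,obs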
def import_csv(request):
if request.method == 'POST' and request.FILES['myfile']:
myfile = request.FILES['myfile']
        # Read the uploaded InMemoryUploadedFile contents
file = myfile.read().decode('utf-8')
reader = csv.DictReader(io.StringIO(file))
        # Build a list of row dicts (one per CSV line)
data = [line for line in reader]
save_data(data)
return HttpResponseRedirect(reverse('equipamento:equipamento_list'))
template_name = 'equipamento_import.html'
return render(request, template_name)
def export_csv(request):
header = (
'estado', 'asset', 'n_serie', 'data_compra', 'garantia', 'tipo', 'marca', 'modelo', 'cpu', 'hd', 'memoria', 'obs',
)
equipamentos = Equipamento.objects.all().values_list(*header)
with open('C:/Users/dnascimento_prest/Desktop/projeto-estoque-master/fix/equipamentos_exportados.csv', 'w') as csvfile:
equipamento_writer=csv.writer(csvfile)
equipamento_writer.writerow(header)
for equipamento in equipamentos:
equipamento_writer.writerow(equipamento)
messages.success(request, 'Equipamentos exportador com sucesso')
return HttpResponseRedirect(reverse('equipamento:equipamento_list'))
def import_xlsx(request):
filename = 'C:/Users/dnascimento_prest/Desktop/projeto-estoque-master/fix/equipamentos.xlsx'
action_import_xlsx(filename)
messages.success(request, 'Equipamentos importados com sucesso.')
return HttpResponseRedirect(reverse('equipamento:equipamento_list'))
def export_equipamentos_xlsx(request):
MDATA = datetime.now().strftime('%Y-%m-%d')
model = 'Equipamento'
filename = 'equipamentos_exportados.xlsx'
_filename = filename.split('.')
filename_final = f'{_filename[0]}_{MDATA}.{_filename[1]}'
queryset = Equipamento.objects.all().values_list(
'estado',
'asset',
'n_serie',
'data_compra',
'garantia',
'tipo',
'marca',
'modelo',
'cpu',
'hd',
'memoria',
'obs',
)
columns = ('estado', 'asset', 'n_serie', 'data_compra', 'garantia', 'tipo', 'marca', 'modelo', 'cpu', 'hd', 'memoria', 'obs')
response = export_xlsx(model, filename_final, queryset, columns)
return response
|
import sys
from abc import ABC, abstractmethod
from itertools import chain
from pathlib import Path
from typing import Dict, Tuple
import numpy as np
import torch
import torch.nn.functional as FF
from src.data.common import get_loader
from torch.distributions import Distribution
from torch.nn import LSTM, Embedding, Linear, Module
from torch.nn.utils.rnn import PackedSequence, pack_padded_sequence
from torch import Tensor
from tqdm import tqdm, trange
from torch.nn.utils import clip_grad_norm_
cuda = torch.cuda.is_available()
if cuda:
device = torch.device("cuda")
else:
device = torch.device("cpu")
model_directory = Path(__file__).parent / ".." / ".." / "models"
def simple_elementwise_apply(fn, packed_sequence):
"""applies a pointwise function fn to each element in packed_sequence"""
return PackedSequence(fn(packed_sequence.data), packed_sequence.batch_sizes)
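# A hedged usage sketch (`padded_batch` and `lengths` are illustrative names):
#
#     packed = pack_padded_sequence(padded_batch, lengths)
#     projected = simple_elementwise_apply(Linear(300, 128), packed)
#
# i.e. fn is applied to the flattened .data tensor while the batch_sizes
# bookkeeping of the PackedSequence is preserved.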
def get_variable(x):
""" Converts tensors to cuda, if available. """
if cuda:
return x.cuda()
return x
def get_numpy(x):
""" Get numpy array for both cuda and not. """
if cuda:
return x.cpu().data.numpy()
return x.data.numpy()
class OneHotPacked(Module):
def __init__(self, num_classes):
super().__init__()
self.num_classes = num_classes
def forward(self, x):
return PackedSequence(
FF.one_hot(x.data, num_classes=self.num_classes).float(),
x.batch_sizes
)
class EmbeddingPacked(Module):
def __init__(self, **kwargs):
super().__init__()
self.embedding = Embedding(**kwargs)
def forward(self, x):
return simple_elementwise_apply(self.embedding, x)
class ModelTrainer(ABC):
def __init__(
self,
model,
optimizer,
max_epochs,
batch_size,
training_data,
validation_data=None,
clip_max_norm=None,
):
# Input parameters
self.model = model
self.optimizer = optimizer
self.max_epochs = max_epochs
self.batch_size = batch_size
self.training_data = training_data
self.validation_data = validation_data
self.clip_max_norm=clip_max_norm
# Data loaders
self.train_loader = get_loader(self.training_data, batch_size)
self.validation_loader = (
get_loader(self.validation_data, batch_size)
if self.validation_data is not None
else None
)
# Initializing fresh training params
self.current_epoch = -1
self.training_loss = []
self.validation_loss = []
self.best_model = {
'validation_loss' : float('inf'),
'state_dict' : None,
}
# Saving model name
self.model_name = model.__class__.__name__
self.cuda = cuda
if self.cuda:
self.device = torch.device("cuda")
print("Training using CUDA")
else:
self.device = torch.device("cpu")
print("Training using CPU")
sys.stdout.flush()
self.model.to(device)
@abstractmethod
def get_loss(self, x):
""" Get average loss in batch x. x is PackedSequence """
pass
def train(self, progress_bar=False):
model = self.model
train_loader = self.train_loader
validation_loader = self.validation_loader
optimizer = self.optimizer
# For each epoch
if progress_bar == 'epoch':
epoch_iter = trange(self.current_epoch + 1, self.max_epochs)
else:
epoch_iter = range(self.current_epoch + 1, self.max_epochs)
for epoch in epoch_iter:
self.epoch_callback()
# Track loss per batch
epoch_training_loss = []
epoch_validation_loss = []
model.train()
if progress_bar == 'batch':
train_loader = tqdm(self.train_loader)
# For each sentence in training set
for x in train_loader:
x = get_variable(x)
# Average loss per tweet
loss = self.get_loss(x)
optimizer.zero_grad()
loss.backward()
if self.clip_max_norm is not None:
clip_grad_norm_(model.parameters(), self.clip_max_norm)
optimizer.step()
epoch_training_loss.append(
(
x.batch_sizes[0].numpy(),
get_numpy(loss.detach()),
)
)
model.eval()
with torch.no_grad():
# For each sentence in validation set
for x in validation_loader:
x = get_variable(x)
loss = self.get_loss(x)
# Update loss
epoch_validation_loss.append(
(
x.batch_sizes[0].numpy(),
get_numpy(loss.detach()),
)
)
# Save loss for plot
            weights, batch_average = zip(*epoch_training_loss)
            self.training_loss.append(np.average(batch_average, weights=weights))
            weights, batch_average = zip(*epoch_validation_loss)
            self.validation_loss.append(np.average(batch_average, weights=weights))
if self.validation_loss[-1] < self.best_model['validation_loss']:
self.best_model['validation_loss'] = self.validation_loss[-1]
self.best_model['state_dict'] = self.model.state_dict()
self.current_epoch = epoch
if progress_bar != 'epoch':
print(f"Epoch {epoch+1} done!")
print(f"T. loss: {self.training_loss[-1]}")
print(f"V. loss: {self.validation_loss[-1]}")
sys.stdout.flush()
elif progress_bar == 'epoch':
epoch_iter.set_postfix({
"t_loss" : self.training_loss[-1],
"v_loss" : self.validation_loss[-1],
})
self.save_checkpoint()
(model_directory / self.model_name / "finished").touch()
def save_checkpoint(self):
loc = model_directory / self.model_name
loc.mkdir(parents=True, exist_ok=True)
torch.save(
{
"epoch": self.current_epoch,
"model_state_dict": self.model.state_dict(),
"optimizer_state_dict": self.optimizer.state_dict(),
"training_loss": self.training_loss,
"validation_loss": self.validation_loss,
"best_model": self.best_model,
},
loc / "checkpoint.pt",
)
def restore_checkpoint(self):
checkpoint = _get_checkpoint(self.model_name, self.device)
if checkpoint is None:
print("No checkpoint found, training fresh model.")
return
print("Checkpoint found, continuing training.")
self.current_epoch = checkpoint["epoch"]
self.training_loss = checkpoint["training_loss"]
self.validation_loss = checkpoint["validation_loss"]
self.model.load_state_dict(checkpoint["model_state_dict"])
self.optimizer.load_state_dict(checkpoint["optimizer_state_dict"])
self.best_model = checkpoint["best_model"]
if self.cuda:
self.model.to(self.device)
# Fix for optimizer on gpu
for state in self.optimizer.state.values():
for k, v in state.items():
if isinstance(v, torch.Tensor):
state[k] = v.cuda()
def epoch_callback(self):
pass
class VITrainer(ModelTrainer):
def __init__(self, vi, *args, beta_scheduler=None, **kwargs):
super(VITrainer, self).__init__(*args, **kwargs)
self.vi = vi
self.beta_scheduler = beta_scheduler
def get_loss(self, x):
loss, _, _ = self.vi(self.model, x)
return loss
def epoch_callback(self):
""" gets called before epoch """
if self.beta_scheduler is not None:
i = self.current_epoch+1
self.vi.beta = self.beta_scheduler(i)
class CriterionTrainer(ModelTrainer):
def __init__(self, criterion, *args, **kwargs):
super(CriterionTrainer, self).__init__(*args, **kwargs)
self.criterion = criterion
def get_loss(self, x):
output = self.model(x)
loss = self.criterion(output.data, x.data) / x.batch_sizes[0]
return loss
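# A hedged usage sketch (illustrative only; `autoencoder`, `train_set` and
# `valid_set` are placeholders, and the datasets must be compatible with
# src.data.common.get_loader):
#
#     trainer = CriterionTrainer(
#         criterion=torch.nn.MSELoss(reduction="sum"),
#         model=autoencoder,
#         optimizer=torch.optim.Adam(autoencoder.parameters(), lr=1e-3),
#         max_epochs=10,
#         batch_size=32,
#         training_data=train_set,
#         validation_data=valid_set,
#     )
#     trainer.restore_checkpoint()
#     trainer.train(progress_bar="epoch")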
def _get_checkpoint(model_name, device, subdir=None):
if subdir is not None:
model_directory_ = model_directory / subdir
else:
model_directory_ = model_directory
try:
checkpoint = torch.load(
model_directory_ / model_name / "checkpoint.pt",
map_location=device,
)
return checkpoint
except FileNotFoundError:
return None
def get_trained_model(model, training_info=False, model_name=None, latest=False, subdir=None):
if model_name is None:
model_name = model.__class__.__name__
checkpoint = _get_checkpoint(model_name, device, subdir=subdir)
if not latest:
model.load_state_dict(checkpoint["best_model"]["state_dict"])
else:
model.load_state_dict(checkpoint["model_state_dict"])
if training_info:
return (
model,
{
"training_loss": checkpoint["training_loss"],
"validation_loss": checkpoint["validation_loss"],
"best_validation_loss": checkpoint["best_model"]["validation_loss"],
"num_epocs": checkpoint["epoch"] + 1,
},
)
else:
return model
def decode_tweet_to_text(decoded_tweet, embedding, joined=False):
# Init list for words
decoded_tweet_word_list = []
# Loop over all words
for word in decoded_tweet:
# Stop when end reached
if all(word == 0):
break
# Add decoded word
decoded_tweet_word_list.append(
embedding.similar_by_vector(np.array(word), topn=1, restrict_vocab=None)[0][
0
]
)
# Return decoded list
if joined:
return " ".join(decoded_tweet_word_list)
else:
return decoded_tweet_word_list
def cos_sim(a, b):
return np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b))
def actual_decode_similarity(sample, target, embedding):
# Calculate average
rsum = np.zeros(300)
for word, _ in embedding.vocab.items():
rsum += embedding[word]
average = rsum / len(embedding.vocab)
# Init values
cos_sim_embed = 0
cos_sim_target = 0
cos_sim_avg = 0
counter = 0
# Loop over words
for (sample_word, target_word) in zip(sample, target):
# Stop when end reached
if all(sample_word == 0):
break
        # Get the de-embedded (nearest vocabulary) word
embed_word = embedding.similar_by_vector(
np.array(sample_word), topn=1, restrict_vocab=None
)[0][0]
# Calculate cosine similarities
cos_sim_embed += cos_sim(embedding[embed_word], sample_word)
cos_sim_target += cos_sim(target_word, sample_word)
cos_sim_avg += cos_sim(target_word, average)
# Increment couter for average
counter += 1
    return {
        "embedding": cos_sim_embed / counter,
        "target": cos_sim_target / counter,
        "average": cos_sim_avg / counter,
    }
# Encoder defition
class Encoder(Module):
def __init__(
self,
input_dim,
hidden_size,
latent_features,
):
super(Encoder, self).__init__()
self.input_dim = input_dim
self.hidden_size = hidden_size
self.latent_features = latent_features
self.rnn = LSTM(
input_size=self.input_dim,
hidden_size=self.hidden_size,
num_layers=2,
)
self.linear = Linear(
in_features=self.hidden_size,
out_features=self.latent_features,
bias=False,
)
def forward(self, x):
x, (hidden_n, _) = self.rnn(x)
x = self.linear(hidden_n[-1])
return x
# Decoder defitinion
class Decoder(Module):
def __init__(
self,
latent_features,
hidden_size,
output_dim,
):
super(Decoder, self).__init__()
self.latent_features = latent_features
self.hidden_size = hidden_size
self.output_dim = output_dim
self.rnn1 = LSTM(
input_size=self.latent_features,
hidden_size=self.latent_features,
)
self.rnn2 = LSTM(
input_size=self.latent_features,
hidden_size=self.hidden_size,
)
self.output_layer = Linear(hidden_size, self.output_dim )
def forward(self, x, batch_sizes):
x = x.repeat(len(batch_sizes), 1, 1)
lengths = -np.diff(np.append(batch_sizes.numpy(), 0))
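        # batch_sizes[t] counts how many sequences are still active at step t,
        # so the negated diff gives how many sequences end with exactly length
        # t + 1; sequence_lengths rebuilds the per-sequence lengths from that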
sequence_lengths = list(
chain.from_iterable(n * [i + 1] for i, n in enumerate(lengths) if n)
)[::-1]
x = pack_padded_sequence(x, sequence_lengths)
x, (_, _) = self.rnn1(x)
x, (_, _) = self.rnn2(x)
return simple_elementwise_apply(self.output_layer, x)
class ReparameterizedDiagonalGaussian(Distribution):
"""
A distribution `N(y | mu, sigma I)` compatible with the reparameterization trick given `epsilon ~ N(0, 1)`.
"""
def __init__(self, mu: Tensor, log_sigma: Tensor):
assert (
mu.shape == log_sigma.shape
), f"Tensors `mu` : {mu.shape} and ` log_sigma` : {log_sigma.shape} must be of the same shape"
self.mu = mu
self.sigma = log_sigma.exp()
def sample_epsilon(self) -> Tensor:
"""`\eps ~ N(0, I)`"""
return torch.empty_like(self.mu).normal_()
def sample(self) -> Tensor:
"""sample `z ~ N(z | mu, sigma)` (without gradients)"""
with torch.no_grad():
return self.rsample()
def rsample(self) -> Tensor:
"""sample `z ~ N(z | mu, sigma)` (with the reparameterization trick) """
return self.mu + self.sigma * self.sample_epsilon()
def log_prob(self, z: Tensor) -> Tensor:
"""return the log probability: log `p(z)`"""
return torch.distributions.normal.Normal(self.mu, self.sigma).log_prob(z)
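# Hedged sketch (added): rsample() keeps gradients flowing through mu and
# log_sigma via z = mu + exp(log_sigma) * eps, e.g.
#     mu = torch.zeros(4, 2, requires_grad=True)
#     log_sigma = torch.zeros(4, 2, requires_grad=True)
#     z = ReparameterizedDiagonalGaussian(mu, log_sigma).rsample()
#     z.sum().backward()   # populates mu.grad and log_sigma.grad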
class VariationalInference(Module):
def __init__(self, beta: float = 1.0):
super().__init__()
self.beta = beta
def forward(self, model: Module, x: Tensor) -> Tuple[Tensor, Dict]:
# forward pass through the model
outputs = model(x)
# unpack outputs
pz = outputs['pz'] # Prior
z = outputs['z'] # Sample from posterior
px = outputs['px'] # Observation model
if 'lz' in outputs:
lz = outputs['lz'] # Log likelihood of sample from approx. posterior
log_qz = lz / len(z)
else:
qz = outputs['qz'] # Approx. posterior
log_qz = qz.log_prob(z).sum(dim=1).mean()
log_px = px.log_prob(x.data).sum() / len(z)
log_pz = pz.log_prob(z).sum(dim=1).mean()
# compute the ELBO with and without the beta parameter:
# `L^\beta = E_q [ log p(x|z) - \beta * D_KL(q(z|x) | p(z))`
# where `D_KL(q(z|x) | p(z)) = log q(z|x) - log p(z)`
kl = log_qz - log_pz
elbo = log_px - kl
beta_elbo = log_px - self.beta * kl
loss = -beta_elbo
# prepare the output
with torch.no_grad():
diagnostics = {"elbo": elbo, "log_px": log_px, "kl": kl}
return loss, diagnostics, outputs
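# A hedged usage note (`packed_batch` is an illustrative PackedSequence): the
# model's forward pass must return a dict with 'px' (observation model), 'pz'
# (prior), 'z' (posterior sample) and either 'qz' (approx. posterior) or 'lz'
# (its log likelihood); a typical call is then
#
#     vi = VariationalInference(beta=1.0)
#     loss, diagnostics, outputs = vi(model, packed_batch)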
|
import argparse
import numpy as np
import torch
import json
from collections import OrderedDict
from torch import nn
from torch import optim
from torch.autograd import Variable
from torchvision import models
import torch.nn.functional as F
from train import construct_model
from PIL import Image
def get_command_line_args():
parser = argparse.ArgumentParser()
parser.add_argument('--image', type=str, help='path to the image to test')
parser.add_argument('--topk', type=int, help='Top classes to return', default=5)
parser.add_argument('--checkpoint', type=str, help='Saved Checkpoint')
    parser.add_argument('--gpu', default=False, action='store_true', help='Whether to use gpu or cpu')
parser.add_argument('--epoch', type=int, help='amount of times the model will train')
parser.add_argument('--labels', type=str, help='file for label names', default='paind-project/cat_to_name.json')
# arch and hidden units of checkpoint added per review
parser.add_argument('--arch', type=str, default='vgg16', help='chosen model')
    parser.add_argument('--hidden_units', type=int, default=4000, help='hidden units for the model')
return parser.parse_args()
def load_checkpoint(checkpoint, arch, hidden_units):
    # Credit to Michael for providing me with a way to convert gpu tensors to cpu
checkpoint_state = torch.load(checkpoint, map_location = lambda storage, loc: storage)
class_to_idx = checkpoint_state['class_to_idx']
model, optimizer, criterion = construct_model(hidden_units, class_to_idx,arch)
model.load_state_dict(checkpoint_state['state_dict'])
optimizer.load_state_dict(checkpoint_state['optimizer'])
print("Loaded checkpoint => {} with arch {}, hidden units {} and epochs {}".format
(checkpoint,
checkpoint_state['arch'],
checkpoint_state['hidden_units'],
checkpoint_state['epochs']))
return model, optimizer, criterion
def process_image(image):
''' Scales, crops, and normalizes a PIL image for a PyTorch model,
returns an Numpy array
'''
size = 256,256
pill_image = Image.open(image)
pill_image = pill_image.resize(size)
pill_image = pill_image.crop((16,16,240,240))
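    # (16, 16, 240, 240) on the 256x256 resize is a centered 224x224 crop,
    # the input size expected by the torchvision classifiers used here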
np_image = np.array(pill_image)
mean = np.array([0.485, 0.456, 0.406])
std = np.array([0.229, 0.224, 0.225])
np_image = ((np_image/255) - mean)/std
np_image = np.transpose(np_image, (2, 0, 1))
return np_image
def predict(image, checkpoint, topk, labels, arch, hidden_units,device, gpu=False):
model, optimizer, criterion = load_checkpoint(checkpoint, arch, hidden_units)
model.eval()
image = process_image(image)
if gpu:
myInput = torch.FloatTensor(image).cuda()
else:
myInput = torch.FloatTensor(image)
model= model.to(device)
myInput.unsqueeze_(0)
output = model(myInput)
ps = torch.exp(output)
probs, classes = torch.topk(ps, topk)
inverted_class_to_index = {model.class_to_idx[x]: x for x in model.class_to_idx}
new_classes = []
for index in classes.cpu().numpy()[0]:
new_classes.append(inverted_class_to_index[index])
return probs.cpu().detach().numpy()[0], new_classes
def main():
args = get_command_line_args()
use_gpu = torch.cuda.is_available() and args.gpu
if use_gpu:
print("Using GPU.")
device = torch.device('cuda')
else:
print("Using CPU.")
device = torch.device('cpu')
    if args.checkpoint and args.image:
probs, new_classes = predict(args.image, args.checkpoint, args.topk,args.labels, args.arch, args.hidden_units ,device,args.gpu)
with open(args.labels, 'r') as j:
# Thanks to michael for helping me understand the use of argmax
cat_to_name = json.load(j)
biggest_idx = np.argmax(probs)
max_class = new_classes[biggest_idx]
first = cat_to_name[max_class]
print("---------Classes and Probabilities---------")
for i, idx in enumerate(new_classes):
print("Class:", cat_to_name[idx], "Probability:", probs[i])
main()
|
# -*- coding:utf-8 -*-
from flask_restful import Resource, reqparse, request
from flask import g
from common.log import Logger
from common.audit_log import audit_log
from common.db import DB
from common.utility import uuid_prefix
from common.sso import access_required
import json
from user.user import update_user_privilege
from common.const import role_dict
logger = Logger()
parser = reqparse.RequestParser()
parser.add_argument("name", type=str, required=True, trim=True)
parser.add_argument("product_id", type=str, required=True, trim=True)
# Optional fields must be given a default of "", otherwise they cannot be converted into a dict
parser.add_argument("description", type=str, default="", trim=True)
class Groups(Resource):
@access_required(role_dict["product"])
def get(self, groups_id):
db = DB()
status, result = db.select_by_id("groups", groups_id)
db.close_mysql()
if status is True:
if result:
try:
groups = eval(result[0][0])
except Exception as e:
return {"status": False, "message": str(e)}, 500
else:
return {"status": False, "message": "%s does not exist" % groups_id}, 404
else:
return {"status": False, "message": result}, 500
return {"group": groups, "status": True, "message": ""}, 200
@access_required(role_dict["product"])
def delete(self, groups_id):
user = g.user_info["username"]
db = DB()
status, result = db.delete_by_id("groups", groups_id)
db.close_mysql()
if status is not True:
logger.error("Delete groups error: %s" % result)
return {"status": False, "message": result}, 500
        if result == 0:
return {"status": False, "message": "%s does not exist" % groups_id}, 404
audit_log(user, groups_id, "", "groups", "delete")
info = update_user_privilege("groups", groups_id)
if info["status"] is False:
return {"status": False, "message": info["message"]}, 500
return {"status": True, "message": ""}, 200
@access_required(role_dict["product"])
def put(self, groups_id):
user = g.user_info["username"]
args = parser.parse_args()
args["id"] = groups_id
groups = args
db = DB()
        # Check whether the record exists
select_status, select_result = db.select_by_id("groups", groups_id)
if select_status is not True:
db.close_mysql()
logger.error("Modify groups error: %s" % select_result)
return {"status": False, "message": select_result}, 500
if not select_result:
db.close_mysql()
return {"status": False, "message": "%s does not exist" % groups_id}, 404
        # Check whether the name already exists
status, result = db.select("groups", "where data -> '$.name'='%s'" % args["name"])
if status is True:
if len(result) != 0:
info = eval(result[0][0])
if groups_id != info.get("id"):
db.close_mysql()
return {"status": False, "message": "The groups name already exists"}, 200
status, result = db.update_by_id("groups", json.dumps(groups, ensure_ascii=False), groups_id)
db.close_mysql()
if status is not True:
logger.error("Modify groups error: %s" % result)
return {"status": False, "message": result}, 500
audit_log(user, groups_id, "", "groups", "edit")
return {"status": True, "message": ""}, 200
class GroupsList(Resource):
@access_required(role_dict["product"])
def get(self):
product_id = request.args.get("product_id")
db = DB()
status, result = db.select("groups", "where data -> '$.product_id'='%s'" % product_id)
db.close_mysql()
groups_list = []
if status is True:
if result:
for i in result:
try:
groups_list.append(eval(i[0]))
except Exception as e:
return {"status": False, "message": str(e)}, 500
else:
return {"groups": {"group": groups_list}, "status": True, "message": ""}, 200
else:
return {"status": False, "message": result}, 500
return {"groups": {"group": groups_list}, "status": True, "message": ""}, 200
@access_required(role_dict["product"])
def post(self):
args = parser.parse_args()
args["id"] = uuid_prefix("g")
user = g.user_info["username"]
groups = args
db = DB()
status, result = db.select("groups", "where data -> '$.name'='%s'" % args["name"])
if status is True:
if len(result) == 0:
insert_status, insert_result = db.insert("groups", json.dumps(groups, ensure_ascii=False))
db.close_mysql()
if insert_status is not True:
logger.error("Add groups error: %s" % insert_result)
return {"status": False, "message": insert_result}, 500
audit_log(user, args["id"], "", "groups", "add")
return {"status": True, "message": ""}, 201
else:
db.close_mysql()
return {"status": False, "message": "The groups name already exists"}, 200
else:
db.close_mysql()
logger.error("Select groups name error: %s" % result)
return {"status": False, "message": result}, 500
|
"""vitamins.match.hitbox -- hitbox class and data."""
from functools import partial
from vitamins.match.base import OrientedObject
from vitamins import draw
class Hitbox:
width: float
length: float
height: float
angle: float # todo: take this into account
root_to_front: float
root_to_top: float
root_to_side: float
root_to_back: float
def __init__(self, car: OrientedObject, width: float):
self.car = car
hitbox_class = {
832: DominusHitbox,
842: OctaneHitbox,
846: PlankHitbox,
805: BreakoutHitbox,
822: HybridHitbox,
}.get(int(width * 10), OctaneHitbox)
self.width = hitbox_class.width
self.length = hitbox_class.length
self.height = hitbox_class.height
self.angle = hitbox_class.angle
self.root_to_front = hitbox_class.root_to_front
self.root_to_top = hitbox_class.root_to_top
self.root_to_side = hitbox_class.root_to_side
self.root_to_back = hitbox_class.root_to_back
def __call__(self, corner_str: str, dt: float = 0):
return self.location(corner_str, dt)
@property
def _fwd(self):
return self.car.forward * self.root_to_front
@property
def _back(self):
return self.car.backward * self.root_to_back
@property
def _left(self):
return self.car.left * self.root_to_side
@property
def _up(self):
return self.car.up * self.root_to_top
@property
def _down(self):
return self.car.down * (self.height - self.root_to_top)
def location(self, corner_str: str, dt: float = 0):
"""Returns a location on the hitbox.
Args:
            corner_str: Specifies the location on the hitbox (see below).
dt: Estimates the position `dt` seconds into the future (or past if <0).
This is useful for drawing the hitbox since rendering is a couple
of frames behind.
Location specifier:
FB: front/back
UD: up/down
LR: left/right
Examples:
FLU = forward left top corner
FU = center of the top front edge
U = center of the top face
Note: The order and case of the letters do not matter. So FRU, URF, urf, rFu
all refer to the same top-right-front corner of the hitbox.
Note: "Center" means aligned with the center of rotation. So e.g. RU is closer
to RUB than to RUF, because the center of rotation for all cars is shifted
somewhat toward the rear of the hitbox.
"""
corner_str = corner_str.upper()
# todo: take angular velocity into account, too:
pos = self.car.position + dt * self.car.velocity
if "F" in corner_str:
pos += self._fwd
if "B" in corner_str:
pos += self._back
if "L" in corner_str:
pos += self._left
if "R" in corner_str:
pos -= self._left
if "U" in corner_str:
pos += self._up
if "D" in corner_str:
pos += self._down
return pos
def draw(self, color: str = "", dt: float = 0):
"""Draw a wireframe hitbox for visualization."""
c = partial(self.location, dt=dt)
draw.line_3d(c("blu"), c("flu"), color)
draw.line_3d(c("bru"), c("fru"), color)
draw.line_3d(c("bld"), c("fld"), color)
draw.line_3d(c("brd"), c("frd"), color)
draw.line_3d(c("flu"), c("fru"), color)
draw.line_3d(c("fld"), c("frd"), color)
draw.line_3d(c("blu"), c("bru"), color)
draw.line_3d(c("bld"), c("brd"), color)
draw.line_3d(c("fld"), c("flu"), color)
draw.line_3d(c("frd"), c("fru"), color)
draw.line_3d(c("bld"), c("blu"), color)
draw.line_3d(c("brd"), c("bru"), color)
# Specific hitbox data for each car type. Source:
# https://onedrive.live.com/view.aspx?resid=F0182A0BAEBB5DFF!14583&ithint=file%2cxlsx&app=Excel&authkey=!ALu0cMkDZDoWOws
class DominusHitbox(Hitbox):
width = 83.28
length = 127.93
height = 31.30
angle = -0.96
root_to_front = 72.96
root_to_top = 31.40
root_to_side = 41.64
root_to_back = 54.96
class OctaneHitbox(Hitbox):
width = 84.20
length = 118.01
height = 36.16
angle = -0.55
root_to_front = 72.88
root_to_top = 38.83
root_to_side = 42.10
root_to_back = 45.13
class PlankHitbox(Hitbox):
width = 84.67
length = 128.82
height = 29.39
angle = -0.34
root_to_front = 73.42
root_to_top = 26.79
root_to_side = 42.34
root_to_back = 55.40
class BreakoutHitbox(Hitbox):
width = 80.52
length = 131.49
height = 30.30
angle = -0.98
root_to_front = 78.25
root_to_top = 26.90
root_to_side = 40.26
root_to_back = 53.25
class HybridHitbox(Hitbox):
width = 82.19
length = 127.02
height = 34.16
angle = -0.055
root_to_front = 77.39
root_to_top = 37.83
root_to_side = 41.09
root_to_back = 49.63
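# --- Hedged usage sketch (not part of the original module) -------------------
# Minimal demo of the corner-string lookup using a stand-in car object; the
# real car is expected to be an OrientedObject exposing the same vector
# attributes (position, velocity, forward, backward, left, up, down).
if __name__ == "__main__":
    import numpy as np
    from types import SimpleNamespace
    car = SimpleNamespace(
        position=np.zeros(3),
        velocity=np.zeros(3),
        forward=np.array([1.0, 0.0, 0.0]),
        backward=np.array([-1.0, 0.0, 0.0]),
        left=np.array([0.0, 1.0, 0.0]),
        up=np.array([0.0, 0.0, 1.0]),
        down=np.array([0.0, 0.0, -1.0]),
    )
    box = Hitbox(car, width=84.20)        # width 84.20 selects the Octane data
    print("front-left-up corner:", box("FLU"))
    print("centre of the top face:", box("U"))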
|
import numpy
import unittest
import cupy
from cupy import testing
@testing.gpu
class TestDims(unittest.TestCase):
_multiprocess_can_split_ = True
def check_atleast(self, func, xp):
a = testing.shaped_arange((), xp)
b = testing.shaped_arange((2,), xp)
c = testing.shaped_arange((2, 2), xp)
d = testing.shaped_arange((4, 3, 2), xp)
return func(a, b, c, d)
@testing.numpy_cupy_array_list_equal()
def test_atleast_1d1(self, xp):
return self.check_atleast(xp.atleast_1d, xp)
@testing.numpy_cupy_array_equal()
def test_atleast_1d2(self, xp):
a = testing.shaped_arange((1, 3, 2), xp)
return xp.atleast_1d(a)
@testing.numpy_cupy_array_list_equal()
def test_atleast_2d1(self, xp):
return self.check_atleast(xp.atleast_2d, xp)
@testing.numpy_cupy_array_equal()
def test_atleast_2d2(self, xp):
a = testing.shaped_arange((1, 3, 2), xp)
return xp.atleast_2d(a)
@testing.numpy_cupy_array_list_equal()
def test_atleast_3d1(self, xp):
return self.check_atleast(xp.atleast_3d, xp)
@testing.numpy_cupy_array_equal()
def test_atleast_3d2(self, xp):
a = testing.shaped_arange((1, 3, 2), xp)
return xp.atleast_3d(a)
@testing.for_all_dtypes()
@testing.numpy_cupy_array_equal()
def test_broadcast_arrays(self, xp, dtype):
a = testing.shaped_arange((2, 1, 3, 4), xp, dtype)
b = testing.shaped_arange((3, 1, 4), xp, dtype)
c, d = xp.broadcast_arrays(a, b)
return d
@testing.with_requires('numpy>=1.10')
@testing.for_all_dtypes()
@testing.numpy_cupy_array_equal()
def test_broadcast_to(self, xp, dtype):
# Note that broadcast_to is only supported on numpy>=1.10
a = testing.shaped_arange((3, 1, 4), xp, dtype)
b = xp.broadcast_to(a, (2, 3, 3, 4))
return b
@testing.with_requires('numpy>=1.10')
@testing.for_all_dtypes()
@testing.numpy_cupy_raises()
def test_broadcast_to_fail(self, xp, dtype):
# Note that broadcast_to is only supported on numpy>=1.10
a = testing.shaped_arange((3, 1, 4), xp, dtype)
xp.broadcast_to(a, (1, 3, 4))
@testing.with_requires('numpy>=1.10')
@testing.for_all_dtypes()
@testing.numpy_cupy_raises()
def test_broadcast_to_short_shape(self, xp, dtype):
# Note that broadcast_to is only supported on numpy>=1.10
a = testing.shaped_arange((1, 3, 4), xp, dtype)
xp.broadcast_to(a, (3, 4))
@testing.for_all_dtypes()
@testing.numpy_cupy_array_equal()
def test_broadcast_to_numpy19(self, xp, dtype):
# Note that broadcast_to is only supported on numpy>=1.10
a = testing.shaped_arange((3, 1, 4), xp, dtype)
if xp is cupy:
b = xp.broadcast_to(a, (2, 3, 3, 4))
else:
dummy = xp.empty((2, 3, 3, 4))
b, _ = xp.broadcast_arrays(a, dummy)
return b
@testing.for_all_dtypes()
def test_broadcast_to_fail_numpy19(self, dtype):
# Note that broadcast_to is only supported on numpy>=1.10
a = testing.shaped_arange((3, 1, 4), cupy, dtype)
with self.assertRaises(ValueError):
cupy.broadcast_to(a, (1, 3, 4))
@testing.for_all_dtypes()
def test_broadcast_to_short_shape_numpy19(self, dtype):
# Note that broadcast_to is only supported on numpy>=1.10
a = testing.shaped_arange((1, 3, 4), cupy, dtype)
with self.assertRaises(ValueError):
cupy.broadcast_to(a, (3, 4))
@testing.numpy_cupy_array_equal()
def test_expand_dims0(self, xp):
a = testing.shaped_arange((2, 3), xp)
return xp.expand_dims(a, 0)
@testing.numpy_cupy_array_equal()
def test_expand_dims1(self, xp):
a = testing.shaped_arange((2, 3), xp)
return xp.expand_dims(a, 1)
@testing.numpy_cupy_array_equal()
def test_expand_dims2(self, xp):
a = testing.shaped_arange((2, 3), xp)
return xp.expand_dims(a, 2)
@testing.numpy_cupy_array_equal()
def test_expand_dims_negative1(self, xp):
a = testing.shaped_arange((2, 3), xp)
return xp.expand_dims(a, -2)
@testing.numpy_cupy_array_equal()
def test_expand_dims_negative2(self, xp):
a = testing.shaped_arange((2, 3), xp)
return xp.expand_dims(a, -4)
@testing.numpy_cupy_array_equal()
def test_squeeze1(self, xp):
a = testing.shaped_arange((1, 2, 1, 3, 1, 1, 4, 1), xp)
return a.squeeze()
@testing.numpy_cupy_array_equal()
def test_squeeze2(self, xp):
a = testing.shaped_arange((2, 3, 4), xp)
return a.squeeze()
@testing.numpy_cupy_array_equal()
def test_squeeze_int_axis1(self, xp):
a = testing.shaped_arange((1, 2, 1, 3, 1, 1, 4, 1), xp)
return a.squeeze(axis=2)
@testing.numpy_cupy_array_equal()
def test_squeeze_int_axis2(self, xp):
a = testing.shaped_arange((1, 2, 1, 3, 1, 1, 4, 1), xp)
return a.squeeze(axis=-3)
@testing.with_requires('numpy>=1.13')
@testing.numpy_cupy_raises()
def test_squeeze_int_axis_failure1(self, xp):
a = testing.shaped_arange((1, 2, 1, 3, 1, 1, 4, 1), xp)
a.squeeze(axis=-9)
def test_squeeze_int_axis_failure2(self):
a = testing.shaped_arange((1, 2, 1, 3, 1, 1, 4, 1), cupy)
with self.assertRaises(cupy.core.core._AxisError):
a.squeeze(axis=-9)
@testing.numpy_cupy_array_equal()
def test_squeeze_tuple_axis1(self, xp):
a = testing.shaped_arange((1, 2, 1, 3, 1, 1, 4, 1), xp)
return a.squeeze(axis=(2, 4))
@testing.numpy_cupy_array_equal()
def test_squeeze_tuple_axis2(self, xp):
a = testing.shaped_arange((1, 2, 1, 3, 1, 1, 4, 1), xp)
return a.squeeze(axis=(-4, -3))
@testing.numpy_cupy_array_equal()
def test_squeeze_tuple_axis3(self, xp):
a = testing.shaped_arange((1, 2, 1, 3, 1, 1, 4, 1), xp)
return a.squeeze(axis=(4, 2))
@testing.numpy_cupy_array_equal()
def test_squeeze_tuple_axis4(self, xp):
a = testing.shaped_arange((1, 2, 1, 3, 1, 1, 4, 1), xp)
return a.squeeze(axis=())
@testing.with_requires('numpy>=1.13')
@testing.numpy_cupy_raises()
def test_squeeze_tuple_axis_failure1(self, xp):
a = testing.shaped_arange((1, 2, 1, 3, 1, 1, 4, 1), xp)
a.squeeze(axis=(-9,))
@testing.numpy_cupy_raises()
def test_squeeze_tuple_axis_failure2(self, xp):
a = testing.shaped_arange((1, 2, 1, 3, 1, 1, 4, 1), xp)
a.squeeze(axis=(2, 2))
def test_squeeze_tuple_axis_failure3(self):
a = testing.shaped_arange((1, 2, 1, 3, 1, 1, 4, 1), cupy)
with self.assertRaises(cupy.core.core._AxisError):
a.squeeze(axis=(-9,))
@testing.numpy_cupy_array_equal()
def test_squeeze_scalar1(self, xp):
a = testing.shaped_arange((), xp)
return a.squeeze(axis=0)
@testing.numpy_cupy_array_equal()
def test_squeeze_scalar2(self, xp):
a = testing.shaped_arange((), xp)
return a.squeeze(axis=-1)
@testing.with_requires('numpy>=1.13')
@testing.numpy_cupy_raises()
def test_squeeze_scalar_failure1(self, xp):
a = testing.shaped_arange((), xp)
a.squeeze(axis=-2)
@testing.with_requires('numpy>=1.13')
@testing.numpy_cupy_raises()
def test_squeeze_scalar_failure2(self, xp):
a = testing.shaped_arange((), xp)
a.squeeze(axis=1)
def test_squeeze_scalar_failure3(self):
a = testing.shaped_arange((), cupy)
with self.assertRaises(cupy.core.core._AxisError):
a.squeeze(axis=-2)
def test_squeeze_scalar_failure4(self):
a = testing.shaped_arange((), cupy)
with self.assertRaises(cupy.core.core._AxisError):
a.squeeze(axis=1)
@testing.numpy_cupy_raises()
def test_squeeze_failure(self, xp):
a = testing.shaped_arange((2, 1, 3, 4), xp)
a.squeeze(axis=2)
@testing.numpy_cupy_array_equal()
def test_external_squeeze(self, xp):
a = testing.shaped_arange((1, 2, 1, 3, 1, 1, 4, 1), xp)
return xp.squeeze(a)
@testing.parameterize(
{'shapes': [(), ()]},
{'shapes': [(0,), (0,)]},
{'shapes': [(1,), (1,)]},
{'shapes': [(2,), (2,)]},
{'shapes': [(0,), (1,)]},
{'shapes': [(2, 3), (1, 3)]},
{'shapes': [(2, 1, 3, 4), (3, 1, 4)]},
{'shapes': [(4, 3, 2, 3), (2, 3)]},
{'shapes': [(2, 0, 1, 1, 3), (2, 1, 0, 0, 3)]},
{'shapes': [(0, 1, 1, 3), (2, 1, 0, 0, 3)]},
{'shapes': [(0, 1, 1, 0, 3), (5, 2, 0, 1, 0, 0, 3), (2, 1, 0, 0, 0, 3)]},
)
@testing.gpu
class TestBroadcast(unittest.TestCase):
# TODO(niboshi): Run test of xp.broadcast_arrays in this class
def _broadcast(self, xp, shapes):
arrays = [
testing.shaped_arange(s, xp, xp.float32) for s in shapes]
return xp.broadcast(*arrays)
def test_broadcast(self):
broadcast_np = self._broadcast(numpy, self.shapes)
broadcast_cp = self._broadcast(cupy, self.shapes)
self.assertEqual(broadcast_np.shape, broadcast_cp.shape)
self.assertEqual(broadcast_np.size, broadcast_cp.size)
self.assertEqual(broadcast_np.nd, broadcast_cp.nd)
@testing.parameterize(
{'shapes': [(3,), (2,)]},
{'shapes': [(3, 2), (2, 3,)]},
{'shapes': [(3, 2), (3, 4,)]},
{'shapes': [(0,), (2,)]},
)
@testing.gpu
class TestInvalidBroadcast(unittest.TestCase):
# TODO(niboshi): Run test of xp.broadcast_arrays in this class
@testing.numpy_cupy_raises()
def test_invalid_broadcast(self, xp):
arrays = [
testing.shaped_arange(s, xp, xp.float32) for s in self.shapes]
xp.broadcast(*arrays)
|
import os
## converted from model.eval.genotyped.sh
def modelEvalCVGenotyped (path, pheno, model, snpList, genotypeFle):
phenoFile = path + "/pheno_data/pheno_" + str(pheno) + ".txt"
covarFile = path + "/pheno_data/covar_" + str(pheno) + ".txt"
outFile = path + "/association_cv/chr0." + str(pheno)
extractOrNot = os.path.isfile(snpList)
extractFile = path + str(snpList)
cmd = ""
if (extractOrNot == False):
print ("NO selected SNP!\n")
if (model == "liner"):
print ("Using liner model for genotpye " + str (pheno) + "\n")
print ("1")
cmd = "plink --bfile " + genotypeFle + \
" --linear --vif 1000 --maf 0.000001 --pheno " + phenoFile + \
" --covar " + covarFile + \
" --hide-covar --silent --noweb --out " + outFile
else:
## logistic
print ("Using logistic model for genotpye " + str (pheno) + "\n")
print ("2")
cmd = "plink --bfile " + genotypeFle + \
" --logistic --vif 1000 --maf 0.000001 --1 --ci .95 --pheno " + phenoFile + \
" --covar " + covarFile + \
" --hide-covar --silent --noweb --out " + outFile
else:
print ("Here are the selected SNP!\n")
if (model == "liner"):
print ("Using liner model for genotpye " + str (pheno) + "\n")
print ("3")
cmd = "plink --bfile " + genotypeFle + \
" --extract " + extractFile + \
" --linear --vif 1000 --maf 0.000001 --pheno " + phenoFile + \
" --covar " + covarFile + \
" --hide-covar --silent --noweb --out " + outFile
else:
## logistic
print ("Using logistic model for genotpye " + str (pheno) + "\n")
print ("4")
cmd = "plink --bfile " + genotypeFle + \
" --extract " + extractFile +\
" --logistic --vif 1000 --maf 0.000001 --1 --ci .95 --pheno " + phenoFile + \
" --covar " + covarFile + \
" --hide-covar --silent --noweb --out " + outFile
return(cmd)
## converted from model.eval.genotyped.sh
def modelEvalCVImputed (path, pheno, model, snpList, genotypeFle ="/ddn/gs1/home/li11/local/accord/data/geno_data/post_qc.unc.uva.merged"):
print ("hello world")
phenoFile = path + "/pheno_data/pheno_" + str(pheno) + ".txt"
covarFile = path + "/pheno_data/covar_" + str(pheno) + ".txt"
outFile = path + "/association_cv/chr0." + str(pheno)
extractOrNot = os.path.isfile(snpList)
extractFile = path + str(snpList)
cmd = ""
# phenos = [line.strip() for line in open(path+'/phenotypes.txt', 'r')]
# models = [line.strip() for line in open(path+'/modeltypes.txt', 'r')]
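# --- Hedged usage sketch (not part of the original script) -------------------
# Shows the plink command string assembled by modelEvalCVGenotyped for a
# hypothetical project layout; all paths below are made up, and "liner" is the
# spelling the script itself uses for the linear model.
if __name__ == "__main__":
    demo_cmd = modelEvalCVGenotyped(
        path="/tmp/project",
        pheno=1,
        model="liner",
        snpList="snp_list_that_does_not_exist.txt",
        genotypeFle="/tmp/project/geno_data/merged")
    print(demo_cmd)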
|
import time
import sys
from scapy.all import *
from scapy.all import send
from scapy.layers.inet import *
import scapy
srcIP = "192.168.0.107"
destIP = "192.168.0.105"
IPLayer = IP(dst=destIP, src=srcIP)
for i in range(1,100):
TCPLayer = TCP(seq=i, dport=135, sport=135)
spoofpkt = IPLayer/TCPLayer
send(spoofpkt, verbose=2)
print("Spoofed Packet Sent...")
|
from .managers import CoreManager
from .models import AbstractAudit, AbstractChoice
__all__ = ['CoreManager', 'AbstractAudit', 'AbstractChoice', ]
|
# Generated by Django 3.0.9 on 2020-10-23 15:57
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('quotations', '0033_auto_20201020_1511'),
]
operations = [
migrations.AddField(
model_name='quotation',
name='discount',
field=models.IntegerField(default=0),
),
migrations.AddField(
model_name='quotation',
name='total_price',
field=models.IntegerField(default=0),
),
]
|
from django.db import models
# Create your models here.
SOCIAL_CHOICES = (
('F', 'Facebook'),
('W', 'Whatsapp'),
('T', 'Twitter'),
('I', 'Instagram'),
('L', 'LinkedIn'),
('Y', 'Youtube'),
)
class SocialLink(models.Model):
site = models.CharField(choices=SOCIAL_CHOICES, max_length=1,)
link = models.URLField()
def __str__(self):
return dict(SOCIAL_CHOICES).get(self.site, self.link)
def save(self,*args, **kwargs):
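        # Enforce at most one SocialLink per site: if a row for this site
        # already exists, reuse its primary key so this save() updates it
        # instead of inserting a duplicate.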
social_link_qs = SocialLink.objects.filter(site=self.site)
if social_link_qs.exists():
self.id = social_link_qs.first().id
super(SocialLink, self).save(*args, **kwargs)
|
import ctypes
import numpy
from nidaqmx._lib import lib_importer, wrapped_ndpointer, c_bool32
from nidaqmx.constants import FillMode
from nidaqmx.errors import check_for_error
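# Each wrapper below follows the same pattern: lazily populate the C function's
# argtypes under arglock on first use, call into the NI-DAQmx C API with the
# caller's NumPy buffer, check the returned error code, and (for the array
# variants) return the number of samples written per channel.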
def _write_analog_f_64(
task_handle, write_array, num_samps_per_chan, auto_start, timeout,
data_layout=FillMode.GROUP_BY_CHANNEL):
samps_per_chan_written = ctypes.c_int()
cfunc = lib_importer.windll.DAQmxWriteAnalogF64
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes.c_int, c_bool32,
ctypes.c_double, ctypes.c_int,
wrapped_ndpointer(dtype=numpy.float64, flags=('C')),
ctypes.POINTER(ctypes.c_int), ctypes.POINTER(c_bool32)]
error_code = cfunc(
task_handle, num_samps_per_chan, auto_start, timeout,
data_layout.value, write_array,
ctypes.byref(samps_per_chan_written), None)
check_for_error(error_code, samps_per_chan_written=samps_per_chan_written.value)
return samps_per_chan_written.value
def _write_analog_scalar_f_64(task_handle, value, auto_start, timeout):
cfunc = lib_importer.windll.DAQmxWriteAnalogScalarF64
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, c_bool32, ctypes.c_double,
ctypes.c_double, ctypes.POINTER(c_bool32)]
error_code = cfunc(
task_handle, auto_start, timeout, value, None)
check_for_error(error_code)
def _write_binary_i_16(
task_handle, write_array, num_samps_per_chan, auto_start, timeout,
data_layout=FillMode.GROUP_BY_CHANNEL):
samps_per_chan_written = ctypes.c_int()
cfunc = lib_importer.windll.DAQmxWriteBinaryI16
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes.c_int, c_bool32,
ctypes.c_double, ctypes.c_int,
wrapped_ndpointer(dtype=numpy.int16, flags=('C')),
ctypes.POINTER(ctypes.c_int), ctypes.POINTER(c_bool32)]
error_code = cfunc(
task_handle, num_samps_per_chan, auto_start, timeout,
data_layout.value, write_array,
ctypes.byref(samps_per_chan_written), None)
check_for_error(error_code, samps_per_chan_written=samps_per_chan_written.value)
return samps_per_chan_written.value
def _write_binary_u_16(
task_handle, write_array, num_samps_per_chan, auto_start, timeout,
data_layout=FillMode.GROUP_BY_CHANNEL):
samps_per_chan_written = ctypes.c_int()
cfunc = lib_importer.windll.DAQmxWriteBinaryU16
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes.c_int, c_bool32,
ctypes.c_double, ctypes.c_int,
wrapped_ndpointer(dtype=numpy.uint16, flags=('C')),
ctypes.POINTER(ctypes.c_int), ctypes.POINTER(c_bool32)]
error_code = cfunc(
task_handle, num_samps_per_chan, auto_start, timeout,
data_layout.value, write_array,
ctypes.byref(samps_per_chan_written), None)
check_for_error(error_code, samps_per_chan_written=samps_per_chan_written.value)
return samps_per_chan_written.value
def _write_binary_i_32(
task_handle, write_array, num_samps_per_chan, auto_start, timeout,
data_layout=FillMode.GROUP_BY_CHANNEL):
samps_per_chan_written = ctypes.c_int()
cfunc = lib_importer.windll.DAQmxWriteBinaryI32
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes.c_int, c_bool32,
ctypes.c_double, ctypes.c_int,
wrapped_ndpointer(dtype=numpy.int32, flags=('C')),
ctypes.POINTER(ctypes.c_int), ctypes.POINTER(c_bool32)]
error_code = cfunc(
task_handle, num_samps_per_chan, auto_start, timeout,
data_layout.value, write_array,
ctypes.byref(samps_per_chan_written), None)
check_for_error(error_code, samps_per_chan_written=samps_per_chan_written.value)
return samps_per_chan_written.value
def _write_binary_u_32(
task_handle, write_array, num_samps_per_chan, auto_start, timeout,
data_layout=FillMode.GROUP_BY_CHANNEL):
samps_per_chan_written = ctypes.c_int()
cfunc = lib_importer.windll.DAQmxWriteBinaryU32
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes.c_int, c_bool32,
ctypes.c_double, ctypes.c_int,
wrapped_ndpointer(dtype=numpy.uint32, flags=('C')),
ctypes.POINTER(ctypes.c_int), ctypes.POINTER(c_bool32)]
error_code = cfunc(
task_handle, num_samps_per_chan, auto_start, timeout,
data_layout.value, write_array,
ctypes.byref(samps_per_chan_written), None)
check_for_error(error_code, samps_per_chan_written=samps_per_chan_written.value)
return samps_per_chan_written.value
def _write_digital_u_8(
task_handle, write_array, num_samps_per_chan, auto_start, timeout,
data_layout=FillMode.GROUP_BY_CHANNEL):
samps_per_chan_written = ctypes.c_int()
cfunc = lib_importer.windll.DAQmxWriteDigitalU8
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes.c_int, c_bool32,
ctypes.c_double, ctypes.c_int,
wrapped_ndpointer(dtype=numpy.uint8, flags=('C')),
ctypes.POINTER(ctypes.c_int), ctypes.POINTER(c_bool32)]
error_code = cfunc(
task_handle, num_samps_per_chan, auto_start, timeout,
data_layout.value, write_array,
ctypes.byref(samps_per_chan_written), None)
check_for_error(error_code, samps_per_chan_written=samps_per_chan_written.value)
return samps_per_chan_written.value
def _write_digital_u_16(
task_handle, write_array, num_samps_per_chan, auto_start, timeout,
data_layout=FillMode.GROUP_BY_CHANNEL):
samps_per_chan_written = ctypes.c_int()
cfunc = lib_importer.windll.DAQmxWriteDigitalU16
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes.c_int, c_bool32,
ctypes.c_double, ctypes.c_int,
wrapped_ndpointer(dtype=numpy.uint16, flags=('C')),
ctypes.POINTER(ctypes.c_int), ctypes.POINTER(c_bool32)]
error_code = cfunc(
task_handle, num_samps_per_chan, auto_start, timeout,
data_layout.value, write_array,
ctypes.byref(samps_per_chan_written), None)
check_for_error(error_code, samps_per_chan_written=samps_per_chan_written.value)
return samps_per_chan_written.value
def _write_digital_u_32(
task_handle, write_array, num_samps_per_chan, auto_start, timeout,
data_layout=FillMode.GROUP_BY_CHANNEL):
samps_per_chan_written = ctypes.c_int()
cfunc = lib_importer.windll.DAQmxWriteDigitalU32
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes.c_int, c_bool32,
ctypes.c_double, ctypes.c_int,
wrapped_ndpointer(dtype=numpy.uint32, flags=('C')),
ctypes.POINTER(ctypes.c_int), ctypes.POINTER(c_bool32)]
error_code = cfunc(
task_handle, num_samps_per_chan, auto_start, timeout,
data_layout.value, write_array,
ctypes.byref(samps_per_chan_written), None)
check_for_error(error_code, samps_per_chan_written=samps_per_chan_written.value)
return samps_per_chan_written.value
def _write_digital_scalar_u_32(task_handle, value, auto_start, timeout):
cfunc = lib_importer.windll.DAQmxWriteDigitalScalarU32
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, c_bool32, ctypes.c_double,
ctypes.c_uint, ctypes.POINTER(c_bool32)]
error_code = cfunc(
task_handle, auto_start, timeout, value, None)
check_for_error(error_code)
def _write_digital_lines(
task_handle, write_array, num_samps_per_chan, auto_start, timeout,
data_layout=FillMode.GROUP_BY_CHANNEL):
samps_per_chan_written = ctypes.c_int()
cfunc = lib_importer.windll.DAQmxWriteDigitalLines
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes.c_int, c_bool32,
ctypes.c_double, ctypes.c_int,
wrapped_ndpointer(dtype=bool, flags=('C')),
ctypes.POINTER(ctypes.c_int), ctypes.POINTER(c_bool32)]
error_code = cfunc(
task_handle, num_samps_per_chan, auto_start, timeout,
data_layout.value, write_array,
ctypes.byref(samps_per_chan_written), None)
check_for_error(error_code, samps_per_chan_written=samps_per_chan_written.value)
return samps_per_chan_written.value
def _write_ctr_freq(
task_handle, freq, duty_cycle, num_samps_per_chan, auto_start, timeout,
data_layout=FillMode.GROUP_BY_CHANNEL):
samps_per_chan_written = ctypes.c_int()
cfunc = lib_importer.windll.DAQmxWriteCtrFreq
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes.c_int, c_bool32,
ctypes.c_double, ctypes.c_int,
wrapped_ndpointer(dtype=numpy.float64, flags=('C')),
wrapped_ndpointer(dtype=numpy.float64, flags=('C')),
ctypes.POINTER(ctypes.c_int), ctypes.POINTER(c_bool32)]
error_code = cfunc(
task_handle, num_samps_per_chan, auto_start, timeout,
data_layout.value, freq, duty_cycle,
ctypes.byref(samps_per_chan_written), None)
check_for_error(error_code, samps_per_chan_written=samps_per_chan_written.value)
return samps_per_chan_written.value
def _write_ctr_freq_scalar(task_handle, freq, duty_cycle, auto_start, timeout):
cfunc = lib_importer.windll.DAQmxWriteCtrFreqScalar
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, c_bool32, ctypes.c_double,
ctypes.c_double, ctypes.c_double, ctypes.POINTER(c_bool32)]
error_code = cfunc(
task_handle, auto_start, timeout, freq, duty_cycle, None)
check_for_error(error_code)
def _write_ctr_time(
task_handle, high_time, low_time, num_samps_per_chan, auto_start,
timeout, data_layout=FillMode.GROUP_BY_CHANNEL):
samps_per_chan_written = ctypes.c_int()
cfunc = lib_importer.windll.DAQmxWriteCtrTime
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes.c_int, c_bool32,
ctypes.c_double, ctypes.c_int,
wrapped_ndpointer(dtype=numpy.float64, flags=('C')),
wrapped_ndpointer(dtype=numpy.float64, flags=('C')),
ctypes.POINTER(ctypes.c_int), ctypes.POINTER(c_bool32)]
error_code = cfunc(
task_handle, num_samps_per_chan, auto_start, timeout,
data_layout.value, high_time, low_time,
ctypes.byref(samps_per_chan_written), None)
check_for_error(error_code, samps_per_chan_written=samps_per_chan_written.value)
return samps_per_chan_written.value
def _write_ctr_time_scalar(
task_handle, high_time, low_time, auto_start, timeout):
cfunc = lib_importer.windll.DAQmxWriteCtrTimeScalar
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, c_bool32, ctypes.c_double,
ctypes.c_double, ctypes.c_double,
ctypes.POINTER(c_bool32)]
error_code = cfunc(
task_handle, auto_start, timeout, high_time,
low_time, None)
check_for_error(error_code)
def _write_ctr_ticks(
task_handle, high_tick, low_tick, num_samps_per_chan, auto_start,
timeout, data_layout=FillMode.GROUP_BY_CHANNEL):
samps_per_chan_written = ctypes.c_int()
cfunc = lib_importer.windll.DAQmxWriteCtrTicks
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes.c_int, c_bool32,
ctypes.c_double, ctypes.c_int,
wrapped_ndpointer(dtype=numpy.uint32, flags=('C')),
wrapped_ndpointer(dtype=numpy.uint32, flags=('C')),
ctypes.POINTER(ctypes.c_int), ctypes.POINTER(c_bool32)]
error_code = cfunc(
task_handle, num_samps_per_chan, auto_start, timeout,
data_layout.value, high_tick, low_tick,
ctypes.byref(samps_per_chan_written), None)
check_for_error(error_code, samps_per_chan_written=samps_per_chan_written.value)
return samps_per_chan_written.value
def _write_ctr_ticks_scalar(
task_handle, high_ticks, low_ticks, auto_start, timeout):
cfunc = lib_importer.windll.DAQmxWriteCtrTicksScalar
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, c_bool32, ctypes.c_double,
ctypes.c_uint, ctypes.c_uint, ctypes.POINTER(c_bool32)]
error_code = cfunc(
task_handle, auto_start, timeout, high_ticks, low_ticks, None)
check_for_error(error_code)
def _write_raw(
task_handle, num_samps_per_chan, numpy_array, auto_start, timeout):
samps_per_chan_written = ctypes.c_int()
cfunc = lib_importer.windll.DAQmxWriteRaw
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes.c_int, c_bool32,
ctypes.c_double,
wrapped_ndpointer(dtype=numpy_array.dtype,
flags=('C')),
ctypes.POINTER(ctypes.c_int), ctypes.POINTER(c_bool32)]
error_code = cfunc(
task_handle, num_samps_per_chan, auto_start, timeout, numpy_array,
ctypes.byref(samps_per_chan_written), None)
check_for_error(error_code, samps_per_chan_written=samps_per_chan_written.value)
return samps_per_chan_written.value
|
from run_experiments_transfer import test_robustness
import torchvision.datasets as dset
import torchvision.transforms as transforms
import torch
import numpy as np
from clustorch.kmeans import KMeans
from clustorch.spectral import SpectralClustering
from clustorch.hierarchical import Hierarchical
from experiments.device import opts
from experiments.run_fashion import shuffle_indexes, filter_by_label
DEVICE = opts.device
PATH = opts.path
PATH += "/pr2021/"
def split_train_val(X_train_val, Y_train_val, n_samples):
n, m, _ = X_train_val.shape
n_in_class = n_samples // 2
idxs = shuffle_indexes(torch.arange(n))
X_train, Y_train = X_train_val[:n_in_class], Y_train_val[:n_in_class]
X_train = torch.cat([X_train, X_train_val[n_samples : n_samples + n_in_class]])
Y_train = torch.cat([Y_train, Y_train_val[n_samples : n_samples + n_in_class]])
X_val, Y_val = (
X_train_val[n_in_class:n_samples],
Y_train_val[n_in_class:n_samples],
)
X_val = torch.cat([X_val, X_train_val[-n_in_class:]])
Y_val = torch.cat([Y_val, Y_train_val[-n_in_class:]])
return X_train, Y_train, X_val, Y_val
def main():
root = "./data/"
trans = transforms.Compose([transforms.ToTensor(),])
train_set = dset.FashionMNIST(root=root, train=True, transform=trans, download=True)
torch.manual_seed(4)
n_samples = 1600
dt_range = np.linspace(start=0.05, num=20, stop=1)
s_range = np.linspace(start=0.01, num=20, stop=0.6)
X_train_val, Y_train_val = filter_by_label(
x=train_set.data,
y=train_set.targets,
labels=[6, 9],
n_samples=n_samples,
device=DEVICE,
)
X, Y, X_transf, Y_transf = split_train_val(X_train_val, Y_train_val, n_samples)
k = len(Y.unique())
print(Y.sum(), Y_transf.sum())
models = [
KMeans(n_clusters=k),
SpectralClustering(
n_clusters=k, lmode="rw", similarity="gaussian_zp", assign_labels="kmeans",
),
Hierarchical(n_clusters=k),
]
test_robustness(
X,
Y,
X_transf,
Y_transf,
models=models,
dt_range=dt_range,
s_range=s_range,
lb=1 / 255,
mutation_rate=0.1,
path=PATH + "/TransferfashionMNIST/",
)
if __name__ == "__main__":
main()
|
import os
import sys
import unittest
from collections import OrderedDict
sys.path.append(os.path.join(os.path.dirname(__file__), ".."))
import setup_malcolm_paths
import numpy as np
from malcolm.core.vmetas import NumberArrayMeta
class TestValidation(unittest.TestCase):
def test_numpy_array(self):
nm = NumberArrayMeta("float64")
values = np.array([1.2, 3.4, 5.6])
response = nm.validate(values)
for i, value in enumerate(response):
self.assertEqual(values[i], value)
def test_numpy_array_wrong_type_raises(self):
nm = NumberArrayMeta("float64")
values = "[1.2, 3.4, 5.6]"
with self.assertRaises(TypeError):
nm.validate(values)
def test_numpy_array_wrong_number_type_raises(self):
nm = NumberArrayMeta("int32")
values = np.array([1.2, 3.4, 5.6])
with self.assertRaises(TypeError):
nm.validate(values)
def test_float_against_float64(self):
nm = NumberArrayMeta("float64")
values = [1.2, 3.4, 5.6]
response = nm.validate(values)
for i, value in enumerate(response):
self.assertEqual(values[i], value)
def test_float_against_float32(self):
nm = NumberArrayMeta("float32")
values = [1.2, 3.4, 5.6]
response = nm.validate(values)
for i, value in enumerate(response):
self.assertAlmostEqual(values[i], response[i], places=5)
def test_int_against_float(self):
nm = NumberArrayMeta("float32")
values = [1, 2, 3]
response = nm.validate(values)
for i, value in enumerate(response):
self.assertEqual(values[i], value)
nm = NumberArrayMeta("float64")
values = [1, 2, 3]
response = nm.validate(values)
for i, value in enumerate(response):
self.assertEqual(values[i], value)
def test_int_against_int(self):
nm = NumberArrayMeta("int32")
values = [1, 2, 3]
response = nm.validate(values)
for i, value in enumerate(response):
self.assertEqual(values[i], value)
def test_float_against_int_raises(self):
nm = NumberArrayMeta("int32")
self.assertRaises(ValueError, nm.validate, [1.2, 34, 56])
def test_null_element_raises(self):
nm = NumberArrayMeta("float32")
self.assertRaises(ValueError, nm.validate, [1.2, None, 5.6])
def test_none_validates(self):
nm = NumberArrayMeta("int32")
self.assertIsNone(nm.validate(None))
if __name__ == "__main__":
unittest.main(verbosity=2)
|
import os
def setup_working_directories(config_vars):
## Expected raw data directories:
config_vars["raw_images_dir"] = os.path.join(config_vars["root_directory"], 'raw_images/')
config_vars["raw_annotations_dir"] = os.path.join(config_vars["root_directory"], 'raw_annotations/')
## Split files
config_vars["path_files_training"] = os.path.join(config_vars["root_directory"], 'training.txt')
config_vars["path_files_validation"] = os.path.join(config_vars["root_directory"], 'validation.txt')
config_vars["path_files_test"] = os.path.join(config_vars["root_directory"], 'test.txt')
## Transformed data directories:
config_vars["normalized_images_dir"] = os.path.join(config_vars["root_directory"], 'norm_images/')
config_vars["boundary_labels_dir"] = os.path.join(config_vars["root_directory"], 'boundary_labels/')
return config_vars
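# Hedged usage sketch (not part of the original module); the root directory
# below is hypothetical.
if __name__ == "__main__":
    demo_config = setup_working_directories({"root_directory": "/data/experiment"})
    print(demo_config["normalized_images_dir"])   # /data/experiment/norm_images/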
|
import pytest
from wemake_python_styleguide.violations.oop import (
UnpythonicGetterSetterViolation,
)
from wemake_python_styleguide.visitors.ast.classes import WrongClassBodyVisitor
module_getter_and_setter = """
attribute = 1
def get_attribute():
...
def set_attribute():
...
"""
static_getter_and_setter = """
attribute = 1
class Test(object):
@staticmethod
def get_attribute():
...
@staticmethod
def set_attribute():
...
"""
paired_getter_and_setter = """
class Test(object):
def get_attribute():
...
def set_attribute():
...
"""
property_getter_and_setter = """
class Test(object):
def __init__(self):
self.attribute = 1
@property
def attribute(self):
...
@attribute.setter
def attribute(self):
...
"""
dataclass_property_getter_setter = """
@dataclass
class DataClass(object):
attribute: int
@property
def attribute(self):
...
@attribute.setter
def attribute(self):
...
"""
dataclass_incorrect_property_getter_setter = """
@dataclass
class DataClass(object):
attribute: int
@property
def get_attribute(self):
...
@attribute.setter
def set_attribute(self):
...
"""
dataclass_getter_setter = """
@dataclass
class DataClass(object):
attribute: int
def get_attribute(self):
...
def set_attribute(self):
...
"""
child_getter_and_setter = """
class TestParent(object):
def __init__(self):
self.attribute = 1
class TestChild(TestParent):
def get_attribute(self):
...
def set_attribute(self):
...
"""
nested_getter_and_setter = """
class Template(object):
def __init__(self):
self.attribute = 1
def some_function(self):
def get_attribute(self):
...
def set_attribute(self):
...
get_attribute(self)
"""
class_getter_and_setter_attributes = """
class Test(object):
attribute = 1
get_attribute = 1
set_attribute = 1
"""
instance_getter_and_setter_attributes = """
class Test(object):
def __init__(self):
self.attribute = 1
self.get_attribute = 1
self.set_attribute = 1
"""
other_getter_and_setter = """
class Test(object):
def __init__(self, other):
other.attr = self.some()
def get_attr(self):
return something.unrelated()
"""
instance_attribute_template = """
class Template(object):
def __init__(self):
self.{0}{1}{2}
{3}
def {4}(self):
...
"""
class_attribute_template = """
class Template(object):
{0}{1}{2}
{3}
def {4}:
...
"""
class_mixed = """
class Test(object):
first: int
second = 2
third: int = 3
def __init__(self):
self.{0}{1} = 5
def get_{2}(self):
...
def set_{3}(self):
...
"""
@pytest.mark.parametrize('code', [
module_getter_and_setter,
nested_getter_and_setter,
property_getter_and_setter,
class_getter_and_setter_attributes,
instance_getter_and_setter_attributes,
dataclass_property_getter_setter,
other_getter_and_setter,
])
def test_valid_getter_and_setter(
assert_errors,
parse_ast_tree,
default_options,
code,
mode,
):
"""Testing that correct usage of getter/setter is allowed."""
tree = parse_ast_tree(mode(code))
visitor = WrongClassBodyVisitor(default_options, tree=tree)
visitor.run()
assert_errors(visitor, [])
@pytest.mark.parametrize('code', [
dataclass_getter_setter,
dataclass_incorrect_property_getter_setter,
static_getter_and_setter,
child_getter_and_setter,
paired_getter_and_setter,
])
def test_invalid_getter_and_setter(
assert_errors,
parse_ast_tree,
default_options,
code,
mode,
):
"""Testing that wrong use of getter/setter is prohibited."""
tree = parse_ast_tree(mode(code))
visitor = WrongClassBodyVisitor(default_options, tree=tree)
visitor.run()
assert_errors(visitor, [
UnpythonicGetterSetterViolation,
UnpythonicGetterSetterViolation,
])
@pytest.mark.parametrize('access', [''])
@pytest.mark.parametrize('assignment', [' = 1'])
@pytest.mark.parametrize(('attribute_name', 'annotation', 'method_name'), [
('attribute', '', 'get_attribute_some'),
('attribute', '', 'some_get_attribute'),
('attribute', '', 'get_some_attribute'),
('attribute', '', 'attribute_get'),
('some_attribute', '', 'get_attribute'),
('attribute_some', '', 'get_attribute'),
])
def test_nonmatching_instance(
assert_errors,
parse_ast_tree,
default_options,
access,
assignment,
attribute_name,
annotation,
method_name,
mode,
):
"""Testing that non matching attribute and getter/setter is allowed."""
test_instance = instance_attribute_template.format(
access, attribute_name, assignment, annotation, method_name,
)
tree = parse_ast_tree(mode(test_instance))
visitor = WrongClassBodyVisitor(default_options, tree=tree)
visitor.run()
assert_errors(visitor, [])
@pytest.mark.parametrize('access', ['', '_', '__'])
@pytest.mark.parametrize('assignment', [
' = 1',
': int = 1',
' = self.other = 1',
', self.other = 1, 2',
])
@pytest.mark.parametrize(('attribute_name', 'annotation', 'method_name'), [
('attribute', '', 'get_attribute'),
('attribute', '', 'set_attribute'),
('attribute_some', '', 'get_attribute_some'),
('some_attribute', '', 'set_some_attribute'),
('attribute', '@classmethod', 'get_attribute'),
('attribute', '@classmethod', 'set_attribute'),
('attribute', '@staticmethod', 'get_attribute'),
('attribute', '@staticmethod', 'set_attribute'),
('attribute', '@property', 'get_attribute'),
('attribute', '@attribute.setter', 'set_attribute'),
])
def test_instance_getter_setter(
assert_errors,
parse_ast_tree,
default_options,
access,
assignment,
attribute_name,
annotation,
method_name,
mode,
):
"""Testing that instance attribute and getter/setter is prohibited."""
test_instance = instance_attribute_template.format(
access, attribute_name, assignment, annotation, method_name,
)
tree = parse_ast_tree(mode(test_instance))
visitor = WrongClassBodyVisitor(default_options, tree=tree)
visitor.run()
assert_errors(visitor, [UnpythonicGetterSetterViolation])
@pytest.mark.parametrize('access', [''])
@pytest.mark.parametrize('assignment', [' = 1'])
@pytest.mark.parametrize(('attribute_name', 'annotation', 'method_name'), [
('attribute', '@classmethod', 'get_attribute_some(self)'),
('attribute', '@classmethod', 'some_get_attribute(self)'),
('attribute', '@classmethod', 'get_some_attribute(self)'),
('attribute', '@classmethod', 'attribute_get(self)'),
('some_attribute', '@classmethod', 'get_attribute(self)'),
('attribute_some', '@classmethod', 'get_attribute(self)'),
])
def test_nonmatching_class(
assert_errors,
parse_ast_tree,
default_options,
access,
attribute_name,
annotation,
method_name,
assignment,
mode,
):
"""Testing that non matching attribute and getter/setter is allowed."""
test_instance = class_attribute_template.format(
access, attribute_name, assignment, annotation, method_name,
)
tree = parse_ast_tree(mode(test_instance))
visitor = WrongClassBodyVisitor(default_options, tree=tree)
visitor.run()
assert_errors(visitor, [])
@pytest.mark.parametrize('access', ['', '_', '__'])
@pytest.mark.parametrize('assignment', [
' = 1',
': int = 1',
': int',
' = other = 1',
', other = 1, 2',
])
@pytest.mark.parametrize(('attribute_name', 'annotation', 'method_name'), [
('attribute', '@classmethod', 'get_attribute(cls)'),
('attribute', '@classmethod', 'set_attribute(cls)'),
('attribute_some', '@classmethod', 'get_attribute_some(self)'),
('some_attribute', '@classmethod', 'set_some_attribute(self)'),
('attribute', '', 'get_attribute(cls)'),
('attribute', '', 'set_attribute(cls)'),
('attribute', '@staticmethod', 'get_attribute(cls)'),
('attribute', '@staticmethod', 'set_attribute(cls)'),
])
def test_class_attributes_getter_setter(
assert_errors,
parse_ast_tree,
default_options,
attribute_name,
access,
annotation,
method_name,
assignment,
mode,
):
"""Testing that using getter/setters with class attributes is prohibited."""
test_instance = class_attribute_template.format(
access, attribute_name, assignment, annotation, method_name,
)
tree = parse_ast_tree(mode(test_instance))
visitor = WrongClassBodyVisitor(default_options, tree=tree)
visitor.run()
assert_errors(visitor, [UnpythonicGetterSetterViolation])
@pytest.mark.parametrize('access', ['', '_', '__'])
@pytest.mark.parametrize(('first', 'second', 'third'), [
('attribute', 'some', 'other'),
('attribute', 'some', 'another'),
])
def test_class_mixed(
assert_errors,
parse_ast_tree,
default_options,
access,
first,
second,
third,
mode,
):
"""Testing correct use of methods with get/set in name."""
test_instance = class_mixed.format(access, first, second, third)
tree = parse_ast_tree(mode(test_instance))
visitor = WrongClassBodyVisitor(default_options, tree=tree)
visitor.run()
assert_errors(visitor, [])
|
# Copyright 2022 Sony Group Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pickle
from os import path, mkdir
import sys
import os
import numpy as np
import nnabla as nn
from nnabla.utils.image_utils import imsave
from nnabla.ext_utils import get_extension_context
import args
from utils import utils
sys.path.append(os.path.abspath('../../image-generation/pggan/'))
from functions import pixel_wise_feature_vector_normalization
from helpers import load_gen
def convert_images_to_uint8(images, drange=[-1, 1]):
"""
convert float32 -> uint8
"""
if isinstance(images, nn.Variable):
images = images.d
if isinstance(images, nn.NdArray):
images = images.data
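    # Linear map from `drange` onto [0, 255]: with drange=[-1, 1] the scale is
    # 127.5, so -1 maps to 0 and +1 maps to 255 after clipping.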
scale = 255 / (drange[1] - drange[0])
images = images * scale + (0.5 - drange[0] * scale)
return np.uint8(np.clip(images, 0, 255))
def generate_images(gen, num_images, n_latent=512,
hyper_sphere=True, save_dir=None, latent_vector=None):
"""
generate the images
Args:
        gen : loaded generator network
        num_images (int) : number of images to generate
        n_latent (int) : dimensionality of the latent space (512, trained on CelebA)
        hyper_sphere (bool) : default True
        save_dir (str) : directory to save the images
        latent_vector (str) : path to save the latent vectors (.pkl file)
"""
if not path.isdir(save_dir):
mkdir(save_dir)
z_data = np.random.randn(num_images, n_latent, 1, 1)
# Saving latent vectors
with open(latent_vector, 'wb+') as f:
pickle.dump(z_data.reshape((num_images, n_latent)), f)
z = nn.Variable.from_numpy_array(z_data)
z = pixel_wise_feature_vector_normalization(z) if hyper_sphere else z
batch_size = 64
iterations = int(num_images/batch_size)
if num_images % batch_size != 0:
iterations += 1
count = 0
for ell in range(iterations):
y = gen(z[ell * batch_size:(ell + 1) * batch_size], test=True)
images = convert_images_to_uint8(y, drange=[-1, 1])
for i in range(images.shape[0]):
imsave(save_dir+'/gen_'+str(count)+'.jpg',
images[i], channel_first=True)
count += 1
print("images are generated")
def generate_flipped_images(gen, latent_vector, hyper_sphere=True, save_dir=None):
"""
generate flipped images
Args:
gen : generator
latent_vector(numpy.ndarray) : latent_vector
hyper_sphere (bool) : default True
save_dir (str) : directory to save the images
"""
if not path.isdir(save_dir):
mkdir(save_dir)
z_data = np.reshape(
latent_vector, (latent_vector.shape[0], latent_vector.shape[1], 1, 1))
z = nn.Variable.from_numpy_array(z_data)
z = pixel_wise_feature_vector_normalization(z) if hyper_sphere else z
batch_size = 64 # we have taken batch size of 64
num_images = latent_vector.shape[0]
iterations = int(num_images / batch_size)
if num_images % batch_size != 0:
iterations += 1
count = 0
for ell in range(iterations):
y = gen(z[ell * batch_size:(ell + 1) * batch_size], test=True)
images = convert_images_to_uint8(y, drange=[-1, 1])
for i in range(images.shape[0]):
imsave(save_dir + '/gen_' + str(count) +
'.jpg', images[i], channel_first=True)
count += 1
print("all paired images generated")
if __name__ == "__main__":
# args
opt = args.get_args()
experiment = opt['generate']
num_images = opt['num_images']
attr_list = utils.get_all_attr()
# Context
ctx = get_extension_context(
opt['context'], device_id=opt['device_id'], type_config=opt['type_config'])
nn.set_default_context(ctx)
nn.set_auto_forward(True)
# Generate config
model_load_path = opt['generator_model']
use_bn = False
last_act = 'tanh'
use_wscale = True
use_he_backward = False
# Load generator
gen = load_gen(model_load_path, use_bn=use_bn, last_act=last_act,
use_wscale=use_wscale, use_he_backward=use_he_backward)
if experiment == 'orig':
save_dir = "{}/AllGenImages".format(opt["fake_data_dir"])
latent_vector = r"{}/latent_vectors.pkl".format(
opt['record_latent_vector'])
generate_images(gen, num_images,
save_dir=save_dir, latent_vector=latent_vector)
if experiment == 'flip':
save_dir = "{}/{}/".format(
opt["fake_data_dir"], attr_list[opt['attribute']])
latent = pickle.load(open(r"{}/latent_vectors_{}.pkl".format(
opt['record_latent_vector'], attr_list[opt['attribute']]), 'rb'))
generate_flipped_images(gen, latent, save_dir=save_dir)
|
from setuptools import setup, find_packages
setup(
name = "Boids",
version = "6.6.2",
description = "Simulation of Boids",
author = "Leo Carlos-Sandberg"
url = "https://github.com/lcarlossandberg/Bad_Boids",
license = "MIT License"
packages = find_packages(exclude=['*test']),
scripts = ['scripts/Boids'],
install_requires = ['argparse','numpy', 'matplotlib']
)
|
#!/usr/bin/env python
# Written by: DGC
# python imports
# local imports
# done before any other imports in case of errors in them
import Error
Error.set_exception_handler()
# next start logging.
import Log
Log.start_logging()
# now set up localisation
import Localisation
localisation = Localisation.Localiser()
import UserText
UserText.USER_TEXT = UserText.UserText(localisation.language())
import ToolTips
ToolTips.TOOL_TIP = ToolTips.ToolTip(localisation.language())
del localisation
import HiddenImports
#==============================================================================
if (__name__ == "__main__"):
pass
|
from unittest.mock import MagicMock, patch
from django.test import TestCase
from monitor.decorators import hub_signature_required
class TestHubSignatureRequired(TestCase):
def fn_to_test(self, request):
return request.body
def setUp(self):
self.decorator = hub_signature_required
self.signature = 'sha=signature'
self.request = MagicMock()
self.request.body = 'request test body'
self.request.META = {
'HTTP_X_HUB_SIGNATURE': self.signature
}
@patch('monitor.utils.hub_signature_verify')
def test_passes(self, hub_signature_verify):
hub_signature_verify.return_value = True
result = self.decorator(self.fn_to_test)(self.request)
hub_signature_verify.assert_called_with(
self.request.body, self.signature
)
self.assertEqual(result, self.request.body)
@patch('monitor.utils.hub_signature_verify')
def test_fails(self, hub_signature_verify):
hub_signature_verify.return_value = False
result = self.decorator(self.fn_to_test)(self.request)
hub_signature_verify.assert_called_with(
self.request.body, self.signature
)
self.assertEqual(result.status_code, 403)
|
import functools
import sys
import types
from nose import SkipTest
from nose.tools import eq_
from .. import helper
from ..helper import MockXPI
from appvalidator.constants import SPIDERMONKEY_INSTALLATION
from appvalidator.errorbundle import ErrorBundle
from appvalidator.errorbundle.outputhandlers.shellcolors import OutputHandler
import appvalidator
import appvalidator.testcases.content
appvalidator.testcases.javascript.traverser.JS_DEBUG = True
appvalidator.testcases.javascript.predefinedentities.enable_debug()
def uses_js(func):
if func:
try:
setattr(func, "js", True)
except Exception:
            # If Python > 2.7 squawks about methods being bound, just work around
# the nonsense.
setattr(func.__func__, "js", True)
return func
def skip_on_acorn(func):
"""Skips a test when the test is run under Acorn."""
if not SPIDERMONKEY_INSTALLATION:
raise SkipTest()
return func
class TestCase(helper.TestCase):
"""A TestCase object with specialized functions for JS testing."""
def __init__(self, *args, **kwargs):
super(TestCase, self).__init__(*args, **kwargs)
for method in filter(callable, (getattr(self, m) for m in dir(self))):
if not method.__name__.startswith("test_"):
continue
uses_js(method)
uses_js(None)
def setUp(self):
self.file_path = "foo.js"
self.final_context = None
super(TestCase, self).setUp()
def run_script_from_file(self, path):
"""
Run the standard set of JS engine tests on a script found at the
location in `path`.
"""
with open(path) as script_file:
return self.run_script(script_file.read())
def run_script(self, script):
"""
Run the standard set of JS engine tests on the script passed via
`script`.
"""
print "Running", script
if self.err is None:
self.setup_err()
appvalidator.testcases.content._process_file(self.err, MockXPI(),
self.file_path, script)
if self.err.final_context is not None:
print self.err.final_context.output()
self.final_context = self.err.final_context
def get_var(self, name):
"""
Return the value of a variable from the final script context.
"""
try:
return self.final_context.data[name].get_literal_value()
except KeyError:
raise ("Test seeking variable (%s) not found in final context." %
name)
def assert_var_eq(self, name, value, explanation=None):
"""
Assert that the value of a variable from the final script context
contains the value specified.
"""
print "Testing {var} == {val}".format(var=name, val=value)
val = self.get_var(name)
if isinstance(val, float):
val *= 100000
val = round(val)
val /= 100000
eq_(val, value,
explanation or "%r doesn't equal %r" % (val, value))
def must_assert(func):
"Decorator for asserting that a JS assert method is used."
@functools.wraps(func)
def wrap(self):
func(self)
assert getattr(self.err, "asserts", False), "Does not assert!"
return wrap
def silent(func):
"Decorator for asserting that the output of a test is silent."
@functools.wraps(func)
def wrap(self):
func(self)
self.assert_silent()
return wrap
def warnings(count=None):
"Decorator for asserting that the output of a test has warnings."
def decorator(func):
@functools.wraps(func)
def wrap(self):
func(self)
self.assert_failed(with_warnings=True)
if count is not None:
eq_(len(self.err.warnings), count,
"Warning count does not match")
return wrap
return decorator
def errors(count=None):
"Decorator for asserting that the output of a test has errors."
def decorator(func):
@functools.wraps(func)
def wrap(self):
func(self)
self.assert_failed(with_errors=True)
if count is not None:
eq_(len(self.err.errors), count,
"Warning count does not match")
return wrap
return decorator
|
import datetime
from abstractblock import AbstractBlock
from textdatav1 import TextDataV1
class GenesisBlock(AbstractBlock):
@property
def id(self):
return self._index
@property
def timestamp(self):
return self._timestamp
@property
def data(self):
return self._data
@property
def previous_hash(self):
return self._previous_hash
@property
def hash(self):
return self._hash
def __init__(self):
genesis_index = 0
self._index = genesis_index
self._previous_hash = genesis_index
self._timestamp = datetime.datetime.utcnow()
self._data = TextDataV1("Genesis Block")
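        # Editorial note: the call on the next line presumably resolves to a
        # _hash() helper defined on AbstractBlock; assigning its result back to
        # self._hash shadows that method on this instance with the computed value.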
self._hash = self._hash()
|
from django import forms
from .models import (
Book,
Category,
Shelf,
Author
)
class BookCreationAddForm(forms.ModelForm):
class Meta:
model = Book
fields = ('name', 'author', 'category',
'amount', 'price', 'image', 'shelf', )
class CategoryCreationForm(forms.ModelForm):
class Meta:
model = Category
fields = ('name',)
class CategoryUpdateForm(forms.ModelForm):
name = forms.CharField(max_length=120, label="Category Name", widget=forms.TextInput(
attrs={'placeholder': 'Enter new category name'}))
class Meta:
        model = Category
fields = ('name',)
class ShelfCreationForm(forms.ModelForm):
class Meta:
model = Shelf
fields = ('name',)
class ShelfUpdateForm(forms.ModelForm):
name = forms.CharField(max_length=120, label="Shelf Name", widget=forms.TextInput(
attrs={'placeholder': 'Enter new shelf name'}))
class Meta:
model = Shelf
fields = ('name', 'active')
class AuthorCreationForm(forms.ModelForm):
class Meta:
model = Author
fields = ('first_name', 'last_name', 'born', 'died', 'image', )
|
_base_ = ['./segmentation_static.py', '../_base_/backends/onnxruntime.py']
onnx_config = dict(input_shape=[2048, 1024])
|
from django.db import models
# Create your models here.
class Grocery(models.Model):
ItemName = models.CharField(max_length=100)
ItemQuantity = models.CharField(max_length=50)
ItemStatus = models.CharField(max_length=15)
Date = models.DateField()
UserId = models.IntegerField(default=5)
def __str__(self):
return self.ItemName
|
def aumentar(n=0):
return n * 1.5
def diminuir(n=0):
return n * 0.5
def dobro(n=0):
return n * 2
def metade(n=0):
return n / 2
def moeda(n=0):
return f'R${n:.2f}'.replace('.', ',')
|
from __future__ import print_function, absolute_import
import torch
import torch.nn.functional as F
from torch import nn, autograd
from torch.autograd import Variable, Function
import numpy as np
import math
torch.autograd.set_detect_anomaly(True)
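# Editorial note: ExemplarMemory below follows the legacy (pre-1.3) torch.autograd.Function
# style (instance state plus non-static forward/backward). Because it is invoked through a
# plain method call ("forwarding") rather than Function.apply, its custom backward() -- and
# therefore the in-place proxy-memory update performed there -- is never registered with
# autograd. Porting it to the static Function.apply API would be required for that update
# to actually run during backpropagation.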
class ExemplarMemory(Function):
def __init__(self, em, alpha=0.01):
super(ExemplarMemory, self).__init__()
self.em = em
self.alpha = alpha
def forwarding(self, inputs, targets):
self.save_for_backward(inputs, targets)
outputs = inputs.mm(self.em.t())
return outputs
def backward(self, grad_outputs):
inputs, targets = self.saved_tensors
grad_inputs = None
if self.needs_input_grad[0]:
grad_inputs = grad_outputs.mm(self.em)
for x, y in zip(inputs, targets):
self.em[y] = self.alpha * self.em[y] + (1.0 - self.alpha) * x
self.em[y] /= self.em[y].norm()
return grad_inputs, None
class CAPMemory(nn.Module):
def __init__(self, beta=0.05, alpha=0.01, all_img_cams='', crosscam_epoch=5, bg_knn=50):
super(CAPMemory, self).__init__()
self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
self.alpha = alpha # Memory update rate
self.beta = beta # Temperature factor
self.all_img_cams = torch.tensor(all_img_cams).to(torch.device('cuda'))
self.unique_cams = torch.unique(self.all_img_cams)
self.all_pseudo_label = ''
self.crosscam_epoch = crosscam_epoch
self.bg_knn = bg_knn
def forwarding(self, features, targets, cams=None, epoch=None, all_pseudo_label='',
batch_ind=-1, init_intra_id_feat=''):
loss = torch.tensor([0.]).to(device='cuda')
self.all_pseudo_label = all_pseudo_label
self.init_intra_id_feat = init_intra_id_feat
loss = self.loss_using_pseudo_percam_proxy(features, targets, cams, batch_ind, epoch)
return loss
def loss_using_pseudo_percam_proxy(self, features, targets, cams, batch_ind, epoch):
if batch_ind == 0:
# initialize proxy memory
self.percam_memory = []
self.memory_class_mapper = []
self.concate_intra_class = []
for cc in self.unique_cams:
percam_ind = torch.nonzero(self.all_img_cams == cc).squeeze(-1)
uniq_class = torch.unique(self.all_pseudo_label[percam_ind])
uniq_class = uniq_class[uniq_class >= 0]
self.concate_intra_class.append(uniq_class)
cls_mapper = {int(uniq_class[j]): j for j in range(len(uniq_class))}
self.memory_class_mapper.append(cls_mapper) # from pseudo label to index under each camera
if len(self.init_intra_id_feat) > 0:
# print('initializing ID memory from updated embedding features...')
proto_memory = self.init_intra_id_feat[cc]
proto_memory = proto_memory.to(torch.device('cuda'))
self.percam_memory.append(proto_memory.detach())
self.concate_intra_class = torch.cat(self.concate_intra_class)
if epoch >= self.crosscam_epoch:
percam_tempV = []
for ii in self.unique_cams:
percam_tempV.append(self.percam_memory[ii].detach().clone())
percam_tempV = torch.cat(percam_tempV, dim=0).to(torch.device('cuda'))
loss = torch.tensor([0.]).to(self.device)
for cc in torch.unique(cams):
inds = torch.nonzero(cams == cc).squeeze(-1)
percam_targets = self.all_pseudo_label[targets[inds]]
percam_feat = features[inds]
# intra-camera loss
mapped_targets = [self.memory_class_mapper[cc][int(k)] for k in percam_targets]
mapped_targets = torch.tensor(mapped_targets).to(torch.device('cuda'))
percam_inputs = ExemplarMemory(self.percam_memory[cc], alpha=self.alpha).forwarding(percam_feat, mapped_targets)
percam_inputs /= self.beta # similarity score before softmax
loss += F.cross_entropy(percam_inputs, mapped_targets)
# global loss
if epoch >= self.crosscam_epoch:
associate_loss = 0
target_inputs = percam_feat.mm(percam_tempV.t().clone())
temp_sims = target_inputs.detach().clone()
target_inputs /= self.beta
for k in range(len(percam_feat)):
ori_asso_ind = torch.nonzero(self.concate_intra_class == percam_targets[k]).squeeze(-1)
temp_sims[k, ori_asso_ind] = -10000.0 # mask out positive
sel_ind = torch.sort(temp_sims[k])[1][-self.bg_knn:]
concated_input = torch.cat((target_inputs[k, ori_asso_ind], target_inputs[k, sel_ind]), dim=0)
concated_target = torch.zeros((len(concated_input)), dtype=concated_input.dtype).to(torch.device('cuda'))
concated_target[0:len(ori_asso_ind)] = 1.0 / len(ori_asso_ind)
associate_loss += -1 * (F.log_softmax(concated_input.unsqueeze(0), dim=1) * concated_target.unsqueeze(0)).sum()
loss += 0.5 * associate_loss / len(percam_feat)
return loss
|
from random import choice
import numpy as np
from constants import *
from tile import Tile, TileType
class Board(object):
"""
Board class.
"""
def __init__(self, row, column, board_setup=None):
"""
Board class construct.
:param row: # of rows in the board
:param column: # of columns in the board
:param board_setup: initial board setup to use when initialising the state of the board. Default is None.
"""
self.board: list = []
self.board_row: int = row
self.board_column: int = column
self._initialise_board(board_setup)
@property
def board(self):
return self._board
@board.setter
def board(self, value):
if not isinstance(value, list):
raise TypeError(f"Incorrect variable type assigned to board: {value}")
self._board = value
@property
def board_row(self):
return self._board_row
@board_row.setter
def board_row(self, value):
if not isinstance(value, int):
raise TypeError(f"Incorrect variable type assigned to board_row: {value}")
if value <= 3:
raise ValueError(f"Row must be greater than 3: {value}")
self._board_row = value
@property
def board_column(self):
return self._board_column
@board_column.setter
def board_column(self, value):
if not isinstance(value, int):
raise TypeError(f"Incorrect variable type assigned to board_row: {value}")
if value <= 3:
raise ValueError(f"Row must be greater than 3: {value}")
self._board_column = value
def _initialise_board(self, board_setup):
"""
Initialise the board with non-empty Tile objects
:param board_setup: 2D-array with prescribed non-empty Tile objects i.e. tile_type is at least one. If None,
the board will instead be randomised with non-empty Tile objects.
:return:
"""
if board_setup is None:
nonempty_types = [i for i in TileType if i != TileType.EMPTY]
for i in range(self._board_row):
self._board.append([])
for j in range(self._board_column):
tile = Tile(choice(nonempty_types))
self._board[i].append(tile)
else:
if len(board_setup) != self._board_row:
raise ValueError(f"INITIALISATION ERROR: row dimensions do not match")
for i in range(self._board_row):
if self._board_column != len(board_setup[i]):
raise ValueError(f"INITIALISATION ERROR: column size of row {i} does not match column size "
f"parameter")
for j in range(self._board_column):
if not isinstance(board_setup[i][j], Tile):
raise TypeError(f"INITIALISATION ERROR: board position ({i}, {j}) "
f"is not an instance of class Tile.")
if board_setup[i][j].tile_type == TileType.EMPTY:
raise ValueError(f"INITIALISATION ERROR: tile_type of board position ({i}, {j}) is zero. "
f"Must be at least one.")
self._board = board_setup
def _get_tile_sprite(self, row_pos, col_pos):
return self._board[row_pos][col_pos]
def get_tile_type(self, row_pos, col_pos):
"""
Get the tile_type variable of a Tile class with board position (row_pos, col_pos)
:param row_pos: Row index of tile (NB: First row has index 0!)
:param col_pos: Column index of tile
:return: tile_type from Tile class at (row_pos, col_pos)
"""
return self._board[row_pos][col_pos].tile_type
def set_tile_type(self, row_pos, col_pos, new_tile_type):
"""
Set the tile_type variable of a Tile class with board position (row_pos, col_pos). Load the appropriate tile
texture afterwards.
:param row_pos: Row index of tile (NB: First row has index 0!)
:param col_pos: Column index of tile
:param new_tile_type: TileType enum
:return:
"""
self._board[row_pos][col_pos].tile_type = new_tile_type
self._board[row_pos][col_pos].set_tile_texture()
def remove_tiles(self, tile_coordinates):
"""
Remove selected tiles so that their tile type is zero.
:param tile_coordinates: list of tile co-ordinates, each of the form (row_pos, col_pos).
:return:
"""
for coord in tile_coordinates:
self.set_tile_type(coord[0], coord[1], TileType.EMPTY)
def increment_board_tiles(self, tile_coordinates):
"""
Increment selected tiles provided that they are non-empty. If tile type is maximum (4), reset it to one.
:param tile_coordinates: list of tile co-ordinates, each of the form (row_pos, col_pos).
:return:
"""
for coord in tile_coordinates:
if self.get_tile_type(coord[0], coord[1]) == TileType.ONE_TILE:
self.set_tile_type(coord[0], coord[1], TileType.TWO_TILE)
continue
if self.get_tile_type(coord[0], coord[1]) == TileType.TWO_TILE:
self.set_tile_type(coord[0], coord[1], TileType.THREE_TILE)
continue
if self.get_tile_type(coord[0], coord[1]) == TileType.THREE_TILE:
self.set_tile_type(coord[0], coord[1], TileType.FOUR_TILE)
continue
if self.get_tile_type(coord[0], coord[1]) == TileType.FOUR_TILE:
self.set_tile_type(coord[0], coord[1], TileType.ONE_TILE)
continue
def find_group_and_perimeter(self, row_pos, col_pos):
"""
Given a row and column position on the board, find the group of contiguous tiles of the same type and the set
of tiles that surround them having a different tile type. Uses breadth-first search.
:param row_pos: row position selected
:param col_pos: column position selected
:return: List, List
"""
target_type = self.get_tile_type(row_pos, col_pos)
group = [(row_pos, col_pos)]
perimeter = []
queue = [(row_pos, col_pos)]
while queue:
node = queue.pop(0)
adjacent_tiles = []
# Append the top tile
if node[0] > 0:
adjacent_tiles.append(self._board[node[0] - 1][node[1]])
# Append the left tile
if node[1] > 0:
adjacent_tiles.append(self._board[node[0]][node[1] - 1])
# Append the right tile
if node[1] < self._board_column - 1:
adjacent_tiles.append(self._board[node[0]][node[1] + 1])
# Append the bottom tile
if node[0] < self._board_row - 1:
adjacent_tiles.append(self._board[node[0] + 1][node[1]])
selected_tiles = [t.coordinates for t in adjacent_tiles if t.tile_type == target_type and
t.coordinates not in group]
boundary_tiles = [t.coordinates for t in adjacent_tiles if t.tile_type != target_type and
t.coordinates not in perimeter]
group.extend(selected_tiles)
perimeter.extend(boundary_tiles)
queue.extend(selected_tiles)
return group, perimeter
def highlight_group(self, group, counter):
if len(group) == 1:
return
for coord in group:
if self.get_tile_type(coord[0], coord[1]) == TileType.EMPTY:
continue
self._get_tile_sprite(coord[0], coord[1]).color = (255,
int(255 * 0.5 * (np.sin(HIGHLIGHT_SPEED * counter) + 1)),
int(255 * 0.5 * (np.sin(HIGHLIGHT_SPEED * counter) + 1)))
def _flush_tile(self, row_pos, col_pos):
self._get_tile_sprite(row_pos, col_pos).color = (255, 255, 255)
def flush_tiles(self, group):
for coord in group:
self._flush_tile(coord[0], coord[1])
def flush_board(self):
for row in range(self._board_row):
for column in range(self._board_column):
self._flush_tile(row, column)
def any_legal_moves(self):
"""
Check if there any available moves in the board. A 'move' is present on the board if there are at least two
non-empty contiguous tiles of the same type.
:return: Boolean
"""
for i in range(self._board_row):
for j in range(self._board_column):
tile = self._board[i][j]
# Check that the tile in question is non-empty
if tile.tile_type == TileType.EMPTY:
continue
# Check the top row
if i == 0:
if j == 0:
moves_exist = (tile.tile_type == self._board[0][1].tile_type) \
or (tile.tile_type == self._board[1][0].tile_type)
elif j == self._board_column - 1:
moves_exist = (tile.tile_type == self._board[0][j - 1].tile_type) \
or (tile.tile_type == self._board[1][j].tile_type)
else:
moves_exist = (tile.tile_type == self._board[0][j - 1].tile_type) \
or (tile.tile_type == self._board[0][j + 1].tile_type) \
or (tile.tile_type == self._board[1][j].tile_type)
# Check the bottom row
elif i == self._board_row - 1:
if j == 0:
moves_exist = (tile.tile_type == self._board[i][1].tile_type) \
or (tile.tile_type == self._board[i - 1][0].tile_type)
elif j == self._board_column - 1:
moves_exist = (tile.tile_type == self._board[i][j - 1].tile_type) \
or (tile.tile_type == self._board[i - 1][j].tile_type)
else:
moves_exist = (tile.tile_type == self._board[i][j - 1].tile_type) \
or (tile.tile_type == self._board[i][j + 1].tile_type) \
or (tile.tile_type == self._board[i - 1][j].tile_type)
# Check the rest of the rows
else:
if j == 0:
moves_exist = (tile.tile_type == self._board[i][j + 1].tile_type) \
or (tile.tile_type == self._board[i + 1][j].tile_type) \
or (tile.tile_type == self._board[i - 1][j].tile_type)
elif j == self._board_column - 1:
moves_exist = (tile.tile_type == self._board[i][j - 1].tile_type) \
or (tile.tile_type == self._board[i + 1][j].tile_type) \
or (tile.tile_type == self._board[i - 1][j].tile_type)
else:
moves_exist = (tile.tile_type == self._board[i][j - 1].tile_type) \
or (tile.tile_type == self._board[i][j + 1].tile_type) \
or (tile.tile_type == self._board[i - 1][j].tile_type) \
or (tile.tile_type == self._board[i + 1][j].tile_type)
if moves_exist:
return True
return False
def __str__(self):
"""
Print out current state of the board. Note that we have to mirror the board when we print it out to match
the grid displayed in the game window.
:return: string
"""
string = ""
for i in range(self._board_row - 1, -1, -1):
for j in range(self._board_column):
string += str(self._board[i][j]) + " " + str(self._board[i][j].coordinates) + "\t"
string += "\n"
return string
if __name__ == "__main__":
board = Board(5, 5)
print(board)
print(board.any_legal_moves())
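    # Editorial sketch: also exercise the BFS helper documented above by inspecting
    # the contiguous group and its perimeter around the bottom-left tile.
    group, perimeter = board.find_group_and_perimeter(0, 0)
    print("group:", group)
    print("perimeter:", perimeter)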
|
# coding=utf-8
# main.py
# Author: Matteo Esposito
# Python version: 2.6.9
import time
from lib.settings import DEBUG, RELEASE
from lib.ProjUtilities import generateRandomAVLTree, concatenate
global A, B
def benchmark(n):
"""
    Entry point of the benchmarking functions: it builds the AVL trees and calls
    "doTest" in order to collect information and data
    about the algorithm's running time as a function of the input size
    :param n: number of times the algorithm should be tested
:return:
"""
global A, B
while n > 0:
if n % 2 == 0:
d = 0
else:
d = 1
A, B = generateRandomAVLTree(0, 255*n, 50*n, 10*n, d)
doTest(n,50*n,10*n,d)
n = n-1
def doTest(i, elements,diff, dir):
"""
    Function used to perform the benchmarking.
    Given the nature of the algorithm and the need to generate the AVL trees at runtime,
    the "best" choice was to split the code into two fragments, so that
    ONLY the execution time of the algorithm is measured, and not
    the time spent building the AVL trees as well
    :param i: i-th iteration
    :param elements: number of elements of the shorter tree
    :param diff: number of extra elements to add to the taller tree with respect to the shorter one
    :param dir: 0: A taller than B
                1: B taller than A
:return: None
"""
global A, B
tempo_iniziale = time.time()
concatenate(A,B)
tempo_finale = time.time()
d = "direzione"
if dir == 0:
d = "B"
else:
d = "A"
eltot = 2*elements + diff
print "Test N:", str(i), ",", str(tempo_finale - tempo_iniziale), ", secondi,", "elementi: ", str(elements), ", differenza di elementi: ", str(diff), "elementi totali: ", str(eltot), ", albero maggiore: ", d
if __name__ == "__main__":
    # Main Debugging and Testing Unit:
    # I chose to implement my algorithm this way;
    # below you can change the value of the variable
    # between 0 and 1, with the following results:
    # Values of C:
    #
    # 0: Debug of a generic AVL tree
    # 1: Benchmarking of the algorithm
    #
    C = 0  # Change this value to switch between the two testing environments
    # For more verbose results, adjust the related FLAGS
    # in the settings.py file in the lib folder (by default they are set to FALSE)
if C == 0:
A, B = generateRandomAVLTree(0, 20000, 100, 300, 0)
C = concatenate(A, B)
if DEBUG | RELEASE:
print("\n \n Albero Concatenato \n \n ")
C.tree.stampa()
elif C == 1:
print "######## Inizio Test ########"
benchmark(1000)
print "######## Fine Test ########"
else:
print "Scelta non valida!"
|
import os
import tornado.ioloop
import tornado.web
import TileStache
class TornadoRequestHandler(tornado.web.RequestHandler):
""" Create a Tornado HTTP get and post handler.
This class is documented as part of Tornado public RequestHandler API:
http://www.tornadoweb.org/en/stable/guide/structure.html
"""
def initialize(self, config=None, autoreload=False):
self.config = config
self.autoreload = autoreload
try:
self.tsconfig = TileStache.parseConfig(self.config)
except:
print "Error loading Tilestache config:"
raise
def get(self, *args, **kwargs):
if self.autoreload: # re-parse the config file on every request
try:
                self.tsconfig = TileStache.parseConfig(self.config)
            except Exception, e:
                raise TileStache.Core.KnownUnknown("Error loading Tilestache configuration:\n%s" % str(e))
status_code, headers, content = TileStache.requestHandler2(
self.tsconfig, args[0])
# Get the header
header = headers.items()[0]
# Tornado syntax for passing headers
self.set_header(header[0], header[1])
self.write(content)
class TornadoTileApplication(tornado.web.Application):
""" Create a Tornado application that can handle HTTP requests.
This class is documented as part of TileStache's public API:
http://tilestache.org/doc/#wsgi
The Tornado application is an instance of this class. Example:
app = TornadoTileApplication(config='/path/to/tilestache.cfg')
app.listen(8080)
tornado.ioloop.IOLoop.current().start()
"""
def __init__(self, **kwargs):
config = kwargs.get("config") or None
autoreload = kwargs.get("autoreload") or None
if type(config) in (str, unicode, dict):
hargs = dict(config=config, autoreload=autoreload)
kwargs['handlers'] = [(r"/(.*)", TornadoRequestHandler, hargs),
(r'/(favicon.ico)',
tornado.web.StaticFileHandler,
{'path': 'www/mustaches.jpg'})]
super(TornadoTileApplication, self).__init__(**kwargs)
else:
assert hasattr(config, 'cache'), 'Configuration object must have a cache.'
assert hasattr(config, 'layers'), 'Configuration object must have layers.'
assert hasattr(config, 'dirpath'), 'Configuration object must have a dirpath.'
|
"""Posts views."""
# Rest framework
from rest_framework.response import Response
from rest_framework.decorators import action
from rest_framework.generics import get_object_or_404
from rest_framework import status, viewsets, mixins
# Permissions
from rest_framework.permissions import (
AllowAny,
IsAuthenticated
)
from apps.posts.permissions import IsPostOwner
# Serializers
from apps.posts.serializers import (
CommentModelSerializer,
PostModelSerializer,
CreatePostSerializer,
AddCommentSerializer
)
# Models
from apps.posts.models import Post, Comment
class PostViewSet(mixins.RetrieveModelMixin,
mixins.ListModelMixin,
mixins.UpdateModelMixin,
viewsets.GenericViewSet):
"""Post view set."""
serializer_class = PostModelSerializer
def get_permissions(self):
"""Assign permissions based on action."""
if self.action in ['like', 'retrieve', 'list', 'store']:
permissions = [IsAuthenticated]
elif self.action in ['update', 'partial_update']:
permissions = [IsAuthenticated, IsPostOwner]
else:
permissions = [IsAuthenticated]
return [p() for p in permissions]
def get_object(self):
"""return specific post."""
return get_object_or_404(
Post,
pk=self.kwargs['pk']
)
def get_queryset(self):
"""Assign querys based on actions."""
queryset = Post.objects.all()
if self.action in ['like', 'retrieve', 'update', 'partial_update', 'destroy']:
"""all the actions is for one specific post."""
return queryset.get(pk=self.kwargs['pk'])
elif self.action == 'list':
"""Filter posts to show only the post of the users that
the requesting is following."""
user = self.request.user
id_list = []
follow_list = list(user.follow.all())
for i in follow_list:
id_list.append(i.id)
id_list.append(user.id)
return queryset.filter(user_id__in=id_list)
return queryset
@action(detail=False, methods=['POST'])
def store(self, request):
"""Handle post creation."""
serializer = CreatePostSerializer(context={'request': request}, data=request.data)
        serializer.is_valid(raise_exception=True)
post = serializer.save()
data = PostModelSerializer(post).data
# Update profile stats
profile = request.user.profile
profile.blog_posted += 1
profile.save()
return Response(data, status=status.HTTP_201_CREATED)
@action(detail=True, methods=['POST'])
def comment(self, request, pk):
"""handle comments creation for posts."""
post = self.get_object()
serializer = AddCommentSerializer(
context={'request': request, 'post': post},
data=request.data
)
        serializer.is_valid(raise_exception=True)
serializer.save()
        # add comments to the response
comments = Comment.objects.filter(post=post)
data = {
'post': PostModelSerializer(post).data,
'comments': CommentModelSerializer(comments, many=True).data
}
return Response(data, status=status.HTTP_201_CREATED)
@action(detail=True, methods=['POST'])
def like(self, request, pk):
"""Check if the user already like de post and
add or remove the like of the post updating
post like stats."""
post = self.get_object()
liked = False
if post.like.filter(id=request.user.id).exists():
post.like.remove(request.user)
liked = False
post.likes -= 1
post.save()
else:
post.like.add(request.user)
liked = True
post.likes += 1
post.save()
        if liked:
message = 'You liked {}'.format(post.title)
else:
message = 'You unliked {}'.format(post.title)
comments = Comment.objects.filter(post=post)
data = {
'post': PostModelSerializer(post).data,
'comments': CommentModelSerializer(comments, many=True).data,
'message': message
}
return Response(data, status=status.HTTP_200_OK)
def retrieve(self, request, *args, **kwargs):
"""Add extra content to the response."""
response = super(PostViewSet, self).retrieve(request, *args, **kwargs)
post = self.get_object()
comments = Comment.objects.filter(post=post)
data = {
'post': PostModelSerializer(post).data,
'comments': CommentModelSerializer(comments, many=True).data
}
response.data = data
return response
|
import sys
sys.path.append('./site-packages')
# flake8: noqa: E402
from crhelper import CfnResource
import boto3
from pathlib import Path
helper = CfnResource()
@helper.create
@helper.update
def on_create(event, __):
pass
def delete_s3_objects(bucket_name):
if bucket_name:
s3_resource = boto3.resource("s3")
try:
s3_resource.Bucket(bucket_name).objects.all().delete()
print(
"Successfully deleted objects in bucket "
"called '{}'.".format(bucket_name)
)
except s3_resource.meta.client.exceptions.NoSuchBucket:
print(
"Could not find bucket called '{}'. "
"Skipping delete.".format(bucket_name)
)
@helper.delete
def on_delete(event, __):
s3_bucket = event["ResourceProperties"]["S3Bucket"]
delete_s3_objects(s3_bucket)
def handler(event, context):
helper(event, context)
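# Usage note (editorial): this handler is intended to back a CloudFormation custom
# resource. The template is expected to pass the bucket name via Properties, e.g.
#   EmptyBucketOnDelete:
#     Type: Custom::EmptyBucket
#     Properties:
#       ServiceToken: <lambda function ARN>
#       S3Bucket: <bucket name>
# The resource type and logical name above are illustrative; only the "S3Bucket"
# property is read by on_delete().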
|
#!/usr/bin/env python
"""testwang - a tool for working with randomly-failing tests."""
from __future__ import (
absolute_import,
division,
print_function,
unicode_literals,
)
import argparse
import atexit
import copy
import io
import itertools
import json
import os
import subprocess
import sys
import tempfile
import time
from collections import Counter
class TestSpecModuleNotFound(Exception):
@property
def path(self):
return '.'.join(self.args[0])
class ResultForOneTestRun(object):
def __init__(self, outcome, duration):
self.outcome = outcome
self.duration = duration
class ResultsForOneTest(object):
def __init__(self):
self.cycle_results = []
def __iter__(self):
return iter(self.cycle_results)
def __len__(self):
return len(self.cycle_results)
@property
def overall_outcome(self):
try:
first_outcome = self.cycle_results[0].outcome
except IndexError:
return 'NOT RUN'
all_outcomes = set((result.outcome for result in self))
if {first_outcome} == all_outcomes:
return first_outcome
else:
return 'MIXED ({:.0f}%)'.format(100 * self.outcome_consistency)
@property
def outcome_consistency(self):
outcome_counts = Counter((result.outcome for result in self))
if not outcome_counts:
return 0
top_freq = list(sorted(outcome_counts.values()))[-1]
total_freq = sum(outcome_counts.values())
return float(top_freq) / total_freq
@property
def total_duration(self):
return sum((result.duration for result in self))
@property
def mean_duration(self):
cycles = len(self)
return self.total_duration / cycles if cycles else 0
def append(self, result_for_one_test_run):
self.cycle_results.append(result_for_one_test_run)
class Observable(object):
def __init__(self):
self._observers = []
def register(self, observer):
self._observers.append(observer)
def notify(self, fn_name, *args, **kwargs):
for observer in self._observers:
try:
fn = getattr(observer, fn_name)
except AttributeError:
if not getattr(observer, 'strict', True):
continue
else:
raise NotImplementedError((observer, fn_name))
fn(*args, **kwargs)
def copy_observers_to(self, other):
for observer in self._observers:
other.register(observer)
class Testwanger(Observable):
def __init__(self, collector, runner):
super(Testwanger, self).__init__()
self.collector = collector
self.runner = runner
def testwang(self):
start = time.time()
        collected = self.collect_and_run_tests()
        if collected is None:
            # No tests were found; collect_and_run_tests() has already notified observers.
            return
        tests, results, actual_cycles = collected
        elapsed = time.time() - start
self.notify(
'all_cycles_finished',
tests,
results,
actual_cycles,
elapsed,
)
def collect_and_run_tests(self):
tests = self.collector.collect_tests()
if not tests:
self.notify('no_tests_found')
return
results, actual_cycles = self.runner.run_tests(tests)
return tests, results, actual_cycles
class TestCollector(Observable):
def __init__(self, tests_file_path):
super(TestCollector, self).__init__()
self.tests_file_path = tests_file_path
def collect_tests(self):
self.notify('collecting_tests', self.tests_file_path)
tests = self.convert_jenkins_test_specs_to_pytest_format(
self.get_tests_to_examine(self.tests_file_path),
)
self.notify('collected_tests', tests)
return tests
@staticmethod
def get_tests_to_examine(tests_file_path):
with io.open(tests_file_path, encoding='utf-8') as infile:
lines = [line.strip() for line in infile.readlines()]
return [
line for line in lines
if line and not line.startswith('#')
]
def convert_jenkins_test_specs_to_pytest_format(self, test_specs):
return [
self.convert_jenkins_test_spec_to_pytest_format(test_spec)
for test_spec in test_specs
]
def convert_jenkins_test_spec_to_pytest_format(self, test_spec):
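        # e.g. 'pkg.module.TestClass.test_name' -> 'pkg/module.py::TestClass::test_name',
        # where the module prefix is the first leading slice of the dotted path that
        # exists as a .py file on disk (see compute_test_spec_module_path_parts below).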
test_spec_parts = test_spec.split('.')
module_path_parts = self.compute_test_spec_module_path_parts(
test_spec_parts,
)
test_path_parts = test_spec_parts[len(module_path_parts):]
module_path = '/'.join(module_path_parts) + '.py'
test_path = '::'.join(test_path_parts)
return module_path + '::' + test_path
def compute_test_spec_module_path_parts(self, test_spec_parts):
for prefix in sliced_prefixes(test_spec_parts):
path = os.path.join(*prefix) + '.py'
if os.path.exists(path) and os.path.isfile(path):
return prefix
self.notify('test_not_found', test_path=test_spec_parts)
raise TestSpecModuleNotFound(test_spec_parts)
class TestCyclesRunner(Observable):
def __init__(
self,
requested_cycles,
failure_focus,
pytest_python,
pytest_echo,
pytest_json_path,
pytest_extra_args,
):
super(TestCyclesRunner, self).__init__()
self.requested_cycles = requested_cycles
self.failure_focus = failure_focus
self.pytest_python = pytest_python
self.pytest_echo = pytest_echo
self.pytest_json_path = pytest_json_path
self.pytest_extra_args = pytest_extra_args
def run_tests(self, tests):
results = {
test: ResultsForOneTest() for test in tests
}
active_tests = list(tests)
actual_cycles = 0
for cycle in range(self.requested_cycles):
active_tests = self.run_tests_cycle(cycle, active_tests, results)
if active_tests:
actual_cycles += 1
else:
break
return results, actual_cycles
def run_tests_cycle(self, cycle, active_tests, results):
if not active_tests:
self.notify('no_active_tests')
return []
estimated_cycle_time = self.estimate_cycle_time(active_tests, results)
self.notify(
'test_cycle_began',
cycle,
active_tests,
estimated_cycle_time,
)
duration, cycle_results = self.run_tests_for_cycle(active_tests, cycle)
self.notify('test_cycle_ended', cycle, duration)
for test in active_tests:
if test in cycle_results:
results[test].append(cycle_results[test])
if self.failure_focus:
active_tests = [
test for test in active_tests
if (
test not in cycle_results # No result: problem?
or cycle_results[test].outcome != 'PASSED'
)
]
return active_tests
@staticmethod
def estimate_cycle_time(tests, prior_results):
return sum(prior_results[test].mean_duration for test in tests)
def run_tests_for_cycle(self, tests, cycle):
command = self.construct_tests_run_command(tests)
self.notify('pytest_command', command)
final_cycle = cycle == self.requested_cycles - 1
echoing = (
self.pytest_echo == 'ALL'
or (self.pytest_echo == 'FINAL' and final_cycle)
)
duration, _ = run_timed(self.run_command, command, echo=echoing)
results = self.parse_json_results()
return duration, results
def construct_tests_run_command(self, tests):
command = [os.path.expanduser(self.pytest_python), '-m', 'pytest']
command.extend(self.pytest_extra_args)
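        # Note: the '--json=<path>' flag below comes from the pytest-json plugin (not core
        # pytest); its report layout ('report' -> 'tests') is what parse_json_results() expects.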
command.append('--json={}'.format(self.pytest_json_path))
command.extend(tests)
return command
@staticmethod
def run_command(command, echo):
stdout = stderr = None if echo else subprocess.PIPE
process = subprocess.Popen(
command,
stdout=stdout,
stderr=stderr,
env=copy.deepcopy(os.environ),
)
process.communicate()
return process.returncode
def parse_json_results(self):
with io.open(self.pytest_json_path, encoding='utf-8') as json_file:
contents = json.load(json_file)
return dict((
self.parse_json_results_one_test(test_json)
for test_json in contents['report']['tests']
))
@staticmethod
def parse_json_results_one_test(test_json):
name = test_json['name']
outcome = test_json['outcome'].upper()
duration = sum((
section.get('duration', 0)
for section in test_json.values()
if isinstance(section, dict)
))
return name, ResultForOneTestRun(outcome, duration)
# noinspection PyMethodMayBeStatic
class TestwangConsoleOutput(object):
strict = True
def __init__(
self,
requested_cycles,
failure_focus,
report_cycle_detail,
debug=False
):
self.requested_cycles = requested_cycles
self.failure_focus = failure_focus
self.report_cycle_detail = report_cycle_detail
self._debug = debug
def debug(self, *args, **kwargs):
if self._debug:
print(*args, **kwargs)
def test_not_found(self, test_path):
print('Test not found: {}'.format(test_path))
def no_tests_found(self):
print('No tests found')
def collecting_tests(self, tests_file_path):
print('Collecting tests from {}'.format(unexpand_user(tests_file_path)))
def collected_tests(self, tests):
print('\nWill run the following {} tests:\n'.format(len(tests)))
for test in tests:
print(' ' + test)
print('')
def no_active_tests(self):
print('No tests to run')
def test_cycle_began(self, cycle, tests, estimated_cycle_time):
if estimated_cycle_time:
estimate = ', time estimate: {:5.2f}s'.format(estimated_cycle_time)
else:
estimate = ''
header = self._header_for_test_cycle(cycle)
print('{}{} tests to run{}'.format(header, len(tests), estimate))
def _header_for_test_cycle(self, cycle):
return 'Test cycle {:2} of {:2} -- '.format(
cycle + 1,
self.requested_cycles,
)
def pytest_command(self, command):
self.debug(' '.join(command))
def test_cycle_ended(self, cycle, duration):
indent = ' ' * len(self._header_for_test_cycle(cycle))
print(indent + '{:.2f}s for cycle'.format(duration))
def all_cycles_finished(self, tests, results, actual_cycles, elapsed):
longest_outcome = max((
len(outcome) for outcome in self._all_test_result_outcomes(results)
))
template = '{{:{}}} {{}}s'.format(longest_outcome)
print('\nRan {} {} of tests in {:5.2f}s\n'.format(
actual_cycles,
'cycle' if self.requested_cycles == 1 else 'cycles',
elapsed,
))
for test in tests:
test_results = results[test]
if self.failure_focus and test_results.overall_outcome != 'FAILED':
continue
print(template.format(test_results.overall_outcome, test))
if self.report_cycle_detail:
self.report_test_cycle_result(
test_results,
longest_outcome,
)
def _all_test_result_outcomes(self, results):
outcomes = set()
for test_result in results.values():
outcomes.add(test_result.overall_outcome)
for cycle_result in test_result:
outcomes.add(cycle_result.outcome)
return outcomes
def report_test_cycle_result(self, test_results, longest_outcome):
def indented(msg):
return ' ' * longest_outcome + ' ' + msg
inner_template = '{{:{}}} {{:5.2f}}s'.format(longest_outcome)
for cycle in test_results:
print(indented(
inner_template.format(cycle.outcome, cycle.duration),
))
if len(test_results) > 1:
print(indented(indented('{:5.2f}s total, {:5.2f}s mean\n'.format(
test_results.total_duration,
test_results.mean_duration,
))))
def main():
args, pytest_extra_args = parse_args()
collector = TestCollector(args.tests_file_path)
runner = TestCyclesRunner(
requested_cycles=args.requested_cycles,
failure_focus=args.failure_focus,
pytest_python=args.pytest_python,
pytest_echo=args.pytest_echo,
pytest_json_path=args.pytest_json_path,
pytest_extra_args=pytest_extra_args,
)
wanger = Testwanger(
collector=collector,
runner=runner,
)
console_output = TestwangConsoleOutput(
requested_cycles=args.requested_cycles,
failure_focus=args.failure_focus,
report_cycle_detail=args.report_cycles,
debug=args.debug,
)
for observable in (collector, runner, wanger):
observable.register(console_output)
try:
wanger.testwang()
except TestSpecModuleNotFound:
sys.exit(1)
def parse_args():
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter,
)
parser.add_argument(
'tests_file_path', metavar='TESTS_FILE_PATH',
help='Path to file containing failing tests spec',
)
parser.add_argument(
'-P', '--python', metavar='PYTHON_EXE_PATH', dest='pytest_python',
default=sys.executable,
help="""
Path to python executable to use to run pytest; default is
whichever python is being used to run this script, currently: {}
""".format(unexpand_user(sys.executable)),
)
parser.add_argument(
'-J', '--json-path', metavar='JSON_PATH',
dest='pytest_json_path',
help="""
File path to store test run results in; by default they are
stored in a temporary folder and deleted after use. If this
argument is passed, the temp file is not deleted after use.
If running multiple test cycles, this will end up containing
the result of the final test run
""",
)
parser.add_argument(
'-N', '--cycles',
default=1,
type=positive_int,
dest='requested_cycles',
help='How many times to run the tests; default is just once',
)
parser.add_argument(
'-F', '--failure-focus',
action='store_true',
help="""
As soon as a test passes once, don't run it again in later cycles.
""",
)
parser.add_argument(
'-R', '--report-cycles',
action='store_true',
help="""
When reporting test results at end, also report each
test's result for each cycle, and the time spent in that
test across all cycles.
""",
)
echo_parser = parser.add_mutually_exclusive_group()
echo_parser.add_argument(
'-e', '--echo',
action='store_true',
help='Echo pytest output as it runs; default is to suppress it',
)
echo_parser.add_argument(
'-E', '--echo-final',
action='store_true',
help="""
When running multiple cycles, echo pytest output only of final
test run and suppress output from earlier runs; if running a
single cycle, this is equivalent to --echo
""",
)
parser.add_argument(
'--debug',
action='store_true',
help='Activate debug output',
)
# We will pass any unrecognized args through to pytest
args, pytest_extra_args = parser.parse_known_args()
# In case any of those unrecognized args are env vars, expand them.
pytest_extra_args = tuple(flatten(
os.path.expandvars(arg).split() for arg in pytest_extra_args
))
if not args.pytest_json_path:
args.pytest_json_path = create_tmp_json_path_and_register_for_cleanup()
if args.echo:
args.pytest_echo = 'ALL'
elif args.echo_final:
args.pytest_echo = 'FINAL'
else:
args.pytest_echo = None
del args.echo
del args.echo_final
return args, pytest_extra_args
def create_tmp_json_path_and_register_for_cleanup():
h, json_path = tempfile.mkstemp()
os.close(h)
atexit.register(os.unlink, json_path)
return json_path
def positive_int(x):
"""Argument value parser: positive integers."""
try:
n = int(x)
if n <= 0:
raise ValueError()
return n
except ValueError:
raise argparse.ArgumentTypeError(
'invalid positive int value: {}'.format(x),
)
def unexpand_user(path):
return path.replace(os.path.expanduser('~'), '~')
def sliced_prefixes(slicable):
return (slicable[:i] for i in range(1, len(slicable) + 1))
def flatten(iterator):
return itertools.chain(*iterator)
def run_timed(fn, *args, **kwargs):
started = time.time()
result = fn(*args, **kwargs)
duration = time.time() - started
return duration, result
if __name__ == '__main__':
main()
|
"""
bot commands
"""
from . import bot
from ..lib.owoify import owoify, owoify2
from ..lib.smh import expand
bot.add_formatter(owoify, "owoify", "owoifies text")
bot.add_formatter(owoify2, "owoify2", "owoifies text, but worse")
bot.add_formatter(expand, "smh", "expands instances of smh")
import discord
import requests_html
import os.path
@bot.command()
async def ping(ctx):
"""pongs a ping"""
await ctx.send("pong")
@bot.command()
async def echo(ctx, *, text: str):
"""echos the text argument"""
await ctx.send(text)
@bot.command()
async def gn(ctx):
"""emotes goodnight"""
await ctx.send("<:megauwu:679614776267243559>")
@bot.command()
async def kirbcult(ctx, leftwardarm: str, rightwardarm: str):
"""makes a cult of Kirbys from the arm args"""
l, r = leftwardarm, rightwardarm
await ctx.send(f"({r}O_O){r} {l}(O_O){r} {l}(O_O{l})")
@bot.command()
async def lucario(ctx):
"""posts a random (safe) Lucario image from e621"""
response = requests_html.HTMLSession().get(
"https://e621.net/posts.json?"
"tags=lucario+rating:safe+score:%3E=50+-type:mp4+-type:swf+-type:webm+-type:zip+order:random&limit=1"
)
json = response.json()
post = next(iter(json["posts"]))
id = post["id"]
url = post["file"]["url"]
embed = discord.Embed(title = "a wild lucario appeared!")
    embed.set_image(url = url)
    embed.set_author(name = id, icon_url = "https://e621.net/favicon.ico")
await ctx.send(embed = embed)
@bot.command()
async def play(ctx):
"""plays a local coconut-mall.mp3"""
# read audio file
filename = "coconut-mall.mp3"
filepath = f"resources/audio/{filename}"
if not os.path.isfile(filepath):
await ctx.send("file not found!")
return
source = discord.PCMVolumeTransformer(discord.FFmpegPCMAudio(
filepath
))
# join voice channel
if ctx.voice_client is not None:
await ctx.voice_client.move_to(ctx.author.voice.channel)
else:
await ctx.author.voice.channel.connect()
# play audio
ctx.voice_client.play(
source,
after=lambda e: print('player error: %s' % e) if e else None
)
# announce
await ctx.send(f"playing: {filename}")
@bot.command()
async def leave(ctx):
"""leaves voice channel"""
await ctx.voice_client.disconnect()
await ctx.send(f"bye-bye!")
|
"""Load Hbase.thrift as the Hbase_thrift module using thriftpy2 on import."""
from pkg_resources import resource_filename
import thriftpy2
thriftpy2.load(resource_filename(__name__, 'Hbase.thrift'), 'Hbase_thrift')
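# Once this module has been imported, the generated thrift classes are importable from the
# registered module name, e.g. (exact names depend on the bundled Hbase.thrift):
#   from Hbase_thrift import Hbase, ColumnDescriptor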
|
from django.shortcuts import redirect
from django.contrib.auth.models import User
from django.http import JsonResponse
def login_redirect(request):
return redirect('/')
def validate_username(request):
username = request.GET.get('username', None)
data = {
'is_taken': User.objects.filter(username__iexact=username).exists()
}
if data['is_taken']:
        data['error_message'] = 'Käyttäjätunnus on jo käytössä. Valitse toinen'  # Finnish: "The username is already taken. Choose another one."
return JsonResponse(data)
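# Usage note (editorial): validate_username is intended to be called from a registration
# form via AJAX, e.g. GET <validate-username-url>?username=foo, returning JSON such as
# {"is_taken": true, "error_message": "..."}. The URL path is whatever this project's
# urls.py maps to this view.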
|
import time
from datetime import datetime
def str_to_seconds(time_s, format_s='%Y-%m-%d %H:%M:%S'):
struct_t = time.strptime(time_s, format_s)
return int(time.mktime(struct_t))
def str_to_mcroseconds(time_s, format_s='%Y-%m-%d %H:%M:%S'):
struct_t = datetime.strptime(time_s, format_s)
return time.mktime(struct_t.timetuple()) * 1000 + struct_t.microsecond / 1000
def microseconds_format(time_ms, format_s='%Y-%m-%d %H:%M:%S'):
    # Format a millisecond timestamp as a datetime string.
    struct_t = datetime.fromtimestamp(time_ms / 1000)
    return struct_t.strftime(format_s)
def get_current_datetime():
return datetime.now().strftime('%Y-%m-%d %H:%M:%S')
if __name__ == '__main__':
print long(str_to_mcroseconds('2015-12-12 12:12:12'))
print long(str_to_seconds('2015-12-12 12:12:12'))
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Created by Hao Luo at 2019-09-02
"""Step_simulate.py
:description : script
:param :
:returns:
:rtype:
"""
import os
import cobra
import matplotlib.pyplot as plt
import numpy as np
os.chdir('../../ComplementaryData/Step_03_Compare_Refine/')
print('----- loading model -----')
iHL622 = cobra.io.load_json_model('../../ModelFiles/iHL622.json')
# %% <biomass vs od >
print('----- change medium -----')
iHL622.objective = "BIOMASS"
experiment_group = ['A', 'B', 'C', 'D', 'E']
experiment_result = [1.38, 1.88, 1.92, 1.92, 1.90]
experiment_result_err = [0.66, 0.35, 0.69, 0.37, 0.47]
experiment_medium = {
'EX_glc__D_e': [-20, -20, -20, -20, -20, ],
'EX_glyc_e': [0.00, -5.00, -5.00, -10.00, -10.],
'EX_fru_e': [0.00, -1.00, -5.00, -1.00, -5.],
'EX_lac__L_e': [18.215, 21.334, 20.882, 17.881, 16.577],
'EX_ac_e': [17.058, 18.301, 18.285, 19.703, 19.643],
'EX_etoh_e': [5.135, 4.623, 4.312, 2.558, 2.230],
}
# for k in experiment_medium.keys(): # g/L --> mM
# temp = np.array(experiment_medium[k])*1000/iHL622.metabolites.get_by_id(k.replace('EX_','')).formula_weight
# experiment_medium[k] = temp
predict_result = []
for i in range(0, len(experiment_result)):
model = iHL622.copy()
for rea in experiment_medium.keys():
bound = experiment_medium[rea][i]
if bound <= 0:
model.reactions.get_by_id(rea).bounds = (bound, 0)
elif bound >= 0:
model.reactions.get_by_id(rea).bounds = (0, bound)
sol = model.optimize()
predict_result.append(round(sol.objective_value, 3))
print('Experiment Biomass:', experiment_result)
print('iHL622 Biomass:', predict_result)
# %% <vitamin B12> NOTE: error
# experiment_medium = {
# 'BIOMASS': predict_result,
# 'EX_glc__D_e': [-20, -20, -20, -20, -20, ],
# 'EX_glyc_e': [0.00, -5.00, -5.00, -10.00, -10.],
# 'EX_fru_e': [0.00, -1.00, -5.00, -1.00, -5.], }
#
# predict_result_b12 = []
# for i in range(0, len(experiment_result)):
# model = iHL622.copy()
# rea = cobra.Reaction('EX_adeadocbl_c')
# model.add_reaction(rea)
# model.reactions.get_by_id('EX_adeadocbl_c').reaction = 'adeadocbl_c --> '
# model.objective = 'EX_adeadocbl_c'
# # model.reactions.get_by_id('EX_ade_e').bounds = (0,0)
# for rea in experiment_medium.keys():
# bound = experiment_medium[rea][i]
# if rea == 'BIOMASS':
# model.reactions.get_by_id(rea).bounds = (bound, bound)
#
# elif bound <= 0:
# model.reactions.get_by_id(rea).bounds = (bound, 0)
# elif bound >= 0:
# model.reactions.get_by_id(rea).bounds = (0, bound)
# predict_result_b12.append(
# round(model.optimize().objective_value * 1355.365, 3)) # Cobalamin: Molar mass: 1,355.365 g/mol
# print('iHL622 b12:', predict_result_b12)
# %% <draw>
import brewer2mpl
fig, ax = plt.subplots(figsize=(6, 4))
ax2 = ax.twinx()
bmap = brewer2mpl.get_map('Set2', 'qualitative', 7)
colors = bmap.mpl_colors
# plt.ylim((0.0, 1.0))
x = np.arange(0, 5)
width = 0.25 # the width of the bars
rects2 = ax.bar(x + width / 2, predict_result, width, label='Model Growth rate', color=colors[0]) # ,
rects1 = ax2.bar(x - width / 2, experiment_result, width, yerr=experiment_result_err, label='Experiment OD600',
color=colors[1]) #
rects1_ = ax2.bar(0, 0, label='Model Growth rate', color=colors[0], )
# Add some text for labels, title and custom x-axis tick labels, etc.
ax2.set_ylabel("OD600", fontsize=16)
ax.set_ylabel('Growth rate (1/h)', fontsize=16)  # color = 'tab:blue'
# ax.tick_params(axis='y') # , labelcolor='tab:blue'
ax2.set_ylim((0, 3.2))
ax.set_ylim((0, 2.2))
ax.set_title('Growth rate simulation', fontsize=18)
labels = [''] + experiment_group
ax2.set_xticklabels(labels, fontsize=16)
ax2.legend(loc='best', fontsize=11)
# ax2.legend(loc='best', fontsize=14)
fig.tight_layout()
plt.show()
fig.savefig('Growth rate simulation case2_1.png')
|
def escreva(msg):
t = len(msg)
print('-' * t)
print(msg)
print('-' * t)
escreva('Henrique Alvaro')
escreva('HAMS')
escreva('python')
|
import unittest
from hotspots.hs_io import HotspotReader
from ccdc.utilities import _csd_required, PushDir
from ccdc import io
from ccdc.molecule import Atom
from ccdc.pharmacophore import Pharmacophore, GeometricDescriptors
from hotspots.protein_extension import Protein
from hotspots.grid_extension import Grid
from hotspots.wrapper_pymol import PyMOLFile, PyMOLCommands
from hotspots.pharmacophore_extension \
import LigandPharmacophoreModel, InteractionPharmacophoreModel, \
PharmacophoreModel, ProteinPharmacophoreModel, HotspotPharmacophoreModel, Feature, \
create_consensus, _create_grids, _features_to_grid, closest_peak_index
import os
_csd = None
def csd():
global _csd
if _csd is None:
_csd = io.EntryReader('csd')
return _csd
##################################################################################################################
class NoClassMethods(unittest.TestCase):
def setUp(self):
self.parent_dir = "testdata/pharmacophore_extension/PharmacophoreModel"
self.fnames = ["1dmt_ligand.cm", "1r1h_ligand.cm", "1r1j_ligand.cm", "1y8j_ligand.cm"]
self.pharmacophores = [PharmacophoreModel.from_file(os.path.join(self.parent_dir, f)) for f in self.fnames]
self.cm_dir = os.path.dirname(os.path.dirname(io.csd_directory()))
Pharmacophore.read_feature_definitions(os.path.join(self.cm_dir, "CSD_CrossMiner/feature_definitions"))
def test_create_grids(self):
grid_dic = _create_grids(self.pharmacophores)
# Point is unhashable, set comprehension is not possible
features = [(s.centre[0], s.centre[1], s.centre[2])
for p in self.pharmacophores for f in p.features for s in f.spheres]
# Point causes problems with Grid.value_at_point()
self.assertTrue(all([grid_dic["ring_planar_projected"].contains_point(p) for p in features]))
def test_features_to_grid(self):
pm = PharmacophoreModel()
pm.feature_definitions = ["acceptor"]
pts = [[1.0, 1.0, 1.0],
[1.0, 1.0, 1.0],
[2.5, 2.5, 2.5],
[3.5, 3.5, 3.5]]
g = Grid.initalise_grid(pts, padding=3)
features = [Feature(pm.feature_definitions["acceptor"],
GeometricDescriptors.Sphere(centre=p, radius=1)) for p in pts]
for f in features:
f.point = f.spheres[0]
h = _features_to_grid(features, g)
# should be 3 peaks, all with a value of 1.0
self.assertEqual(3, len(h.get_peaks(min_distance=1, cutoff=0)))
self.assertTrue(all([h.value_at_point(peak) == 1.0 for peak in h.get_peaks(min_distance=1, cutoff=0)]))
# def test_closest_peak(self):
# x = 1
def test_consensus(self):
feats = create_consensus(self.pharmacophores)
        self.assertEqual(2, len(feats))
##################################################################################################################
class TestProteinPharmacophoreModel(unittest.TestCase):
def setUp(self):
top = os.getcwd()
# self.prot = Protein.from_file("testdata/6y2g_A/protein.pdb")
# self.mol_grid = Grid.from_file("testdata/6y2g_A/molA.grd")
self.binding_site = Protein.from_file("testdata/6y2g_A/binding_site.pdb")
self.protein_pharmacophore = ProteinPharmacophoreModel()
self.protein_pharmacophore.feature_definitions = ["acceptor_projected",
"donor_projected",
"donor_ch_projected"]
def test_detect_from_prot(self):
# create binding site
# bs = Protein.BindingSiteFromGrid(self.prot, self.mol_grid)
# self.binding_site = self.prot.copy()
# remove = {r.identifier for r in self.prot.residues} - {r.identifier for r in bs.residues}
# for r in remove:
# self.binding_site.remove_residue(r)
# with io.MoleculeWriter("testdata/6y2g_A/binding_site.pdb") as w:
# w.write(self.binding_site)
###
self.protein_pharmacophore.detect_from_prot(self.binding_site)
print(self.protein_pharmacophore.feature_definitions)
donor_feats = [f for f in self.protein_pharmacophore.detected_features if f.identifier == "donor_projected"]
self.assertEqual(26, len(donor_feats))
self.protein_pharmacophore.pymol_visulisation(outdir="testdata/pharmacophore_extension/ProteinPharmacophoreModel/from_prot")
##################################################################################################################
class TestHotspotPharmacophoreModel(unittest.TestCase):
def setUp(self) -> None:
x = 1
with HotspotReader("testdata/pharmacophore_extension/provided_data/out.zip") as r:
self.hr = [hr for hr in r.read() if hr.identifier == "best_volume"][0]
# smoothing is really important to this workflow
for p, g in self.hr.super_grids.items():
h = g.max_value_of_neighbours()
h = h.gaussian()
self.hr.super_grids[p] = h
def test_noprojections(self):
p = HotspotPharmacophoreModel()
p.from_hotspot(self.hr, projections=False)
top_feats = p.top_features(3)
p.detected_features = top_feats
p.pymol_visulisation("testdata/pharmacophore_extension/HotspotPharmacophoreModel/no_projections")
def test_projections(self):
p = HotspotPharmacophoreModel()
p.from_hotspot(self.hr, projections=True)
feat_0 = [f for f in p.detected_features if f.identifier == "acceptor_projected"][0]
self.assertIsInstance(feat_0.projected_atom, Atom)
p.pymol_visulisation("testdata/pharmacophore_extension/HotspotPharmacophoreModel/projections")
##################################################################################################################
@_csd_required
class TestLigandPharmacophoreModel(unittest.TestCase):
def setUp(self):
self.csd = csd()
self.crystal = csd().crystal('AABHTZ')
self.crystal.molecule.add_hydrogens()
self.ligand_pharmacophore = LigandPharmacophoreModel()
def testSetters(self):
self.assertEqual(0, len(self.ligand_pharmacophore.features))
self.ligand_pharmacophore.feature_definitions = ["acceptor"]
self.assertEqual(1, len(self.ligand_pharmacophore.feature_definitions))
def testdetect_from_ligand(self):
wrkdir = "testdata/pharmacophore_extension/LigandPharmacophoreModel/from_ligand"
with PushDir(wrkdir):
self.ligand_pharmacophore.feature_definitions = ["acceptor"]
print(self.ligand_pharmacophore.feature_definitions["acceptor"].point_generator_names)
self.ligand_pharmacophore.detect_from_ligand(ligand=self.crystal)
self.assertEqual(5, len(self.ligand_pharmacophore.detected_features))
# test score setter
self.assertEqual(0, self.ligand_pharmacophore.detected_features[0].score)
self.ligand_pharmacophore.detected_features[0].score = 5
self.assertEqual(5, self.ligand_pharmacophore.detected_features[0].score)
# test write
for f in self.ligand_pharmacophore.detected_features:
self.ligand_pharmacophore.add_feature(f)
self.ligand_pharmacophore.write(pharmacophore_path="pharmacophore.cm")
read_me = LigandPharmacophoreModel.from_file("pharmacophore.cm")
self.assertEqual(len(self.ligand_pharmacophore.features), len(read_me.features))
print(read_me.features[0].spheres[0].centre)
# self.ligand_pharmacophore.pymol_visulisation()
def testto_pymol_str(self):
self.ligand_pharmacophore.feature_definitions = ["acceptor_projected"]
self.ligand_pharmacophore.detect_from_ligand(ligand=self.crystal)
f = PyMOLFile()
f.commands += self.ligand_pharmacophore.detected_features[0].to_pymol_str()
f.write("testdata/pharmacophore_extension/LigandPharmacophoreModel/from_ligand/feature_write.py")
def testdetect_from_pdb(self):
testpdb = "2vta"
testhetid = "LZ1"
testchainid = "A"
self.ligand_pharmacophore.feature_definitions = ["donor_projected",
"acceptor_projected"]
self.ligand_pharmacophore.detect_from_pdb(pdb=testpdb,
hetid=testhetid,
chainid=testchainid)
self.assertEqual(2, len(self.ligand_pharmacophore.detected_features))
# self.ligand_pharmacophore.pymol_visulisation("testdata/pharmacophore_extension/LigandPharmacophoreModel/from_pdb")
def testdetect_from_ligand_ensemble(self):
wrk_dir = "testdata/pharmacophore_extension/LigandPharmacophoreModel/from_ligand_ensemble"
with PushDir(wrk_dir):
test_overlay = io.MoleculeReader("test_overlay.mol2")
ligand_pharmacophore = LigandPharmacophoreModel()
ligand_pharmacophore.feature_definitions = ["ring_planar_projected"]
ligand_pharmacophore.detect_from_ligand_ensemble(ligands=test_overlay, cutoff=2)
# ligand_pharmacophore.pymol_visulisation(outdir="")
self.assertEqual(2, len(ligand_pharmacophore.detected_features))
def testdetect_from_ligand_ensemble_cdk2(self):
wrk_dir = "testdata/pharmacophore_extension/LigandPharmacophoreModel/from_ligand_ensemble_big_all"
with PushDir(wrk_dir):
test_overlay = io.MoleculeReader("cdk2_ligands.mol2")
ligand_pharmacophore = LigandPharmacophoreModel()
ligand_pharmacophore.feature_definitions = ["ring_planar_projected",
"donor_projected",
"acceptor_projected"]
ligand_pharmacophore.detect_from_ligand_ensemble(ligands=test_overlay, cutoff=2)
feature_count = 4
selected = ligand_pharmacophore.top_features(num=feature_count)
ligand_pharmacophore.detected_features = selected
self.assertEqual(feature_count, len(ligand_pharmacophore))
# ligand_pharmacophore.pymol_visulisation(outdir="")
##################################################################################################################
#
#
# class TestInteractionPharmacophoreModel2vta(unittest.TestCase):
# def setUp(self):
# self.protein_path = "testdata/pharmacophore_extension/2vta/test_2vta_protonated.pdb"
# self.hetid = "LZ1"
# self.chain = "A"
# self.prot_lig_pharmacophore = InteractionPharmacophoreModel()
# self.featdefs = ["donor_projected",
# "donor_ch_projected",
# "acceptor_projected",
# "ring"]
#
# self.prot_lig_pharmacophore.feature_definitions = self.featdefs
#
# def testligand_pharmacophore(self):
# self.assertEqual(0, len(self.prot_lig_pharmacophore.features))
# self.assertEqual(len(self.featdefs), len(self.prot_lig_pharmacophore.feature_definitions))
#
# def testdetection(self):
# self.prot_lig_pharmacophore.detect_from_arpeggio(self.protein_path, self.hetid, self.chain)
# self.assertEqual(6, len(self.prot_lig_pharmacophore.detected_features))
# self.prot_lig_pharmacophore.pymol_visulisation("testdata/pharmacophore_extension/2vta")
#
#
# class TestInteractionPharmacophoreModel1xkk(unittest.TestCase):
# def setUp(self):
# self.protein_path = "testdata/pharmacophore_extension/1xkk/1xkk.pdb"
# self.hetid = "FMM"
# self.chain = "A"
# self.prot_lig_pharmacophore = InteractionPharmacophoreModel()
# self.prot_lig_pharmacophore.feature_definitions = ["donor_projected",
# "donor_ch_projected",
# "acceptor_projected",
# "ring"]
#
# # def testligand_pharmacophore(self):
# # self.assertEqual(0, len(self.prot_lig_pharmacophore.features))
# # self.assertEqual(6, len(self.prot_lig_pharmacophore.feature_definitions))
#
# def testdetection(self):
# self.prot_lig_pharmacophore.detect_from_arpeggio(self.protein_path, self.hetid, self.chain)
# # self.assertEqual(7, len(self.prot_lig_pharmacophore.selected_features))
# self.prot_lig_pharmacophore.pymol_visulisation("testdata/pharmacophore_extension/1xkk")
#
class TestInteractionPharmacophoreModel1aq1(unittest.TestCase):
def setUp(self):
self.protein_path = "/home/pcurran/github_packages/pharmacophores/testdata/alignment/1AQ1_aligned.pdb"
self.hetid = "STU"
self.chain = "A"
self.prot_lig_pharmacophore = InteractionPharmacophoreModel()
self.prot_lig_pharmacophore.feature_definitions = ["donor_projected",
"donor_ch_projected",
"acceptor_projected",
"ring_planar_projected"]
# def testligand_pharmacophore(self):
# self.assertEqual(0, len(self.prot_lig_pharmacophore.features))
# self.assertEqual(6, len(self.prot_lig_pharmacophore.feature_definitions))
def testdetection(self):
self.prot_lig_pharmacophore.detect_from_arpeggio(self.protein_path, self.hetid, self.chain)
# self.assertEqual(7, len(self.prot_lig_pharmacophore.selected_features))
self.prot_lig_pharmacophore.pymol_visulisation("/home/pcurran/github_packages/pharmacophores/testdata/alignment")
class TestInteractionPharmacophoreModel(unittest.TestCase):
def setUp(self):
self.wrkdir = "/home/pcurran/github_packages/pharmacophores/testdata/alignment"
# def testdetect_interactions(self):
# with PushDir(self.wrkdir):
# bs = Protein.from_file("1AQ1_bs.pdb")
# lig = io.MoleculeReader("1AQ1_STU_aligned.mol2")[0]
# ipm = InteractionPharmacophoreModel()
#
# ipm.detect_interactions(bs, lig)
def testdetect_from_pl_ensemble(self):
wrkdir = "/home/pcurran/github_packages/pharmacophores/testdata/alignment"
with PushDir(wrkdir):
paths = ["1AQ1_aligned.pdb", "1B38_aligned.pdb", "1B39_aligned.pdb", "1CKP_aligned.pdb"]
hetids = ["STU", "ATP", "ATP", "PVB"]
chains = ["A", "A", "A", "A"]
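            # Build an interaction pharmacophore for each aligned complex and
            # write it out as <PDB>_<HETID>.cm (e.g. 1AQ1_STU.cm) in wrkdir.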
for i in range(len(paths)):
ipm = InteractionPharmacophoreModel()
ipm.feature_definitions = ["donor_projected",
"donor_ch_projected",
"acceptor_projected",
"ring_planar_projected"]
ipm.detect_from_arpeggio(paths[i], hetids[i], chains[i])
for feat in ipm.detected_features:
ipm.add_feature(feat)
ipm.write(f"{paths[i].split('_')[0]}_{hetids[i]}.cm")
##################################################################################################################
if __name__ == '__main__':
unittest.main()
|
import json
import serial
class PrometheusSerial:
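    """
    Bridge between a serial-connected Arduino (robot arm controller) and a
    socket: lines read from the serial port are parsed into JSON messages and
    broadcast with socket.writeToAll(); joint and claw commands are written
    back to the Arduino as plain text. Reconnection attempts are rescheduled
    with reactor.callLater() on the reactor object passed to __init__.
    """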
portPrefix = '/dev/ttyUSB'
portNumber = 0
connected = False
connection = None
socket = None
reactor = None
def __init__(self, reactor):
print "PrometheusSerial: Starting serial communication"
self.reactor = reactor
self.init()
def init(self):
serialPort = self.portPrefix + str(self.portNumber)
try:
print "PrometheusSerial: Attempting to connect on port " + serialPort
self.connection = serial.Serial(serialPort, 115200, timeout=0.05) # 9600, 115200
except serial.serialutil.SerialException, e:
self.portNumber += 1
if self.portNumber > 9:
self.portNumber = 0
self.connected = False
print "PrometheusSerial: Failed to establish communication, retrying in 10 seconds"
self.reactor.callLater(10, self.init)
else:
self.init()
else:
#self.connection.stopbits = 2
            # serial.Serial() normally opens the port already; guard so a
            # second open() (which can raise on some pyserial versions) is
            # only attempted when the port is not yet open
            if not self.connection.isOpen():
                self.connection.open()
self.connected = True
print "PrometheusSerial: Successfully connected on port " + serialPort
linesToSkip = 2
currentLine = 0
while self.connection.inWaiting() > 0 or currentLine < linesToSkip:
self.connection.readline()
currentLine += 1
def close(self):
self.connection.close()
self.connected = False
def setSocket(self, socket):
self.socket = socket
def writeToSocket(self, data):
        if self.socket is not None:
self.socket.writeToAll(data)
print "PrometheusSerial: Sending data to socket: " + str(data)
def proccessData(self):
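        # Lines from the Arduino are whitespace-separated tokens; the first
        # token is the command name, e.g.
        #   "jointAngle 2 45.0" -> {"command": "jointAngle", "jointNumber": "2", "angle": "45.0"}
        #   "armStatus <5 joint angles> <5 joint errors>"
        # Values are forwarded to the socket as strings, unconverted.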
if self.connected:
if self.connection.inWaiting() > 0:
data = self.connection.readline().split()
print "PrometheusSerial: Data from arduino: " + str(data)
try:
command = data[0]
jsonData = {
"command": command
}
if command == "armStatus":
jsonData["currentAngle"] = {}
jsonData["currentError"] = {}
for index in xrange(5):
jsonData["currentAngle"][index] = data[index + 1]
for index in xrange(5):
jsonData["currentError"][index] = data[index + 6]
self.writeToSocket(json.dumps(jsonData))
elif command == "jointAngle":
jsonData["jointNumber"] = data[1]
jsonData['angle'] = data[2]
self.writeToSocket(json.dumps(jsonData))
elif command == "jointSetPoint":
jsonData["jointNumber"] = data[1]
jsonData['setPoint'] = data[2]
self.writeToSocket(json.dumps(jsonData))
elif command == "jointLimits":
jsonData["jointNumber"] = data[1]
jsonData["min"] = data[2]
jsonData["max"] = data[3]
self.writeToSocket(json.dumps(jsonData))
elif command == "jointGains":
jsonData["jointNumber"] = data[1]
jsonData["PGain"] = data[2]
jsonData["IGain"] = data[3]
jsonData["DGain"] = data[4]
self.writeToSocket(json.dumps(jsonData))
elif command == "jointError":
jsonData["jointNumber"] = data[1]
jsonData["error"] = data[2]
self.writeToSocket(json.dumps(jsonData))
else:
print "PrometheusSerial: Unknown command received from Arduino"
                except Exception:
                    print "PrometheusSerial: Bad data received from Arduino"
def setJointAngle(self, jointNumber, angle):
if self.connected:
self.connection.write("setJointAngle " + str(jointNumber) + " " + str(angle))
def getJointAngle(self, jointNumber):
if self.connected:
self.connection.write("getJointAngle " + str(jointNumber))
def getJointLimits(self, jointNumber):
if self.connected:
self.connection.write("getJointLimits " + str(jointNumber))
def setJointGains(self, jointNumber, PGain, IGain, DGain):
if self.connected:
self.connection.write("setJointGains " + str(jointNumber) + \
" " + str(PGain) + \
" " + str(IGain) + \
" " + str(DGain))
def getJointGains(self, jointNumber):
if self.connected:
self.connection.write("getJointGains " + str(jointNumber))
def getJointError(self, jointNumber):
if self.connected:
self.connection.write("getJointError " + str(jointNumber))
def setClawState(self, clawCommand):
if self.connected:
if clawCommand > 2 or clawCommand < 0:
clawCommand = 1
self.connection.write("setClawState " + str(clawCommand))
|
from flask import Flask # flask server
from flask import request # how the user requested a resource
from flask import render_template # to use templates/layouts
app = Flask(__name__)
# Things I can do in templates:
# {{ var_name }}
# {% """kind of python code on flask""" %}
# [@] signifies a decorator - a way to wrap a function and modify its behavior.
# In Flask, a route decorator maps a URL to the view function's return value.
@app.route('/')
def index():
return 'This is the homepage'
@app.route('/about')
def about():
return 'This is the about page.<br />brrr'
# Variables in routing. google query: flask converters
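# Built-in converters: string (the default), int, float, path and uuid.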
# Variable in URL - in brackets
# Example1 - strings. Strings are default type.
@app.route('/profile/<username>')
def profile(username):
return 'Hello, %s' % username
# Example2 - integers
# If the value is not an integer (e.g. a string), Flask automatically returns a 404 page.
@app.route('/post/<int:post_id>')
def post(post_id):
return 'post_id is %d' % post_id
# HTTP methods. Here: GET and POST methods
# Example of simple GET method
@app.route('/method')
def check_method():
return 'Method used: %s' % request.method
# Example of different methods
@app.route('/add', methods=['GET', 'POST'])
def add():
if request.method == 'POST':
return 'You are using POST method'
elif request.method == 'GET':
return 'You are using GET'
return 'You are using something else...'
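# Sketch: reading data sent with the request (the '/echo' route and the 'q'
# parameter below are illustrative, not part of the notes above).
# request.args holds query-string values (GET); request.form holds form fields (POST).
@app.route('/echo', methods=['GET', 'POST'])
def echo():
    if request.method == 'POST':
        value = request.form.get('q', '')   # form field named 'q'
    else:
        value = request.args.get('q', '')   # query parameter ?q=...
    return 'You sent: %s' % value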
# HTML Templates
# [templates] and [static] folders are necessary
@app.route('/tmp/<name>')
def tmp_name(name):
return render_template("profile.html", name=name)
# Mapping multiple URLs
@app.route('/mult/')
@app.route('/mult/<user>')
def mult(user=None):
return render_template("user.html", user=user)
# Passing lists to the template
@app.route('/shopping')
def shopping():
food = ["cheese", "eggs", "ham"]
return render_template('shopping.html', food=food)
if __name__ == '__main__':
app.run(debug=True)
|
num = 1
num2 = 22
num4 = 3333
num5 = 88
num5 = 666
|
#!/usr/bin/env python
"""
Functions for solving a 1D wave equation.
"""
from __future__ import division # disable integer division
from scitools.numpyutils import *
from CurveViz import *
def solver(I, f, c, U_0, U_L, L, n, dt, tstop,
user_action=None, version='scalar'):
"""
    Solve the wave equation u_tt = c**2*u_xx + f(x,t) on (0,L) with
u(0,t)=U_0(t), u(L,t)=U_L(t), for t=dt,2*dt,...,tstop
Initial conditions: u(x,0)=I(x), du/dt=0.
n is the total number of grid cells; grid points are numbered
from 0 to n.
dt is the time step. If dt<=0, the optimal time step
(dt=dx/c) is used.
tstop is the stop time for the simulation.
I, f, U_0, U_L are functions: I(x), f(x,t), U_0(t), U_L(t)
user_action is a function of (u, x, t) where the calling code
can add visualization, error computations, data analysis,
store solutions, etc.
"""
import time
t0 = time.clock()
dx = L/float(n)
x = linspace(0, L, n+1) # grid points in x dir
    if dt <= 0: dt = dx/float(c) # largest stable time step (Courant number 1)
C2 = (c*dt/dx)**2 # help variable in the scheme
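    # The time loop advances u with the standard explicit update
    #   up[i] = -um[i] + 2*u[i] + C2*(u[i-1] - 2*u[i] + u[i+1]) + dt**2*f(x[i], t_old)
    # which is second-order accurate in both dt and dx.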
dt2 = dt*dt
up = zeros(n+1) # NumPy solution array
u = up.copy() # solution at t-dt
um = up.copy() # solution at t-2*dt
# set initial condition (pointwise - allows straight if-tests):
t = 0.0
for i in iseq(0,n):
u[i] = I(x[i])
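    # um is initialised as the fictitious solution at t = -dt, using the
    # symmetry u(-dt) = u(dt) implied by the initial condition du/dt = 0.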
for i in iseq(1,n-1):
um[i] = u[i] + 0.5*C2*(u[i-1] - 2*u[i] + u[i+1]) + \
dt2*f(x[i], t)
um[0] = U_0(t+dt); um[n] = U_L(t+dt)
if user_action is not None:
user_action(u, x, t)
while t <= tstop:
t_old = t; t += dt
# update all inner points:
if version == 'scalar':
for i in iseq(start=1, stop=n-1):
up[i] = - um[i] + 2*u[i] + \
C2*(u[i-1] - 2*u[i] + u[i+1]) + \
dt2*f(x[i], t_old)
elif version == 'vectorized':
up[1:n] = - um[1:n] + 2*u[1:n] + \
C2*(u[0:n-1] - 2*u[1:n] + u[2:n+1]) + \
dt2*f(x[1:n], t_old)
else:
raise ValueError, 'version=%s' % version
# insert boundary conditions:
up[0] = U_0(t); up[n] = U_L(t)
if user_action is not None:
user_action(up, x, t)
# update data structures for next step:
um, u, up = u, up, um # switch references
#tmp = um; um = u; u = up; up = tmp # traditional
#um = u.copy(); u = up.copy() # slow
t1 = time.clock()
return dt, x, t1-t0
def visualizer(I, f, c, U_0, U_L, L, n, dt, tstop,
user_action=None, version='scalar', graphics=None):
"""
    Call solver, but wrap user_action in a function that also
    visualizes the solution and stores a copy of it in a list.
All arguments are passed on to the solver function,
except graphics. graphics is a plot object with the max/min
values of the y axis set in the calling code.
"""
solutions = [] # store all u fields at all time levels
def action_with_plot(u, x, t):
        # note: this nested function closes over 'graphics', 'solutions'
        # and 'user_action' from the enclosing visualizer scope
if graphics is not None:
graphics.configure(coor=x)
graphics.plotcurve(u, legend='u(x,t=%9.4E)' % t, ps=0)
solutions.append(u.copy()) # save a copy!
if user_action is not None:
user_action(u, x, t) # call user's function
dt, x, cpu = solver(I, f, c, U_0, U_L, L, n, dt, tstop,
action_with_plot, version)
return solutions, x, dt, cpu
def test_solver_plug(plot=1, version='scalar', n=50):
L = 1
c = 1
tstop = 2
def I(x):
"""Plug profile as initial condition."""
if abs(x-L/2.0) > 0.1:
return 0
else:
return 1
def f(x,t):
return 0
def U_0(t):
return 0
def U_L(t):
return 0
def action(u, x, t):
pass
#print t, u
if plot:
g = graph(program='Gnuplot')
g.configure(ymin=-1.1, ymax=1.1)
else:
g = None
import time
t0 = time.clock()
solutions, x, dt, cpu = visualizer(I, f, c, U_0, U_L, L,
n, 0, tstop, user_action=None, version=version, graphics=g)
    print 'CPU time (%s version):' % version, cpu
# check that first and last (if tstop=2) are equal:
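    # (with homogeneous boundary values the exact solution has period 2*L/c = 2,
    #  and with dt = dx/c the scheme has no numerical dispersion, so the plug
    #  should be back to its initial shape at t = 2)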
if not allclose(solutions[0], solutions[-1],
atol=1.0E-10, rtol=1.0E-12):
print 'error in computations'
else:
print 'correct solution'
def test_solver1(N, version='scalar'):
"""
Very simple test case.
Store the solution at every N time level.
"""
def I(x): return sin(2*x*pi/L)
def f(x,t): return 0
solutions = []
# Need time_level_counter as global variable since
# it is assigned in the action function (that makes
# a variable local to that block otherwise).
# The manager class below provides a cleaner solution.
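    # (Python 3's 'nonlocal' statement inside action would avoid the global;
    #  this file is written for Python 2.)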
global time_level_counter
time_level_counter = 0
def action(u, x, t):
global time_level_counter
if time_level_counter % N == 0:
solutions.append(u.copy())
time_level_counter += 1
n = 100; tstop = 6; L = 10
dt, x, cpu = solver(I, f, 1.0, lambda t: 0, lambda t: 0,
L, n, 0, tstop,
user_action=action, version=version)
print 'CPU time:', cpu
print 'Max value in final u:', arrmax(solutions[-1])
class StoreSolution:
"""
Very simple test case.
Store the solution at every N time level.
"""
def __init__(self):
self.L = 10
def I(self, x): return sin(2*x*pi/self.L)
def f(self, x, t): return 0
def action(self, u, x, t):
if self.time_level_counter % self.N == 0:
self.solutions.append(u.copy())
self.time_level_counter += 1
def main(self, N=1, version='scalar'):
self.solutions = []
self.time_level_counter = 0
self.N = N
n = 6; tstop = 40
self.dt, self.x, self.cpu = \
solver(self.I, self.f, 1.0, lambda t: 0, lambda t: 0,
self.L, n, 0, tstop,
user_action=self.action, version=version)
def test_solver2(N, plot=True, version='scalar'):
s = StoreSolution()
s.main(N, version)
print 'CPU time:', s.cpu
if len(s.x) < 10: print s.solutions
if plot:
from CurveViz import graph
g = graph(program='Gnuplot', coor=s.x, ymax=1, ymin=-1)
        for u in s.solutions:
            g.plotcurve(u)
def test_solver1c(N, version='scalar'):
"""
As test_solver1, but use class for action function.
"""
def I(x): return sin(2*x*pi/L)
def f(x, t): return 0
class Action:
def __init__(self):
self.solutions = []
self.time_level_counter = 0
def __call__(self, u, x, t):
if self.time_level_counter % N == 0:
self.solutions.append(u.copy())
self.time_level_counter += 1
action = Action()
n = 100; tstop = 6; L = 10
dt, x, cpu = solver(I, f, 1.0, lambda t: 0, lambda t: 0,
L, n, 0, tstop,
user_action=action, version=version)
print 'CPU time:', cpu
print 'Max value in final u:', arrmax(action.solutions[-1])
class ExactSolution1:
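    """
    Exact standing-wave solution u(x,t) = cos(m*pi/L*t)*sin(m*pi/L*x) of
    u_tt = c**2*u_xx with c = 1; action() records the Euclidean norm of the
    error vector at every time level.
    """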
def __init__(self):
self.L = 10
def exact(self, x, t):
m = 3.0
return cos(m*pi/self.L*t)*sin(m*pi/self.L*x)
def I(self, x): return self.exact(x, 0)
def f(self, x, t): return 0
def U_0(self, t): return self.exact(0, t)
def U_L(self, t): return self.exact(self.L, t)
def action(self, u, x, t):
e = u - self.exact(x, t)
self.errors.append(sqrt(dot(e,e))) # store norm of e
def main(self, n, version='scalar'):
self.errors = []
tstop = 10
self.dt, self.x, self.cpu = \
solver(self.I, self.f, 1.0, self.U_0,
lambda t: self.exact(self.L, t),
self.L, n, 0, tstop,
user_action=self.action, version=version)
def test_solver3(version='scalar'):
s = ExactSolution1()
s.main(5, version)
print 'Max error:', max(s.errors)
if __name__ == '__main__':
    import sys  # make sure sys is available (the star imports above may not re-export it)
    if len(sys.argv) < 2:
print """Usage %s test_solver_plug 1 "'vectorized'" """ % \
sys.argv[0]
sys.exit(0)
cmd = '%s(%s)' % (sys.argv[1], ', '.join(sys.argv[2:]))
print cmd
exec(cmd)
|