# coding=utf-8
from phi.tf.flow import *
from .pde.pde_base import collect_placeholders_channels
from .sequences import StaggeredSequence, SkipSequence, LinearSequence
from .hierarchy import PDEExecutor
class ControlTraining(LearningApp):
def __init__(self, n, pde, datapath, val_range, train_range,
trace_to_channel=None,
obs_loss_frames=(-1,),
trainable_networks=('CFE', 'OP2'),
sequence_class=StaggeredSequence,
batch_size=16,
view_size=16,
learning_rate=1e-3,
learning_rate_half_life=1000,
dt=1.0,
new_graph=True):
"""
:param n: number of simulated frames between the initial and the target state
:param pde: PDE object providing placeholder states, losses and fields
:param datapath: path to the dataset used for training and validation
:param val_range: range of validation examples, or None to skip validation data
:param train_range: range of training examples, or None to skip training data
:param trainable_networks: names of the networks to train, e.g. ('CFE', 'OP2')
:param sequence_class: StaggeredSequence, SkipSequence or LinearSequence; None disables differentiable-physics sequence training
"""
if new_graph:
tf.reset_default_graph()
LearningApp.__init__(self, 'Control Training', 'Train PDE control: OP / CFE', training_batch_size=batch_size,
validation_batch_size=batch_size, learning_rate=learning_rate, stride=50)
self.initial_learning_rate = learning_rate
self.learning_rate_half_life = learning_rate_half_life
if n <= 1:
sequence_matching = False
diffphys = sequence_class is not None
if sequence_class is None:
assert 'CFE' not in trainable_networks, 'CFE training requires a sequence_class.'
assert len(obs_loss_frames) > 0, 'No loss provided (no obs_loss_frames and no sequence_class).'
sequence_class = SkipSequence
self.n = n
self.dt = dt
self.data_path = datapath
self.checkpoint_dict = None
self.info('Sequence class: %s' % sequence_class)
# --- Set up PDE sequence ---
world = World(batch_size=batch_size)
pde.create_pde(world, 'CFE' in trainable_networks, sequence_class != LinearSequence) # TODO BATCH_SIZE=None
world.state = pde.placeholder_state(world, 0)
self.add_all_fields('GT', world.state, 0)
target_state = pde.placeholder_state(world, n*dt)
self.add_all_fields('GT', target_state, n)
in_states = [world.state] + [None] * (n-1) + [target_state]
for frame in obs_loss_frames:
if in_states[frame] is None:
in_states[frame] = pde.placeholder_state(world, frame*self.dt)
# --- Execute sequence ---
executor = self.executor = PDEExecutor(world, pde, target_state, trainable_networks, self.dt)
sequence = self.sequence = sequence_class(n, executor)
sequence.execute()
all_states = self.all_states = [frame.worldstate for frame in sequence if frame is not None]
# --- Loss ---
loss = 0
reg = None
if diffphys:
target_loss = pde.target_matching_loss(target_state, sequence[-1].worldstate)
self.info('Target loss: %s' % target_loss)
if target_loss is not None:
loss += target_loss
reg = pde.total_force_loss([state for state in all_states if state is not None])
self.info('Force loss: %s' % reg)
for frame in obs_loss_frames:
supervised_loss = pde.target_matching_loss(in_states[frame], sequence[frame].worldstate)
if supervised_loss is not None:
self.info('Supervised loss at frame %d: %s' % (frame, supervised_loss))
self.add_scalar('GT_obs_%d' % frame, supervised_loss)
self.add_all_fields('GT', in_states[frame], frame)
loss += supervised_loss
self.info('Setting up loss')
if not isinstance(loss, int):  # loss is still the plain int 0 only if no loss term was added
self.add_objective(loss, 'Loss', reg=reg)
for name, scalar in pde.scalars.items():
self.add_scalar(name, scalar)
# --- Training data ---
self.info('Preparing data')
placeholders, channels = collect_placeholders_channels(in_states, trace_to_channel=trace_to_channel)
data_load_dict = {p: c for p, c in zip(placeholders, channels)}
self.set_data(data_load_dict,
val=None if val_range is None else Dataset.load(datapath, val_range),
train=None if train_range is None else Dataset.load(datapath, train_range))
# --- Show all states in GUI ---
for i, (placeholder, channel) in enumerate(zip(placeholders, channels)):
def fetch(i=i): return self.viewed_batch[i]
self.add_field('%s %d' % (channel, i), fetch)
for i, worldstate in enumerate(all_states):
self.add_all_fields('Sim', worldstate, i)
for name, field in pde.fields.items():
self.add_field(name, field)
def add_all_fields(self, prefix, worldstate, index):
with struct.unsafe():
fields = struct.flatten(struct.map(lambda x: x, worldstate, trace=True))
for field in fields:
name = '%s[%02d] %s' % (prefix, index, field.path())
if field.value is not None:
self.add_field(name, field.value)
# else:
# self.info('Field %s has value None' % name)
def load_checkpoints(self, checkpoint_dict):
if not self.prepared:
self.prepare()
self.checkpoint_dict = checkpoint_dict
self.executor.load(self.n, checkpoint_dict, preload_n=True, session=self.session, logf=self.info)
def action_save_model(self):
self.save_model()
def step(self):
if self.learning_rate_half_life is not None:
self.float_learning_rate = self.initial_learning_rate * 0.5 ** (self.steps / float(self.learning_rate_half_life))
LearningApp.step(self)
def infer_all_frames(self, data_range):
dataset = Dataset.load(self.data_path, data_range)
reader = BatchReader(dataset, self._channel_struct)
batch = reader[0:len(reader)]
feed_dict = self._feed_dict(batch, True)
inferred = self.session.run(self.all_states, feed_dict=feed_dict)
return inferred
def infer_scalars(self, data_range):
dataset = Dataset.load(self.data_path, data_range)
reader = BatchReader(dataset, self._channel_struct)
batch = reader[0:len(reader)]
feed_dict = self._feed_dict(batch, True)
scalar_values = self.session.run(self.scalars, feed_dict, summary_key='val', merged_summary=self.merged_scalars, time=self.steps)
scalar_values = {name: value for name, value in zip(self.scalar_names, scalar_values)}
return scalar_values
|
import torch.nn as nn
import torch
import os
import torch.nn.functional as F
import torchvision.models as models
from torch.autograd import Variable
import torchvision.utils as vutils
import torchvision.transforms as transforms
import numpy as np
from util.metrics import PSNR
from skimage.metrics import structural_similarity as SSIM  # compare_ssim was removed from skimage.measure in newer scikit-image
from PIL import Image
import cv2
class DeblurModel(nn.Module):
def __init__(self):
super(DeblurModel, self).__init__()
def get_input(self, data):
img = data['A']
inputs = img
targets = data['B']
inputs, targets = inputs.cuda(), targets.cuda()
inputs, targets = Variable(inputs), Variable(targets)
return inputs, targets
def tensor2im(self, image_tensor, imtype=np.uint8):
image_numpy = image_tensor[0].cpu().float().numpy()
image_numpy = (np.transpose(image_numpy, (1, 2, 0)) + 1) / 2.0 * 255.0
return image_numpy.astype(imtype)
def get_acc(self, output, target, full=False):
fake = self.tensor2im(output.data)
real = self.tensor2im(target.data)
psnr = PSNR(fake, real)
ssim = SSIM(fake, real, channel_axis=2)  # use multichannel=True on scikit-image < 0.19
return psnr, ssim
def get_loss(self, mean_loss, mean_psnr, mean_ssim):
return '{:.3f}; psnr={}; ssim={}'.format(mean_loss, mean_psnr, mean_ssim)
def visualize_data(self, phase, config, data, outputs, niter, degrad_type):
# try:
# #images = vutils.make_grid([input_image, result_image, gt_image])
# #images = vutils.make_grid(input_image)
# print(data['A'].size())
# images = (vutils.make_grid(torch.squeeze(data['A']).permute(1, 2, 0)) + 1) / 2.0 * 255.0
# writer.add_image('Images', images, niter)
# except Exception as e:
# print(e)
# print('Failed to write image grid', niter, degrad_type)
# pass
input_image = data['A'][0].cpu().float().numpy()
input_image = (np.transpose(input_image, (1, 2, 0)) + 1) / 2.0 * 255.0
gt_image = data['B'][0].cpu().float().numpy()
# print(gt_image.shape)
gt_image = (np.transpose(gt_image, (1, 2, 0)) + 1) / 2.0 * 255.0
result_image = outputs[0].detach().cpu().float().numpy()
result_image = (np.transpose(result_image, (1, 2, 0)) + 1) / 2.0 * 255.0
input_image = input_image.astype('uint8')
gt_image = gt_image.astype('uint8')
result_image = result_image.astype('uint8')
result_image = np.hstack((input_image, result_image, gt_image))
folder_name = phase + '_images_'+str(degrad_type)+'_'+str(config['experiment_desc'])
if not os.path.exists(folder_name):
os.makedirs(folder_name)
cv2.imwrite(os.path.join(folder_name,
str(int(niter)) + '.png'),
cv2.cvtColor(result_image, cv2.COLOR_RGB2BGR))
def get_model(model_config):
return DeblurModel()
|
import matplotlib.pyplot as plt
import torch
from torch import nn
from torch import optim
import torch.nn.functional as F
from torchvision import datasets, transforms, models
import time
import numpy as np
from torch.autograd import Variable
from PIL import Image
import os, random
from collections import OrderedDict
from get_inputs import get_inputs
def Model():
model = models.vgg13(pretrained=True)
# Freeze parameters so we don't backprop through them
for param in model.parameters():
param.requires_grad = False
# define the feed-forward classifier head: linear layers with dropout, ReLU and a log-softmax output
# (keys must be unique, otherwise OrderedDict silently drops the duplicated layers)
network = nn.Sequential(OrderedDict([('fc1', nn.Linear(25088, 1024)),
('drop1', nn.Dropout(p=0.5)),
('relu1', nn.ReLU()),
('fc2', nn.Linear(1024, 512)),
('drop2', nn.Dropout(p=0.5)),
('relu2', nn.ReLU()),
('fc3', nn.Linear(512, 256)),
('drop3', nn.Dropout(p=0.5)),
('relu3', nn.ReLU()),
('fc4', nn.Linear(256, 102)),
('output', nn.LogSoftmax(dim=1))
]))
model.classifier =network
return model
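# A hedged inference sketch (not part of the original script): given the model
# built above and an already preprocessed image tensor of shape (1, 3, 224, 224),
# return the top-k class indices and their probabilities. The LogSoftmax output
# is exponentiated to recover probabilities over the 102 classes.
def predict(model, image_tensor, topk=5):
    model.eval()
    with torch.no_grad():
        log_probs = model(image_tensor)
    probs = torch.exp(log_probs)
    top_probs, top_classes = probs.topk(topk, dim=1)
    return top_probs.squeeze(0).tolist(), top_classes.squeeze(0).tolist()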
if __name__=='__main__':
# arch=get_inputs().arch
print(Model())
# print(arch)
|
#!/usr/bin/env python3
# references:
# rfc1928(socks5): https://www.ietf.org/rfc/rfc1928.txt
# asyncio-socks5 https://github.com/RobberPhex/asyncio-socks5
# handshake
# +----+----------+----------+
# |VER | NMETHODS | METHODS |
# +----+----------+----------+
# | 1 | 1 | 1 to 255 |
# +----+----------+----------+
# request
# +----+-----+-------+------+----------+----------+
# |VER | CMD | RSV | ATYP | DST.ADDR | DST.PORT |
# +----+-----+-------+------+----------+----------+
# | 1 | 1 | X'00' | 1 | Variable | 2 |
# +----+-----+-------+------+----------+----------+
# reply
# +----+-----+-------+------+----------+----------+
# |VER | REP | RSV | ATYP | BND.ADDR | BND.PORT |
# +----+-----+-------+------+----------+----------+
# | 1 | 1 | X'00' | 1 | Variable | 2 |
# +----+-----+-------+------+----------+----------+
# udp relay request and reply
# +----+------+------+----------+----------+----------+
# |RSV | FRAG | ATYP | DST.ADDR | DST.PORT | DATA |
# +----+------+------+----------+----------+----------+
# | 2 | 1 | 1 | Variable | 2 | Variable |
# +----+------+------+----------+----------+----------+
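# A minimal parsing sketch (not taken from the referenced projects) showing how
# the fixed-size fields of the frames documented above map onto raw bytes.
# Field names follow RFC 1928; the helpers below only cover the handshake and
# the fixed part of the request header.
import socket
import struct

def parse_handshake(data: bytes):
    """VER | NMETHODS | METHODS -> (version, [methods])."""
    ver, nmethods = struct.unpack('!BB', data[:2])
    return ver, list(data[2:2 + nmethods])

def parse_request_header(data: bytes):
    """VER | CMD | RSV | ATYP, plus DST.ADDR / DST.PORT for the IPv4 case."""
    ver, cmd, rsv, atyp = struct.unpack('!BBBB', data[:4])
    if atyp == 0x01:  # IPv4: 4 address bytes followed by a 2-byte port
        addr = socket.inet_ntoa(data[4:8])
        port = struct.unpack('!H', data[8:10])[0]
        return ver, cmd, atyp, addr, port
    return ver, cmd, atyp, None, None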
|
#!/usr/bin/python
# Check interface utilization using nicstat
# nicstat needs to be in $PATH
# Author: Mattias Mikkola
import subprocess
import sys
from optparse import OptionParser
from numpy import mean
parser = OptionParser()
parser.add_option('-i', '--interface', dest='interface', help='Interface to monitor')
parser.add_option('-w', '--warning', dest='warn', help='Warning threshold')
parser.add_option('-c', '--critical', dest='crit', help='Critical threshold')
parser.add_option('-n', '--iterations', dest='iter', help='Number of values to read')
(options, args) = parser.parse_args()
vals = []
p = subprocess.Popen('nicstat -p -i %s 1 %s' % (options.interface, int(options.iter)+1), shell=True, stdout=subprocess.PIPE)
for line in p.stdout.readlines():
s = line.decode().split(':')  # subprocess output is bytes under Python 3
vals.append(float(s[6]))
del vals[0]
avg = mean(vals)
if avg > float(options.crit):
status = 'CRITICAL'
code = 2
elif avg > float(options.warn):
status = 'WARNING'
code = 1
else:
status = 'OK'
code = 0
print('%s: Network utilization: %.2f%%|util=%.2f;%.2f;%.2f;0.00;100.00' % (status, avg, avg, float(options.warn), float(options.crit)))
exit(code)
|
# Generated by Django 3.0.8 on 2020-08-14 13:59
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('api', '0010_auto_20200814_1212'),
]
operations = [
migrations.AddField(
model_name='contact',
name='sap_id',
field=models.BigIntegerField(blank=True, null=True, verbose_name='sap_id'),
),
migrations.AddField(
model_name='product',
name='provider',
field=models.CharField(default='SITN', max_length=30, verbose_name='provider'),
),
]
|
from distutils.core import setup, Command
from distutils.dir_util import copy_tree
import sys
import os
import pyvx.nodes
import pyvx.capi
from pyvx import __version__
mydir = os.path.dirname(os.path.abspath(__file__))
class InstallLibCommand(Command):
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
pyvx.capi.build('/usr/local/lib')
copy_tree('pyvx/inc/headers/VX', '/usr/local/include/VX')
os.system('ldconfig')
class PyTestCommand(Command):
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
import pytest
errno = pytest.main()
sys.exit(errno)
ext_modules = [n.ffi.verifier.get_extension()
for n in [pyvx.nodes.PlayNode, pyvx.nodes.ShowNode, pyvx.inc.vx]
if n.lib]
setup(
name='PyVX',
description='OpenVX implementation',
long_description='''
PyVX is an implementation of `OpenVX`_ in python. `OpenVX`_ is a standard for
expressing computer vision processing algorithms as a graph of function nodes.
This graph is verified once and can then be processed (executed) multiple
times. This implementation gains its performance by generating C-code during
the verification phase. This code is compiled and loaded dynamically and then
called during the process phase.
To use this python implementation as an `OpenVX`_ backend from a C program, a
shared library is provided. This library embeds Python and provides a C API
following the `OpenVX`_ specification. That way the C program does not need to
be aware of the fact that python is used. Also, any C program following the
`OpenVX`_ specification will be compilable with this backend.
Further details are provided in the `Documentation`_
.. _`OpenVX`: https://www.khronos.org/openvx
.. _`Documentation`: https://pyvx.readthedocs.org
''',
version=__version__,
packages=['pyvx', 'pyvx.inc'],
package_data={'pyvx': ['glview.h', 'avplay.h', 'glview.c', 'avplay.c'],
'pyvx.inc': ['headers/VX/vx_api.h',
'headers/VX/vx.h',
'headers/VX/vx_kernels.h',
'headers/VX/vx_nodes.h',
'headers/VX/vx_types.h',
'headers/VX/vxu.h',
'headers/VX/vx_vendors.h',
]},
zip_safe=False,
url='http://pyvx.readthedocs.org',
author='Hakan Ardo',
author_email='pyvx@googlegroups.com',
license='MIT',
install_requires=['pycparser', 'cffi'],
ext_modules=ext_modules,
cmdclass={'test': PyTestCommand, 'libinstall': InstallLibCommand},
tests_require=['pytest'],
)
if pyvx.nodes.PlayNode.lib is None:
print()
print("Warning: PlayNode not available due to missing dependencies. Try:")
print()
print("    apt-get install libavformat-dev libswscale-dev libavdevice-dev")
print()
if pyvx.nodes.ShowNode.lib is None:
print()
print("Warning: ShowNode not available due to missing dependencies. Try:")
print()
print("    apt-get install freeglut3-dev")
print()
|
import warnings
import time
from skmultiflow.visualization.base_listener import BaseListener
from matplotlib.rcsetup import cycler
import matplotlib.pyplot as plt
from matplotlib import get_backend
from skmultiflow.utils import FastBuffer, constants
class EvaluationVisualizer(BaseListener):
""" This class is responsible for maintaining and updating the plot modules
for the evaluators in scikit-multiflow.
It uses `matplotlib.pyplot` modules to create the main plot, which
depending on the options passed to it as parameter, will create multiple
subplots to better display all requested metrics.
The plots are dynamically updated depending on frame counts and the time elapsed
since the last update. This strategy accounts for both fast and slow methods.
Line objects are used to describe performance measurements.
It supports multiple models per subplot as a way of comparing the performance
of different learning algorithms.
Parameters
----------
n_wait: int
The number of samples tracked in the sliding window for current performance.
dataset_name: string (Default: 'Unnamed graph')
The title of the plot; it has no effect on the evaluation itself.
metrics: list
A list containing all the metrics to plot.
n_models: int
The number of models to compare.
Raises
------
ValueError: Raised if no metrics are passed to the constructor, or if a
parameter of the wrong type is passed to on_new_train_step.
Notes
-----
Using more than 3 plot types at a time is not recommended, as it can
significantly slow down the evaluation time. For the same reason comparing
more than 3 learners at a time is not recommended.
"""
def __init__(self, task_type, n_wait, dataset_name, metrics, n_models, model_names, data_dict):
super().__init__()
# Default values
self._sample_ids = []
self._is_legend_set = False
self._frame_cnt = 0
self._plot_trackers = {}
self._text_annotations = []
self._last_draw_timestamp = 0
# Configuration
self._data_dict = data_dict
self.n_wait = n_wait
self.dataset_name = dataset_name
self.n_models = n_models
# Validate inputs
if task_type is None or task_type == constants.UNDEFINED:
raise ValueError('Task type for visualizer object is undefined.')
else:
if task_type in [constants.CLASSIFICATION, constants.REGRESSION, constants.MULTI_TARGET_CLASSIFICATION,
constants.MULTI_TARGET_REGRESSION]:
self.task_type = task_type
else:
raise ValueError('Invalid task type: {}'.format(task_type))
if model_names is None:
self.model_names = ['M{}'.format(i) for i in range(n_models)]
else:
if isinstance(model_names, list):
if len(model_names) != n_models:
raise ValueError("Number of model names {} does not match the number of models {}.".
format(len(model_names), n_models))
else:
self.model_names = model_names
else:
raise ValueError("model_names must be a list.")
if metrics is not None:
if len(metrics) < 1:
raise ValueError('The metrics list is empty.')
else:
self.metrics = metrics
else:
raise ValueError('Invalid metrics {}'.format(metrics))
if constants.DATA_POINTS in metrics and n_models > 1:
raise ValueError("Can not use multiple models with data points visualization.")
# Proceed with configuration
self.__configure()
def on_new_train_step(self, sample_id, data_buffer):
""" This is the listener main function, which gives it the ability to
'listen' for the caller. Whenever the EvaluationVisualiser should
be aware of some new data, the caller will invoke this function,
passing the new data buffer.
Parameters
----------
sample_id: int
The current sample id.
data_buffer: EvaluationDataBuffer
A buffer containing evaluation data for a single training / visualization step.
Raises
------
ValueError: If an exception is raised during the draw operation.
"""
try:
current_time = time.time()
self._clear_annotations()
self._update_plots(sample_id, data_buffer)
# To mitigate re-drawing overhead for fast models use frame counter (default = 5 frames).
# To avoid slow refresh rate in slow models use a time limit (default = 1 sec).
if (self._frame_cnt == 5) or (current_time - self._last_draw_timestamp > 1):
plt.subplots_adjust(right=0.72, bottom=0.22) # Adjust subplots to include metrics annotations
if get_backend() == 'nbAgg':
self.fig.canvas.draw() # Force draw in the 'notebook' backend
plt.pause(1e-9)
self._frame_cnt = 0
self._last_draw_timestamp = current_time
else:
self._frame_cnt += 1
except BaseException as exception:
raise ValueError('Failed when trying to draw plot. Exception: {} | Type: {}'.
format(exception, type(exception).__name__))
def __configure(self):
""" This function will verify which subplots should be create. Initializing
all relevant objects to keep track of the plotting points.
Basic structures needed to keep track of plot values (for each subplot)
are: lists of values and matplot line objects.
The __configure function will also initialize each subplot with the
correct name and set up the axes.
The subplot size will self-adjust to each screen size, so that data can
be better viewed in different contexts.
"""
font_size_small = 8
font_size_medium = 10
font_size_large = 12
plt.rc('font', size=font_size_small) # controls default text sizes
plt.rc('axes', titlesize=font_size_medium) # font size of the axes title
plt.rc('axes', labelsize=font_size_small) # font size of the x and y labels
plt.rc('xtick', labelsize=font_size_small) # font size of the tick labels
plt.rc('ytick', labelsize=font_size_small) # font size of the tick labels
plt.rc('legend', fontsize=font_size_small) # legend font size
plt.rc('figure', titlesize=font_size_large) # font size of the figure title
warnings.filterwarnings("ignore", ".*GUI is implemented.*")
warnings.filterwarnings("ignore", ".*left==right.*")
warnings.filterwarnings("ignore", ".*Passing 1d.*")
self._sample_ids = []
memory_time = {}
plt.ion()
self.fig = plt.figure(figsize=(9, 5))
self.fig.suptitle(self.dataset_name)
plot_metrics = [m for m in self.metrics if m not in [constants.RUNNING_TIME, constants.MODEL_SIZE]]
base = 11 + len(plot_metrics) * 100 # 3-digit integer describing the position of the subplot.
self.fig.canvas.set_window_title('scikit-multiflow')
# Subplots handler
for metric_id in self.metrics:
data_ids = self._data_dict[metric_id]
self._plot_trackers[metric_id] = PlotDataTracker(data_ids)
plot_tracker = self._plot_trackers[metric_id]
if metric_id not in [constants.RUNNING_TIME, constants.MODEL_SIZE]:
plot_tracker.sub_plot_obj = self.fig.add_subplot(base)
base += 1
if metric_id == constants.TRUE_VS_PREDICTED:
handle = []
plot_tracker.sub_plot_obj.set_prop_cycle(cycler('color', ['c', 'm', 'y', 'k']))
for data_id in data_ids:
if data_id == constants.Y_TRUE:
# True data
plot_tracker.data[data_id] = []
label = 'True value'
line_style = '--'
line_obj = plot_tracker.line_objs
if self.task_type == constants.CLASSIFICATION:
line_obj[data_id], = plot_tracker.sub_plot_obj.step(self._sample_ids,
plot_tracker.data[data_id],
label=label, linestyle=line_style)
else:
line_obj[data_id], = plot_tracker.sub_plot_obj.plot(self._sample_ids,
plot_tracker.data[data_id],
label=label, linestyle=line_style)
handle.append(line_obj[data_id])
else:
# Predicted data
plot_tracker.data[data_id] = [[] for _ in range(self.n_models)]
plot_tracker.line_objs[data_id] = [None for _ in range(self.n_models)]
line_obj = plot_tracker.line_objs[data_id]
for i in range(self.n_models):
label = 'Predicted {}'.format(self.model_names[i])
line_style = '--'
if self.task_type == constants.CLASSIFICATION:
line_obj[i], = plot_tracker.sub_plot_obj.step(self._sample_ids,
plot_tracker.data[data_id][i],
label=label, linestyle=line_style)
else:
line_obj[i], = plot_tracker.sub_plot_obj.plot(self._sample_ids,
plot_tracker.data[data_id][i],
label=label, linestyle=line_style)
handle.append(line_obj[i])
plot_tracker.sub_plot_obj.legend(handles=handle, loc=2, bbox_to_anchor=(1.01, 1.))
plot_tracker.sub_plot_obj.set_title('True vs Predicted')
plot_tracker.sub_plot_obj.set_ylabel('y')
elif metric_id == constants.DATA_POINTS:
plot_tracker.data['buffer_size'] = 100
plot_tracker.data['X'] = FastBuffer(plot_tracker.data['buffer_size'])
plot_tracker.data['target_values'] = None
plot_tracker.data['predictions'] = FastBuffer(plot_tracker.data['buffer_size'])
plot_tracker.data['clusters'] = []
plot_tracker.data['clusters_initialized'] = False
elif metric_id == constants.RUNNING_TIME:
# Only the current time measurement must be saved
for data_id in data_ids:
plot_tracker.data[data_id] = [0.0 for _ in range(self.n_models)]
# To make the annotations
memory_time.update(plot_tracker.data)
elif metric_id == constants.MODEL_SIZE:
plot_tracker.data['model_size'] = [0.0 for _ in range(self.n_models)]
memory_time['model_size'] = plot_tracker.data['model_size']
else:
# Default case, 'mean' and 'current' performance
handle = []
sorted_data_ids = data_ids.copy()
sorted_data_ids.sort() # For better usage of the color cycle, start with 'current' data
for data_id in sorted_data_ids:
plot_tracker.data[data_id] = [[] for _ in range(self.n_models)]
plot_tracker.line_objs[data_id] = [None for _ in range(self.n_models)]
line_obj = plot_tracker.line_objs[data_id]
for i in range(self.n_models):
if data_id == constants.CURRENT:
label = '{} (current, {} samples)'.format(self.model_names[i], self.n_wait)
line_style = '-'
else:
label = '{} (mean)'.format(self.model_names[i])
line_style = ':'
line_obj[i], = plot_tracker.sub_plot_obj.plot(self._sample_ids,
plot_tracker.data[data_id][i],
label=label,
linestyle=line_style)
handle.append(line_obj[i])
self._set_fig_legend(handle)
if metric_id == constants.ACCURACY:
plot_tracker.sub_plot_obj.set_title('Accuracy')
plot_tracker.sub_plot_obj.set_ylabel('acc')
elif metric_id == constants.KAPPA:
plot_tracker.sub_plot_obj.set_title('Kappa')
plot_tracker.sub_plot_obj.set_ylabel('kappa')
elif metric_id == constants.KAPPA_T:
plot_tracker.sub_plot_obj.set_title('Kappa T')
plot_tracker.sub_plot_obj.set_ylabel('kappa t')
elif metric_id == constants.KAPPA_M:
plot_tracker.sub_plot_obj.set_title('Kappa M')
plot_tracker.sub_plot_obj.set_ylabel('kappa m')
elif metric_id == constants.HAMMING_SCORE:
plot_tracker.sub_plot_obj.set_title('Hamming score')
plot_tracker.sub_plot_obj.set_ylabel('hamming score')
elif metric_id == constants.HAMMING_LOSS:
plot_tracker.sub_plot_obj.set_title('Hamming loss')
plot_tracker.sub_plot_obj.set_ylabel('hamming loss')
elif metric_id == constants.EXACT_MATCH:
plot_tracker.sub_plot_obj.set_title('Exact Match')
plot_tracker.sub_plot_obj.set_ylabel('exact match')
elif metric_id == constants.J_INDEX:
plot_tracker.sub_plot_obj.set_title('Jaccard Index')
plot_tracker.sub_plot_obj.set_ylabel('j-index')
elif metric_id == constants.MSE:
plot_tracker.sub_plot_obj.set_title('Mean Squared Error')
plot_tracker.sub_plot_obj.set_ylabel('mse')
elif metric_id == constants.MAE:
plot_tracker.sub_plot_obj.set_title('Mean Absolute Error')
plot_tracker.sub_plot_obj.set_ylabel('mae')
elif metric_id == constants.AMSE:
plot_tracker.sub_plot_obj.set_title('Average Mean Squared Error')
plot_tracker.sub_plot_obj.set_ylabel('amse')
elif metric_id == constants.AMAE:
plot_tracker.sub_plot_obj.set_title('Average Mean Absolute Error')
plot_tracker.sub_plot_obj.set_ylabel('amae')
elif metric_id == constants.ARMSE:
plot_tracker.sub_plot_obj.set_title('Average Root Mean Squared Error')
plot_tracker.sub_plot_obj.set_ylabel('armse')
elif metric_id == constants.DATA_POINTS:
plot_tracker.sub_plot_obj.set_title('')
plot_tracker.sub_plot_obj.set_xlabel('Feature x')
plot_tracker.sub_plot_obj.set_ylabel('Feature y')
else:
plot_tracker.sub_plot_obj.set_title('Unknown metric')
plot_tracker.sub_plot_obj.set_ylabel('')
if constants.DATA_POINTS not in self.metrics:
plt.xlabel('Samples')
if constants.RUNNING_TIME in self.metrics or \
constants.MODEL_SIZE in self.metrics:
self._update_time_and_memory_annotations(memory_time)
self.fig.subplots_adjust(hspace=.5)
self.fig.tight_layout(rect=[0, .04, 1, 0.98], pad=2.6, w_pad=0.4, h_pad=1.0)
def _set_fig_legend(self, handles=None):
if not self._is_legend_set:
self.fig.legend(handles=handles, ncol=2, bbox_to_anchor=(0.98, 0.04), loc="lower right")
self._is_legend_set = True
def _update_plots(self, sample_id, data_buffer):
self._sample_ids.append(sample_id)
memory_time = {}
for metric_id, data_ids in data_buffer.data_dict.items():
# update_xy_limits = True
update_xy_limits = metric_id not in [constants.RUNNING_TIME, constants.MODEL_SIZE]
y_min = 0.0
y_max = 1.0
pad = 0.1 # Default padding to set above and below plots
plot_tracker = self._plot_trackers[metric_id]
if metric_id == constants.TRUE_VS_PREDICTED:
# Process true values
data_id = constants.Y_TRUE
plot_tracker.data[data_id].append(data_buffer.get_data(metric_id=metric_id, data_id=data_id))
plot_tracker.line_objs[data_id].set_data(self._sample_ids, plot_tracker.data[data_id])
# Process predicted values
data_id = constants.Y_PRED
data = data_buffer.get_data(metric_id=metric_id, data_id=data_id)
for i in range(self.n_models):
plot_tracker.data[data_id][i].append(data[i])
plot_tracker.line_objs[data_id][i].set_data(self._sample_ids, plot_tracker.data[data_id][i])
y_min = min([plot_tracker.data[data_id][i][-1], plot_tracker.data[constants.Y_TRUE][-1], y_min])
y_max = max([plot_tracker.data[data_id][i][-1], plot_tracker.data[constants.Y_TRUE][-1], y_max])
elif metric_id == constants.DATA_POINTS:
update_xy_limits = False
# Process features
data_id = 'X'
features_dict = data_buffer.get_data(metric_id=metric_id, data_id=data_id)
feature_indices = list(features_dict.keys())
feature_indices.sort()
# Store tuple of feature values into the buffer, sorted by index
plot_tracker.data[data_id].add_element([[features_dict[feature_indices[0]],
features_dict[feature_indices[1]]]])
plot_tracker.sub_plot_obj.set_xlabel('Feature {}'.format(feature_indices[0]))
plot_tracker.sub_plot_obj.set_ylabel('Feature {}'.format(feature_indices[1]))
# TODO consider a fading/update strategy instead
plot_tracker.sub_plot_obj.clear()
X1 = plot_tracker.data[data_id].get_queue()[-1][0]
X2 = plot_tracker.data[data_id].get_queue()[-1][1]
# Process target values
data_id = 'target_values'
plot_tracker.data[data_id] = data_buffer.get_data(metric_id=metric_id, data_id=data_id)
if not plot_tracker.data['clusters_initialized']:
for j in range(len(plot_tracker.data[data_id])):
plot_tracker.data['clusters'].append(FastBuffer(plot_tracker.data['buffer_size']))
# Process predictions
data_id = 'predictions'
plot_tracker.data[data_id].add_element([data_buffer.get_data(metric_id=metric_id, data_id=data_id)])
for k, cluster in enumerate(plot_tracker.data['clusters']):
if plot_tracker.data[data_id].get_queue()[-1] == k:
plot_tracker.data['clusters'][k].add_element([(X1, X2)])
# TODO confirm buffer update inside the loop
if cluster.get_queue():
temp = cluster.get_queue()
plot_tracker.sub_plot_obj.scatter(*zip(*temp), label="Class {k}".format(k=k))
plot_tracker.sub_plot_obj.legend(loc=2, bbox_to_anchor=(1.01, 1.))
elif metric_id == constants.RUNNING_TIME:
# Only the current time measurement must be saved
for data_id in data_ids:
plot_tracker.data[data_id] = data_buffer.get_data(
metric_id=metric_id,
data_id=data_id
)
memory_time.update(plot_tracker.data)
elif metric_id == constants.MODEL_SIZE:
plot_tracker.data['model_size'] = data_buffer.get_data(
metric_id=metric_id,
data_id='model_size'
)
memory_time['model_size'] = plot_tracker.data['model_size']
else:
# Default case, 'mean' and 'current' performance
for data_id in data_ids:
# Buffer data
data = data_buffer.get_data(metric_id=metric_id, data_id=data_id)
for i in range(self.n_models):
plot_tracker.data[data_id][i].append(data[i])
plot_tracker.line_objs[data_id][i].set_data(self._sample_ids, plot_tracker.data[data_id][i])
# Process data
for i in range(self.n_models):
# Update annotations
self._update_annotations(i, plot_tracker.sub_plot_obj, self.model_names[i],
plot_tracker.data[constants.MEAN][i][-1],
plot_tracker.data[constants.CURRENT][i][-1])
# Update plot limits
if metric_id in [constants.KAPPA_T, constants.KAPPA_M]:
y_min = min([plot_tracker.data[constants.MEAN][i][-1],
plot_tracker.data[constants.CURRENT][i][-1], y_min])
if metric_id in [constants.MSE, constants.MAE, constants.AMSE, constants.AMAE, constants.ARMSE]:
y_min = -1
y_max = max([plot_tracker.data[constants.MEAN][i][-1],
plot_tracker.data[constants.CURRENT][i][-1], y_max])
pad = 0.5 * y_max # Padding below and above thresholds
if update_xy_limits:
plot_tracker.sub_plot_obj.set_ylim((y_min-pad, y_max+pad))
plot_tracker.sub_plot_obj.set_xlim(0, self._sample_ids[-1])
if constants.RUNNING_TIME in self.metrics or \
constants.MODEL_SIZE in self.metrics:
self._update_time_and_memory_annotations(memory_time)
def _clear_annotations(self):
""" Clear annotations, so next frame is correctly rendered. """
for i in range(len(self._text_annotations)):
self._text_annotations[i].remove()
self._text_annotations = []
def _update_annotations(self, idx, subplot, model_name, mean_value, current_value):
xy_pos_default = (1.02, .90) # Default xy position for metric annotations
shift_y = 10 * (idx + 1) # y axis shift for plot annotations
xy_pos = xy_pos_default
if idx == 0:
self._text_annotations.append(subplot.annotate('{: <12} | {: ^16} | {: ^16}'.
format('Model', 'Mean', 'Current'),
xy=xy_pos, xycoords='axes fraction'))
self._text_annotations.append(subplot.annotate('{: <10.10s}'.format(model_name[:6]),
xy=xy_pos, xycoords='axes fraction',
xytext=(0, -shift_y), textcoords='offset points'))
self._text_annotations.append(subplot.annotate('{: ^16.4f} {: ^16.4f}'.format(mean_value, current_value),
xy=xy_pos, xycoords='axes fraction',
xytext=(50, -shift_y), textcoords='offset points'))
def _update_time_and_memory_annotations(self, memory_time):
text_header = '{: <12s}'.format('Model')
if constants.RUNNING_TIME in self.metrics:
text_header += ' | {: ^16s} | {: ^16s} | {: ^16s}'.\
format('Train (s)', 'Predict (s)', 'Total (s)')
if constants.MODEL_SIZE in self.metrics:
text_header += ' | {: ^16}'.format('Mem (kB)')
last_plot = self.fig.get_axes()[-1]
x0, y0, width, height = last_plot.get_position().bounds
annotation_xy = (.1, .1)
self._text_annotations.append(self.fig.text(s=text_header, x=annotation_xy[0], y=annotation_xy[1]))
for i, m_name in enumerate(self.model_names):
text_info = '{: <15s}'.format(m_name[:6])
if constants.RUNNING_TIME in self.metrics:
text_info += '{: ^19.2f} {: ^19.2f} {: ^19.2f} '.format(memory_time['training_time'][i],
memory_time['testing_time'][i],
memory_time['total_running_time'][i])
if constants.MODEL_SIZE in self.metrics:
text_info += '{: ^19.2f}'.format(memory_time['model_size'][i])
shift_y = .03 * (i + 1) # y axis shift for plot annotations
self._text_annotations.append(self.fig.text(s=text_info, x=annotation_xy[0], y=annotation_xy[1] - shift_y))
@staticmethod
def hold():
plt.show(block=True)
def get_info(self):
pass
class PlotDataTracker(object):
""" A class to track relevant data for plots corresponding to selected metrics.
Data buffers and line objects are accessible via the corresponding data_id.
"""
def __init__(self, data_ids: list):
self.data_ids = None
self.data = {}
self.line_objs = {}
self.sub_plot_obj = None
self._validate(data_ids)
self._configure()
def _validate(self, data_ids):
if isinstance(data_ids, list):
if len(data_ids) > 0:
self.data_ids = data_ids
else:
raise ValueError('data_ids is empty')
else:
raise TypeError('data_ids must be a list, received: {}'.format(type(data_ids)))
def _configure(self):
for data_id in self.data_ids:
self.data[data_id] = None
self.line_objs[data_id] = None
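if __name__ == '__main__':
    # Minimal illustration (not part of scikit-multiflow itself) of how
    # PlotDataTracker keys its buffers: one entry per data_id, to be filled
    # in later by EvaluationVisualizer.__configure().
    tracker = PlotDataTracker(['mean', 'current'])
    print(tracker.data)       # {'mean': None, 'current': None}
    print(tracker.line_objs)  # {'mean': None, 'current': None}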
|
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from matplotlib import pyplot
from sklearn.linear_model import LinearRegression
from sklearn import ensemble
from sklearn.model_selection import train_test_split  # sklearn.cross_validation was removed in scikit-learn 0.20
#%%
def DataRead(str1, useCols, tablenames):
dataTable = pd.read_csv("%s" % str1, header=None, sep="\s*\;",usecols=useCols, names=tablenames, engine='python')
dataTable.drop(dataTable.index[[0]], inplace=True)
return dataTable
#%%
def CollumnFill (name, data, index, df):
df[name][index] = data
return
#%%
def CollumnAppend (name, data, df):
df = df.append(data, ignore_index=True)
return
#%% Define:
tablePath = "C:/Users/rsabedra/Documents/Python/example_sprit_cut_prices.csv"
tablePath2 = "C:/Users/rsabedra/Documents/Python/example_sprit_cut_station.csv"
#%%
useColls = [0, 1, 2, 3, 4]
tableCollumns = ['ID', 'E5', 'E10', 'DIESEL', 'DATA']
table = DataRead(tablePath, useColls , tableCollumns)
table.E5 = pd.to_numeric(table.E5, errors='coerce')
table.E10 = pd.to_numeric(table.E10, errors='coerce')
table.DIESEL = pd.to_numeric(table.DIESEL, errors='coerce')
useColls2 = [0, 4, 7,10,11]
tableCollumns2 = ['ID', 'BRAND', 'POST_CODE', 'LAT', 'LNG']
table2 = DataRead(tablePath2, useColls2 , tableCollumns2)
# =============================================================================
# Organizing and cleaning the data
# =============================================================================
table = table.dropna()
table = table.drop(table[table.E5 > 8000].index)
table = table.drop(table[table.E5 < 10].index)
table = table.drop(table[table.E10 > 8000].index)
table = table.drop(table[table.E10 < 10].index)
table = table.drop(table[table.DIESEL > 8000].index)
table = table.drop(table[table.DIESEL < 10].index)
table = table.reset_index(drop=True)
table = table.drop_duplicates(inplace=False)
table2 = table2.dropna()
table2 = table2.reset_index(drop=True)
table2 = table2.drop_duplicates(inplace=False)
#%%
# =============================================================================
# Auxiliary function to create a months list based on the data
# =============================================================================
MonthsTotal = list()
monthAux = table.iloc[0, 4][1:-20]
MonthsTotal.append(monthAux)
for x in range (0, len(table.DATA)):
if (monthAux != table.iloc[x, 4][1:-20]):
value = table.iloc[x, 4][1:-20]
monthAux = table.iloc[x, 4][1:-20]
MonthsTotal.append(value)
MonthsTotal = list(set(MonthsTotal))
MonthsTotal.sort()
#%%
# =============================================================================
# How many different brands are there?
# =============================================================================
tst = list(table2.groupby(['BRAND']).groups.keys())  # list() so the brand names can be indexed below
len(tst)
#%%
# =============================================================================
# How many different locations are present in the data?
# =============================================================================
len(table2.groupby(['LAT' , 'LNG']).count())
#%%
# =============================================================================
# # What is the min, max price for each gasoline type, per month?
# =============================================================================
listMaxE5 = list()
listMaxE10 = list()
listMaxDIESEL = list()
listMinE5 = list()
listMinE10 = list()
listMinDIESEL = list()
listLocMinDIESEL = list()
var = table.iloc[0, 4][1:-20]
for y in range (0, len(MonthsTotal)):
E5Max = 0
E10Max = 0
DIESELMax = 0
E5Min = 9999999
E10Min = 9999999
DIESELMin = 9999999
for x in range (0, len(table.DATA)):
if(MonthsTotal[y] == table.iloc[x, 4][1:-20]):
if (E5Max < table.iloc[x, 1]):
E5Max = table.iloc[x, 1]
elif (E5Min > table.iloc[x, 1]):
E5Min = table.iloc[x, 1]
if (E10Max < table.iloc[x, 2]):
E10Max = table.iloc[x, 2]
elif (E10Min > table.iloc[x, 2]):
E10Min = table.iloc[x, 2]
if (DIESELMax < table.iloc[x, 3]):
DIESELMax = table.iloc[x, 3]
elif (DIESELMin > table.iloc[x, 3]):
DIESELMin = table.iloc[x, 3]
IdDiesel= table.iloc[x, 0]
listMaxE5.append(E5Max)
listMaxE10.append(E10Max)
listMaxDIESEL.append(DIESELMax)
listMinE5.append(E5Min)
listMinE10.append(E10Min)
listMinDIESEL.append(DIESELMin)
listLocMinDIESEL.append(IdDiesel)
MaxMinGasolineMonth = pd.DataFrame(
{'Month': MonthsTotal,
'MaxE5': listMaxE5,
'MinE5': listMinE5,
'MaxE10': listMaxE10,
'MinE10': listMinE10,
'MaxDIESEL': listMaxDIESEL,
'MinDIESEL': listMinDIESEL
})
MaxMinGasolineMonth['Month'] = pd.to_datetime(MaxMinGasolineMonth['Month'], format='%Y-%m')
MaxMinGasolineMonth.plot.line(x='Month')
plt.title('What is the min, max price for each gasoline type, per month?')
plt.xlabel('Date')
plt.ylabel('Price')
#%%
# =============================================================================
# #What is the mean of each gasoline type?
# =============================================================================
table.describe()
boxplot = pd.DataFrame()
boxplot['ID'] = table['ID']
boxplot['DIESEL'] = table['DIESEL']
boxplot['E5'] = table['E5']
boxplot['E10'] = table['E10']
ax = sns.boxplot( data=boxplot)
ax.set_title('What is the mean of each gasoline type?')
#%%
# =============================================================================
# #What is the brand with major number of gas stations?
# =============================================================================
listQuantOfBrands = list()
for y in range (0, len(tst)):
cont = 0
for x in range (0, len(table2.BRAND)):
if(tst[y] == table2.iloc[x, 1]):
cont += 1
listQuantOfBrands.append(cont)
majorGasStations = pd.DataFrame(
{
'Brands':[x for x in tst],
'Count':[ x for x in listQuantOfBrands]
})
majorGasStations = majorGasStations.drop(majorGasStations[majorGasStations.Count < 12].index)
a4_dims = (11.7, 8.27)
fig, ax = pyplot.subplots(figsize=a4_dims)
ax = sns.barplot(x="Brands", y="Count", data=majorGasStations)
ax.set_title('What is the brand with major number of gas stations?')
#%%
# =============================================================================
# #What is the maximum range of each gasoline type per month?
# =============================================================================
listRangeE5 = list()
listRangeE10 = list()
listRangeDIESEL = list()
for x in range (0, len(MonthsTotal)):
listRangeE5.append(listMaxE5[x] - listMinE5[x])
listRangeE10.append(listMaxE10[x] - listMinE10[x])
listRangeDIESEL.append(listMaxDIESEL[x] - listMinDIESEL[x])
RangePrices = pd.DataFrame(
{
'Month': MonthsTotal,
'E5': listRangeE5,
'E10': listRangeE10,
'DIESEL': listRangeDIESEL
})
f, ax = plt.subplots(1, 1, sharey=True)
ax.scatter(RangePrices.Month, RangePrices.E5, label='E5')
ax.scatter(RangePrices.Month, RangePrices.E10, label='E10')
ax.scatter(RangePrices.Month, RangePrices.DIESEL, label='DIESEL')
plt.title('What is the maximum range of each gasoline type per month?')
plt.xlabel('Date')
plt.ylabel('Price')
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
plt.xticks(rotation=35)
#%%
# =============================================================================
# #What is the region with most concentration of gas stations?
# =============================================================================
listPostReg = list()
for x in range (0, len(table2.POST_CODE)):
listPostReg.append(table2.iloc[x, 2][0:-3])
RegionsGasStations = [[x,listPostReg.count(x)] for x in set(listPostReg)]
RegionsGasStations2 = pd.DataFrame(
{
'Region':[ x[0] for x in RegionsGasStations],
'Count':[ x[1] for x in RegionsGasStations]
})
RegionsGasStations2 = RegionsGasStations2.iloc[2:]
ax = sns.barplot(x="Region", y="Count", data=RegionsGasStations2)
ax.set_title('What is the region with most concentration of gas stations?')
#%%
# =============================================================================
# What is the whole Diesel variation?
# =============================================================================
diesel = pd.DataFrame()
diesel['ID'] = table['ID']
diesel['DIESEL'] = table['DIESEL']
diesel['DATA'] = pd.to_datetime(table['DATA'], format='"%Y-%m-%d %H:%M:%S.%f"')
diesel.set_index('DATA', inplace=True)
diesel.groupby('ID')['DIESEL'].plot(legend=False)
plt.title('What is the whole Diesel variation?')
plt.xlabel('Date')
plt.ylabel('Price')
# =============================================================================
# Machine learning predictor
# =============================================================================
#Preparing variables
datadates = diesel.index.values  # 'DATA' was set as the index above
datamonths = pd.Series(data=[pd.to_datetime(x).month for x in datadates], name='month')
datadays = pd.Series([pd.to_datetime(x).day for x in datadates], name='day')
datahour = pd.Series([pd.to_datetime(x).hour for x in datadates], name='hour')
diesel['DAY'] = datadays.values    # .values avoids misalignment with the datetime index
diesel['MONTH'] = datamonths.values
diesel['HOUR'] = datahour.values
#%%
reg = LinearRegression()
teste = pd.DataFrame()
teste = diesel
teste['E5'] = table['E5'].values    # .values avoids misalignment with the datetime index
teste['E10'] = table['E10'].values
teste= teste.iloc[:500000,:]
labels = teste['DIESEL']
train1 = teste.drop(['ID', 'DIESEL'], axis=1)  # 'DATA' is the index, not a column, so it is not dropped here
# =============================================================================
# Simple Linear Regressor
# =============================================================================
x_train, x_test, y_train, y_test =train_test_split(train1, labels, test_size=0.40, random_state=2)
reg.fit(x_train, y_train)
reg.score(x_test, y_test)
#%%
# =============================================================================
# Gradient Boosting Regressor
# =============================================================================
clf = ensemble.GradientBoostingRegressor(n_estimators = 800, max_depth = 8, min_samples_split = 4,
learning_rate = 0.4, loss = 'squared_error')  # 'ls' in older scikit-learn
clf.fit(x_train, y_train)
clf.score(x_test,y_test)
#%%
# =============================================================================
# #%%
# #What is the region with the least cost of DIESEL?
#
#
# listOfBrands = list()
# listOfCode = list()
#
# for i in range (0, len(table2.ID)):
# for j in range (0, len(DieselLocation.ID)):
# if (table2.iloc[i, 0] == DieselLocation.iloc[j, 0]):
# listOfBrands.append(table2.iloc[i, 1])
# listOfCode.append(table2.iloc[i, 2][0:-3])
#
#
#
#
# DieselMinInfo = pd.DataFrame(
# {
# 'ID': listLocMinDIESEL,
# 'MinDIESEL': listMinDIESEL,
# 'Brands': listOfBrands,
# 'Code': listOfCode
# })
#
# #%%
# from sklearn import linear_model
# regr = linear_model.LinearRegression()
# regr.fit(table, table.DATA)
# LinearRegression(copy_X=True, fit_intercept=True, n_jobs=1, normalize=False)
# print(regr.coef_)
#
#
# # The mean square error
# np.mean((regr.predict(table)-table.DATA)**2)
#
#
# # Explained variance score: 1 is perfect prediction
# # and 0 means that there is no linear relationship
# # between X and y.
# regr.score(table, table.DATA)
#
# =============================================================================
|
import codecs
import os
from setuptools import setup
here = os.path.abspath(os.path.dirname(__file__))
with codecs.open(os.path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='ampr',
version='1.0.0',
description='Amateur Packet Radio API client',
long_description=long_description,
url='https://github.com/pd0mz/ampr',
author='Wijnand Modderman-Lenstra',
author_email='maze@pyth0n.org',
license='MIT',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
],
keywords='ampr amateur packet radio api',
py_modules=['ampr'],
install_requires=['requests'],
)
|
from django.conf.urls import url
from django.conf.urls.static import static
from django.urls import path, include, re_path
from provarme_tenant.views import (TenantRegisterView, Login, Logout, activate, TenantProfile, PendingPageView, ThankPageView, create_purchase_upnid)
from provarme import settings
app_name="provarme_tenant"
urlpatterns = [
path('login/', Login.as_view(), name='login'),
path('logout/', Logout.as_view(), name="logout"),
# route for tenant activation
path('activate/<int:id>/<token>/', activate, name='activate'),
# Upnid webhook for instance purchase
path('compra_plano_upnid/', create_purchase_upnid, name='purchase'),
# route for the tenant user's profile
path('profile/', TenantProfile.as_view(), name='profile'),
]
if settings.DEBUG is True:
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
import feedparser
import models
import time
import datetime as dt
from bs4 import BeautifulSoup
VALID_IMAGE_ATTRIBUTES = ('alt', 'title', 'src')
def fetch(url):
f = feedparser.parse(url)
feed = models.Feed()
feed.link = url
feed.website = f.feed.get('link')
feed.title = f.feed.get('title')
feed.author = f.feed.get('author')
entries = []
for e in f.entries:
entry = models.Entry()
entry.link = e.get('link')
entry.title = e.get('title')
published = get_first_or_default(
e, ('updated_parsed', 'published_parsed'))
if published:
entry.published = dt.datetime.fromtimestamp(time.mktime(published))
if 'content' in e and e.content and isinstance(e.content, list):
first_content = e.content[0]
content = first_content.get('value')
else:
content = e.get('description')
entry.content = content
if content:
entry.image_content = get_comic_image(content)
entries.append(entry)
return (feed, entries)
def get_comic_image(html):
soup = BeautifulSoup(html, 'html.parser')
img = soup.find('img')
if img:
img.attrs = {key: value for key, value in img.attrs.items() if key in VALID_IMAGE_ATTRIBUTES}
return str(img)
else:
return None
def get_first_or_default(d, sequence, default=None):
for element in sequence:
if element in d:
return d[element]
return default
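if __name__ == '__main__':
    # Hedged usage sketch: the feed URL below is a placeholder, not one used by
    # the original project. fetch() returns a models.Feed plus the list of
    # models.Entry objects built above.
    feed, entries = fetch('https://example.com/feed.xml')
    print(feed.title, feed.website)
    for entry in entries:
        print(entry.title, entry.link)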
|
#!/usr/bin/python
"""
Plot parameter 1D marginal constraints as a fn. of FG subtraction efficiency.
"""
import numpy as np
import pylab as P
from rfwrapper import rf
nsurveys = 2 #5
nbins = 14
name = ['SD', 'Interferom.']
#name = ['A', 'B', 'C', 'D', 'E']
cols = ['b', 'g', 'c', 'r', 'y']
P.subplot(111)
for j in range(nsurveys):
for i in range(1): #range(nbins): # FIXME: Just z=0.1
#if j == 0: continue
fname_smooth = "tmp/pk_surveys_smooth_powspec_"+str(j)+"_"+str(i)+".npy"
fname_constr = "tmp/pk_surveys_constraints_"+str(j)+"_"+str(i)+".npy"
k, pk, fbao, pksmooth = np.load(fname_smooth).T
kc, pkc, pkerr, fbao_c, pkcsmooth = np.load(fname_constr).T
pkerr[np.where(np.isinf(pkerr))] = 1e9
#pkerr[np.where(np.isnan(pkerr))] = 1e9
P.plot(kc, pkerr, color=cols[j], label=name[j], marker='.', lw=1.5)
print(pkerr)
# FIXME: h^-1 units?
#P.plot(k, pk)
#yup, ylow = rf.fix_log_plot(pkc, pkerr*pkc)
#P.errorbar(kc, pkc, yerr=[ylow, yup], marker='.', ls='none', color='r')
P.xscale('log')
P.yscale('log')
P.xlim((4e-3, 2.))
#P.ylim((3e-3, 1e1))
# Display options
P.legend(loc='upper left', prop={'size':'x-large'}, ncol=2)
P.ylabel("$\Delta P(k) / P(k) \; (z=0)$", fontdict={'fontsize':'22'})
P.xlabel("$k \,[Mpc^{-1}]$", fontdict={'fontsize':'20'})
fontsize = 18.
for tick in P.gca().yaxis.get_major_ticks():
tick.label1.set_fontsize(fontsize)
for tick in P.gca().xaxis.get_major_ticks():
tick.label1.set_fontsize(fontsize)
P.tight_layout()
P.show()
|
# import unittest
# from main import randomly_distribute
# class TestRandomlyDistribute(unittest.TestCase):
# def test_random_distribute(self):
# tokens = 10
# for i in range(10):
# groups = 5 + i
# result = randomly_distribute(tokens, groups)
# self.assertEqual(sum(result), tokens)
# self.assertEqual(len(result), groups)
# if __name__ == '__main__':
# unittest.main()
|
import numpy as np
import cv2
import argparse
import sys
import time
from calibration_store import load_stereo_coefficients
def depth_map(imgL, imgR):
""" Depth map calculation. Works with SGBM and WLS. Need rectified images, returns depth map ( left to right disparity ) """
# SGBM Parameters -----------------
window_size = 3 # wsize default 3; 5; 7 for SGBM reduced size image; 15 for SGBM full size image (1300px and above); 5 Works nicely
left_matcher = cv2.StereoSGBM_create(
minDisparity=-1,
numDisparities=12*16, # max_disp has to be divisible by 16, e.g. 192, 256
blockSize=window_size,
P1=8 * 3 * window_size,
P2=32 * 3 * window_size,
disp12MaxDiff=12,
uniquenessRatio=10,
speckleWindowSize=50,
speckleRange=32,
preFilterCap=63,
mode=cv2.STEREO_SGBM_MODE_SGBM_3WAY
)
right_matcher = cv2.ximgproc.createRightMatcher(left_matcher)
# FILTER Parameters
lmbda = 80000
sigma = 1.3
visual_multiplier = 6 #6
wls_filter = cv2.ximgproc.createDisparityWLSFilter(matcher_left=left_matcher)
wls_filter.setLambda(lmbda)
wls_filter.setSigmaColor(sigma)
displ = left_matcher.compute(imgL, imgR) # .astype(np.float32)/16
dispr = right_matcher.compute(imgR, imgL) # .astype(np.float32)/16
displ = np.int16(displ)
dispr = np.int16(dispr)
filteredImg = wls_filter.filter(displ, imgL, None, dispr) # important to put "imgL" here!!!
filteredImg = cv2.normalize(src=filteredImg, dst=filteredImg, beta=0, alpha=255, norm_type=cv2.NORM_MINMAX)
filteredImg = np.uint8(filteredImg)
#filteredImg = cv2.applyColorMap(filteredImg, cv2.COLORMAP_AUTUMN)
return filteredImg
def stereo_depth(calibration_file, left_source, right_source, is_real_time, save_path):
K1, D1, K2, D2, R, T, E, F, R1, R2, P1, P2, Q = load_stereo_coefficients(calibration_file) # Get cams params
leftFrame = cv2.imread(left_source,1)
leftFrame = cv2.rotate(leftFrame, cv2.ROTATE_90_COUNTERCLOCKWISE)
leftFrame = cv2.resize(leftFrame, (1824,1368))
#left_resized = cv2.resize(leftFrame, (0,0), fx=.3, fy=.3)
#cv2.imshow('left(R)', original_resized)
rightFrame = cv2.imread(right_source,1)
rightFrame = cv2.rotate(rightFrame, cv2.ROTATE_90_COUNTERCLOCKWISE)
rightFrame = cv2.resize(rightFrame, (1824,1368))
#right_resized = cv2.resize(rightFrame, (0,0), fx=.3, fy=.3)
#cv2.imshow('right(R)', original_resized)
height, width, channel = leftFrame.shape # We will use the shape for remap
# Undistortion and Rectification part!
leftMapX, leftMapY = cv2.initUndistortRectifyMap(K1, D1, R1, P1, (width, height), cv2.CV_32FC1)
left_rectified = cv2.remap(leftFrame, leftMapX, leftMapY, cv2.INTER_LINEAR, cv2.BORDER_CONSTANT)
left_rect_save = cv2.resize(left_rectified, (640,448))
cv2.imwrite(save_path+"/left_rectified.jpg",left_rect_save)
#original_resized = cv2.resize(left_rectified, (0,0), fx=.3, fy=.3)
#cv2.imshow('retleft(R)', original_resized)
rightMapX, rightMapY = cv2.initUndistortRectifyMap(K2, D2, R2, P2, (width, height), cv2.CV_32FC1)
right_rectified = cv2.remap(rightFrame, rightMapX, rightMapY, cv2.INTER_LINEAR, cv2.BORDER_CONSTANT)
right_rect_save = cv2.resize(right_rectified, (640,448))
cv2.imwrite(save_path+"/right_rectified.jpg",right_rect_save)
#original_resized = cv2.resize(right_rectified, (0,0), fx=.3, fy=.3)
#cv2.imshow('retright(R)', original_resized)
# We need grayscale for disparity map.
gray_left = cv2.cvtColor(left_rectified, cv2.COLOR_BGR2GRAY)
gray_right = cv2.cvtColor(right_rectified, cv2.COLOR_BGR2GRAY)
disparity_image = depth_map(gray_left, gray_right) # Get the disparity map
cv2.imwrite(save_path+"/depth.jpg", disparity_image)
return 1
# Show the images
#original_resized = cv2.resize(leftFrame, (0,0), fx=.3, fy=.3)
#cv2.imshow('left(R)', original_resized)
#original_resized = cv2.resize(rightFrame, (0,0), fx=.3, fy=.3)
#cv2.imshow('right(R)', original_resized)
#original_resized = cv2.resize(disparity_image, (0,0), fx=.2, fy=.2)
#cv2.imshow('Disparity', original_resized)
# Release the sources.
#cv2.destroyAllWindows()
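if __name__ == '__main__':
    # Hedged CLI sketch using the already-imported argparse module; the flag
    # names below are assumptions, not necessarily the original project's interface.
    parser = argparse.ArgumentParser(description='Compute a depth map from a stereo image pair.')
    parser.add_argument('--calibration_file', required=True, help='Path to the stereo calibration file')
    parser.add_argument('--left_source', required=True, help='Path to the left image')
    parser.add_argument('--right_source', required=True, help='Path to the right image')
    parser.add_argument('--save_path', required=True, help='Directory for rectified images and the depth map')
    args = parser.parse_args()
    stereo_depth(args.calibration_file, args.left_source, args.right_source, False, args.save_path)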
|
def for_FIVE():
"""printing numbr 'FIVE' using for loop"""
for row in range(5):
for col in range(4):
if col==0 and row!=3 or col==3 and row!=1 or row==0 or row==2 or row==4:
print("*",end=" ")
else:
print(" ",end=" ")
print()
def while_FIVE():
"""printing number 'FIVE' using while loop"""
i=0
while i<5:
j=0
while j<4:
if i==0 or i==2 or i==4 or (j==3 and i in (0, 2, 3, 4)) or (j==0 and i==1):
print("*",end=" ")
else:
print(" ",end=" ")
j+=1
i+=1
print()
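if __name__ == '__main__':
    # Quick demo: both implementations print the same 5x4 star pattern of the digit 5.
    for_FIVE()
    print()
    while_FIVE()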
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Written by Lucas Sinclair and Paul Rougieux.
JRC biomass Project.
Unit D1 Bioeconomy.
"""
# Third party modules #
# First party modules #
# Internal modules #
from cbmcfs3_runner.graphs.inventory import InventoryAtStart, InventoryAtEnd
from cbmcfs3_runner.graphs.harvest import HarvestExpProvVol, HarvestExpProvArea
# Constants #
__all__ = [
'InventoryAtStart', 'InventoryAtEnd',
'HarvestExpProvVol', 'HarvestExpProvArea'
]
|
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
from badges.utils import MetaBadge
from studio.helpers.mixins import BadgeMixin
from ..figures.models import Model3D
class ModelBadgeMixin:
model = Model3D
def get_user(self, instance):
return instance.designer
class StarBadge(BadgeMixin, ModelBadgeMixin, MetaBadge):
id = 'star'
title = _('Star')
description = _('Award for %(val)s views and more') % {'val': settings.MINIMUM_VIEWS_FOR_STAR_BADGE}
level = settings.BADGE_LEVEL.BADGE_STAR.value
def check_view_counter(self, instance):
return instance.view_counter >= settings.MINIMUM_VIEWS_FOR_STAR_BADGE
class CollectorBadge(BadgeMixin, ModelBadgeMixin, MetaBadge):
id = 'collector'
title = _('Collector')
description = _('Award for %(val)s models or more') % {'val': settings.MINIMUM_UPLOAD_FOR_COLLECTOR_BADGE}
level = settings.BADGE_LEVEL.BADGE_COLLECTOR.value
def check_model_counter(self, instance):
return self.model.objects.filter(
designer=instance.designer
).count() >= settings.MINIMUM_UPLOAD_FOR_COLLECTOR_BADGE
def register_badges():
for klass in (CollectorBadge, StarBadge):
klass()
|
from flask import Blueprint
bp = Blueprint('routes', __name__)
from . import index_view, auth_user, cuentas_views, librodiario_view
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Created by techno at 27/05/19
import numpy as np
my_array = np.array([1,2,3,4])
my_2d_array = np.array([[1, 2, 3, 4],
[5, 6, 7, 8]])
my_3d_array = np.array([ [[ 1, 2, 3, 4],
[ 5, 6, 7, 8]],
[[ 1, 2, 3, 4],
[ 9, 10, 11, 12]]])
# Select the element at the 1st index
print(my_array[1])
# Select the element at row 1 column 2
print(my_2d_array[1][2])
# Select the elements at block 1, row 1, iterating over the last axis
for i in range(4):
print(my_3d_array[1,1,i])
#y = np.ones([4,4])
#print(y)
# Select items at index 0 and 1
print(my_array[0:2])
# Select items at row 0 and 1, column 1
print(my_2d_array[0:2,1])
# Select items at row 1
# This is the same as saying `my_3d_array[0,:,:]`
print(my_3d_array[0,...])
"""
a[start:end] # items start through the end (but the end is not included!)
a[start:] # items start through the rest of the array
a[:end] # items from the beginning through the end (but the end is not included!)
"""
print()
# Try out a simple example
print(my_array[my_array<2])
# Specify a condition
bigger_than_3 = (my_3d_array >= 3)
# Use the condition to index our 3d array
print(my_3d_array[bigger_than_3])
print(" first print")
# Select elements at (1,0), (0,1), (1,2) and (0,0)
print(my_2d_array[[1, 0, 1, 0],[0, 1, 2, 0]])
print("second print")
# Select a subset of the rows and columns
print(my_2d_array[[1, 0, 1, 0]][:,[0,1,2,0]])
|
from bype import Bype
class MyServer(object):
def __init__(self, host, port=9090):
self.host = host
self.port = port
self.isrunning = False
def start(self):
self.isrunning = True
print 'server started at %s:%d' % (self.host, self.port)
def _handle_isprime(self, context):
number = context['number']
isprime = True
for i in range(2, number / 2 + 1):
if (number % i) == 0:
isprime = False
break
return 200, {'isprime': isprime}
def request(self, url, context=None):
assert self.isrunning
context = context or dict()
status = 404
response = {
'code': status,
'error': 'Page not found'
}
if url == '/isprime':
status, response = self._handle_isprime(context)
return status, response
class MyWorkflow(Bype):
def __init__(self, docstring=None):
self.server = None
self.status = None
self.response = {}
def kick_off_server(self, host, port=9090):
self.server = MyServer(host, port=port)
self.server.start()
def make_request(self, url, context=None):
print 'requesting %s...' % url
self.status, self.response = self.server.request(url, context=context)
print 'got response', self.status, self.response
def await_response(self, status, response=None):
print 'assertion', status, response, 'against', self.status, self.response
response = response or dict()
assert self.status == status
for key in response:
assert response[key] == self.response[key]
def test_simple():
MyWorkflow('''
Simple request-response workflow
'''
).kick_off_server(
'localhost',
port=9090
).make_request(
'/isprime',
{
'number': 13
}
).await_response(
200,
{
'isprime': True
}
)
|
from flask import Flask, render_template
app = Flask(__name__)
@app.route('/api/')
def index():
return "Hello World"
if __name__ == '__main__':
app.run(debug=True)
|
from output.models.ms_data.regex.re_i10_xsd.re_i10 import (
Regex,
Doc,
)
__all__ = [
"Regex",
"Doc",
]
|
import os, sys
os.chdir("G:\\My Drive\\Academic\\Research\\Neural Heap")
import tensorflow as tf
class TFGraphUtils(object):
def __init__(
self):
pass
def set_args(
self,
config):
self.config = config
def initialize_weights_cpu(
self,
name,
shape,
standard_deviation=0.01,
decay_factor=None,
collection=None):
with tf.device("/cpu:0"):
weights = tf.get_variable(
name,
shape,
initializer=tf.truncated_normal_initializer(
stddev=standard_deviation,
dtype=tf.float32),
dtype=tf.float32)
if decay_factor is not None and collection is not None:
weight_decay = tf.multiply(
tf.nn.l2_loss(weights),
decay_factor,
name=(name + self.config.EXTENSION_LOSS))
tf.add_to_collection(collection, weight_decay)
return weights
def initialize_biases_cpu(
self,
name,
shape):
with tf.device("/cpu:0"):
biases = tf.get_variable(
name,
shape,
initializer=tf.constant_initializer(1.0),
dtype=tf.float32)
return biases
def l2_loss(
self,
prediction,
labels,
collection):
l2_norm = tf.nn.l2_loss(
labels - prediction)
tf.add_to_collection(collection, l2_norm)
return l2_norm
def cross_entropy(
self,
prediction,
labels,
collection):
entropy = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits_v2(
labels=labels,
logits=prediction))
tf.add_to_collection(collection, entropy)
return entropy
def minimize(
self,
loss,
parameters):
learning_rate = tf.train.exponential_decay(
self.config.INITIAL_LEARNING_RATE,
tf.get_collection(self.config.GLOBAL_STEP_OP)[0],
self.config.DECAY_STEPS,
self.config.DECAY_FACTOR,
staircase=True)
optimizer = tf.train.AdamOptimizer(learning_rate)
gradient = optimizer.minimize(
loss,
var_list=parameters)
return gradient
def argmax_state_tuple(
self,
state_buffer,
best_q_enumerated):
state_batch = []
for z in range(self.config.LSTM_DEPTH):
cell_state = tf.stack([
state_buffer[a][z].c for a in range(self.config.OP_SIZE)
], axis=1)
cell_state = tf.gather_nd(
cell_state,
best_q_enumerated)
hidden_state = tf.stack([
state_buffer[a][z].h for a in range(self.config.OP_SIZE)
], axis=1)
hidden_state = tf.gather_nd(
hidden_state,
best_q_enumerated)
lstm_tuple = tf.contrib.rnn.LSTMStateTuple(
cell_state,
hidden_state)
state_batch += [lstm_tuple]
return tuple(state_batch)
def expand_hidden_state(
self,
actions_batch,
x_inputs,
lstm_forward,
state_batch,
q_function_w,
q_function_b):
hidden_buffer = []
state_buffer = []
q_buffer = []
for a in actions_batch:
a_inputs = tf.concat([
x_inputs,
a], axis=1)
q_hidden_batch, q_state_batch = lstm_forward.call(
a_inputs,
state_batch)
hidden_buffer += [q_hidden_batch]
state_buffer += [q_state_batch]
q_buffer += [tf.add(tf.tensordot(
q_hidden_batch,
q_function_w,
1), q_function_b)]
best_q = tf.reduce_max(
tf.stack(q_buffer, axis=1),
axis=1)
best_q_indices = tf.argmax(
tf.stack(q_buffer, axis=1),
axis=1,
output_type=tf.int32)
best_q_enumerated = tf.stack([
tf.range(self.config.BATCH_SIZE, dtype=tf.int32),
best_q_indices], axis=1)
return (hidden_buffer,
state_buffer,
best_q,
best_q_enumerated,
best_q_indices)
def prepare_inputs_actions(
self,
x_batch):
inputs_batch = [
tf.reshape(tf.slice(x_batch, [0, i, 0], [
self.config.BATCH_SIZE,
1,
self.config.DATASET_RANGE]),
[self.config.BATCH_SIZE, self.config.DATASET_RANGE])
for i in range(self.config.DATASET_COLUMNS * 2)]
actions_batch = [
tf.tile(
tf.reshape(
tf.one_hot(i, self.config.OP_SIZE),
[1, self.config.OP_SIZE]),
[self.config.BATCH_SIZE, 1]) for i in range(self.config.OP_SIZE)]
return inputs_batch, actions_batch
|
"""
Subsample from CS6 GT only those images that are used in the Dets or HP JSON.
srun --mem 10000 python tools/face/mod_json_gt_det.py
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import sys
sys.path.append('./tools')
import numpy as np
import os, cv2
import argparse
import os.path as osp
import time
import skvideo.io
import json
import csv
from six.moves import xrange
from PIL import Image
from tqdm import tqdm
GT_JSON_FILE = 'data/CS6_annot/cs6-train-easy-gt.json'
HP_JSON_FILE = 'data/CS6_annot/cs6-train-easy-hp.json'
# OUT_DIR = '/mnt/nfs/work1/elm/arunirc/Data/CS6_annots'
OUT_DIR = 'Outputs/modified_annots/'
DEBUG = False
def parse_args():
parser = argparse.ArgumentParser(description='Modifying CS6 ground truth data')
parser.add_argument(
'--output_dir', help='directory for saving outputs',
default=OUT_DIR, type=str
)
parser.add_argument(
'--gt_json_file', default=GT_JSON_FILE
)
parser.add_argument(
'--hp_json_file', default=HP_JSON_FILE
)
parser.add_argument(
'--imdir', help="root directory for loading dataset images",
default='data/CS6_annot', type=str)
return parser.parse_args()
_GREEN = (18, 127, 15)
color_dict = {'red': (0,0,225), 'green': (0,255,0), 'yellow': (0,255,255),
'blue': (255,0,0), '_GREEN':(18, 127, 15), '_GRAY': (218, 227, 218)}
# -----------------------------------------------------------------------------------
def draw_detection_list(im, dets):
# -----------------------------------------------------------------------------------
""" Draw bounding boxes on a copy of image and return it.
[x0 y0 w h conf_score]
"""
im_det = im.copy()
if dets.ndim == 1:
dets = dets[np.newaxis,:] # handle single detection case
# format into [xmin, ymin, xmax, ymax]
dets[:, 2] = dets[:, 2] + dets[:, 0]
dets[:, 3] = dets[:, 3] + dets[:, 1]
for i, det in enumerate(dets):
bbox = dets[i, :4]
x0, y0, x1, y1 = [int(x) for x in bbox]
line_color = color_dict['yellow']
cv2.rectangle(im_det, (x0, y0), (x1, y1), line_color, thickness=2)
return im_det
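# Hypothetical usage (the names below are illustrative, not from the original script):
# im = cv2.imread('some_frame.jpg')
# dets = np.array([[10, 20, 50, 80, 0.9]])    # rows of [x0, y0, w, h, conf_score]
# vis = draw_detection_list(im, dets.copy())  # pass a copy: the function converts dets
#                                             # to [xmin, ymin, xmax, ymax] in place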
if __name__ == '__main__':
args = parse_args()
if not osp.exists(args.output_dir):
os.makedirs(args.output_dir, exist_ok=True)
# Load gt JSON
with open(args.gt_json_file) as f:
ann_dict = json.load(f)
# Load HP JSON
with open(args.hp_json_file) as f:
hp_ann_dict = json.load(f)
# Keep gt annots only for images in HP annots
hp_images = set([ x['file_name'] for x in hp_ann_dict['images'] ])
keep_images = [x for x in ann_dict['images'] if x['file_name'] in hp_images]
keep_image_ids = set([x['id'] for x in keep_images])
keep_annots = [x for x in ann_dict['annotations'] if x['image_id'] in keep_image_ids]
# replace the images and annotations
ann_dict['images'] = keep_images
ann_dict['annotations'] = keep_annots
out_file = osp.join(args.output_dir,
osp.splitext(osp.basename(args.gt_json_file))[0]) + '-sub.json'
with open(out_file, 'w', encoding='utf8') as outfile:
outfile.write(json.dumps(ann_dict))
|
#!/usr/bin/python
#given a smarts rxn file (backwards), a core scaffold smarts file (with name containing connecting atoms)
# an sdf file with the results of clustering scaffolds and an input sdf file and an
#output prefix, output the extracted reactant conformations aligned to their scaffold
import sys,gzip,argparse
from rdkit.Chem import AllChem
def subMol(mol, match):
#not sure why this functionality isn't implemented natively
#but get the interconnected bonds for the match
atoms = set(match)
bonds = set()
for a in atoms:
atom = mol.GetAtomWithIdx(a)
for b in atom.GetBonds():
if b.GetOtherAtomIdx(a) in atoms:
bonds.add(b.GetIdx())
return AllChem.PathToSubmol(mol,list(bonds))
#return index of closest scaffold in scaffold to mol
def closestScaffold(scaffolds, pattern, core, mol):
ret = -1
match = mol.GetSubstructMatch(pattern)
if match:
sub = subMol(mol, match)
cmatch = sub.GetSubstructMatch(core)
if cmatch:
min = float('inf')
for (i,(s,smatch)) in enumerate(scaffolds):
r = AllChem.GetBestRMS(s, sub, maps=[zip(cmatch,smatch)])
if r < min:
min = r
ret = i
mmatch = mol.GetSubstructMatch(core)
AllChem.GetBestRMS(s,mol,maps=[zip(mmatch,smatch)])
return ret
#MAIN
parser = argparse.ArgumentParser()
parser.add_argument('-r','--rxn', help="Reaction file")
parser.add_argument('-c','--core',help="Core scaffold with connecting atoms in name")
parser.add_argument('-i','--input',help="Input conformers")
parser.add_argument('-s','--scaffolds',help="Scaffold conformers")
parser.add_argument('-o','--output',help="Output prefix")
args = parser.parse_args()
rxnf = open(args.rxn)
rxnsm = rxnf.readline().split()[0] #ignore any name
rxn = AllChem.ReactionFromSmarts(rxnsm)
rxn.Initialize()
if rxn.GetNumReactantTemplates() != 1:
print "Need backwards reaction"
sys.exit(-1)
coref = open(args.core)
corel = coref.readline()
coreconnects = corel.split()[1:]
core = AllChem.MolFromSmarts(corel.split()[0])
inscaffolds = AllChem.SDMolSupplier(args.scaffolds,False)
if inscaffolds is None:
print "Could not open ",args.scaffolds
sys.exit(-1)
inmols = AllChem.SDMolSupplier(args.input)
if inmols is None:
print "Could not open ",args.input
sys.exit(-1)
smart = AllChem.MolToSmarts(rxn.GetReactantTemplate(0))
pattern = AllChem.MolFromSmarts(smart)
#read in scaffolds
scaffolds = list()
for mol in inscaffolds:
#compute match of core
cmatch = mol.GetSubstructMatch(core)
scaffolds.append((mol,cmatch))
#setup output file, one for each reactant product
outputs = list()
for i in xrange(rxn.GetNumProductTemplates()):
outputs.append(list())
for j in xrange(len(scaffolds)):
sdwriter = AllChem.SDWriter("%s_%d_%d.sdf" % (args.output,i,j))
outputs[i].append(sdwriter)
for mol in inmols:
#for each mol, decompose it into its reactants
mol = AllChem.AddHs(mol)
#figure out which scaffold conformation is closest
c = closestScaffold(scaffolds, pattern, core, mol)
prods = rxn.RunReactants([mol])
if c >= 0:
for p in prods: #there may be multiple possible products
for (i,react) in enumerate(p):
react = AllChem.RemoveHs(react)
react.SetProp('_Name',AllChem.MolToSmiles(react))
outputs[i][c].write(react)
|
"""
You're given a string s consisting solely of "(" and ")". Return whether the parentheses are balanced.
Constraints
n ≤ 100,000 where n is the length of s.
https://binarysearch.com/problems/Balanced-Brackets
"""
from collections import deque
class Solution:
def solve(self, s):
left = 0
for c in s:
if c == ")":
if left == 0:
return False
else:
left -= 1
else:
left += 1
return left == 0
def solve2(self, s):
slen = len(s)
if slen == 0:
return True
stack = deque([])
for c in s:
if c == ")" and (not stack or stack.pop() != "("):
return False
elif c == "(":
stack.append(c)
return len(stack) == 0
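# Minimal usage sketch (not part of the original solution file):
# sol = Solution()
# assert sol.solve("(())") is True
# assert sol.solve("(()") is False
# assert sol.solve2(")(") is False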
|
# Adam Shaat
# program that outputs whether it is a weekday
# or a weekend
import datetime
now = datetime.datetime.now()
day = now.weekday()
weekend = (5,6)
dayname ={0:'Monday', 1:'Tuesday', 2:'Wednesday', 3:'Thursday', 4:'Friday', 5:'Saturday', 6:'Sunday'}
# assigning to print the weekday
print(dayname[day])
if day in weekend:
print ("It is the weekend, yay!")
else:
print("unfortunately today is a weekday.")
|
r"""
Contains two classes used for sweep functionalities.
NOTE : Neither of these classes is intended to be directly instantiated by the user.
:class:`Simulation <quanguru.classes.Simulation.Simulation>` objects **have** ``Sweep/s`` as their attributes, and
``_sweep/s`` are intended to be created by calling the relevant methods over ``Simulation.Sweep``.
.. currentmodule:: quanguru.classes.QSweep
.. autosummary::
_sweep
Sweep
.. |c| unicode:: U+2705
.. |x| unicode:: U+274C
.. |w| unicode:: U+2000
======================= ================== ============== ================ ===============
**Function Name** **Docstrings** **Examples** **Unit Tests** **Tutorials**
======================= ================== ============== ================ ===============
`_sweep` |w| |w| |w| |c| |w| |w| |x| |w| |w| |x| |w| |w| |x|
`Sweep` |w| |w| |w| |c| |w| |w| |x| |w| |w| |x| |w| |w| |x|
======================= ================== ============== ================ ===============
"""
from functools import reduce
from numpy import arange, logspace
from .base import qBase, _recurseIfList
from .baseClasses import updateBase
__all__ = [
'Sweep'
]
class _sweep(updateBase): # pylint: disable=too-many-instance-attributes
r"""
Implements methods and attributes to sweep the value of an attribute of some objects over a list of values.
The default sweep :meth:`~_defSweep` sweeps the value for a given attribute
(a string stored in :py:attr:`~_sweep.key`) of objects in ``subSys`` dictionary.
The list of values (stored in :py:attr:`_sweepList`) to be swept is set either directly
by giving a ``list`` or the ``sweepMin-sweepMax-sweepStep`` with ``logSweep``.
Default sweep function can be replaced with any custom method by re-assigning the :py:attr:`~sweepFunction` to the
function reference. The default sweep method requires the index of the value from the list of values to set the next
value; this index is provided by the modularSweep and is useful for multi-parameter sweeps. It keeps a value fixed
by re-assigning it using the same index, and :class:`~paramBoundBase` and other relevant classes use the
custom setattr methods (see :meth:`~setAttr` and :meth:`~setAttrParam`) to make sure that the ``paramUpdated`` boolean
is not set to ``True`` for the same value. This class implements a single sweep, and multi parameter sweep is
achieved by the :class:`~Sweep` class.
"""
#: (**class attribute**) class label used in default naming
label = '_sweep'
#: (**class attribute**) number of instances created internally by the library
_internalInstances: int = 0
#: (**class attribute**) number of instances created explicitly by the user
_externalInstances: int = 0
#: (**class attribute**) number of total instances = _internalInstances + _externalInstances
_instances: int = 0
__slots__ = ['sweepMax', 'sweepMin', 'sweepStep', '_sweepList', 'logSweep', 'multiParam', '_sweepIndex']
#@sweepInitError
def __init__(self, **kwargs):
super().__init__()
#: protected attribute pointing to a sweep function, by default :meth:`~_defSweep`. This attribute is get&set
#: through the sweepFunction property to replace the default with a customized sweep method.
self._updateBase__function = self._defSweep # pylint: disable=assigning-non-slot
#: maximum value for the swept parameter, used with other attributes to create the sweepList
self.sweepMax = None
#: minimum value for the swept parameter, used with other attributes to create the sweepList
self.sweepMin = None
#: corresponds to the step size in a linearly spaced sweepList, or number of steps in logarithmic case,
#: used with other attributes to create the sweepList
self.sweepStep = None
#: protected attribute to store a list of values for the swept parameter. Can be given a full list or
#: be created using sweepMin-sweepMax-sweepStep values.
self._sweepList = None
#: boolean to create either linearly or logarithmically spaced list values (from sweepMin-sweepMax-sweepStep).
self.logSweep = False
#: boolean to determine whether two different sweeps are swept simultaneously (same length of lists and the pair of
#: values at the same index are swept together) or as a multi-parameter sweep (fix one, sweep the other, and repeat).
self.multiParam = False
#: stores the index of the value (from the _sweepList) currently being assigned by the sweep function. Used by
#: the default methods but also useful for custom methods. It is calculated by the modular arithmetic in
#: modularSweep and passed to here by the :class:`~Sweep` object containing self in its subSys. It starts from -1
#: and the corresponding property returns _sweepIndex + 1, while the :meth:`~runSweep` sets it to ind-1 for a given
#: ind from modularSweep. This whole ordeal is to make sure that Python list indexing and modular arithmetic
#: properly agree for the sweep functionality. I feel it can be improved but will leave it as it is for now.
self._sweepIndex = -1
self._named__setKwargs(**kwargs) # pylint: disable=no-member
@property
def index(self):
r"""
returns ``self._sweepIndex + 1``. The reason for the +1 is explained in :py:attr:`~_sweepIndex`. There is no setter; the value
of _sweepIndex is updated by the :meth:`~runSweep` and is an internal process.
"""
return self._sweepIndex + 1
@property
def sweepFunction(self):
r"""
gets and sets :py:attr:`~_updateBase__function`, which should point to a Callable.
"""
return self._updateBase__function # pylint: disable=no-member
@sweepFunction.setter
def sweepFunction(self, func):
self._updateBase__function = func # pylint: disable=assigning-non-slot
@property
def sweepKey(self):
r"""
gets and sets :py:attr:`~_updateBase__key`, which should be string.
"""
return self._updateBase__key # pylint: disable=no-member
@sweepKey.setter
def sweepKey(self, keyStr):
self._updateBase__key = keyStr # pylint: disable=assigning-non-slot
@property
def sweepList(self):
r"""
gets and sets :py:attr:`~_sweepList`. The setter requires a list input; if it is not set, the getter tries creating the
list (and setting :py:attr:`~_sweepList`) using sweepMin-sweepMax-sweepStep attributes.
"""
if self._sweepList is None:
try:
if self.logSweep is False:
self._sweepList = arange(self.sweepMin, self.sweepMax + self.sweepStep, # pylint: disable=no-member
self.sweepStep) # pylint: disable=no-member
elif self.logSweep is True:
self._sweepList = logspace(self.sweepMin, self.sweepMax,
num=self.sweepStep, base=10.0) # pylint: disable=no-member
except: #pylint:disable=bare-except # noqa: E722
pass
return self._sweepList
@sweepList.setter
def sweepList(self, sList):
self._sweepList = sList
@staticmethod
def _defSweep(self): # pylint: disable=bad-staticmethod-argument
r"""
This is the default sweep function. It just calls
:meth:`_runUpdate <quanguru.classes.updateBase.updateBase._runUpdate>`, feeding it the value from the
``sweepList`` at the position given by :py:attr:`~index` (there is no ``ind`` argument; the index is read from
the instance). The :meth:`_runUpdate <quanguru.classes.updateBase.updateBase._runUpdate>`
method simply sets the attribute (for the given key) of every ``subSys`` to the given value (``val``).
The modularSweep method uses the product of the ``sweepList`` length/s (stored in the
__inds attribute of :class:`Sweep` instances) as a loop range, and the current loop counter is used by
:meth:`~_indicesForSweep` to calculate which index of each _sweep is currently needed.
"""
val = self.sweepList[self.index]
self._runUpdate(val)
def runSweep(self, ind):
r"""
Wraps ``_updateBase__function`` so that this is always the function called to run the
sweeps. This is not essential and could be removed, but it creates a kind of duck typing with the ``Sweep`` class,
which is useful when we might want to use a nested sweep.
"""
self._sweepIndex = ind-1 # pylint: disable=assigning-non-slot
self._updateBase__function(self) # pylint: disable=no-member
class Sweep(qBase):
r"""
A container class for :class:`_sweep` objects and relevant methods for creating/removing and carrying
multi-parameter sweeps. It stores :class:`_sweep` objects in its ``subSys``
dictionary, and it has two additional private attributes to store sweep lengths and their multiplications, which are
used in modularSweep and by :meth:`~_indicesForSweep` to carry multi parameter sweeps.
Instances of this
class are used as attributes of :class:`Simulation <quanguru.classes.Simulation.Simulation>` objects, and those are
intended to be used for ``_sweep`` creations.
"""
#: Used in default naming of objects. See :attr:`label <quanguru.classes.QUni.qUniversal.label>`.
label = 'Sweep'
#: (**class attribute**) number of instances created internally by the library
_internalInstances: int = 0
#: (**class attribute**) number of instances created explicitly by the user
_externalInstances: int = 0
#: (**class attribute**) number of total instances = _internalInstances + _externalInstances
_instances: int = 0
__slots__ = ['__inds', '__indMultip']
# TODO init errors
def __init__(self, **kwargs):
super().__init__()
self.__inds = []
r"""
a list of ``sweepList`` length/s of multi-parameter ``_sweep`` object/s in ``subSys`` dictionary, meaning the
length for simultaneously swept ``_sweep`` objects is not repeated. The values are
appended to the list if it is the first ``_sweep`` to be included into ``subSys`` or ``multiParam is True``.
"""
self.__indMultip = 1
r"""
the multiplication of all the indices in ``inds``. This value is used as the loop range by modularSweep.
"""
self._named__setKwargs(**kwargs) # pylint: disable=no-member
@property
def inds(self):
r"""
``returns _Sweep__inds`` and there is no setter
"""
return self._Sweep__inds
@property
def indMultip(self):
r"""
``returns _Sweep__indMultip``, and there is no setter
NOTE : The reason this property returns a pre-assigned value rather than calculating it from the ``inds`` is to
avoid calculating it over and over again, which could be avoided by checking if ``_Sweep__indMultip is None``,
but that might create other issues, such as re-running the same simulation after a change in ``sweepList``
length/s. It still can be improved, and it is possible to avoid such issues and get rid of :meth:`prepare`,
which is called in ``run`` methods of ``Simulations``, by some modifications in these properties.
"""
return self._Sweep__indMultip
@property
def sweeps(self):
r"""
The sweeps property wraps the ``subSys`` dictionary to create new terminology; it works exactly as
:meth:`subSys <quanguru.classes.base.qBase.subSys>`.
"""
return self._qBase__subSys # pylint: disable=no-member
@sweeps.setter
def sweeps(self, sysDict):
super().addSubSys(sysDict)
@_recurseIfList
def removeSweep(self, sys):
r"""
Removes a ``_sweep`` itself, or all the ``_sweep`` objects that contain a particular ``sys`` in them.
Since it uses :meth:`removeSubSys <quanguru.classes.base.qBase.removeSubSys>`, it works exactly the same,
meaning
names/aliases/objects/listOfObjects can be used to remove.
If the argument ``sys`` is an :class:`_sweep` object, this method calls
:meth:`removeSubSys <quanguru.classes.base.qBase.removeSubSys>` (since ``_sweep`` objects are stored in
``subSys`` dictionary of ``Sweep`` objects).
Else, it calls the :meth:`removeSubSys <quanguru.classes.base.qBase.removeSubSys>` on every ``_sweep`` in its
``subSys`` dictionary (since ``systems`` are stored in ``subSys`` dictionary of ``_sweep`` objects).
"""
if isinstance(sys, _sweep):
super().removeSubSys(sys, _exclude=[])
else:
sweeps = list(self.subSys.values())
for sweep in sweeps:
sweep.removeSubSys(sys, _exclude=[])
if len(sweep.subSys) == 0:
super().removeSubSys(sweep, _exclude=[])
def createSweep(self, system=None, sweepKey=None, **kwargs):
r"""
Creates an instance of ``_sweep`` and assigns its ``system`` and ``sweepKey`` to the given system
and sweepKey arguments of this method. Keyword arguments are used to set the other attributes of the newly
created ``_sweep`` object.
Parameters
----------
system : Any
Since ``system`` property setter of ``_sweep`` behaves exactly as
:meth:`subSys <quanguru.classes.base.qBase.subSys>` setter, this can be various things, from a single
system to the name/alias of the system, or from a class to a list/tuple containing any combination
of these.
sweepKey : str
Name of the attribute of system/s that will be swept
:returns: The new ``_sweep`` instance.
"""
if system is None:
system = self.superSys.superSys
if system is None:
raise ValueError('?')
newSweep = _sweep(superSys=self, subSys=system, sweepKey=sweepKey, **kwargs)
if system is not self.auxObj:
if not isinstance(sweepKey, str):
raise ValueError("key")
# newSweep._aux = True #pylint: disable=protected-access
if hasattr(list(newSweep.subSys.values())[0], sweepKey):
for sys in newSweep.subSys.values():
if not hasattr(sys, sweepKey):
raise AttributeError("?")
else:
newSweep._aux = True #pylint: disable=protected-access
# ignores when object is given with a key it does not have
#elif not hasattr(list(newSweep.subSys.values())[0], sweepKey):
# newSweep._aux = True #pylint: disable=protected-access
super().addSubSys(newSweep)
return newSweep
def prepare(self):
r"""
This method is called inside the ``run`` method of ``Simulation`` object/s to update the ``inds`` and ``indMultip``
attributes/properties. The reason for this is discussed briefly in :meth:`indMultip`, but it is basically to ensure that
any changes to ``sweepList/s`` or ``multiParam/s`` are accurately used/reflected (especially on re-runs).
"""
if len(self.subSys) > 0:
self._Sweep__inds = [] # pylint: disable=assigning-non-slot
for indx, sweep in enumerate(self.subSys.values()):
if ((sweep.multiParam is True) or (indx == 0)):
self._Sweep__inds.insert(0, len(sweep.sweepList))
self._Sweep__indMultip = reduce(lambda x, y: x*y, self._Sweep__inds) # pylint: disable=assigning-non-slot
def runSweep(self, indList):
r"""
Called in modularSweep to run all the ``_sweep``
objects in a ``Sweep``. Indices from a given list ``indList`` are used by the ``runSweep`` method of the ``_sweep``
objects, and it switches to a new index if ``multiParam is True``. This means that the ``_sweep`` objects
**should be created in order**: objects that run simultaneously **have to be** added to
``subSys`` one after the other. Also, for nested Sweeps, the indList should be a properly nested list.
"""
indx = 0
for sweep in self.sweeps.values():
if sweep.multiParam is True:
indx += 1
sweep.runSweep(indList[indx])
# function used in modular sweep
@staticmethod
def _indicesForSweep(ind, *args):
r"""
Method used in modularSweep to calculate the indices for each sweepList from the loop counter ``ind`` using the
total lengths ``*args``. It is hard to describe the exact calculation in words, but it is easy to see from
the math (TODO) which I will write up later.
The loop counter can at most be :math:`(\prod_{i = 1}^{len(args)} args[i]) - 1`, and multi-parameter
sweeps loop over the first sweepList while fixing the others. So, at every ind = args[0] the first list should
start again from zero and the second list moves to the next item, and this relation goes up the chain, e.g. at every
ind = args[0]*args[1] the index of the third needs to be increased, and so on. Therefore, the current index for
the first sweepList is simply the remainder of ind with args[0].
"""
indices = []
for arg in args:
remain = ind%arg
ind = (ind-remain)/arg
indices.insert(0, int(remain))
return indices
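# A small worked example (illustration only, not part of the original module): for two
# sweepLists of lengths 3 and 2, _indicesForSweep maps the loop counter 0..5 to
# [outer, inner] index pairs, with the first (inner) list cycling fastest:
#   Sweep._indicesForSweep(0, 3, 2) -> [0, 0]
#   Sweep._indicesForSweep(2, 3, 2) -> [0, 2]
#   Sweep._indicesForSweep(3, 3, 2) -> [1, 0]
#   Sweep._indicesForSweep(5, 3, 2) -> [1, 2]
# i.e. the inner index is ind % 3 and the outer index advances every 3 steps.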
|
# -*- coding: utf-8 -*-
# Pytest test suite
import pytest
from result import Result, Ok, Err
@pytest.mark.parametrize('instance', [
Ok(1),
Result.Ok(1),
])
def test_ok_factories(instance):
assert instance._value == 1
assert instance.is_ok() is True
@pytest.mark.parametrize('instance', [
Err(2),
Result.Err(2),
])
def test_err_factories(instance):
assert instance._value == 2
assert instance.is_err() is True
def test_eq():
assert Ok(1) == Ok(1)
assert Err(1) == Err(1)
assert Ok(1) != Err(1)
assert Ok(1) != Ok(2)
assert not (Ok(1) != Ok(1))
assert Ok(1) != "abc"
assert Ok("0") != Ok(0)
def test_hash():
assert len({Ok(1), Err("2"), Ok(1), Err("2")}) == 2
assert len({Ok(1), Ok(2)}) == 2
assert len({Ok("a"), Err("a")}) == 2
def test_repr():
assert Ok(u"£10") == eval(repr(Ok(u"£10")))
assert Ok("£10") == eval(repr(Ok("£10")))
def test_ok():
res = Ok('haha')
assert res.is_ok() is True
assert res.is_err() is False
assert res.value == 'haha'
def test_err():
res = Err(':(')
assert res.is_ok() is False
assert res.is_err() is True
assert res.value == ':('
def test_ok_method():
o = Ok('yay')
n = Err('nay')
assert o.ok() == 'yay'
assert n.ok() is None
def test_err_method():
o = Ok('yay')
n = Err('nay')
assert o.err() is None
assert n.err() == 'nay'
def test_no_arg_ok():
top_level = Ok()
assert top_level.is_ok() is True
assert top_level.ok() is True
class_method = Result.Ok()
assert class_method.is_ok() is True
assert class_method.ok() is True
def test_no_constructor():
"""
Constructor should not be used directly.
"""
with pytest.raises(RuntimeError):
Result(is_ok=True, value='yay')
|
# Write a function day_name that converts an integer number 0 to 6 into the name of a day.
# Assume day 0 is “Sunday”.
# Once again, return None if the arguments to the function are not valid. Here are some tests that should pass:
# test(day_name(3) == "Wednesday")
# test(day_name(6) == "Saturday")
# test(day_name(42) == None)
import sys
weekday = ["Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"]
def day_name(d):
if 6 >= int(d) >= 0:
return weekday[d]
else:
return
def test(did_pass):
""" Print the result of a test. """
linenum = sys._getframe(1).f_lineno # Get the caller's line number.
if did_pass:
msg = "Test at line {0} ok.".format(linenum)
else:
msg = ("Test at line {0} FAILED.".format(linenum))
print(msg)
def test_suite():
""" Run the suite of tests for code in this module (this file).
"""
test(day_name(3) == "Wednesday")
test(day_name(6) == "Saturday")
test(day_name(42) == None)
test_suite() # Here is the call to run the tests
|
import sys
import random
from PyQt5 import QtCore
from PyQt5.QtWidgets import QApplication
from rx import Observable
from rx.subjects import Subject
from rx.concurrency import QtScheduler
from client.client_window import ClientWindow
REFRESH_STOCK_INTERVAL = 100
def random_stock(x):
symbol_names = [
['ABC', 'Abc Manufacturing'],
['DEF', 'Desert Inc.'],
['GHI', 'Ghi Ghi Inc.'],
['A', 'A Plus Consulting'],
['GS', 'Great Security Inc'],
['GO', 'Go Go Consulting'],
]
stock = random.choice(symbol_names)
return [
stock[0],
stock[1],
round(random.uniform(21, 22), 2),
round(random.uniform(20, 21), 2)
]
def order_is_valid(order):
if order['symbol'] != '' and order['price'] >= 0.01:
return True
return False
if __name__ == '__main__':
app = QApplication(sys.argv)
scheduler = QtScheduler(QtCore)
stock_prices = Observable.interval(REFRESH_STOCK_INTERVAL, scheduler) \
.map(random_stock) \
.publish()
client_window = ClientWindow(stock_prices_stream=stock_prices)
client_window.get_orders() \
.filter(order_is_valid) \
.subscribe(lambda x: print(x), lambda x: print('error'))
stock_prices.connect()
client_window.show()
sys.exit(app.exec_())
|
import matplotlib.pyplot as plt
import pandas as pd
def print_data(csv_1, csv_2, ax, title):
# Uses the first column for the x axes.
csv_1.plot(x=csv_1.columns[0], marker='o', xticks=[32,256,512], ax=ax)
csv_2.plot(x=csv_2.columns[0], marker='x', xticks=[32,256,512], ax=ax)
# Set the title.
ax.set_title(title, fontsize=20)
ax.set_yscale("log")
def main():
"""
Print the CPU and GPU execution times obtained from
the "lib/examples/op_time_functions.cpp" test (that records
the execution time of every function).
The file obtained from the test should be placed in the "data/"
directory.
"""
NB_OPERATIONS = 12 * 2
NB_ROWS = 4
NB_COLS = 6
OPERATIONS = ["Linear",
"Linear derivative",
"Binary Step",
"Binary Step derivative",
"Sigmoid",
"Sigmoid derivative",
"Relu",
"Relu derivative",
"Tanh",
"Tanh derivative",
"Softmax",
"Softmax derivative",
"MSE",
"MSE derivative",
"MAE",
"MAE derivative",
"MBE",
"MBE derivative",
"Hinge Loss",
"Hinge Loss derivative",
"BCE",
"BCE derivative",
"CE",
"CE derivative"]
XLABEL = "Size of a side of\nthe squared input matrices"
YLABEL = "Execution time (ms)"
fig, ax = plt.subplots(nrows=NB_ROWS, ncols=NB_COLS)
plt.tight_layout()
index_activation = 0
index_loss = int(NB_OPERATIONS / 2)
for i in range(0, NB_ROWS):
for j in range(0, NB_COLS):
index = 0
if j < NB_COLS / 2:
index = index_activation
index_activation += 1
else:
index = index_loss
index_loss += 1
data_1 = pd.read_csv("data/" + str(index) + "_functions_gpu.csv", delimiter=';')
data_2 = pd.read_csv("data/" + str(index) + "_functions_cpu.csv", delimiter=';')
print_data(data_1, data_2, ax[i][j], OPERATIONS[index])
# Set the labels.
for i in range(0, NB_ROWS):
plt.setp(ax[i, :], xlabel="")
plt.setp(ax[-1, :], xlabel=XLABEL)
plt.setp(ax[0:, 0], ylabel=YLABEL)
plt.setp(ax[0:, int(NB_COLS/2)], ylabel=YLABEL)
# Full screen.
manager = plt.get_current_fig_manager()
manager.full_screen_toggle()
# Save into pdf.
plt.savefig("op_time_functions", format="pdf", dpi=1200)
# Show the graph.
plt.show()
if __name__ == "__main__":
main()
|
# Use by adding e.g. "command script import ~/uno/scripts/unolldb.py" to ~/.lldbinit
import lldb
from sets import Set
moduleName = __name__
logFile = '/tmp/' + moduleName + '.log'
unoPrimitiveTypes = {
"Uno.Long": "int64_t",
"Uno.Int": "int32_t",
"Uno.Short": "int16_t",
"Uno.SByte": "int8_t",
"Uno.ULong": "uint64_t",
"Uno.UInt": "uint32_t",
"Uno.UShort": "uint16_t",
"Uno.Byte": "uint8_t",
"Uno.Bool": "bool",
"Uno.Double": "double",
"Uno.Float": "float"
}
def reverseString(s):
return s[::-1]
def rreplace(str, x, y, max):
return reverseString(reverseString(str).replace(x, y, max))
def cppTypeFromUnoTypeName(module, unoTypeName):
primitiveTypeName = unoPrimitiveTypes.get(unoTypeName, None)
if primitiveTypeName is not None:
log("Finding primitive type name " + primitiveTypeName)
t = module.FindFirstType(primitiveTypeName)
if t.IsValid():
log("Mapped primitive uno type " + unoTypeName + " to " + t.GetName())
return t;
numDots = unoTypeName.count('.')
log('Trying to get the C++ type for ' + unoTypeName)
for i in range(numDots + 1):
cppTypeName = 'g::' + rreplace(unoTypeName, '.', '::', numDots - i).replace('.', '__')
log(' Looking up C++ type ' + cppTypeName)
t = module.FindFirstType(cppTypeName)
if t.IsValid():
log(' Success!')
return t
else:
log(' No such C++ type')
class FirstChildSynthProvider:
def __init__(self, valobj, internal_dict):
log('FirstChildSynthProvider ' + valobj.GetTypeName())
self.obj = valobj;
while self.obj.GetNumChildren() == 1 and self.obj.GetChildAtIndex(0).GetNumChildren() == 1:
self.obj = self.obj.GetChildAtIndex(0)
def num_children(self):
return self.obj.GetNumChildren()
def get_child_index(self,name):
return self.obj.GetIndexOfChildWithName(name)
def get_child_at_index(self,index):
return self.obj.GetChildAtIndex(index)
def update(self):
return
def has_children(self):
return True
class UObjectSynthProvider:
def __init__(self, valobj, internal_dict):
log('UObjectSynthProvider ' + valobj.GetTypeName())
if not valobj.TypeIsPointerType():
self.obj = valobj
return
address = getPtrAddress(valobj)
frame = valobj.GetFrame()
unoType = callMethodRaw(frame, 'uObject*', address, 'GetType()->FullName')
if not unoType.IsValid():
self.obj = valobj
return
unoTypeName = getCString(unoType)
module = frame.GetModule()
cppType = cppTypeFromUnoTypeName(module, unoTypeName)
if cppType == None or not cppType.IsValid():
self.obj = valobj
return
cppPointerType = cppType.GetPointerType()
log(' The C++ (pointer) type is ' + cppPointerType.GetName())
cppTypeName = cppType.GetName()
typeIsStruct = frame.EvaluateExpression(cppTypeName + '_typeof()->Type == uTypeType::uTypeTypeStruct').GetValueAsSigned(0) != 0
typeIsEnum = frame.EvaluateExpression(cppTypeName + '_typeof()->Type == uTypeType::uTypeTypeEnum').GetValueAsSigned(0) != 0
offset = frame.EvaluateExpression('sizeof(uObject)').GetValueAsSigned(0) if typeIsStruct or typeIsEnum else 0
log(' Address is ' + hex(address) + ' offset ' + str(offset) + ' = ' + hex(address + offset))
replacedValobj = frame.EvaluateExpression('(' + cppPointerType.GetName() + ')' + hex(address + offset)).Dereference()
log(' replacedValobj ' + str(replacedValobj.IsValid()))
self.obj = replacedValobj if replacedValobj.IsValid() else valobj
def num_children(self):
return self.obj.GetNumChildren()
def get_child_index(self,name):
return self.obj.GetIndexOfChildWithName(name)
def get_child_at_index(self,index):
return self.obj.GetChildAtIndex(index)
def update(self):
return
def has_children(self):
return True
class UArraySynthProvider:
def trace(self, message):
# For debugging messages uncomment following line
# log(message)
pass
def __init__(self, valobj, internal_dict):
self.trace("UArraySynthProvider.__init__ path: \"" + valobj.path + "\" type: \"" + valobj.GetTypeName() + "\"")
self.valobj = valobj
self.length = None
self.unoElementTypeName = None
self.cppElementType = None
self.ptr = None
def num_children(self):
self.trace("UArraySynthProvider.num_children")
result = self.length
self.trace("UArraySynthProvider.num_children exit")
return result
def get_child_index(self, name):
self.trace("UArraySynthProvider.get_child_index")
return int(name.lstrip("[").rstrip("]"))
def get_child_at_index(self, index):
self.trace("UArraySynthProvider.get_child_at_index")
self.trace(" unoElementTypeName is " + self.unoElementTypeName)
self.trace(" cppElementType is " + self.cppElementType.GetName())
if index >= self.length or index < 0:
return None
# Pointer to pointer..
result = self.ptr.CreateChildAtOffset("[" + str(index) + "]", self.cppElementType.GetByteSize() * index, self.cppElementType)
self.trace("UArraySynthProvider.get_child_at_index completed")
return result
def update(self):
self.update_impl()
return True
def update_impl(self):
self.trace("UArraySynthProvider.update")
self.unoElementTypeName = uArrayElementTypeString(self.valobj)
self.trace(" update unoElementTypeName is " + self.unoElementTypeName)
cppElementType = cppTypeFromUnoTypeName(self.valobj.GetFrame().GetModule(), self.unoElementTypeName)
if (not isValueType(self.valobj.GetFrame(), cppElementType)):
cppElementType = cppElementType.GetPointerType()
self.cppElementType = cppElementType
self.trace(" update cppElementType is " + self.cppElementType.GetName())
self.trace(" type for valobj is now " + self.valobj.GetTypeName())
self.length = self.valobj.GetChildMemberWithName('_length').GetValueAsSigned(0)
self.trace(" update length is " + str(self.length))
self.ptr = self.valobj.GetChildMemberWithName("_ptr").Cast(self.cppElementType.GetPointerType())
self.trace("UArraySynthProvider.update Element type is " + self.unoElementTypeName)
self.trace("UArraySynthProvider.update exit")
def has_children(self):
self.trace("UArraySynthProvider.has_children")
return True
def clearLog():
# f = open(logFile, 'w')
# f.close()
pass
def log(str):
# f = open(logFile, 'a')
# f.write(str + '\n')
# f.close()
pass
def getCString(cstr): # hackety hax
return cstr.summary[1:-1] # remove quotes
def getUtf16String(ptr, length):
log('getUtf16String')
if length == 0:
return ''
data = ptr.GetPointeeData(0, length)
bytes = data.ReadRawData(lldb.SBError(), 0, 2 * length)
str = bytes.decode('utf-16').encode('utf-8')
log('getUtf16String result ' + str)
return str
def getPtrAddress(value):
return value.data.GetAddress(lldb.SBError(), 0)
def isStructType(frame, cppType):
return frame.EvaluateExpression(cppType.GetName() + '_typeof()->Type == uTypeType::uTypeTypeStruct').GetValueAsSigned(0) != 0
def isEnumType(frame, cppType):
return frame.EvaluateExpression(cppType.GetName() + '_typeof()->Type == uTypeType::uTypeTypeEnum').GetValueAsSigned(0) != 0
def isValueType(frame, cppType):
# Definition taken from U_IS_VALUE macro in ObjectModel.h of uno
typeType = frame.EvaluateExpression(cppType.GetName() + '_typeof()->Type').GetValueAsSigned(0)
typeTypeByRefConst = frame.EvaluateExpression("uTypeType::uTypeTypeByRef").GetValueAsSigned(0)
return typeType < typeTypeByRefConst
def callMethodRaw(frame, typeName, address, methodName):
expr = '((' + typeName + ')' + hex(address) + ')->' + methodName
log('callMethodRaw ' + expr)
return frame.EvaluateExpression(expr)
def callMethod(thisValue, methodName):
return callMethodRaw(thisValue.frame, thisValue.GetTypeName(), getPtrAddress(thisValue), methodName)
def getUStringString(value):
log('getUStringString')
ptr = value.GetChildMemberWithName('_ptr')
length = value.GetChildMemberWithName('_length').GetValueAsSigned(0)
str = getUtf16String(ptr, length)
return str
def isNull(value):
if value.TypeIsPointerType():
address = value.GetData().GetAddress(lldb.SBError(), 0)
return address == 0
else:
return False
def uObjectToString(value):
log('uObjectToString')
if isNull(value):
return 'null'
ustring = None
if value.TypeIsPointerType():
log('Object?')
ustring = callMethod(value, 'ToString()')
else:
x = 'uBoxPtr(%s_typeof(), (void*)%s, nullptr, false)->ToString()' % (value.GetTypeName(), hex(getPtrAddress(value.AddressOf())))
log('Struct? ' + x)
ustring = value.frame.EvaluateExpression(x)
string = getUStringString(ustring)
log('uObjectToString result ' + string)
return string
def uArrayElementTypeString(value):
log('uArrayElementType ' + value.GetTypeName())
elementTypeUString = value.frame.EvaluateExpression(
'((uArrayType*)((uArray*)%s)->GetType())->ElementType->ToString()' % hex(getPtrAddress(value)))
return getUStringString(elementTypeUString)
def uStringSummary(value, *rest):
log('- uStringSummary')
if isNull(value):
return 'null'
return '"%s"' % getUStringString(value)
def uObjectSummary(value, *rest):
log('- uObjectSummary ' + value.GetTypeName())
return uObjectToString(value)
def uTypeSummary(value, *rest):
log('- uTypeSummary ' + value.GetTypeName())
return uObjectToString(value)
def uArraySummary(value, *rest):
log(' - uArraySummary')
if isNull(value):
return 'null'
length = callMethod(value, 'Length()').GetValueAsSigned(0)
log('Length ' + str(length))
return uArrayElementTypeString(value) + "[" + str(length) + "]"
def firstChildSummary(value, *rest):
log('- firstChildSummary ' + value.GetTypeName())
log(str(value.GetNumChildren()))
while value.GetNumChildren() == 1:
value = value.GetChildAtIndex(0)
return value.GetSummary()
def __lldb_init_module(debugger, dict):
clearLog()
log('********************************** init module 3')
category = debugger.GetDefaultCategory()
for type in ['uString', 'uObject', 'uType', 'uArray']:
summary = lldb.SBTypeSummary.CreateWithFunctionName(moduleName + '.' + type + 'Summary')
isRegex = False
debugger.GetDefaultCategory().AddTypeSummary(lldb.SBTypeNameSpecifier(type, isRegex), summary)
for type in ['uStrong', 'uSStrong', 'uWeak', 'uSWeak']:
summary = lldb.SBTypeSummary.CreateWithFunctionName(moduleName + '.firstChildSummary')
isRegex = True
category.AddTypeSummary(lldb.SBTypeNameSpecifier(type + '<.*>', isRegex), summary)
synthetic = lldb.SBTypeSynthetic.CreateWithClassName(moduleName + '.FirstChildSynthProvider')
synthetic.SetOptions(lldb.eTypeOptionCascade)
category.AddTypeSynthetic(lldb.SBTypeNameSpecifier(type + '<.*>', isRegex), synthetic)
summary = lldb.SBTypeSummary.CreateWithFunctionName(moduleName + '.uObjectSummary')
isRegex = True
category.AddTypeSummary(lldb.SBTypeNameSpecifier('g::.+', isRegex), summary)
synthetic = lldb.SBTypeSynthetic.CreateWithClassName(moduleName + '.UObjectSynthProvider')
synthetic.SetOptions(lldb.eTypeOptionCascade)
category.AddTypeSynthetic(lldb.SBTypeNameSpecifier('uObject', False), synthetic)
synthetic = lldb.SBTypeSynthetic.CreateWithClassName(moduleName + '.UObjectSynthProvider')
synthetic.SetOptions(lldb.eTypeOptionCascade)
category.AddTypeSynthetic(lldb.SBTypeNameSpecifier('g::.+', True), synthetic)
synthetic = lldb.SBTypeSynthetic.CreateWithClassName(moduleName + '.UArraySynthProvider')
synthetic.SetOptions(lldb.eTypeOptionCascade)
category.AddTypeSynthetic(lldb.SBTypeNameSpecifier('uArray', False), synthetic)
|
from securityheaders.models.xcontenttypeoptions import XContentTypeOptions
from securityheaders.checkers import Checker
class XContentTypeOptionsChecker(Checker):
def __init__(self):
pass
def getxcontenttypeoptions(self, headers):
return self.extractheader(headers, XContentTypeOptions)
|
# source: https://github.com/kuangliu/pytorch-cifar/blob/master/models/densenet.py
'''DenseNet in PyTorch.'''
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from cnns.nnlib.pytorch_layers.conv_picker import Conv
def conv3x3(in_planes, out_planes, args, stride=1):
"""3x3 convolution with padding"""
# return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
# padding=1, bias=False)
return Conv(kernel_sizes=[3], in_channels=in_planes,
out_channels=[out_planes], strides=[stride],
padding=[1], args=args, is_bias=False).get_conv()
def conv1x1(in_planes, out_planes, args, stride=1):
"""1x1 convolution"""
# return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride,
# bias=False)
# It is rather unnecessary to use fft convolution for kernels of size 1x1.
return Conv(kernel_sizes=[1], in_channels=in_planes,
out_channels=[out_planes], strides=[stride],
padding=[0], args=args, is_bias=False).get_conv()
class Bottleneck(nn.Module):
def __init__(self, in_planes, growth_rate, args):
super(Bottleneck, self).__init__()
self.bn1 = nn.BatchNorm2d(in_planes)
# self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True
# self.conv1 = nn.Conv2d(in_planes, 4*growth_rate, kernel_size=1, bias=False)
# self.conv1 = Conv(in_planes, 4*growth_rate, kernel_size=1, bias=False)
self.conv1 = conv1x1(in_planes=in_planes, out_planes=4 * growth_rate,
args=args)
self.bn2 = nn.BatchNorm2d(4 * growth_rate)
# self.conv2 = nn.Conv2d(4*growth_rate, growth_rate, kernel_size=3, padding=1, bias=False)
self.conv2 = conv3x3(in_planes=4 * growth_rate, out_planes=growth_rate,
args=args)
def forward(self, x):
out = self.conv1(F.relu(self.bn1(x)))
out = self.conv2(F.relu(self.bn2(out)))
out = torch.cat([out, x], 1)
return out
class Transition(nn.Module):
def __init__(self, in_planes, out_planes, args=None):
super(Transition, self).__init__()
self.bn = nn.BatchNorm2d(in_planes)
# self.conv = nn.Conv2d(in_planes, out_planes, kernel_size=1, bias=False)
self.conv = conv1x1(in_planes, out_planes, args)
def forward(self, x):
out = self.conv(F.relu(self.bn(x)))
out = F.avg_pool2d(out, 2)
return out
class DenseNet(nn.Module):
def __init__(self, block, nblocks, args, growth_rate=12, reduction=0.5):
super(DenseNet, self).__init__()
self.growth_rate = growth_rate
num_planes = 2 * growth_rate
# self.conv1 = nn.Conv2d(3, num_planes, kernel_size=3, padding=1, bias=False)
self.conv1 = conv3x3(in_planes=3, out_planes=num_planes, args=args)
self.dense1 = self._make_dense_layers(block, num_planes, nblocks[0],
args=args)
num_planes += nblocks[0] * growth_rate
out_planes = int(math.floor(num_planes * reduction))
self.trans1 = Transition(num_planes, out_planes, args=args)
num_planes = out_planes
self.dense2 = self._make_dense_layers(block, num_planes, nblocks[1],
args=args)
num_planes += nblocks[1] * growth_rate
out_planes = int(math.floor(num_planes * reduction))
self.trans2 = Transition(num_planes, out_planes, args=args)
num_planes = out_planes
self.dense3 = self._make_dense_layers(block, num_planes, nblocks[2],
args=args)
num_planes += nblocks[2] * growth_rate
out_planes = int(math.floor(num_planes * reduction))
self.trans3 = Transition(num_planes, out_planes, args=args)
num_planes = out_planes
self.dense4 = self._make_dense_layers(block, num_planes, nblocks[3],
args=args)
num_planes += nblocks[3] * growth_rate
self.bn = nn.BatchNorm2d(num_planes)
self.linear = nn.Linear(num_planes, args.num_classes)
def _make_dense_layers(self, block, in_planes, nblock, args):
layers = []
for i in range(nblock):
layers.append(block(in_planes, self.growth_rate, args=args))
in_planes += self.growth_rate
return nn.Sequential(*layers)
def forward(self, x):
out = self.conv1(x)
out = self.trans1(self.dense1(out))
out = self.trans2(self.dense2(out))
out = self.trans3(self.dense3(out))
out = self.dense4(out)
out = F.avg_pool2d(F.relu(self.bn(out)), 4)
out = out.view(out.size(0), -1)
out = self.linear(out)
return out
def DenseNet121(args):
return DenseNet(Bottleneck, [6, 12, 24, 16], growth_rate=32, args=args)
def DenseNet169(args):
return DenseNet(Bottleneck, [6, 12, 32, 32], growth_rate=32, args=args)
def DenseNet201(args):
return DenseNet(Bottleneck, [6, 12, 48, 32], growth_rate=32, args=args)
def DenseNet161(args):
return DenseNet(Bottleneck, [6, 12, 36, 24], growth_rate=48, args=args)
def densenet_cifar(args):
return DenseNet(Bottleneck, [6, 12, 24, 16], growth_rate=12, args=args)
def test():
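# NOTE: as written this will fail, because densenet_cifar() (like the other constructors
# above) requires an `args` object carrying at least num_classes and the Conv settings;
# pass one in to actually run this smoke test.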
net = densenet_cifar()
x = torch.randn(1, 3, 32, 32)
y = net(x)
print(y)
|
from datetime import datetime
import errno
import fcntl
import os
import socket
import sys
import traceback
import threading
import xml.etree.ElementTree as ET
from zmq.eventloop import ioloop
from tornado import iostream
from pyfire import configuration as config
from pyfire.errors import XMPPProtocolError
from pyfire.logger import Logger
from pyfire.stream import processor
from pyfire.stream.stanzas import TagHandler
log = Logger(__name__)
class XMPPServer(object):
"""A non-blocking, single-threaded XMPP server."""
def __init__(self, io_loop=None):
self.io_loop = io_loop or ioloop.IOLoop.instance()
self._sockets = {} # fd -> socket object
self._started = False
self._connections = {}
self.checker = ioloop.PeriodicCallback(
self.check_for_closed_connections, 30000)
def listen(self, port, address=""):
"""Binds to the given port and starts the server in a single process.
This method is a shortcut for:
server.bind(port, address)
server.start()
"""
self.bind(port, address)
self.start()
def bind(self, port, address=None, family=socket.AF_UNSPEC):
"""Binds this server to the given port on the given address.
To start the server, call start(). You can call listen() as
a shortcut to the sequence of bind() and start() calls.
Address may be either an IP address or hostname. If it's a hostname,
the server will listen on all IP addresses associated with the
name. Address may be an empty string or None to listen on all
available interfaces. Family may be set to either socket.AF_INET
or socket.AF_INET6 to restrict to ipv4 or ipv6 addresses, otherwise
both will be used if available.
This method may be called multiple times prior to start() to listen
on multiple ports or interfaces.
"""
if address == "":
address = None
for res in socket.getaddrinfo(address, port, family,
socket.SOCK_STREAM, 0,
socket.AI_PASSIVE | socket.AI_ADDRCONFIG):
af, socktype, proto, canonname, sockaddr = res
sock = socket.socket(af, socktype, proto)
flags = fcntl.fcntl(sock.fileno(), fcntl.F_GETFD)
flags |= fcntl.FD_CLOEXEC
fcntl.fcntl(sock.fileno(), fcntl.F_SETFD, flags)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
if af == socket.AF_INET6:
# On linux, ipv6 sockets accept ipv4 too by default,
# but this makes it impossible to bind to both
# 0.0.0.0 in ipv4 and :: in ipv6. On other systems,
# separate sockets *must* be used to listen for both ipv4
# and ipv6. For consistency, always disable ipv4 on our
# ipv6 sockets and use a separate ipv4 socket when needed.
if hasattr(socket, "IPPROTO_IPV6"):
sock.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY, 1)
sock.setblocking(0)
sock.bind(sockaddr)
sock.listen(128)
log.info("Starting to listen on IP %s Port %s for connections" % sockaddr)
self._sockets[sock.fileno()] = sock
if self._started:
self.io_loop.add_handler(sock.fileno(), self._handle_events,
ioloop.IOLoop.READ)
def start(self):
"""Starts this server in the IOLoop."""
assert not self._started
for fd in self._sockets.keys():
self.io_loop.add_handler(fd, self._handle_events,
ioloop.IOLoop.READ)
def stop(self):
"""Stops listening for new connections.
Streams currently running may still continue after the
server is stopped.
"""
for fd, sock in self._sockets.iteritems():
self.io_loop.remove_handler(fd)
sock.close()
def _handle_events(self, fd, events):
while True:
try:
connection, address = self._sockets[fd].accept()
except socket.error as e:
if e.args[0] in (errno.EWOULDBLOCK, errno.EAGAIN):
return
raise
try:
stream = iostream.IOStream(connection, io_loop=self.io_loop)
log.info("Starting new connection for client connection from %s:%s" % address)
self._connections[address] = XMPPConnection(stream, address)
if not self.checker._running:
self.checker.start()
except Exception as e:
exc_type, exc_value, exc_traceback = sys.exc_info()
log.error("Error in connection callback, %s" % str(e))
for line in traceback.format_tb(exc_traceback):
if line.find("\n") >= 0:
for subline in line.split("\n"):
log.error(subline)
else:
log.error(line.rstrip("\n"))
def check_for_closed_connections(self):
log.debug("checking for closed connections")
for address in self._connections.keys():
connection = self._connections[address]
if connection.closed():
log.debug("detected dead stream/connection: %s:%s" % connection.address)
del self._connections[address]
if len(self._connections) == 0:
log.debug("stopping checker")
self.checker.stop()
class XMPPConnection(object):
"""One XMPP connection initiated by class:`XMPPServer`"""
def __init__(self, stream, address):
self.stream = stream
self.address = address
self.connectiontime = self.last_seen = datetime.now()
self.taghandler = TagHandler(self)
self.parser = processor.StreamProcessor(
self.taghandler.streamhandler,
self.taghandler.contenthandler)
self.stream.read_bytes(1, self._read_char)
def _read_char(self, data):
"""Reads from client in byte mode"""
try:
if data == " ":
log.debug("Found whitespace keepalive")
self.stream.read_bytes(1, self._read_char)
else:
log.debug("Processing byte: %s" % data)
self.parser.feed(data)
self.stream.read_until(">", self._read_xml)
self.last_seen = datetime.now()
except IOError:
self.done()
def _read_xml(self, data):
"""Reads from client until closing tag for xml is found"""
try:
self.last_seen = datetime.now()
log.debug("Processing chunk: %s" % data)
self.parser.feed(data)
if self.parser.depth >= 2:
self.stream.read_until(">", self._read_xml)
else:
self.stream.read_bytes(1, self._read_char)
except IOError:
self.done()
def send_string(self, string, raises_error=True):
"""Sends a string to client"""
try:
self.stream.write(string)
log.debug("Sent string to client:" + string)
except IOError:
if raises_error:
raise
def send_element(self, element, raises_error=True):
"""Serializes and send an ET Element"""
self.send_string(ET.tostring(element), raises_error)
def stop_connection(self):
"""Sends stream close, discards stream closed errors"""
# Ignore IOErrors as stream already has been closed
# as there is no need so send stream end element on closed streams ;)
try:
self.taghandler.close()
self.send_string("</stream:stream>")
except IOError:
pass
self.done()
def done(self):
"""Does cleanup work"""
self.stream.close()
def closed(self):
"""Checks if underlying stream is closed"""
return self.stream.closed()
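# A minimal usage sketch (an assumption, not part of the original module):
# if __name__ == '__main__':
#     server = XMPPServer()
#     server.listen(5222)               # standard XMPP client-to-server port
#     ioloop.IOLoop.instance().start()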
|
#
# PySNMP MIB module CISCOSB-RMON (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/CISCOSB-RMON
# Produced by pysmi-0.3.4 at Mon Apr 29 18:07:17 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
Integer, OctetString, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "Integer", "OctetString", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsUnion, ValueSizeConstraint, ConstraintsIntersection, SingleValueConstraint, ValueRangeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsUnion", "ValueSizeConstraint", "ConstraintsIntersection", "SingleValueConstraint", "ValueRangeConstraint")
switch001, = mibBuilder.importSymbols("CISCOSB-MIB", "switch001")
OwnerString, EntryStatus = mibBuilder.importSymbols("RMON-MIB", "OwnerString", "EntryStatus")
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
Integer32, MibScalar, MibTable, MibTableRow, MibTableColumn, MibIdentifier, Gauge32, ObjectIdentity, IpAddress, iso, NotificationType, ModuleIdentity, Unsigned32, Counter64, Counter32, Bits, TimeTicks = mibBuilder.importSymbols("SNMPv2-SMI", "Integer32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "MibIdentifier", "Gauge32", "ObjectIdentity", "IpAddress", "iso", "NotificationType", "ModuleIdentity", "Unsigned32", "Counter64", "Counter32", "Bits", "TimeTicks")
RowStatus, TruthValue, TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "RowStatus", "TruthValue", "TextualConvention", "DisplayString")
rlRmonControl = ModuleIdentity((1, 3, 6, 1, 4, 1, 9, 6, 1, 101, 49))
rlRmonControl.setRevisions(('2004-06-01 00:00',))
if mibBuilder.loadTexts: rlRmonControl.setLastUpdated('200406010000Z')
if mibBuilder.loadTexts: rlRmonControl.setOrganization('Cisco Small Business')
rlRmonControlMibVersion = MibScalar((1, 3, 6, 1, 4, 1, 9, 6, 1, 101, 49, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rlRmonControlMibVersion.setStatus('current')
rlRmonControlHistoryControlQuotaBucket = MibScalar((1, 3, 6, 1, 4, 1, 9, 6, 1, 101, 49, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 65535)).clone(8)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlRmonControlHistoryControlQuotaBucket.setStatus('current')
rlRmonControlHistoryControlMaxGlobalBuckets = MibScalar((1, 3, 6, 1, 4, 1, 9, 6, 1, 101, 49, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 65535)).clone(300)).setMaxAccess("readonly")
if mibBuilder.loadTexts: rlRmonControlHistoryControlMaxGlobalBuckets.setStatus('current')
rlHistoryControlTable = MibTable((1, 3, 6, 1, 4, 1, 9, 6, 1, 101, 49, 4), )
if mibBuilder.loadTexts: rlHistoryControlTable.setStatus('current')
rlHistoryControlEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 6, 1, 101, 49, 4, 1), ).setIndexNames((0, "CISCOSB-RMON", "rlHistoryControlIndex"))
if mibBuilder.loadTexts: rlHistoryControlEntry.setStatus('current')
rlHistoryControlIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 6, 1, 101, 49, 4, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: rlHistoryControlIndex.setStatus('current')
rlHistoryControlDataSource = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 6, 1, 101, 49, 4, 1, 2), ObjectIdentifier()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlHistoryControlDataSource.setStatus('current')
rlHistoryControlBucketsRequested = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 6, 1, 101, 49, 4, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 65535)).clone(50)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlHistoryControlBucketsRequested.setStatus('current')
rlHistoryControlBucketsGranted = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 6, 1, 101, 49, 4, 1, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: rlHistoryControlBucketsGranted.setStatus('current')
rlHistoryControlInterval = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 6, 1, 101, 49, 4, 1, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 3600)).clone(1800)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlHistoryControlInterval.setStatus('current')
rlHistoryControlOwner = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 6, 1, 101, 49, 4, 1, 6), OwnerString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlHistoryControlOwner.setStatus('current')
rlHistoryControlStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 6, 1, 101, 49, 4, 1, 7), EntryStatus()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlHistoryControlStatus.setStatus('current')
rlHistoryTable = MibTable((1, 3, 6, 1, 4, 1, 9, 6, 1, 101, 49, 5), )
if mibBuilder.loadTexts: rlHistoryTable.setStatus('current')
rlHistoryEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 6, 1, 101, 49, 5, 1), ).setIndexNames((0, "CISCOSB-RMON", "rlHistoryIndex"), (0, "CISCOSB-RMON", "rlHistorySampleIndex"))
if mibBuilder.loadTexts: rlHistoryEntry.setStatus('current')
rlHistoryIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 6, 1, 101, 49, 5, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: rlHistoryIndex.setStatus('current')
rlHistorySampleIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 6, 1, 101, 49, 5, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: rlHistorySampleIndex.setStatus('current')
rlHistoryIntervalStart = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 6, 1, 101, 49, 5, 1, 3), TimeTicks()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rlHistoryIntervalStart.setStatus('current')
rlHistoryValue = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 6, 1, 101, 49, 5, 1, 4), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rlHistoryValue.setStatus('current')
rlControlHistoryControlQuotaBucket = MibScalar((1, 3, 6, 1, 4, 1, 9, 6, 1, 101, 49, 6), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 65535)).clone(8)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlControlHistoryControlQuotaBucket.setStatus('current')
rlControlHistoryControlMaxGlobalBuckets = MibScalar((1, 3, 6, 1, 4, 1, 9, 6, 1, 101, 49, 7), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 65535)).clone(300)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlControlHistoryControlMaxGlobalBuckets.setStatus('current')
rlControlHistoryMaxEntries = MibScalar((1, 3, 6, 1, 4, 1, 9, 6, 1, 101, 49, 8), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 65535)).clone(300)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlControlHistoryMaxEntries.setStatus('current')
mibBuilder.exportSymbols("CISCOSB-RMON", rlHistoryEntry=rlHistoryEntry, PYSNMP_MODULE_ID=rlRmonControl, rlRmonControlMibVersion=rlRmonControlMibVersion, rlRmonControlHistoryControlQuotaBucket=rlRmonControlHistoryControlQuotaBucket, rlHistoryControlTable=rlHistoryControlTable, rlControlHistoryControlQuotaBucket=rlControlHistoryControlQuotaBucket, rlHistoryIntervalStart=rlHistoryIntervalStart, rlHistoryControlBucketsGranted=rlHistoryControlBucketsGranted, rlControlHistoryMaxEntries=rlControlHistoryMaxEntries, rlHistoryControlInterval=rlHistoryControlInterval, rlHistoryControlBucketsRequested=rlHistoryControlBucketsRequested, rlHistoryIndex=rlHistoryIndex, rlRmonControl=rlRmonControl, rlHistoryControlIndex=rlHistoryControlIndex, rlRmonControlHistoryControlMaxGlobalBuckets=rlRmonControlHistoryControlMaxGlobalBuckets, rlControlHistoryControlMaxGlobalBuckets=rlControlHistoryControlMaxGlobalBuckets, rlHistorySampleIndex=rlHistorySampleIndex, rlHistoryControlStatus=rlHistoryControlStatus, rlHistoryTable=rlHistoryTable, rlHistoryControlOwner=rlHistoryControlOwner, rlHistoryValue=rlHistoryValue, rlHistoryControlDataSource=rlHistoryControlDataSource, rlHistoryControlEntry=rlHistoryControlEntry)
|
"""
@Author : Ailitonia
@Date : 2021/07/17 2:04
@FileName : config.py
@Project : nonebot2_miya
@Description :
@GitHub : https://github.com/Ailitonia
@Software : PyCharm
"""
from pydantic import BaseSettings
class Config(BaseSettings):
    # Whether to enable the regex-matching matcher
    # If the bot has a command prefix configured but should also respond to prefix-less
    # messages such as "签到" (sign-in), set this option to True
    # If the bot has no command prefix, or an empty prefix, set this option to False to avoid duplicate responses
enable_regex_matcher: bool = True
    # Whether to enable the scheduled task that automatically downloads sign-in header images
enable_pic_preparing_scheduler: bool = True
    # Limit on the number of cached sign-in header images
cache_pic_limit: int = 2000
    # Display names (aliases) used by the value display commands
favorability_alias: str = '好感度'
energy_alias: str = '能量值'
currency_alias: str = '硬币'
    # Exchange rate between energy and favorability; formula: energy * rate = favorability
ef_exchange_rate: float = 0.25
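    # e.g. with the default rate, 100 energy * 0.25 = 25 favorability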
class Config:
extra = "ignore"
|
# Generated by Django 2.1.2 on 2018-11-02 15:26
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("data_refinery_common", "0005_auto_20181030_1955"),
]
operations = [
migrations.AddField(
model_name="surveyjob",
name="nomad_job_id",
field=models.CharField(max_length=256, null=True),
),
migrations.AddField(
model_name="surveyjob", name="ram_amount", field=models.IntegerField(default=256),
),
]
|
"""Sleight of Hand ported from https://twitter.com/MunroHoberman/status/1345134382810619913
"""
from pypico8 import (
Table,
add,
circ,
cos,
delete,
pico8_to_python,
print,
printh,
rectfill,
rnd,
run,
sin,
t,
)
printh(
pico8_to_python(
"""
d={}r,e=rectfill,rnd::_::for i=0,999do
circ(e(128),e(128),1)end
for z=1,52do
f=d[z]or add(d,{0,0,z})a=z/52+t()/4x=f[1]y=f[2]f[1]+=(cos(a)*55+60-x)/9f[2]+=(sin(a)*55+57-y)/9r(x,y,x+9,y+14,6)r(x,y,x+8,y+13,7)
?chr(128+f[3]),x+1,y+4,f[3]*8
end
add(d,del(d,e(d)),e(#d)+1)flip()goto _
"""
)
)
def _init():
global d, r, e
d = Table()
r, e = rectfill, rnd
def _update():
pass
def _draw():
for _ in range(0, 1000):
circ(e(128), e(128), 1)
for z in range(1, 53):
f = d[z] or add(d, Table([0, 0, z]))
a = z / 52 + t() / 4
x = f[1]
y = f[2]
f[1] += (cos(a) * 55 + 60 - x) / 9
f[2] += (sin(a) * 55 + 57 - y) / 9
r(x, y, x + 9, y + 14, 6)
r(x, y, x + 8, y + 13, 7)
print(chr(128 + f[3]), x + 1, y + 4, f[3] * 8)
add(d, delete(d, e(d)), e(len(d)) + 1)
run(_init, _update, _draw)
|
# coding: utf-8
# flake8: noqa
"""
Account API
The <b>Account API</b> gives sellers the ability to configure their eBay seller accounts, including the seller's policies (seller-defined custom policies and eBay business policies), opt in and out of eBay seller programs, configure sales tax tables, and get account information. <br><br>For details on the availability of the methods in this API, see <a href=\"/api-docs/sell/account/overview.html#requirements\">Account API requirements and restrictions</a>. # noqa: E501
OpenAPI spec version: v1.7.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
# import models into model package
from ...sell_account.models.amount import Amount
from ...sell_account.models.category_type import CategoryType
from ...sell_account.models.compact_custom_policy_response import CompactCustomPolicyResponse
from ...sell_account.models.custom_policy import CustomPolicy
from ...sell_account.models.custom_policy_create_request import CustomPolicyCreateRequest
from ...sell_account.models.custom_policy_request import CustomPolicyRequest
from ...sell_account.models.custom_policy_response import CustomPolicyResponse
from ...sell_account.models.deposit import Deposit
from ...sell_account.models.error import Error
from ...sell_account.models.error_parameter import ErrorParameter
from ...sell_account.models.fulfillment_policy import FulfillmentPolicy
from ...sell_account.models.fulfillment_policy_request import FulfillmentPolicyRequest
from ...sell_account.models.fulfillment_policy_response import FulfillmentPolicyResponse
from ...sell_account.models.international_return_override_type import InternationalReturnOverrideType
from ...sell_account.models.kyc_check import KycCheck
from ...sell_account.models.kyc_response import KycResponse
from ...sell_account.models.payment_method import PaymentMethod
from ...sell_account.models.payment_policy import PaymentPolicy
from ...sell_account.models.payment_policy_request import PaymentPolicyRequest
from ...sell_account.models.payment_policy_response import PaymentPolicyResponse
from ...sell_account.models.payments_program_onboarding_response import PaymentsProgramOnboardingResponse
from ...sell_account.models.payments_program_onboarding_steps import PaymentsProgramOnboardingSteps
from ...sell_account.models.payments_program_response import PaymentsProgramResponse
from ...sell_account.models.program import Program
from ...sell_account.models.programs import Programs
from ...sell_account.models.rate_table import RateTable
from ...sell_account.models.rate_table_response import RateTableResponse
from ...sell_account.models.recipient_account_reference import RecipientAccountReference
from ...sell_account.models.region import Region
from ...sell_account.models.region_set import RegionSet
from ...sell_account.models.return_policy import ReturnPolicy
from ...sell_account.models.return_policy_request import ReturnPolicyRequest
from ...sell_account.models.return_policy_response import ReturnPolicyResponse
from ...sell_account.models.sales_tax import SalesTax
from ...sell_account.models.sales_tax_base import SalesTaxBase
from ...sell_account.models.sales_taxes import SalesTaxes
from ...sell_account.models.selling_limit import SellingLimit
from ...sell_account.models.selling_privileges import SellingPrivileges
from ...sell_account.models.set_fulfillment_policy_response import SetFulfillmentPolicyResponse
from ...sell_account.models.set_payment_policy_response import SetPaymentPolicyResponse
from ...sell_account.models.set_return_policy_response import SetReturnPolicyResponse
from ...sell_account.models.shipping_option import ShippingOption
from ...sell_account.models.shipping_service import ShippingService
from ...sell_account.models.time_duration import TimeDuration
|
from multiprocessing import Pool
from collections import defaultdict
import gym
from el_agent import ELAgent
from frozen_lake_util import show_q_value
class CompareAgent(ELAgent):
def __init__(self, q_learning=True, epsilon=0.33):
self.q_learning = q_learning
super().__init__(epsilon)
def learn(self, env, episode_count=1000, gamma=0.9,
learning_rate=0.1, render=False, report_interval=50):
self.init_log()
self.Q = defaultdict(lambda: [0] * len(actions))
actions = list(range(env.action_space.n))
for e in range(episode_count):
s = env.reset()
done = False
a = self.policy(s, actions)
while not done:
if render:
env.render()
n_state, reward, done, info = env.step(a)
if done and reward == 0:
reward = -0.5 # Reward as penalty
n_action = self.policy(n_state, actions)
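                # Q-learning (off-policy) bootstraps from the best next-state action value,
                # while SARSA (on-policy) uses the value of the action actually chosen above.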
if self.q_learning:
gain = reward + gamma * max(self.Q[n_state])
else:
gain = reward + gamma * self.Q[n_state][n_action]
estimated = self.Q[s][a]
self.Q[s][a] += learning_rate * (gain - estimated)
s = n_state
if self.q_learning:
a = self.policy(s, actions)
else:
a = n_action
else:
self.log(reward)
if e != 0 and e % report_interval == 0:
self.show_reward_log(episode=e)
def train(q_learning):
env = gym.make("FrozenLakeEasy-v0")
agent = CompareAgent(q_learning=q_learning)
agent.learn(env, episode_count=3000)
return dict(agent.Q)
if __name__ == "__main__":
with Pool() as pool:
results = pool.map(train, ([True, False]))
for r in results:
show_q_value(r)
|
import socket
from smtplib import *
from configuration import *
debug = False
verbose = True
version = "1.0.0"
key_sender = 'sender'
key_subject = 'subject'
key_username = 'username'
key_password = 'password'
key_receivers = 'receivers'
key_smtp_server = 'smtp_server'
key_smtp_server_port = 'smtp_server_port'
param_configuration_names = '--configuration'
def log(what):
if verbose:
print what
def notify(content, configuration_names):
if configuration_names:
for configuration_name in configuration_names:
if debug:
log("Using '" + configuration_name + "' configuration")
if configuration_name in configurations:
configuration = configurations[configuration_name]
notify_with_configuration(content, configuration)
else:
log("There is no configuration with the name: '" + configuration_name + "'")
else:
if debug:
log("Using all configurations.")
for configuration in configurations:
notify_with_configuration(content, configurations[configuration])
def notify_with_configuration(content, configuration):
receivers_str = ""
for receiver_str in configuration[key_receivers]:
if configuration[key_receivers].index(receiver_str) > 0:
receivers_str += ", " + receiver_str
else:
receivers_str += receiver_str
message = """From: %s
To: %s
Subject: %s
%s
""" % (configuration[key_sender], receivers_str, configuration[key_subject], content)
if debug:
log("We will send the following message:\n" + message)
try:
server = SMTP(configuration[key_smtp_server], configuration[key_smtp_server_port], timeout=30)
if key_username in configuration:
username = configuration[key_username]
log("Logging in user: " + username)
password = ""
if key_password in configuration:
password = configuration[key_password]
server.login(username, password)
receivers = configuration[key_receivers]
log("Sending mail to: " + str(receivers))
server.sendmail(configuration[key_sender], receivers, message)
log("Shutting down connection.")
server.quit()
return True
    except (SMTPHeloError, SMTPAuthenticationError, SMTPException,
            SMTPRecipientsRefused, SMTPSenderRefused, SMTPDataError, socket.timeout) as e:
log("Error: " + str(e))
pass
return False
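# Illustrative configuration entry as consumed above (all values hypothetical):
# configurations = {
#     'default': {
#         'sender': 'noreply@example.com',
#         'receivers': ['ops@example.com'],
#         'subject': 'Notification',
#         'smtp_server': 'smtp.example.com',
#         'smtp_server_port': 587,
#         'username': 'noreply@example.com',  # optional
#         'password': 'secret',               # optional
#     }
# }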
|
# Objective: check whether win conditions can be achieved in one step
def check_horizontale(grille, x, y):
"""Alignements horizontaux"""
symbole = grille.grid[y][x]
    # Horizontal alignment of three consecutive tokens, with node (x, y) as the rightmost one
if grille.is_far_from_left(x):
if all(symbole == grille.grid[y][x - i - 1] for i in range(2)):
my_play = grille.play_if_possible(x - 3, y)
if my_play is not None:
return my_play
    # Horizontal alignments with node (x, y) as the leftmost one
    if grille.is_far_from_right(x):
        # Horizontal alignment of three consecutive tokens
if all(symbole == grille.grid[y][x + i + 1] for i in range(2)):
my_play = grille.play_if_possible(x + 3, y)
if my_play is not None:
return my_play
        # Horizontal alignment of three non-consecutive tokens
if symbole == grille.grid[y][x + 3]:
            # Horizontal alignment of the form X.XX
if symbole == grille.grid[y][x + 2]:
my_play = grille.play_if_possible(x + 1, y)
if my_play is not None:
return my_play
            # Horizontal alignment of the form XX.X
if symbole == grille.grid[y][x + 1]:
my_play = grille.play_if_possible(x + 2, y)
if my_play is not None:
return my_play
return None
def check_verticale(grille, x, y):
"""Alignement vertical"""
symbole = grille.grid[y][x]
    # Vertical alignment of three consecutive tokens, with node (x, y) as the topmost one
if grille.is_quite_far_from_bottom(y) and not (grille.is_at_top(y)):
if all(symbole == grille.grid[y + i + 1][x] for i in range(2)):
my_play = grille.play_if_possible(x, y - 1)
if my_play is not None:
return my_play
return None
def check_oblique_montante(grille, x, y):
"""Alignements diagonaux montants (/) : allant du coin bas gauche au coin haut droit"""
symbole = grille.grid[y][x]
    # Rising diagonal alignment of the form XXX., with node (x, y) as the lowest, leftmost one
if grille.is_far_from_top(y) and grille.is_far_from_right(x):
if all(symbole == grille.grid[y - i - 1][x + i + 1] for i in range(2)):
my_play = grille.play_if_possible(x + 3, y - 2)
if my_play is not None:
return my_play
    # Rising diagonal alignments with node (x, y) as the highest, rightmost one
    if grille.is_far_from_bottom(y) and grille.is_far_from_left(x):
        # Diagonal alignment of the form .XXX
if all(symbole == grille.grid[y + i + 1][x - i - 1] for i in range(2)):
if grille.is_very_far_from_bottom(y):
my_play = grille.play_if_possible(x - 3, y + 3)
if my_play is not None:
return my_play
if symbole == grille.grid[y + 3][x - 3]:
            # Diagonal alignment of the form X.XX
if symbole == grille.grid[y + 2][x - 2]:
my_play = grille.play_if_possible(x - 1, y + 1)
if my_play is not None:
return my_play
            # Diagonal alignment of the form XX.X
if symbole == grille.grid[y + 1][x - 1]:
my_play = grille.play_if_possible(x - 2, y + 2)
if my_play is not None:
return my_play
return None
def check_oblique_descendante(grille, x, y):
"""Alignements diagonaux descendants (\) : allant du coin haut gauche au coin bas droit"""
symbole = grille.grid[y][x]
    # Falling diagonal alignment of the form .XXX, with node (x, y) as the lowest, rightmost one
if grille.is_far_from_top(y) and grille.is_far_from_left(x):
if all(symbole == grille.grid[y - i - 1][x - i - 1] for i in range(2)):
my_play = grille.play_if_possible(x - 2, y - 3)
if my_play is not None:
return my_play
    # Falling diagonal alignments with node (x, y) as the highest, leftmost one
    if grille.is_far_from_bottom(y) and grille.is_far_from_right(x):
        # Diagonal alignment of the form XXX.
if all(symbole == grille.grid[y + i + 1][x + i + 1] for i in range(2)):
if grille.is_very_far_from_bottom(y):
my_play = grille.play_if_possible(x + 3, y + 3)
if my_play is not None:
return my_play
if symbole == grille.grid[y + 3][x + 3]:
            # Diagonal alignment of the form X.XX
if symbole == grille.grid[y + 2][x + 2]:
my_play = grille.play_if_possible(x + 1, y + 1)
if my_play is not None:
return my_play
            # Diagonal alignment of the form XX.X
if symbole == grille.grid[y + 1][x + 1]:
my_play = grille.play_if_possible(x + 2, y + 2)
if my_play is not None:
return my_play
return None
def look_for_obvious_steps(grille, player_symbol='X', opponent_symbol='O'):
"""Vérifier s'il est possible de gagner pour l'un ou l'autre joueur.
Rechercher un coup qui permette (d'abord) ou empêche (ensuite) un alignement de quatre jetons.
Si oui, renvoyer le numéro de la colonne à jouer, sinon None"""
# Check the player symbol first, so that a winning step is preferred to a step to avoid a loss
for checked_symbol in [player_symbol, opponent_symbol]:
for y in range(len(grille.grid)):
for x in range(len(grille.grid[y])):
if grille.grid[y][x] == checked_symbol:
my_play = check_horizontale(grille, x, y)
if my_play is not None:
return my_play
my_play = check_verticale(grille, x, y)
if my_play is not None:
return my_play
my_play = check_oblique_montante(grille, x, y)
if my_play is not None:
return my_play
my_play = check_oblique_descendante(grille, x, y)
if my_play is not None:
return my_play
    # No urgent move, so return None
return None
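# Illustrative usage, assuming a `grille` object exposing the interface used above:
#   col = look_for_obvious_steps(grille, player_symbol='X', opponent_symbol='O')
#   if col is not None:
#       # play column `col` immediately: it either wins or blocks the opponent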
|
user='shalom@somewhere.net'
password='secretsecret'
base_url = 'http://cdn.mydomain.net'
platform = 'production'
lambda_region = 'us-east-1'
sourcebucket = 'mybucket'
# not used yet
sdb_region = 'us-east-1'
sdb_domain = 'purge-akamai'
sdb_item='bucket-map'
|
# -*- coding: utf-8 -*-
from marshmallow import ValidationError
from pyramid.view import view_config
from pyramid.httpexceptions import HTTPBadRequest
from amnesia.modules.content.validation import IdListSchema
from amnesia.modules.content import SessionResource
def includeme(config):
''' Pyramid includeme '''
config.scan(__name__)
@view_config(name='scopy', request_method='POST',
renderer='json', context=SessionResource,
permission='copy')
def copy_oids_to_session(context, request):
''' Copy oids to session '''
try:
result = IdListSchema().load(request.POST.mixed())
except ValidationError as error:
raise HTTPBadRequest(error.messages)
oids = context.copy_oids(result['oid'])
return {'oids': oids}
@view_config(name='sremove', request_method='POST',
renderer='json', context=SessionResource,
permission='delete')
def remove_oids_from_session(context, request):
''' Clear the session oids '''
removed = context.clear_oids()
return {'removed': removed}
|
graph = [
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0],
[0, "cat", 0, 1, 0, 0, 0],
[0, 1, 0, 1, 0, "bone", 0],
[0, 0, 0, 0, 0, 0, 0]
]
class Node:
    # G: distance from the "cat" square to the current square
G = 0
    # H: estimated movement cost from the current square to the target (we call it point B, the bone!)
H = 0
    # F = G + H: total estimated cost of the path through this square
F = 0
def __init__(self,x,y):
self.x = x
self.y = y
cat_x = 1
cat_y = 3
bone_x = 4
bone_y = 5
# Records all squares considered while searching for the shortest path (the "open" list)
open = []
# 保存已确定点的横纵坐标
closed = []
width = len(graph) - 1
height = len(graph[0]) - 1
for i in range(len(graph)):
for j in range(len(graph[i])):
if graph[i][j] == "cat":
cat_x, cat_y = i, j
if graph[i][j] == "bone":
bone_x, bone_y = i, j
# print((cat_x,cat_y),(bone_x,bone_y))
# Check whether a square is usable (inside the grid, not a wall, not already closed)
def is_good(x,y):
if x >= 0 and x <= width and y>=0 and y <= height:
if graph[x][y] != 1 and (x,y) not in closed:
return True
else:
return False
else:
return False
# Get the (Manhattan) distance from the current square to the "bone" square
def get_instance(x,y):
return abs(bone_x-x)+abs(y-bone_y)
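# e.g. with the bone found at (4, 5), the cat cell (3, 1) has distance |4 - 3| + |1 - 5| = 5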
# Get the node with the smallest F value
def get_node_with_MinFValue(node_list):
temp_set = set()
for node in node_list:
temp_set.add(node.F)
if len(temp_set) == 1:
print("F值相等,选择最后一个")
return node_list[-1]
else:
sorted_node_list = sorted(node_list,key = lambda node:node.F)
print("F值不相等,选择第一个")
print(waited_nodes(sorted_node_list))
return sorted_node_list[0]
# Generate the set of feasible neighbor candidates for a node
def get_proper_nodes(current_node):
temp = []
if is_good(current_node.x,current_node.y-1):
temp.append((current_node.x,current_node.y-1))
if is_good(current_node.x,current_node.y+1):
temp.append((current_node.x,current_node.y+1))
if is_good(current_node.x+1,current_node.y):
temp.append((current_node.x+1,current_node.y))
if is_good(current_node.x-1,current_node.y):
temp.append((current_node.x-1,current_node.y))
return temp
# Get the list of coordinate tuples from a list of Node objects
def waited_nodes(open):
node_list = []
for node in open:
node_list.append((node.x,node.y))
return node_list
node_cat = Node(cat_x,cat_y)
node_cat.G = 0
node_cat.H = get_instance(cat_x,cat_y)
node_cat.F = node_cat.G + node_cat.H
open.append(node_cat)
while True:
print("候选点:",waited_nodes(open))
current_node = get_node_with_MinFValue(open)
print("选择的下一个点:",(current_node.x,current_node.y))
print("\n")
closed.append((current_node.x,current_node.y))
open.remove(current_node)
    if (bone_x,bone_y) in closed:  # path found
break
proper_node = get_proper_nodes(current_node)
if len(proper_node) > 0:
for (x,y) in proper_node:
temp_node = Node(x,y)
temp_node.G = current_node.G + 1
temp_node.H = get_instance(x,y)
print("({},{}).G:{}".format(x,y,temp_node.G))
print("({},{}).H:{}".format(x,y,temp_node.H))
temp_node.F = temp_node.G + temp_node.H
if temp_node not in open:
open.append(temp_node)
# if its already in the open list
# test if using the current G score make the aSquare F score lower,
# if yes update the parent because it means its a better path
else:
print("已在open中的node:",(temp_node.x,temp_node.y))
# if temp_node.G<
if len(open) == 0: # Continue until there is no more available square in the open list (which means there is no path)
break
print("closed表:",closed)
closed_copy = closed[:]
# for i in range(len(closed_copy)-1):
# if (abs(closed_copy[i][0]-closed_copy[i+1][0])+abs(closed_copy[i][1]-closed_copy[i+1][1]))>1:
# closed.remove((closed_copy[i+1][0],closed_copy[i+1][1]))
# Delete in reverse order to avoid index issues as the list shrinks
for i in range(len(closed_copy)-1,-1,-1):
if (abs(closed_copy[i][0]-closed_copy[i-1][0])+abs(closed_copy[i][1]-closed_copy[i-1][1]))>1 and i>0:
print(closed_copy[i-1])
closed_copy.remove(closed_copy[i-1])
print("最短路径表:",closed_copy)
print("最短路径长度:",len(closed_copy))
|
from __future__ import unicode_literals
from django.test import TestCase
from django.urls import reverse
from django.contrib.auth import get_user_model
|
from random import random
import theme_utils
import input_utils
def pick_theme(themes):
sum_of_ratings = sum([theme['rating'] for theme in themes])
random_number = random() * sum_of_ratings
chosen_theme_number = 0
while themes[chosen_theme_number]['rating'] < random_number:
random_number -= themes[chosen_theme_number]['rating']
chosen_theme_number += 1
return themes[chosen_theme_number]
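# Fitness-proportionate (roulette-wheel) selection: e.g. with ratings [1, 3] the first
# theme is picked with probability 1/4 and the second with probability 3/4.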
batch = [{'theme': theme_utils.generate_random_theme()} for _ in range(5)]
while True:
print('\nStarting round...\n')
for theme in batch:
theme_utils.preview_theme(theme['theme'])
theme['rating'] = float(input_utils.get_input(
'Please rate theme: ', lambda x: input_utils.is_int(x) and int(x) >= 0))
new_batch = []
for theme in batch:
new_batch.append({'theme': theme_utils.mutate_theme(
theme_utils.theme_from_parents(*[pick_theme(batch)['theme'] for _ in range(2)]))})
batch = new_batch
|
import os
import sys
from setuptools import setup, find_packages
if sys.version_info[0] < 3:
with open('README.rst') as f:
long_description = f.read()
else:
with open('README.rst', encoding='utf-8') as f:
long_description = f.read()
setup(
name='piotr',
version='1.0.2',
description='Piotr is an instrumentation tool for qemu-system-arm able to emulate ARM-based embedded devices.',
long_description=long_description,
url='https://github.com/virtualabs/piotr',
author='virtualabs',
author_email='virtualabs@gmail.com',
packages=find_packages('src'),
package_dir={"":"src"},
package_data = {
'piotr':[
'data/*'
]
},
classifiers=[
'Development Status :: 4 - Beta',
'License :: OSI Approved :: MIT License',
'Operating System :: POSIX :: Linux'
],
entry_points = {
'console_scripts': [
'piotr=piotr:main',
'piotr-shell=piotr.shell:guest_shell',
'piotr-ps=piotr.shell:host_ps',
'piotr-debug=piotr.shell:debug_process'
],
},
install_requires = [
'blessings',
'psutil',
'pyyaml'
],
python_requires='>=3.5',
test_suite='tests'
)
|
# Copyright (c) 2016, Intel Corporation.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU General Public License,
# version 2, as published by the Free Software Foundation.
#
# This program is distributed in the hope it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
"""Build performance test base classes and functionality"""
import json
import logging
import os
import re
import resource
import socket
import shutil
import time
import unittest
import xml.etree.ElementTree as ET
from collections import OrderedDict
from datetime import datetime, timedelta
from functools import partial
from multiprocessing import Process
from multiprocessing import SimpleQueue
from xml.dom import minidom
import oe.path
from oeqa.utils.commands import CommandError, runCmd, get_bb_vars
from oeqa.utils.git import GitError, GitRepo
# Get logger for this module
log = logging.getLogger('build-perf')
# Our own version of runCmd which does not raise AssertionErrors, which would cause
# errors to be interpreted as failures
runCmd2 = partial(runCmd, assert_error=False, limit_exc_output=40)
class KernelDropCaches(object):
"""Container of the functions for dropping kernel caches"""
sudo_passwd = None
@classmethod
def check(cls):
"""Check permssions for dropping kernel caches"""
from getpass import getpass
from locale import getdefaultlocale
cmd = ['sudo', '-k', '-n', 'tee', '/proc/sys/vm/drop_caches']
ret = runCmd2(cmd, ignore_status=True, data=b'0')
if ret.output.startswith('sudo:'):
pass_str = getpass(
"\nThe script requires sudo access to drop caches between "
"builds (echo 3 > /proc/sys/vm/drop_caches).\n"
"Please enter your sudo password: ")
cls.sudo_passwd = bytes(pass_str, getdefaultlocale()[1])
@classmethod
def drop(cls):
"""Drop kernel caches"""
cmd = ['sudo', '-k']
if cls.sudo_passwd:
cmd.append('-S')
input_data = cls.sudo_passwd + b'\n'
else:
cmd.append('-n')
input_data = b''
cmd += ['tee', '/proc/sys/vm/drop_caches']
input_data += b'3'
runCmd2(cmd, data=input_data)
def str_to_fn(string):
"""Convert string to a sanitized filename"""
return re.sub(r'(\W+)', '-', string, flags=re.LOCALE)
class ResultsJsonEncoder(json.JSONEncoder):
"""Extended encoder for build perf test results"""
unix_epoch = datetime.utcfromtimestamp(0)
def default(self, obj):
"""Encoder for our types"""
if isinstance(obj, datetime):
# NOTE: we assume that all timestamps are in UTC time
return (obj - self.unix_epoch).total_seconds()
if isinstance(obj, timedelta):
return obj.total_seconds()
return json.JSONEncoder.default(self, obj)
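# Illustrative use: json.dumps({'elapsed': timedelta(seconds=90)}, cls=ResultsJsonEncoder)
# yields '{"elapsed": 90.0}'; datetime values become seconds since the Unix epoch (UTC).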
class BuildPerfTestResult(unittest.TextTestResult):
"""Runner class for executing the individual tests"""
# List of test cases to run
test_run_queue = []
def __init__(self, out_dir, *args, **kwargs):
super(BuildPerfTestResult, self).__init__(*args, **kwargs)
self.out_dir = out_dir
self.hostname = socket.gethostname()
self.product = os.getenv('OE_BUILDPERFTEST_PRODUCT', 'oe-core')
self.start_time = self.elapsed_time = None
self.successes = []
def addSuccess(self, test):
"""Record results from successful tests"""
super(BuildPerfTestResult, self).addSuccess(test)
self.successes.append(test)
def addError(self, test, err):
"""Record results from crashed test"""
test.err = err
super(BuildPerfTestResult, self).addError(test, err)
def addFailure(self, test, err):
"""Record results from failed test"""
test.err = err
super(BuildPerfTestResult, self).addFailure(test, err)
def addExpectedFailure(self, test, err):
"""Record results from expectedly failed test"""
test.err = err
super(BuildPerfTestResult, self).addExpectedFailure(test, err)
def startTest(self, test):
"""Pre-test hook"""
test.base_dir = self.out_dir
log.info("Executing test %s: %s", test.name, test.shortDescription())
self.stream.write(datetime.now().strftime("[%Y-%m-%d %H:%M:%S] "))
super(BuildPerfTestResult, self).startTest(test)
def startTestRun(self):
"""Pre-run hook"""
self.start_time = datetime.utcnow()
def stopTestRun(self):
"""Pre-run hook"""
self.elapsed_time = datetime.utcnow() - self.start_time
def all_results(self):
compound = [('SUCCESS', t, None) for t in self.successes] + \
[('FAILURE', t, m) for t, m in self.failures] + \
[('ERROR', t, m) for t, m in self.errors] + \
[('EXPECTED_FAILURE', t, m) for t, m in self.expectedFailures] + \
[('UNEXPECTED_SUCCESS', t, None) for t in self.unexpectedSuccesses] + \
[('SKIPPED', t, m) for t, m in self.skipped]
return sorted(compound, key=lambda info: info[1].start_time)
def write_buildstats_json(self):
"""Write buildstats file"""
buildstats = OrderedDict()
for _, test, _ in self.all_results():
for key, val in test.buildstats.items():
buildstats[test.name + '.' + key] = val
with open(os.path.join(self.out_dir, 'buildstats.json'), 'w') as fobj:
json.dump(buildstats, fobj, cls=ResultsJsonEncoder)
def write_results_json(self):
"""Write test results into a json-formatted file"""
results = OrderedDict([('tester_host', self.hostname),
('start_time', self.start_time),
('elapsed_time', self.elapsed_time),
('tests', OrderedDict())])
for status, test, reason in self.all_results():
test_result = OrderedDict([('name', test.name),
('description', test.shortDescription()),
('status', status),
('start_time', test.start_time),
('elapsed_time', test.elapsed_time),
('measurements', test.measurements)])
if status in ('ERROR', 'FAILURE', 'EXPECTED_FAILURE'):
test_result['message'] = str(test.err[1])
test_result['err_type'] = test.err[0].__name__
test_result['err_output'] = reason
elif reason:
test_result['message'] = reason
results['tests'][test.name] = test_result
with open(os.path.join(self.out_dir, 'results.json'), 'w') as fobj:
json.dump(results, fobj, indent=4,
cls=ResultsJsonEncoder)
def write_results_xml(self):
"""Write test results into a JUnit XML file"""
top = ET.Element('testsuites')
suite = ET.SubElement(top, 'testsuite')
suite.set('name', 'oeqa.buildperf')
suite.set('timestamp', self.start_time.isoformat())
suite.set('time', str(self.elapsed_time.total_seconds()))
suite.set('hostname', self.hostname)
suite.set('failures', str(len(self.failures) + len(self.expectedFailures)))
suite.set('errors', str(len(self.errors)))
suite.set('skipped', str(len(self.skipped)))
test_cnt = 0
for status, test, reason in self.all_results():
test_cnt += 1
testcase = ET.SubElement(suite, 'testcase')
testcase.set('classname', test.__module__ + '.' + test.__class__.__name__)
testcase.set('name', test.name)
testcase.set('description', test.shortDescription())
testcase.set('timestamp', test.start_time.isoformat())
testcase.set('time', str(test.elapsed_time.total_seconds()))
            if status in ('ERROR', 'FAILURE', 'EXPECTED_FAILURE'):
                if status in ('FAILURE', 'EXPECTED_FAILURE'):
result = ET.SubElement(testcase, 'failure')
else:
result = ET.SubElement(testcase, 'error')
result.set('message', str(test.err[1]))
result.set('type', test.err[0].__name__)
result.text = reason
elif status == 'SKIPPED':
result = ET.SubElement(testcase, 'skipped')
result.text = reason
elif status not in ('SUCCESS', 'UNEXPECTED_SUCCESS'):
raise TypeError("BUG: invalid test status '%s'" % status)
for data in test.measurements.values():
measurement = ET.SubElement(testcase, data['type'])
measurement.set('name', data['name'])
measurement.set('legend', data['legend'])
vals = data['values']
if data['type'] == BuildPerfTestCase.SYSRES:
ET.SubElement(measurement, 'time',
timestamp=vals['start_time'].isoformat()).text = \
str(vals['elapsed_time'].total_seconds())
attrib = dict((k, str(v)) for k, v in vals['iostat'].items())
ET.SubElement(measurement, 'iostat', attrib=attrib)
attrib = dict((k, str(v)) for k, v in vals['rusage'].items())
ET.SubElement(measurement, 'rusage', attrib=attrib)
elif data['type'] == BuildPerfTestCase.DISKUSAGE:
ET.SubElement(measurement, 'size').text = str(vals['size'])
else:
raise TypeError('BUG: unsupported measurement type')
suite.set('tests', str(test_cnt))
# Use minidom for pretty-printing
dom_doc = minidom.parseString(ET.tostring(top, 'utf-8'))
with open(os.path.join(self.out_dir, 'results.xml'), 'w') as fobj:
dom_doc.writexml(fobj, addindent=' ', newl='\n', encoding='utf-8')
class BuildPerfTestCase(unittest.TestCase):
"""Base class for build performance tests"""
SYSRES = 'sysres'
DISKUSAGE = 'diskusage'
build_target = None
def __init__(self, *args, **kwargs):
super(BuildPerfTestCase, self).__init__(*args, **kwargs)
self.name = self._testMethodName
self.base_dir = None
self.start_time = None
self.elapsed_time = None
self.measurements = OrderedDict()
self.buildstats = OrderedDict()
# self.err is supposed to be a tuple from sys.exc_info()
self.err = None
self.bb_vars = get_bb_vars()
# TODO: remove 'times' and 'sizes' arrays when globalres support is
# removed
self.times = []
self.sizes = []
@property
def tmp_dir(self):
return os.path.join(self.base_dir, self.name + '.tmp')
def shortDescription(self):
return super(BuildPerfTestCase, self).shortDescription() or ""
def setUp(self):
"""Set-up fixture for each test"""
if not os.path.isdir(self.tmp_dir):
os.mkdir(self.tmp_dir)
if self.build_target:
self.run_cmd(['bitbake', self.build_target, '--runall=fetch'])
def tearDown(self):
"""Tear-down fixture for each test"""
if os.path.isdir(self.tmp_dir):
shutil.rmtree(self.tmp_dir)
def run(self, *args, **kwargs):
"""Run test"""
self.start_time = datetime.now()
super(BuildPerfTestCase, self).run(*args, **kwargs)
self.elapsed_time = datetime.now() - self.start_time
def run_cmd(self, cmd):
"""Convenience method for running a command"""
cmd_str = cmd if isinstance(cmd, str) else ' '.join(cmd)
log.info("Logging command: %s", cmd_str)
try:
runCmd2(cmd)
except CommandError as err:
log.error("Command failed: %s", err.retcode)
raise
def _append_measurement(self, measurement):
"""Simple helper for adding measurements results"""
if measurement['name'] in self.measurements:
raise ValueError('BUG: two measurements with the same name in {}'.format(
self.__class__.__name__))
self.measurements[measurement['name']] = measurement
def measure_cmd_resources(self, cmd, name, legend, save_bs=False):
"""Measure system resource usage of a command"""
def _worker(data_q, cmd, **kwargs):
"""Worker process for measuring resources"""
try:
start_time = datetime.now()
ret = runCmd2(cmd, **kwargs)
etime = datetime.now() - start_time
rusage_struct = resource.getrusage(resource.RUSAGE_CHILDREN)
iostat = OrderedDict()
with open('/proc/{}/io'.format(os.getpid())) as fobj:
for line in fobj.readlines():
key, val = line.split(':')
iostat[key] = int(val)
rusage = OrderedDict()
# Skip unused fields, (i.e. 'ru_ixrss', 'ru_idrss', 'ru_isrss',
# 'ru_nswap', 'ru_msgsnd', 'ru_msgrcv' and 'ru_nsignals')
for key in ['ru_utime', 'ru_stime', 'ru_maxrss', 'ru_minflt',
'ru_majflt', 'ru_inblock', 'ru_oublock',
'ru_nvcsw', 'ru_nivcsw']:
rusage[key] = getattr(rusage_struct, key)
data_q.put({'ret': ret,
'start_time': start_time,
'elapsed_time': etime,
'rusage': rusage,
'iostat': iostat})
except Exception as err:
data_q.put(err)
cmd_str = cmd if isinstance(cmd, str) else ' '.join(cmd)
log.info("Timing command: %s", cmd_str)
data_q = SimpleQueue()
try:
proc = Process(target=_worker, args=(data_q, cmd,))
proc.start()
data = data_q.get()
proc.join()
if isinstance(data, Exception):
raise data
except CommandError:
log.error("Command '%s' failed", cmd_str)
raise
etime = data['elapsed_time']
measurement = OrderedDict([('type', self.SYSRES),
('name', name),
('legend', legend)])
measurement['values'] = OrderedDict([('start_time', data['start_time']),
('elapsed_time', etime),
('rusage', data['rusage']),
('iostat', data['iostat'])])
if save_bs:
self.save_buildstats(name)
self._append_measurement(measurement)
# Append to 'times' array for globalres log
e_sec = etime.total_seconds()
self.times.append('{:d}:{:02d}:{:05.2f}'.format(int(e_sec / 3600),
int((e_sec % 3600) / 60),
e_sec % 60))
def measure_disk_usage(self, path, name, legend, apparent_size=False):
"""Estimate disk usage of a file or directory"""
cmd = ['du', '-s', '--block-size', '1024']
if apparent_size:
cmd.append('--apparent-size')
cmd.append(path)
ret = runCmd2(cmd)
size = int(ret.output.split()[0])
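        # `du --block-size 1024` reports the size in 1024-byte (KiB) blocks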
log.debug("Size of %s path is %s", path, size)
measurement = OrderedDict([('type', self.DISKUSAGE),
('name', name),
('legend', legend)])
measurement['values'] = OrderedDict([('size', size)])
self._append_measurement(measurement)
# Append to 'sizes' array for globalres log
self.sizes.append(str(size))
def save_buildstats(self, measurement_name):
"""Save buildstats"""
def split_nevr(nevr):
"""Split name and version information from recipe "nevr" string"""
n_e_v, revision = nevr.rsplit('-', 1)
match = re.match(r'^(?P<name>\S+)-((?P<epoch>[0-9]{1,5})_)?(?P<version>[0-9]\S*)$',
n_e_v)
if not match:
# If we're not able to parse a version starting with a number, just
# take the part after last dash
match = re.match(r'^(?P<name>\S+)-((?P<epoch>[0-9]{1,5})_)?(?P<version>[^-]+)$',
n_e_v)
name = match.group('name')
version = match.group('version')
epoch = match.group('epoch')
return name, epoch, version, revision
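        # e.g. split_nevr('gcc-7.3.0-r0') -> ('gcc', None, '7.3.0', 'r0')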
def bs_to_json(filename):
"""Convert (task) buildstats file into json format"""
bs_json = OrderedDict()
iostat = OrderedDict()
rusage = OrderedDict()
with open(filename) as fobj:
for line in fobj.readlines():
key, val = line.split(':', 1)
val = val.strip()
if key == 'Started':
start_time = datetime.utcfromtimestamp(float(val))
bs_json['start_time'] = start_time
elif key == 'Ended':
end_time = datetime.utcfromtimestamp(float(val))
elif key.startswith('IO '):
split = key.split()
iostat[split[1]] = int(val)
elif key.find('rusage') >= 0:
split = key.split()
ru_key = split[-1]
if ru_key in ('ru_stime', 'ru_utime'):
val = float(val)
else:
val = int(val)
rusage[ru_key] = rusage.get(ru_key, 0) + val
elif key == 'Status':
bs_json['status'] = val
bs_json['elapsed_time'] = end_time - start_time
bs_json['rusage'] = rusage
bs_json['iostat'] = iostat
return bs_json
log.info('Saving buildstats in JSON format')
bs_dirs = sorted(os.listdir(self.bb_vars['BUILDSTATS_BASE']))
if len(bs_dirs) > 1:
log.warning("Multiple buildstats found for test %s, only "
"archiving the last one", self.name)
bs_dir = os.path.join(self.bb_vars['BUILDSTATS_BASE'], bs_dirs[-1])
buildstats = []
for fname in os.listdir(bs_dir):
recipe_dir = os.path.join(bs_dir, fname)
if not os.path.isdir(recipe_dir):
continue
name, epoch, version, revision = split_nevr(fname)
recipe_bs = OrderedDict((('name', name),
('epoch', epoch),
('version', version),
('revision', revision),
('tasks', OrderedDict())))
for task in os.listdir(recipe_dir):
recipe_bs['tasks'][task] = bs_to_json(os.path.join(recipe_dir,
task))
buildstats.append(recipe_bs)
self.buildstats[measurement_name] = buildstats
def rm_tmp(self):
"""Cleanup temporary/intermediate files and directories"""
log.debug("Removing temporary and cache files")
for name in ['bitbake.lock', 'conf/sanity_info',
self.bb_vars['TMPDIR']]:
oe.path.remove(name, recurse=True)
def rm_sstate(self):
"""Remove sstate directory"""
log.debug("Removing sstate-cache")
oe.path.remove(self.bb_vars['SSTATE_DIR'], recurse=True)
def rm_cache(self):
"""Drop bitbake caches"""
oe.path.remove(self.bb_vars['PERSISTENT_DIR'], recurse=True)
@staticmethod
def sync():
"""Sync and drop kernel caches"""
runCmd2('bitbake -m', ignore_status=True)
log.debug("Syncing and dropping kernel caches""")
KernelDropCaches.drop()
os.sync()
# Wait a bit for all the dirty blocks to be written onto disk
time.sleep(3)
class BuildPerfTestLoader(unittest.TestLoader):
"""Test loader for build performance tests"""
sortTestMethodsUsing = None
class BuildPerfTestRunner(unittest.TextTestRunner):
"""Test loader for build performance tests"""
sortTestMethodsUsing = None
def __init__(self, out_dir, *args, **kwargs):
super(BuildPerfTestRunner, self).__init__(*args, **kwargs)
self.out_dir = out_dir
def _makeResult(self):
return BuildPerfTestResult(self.out_dir, self.stream, self.descriptions,
self.verbosity)
|
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import typing as tp
from .experiment import Datapoint
def _build_columns_list(datapoints: tp.List[Datapoint]) -> tp.List[str]:
columns: tp.Set[str] = set()
for dp in datapoints:
columns = columns.union(dp.values.keys())
for reserved_col in ['uid', 'from_uid']:
try:
columns.remove(reserved_col)
except KeyError:
pass
return list(columns)
def compress(datapoints: tp.List[Datapoint]) -> tp.Dict[str, tp.Any]:
columns = _build_columns_list(datapoints)
rows: tp.List[tp.Any] = []
for dp in datapoints:
d: tp.List[tp.Any] = [dp.uid, dp.from_uid]
for c in columns:
d.append(dp.values.get(c))
rows.append(d)
return {
"columns": columns,
"rows": rows
}
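# Illustrative shape: two datapoints with values {'a': 1} and {'a': 2, 'b': 3} compress to
# {'columns': ['a', 'b'], 'rows': [[uid1, from_uid1, 1, None], [uid2, from_uid2, 2, 3]]}
# (column order is unspecified because the columns are collected via a set).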
|
"""
bing.py
Copyright 2006 Andres Riancho
This file is part of w3af, http://w3af.org/ .
w3af is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation version 2 of the License.
w3af is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with w3af; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""
import urllib
import re
from w3af.core.data.search_engines.search_engine import SearchEngine
from w3af.core.data.parsers.doc.url import URL
class bing(SearchEngine):
"""
This class is a wrapper for doing bing searches. It allows the user to use
GET requests to search bing.com.
:author: Andres Riancho (andres.riancho@gmail.com)
"""
BLACKLISTED_DOMAINS = {'cc.bingj.com',
'www.microsofttranslator.com',
'onlinehelp.microsoft.com',
'go.microsoft.com',
'msn.com'}
def __init__(self, urlOpener):
SearchEngine.__init__(self)
self._uri_opener = urlOpener
def search(self, query, start, count=10):
"""
Search the web with Bing.
        This method is based on the msn.py file from the massive enumeration
toolset, coded by pdp and released under GPL v2.
"""
url = 'http://www.bing.com/search?'
query = urllib.urlencode({'q': query,
'first': start + 1,
'FORM': 'PERE'})
url_instance = URL(url + query)
response = self._uri_opener.GET(url_instance, headers=self._headers,
cache=True, grep=False,
follow_redirects=True)
# This regex might become outdated, but the good thing is that we have
# test_bing.py which is going to fail and tell us that it's outdated
re_match = re.findall('<a href="((http|https)(.*?))" h="ID=SERP,',
response.get_body())
results = set()
for url, _, _ in re_match:
try:
url = URL(url)
except ValueError:
pass
else:
# Test for full match.
if url.get_domain() not in self.BLACKLISTED_DOMAINS:
# Now test for partial match
for blacklisted_domain in self.BLACKLISTED_DOMAINS:
if blacklisted_domain in url.get_domain():
# ignore this domain.
break
else:
bing_result = BingResult(url)
results.add(bing_result)
return results
class BingResult(object):
"""
Dummy class that represents the search result.
"""
def __init__(self, url):
if not isinstance(url, URL):
msg = ('The url __init__ parameter of a BingResult object must'
' be of url.URL type.')
raise TypeError(msg)
self.URL = url
def __repr__(self):
return '<bing result %s>' % self.URL
def __eq__(self, other):
return self.URL == other.URL
def __hash__(self):
return hash(self.URL)
|
# --- coding: utf-8 ---
import sqlite3
from ipaddress import ip_address, ip_network
from settings import snmp_user, snmp_password
from pprint import pprint
from pysnmp.entity.rfc3413.oneliner import cmdgen, mibvar
from pysnmp.proto.rfc1902 import OctetString
from check import check_ip
import paramiko
from pprint import pprint
class Mac(OctetString):
def prettyPrint(self):
res = ''
arr = bytearray(self._value)
for i in range(len(arr)):
res += '%02X'%arr[i]
if i != 5: res += ':'
return res
db_path = '/opt/modgud/3.0.0/etc/modgud/configuration.sqlite3'
db_query = 'select network, ip from bundles left join ranges on bundles.id=ranges.bundle_id where ranges.id >= 9000;'
snmp_oid_prefix = [
'1.3.6.1.2.1.3.1.1.2.16.1.',
'1.3.6.1.2.1.3.1.1.2.15.1.',
'1.3.6.1.2.1.3.1.1.2.14.1.',
'1.3.6.1.2.1.3.1.1.2.13.1.',
'1.3.6.1.2.1.3.1.1.2.12.1.',
'1.3.6.1.2.1.3.1.1.2.11.1.'
]
snmp_command = '/usr/bin/snmpget'
def get_gateway(ip):
print 'get_gateway:'
print ip
if not ip or not check_ip(ip):
return None
address = ip_address(ip.decode('latin-1'))
query = db_query
res = None
try:
con = sqlite3.connect(db_path)
cur = con.cursor()
cur.execute(query)
res = cur.fetchall()
con.close()
except:
pass
gateway = None
if res:
for net, gw in res:
if address in ip_network(net).hosts():
gateway = gw
break
return gateway
def snmp_get_mac(gw, ip):
print 'snmp_get_mac:'
print gw + ' -- ' + ip
mac = None
for prefix in snmp_oid_prefix:
pprint( (prefix + ip).encode('latin-1'))
try:
errorIndication, errorStatus, errorIndex, varBind = cmdgen.CommandGenerator().getCmd(
cmdgen.UsmUserData(snmp_user, snmp_password,
authProtocol=cmdgen.usmHMACMD5AuthProtocol,
privProtocol=cmdgen.usmDESPrivProtocol),
cmdgen.UdpTransportTarget((gw, 161)),
mibvar.MibVariable((prefix + ip).encode('latin-1'))
)
if not (errorIndication or errorStatus):
(var, val) = varBind[0]
mac = Mac(val).prettyPrint()
if mac:
break
except Exception as e:
print 'snmp exception '
print e
pass
return mac
def get_mac(ip):
mac = '00:00:00:00:00:00'
gw = get_gateway(ip)
if gw:
m = snmp_get_mac(gw, ip)
if m:
mac = m
return mac
if __name__ == '__main__':
print get_mac('10.101.0.222')
|
from .openapi import (
JSONResponse,
Parameter,
ParameterIn,
register_operation,
RequestBody,
Response,
setup,
)
__all__ = (
"JSONResponse",
"Parameter",
"ParameterIn",
"register_operation",
"RequestBody",
"Response",
"setup",
)
|
import re
import os
class DevelopmentConfigs:
_sds_pool_base = os.path.join(os.getcwd(), 'dev/sds_pool')
_test_base = os.path.join(os.getcwd(), 'dev/test')
SDS_PDF_FILES = os.path.join(_sds_pool_base, 'sds_pdf_files')
SDS_TEXT_FILES = os.path.join(_sds_pool_base, 'sds_text_files')
TEST_SDS_PDF_FILES = os.path.join(_test_base, 'test_sds_pdf_files')
TEST_SDS_TEXT_FILES = os.path.join(_test_base, 'test_sds_text_files')
class SDSRegexes:
# list of keys used for regex lookup
REQUEST_KEYS = [
'manufacturer',
'product_name',
'flash_point',
'specific_gravity',
'nfpa_fire',
'nfpa_health',
'nfpa_reactivity',
'sara_311',
'revision_date',
'physical_state',
'cas_number',
]
    # SDS_FORMAT_REGEXES maps manufacturer names ('Takasago', 'Robertet', etc.) to a dict of
    # regexes specific to that manufacturer's SDS format; the keys are the REQUEST_KEYS above
    # and the values are (pattern, flags) tuples used to extract each piece of data
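    # Illustrative lookup, with ``text`` a hypothetical variable holding the extracted SDS text:
    #   pattern, flags = SDSRegexes.SDS_FORMAT_REGEXES['fisher']['flash_point']
    #   match = re.search(pattern, text, flags)
    #   flash_point = match.group('data') if match else None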
SDS_FORMAT_REGEXES = {
'citrus_and_allied': {
'manufacturer': (r"(?P<data>\bcitrus\s*?and\s*?allied\b)", re.I),
'product_name': (r"chemical\s*?identification[\s:]*(?P<data>.*?)product", (re.S|re.I)),
'flash_point': (r"flash\s?point.*?(?P<data>\d+)[^c]{3}", (re.S|re.I)),
'specific_gravity': (r"specific\s*?gravity.*?:.*?(?P<data>[\d.]+)", (re.S|re.I)),
'cas_number': (r"CAS(?P<data>.{30})", (re.S|re.I)),
'nfpa_fire': (r"NFPA.*?[F|f]ire.*?(?P<data>[0-4])", (re.S|re.I)),
'nfpa_health': (r"NFPA.*?[H|h]ealth.*?(?P<data>[0-4])", (re.S|re.I)),
'nfpa_reactivity': (r"NFPA.*?(([R|r]eactivity)|([I|i]nstability)).*?(?P<data>[0-4])", (re.S|re.I)),
'sara_311': (r"SARA 311(?P<data>.{80})", (re.S|re.I)),
'revision_date': (r"revision\s*?date\D*(?P<data>[\d\-\/]{6,15})", (re.S|re.I)),
'physical_state': (r"appearance.{0,80}(?P<data>(liquid|solid))", (re.S|re.I))
},
'excellentia': {
'manufacturer': (r"(?P<data>\bexcellentia\b)", re.I),
'product_name': (r"trade\s*?name(\(s\))[\s:]*(?P<data>.*?)(·|article)", (re.S|re.I)),
'flash_point': (r"flash\s*?point([\sC\d°:\(\).]*?(?P<data>[0-9.]*)\s*?°?\s*?F)?", (re.S|re.I)),
'specific_gravity': (r"relative\s*?density[:\s]*(?P<data>[\d.]*)", (re.S|re.I)),
'cas_number': (r"CAS(?P<data>.{30})", (re.S|re.I)),
'nfpa_fire': (r"nfpa.*?fire\D*(?P<data>\d)", (re.S|re.I)),
'nfpa_health': (r"nfpa.*?health\D*(?P<data>\d)", (re.S|re.I)),
'nfpa_reactivity': (r"nfpa.*?reactivity\D*(?P<data>\d)", (re.S|re.I)),
'sara_311': (r"sara 311(?P<data>.{80})", (re.S|re.I)),
'revision_date': (r"last\s*?revision\D*(?P<data>[\d\/]*)", (re.S|re.I)),
'physical_state': (r"appearance.{0,10}form\W*(?P<data>\w*)", (re.S|re.I))
},
'firmenich': {
'manufacturer': (r"(?P<data>\bfirmenich\b)", re.I),
'product_name': (r"(chemical\s*?name|product)[\s:\d\w\-]*?(?P<data>[^\-\d:]*?)(revised|synonyms)", (re.S|re.I)),
'flash_point': (r"flash\s*?point.*?((?P<fahrenheit>\d[\d.,\s]*)°?\s*?(\bF\b|Fahrenheit)|(?P<celsius>\d[\d.,\s]*)°?C(?!.{1,50}?(\bF\b|Fahrenheit)))", (re.S|re.I)),
'specific_gravity': (r"specific\s*?gravity.{1,50}?(?P<data>\d*\.\d*)", (re.S|re.I)),
'cas_number': (r"CAS(?P<data>.{30})", (re.S|re.I)),
'nfpa_fire': (r"NFPA.*?[F|f]ire.*?(?P<data>[0-4])", (re.S|re.I)),
'nfpa_health': (r"NFPA.*?[H|h]ealth.*?(?P<data>[0-4])", (re.S|re.I)),
'nfpa_reactivity': (r"NFPA.*?(([R|r]eactivity)|([I|i]nstability)).*?(?P<data>[0-4])", (re.S|re.I)),
'sara_311': (r"SARA 311(?P<data>.{80})", (re.S|re.I)),
'revision_date': (r"[R|r]evision [D|d]ate(?P<data>.{20})", (re.S|re.I)),
'physical_state': (r"F[\s]*?o[\s]*?r[\s]*?m[\s]*?\W(?P<data>.{30})", (re.S|re.I))
},
'fisher': {
'manufacturer': (r"(?P<data>\bfisher\b)", re.I),
'product_name': (r"product\s*name\W*(?P<data>[()\d\-\w\s,+%]*?)(cat\b|stock)", (re.S|re.I)),
'flash_point': (r"flash\s*point.*?(?P<data>\d*)", (re.S|re.I)),
'specific_gravity': (r"(relative\s*density|specific\s*gravity)\D*?(?P<data>[\d.]*)", (re.S|re.I)),
'cas_number': (r"product\s*name.{1,250}[^a-z]cas\b.*?(?P<data>\d{2,7}-\d{2}-\d)", (re.S|re.I)),
'nfpa_fire': (r"NFPA.{0,1000}?flammability.{0,10}?(?P<data>[0-4]+?)", (re.S|re.I)),
'nfpa_health': (r"NFPA.{0,1000}?health.{0,10}?(?P<data>[0-4]+?)", (re.S|re.I)),
'nfpa_reactivity': (r"NFPA.{0,1000}?(reactivity|instability).{0,10}?(?P<data>[0-4]+?)", (re.S|re.I)),
'sara_311': (r"SARA 311(?P<data>.{80})", (re.S|re.I)),
'revision_date': (r"revision\s*date\s*?(?P<data>([\d\-\/.,a-z]|\s(?=(\d|jan|feb|mar|apr|may|jun|jul|aug|sep|oct|nov|dec)))*\d)", (re.S|re.I)),
'physical_state': (r"((P|p)hysical\s*(S|s)tate|\b(F|f)orm\b:?)\s*(?P<data>[A-Z][a-z]*)", (re.S))
},
'frutarom': {
'manufacturer': (r"(?P<data>\bf\s*?r\s*?u\s*?t\s*?a\s*?r\s*?o\s*?m\b)", re.I),
'product_name': (r"\s[P|p]roduct\s(?P<data>.{80})", (re.S|re.I)),
'flash_point': (r"[F|f]lash\s*?[P|p]oint(?P<data>.{30})", (re.S|re.I)),
'specific_gravity': (r"([R|r]elative [D|d]ensity|[S|s]pecific [G|g]ravity)(?P<data>.{30})", (re.S|re.I)),
'cas_number': (r"CAS(?P<data>.{30})", (re.S|re.I)),
'nfpa_fire': (r"NFPA.*?[F|f]ire.*?(?P<data>[0-4])", (re.S|re.I)),
'nfpa_health': (r"NFPA.*?[H|h]ealth.*?(?P<data>[0-4])", (re.S|re.I)),
'nfpa_reactivity': (r"NFPA.*?(([R|r]eactivity)|([I|i]nstability)).*?(?P<data>[0-4])", (re.S|re.I)),
'sara_311': (r"SARA 311(?P<data>.{80})", (re.S|re.I)),
'revision_date': (r"[R|r]evision [D|d]ate(?P<data>.{20})", (re.S|re.I)),
'physical_state': (r"F[\s]*?o[\s]*?r[\s]*?m[\s]*?\W(?P<data>.{30})", (re.S|re.I))
},
'givaudan': {
'manufacturer': (r"(?P<data>\bgivaudan\b)", re.I),
'product_name': (r"sales\s*?no\.[:\s\d]*\s*?(?P<data>.*?)relevant", (re.S|re.I)),
'flash_point': (r"flash\s*?point([\sC\d°:\(.\)]*?(?P<data>[0-9.]*)\s*?°?\s*?F)?", (re.S|re.I)),
'specific_gravity': (r"(?<!bulk\s)density[\s:]*(?P<data>[\d\W]*)", (re.S|re.I)),
'cas_number': (r"CAS(?P<data>.{30})", (re.S|re.I)),
'nfpa_fire': (r"nfpa.*fire.*?(?P<data>\d)", (re.S|re.I)),
'nfpa_health': (r"nfpa.*fire.*?(?P<data>\d)", (re.S|re.I)),
'nfpa_reactivity': (r"nfpa.*fire.*?(?P<data>\d)", (re.S|re.I)),
'sara_311': (r"sara\s*?311.*?((?P<fire>fire hazard\s*)|(?P<acute>acute health hazard\s*)|(?P<chronic>chronic health hazard\s*)|(?P<reactive>reactive hazard\s*)|(?P<sudden>sudden release of pressure\s*))+", (re.S|re.I)),
'revision_date': (r"revision\s*?date[:\s]*(?P<data>[\d\-\/\w\s]*?)\bp", (re.S|re.I)),
'physical_state': (r"physical\s*?state[\s:]*(?P<data>[\w]*)", (re.S|re.I))
},
'iff': {
'manufacturer': (r"(?P<data>\biff\b)", re.I),
'product_name': (r"\sproduct\s*?name[:\s]*(?P<data>.*?)\biff\b", (re.S|re.I)),
'flash_point': (r"flash\s*?point([\sC\d°:\(.]*?(?P<data>[0-9.]*)\s*?°?\s*?F)?", (re.S|re.I)),
'specific_gravity': (r"relative\s*?density\s*?.*?:(?P<data>[\d\.\s]*)", (re.S|re.I)),
'cas_number': (r"CAS(?P<data>.{30})", (re.S|re.I)),
'nfpa_fire': (r"NFPA.*?[F|f]ire.*?(?P<data>[0-4])", (re.S|re.I)),
'nfpa_health': (r"NFPA.*?[H|h]ealth.*?(?P<data>[0-4])", (re.S|re.I)),
'nfpa_reactivity': (r"NFPA.*?(([R|r]eactivity)|([I|i]nstability)).*?(?P<data>[0-4])", (re.S|re.I)),
'sara_311': (r"sara\s*?311.*?:(?P<data>(\s|fire|hazard|health|acute|chronic)*)", (re.S|re.I)),
'revision_date': (r"revision\s*?date[\s:]*(?P<data>[\d\/\-\s]*)", (re.S|re.I)),
'physical_state': (r"physical\s*?state[\s:]*(?P<data>\w*)", (re.S|re.I))
},
'kerry': {
'manufacturer': (r"(?P<data>\bkerry\b)", re.I),
'product_name': (r"product\s*?name[\s:]*(?P<data>\D*)(\d|product)", (re.S|re.I)),
'flash_point': (r"flash\s*?point\D*(?P<data>[0-9. °CF]+)", (re.S|re.I)),
'specific_gravity': (r"relative\s*?density\D*(?P<data>[\d.]+)", (re.S|re.I)),
'cas_number': (r"CAS(?P<data>.{30})", (re.S|re.I)),
'nfpa_fire': (r"NFPA.*?[F|f]ire.*?(?P<data>[0-4])", (re.S|re.I)),
'nfpa_health': (r"NFPA.*?[H|h]ealth.*?(?P<data>[0-4])", (re.S|re.I)),
'nfpa_reactivity': (r"NFPA.*?(([R|r]eactivity)|([I|i]nstability)).*?(?P<data>[0-4])", (re.S|re.I)),
'sara_311': (r"SARA 311(?P<data>.{80})", (re.S|re.I)),
'revision_date': (r"date\s*?of\s*?revision\D*(?P<data>[\d.\-\/]+)", (re.S|re.I)),
'physical_state': (r"appearance.{0,80}(?P<data>(liquid|solid))", (re.S|re.I))
},
'pepsico_inc': {
'manufacturer': (r"(?P<data>\bpepsico\s*?inc\b)", re.I),
'product_name': (r"product\s*?name[\s:]*(?P<data>\D*)document", (re.S|re.I)),
'flash_point': (r"flash\s*point.{1,50}?((?P<farenheit>\d[\d°\s,]+)°\s*?f|(?P<celcius>\d[\d°\s,]+C(?![\d\s\(>,°]{1,50}?f)))", (re.S|re.I)),
'specific_gravity': (r"specific\s*?gravity\D{0,25}(?P<data>[\d,]*)", (re.S|re.I)),
'cas_number': (r"CAS(?P<data>.{30})", (re.S|re.I)),
'nfpa_fire': (r"NFPA.*?[F|f]ire.*?(?P<data>[0-4])", (re.S|re.I)),
'nfpa_health': (r"NFPA.*?[H|h]ealth.*?(?P<data>[0-4])", (re.S|re.I)),
'nfpa_reactivity': (r"NFPA.*?(([R|r]eactivity)|([I|i]nstability)).*?(?P<data>[0-4])", (re.S|re.I)),
'sara_311': (r"SARA 311(?P<data>.{80})", (re.S|re.I)),
'revision_date': (r"revision\s*?date\D{0,25}(?P<data>\d[\d\-\w]*\d)", (re.S|re.I)),
'physical_state': (r"physical\s*?state[:\-\s]*(?P<data>\w*)", (re.S|re.I))
},
'robertet': {
'manufacturer': (r"(?P<data>\br\s*?o\s*?b\s*?e\s*?r\s*?t\s*?e\s*?t\b)", re.I),
'product_name': (r"\s[P|p]roduct\s(?P<data>.{80})", (re.S|re.I)),
'flash_point': (r"[F|f]lash\s*?[P|p]oint(?P<data>.{30})", (re.S|re.I)),
'specific_gravity': (r"([R|r]elative [D|d]ensity|[S|s]pecific [G|g]ravity)(?P<data>.{30})", (re.S|re.I)),
'cas_number': (r"CAS(?P<data>.{30})", (re.S|re.I)),
'nfpa_fire': (r"NFPA.*?[F|f]ire.*?(?P<data>[0-4])", (re.S|re.I)),
'nfpa_health': (r"NFPA.*?[H|h]ealth.*?(?P<data>[0-4])", (re.S|re.I)),
'nfpa_reactivity': (r"NFPA.*?(([R|r]eactivity)|([I|i]nstability)).*?(?P<data>[0-4])", (re.S|re.I)),
'sara_311': (r"SARA 311(?P<data>.{80})", (re.S|re.I)),
'revision_date': (r"[R|r]evision [D|d]ate(?P<data>.{20})", (re.S|re.I)),
'physical_state': (r"F[\s]*?o[\s]*?r[\s]*?m[\s]*?\W(?P<data>.{30})", (re.S|re.I))
},
'sigma_aldrich': {
'manufacturer': (r"(?P<data>\bsigma[\-\s]*?aldrich\b)", re.I),
'product_name': (r"product\s*?name[\s:]*(?P<data>.*?)product", (re.S|re.I)),
'flash_point': (r"flash\s*?point([\sC\d°:\(.]*?(?P<data>[0-9.]*)\s*?°?\s*?F)?", (re.S|re.I)),
'specific_gravity': (r"relative\s*?density\D*(?P<data>[\d.]*)", (re.S|re.I)),
'cas_number': (r"CAS(?P<data>.{30})", (re.S|re.I)),
'nfpa_fire': (r"nfpa.*fire.*?(?P<data>\d)", (re.S|re.I)),
'nfpa_health': (r"nfpa.*health.*?(?P<data>\d)", (re.S|re.I)),
'nfpa_reactivity': (r"nfpa.*reactivity.*?(?P<data>\d)", (re.S|re.I)),
'sara_311': (r"sara\s*?311\/312((?P<fire>fire hazard\s*)|(?P<acute>acute health hazard\s*)|(?P<chronic>chronic health hazard\s*)|(?P<reactive>reactive hazard\s*)|(?P<sudden>sudden release of pressure\s*)|\s|hazards)*", (re.S|re.I)),
'revision_date': (r"revision\s*?date\D*(?P<data>[\d\-\/]{6,15})", (re.S|re.I)),
'physical_state': (r"appearance.{0,10}form\W*(?P<data>\w*)", (re.S|re.I))
},
'symrise': {
'manufacturer': (r"(?P<data>\bsymrise\b)", re.I),
'product_name': (r"product\s*?name[\s:]*(?P<data>.*?)material", (re.S|re.I)),
'flash_point': (r"flash\s*?point([\sC\d°:\(.]*?(?P<data>[0-9.]*)\s*?°?\s*?(F|C))?", (re.S|re.I)),
'specific_gravity': (r"relative\s*?density[:\s]*(?P<data>[\d.]*)", (re.S|re.I)),
'cas_number': (r"CAS(?P<data>.{30})", (re.S|re.I)),
'nfpa_fire': (r"NFPA.*?[F|f]ire.*?(?P<data>[0-4])", (re.S|re.I)),
'nfpa_health': (r"NFPA.*?[H|h]ealth.*?(?P<data>[0-4])", (re.S|re.I)),
'nfpa_reactivity': (r"NFPA.*?(([R|r]eactivity)|([I|i]nstability)).*?(?P<data>[0-4])", (re.S|re.I)),
'sara_311': (r"SARA 311(?P<data>.{80})", (re.S|re.I)),
'revision_date': (r"revision\s*?date\D*(?P<data>[\d.]*)", (re.S|re.I)),
'physical_state': (r"F[\s]*?o[\s]*?r[\s]*?m[\s]*?\W(?P<data>.{30})", (re.S|re.I))
},
'takasago': {
'manufacturer': (r"(?P<data>takasago)", re.I),
'product_name': (r"\s[P|p]roduct\s(?P<data>.{80})", (re.S|re.I)),
'flash_point': (r"[F|f]lash\s*?[P|p]oint(?P<data>.{30})", (re.S|re.I)),
'specific_gravity': (r"([R|r]elative [D|d]ensity|[S|s]pecific [G|g]ravity)(?P<data>.{30})", (re.S|re.I)),
'cas_number': (r"CAS(?P<data>.{30})", (re.S|re.I)),
'nfpa_fire': (r"NFPA.*?[F|f]ire.*?(?P<data>[0-4])", (re.S|re.I)),
'nfpa_health': (r"NFPA.*?[H|h]ealth.*?(?P<data>[0-4])", (re.S|re.I)),
'nfpa_reactivity': (r"NFPA.*?(([R|r]eactivity)|([I|i]nstability)).*?(?P<data>[0-4])", (re.S|re.I)),
'sara_311': (r"SARA 311(?P<data>.{80})", (re.S|re.I)),
'revision_date': (r"[R|r]evision [D|d]ate(?P<data>.{20})", (re.S|re.I)),
'physical_state': (r"F[\s]*?o[\s]*?r[\s]*?m[\s]*?\W(?P<data>.{30})", (re.S|re.I))
},
'treatt': {
'manufacturer': (r"(?P<data>\btreatt\b)", re.I),
'product_name': (r"product\s*identifier.*?product\s*name\s*(?P<data>[^\d]+)", (re.S|re.I)),
'flash_point': (r"flash\s*point.{1,50}?((?P<farenheit>\d[\d°\s,.]+°?\s*?f)|(?P<celcius>\d[\d°\s,“]+C(?![\d\s\(>,.°“\/]{1,50}?f)))", (re.S|re.I)),
'specific_gravity': (r"(relative density|specific gravity)(?P<data>.{30})", (re.S|re.I)),
'cas_number': (r"CAS(?P<data>.{30})", (re.S|re.I)),
'nfpa_fire': (r"NFPA.*?[F|f]ire.*?(?P<data>[0-4])", (re.S|re.I)),
'nfpa_health': (r"NFPA.*?[H|h]ealth.*?(?P<data>[0-4])", (re.S|re.I)),
'nfpa_reactivity': (r"NFPA.*?(([R|r]eactivity)|([I|i]nstability)).*?(?P<data>[0-4])", (re.S|re.I)),
'sara_311': (r"SARA 311(?P<data>.{80})", (re.S|re.I)),
'revision_date': (r"revision\s*?date\s*(?P<data>[\d\-\w]*\d)", (re.S|re.I)),
'physical_state': (r"physical\s*?state\s*(?P<data>[\w ]*)", (re.S|re.I))
},
'ungerer': {
'manufacturer': (r"(?P<data>\bungerer\s*?and\s*?company\b)", re.I),
'product_name': (r"product\s*?name\s*\w*\s?(?P<data>.*?)\d", (re.S|re.I)),
'flash_point': (r"flash\s*?point[\s\dc€°\/]*?(?P<data>[\d.]*)\s*?[€°]*?\s*?f", (re.S|re.I)),
'specific_gravity': (r"specific\s*?gravity\s*?(?P<data>[\d.]*)", (re.S|re.I)),
'cas_number': (r"CAS(?P<data>.{30})", (re.S|re.I)),
'nfpa_fire': (r"nfpa.*flammability.*?(?P<data>\d)", (re.S|re.I)),
'nfpa_health': (r"nfpa.*health.*?(?P<data>\d)", (re.S|re.I)),
'nfpa_reactivity': (r"nfpa.*instability.*?(?P<data>\d)", (re.S|re.I)),
'sara_311': (r"sara\s*?311\/312(((?P<acute>acute health hazard)yes)|((?P<chronic>chronic health hazard)yes)|((?P<fire>fire hazard)yes)|((?P<sudden>sudden release of pressure hazard)yes)|((?P<reactive>reactive hazard)yes)|\s|hazard|categories|acute|health|yes|€|chronic|no|fire|sudden release of pressure|reactive)*", (re.S|re.I)),
'revision_date': (r"revision\s*?date[:€\s]*(?P<data>[\d\-\/\w]*?)€", (re.S|re.I)),
'physical_state': (r"physical\s*?state[\s\W\dc]*(?P<data>[\w]*)", (re.S|re.I))
},
}
DEFAULT_SDS_FORMAT = {
'manufacturer': (r"[C|c]ompany(?P<data>.{80})", (re.S|re.I)),
'product_name': (r"\s[P|p]roduct\s(?P<data>.{80})", (re.S|re.I)),
'flash_point': (r"[F|f]lash\s*?[P|p]oint(?P<data>.{30})", (re.S|re.I)),
'specific_gravity': (r"([R|r]elative [D|d]ensity|[S|s]pecific [G|g]ravity)(?P<data>.{30})", (re.S|re.I)),
'cas_number': (r"CAS(?P<data>.{30})", (re.S|re.I)),
'nfpa_fire': (r"NFPA.*?[F|f]ire.*?(?P<data>[0-4])", (re.S|re.I)),
'nfpa_health': (r"NFPA.*?[H|h]ealth.*?(?P<data>[0-4])", (re.S|re.I)),
'nfpa_reactivity': (r"NFPA.*?(([R|r]eactivity)|([I|i]nstability)).*?(?P<data>[0-4])", (re.S|re.I)),
'sara_311': (r"SARA 311(?P<data>.{80})", (re.S|re.I)),
'revision_date': (r"[R|r]evision [D|d]ate(?P<data>.{20})", (re.S|re.I)),
'physical_state': (r"F[\s]*?o[\s]*?r[\s]*?m[\s]*?\W(?P<data>.{30})", (re.S|re.I))
}
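# --- Hedged usage sketch (not part of the original module) ---
# A minimal illustration of how one of the (pattern, flags) tuples above might be
# applied to raw SDS text. The sample text and the extract_field helper are
# hypothetical; only DEFAULT_SDS_FORMAT is referenced from this module.
if __name__ == "__main__":
    import re

    def extract_field(text, field, sds_format=DEFAULT_SDS_FORMAT):
        """Return the captured 'data' group for a field, or None if no match."""
        pattern, flags = sds_format[field]
        match = re.search(pattern, text, flags)
        return match.group("data").strip() if match else None

    sample = ("Flash point: 65 C (149 F) closed cup\n"
              "Revision date: 2020-01-15 version 3")
    print(extract_field(sample, "flash_point"))     # raw 30-character capture
    print(extract_field(sample, "revision_date"))   # raw 20-character capture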
|
"""
Created on Aug 20, 2020
@author: joseph-hellerstein
Codes that provide various analyses of residuals.
There are 3 types of timeseries: observed, fitted, and residuals
(observed - fitted).
Plots are organized by the timeseries and the characteristic analyzed. These
characteristics are: (a) over time, (b) histogram.
"""
from SBstoat.namedTimeseries import NamedTimeseries
import SBstoat._plotOptions as po
from SBstoat import timeseriesPlotter as tp
from docstring_expander.expander import Expander
import numpy as np
PLOT = "plot"
class ResidualsAnalyzer():
def __init__(self, observedTS:NamedTimeseries, fittedTS:NamedTimeseries,
residualsTS:NamedTimeseries=None, meanFittedTS=None,
stdFittedTS=None,
bandLowTS:NamedTimeseries=None, bandHighTS:NamedTimeseries=None,
isPlot:bool=True):
"""
Parameters
----------
        observedTS: observed values
        fittedTS: fitted values
                  may have different times than observedTS
        residualsTS: residuals (observed - fitted); same time values as observedTS
        meanFittedTS: mean of bootstrap fitted values with the same times as observedTS
        stdFittedTS: standard deviation of bootstrap fitted values with the same times as observedTS
        bandLowTS: timeseries that describes the lower band for the fitted timeseries
        bandHighTS: timeseries that describes the upper band for the fitted timeseries
"""
self.observedTS = observedTS
self.fittedTS = fittedTS
self.meanFittedTS = meanFittedTS
self.stdFittedTS = stdFittedTS
self.bandLowTS = bandLowTS
self.bandHighTS = bandHighTS
if residualsTS is None:
self.residualsTS = self.observedTS.copy()
cols = self.residualsTS.colnames
self.residualsTS[cols] -= self.fittedTS[cols]
self.residualsTS[cols] \
= np.nan_to_num(self.residualsTS[cols], nan=0.0)
else:
self.residualsTS = residualsTS.copy()
### Plotter
self._plotter = tp.TimeseriesPlotter(isPlot=isPlot)
@staticmethod
def _addKeyword(kwargs:dict, key:str, value:object):
        if key not in kwargs:
kwargs[key] = value
@Expander(po.KWARGS, po.BASE_OPTIONS, indent=8,
header=po.HEADER)
def plotAll(self, **kwargs:dict):
"""
Does all residual plots.
Parameters
----------
#@expand
"""
        for name in dir(self):
            if name == "plotAll":
                continue
            if name.startswith(PLOT):
                getattr(self, name)(**kwargs)
@Expander(po.KWARGS, po.BASE_OPTIONS, indent=8,
header=po.HEADER)
def plotResidualsOverTime(self, **kwargs:dict):
"""
Plots residuals of a fit over time.
Parameters
----------
#@expand
"""
ResidualsAnalyzer._addKeyword(kwargs, po.MARKER, "o")
ResidualsAnalyzer._addKeyword(kwargs, po.SUPTITLE, "Residuals Over Time")
self._plotter.plotTimeSingle(self.residualsTS, **kwargs)
@Expander(po.KWARGS, po.BASE_OPTIONS, indent=8,
header=po.HEADER)
def plotFittedObservedOverTime(self, **kwargs:dict):
"""
Plots the fit with observed data over time.
Parameters
----------
#@expand
"""
title = "Observed vs. fitted"
if self.bandLowTS is not None:
title += " (with shading for 95th percentile)"
ResidualsAnalyzer._addKeyword(kwargs, po.SUPTITLE, title)
ResidualsAnalyzer._addKeyword(kwargs, po.MARKER, [None, "o", "^"])
legends = ["fitted", "observed"]
if self.meanFittedTS is not None:
legends.append("bootstrap fitted")
ResidualsAnalyzer._addKeyword(kwargs, po.LEGEND, legends)
ResidualsAnalyzer._addKeyword(kwargs, po.COLOR, ["b", "b", "r"])
self._plotter.plotTimeSingle(
self.fittedTS,
timeseries2=self.observedTS,
meanTS=self.meanFittedTS, stdTS=self.stdFittedTS,
bandLowTS=self.bandLowTS,
bandHighTS=self.bandHighTS,
**kwargs)
@Expander(po.KWARGS, po.BASE_OPTIONS, includes=[po.BINS], indent=8,
header=po.HEADER)
def plotResidualsHistograms(self, **kwargs:dict):
"""
        Plots histograms of the residuals for each column.
        Parameters
        ----------
#@expand
"""
ResidualsAnalyzer._addKeyword(kwargs, po.SUPTITLE, "Residual Distributions")
self._plotter.plotHistograms(self.residualsTS, **kwargs)
@Expander(po.KWARGS, po.BASE_OPTIONS, indent=8,
header=po.HEADER)
def plotResidualsAutoCorrelations(self, **kwargs:dict):
"""
Plots auto correlations between residuals of columns.
Parameters
----------
#@expand
"""
ResidualsAnalyzer._addKeyword(kwargs, po.SUPTITLE, "Residual Autocorrelations")
self._plotter.plotAutoCorrelations(self.residualsTS, **kwargs)
@Expander(po.KWARGS, po.BASE_OPTIONS, indent=8,
header=po.HEADER)
def plotResidualCrossCorrelations(self, **kwargs:dict):
"""
Plots cross correlations between residuals of columns.
Parameters
----------
#@expand
"""
ResidualsAnalyzer._addKeyword(kwargs, po.SUPTITLE, "Residual Cross Correlations")
self._plotter.plotCrossCorrelations(self.residualsTS, **kwargs)
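# --- Hedged usage sketch (not part of the original module) ---
# Minimal driving code for ResidualsAnalyzer. The CSV path is a placeholder,
# and fittedTS is a stand-in for model output; in practice it would come from
# a simulation with columns matching the observed timeseries.
if __name__ == "__main__":
    observedTS = NamedTimeseries(csvPath="observed.csv")
    fittedTS = observedTS.copy()
    analyzer = ResidualsAnalyzer(observedTS, fittedTS, isPlot=False)
    analyzer.plotAll()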
|
import numpy as np
def create_vocabulary(input_text):
flat = ' '.join(input_text)
vocabulary = list(set(flat))
vocab_size = len(vocabulary)
char_to_idx = {char: idx for idx, char in enumerate(vocabulary)}
idx_to_char = {idx: char for idx, char in enumerate(vocabulary)}
return {'vocab_size': vocab_size, 'encoder': char_to_idx, 'decoder': idx_to_char}
def train_test_split(lines, test_size=0.1):
"""
Split data into a train and test set. Set test size to control the fraction
of rows in the test set.
    :param lines: array of text lines to split
    :param test_size: fraction of rows to place in the test set (strictly between 0 and 1)
    :return: (train_lines, test_lines)
"""
assert (0 < test_size < 1), "test_size must be between 0 and 1 (exclusive)"
n_examples = len(lines)
test_samples = int(round(n_examples * test_size))
train_samples = n_examples - test_samples
idxs = np.arange(len(lines))
    # shuffle the indices in place
    np.random.shuffle(idxs)
    train_idxs = idxs[:train_samples]
    test_idxs = idxs[train_samples:]
    lines = np.asarray(lines)  # support plain Python lists as well as ndarrays
    l_train = lines[train_idxs]
    l_test = lines[test_idxs]
return l_train, l_test
def text_to_input_and_target(text_lines):
flattened = ' '.join(text_lines)
xs = flattened[:-1]
ys = flattened[1:]
return xs, ys
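# --- Hedged usage sketch (not part of the original module) ---
# Shows the intended flow of the helpers above on a tiny made-up corpus.
if __name__ == "__main__":
    lines = np.array(["hello world", "character level models", "are simple"])
    vocab = create_vocabulary(lines)
    print("vocabulary size:", vocab['vocab_size'])
    train_lines, test_lines = train_test_split(lines, test_size=0.34)
    xs, ys = text_to_input_and_target(train_lines)
    encoded_xs = [vocab['encoder'][char] for char in xs]
    print(len(encoded_xs), "input characters,", len(ys), "target characters")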
|
def load(h):
return ({'abbr': '2tplm10',
'code': 1,
'title': '2m temperature probability less than -10 C %'},
{'abbr': '2tplm5',
'code': 2,
'title': '2m temperature probability less than -5 C %'},
{'abbr': '2tpl0',
'code': 3,
'title': '2m temperature probability less than 0 C %'},
{'abbr': '2tpl5',
'code': 4,
'title': '2m temperature probability less than 5 C %'},
{'abbr': '2tpl10',
'code': 5,
'title': '2m temperature probability less than 10 C %'},
{'abbr': '2tpg25',
'code': 6,
'title': '2m temperature probability greater than 25 C %'},
{'abbr': '2tpg30',
'code': 7,
'title': '2m temperature probability greater than 30 C %'},
{'abbr': '2tpg35',
'code': 8,
'title': '2m temperature probability greater than 35 C %'},
{'abbr': '2tpg40',
'code': 9,
'title': '2m temperature probability greater than 40 C %'},
{'abbr': '2tpg45',
'code': 10,
'title': '2m temperature probability greater than 45 C %'},
{'abbr': 'mn2tplm10',
'code': 11,
'title': 'Minimum 2 metre temperature probability less than -10 C %'},
{'abbr': 'mn2tplm5',
'code': 12,
'title': 'Minimum 2 metre temperature probability less than -5 C %'},
{'abbr': 'mn2tpl0',
'code': 13,
'title': 'Minimum 2 metre temperature probability less than 0 C %'},
{'abbr': 'mn2tpl5',
'code': 14,
'title': 'Minimum 2 metre temperature probability less than 5 C %'},
{'abbr': 'mn2tpl10',
'code': 15,
'title': 'Minimum 2 metre temperature probability less than 10 C %'},
{'abbr': 'mx2tpg25',
'code': 16,
'title': 'Maximum 2 metre temperature probability greater than 25 C %'},
{'abbr': 'mx2tpg30',
'code': 17,
'title': 'Maximum 2 metre temperature probability greater than 30 C %'},
{'abbr': 'mx2tpg35',
'code': 18,
'title': 'Maximum 2 metre temperature probability greater than 35 C %'},
{'abbr': 'mx2tpg40',
'code': 19,
'title': 'Maximum 2 metre temperature probability greater than 40 C %'},
{'abbr': 'mx2tpg45',
'code': 20,
'title': 'Maximum 2 metre temperature probability greater than 45 C %'},
{'abbr': '10spg10',
'code': 21,
'title': '10 metre wind speed probability of at least 10 m/s %'},
{'abbr': '10spg15',
'code': 22,
'title': '10 metre wind speed probability of at least 15 m/s %'},
{'abbr': '10spg20',
'code': 23,
'title': '10 metre wind speed probability of at least 20 m/s %'},
{'abbr': '10spg35',
'code': 24,
'title': '10 metre wind speed probability of at least 35 m/s %'},
{'abbr': '10spg50',
'code': 25,
'title': '10 metre wind speed probability of at least 50 m/s %'},
{'abbr': '10gpg20',
'code': 26,
'title': '10 metre wind gust probability of at least 20 m/s %'},
{'abbr': '10gpg35',
'code': 27,
'title': '10 metre wind gust probability of at least 35 m/s %'},
{'abbr': '10gpg50',
'code': 28,
'title': '10 metre wind gust probability of at least 50 m/s %'},
{'abbr': '10gpg75',
'code': 29,
'title': '10 metre wind gust probability of at least 75 m/s %'},
{'abbr': '10gpg100',
'code': 30,
'title': '10 metre wind gust probability of at least 100 m/s %'},
{'abbr': 'tppg1',
'code': 31,
'title': 'Total precipitation probability of at least 1 mm %'},
{'abbr': 'tppg5',
'code': 32,
'title': 'Total precipitation probability of at least 5 mm %'},
{'abbr': 'tppg10',
'code': 33,
'title': 'Total precipitation probability of at least 10 mm %'},
{'abbr': 'tppg20',
'code': 34,
'title': 'Total precipitation probability of at least 20 mm %'},
{'abbr': 'tppg40',
'code': 35,
'title': 'Total precipitation probability of at least 40 mm %'},
{'abbr': 'tppg60',
'code': 36,
'title': 'Total precipitation probability of at least 60 mm %'},
{'abbr': 'tppg80',
'code': 37,
'title': 'Total precipitation probability of at least 80 mm %'},
{'abbr': 'tppg100',
'code': 38,
'title': 'Total precipitation probability of at least 100 mm %'},
{'abbr': 'tppg150',
'code': 39,
'title': 'Total precipitation probability of at least 150 mm %'},
{'abbr': 'tppg200',
'code': 40,
'title': 'Total precipitation probability of at least 200 mm %'},
{'abbr': 'tppg300',
'code': 41,
'title': 'Total precipitation probability of at least 300 mm %'},
{'abbr': 'sfpg1',
'code': 42,
'title': 'Snowfall probability of at least 1 mm %'},
{'abbr': 'sfpg5',
'code': 43,
'title': 'Snowfall probability of at least 5 mm %'},
{'abbr': 'sfpg10',
'code': 44,
'title': 'Snowfall probability of at least 10 mm %'},
{'abbr': 'sfpg20',
'code': 45,
'title': 'Snowfall probability of at least 20 mm %'},
{'abbr': 'sfpg40',
'code': 46,
'title': 'Snowfall probability of at least 40 mm %'},
{'abbr': 'sfpg60',
'code': 47,
'title': 'Snowfall probability of at least 60 mm %'},
{'abbr': 'sfpg80',
'code': 48,
'title': 'Snowfall probability of at least 80 mm %'},
{'abbr': 'sfpg100',
'code': 49,
'title': 'Snowfall probability of at least 100 mm %'},
{'abbr': 'sfpg150',
'code': 50,
'title': 'Snowfall probability of at least 150 mm %'},
{'abbr': 'sfpg200',
'code': 51,
'title': 'Snowfall probability of at least 200 mm %'},
{'abbr': 'sfpg300',
'code': 52,
'title': 'Snowfall probability of at least 300 mm %'},
{'abbr': 'tccpg10',
'code': 53,
'title': 'Total Cloud Cover probability greater than 10% %'},
{'abbr': 'tccpg20',
'code': 54,
'title': 'Total Cloud Cover probability greater than 20% %'},
{'abbr': 'tccpg30',
'code': 55,
'title': 'Total Cloud Cover probability greater than 30% %'},
{'abbr': 'tccpg40',
'code': 56,
'title': 'Total Cloud Cover probability greater than 40% %'},
{'abbr': 'tccpg50',
'code': 57,
'title': 'Total Cloud Cover probability greater than 50% %'},
{'abbr': 'tccpg60',
'code': 58,
'title': 'Total Cloud Cover probability greater than 60% %'},
{'abbr': 'tccpg70',
'code': 59,
'title': 'Total Cloud Cover probability greater than 70% %'},
{'abbr': 'tccpg80',
'code': 60,
'title': 'Total Cloud Cover probability greater than 80% %'},
{'abbr': 'tccpg90',
'code': 61,
'title': 'Total Cloud Cover probability greater than 90% %'},
{'abbr': 'tccpg99',
'code': 62,
'title': 'Total Cloud Cover probability greater than 99% %'},
{'abbr': 'hccpg10',
'code': 63,
'title': 'High Cloud Cover probability greater than 10% %'},
{'abbr': 'hccpg20',
'code': 64,
'title': 'High Cloud Cover probability greater than 20% %'},
{'abbr': 'hccpg30',
'code': 65,
'title': 'High Cloud Cover probability greater than 30% %'},
{'abbr': 'hccpg40',
'code': 66,
'title': 'High Cloud Cover probability greater than 40% %'},
{'abbr': 'hccpg50',
'code': 67,
'title': 'High Cloud Cover probability greater than 50% %'},
{'abbr': 'hccpg60',
'code': 68,
'title': 'High Cloud Cover probability greater than 60% %'},
{'abbr': 'hccpg70',
'code': 69,
'title': 'High Cloud Cover probability greater than 70% %'},
{'abbr': 'hccpg80',
'code': 70,
'title': 'High Cloud Cover probability greater than 80% %'},
{'abbr': 'hccpg90',
'code': 71,
'title': 'High Cloud Cover probability greater than 90% %'},
{'abbr': 'hccpg99',
'code': 72,
'title': 'High Cloud Cover probability greater than 99% %'},
{'abbr': 'mccpg10',
'code': 73,
'title': 'Medium Cloud Cover probability greater than 10% %'},
{'abbr': 'mccpg20',
'code': 74,
'title': 'Medium Cloud Cover probability greater than 20% %'},
{'abbr': 'mccpg30',
'code': 75,
'title': 'Medium Cloud Cover probability greater than 30% %'},
{'abbr': 'mccpg40',
'code': 76,
'title': 'Medium Cloud Cover probability greater than 40% %'},
{'abbr': 'mccpg50',
'code': 77,
'title': 'Medium Cloud Cover probability greater than 50% %'},
{'abbr': 'mccpg60',
'code': 78,
'title': 'Medium Cloud Cover probability greater than 60% %'},
{'abbr': 'mccpg70',
'code': 79,
'title': 'Medium Cloud Cover probability greater than 70% %'},
{'abbr': 'mccpg80',
'code': 80,
'title': 'Medium Cloud Cover probability greater than 80% %'},
{'abbr': 'mccpg90',
'code': 81,
'title': 'Medium Cloud Cover probability greater than 90% %'},
{'abbr': 'mccpg99',
'code': 82,
'title': 'Medium Cloud Cover probability greater than 99% %'},
{'abbr': 'lccpg10',
'code': 83,
'title': 'Low Cloud Cover probability greater than 10% %'},
{'abbr': 'lccpg20',
'code': 84,
'title': 'Low Cloud Cover probability greater than 20% %'},
{'abbr': 'lccpg30',
'code': 85,
'title': 'Low Cloud Cover probability greater than 30% %'},
{'abbr': 'lccpg40',
'code': 86,
'title': 'Low Cloud Cover probability greater than 40% %'},
{'abbr': 'lccpg50',
'code': 87,
'title': 'Low Cloud Cover probability greater than 50% %'},
{'abbr': 'lccpg60',
'code': 88,
'title': 'Low Cloud Cover probability greater than 60% %'},
{'abbr': 'lccpg70',
'code': 89,
'title': 'Low Cloud Cover probability greater than 70% %'},
{'abbr': 'lccpg80',
'code': 90,
'title': 'Low Cloud Cover probability greater than 80% %'},
{'abbr': 'lccpg90',
'code': 91,
'title': 'Low Cloud Cover probability greater than 90% %'},
{'abbr': 'lccpg99',
'code': 92,
'title': 'Low Cloud Cover probability greater than 99% %'})
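# --- Hedged usage sketch (not part of the original module) ---
# Builds a code -> abbreviation lookup from the table above. The `h` argument
# is unused by load() here, so None is passed purely as a placeholder.
if __name__ == "__main__":
    code_to_abbr = {entry['code']: entry['abbr'] for entry in load(None)}
    print(code_to_abbr[31])  # tppg1: total precipitation probability of at least 1 mm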
|
#!/usr/bin/env python
# -*- coding: ISO-8859-1 -*-
# generated by wxGlade 0.4 on Sun Mar 26 01:48:08 2006
import wx
class mainFrame(wx.Frame):
def __init__(self, *args, **kwds):
# begin wxGlade: mainFrame.__init__
kwds["style"] = wx.DEFAULT_FRAME_STYLE
wx.Frame.__init__(self, *args, **kwds)
self.panel_1 = wx.Panel(self, -1)
self.sizer_4_staticbox = wx.StaticBox(self.panel_1, -1, "Message log")
# Menu Bar
self.frame_1_menubar = wx.MenuBar()
self.SetMenuBar(self.frame_1_menubar)
self.fileExitId = wx.NewId()
self.windowGraphEditorId = wx.NewId()
self.windowPythonShellId = wx.NewId()
self.windowMinimiseChildrenId = wx.NewId()
self.windowRestoreChildrenId = wx.NewId()
self.testingAllTestsId = wx.NewId()
self.helpContentsId = wx.NewId()
self.helpAboutId = wx.NewId()
wxglade_tmp_menu = wx.Menu()
wxglade_tmp_menu.Append(self.fileExitId, "E&xit", "Exit DSCAS", wx.ITEM_NORMAL)
self.frame_1_menubar.Append(wxglade_tmp_menu, "&File")
wxglade_tmp_menu = wx.Menu()
wxglade_tmp_menu.Append(self.windowGraphEditorId, "&Graph Editor", "Open up the graph editor window", wx.ITEM_NORMAL)
wxglade_tmp_menu.Append(self.windowPythonShellId, "&Python Shell", "Show the Python Shell interface", wx.ITEM_NORMAL)
wxglade_tmp_menu.AppendSeparator()
wxglade_tmp_menu.Append(self.windowMinimiseChildrenId, "&Minimise children", "Minimise (iconise) all the child windows of the main window", wx.ITEM_NORMAL)
wxglade_tmp_menu.Append(self.windowRestoreChildrenId, "&Restore children", "Restore all child windows of the main window.", wx.ITEM_NORMAL)
self.frame_1_menubar.Append(wxglade_tmp_menu, "&Window")
wxglade_tmp_menu = wx.Menu()
wxglade_tmp_menu.Append(self.testingAllTestsId, "&All Tests", "Run all test suites.", wx.ITEM_NORMAL)
self.frame_1_menubar.Append(wxglade_tmp_menu, "&Testing")
wxglade_tmp_menu = wx.Menu()
wxglade_tmp_menu.Append(self.helpContentsId, "&Contents\tF1", "Show the main help contents", wx.ITEM_NORMAL)
wxglade_tmp_menu.Append(self.helpAboutId, "&About", "Get information about DeVIDE", wx.ITEM_NORMAL)
self.frame_1_menubar.Append(wxglade_tmp_menu, "&Help")
# Menu Bar end
self.frame_1_statusbar = self.CreateStatusBar(1, 0)
self.progressText = wx.StaticText(self.panel_1, -1, "...")
self.progressGauge = wx.Gauge(self.panel_1, -1, 100)
self.progressRaiseCheckBox = wx.CheckBox(self.panel_1, -1, "Raise this window when the progress is updated.")
self.messageLogTextCtrl = wx.TextCtrl(self.panel_1, -1, "", style=wx.TE_MULTILINE|wx.TE_READONLY|wx.HSCROLL)
self.__set_properties()
self.__do_layout()
# end wxGlade
def __set_properties(self):
# begin wxGlade: mainFrame.__set_properties
self.SetTitle("DeVIDE main window")
self.frame_1_statusbar.SetStatusWidths([-1])
# statusbar fields
frame_1_statusbar_fields = ["Welcome to DeVIDE"]
for i in range(len(frame_1_statusbar_fields)):
self.frame_1_statusbar.SetStatusText(frame_1_statusbar_fields[i], i)
self.progressText.SetFont(wx.Font(12, wx.DEFAULT, wx.NORMAL, wx.NORMAL, 0, ""))
self.progressGauge.SetBackgroundColour(wx.Colour(50, 50, 204))
self.progressRaiseCheckBox.SetToolTipString("Each time the progress is updated, this window will be brought to the front. If you do not wish this behaviour, uncheck this box.")
self.messageLogTextCtrl.SetFont(wx.Font(10, wx.DEFAULT, wx.NORMAL, wx.NORMAL, 0, ""))
# end wxGlade
def __do_layout(self):
# begin wxGlade: mainFrame.__do_layout
sizer_1 = wx.BoxSizer(wx.VERTICAL)
sizer_3 = wx.BoxSizer(wx.VERTICAL)
sizer_2 = wx.BoxSizer(wx.VERTICAL)
sizer_4 = wx.StaticBoxSizer(self.sizer_4_staticbox, wx.HORIZONTAL)
sizer_2.Add(self.progressText, 0, 0, 4)
sizer_2.Add(self.progressGauge, 0, wx.EXPAND, 5)
sizer_2.Add(self.progressRaiseCheckBox, 0, wx.TOP|wx.EXPAND|wx.ALIGN_CENTER_VERTICAL, 7)
sizer_4.Add(self.messageLogTextCtrl, 1, wx.ALL|wx.EXPAND, 7)
sizer_4.Add((0, 150), 0, 0, 0)
sizer_2.Add(sizer_4, 1, wx.TOP|wx.EXPAND, 7)
sizer_2.Add((600, 0), 0, 0, 0)
sizer_3.Add(sizer_2, 1, wx.ALL|wx.EXPAND, 7)
self.panel_1.SetAutoLayout(True)
self.panel_1.SetSizer(sizer_3)
sizer_3.Fit(self.panel_1)
sizer_3.SetSizeHints(self.panel_1)
sizer_1.Add(self.panel_1, 1, wx.EXPAND, 0)
self.SetAutoLayout(True)
self.SetSizer(sizer_1)
sizer_1.Fit(self)
sizer_1.SetSizeHints(self)
self.Layout()
# end wxGlade
# end of class mainFrame
if __name__ == "__main__":
app = wx.PySimpleApp(0)
wx.InitAllImageHandlers()
frame_1 = mainFrame(None, -1, "")
app.SetTopWindow(frame_1)
frame_1.Show()
app.MainLoop()
|
import json
from pathlib import Path
from typing import Tuple, Union
from openff.recharge.aromaticity import AromaticityModels
from openff.recharge.charges.bcc import (
BCCCollection,
BCCParameter,
original_am1bcc_corrections,
)
from openff.recharge.charges.library import LibraryChargeCollection
from openff.recharge.charges.qc import QCChargeSettings
from openff.recharge.charges.vsite import (
BondChargeSiteParameter,
DivalentLonePairParameter,
VirtualSiteCollection,
)
from openff.recharge.conformers import ConformerSettings
ChargeCollection = Union[
Tuple[ConformerSettings, QCChargeSettings], LibraryChargeCollection
]
def save_charge_model(
output_directory: Path,
charge_collection: ChargeCollection,
bcc_collection: BCCCollection,
v_site_collection: VirtualSiteCollection,
):
output_directory.mkdir(parents=True, exist_ok=True)
with Path(output_directory, "initial-parameters-base.json").open("w") as file:
if isinstance(charge_collection, LibraryChargeCollection):
file.write(charge_collection.json(indent=2))
else:
json.dump(
(charge_collection[0].dict(), charge_collection[1].dict()),
file,
indent=2,
)
with Path(output_directory, "initial-parameters-bcc.json").open("w") as file:
file.write(bcc_collection.json(indent=2))
with Path(output_directory, "initial-parameters-v-site.json").open("w") as file:
file.write(v_site_collection.json(indent=2))
def generate_pyridine_only_model(conformer_settings: ConformerSettings):
save_charge_model(
output_directory=Path("pyridine-only", "no-v-sites"),
charge_collection=(conformer_settings, QCChargeSettings(theory="am1")),
bcc_collection=original_am1bcc_corrections(),
v_site_collection=VirtualSiteCollection(parameters=[]),
)
save_charge_model(
output_directory=Path("pyridine-only", "v-sites"),
charge_collection=(conformer_settings, QCChargeSettings(theory="am1")),
bcc_collection=BCCCollection(
parameters=[
BCCParameter(
smirks=(
"[#6X3H1a:1]1:"
"[#7X2a:2]:"
"[#6X3H1a]:"
"[#6X3a]:"
"[#6X3a]:"
"[#6X3a]1"
),
value=0.0,
provenance=None,
),
BCCParameter(
smirks=(
"[#1:2]"
"[#6X3H1a:1]1:"
"[#7X2a]:"
"[#6X3H1a]:"
"[#6X3a]:"
"[#6X3a]:"
"[#6X3a]1"
),
value=0.0,
provenance=None,
),
*original_am1bcc_corrections().parameters,
],
aromaticity_model=AromaticityModels.AM1BCC,
),
v_site_collection=VirtualSiteCollection(
parameters=[
DivalentLonePairParameter(
smirks=(
"[#6X3H1a:2]1:"
"[#7X2a:1]:"
"[#6X3H1a:3]:"
"[#6X3a]:"
"[#6X3a]:"
"[#6X3a]1"
),
name="EP",
distance=0.35,
out_of_plane_angle=0.0,
charge_increments=(0.0, 0.35, 0.0),
sigma=0.0,
epsilon=0.0,
match="once",
)
]
),
)
def generate_halogen_only_model(conformer_settings: ConformerSettings):
save_charge_model(
output_directory=Path("halogens", "no-v-sites"),
charge_collection=(conformer_settings, QCChargeSettings(theory="am1")),
bcc_collection=original_am1bcc_corrections(),
v_site_collection=VirtualSiteCollection(parameters=[]),
)
save_charge_model(
output_directory=Path("halogens", "v-sites-1"),
charge_collection=(conformer_settings, QCChargeSettings(theory="am1")),
bcc_collection=original_am1bcc_corrections(),
v_site_collection=VirtualSiteCollection(
parameters=[
BondChargeSiteParameter(
smirks="[#6A:2]-[#17:1]",
name="EP1",
distance=1.0,
charge_increments=(-0.05, 0.0),
sigma=0.0,
epsilon=0.0,
match="all-permutations",
),
BondChargeSiteParameter(
smirks="[#6A:2]-[#35:1]",
name="EP1",
distance=1.0,
charge_increments=(-0.05, 0.0),
sigma=0.0,
epsilon=0.0,
match="all-permutations",
),
]
),
)
save_charge_model(
output_directory=Path("halogens", "v-sites-2"),
charge_collection=(conformer_settings, QCChargeSettings(theory="am1")),
bcc_collection=original_am1bcc_corrections(),
v_site_collection=VirtualSiteCollection(
parameters=[
BondChargeSiteParameter(
smirks="[#6A:2]-[#17:1]",
name="EP1",
distance=0.35,
charge_increments=(0.05, 0.0),
sigma=0.0,
epsilon=0.0,
match="all-permutations",
),
BondChargeSiteParameter(
smirks="[#6A:2]-[#17:1]",
name="EP2",
distance=1.0,
charge_increments=(-0.05, 0.0),
sigma=0.0,
epsilon=0.0,
match="all-permutations",
),
BondChargeSiteParameter(
smirks="[#6A:2]-[#35:1]",
name="EP1",
distance=0.35,
charge_increments=(0.05, 0.0),
sigma=0.0,
epsilon=0.0,
match="all-permutations",
),
BondChargeSiteParameter(
smirks="[#6A:2]-[#35:1]",
name="EP2",
distance=1.0,
charge_increments=(-0.05, 0.0),
sigma=0.0,
epsilon=0.0,
match="all-permutations",
),
]
),
)
def generate_vam1bcc_v1_charge_model(conformer_settings: ConformerSettings):
save_charge_model(
output_directory=Path("vam1bcc-v1", "no-v-sites"),
charge_collection=(conformer_settings, QCChargeSettings(theory="am1")),
bcc_collection=original_am1bcc_corrections(),
v_site_collection=VirtualSiteCollection(parameters=[]),
)
save_charge_model(
output_directory=Path("vam1bcc-v1", "v-sites"),
charge_collection=(conformer_settings, QCChargeSettings(theory="am1")),
bcc_collection=BCCCollection(
parameters=[
BCCParameter(
smirks=(
"[#6X3H1a:1]1:"
"[#7X2a:2]:"
"[#6X3H1a]:"
"[#6X3a]:"
"[#6X3a]:"
"[#6X3a]1"
),
value=0.0,
provenance=None,
),
BCCParameter(
smirks=(
"[#1:2]"
"[#6X3H1a:1]1:"
"[#7X2a]:"
"[#6X3H1a]:"
"[#6X3a]:"
"[#6X3a]:"
"[#6X3a]1"
),
value=0.0,
provenance=None,
),
*original_am1bcc_corrections().parameters,
],
aromaticity_model=AromaticityModels.AM1BCC,
),
v_site_collection=VirtualSiteCollection(
parameters=[
BondChargeSiteParameter(
smirks="[#6a:2]-[#17:1]",
name="EP1",
distance=1.45,
charge_increments=(-0.063, 0.0),
sigma=0.0,
epsilon=0.0,
match="all-permutations",
),
BondChargeSiteParameter(
smirks="[#6A:2]-[#17:1]",
name="EP1",
distance=1.45,
charge_increments=(-0.063, 0.0),
sigma=0.0,
epsilon=0.0,
match="all-permutations",
),
BondChargeSiteParameter(
smirks="[#6a:2]-[#35:1]",
name="EP1",
distance=1.55,
charge_increments=(-0.082, 0.0),
sigma=0.0,
epsilon=0.0,
match="all-permutations",
),
BondChargeSiteParameter(
smirks="[#6A:2]-[#35:1]",
name="EP1",
distance=1.55,
charge_increments=(-0.082, 0.0),
sigma=0.0,
epsilon=0.0,
match="all-permutations",
),
DivalentLonePairParameter(
smirks=(
"[#6X3H1a:2]1:"
"[#7X2a:1]:"
"[#6X3H1a:3]:"
"[#6X3a]:"
"[#6X3a]:"
"[#6X3a]1"
),
name="EP",
distance=0.45,
out_of_plane_angle=0.0,
charge_increments=(0.0, 0.45, 0.0),
sigma=0.0,
epsilon=0.0,
match="once",
),
]
),
)
def main():
conformer_settings = ConformerSettings(
method="omega-elf10", sampling_mode="dense", max_conformers=10
)
generate_pyridine_only_model(conformer_settings)
generate_halogen_only_model(conformer_settings)
generate_vam1bcc_v1_charge_model(conformer_settings)
if __name__ == "__main__":
main()
|
# Copyright 2020 Richard Jiang, Prashant Singh, Fredrik Wrede and Andreas Hellander
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Absolute epsilon selector
"""
from sciope.utilities.epsilonselectors.epsilon_selector import *
class AbsoluteEpsilonSelector(EpsilonSelector):
"""
Creates an epsilon selector based on a fixed sequence.
"""
def __init__(self, epsilon_sequence):
"""
Parameters
----------
        epsilon_sequence : Sequence[float]
Sequence of epsilons to use.
"""
assert (len(epsilon_sequence) > 0)
self.epsilon_sequence = epsilon_sequence
self.last_round = len(self.epsilon_sequence) - 1
def get_initial_epsilon(self):
"""Gets the first epsilon in the sequence.
Returns
-------
epsilon : float
The initial epsilon value of this sequence
percentile : bool
Whether the epsilon should be interpreted as a percentile
        terminate : bool
            Whether to stop after this epsilon (True when the sequence
            contains only one entry)
"""
return self.epsilon_sequence[0], False, len(self.epsilon_sequence) == 1
def get_epsilon(self, round, abc_history):
"""Returns the n-th epsilon in the seqeunce.
Parameters
----------
round : int
the round to get the epsilon for
        abc_history : list
A list of dictionaries with keys `accepted_samples` and `distances`
representing the history of all ABC runs up to this point.
Returns
-------
epsilon : float
The epsilon value for ABC-SMC
percentile : bool
Whether the epsilon should be interpreted as a percentile
terminate : bool
Whether to stop after this epsilon
"""
        return self.epsilon_sequence[round], False, round == self.last_round
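# --- Hedged usage sketch (not part of the original module) ---
# Walks a fixed epsilon schedule the way an ABC-SMC loop might. This selector
# ignores its abc_history argument, so an empty list stands in for it here.
if __name__ == "__main__":
    selector = AbsoluteEpsilonSelector([1.0, 0.5, 0.1])
    epsilon, is_percentile, terminate = selector.get_initial_epsilon()
    print(0, epsilon, terminate)
    round_ = 0
    while not terminate:
        round_ += 1
        epsilon, is_percentile, terminate = selector.get_epsilon(round_, [])
        print(round_, epsilon, terminate)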
|
__version__="1.0.2"
|
# vFabric Administration Server API
# Copyright (c) 2012 VMware, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from vas.sqlfire.AgentInstances import AgentInstance
from vas.sqlfire.ServerInstances import ServerInstance
from vas.sqlfire.Groups import Group
from vas.sqlfire.InstallationImages import InstallationImage
from vas.sqlfire.Installations import Installations, Installation
from vas.sqlfire.LocatorInstances import LocatorInstance
from vas.test.VasTestCase import VasTestCase
class TestInstallations(VasTestCase):
def test_list(self):
self._assert_collection(
Installations(self._client, 'https://localhost:8443/sqlfire/v1/groups/1/installations/'))
def test_create(self):
installation_image_location = 'https://localhost:8443/sqlfire/v1/installation-images/0/'
location = 'https://localhost:8443/sqlfire/v1/groups/0/installations/'
self._return_location('https://localhost:8443/sqlfire/v1/groups/1/installations/2/')
installation = Installations(self._client, location).create(
InstallationImage(self._client, installation_image_location))
self.assertIsInstance(installation, Installation)
self._assert_post(location, {'image': installation_image_location}, 'installation')
def test_detail(self):
self._assert_item(Installation(self._client, 'https://localhost:8443/sqlfire/v1/groups/1/installations/2/'), [
('version', '6.6.1'),
('group', lambda actual: self.assertIsInstance(actual, Group)),
('agent_instances', [
AgentInstance(self._client, 'https://localhost:8443/sqlfire/v1/groups/1/agent-instances/3/')
]),
('installation_image', lambda actual: self.assertIsInstance(actual, InstallationImage)),
('locator_instances', [
LocatorInstance(self._client, 'https://localhost:8443/sqlfire/v1/groups/1/locator-instances/6/'),
LocatorInstance(self._client, 'https://localhost:8443/sqlfire/v1/groups/1/locator-instances/5/')
]),
('server_instances', [
ServerInstance(self._client, 'https://localhost:8443/sqlfire/v1/groups/1/server-instances/4/')
])
])
def test_delete(self):
self._assert_deletable(
Installation(self._client, 'https://localhost:8443/sqlfire/v1/groups/1/installations/2/'))
|
# -*- coding: utf-8 -*-
"""Writer for travis.yml files."""
from __future__ import unicode_literals
import os
from l2tdevtools.dependency_writers import interface
class TravisYMLWriter(interface.DependencyFileWriter):
"""Travis.yml file writer."""
_TEMPLATE_DIRECTORY = os.path.join('data', 'templates', '.travis.yml')
PATH = '.travis.yml'
def _GenerateFromTemplate(self, template_filename, template_mappings):
"""Generates file context based on a template file.
Args:
template_filename (str): path of the template file.
template_mappings (dict[str, str]): template mappings, where the key
maps to the name of a template variable.
Returns:
str: output based on the template string.
Raises:
RuntimeError: if the template cannot be formatted.
"""
template_filename = os.path.join(
self._l2tdevtools_path, self._TEMPLATE_DIRECTORY, template_filename)
return super(TravisYMLWriter, self)._GenerateFromTemplate(
template_filename, template_mappings)
def Write(self):
"""Writes a .travis.yml file."""
template_mappings = {}
file_content = []
template_data = self._GenerateFromTemplate('header', template_mappings)
file_content.append(template_data)
if self._project_definition.name in ('dfvfs', 'plaso'):
template_data = self._GenerateFromTemplate('jenkins', template_mappings)
file_content.append(template_data)
template_data = self._GenerateFromTemplate(
'allow_failures', template_mappings)
file_content.append(template_data)
template_data = self._GenerateFromTemplate('footer', template_mappings)
file_content.append(template_data)
file_content = ''.join(file_content)
file_content = file_content.encode('utf-8')
with open(self.PATH, 'wb') as file_object:
file_object.write(file_content)
|
# -*- coding: utf-8 -*-
import codecs
import nltk
from nltk.tokenize import sent_tokenize
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
from nltk import pos_tag
from nltk.stem import WordNetLemmatizer
from nltk.stem import PorterStemmer
from nltk.corpus import treebank
#mytext = """ربما كانت أحد أهم التطورات التي قامت بها الرياضيات العربية التي بدأت في هذا الوقت بعمل الخوارزمي و هي بدايات الجبر، و من المهم فهم كيف كانت هذه الفكرة الجديدة مهمة، فقد كانت خطوة ثورية بعيدا عن المفهوم اليوناني للرياضيات التي هي في جوهرها هندسة، الجبر كان نظرية موحدة تتيح الأعداد الكسرية و الأعداد اللا كسرية، و قدم وسيلة للتنمية في هذا الموضوع مستقبلا. و جانب آخر مهم لإدخال أفكار الجبر و هو أنه سمح بتطبيق الرياضيات على نفسها بطريقة لم تحدث من قبل"""
mytext = """Perhaps one of the most significant advances made byArabic mathematicsbegan at this time with the work of al-Khwarizmi, namely the beginnings of algebra. It is important to understand just how significant this new idea was. It was a revolutionary move away from the Greek concept of mathematics which was essentially geometry. Algebra was a unifying theory which allowedrational numbers,irrational numbers, geometrical magnitudes, etc., to all be treated as "algebraic objects". It gave mathematics a whole new development path so much broader in concept to that which had existed before, and provided a vehicle for future development of the subject. Another important aspect of the introduction of algebraic ideas was that it allowed mathematics to be applied to itselfin a way which had not happened before."""
words = word_tokenize(mytext)
phrase = sent_tokenize(mytext)
print("\nsent_tokenize : ", phrase)
print("#" * 50)
print("\nword_tokenize : ", words)
stopWords = set(stopwords.words('english'))
wordsFiltered = []
lemmatizing = []
stemming = []
for w in words:
if w not in stopWords:
wordsFiltered.append(w)
lemmatizing.append(WordNetLemmatizer().lemmatize(w))
stemming.append(PorterStemmer().stem(w))
tagg = pos_tag(wordsFiltered)
print("#" * 50)
print(tagg)
print("#" * 50)
print(nltk.chunk.ne_chunk(tagg))
print("#" * 50)
print(lemmatizing)
print("#" * 50)
print(stemming)
print("#" * 50)
#tree_bank = treebank.parsed_sents('wsj_0001.mrg')[0]
# tree_bank.draw()
|
import logging
import time
from useful.resource.readers import ResourceURL, readers
from useful.resource.parsers import parsers
_log = logging.getLogger(__name__)
def cached_load(timeout=300):
"""
Define timeout to be used in `load()` function.
Args:
timeout (int, optional): Number of seconds to cache data without
checking if it has changed in any way. Defaults to 300.
Returns:
function: A function using timeout variable
"""
memory = {}
def load(url, mimetype=None, parser=None, hook=None):
"""
Load resource from uri or cache if already used before.
Args:
            url (str): String representing a URL as specified in RFC 1738.
mimetype (str, optional): Forced MIME type if not None. Defaults to
None.
parser (useful.resource.parsers.Parser, optional): A parser class
to use instead of parsers from useful.resource.parsers.parsers.
Defaults to None.
hook (callable, optional): An optional function to call after
reading and parsing the data. Defaults to None.
Raises:
ValueError: No reader supports provided url scheme
ValueError: No parser supports provided mimetype
Returns:
object: Final data after running reader, parser and hook on the
resource url
"""
hash_ = None
# get the reader from url
resource_url = ResourceURL(url, mimetype=mimetype)
try:
reader = readers[resource_url.scheme]
except KeyError:
raise ValueError(
f"Unsupported reader scheme '{resource_url.scheme}'")
reader = reader(url=resource_url)
# if url has been cached for less than `timeout` or hash sum of the
# resource is still equal, return cached value
if url in memory:
now = time.time()
if now - memory[url]['time'] < timeout:
_log.debug(
f"Url '{url}' in memory for less then {timeout} seconds",
extra={"url": url, "timeout": timeout})
return memory[url]['data']
else:
hash_ = reader.hash()
if hash_ == memory[url]['hash']:
_log.debug(
f"Url '{url}' in memory hasn't changed hash sum",
extra={"url": url, "hash": hash_})
# update object timestamp in memory
memory[url]['time'] = now
return memory[url]['data']
# if url has been cached but needs to update use cached hook as a
# hook, otherwise use function parameter hook as hook object
hook = memory.get(url, {}).get("hook", hook)
# use already calculated above hash sum or calculate hash sum if it was
# never calculated
hash_ = hash_ or reader.hash()
# use user-provided parser or get parser from mimetype
try:
parser = parser or parsers[resource_url.mimetype]
parser = parser(reader=reader)
except KeyError:
raise ValueError(
f"Unsupported parser mimetype {resource_url.mimetype}")
# parse data provided by reader
data = parser.parse()
# call hook on data
if hook is not None:
data = hook(data)
# cache results and other relevant data
memory[url] = {
'time': time.time(),
'hash': hash_,
'data': data,
'hook': hook
}
_log.debug(f"Upserting url '{url}' in memory",
extra={"url": url, "hash": hash_})
return data
return load
load = cached_load(timeout=300)
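# --- Hedged usage sketch (not part of the original module) ---
# Illustrates the module-level `load` defined above. The URL is a placeholder;
# its scheme and MIME type must be handled by the registered readers/parsers.
# An optional hook would run once on the parsed data before it is cached.
if __name__ == "__main__":
    config = load("file:///tmp/config.yaml")
    config_again = load("file:///tmp/config.yaml")  # served from cache within 300 s
    print(config is config_again)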
|
# coding: utf-8
"""
Workflow Execution Service
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: v1
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import libica.openapi.libwes
from libica.openapi.libwes.api.workflow_versions_api import WorkflowVersionsApi # noqa: E501
from libica.openapi.libwes.rest import ApiException
class TestWorkflowVersionsApi(unittest.TestCase):
"""WorkflowVersionsApi unit test stubs"""
def setUp(self):
self.api = libica.openapi.libwes.api.workflow_versions_api.WorkflowVersionsApi() # noqa: E501
def tearDown(self):
pass
def test_create_workflow_version(self):
"""Test case for create_workflow_version
Create a new workflow version # noqa: E501
"""
pass
def test_get_workflow_version(self):
"""Test case for get_workflow_version
Get the details of a workflow version # noqa: E501
"""
pass
def test_launch_workflow_version(self):
"""Test case for launch_workflow_version
Launch a workflow version # noqa: E501
"""
pass
def test_list_all_workflow_versions(self):
"""Test case for list_all_workflow_versions
Get a list of all workflow versions # noqa: E501
"""
pass
def test_list_workflow_versions(self):
"""Test case for list_workflow_versions
Get a list of workflow versions # noqa: E501
"""
pass
def test_update_workflow_version(self):
"""Test case for update_workflow_version
Update an existing workflow version # noqa: E501
"""
pass
if __name__ == '__main__':
unittest.main()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: tfutils.py
# Author: Qian Ge <geqian1001@gmail.com>
import tensorflow as tf
def sample_normal_single(mean, stddev, name=None):
return tf.random_normal(
# shape=mean.get_shape(),
shape=tf.shape(mean),
mean=mean,
stddev=stddev,
dtype=tf.float32,
seed=None,
name=name,
)
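# --- Hedged usage sketch (not part of the original module) ---
# Draws one normal sample per element of a mean tensor, using the TF1-style
# graph execution that tf.random_normal above implies.
if __name__ == "__main__":
    mean = tf.constant([[0.0, 1.0], [2.0, 3.0]])
    stddev = tf.constant(0.5)
    sample = sample_normal_single(mean, stddev, name='sample')
    with tf.Session() as sess:
        print(sess.run(sample))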
|
import sys
from importlib import reload
from django.urls import clear_url_caches
import pytest
pytestmark = pytest.mark.django_db
DOCS_URL = "/docs/"
@pytest.fixture
def all_urlconfs():
return [
"apps.core.urls",
"apps.users.urls",
"conf.urls", # The ROOT_URLCONF must be last!
]
@pytest.fixture
def reloaded_urlconfs(all_urlconfs):
def _reloaded_urlconfs():
"""
Use this to ensure all urlconfs are reloaded as needed before the test.
"""
clear_url_caches()
for urlconf in all_urlconfs:
if urlconf in sys.modules:
reload(sys.modules[urlconf])
return _reloaded_urlconfs
def test_docs_view_public_api_doc_true(client, settings, reloaded_urlconfs):
"""Test docs view when PUBLIC_API_DOCUMENTATION is True."""
settings.STATICFILES_STORAGE = (
"django.contrib.staticfiles.storage.StaticFilesStorage"
)
    # added because Swagger needs staticfiles to render the docs page
settings.PUBLIC_API_DOCUMENTATION = True
settings.DEBUG = False
reloaded_urlconfs()
response = client.get(DOCS_URL)
assert response.status_code == 200
def test_docs_view_debug_true(client, settings, reloaded_urlconfs):
"""Test docs view when DEBUG is True."""
settings.STATICFILES_STORAGE = (
"django.contrib.staticfiles.storage.StaticFilesStorage"
)
    # added because Swagger needs staticfiles to render the docs page
settings.DEBUG = True
settings.PUBLIC_API_DOCUMENTATION = False
reloaded_urlconfs()
response = client.get(DOCS_URL)
assert response.status_code == 200
def test_docs_view_env_false(client, settings, reloaded_urlconfs):
"""Test docs view when PUBLIC_API_DOCUMENTATION is False."""
settings.PUBLIC_API_DOCUMENTATION = False
settings.DEBUG = False
reloaded_urlconfs()
response = client.get(DOCS_URL)
assert response.status_code == 404
|
# -*- coding: utf-8 -*-
"""Align the Prefix Commons with the Bioregistry."""
from typing import Any, Dict, Mapping, Sequence
from bioregistry.align.utils import Aligner
from bioregistry.external.prefix_commons import get_prefix_commons
__all__ = [
"PrefixCommonsAligner",
]
class PrefixCommonsAligner(Aligner):
"""Aligner for Prefix Commons."""
key = "prefixcommons"
getter = get_prefix_commons
curation_header = ["formatter", "identifiers", "purl"]
def get_skip(self) -> Mapping[str, str]:
"""Get entries for prefix commons that should be skipped."""
return {
"fbql": "not a real resource, as far as I can tell",
}
def prepare_external(self, external_id, external_entry) -> Dict[str, Any]:
"""Prepare Prefix Commons data to be added to the Prefix Commons for each BioPortal registry entry."""
formatter = external_entry["formatter"].strip()
return {
"formatter": formatter,
"is_identifiers": formatter.startswith("http://identifiers.org"),
"is_obo": formatter.startswith("http://purl.obolibrary.org"),
}
def get_curation_row(self, external_id, external_entry) -> Sequence[str]:
"""Prepare curation rows for unaligned Prefix Commons registry entries."""
formatter = external_entry["formatter"].strip()
return [
formatter,
formatter.startswith("http://identifiers.org"),
formatter.startswith("http://purl.obolibrary.org"),
]
if __name__ == "__main__":
PrefixCommonsAligner.align()
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ast
import time
from heatclient import client as heat_client
from heatclient import exc as heat_exc
from keystoneclient.v2_0 import client as keyclient
from neutron._i18n import _LE
from neutron._i18n import _LW
from neutron.db import model_base
from neutron import manager
from neutron.plugins.common import constants as pconst
from oslo_config import cfg
from oslo_log import helpers as log
from oslo_log import log as logging
from oslo_serialization import jsonutils
import sqlalchemy as sa
from gbpservice.neutron.services.servicechain.common import exceptions as exc
LOG = logging.getLogger(__name__)
cfg.CONF.import_group('keystone_authtoken', 'keystonemiddleware.auth_token')
service_chain_opts = [
cfg.IntOpt('stack_delete_retries',
default=5,
help=_("Number of attempts to retry for stack deletion")),
cfg.IntOpt('stack_delete_retry_wait',
default=3,
help=_("Wait time between two successive stack delete "
"retries")),
cfg.StrOpt('heat_uri',
default='http://localhost:8004/v1',
help=_("Heat server address to create services "
"specified in the service chain.")),
cfg.StrOpt('heat_ca_certificates_file', default=None,
help=_('CA file for heatclient to verify server certificates')),
cfg.BoolOpt('heat_api_insecure', default=False,
help=_("If True, ignore any SSL validation issues")),
]
cfg.CONF.register_opts(service_chain_opts, "simplechain")
# Service chain API supported Values
sc_supported_type = [pconst.LOADBALANCER, pconst.FIREWALL]
STACK_DELETE_RETRIES = cfg.CONF.simplechain.stack_delete_retries
STACK_DELETE_RETRY_WAIT = cfg.CONF.simplechain.stack_delete_retry_wait
class ServiceChainInstanceStack(model_base.BASEV2):
"""ServiceChainInstance stacks owned by the servicechain driver."""
__tablename__ = 'sc_instance_stacks'
instance_id = sa.Column(sa.String(36),
nullable=False, primary_key=True)
stack_id = sa.Column(sa.String(36),
nullable=False, primary_key=True)
class SimpleChainDriver(object):
@log.log_method_call
def initialize(self):
pass
@log.log_method_call
def create_servicechain_node_precommit(self, context):
if context.current['service_profile_id'] is None:
if context.current['service_type'] not in sc_supported_type:
raise exc.InvalidServiceTypeForReferenceDriver()
elif context.current['service_type']:
            LOG.warning(_LW('Both service_profile_id and service_type are '
                            'specified, service_type will be ignored.'))
@log.log_method_call
def create_servicechain_node_postcommit(self, context):
pass
@log.log_method_call
def update_servicechain_node_precommit(self, context):
if (context.original['config'] != context.current['config']):
filters = {'servicechain_spec': context.original[
'servicechain_specs']}
sc_instances = context._plugin.get_servicechain_instances(
context._plugin_context, filters)
if sc_instances:
raise exc.NodeUpdateNotSupported()
@log.log_method_call
def update_servicechain_node_postcommit(self, context):
pass
@log.log_method_call
def delete_servicechain_node_precommit(self, context):
pass
@log.log_method_call
def delete_servicechain_node_postcommit(self, context):
pass
@log.log_method_call
def create_servicechain_spec_precommit(self, context):
pass
@log.log_method_call
def create_servicechain_spec_postcommit(self, context):
pass
@log.log_method_call
def update_servicechain_spec_precommit(self, context):
pass
@log.log_method_call
def update_servicechain_spec_postcommit(self, context):
if context.original['nodes'] != context.current['nodes']:
filters = {'servicechain_spec': [context.original['id']]}
sc_instances = context._plugin.get_servicechain_instances(
context._plugin_context, filters)
for sc_instance in sc_instances:
self._update_servicechain_instance(context,
sc_instance,
context._sc_spec)
@log.log_method_call
def delete_servicechain_spec_precommit(self, context):
pass
@log.log_method_call
def delete_servicechain_spec_postcommit(self, context):
pass
@log.log_method_call
def create_servicechain_instance_precommit(self, context):
pass
@log.log_method_call
def create_servicechain_instance_postcommit(self, context):
sc_instance = context.current
sc_spec_ids = sc_instance.get('servicechain_specs')
for sc_spec_id in sc_spec_ids:
sc_spec = context._plugin.get_servicechain_spec(
context._plugin_context, sc_spec_id)
sc_node_ids = sc_spec.get('nodes')
self._create_servicechain_instance_stacks(context, sc_node_ids,
sc_instance, sc_spec)
@log.log_method_call
def update_servicechain_instance_precommit(self, context):
pass
@log.log_method_call
def update_servicechain_instance_postcommit(self, context):
original_spec_ids = context.original.get('servicechain_specs')
new_spec_ids = context.current.get('servicechain_specs')
if set(original_spec_ids) != set(new_spec_ids):
for new_spec_id in new_spec_ids:
newspec = context._plugin.get_servicechain_spec(
context._plugin_context, new_spec_id)
self._update_servicechain_instance(context, context.current,
newspec)
@log.log_method_call
def delete_servicechain_instance_precommit(self, context):
pass
@log.log_method_call
def delete_servicechain_instance_postcommit(self, context):
self._delete_servicechain_instance_stacks(context._plugin_context,
context.current['id'])
@log.log_method_call
def create_service_profile_precommit(self, context):
if context.current['service_type'] not in sc_supported_type:
raise exc.InvalidServiceTypeForReferenceDriver()
@log.log_method_call
def create_service_profile_postcommit(self, context):
pass
@log.log_method_call
def update_service_profile_precommit(self, context):
pass
@log.log_method_call
def update_service_profile_postcommit(self, context):
pass
@log.log_method_call
def delete_service_profile_precommit(self, context):
pass
@log.log_method_call
def delete_service_profile_postcommit(self, context):
pass
def _get_ptg(self, context, ptg_id):
return self._get_resource(self._grouppolicy_plugin,
context._plugin_context,
'policy_target_group',
ptg_id)
def _get_pt(self, context, pt_id):
return self._get_resource(self._grouppolicy_plugin,
context._plugin_context,
'policy_target',
pt_id)
def _get_port(self, context, port_id):
return self._get_resource(self._core_plugin,
context._plugin_context,
'port',
port_id)
def _get_ptg_subnet(self, context, ptg_id):
ptg = self._get_ptg(context, ptg_id)
return ptg.get("subnets")[0]
def _get_member_ips(self, context, ptg_id):
ptg = self._get_ptg(context, ptg_id)
pt_ids = ptg.get("policy_targets")
member_addresses = []
for pt_id in pt_ids:
pt = self._get_pt(context, pt_id)
port_id = pt.get("port_id")
port = self._get_port(context, port_id)
ipAddress = port.get('fixed_ips')[0].get("ip_address")
member_addresses.append(ipAddress)
return member_addresses
def _fetch_template_and_params(self, context, sc_instance,
sc_spec, sc_node):
stack_template = sc_node.get('config')
# TODO(magesh):Raise an exception ??
if not stack_template:
LOG.error(_LE("Service Config is not defined for the service"
" chain Node"))
return
stack_template = jsonutils.loads(stack_template)
config_param_values = sc_instance.get('config_param_values', {})
stack_params = {}
# config_param_values has the parameters for all Nodes. Only apply
# the ones relevant for this Node
if config_param_values:
config_param_values = jsonutils.loads(config_param_values)
config_param_names = sc_spec.get('config_param_names', [])
if config_param_names:
config_param_names = ast.literal_eval(config_param_names)
# This service chain driver knows how to fill in two parameter values
# for the template at present.
# 1)Subnet -> Provider PTG subnet is used
# 2)PoolMemberIPs -> List of IP Addresses of all PTs in Provider PTG
# TODO(magesh):Process on the basis of ResourceType rather than Name
# eg: Type: OS::Neutron::PoolMember
# Variable number of pool members is not handled yet. We may have to
# dynamically modify the template json to achieve that
member_ips = []
provider_ptg_id = sc_instance.get("provider_ptg_id")
# If we have the key "PoolMemberIP*" in template input parameters,
# fetch the list of IPs of all PTs in the PTG
for key in config_param_names or []:
if "PoolMemberIP" in key:
member_ips = self._get_member_ips(context, provider_ptg_id)
break
member_count = 0
for key in config_param_names or []:
if "PoolMemberIP" in key:
value = (member_ips[member_count]
if len(member_ips) > member_count else '0')
member_count = member_count + 1
config_param_values[key] = value
elif key == "Subnet":
value = self._get_ptg_subnet(context, provider_ptg_id)
config_param_values[key] = value
node_params = (stack_template.get('Parameters')
or stack_template.get('parameters'))
if node_params:
for parameter in config_param_values.keys():
if parameter in node_params.keys():
stack_params[parameter] = config_param_values[parameter]
return (stack_template, stack_params)
def _create_servicechain_instance_stacks(self, context, sc_node_ids,
sc_instance, sc_spec):
heatclient = HeatClient(context._plugin_context)
for sc_node_id in sc_node_ids:
sc_node = context._plugin.get_servicechain_node(
context._plugin_context, sc_node_id)
stack_template, stack_params = self._fetch_template_and_params(
context, sc_instance, sc_spec, sc_node)
stack_name = ("stack_" + sc_instance['name'] + sc_node['name'] +
sc_instance['id'][:8])
stack = heatclient.create(
stack_name.replace(" ", ""), stack_template, stack_params)
self._insert_chain_stack_db(
context._plugin_context.session, sc_instance['id'],
stack['stack']['id'])
def _delete_servicechain_instance_stacks(self, context, instance_id):
stack_ids = self._get_chain_stacks(context.session, instance_id)
heatclient = HeatClient(context)
for stack in stack_ids:
heatclient.delete(stack.stack_id)
for stack in stack_ids:
self._wait_for_stack_delete(heatclient, stack.stack_id)
self._delete_chain_stacks_db(context.session, instance_id)
# Wait for the heat stack to be deleted for a maximum of 15 seconds
# we check the status every 3 seconds and call sleep again
# This is required because cleanup of subnet fails when the stack created
# some ports on the subnet and the resource delete is not completed by
# the time subnet delete is triggered by Resource Mapping driver
def _wait_for_stack_delete(self, heatclient, stack_id):
stack_delete_retries = STACK_DELETE_RETRIES
while True:
try:
stack = heatclient.get(stack_id)
if stack.stack_status == 'DELETE_COMPLETE':
return
elif stack.stack_status == 'DELETE_FAILED':
heatclient.delete(stack_id)
except Exception:
LOG.exception(_LE(
"Service Chain Instance cleanup may not have "
"happened because Heat API request failed "
"while waiting for the stack %(stack)s to be "
"deleted"), {'stack': stack_id})
return
else:
time.sleep(STACK_DELETE_RETRY_WAIT)
stack_delete_retries = stack_delete_retries - 1
if stack_delete_retries == 0:
LOG.warning(_LW(
"Resource cleanup for service chain instance"
" is not completed within %(wait)s seconds"
" as deletion of Stack %(stack)s is not"
" completed"),
{'wait': (STACK_DELETE_RETRIES *
STACK_DELETE_RETRY_WAIT),
'stack': stack_id})
return
else:
continue
def _get_instance_by_spec_id(self, context, spec_id):
filters = {'servicechain_spec': [spec_id]}
return context._plugin.get_servicechain_instances(
context._plugin_context, filters)
def _update_servicechain_instance(self, context, sc_instance, newspec):
self._delete_servicechain_instance_stacks(context._plugin_context,
sc_instance['id'])
sc_node_ids = newspec.get('nodes')
self._create_servicechain_instance_stacks(context,
sc_node_ids,
sc_instance,
newspec)
def _delete_chain_stacks_db(self, session, sc_instance_id):
with session.begin(subtransactions=True):
stacks = session.query(ServiceChainInstanceStack
).filter_by(instance_id=sc_instance_id
).all()
for stack in stacks:
session.delete(stack)
def _insert_chain_stack_db(self, session, sc_instance_id, stack_id):
with session.begin(subtransactions=True):
chainstack = ServiceChainInstanceStack(
instance_id=sc_instance_id,
stack_id=stack_id)
session.add(chainstack)
def _get_chain_stacks(self, session, sc_instance_id):
with session.begin(subtransactions=True):
stack_ids = session.query(ServiceChainInstanceStack.stack_id
).filter_by(instance_id=sc_instance_id
).all()
return stack_ids
def _get_resource(self, plugin, context, resource, resource_id):
obj_getter = getattr(plugin, 'get_' + resource)
obj = obj_getter(context, resource_id)
return obj
@property
def _core_plugin(self):
# REVISIT(Magesh): Need initialization method after all
# plugins are loaded to grab and store plugin.
return manager.NeutronManager.get_plugin()
@property
def _grouppolicy_plugin(self):
# REVISIT(Magesh): Need initialization method after all
# plugins are loaded to grab and store plugin.
plugins = manager.NeutronManager.get_service_plugins()
grouppolicy_plugin = plugins.get(pconst.GROUP_POLICY)
if not grouppolicy_plugin:
LOG.error(_LE("No Grouppolicy service plugin found."))
raise exc.ServiceChainDeploymentError()
return grouppolicy_plugin
class HeatClient(object):
def __init__(self, context, password=None):
api_version = "1"
self.tenant = context.tenant
self._keystone = None
endpoint = "%s/%s" % (cfg.CONF.simplechain.heat_uri, self.tenant)
kwargs = {
'token': self._get_auth_token(self.tenant),
'username': context.user_name,
'password': password,
'cacert': cfg.CONF.simplechain.heat_ca_certificates_file,
'insecure': cfg.CONF.simplechain.heat_api_insecure
}
self.client = heat_client.Client(api_version, endpoint, **kwargs)
self.stacks = self.client.stacks
def create(self, name, data, parameters=None):
fields = {
'stack_name': name,
'timeout_mins': 10,
'disable_rollback': True,
'password': data.get('password')
}
fields['template'] = data
fields['parameters'] = parameters
return self.stacks.create(**fields)
def delete(self, stack_id):
try:
self.stacks.delete(stack_id)
except heat_exc.HTTPNotFound:
LOG.warning(_LW(
"Stack %(stack)s created by service chain driver is "
"not found at cleanup"), {'stack': stack_id})
def get(self, stack_id):
return self.stacks.get(stack_id)
@property
def keystone(self):
if not self._keystone:
keystone_conf = cfg.CONF.keystone_authtoken
if keystone_conf.get('auth_uri'):
auth_url = keystone_conf.auth_uri
else:
auth_url = ('%s://%s:%s/v2.0/' % (
keystone_conf.auth_protocol,
keystone_conf.auth_host,
keystone_conf.auth_port))
user = (keystone_conf.get('admin_user') or keystone_conf.username)
pw = (keystone_conf.get('admin_password') or
keystone_conf.password)
self._keystone = keyclient.Client(
username=user, password=pw, auth_url=auth_url,
tenant_id=self.tenant)
return self._keystone
def _get_auth_token(self, tenant):
return self.keystone.get_token(tenant)
|
import json
import time
from fidesops.core.config import load_toml
from fidesops.models.connectionconfig import (
AccessLevel,
ConnectionConfig,
ConnectionType,
)
from fidesops.models.datasetconfig import DatasetConfig
import pytest
import pydash
import os
from typing import Any, Dict, Generator
from tests.fixtures.application_fixtures import load_dataset
from tests.fixtures.saas_example_fixtures import load_config
from sqlalchemy.orm import Session
from fidesops.schemas.saas.shared_schemas import SaaSRequestParams, HTTPMethod
from fidesops.service.connectors import SaaSConnector
from fidesops.util import cryptographic_util
from fidesops.util.saas_util import format_body
saas_config = load_toml("saas_config.toml")
HUBSPOT_FIRSTNAME = "SomeoneFirstname"
@pytest.fixture(scope="function")
def hubspot_secrets():
return {
"domain": pydash.get(saas_config, "hubspot.domain")
or os.environ.get("HUBSPOT_DOMAIN"),
"hapikey": pydash.get(saas_config, "hubspot.hapikey")
or os.environ.get("HUBSPOT_HAPIKEY"),
}
@pytest.fixture(scope="function")
def hubspot_identity_email():
return pydash.get(saas_config, "hubspot.identity_email") or os.environ.get(
"HUBSPOT_IDENTITY_EMAIL"
)
@pytest.fixture(scope="session")
def hubspot_erasure_identity_email():
return f"{cryptographic_util.generate_secure_random_string(13)}@email.com"
@pytest.fixture
def hubspot_config() -> Dict[str, Any]:
return load_config("data/saas/config/hubspot_config.yml")
@pytest.fixture
def hubspot_dataset() -> Dict[str, Any]:
return load_dataset("data/saas/dataset/hubspot_dataset.yml")[0]
@pytest.fixture(scope="function")
def connection_config_hubspot(
db: Session, hubspot_config, hubspot_secrets,
) -> Generator:
fides_key = hubspot_config["fides_key"]
connection_config = ConnectionConfig.create(
db=db,
data={
"key": fides_key,
"name": fides_key,
"connection_type": ConnectionType.saas,
"access": AccessLevel.write,
"secrets": hubspot_secrets,
"saas_config": hubspot_config,
},
)
yield connection_config
connection_config.delete(db)
@pytest.fixture
def dataset_config_hubspot(
db: Session,
connection_config_hubspot: ConnectionConfig,
hubspot_dataset,
hubspot_config,
) -> Generator:
fides_key = hubspot_config["fides_key"]
connection_config_hubspot.name = fides_key
connection_config_hubspot.key = fides_key
connection_config_hubspot.save(db=db)
dataset = DatasetConfig.create(
db=db,
data={
"connection_config_id": connection_config_hubspot.id,
"fides_key": fides_key,
"dataset": hubspot_dataset,
},
)
yield dataset
dataset.delete(db=db)
@pytest.fixture(scope="function")
def hubspot_erasure_data(connection_config_hubspot, hubspot_erasure_identity_email) -> Generator:
"""
Gets the current value of the resource and restores it after the test is complete.
Used for erasure tests.
"""
connector = SaaSConnector(connection_config_hubspot)
body = json.dumps({
"properties": {
"company": "test company",
"email": hubspot_erasure_identity_email,
"firstname": HUBSPOT_FIRSTNAME,
"lastname": "SomeoneLastname",
"phone": "(123) 123-1234",
"website": "someone.net"
}
})
updated_headers, formatted_body = format_body({}, body)
# create contact
contacts_request: SaaSRequestParams = SaaSRequestParams(
method=HTTPMethod.POST,
path=f"/crm/v3/objects/contacts",
headers=updated_headers,
body=formatted_body,
)
contacts_response = connector.create_client().send(contacts_request)
contacts_body = contacts_response.json()
contact_id = contacts_body["id"]
# no need to subscribe contact, since creating a contact auto-subscribes them
# Allows contact to be propagated in Hubspot before calling access / erasure requests
    remaining_tries = 5
    while _contact_exists(hubspot_erasure_identity_email, connector) is False:
        if remaining_tries < 1:
            raise Exception(f"Contact with contact id {contact_id} could not be added to Hubspot")
        remaining_tries -= 1
        time.sleep(5)
yield contact_id
# delete contact
delete_request: SaaSRequestParams = SaaSRequestParams(
method=HTTPMethod.DELETE,
path=f"/crm/v3/objects/contacts/{contact_id}",
)
connector.create_client().send(delete_request)
# verify contact is deleted
    remaining_tries = 5
    while _contact_exists(hubspot_erasure_identity_email, connector) is True:
        if remaining_tries < 1:
            raise Exception(f"Contact with contact id {contact_id} could not be deleted from Hubspot")
        remaining_tries -= 1
        time.sleep(5)  # Ensures contact is deleted
def _contact_exists(hubspot_erasure_identity_email: str, connector: SaaSConnector) -> bool:
"""
Confirm whether contact exists by calling search api and comparing firstname str.
"""
body = json.dumps({
"filterGroups": [{
"filters": [{
"value": hubspot_erasure_identity_email,
"propertyName": "email",
"operator": "EQ"
}]
}]
})
updated_headers, formatted_body = format_body({}, body)
contact_request: SaaSRequestParams = SaaSRequestParams(
method=HTTPMethod.POST,
path="/crm/v3/objects/contacts/search",
headers=updated_headers,
body=formatted_body,
)
contact_response = connector.create_client().send(contact_request)
contact_body = contact_response.json()
return bool(contact_body["results"] and contact_body["results"][0]["properties"]["firstname"] == HUBSPOT_FIRSTNAME)
|
#!/usr/bin/python3
# coding : utf-8
def letter_position(letter):
    """Return the 0-based alphabet position of an ASCII letter code (e.g. ord('c') -> 2)."""
    # Lowercase
    if letter >= 97 and letter <= 122:
        return letter - 97
    # Uppercase
    elif letter >= 65 and letter <= 90:
        return letter - 65
    else:
        return 0
def letter_case(letter):
    """Classify an ASCII letter code as 'lowercase', 'uppercase' or 'other'."""
    # Lowercase
    if letter >= 97 and letter <= 122:
        return 'lowercase'
    # Uppercase
    elif letter >= 65 and letter <= 90:
        return 'uppercase'
    # Other
    else:
        return 'other'
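# Hypothetical usage sketch (not part of the original module): both helpers
# expect ASCII codes such as ord('c'), not one-character strings.
if __name__ == "__main__":
    assert letter_position(ord('c')) == 2
    assert letter_position(ord('Z')) == 25
    assert letter_case(ord('q')) == 'lowercase'
    assert letter_case(ord('!')) == 'other'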
|
import os
from datetime import datetime
from course_lib.Base.Evaluation.Evaluator import EvaluatorHoldout
from scripts.model_selection.cross_validate_utils import write_results_on_file, get_seed_list
from scripts.scripts_utils import read_split_load_data
from src.data_management.data_reader import get_ICM_train_new, get_ignore_users
from src.model import new_best_models
from src.model.KNN.ItemKNNCBFCFRecommender import ItemKNNCBFCFRecommender
from src.model_management.CrossEvaluator import EvaluatorCrossValidationKeepKOut
from src.utils.general_utility_functions import get_project_root_path, get_seed_lists, get_split_seed
# CONSTANTS TO MODIFY
K_OUT = 1
CUTOFF = 10
ALLOW_COLD_USERS = False
LOWER_THRESHOLD = -1  # Remove users at or below this threshold (default value: -1)
UPPER_THRESHOLD = 22  # Remove users at or above this threshold (default value: 2**16-1)
IGNORE_NON_TARGET_USERS = True
# VARIABLES TO MODIFY
model_name = "item_cbf_cf"
recommender_class = ItemKNNCBFCFRecommender
model_parameters = new_best_models.ItemCBF_CF.get_best_parameters()
if __name__ == '__main__':
# Set seed in order to have same splitting of data
seed_list = get_seed_list()
num_folds = len(seed_list)
URM_train_list = []
evaluator_list = []
ICM_train_list = []
for i in range(num_folds):
data_reader = read_split_load_data(K_OUT, ALLOW_COLD_USERS, seed_list[i])
URM_train, URM_test = data_reader.get_holdout_split()
ICM_all, _ = get_ICM_train_new(data_reader)
ignore_users = get_ignore_users(URM_train, data_reader.get_original_user_id_to_index_mapper(),
lower_threshold=LOWER_THRESHOLD, upper_threshold=UPPER_THRESHOLD,
ignore_non_target_users=IGNORE_NON_TARGET_USERS)
evaluator = EvaluatorHoldout(URM_test, cutoff_list=[CUTOFF], ignore_users=ignore_users)
URM_train_list.append(URM_train)
evaluator_list.append(evaluator)
ICM_train_list.append(ICM_all)
# Setting evaluator
evaluator = EvaluatorCrossValidationKeepKOut(URM_train_list, evaluator_list, cutoff=CUTOFF)
results = evaluator.crossevaluateContentRecommender(recommender_class, model_parameters, ICM_train_list)
# Writing on file cross validation results
date_string = datetime.now().strftime('%b%d_%H-%M-%S')
cross_valid_path = os.path.join(get_project_root_path(), "report/cross_validation/")
file_path = os.path.join(cross_valid_path, "cross_valid_{}_{}.txt".format(model_name, date_string))
write_results_on_file(file_path, recommender_class.RECOMMENDER_NAME, model_parameters, num_folds, seed_list,
results)
|
# Test akara.dist.setup()
import sys
import os
import tempfile
import subprocess
import shutil
from akara import dist
class SetupException(Exception):
pass
# Do a bit of extra work since nosetests might run in the top-level
# Akara directory or in test/ .
dirname = os.path.dirname(__file__)
setup_scripts_dir = os.path.join(dirname, "setup_scripts")
assert os.path.isdir(setup_scripts_dir), setup_scripts_dir
def call_setup(args):
p = subprocess.Popen([sys.executable] + args,
stdout = subprocess.PIPE,
stderr = subprocess.STDOUT,
cwd=setup_scripts_dir)
stdout = p.stdout.read()
p.wait()
if p.returncode != 0:
raise SetupException("setup.py failure %d: %s" % (p.returncode, stdout,))
# print here to help in case of failures;
# nosetests prints captured stdout
print stdout
def test_basic():
dirname = tempfile.mkdtemp(prefix="akara_setup_test_")
try:
call_setup(["setup_basic.py", "install",
"--root", dirname,
"--akara-modules-dir", dirname])
assert os.path.exists(os.path.join(dirname, "blah.py"))
finally:
shutil.rmtree(dirname)
def test_missing():
try:
call_setup(["setup_missing.py", "install",
"--root", "/../this/does/not/exist",
"--akara-modules-dir", "/../this/does/not/exist"])
raise AssertionError
except SetupException, err:
s = str(err)
assert "you need to include the 'akara_extensions' parameter" in s, s
def test_bad_ext():
try:
call_setup(["setup_bad_ext.py", "install",
"--root", "/../this/does/not/exist",
"--akara-modules-dir", "/../this/does/not/exist"])
raise AssertionError
except SetupException, err:
s = str(err)
assert "Akara extensions must end with '.py'" in s, s
def test_specifying_config():
dirname = tempfile.mkdtemp(prefix="akara_setup_test_")
config_filename = os.path.join(dirname, "akara.conf")
try:
f = open(config_filename, "w")
f.write("class Akara: ConfigRoot = %r + '/blather'\n" % dirname)
f.close()
call_setup(["setup_basic.py", "install",
"--root", dirname,
"--akara-config", config_filename])
assert os.path.exists(os.path.join(dirname, "blather", "modules", "blah.py"))
finally:
shutil.rmtree(dirname)
# dirname has priority
def test_specifying_config_and_dir():
dirname = tempfile.mkdtemp(prefix="akara_setup_test_")
try:
try:
call_setup(["setup_basic.py", "install",
"--root", dirname,
"--akara-config", "setup_akara.conf",
"--akara-modules-dir", dirname])
assert os.path.exists(os.path.join(dirname, "blah.py"))
except SetupException, err:
s = str(err)
assert "flapdoodle" in s, s
finally:
shutil.rmtree(dirname)
|
from sklearn.model_selection import GridSearchCV
class HypoSearcher(object):
def __init__(self, clf):
self.clf = clf
def optimize(self, x, y, params):
clf_search = GridSearchCV(self.clf, params, verbose=100).fit(x, y)
clf = clf_search.best_estimator_
return clf
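# Hypothetical usage sketch (assumes scikit-learn is installed); the estimator
# and parameter grid below are illustrative, not part of the original module.
if __name__ == "__main__":
    from sklearn.datasets import load_iris
    from sklearn.svm import SVC
    x, y = load_iris(return_X_y=True)
    searcher = HypoSearcher(SVC())
    best_clf = searcher.optimize(x, y, {'C': [0.1, 1.0, 10.0]})
    print(best_clf)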
|
"""
This file is part of the L3Morpho package.
L3Morpho is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
L3Morpho is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with L3Morpho. If not, see <http://www.gnu.org/licenses/>.
-------------------------------------------------------
Cascades of weighted finite state transducers.
-- 2011-05-20
Split off from fst.py.
"""
import re, os, time, functools
from .semiring import *
from .fst import FST
######################################################################
# CONTENTS
######################################################################
# 1. Finite State Transducer Cascade
# 2. Alternation Rules
# 3. Morphotactics
######################################################################
######################################################################
# Constants
######################################################################
FST_DIRECTORY = os.path.join(os.path.dirname(__file__),
os.path.pardir,
'FST')
## Regexs for parsing cascades
# string_set_label={chars1, chars1, chars2, ...}
SS_RE = re.compile(r'(\S+)\s*=\s*\{(.*)\}', re.U)
# weighting = UNIFICATION
WEIGHTING_RE = re.compile(r'weighting\s*=\s*(.*)')
# >fst<
CASC_FST_RE = re.compile(r'>(.*?)<')
# cascade name = {0, 1, 3, ...}
SUBCASC_RE = re.compile(r'cascade\s*(\S+)\s*=\s*\{(.*)\}')
# +lex+
CASC_LEX_RE = re.compile(r'\+(.*?)\+')
######################################################################
# Finite State Transducer Cascade
######################################################################
class FSTCascade(list):
"""
A list of FSTs to be composed.
"""
def __init__(self, label, *fsts):
list.__init__(self, fsts)
self.label = label
# String sets, abbreviated in cascade file
self._stringsets = {}
# Semiring weighting for all FSTs; defaults to FSS with unification
self._weighting = UNIFICATION_SR
# Composition of FSTs
self._composition = None
# All FSTs, including those not in the composition
self._fsts = {}
# Language this cascade belongs to
self.language = None
# Initial weight to use during transduction
self.init_weight = None
# Dictionary of lists of FST indices, for particular purposes
self._cascades = {}
def __str__(self):
"""Print name for cascade."""
return 'FST cascade ' + self.label
def add(self, fst):
"""Add an FST to the dictionary with its label as key."""
self._fsts[fst.label] = fst
def inverted(self):
"""Return a list of inverted FSTs in the cascade."""
fsts = [(fst.inverted() if isinstance(fst, FST) else fst) for fst in self]
inv = FSTCascade(self.label + '_inv', *fsts)
inv.init_weight = self.init_weight
inv._weighting = self._weighting
inv._stringsets = self._stringsets
return inv
def compose(self, begin=0, end=None, first=None, last=None, subcasc=None, backwards=False,
relabel=True, trace=0):
"""Compose the FSTs that make up the cascade list or a sublist, including possible first and last FSTs."""
if len(self) == 1:
return self[0]
else:
fsts = []
if subcasc:
if subcasc not in self._cascades:
raise ValueError("%r is not a valid subscascade label" % subcasc)
fsts = [self[i] for i in self._cascades[subcasc]]
else:
fsts = self[begin:(end if end != None else len(self))] # end could be 0
if first:
fsts = [first] + fsts
if last:
fsts.append(last)
return FST.compose(fsts, self.label + '@', relabel=relabel, trace=trace)
def mult_compose(self, ends):
begin = 0
fsts = []
for end in ends:
fsts.append(self.compose(begin, end))
begin = end
fsts.append(self.compose(begin, len(self)))
return fsts
def rev_compose(self, split_index, begin=0, trace=0):
"""Compose the FSTs in the cascade in two steps."""
# Compose from split_index to end
c1 = self.compose(begin=split_index, trace=trace)
# Compose from beginning to split_index
return self.compose(begin=begin, end=split_index, last=c1, trace=trace)
def compose_backwards(self, indices=[], subcasc=None, trace=0):
if not indices:
if subcasc:
# Use a copy of the cascade indices because we're going to reverse them
indices = list(self._cascades[subcasc])
else:
                indices = list(range(len(self)))
indices.reverse()
c = FST.compose([self[indices[1]], self[indices[0]]], trace=trace)
for n in indices[2:]:
c = FST.compose([self[n], c], trace=trace)
return c
def composition(self, begin=0, end=None):
"""The composed FSTs."""
if not self._composition:
self._composition = self.compose(begin=begin, end=end or len(self))
return self._composition
def transduce(self, inp_string, inp_weight, fsts, seg_units=[]):
result = [[inp_string, inp_weight]]
for fst in fsts:
print(fst.label)
result = reduce_lists([fst.transduce(x[0], x[1], seg_units=seg_units) for x in result])
if not result:
return False
return result
def stringset(self, label):
"""A labeled set of strings."""
return self._stringsets.get(label, None)
def stringset_label(self, stringset):
"""The label for a stringset if it's in the dict."""
for label, sset in self._stringsets.items():
if stringset == sset:
return label
def stringset_intersection(self, ss_label1=None, ss_label2=None, ss1=None, ss2=None):
"""Label for the intersection of two stringsets or element if only one.
Either the labels or the stringsets or both are provided."""
ss1 = ss1 or self.stringset(ss_label1)
ss2 = ss2 or self.stringset(ss_label2)
ss_label1 = ss_label1 or self.stringset_label(ss1)
ss_label2 = ss_label2 or self.stringset_label(ss2)
if ss1 and ss2:
intersect = ss1 & ss2
if intersect: # could be empty
if len(intersect) == 1:
# If there's only one element, don't create a new stringset
return list(intersect)[0]
# Otherwise create a new stringset
i_label = self.stringset_label(intersect)
if i_label:
# The stringset intersection is already in the dict
return i_label
# The stringset intersection is not in the dict
# Add it and return its label
                new_label = FSTCascade.simplify_intersection_label(ss_label1, ss_label2)
                self.add_stringset(new_label, intersect)
                return new_label
@staticmethod
def simplify_intersection_label(label1, label2):
"""Simplify an intersection label by eliminating common elements."""
if not '&' in label1 and not '&' in label2:
            # the two expressions may begin with the same stringset
return FSTCascade.simplify_difference_intersection_labels(label1, label2)
else:
return '&'.join(set(label1.split('&')) | set(label2.split('&')))
@staticmethod
def simplify_difference_intersection_labels(label1, label2):
"""Simplify an intersection of differences if first elements are the same."""
labels1 = label1.split('-')
labels2 = label2.split('-')
if labels1[0] == labels2[0]:
set1 = set(labels1[1].split(',')) if len(labels1) > 1 else set()
set2 = set(labels2[1].split(',')) if len(labels2) > 1 else set()
subtracted = set1 | set2
return labels1[0] + '-' + ','.join(subtracted)
else:
return label1 + '&' + label2
def generate_stringset(self, label):
"""Make a stringset from a label.
L: stored stringset
L1-L2: difference of two stored stringsets
L1-abc: difference of stringset L1 and the set of characters {abc}
L1&L2: intersection of two stringsets (stored or generated)
"""
ss = self.stringset(label)
if ss:
return ss
if '-' in label or '&' in label:
return self.intersect_stringsets(label.split('&'))
def subtract_stringsets(self, label1, label2):
"""Difference between stringsets with labels or sets of characters."""
ss1 = self.stringset(label1)
if not ss1:
ss1 = set([label1])
ss2 = self.stringset(label2)
if not ss2:
ss2 = set([label2]) # set consisting of single phoneme/grapheme
return ss1 - ss2
def intersect_stringsets(self, labels):
"""Intersection of stringsets with given labels."""
return functools.reduce(lambda x, y: x.intersection(y), [self.diff_stringset(label) for label in labels])
def diff_stringset(self, label):
"""label is either a stored stringset or a stringset difference expression."""
ss = self.stringset(label)
if ss:
return ss
labels = label.split("-")
# Assume there's only one -
return self.subtract_strings(labels[0], labels[1])
def subtract_strings(self, label1, label2):
"""Difference between stringsets with labels or sets of characters."""
ss1 = self.stringset(label1)
if not ss1:
ss1 = set(label1.split(','))
ss2 = self.stringset(label2)
if not ss2:
ss2 = set(label2.split(','))
return ss1 - ss2
def add_stringset(self, label, seq):
"""Add a labeled set of strings, updating sigma accordingly."""
self._stringsets[label] = frozenset(seq)
def weighting(self):
"""The weighting semiring for the cascade."""
return self._weighting
def set_weighting(self, label):
"""Set the weighting for the cascade."""
label = label.lower()
if 'uni' in label:
self._weighting = UNIFICATION_SR
elif 'prob' in label:
self._weighting = PROBABILITY_SR
elif 'trop' in label:
self._weighting = TROPICAL_SR
def get(self, label):
"""The FST with the given label."""
return self._fsts.get(label)
def set_init_weight(self, fs):
self.init_weight = FSSet(fs)
@staticmethod
def load(filename, seg_units=[], create_networks=True, subcasc=None, language=None,
weight_constraint=None, verbose=True):
"""
Load an FST cascade from a file.
If not create_networks, only create the weighting and string sets.
"""
if verbose:
print('Loading FST cascade from', filename)
directory, fil = os.path.split(filename)
label = del_suffix(fil, '.')
return FSTCascade.parse(label, open(filename, encoding='utf-8').read(), directory=directory,
subcasc=subcasc, create_networks=create_networks, seg_units=seg_units,
language=language, weight_constraint=weight_constraint, verbose=verbose)
@staticmethod
def parse(label, s, directory='', create_networks=True, seg_units=[], subcasc=None, language=None,
weight_constraint=None, verbose=False):
"""
Parse an FST cascade from the contents of a file as a string.
If not create_networks, only create the weighting and string sets.
"""
cascade = FSTCascade(label)
cascade.language = language
cascade.seg_units = seg_units
lines = s.split('\n')[::-1]
subcasc_indices = []
while lines:
line = lines.pop().split('#')[0].strip() # strip comments
if not line: continue
# Weighting for all FSTs
m = WEIGHTING_RE.match(line)
if m:
cascade.set_weighting(m.group(1))
continue
# Subcascade, specifying indices
# label = {i, j, ...}
m = SUBCASC_RE.match(line)
if m:
label, indices = m.groups()
indices = [int(i.strip()) for i in indices.split(',')]
cascade._cascades[label] = indices
# If we're only loading a certain subcascade and this is it, save its indices
if label == subcasc:
subcasc_indices = indices
continue
# String set (a list, converted to a frozenset)
m = SS_RE.match(line)
if m:
label, strings = m.groups()
# Characters may contain unicode
# strings = strings.decode('utf8')
cascade.add_stringset(label, [s.strip() for s in strings.split(',')])
continue
# FST
m = CASC_FST_RE.match(line)
if m:
if create_networks:
label = m.group(1)
filename = label + '.fst'
if not subcasc_indices or len(cascade) in subcasc_indices:
fst = FST.load(os.path.join(directory, filename),
cascade=cascade, weighting=cascade.weighting(),
seg_units=seg_units, weight_constraint=weight_constraint,
verbose=verbose)
else:
fst = 'FST' + str(len(cascade))
if verbose:
print('Skipping FST', label)
cascade.append(fst)
continue
# FST in a lex file
m = CASC_LEX_RE.match(line)
if m:
if create_networks:
label = m.group(1)
# handle specs
filename = label + '.lex'
if not subcasc_indices or len(cascade) in subcasc_indices:
fst = FST.load(os.path.join(directory, filename),
cascade=cascade, weighting=cascade.weighting(),
seg_units=seg_units, weight_constraint=weight_constraint,
verbose=verbose, lex_features=True)
if verbose:
print('Adding lex FST', label, 'to cascade')
else:
fst = 'FST' + str(len(cascade))
if verbose:
print('Skipping lex FST', label)
cascade.append(fst)
continue
raise ValueError("bad line: %r" % line)
return cascade
|
import gdb
class Register :
"""
Helper to access register bitfield
>>> reg = Register(0x80000000000812d0)
>>> hex(reg.get_bitfield(60, 4))
'0x8'
>>> hex(reg.get_bitfield(0, 44))
'0x812d0'
>>> reg.set_bitfield(60, 4, 9)
>>> hex(reg.value)
'0x90000000000812d0'
>>> reg.set_bitfield(0, 44, 0x1A15d6)
>>> hex(reg.value)
'0x90000000001a15d6'
"""
def __init__(self, value):
self.value = int(value) & 0xffffffffffffffff
def get_bitfield(self, bitIdx, size):
"""get bitfield value of a register
Args:
            bitIdx ([integer]): least significant bit of the bitfield to get in
register
size ([integer]): size of the bitfield to get
Returns:
[integer]: the data field that start at bit 'bitIdx' index of length
'size'
"""
mask = (1 << size) - 1
val = (self.value & (mask << bitIdx)) >> bitIdx
return (val)
def set_bitfield(self, bitIdx, size, value):
"""get bitfield value of a register
Args:
bitIdx ([integer]): least signifiant bit of the bitfield to set in
register
size ([integer]): [description]
value ([integer]): [description]
"""
mask = (1 << size) - 1
self.value = self.value & ~(mask << bitIdx)
bitfield = (value & mask) << bitIdx
self.value = self.value | bitfield
class VAddrTranslate(gdb.Command):
"""Print translated address form virtual memory"""
PPNSIZE = 44
def _read_phy_memory(self, address, length):
current_inferior = gdb.selected_inferior ()
satp = Register(gdb.parse_and_eval("$satp"))
previous_mode = satp.get_bitfield(60, 4)
satp.set_bitfield(60, 4, 0)
gdb.execute("set $satp =" + hex(satp.value))
try:
value = current_inferior.read_memory(address, length)
finally:
satp.set_bitfield(60, 4, previous_mode)
gdb.execute("set $satp =" + hex(satp.value))
return(value)
def _translate_SVXX(self, vAddr, PTESize, levels,
pageSize, VPNSize, VASize, ppnLengths):
# valid for RV64
satp = Register(gdb.parse_and_eval("$satp"))
ppn = satp.get_bitfield(0, self.PPNSIZE)
asid = satp.get_bitfield(self.PPNSIZE, 16)
mode = satp.get_bitfield(60, 4)
if mode == 0 :
print("No translation or protection")
return
vpn = []
vAddr = Register(vAddr)
for i in range(levels):
vpn.append(vAddr.get_bitfield(12 + i * VPNSize, VPNSize))
#step 1
i = levels - 1
a = ppn * pageSize
while (i >= 0) :
# step 2
pte_val = self._read_phy_memory(a + vpn[i] * PTESize, 8)
pte_val = int.from_bytes(pte_val, "little", signed=False)
pte = Register(pte_val)
# step 3
pte_v = pte.get_bitfield(0,1)
pte_r = pte.get_bitfield(1,1)
pte_w = pte.get_bitfield(2,1)
pte_x = pte.get_bitfield(3,1)
if (pte_v == 0) or (pte_r == 0 and pte_w == 1) :
print("error page-fault should have been raised\n")
return None
# step 4
if (pte_r == 1) or (pte_x == 1) :
break
else :
i = i - 1
if i < 0 :
print("error page-fault should have been raised\n")
return
else :
a = pte.get_bitfield(10,44) * pageSize
# step 5 : print access rights
print("access rights: " + "r:" + str(pte_r) +
" w:" + str(pte_w) + " x:" + str(pte_x))
        # step 6 check for a misaligned superpage
if (i > 0) and pte.get_bitfield(10, i * 9) != 0:
print("error page-fault should be raised: misaligned superpage\n")
return
# cannot do step 7 checks
pAddr = Register(0)
# add offset
pAddr.set_bitfield(0,12, vAddr.get_bitfield(0,12))
if i > 0 :
va_vpn = vAddr.get_bitfield(12, 9 * i)
pAddr.set_bitfield(12, 9 * i, va_vpn)
pAddr.set_bitfield(12 + sum(ppnLengths[0:i]),
sum(ppnLengths[i:levels]),
pte.get_bitfield(10 + sum(ppnLengths[0:i]),
sum(ppnLengths[i:levels])))
print("pAddr: " + hex(pAddr.value))
class SV39AddrTranslate(VAddrTranslate):
"""Print translated address form virtual memory"""
def __init__(self):
super(SV39AddrTranslate, self).__init__(
"sv39translate",
gdb.COMMAND_USER
)
def _translate_SV39(self, vAddr):
PTESIZE = 8
LEVELS = 3
PAGESIZE = 4096
VPNSIZE = 9
VASIZE = 39
PPNLENGTHS = [9,9,26]
self._translate_SVXX(vAddr, PTESIZE, LEVELS,
PAGESIZE, VPNSIZE, VASIZE, PPNLENGTHS)
def invoke(self, args, from_tty):
addr = int(gdb.parse_and_eval(args))
self._translate_SV39(addr)
class SV48AddrTranslate(VAddrTranslate):
"""Print translated address form virtual memory"""
def __init__(self):
super(SV48AddrTranslate, self).__init__(
"sv48translate",
gdb.COMMAND_USER
)
def _translate_SV48(self, vAddr):
PTESIZE = 8
LEVELS = 4
PAGESIZE = 4096
VPNSIZE = 9
VASIZE = 48
PPNLENGTHS = [9,9,9,17]
self._translate_SVXX(vAddr, PTESIZE, LEVELS,
PAGESIZE, VPNSIZE, VASIZE, PPNLENGTHS)
def invoke(self, args, from_tty):
addr = int(gdb.parse_and_eval(args))
self._translate_SV48(addr)
SV39AddrTranslate()
SV48AddrTranslate()
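# Hypothetical usage sketch: after sourcing this script in a RISC-V GDB session,
# the new commands translate a virtual address using the current $satp mode
# (the file name and address below are illustrative).
#   (gdb) source vaddr_translate.py
#   (gdb) sv39translate $sp
#   (gdb) sv48translate 0xffffffe000001000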
|
import sys
# Print the given file with soft hyphens (U+00AD) replaced by spaces.
with open(sys.argv[1], "r") as a_file:
    for line in a_file:
        print(line.strip().replace(u'\u00AD', ' '))
|
from django import forms
class DocumentForm(forms.Form):
docfile = forms.FileField(label='Select a file')
    def clean_docfile(self):
        docfile = self.cleaned_data['docfile']
        ext = docfile.name.split('.')[-1].lower()
        if ext not in ["pdf"]:
            raise forms.ValidationError("Only PDF files are allowed.")
        return docfile
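# Hypothetical usage sketch (requires django.conf.settings to be configured);
# the file name and contents below are illustrative only.
# from django.core.files.uploadedfile import SimpleUploadedFile
# form = DocumentForm(files={'docfile': SimpleUploadedFile('notes.txt', b'data')})
# form.is_valid()  # False: clean_docfile rejects anything that is not a .pdf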
|
# Generated by Django 2.1.7 on 2019-03-11 02:00
import django.utils.datetime_safe
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [("db_data", "0012_auto_20190210_1637")]
operations = [
migrations.AddField(
model_name="officer",
name="officer_since",
field=models.DateField(default=django.utils.datetime_safe.date.today),
preserve_default=False,
)
]
|
"""Package signing."""
import pickle
from typing import Any
from django_q import core_signing as signing
from django_q.conf import Conf
BadSignature = signing.BadSignature
class SignedPackage:
"""Wraps Django's signing module with custom Pickle serializer."""
@staticmethod
def dumps(obj, compressed: bool = Conf.COMPRESSED) -> str:
return signing.dumps(
obj,
key=Conf.SECRET_KEY,
salt=Conf.PREFIX,
compress=compressed,
serializer=PickleSerializer,
)
@staticmethod
    def loads(obj) -> Any:
return signing.loads(
obj, key=Conf.SECRET_KEY, salt=Conf.PREFIX, serializer=PickleSerializer
)
class PickleSerializer:
"""Simple wrapper around Pickle for signing.dumps and signing.loads."""
@staticmethod
def dumps(obj) -> bytes:
return pickle.dumps(obj, protocol=pickle.HIGHEST_PROTOCOL)
@staticmethod
    def loads(data) -> Any:
return pickle.loads(data)
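# Hypothetical usage sketch (assumes Django settings are configured so the
# django_q imports at the top of this module succeed): PickleSerializer on its
# own is a plain pickle round trip; SignedPackage additionally needs SECRET_KEY.
if __name__ == "__main__":
    payload = {"task": "demo", "args": (1, 2, 3)}
    blob = PickleSerializer.dumps(payload)
    assert PickleSerializer.loads(blob) == payload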
|
#!/usr/bin/env python
import subprocess
import os
import datetime
import settings
import logging
import ssl
import sqlite3
import psutil
import requests
import json
import paho.mqtt.client as paho
from prom_lib import prometheus as prom
class speedbot():
def __init__(self):
#connect to the mqtt broker
#self.client = paho.Client()
#self.client.tls_set(settings.SSLCERTPATH+"/"+settings.SSLCERT,tls_version=ssl.PROTOCOL_TLSv1_2)
#self.client.tls_insecure_set(True)
try:
self.emit = prom()
except Exception as e:
logging.warn(e)
logging.warn("Can not emit Prometheus metrics.")
try:
#connect to the sqlite process
self.sqlcon = sqlite3.connect(settings.DB_PATH+'iotDB.db')
self.cursor = self.sqlcon.cursor()
except Exception as e:
logging.warn(e)
logging.warn("Could not connect to the IOT sqlite DB.")
try:
self.cursor.execute('''CREATE TABLE speedbot (upload_Mbps text, download_Mbps text, packetloss text, timestamp text, location text, country text, testhost text)''')
except Exception as e:
logging.warn(e)
logging.warn("Speedbot table already exists.")
def get_hostname(self):
"""
DESC: Get the hostname of the device
INPUT: None
OUTPUT: hostname
NOTE: None
"""
out_array = []
try:
proc = subprocess.Popen("hostname", stdout=subprocess.PIPE, shell=True)
(output, err) = proc.communicate()
hostname = output.decode('utf-8').strip()
except Exception as e:
logging.error(e)
logging.error("Could not get the hostname")
return hostname
def set_hostname(self, hostname=None):
"""
DESC: Set the hostname of the speedbot
INPUT: hostname
OUTPUT: None
NOTES: None
"""
logging.info('Setting the system hostname.')
if hostname == None:
hostname = settings.HOSTNAME
try:
proc = subprocess.Popen("hostname %s"%hostname, stdout=subprocess.PIPE, shell=True)
(output,err) = proc.communicate()
out_array = output.decode('utf-8').strip().split()
logging.info('Set the hostname to %s'%hostname)
except Exception as e:
logging.error(e)
logging.error("Could not set the Speedbot hostname.")
        #Set the hostname permanently
        try:
            if os.path.exists('/etc/hostname'):
                logging.info('Removing the hostname file; it will be rewritten with new hostname %s'%hostname)
                os.remove('/etc/hostname')
            else:
                logging.warn('No /etc/hostname file')
except Exception as e:
logging.error(e)
logging.error('Could not delete /etc/hostname file.')
#Write the hostname
try:
new_file = open("/etc/hostname","w")
new_file.write(hostname)
new_file.close()
logging.info('Wrote new hostname file.')
except Exception as e:
logging.error(e)
logging.error("Could not create new hostname file.")
    def get_location(self, ext_ip):
        """
        DESC: Get the location of the speedbot based on the external IP
        INPUT: ext_ip - IP of the internet-facing interface
        OUTPUT: geolocation dict from geolocation-db.com, or 'unknown' on failure
        NOTES: Get the external IP of the network (outward-facing NAT) from the check_speed function
        """
        try:
            output = requests.get("https://geolocation-db.com/json/%s&position=true" % ext_ip).json()
        except Exception as e:
            output = 'unknown'
            logging.error('Could not determine the location')
        return output
    def get_ext_ip(self):
        """
        DESC: Get the external IP of the NAT if using one.
        INPUT: None
        OUTPUT: External IPv4 address as a string, or 'unknown' on failure
        NOTES: Can also get the external IP of the network (outward-facing NAT) from the check_speed function
        """
        try:
            output = requests.get("http://whatismyip.akamai.com/").text
        except Exception as e:
            output = 'unknown'
            logging.error('Could not determine the IP')
        return output
def get_uptime(self):
"""
DESC: Run get system uptime
INPUT: None
        OUTPUT: out_dict - uptime
                         - start_date
                         - start_time
"""
up = True
out_dict = {}
try:
uptime = subprocess.Popen("uptime -p", stdout=subprocess.PIPE, shell=True)
(out, error) = uptime.communicate()
out = out.decode("utf-8").rstrip().split()
except Exception as e:
logging.error("Could not get uptime %s: %s"%error, e)
up = False
try:
since = subprocess.Popen("uptime -s", stdout=subprocess.PIPE, shell=True)
(sout, serror) = since.communicate()
sout = sout.decode("utf-8").rstrip().split()
except Exception as e:
logging.error("Could not get uptime %s: %s"%serror, e)
up = False
#make the out data useful
if up:
out_dict['uptime'] = out
out_dict['start_date'] = sout[0]
out_dict['start_time'] = sout[1]
return out_dict
def get_cpu_temp(self):
#cat /etc/armbianmonitor/datasources/soctemp
#/etc/update-motd.d/30-armbian-sysinfo
"""
DESC: Get the cpu temperature in C or F
INPUT: None
OUTPUT: out_dict - temp
- scale
"""
raw = open("/etc/armbianmonitor/datasources/soctemp", "r")
raw_temp = raw.read()
temp = int(raw_temp.strip())/1000
if settings.TEMP_SCALE == 'F':
temp = temp * 9/5.0 + 32
return {'temp':temp, 'scale':settings.TEMP_SCALE}
def get_network_status(self, nic=None):
"""
        DESC: Get the network transmit/receive counters on the selected nic.
        INPUT: nic - nic name
        OUTPUT: out_dict - receive
                         - transmit
"""
if nic is None:
nic = settings.PHYSNET
try:
out = psutil.net_io_counters(pernic=True)
except Exception as e:
logging.error('Get network error: %s'%e)
return out[nic]
def get_system_memory(self):
"""
DESC: Get the system memory stats
INPUT: None
OUTPUT: out_dict - total_mem
- used_mem
- free_mem
- total_swap
- used_swap
- free_swap
"""
out_dict = {}
memory = {'total_mem':'$2', 'used_mem':'$3', 'free_mem':'$4'}
for k, v in memory.items():
try:
logging.info("Getting the %s"%(k))
raw = subprocess.Popen("free -hm | grep Mem | awk '{print %s}'"%(v), stdout=subprocess.PIPE, shell=True)
(mout, merror) = raw.communicate()
mout = mout.decode("utf-8").rstrip().split()
except Exception as e:
logging.error("Failed to get the %s"%(k))
logging.error(e)
out_dict['%s'%(k)] = mout[0]
swap = {'total_swap':'$2', 'used_swap':'$3', 'free_swap':'$4'}
for k, v in swap.items():
try:
logging.info("Getting the %s"%(k))
raw = subprocess.Popen("free -hm | grep Swap | awk '{print %s}'"%(v), stdout=subprocess.PIPE, shell=True)
(sout, serror) = raw.communicate()
sout = sout.decode("utf-8").rstrip().split()
except Exception as e:
logging.error("Failed to get the %s"%(k))
logging.error(e)
out_dict['%s'%(k)] = sout[0]
return out_dict
def get_system_status(self):
"""
DESC: Return a system status overview
INPUT: None
OUTPUT: out_dict - hostname
- total_mem
- cpu_temp
- ip
- uptime
- node_id
"""
hostname = self.get_hostname()
cpu_temp = self.get_cpu_temp()
mem = self.get_system_memory()
#ip = self.get_network_status(settings.PHYSNET)
uptime = self.get_uptime()
nodeid = self.get_hostname()
return {'hostname':hostname,
'cpu_temp':cpu_temp['temp'],
'scale':cpu_temp['scale'],
'total_mem':mem['total_mem'],
#'ip':ip['ip'],
'uptime':uptime,
'node_id':nodeid
}
####DB####
def db_insert(self,input_dict):
"""
DESC: Insert the values in the sqlite DB
INPUT: input_dict - upload_Mbps
- download_Mbps
- packetloss
- timestamp
- location
- country
- testhost
OUTPUT: None
NOTE: None
"""
try:
logging.info("Inserting speed info into db. %s"%(input_dict))
self.cursor.execute("INSERT INTO speedbot VALUES ('"+input_dict['upload_Mbps']+"','"+input_dict['download_Mbps']+"','"+input_dict['packetloss']+"','"+input_dict['timestamp']+"','" + input_dict['location'] + "','"+input_dict['country']+"','"+input_dict['testhost']+"')")
self.sqlcon.commit()
except Exception as e:
logging.error(e)
logging.error("Could not insert data %s into the database"%(input_dict))
def db_read(self):
pass
def db_purge(self):
pass
####System#####
def check_speed(self):
#run the OOkla speed test.
args = ['speedtest', '--accept-license','-I', settings.PHYSNET, '-p', 'no', '-f', 'json']
try:
cmd = subprocess.Popen(args, stdout=subprocess.PIPE)
output = json.loads(cmd.communicate()[0].decode("utf-8").rstrip())
except Exception as e:
logging.error("speedtest error: %s"%e)
output = 'ERROR'
        # upload speed in megabits per second
        up = ((int(output['upload']['bytes']) * 8) / int(output['upload']['elapsed'])) /1000
        # download speed in megabits per second
        down = ((int(output['download']['bytes']) * 8) / int(output['download']['elapsed'])) /1000
try:
self.db_insert({'upload_Mbps':str(up),
'download_Mbps':str(down),
'packetloss':str(output['packetLoss']),
'timestamp':str(output['timestamp']),
'location':str(output['server']['location']),
'country':str(output['server']['country']),
'testhost':str(output['server']['host'])
})
logging.info("Inserted speed info into Speedbot DB.")
except Exception as e:
logging.error("could not insert: %s"%e)
        result = {'timestamp':output['timestamp'],
                  'external_ip':output['interface']['externalIp'],
                  'upload_Mbps':up,
                  'download_Mbps':down,
                  'server_location':output['server']['location'],
                  'country':output['server']['country'],
                  'testhost':output['server']['host'],
                  'packetloss':output['packetLoss'],
                  'jitter':output['ping']['jitter'],
                  'latency':output['ping']['latency'],
                  'result':output['result']
                  }
        logging.info(result)
        return result
def write_config(self):
pass
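# Hypothetical usage sketch (assumes settings.py, the Ookla `speedtest` CLI and
# the armbian monitoring files this module reads are all present on the device):
# if __name__ == '__main__':
#     bot = speedbot()
#     logging.info(bot.get_system_status())
#     logging.info(bot.check_speed())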
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (C) 2012 Yahoo! Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from anvil import exceptions as excp
from anvil import log as logging
from anvil import type_utils as tu
from anvil import utils
LOG = logging.getLogger(__name__)
class Component(object):
def __init__(self, name, subsystems, instances, options, siblings, distro, passwords, **kwargs):
# Subsystems this was requested with
self.subsystems = subsystems
# The component name (from config)
self.name = name
# Any component options
self.options = options
# All the other active instances
self.instances = instances
# All the other class names that can be used alongside this class
self.siblings = siblings
# The distribution 'interaction object'
self.distro = distro
# Turned on and off as phases get activated
self.activated = False
# How we get any passwords we need
self.passwords = passwords
def get_password(self, option):
pw_val = self.passwords.get(option)
if pw_val is None:
raise excp.PasswordException("Password asked for option %s but none was pre-populated!" % (option))
return pw_val
def get_option(self, option, *options, **kwargs):
option_value = utils.get_deep(self.options, [option] + list(options))
if option_value is None:
return kwargs.get('default_value')
else:
return option_value
def get_bool_option(self, option, *options, **kwargs):
if 'default_value' not in kwargs:
kwargs['default_value'] = False
return tu.make_bool(self.get_option(option, *options, **kwargs))
def get_int_option(self, option, *options, **kwargs):
if 'default_value' not in kwargs:
kwargs['default_value'] = 0
return int(self.get_option(option, *options, **kwargs))
@property
def env_exports(self):
return {}
def verify(self):
pass
def __str__(self):
return "%s@%s" % (tu.obj_name(self), self.name)
@property
def params(self):
# Various params that are frequently accessed
return {
'APP_DIR': self.get_option('app_dir'),
'COMPONENT_DIR': self.get_option('component_dir'),
'CONFIG_DIR': self.get_option('cfg_dir'),
'TRACE_DIR': self.get_option('trace_dir'),
}
def warm_configs(self):
# Before any actions occur you get the chance to
        # warm up the configs you might use (i.e. for prompting for passwords
# earlier rather than later)
pass
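# Hypothetical usage sketch: a minimal subclass showing how the option helpers
# are typically used; the option names below are illustrative only.
class EchoComponent(Component):
    def verify(self):
        LOG.info("app_dir=%s debug=%s",
                 self.get_option('app_dir'),
                 self.get_bool_option('debug'))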
|
from dataclasses import field
from typing import Optional
from pydantic.dataclasses import dataclass
@dataclass
class Book:
title: str
price: int
quantity: Optional[int] = 0
_price: int = field(init=False, repr=False)
@property
def total(self):
return self.price * self.quantity
@property
def price(self):
print("price getter")
return self._price
@price.setter
def price(self, value):
print("price setter")
        if not isinstance(value, (int, float)):
            raise TypeError("The value must be a number")
        if not value >= 0:
            raise ValueError("The value must be non-negative")
        self._price = float(value)
book1 = Book("Good Omens", "24", 1000)
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = ['UserHierarchyGroupArgs', 'UserHierarchyGroup']
@pulumi.input_type
class UserHierarchyGroupArgs:
def __init__(__self__, *,
instance_arn: pulumi.Input[str],
name: Optional[pulumi.Input[str]] = None,
parent_group_arn: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a UserHierarchyGroup resource.
:param pulumi.Input[str] instance_arn: The identifier of the Amazon Connect instance.
:param pulumi.Input[str] name: The name of the user hierarchy group.
:param pulumi.Input[str] parent_group_arn: The Amazon Resource Name (ARN) for the parent user hierarchy group.
"""
pulumi.set(__self__, "instance_arn", instance_arn)
if name is not None:
pulumi.set(__self__, "name", name)
if parent_group_arn is not None:
pulumi.set(__self__, "parent_group_arn", parent_group_arn)
@property
@pulumi.getter(name="instanceArn")
def instance_arn(self) -> pulumi.Input[str]:
"""
The identifier of the Amazon Connect instance.
"""
return pulumi.get(self, "instance_arn")
@instance_arn.setter
def instance_arn(self, value: pulumi.Input[str]):
pulumi.set(self, "instance_arn", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the user hierarchy group.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="parentGroupArn")
def parent_group_arn(self) -> Optional[pulumi.Input[str]]:
"""
The Amazon Resource Name (ARN) for the parent user hierarchy group.
"""
return pulumi.get(self, "parent_group_arn")
@parent_group_arn.setter
def parent_group_arn(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "parent_group_arn", value)
class UserHierarchyGroup(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
instance_arn: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
parent_group_arn: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Resource Type definition for AWS::Connect::UserHierarchyGroup
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] instance_arn: The identifier of the Amazon Connect instance.
:param pulumi.Input[str] name: The name of the user hierarchy group.
:param pulumi.Input[str] parent_group_arn: The Amazon Resource Name (ARN) for the parent user hierarchy group.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: UserHierarchyGroupArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Resource Type definition for AWS::Connect::UserHierarchyGroup
:param str resource_name: The name of the resource.
:param UserHierarchyGroupArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(UserHierarchyGroupArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
instance_arn: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
parent_group_arn: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = UserHierarchyGroupArgs.__new__(UserHierarchyGroupArgs)
if instance_arn is None and not opts.urn:
raise TypeError("Missing required property 'instance_arn'")
__props__.__dict__["instance_arn"] = instance_arn
__props__.__dict__["name"] = name
__props__.__dict__["parent_group_arn"] = parent_group_arn
__props__.__dict__["user_hierarchy_group_arn"] = None
super(UserHierarchyGroup, __self__).__init__(
'aws-native:connect:UserHierarchyGroup',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'UserHierarchyGroup':
"""
Get an existing UserHierarchyGroup resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = UserHierarchyGroupArgs.__new__(UserHierarchyGroupArgs)
__props__.__dict__["instance_arn"] = None
__props__.__dict__["name"] = None
__props__.__dict__["parent_group_arn"] = None
__props__.__dict__["user_hierarchy_group_arn"] = None
return UserHierarchyGroup(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="instanceArn")
def instance_arn(self) -> pulumi.Output[str]:
"""
The identifier of the Amazon Connect instance.
"""
return pulumi.get(self, "instance_arn")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The name of the user hierarchy group.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="parentGroupArn")
def parent_group_arn(self) -> pulumi.Output[Optional[str]]:
"""
The Amazon Resource Name (ARN) for the parent user hierarchy group.
"""
return pulumi.get(self, "parent_group_arn")
@property
@pulumi.getter(name="userHierarchyGroupArn")
def user_hierarchy_group_arn(self) -> pulumi.Output[str]:
"""
The Amazon Resource Name (ARN) for the user hierarchy group.
"""
return pulumi.get(self, "user_hierarchy_group_arn")
|
import time
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.support.ui import WebDriverWait, Select
from selenium.webdriver.common.by import By
from ensconce.dao import resources, groups
from tests.functional import SeleniumTestController
class ResourcesTest(SeleniumTestController):
"""
Test resources module.
"""
def setUp(self):
super(ResourcesTest, self).setUp()
self.login()
def tearDown(self):
self.logout()
super(ResourcesTest, self).tearDown()
def test_sub_navigation(self):
""" Test the resource sub nav. """
self.open_url('/resource/list')
self.wd.find_element(By.ID, "subnav-list").click()
time.sleep(0.5) # FIXME: Need to figure out how to wait on page loads; this is supposed to happen automatically ...
self.assertEquals('Resource List', self.wd.title)
self.open_url('/resource/list')
self.wd.find_element(By.ID, "subnav-create").click()
time.sleep(0.5) # FIXME: Need to figure out how to wait on page loads; this is supposed to happen automatically ...
self.assertEquals('Add Resource', self.wd.title)
# Copy/paste to check the other page
self.open_url('/resource/add')
self.wd.find_element(By.ID, "subnav-list").click()
time.sleep(0.5) # FIXME: Need to figure out how to wait on page loads; this is supposed to happen automatically ...
self.assertEquals('Resource List', self.wd.title)
self.open_url('/resource/add')
self.wd.find_element(By.ID, "subnav-create").click()
time.sleep(0.5) # FIXME: Need to figure out how to wait on page loads; this is supposed to happen automatically ...
self.assertEquals('Add Resource', self.wd.title)
def test_add_no_group(self):
""" Test adding a resource w/o specifying a group. """
self.open_url('/resource/add')
name = "ResourceTest.test_add_fail"
el = self.wd.find_element(By.ID, "name")
el.send_keys(name)
self.submit_form("resource_form")
self.assertEquals('Add Resource', self.wd.title)
def test_add_success(self):
""" Test adding a resource. """
self.open_url('/resource/add')
name = "ResourceTest.test_add_success"
el = self.wd.find_element(By.ID, "name")
el.send_keys(name)
# Choose '6th group' from the select list.
sel = Select(self.wd.find_element(By.ID, "group_ids"))
sel.select_by_visible_text("6th group")
self.submit_form("resource_form")
self.assertEquals('View Resource', self.wd.title)
self.assert_in_data_table(name, row=1)
self.open_url('/resource/list')
self.assertEquals('Resource List', self.wd.title)
self.assert_in_list_table(name, nobr=True)
def test_add_duplicate(self):
""" Test adding a resource with duplicate name. """
self.open_url('/resource/add')
name = "Bikeshed PIN"
el = self.wd.find_element(By.ID, "name")
el.send_keys(name)
# Choose '6th group' from the select list.
sel = Select(self.wd.find_element(By.ID, "group_ids"))
sel.select_by_visible_text("First Group")
self.submit_form("resource_form")
self.assert_form_error("Resource \"{0}\" already exists in group \"First Group\".".format(name))
def test_edit_duplicate(self):
""" Test editing a resource and specifying duplicate name. """
name = "Bikeshed PIN"
new_name = "BoA" # A name we know to exist in First Group
r1 = resources.get_by_name(name)
self.open_url('/resource/edit/{0}'.format(r1.id))
el = self.wd.find_element(By.ID, "name")
el.clear()
el.send_keys(new_name)
self.submit_form("resource_form")
self.assert_form_error("Resource \"{0}\" already exists in group \"First Group\".".format(new_name))
def test_edit_link(self):
""" Test clicking the edit link (prompt) """
r = resources.get_by_name("BoA")
self.open_url('/resource/list')
editlink = self.wd.find_element(By.ID, "edit-link-{0}".format(r.id))
editlink.click()
time.sleep(0.5) # FIXME: Need to figure out how to wait on page loads; this is supposed to happen automatically ...
self.assertEquals('Edit Resource', self.wd.title)
namefield = self.wd.find_element(By.ID, "name")
self.assertEquals("BoA", namefield.get_attribute('value'))
def test_delete_link_no_passwords(self):
""" Test clicking the delete link when no passwords. (prompt) """
r = resources.get_by_name("Bikeshed PIN")
self.open_url('/resource/list')
deletelink = self.wd.find_element(By.ID, "delete-link-{0}".format(r.id))
deletelink.click()
alert = self.wd.switch_to_alert()
self.assertEqual("Are you sure you want to remove resource {0} (id={1})".format(r.name, r.id), alert.text)
alert.accept()
self.assert_notification("Resource deleted: {0} (id={1})".format(r.name, r.id))
self.assert_not_in_list_table(r.name)
def test_delete_link_passwords(self):
""" Test clicking the delete link with passwords (confirm page) """
r = resources.get_by_name("BoA")
self.open_url('/resource/list')
deletelink = self.wd.find_element(By.ID, "delete-link-{0}".format(r.id))
deletelink.click()
self.assertEquals('Delete Resource', self.wd.title)
self.submit_form("delete_form")
alert = self.wd.switch_to_alert()
self.assertEqual("Are you sure you wish to permanently delete this resource and passwords?", alert.text)
alert.accept()
self.assert_notification("Resource deleted: {0} (id={1})".format(r.name, r.id))
self.assert_not_in_list_table(r.name)
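# A possible replacement for the time.sleep(0.5) page-load waits above (a sketch
# only, not part of the original suite): block until the expected title appears.
from selenium.webdriver.support import expected_conditions as EC

def wait_for_title(wd, title, timeout=10):
    """Wait until the page title equals `title` instead of sleeping blindly."""
    WebDriverWait(wd, timeout).until(EC.title_is(title))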
|
import torch
import numpy as np
import os
import pandas as pd
import SimpleITK as sitk
from torch.utils.data import Dataset, DataLoader
from tqdm import tqdm
class NiftyDatasetFromTSV(Dataset):
"""Creates a PyTorch Dataset using a .tsv file of .nii.gz paths and labels."""
def __init__(self, tsv_file, age_limit=None, data_limit=None, normalizer=None, resampler=None, aug=False, seed=42):
"""
Args:
tsv_file: (string) - path to the tsv file. Columns used (column 0 is ignored):
    1: (str) - .nii.gz image filepath
    2: (str) - corresponding mask filepath
    3: (int) - age
    4: (int) - sex - 0 = female, 1 = male
    5: (int) - scanner - all identical if the same scanner.
data_limit: (int) - the maximum number of samples to include.
age_limit: (list of int, len = 2) - list of 2 ints [min_age, max_age], e.g. [48, 71]
normalizer: (string) - if not None, one of `tanh` or `sigmoid` to normalize image intensities.
aug: (bool) - whether to apply augmentations to the data.
Returns:
A PyTorch dataset.
"""
np.random.seed(seed)
self.data = pd.read_csv(tsv_file)
if age_limit:
self.data = self.data[self.data.iloc[:, 3].astype(float) >= age_limit[0]]
self.data = self.data[self.data.iloc[:, 3].astype(float) <= age_limit[1]]
self.data = self.data.reindex(np.random.permutation(self.data.index))
if data_limit:
    self.data = self.data[:data_limit]
self.normalizer = None
if normalizer:
self.normalizer = torch.tanh if normalizer=='tanh' else self.normalizer
self.normalizer = torch.sigmoid if normalizer == 'sigmoid' else self.normalizer
self.aug = aug
self.samples = []
for idx in range(len(self.data)):
img_path = self.data.iloc[idx, 1]
mask_path = self.data.iloc[idx, 2]
age = np.array(self.data.iloc[idx, 3]/100, dtype='float64')
sex = np.array(self.data.iloc[idx, 4], dtype='float64')
scanner = np.array(self.data.iloc[idx, 5], dtype='int64')
sample = {'image': img_path, 'mask': mask_path, 'sex': sex, 'age':age, 'scanner': scanner}
if self.samples == []:
self.samples = [sample]
else:
self.samples.append(sample)
def __len__(self):
return len(self.data)
def do_translate(self, image, dx=0, dy=0, dz=0):
""" Performs random translation of the input image.
Args:
image: (np.array floats) [H, W, D] - the input image to augment.
dx, dy, dz: (int) - the translation to apply in the x, y and z directions in pixels.
If dx=dy=dz=0, random translation is applied.
Returns:
translated_image: (np.array floats) [H, W, D] - the translated input image.
"""
if not (dx or dy or dz):  # randomize only when no explicit offsets were given
dx, dy, dz = [np.random.randint(-3, 3, 1)[0],
np.random.randint(-3, 3, 1)[0],
np.random.randint(-3, 1, 1)[0]]
orig_h, orig_w, orig_d = image.shape
max_shift = np.max(np.abs([dx, dy, dz]))
canvas = np.pad(image, max_shift)
canvas_center = np.array(canvas.shape) // 2
nx = canvas_center[0] - orig_h//2 + dx
ny = canvas_center[1] - orig_w//2 + dy
nz = canvas_center[2] - orig_d//2 + dz
translated = canvas[nx:nx+orig_h, ny:ny+orig_w, nz:nz+orig_d]
return translated.astype(np.float32), dx, dy, dz
def __getitem__(self, item):
sample = self.samples[item]
image = sitk.ReadImage(os.path.join('/vol/biomedic2/rdr16/pymira/pymira/apps/data_harmonization', sample['image'][2:]), sitk.sitkFloat32)
mask = sitk.ReadImage(os.path.join('/vol/biomedic2/rdr16/pymira/pymira/apps/data_harmonization',sample['mask'][2:]), sitk.sitkFloat32)
image_np = sitk.GetArrayFromImage(image)
mask_np = sitk.GetArrayFromImage(mask)
if self.aug:
if np.random.uniform() < 0.5:
image_np, dx, dy, dz = self.do_translate(image_np)
mask_np, _, _, _ = self.do_translate(mask_np, dx, dy, dz)
if np.random.uniform() < 0.5:
image_np = np.flip(image_np, 2).astype(np.float32)
mask_np = np.flip(mask_np, 2).astype(np.float32)
image = torch.from_numpy(image_np).unsqueeze(0)
mask = torch.from_numpy(mask_np).unsqueeze(0)
sex = sample['sex']
age = sample['age']
scanner = sample['scanner']
del image_np, mask_np
return {'image': image, 'mask': mask, 'sex': sex, 'age': age, 'scanner': scanner}
def get_sample(self, item):
return self.samples[item]
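# Minimal usage sketch (illustrative; the .tsv path and loader settings are
# placeholders, not values from the original project):
#   ds = NiftyDatasetFromTSV('participants.tsv', age_limit=[48, 71], aug=True)
#   loader = DataLoader(ds, batch_size=2, shuffle=True, num_workers=4)
#   batch = next(iter(loader))  # dict with 'image', 'mask', 'sex', 'age', 'scanner'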
|
def calculate_investment_value(initial_value, percentage, years):
    """Return the value of `initial_value` after compounding at `percentage`
    percent per year for `years` years."""
    result = initial_value * (1 + percentage / 100) ** years
    return result
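# Example: 1000 invested at 5% annual growth for 10 years compounds to
# 1000 * 1.05 ** 10, i.e. roughly 1628.89.
# calculate_investment_value(1000, 5, 10)  # -> 1628.894626777442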
|
from requests import get, exceptions
from re import compile, match, MULTILINE
from sys import exit
# url = "https://raw.githubusercontent.com/fire1ce/DDNS-Cloudflare-PowerShell/main/README.md"
# section_name = "License"
# section_name = "License"
url = "https://raw.githubusercontent.com/wsargent/docker-cheat-sheet/master/READMEdd.md"
section_name = "## CPU Constraints"
# check that the url is valid and has a ".md" extension
def is_valid_url(url):
if not match(r"^https?:\/\/.*\.md$", url):
print(f"Error! {url} is not a valid url")
exit(1)
return True
# get markdown from url if status code is 200
def get_markdown_from_url(url):
try:
response = get(url)
if response.status_code == 200:
# remove the heading
markdown = response.text
markdown = markdown[markdown.find("\n") + 1 :]
return markdown
else:
print(f"Error! {url} returned status code: {str(response.status_code)}")
exit(1)
except exceptions.ConnectionError:
print(f"Error! {url} returned connection error")
exit(1)
# get the section content from markdown
def get_section_from_markdown(markdown, section_name):
# Get the section level from section_name
try:
section_level = compile("^#+ ").search(section_name).span()[1] - 1
except AttributeError:
print(
f"Error! Missing markdown section level at the beginning of section name: {section_name}"
)
exit(1)
# Get the start index of the section in the markdown
try:
start_index = compile("^" + section_name + "$", MULTILINE).search(markdown).span()[1]
except AttributeError:
print(f'Error! Section: "{section_name}" not found in markdown {url}')
exit(1)
# Get the end index of the section (falls back to the end of the file if this is the last section)
try:
end_index = (
compile("^#{2," + str(section_level) + "} ", MULTILINE)
.search(markdown[start_index:])
.span()[0]
)
markdown = markdown[start_index : end_index + start_index]
except AttributeError:
markdown = markdown[start_index:]
return markdown
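# Worked example (illustrative): for section_name = "## CPU Constraints", the
# pattern "^#+ " matches the leading "## ", so section_level = 3 - 1 = 2. The
# section is then cut at the next heading matched by "^#{2,2} ", i.e. the next
# "## " heading, or runs to the end of the file if no such heading follows.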
# return the full markdown from the url, or only the requested section
def external_markdown(url, section_name):
is_valid_url(url)
if section_name:
markdown = get_markdown_from_url(url)
if markdown:
return get_section_from_markdown(markdown, section_name)
else:
return get_markdown_from_url(url)
print(external_markdown(url, section_name))
|
#
# PySNMP MIB module ELTEX-ARP-INTERFACE-TABLE-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/ELTEX-ARP-INTERFACE-TABLE-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 18:45:32 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
OctetString, Integer, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "OctetString", "Integer", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
SingleValueConstraint, ConstraintsUnion, ValueSizeConstraint, ValueRangeConstraint, ConstraintsIntersection = mibBuilder.importSymbols("ASN1-REFINEMENT", "SingleValueConstraint", "ConstraintsUnion", "ValueSizeConstraint", "ValueRangeConstraint", "ConstraintsIntersection")
eltMesArpSpec, = mibBuilder.importSymbols("ELTEX-MES-IP", "eltMesArpSpec")
rsArpInterfaceEntry, = mibBuilder.importSymbols("RADLAN-IP", "rsArpInterfaceEntry")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
IpAddress, ObjectIdentity, MibScalar, MibTable, MibTableRow, MibTableColumn, Counter32, ModuleIdentity, iso, Integer32, MibIdentifier, Unsigned32, Counter64, NotificationType, Gauge32, Bits, TimeTicks = mibBuilder.importSymbols("SNMPv2-SMI", "IpAddress", "ObjectIdentity", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Counter32", "ModuleIdentity", "iso", "Integer32", "MibIdentifier", "Unsigned32", "Counter64", "NotificationType", "Gauge32", "Bits", "TimeTicks")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")
eltArpInterfaceTable = MibTable((1, 3, 6, 1, 4, 1, 35265, 1, 23, 91, 3, 1), )
if mibBuilder.loadTexts: eltArpInterfaceTable.setStatus('current')
eltArpInterfaceEntry = MibTableRow((1, 3, 6, 1, 4, 1, 35265, 1, 23, 91, 3, 1, 1), )
rsArpInterfaceEntry.registerAugmentions(("ELTEX-ARP-INTERFACE-TABLE-MIB", "eltArpInterfaceEntry"))
eltArpInterfaceEntry.setIndexNames(*rsArpInterfaceEntry.getIndexNames())
if mibBuilder.loadTexts: eltArpInterfaceEntry.setStatus('current')
eltArpInterfaceArpLocalProxy = MibTableColumn((1, 3, 6, 1, 4, 1, 35265, 1, 23, 91, 3, 1, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enable", 1), ("disable", 2))).clone('disable')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: eltArpInterfaceArpLocalProxy.setStatus('current')
mibBuilder.exportSymbols("ELTEX-ARP-INTERFACE-TABLE-MIB", eltArpInterfaceEntry=eltArpInterfaceEntry, eltArpInterfaceArpLocalProxy=eltArpInterfaceArpLocalProxy, eltArpInterfaceTable=eltArpInterfaceTable)
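# Loading sketch (an assumption about typical pysnmp usage, not part of the
# generated module): point a MibBuilder at the directory holding this compiled
# file so the symbols exported above become resolvable.
#   from pysnmp.smi import builder
#   mibBuilder = builder.MibBuilder()
#   mibBuilder.addMibSources(builder.DirMibSource('/path/to/compiled/pymibs'))
#   mibBuilder.loadModules('ELTEX-ARP-INTERFACE-TABLE-MIB')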
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# pylint: disable=C0103
# pylint: disable=E1101
# Python 2/3 compatibility
from __future__ import print_function
import glob
import os
import math
import numpy as np
def getCenterAndWH(name):
with open(name, "rt") as f:
    lines = f.readlines()
centerAndWH = [int(line) for line in lines[3:7]]
return centerAndWH
def getTruePosition():
txtnames = glob.glob("headPose/Per*/*.txt")
d = {}
for p in txtnames:
jpgname = p.replace(".txt", ".jpg")
d[jpgname] = getCenterAndWH(p)
return d
def cosd(deg):
return math.cos(math.pi*deg/180.0)
def sind(deg):
return math.sin(math.pi*deg/180.0)
def getRotatedPoint(point, deg, imgCenter):
a = np.array(point) - np.array(imgCenter)
deg = -deg
rotated = np.array((cosd(deg)*a[0] - sind(deg)*a[1], sind(deg)*a[0] + cosd(deg)*a[1]), dtype=int)
r = rotated + imgCenter
return (int(r[0]), int(r[1]))
def getAngles(p):
base = os.path.basename(p)
base = os.path.splitext(base)[0]
base = base.replace("+", "_+").replace("-", "_-")
f = base.split("_")
return f[-2:]
if __name__ == "__main__":
d = getTruePosition()
size = [384, 288]
imgCenter = (size[0]/2, size[1]/2)
keys = sorted(d.keys())
deg = 90
for k in keys:
print(k, d[k], end='')
point = tuple(d[k][:2])
print(getRotatedPoint(point, deg, imgCenter))
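# Quick sanity check (illustrative): with imgCenter = (0, 0), rotating the point
# (10, 0) by deg = 90 maps it onto the negative y-axis:
#   getRotatedPoint((10, 0), 90, (0, 0))  # -> (0, -10)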
|
import collections
# ChainMap provides a convenient way to create a new instance with an extra
# mapping at the front of the maps list, which makes it easy to avoid
# modifying the existing underlying data structures.
a = {'a': 'A', 'c': 'C'}
b = {'b': 'B', 'c': 'D'}
m1 = collections.ChainMap(a, b)
m2 = m1.new_child()
print('m1 before:', m1)
print('m2 before:', m2)
m2['c'] = 'E'
print('m1 after:', m1)
print('m2 after:', m2)
"""
m1 before: ChainMap({'a': 'A', 'c': 'C'}, {'b': 'B', 'c': 'D'})
m2 before: ChainMap({}, {'a': 'A', 'c': 'C'}, {'b': 'B', 'c': 'D'})
m1 after: ChainMap({'a': 'A', 'c': 'C'}, {'b': 'B', 'c': 'D'})
m2 after: ChainMap({'c': 'E'}, {'a': 'A', 'c': 'C'}, {'b': 'B', 'c': 'D'})
"""
|
#!/usr/bin/env python
import roslib
import rospy
import math
from std_msgs.msg import Float64
from random import random
def main():
rospy.init_node("cos")
pub = rospy.Publisher("/cos", Float64, queue_size=1)
counter = 0
RESOLUTION = 100
while not rospy.is_shutdown():
if counter == RESOLUTION:
counter = 0
val = math.cos(2 * math.pi / RESOLUTION * counter)
pub.publish(Float64(val))
rospy.sleep(0.05)
counter = counter + 1
if __name__ == "__main__":
main()
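# Companion listener sketch (illustrative; this would live in a separate script
# and is not part of the publisher above):
#   rospy.init_node("cos_listener")
#   rospy.Subscriber("/cos", Float64, lambda msg: rospy.loginfo("cos=%f", msg.data))
#   rospy.spin()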
|
from datetime import date, timedelta
from decouple import config
from itsdangerous import (TimedJSONWebSignatureSerializer
as Serializer, BadSignature, SignatureExpired)
from passlib.apps import custom_app_context as pwd_context
from api.app import db
class User(db.Model):
__tablename__ = "users"
id = db.Column(db.Integer, primary_key=True)
email = db.Column(db.String(120), unique=True)
nome = db.Column(db.String(120))
cidade = db.Column(db.String(120))
estado = db.Column(db.String(120))
genero = db.Column(db.String(32))
tipo_sanguineo = db.Column(db.String(32))
data_nascimento = db.Column(db.Date())
data_ultima_doacao = db.Column(db.Date())
senha_hash = db.Column(db.String(128))
def encrypt_password(self, password):
self.senha_hash = pwd_context.encrypt(password)
def verify_password(self, password):
return pwd_context.verify(password, self.senha_hash)
def generate_auth_token(self, expiration=600):
s = Serializer(config('SECRET_KEY'), expires_in=expiration)
return s.dumps({'id': self.id})
@property
def age(self):
age = 0
today = date.today()
birthdate = self.data_nascimento
if birthdate:
age = today.year - birthdate.year - ((today.month, today.day) < (birthdate.month, birthdate.day))
return age
@property
def next_donation_date(self):
last_donation = self.data_ultima_doacao
if last_donation:
return last_donation + timedelta(days=120)
return date.today()
@property
def is_able_to_donate(self):
able = False
age = self.age
today = date.today()
if age >= 16 and age <= 69:
if today >= self.next_donation_date:
able = True
return able
@staticmethod
def verify_auth_token(token):
s = Serializer(config('SECRET_KEY'))
try:
data = s.loads(token)
except SignatureExpired:
return None # valid token, but expired
except BadSignature:
return None # invalid token
user = User.query.get(data['id'])
return user
def __repr__(self) -> str:
return f'<User {self.email}>'
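# Token round-trip sketch (illustrative; assumes an application context, a
# configured SECRET_KEY and a persisted user row):
#   user = User.query.filter_by(email='donor@example.com').first()
#   token = user.generate_auth_token(expiration=600)
#   assert User.verify_auth_token(token).id == user.id  # None if expired or invalid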
|
import numpy as np
import argparse
import osgeo.gdal as gdal
from scipy.spatial import voronoi_plot_2d, Voronoi
import matplotlib.cm as cm
import matplotlib.pyplot as plt
from scipy.spatial import ConvexHull, convex_hull_plot_2d
from numpy import genfromtxt
import pandas as pd
import os
import xarray as xr
import clhs as cl
import csv
from scipy.linalg import solve_triangular, get_lapack_funcs, get_blas_funcs
from maxvolpy.maxvol import maxvol
from itertools import combinations          # used by good_points_brute_force below
from sklearn.naive_bayes import GaussianNB  # used by calc_score below
# -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
def f_no_cut(idx, i, copy=False):
if copy:
idx = np.copy(idx)
idx[i] = 0
return idx
def f_cut_eps(idx, i, X, eps=0.1, copy=False):
if copy:
idx = np.copy(idx)
#print(np.abs(X - X[i]) < eps)
print(idx.shape, X.shape)
idx[np.abs(X - X[i]) < eps] = 0
return idx
def rect_maxvol_cut(A, tol = 1., maxK = None, min_add_K = None, minK = None, start_maxvol_iters = 10, identity_submatrix = True, top_k_index = -1, cut_fun=None, penalty=None):
"""Python implementation of rectangular 2-volume maximization. For information see :py:func:`rect_maxvol` function"""
# tol2 - square of parameter tol
tol2 = tol**2
# N - number of rows, r - number of columns of matrix A
N, r = A.shape
if N <= r:
return np.arange(N, dtype = int), np.eye(N, dtype = A.dtype)
if maxK is None or maxK > N:
maxK = N
if maxK < r:
maxK = r
if minK is None or minK < r:
minK = r
if minK > N:
minK = N
if min_add_K is not None:
minK = max(minK, r + min_add_K)
if minK > maxK:
minK = maxK
if top_k_index == -1 or top_k_index > N:
top_k_index = N
if top_k_index < r:
top_k_index = r
if cut_fun is None:
cut_fun = f_no_cut
if penalty is None:
#penalty_fun = np.ones(top_k_index, dtype=int)
chosen = np.ones(top_k_index, dtype=int)
else:
chosen = np.copy(penalty)
index = np.zeros(N, dtype = int)
tmp_index, C = maxvol(A, tol = 1, max_iters = start_maxvol_iters, top_k_index = top_k_index)
# --
index[:r] = tmp_index
#chosen[tmp_index] = 0 -- replaced
for ti in tmp_index:
cut_fun(chosen, ti)
C = np.asfortranarray(C)
# compute square 2-norms of each row in matrix C
row_norm_sqr = np.array([chosen[i]*np.linalg.norm(C[i], 2)**2 for i in range(top_k_index)])
# find maximum value in row_norm_sqr
i = np.argmax(row_norm_sqr)
K = r
# set cgeru or zgeru for complex numbers and dger or sger for float numbers
try:
ger = get_blas_funcs('geru', [C])
except:
ger = get_blas_funcs('ger', [C])
while (row_norm_sqr[i] > tol2 and K < maxK) or K < minK:
# add i to index and recompute C and square norms of each row by SVM-formula
index[K] = i
#chosen[i] = 0 -- replaced by the next line
#print(chosen)
cut_fun(chosen, i)
if (chosen == 0).all():
print('Failed')
c = C[i].copy()
v = C.dot(c.conj())
l = 1.0/(1+v[i])
ger(-l,v,c,a=C,overwrite_a=1)
C = np.hstack([C, l*v.reshape(-1,1)])
row_norm_sqr -= (l*v[:top_k_index]*v[:top_k_index].conj()).real
row_norm_sqr *= chosen
# find maximum value in row_norm_sqr
i = row_norm_sqr.argmax()
K += 1
if identity_submatrix:
C[index[:K]] = np.eye(K, dtype = C.dtype)
return index[:K].copy(), C
def make_dist(X):
n = len(X)
A = np.empty((n, n), dtype=X.dtype)
for ix, x in enumerate(X):
for iy, y in enumerate(X):
A[ix, iy] = np.abs(x - y)
return A
def f_penal(X, bnd, level=0.0):
Xmin = np.min(X)
Xmax = np.max(X)
bnd_abs = (Xmax - Xmin)*bnd
dist = np.minimum(np.abs(X - Xmin), np.abs(Xmax - X))
def lin_func(x):
if bnd == 0:
return x*0.0 + 1.0  # degenerate case: no boundary band, so the penalty weight is a constant 1
else:
return (1.0 - level)*np.minimum(x, bnd_abs)/bnd_abs + level
return lin_func(dist)
def f_penal_2D(X, Y, bnd, level=0.0):
return f_penal(X, bnd=bnd, level=level)*f_penal(Y, bnd=bnd, level=level)
def norm_data(X, bounds=(-1.0, 1.0), copy=True):
X = np.array(X, copy=copy).T
for i, x in enumerate(X):
# print(len(x))
min_v, max_v = np.min(x), np.max(x)
b = (bounds[0]*max_v - bounds[1]*min_v)/(max_v-min_v)
k = float(bounds[1] - bounds[0])/(max_v-min_v)
X[i] *= k
X[i] += b
return X.T
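# Illustrative check of norm_data: each feature (column) is mapped linearly onto
# the requested bounds, e.g. a single column [0, 5, 10] becomes [-1, 0, 1]:
#   norm_data([[0.0], [5.0], [10.0]])  # -> array([[-1.], [0.], [1.]])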
def points_selection(X, max_n_pnts, min_n_pnts, cut_fun=None, penalty = None):
"""Function for selecting optimal parameters for dimentionality reduction method and for clustering.
Parameters
----------------
X: array with shape (number_of_pixels*number_of_features)
Initial data
"""
#MaxVol
res = rect_maxvol_cut(X, maxK=max_n_pnts, minK=min_n_pnts, cut_fun=cut_fun, penalty=penalty)[0]
return res
def add_coords(X=None, size=(285, 217), order='C', idx_good_mask=None):
"""
order can by 'C' or 'F'
"""
w, h = size
x_coord, y_coord = np.meshgrid(np.arange(h), np.arange(w))
if idx_good_mask is None:
idx_good_mask = np.arange(x_coord.size)
if X is None:
return np.hstack((
x_coord.flatten(order=order)[idx_good_mask, None],
y_coord.flatten(order=order)[idx_good_mask, None]))
else:
return np.hstack((np.array(X, copy=False),
x_coord.flatten(order=order)[idx_good_mask, None],
y_coord.flatten(order=order)[idx_good_mask, None]))
def gen_input(mode, data, shapes,mask):
modes = ['usual', 'normed',
'XY', 'XY_normed']
fn_X_embedded = modes[mode]
return [
lambda x: np.array(x),
lambda x: norm_data(x),
lambda x: add_coords(
x, size=shapes[0], idx_good_mask=mask),
lambda x: norm_data(gen_input(2, x, shapes, mask)[0], copy=False),
][mode](data), fn_X_embedded
def my_score(a, b):
a = np.array(a, copy=False)
b = np.array(b, copy=False)
n = len(a)
assert len(b) == n, 'Arrays have different shapes'
m = len(a[a==b])
return float(m)/float(n)
def f_no_cut(idx, i, copy=False):
if copy:
idx = np.copy(idx)
idx[i] = 0
return idx
def f_cut_eps(idx, i, X, eps=0.1, copy=False):
    # Overrides the 1-D f_cut_eps defined earlier: the last two columns of X are
    # treated as (x, y) coordinates and every point within eps of point i is cut.
if copy:
idx = np.copy(idx)
xx = X[:, -2]
yy = X[:, -1]
#idx[i] = 0
idx[(xx - xx[i])**2 + (yy-yy[i])**2 <= eps**2] = 0
return idx
def calc_score(idx, X, y, to_ret_pred=True):
gnb = GaussianNB()
gnb_model = gnb.fit(X[idx], y[idx])
if to_ret_pred:
scores = extend_score(y, gnb_model.predict(X))
else:
scores = gnb_model.score(X, y)
return scores
def good_points_brute_force(idx, num, X, y):
sc = -1
cmb_good = None
for comb in combinations(idx, num):
comb = np.array(comb)
#print(comb)
sc_curr = calc_score(comb, X=X, y=y, to_ret_pred=True)
if sc_curr > sc:
sc = sc_curr
cmb_good = comb
return cmb_good, sc
def idx_to_idx(idx_big, idx):
hass = dict()
for i, elem in enumerate(idx_big):
hass[elem] = i
return np.array([hass[i] for i in idx])
class MaxVolSampling():
"""
Class to process data with MaxVol, cLHS and random sampling
Input: DEM, terrain features
Return: Sampling points indices
"""
def __init__(self):
self.original_data = None
self.maxvol_dist = None
self.cLHS_dist = None
self.random_dist = None
self.maxvol_indices = None
self.soil_feature = None
self.num_of_points = 15
self.path_to_file_with_indices = None
self.wd = None
self.soil_data = None
self.X = None
self.lons = None
self.lats = None
self.path_to_interpolation_file = None
self.interpolation_array = None
def data_preparation(self, wd, data_m, dem_dir):
"""
Organize the .tif files into flattened vectors, remove NaN values, and stack the vectors into a matrix
"""
fl_names = list(filter(lambda fl: fl.endswith('.tif'), os.listdir(wd+'/ndvi_features/')))
files = list(map(lambda x: gdal.Open(os.path.join(wd+'/ndvi_features/', x)), fl_names))
arrays = list(map(lambda x: x.ReadAsArray().flatten(), files))
shapes = [x.ReadAsArray().shape for x in files]
nodatas = list(map(lambda x: x.GetRasterBand(1).GetNoDataValue(), files))
names = list(map(lambda x: x.replace('.tif','').split('.')[0], fl_names))
if dem_dir is None:
dem_raw = gdal.Open(wd+'/dem.tif')
dem = dem_raw.ReadAsArray()
else:
dem_raw = gdal.Open(dem_dir)
dem = dem_raw.ReadAsArray()
xmin, xres, xskew, ymax, yskew, yres = dem_raw.GetGeoTransform()
xmax = xmin + (dem_raw.RasterXSize * xres)
ymin = ymax + (dem_raw.RasterYSize * yres)
boundary_box = {'xmin':xmin, 'xmax':xmax, 'ymin':ymin, 'ymax':ymax}
dem_flat = dem.flatten()
dem_nodata = dem_raw.GetRasterBand(1).GetNoDataValue()
init_dem_shape = dem.shape
idx_nodata_0 = np.where(dem_flat == dem_nodata)[0]
arrays_no_nodatas = np.zeros((len(arrays[0])-len(idx_nodata_0), len(arrays)))
idx_dem_nodata = np.where(dem_flat == dem_nodata)[0]
idx_dem = np.where(dem_flat != dem_nodata)[0]
dem_no_nodata = np.delete(dem_flat, idx_dem_nodata)
#process with interp data
if self.path_to_interpolation_file is not None:
interpolation_raw_data = np.load(self.path_to_interpolation_file)[::-1]
flatten_interpolation = interpolation_raw_data.flatten()
interpolation_no_nan = np.delete(flatten_interpolation, np.isnan(flatten_interpolation))
self.interpolation_array = interpolation_no_nan
for i in range(len(arrays)):
idx_nodata = np.where(arrays[i] == nodatas[i])[0]
array = arrays[i].copy()
array[idx_nodata]=0
arrays_no_nodatas[:,i] = np.delete(array, idx_nodata_0)
data_arr = arrays_no_nodatas.copy()
# Prepare data
# You can normalize the data and/or append pixel coordinates to it
mode = data_m # Change to 0, 1, 2 or 3
X, fn_X_embedded = gen_input(mode, data_arr, shapes, idx_dem)
self.X = X
# X = np.vstack((X, X[:1,:]))
return X, dem_flat, dem_nodata, init_dem_shape, idx_dem, boundary_box
def create_polygon(self, shape, vertices, value=1):
"""
Creates np.array with dimensions defined by shape
Fills polygon defined by vertices with ones, all other values zero"""
base_array = np.zeros(shape, dtype=float) # Initialize your array of zeros
fill = np.ones(base_array.shape) * True # Initialize boolean array defining shape fill
# Create check array for each edge segment, combine into fill array
for k in range(vertices.shape[0]):
fill = np.all([fill, self.check(vertices[k-1], vertices[k], base_array)], axis=0)
# Set all values inside polygon to one
base_array[fill] = value
return base_array,fill
def find_nearest(self, array, value):
array = np.asarray(array)
idx = np.unravel_index(np.argmin((np.abs(array - value)), axis=None), array.shape)
return array[idx], idx
def check(self, p1, p2, base_array):
"""
Uses the line defined by p1 and p2 to check array of
input indices against interpolated value
Returns boolean array, with True inside and False outside of shape
"""
idxs = np.indices(base_array.shape) # Create 3D array of indices
p1 = p1.astype(float)
p2 = p2.astype(float)
# Calculate max column idx for each row idx based on interpolated line between two points
if p1[0] == p2[0]:
max_col_idx = (idxs[0] - p1[0]) * idxs.shape[1]
sign = np.sign(p2[1] - p1[1])
else:
max_col_idx = (idxs[0] - p1[0]) / (p2[0] - p1[0]) * (p2[1] - p1[1]) + p1[1]
sign = np.sign(p2[0] - p1[0])
return idxs[1] * sign <= max_col_idx * sign
def original_soil_data(self, feature):
soil_data = self.soil_data
data = soil_data[feature]
self.original_data = np.array(data)
def dataframe_to_points(self):
dem_raw = gdal.Open('dem.tif') #('/home/apetrovskaya/maxvol_soil_sampling/src/dem.tif')
dem = dem_raw.ReadAsArray()
self.init_dem_shape = dem.shape
FEATURE = self.soil_feature
soil_data = self.soil_data
lons=soil_data['LON']
self.lons = lons
lats=soil_data['LAT']
self.lats = lats
data = soil_data[FEATURE]
self.original_data = np.array(data)
#coordinate mesh
xmin, ymin, xmax, ymax = [416949.0957, 5750852.2926,417891.8549,5751465.6945] #!!!HARDCODE
st = dem
xv = np.linspace(xmin,xmax, num=st.shape[1])
yv = np.linspace(ymax,ymin, num=st.shape[0])
coords = np.meshgrid(xv,yv)
number_of_points=len(lons)
points_idx=np.zeros((number_of_points,2))
for i in range(number_of_points):
a = self.find_nearest(coords[0],lons[i])[1][1]
b = self.find_nearest(coords[1],lats[i])[1][0]
points_idx[i,:]=[a,b]
points_idx = points_idx.astype(int)
return points_idx, data
def distr_from_voronoi(self):
points_idx,data = self.dataframe_to_points()
# add boundary points so that every Voronoi region is closed
points_idx_add = points_idx.copy()
for i in range(-50,self.init_dem_shape[0]+50,50):
points_idx_add = np.vstack((points_idx_add,[-50, i]))
points_idx_add = np.vstack((points_idx_add,[self.init_dem_shape[1]+50,i]))
for i in range(-50,self.init_dem_shape[1]+50,50):
points_idx_add = np.vstack((points_idx_add,[i, -50]))
points_idx_add = np.vstack((points_idx_add,[i,self.init_dem_shape[0]+50]))
# generate Voronoi tessellation
vor_add=Voronoi(points_idx_add)
# cycle to fill regions in numpy array
pol=np.zeros((self.init_dem_shape[1],self.init_dem_shape[0]))
for r in range(len(vor_add.point_region)):
region = vor_add.regions[vor_add.point_region[r]]
if not -1 in region:
value = data[r]
polygon = [vor_add.vertices[i] for i in region]
polygon = np.asarray(polygon)
hull = ConvexHull(polygon)
_, fill = self.create_polygon((self.init_dem_shape[1],self.init_dem_shape[0]),polygon[hull.vertices][::-1])
pol[fill] = value
pol[pol<min(data)]=min(data)
polygons_in_array=pol.T
self.voronoi_map = polygons_in_array.flatten()
return self.voronoi_map
def i_am_maxvol_function(self):
self.num_of_points
dist_pts = 0.1
wd = self.wd
data_m=3
dem_dir = None
max_n_pnts = self.num_of_points
min_n_pnts = self.num_of_points
X, dem_flat, dem_nodata, init_dem_shape, idx_dem, boundary_box = self.data_preparation(wd, data_m, dem_dir)
#function for distance between points
f_cut = lambda idx, i : f_cut_eps(idx, i, X=X, eps = dist_pts)
#function for distance from border
# f_penal = f_penal_2D(X = X[:, -2], Y = X[:, -1], bnd = 0.3, level = 0.3)
f_penal = f_penal_2D(X = X[:, -2], Y = X[:, -1], bnd = 0.2, level = 0.3) #Change to 0.2
result = points_selection(X, max_n_pnts = max_n_pnts, min_n_pnts = min_n_pnts, cut_fun = f_cut, penalty = f_penal)
#coordinates
# xmin, ymin, xmax, ymax = [37.7928399,51.90236556, 37.8064010,51.90774268]
xmin = boundary_box['xmin']
xmax = boundary_box['xmax']
ymin = boundary_box['ymin']
ymax = boundary_box['ymax']
dem_flat_img = dem_flat.copy()-np.min(dem_flat)
dem_flat_img[np.where(dem_flat == dem_nodata)] = float('NaN')
st = dem_flat_img.reshape(init_dem_shape)
xv = np.linspace(xmin,xmax, num=st.shape[1])
yv = np.linspace(ymax,ymin, num=st.shape[0])
coords = np.meshgrid(xv,yv)
mask = idx_dem
# select corresponding points by indices
y_c,x_c = coords[0].flatten()[mask, None],coords[1].flatten()[mask, None]
y_idx, x_idx = y_c[result],x_c[result]
coord_idx = np.hstack((y_idx,x_idx))
self.maxvol_indices = result
return self.maxvol_indices
def i_am_clhs(self, num_iter):
n_pnts = self.num_of_points
#cLHS
sampled=cl.clhs(self.X[:,:-2], n_pnts, max_iterations=num_iter, progress=False)
self.cLHS_indices = sampled['sample_indices']
return self.cLHS_indices
def i_am_random(self):
random_dist = np.random.randint(low=0,high=self.X.shape[0],size=self.num_of_points)
return random_dist
if __name__ == "__main__":
SAR = MaxVolSampling()
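    # Illustrative continuation (a sketch; the working directory is a placeholder
    # and must contain dem.tif plus an ndvi_features/ folder of .tif rasters):
    #   SAR.wd = '/path/to/project'
    #   SAR.num_of_points = 15
    #   idx_maxvol = SAR.i_am_maxvol_function()   # MaxVol sample indices into SAR.X
    #   idx_clhs = SAR.i_am_clhs(num_iter=10000)  # cLHS baseline
    #   idx_random = SAR.i_am_random()            # random baseline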
|
import json
from datetime import datetime as dt
import d4rl
import gym
import numpy as np
import torch
from oraaclib.agent import BCQ, BEAR
from oraaclib.environment import get_env
from oraaclib.util.logger import Logger
from oraaclib.util.rollout import oraac_rollout
from oraaclib.util.utilities import get_dict_hyperparams
from oraaclib.util.torch_utilities import EarlyStopping
from torch.utils.tensorboard import SummaryWriter
from utils.utilities import (dotdict, get_names,
parse_args)
record_tensorboard = True # if 'cluster' in os.getcwd() else False
save_model = True # if 'cluster' in os.getcwd() else False
render_eval = False
args = parse_args()
if args.model_path:
args.eval = True
if args.render:
render_eval = True
torch.set_num_threads(args.num_threads)
config_name = args.config_name
date = dt.now().strftime("%Y_%m_%d_%H%M%S_%f")
if not args.eval:
with open('json_params/BEAR_BCQ/'+config_name) as f:
params = json.load(f)
for k, v in args.__dict__.items():
if v is not None:
for main_key in params.keys():
for dk in params[main_key].keys():
if k == dk:
params[main_key][dk] = v
if args.env_name is not None:
params['env']['name'] = args.env_name
if args.agent_name is not None:
params['agent']['name'] = args.agent_name
p = dotdict(params)
p.agent = dotdict(params["agent"])
p.env = dotdict(params["env"])
# Defining name_file:
name_file, name_tb, name_save, name_logger_folder = \
get_names(p, args,
date,
record_tensorboard,
save_model)
tb = SummaryWriter('{}'.format(name_tb)) if name_tb is not None else None
logger = Logger(folder=name_logger_folder, name=f'{name_file}')
torch.manual_seed(p.agent.SEED)
np.random.seed(p.agent.SEED)
hyper_params = get_dict_hyperparams(p)
env, dataset = get_env(p)
early_stopper_rew = EarlyStopping(
name_save=name_save, patience=20, verbose=True, delta=1,
evol_type='Mean_cumreward', env_properties=p.env,
agent_properties=p.agent)
early_stopper_var = EarlyStopping(
name_save=name_save, patience=20, verbose=True, delta=1,
evol_type='Cvar_cumreward', env_properties=p.env,
agent_properties=p.agent)
if p.agent.name == 'BEAR':
agent = BEAR(env=env, dataset=dataset, hyper_params=hyper_params,
eval=False,
early_stopper_rew=early_stopper_rew,
early_stopper_var=early_stopper_var,
logger=logger,
name_save=name_save)
elif p.agent.name == 'BCQ':
agent = BCQ(env=env, dataset=dataset, hyper_params=hyper_params,
eval=False,
early_stopper_rew=early_stopper_rew,
early_stopper_var=early_stopper_var,
logger=logger,
name_save=name_save)
else:
raise ValueError(f'Agent "{p.agent.name}" is not implemented. '
                 'Only BEAR and BCQ are available.')
gradient_steps = 10000000
max_episodes = 2000
MAX_EVAL_STEPS = 200
print(f'Start training with the {agent.__class__.__name__} agent')
oraac_rollout(env, agent,
gradient_steps=p.agent.GRADIENT_STEPS,
max_episodes=p.agent.max_episodes,
max_episode_steps=p.agent.MAX_EVAL_STEPS,
eval_freq=p.agent.eval_freq,
times_eval=20)
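# Illustrative shape of a json_params/BEAR_BCQ/ config file, inferred from the
# keys read above (the values are placeholders, not the authors' settings):
# {
#   "env":   {"name": "halfcheetah-medium-v0"},
#   "agent": {"name": "BCQ", "SEED": 0, "GRADIENT_STEPS": 1000000,
#             "max_episodes": 2000, "MAX_EVAL_STEPS": 200, "eval_freq": 5000}
# }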
|
import io
import os
import json
import subprocess
import shutil
import boto3
from .storage import Storage
class S3Storage(Storage):
def __init__(self, bucket: str, key: str, local_path: str):
super().__init__()
self.bucket = bucket
self.key = key
self.index_key = key+"/index"
self.annoy_index_key = self.index_key+"/index.ann"
self.reverse_index_key = self.index_key+"/reverse_index.json"
self.vector_key = key+"/vector"
self.metadata_key = key+"/metadata"
self.local_index_path = local_path+"/index"
self.local_annoy_index_path = self.local_index_path+"/index.ann"
self.local_reverse_index_path = self.local_index_path+"/reverse_index.json"
self.local_vector_path = local_path+"/vector"
self.s3_client = boto3.client("s3")
def save(self, id: str, vector: list, metadata: dict = {}):
vector_key = self.vector_key+"/"+id+".json"
self.s3_client.upload_fileobj(io.BytesIO(json.dumps(vector, indent=4, sort_keys=True).encode("utf-8")),
self.bucket, vector_key)
metadata_key = self.metadata_key+"/"+id+".json"
self.s3_client.upload_fileobj(io.BytesIO(json.dumps(metadata, indent=4, sort_keys=True).encode("utf-8")),
self.bucket, metadata_key)
def delete(self, id: str):
pass
def vector_generator(self):
remote_path = "s3://"+self.bucket+"/"+self.vector_key
subprocess.call(["aws", "s3", "sync", remote_path,
self.local_vector_path, "--delete"])
files = os.listdir(self.local_vector_path)
files = [os.path.join(self.local_vector_path, f) for f in files]
files.sort()
for f in files:
with open(f) as fp:
yield json.load(fp), f.split("/")[-1].split(".")[0]
def save_index(self, index_path: str, reverse_index_path: str):
self.s3_client.upload_file(
index_path, self.bucket, self.annoy_index_key)
self.s3_client.upload_file(
reverse_index_path, self.bucket, self.reverse_index_key)
def get_local_index_path(self):
index_remote_path = "s3://"+self.bucket+"/"+self.index_key
subprocess.call(["aws", "s3", "sync", index_remote_path,
self.local_index_path, "--delete"])
return self.local_annoy_index_path, self.local_reverse_index_path
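# Minimal usage sketch (illustrative; assumes AWS credentials are configured,
# the bucket exists, and the aws CLI used by vector_generator() is installed;
# names below are placeholders):
#   store = S3Storage(bucket="my-vectors", key="prod/embeddings", local_path="/tmp/embeddings")
#   store.save("item-1", [0.1, 0.2, 0.3], metadata={"label": "example"})
#   for vector, item_id in store.vector_generator():
#       print(item_id, len(vector))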
|