# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo.http import request
from odoo.osv import expression
from odoo.addons.account.controllers.portal import PortalAccount
class PortalAccount(PortalAccount):
def _invoice_get_page_view_values(self, invoice, access_token, **kwargs):
values = super(PortalAccount, self)._invoice_get_page_view_values(invoice, access_token, **kwargs)
domain = request.env['account.analytic.line']._timesheet_get_portal_domain()
domain = expression.AND([domain, [('timesheet_invoice_id', '=', invoice.id)]])
values['timesheets'] = request.env['account.analytic.line'].sudo().search(domain)
return values
|
from unittest.case import SkipTest
from batchy.runloop import coro_return, runloop_coroutine, use_gevent_local, use_threading_local
from batchy.batch_coroutine import batch_coroutine, class_batch_coroutine
try:
import gevent
from gevent.coros import Semaphore
import batchy.gevent as batchy_gevent
except ImportError:
batchy_gevent = None
print('Gevent not installed; skipping gevent tests.')
from . import BaseTestCase
CALL_COUNT = 0
@batch_coroutine()
def increment(arg_lists):
def increment_single(n):
return n + 1
global CALL_COUNT
CALL_COUNT += 1
coro_return([increment_single(*ar, **kw) for ar, kw in arg_lists])
yield
class GeventTests(BaseTestCase):
def setup(self):
if not batchy_gevent:
raise SkipTest()
use_gevent_local()
# Quiet gevent's internal exception printing.
self.old_print_exception = gevent.get_hub().print_exception
gevent.get_hub().print_exception = lambda context, type, value, tb: None
global CALL_COUNT
CALL_COUNT = 0
def tear_down(self):
        if not batchy_gevent:
return
gevent.get_hub().print_exception = self.old_print_exception
use_threading_local()
def test_simple_gevent(self):
sema = Semaphore(0)
def acq():
sema.acquire()
return 1
@runloop_coroutine()
def rel():
sema.release()
coro_return(2)
yield
@runloop_coroutine()
def test():
r1, r2 = yield batchy_gevent.spawn(acq), rel()
coro_return(r1 + r2)
self.assert_equals(3, test())
def test_gevent_exceptions(self):
def throw():
raise ValueError()
@runloop_coroutine()
def test():
yield batchy_gevent.spawn(throw)
self.assert_raises(ValueError, test)
def test_batch_with_gevent(self):
def call_increment(i):
return increment(i)
@runloop_coroutine()
def test():
a, b = yield batchy_gevent.spawn(call_increment, 2), increment(3)
coro_return(a + b)
self.assert_equals(7, test())
self.assert_equals(2, CALL_COUNT)
def test_gevent_out_of_order(self):
def acq(s):
s.acquire()
@runloop_coroutine()
def test():
s = Semaphore(0)
future1 = yield batchy_gevent.greenlet_future(gevent.spawn(acq, s))
future2 = yield batchy_gevent.greenlet_future(gevent.spawn(acq, s))
s.release()
yield future1
s.release()
yield future2
test() # shouldn't hang
|
import aiy.audio
import aiy.cloudspeech
import aiy.voicehat
from google_speech import Speech
from phue import Bridge
def main():
hue_bridge = Bridge('192.168.1.134')
hue_bridge.connect()
hue_bridge.get_api()
lights = hue_bridge.lights
light_names = hue_bridge.get_light_objects('name')
Speech('Josh Graff is present',"en").play(None)
print ('test')
button = aiy.voicehat.get_button()
led = aiy.voicehat.get_led()
aiy.audio.get_recorder().start()
    jarvis_thread(light_names)
def jarvis_thread(light_names):
recognizer = aiy.cloudspeech.get_recognizer()
recognizer.expect_hotword('jarvis')
recognizer.expect_phrase('repeat after me')
print('Jarvis now listening...')
while True:
#print('Press the button and speak')
#recognizer.wait_for_hotword()
spoken_text = recognizer.recognize()
if not spoken_text:
print('Sorry, I did not hear you.')
else:
print('You said "', spoken_text, '"')
            if 'turn on the couch' in spoken_text:
light_names['couch'].on = True
elif 'turn off the couch' in spoken_text:
light_names['couch'].on = False
elif 'repeat after me' in spoken_text:
print('repeating spoken_text')
repeatspoken_text = spoken_text.replace('repeat after me',' ',1)
aiy.audio.say(repeatspoken_text,'en-GB',60,100)
elif 'goodbye' in spoken_text:
break
if __name__ == '__main__':
main()
|
from train import *
def test():
X_A_full = T.tensor4('A')
X_B_full = T.tensor4('B')
X_A = pre_process(X_A_full)
X_B = pre_process(X_B_full)
X_A_ = nn.placeholder((bs, 3, image_size*4, image_size*4), name='A_plhd')
X_B_ = nn.placeholder((bs, 3, image_size*4, image_size*4), name='B_plhd')
net = AugmentedCycleGAN((None, 3, image_size, image_size), latent_dim, n_gen_filters, n_dis_filters, n_enc_filters, 3,
use_dropout, use_sigmoid, use_latent_gan)
nn.set_training_off()
fixed_z = T.constant(np.random.normal(size=(bs, latent_dim)), dtype='float32')
fixed_multi_z = T.constant(np.repeat(np.random.normal(size=(n_multi, latent_dim)), bs, 0), dtype='float32')
visuals = net.generate_cycle(X_A, X_B, fixed_z)
multi_fake_B = net.generate_multi(X_A, fixed_multi_z)
visualize_single = nn.function([], list(visuals.values()), givens={X_A_full: X_A_, X_B_full: X_B_},
name='test single')
visualize_multi = nn.function([], multi_fake_B, givens={X_A_full: X_A_}, name='test multi')
val_data = Edges2Shoes((X_A_, X_B_), bs, 1, 'val', False)
mon = nn.Monitor(current_folder='results/Augmented_CycleGAN/run1')
nn.utils.numpy2shared(mon.load('gen_A_B-%d.npy' % param_file_version), net.netG_A_B.params)
nn.utils.numpy2shared(mon.load('gen_B_A-%d.npy' % param_file_version), net.netG_B_A.params)
nn.utils.numpy2shared(mon.load('dis_A-%d.npy' % param_file_version), net.netD_A.params)
nn.utils.numpy2shared(mon.load('dis_B-%d.npy' % param_file_version), net.netD_B.params)
nn.utils.numpy2shared(mon.load('enc_B-%d.npy' % param_file_version), net.netE_B.params)
if use_latent_gan:
nn.utils.numpy2shared(mon.load('dis_z_B.npy', version=param_file_version), net.netD_z_B.params)
print('Testing...')
for _ in val_data:
vis_single = visualize_single()
vis_multi = visualize_multi()
for j, k in enumerate(visuals.keys()):
mon.imwrite('test_' + k, vis_single[j][:n_imgs_to_save], callback=unnormalize)
for j, fake_B in enumerate(vis_multi):
mon.imwrite('test_fake_B_multi_%d.jpg' % j, fake_B, callback=unnormalize)
mon.flush()
print('Testing finished!')
if __name__ == '__main__':
test()
|
import network
import torch
import SimpleITK as sitk
import torch.optim as optim
from torch.utils import data
import numpy as np
import skimage.io as io
import os
from skimage.transform import resize
import time
from sklearn.model_selection import train_test_split
class VoxelMorph():
"""
VoxelMorph Class is a higher level interface for both 2D and 3D
Voxelmorph classes. It makes training easier and is scalable.
"""
def __init__(self, input_dims, dim):
self.dims = input_dims
self.dim = dim
self.net = self.load_model()
self.optimizer = optim.SGD(self.net.parameters(), lr=1e-4, momentum=0.99)
self.params = {'batch_size': 3,
'shuffle': True,
'num_workers': 6,
                       'worker_init_fn': lambda worker_id: np.random.seed(42)  # seed each worker
}
self.criteria = self.cc_smooth
def load_model(self):
in_channel = self.dims[0] * 2
self.net = network.VM_Net(in_channel, dim=self.dim)
torch.cuda.set_device('cuda:0')
self.net.cuda()
return self.net
def check_dims(self, x):
try:
if x.shape[1:] == self.dims:
return
else:
raise TypeError
except TypeError as e:
print("Invalid Dimension Error. The supposed dimension is ",
self.dims, "But the dimension of the input is ", x.shape[1:])
def forward(self, x):
self.check_dims(x)
###### LOSSES
def cross_correlation(self, I, J, n):
I = I.permute(0, 3, 1, 2).cuda()
J = J.permute(0, 3, 1, 2).cuda()
batch_size, channels, xdim, ydim = I.shape
I2 = torch.mul(I, I).cuda()
J2 = torch.mul(J, J).cuda()
IJ = torch.mul(I, J).cuda()
sum_filter = torch.ones((1, channels, n, n)).cuda()
I_sum = torch.conv2d(I, sum_filter, padding=1, stride=(1, 1))
J_sum = torch.conv2d(J, sum_filter, padding=1, stride=(1, 1))
I2_sum = torch.conv2d(I2, sum_filter, padding=1, stride=(1, 1))
J2_sum = torch.conv2d(J2, sum_filter, padding=1, stride=(1, 1))
IJ_sum = torch.conv2d(IJ, sum_filter, padding=1, stride=(1, 1))
win_size = n ** 2
u_I = I_sum / win_size
u_J = J_sum / win_size
cross = IJ_sum - u_J * I_sum - u_I * J_sum + u_I * u_J * win_size
I_var = I2_sum - 2 * u_I * I_sum + u_I * u_I * win_size
J_var = J2_sum - 2 * u_J * J_sum + u_J * u_J * win_size
cc = cross * cross / (I_var * J_var + np.finfo(float).eps)
return torch.mean(cc)
def smooothing(self, y_pred):
dy = torch.abs(y_pred[:, 1:, :, :] - y_pred[:, :-1, :, :])
dx = torch.abs(y_pred[:, :, 1:, :] - y_pred[:, :, :-1, :])
dx = torch.mul(dx, dx)
dy = torch.mul(dy, dy)
d = torch.mean(dx) + torch.mean(dy)
return d / 2.0
def cc_smooth(self, y, ytrue, n=9, lamda=0.01):
cc = self.cross_correlation(y, ytrue, n)
sm = self.smooothing(y)
# print("CC Loss", cc, "Gradient Loss", sm)
loss = -1.0 * cc + lamda * sm
return loss
def dice(self,pred, target):
"""This definition generalize to real valued pred and target vector. This should be differentiable.
pred: tensor with first dimension as batch
target: tensor with first dimension as batch
"""
top = 2 * torch.sum(pred * target, [1, 2, 3])
union = torch.sum(pred + target, [1, 2, 3])
eps = torch.ones_like(union) * 1e-5
bottom = torch.max(union, eps)
dice = torch.mean(top / bottom)
# print("Dice score", dice)
return dice
def mmi(self,I,J):
I = sitk.Cast(sitk.GetImageFromArray(I), sitk.sitkFloat32)
J = sitk.Cast(sitk.GetImageFromArray(J), sitk.sitkFloat32)
# Hijack Simple ITK Registration method for Mattes MutualInformation metric
R = sitk.ImageRegistrationMethod()
R.SetMetricAsMattesMutualInformation()
MMI = R.MetricEvaluate(I, J)
return MMI
def mmi_smooth(self, y, ytrue, n=9, lamda=0.01):
mmi = self.mmi(y,ytrue)
        sm = self.smooothing(y)
loss = -1.0 * mmi + lamda * sm
return loss
######## TRAIN MODEL
def train_model(self, batch_moving, batch_fixed, n=9, lamda=0.01, calc_dice=False):
# Reset Gradients
self.optimizer.zero_grad()
# Move images to gpu
        batch_fixed = batch_fixed.cuda()
        batch_moving = batch_moving.cuda()
# Forward Pass
batch_registered = self.net(batch_moving, batch_fixed)
# Calculate Loss
train_loss = self.criteria(batch_registered, batch_fixed, n, lamda)
        # Note: tensor.cuda() returns a new tensor, so the moved tensors are assigned above
# Backward Pass
train_loss.backward()
# Step
self.optimizer.step()
# Return metrics
if calc_dice:
train_dice = self.dice(batch_registered, batch_fixed.cuda())
return train_loss, train_dice
return train_loss
######## Calculate Losses
def get_test_loss(self, batch_moving, batch_fixed, n=9, lamda=0.01, calc_dice=False):
with torch.set_grad_enabled(False):
            batch_moving = batch_moving.cuda()
            batch_fixed = batch_fixed.cuda()
batch_registered = self.net(batch_moving, batch_fixed)
val_loss = self.criteria(batch_registered, batch_fixed, n, lamda)
if calc_dice:
val_dice_score = self.dice(batch_registered, batch_fixed.cuda())
return val_loss, val_dice_score
return val_loss
class Dataset(data.Dataset):
"""
Dataset class for converting the data into batches.
The data.Dataset class is a pyTorch class which help
in speeding up this process with effective parallelization
"""
'Characterizes a dataset for PyTorch'
def __init__(self, list_IDs):
'Initialization'
self.list_IDs = list_IDs
def __len__(self):
'Denotes the total number of samples'
return len(self.list_IDs)
def __getitem__(self, index):
'Generates one sample of data'
# Select sample
ID = self.list_IDs[index]
# Load data and get label
fixed_image = torch.Tensor(resize(io.imread('./fire-fundus-image-registration-dataset/' + ID + '_1.jpg'), (256, 256, 3)))
moving_image = torch.Tensor(resize(io.imread('./fire-fundus-image-registration-dataset/' + ID + '_2.jpg'), (256, 256, 3)))
return fixed_image, moving_image
def main():
'''
In this I'll take example of FIRE: Fundus Image Registration Dataset
to demostrate the working of the API.
'''
vm = VoxelMorph((3, 256, 256), dim=2) # Object of the higher level class
DATA_PATH = './fire-fundus-image-registration-dataset/'
params = {'batch_size': 16,
'shuffle': True,
'num_workers': 6,
              'worker_init_fn': lambda worker_id: np.random.seed(42)  # seed each worker
}
max_epochs = 5
filename = list(set([x.split('_')[0]
for x in os.listdir('./fire-fundus-image-registration-dataset/')]))
partition = {}
partition['train'], partition['validation'] = train_test_split(
filename, test_size=0.33, random_state=42)
# Generators
training_set = Dataset(partition['train'])
training_generator = data.DataLoader(training_set, **params)
validation_set = Dataset(partition['validation'])
validation_generator = data.DataLoader(validation_set, **params)
# Loop over epochs
for epoch in range(max_epochs):
start_time = time.time()
train_loss = 0
val_loss = 0
# Training
for batch_fixed, batch_moving in training_generator:
loss = vm.train_model(batch_moving, batch_fixed)
train_loss += loss.data
elapsed = "{0:.2f}".format((time.time() - start_time) / 60)
avg_loss = train_loss * params['batch_size'] / len(training_set)
print('[', elapsed, 'mins]', epoch + 1, 'epochs, train loss = ', avg_loss)
# Validation
start_time = time.time()
for batch_fixed, batch_moving in validation_generator:
loss = vm.get_test_loss(batch_moving, batch_fixed)
val_loss += loss.data
elapsed = "{0:.2f}".format((time.time() - start_time) / 60)
avg_loss = val_loss * params['batch_size'] / len(validation_set)
print('[', elapsed, 'mins]', epoch + 1, 'epochs, val loss = ', avg_loss)
if __name__ == "__main__":
main()
|
#!/usr/bin/env python
import os
import subprocess
help2md_exec='/root/help2md/help2md'
output_dir='/root/command_reference/'
def check_help2md():
if not os.path.exists(help2md_exec):
print 'Must have help2md installed.'
exit(1)
def check_anchore():
if os.system('anchore --version') != 0:
print 'Must have anchore installed first.'
exit(2)
def get_subcommands(command):
result = subprocess.check_output(command + ['--help'])
found = False
subcommands = []
for l in result.splitlines():
if l.strip().startswith('Commands:'):
found = True
continue
elif found:
tokens = l.strip().split(' ')
subcommands.append(command + [tokens[0]])
return subcommands
def generate_markdown(command, recurse=False, version=None):
print 'Processing command: ' + str(command)
if not version:
version = subprocess.check_output(command + ['--version'])
outfile = os.path.join(output_dir, '-'.join(command) + '.md')
print 'Creating manpage ' + outfile + ' from command ' + str(command)
cmd = ' '.join(command)
    if os.system(cmd + ' --extended-help > /dev/null 2>&1') != 0:
print 'Using --help instead of --extended-help'
cmd += ' --help'
else:
cmd += ' --extended-help'
if os.system(cmd + ' | ' + help2md_exec + ' > ' + outfile) != 0:
print 'Failed on ' + str(command) + ' SKIPPING'
else:
print 'Success for ' + str(command)
try:
subcommands = get_subcommands(command)
print 'Subcommands: ' + str(subcommands)
except:
print 'Got error with subcommands for command: ' + str(command)
return
print 'Got subcommands: %s' % subcommands
for cmd in subcommands:
generate_markdown(cmd, recurse=True, version=version)
if __name__ == '__main__':
check_anchore()
check_help2md()
print 'Building manpages from help output of anchore commands'
generate_markdown(['anchore'], recurse=True)
print 'Generation complete'
|
import formencode
import inspect
from blazeutils.datastructures import LazyOrderedDict
from blazeform.element import form_elements, CancelElement, CheckboxElement, \
MultiSelectElement, LogicalGroupElement
from blazeform.exceptions import ElementInvalid, ProgrammingError
from blazeform.file_upload_translators import WerkzeugTranslator
from blazeform.processors import Wrapper
from blazeform.util import HtmlAttributeHolder, NotGiven, ElementRegistrar, is_notgiven
# fix the bug in the formencode MaxLength validator
from formencode.validators import MaxLength
MaxLength._messages['__buggy_toolong'] = MaxLength._messages['tooLong']
MaxLength._messages['tooLong'] = 'Enter a value not greater than %(maxLength)i characters long'
class FormBase(HtmlAttributeHolder, ElementRegistrar):
"""
Base class for forms.
"""
def __init__(self, name, static=False, **kwargs):
HtmlAttributeHolder.__init__(self, **kwargs)
ElementRegistrar.__init__(self, self)
self.elements = LazyOrderedDict()
self.els = self.elements
self._name = name
# include a hidden field so we can check if this form was submitted
self._form_ident_field = '%s-submit-flag' % name
# registered element types
self._registered_types = {}
# our renderer
self._renderer = None
# this string is used to generate the HTML id attribute for each
# rendering element
self._element_id_formatter = '%(form_name)s-%(element_id)s'
# our validators
self._validators = []
# file upload translator
self._fu_translator = WerkzeugTranslator
# form errors
self._errors = []
# exception handlers
self._exception_handlers = []
# is the form static?
self._static = static
# init actions
self.register_elements(form_elements)
self.add_hidden(self._form_ident_field, value='submitted')
@property
def defaultable_els(self):
for el in self.els.values():
if el.is_defaultable:
yield el
@property
def submittable_els(self):
for el in self.els.values():
if el.is_submittable:
yield el
@property
def renderable_els(self):
for el in self.els.values():
if el.is_renderable and not el.renders_in_group:
yield el
@property
def returning_els(self):
for el in self.els.values():
if el.is_returning:
yield el
def register_elements(self, dic):
for type, eclass in dic.items():
self.register_element_type(type, eclass)
def register_element_type(self, type, eclass):
if type in self._registered_types:
raise ValueError('type "%s" is already registered' % type)
self._registered_types[type] = eclass
def render(self, **kwargs):
return self._renderer(self).render(**kwargs)
def is_submitted(self):
""" In a normal workflow, is_submitted will only be called once and is
therefore a good method to override if something needs to happen
            after __init__ but before anything else. However, we also need
to use is_submitted internally, but would prefer to make it a little
more user friendly. Therefore, we do this and use _is_submitted
internally.
"""
return self._is_submitted()
def _is_submitted(self):
if getattr(self.elements, self._form_ident_field).is_submitted():
return True
return False
def add_error(self, msg):
self._errors.append(msg)
def is_cancel(self):
if not self.is_submitted():
return False
# look for any CancelElement that has a non-false submit value
# which means that was the button clicked
for element in self.submittable_els:
if isinstance(element, CancelElement):
if element.is_submitted():
return True
return False
def add_validator(self, validator, msg=None):
"""
form level validators are only validators, no manipulation of
values can take place. The validator should be a formencode
validator or a callable. If a callable, the callable should take
one argument, the form object. It should raise a ValueInvalid
exception if applicable.
def validator(form):
if form.myfield.is_valid():
if form.myfield.value != 'foo':
raise ValueInvalid('My Field: must have "foo" as value')
"""
if not formencode.is_validator(validator):
if callable(validator):
validator = Wrapper(to_python=validator)
else:
raise TypeError('validator must be a Formencode validator or a callable')
else:
# FE validators may be passed as the class or an instance
# if class, then make it an instance
if inspect.isclass(validator):
validator = validator()
self._validators.append((validator, msg))
def add_field_errors(self, errors):
errors = errors.copy()
for el in self.elements.keys():
if el in errors.keys():
if isinstance(errors[el], str):
getattr(self.elements, el).errors.append(errors[el])
elif isinstance(errors[el], list):
for error in errors[el]:
getattr(self.elements, el).errors.append(error)
else:
raise TypeError('add_field_errors must be passed a dictionary with '
'values of either strings, or lists of strings')
del errors[el]
# indicate that some errors were not added
if errors:
return False
return True
def is_valid(self):
if not self.is_submitted():
return False
valid = True
# element validation
for element in self.submittable_els:
if not element.is_valid():
valid = False
# whole form validation
for validator, msg in self._validators:
try:
validator.to_python(self)
except formencode.Invalid as e:
valid = False
msg = (msg or str(e))
if msg:
self.add_error(msg)
except ElementInvalid:
# since we are getting an ElementInvalid exception, that means
# our validator needed the value of an element to complete
# validation, but that element is invalid. In that case,
# our form will already be invalid, but we don't want to issue
# an error
valid = False
return valid
def _set_submitted_values(self, values):
for el in self.submittable_els:
key = el.nameattr or el.id
if key in values:
el.submittedval = values[key]
elif isinstance(el, (CheckboxElement, MultiSelectElement, LogicalGroupElement)):
el.submittedval = None
def set_submitted(self, values):
""" values should be dict like """
        # if the form is static, it should not get submitted values
if self._static:
raise ProgrammingError('static forms should not get submitted values')
self._errors = []
        # ident field first since we need to know that to know if we need to
# apply the submitted values
identel = getattr(self.elements, self._form_ident_field)
ident_key = identel.nameattr or identel.id
if ident_key in values:
identel.submittedval = values[ident_key]
if self._is_submitted():
self._set_submitted_values(values)
def set_defaults(self, values):
for el in self.defaultable_els:
if el.id in values:
el.defaultval = values[el.id]
def get_values(self):
"return a dictionary of element values"
retval = {}
for element in self.returning_els:
try:
key = element.nameattr or element.id
except AttributeError:
key = element.id
retval[key] = element.value
return retval
values = property(get_values)
def add_handler(self, exception_txt=NotGiven, error_msg=NotGiven, exc_type=NotGiven,
callback=NotGiven):
self._exception_handlers.append((exception_txt, error_msg, exc_type, callback))
def handle_exception(self, exc):
def can_handle(error_msg):
self._valid = False
if is_notgiven(error_msg):
error_msg = str(exc)
self.add_error(error_msg)
return True
# try element handlers first
for el in self.submittable_els:
if el.handle_exception(exc):
return True
for looking_for, error_msg, exc_type, callback in self._exception_handlers:
if not is_notgiven(exc_type):
if isinstance(exc_type, str):
if exc.__class__.__name__ != exc_type:
continue
else:
if not isinstance(exc, exc_type):
continue
if is_notgiven(callback):
if is_notgiven(looking_for):
return can_handle(error_msg)
elif looking_for in str(exc):
return can_handle(error_msg)
else:
return callback(exc)
return False
def all_errors(self, id_as_key=False):
"""
Returns a tuple with two elements. First element is a list of all
the form-level error strings. The second is a dict where (by
default) the keys are field label strings and the value is a list
        of that field's error strings.
If you set id_as_key=True, the dict of field errors will use the
        field's id, instead of its label, as the key of the dict.
"""
form_errors = list(self._errors)
field_errors = {}
for el in self.submittable_els:
for msg in el.errors:
if not id_as_key:
key = el.label.value
else:
key = el.id
if key not in field_errors:
field_errors[key] = []
field_errors[key].append(msg)
return form_errors, field_errors
class Form(FormBase):
"""
Main form class using default HTML renderer and Werkzeug file upload
translator
"""
def __init__(self, name, static=False, **kwargs):
# make the form's name the id
if 'id' not in kwargs:
kwargs['id'] = name
FormBase.__init__(self, name, static, **kwargs)
# import here or we get circular import problems
from blazeform.render import get_renderer
self._renderer = get_renderer
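# Hedged usage sketch (not part of the original module): a minimal submit/validate
# cycle using only methods defined above (add_validator, set_submitted, is_valid,
# all_errors). The form name 'demo' and the validator are made up for illustration;
# real element types come from blazeform.element.form_elements.
if __name__ == '__main__':
    f = Form('demo')

    def no_op_validator(form):
        # form-level validators receive the form object (see add_validator docstring)
        pass

    f.add_validator(no_op_validator)
    # the hidden '<name>-submit-flag' field added in __init__ marks the form as submitted
    f.set_submitted({'demo-submit-flag': 'submitted'})
    print(f.is_valid())
    print(f.all_errors())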
|
import re
import requests
from bs4 import BeautifulSoup as bs
from jfr_playoff.logger import PlayoffLogger
class RemoteUrl:
url_cache = {}
@classmethod
def fetch_raw(cls, url):
PlayoffLogger.get('remote').info(
'fetching content for: %s', url)
if url not in cls.url_cache:
request = requests.get(url)
encoding_match = re.search(
'content=".*;( )?charset=(.*?)"',
request.content, re.IGNORECASE)
if encoding_match:
PlayoffLogger.get('remote').debug(
'Content encoding: %s',
encoding_match.group(2))
request.encoding = encoding_match.group(2)
cls.url_cache[url] = request.text
PlayoffLogger.get('remote').info(
'fetched %d bytes from remote location',
len(cls.url_cache[url]))
return cls.url_cache[url]
@classmethod
def fetch(cls, url):
return bs(RemoteUrl.fetch_raw(url), 'lxml')
@classmethod
def clear_cache(cls):
cls.url_cache = {}
|
from tapis_cli.display import Verbosity
from tapis_cli.clients.services.mixins import FilesURI
from . import API_NAME, SERVICE_VERSION
from .models import PostIt, HTTP_METHODS, DEFAULT_LIFETIME, DEFAULT_MAX_USES
from .formatters import PostItsFormatOne
__all__ = ['PostItsCreate']
class PostItsCreate(PostItsFormatOne, FilesURI):
HELP_STRING = 'Create a new Postit'
LEGACY_COMMMAND_STRING = 'postits-create'
VERBOSITY = Verbosity.RECORD
EXTRA_VERBOSITY = Verbosity.RECORD_VERBOSE
def get_parser(self, prog_name):
parser = super(PostItsCreate, self).get_parser(prog_name)
parser = FilesURI.extend_parser(self, parser)
parser.add_argument(
'-L',
'--lifetime',
dest='lifetime',
default=DEFAULT_LIFETIME,
metavar='INT',
help='Lifetime (default: {0} sec)'.format(DEFAULT_LIFETIME))
parser.add_argument(
'-m',
'--max-uses',
dest='max_uses',
default=DEFAULT_MAX_USES,
metavar='INT',
help='Maximum redemptions (default: {0})'.format(DEFAULT_MAX_USES))
parser.add_argument('-M',
'--http-method',
dest='http_method',
choices=HTTP_METHODS,
default='GET',
metavar='GET|POST',
help='HTTP method for URL (default: GET)')
# parser.add_argument('-x',
# '--token-username',
# dest='token_username',
# help='Impersonation username (requires admin privileges)')
parser.add_argument('-N',
'--no-auth',
dest='no_auth',
action='store_true',
help='Do not pre-authenticate the URL')
return parser
def take_action(self, parsed_args):
parsed_args = self.preprocess_args(parsed_args)
self.requests_client.setup(API_NAME, SERVICE_VERSION)
http_url = self.get_value(parsed_args, agave=self.tapis_client)
body = {'url': http_url}
if parsed_args.lifetime is not None:
body['lifetime'] = parsed_args.lifetime
if parsed_args.max_uses is not None:
body['maxUses'] = parsed_args.max_uses
if parsed_args.http_method is not None:
body['method'] = parsed_args.http_method
if parsed_args.no_auth is True:
body['noauth'] = True
headers = self.render_headers(PostIt, parsed_args)
rec = self.tapis_client.postits.create(body=body)
data = []
for key in headers:
val = self.render_value(rec.get(key, None))
data.append(val)
# Extend response to show the full URL for the Post-It
headers.append('postit_url')
data.append(self.requests_client.build_url(rec.get('postit')))
return (tuple(headers), tuple(data))
|
for m in range(4):
for y in range(3):
print(f'({m},{y})')
|
for i in range(1, 100):
    if i % 2 != 0:
        print("odd:", i)
|
import sys, os, argparse
import json, urllib2
try:
argn = int(os.environ['ARGN'])
except KeyError:
sys.exit("No proof given")
if argn != 1:
sys.exit("No proof given")
try:
proof = os.environ['ARG0']
proof_url = "https://www.reddit.com/r/ethereumproofs/comments/" + proof + ".json"
request = urllib2.Request(proof_url)
request.add_header('User-Agent', '')
json_data = json.load(urllib2.urlopen(request))
print("[" + json.dumps(json_data[0]['data']['children'][0]['data']['author']) + "," + json.dumps(json_data[0]['data']['children'][0]['data']['title']) + "]")
except:
print("Unable to query reddit")
|
"""Test module for the Markdown paragraphs."""
import re
import sys
import unittest
from books import Books
class TestParagraphs(unittest.TestCase):
"""Unit test of the Markdown lists."""
def setUp(self):
"""Setup: get all the paragraphs."""
self.paragraphs = []
books = Books()
for book in books.books:
            # we are not responsible for the content of the Discord chats
if book.name == 'discord':
continue
for md_path in book.md_paths:
# Extract MD content.
args = {} if sys.version_info[0] < 3 else {'encoding': 'utf-8'}
with open(md_path, **args) as f:
content = f.read()
# Remove annoying string sequences.
# - Multiline code sections.
content = re.sub(r'```.+?(?=```)```', '\n', content, flags=re.S)
content = re.sub(r'\n`.+?(?=`\n)`\n', '\n', content, flags=re.S)
# - Notes.
content = re.sub(r'\n\s*>.+?(?=\n\n)', '\n', content, flags=re.S)
content = re.sub(r'\n\s*>.+?(?=\n$)', '\n', content, flags=re.S)
# - Showdown extensions.
content = re.sub(r'%chart.+?(?=%end)%end', '\n', content, flags=re.S)
content = re.sub(r'%figure.+?(?=%end)%end', '\n', content, flags=re.S)
content = re.sub(r'%api.+?(?=%end)%end', '\n', content, flags=re.S)
content = re.sub(r'%spoiler.+?(?=%end)%end', '\n', content, flags=re.S)
content = re.sub(r'%tab-component.+?(?=%end)%end', '\n', content, flags=re.S)
content = re.sub(r'%robot.*\n', '\n', content, flags=re.S)
# - Headers.
content = re.sub(r'^#.*', '\n', content)
content = re.sub(r'\n#.*', '\n', content)
# - Items.
content = re.sub(r'\n\s*-.+?(?=\n\n)', '\n', content, flags=re.S)
content = re.sub(r'\n\s*-.+?(?=\n$)', '\n', content, flags=re.S)
content = re.sub(r'\n\s*\d+\..+?(?=\n\n)', '\n', content, flags=re.S)
content = re.sub(r'\n\s*\d+\..+?(?=\n$)', '\n', content, flags=re.S)
content = re.sub(r'\n .+?(?=\n)', '\n', content, flags=re.S)
content = re.sub(r'\n .+?(?=\n)', '\n', content, flags=re.S)
# - HTML statements
for _ in range(10):
previous_content = content
content = re.sub(r'\n *<.+?>\n', '\n', content, flags=re.S)
if previous_content == content:
break
content = re.sub(r'\n---\n', '\n', content, flags=re.S)
# - Single hyperlinks
content = re.sub(r'\n\!?\[.+\)\n', '\n', content, flags=re.S)
# - Special statements
content = re.sub(r'\nRelease {{.+?}}\n', '\n', content, flags=re.S)
content = re.sub(r'\n\s*\*\*.+?\n', '\n', content, flags=re.S)
content = re.sub(r'\n\s*\*.+?\*\n', '\n', content, flags=re.S)
content = re.sub(r'\n\s*\{.+?\n', '\n', content, flags=re.S)
# Extract paragraphs.
for match in re.finditer(r'(?s)((?:[^\n][\n]?)+)', content):
paragraph = content[match.start():match.end() - 1]
# - Arrays.
if paragraph.startswith('| ') or paragraph.startswith('> '):
continue
self.paragraphs.append({'paragraph': paragraph, 'md': md_path})
# Debug: Uncomment to display all the acquired paragraphs.
# for p in self.paragraphs:
# print ('@@@')
# print (p)
def test_one_sentence_per_line(self):
"""Test that each sentence is written on one line."""
for p in self.paragraphs:
lines = p['paragraph'].split('\n')
for line in lines:
line = line.strip()
if not line:
continue
self.assertTrue(
line.endswith('.') or line.endswith(':') or line.endswith('!') or line.endswith('?'),
msg='"%s": The following line does not end correctly: "%s"' % (p['md'], line)
)
self.assertFalse(
re.match(r'^[a-z]', line),
msg='"%s": The following line is starting with a lower case: "%s"' % (p['md'], line)
)
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import wagtail.wagtailcore.blocks
import wagtail.wagtailcore.fields
import wagtail.wagtailimages.blocks
class Migration(migrations.Migration):
dependencies = [
('wagtailcore', '0019_verbose_names_cleanup'),
('home', '0003_shoppage'),
]
operations = [
migrations.CreateModel(
name='LookbookPage',
fields=[
('page_ptr', models.OneToOneField(primary_key=True, auto_created=True, serialize=False, parent_link=True, to='wagtailcore.Page')),
('page_content', wagtail.wagtailcore.fields.StreamField((('heading', wagtail.wagtailcore.blocks.CharBlock(icon='title', classname='full title')), ('paragraph', wagtail.wagtailcore.blocks.RichTextBlock()), ('lookbookimage', wagtail.wagtailcore.blocks.ListBlock(wagtail.wagtailcore.blocks.StructBlock((('image', wagtail.wagtailimages.blocks.ImageChooserBlock()), ('caption', wagtail.wagtailcore.blocks.TextBlock(required=False)))), icon='image', template='home/blocks/image_holder.html'))))),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
]
|
from flask_wtf import FlaskForm
from wtforms import StringField,TextAreaField,SubmitField,SelectField
from wtforms.validators import Required,Email
class PostForm(FlaskForm):
'''
Class to create a wtf form for creating a post
'''
content = TextAreaField('YOUR POST')
submit = SubmitField('SUBMIT')
class CommentForm(FlaskForm):
'''
    Class to create a wtf form for creating a comment
'''
opinion = TextAreaField('WRITE COMMENT')
submit = SubmitField('SUBMIT')
class CategoryForm(FlaskForm):
'''
    Class to create a wtf form for creating a category
'''
name = StringField('Category Name', validators=[Required()])
submit = SubmitField('Create')
class UpdateProfile(FlaskForm):
bio = TextAreaField('Tell us about you.',validators = [Required()])
submit = SubmitField('Submit')
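# Hedged usage sketch (not part of the original module): the app, secret key and
# inline template below are made up for illustration; in the project these forms
# are used from the application's own view functions and templates.
if __name__ == '__main__':
    from flask import Flask, render_template_string

    app = Flask(__name__)
    app.config['SECRET_KEY'] = 'demo-only'

    @app.route('/post/new', methods=['GET', 'POST'])
    def new_post():
        form = PostForm()
        if form.validate_on_submit():
            # the submitted post body is available as form.content.data
            return 'Posted: ' + form.content.data
        return render_template_string(
            '<form method="post">{{ form.hidden_tag() }}'
            '{{ form.content() }}{{ form.submit() }}</form>', form=form)

    app.run(debug=True)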
|
import matplotlib
import matplotlib.cm as cm
import matplotlib.pyplot as plt
import numpy as np
from astropy.io import fits
from matplotlib.colors import LinearSegmentedColormap as LSC
from matplotlib.patches import Ellipse
from matplotlib.ticker import MultipleLocator
# ran bettermoments with
# bettermoments 12CO.fits -method quadratic -clip 6 -rms 1.6e-03
def load_data(fname, mu_RA, mu_DEC):
# load the dust figure
hdul = fits.open(fname)
data = hdul[0].data
header = hdul[0].header
nx = header["NAXIS1"]
ny = header["NAXIS2"]
assert (
nx % 2 == 0 and ny % 2 == 0
), "We don't have an even number of pixels, assumptions in the routine are violated."
# RA coordinates
CDELT1 = header[
"CDELT1"
] # decimal degrees. Note that not all decimal degrees are the same distance on the sky
# since this depends on what declination we're at, too!
CRPIX1 = header["CRPIX1"] - 1.0 # Now indexed from 0
# DEC coordinates
CDELT2 = header["CDELT2"]
CRPIX2 = header["CRPIX2"] - 1.0 # Now indexed from 0
cosdec = np.cos(CDELT2 * np.pi / 180)
dRAs = (np.arange(nx) - nx / 2) * CDELT1 * cosdec
dDECs = (np.arange(ny) - ny / 2) * CDELT2
RA = 3600 * dRAs # [arcsec]
DEC = 3600 * dDECs # [arcsec]
# apply the centroid shift
ext = (RA[0] - mu_RA, RA[-1] - mu_RA, DEC[0] - mu_DEC, DEC[-1] - mu_DEC) # [arcsec]
# Get the beam info from the header, like normal
BMAJ = 3600 * header["BMAJ"]
BMIN = 3600 * header["BMIN"]
BPA = header["BPA"]
beam = (BMAJ, BMIN, BPA)
return (data, beam, ext)
def plot_beam(ax, beam, xy=(1, -1), facecolor="0.5", edgecolor="0.5"):
BMAJ, BMIN, BPA = beam
# print('BMAJ: {:.3f}", BMIN: {:.3f}", BPA: {:.2f} deg'.format(BMAJ, BMIN, BPA))
# We need to negate the BPA since the rotation is the opposite direction
# due to flipping RA.
ax.add_artist(
Ellipse(
xy=xy,
width=BMIN,
height=BMAJ,
angle=-BPA,
facecolor=facecolor,
linewidth=0.2,
edgecolor=edgecolor,
)
)
def plot_gas(ax):
"""
Load this dataset and plot it on that ax.
"""
# shift the image by setting the axes limits
mu_RA = (525 - 478.768) * 0.015 # x
mu_DEC = (538.165 - 525) * 0.015 # y from 0, 0
# center pixel is 525, 525
radius = 3.0 # arcsec
data, beam, ext = load_data("gas.fits", mu_RA, mu_DEC)
cmap = plt.get_cmap("inferno")
im = ax.imshow(data, cmap=cmap, origin="lower", extent=ext, aspect="equal")
plot_beam(ax, beam, xy=(0.91 * radius, -0.91 * radius))
ax.set_xlim(radius, -radius)
ax.set_ylim(-radius, radius)
|
import unittest
from double_preceding import double_preceding
class TestDoublePreceding(unittest.TestCase):
"""Tests for double_preceding."""
def test_double_preceding_none(self):
"""Test nothing."""
expected = []
L = []
double_preceding(L)
self.assertEqual(expected, L, "Error.")
def test_double_preceding_one(self):
"""Test one thing."""
expected = [0]
L = [2]
double_preceding(L)
self.assertEqual(expected, L, "Error.")
def test_double_preceding_two(self):
"""Test two things."""
expected = [0, 4]
L = [2, 3]
double_preceding(L)
self.assertEqual(expected, L, "Error.")
def test_double_preceding_neg(self):
"""Test negative thing."""
expected = [0]
L = [-5]
double_preceding(L)
self.assertEqual(expected, L, "Error.")
def test_double_preceding_two_neg(self):
"""Test two negative things."""
expected = [0, -12]
L = [-6, -3]
double_preceding(L)
self.assertEqual(expected, L, "Error.")
def test_double_preceding_neg_pos(self):
"""Test positive and negative things."""
expected = [0, 6, -24]
L = [3, -12, 1]
double_preceding(L)
self.assertEqual(expected, L, "Error.")
def test_double_preceding_zero(self):
"""Test zero."""
expected = [0]
L = [0]
double_preceding(L)
self.assertEqual(expected, L, "Error.")
if __name__ == '__main__':
    unittest.main()
|
import sys
import math
import json
class Vertex:
def __init__(self, x, y, neighbors = None, visited = None):
self.x = x
self.y = y
self.neighbors = neighbors if neighbors is not None else []
self.visited = visited if visited is not None else False
def addNeighbor(self, v):
self.neighbors.append(v)
# The solution file is given as the last command-line argument.
filename = sys.argv[-1]
pts = []
vertices = {}
dist = 0
# Get the points from the input file.
with open('../problems/input/st.txt', 'r') as input:
for line in input:
l = line.split(' ')
pts.append(((float(l[0]), float(l[1]))))
# Get their points and set the edges accordingly. Add the length of each edge to the total length of the tree.
with open(filename, 'r') as input:
for line in input:
l = line.split(' ')
x1 = float(l[0])
y1 = float(l[1])
x2 = float(l[2])
y2 = float(l[3])
if not (x1, y1) in vertices:
vertices[((x1, y1))] = Vertex(x1, y1)
if not ((x2, y2)) in vertices:
vertices[((x2, y2))] = Vertex(x2, y2)
vertices[((x1, y1))].addNeighbor(vertices[((x2, y2))])
vertices[((x2, y2))].addNeighbor(vertices[((x1, y1))])
dist += math.sqrt((x1 - x2)*(x1 - x2) + (y1 - y2)*(y1 - y2))
# print(vertices)
# Ensure that all input points are included by their solution.
for a in pts:
if not a in vertices:
print(json.dumps({"isError": True, "message": "Your graph does not include all of the input points."}))
sys.exit(-1)
# Traverse the graph.
toVisit = [vertices[pts[0]]]
while toVisit:
toVisit[0].visited = True
v = toVisit.pop(0)
for a in v.neighbors:
if not a.visited:
toVisit.append(a)
# Ensure that all input points are visited. Return distance if so or error if not.
for a in pts:
if not vertices[a].visited:
print(json.dumps({"isError": True, "message": "Your graph is not connected."}))
sys.exit(-1)
print(json.dumps({"isError": False, "score": dist, "message": "You got a score of " + str(dist) + "!"}))
|
"""
vclock
~~~~~~
Functions for manipulating vector clocks.
:copyright: (C) 2016 by Eeo Jun
:license: MIT, see LICENSE for details.
"""
import sys
import itertools
from functools import cmp_to_key
if sys.version_info[0] == 2:
zip = itertools.izip
map = itertools.imap
def from_size(n):
"""
Constructs a zeroed, *n* sized vector clock.
"""
return (0,) * n
def merge(a, b):
"""
Given two clocks, return a new clock with all
values greater or equal to those of the merged
clocks.
"""
return tuple(map(max, zip(a, b)))
def compare(a, b):
"""
Compares two vector clocks, returns -1 if ``a < b``,
1 if ``a > b`` else 0 for concurrent events
or identical values.
"""
gt = False
lt = False
for j, k in zip(a, b):
gt |= j > k
lt |= j < k
if gt and lt:
break
return int(gt) - int(lt)
def sort(xs, key=None, reverse=False):
"""
Sort iterable *xs* using the ``vclock.compare``
algorithm, optionally with a *key* function and
whether to *reverse* the sorting (defaults to
ascending order).
"""
cmpfunc = compare if key is None else lambda a, b: compare(key(a), key(b))
return sorted(xs, key=cmp_to_key(cmpfunc), reverse=reverse)
def is_concurrent(a, b):
"""
Returns whether the given clocks are concurrent.
They must not be equal in value.
"""
return (a != b) and compare(a, b) == 0
def increment(clock, index):
"""
Increment the clock at *index*.
"""
return clock[:index] \
+ (clock[index] + 1,) \
+ clock[index+1:]
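# Hedged usage sketch (not part of the original module): two replicas each record
# a local event, the resulting clocks are concurrent, and merging produces a clock
# that happened-after both.
if __name__ == '__main__':
    a = increment(from_size(2), 0)   # (1, 0): event on replica 0
    b = increment(from_size(2), 1)   # (0, 1): event on replica 1
    print(compare(a, b))             # 0  -> concurrent
    print(is_concurrent(a, b))       # True
    merged = merge(a, b)             # (1, 1)
    print(compare(merged, a))        # 1  -> merged clock dominates a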
|
#! /usr/bin/env python
import sys
import rospy
import os
#from mdp_plan_exec.prism_client import PrismClient
#from mdp_plan_exec.mdp import TopMapMdp, ProductMdp
from mdp_plan_exec.mdp import ProductMdp
from mdp_plan_exec.prism_mdp_manager import PrismMdpManager
from strands_executive_msgs.srv import AddMdpModel, GetExpectedTravelTime, UpdateNavStatistics, AddDeleteSpecialWaypoint, AddDeleteSpecialWaypointRequest
from strands_executive_msgs.msg import ExecutePolicyAction, ExecutePolicyFeedback, ExecutePolicyGoal, LearnTravelTimesAction
from topological_navigation.msg import GotoNodeAction, GotoNodeGoal
from actionlib import SimpleActionServer, SimpleActionClient
from std_msgs.msg import String
from actionlib_msgs.msg import GoalStatus
from strands_navigation_msgs.msg import NavStatistics, ExecutePolicyModeAction, ExecutePolicyModeFeedback, ExecutePolicyModeGoal
from mongodb_store.message_store import MessageStoreProxy
from robblog.msg import RobblogEntry
import robblog.utils
from sensor_msgs.msg import Image
import datetime
class MdpPlanner(object):
def __init__(self,top_map):
self.exp_times_handler=PrismMdpManager(8085,'exp_times', top_map)
self.policy_handler=PrismMdpManager(8086,'policy',top_map)
self.travel_time_to_node_service = rospy.Service('/mdp_plan_exec/get_expected_travel_time_to_node', GetExpectedTravelTime, self.travel_time_to_node_cb)
self.add_mdp_service = rospy.Service('/mdp_plan_exec/add_mdp_model', AddMdpModel, self.add_mdp_cb)
#self.generate_policy=rospy.Service('/mdp_plan_exec/generate_policy', GeneratePolicy, self.policy_cb)
self.update_nav_statistics=rospy.Service('mdp_plan_exec/update_nav_statistics',UpdateNavStatistics,self.update_cb)
rospy.loginfo("Creating topological navigation client.")
self.top_nav_action_client= SimpleActionClient('topological_navigation', GotoNodeAction)
self.top_nav_action_client.wait_for_server()
rospy.loginfo(" ...done")
rospy.sleep(0.3)
self.current_prod_mdp_state=None
self.policy_exec_preempted = False
rospy.loginfo("Creating topological navigation client.")
self.top_nav_policy_exec= SimpleActionClient('/topological_navigation/execute_policy_mode', ExecutePolicyModeAction)
self.top_nav_policy_exec.wait_for_server()
rospy.loginfo(" ...done")
rospy.sleep(0.3)
#rospy.loginfo("Creating monitored navigation client.")
#self.mon_nav_action_client= SimpleActionClient('monitored_navigation', MonitoredNavigationAction)
#self.mon_nav_action_client.wait_for_server()
#rospy.loginfo(" ...done")
#rospy.sleep(0.3)
self.executing_policy=False
self.mdp_navigation_action=SimpleActionServer('mdp_plan_exec/execute_policy', ExecutePolicyAction, execute_cb = self.execute_policy_cb, auto_start = False)
self.mdp_navigation_action.register_preempt_callback(self.preempt_policy_execution_cb)
self.mdp_navigation_action.start()
self.learning_travel_times=False
self.learn_travel_times_action=SimpleActionServer('mdp_plan_exec/learn_travel_times', LearnTravelTimesAction, execute_cb = self.execute_learn_travel_times_cb, auto_start = False)
self.learn_travel_times_action.register_preempt_callback(self.preempt_learning_cb)
self.learn_travel_times_action.start()
#self.top_map_mdp=TopMapMdp(top_map)
#self.top_map_mdp.update_nav_statistics()
#self.directory = os.path.expanduser("~") + '/tmp/prism'
#try:
#os.makedirs(self.directory)
#except OSError as ex:
#print 'error creating PRISM directory:', ex
#self.mdp_prism_file=self.directory+'/'+top_map+'.prism'
#self.top_map_mdp.write_prism_model(self.mdp_prism_file)
#self.prism_client.add_model('all_day',self.mdp_prism_file)
self.closest_node=None
self.current_node=None
self.nav_action_outcome=''
self.closest_state_subscriber=rospy.Subscriber('/closest_node', String, self.closest_node_cb)
self.current_state_subscriber=rospy.Subscriber('/current_node', String, self.current_node_cb)
self.nav_stats_subscriber = rospy.Subscriber('/topological_navigation/Statistics', NavStatistics, self.get_nav_status_cb)
#self.nonitored_nav_result=None
#self.monitored_nav_sub=rospy.Subscriber('/monitored_navigation/result', MonitoredNavigationActionResult, self.get_monitored_nav_status_cb)
        self.get_to_exact_pose_timeout=120 # secs
self.forbidden_waypoints=[]
self.forbidden_waypoints_ltl_string=''
self.safe_waypoints=[]
self.safe_waypoints_ltl_string=''
self.special_waypoint_handler_service = rospy.Service('/mdp_plan_exec/add_delete_special_node', AddDeleteSpecialWaypoint, self.add_delete_special_waypoint_cb)
self.msg_store_blog = MessageStoreProxy(collection='robblog')
self.origin_waypoint=''
self.target_waypoint=''
#self.last_stuck_image=None
def add_delete_special_waypoint_cb(self,req):
if req.waypoint_type == AddDeleteSpecialWaypointRequest.FORBIDDEN:
if req.is_addition:
self.add_forbidden_waypoint(req.waypoint)
else:
self.del_forbidden_waypoint(req.waypoint)
if req.waypoint_type == AddDeleteSpecialWaypointRequest.SAFE:
if req.is_addition:
self.add_safe_waypoint(req.waypoint)
else:
self.del_safe_waypoint(req.waypoint)
return True
def add_forbidden_waypoint(self,waypoint):
if waypoint in self.forbidden_waypoints:
rospy.logwarn('Waypoint ' + waypoint + ' already in forbidden waypoint list.')
else:
self.forbidden_waypoints.append(waypoint)
self.set_forbidden_waypoints_ltl_string()
def del_forbidden_waypoint(self,waypoint):
if waypoint not in self.forbidden_waypoints:
rospy.logwarn('Waypoint ' + waypoint + ' not in forbidden waypoint list.')
else:
del self.forbidden_waypoints[self.forbidden_waypoints.index(waypoint)]
self.set_forbidden_waypoints_ltl_string()
def add_safe_waypoint(self,waypoint):
if waypoint in self.safe_waypoints:
rospy.logwarn('Waypoint ' + waypoint + ' already in safe waypoint list.')
else:
self.safe_waypoints.append(waypoint)
self.set_safe_waypoints_ltl_string()
def del_safe_waypoint(self,waypoint):
if waypoint not in self.safe_waypoints:
rospy.logwarn('Waypoint ' + waypoint + ' not in safe waypoint list.')
else:
del self.safe_waypoints[self.safe_waypoints.index(waypoint)]
self.set_safe_waypoints_ltl_string()
def set_forbidden_waypoints_ltl_string(self):
self.forbidden_waypoints_ltl_string=''
for i in range(0,len(self.forbidden_waypoints)):
self.forbidden_waypoints_ltl_string=self.forbidden_waypoints_ltl_string + '"' + self.forbidden_waypoints[i] + '" & !'
if not self.forbidden_waypoints_ltl_string=='':
self.forbidden_waypoints_ltl_string='(!' + self.forbidden_waypoints_ltl_string[:-4] + ')'
print self.forbidden_waypoints_ltl_string
def set_safe_waypoints_ltl_string(self):
self.safe_waypoints_ltl_string=''
for i in range(0,len(self.safe_waypoints)):
self.safe_waypoints_ltl_string=self.safe_waypoints_ltl_string + '"' + self.safe_waypoints[i] + '"' + ' | '
if not self.safe_waypoints_ltl_string=='':
self.safe_waypoints_ltl_string='(' + self.safe_waypoints_ltl_string[:-3] + ')'
print self.safe_waypoints_ltl_string
#-------------------------updating models for both expected time and policy generation
def add_mdp_cb(self,req):
self.exp_times_handler.prism_client.add_model(req.time_of_day,req.mdp_file)
self.policy_handler.prism_client.add_model(req.time_of_day,req.mdp_file)
return True
def update_cb(self,req):
self.exp_times_handler.update_current_top_mdp(req.time_of_day)
self.policy_handler.update_current_top_mdp(req.time_of_day)
return True
#-------------------------expected times
def travel_time_to_node_cb(self,req):
starting_node= req.start_id
self.exp_times_handler.top_map_mdp.set_initial_state_from_name(starting_node)
self.exp_times_handler.update_current_top_mdp(req.time_of_day, False)
specification='R{"time"}min=? [ ( F "' + req.target_id + '") ]'
result=self.exp_times_handler.prism_client.check_model(req.time_of_day,specification)
result=float(result)
return result
#-------------------------policy generation/execution
def execute_learn_travel_times_cb(self,goal):
if self.executing_policy:
self.preempt_policy_execution_cb()
rospy.set_param('/topological_navigation/mode', 'Node_by_node')
self.learning_travel_times=True
timer=rospy.Timer(rospy.Duration(goal.timeout), self.finish_learning_callback,oneshot=True)
n_successive_fails=0
while self.learning_travel_times:
if self.current_node == 'none' or self.current_node is None:
self.policy_handler.top_map_mdp.set_initial_state_from_name(self.closest_node)
else:
self.policy_handler.top_map_mdp.set_initial_state_from_name(self.current_node)
current_waypoint=self.policy_handler.top_map_mdp.initial_state
current_waypoint_trans=self.policy_handler.top_map_mdp.transitions[current_waypoint]
current_trans_count=self.policy_handler.top_map_mdp.transitions_transversal_count[current_waypoint]
current_min=-1
current_min_index=-1
for i in range(0,self.policy_handler.top_map_mdp.n_actions):
if current_waypoint_trans[i] is not False:
if current_min==-1:
current_min=current_trans_count[i]
current_min_index=i
elif current_trans_count[i]<current_min:
current_min=current_trans_count[i]
current_min_index=i
current_action=self.policy_handler.top_map_mdp.actions[current_min_index]
top_nav_goal=GotoNodeGoal()
top_nav_goal.target=current_action.split('_')[2]
self.top_nav_action_client.send_goal(top_nav_goal)
self.top_nav_action_client.wait_for_result()
if self.nav_action_outcome=='fatal' or self.nav_action_outcome=='failed':
n_successive_fails=n_successive_fails+1
else:
n_successive_fails=0
if n_successive_fails>10:
self.policy_handler.update_current_top_mdp('all_day')
self.learn_travel_times_action.set_aborted()
return
self.policy_handler.top_map_mdp.transitions_transversal_count[current_waypoint][current_min_index]+=1
self.exp_times_handler.update_current_top_mdp("all_day")
timer.shutdown()
def finish_learning_callback(self,event):
self.policy_handler.update_current_top_mdp('all_day')
self.learn_travel_times_action.set_succeeded()
self.learning_travel_times=False
def preempt_learning_cb(self):
self.learning_travel_times=False
self.policy_handler.update_current_top_mdp('all_day')
self.top_nav_action_client.cancel_all_goals()
self.learn_travel_times_action.set_preempted()
def generate_prod_mdp_policy(self,specification, time_of_day):
#update initial state
if self.current_node == 'none' or self.current_node is None:
self.policy_handler.top_map_mdp.set_initial_state_from_name(self.closest_node)
else:
self.policy_handler.top_map_mdp.set_initial_state_from_name(self.current_node)
self.policy_handler.update_current_top_mdp(time_of_day)
feedback=ExecutePolicyFeedback()
feedback.expected_time=float(self.policy_handler.prism_client.get_policy(time_of_day,specification))
self.mdp_navigation_action.publish_feedback(feedback)
if feedback.expected_time==float("inf"):
self.policy_handler.product_mdp=None
else:
result_dir=self.policy_handler.get_working_dir() + '/' + time_of_day
self.policy_handler.product_mdp=ProductMdp(self.policy_handler.top_map_mdp,result_dir + '/prod.sta',result_dir + '/prod.lab',result_dir + '/prod.tra')
self.policy_handler.product_mdp.set_policy(result_dir + '/adv.tra')
def execute_policy_cb(self,goal):
if self.learning_travel_times:
self.preempt_learning_cb()
if goal.target_id == self.closest_node:
rospy.loginfo("Already in goal influence area. Navigating to exact pose...")
self.top_nav_policy_exec.send_goal(ExecutePolicyModeGoal(), feedback_cb = self.top_nav_feedback_cb)
status=self.top_nav_policy_exec.get_state()
while (status==GoalStatus.PENDING or status==GoalStatus.ACTIVE):
self.top_nav_policy_exec.wait_for_result(rospy.Duration(0.2))
status= self.top_nav_policy_exec.get_state()
if self.policy_exec_preempted:
self.execute_policy_service_preempt()
return
if status != GoalStatus.SUCCEEDED:
rospy.logerr("Policy mode execution finished with status " + str(status) + ". Aborting...")
self.executing_policy=False
self.mdp_navigation_action.set_aborted()
return
else:
self.mdp_navigation_action.set_succeeded()
return
if goal.task_type==ExecutePolicyGoal.GOTO_WAYPOINT:
if goal.target_id in self.forbidden_waypoints:
rospy.logerr("The goal is a forbidden waypoint. Aborting")
self.mdp_navigation_action.set_aborted()
return
if self.forbidden_waypoints==[]:
specification='R{"time"}min=? [ (F "' + goal.target_id + '") ]'
else:
specification='R{"time"}min=? [ (' + self.forbidden_waypoints_ltl_string + ' U "' + goal.target_id + '") ]'
elif goal.task_type==ExecutePolicyGoal.LEAVE_FORBIDDEN_AREA:
if self.forbidden_waypoints==[]:
rospy.logerr("No forbidden waypoints defined. Nothing to leave.")
self.mdp_navigation_action.set_aborted()
return
elif self.closest_node not in self.forbidden_waypoints:
rospy.logerr(self.closest_node + " is not a forbidden waypoint. Staying here.")
self.mdp_navigation_action.set_aborted()
return
else:
specification='R{"time"}min=? [ (F ' + self.forbidden_waypoints_ltl_string + ') ]'
elif goal.task_type==ExecutePolicyGoal.GOTO_CLOSEST_SAFE_WAYPOINT:
if self.safe_waypoints==[]:
rospy.logerr("No safe waypoints defined. Nowhere to go to.")
self.mdp_navigation_action.set_aborted()
return
elif self.current_node in self.safe_waypoints:
rospy.logerr(self.closest_node + " is already a safe waypoint. Staying here.")
self.mdp_navigation_action.set_aborted()
return
else:
specification='R{"time"}min=? [ (F ' + self.safe_waypoints_ltl_string + ') ]'
elif goal.task_type==ExecutePolicyGoal.COSAFE_LTL:
specification = 'R{"time"}min=? [ (' + goal.target_id + ') ]'
self.generate_prod_mdp_policy(specification,goal.time_of_day)
if self.policy_handler.product_mdp is None:
rospy.logerr("The goal is unattainable. Aborting...")
self.mdp_navigation_action.set_aborted()
return
self.current_prod_mdp_state=self.policy_handler.product_mdp.initial_state
current_policy_mode = self.policy_handler.product_mdp.get_current_policy_mode(self.current_prod_mdp_state)
if self.policy_exec_preempted:
self.execute_policy_service_preempt()
return
self.executing_policy=True
replanned=False
while self.current_prod_mdp_state not in self.policy_handler.product_mdp.goal_states and self.executing_policy and not rospy.is_shutdown():
self.top_nav_policy_exec.send_goal(ExecutePolicyModeGoal(route = current_policy_mode), feedback_cb = self.top_nav_feedback_cb)
status=self.top_nav_policy_exec.get_state()
self.finishing_policy_mode_execution=False
while (status==GoalStatus.PENDING or status==GoalStatus.ACTIVE) and self.executing_policy:
#check that mdp still knows where it is
                if self.current_prod_mdp_state is None:
rospy.logwarn('State transition is not in MDP model! Replanning...')
if goal.task_type==ExecutePolicyGoal.COSAFE_LTL:
rospy.logwarn("The co-safe LTL task will restart from scratch")
replanned = True
self.top_nav_policy_exec.cancel_all_goals()
self.generate_prod_mdp_policy(specification,goal.time_of_day)
if self.policy_handler.product_mdp is None:
rospy.logerr("The goal is unattainable. Aborting...")
self.executing_policy=False
self.mdp_navigation_action.set_aborted()
return
self.current_prod_mdp_state=self.policy_handler.product_mdp.initial_state
self.top_nav_policy_exec.wait_for_result(rospy.Duration(0.2))
status=self.top_nav_policy_exec.get_state()
if not replanned:
if self.policy_exec_preempted:
self.execute_policy_service_preempt()
return
if status != GoalStatus.SUCCEEDED:
rospy.logerr("Policy mode execution finished with status " + str(status) + ". Aborting...")
self.executing_policy=False
self.mdp_navigation_action.set_aborted()
return
if self.executing_policy and self.current_prod_mdp_state not in self.policy_handler.product_mdp.goal_states:
current_policy_mode = self.policy_handler.product_mdp.get_current_policy_mode(self.current_prod_mdp_state)
if self.policy_exec_preempted:
self.execute_policy_service_preempt()
return
else:
replanned=False
self.executing_policy = False
self.exp_times_handler.update_current_top_mdp(goal.time_of_day)
if self.policy_exec_preempted:
self.execute_policy_service_preempt()
return
self.mdp_navigation_action.set_succeeded()
def top_nav_feedback_cb(self,feedback):
if not self.finishing_policy_mode_execution:
self.current_node = feedback.route_status
current_action=self.policy_handler.product_mdp.policy[self.current_prod_mdp_state]
self.current_prod_mdp_state = self.policy_handler.product_mdp.get_new_state(self.current_prod_mdp_state,current_action, self.current_node)
if self.current_prod_mdp_state in self.policy_handler.product_mdp.goal_states:
self.finishing_policy_mode_execution=True
def preempt_policy_execution_cb(self):
self.policy_exec_preempted = True
self.executing_policy=False
def execute_policy_service_preempt(self):
self.top_nav_policy_exec.cancel_all_goals()
self.policy_exec_preempted = False
self.mdp_navigation_action.set_preempted()
#def execute_policy_cb(self,goal):
#if self.learning_travel_times:
#self.preempt_learning_cb()
#rospy.set_param('/topological_navigation/mode', 'Node_to_IZ')
#if self.current_node == 'none' or self.current_node is None:
#self.policy_handler.top_map_mdp.set_initial_state_from_name(self.closest_node)
#else:
#self.policy_handler.top_map_mdp.set_initial_state_from_name(self.current_node)
#self.policy_handler.update_current_top_mdp(goal.time_of_day)
#if goal.task_type==ExecutePolicyGoal.GOTO_WAYPOINT:
#if goal.target_id in self.forbidden_waypoints:
#rospy.logerr("The goal is a forbidden waypoint. Aborting")
#self.mdp_navigation_action.set_aborted()
#return
#if self.forbidden_waypoints==[]:
#specification='R{"time"}min=? [ (F "' + goal.target_id + '") ]'
#else:
#specification='R{"time"}min=? [ (' + self.forbidden_waypoints_ltl_string + ' U "' + goal.target_id + '") ]'
#elif goal.task_type==ExecutePolicyGoal.LEAVE_FORBIDDEN_AREA:
#if self.forbidden_waypoints==[]:
#rospy.logerr("No forbidden waypoints defined. Nothing to leave.")
#self.mdp_navigation_action.set_aborted()
#return
#elif self.closest_node not in self.forbidden_waypoints:
#rospy.logerr(self.closest_node + " is not a forbidden waypoint. Staying here.")
#self.mdp_navigation_action.set_aborted()
#return
#else:
#specification='R{"time"}min=? [ (F ' + self.forbidden_waypoints_ltl_string + ') ]'
#elif goal.task_type==ExecutePolicyGoal.GOTO_CLOSEST_SAFE_WAYPOINT:
#if self.safe_waypoints==[]:
#rospy.logerr("No safe waypoints defined. Nowhere to go to.")
#self.mdp_navigation_action.set_aborted()
#return
#elif self.current_node in self.safe_waypoints:
#rospy.logerr(self.closest_node + " is already a safe waypoint. Staying here.")
#self.mdp_navigation_action.set_aborted()
#return
#else:
#specification='R{"time"}min=? [ (F ' + self.safe_waypoints_ltl_string + ') ]'
#feedback=ExecutePolicyFeedback()
#feedback.expected_time=float(self.policy_handler.prism_client.get_policy(goal.time_of_day,specification))
#self.mdp_navigation_action.publish_feedback(feedback)
#if feedback.expected_time==float("inf"):
#rospy.logerr("The goal is unattainable with the current forbidden nodes. Aborting...")
#self.mdp_navigation_action.set_aborted()
#return
#result_dir=self.policy_handler.get_working_dir() + '/' + goal.time_of_day
#product_mdp=ProductMdp(self.policy_handler.top_map_mdp,result_dir + '/prod.sta',result_dir + '/prod.lab',result_dir + '/prod.tra')
#product_mdp.set_policy(result_dir + '/adv.tra')
#self.executing_policy=True
#current_mdp_state=product_mdp.initial_state
#if current_mdp_state in product_mdp.goal_states:
#rospy.set_param('/topological_navigation/mode', 'Normal')
#top_nav_goal=GotoNodeGoal()
#top_nav_goal.target=goal.target_id
#self.top_nav_action_client.send_goal(top_nav_goal)
#self.top_nav_action_client.wait_for_result()
#result=self.top_nav_action_client.get_state()
#if result==GoalStatus.SUCCEEDED:
#self.mdp_navigation_action.set_succeeded()
#if result==GoalStatus.ABORTED:
#rospy.logerr("Failure in getting to exact pose in goal waypoint")
#self.mdp_navigation_action.set_aborted()
#if result==GoalStatus.PREEMPTED:
#self.mdp_navigation_action.set_preempted()
#return
#n_successive_fails=0
#while current_mdp_state not in product_mdp.goal_states and self.executing_policy and not rospy.is_shutdown():
#current_action=product_mdp.policy[current_mdp_state]
#expected_edge_transversal_time=product_mdp.get_expected_edge_transversal_time(current_mdp_state,current_action)
#top_nav_goal=GotoNodeGoal()
#split_action=current_action.split('_')
#self.origin_waypoint=split_action[1]
#self.target_waypoint=split_action[2]
#top_nav_goal.target=self.target_waypoint
#timer=rospy.Timer(rospy.Duration(4*expected_edge_transversal_time), self.unexpected_trans_time_cb,oneshot=True)
#self.top_nav_action_client.send_goal(top_nav_goal)
#self.top_nav_action_client.wait_for_result()
#if self.current_node == 'none' or self.current_node is None:
#current_mdp_state=product_mdp.get_new_state(current_mdp_state,current_action,self.closest_node)
#else:
#current_mdp_state=product_mdp.get_new_state(current_mdp_state,current_action,self.current_node)
#timer.shutdown()
#if current_mdp_state==-1 and self.executing_policy:
#rospy.logwarn('State transition is not in MDP model! Replanning...')
#self.mon_nav_action_client.cancel_all_goals()
#self.top_nav_action_client.cancel_all_goals()
#if self.current_node == 'none' or self.current_node is None:
#self.policy_handler.top_map_mdp.set_initial_state_from_name(self.closest_node)
#else:
#self.policy_handler.top_map_mdp.set_initial_state_from_name(self.current_node)
#self.policy_handler.update_current_top_mdp(goal.time_of_day)
#feedback.expected_time=float(self.policy_handler.prism_client.get_policy(goal.time_of_day,specification))
#self.mdp_navigation_action.publish_feedback(feedback)
#if feedback.expected_time==float("inf"):
#rospy.logerr("The goal is unattainable with the current forbidden nodes. Aborting...")
#self.mdp_navigation_action.set_aborted()
#return
#product_mdp=ProductMdp(self.policy_handler.top_map_mdp,result_dir + '/prod.sta',result_dir + '/prod.lab',result_dir + '/prod.tra')
#product_mdp.set_policy(result_dir + '/adv.tra')
#current_mdp_state=product_mdp.initial_state
#if current_mdp_state in product_mdp.goal_states:
#rospy.set_param('/topological_navigation/mode', 'Normal')
#top_nav_goal=GotoNodeGoal()
#top_nav_goal.target=goal.target_id
#self.top_nav_action_client.send_goal(top_nav_goal)
#if self.nav_action_outcome=='fatal' or self.nav_action_outcome=='failed':
#n_successive_fails=n_successive_fails+1
#else:
#n_successive_fails=0
#if n_successive_fails>4:
#rospy.logerr("Five successive fails in topological navigation. Aborting...")
#self.executing_policy=False
#self.mon_nav_action_client.cancel_all_goals()
#self.top_nav_action_client.cancel_all_goals()
#self.mdp_navigation_action.set_aborted()
#return
#self.exp_times_handler.update_current_top_mdp(goal.time_of_day)
#self.monitored_nav_result=None
#timeout_counter=0
#while self.monitored_nav_result is None and self.executing_policy and timeout_counter < self.get_to_exact_pose_timeout:
#rospy.sleep(0.5)
#timeout_counter=timeout_counter+1
#if self.executing_policy:
#self.executing_policy=False
#if self.monitored_nav_result==GoalStatus.PREEMPTED:
#self.mdp_navigation_action.set_preempted()
#return
#if self.monitored_nav_result==GoalStatus.SUCCEEDED and self.current_node == goal.target_id:
#self.mdp_navigation_action.set_succeeded()
#return
#if self.monitored_nav_result==GoalStatus.ABORTED or self.monitored_nav_result is None or not self.current_node == goal.target_id:
#rospy.logerr("Failure in getting to exact pose in goal waypoint")
#self.mdp_navigation_action.set_aborted()
#return
#def get_monitored_nav_status_cb(self,msg):
#self.monitored_nav_result=msg.status.status
#def preempt_policy_execution_cb(self):
#self.executing_policy=False
#self.mon_nav_action_client.cancel_all_goals()
#self.top_nav_action_client.cancel_all_goals()
#self.mdp_navigation_action.set_preempted()
def unexpected_trans_time_cb(self,event):
last_stuck_image = None
image_topic = '/head_xtion/rgb/image_color'
#image_topic = '/head_xtion/rgb/image_mono' #simulation topic
#count = 0
#while self.last_stuck_image == None and not rospy.is_shutdown() and count < 10:
#rospy.loginfo('waiting for image of possible blocked path %s' % count)
#count += 1
#rospy.sleep(1)
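        # Grab a single snapshot of the possibly blocked path; wait_for_message blocks for up to 10 s.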
last_stuck_image=rospy.wait_for_message(image_topic, Image , timeout=10.0)
e = RobblogEntry(title=datetime.datetime.now().strftime("%H:%M:%S") + ' - Possible Blocked Path')
e.body = 'It took me a lot more time to go between ' + self.origin_waypoint + ' and ' + self.target_waypoint + ' than I was expecting. Something might be blocking the way.'
        if last_stuck_image is not None:
img_id = self.msg_store_blog.insert(last_stuck_image)
rospy.loginfo('adding possible blockage image to blog post')
            e.body += ' Here is what I saw: \n\n![Image of possible blockage](ObjectID(%s))' % img_id
self.msg_store_blog.insert(e)
#def img_callback(self, img):
#self.last_stuck_image = img
def closest_node_cb(self,msg):
self.closest_node=msg.data
def current_node_cb(self,msg):
self.current_node=msg.data
def get_nav_status_cb(self,msg):
self.nav_action_outcome=msg.status
def main(self):
# Wait for control-c
rospy.spin()
if rospy.is_shutdown():
self.exp_times_handler.prism_client.shutdown(True)
self.policy_handler.prism_client.shutdown(True)
if __name__ == '__main__':
rospy.init_node('mdp_planner')
if len(sys.argv)<2:
print "usage: rosrun mdp_plan_exec mdp_planner <topological_map_name>"
sys.exit(2)
mdp_planner = MdpPlanner(sys.argv[1])
mdp_planner.main()
|
#! /usr/bin/env python
"""
Several helper functions related to working with patches on drupal.org"""
import subprocess
import os
import sys
def download_patch(url):
"""Download a patch file from Drupal.org. Returns the patch filename."""
patch_filename = os.path.basename(url)
p = subprocess.Popen(
[
'curl',
'-o', patch_filename,
url,
]
)
p.wait()
try:
with open(patch_filename):
pass
except IOError:
print "Download Failed"
sys.exit(1)
return patch_filename
def open_diff_files_phpstorm(script_cwd, diff_file):
"""Open all files referenced in a diff in PHPStorm."""
diff_file_point = open(diff_file, "r")
files_to_open = []
for line in diff_file_point:
if '+++ ' in line:
files_to_open.append(
os.path.join(
script_cwd,
line.replace('+++ ', '').replace('b/', '').rstrip()
)
)
return open_files_phpstorm(files_to_open)
def open_rebase_conflicts_phpstorm(script_cwd, patch_filename, rebase_output):
"""Open all files referenced in a rebase conflict in PHPStorm."""
files_to_open = [os.path.join(script_cwd, patch_filename)]
for line in rebase_output:
if 'CONFLICT (content): Merge conflict in ' in line:
files_to_open.append(
os.path.join(
script_cwd,
line.replace('CONFLICT (content): Merge conflict in ', '').rstrip()
)
)
print files_to_open
return open_files_phpstorm(files_to_open)
def open_files_phpstorm(file_list):
"""Open a list of files in PHPStorm."""
file_open_command = ['pstorm', '~/gitDev/drupal']
file_open_command.extend(file_list)
print "Opening files in PHPStorm:"
print "\n".join(file_list)
return subprocess.call(file_open_command)
|
import logging
import zmq
from threading import Thread, Event, Condition
from psdaq.control.ControlDef import ControlDef, front_pub_port, front_rep_port, create_msg
import time
class TimedRun:
def __init__(self, control, *, daqState, args):
self.control = control
self.name = 'mydaq'
self.context = zmq.Context()
self.push_socket = self.context.socket(zmq.PUSH)
self.push_socket.bind('inproc://timed_run')
self.pull_socket = self.context.socket(zmq.PULL)
self.pull_socket.connect('inproc://timed_run')
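        # inproc PUSH/PULL pair: the set_*_state helpers push a target state name on
        # push_socket, and daq_communicator_thread pulls it off pull_socket.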
self.comm_thread = Thread(target=self.daq_communicator_thread, args=(), daemon=True)
self.mon_thread = Thread(target=self.daq_monitor_thread, args=(), daemon=True)
self.ready = Event()
self.daqState = daqState
self.args = args
self.daqState_cv = Condition()
self.comm_thread.start()
self.mon_thread.start()
self.verbose = args.v
# this thread tells the daq to go to a state and waits for the completion
def daq_communicator_thread(self):
logging.debug('*** daq_communicator_thread')
while True:
sss = self.pull_socket.recv().decode("utf-8")
if ',' in sss:
state, phase1 = sss.split(',', maxsplit=1)
else:
state, phase1 = sss, None
logging.debug('*** received %s' % state)
if state in ControlDef.states:
# send 'daqstate(state)' and wait for complete
errMsg = self.control.setState(state)
if errMsg is not None:
logging.error('%s' % errMsg)
continue
with self.daqState_cv:
while self.daqState != state:
logging.debug('daqState \'%s\', waiting for \'%s\'...' % (self.daqState, state))
self.daqState_cv.wait(1.0)
# check for shutdown with nonblocking read
try:
ttt = self.pull_socket.recv(flags=zmq.NOBLOCK).decode("utf-8")
except Exception as ex:
pass
else:
if ttt=='shutdown':
return
logging.debug('daqState \'%s\'' % self.daqState)
if self.daqState == state:
self.ready.set()
elif state=='shutdown':
break
else:
logging.error(f'daq_communicator_thread unrecognized input: \'{state}\'')
def daq_monitor_thread(self):
logging.debug('*** daq_monitor_thread')
while True:
part1, part2, part3, part4, part5, part6, part7, part8 = self.control.monitorStatus()
if part1 is None:
break
elif part1 == 'error':
logging.error(f"{part2}")
elif part1 == 'warning':
logging.warning(f"{part2}")
elif part1 not in ControlDef.transitions:
continue
# part1=transition, part2=state, part3=config
with self.daqState_cv:
self.daqState = part2
self.daqState_cv.notify()
def sleep(self, secs):
logging.debug(f'begin {secs} second wait')
time.sleep(secs)
logging.debug(f'end {secs} second wait')
def set_connected_state(self):
self.push_socket.send_string('connected')
# wait for complete
self.ready.wait()
self.ready.clear()
def set_running_state(self):
self.push_socket.send_string('running')
# wait for complete
self.ready.wait()
self.ready.clear()
def stage(self):
# done once at start of scan
# put the daq into the right state ('connected')
self.set_connected_state()
def unstage(self):
# done once at end of scan
# put the daq into the right state ('connected')
self.set_connected_state()
|
import shutil
from sys import exit
import vigo.config as config
from vigo import git
from vigo.common import git_is_available
def init():
print("Initialize the vigo configuration")
config.vigo_dir.mkdir(parents=True)
print("Cloning openstack/governance")
    if not git.clone(config.governance_url, str(config.vigo_dir)):
        print("\nError during init... failed to clone openstack/governance")
        return
    print("initialization ok!")
def sync():
print("Synchronize the vigo configuration...", end="")
governance = config.vigo_dir / "governance"
    if not git.pull(str(governance)):
        print("\nError during update of openstack/governance")
        return
    print("OK!")
def reset():
print("Reset vigo configuration...", end="")
config.vigo_dir = config.path / ".vigo"
if not config.vigo_dir.exists():
print("\nnothing to do...bye!")
exit(0)
shutil.rmtree(str(config.vigo_dir))
print("Done!")
def execute():
if not git_is_available():
print("Error: git is not available and it's require by vigo")
print("Please install git first and try to rerun vigo")
print("Execution aborted!")
exit(1)
if not config.vigo_dir.exists():
init()
else:
sync()
|
import requests
from django.db import models
from django.contrib.postgres.fields import JSONField
from django.conf import settings
from django.urls import reverse
class Movie(models.Model):
title = models.CharField(max_length=255)
data = JSONField(null=True)
movie_added_on_datetime = models.DateTimeField(auto_now_add=True)
movie_last_accessed_datetime = models.DateTimeField(auto_now=True)
favourite_move = models.ManyToManyField(settings.AUTH_USER_MODEL)
@classmethod
def get_movie_from_api(cls, title, max_retries):
attempt_number = 0
while attempt_number < max_retries:
payload = {'apikey': settings.OMDBAPI_KEY, 't': title}
r = requests.get(settings.OMDBAPI_URL, params=payload, timeout=10)
if r.status_code == 200:
data = r.json()
if data.get('Response') == 'True':
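                    # get_or_create returns a (movie, created) tuple, so `movie` below is
                    # that pair rather than just the Movie instance.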
movie = Movie.objects.get_or_create(title=data.get('Title'), data=data)
return data, movie
if r.status_code == 401:
attempt_number += 1
print('I cannot login - please check your KEY settings')
else:
print('There was a problem with fetching the movie!')
attempt_number += 1
return None, ("", "")
def __str__(self):
return "%s" % self.title
def get_absolute_url(self):
return reverse('movie_details', args=[str(self.id)])
|
from flask_sqlalchemy import get_debug_queries, SQLAlchemy, Pagination
from flask import Flask
from flask_bcrypt import Bcrypt
import os
app = Flask(__name__)
app.config.from_object("config.BaseConfig")
bcrypt = Bcrypt(app)
db = SQLAlchemy(app)
from views import *
def sql_debug(response):
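    # get_debug_queries() returns the queries recorded for the current request;
    # Flask-SQLAlchemy records them in debug/testing mode or when
    # SQLALCHEMY_RECORD_QUERIES is enabled.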
queries = list(get_debug_queries())
query_str = ''
total_duration = 0.0
for q in queries:
total_duration += q.duration
stmt = str(q.statement % q.parameters).replace('\n', '\n ')
query_str += 'Query: {0}\nDuration: {1}ms\n\n'.format(stmt, round(q.duration * 1000, 2))
print('=' * 80)
print(' SQL Queries - {0} Queries Executed in {1}ms'.format(len(queries), round(total_duration * 1000, 2)))
print('=' * 80)
print(query_str.rstrip('\n'))
print('=' * 80 + '\n')
return response
if app.debug:
app.after_request(sql_debug)
if __name__ == "__main__":
app.run()
|
from bottle import route, run, template
@route('/')
def index():
return ('<h1>Hello</h1>')
@route('/hello/<name>')
def hello(name):
return template('<b>Hello {{name}}</b>!', name=name)
#run(host='localhost', port=8080, debug = True)
run(host='0.0.0.0', port=8080, debug = True, reloader = True)
|
# Copyright 2017 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import copy
import networkx
from networkx.algorithms.cycles import find_cycle
from networkx.algorithms.dag import topological_sort
from oslo_log import log as logging
from oslo_utils import excutils
from deckhand.common.document import DocumentDict as dd
from deckhand.common import utils
from deckhand.common.validation_message import ValidationMessage
from deckhand.engine import _replacement as replacement
from deckhand.engine import document_validation
from deckhand.engine import secrets_manager
from deckhand.engine import utils as engine_utils
from deckhand import errors
from deckhand import types
LOG = logging.getLogger(__name__)
class DocumentLayering(object):
"""Class responsible for handling document layering.
Layering is controlled in two places:
1. The ``LayeringPolicy`` control document, which defines the valid layers
and their order of precedence.
2. In the ``metadata.layeringDefinition`` section of normal
(``metadata.schema=metadata/Document/v1.0``) documents.
.. note::
Only documents with the same ``schema`` are allowed to be layered
together into a fully rendered document.
"""
__slots__ = ('_documents_by_index', '_documents_by_labels',
'_documents_by_layer', '_layer_order', '_layering_policy',
'_parents', '_sorted_documents', 'secrets_substitution')
_SUPPORTED_METHODS = (_MERGE_ACTION, _REPLACE_ACTION, _DELETE_ACTION) = (
'merge', 'replace', 'delete')
def _calc_replacements_and_substitutions(
self, substitution_sources):
# Used to track document names and schemas for documents that are not
# replacement documents
non_replacement_documents = set()
for document in self._documents_by_index.values():
parent_meta = self._parents.get(document.meta)
parent = self._documents_by_index.get(parent_meta)
if document.is_replacement:
replacement.check_document_with_replacement_field_has_parent(
parent_meta, parent, document)
replacement.check_replacement_and_parent_same_schema_and_name(
parent, document)
parent.replaced_by = document
else:
# Handles case where parent and child have replacement: false
# as in this case both documents should not be replacement
# documents, requiring them to have different schema/name pair.
replacement.check_child_and_parent_different_metadata_name(
parent, document)
replacement.check_replacement_is_false_uniqueness(
document, non_replacement_documents)
# Since a substitution source only provides the document's
# `metadata.name` and `schema`, their tuple acts as the dictionary key.
# If a substitution source has a replacement, the replacement is used
# instead.
substitution_source_map = {}
for src in substitution_sources:
src_ref = dd(src)
if src_ref.meta in self._documents_by_index:
src_ref = self._documents_by_index[src_ref.meta]
if src_ref.has_replacement:
replacement.check_only_one_level_of_replacement(src_ref)
src_ref = src_ref.replaced_by
substitution_source_map[(src_ref.schema, src_ref.name)] = src_ref
return substitution_source_map
def _replace_older_parent_with_younger_parent(self, child, parent,
all_children):
# If child has layer N, parent N+1, and current_parent N+2, then swap
# parent with current_parent. In other words, if parent's layer is
# closer to child's layer than current_parent's layer, then use parent.
parent_meta = self._parents.get(child.meta)
current_parent = self._documents_by_index.get(parent_meta, None)
if current_parent:
if (self._layer_order.index(parent.layer) >
self._layer_order.index(current_parent.layer)):
self._parents[child.meta] = parent.meta
all_children[child] -= 1
else:
self._parents.setdefault(child.meta, parent.meta)
def _is_actual_child_document(self, document, potential_child):
if document == potential_child:
return False
document_layer_idx = self._layer_order.index(document.layer)
child_layer_idx = self._layer_order.index(potential_child.layer)
parent_selector = potential_child.parent_selector
labels = document.labels
# Labels are key-value pairs which are unhashable, so use ``all``
# instead.
is_actual_child = all(
labels.get(x) == y for x, y in parent_selector.items())
if is_actual_child:
# Documents with different `schema`s are never layered together,
# so consider only documents with same schema as candidates.
if potential_child.schema != document.schema:
reason = ('Child has parentSelector which references parent, '
'but their `schema`s do not match.')
LOG.error(reason)
raise errors.InvalidDocumentParent(
parent_schema=document.schema, parent_name=document.name,
document_schema=potential_child.schema,
document_name=potential_child.name, reason=reason)
# The highest order is 0, so the parent should be lower than the
# child.
if document_layer_idx >= child_layer_idx:
reason = ('Child has parentSelector which references parent, '
'but the child layer %s must be lower than the '
'parent layer %s for layerOrder %s.' % (
potential_child.layer, document.layer,
', '.join(self._layer_order)))
LOG.error(reason)
raise errors.InvalidDocumentParent(
parent_schema=document.schema, parent_name=document.name,
document_schema=potential_child.schema,
document_name=potential_child.name, reason=reason)
return is_actual_child
def _calc_document_children(self, document):
potential_children = []
for label_key, label_val in document.labels.items():
_potential_children = self._documents_by_labels.get(
(label_key, label_val), [])
potential_children.extend(_potential_children)
unique_potential_children = set(potential_children)
for potential_child in unique_potential_children:
if self._is_actual_child_document(document, potential_child):
yield potential_child
def _calc_all_document_children(self):
"""Determine each document's children.
For each document, attempts to find the document's children. Adds a new
key called "children" to the document's dictionary.
.. note::
A document should only have exactly one parent.
If a document does not have a parent, then its layer must be
the topmost layer defined by the ``layerOrder``.
:returns: Ordered list of documents that need to be layered. Each
document contains a "children" property in addition to original
data. List of documents returned is ordered from highest to lowest
layer.
:rtype: List[:class:`DocumentDict`]
:raises IndeterminateDocumentParent: If more than one parent document
was found for a document.
"""
# ``all_children`` is a counter utility for verifying that each
# document has exactly one parent.
all_children = collections.Counter()
        # Maps each child document's meta to the meta of its parent (the document
        # selected by the child's `parentSelector`).
self._parents = {}
for layer in self._layer_order:
documents_in_layer = self._documents_by_layer.get(layer, [])
for document in documents_in_layer:
children = list(self._calc_document_children(document))
if children:
all_children.update(children)
for child in children:
self._replace_older_parent_with_younger_parent(
child, document, all_children)
all_children_elements = list(all_children.elements())
secondary_documents = []
for layer, documents in self._documents_by_layer.items():
if self._layer_order and layer != self._layer_order[0]:
secondary_documents.extend(documents)
for doc in secondary_documents:
# Unless the document is the topmost document in the
# `layerOrder` of the LayeringPolicy, it should be a child document
# of another document.
if doc not in all_children_elements:
if doc.parent_selector:
LOG.debug(
'Could not find parent for document with name=%s, '
'schema=%s, layer=%s, parentSelector=%s.', doc.name,
doc.schema, doc.layer, doc.parent_selector)
# If the document is a child document of more than 1 parent, then
# the document has too many parents, which is a validation error.
elif all_children[doc] > 1:
LOG.info('%d parent documents were found for child document '
'with name=%s, schema=%s, layer=%s, parentSelector=%s'
'. Each document must have exactly 1 parent.',
all_children[doc], doc.name, doc.schema, doc.layer,
doc.parent_selector)
raise errors.IndeterminateDocumentParent(
name=doc.name, schema=doc.schema, layer=doc.layer,
found=all_children[doc])
def _get_layering_order(self, layering_policy):
# Pre-processing stage that removes empty layers from the
# ``layerOrder`` in the layering policy.
layer_order = list(layering_policy.layer_order)
for layer in layer_order[:]:
documents_by_layer = self._documents_by_layer.get(layer, [])
if not documents_by_layer:
LOG.info('%s is an empty layer with no documents. It will be '
'discarded from the layerOrder during the layering '
'process.', layer)
layer_order.remove(layer)
if not layer_order:
LOG.info('Either the layerOrder in the LayeringPolicy was empty '
'to begin with or no document layers were found in the '
'layerOrder, causing it to become empty. No layering '
'will be performed.')
return layer_order
def _topologically_sort_documents(self, substitution_sources):
"""Topologically sorts the DAG formed from the documents' layering
and substitution dependency chain.
"""
result = []
def _get_ancestor(doc, parent_meta):
parent = self._documents_by_index.get(parent_meta)
# Return the parent's replacement, but if that replacement is the
# document itself then return the parent.
use_replacement = (
parent and parent.has_replacement and
parent.replaced_by is not doc
)
if use_replacement:
parent = parent.replaced_by
return parent
g = networkx.DiGraph()
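        # Edges point from a document to the document it depends on (its parent or a
        # substitution source), so reversing the topological order below yields
        # dependencies before the documents that consume them.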
for document in self._documents_by_index.values():
if document.parent_selector:
# NOTE: A child-replacement depends on its parent-replacement
# the same way any child depends on its parent: so that the
# child layers with its parent only after the parent has
# received all layering and substitution data. But other
# non-replacement child documents must first wait for the
                # child-replacement to layer with the parent, so that they
# can use the replaced data.
parent_meta = self._parents.get(document.meta)
ancestor = _get_ancestor(document, parent_meta)
if ancestor:
g.add_edge(document.meta, ancestor.meta)
for sub in document.substitutions:
# Retrieve the correct substitution source using
# ``substitution_sources``. Necessary for 2 reasons:
# 1) It accounts for document replacements.
# 2) It effectively maps a 2-tuple key to a 3-tuple document
# unique identifier (meta).
src = substitution_sources.get(
(sub['src']['schema'], sub['src']['name']))
if src:
g.add_edge(document.meta, src.meta)
try:
cycle = find_cycle(g, orientation='reverse')
except networkx.exception.NetworkXNoCycle:
pass
else:
LOG.error('Cannot determine substitution order as a dependency '
'cycle exists for the following documents: %s.', cycle)
raise errors.SubstitutionDependencyCycle(cycle=cycle)
sorted_documents = reversed(list(topological_sort(g)))
for document_meta in sorted_documents:
if document_meta in self._documents_by_index:
result.append(self._documents_by_index[document_meta])
for document in self._documents_by_index.values():
if document not in result:
result.append(document)
return result
def _pre_validate_documents(self, documents):
LOG.debug('%s performing document pre-validation.',
self.__class__.__name__)
validator = document_validation.DocumentValidation(
documents, pre_validate=True)
results = validator.validate_all()
error_list = []
for result in results:
for e in result['errors']:
for d in e['documents']:
LOG.error('Document [%s, %s] %s failed with '
'pre-validation error: "%s". Diagnostic: "%s".',
d['schema'], d['layer'], d['name'],
e['message'], e['diagnostic'])
error_list.append(
ValidationMessage(
message=e['message'],
doc_schema=d['schema'],
doc_name=d['name'],
doc_layer=d['layer']))
if error_list:
raise errors.InvalidDocumentFormat(error_list=error_list)
def __init__(self,
documents,
validate=True,
fail_on_missing_sub_src=True,
encryption_sources=None,
cleartext_secrets=False):
"""Contructor for ``DocumentLayering``.
:param layering_policy: The document with schema
``deckhand/LayeringPolicy`` needed for layering.
:param documents: List of all other documents to be layered together
in accordance with the ``layerOrder`` defined by the
LayeringPolicy document.
:type documents: List[dict]
:param validate: Whether to pre-validate documents using built-in
schema validation. Skips over externally registered ``DataSchema``
documents to avoid false positives. Default is True.
:type validate: bool
:param fail_on_missing_sub_src: Whether to fail on a missing
substitution source. Default is True.
:type fail_on_missing_sub_src: bool
:param encryption_sources: A dictionary that maps the reference
contained in the destination document's data section to the
            actual unencrypted data. If encrypting data with Barbican, the
reference will be a Barbican secret reference.
:type encryption_sources: dict
:param cleartext_secrets: Whether to show unencrypted data as
cleartext.
:type cleartext_secrets: bool
:raises LayeringPolicyNotFound: If no LayeringPolicy was found among
list of ``documents``.
:raises InvalidDocumentLayer: If document layer not found in layerOrder
for provided LayeringPolicy.
:raises InvalidDocumentParent: If child references parent but they
don't have the same schema or their layers are incompatible.
:raises IndeterminateDocumentParent: If more than one parent document
was found for a document.
"""
self._documents_by_layer = {}
self._documents_by_labels = {}
self._layering_policy = None
self._sorted_documents = {}
self._documents_by_index = {}
# TODO(felipemonteiro): Add a hook for post-validation too.
if validate:
self._pre_validate_documents(documents)
layering_policies = list(
filter(lambda x: x.get('schema').startswith(
types.LAYERING_POLICY_SCHEMA), documents))
if layering_policies:
self._layering_policy = dd(layering_policies[0])
if len(layering_policies) > 1:
LOG.warning('More than one layering policy document was '
'passed in. Using the first one found: [%s] %s.',
self._layering_policy.schema,
self._layering_policy.name)
if self._layering_policy is None:
error_msg = (
'No layering policy found in the system so could not render '
'documents.')
LOG.error(error_msg)
raise errors.LayeringPolicyNotFound()
for document in documents:
document = dd(document)
self._documents_by_index.setdefault(document.meta, document)
if document.layer:
if document.layer not in self._layering_policy.layer_order:
LOG.error('Document layer %s for document [%s] %s not '
'in layerOrder: %s.', document.layer,
document.schema, document.name,
self._layering_policy.layer_order)
raise errors.InvalidDocumentLayer(
document_layer=document.layer,
document_schema=document.schema,
document_name=document.name,
layer_order=', '.join(
self._layering_policy.layer_order),
layering_policy_name=self._layering_policy.name)
self._documents_by_layer.setdefault(document.layer, [])
self._documents_by_layer[document.layer].append(document)
if document.parent_selector:
for label_key, label_val in document.parent_selector.items():
self._documents_by_labels.setdefault(
(label_key, label_val), [])
self._documents_by_labels[
(label_key, label_val)].append(document)
self._layer_order = self._get_layering_order(self._layering_policy)
self._calc_all_document_children()
substitution_sources = self._calc_replacements_and_substitutions(
[
d for d in self._documents_by_index.values()
if not d.is_abstract
])
self.secrets_substitution = secrets_manager.SecretsSubstitution(
substitution_sources,
encryption_sources=encryption_sources,
fail_on_missing_sub_src=fail_on_missing_sub_src,
cleartext_secrets=cleartext_secrets)
self._sorted_documents = self._topologically_sort_documents(
substitution_sources)
del self._documents_by_layer
del self._documents_by_labels
def _log_data_for_layering_failure(self, child, parent, action):
child_data = copy.deepcopy(child.data)
parent_data = copy.deepcopy(parent.data)
engine_utils.deep_scrub(child_data, None)
engine_utils.deep_scrub(parent_data, None)
LOG.debug('An exception occurred while attempting to layer child '
'document [%s] %s with parent document [%s] %s using '
'layering action: %s.\nScrubbed child document data: %s.\n'
'Scrubbed parent document data: %s.', child.schema,
child.name, parent.schema, parent.name, action, child_data,
parent_data)
def _log_data_for_substitution_failure(self, document):
document_data = copy.deepcopy(document.data)
engine_utils.deep_scrub(document_data, None)
LOG.debug('An exception occurred while attempting to add substitutions'
' %s into document [%s] %s\nScrubbed document data: %s.',
document.substitutions, document.schema, document.name,
document_data)
def _apply_action(self, action, child_data, overall_data):
"""Apply actions to each layer that is rendered.
Supported actions include:
* ``merge`` - a "deep" merge that layers new and modified data onto
existing data
* ``replace`` - overwrite data at the specified path and replace it
with the data given in this document
* ``delete`` - remove the data at the specified path
:raises UnsupportedActionMethod: If the layering action isn't found
among ``self.SUPPORTED_METHODS``.
:raises MissingDocumentKey: If a layering action path isn't found
in the child document.
"""
method = action['method']
if method not in self._SUPPORTED_METHODS:
raise errors.UnsupportedActionMethod(
action=action, document=child_data)
# Use copy to prevent these data from being updated referentially.
overall_data = copy.deepcopy(overall_data)
child_data = copy.deepcopy(child_data)
# If None is used, then consider it as a placeholder and coerce the
# data into a dictionary.
if overall_data is None:
overall_data = {}
if child_data is None:
child_data = {}
action_path = action['path']
if action_path.startswith('.data'):
action_path = action_path[5:]
elif action_path.startswith('$.data'):
action_path = action_path[6:]
if not (action_path.startswith('.') or action_path.startswith('$.')):
action_path = '.' + action_path
if method == self._DELETE_ACTION:
if action_path == '.':
overall_data.data = {}
else:
from_child = utils.jsonpath_parse(overall_data.data,
action_path)
if from_child is None:
raise errors.MissingDocumentKey(
child_schema=child_data.schema,
child_layer=child_data.layer,
child_name=child_data.name,
parent_schema=overall_data.schema,
parent_layer=overall_data.layer,
parent_name=overall_data.name,
action=action)
engine_utils.deep_delete(from_child, overall_data.data, None)
elif method == self._MERGE_ACTION:
from_overall = utils.jsonpath_parse(overall_data.data, action_path)
from_child = utils.jsonpath_parse(child_data.data, action_path)
if from_child is None:
raise errors.MissingDocumentKey(
child_schema=child_data.schema,
child_layer=child_data.layer,
child_name=child_data.name,
parent_schema=overall_data.schema,
parent_layer=overall_data.layer,
parent_name=overall_data.name,
action=action)
# If both the child and parent data are dictionaries, then
# traditional merging is possible using JSON path resolution.
# Otherwise, JSON path resolution is not possible, so the only
# way to perform layering is to prioritize the child data over
# that of the parent. This applies when the child data is a
# non-dict, the parent data is a non-dict, or both.
if all(isinstance(x, dict) for x in (from_overall, from_child)):
engine_utils.deep_merge(from_overall, from_child)
else:
LOG.info('Child data is type: %s for [%s, %s] %s. Parent data '
'is type: %s for [%s, %s] %s. Both must be '
'dictionaries for regular JSON path merging to work. '
'Because this is not the case, child data will be '
'prioritized over parent data for "merge" action.',
type(from_child), child_data.schema, child_data.layer,
child_data.name, type(from_overall),
overall_data.schema, overall_data.layer,
overall_data.name)
from_overall = from_child
if from_overall is not None:
overall_data.data = utils.jsonpath_replace(
overall_data.data, from_overall, action_path)
else:
overall_data.data = utils.jsonpath_replace(
overall_data.data, from_child, action_path)
elif method == self._REPLACE_ACTION:
from_child = utils.jsonpath_parse(child_data.data, action_path)
if from_child is None:
raise errors.MissingDocumentKey(
child_schema=child_data.schema,
child_layer=child_data.layer,
child_name=child_data.name,
parent_schema=overall_data.schema,
parent_layer=overall_data.layer,
parent_name=overall_data.name,
action=action)
overall_data.data = utils.jsonpath_replace(
overall_data.data, from_child, action_path)
return overall_data
def render(self):
"""Perform layering on the list of documents passed to ``__init__``.
Each concrete document will undergo layering according to the actions
defined by its ``metadata.layeringDefinition``. Documents are layered
with their parents. A parent document's ``schema`` must match that of
        the child, and its ``metadata.labels`` must match the child's
``metadata.layeringDefinition.parentSelector``.
:returns: The list of concrete rendered documents.
:rtype: List[dict]
:raises UnsupportedActionMethod: If the layering action isn't found
among ``self.SUPPORTED_METHODS``.
:raises MissingDocumentKey: If a layering action path isn't found
in both the parent and child documents being layered together.
"""
for doc in self._sorted_documents:
# Control documents don't need to be layered.
if doc.is_control:
continue
# Retrieve the encrypted data for the document if its
# data has been encrypted so that future references use the actual
# secret payload, rather than the Barbican secret reference.
if doc.is_encrypted and doc.has_barbican_ref:
encrypted_data = self.secrets_substitution\
.get_unencrypted_data(
secret_ref=doc.data,
src_doc=doc,
dest_doc=doc
)
if not doc.is_abstract:
doc.data = encrypted_data
self.secrets_substitution.update_substitution_sources(
meta=doc.meta,
data=encrypted_data
)
self._documents_by_index[doc.meta].data = encrypted_data
LOG.debug("Rendering document %s:%s:%s", *doc.meta)
if doc.parent_selector:
parent_meta = self._parents.get(doc.meta)
if parent_meta:
LOG.debug("Using parent %s:%s:%s", *parent_meta)
parent = self._documents_by_index[parent_meta]
if doc.actions:
rendered_doc = parent
# Apply each action to the current document.
for action in doc.actions:
LOG.debug('Applying action %s to document with '
'schema=%s, layer=%s, name=%s.', action,
*doc.meta)
try:
rendered_doc = self._apply_action(
action, doc, rendered_doc)
except Exception:
with excutils.save_and_reraise_exception():
try:
self._log_data_for_layering_failure(
doc, parent, action)
except Exception: # nosec
pass
doc.data = rendered_doc.data
self.secrets_substitution.update_substitution_sources(
doc.meta, rendered_doc.data)
self._documents_by_index[doc.meta] = rendered_doc
else:
LOG.debug(
'Skipped layering for document [%s, %s] %s which '
'has a parent [%s, %s] %s, but no associated '
'layering actions.', doc.schema, doc.layer,
doc.name, parent.schema, parent.layer, parent.name)
# Perform substitutions on abstract data for child documents that
# inherit from it, but only update the document's data if concrete.
if doc.substitutions:
try:
substituted_doc = list(
self.secrets_substitution.substitute_all(doc))
except Exception:
with excutils.save_and_reraise_exception():
try:
self._log_data_for_substitution_failure(doc)
except Exception: # nosec
pass
if substituted_doc:
rendered_doc = substituted_doc[0]
# Update the actual document data if concrete.
doc.data = rendered_doc.data
if not doc.has_replacement:
self.secrets_substitution.update_substitution_sources(
doc.meta, rendered_doc.data)
self._documents_by_index[doc.meta] = rendered_doc
# NOTE: Since the child-replacement is always prioritized, before
# other children, as soon as the child-replacement layers with the
# parent (which already has undergone layering and substitution
# itself), replace the parent data with that of the replacement.
if doc.is_replacement:
parent.data = doc.data
# Return only concrete documents and non-replacements.
return [d for d in self._sorted_documents
if d.is_abstract is False and d.has_replacement is False]
@property
def documents(self):
return self._sorted_documents
|
from django.apps import AppConfig
class DogbreedConfig(AppConfig):
name = 'dogbreed'
|
KEYWORDS = {
"print": "keyword",
}
OPERATORS = {
"+": "PLUS",
"-": "MINUS",
"*": "MUL",
"/": "DIV",
}
TYPES = {
"string" : "STRING",
"int" : "INT",
"float" : "FLOAT",
}
DIGITS = "0123456789"
EMPTY = [" ", "\t"]
SYMBOLS = {
"(": "left-bracket",
")": "right-bracket",
",": "comma"
}
|
# consider a loop that counts from 1 to 10,
# but prints only the odd numbers in that range
# the continue statement tells Python to skip the rest of the loop body and go back to the top
numero_atual = 0
while numero_atual < 10:
numero_atual += 1
if numero_atual % 2 == 0:
continue
print(numero_atual)
|
from pytz import utc
from apscheduler.schedulers.background import BackgroundScheduler
from apscheduler.jobstores.sqlalchemy import SQLAlchemyJobStore
from apscheduler.executors.pool import ThreadPoolExecutor, ProcessPoolExecutor
jobstores = {
'default': SQLAlchemyJobStore(url='sqlite:///jobs.sqlite')
}
executors = {
'default': ThreadPoolExecutor(1),
'processpool': ProcessPoolExecutor(1)
}
job_defaults = {
'coalesce': False,
'max_instances': 2
}
scheduler = BackgroundScheduler(jobstores=jobstores, executors=executors, job_defaults=job_defaults, timezone=utc)
|
#!/bin/env python
# -*- coding: utf-8 -*-
#
# Created on 13.11.20
#
# Created for py_bacy
#
# @author: Tobias Sebastian Finn, tobias.sebastian.finn@uni-hamburg.de
#
# Copyright (C) 2020 Tobias Sebastian Finn
#
# System modules
import logging
import os
# External modules
import matplotlib as mpl
mpl.use('agg')
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# Internal modules
mpl_logger = logging.getLogger('matplotlib')
mpl_logger.setLevel(logging.WARNING)
logger = logging.getLogger(__name__)
def plot_rank_hist(fg_values, obs_values):
stacked_fg = fg_values.stack(grid_time=['time', 'obs_grid_1'])
ens_vals = stacked_fg.transpose('grid_time', 'ensemble').values
ens_vals = np.concatenate([
np.array([-np.inf]).repeat(ens_vals.shape[0])[:, None],
np.sort(ens_vals, axis=-1)
], axis=1)
obs_vals = obs_values.values.reshape(-1)
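    # For each observation, argmin finds the first sorted ensemble member it does not
    # exceed; subtracting 1 turns that into a 0-based rank (the prepended -inf column
    # guarantees at least one True). A rank of -1 means the observation exceeds every
    # member; mapping it to 40 appears to assume a 40-member ensemble.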
rank = (obs_vals[:, None] > ens_vals).argmin(axis=-1) - 1
rank[rank < 0] = 40
fig, ax = plt.subplots()
ax.hist(rank, bins=np.arange(-0.5, ens_vals.shape[1]+0.5, 1))
    ax.set_ylabel('Number of occurrences')
ax.set_xlabel('Rank')
return fig, ax
def plot_histogram(values, x_axis='Differences obs-mean', bins=50):
fig, ax = plt.subplots()
hist = ax.hist(values, bins=bins, log=True)
ax.axvline(ymax=np.array(hist[0]).max() + 10, color='black')
    ax.set_ylabel('Log-number of occurrences')
ax.set_xlabel(x_axis)
if isinstance(hist[0], list):
ax.legend(
hist[-1], pd.to_datetime(values.time.values).strftime('%H:%M:%S')
)
return fig, ax
def write_obs_plots(fg_grouped, obs_grouped, run_dir, suffix):
fg_mean = fg_grouped.mean('ensemble')
diff_mean = obs_grouped['observations'] - fg_mean
out_dir = os.path.join(run_dir, 'output')
for group in diff_mean['obs_group']:
fig, ax = plot_histogram(
diff_mean.sel(obs_group=group).values.flatten()
)
file_name = 'hist_mean_diff_{0}_{1:s}.png'.format(int(group), suffix)
fig.savefig(os.path.join(out_dir, file_name))
fig, ax = plot_histogram(
diff_mean.sel(obs_group=group)
)
file_name = 'hist_mean_time_diff_{0}_{1:s}.png'.format(
int(group), suffix
)
fig.savefig(os.path.join(out_dir, file_name))
diff_ens = obs_grouped['observations'] - fg_grouped
for group in obs_grouped['observations']['obs_group']:
group = group.drop('ensemble')
fig, ax = plot_rank_hist(
fg_grouped.sel(obs_group=group),
obs_grouped['observations'].sel(obs_group=group)
)
file_name = 'rank_hist_{0}_{1:s}.png'.format(int(group), suffix)
fig.savefig(os.path.join(out_dir, file_name))
fig, ax = plot_histogram(
diff_ens.sel(obs_group=group).values.flatten()
)
file_name = 'hist_ens_diff_{0}_{1:s}.png'.format(
int(group), suffix
)
fig.savefig(os.path.join(out_dir, file_name))
plt.cla()
|
# coding=utf-8
def shift_command():
# return 225
return 0
def command_command():
# return 227
return 0
def control_command():
# return 224
return 0
def alt_command():
# return 226
return 0
def escape_command():
return 41
def function_command():
return 0
def singlequote():
return 52
def exclamation():
return 30
def doublequote():
return 52
def bang():
return 32
def dollar():
return 33
def percent():
return 196
def ampersand():
return 199
def left_bracket():
return 47
def right_bracket():
return 48
def plus():
return 87
def comma():
return 54
def minus():
return 45
def underscore():
return 45
def period():
return 55
def forward_slash():
return 56
def zero():
return 39
def one():
return 30
def two():
return 31
def three():
return 32
def four():
return 33
def five():
return 34
def six():
return 35
def seven():
return 36
def eight():
return 37
def nine():
return 38
def colon():
return 203
def semicolon():
return 51
def less_than():
return 197
def equals_to():
return 103
def more_than():
return 198
def questionmark():
return 56
def at_symbol():
return 206
def open_square_bracket():
return 47
def backward_slash():
return 49
def close_square_bracket():
return 48
def power_of():
return 35
def backtick():
return 53
def backspace():
return 42
def letter_a():
return 4
def letter_b():
return 5
def letter_c():
return 6
def letter_d():
return 7
def delete_char():
return 76
def down():
return 81
def letter_e():
return 8
def enter_char():
return 40
def letter_f():
return 9
def function_one():
return 58
def function_ten():
return 67
def function_eleven():
return 68
def function_twelve():
return 69
def function_two():
return 59
def function_three():
return 60
def function_four():
return 61
def function_five():
return 62
def function_six():
return 63
def function_seven():
return 64
def function_eight():
return 65
def function_nine():
return 66
def free_button():
return 4
def letter_g():
return 10
def letter_h():
return 11
def letter_i():
return 12
def letter_j():
return 13
def letter_k():
return 14
def letter_l():
return 15
def left():
return 80
def letter_m():
return 16
def letter_n():
return 17
def letter_o():
return 18
def letter_p():
return 19
def null_key():
return 0
def letter_q():
return 20
def letter_r():
return 21
def right():
return 79
def letter_s():
return 22
def space_char():
return 44
def letter_t():
return 23
def tab_char():
return 43
def letter_u():
return 24
def up():
return 82
def letter_v():
return 25
def letter_w():
return 26
def letter_x():
return 27
def letter_y():
return 28
def letter_z():
return 29
def open_squigly_bracket():
return 47
def bar():
return 201
def close_squigly_bracket():
return 48
def tilde():
return 53
def pound():
return 204
def euro():
return 4
def home():
return 74
def end():
return 77
def pgup():
return 75
def pgdown():
return 78
def volup():
return 128
def voldown():
return 129
def mute():
return 127
def power_button():
return 161
def dim():
return 102
def brighten():
return 0
lookup = {
'0': zero,
'1': one,
'2': two,
'3': three,
'4': four,
'5': five,
'6': six,
'7': seven,
'8': eight,
'9': nine,
'a': letter_a,
'b': letter_b,
'c': letter_c,
'd': letter_d,
'e': letter_e,
'f': letter_f,
'g': letter_g,
'h': letter_h,
'i': letter_i,
'j': letter_j,
'k': letter_k,
'l': letter_l,
'm': letter_m,
'n': letter_n,
'o': letter_o,
'p': letter_p,
'q': letter_q,
'r': letter_r,
's': letter_s,
't': letter_t,
'u': letter_u,
'v': letter_v,
'w': letter_w,
'x': letter_x,
'y': letter_y,
'z': letter_z,
'Alt': alt_command,
'Backspace': backspace,
'Command': command_command,
'Ctrl': control_command,
'Del': delete_char,
'Down': down,
'Enter': enter_char,
'Esc': escape_command,
'F1': function_one,
'F10': function_ten,
'F11': function_eleven,
'F12': function_twelve,
'F2': function_two,
'F3': function_three,
'F4': function_four,
'F5': function_five,
'F6': function_six,
'F7': function_seven,
'F8': function_eight,
'F9': function_nine,
'Fn': function_command,
'Free1': free_button,
'Left': left,
'phantom': null_key,
'Right': right,
'Shift': shift_command,
'Space': space_char,
'Tab': tab_char,
'Up': up,
'Home': home,
'End': end,
'PgUp': pgup,
'PgDown': pgdown,
'VolUp': volup,
'VolDown': voldown,
'Mute': mute,
'Power': power_button,
'Dim': dim,
'Brighten': brighten,
'{': open_squigly_bracket,
'|': bar,
'}': close_squigly_bracket,
'~': tilde,
'£': pound,
'€': euro,
"'": singlequote,
'!': exclamation,
'"': doublequote,
'#': bang,
'$': dollar,
'%': percent,
'&': ampersand,
'(': left_bracket,
')': right_bracket,
'+': plus,
',': comma,
'-': minus,
'_': underscore,
'.': period,
'/': forward_slash,
':': colon,
';': semicolon,
'<': less_than,
'equals': equals_to,
'>': more_than,
'?': questionmark,
'@': at_symbol,
'[': open_square_bracket,
'backslash': backward_slash,
']': close_square_bracket,
'^': power_of,
'`': backtick
}
|
"""
Games __init__.
"""
try:
from spikey.games.gym_wrapper import gym_wrapper
from spikey.games.CartPole import CartPole
from spikey.games.Logic import Logic
except ImportError as e:
raise ImportError(f"games/__init__.py failed: {e}")
|
import click
import pprint
import os
import configparser
from .core import NetDefine
if os.path.exists('netdefine.cfg'):
config = configparser.ConfigParser()
config.read('netdefine.cfg')
root = config['DEFAULT']['root']
netdefine = NetDefine(root=root)
else:
# if no config file exists, assume the current working dir as project root
netdefine = NetDefine(root=".")
printer = pprint.PrettyPrinter(indent=2)
@click.group()
def cli():
pass
@cli.command()
def plan():
result = netdefine.plan()
if result != {'templates_changed': None,
'features_changed': None,
'components_changed': None,
'templates_added': None,
'templates_removed': []}:
templates = result['templates_changed']
features = result['features_changed']
components = result['components_changed']
templates_added = result['templates_added']
templates_removed = result['templates_removed']
print(f"\nNetdefine has detected changes.")
if features:
print('\nThe following features have been changed:')
for feature in features:
print(f" - {feature}")
if components:
print('\nThe following components have been changed:')
for component in components:
print(f" - {component}")
if templates:
print(f"\nThe following device configurations are affected by changes")
for template in templates:
print(f" - Device Template: {template}")
if templates_added:
print(f"\nThe following device templates have been added")
for template in templates_added:
print(f" - Device Template: {template}")
if templates_removed:
print(f"\nThe following device templates have been removed")
for template in templates_removed:
print(f" - Device Template: {template}")
print("\n Run apply to update device configurations")
else:
print('The state has not changed since the last apply')
@cli.command()
@click.argument('change')
@click.option('--difference', is_flag=True)
@click.option('--dry_run', is_flag=True)
@click.option('--display', is_flag=True)
@click.option('--target')
def apply(change, difference, dry_run, display, target):
if difference:
templates = netdefine.apply(change=change, difference=True, dry_run=dry_run, target=target)
else:
templates = netdefine.apply(change=change, dry_run=dry_run, target=target)
if templates:
print(f'apply success for change {change}\n')
if display:
for template in templates:
print(f'Template: \n {template["template"]} \n')
print(f'Config: \n\n{template["config"]}\n')
if __name__=='__main__':
cli()
|
import sys
import socket
import os
import struct
import random
def readuntil( s, u ):
z = ''
while z.endswith( u ) == False:
z += s.recv(1)
return z
def readline( s ):
return readuntil( s, '\n')
def eatprompt(s):
return readuntil( s, '>>> ')
def main( argv ):
if len(argv) == 3:
HOST=argv[1]
PORT=int(argv[2])
else:
if 'HOST' not in os.environ or 'PORT' not in os.environ:
            print '[ERROR] target and port must be specified via arguments or environment variables.'
sys.exit(1)
HOST=os.environ['HOST']
PORT=int(os.environ['PORT'])
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
s.connect((HOST, PORT))
except:
print '[FAIL] Failed to connect to target %s:%d' %(HOST, PORT)
sys.exit(1)
print '[INFO] Connected to %s:%d' %(HOST, PORT)
print 'RAX=0xdeadbeefcafebabf'
print 'ST0=0x403edeadbeefcafebac0'
eatprompt(s)
### init a larger file
s.send('A = [ 16045690984503098046.0 16045690984503098046.0 16045690984503098046.0 16045690984503098047.0 16045690984503098048.0 16045690984503098046.0 16045690984503098047.0 ]\n')
eatprompt(s)
s.settimeout(5.0)
s.send('B = %reshape(A, [1 1])\n')
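    # If the reshape crashed the service, the recv below either returns nothing or
    # times out (the 5 s timeout set above), which the script treats as success.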
try:
y = s.recv(4)
if len(y) == 0:
raise Exception('Read length was zero')
except:
print '[SUCCESS] Read failed which likely indicates a crash'
s.close()
sys.exit(0)
print '[FAIL] Successfully read the prompt which means a crash did not occur'
s.send('exit\n')
s.close()
sys.exit(1)
if __name__ == '__main__':
main(sys.argv)
|
from typing import List
from py_client.aidm.aidm_base_classes import _HasID, _HasCode, _HasDebugString
from py_client.aidm.aidm_train_path_node_classes import AlgorithmTrainPathNode
class AlgorithmNodeTrack(_HasID, _HasCode, _HasDebugString):
def __init__(self, id: int, code: str, debug_string: str):
_HasID.__init__(self, id)
_HasCode.__init__(self, code)
_HasDebugString.__init__(self, debug_string)
class AlgorithmNode(_HasID, _HasCode, _HasDebugString):
    __node_tracks: List[AlgorithmNodeTrack]
def __init__(self, id: int, code: str, debug_string: str, node_tracks: List[AlgorithmNodeTrack]):
_HasID.__init__(self, id)
_HasCode.__init__(self, code)
_HasDebugString.__init__(self, debug_string)
self.__node_tracks = node_tracks
@property
def node_tracks(self) -> List[AlgorithmNodeTrack]:
return self.__node_tracks
class AlgorithmSectionTrack(_HasID, _HasCode, _HasDebugString):
__distance_units: int
__section_code: str
def __init__(self, id: int, code: str, debug_string: str, distance_units: int, section_code: str):
_HasID.__init__(self, id)
_HasCode.__init__(self, code)
_HasDebugString.__init__(self, debug_string)
self.__distance_units = distance_units
self.__section_code = section_code
@property
def distance_units(self) -> int:
return self.__distance_units
@property
def section_code(self) -> str:
return self.__section_code
class AlgorithmTrain(_HasID, _HasCode, _HasDebugString):
__train_path_nodes: List[AlgorithmTrainPathNode]
def __init__(self, id: int, code: str, debug_string: str, train_path_nodes: List[AlgorithmTrainPathNode]):
_HasID.__init__(self, id)
_HasCode.__init__(self, code)
_HasDebugString.__init__(self, debug_string)
self.__train_path_nodes = train_path_nodes
@property
def train_path_nodes(self) -> List[AlgorithmTrainPathNode]:
return self.__train_path_nodes
class AlgorithmFormation(_HasID, _HasDebugString):
__places_first_class: int
__places_second_class: int
__vehicle_type_ids: List[int]
def __init__(self,
id: int,
debug_string: str,
vehicle_type_ids: List[int],
places_first_class: int,
places_second_class: int):
_HasID.__init__(self, id)
_HasDebugString.__init__(self, debug_string)
self.__places_first_class = places_first_class
self.__places_second_class = places_second_class
self.__vehicle_type_ids = vehicle_type_ids
@property
def vehicle_type_ids(self) -> List[int]:
return self.__vehicle_type_ids
@property
def places_first_class(self) -> int:
return self.__places_first_class
@property
def places_second_class(self) -> int:
return self.__places_second_class
class AlgorithmVehicleType(_HasID, _HasDebugString):
__is_engine: bool
__places_first_class: int
__places_second_class: int
def __init__(self, id: int, debug_string: str, is_engine: bool, places_first_class: int, places_second_class: int):
_HasID.__init__(self, id)
_HasDebugString.__init__(self, debug_string)
self.__places_first_class = places_first_class
self.__places_second_class = places_second_class
self.__is_engine = is_engine
@property
def is_engine(self) -> bool:
return self.__is_engine
@property
def places_first_class(self) -> int:
return self.__places_first_class
@property
def places_second_class(self) -> int:
return self.__places_second_class
|
# password validation using a regular expression.
import re
def password_validation():
i = 0
while i == 0:
try:
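            # At least 8 characters, drawn only from letters, digits, or the listed
            # symbols; fullmatch() below rejects any other character.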
pattern = re.compile(r"[a-zA-Z\d!@#$%^&*]{8,}")
first_password = input("\nEnter a new password \nValid characters are A-Z, a-z, 0-9, $%*!@&#^ : ")
second_password = input("\nPlease enter your new password again: ")
check = pattern.fullmatch(first_password)
if len(first_password) < 8:
print('Your password is only', len(first_password), "characters. Use 8 or more characters.")
            elif check is None:
print("Acceptable characters to create a password are: a-z, A-Z, 0-9, $ % * ! @ & # ^.")
elif first_password != second_password:
print("The passwords do not match, please try again")
else:
i += 1
print("Your new password has been saved.")
with open("password_file.txt", mode='a') as output_file:
output_file.write(first_password)
output_file.write("\n")
except ValueError:
print("Only use the following characters: a-z, A-Z, 0-9, $ % * ! @ & # ^")
password_validation()
|
import CalculoCripto
print("************************")
print("Opções de Calculos Crito")
print("************************")
print("DIGITE:")
print("(1) para calcular o PREÇO baseado em SUPLY e MARKETCAP")
print("(2) para calcular o MARKETCAP baseado em PREÇO e SUPLY")
calculo = int(input("Digite o calulo Pretendido:"))
if(calculo == 1):
CalculoCripto.calculo_cripto_price()
elif(calculo == 2):
CalculoCripto.calculo_cripto_marketcap()
|
import tensorflow as tf
import numpy as np
from core.PPO.models import pi_model, v_model, pi_gaussian_model
from core.PPO.policy_base import PolicyBase
import tensorflow.keras.losses as kls
from utils.logger import log
class Policy_PPO_Continuous(PolicyBase):
def __init__(self,
policy_params=dict(),
num_actions = None):
super().__init__(**policy_params, num_actions= num_actions)
self.pi = pi_gaussian_model(hidden_sizes= self.hidden_sizes_pi, num_outputs= self.num_actions)
self.v = v_model(self.hidden_sizes_v)
def update(self, observations, actions, advs, returns, logp_t):
for i in range(self.train_pi_iters):
loss_pi, loss_entropy, approx_ent, kl = self.train_pi_continuous_one_step(self.pi, self.optimizer_pi, observations, actions, advs, logp_t)
if kl > 1.5 * self.target_kl:
log("Early stopping at step %d due to reaching max kl." %i)
break
# Value Update Cycle for iter steps
for _ in range(self.train_v_iters):
loss_v = self.train_v_one_step(self.v, self.optimizer_v, observations, returns)
# Return Metrics
return loss_pi.numpy().mean(), loss_entropy.numpy().mean(), approx_ent.numpy().mean(), kl.numpy().mean(), loss_v.numpy().mean()
def _value_loss(self, returns, value):
# Mean Squared Error
loss = tf.reduce_mean((returns - value)**2)
return loss # return kls.mean_squared_error(returns, value)
def _pi_loss_continuous(self, mu, logp_old, act, adv):
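        # Note: log_std is created with a fixed initial value and is not part of
        # model.trainable_variables, so the optimizer never updates it; the
        # policy standard deviation therefore stays constant during training.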
log_std = tf.Variable(name= 'log_std', initial_value= -0.5 * np.ones(self.num_actions, dtype=np.float32))
logp = self.pi.gaussian_likelihood(act, mu, log_std)
ratio = tf.exp(logp-logp_old)
min_adv = tf.where(adv > 0, (1+ self.clip_ratio) * adv, (1-self.clip_ratio) * adv)
# Policy Gradient Loss
pi_loss = -tf.reduce_mean(tf.minimum(ratio * adv, min_adv))
# Entropy term
entropy = self.entropy(log_std)
entropy_loss = tf.reduce_mean(entropy)
# Total Loss
pi_loss -= self.ent_coef * entropy_loss
# Approximated Kullback Leibler Divergence from OLD and NEW Policy
approx_kl = tf.reduce_mean(logp_old-logp)
approx_ent = tf.reduce_mean(-logp)
return pi_loss, entropy_loss, approx_ent, approx_kl
@tf.function
def train_pi_continuous_one_step(self, model, optimizer, obs, act, adv, logp_old):
with tf.GradientTape() as tape:
mu = model(obs)
pi_loss, entropy_loss, approx_ent, approx_kl = self._pi_loss_continuous(mu, logp_old, act, adv)
grads = tape.gradient(pi_loss, model.trainable_variables)
optimizer.apply_gradients(zip(grads, model.trainable_variables))
return pi_loss, entropy_loss, approx_ent, approx_kl
@tf.function()
def train_v_one_step(self, model, optimizer_v, obs, returns):
with tf.GradientTape() as tape:
values = model(obs)
v_loss = self._value_loss(returns, values)
grads = tape.gradient(v_loss, model.trainable_variables)
optimizer_v.apply_gradients(zip(grads, model.trainable_variables))
return v_loss
def entropy(self, log_std):
'''
        Entropy bonus of a diagonal Gaussian policy; a larger entropy term
        encourages more exploration in PPO.
'''
entropy = tf.reduce_sum(log_std + 0.5 * np.log(2.0 * np.pi * np.e), axis=-1)
return entropy
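# Illustrative note (not part of the original module): `pi_gaussian_model` from
# core.PPO.models is assumed to expose the `gaussian_likelihood` method used in
# `_pi_loss_continuous`. For a diagonal Gaussian policy it would typically
# compute the log-probability of `act` under mean `mu` and per-dimension
# `log_std`, e.g.:
#
#   def gaussian_likelihood(act, mu, log_std):
#       pre_sum = -0.5 * (((act - mu) / (tf.exp(log_std) + 1e-8)) ** 2
#                         + 2 * log_std + np.log(2 * np.pi))
#       return tf.reduce_sum(pre_sum, axis=1)
#
# The clipped surrogate objective above then uses ratio = exp(logp - logp_old).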
|
from django.contrib.auth.models import User
from django.db import models
from django.urls import reverse
from blog.models.users import Details
class Nav(models.Model):
user = models.ForeignKey(User, on_delete=models.CASCADE)
title = models.CharField(max_length=20, unique=True)
link = models.SlugField(max_length=10, unique=True)
body = models.TextField()
pub_date = models.DateTimeField('date published', auto_now_add=True)
parent = models.ForeignKey('self', related_name='children', null=True, blank=True, on_delete=models.CASCADE,
limit_choices_to={'parent': None})
def get_absolute_url(self):
return reverse('blog:post', kwargs={'pk': self.pk})
def __str__(self):
return '"{title}" by {name}'.format(title=self.title,
name=Details.name(self.user))
def has_children(self):
""" Do we have child navs """
return Nav.objects.filter(parent=self).count() > 0
def kids(self):
return self.children.order_by('title').all()
|
#!/usr/bin/env python
from distutils.core import setup
setup(name='S.EX.', version='2.0', description='S.EX.', author='huku',
author_email='huku@grhack.net', url='https://github.com/huku-/sex',
scripts=['bin/sex'], packages=['sex'])
|
import operator
import inspect
try:
from collections.abc import Mapping, Iterable
except ImportError:
from collections import Mapping, Iterable
import enum
import six
from .exceptions import format_trace
from .reporting import Absent, fmt, NATIVE_TYPES, callable_name
def is_regex(obj):
"""Cannot do type check against SRE_Pattern, so we use duck typing."""
return hasattr(obj, 'match') and hasattr(obj, 'pattern')
def basic_compare(first, second, strict=False):
"""
Comparison used for custom match functions,
can do pattern matching, function evaluation or simple equality.
Returns traceback if something goes wrong.
"""
try:
if is_regex(second):
if not isinstance(first, six.string_types) and not strict:
first = str(first)
result = bool(second.match(first))
elif callable(second):
result = bool(second(first))
else:
result = first == second
return result, None
except Exception as exc:
return None, format_trace(inspect.trace(), exc)
def is_comparator(value):
"""Utility for finding out a value is a custom comparator or not."""
return callable(value) or is_regex(value)
def check_dict_keys(data, has_keys=None, absent_keys=None):
"""
Check if a dictionary contains given
keys and/or has given keys missing.
"""
if not (has_keys or absent_keys):
raise ValueError(
'Either `has_keys` or `absent_keys` must be provided.')
keys = set(data.keys())
has_keys = set(has_keys) if has_keys else set()
absent_keys = set(absent_keys) if absent_keys else set()
existing_diff = has_keys - keys
absent_intersection = absent_keys & keys
return existing_diff, absent_intersection
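# Illustrative example (not in the original source): for
# data = {'a': 1, 'b': 2}, has_keys=['a', 'c'] and absent_keys=['b'],
# check_dict_keys returns ({'c'}, {'b'}) -- the required keys missing from
# `data` and the forbidden keys that are present in it.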
class Callable(object):
"""
Some of our assertions can make use of callables that accept a
single argument as comparator values. We also provide the helper
classes below that are composable (via bitwise operators
or meta callables) and reporting friendly.
"""
def __call__(self, value):
raise NotImplementedError
def __or__(self, other):
return Or(self, other)
def __and__(self, other):
return And(self, other)
def __eq__(self, other):
raise NotImplementedError
def __invert__(self):
return Not(self)
class OperatorCallable(Callable):
"""Base class for simple operator based callables."""
func = None
func_repr = None
def __init__(self, reference):
self.reference = reference
def __call__(self, value):
return self.func(value, self.reference) # pylint: disable=not-callable
def __repr__(self):
return '{}({})'.format(self.__class__.__name__, repr(self.reference))
def __eq__(self, other):
return self.reference == other.reference
def __str__(self):
return 'VAL {} {}'.format(self.func_repr, self.reference)
class Less(OperatorCallable):
func = operator.lt
func_repr = '<'
class LessEqual(OperatorCallable):
func = operator.le
func_repr = '<='
class Greater(OperatorCallable):
func = operator.gt
func_repr = '>'
class GreaterEqual(OperatorCallable):
func = operator.ge
func_repr = '>='
class Equal(OperatorCallable):
func = operator.eq
func_repr = '=='
class NotEqual(OperatorCallable):
func = operator.ne
func_repr = '!='
class In(Callable):
def __init__(self, container):
self.container = container
def __call__(self, value):
return value in self.container
def __eq__(self, other):
return self.container == other.container
def __str__(self):
return 'VAL in {}'.format(self.container)
def __repr__(self):
return '{}({})'.format(self.__class__.__name__, self.container)
class NotIn(In):
def __call__(self, value):
return value not in self.container
def __str__(self):
return 'VAL not in {}'.format(self.container)
class IsTrue(Callable):
def __call__(self, value):
return bool(value)
def __str__(self):
return 'bool(VAL) is True'
def __eq__(self, other):
return self.__class__ == other.__class__
def __repr__(self):
return '{}()'.format(self.__class__.__name__)
class IsFalse(IsTrue):
def __call__(self, value):
return not bool(value)
def __str__(self):
return 'bool(VAL) is False'
class MetaCallable(Callable):
delimiter = None
def __init__(self, *callables):
        assert all(isinstance(clb, Callable) for clb in callables)
self.callables = callables
def __eq__(self, other):
return self.callables == other.callables
def __repr__(self):
args = ', '.join(repr(clb) for clb in self.callables)
return '{}({})'.format(self.__class__.__name__, args)
def __str__(self):
delimiter = ' {} '.format(self.delimiter)
return '({})'.format(
delimiter.join(str(clb) for clb in self.callables)
)
class Or(MetaCallable):
delimiter = 'or'
def __call__(self, value):
for clb in self.callables:
if clb(value):
return True
return False
class And(MetaCallable):
delimiter = 'and'
def __call__(self, value):
for clb in self.callables:
if not clb(value):
return False
return True
class Not(Callable):
def __repr__(self):
return '{}({})'.format(
self.__class__.__name__, repr(self.callable_obj))
def __str__(self):
return 'not ({})'.format(self.callable_obj)
def __init__(self, callable_obj):
assert isinstance(callable_obj, Callable)
self.callable_obj = callable_obj
def __call__(self, value):
return not self.callable_obj(value)
def __eq__(self, other):
return self.__class__ == other.__class__ and \
self.callable_obj == other.callable_obj
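# Illustrative usage sketch (not in the original source): the callables above
# compose via the bitwise operators defined on `Callable`, e.g.
#
#   in_range = Greater(2) & Less(10)          # And(Greater(2), Less(10))
#   in_range(5)                  -> True
#   in_range(12)                 -> False
#   (Equal(0) | ~IsTrue())(None) -> True      # Or / Not composition
#   str(in_range)                -> '(VAL > 2 and VAL < 10)'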
class Custom(Callable):
"""
Utility that allows attaching descriptions to arbitrary functions.
Useful if you are making use of lambda functions
and want to provide more context in the reports.
Usage:
Custom(
callable_obj=lambda value: value.custom_method() is True,
description='`value.custom_method()` returns True'
)
"""
def __init__(self, callable_obj, description):
self.callable_obj = callable_obj
self.description = description
def __call__(self, value):
return self.callable_obj(value)
def __str__(self):
return self.description
def __repr__(self):
return '{}({}, description={})'.format(
self.__class__.__name__,
repr(self.callable_obj),
self.description
)
def __eq__(self, other):
return all([
self.__class__ == other.__class__,
self.callable_obj == other.callable_obj,
self.description == other.description
])
########################################################################
# The non-trivial logic below is used for recursive dict & Fix matching.
# It is not fully compatible with our class based assertion & serialization
# flow, so what we do is generate serializable native data implicitly.
# This may be refactored in future.
########################################################################
# Do not change unless you know what you are doing
# Making this larger will considerably slow down the comparison
MAX_UNORDERED_COMPARE = 16
def compare_with_callable(callable_obj, value):
try:
return bool(callable_obj(value)), None
except Exception as exc:
return False, format_trace(inspect.trace(), exc)
class RegexAdapter(object):
"""This is being used for internal compatibility."""
@classmethod
def check(cls, obj):
return is_regex(obj)
@classmethod
def serialize(cls, obj):
# TODO: add distinction for flags (e.g. multiline)
return 0, 'REGEX', obj.pattern
@classmethod
def match(cls, regex, value):
return Match.from_bool(bool(regex.match(value)))
@staticmethod
def compare(lhs, rhs):
"""Compare two regular expressions - just do string equality."""
return Match.from_bool(lhs == rhs)
class Category(object):
"""
Internal enum. Categorises objects for comparison
"""
ABSENT = 0
VALUE = 1
CALLABLE = 2
REGEX = 3
ITERABLE = 4
DICT = 5
def _categorise(obj, _regex_adapter=RegexAdapter):
"""
Check type of the object
"""
if obj is Absent:
return Category.ABSENT
obj_t = type(obj)
if issubclass(obj_t, NATIVE_TYPES):
return Category.VALUE
elif callable(obj):
return Category.CALLABLE
elif _regex_adapter.check(obj):
return Category.REGEX
elif issubclass(obj_t, Mapping):
return Category.DICT
elif issubclass(obj_t, Iterable):
return Category.ITERABLE
else: # catch-all for types like decimal.Decimal, uuid.UUID, et cetera
return Category.VALUE
class Match(object):
"""
Internal enum. Represents the result of a match.
"""
IGNORED = "i"
FAIL = "f"
PASS = "p"
@staticmethod
def combine(lhs_match, rhs_match):
"""
        Combines two match levels into a single match level
"""
lhs_match = lhs_match or Match.IGNORED
rhs_match = rhs_match or Match.IGNORED
if lhs_match == Match.IGNORED:
return rhs_match
if rhs_match == Match.IGNORED:
return lhs_match
if lhs_match == Match.FAIL:
return Match.FAIL
if rhs_match == Match.FAIL:
return Match.FAIL
return Match.PASS
@staticmethod
def from_bool(passed):
"""
Constructs a match description from a boolean value
"""
if passed:
return Match.PASS
else:
return Match.FAIL
@staticmethod
def to_bool(match):
"""
Converts a match value to a bool
"""
if match == Match.FAIL:
return False
else: # if (match == Match.PASS) or (match == Match.IGNORED)
return True
def _build_res(key, match, lhs, rhs):
"""
Builds a result tuple object for CouchDB.
"""
return key, match[0], lhs, rhs
def _idictzip_all(lhs_dict, rhs_dict, default=Absent):
"""
.. warning::
Internal API.
Generator that loops through all the keys
in the left and right hand side dicts.
Yields key, lhs_val, rhs_val.
If a key is missing from one of the sides,
then its value is set to the value of the default
argument (by default Absent).
"""
for key, lhs_val in lhs_dict.items():
yield key, lhs_val, rhs_dict.get(key, default)
for key, rhs_val in rhs_dict.items():
if key not in lhs_dict: # if not previously iterated
yield key, default, rhs_val
def _partition(results):
"""
.. warning::
Internal API.
    Splits a list of value results into two lists of objects for reporting
"""
lhs_vals = []
rhs_vals = []
for result in results:
key, match, lhs, rhs = result
if key:
lhs_vals.append((key, match, lhs))
rhs_vals.append((key, match, rhs))
elif match:
# indicates a list entry containing match information
lhs_vals.append((3, match, lhs))
rhs_vals.append((3, match, rhs))
else:
lhs_vals.append(lhs)
rhs_vals.append(rhs)
return lhs_vals, rhs_vals
def _cmp_dicts(lhs, rhs, ignore, only, report_mode, value_cmp_func):
"""
Compares dictionaries
"""
def should_ignore_key(key):
"""
Decide if a key should be ignored.
Decision is based on ``ignore`` and ``only``.
        If ``only`` is provided, then keys that are
        not in ``only`` will be ignored.
"""
if key in ignore:
should_ignore = True
elif only is not None:
should_ignore = key not in only
else:
should_ignore = False
return should_ignore
results = []
match = Match.IGNORED
for iter_key, lhs_val, rhs_val in _idictzip_all(lhs, rhs):
if should_ignore_key(iter_key):
if report_mode == ReportOptions.ALL:
results.append(_build_res(
key=iter_key,
match=Match.IGNORED,
lhs=fmt(lhs_val),
rhs=fmt(rhs_val)))
else:
result = _rec_compare(
lhs_val,
rhs_val,
ignore,
only,
iter_key,
report_mode,
value_cmp_func)
# Decide whether to keep or discard the result, depending on the
# reporting mode.
if report_mode in (ReportOptions.ALL, ReportOptions.NO_IGNORED):
keep_result = True
elif report_mode == ReportOptions.FAILS_ONLY:
keep_result = not Match.to_bool(result[1])
else:
raise ValueError('Invalid report mode {}'.format(report_mode))
if keep_result:
results.append(result)
match = Match.combine(match, result[1])
return match, results
def _rec_compare(lhs,
rhs,
ignore,
only,
key,
report_mode,
value_cmp_func,
_regex_adapter=RegexAdapter):
"""
Recursive deep comparison implementation
"""
# pylint: disable=unidiomatic-typecheck
lhs_cat = _categorise(lhs)
rhs_cat = _categorise(rhs)
## NO VALS
if ((lhs_cat == Category.ABSENT) or (rhs_cat == Category.ABSENT)) and \
(lhs_cat != Category.CALLABLE) and (rhs_cat != Category.CALLABLE):
return _build_res(
key=key,
match=Match.PASS if lhs_cat == rhs_cat else Match.FAIL,
lhs=fmt(lhs),
rhs=fmt(rhs))
## CALLABLES
if lhs_cat == rhs_cat == Category.CALLABLE:
match = Match.from_bool(lhs == rhs)
return _build_res(
key=key,
match=match,
lhs=(0, 'func', callable_name(lhs)),
rhs=(0, 'func', callable_name(rhs)))
if lhs_cat == Category.CALLABLE:
result, error = compare_with_callable(callable_obj=lhs, value=rhs)
return _build_res(
key=key,
match=Match.from_bool(result),
lhs=(0, 'func', callable_name(lhs)),
rhs='Value: {}, Error: {}'.format(
rhs, error) if error else fmt(rhs))
if rhs_cat == Category.CALLABLE:
result, error = compare_with_callable(callable_obj=rhs, value=lhs)
return _build_res(
key=key,
match=Match.from_bool(result),
lhs='Value: {}, Error: {}'.format(
lhs, error) if error else fmt(lhs),
rhs=(0, 'func', callable_name(rhs)))
## REGEXES
if lhs_cat == rhs_cat == Category.REGEX:
match = _regex_adapter.compare(lhs, rhs)
return _build_res(
key=key,
match=match,
lhs=_regex_adapter.serialize(lhs),
rhs=_regex_adapter.serialize(rhs))
if lhs_cat == Category.REGEX:
match = _regex_adapter.match(regex=lhs, value=rhs)
return _build_res(
key=key,
match=match,
lhs=_regex_adapter.serialize(lhs),
rhs=fmt(rhs))
if rhs_cat == Category.REGEX:
match = _regex_adapter.match(regex=rhs, value=lhs)
return _build_res(
key=key,
match=match,
lhs=fmt(lhs),
rhs=_regex_adapter.serialize(rhs))
## VALUES
if lhs_cat == rhs_cat == Category.VALUE:
response = value_cmp_func(lhs, rhs)
match = Match.from_bool(response)
return _build_res(
key=key,
match=match,
lhs=fmt(lhs),
rhs=fmt(rhs))
## ITERABLE
if lhs_cat == rhs_cat == Category.ITERABLE:
results = []
match = Match.IGNORED
for lhs_item, rhs_item in six.moves.zip_longest(lhs, rhs):
# iterate all elems in both iterable non-mapping objects
result = _rec_compare(
lhs_item,
rhs_item,
ignore,
only,
key=None,
report_mode=report_mode,
value_cmp_func=value_cmp_func)
match = Match.combine(match, result[1])
results.append(result)
# two lists of formatted objects from a
# list of objects with lhs/rhs attributes
lhs_vals, rhs_vals = _partition(results)
return _build_res(
key=key,
match=match,
lhs=(1, lhs_vals),
rhs=(1, rhs_vals))
## DICTS
if lhs_cat == rhs_cat == Category.DICT:
match, results = _cmp_dicts(
lhs, rhs, ignore, only, report_mode, value_cmp_func)
lhs_vals, rhs_vals = _partition(results)
return _build_res(
key=key,
match=match,
lhs=(2, lhs_vals),
rhs=(2, rhs_vals))
## DIFF TYPES -- catch-all for unhandled
# combinations, e.g. VALUE vs ITERABLE
return _build_res(
key=key,
match=Match.FAIL,
lhs=fmt(lhs),
rhs=fmt(rhs))
# Built-in functions for comparing values in a dict.
COMPARE_FUNCTIONS = {
# Compare values in their native types using operator.eq.
'native_equality': operator.eq,
# Enforce that object types must be strictly equal before comparing using
# operator.eq.
'check_types': lambda x, y: (type(x) == type(y)) and (x == y),
# Convert all objects to strings using str() before making the comparison.
'stringify': lambda x, y: str(x) == str(y)
}
@enum.unique
class ReportOptions(enum.Enum):
"""
Options to control reporting behaviour for comparison results:
ALL: report all comparisons.
NO_IGNORED: do not report comparisons of ignored keys, include everything
else.
FAILS_ONLY: only report comparisons that have failed.
Control of reporting behaviour is provided for two main reasons. Firstly,
to give control of what information is included in the final report.
Secondly, as an optimization to allow comparison information to be
discarded when comparing very large collections.
"""
ALL = 1
NO_IGNORED = 2
FAILS_ONLY = 3
def compare(lhs,
rhs,
ignore=None,
only=None,
report_mode=ReportOptions.ALL,
value_cmp_func=COMPARE_FUNCTIONS['native_equality']):
"""
Compare two iterable key, value objects (e.g. dict or dict-like mapping)
and return a status and a detailed comparison table, useful for reporting.
Ignore has precedence over only.
:param lhs: object compared against rhs
:type lhs: ``dict`` interface (``__contains__`` and ``.items()``)
:param rhs: object compared against lhs
:type rhs: ``dict`` interface (``__contains__`` and ``.items()``)
:param ignore: list of keys to ignore in the comparison
:type ignore: ``list``
:param only: list of keys to exclusively consider in the comparison
:type only: ``list``
:param report_mode: Specify which comparisons should be kept and reported.
Default option is to report all comparisons but this
can be restricted if desired. See ReportOptions enum
for more detail.
:type report_mode: ``ReportOptions``
:param value_cmp_func: function to compare values in a dict. Defaults
to COMPARE_FUNCTIONS['native_equality'].
:type value_cmp_func: Callable[[Any, Any], bool]
:return: Tuple of comparison bool ``(passed: True, failed: False)`` and
a description object for the testdb report
:rtype: ``tuple`` of (``bool``, ``list`` of ``tuple``)
"""
if (lhs is None) and (rhs is None):
return (True, [])
if (lhs is None) or (lhs is Absent):
return (False, [_build_res(key=entry[0],
match=Match.FAIL,
lhs=fmt(lhs),
rhs=entry[1])
for entry in fmt(rhs)[1]])
if (rhs is None) or (rhs is Absent):
return (False, [_build_res(key=entry[0],
match=Match.FAIL,
lhs=entry[1],
rhs=fmt(rhs))
for entry in fmt(lhs)[1]])
ignore = ignore or []
match, comparisons = _cmp_dicts(
lhs, rhs, ignore, only, report_mode, value_cmp_func)
# For the keys in only not matching anything,
# we report them as absent in expected and value.
if isinstance(only, list) and only and comparisons is not None:
keys_found = set()
for elem in comparisons:
keys_found.add(elem[0])
for key in only:
if key not in keys_found:
comparisons.append(
(key, Match.IGNORED, Absent.descr, Absent.descr))
return Match.to_bool(match), comparisons
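# Illustrative example (not in the original source): comparing two simple dicts
# returns a pass/fail flag plus one result tuple per key, e.g.
#
#   passed, rows = compare({'a': 1, 'b': 2}, {'a': 1, 'b': 3})
#   # passed -> False
#   # rows   -> one (key, match, lhs, rhs) tuple per key, with the mismatching
#   #           'b' entry marked as Match.FAIL ('f')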
def _best_permutation(grid):
"""
Given a square matrix of errors comparing actual
value vs. expected value, finds the permutation which
associates actual vs expected with the least error.
    Be careful running this on large grids, as the
    runtime is exponential. Sample run times on desktop hardware::
size: 0, ms: 0.002 size: 1, ms: 0.010 size: 2, ms: 0.022
size: 3, ms: 0.046 size: 4, ms: 0.105 size: 5, ms: 0.207
size: 6, ms: 0.434 size: 7, ms: 0.540 size: 8, ms: 1.351
size: 9, ms: 2.338 size: 10, ms: 5.470 size: 11, ms: 9.398
size: 12, ms: 21.651 size: 13, ms: 36.114 size: 14, ms: 79.871
size: 15, ms: 166.488 size: 16, ms: 363.120 size: 17, ms: 943.494
size: 18, ms: 1818.761 size: 19, ms: 2370.988 size: 20, ms: 9989.508
e.g. for the grid::
>>> grid = [[1000, 2000, 2000],
... [1000, 2000, 2000],
... [ 0, 2000, 2000]]
        >>> _best_permutation(grid)
        [1, 2, 0]
Where [1, 2, 0] is a list of indices mapping::
- row 0 to col 1
- row 1 to col 2
- row 2 to col 0
"""
def bp_loop(outstanding, level, grid, grid_len, cache):
"""
Recursively finds a solution by
progressively excluding poor permutations "paths"
"""
if not outstanding:
return 0, []
# [(cost:int, indx:int)]
level_permutations = [(grid[level][indx], indx)
for indx in outstanding]
level_permutations.sort()
min_cost = None
min_path = None
for cost, indx in level_permutations:
remaining = outstanding - frozenset([indx])
# memoise calls, for large grids
# this cuts down the amount of calls significantly
pair = cache.get(remaining, None)
if pair is None:
pair = bp_loop(remaining, level + 1, grid, grid_len, cache)
cache[remaining] = pair
sub_cost, sub_path = pair
this_cost = cost + sub_cost
this_path = [indx] + sub_path
if (min_cost is None) or (this_cost < min_cost):
min_cost = this_cost
min_path = this_path
return min_cost, min_path
grid_len = len(grid)
cache = {}
# list of int indices
return bp_loop(frozenset(range(grid_len)), 0, grid, grid_len, cache)[1]
# helper func, used to generate errors matrix
def _to_error(cmpr_tuple, weights):
"""
Converts a comparison tuple (as returned by compare) to an error.
Each key may have its own weight. The default weight is 100,
however this may be otherwise specified in the "weights" dict.
"""
def is_missed_message(comparisons):
"""
Returns True if all lhs or rhs values of a dict match are Absent
"""
absent_side = (0, None, Absent.descr)
return (
(
sum([(0 if entry[2] == absent_side else 1)
for entry in comparisons]) == 0
) or
(
sum([(0 if entry[3] == absent_side else 1)
for entry in comparisons]) == 0
)
)
pass_flag, comparisons = cmpr_tuple
if pass_flag is True:
return 0 # perfect match
if pass_flag is False and is_missed_message(comparisons):
return 100000 # missed message
# worst possible error: value to normalise against
worst_error = 0
current_error = 0
for comparison in comparisons:
comparison_match = comparison[1]
# tag exists and matches, or ignored
if (comparison_match == Match.PASS) or\
(comparison_match == Match.IGNORED):
match_err = 0
else: # tag exists, but wrong data or tag is missing
match_err = 1
tag_weight = weights.get(str(comparison[0]), 100)
worst_error += tag_weight
current_error += (match_err * tag_weight)
return int(current_error * 10000.0 / worst_error + 0.5)
class Expected(object):
"""
An object representing an expected message,
along with additional comparison flags.
Input to the "unordered_compare" function.
"""
def __init__(self, value, ignore=None, only=None):
"""
:param value: object compared against
each actual value in unordered_compare
:type value: ``dict``-like interface (__contains__ and .items())
:param ignore: list of keys to ignore in the comparison
:type ignore: ``list``
:param only: list of keys to exclusively consider in the comparison
:type only: ``list``
"""
self.value = value
self.ignore = ignore
self.only = only
def unordered_compare(
match_name, values, comparisons, description=None, tag_weightings=None
):
"""
    Matches a list of actual values against a list of expected comparisons.
The values and comparisons may be specified in
any order, and the returned value represents the best
overall match between values and comparisons.
Initially all value/expected comparison combinations
are evaluated and converted to an error weight.
    If certain keys/tags are more important than others (e.g. ID FIX tags),
it is possible to give them additional weighting during the comparison,
by specifying a "tag_weightings" dict.
The values/comparisons permutation that results in the least
error is then returned as a list of dicts that can be included
in the testing report.
.. note::
It is possible to specify up to a maximum of
16 values or expected comparisons.
.. note::
``len(values)`` and ``len(comparison)`` need not be the same.
:param match_name: name that will appear on comparison report descriptions.
For example "fixmatch" will produce a comparison description such as
"unordered fixmatch 2/3: expected[2] vs values[1]"
:type match_name: ``str``
:param values: Actual values: an iterable object
(e.g. list or generator) of values.
Each value needs to support a dict-like interface.
:type values: ``generator`` or ``list`` of ``dict``-like objects
:param comparisons: Expected values and comparison flags.
:type comparisons: ``list`` of ``Expected``
:param description: Message used in each reported match.
:type description: ``str``
:param tag_weightings: Per-key overrides that specify a
different weight for different keys.
:type tag_weightings: ``dict`` of ``str`` to ``int``
:return: A list of test reports that can be appended to the result object
:rtype: ``list`` of ``dict``
        (keys: 'comparison', 'description', 'passed', 'comparison_index')
"""
# make sure that all keys are strings
weights = {
str(key): int(val) for key, val in (tag_weightings or {}).items()}
# input may be generators, we need lists from this point onwards
list_msgs = list(values)
list_cmps = list(comparisons)
# if either the values or expected comparisons
# exceed 16, then raise an exception:
# it would take too long to process
    # (an exponential-complexity algorithm is involved)
if max(len(list_msgs), len(list_cmps)) > MAX_UNORDERED_COMPARE:
raise Exception("Too many values being compared. "+
"Unordered matching supports up to 16 comparisons")
# Generate fake comparisons or values in case that the number of values
# is different from what was expected.
# This makes it possible to match whatever is possible in the report
# and mark the rest as either missing or unexpected
synth_msgs = [Absent] * max(0, len(list_cmps) - len(list_msgs))
synth_cmps = [Expected(Absent)] * max(0, len(list_msgs) - len(list_cmps))
    # We now have two lists of equal size to process
proc_msgs = list_msgs + synth_msgs
proc_cmps = list_cmps + synth_cmps
assert len(proc_msgs) == len(proc_cmps)
# generate a 2D square "matrix" of match (bool pass, list) tuples
# by calling compare on every message / comparison combination
# This matrix is organised as:
#
# # cmp0 cmp1 cmp2 cmp3 # vs:
# match_matrix = [[tpl00, tpl01, tpl02, tpl03], # msg0
# [tpl10, tpl11, tpl12, tpl13], # msg1
# [tpl20, tpl21, tpl22, tpl23], # msg2
# [tpl30, tpl31, tpl32, tpl33]] # msg3
#
match_matrix = [[compare(cmpr.value,
msg,
ignore=cmpr.ignore,
only=cmpr.only)
for cmpr in proc_cmps] for msg in proc_msgs]
    # generate a 2D square "matrix" of error integers (0 <= err <= 100000)
# where:
# - 0 indicates a perfect message match (no tag mismatches)
# - 10000 indicates every tag being wrong between existing messages
    # - 100000 indicates a missed or extra
# message (when len(msgs) != len(comparisons))
#
# Each object in "match_matrix" is mapped to this error int.
# The shape and position of the matrix is preserved.
#
errors_matrix = [[_to_error(cmpr_tuple, weights)
for cmpr_tuple in row] for row in match_matrix]
# compute the optimal matching based on the permutation between actual and
# expected message that results in the least error
matched_indices = _best_permutation(errors_matrix)
# construct a list of report entries
base_descr = description or "unordered {}".format(match_name)
def build_descr(msg_indx, cmp_indx, expected_msg, received_msg):
"""
Build an additional description that indicates
if the message was missed or unexpected.
"""
prefix = "{} {}/{}:".format(
base_descr, msg_indx+1, len(matched_indices))
if received_msg is Absent:
return '{} expected[{}] vs Absent'.format(prefix, cmp_indx)
elif expected_msg is Absent:
return '{} Absent vs values[{}]'.format(prefix, msg_indx)
else:
return '{} expected[{}] vs values[{}]'.format(
prefix, cmp_indx, msg_indx)
return [{'description': build_descr(msg_indx,
cmp_indx,
proc_cmps[cmp_indx].value,
proc_msgs[msg_indx]),
# 'time': now(), # TODO: use local and UTC times
'comparison': match_matrix[msg_indx][cmp_indx][1],
'passed': bool(match_matrix[msg_indx][cmp_indx][0]),
'comparison_index': cmp_indx}
for msg_indx, cmp_indx in enumerate(matched_indices)]
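# Illustrative usage sketch (not in the original source):
#
#   values = [{'id': 1, 'x': 'b'}, {'id': 0, 'x': 'a'}]
#   comparisons = [Expected({'id': 0, 'x': 'a'}), Expected({'id': 1}, ignore=['x'])]
#   reports = unordered_compare('dictmatch', values, comparisons)
#
# Each entry in `reports` is a dict with 'description', 'comparison', 'passed'
# and 'comparison_index' keys, pairing each value with the expected comparison
# that yields the smallest overall error.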
def tuplefy_item(item, list_entry=False):
"""
Convert a dictionary report item in order to
consume less space in json representation.
"""
# TODO: Replace magical numbers with constants
if 'list' in item:
ret = (1, [tuplefy_item(obj, list_entry=True) for obj in item['list']])
match = item.get('match')
elif 'dict' in item:
ret = (2, [(pair['key'], pair['match'][0], tuplefy_item(pair))
if 'match' in pair
else (pair['key'], tuplefy_item(pair))
for pair in item['dict']])
match = item.get('match')
elif 'value' in item:
if isinstance(item['value'], int):
ret = (0, item.get('type'), str(item['value']))
else:
ret = (0, item.get('type'), item['value'])
match = item.get('match')
else:
raise ValueError('Unmatched type for tuplefy')
if list_entry and match:
# list entry that contains match information
return 3, match[0], ret
else:
return ret
def tuplefy_comparisons(comparisons, table=False):
"""
Convert dictionary report comparisons to list and tuples composition.
"""
if table:
return [
(tuplefy_comparisons(entry['cols']), entry['idx'])
for entry in comparisons
]
else:
return [
(
comparison['key'],
comparison['match'][0],
tuplefy_item(comparison['lhs']),
tuplefy_item(comparison['rhs'])
)
if 'lhs' in comparison and 'rhs' in comparison
else (comparison['key'], tuplefy_item(comparison['lhs']))
for comparison in comparisons
]
class DictmatchAllResult(object):
"""
When cast to a ``bool``, evaluates to ``True`` when all values
were matched without errors or ``False`` if one or more values mis-matched.
This object exposes two fields:
- ``passed``: a boolean indicating if the assertion passed completely
- ``index_match_levels``: a list containing
tuples of index and match level:
- ``MATCH``
- ``MISMATCH``
- ``LHS_NONE``
- ``RHS_NONE``
The following are examples of what the fields
return under various scenarios:
.. code-block:: bash
+-----------------------------------------+--------------------------+
| DICTMATCH ASSERTION INPUT | DictmatchAllResult |
+====================+====================+=========+================+
| Expected (LHS) | Actual (RHS) | .passed | match levels |
+--------------------+--------------------+---------+----------------+
| [{'id':0,'x':'a'}, | [{'id':0,'x':'a'}, | | [(0,MATCH), |
| {'id':1,'x':'b'}, | {'id':2,'x':'c'}, | True | (2,MATCH), |
| {'id':2,'x':'c'}] | {'id':1,'x':'b'}] | | (1,MATCH)] |
+--------------------+--------------------+---------+----------------+
| [{'id':0,'x':'a'}, | [{'id':0,'x':'a'}, | | [(0,MATCH), |
| {'id':1,'x':'b'}, | {'id':2,'x':'c'}, | False | (2,MATCH), |
| {'id':2,'x':'c'}] | {'id':1}] | | (1,MISMATCH)] |
+--------------------+--------------------+---------+----------------+
| [{'id':0,'x':'a'}, | [{'id':0,'x':'a'}, | | [(0,MATCH), |
| {'id':1,'x':'b'}, | {'id':3,'x':'d'}, | False | (3,LHS_NONE), |
| {'id':2,'x':'c'}] | {'id':1,'x':'b'}, | | (1,MATCH), |
| | {'id':2,'x':'c'}] | | (2,MATCH)] |
+--------------------+--------------------+---------+----------------+
| [{'id':0,'x':'a'}, | [{'id':0,'x':'a'}, | | [(0,MATCH), |
| {'id':1,'x':'b'}, | {'id':1,'x':'b'}, | False | (1,MATCH), |
| {'id':2,'x':'c'}, | {'id':3,'x':'d'}] | | (3,MATCH), |
| {'id':3,'x':'d'}] | | | (2,RHS_NONE)] |
+--------------------+--------------------+---------+----------------+
Indices are to be read as mappings from RHS values to LHS values.
i.e.:
[(1,..),(0,..),(2,..)]
    maps: RHS:0 -> LHS:1, RHS:1 -> LHS:0, RHS:2 -> LHS:2.
"""
MATCH = 0
# pylint: disable=W0105
"""
Perfect match between identified and expected value.
If all index_match_levels are MATCH, then passed is ``True``.
"""
MISMATCH = 1
# pylint: disable=W0105
"""
The identified and expected values are matched with some errors.
If any entry in index_match_levels is MISMATCH, then passed is ``False``.
"""
LHS_NONE = 2
# pylint: disable=W0105
"""
A value is present on the right hand side but not matched with
a value on the left hand side. (e.g. an unexpected message).
If any entry in index_match_levels is LHS_NONE, then passed is ``False``.
"""
RHS_NONE = 3
# pylint: disable=W0105
"""
A value is present on the left hand side but not matched with
a value on the right hand side. (e.g. a missed message)
If any entry in index_match_levels is RHS_NONE, then passed is ``False``.
"""
def __init__(self, passed, index_match_levels):
"""
Constructs a new DictmatchAllResult object.
:param passed: Set to True if the assertion passed on
all its inputs, False otherwise
:type passed: ``bool``
:param index_match_levels: A list of mappings between
matched index and level of matching.
:type index_match_levels: ``list`` of
(``int``, ``MATCH``, ``MISMATCH``, ``LHS_NONE`` or ``RHS_NONE``)
tuples.
"""
self.passed = passed
self.index_match_levels = index_match_levels
def __bool__(self): # python 3 bool()
"""
:return: True if assertion passed, False otherwise
:rtype: ``bool``
"""
return self.passed
def __nonzero__(self): # python 2 bool()
"""
:return: True if assertion passed, False otherwise
:rtype: ``bool``
"""
return self.passed
def dictmatch_all_compat(
match_name, comparisons, values,
description, key_weightings,
):
"""This is being used for internal compatibility."""
matches = unordered_compare(
match_name=match_name,
values=values,
comparisons=comparisons,
description=description,
tag_weightings=key_weightings
)
all_passed = True
indices = []
levels = []
for mtch in matches:
# mtch['is_fix'] = is_fix
passed = mtch['passed']
cmp_indx = mtch['comparison_index']
indices.append(cmp_indx)
if passed:
level = DictmatchAllResult.MATCH
elif (cmp_indx < len(comparisons)) and (cmp_indx < len(values)):
level = DictmatchAllResult.MISMATCH
# (implicit) : and (cmp_indx >= len(values))
elif cmp_indx < len(comparisons):
level = DictmatchAllResult.RHS_NONE
        # (implicit): cmp_indx < len(values) and (cmp_indx >= len(comparisons))
else:
level = DictmatchAllResult.LHS_NONE
levels.append(level)
if not passed:
all_passed = False
    res = DictmatchAllResult(all_passed, list(zip(indices, levels)))
return matches, res
|
from .base_renderer import *
import os
import re
import docutils.writers.html4css1
from docutils.core import publish_parts
from docutils.writers.html4css1 import Writer, HTMLTranslator
docutils_dir = os.path.dirname(docutils.writers.html4css1.__file__)
class GitHubHTMLTranslator(HTMLTranslator):
def visit_literal_block(self, node):
classes = node.attributes['classes']
if len(classes) >= 2 and classes[0] == 'code':
language = classes[1]
del classes[:]
self.body.append(self.starttag(node, 'pre', lang=language, CLASS='codehilite'))
else:
self.body.append(self.starttag(node, 'pre', CLASS='codehilite'))
@renderer
class RstRenderer(MarkupRenderer):
FILENAME_PATTERN_RE = re.compile(r'\.re?st$')
@classmethod
def is_enabled(cls, filename, syntax):
if syntax == "text.restructuredtext":
return True
return cls.FILENAME_PATTERN_RE.search(filename) is not None
def render(self, text, **kwargs):
settings_overrides = {
'cloak_email_addresses': True,
'file_insertion_enabled': False,
'raw_enabled': False,
'strip_comments': True,
'doctitle_xform': False,
'report_level': 5,
'syntax_highlight': 'short',
'math_output': 'latex',
'input_encoding': 'utf-8',
'output_encoding': 'utf-8',
'stylesheet_dirs': [os.path.normpath(os.path.join(docutils_dir, Writer.default_stylesheet))],
'template': os.path.normpath(os.path.join(docutils_dir, Writer.default_template))
}
writer = Writer()
writer.translator_class = GitHubHTMLTranslator
output = publish_parts(
text, writer=writer, settings_overrides=settings_overrides
)
if 'html_body' in output:
return output['html_body']
return ''
|
from django.contrib import admin
from .models import User
@admin.register(User)
class UserAdmin(admin.ModelAdmin):
    list_display = ('username', 'email', 'first_name', 'last_name', 'last_login')
    search_fields = ('username', 'email')
    list_filter = ('is_superuser',)
ordering = ('username',)
filter_horizontal = ("groups", "user_permissions")
fieldsets = (
('User', {'fields' : ('username', 'password')}),
        ('Personal Info', {'fields' : ('first_name',
'last_name',
'email',
'avatar'
)}),
('Permissions' , {'fields' : ('is_active',
'is_staff',
'is_superuser',
'groups',
'user_permissions')}),
)
|
from datetime import datetime
from flask import Flask, request, render_template, send_from_directory, url_for, redirect, session, Response, jsonify
from flask_login import current_user, login_required, LoginManager, login_user, logout_user
from flask_wtf import FlaskForm
from flask_wtf.csrf import CSRFProtect, CSRFError
from scapy.utils import rdpcap
from wtforms import StringField, PasswordField, SubmitField, validators
import argparse
import io
import json
import logging
import os
import re
import shutil
import tempfile
import threading
import uuid
from looking_glass import app
from NmapQueryTool.lib.scan_data import ScanData
from looking_glass.lib.data_graph import DataGraph
from looking_glass.lib.tables import User
from looking_glass.lib.arp import parse_arp_data
from looking_glass.lib.internal_error import InternalError
application = app # Needed by Elastic Beanstalk / WSGI
# Flask WTF CSRF configuration
app.config['SECRET_KEY'] = os.urandom(32)
app.config['WTF_CSRF_TIME_LIMIT'] = None # Makes CSRF token valid for the duration of the session
app.config['TMP_UPLOAD_DIR'] = tempfile.mkdtemp()
csrf = CSRFProtect(app)
login_manager = LoginManager()
login_manager.login_view = 'login'
login_manager.init_app(app)
data_graph = DataGraph()
# How to run locally (note: do not use this in production):
#
# FLASK_APP=application.py flask run --host=0.0.0.0
#
class LoginForm(FlaskForm):
username = StringField('Username')
password = PasswordField('Password')
submit = SubmitField('Submit')
class RegistrationForm(FlaskForm):
username = StringField('Username', [validators.DataRequired()])
password = PasswordField('Password', [
validators.DataRequired(),
validators.EqualTo('confirm', message='Passwords must match')
])
confirm = PasswordField('Confirm', [validators.DataRequired()])
displayname = StringField('DisplayName')
submit = SubmitField('Submit')
# Static helper functions
def defaultWorkspaceName():
return current_user.get_username() + ": default"
def pcap_filename(username):
# Use timestamp to keep a record of when the PCAP was uploaded, but add a UUID in case the same user submits
# multiple PCAP files in the same second (which is unlikely, but technically possible)
return "%s-%s-%s.pcap" % (username, datetime.utcnow().strftime('%Y%m%dT%H%M%S'), str(uuid.uuid4()))
def create_node(node_ip):
return {
'id': str(uuid.uuid4()),
'ip': node_ip
}
# API Implementations
@login_manager.user_loader
def load_user(username):
return data_graph.load_user(username)
@app.teardown_request
def remove_session(ex=None):
data_graph.close_session()
if os.path.isdir(app.config['TMP_UPLOAD_DIR']):
shutil.rmtree(app.config['TMP_UPLOAD_DIR'])
@app.route('/', methods=['GET'])
def index():
if current_user.is_authenticated:
if session.get('workspace_id') is None:
session['workspace_id'] = data_graph.default_workspace_for_user(current_user.get_username()).id
session['workspaces'] = [ { 'name': w.name, 'id': w.id } for w in data_graph.workspaces_for_user(current_user.get_username()) ]
return render_template('infrastructure_graph.html')
else:
return render_template('login.html', form=LoginForm())
def ensure_workspace_is_initialized():
available_workspaces = data_graph.workspaces_for_user(current_user.get_username())
if available_workspaces is None or len(available_workspaces) == 0:
new_workspace = data_graph.create_workspace(current_user.get_username(), defaultWorkspaceName(), True)
session['workspace_id'] = new_workspace.id
else:
session['workspace_id'] = available_workspaces[0].id
# Reference: https://flask-login.readthedocs.io/en/latest
@app.route('/login', methods=['GET', 'POST'])
def login():
form = LoginForm(request.form)
error = None
if form.validate_on_submit():
user = data_graph.load_user(form.username.data)
        if user is not None and user.validate_password(form.password.data.encode('utf-8')):
login_user(user, remember=True)
ensure_workspace_is_initialized()
print('Logged in successfully')
return redirect(url_for('index'))
else:
error = 'Invalid credentials'
return render_template('login.html', form=form, error=error)
@app.route('/logout', methods=['GET', 'POST'])
@login_required
def logout():
logout_user()
session['workspace_id'] = None
return redirect(url_for('login'))
@app.route('/register', methods=['GET', 'POST'])
def register():
form = RegistrationForm(request.form)
error = None
if request.method == 'POST':
if form.validate():
user = data_graph.load_user(form.username.data)
if user is None:
if form.password.data != form.confirm.data:
error = 'Passwords do not match'
else:
data_graph.create_user(form.username.data, form.password.data.encode('utf-8'))
print('User %s has been registered' % form.username.data)
return redirect(url_for('login'))
else:
error = 'User with username %s already exists' % form.username.data
else:
error = 'Form validation failed; passwords must match'
return render_template('register_user.html', form=form, error=error)
@app.route('/create_workspace', methods=['POST'])
@login_required
def create_workspace():
workspace_name = request.headers.get('user_id') + ': ' + request.json['workspace_name']
success = data_graph.create_workspace(request.headers.get('user_id'), workspace_name)
if success:
response = Response()
response.status_code = 200
return response
else:
response = Response('Unable to create workspace %s' % workspace_name)
response.status_code = 409 # Conflict error code
return response
@app.route('/delete_workspace', methods=['POST'])
@login_required
def delete_workspace():
workspace_id = request.headers.get('workspace_id')
success = data_graph.delete_workspace(request.headers.get('user_id'), workspace_id)
if success:
session.pop('workspace_id') # Remove the workspace_id value for the current session
ensure_workspace_is_initialized()
response = Response()
response.status_code = 200
return response
else:
response = Response('Unable to delete workspace (ID: %s)' % workspace_id)
response.status_code = 400
return response
@app.route('/share_workspace', methods=['POST'])
@login_required
def share_workspace():
workspace_id = request.headers.get('workspace_id')
success = data_graph.grant_workspace_access(request.headers.get('user_id'), workspace_id, request.json['authorized_user'])
if success:
response = Response()
response.status_code = 200
return response
else:
response = Response('Unable to share workspace (ID: %s)' % workspace_id)
response.status_code = 403 # Forbidden
return response
@app.route('/unshare_workspace', methods=['POST'])
@login_required
def unshare_workspace():
workspace_id = request.headers.get('workspace_id')
success = data_graph.revoke_workspace_access(request.headers.get('user_id'), workspace_id, request.json['unauthorized_user'])
if success:
response = Response()
response.status_code = 200
return response
else:
response = Response('Unable to revoke permissions for the %s user for workspace with ID: %s' % (request.json['unauthorized_user'], workspace_id))
response.status_code = 403 # Forbidden
return response
@app.route('/workspaces', methods=['GET'])
@login_required
def get_workspaces_for_user():
session['workspaces'] = [ { 'name': w.name, 'id': w.id } for w in data_graph.workspaces_for_user(current_user.get_username()) ]
return Response(json.dumps(session['workspaces']))
@app.route('/graph_data', methods=['GET'])
def get_graph_data():
try:
if current_user.is_authenticated:
graph_json = data_graph.current_graph_json(request.args.get('user_id'), request.args.get('workspace_id'))
if graph_json is None:
return Response('User not allowed to access the requested workspace', status=403)
else:
return json.dumps(graph_json)
else:
return redirect(url_for('login'))
except Exception as err:
print("[ERROR] %s" % err)
return Response(str(err), status=400)
@app.route('/upsert_node', methods=['POST'])
@login_required
def upsert_node():
data_graph.upsert_node(request.json, request.headers.get('user_id'), request.headers.get('workspace_id'))
return 'ok'
@app.route('/upsert_edge', methods=['POST'])
@login_required
def upsert_edge():
data_graph.upsert_edge(request.json, request.headers.get('user_id'), request.headers.get('workspace_id'))
return 'ok'
@app.route('/remove_node', methods=['POST'])
@login_required
def remove_node():
data_graph.remove_node(request.json, request.headers.get('user_id'), request.headers.get('workspace_id'))
return 'ok'
@app.route('/remove_edge', methods=['POST'])
@login_required
def remove_edge():
edge_data = request.json
data_graph.remove_edge(edge_data['from'], edge_data['to'], request.headers.get('user_id'), request.headers.get('workspace_id'))
return 'ok'
def merge_new_node_data(node, new_data):
node_updated = False
for key in new_data:
if not key in node:
node[key] = new_data[key]
node_updated = True
elif type(node[key]) is set:
node[key].add(new_data[key])
node_updated = True
elif node[key] != new_data[key]:
print("Existing value for key '%s' exists; creating set of previous value (%s) and new value (%s)" % (key, node[key], new_data[key]))
curr_val = node[key]
# TODO: Make sure this gets visualized correctly (need test NMAP data for this)
node[key] = { curr_val, new_data[key] } # Create a value set
node_updated = True
return { 'node_updated': node_updated, 'node': node }
@app.route('/upload_nmap_data', methods=['POST'])
@login_required
def upload_nmap_data():
nmap_data = request.json
username = request.headers.get('user_id')
session['workspace_id'] = request.headers.get('workspace_id')
data = ScanData.create_from_nmap_data(io.StringIO(nmap_data))
for host in data.host_data_list():
node = data_graph.get_node_by_ip(host.ip, username, session['workspace_id'])
node_updated = False
        if node is None:
node_updated = True
node = create_node(host.ip)
host_dict = host.as_dict()
if 'os_list' in host_dict:
windows_os_pattern = re.compile('.*[Ww]indows.*')
linux_os_pattern = re.compile('.*[Ll]inux.*')
if any([ windows_os_pattern.match(x) for x in host_dict['os_list'] ]):
node['group'] = 'windows_host'
elif any([ linux_os_pattern.match(x) for x in host_dict['os_list'] ]):
node['group'] = 'linux_host'
results = merge_new_node_data(node, host_dict)
node_updated |= results['node_updated']
node = results['node']
if node_updated:
data_graph.upsert_node(node, username, session['workspace_id'])
return 'ok'
# Reference: https://flask.palletsprojects.com/en/1.1.x/patterns/apierrors/
@app.errorhandler(InternalError)
def handle_invalid_usage(error):
response = jsonify(error.to_dict())
response.status_code = error.status_code
return response
@app.route('/upload_arp_data', methods=['POST'])
@login_required
def upload_arp_data():
username = request.headers.get('user_id')
session['workspace_id'] = request.headers.get('workspace_id')
arp_records = parse_arp_data(request.json)
# TODO: Figure how to identify switch in ARP records and use that to connect all records
for arp_record in arp_records:
node = data_graph.get_node_by_ip(arp_record.ip_address, username, session['workspace_id'])
        if node is None:
node = create_node(arp_record.ip_address)
data_graph.upsert_node(node, username, session['workspace_id'])
data_graph.upsert_network_interface(arp_record, node['id'], username, session['workspace_id'])
return 'ok'
# Reference: http://flask.pocoo.org/docs/1.0/patterns/fileuploads/
@app.route('/upload_pcap_data', methods=['POST'])
@login_required
def upload_pcap_data():
username = request.headers.get('user_id')
session['workspace_id'] = request.headers.get('workspace_id')
# TODO: Enforce max size limit on files (also add check on client side)
directory_path = os.path.join(app.config['TMP_UPLOAD_DIR'], username, 'pcaps')
if not os.path.exists(directory_path):
os.makedirs(directory_path)
file_path = os.path.join(directory_path, pcap_filename(username))
    with open(file_path, 'wb') as f:  # request.data is bytes, so open in binary mode
f.write(request.data)
# TODO: Implement functionality for parsing the PCAP file and pulling information into the graph
return 'ok'
# Expected data format:
#
# 192.168.0.1 172.3.4.35
# 10.3.4.5 9.2.34.5
#
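# As a hypothetical illustration (not part of the original source), a client
# could submit that format with something like:
#
#   curl -X POST http://localhost:5000/upload_net_flow_data \
#        -H 'user_id: alice' -H 'workspace_id: 42' \
#        --data $'192.168.0.1 172.3.4.35\n10.3.4.5 9.2.34.5'
#
# (user_id/workspace_id mirror the headers read by the handler below; a real
# request also needs the login session cookie and the CSRF token enforced by
# CSRFProtect.)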
@app.route('/upload_net_flow_data', methods=['POST'])
@login_required
def upload_net_flow():
username = request.headers.get('user_id')
session['workspace_id'] = request.headers.get('workspace_id')
error = None
for line in request.data.decode('utf-8').replace('\\n', '\n').split("\n"):
stripped_line = line.strip().replace('\"', '').replace('\'', '')
if len(stripped_line) == 0:
continue
addresses = stripped_line.split(' ')
if len(addresses) != 2:
response = Response("Provided net flow data did not have the expected format; expect each line to be of the form '<Source IP Address> <Destination IP Address>'")
response.status_code = 400 # Bad request
return response
src_ip = addresses[0].strip()
dst_ip = addresses[1].strip()
src_node = data_graph.get_node_by_ip(src_ip, username, session['workspace_id'])
dst_node = data_graph.get_node_by_ip(dst_ip, username, session['workspace_id'])
new_edge = False
        if src_node is None:
            src_node = create_node(src_ip)
            data_graph.upsert_node(src_node, username, session['workspace_id'])
            new_edge = True
        if dst_node is None:
dst_node = create_node(dst_ip)
data_graph.upsert_node(dst_node, username, session['workspace_id'])
new_edge = True
if new_edge or not data_graph.does_edge_exist(src_ip, dst_ip, username, session['workspace_id']):
edge = { 'from': src_node['id'], 'to': dst_node['id'] }
data_graph.add_edge(edge, username, session['workspace_id'])
return 'ok'
@app.errorhandler(CSRFError)
def handle_csrf_error(e):
return render_template('csrf_error.html', reason=e.description), 400
# TODO: Also add support for importing SiLK NetFlow data (PCAPs can be converted using the rwp2yaf2silk tool)
# For running locally in debug mode
if __name__ == '__main__':
verbose = False
parser = argparse.ArgumentParser(description='Development mode command line options')
parser.add_argument('-v', '--verbose', action='store_true')
args = parser.parse_args()
if not args.verbose:
# Silence Flask server logging
log = logging.getLogger('werkzeug')
log.disabled = True
app.logger.disabled = True
# host='0.0.0.0' enables connections using the local network interface
app.run(host='0.0.0.0', threaded=True)
|
"""
Passed a filename that contains lines output by [TSPRegistry sharedRegistry] in lldb/Xcode:
182 -> 0x10f536100 KN.CommandSlideResetMasterBackgroundObjectsArchive
181 -> 0x10f5362e0 KN.ActionGhostSelectionTransformerArchive
...
...this script will print a sorted JSON object definition of that mapping from class
definition ID to Protobuf message type name. This is some of my hackiest code. Please
don't use this for anything important. It's mostly here for next time I need it.
"""
import sys
import json
def parse_proto_mapping(input_file):
    # read() the file first (a file object has no .split()) and skip lines without the separator
    lines = open(input_file).read().split("\n")
    split = [x.strip().split(" -> ") for x in lines if " -> " in x]
print(
json.dumps(
dict(sorted([(int(a), b.split(" ")[-1]) for a, b in split if 'null' not in b])),
indent=2,
)
)
if __name__ == "__main__":
parse_proto_mapping(sys.argv[-1])
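# For the two example lines shown in the module docstring, the output would be
# (json.dumps renders the integer class IDs as string keys):
#
#   {
#     "181": "KN.ActionGhostSelectionTransformerArchive",
#     "182": "KN.CommandSlideResetMasterBackgroundObjectsArchive"
#   }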
|
"""
Simple spider to demonstrate how to use the XLSX exporter.
This spider produces the following output:
+-----+----+-----+
| foo | 42 | bar |
+-----+----+-----+
"""
from scrapy import Spider
from scrapy_xlsx import XlsxItemExporter
from ..items import ExampleItem
class CustomExporter(XlsxItemExporter):
def __init__(self, file, **kwargs):
super().__init__(file, include_header_row=False, **kwargs)
class Example2Spider(Spider):
name = "example2"
allowed_domains = ["example.com"]
start_urls = ["http://example.com/"]
custom_settings = {
"FEED_EXPORTERS": {"xlsx": "example.spiders.example2.CustomExporter"}
}
def parse(self, response):
return ExampleItem(a="foo", b=42, c="bar")
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
__all__ = [
'GetBotResult',
'AwaitableGetBotResult',
'get_bot',
'get_bot_output',
]
@pulumi.output_type
class GetBotResult:
def __init__(__self__, arn=None, data_privacy=None, description=None, id=None, idle_session_ttl_in_seconds=None, name=None, role_arn=None, test_bot_alias_settings=None):
if arn and not isinstance(arn, str):
raise TypeError("Expected argument 'arn' to be a str")
pulumi.set(__self__, "arn", arn)
if data_privacy and not isinstance(data_privacy, dict):
raise TypeError("Expected argument 'data_privacy' to be a dict")
pulumi.set(__self__, "data_privacy", data_privacy)
if description and not isinstance(description, str):
raise TypeError("Expected argument 'description' to be a str")
pulumi.set(__self__, "description", description)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if idle_session_ttl_in_seconds and not isinstance(idle_session_ttl_in_seconds, int):
raise TypeError("Expected argument 'idle_session_ttl_in_seconds' to be a int")
pulumi.set(__self__, "idle_session_ttl_in_seconds", idle_session_ttl_in_seconds)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if role_arn and not isinstance(role_arn, str):
raise TypeError("Expected argument 'role_arn' to be a str")
pulumi.set(__self__, "role_arn", role_arn)
if test_bot_alias_settings and not isinstance(test_bot_alias_settings, dict):
raise TypeError("Expected argument 'test_bot_alias_settings' to be a dict")
pulumi.set(__self__, "test_bot_alias_settings", test_bot_alias_settings)
@property
@pulumi.getter
def arn(self) -> Optional[str]:
return pulumi.get(self, "arn")
@property
@pulumi.getter(name="dataPrivacy")
def data_privacy(self) -> Optional['outputs.DataPrivacyProperties']:
"""
Data privacy setting of the Bot.
"""
return pulumi.get(self, "data_privacy")
@property
@pulumi.getter
def description(self) -> Optional[str]:
return pulumi.get(self, "description")
@property
@pulumi.getter
def id(self) -> Optional[str]:
return pulumi.get(self, "id")
@property
@pulumi.getter(name="idleSessionTTLInSeconds")
def idle_session_ttl_in_seconds(self) -> Optional[int]:
"""
IdleSessionTTLInSeconds of the resource
"""
return pulumi.get(self, "idle_session_ttl_in_seconds")
@property
@pulumi.getter
def name(self) -> Optional[str]:
return pulumi.get(self, "name")
@property
@pulumi.getter(name="roleArn")
def role_arn(self) -> Optional[str]:
return pulumi.get(self, "role_arn")
@property
@pulumi.getter(name="testBotAliasSettings")
def test_bot_alias_settings(self) -> Optional['outputs.BotTestBotAliasSettings']:
return pulumi.get(self, "test_bot_alias_settings")
class AwaitableGetBotResult(GetBotResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetBotResult(
arn=self.arn,
data_privacy=self.data_privacy,
description=self.description,
id=self.id,
idle_session_ttl_in_seconds=self.idle_session_ttl_in_seconds,
name=self.name,
role_arn=self.role_arn,
test_bot_alias_settings=self.test_bot_alias_settings)
def get_bot(id: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetBotResult:
"""
Amazon Lex conversational bot performing automated tasks such as ordering a pizza, booking a hotel, and so on.
"""
__args__ = dict()
__args__['id'] = id
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('aws-native:lex:getBot', __args__, opts=opts, typ=GetBotResult).value
return AwaitableGetBotResult(
arn=__ret__.arn,
data_privacy=__ret__.data_privacy,
description=__ret__.description,
id=__ret__.id,
idle_session_ttl_in_seconds=__ret__.idle_session_ttl_in_seconds,
name=__ret__.name,
role_arn=__ret__.role_arn,
test_bot_alias_settings=__ret__.test_bot_alias_settings)
@_utilities.lift_output_func(get_bot)
def get_bot_output(id: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetBotResult]:
"""
Amazon Lex conversational bot performing automated tasks such as ordering a pizza, booking a hotel, and so on.
"""
...
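# Hedged usage sketch (not part of the generated module): in a Pulumi program the
# data source could be consumed roughly like this; "existing-bot-id" is a
# hypothetical placeholder for a real Lex bot id.
#
#     import pulumi
#     import pulumi_aws_native as aws_native
#
#     bot = aws_native.lex.get_bot(id="existing-bot-id")
#     pulumi.export("botArn", bot.arn)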
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'c:\Users\Kula\Petrobras\OpenPulse\data\user_input\ui\Model\Info\getGroupInformationPerforatedPlate.ui'
#
# Created by: PyQt5 UI code generator 5.15.0
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Dialog(object):
def setupUi(self, Dialog):
Dialog.setObjectName("Dialog")
Dialog.resize(401, 659)
Dialog.setMinimumSize(QtCore.QSize(0, 0))
Dialog.setMaximumSize(QtCore.QSize(16777215, 16777215))
self.frame = QtWidgets.QFrame(Dialog)
self.frame.setGeometry(QtCore.QRect(0, 0, 401, 39))
self.frame.setMinimumSize(QtCore.QSize(0, 0))
self.frame.setMaximumSize(QtCore.QSize(450, 16777215))
self.frame.setFrameShape(QtWidgets.QFrame.Box)
self.frame.setFrameShadow(QtWidgets.QFrame.Plain)
self.frame.setLineWidth(1)
self.frame.setObjectName("frame")
self.title_label = QtWidgets.QLabel(self.frame)
self.title_label.setGeometry(QtCore.QRect(2, 4, 401, 33))
self.title_label.setMinimumSize(QtCore.QSize(0, 0))
self.title_label.setMaximumSize(QtCore.QSize(446, 16777215))
font = QtGui.QFont()
font.setFamily("Arial")
font.setPointSize(12)
font.setBold(True)
font.setItalic(False)
font.setWeight(75)
self.title_label.setFont(font)
self.title_label.setTextFormat(QtCore.Qt.AutoText)
self.title_label.setAlignment(QtCore.Qt.AlignCenter)
self.title_label.setObjectName("title_label")
self.frame_2 = QtWidgets.QFrame(Dialog)
self.frame_2.setGeometry(QtCore.QRect(0, 38, 401, 621))
self.frame_2.setMinimumSize(QtCore.QSize(0, 0))
self.frame_2.setMaximumSize(QtCore.QSize(450, 16777215))
self.frame_2.setFrameShape(QtWidgets.QFrame.Box)
self.frame_2.setFrameShadow(QtWidgets.QFrame.Plain)
self.frame_2.setObjectName("frame_2")
self.treeWidget_group_info = QtWidgets.QTreeWidget(self.frame_2)
self.treeWidget_group_info.setGeometry(QtCore.QRect(8, 16, 381, 51))
self.treeWidget_group_info.setMinimumSize(QtCore.QSize(0, 0))
self.treeWidget_group_info.setMaximumSize(QtCore.QSize(402, 430))
font = QtGui.QFont()
font.setFamily("Arial")
font.setPointSize(10)
font.setBold(True)
font.setItalic(True)
font.setWeight(75)
self.treeWidget_group_info.setFont(font)
self.treeWidget_group_info.setTextElideMode(QtCore.Qt.ElideRight)
self.treeWidget_group_info.setIndentation(0)
self.treeWidget_group_info.setUniformRowHeights(False)
self.treeWidget_group_info.setAnimated(False)
self.treeWidget_group_info.setAllColumnsShowFocus(False)
self.treeWidget_group_info.setHeaderHidden(False)
self.treeWidget_group_info.setColumnCount(1)
self.treeWidget_group_info.setObjectName("treeWidget_group_info")
self.treeWidget_group_info.headerItem().setTextAlignment(0, QtCore.Qt.AlignCenter)
self.treeWidget_group_info.header().setVisible(True)
self.gridLayoutWidget = QtWidgets.QWidget(self.frame_2)
self.gridLayoutWidget.setGeometry(QtCore.QRect(10, 70, 381, 162))
self.gridLayoutWidget.setObjectName("gridLayoutWidget")
self.gridLayout = QtWidgets.QGridLayout(self.gridLayoutWidget)
self.gridLayout.setSizeConstraint(QtWidgets.QLayout.SetNoConstraint)
self.gridLayout.setContentsMargins(0, 0, 0, 0)
self.gridLayout.setVerticalSpacing(0)
self.gridLayout.setObjectName("gridLayout")
self.verticalLayout_7 = QtWidgets.QVBoxLayout()
self.verticalLayout_7.setSizeConstraint(QtWidgets.QLayout.SetNoConstraint)
self.verticalLayout_7.setObjectName("verticalLayout_7")
self.Label_dh = QtWidgets.QLabel(self.gridLayoutWidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.Label_dh.sizePolicy().hasHeightForWidth())
self.Label_dh.setSizePolicy(sizePolicy)
self.Label_dh.setMinimumSize(QtCore.QSize(140, 35))
self.Label_dh.setMaximumSize(QtCore.QSize(140, 35))
font = QtGui.QFont()
font.setFamily("Arial")
font.setPointSize(11)
font.setBold(True)
font.setItalic(True)
font.setWeight(75)
self.Label_dh.setFont(font)
self.Label_dh.setAlignment(QtCore.Qt.AlignCenter)
self.Label_dh.setObjectName("Label_dh")
self.verticalLayout_7.addWidget(self.Label_dh)
self.Label_tp = QtWidgets.QLabel(self.gridLayoutWidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.Label_tp.sizePolicy().hasHeightForWidth())
self.Label_tp.setSizePolicy(sizePolicy)
self.Label_tp.setMinimumSize(QtCore.QSize(140, 35))
self.Label_tp.setMaximumSize(QtCore.QSize(140, 35))
font = QtGui.QFont()
font.setFamily("Arial")
font.setPointSize(11)
font.setBold(True)
font.setItalic(True)
font.setWeight(75)
self.Label_tp.setFont(font)
self.Label_tp.setAlignment(QtCore.Qt.AlignCenter)
self.Label_tp.setObjectName("Label_tp")
self.verticalLayout_7.addWidget(self.Label_tp)
self.Label_phi = QtWidgets.QLabel(self.gridLayoutWidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.Label_phi.sizePolicy().hasHeightForWidth())
self.Label_phi.setSizePolicy(sizePolicy)
self.Label_phi.setMinimumSize(QtCore.QSize(140, 35))
self.Label_phi.setMaximumSize(QtCore.QSize(140, 35))
font = QtGui.QFont()
font.setFamily("Arial")
font.setPointSize(11)
font.setBold(True)
font.setItalic(True)
font.setWeight(75)
self.Label_phi.setFont(font)
self.Label_phi.setAlignment(QtCore.Qt.AlignCenter)
self.Label_phi.setObjectName("Label_phi")
self.verticalLayout_7.addWidget(self.Label_phi)
self.Label_sigma = QtWidgets.QLabel(self.gridLayoutWidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.Label_sigma.sizePolicy().hasHeightForWidth())
self.Label_sigma.setSizePolicy(sizePolicy)
self.Label_sigma.setMinimumSize(QtCore.QSize(140, 35))
self.Label_sigma.setMaximumSize(QtCore.QSize(140, 35))
font = QtGui.QFont()
font.setFamily("Arial")
font.setPointSize(11)
font.setBold(True)
font.setItalic(True)
font.setWeight(75)
self.Label_sigma.setFont(font)
self.Label_sigma.setAlignment(QtCore.Qt.AlignCenter)
self.Label_sigma.setObjectName("Label_sigma")
self.verticalLayout_7.addWidget(self.Label_sigma)
self.gridLayout.addLayout(self.verticalLayout_7, 0, 1, 1, 1)
self.verticalLayout_8 = QtWidgets.QVBoxLayout()
self.verticalLayout_8.setSizeConstraint(QtWidgets.QLayout.SetDefaultConstraint)
self.verticalLayout_8.setObjectName("verticalLayout_8")
self.label_17 = QtWidgets.QLabel(self.gridLayoutWidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_17.sizePolicy().hasHeightForWidth())
self.label_17.setSizePolicy(sizePolicy)
self.label_17.setMinimumSize(QtCore.QSize(40, 35))
self.label_17.setMaximumSize(QtCore.QSize(40, 35))
font = QtGui.QFont()
font.setFamily("Arial")
font.setPointSize(11)
font.setBold(True)
font.setItalic(True)
font.setWeight(75)
self.label_17.setFont(font)
self.label_17.setAlignment(QtCore.Qt.AlignCenter)
self.label_17.setObjectName("label_17")
self.verticalLayout_8.addWidget(self.label_17)
self.label_14 = QtWidgets.QLabel(self.gridLayoutWidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_14.sizePolicy().hasHeightForWidth())
self.label_14.setSizePolicy(sizePolicy)
self.label_14.setMinimumSize(QtCore.QSize(40, 35))
self.label_14.setMaximumSize(QtCore.QSize(40, 35))
font = QtGui.QFont()
font.setFamily("Arial")
font.setPointSize(11)
font.setBold(True)
font.setItalic(True)
font.setWeight(75)
self.label_14.setFont(font)
self.label_14.setAlignment(QtCore.Qt.AlignCenter)
self.label_14.setObjectName("label_14")
self.verticalLayout_8.addWidget(self.label_14)
self.label_15 = QtWidgets.QLabel(self.gridLayoutWidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_15.sizePolicy().hasHeightForWidth())
self.label_15.setSizePolicy(sizePolicy)
self.label_15.setMinimumSize(QtCore.QSize(40, 35))
self.label_15.setMaximumSize(QtCore.QSize(40, 35))
font = QtGui.QFont()
font.setFamily("Arial")
font.setPointSize(11)
font.setBold(True)
font.setItalic(True)
font.setWeight(75)
self.label_15.setFont(font)
self.label_15.setAlignment(QtCore.Qt.AlignCenter)
self.label_15.setObjectName("label_15")
self.verticalLayout_8.addWidget(self.label_15)
self.label_16 = QtWidgets.QLabel(self.gridLayoutWidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_16.sizePolicy().hasHeightForWidth())
self.label_16.setSizePolicy(sizePolicy)
self.label_16.setMinimumSize(QtCore.QSize(40, 35))
self.label_16.setMaximumSize(QtCore.QSize(40, 35))
font = QtGui.QFont()
font.setFamily("Arial")
font.setPointSize(11)
font.setBold(True)
font.setItalic(True)
font.setWeight(75)
self.label_16.setFont(font)
self.label_16.setAlignment(QtCore.Qt.AlignCenter)
self.label_16.setObjectName("label_16")
self.verticalLayout_8.addWidget(self.label_16)
self.gridLayout.addLayout(self.verticalLayout_8, 0, 2, 1, 1)
self.verticalLayout_6 = QtWidgets.QVBoxLayout()
self.verticalLayout_6.setSizeConstraint(QtWidgets.QLayout.SetDefaultConstraint)
self.verticalLayout_6.setContentsMargins(0, -1, 0, -1)
self.verticalLayout_6.setSpacing(0)
self.verticalLayout_6.setObjectName("verticalLayout_6")
self.label_HoleDiameter = QtWidgets.QLabel(self.gridLayoutWidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_HoleDiameter.sizePolicy().hasHeightForWidth())
self.label_HoleDiameter.setSizePolicy(sizePolicy)
self.label_HoleDiameter.setMinimumSize(QtCore.QSize(160, 35))
self.label_HoleDiameter.setMaximumSize(QtCore.QSize(160, 35))
font = QtGui.QFont()
font.setFamily("Arial")
font.setPointSize(11)
font.setBold(True)
font.setItalic(True)
font.setWeight(75)
self.label_HoleDiameter.setFont(font)
self.label_HoleDiameter.setAlignment(QtCore.Qt.AlignCenter)
self.label_HoleDiameter.setObjectName("label_HoleDiameter")
self.verticalLayout_6.addWidget(self.label_HoleDiameter)
self.label_thickness = QtWidgets.QLabel(self.gridLayoutWidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_thickness.sizePolicy().hasHeightForWidth())
self.label_thickness.setSizePolicy(sizePolicy)
self.label_thickness.setMinimumSize(QtCore.QSize(160, 35))
self.label_thickness.setMaximumSize(QtCore.QSize(160, 35))
font = QtGui.QFont()
font.setFamily("Arial")
font.setPointSize(11)
font.setBold(True)
font.setItalic(True)
font.setWeight(75)
self.label_thickness.setFont(font)
self.label_thickness.setAlignment(QtCore.Qt.AlignCenter)
self.label_thickness.setObjectName("label_thickness")
self.verticalLayout_6.addWidget(self.label_thickness)
self.label_porosity = QtWidgets.QLabel(self.gridLayoutWidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_porosity.sizePolicy().hasHeightForWidth())
self.label_porosity.setSizePolicy(sizePolicy)
self.label_porosity.setMinimumSize(QtCore.QSize(160, 35))
self.label_porosity.setMaximumSize(QtCore.QSize(160, 35))
font = QtGui.QFont()
font.setFamily("Arial")
font.setPointSize(11)
font.setBold(True)
font.setItalic(True)
font.setWeight(75)
self.label_porosity.setFont(font)
self.label_porosity.setAlignment(QtCore.Qt.AlignCenter)
self.label_porosity.setObjectName("label_porosity")
self.verticalLayout_6.addWidget(self.label_porosity)
self.label_discharge = QtWidgets.QLabel(self.gridLayoutWidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_discharge.sizePolicy().hasHeightForWidth())
self.label_discharge.setSizePolicy(sizePolicy)
self.label_discharge.setMinimumSize(QtCore.QSize(160, 35))
self.label_discharge.setMaximumSize(QtCore.QSize(160, 35))
font = QtGui.QFont()
font.setFamily("Arial")
font.setPointSize(11)
font.setBold(True)
font.setItalic(True)
font.setWeight(75)
self.label_discharge.setFont(font)
self.label_discharge.setAlignment(QtCore.Qt.AlignCenter)
self.label_discharge.setObjectName("label_discharge")
self.verticalLayout_6.addWidget(self.label_discharge)
self.gridLayout.addLayout(self.verticalLayout_6, 0, 0, 1, 1)
self.gridLayoutWidget_6 = QtWidgets.QWidget(self.frame_2)
self.gridLayoutWidget_6.setGeometry(QtCore.QRect(10, 390, 381, 80))
self.gridLayoutWidget_6.setObjectName("gridLayoutWidget_6")
self.gridLayout_6 = QtWidgets.QGridLayout(self.gridLayoutWidget_6)
self.gridLayout_6.setSizeConstraint(QtWidgets.QLayout.SetNoConstraint)
self.gridLayout_6.setContentsMargins(0, 0, 0, 0)
self.gridLayout_6.setVerticalSpacing(0)
self.gridLayout_6.setObjectName("gridLayout_6")
self.verticalLayout_19 = QtWidgets.QVBoxLayout()
self.verticalLayout_19.setSizeConstraint(QtWidgets.QLayout.SetDefaultConstraint)
self.verticalLayout_19.setObjectName("verticalLayout_19")
self.label_22 = QtWidgets.QLabel(self.gridLayoutWidget_6)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_22.sizePolicy().hasHeightForWidth())
self.label_22.setSizePolicy(sizePolicy)
self.label_22.setMinimumSize(QtCore.QSize(40, 35))
self.label_22.setMaximumSize(QtCore.QSize(40, 35))
font = QtGui.QFont()
font.setFamily("Arial")
font.setPointSize(11)
font.setBold(True)
font.setItalic(True)
font.setWeight(75)
self.label_22.setFont(font)
self.label_22.setText("")
self.label_22.setAlignment(QtCore.Qt.AlignCenter)
self.label_22.setObjectName("label_22")
self.verticalLayout_19.addWidget(self.label_22)
self.label_28 = QtWidgets.QLabel(self.gridLayoutWidget_6)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_28.sizePolicy().hasHeightForWidth())
self.label_28.setSizePolicy(sizePolicy)
self.label_28.setMinimumSize(QtCore.QSize(40, 35))
self.label_28.setMaximumSize(QtCore.QSize(40, 35))
font = QtGui.QFont()
font.setFamily("Arial")
font.setPointSize(11)
font.setBold(True)
font.setItalic(True)
font.setWeight(75)
self.label_28.setFont(font)
self.label_28.setAlignment(QtCore.Qt.AlignCenter)
self.label_28.setObjectName("label_28")
self.verticalLayout_19.addWidget(self.label_28)
self.gridLayout_6.addLayout(self.verticalLayout_19, 0, 2, 1, 1)
self.verticalLayout_18 = QtWidgets.QVBoxLayout()
self.verticalLayout_18.setSizeConstraint(QtWidgets.QLayout.SetDefaultConstraint)
self.verticalLayout_18.setContentsMargins(6, -1, 6, -1)
self.verticalLayout_18.setSpacing(6)
self.verticalLayout_18.setObjectName("verticalLayout_18")
self.label_correction_2 = QtWidgets.QLabel(self.gridLayoutWidget_6)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_correction_2.sizePolicy().hasHeightForWidth())
self.label_correction_2.setSizePolicy(sizePolicy)
self.label_correction_2.setMinimumSize(QtCore.QSize(160, 35))
self.label_correction_2.setMaximumSize(QtCore.QSize(160, 35))
font = QtGui.QFont()
font.setFamily("Arial")
font.setPointSize(11)
font.setBold(True)
font.setItalic(True)
font.setWeight(75)
self.label_correction_2.setFont(font)
self.label_correction_2.setAlignment(QtCore.Qt.AlignCenter)
self.label_correction_2.setObjectName("label_correction_2")
self.verticalLayout_18.addWidget(self.label_correction_2)
self.label_bias = QtWidgets.QLabel(self.gridLayoutWidget_6)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_bias.sizePolicy().hasHeightForWidth())
self.label_bias.setSizePolicy(sizePolicy)
self.label_bias.setMinimumSize(QtCore.QSize(160, 35))
self.label_bias.setMaximumSize(QtCore.QSize(160, 35))
font = QtGui.QFont()
font.setFamily("Arial")
font.setPointSize(11)
font.setBold(True)
font.setItalic(True)
font.setWeight(75)
self.label_bias.setFont(font)
self.label_bias.setAlignment(QtCore.Qt.AlignCenter)
self.label_bias.setObjectName("label_bias")
self.verticalLayout_18.addWidget(self.label_bias)
self.gridLayout_6.addLayout(self.verticalLayout_18, 0, 0, 1, 1)
self.verticalLayout_17 = QtWidgets.QVBoxLayout()
self.verticalLayout_17.setSizeConstraint(QtWidgets.QLayout.SetNoConstraint)
self.verticalLayout_17.setObjectName("verticalLayout_17")
self.Label_bias_effects = QtWidgets.QLabel(self.gridLayoutWidget_6)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.Label_bias_effects.sizePolicy().hasHeightForWidth())
self.Label_bias_effects.setSizePolicy(sizePolicy)
self.Label_bias_effects.setMinimumSize(QtCore.QSize(140, 35))
self.Label_bias_effects.setMaximumSize(QtCore.QSize(140, 35))
font = QtGui.QFont()
font.setFamily("Arial")
font.setPointSize(11)
font.setBold(True)
font.setItalic(True)
font.setWeight(75)
self.Label_bias_effects.setFont(font)
self.Label_bias_effects.setAlignment(QtCore.Qt.AlignCenter)
self.Label_bias_effects.setObjectName("Label_bias_effects")
self.verticalLayout_17.addWidget(self.Label_bias_effects)
self.Label_bias_coefficient = QtWidgets.QLabel(self.gridLayoutWidget_6)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.Label_bias_coefficient.sizePolicy().hasHeightForWidth())
self.Label_bias_coefficient.setSizePolicy(sizePolicy)
self.Label_bias_coefficient.setMinimumSize(QtCore.QSize(140, 35))
self.Label_bias_coefficient.setMaximumSize(QtCore.QSize(140, 35))
font = QtGui.QFont()
font.setFamily("Arial")
font.setPointSize(11)
font.setBold(True)
font.setItalic(True)
font.setWeight(75)
self.Label_bias_coefficient.setFont(font)
self.Label_bias_coefficient.setAlignment(QtCore.Qt.AlignCenter)
self.Label_bias_coefficient.setObjectName("Label_bias_coefficient")
self.verticalLayout_17.addWidget(self.Label_bias_coefficient)
self.gridLayout_6.addLayout(self.verticalLayout_17, 0, 1, 1, 1)
self.gridLayoutWidget_5 = QtWidgets.QWidget(self.frame_2)
self.gridLayoutWidget_5.setGeometry(QtCore.QRect(10, 250, 381, 121))
self.gridLayoutWidget_5.setObjectName("gridLayoutWidget_5")
self.gridLayout_5 = QtWidgets.QGridLayout(self.gridLayoutWidget_5)
self.gridLayout_5.setSizeConstraint(QtWidgets.QLayout.SetNoConstraint)
self.gridLayout_5.setContentsMargins(0, 0, 0, 0)
self.gridLayout_5.setVerticalSpacing(0)
self.gridLayout_5.setObjectName("gridLayout_5")
self.verticalLayout_12 = QtWidgets.QVBoxLayout()
self.verticalLayout_12.setSizeConstraint(QtWidgets.QLayout.SetNoConstraint)
self.verticalLayout_12.setObjectName("verticalLayout_12")
self.Label_nl_effects = QtWidgets.QLabel(self.gridLayoutWidget_5)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.Label_nl_effects.sizePolicy().hasHeightForWidth())
self.Label_nl_effects.setSizePolicy(sizePolicy)
self.Label_nl_effects.setMinimumSize(QtCore.QSize(140, 35))
self.Label_nl_effects.setMaximumSize(QtCore.QSize(140, 35))
font = QtGui.QFont()
font.setFamily("Arial")
font.setPointSize(11)
font.setBold(True)
font.setItalic(True)
font.setWeight(75)
self.Label_nl_effects.setFont(font)
self.Label_nl_effects.setAlignment(QtCore.Qt.AlignCenter)
self.Label_nl_effects.setObjectName("Label_nl_effects")
self.verticalLayout_12.addWidget(self.Label_nl_effects)
self.Label_nl_sigma = QtWidgets.QLabel(self.gridLayoutWidget_5)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.Label_nl_sigma.sizePolicy().hasHeightForWidth())
self.Label_nl_sigma.setSizePolicy(sizePolicy)
self.Label_nl_sigma.setMinimumSize(QtCore.QSize(140, 35))
self.Label_nl_sigma.setMaximumSize(QtCore.QSize(140, 35))
font = QtGui.QFont()
font.setFamily("Arial")
font.setPointSize(11)
font.setBold(True)
font.setItalic(True)
font.setWeight(75)
self.Label_nl_sigma.setFont(font)
self.Label_nl_sigma.setAlignment(QtCore.Qt.AlignCenter)
self.Label_nl_sigma.setObjectName("Label_nl_sigma")
self.verticalLayout_12.addWidget(self.Label_nl_sigma)
self.Label_correction = QtWidgets.QLabel(self.gridLayoutWidget_5)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.Label_correction.sizePolicy().hasHeightForWidth())
self.Label_correction.setSizePolicy(sizePolicy)
self.Label_correction.setMinimumSize(QtCore.QSize(140, 35))
self.Label_correction.setMaximumSize(QtCore.QSize(140, 35))
font = QtGui.QFont()
font.setFamily("Arial")
font.setPointSize(11)
font.setBold(True)
font.setItalic(True)
font.setWeight(75)
self.Label_correction.setFont(font)
self.Label_correction.setAlignment(QtCore.Qt.AlignCenter)
self.Label_correction.setObjectName("Label_correction")
self.verticalLayout_12.addWidget(self.Label_correction)
self.gridLayout_5.addLayout(self.verticalLayout_12, 0, 1, 1, 1)
self.verticalLayout_13 = QtWidgets.QVBoxLayout()
self.verticalLayout_13.setSizeConstraint(QtWidgets.QLayout.SetDefaultConstraint)
self.verticalLayout_13.setContentsMargins(6, -1, 6, -1)
self.verticalLayout_13.setSpacing(6)
self.verticalLayout_13.setObjectName("verticalLayout_13")
self.label_porosity_2 = QtWidgets.QLabel(self.gridLayoutWidget_5)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_porosity_2.sizePolicy().hasHeightForWidth())
self.label_porosity_2.setSizePolicy(sizePolicy)
self.label_porosity_2.setMinimumSize(QtCore.QSize(160, 35))
self.label_porosity_2.setMaximumSize(QtCore.QSize(160, 35))
font = QtGui.QFont()
font.setFamily("Arial")
font.setPointSize(11)
font.setBold(True)
font.setItalic(True)
font.setWeight(75)
self.label_porosity_2.setFont(font)
self.label_porosity_2.setAlignment(QtCore.Qt.AlignCenter)
self.label_porosity_2.setObjectName("label_porosity_2")
self.verticalLayout_13.addWidget(self.label_porosity_2)
self.label_nonlinDischarge_2 = QtWidgets.QLabel(self.gridLayoutWidget_5)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_nonlinDischarge_2.sizePolicy().hasHeightForWidth())
self.label_nonlinDischarge_2.setSizePolicy(sizePolicy)
self.label_nonlinDischarge_2.setMinimumSize(QtCore.QSize(160, 35))
self.label_nonlinDischarge_2.setMaximumSize(QtCore.QSize(160, 35))
font = QtGui.QFont()
font.setFamily("Arial")
font.setPointSize(11)
font.setBold(True)
font.setItalic(True)
font.setWeight(75)
self.label_nonlinDischarge_2.setFont(font)
self.label_nonlinDischarge_2.setAlignment(QtCore.Qt.AlignCenter)
self.label_nonlinDischarge_2.setObjectName("label_nonlinDischarge_2")
self.verticalLayout_13.addWidget(self.label_nonlinDischarge_2)
self.label_bias_3 = QtWidgets.QLabel(self.gridLayoutWidget_5)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_bias_3.sizePolicy().hasHeightForWidth())
self.label_bias_3.setSizePolicy(sizePolicy)
self.label_bias_3.setMinimumSize(QtCore.QSize(160, 35))
self.label_bias_3.setMaximumSize(QtCore.QSize(160, 35))
font = QtGui.QFont()
font.setFamily("Arial")
font.setPointSize(11)
font.setBold(True)
font.setItalic(True)
font.setWeight(75)
self.label_bias_3.setFont(font)
self.label_bias_3.setAlignment(QtCore.Qt.AlignCenter)
self.label_bias_3.setObjectName("label_bias_3")
self.verticalLayout_13.addWidget(self.label_bias_3)
self.gridLayout_5.addLayout(self.verticalLayout_13, 0, 0, 1, 1)
self.verticalLayout_14 = QtWidgets.QVBoxLayout()
self.verticalLayout_14.setSizeConstraint(QtWidgets.QLayout.SetDefaultConstraint)
self.verticalLayout_14.setObjectName("verticalLayout_14")
self.label_30 = QtWidgets.QLabel(self.gridLayoutWidget_5)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_30.sizePolicy().hasHeightForWidth())
self.label_30.setSizePolicy(sizePolicy)
self.label_30.setMinimumSize(QtCore.QSize(40, 35))
self.label_30.setMaximumSize(QtCore.QSize(40, 35))
font = QtGui.QFont()
font.setFamily("Arial")
font.setPointSize(11)
font.setBold(True)
font.setItalic(True)
font.setWeight(75)
self.label_30.setFont(font)
self.label_30.setText("")
self.label_30.setAlignment(QtCore.Qt.AlignCenter)
self.label_30.setObjectName("label_30")
self.verticalLayout_14.addWidget(self.label_30)
self.label_18 = QtWidgets.QLabel(self.gridLayoutWidget_5)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_18.sizePolicy().hasHeightForWidth())
self.label_18.setSizePolicy(sizePolicy)
self.label_18.setMinimumSize(QtCore.QSize(40, 35))
self.label_18.setMaximumSize(QtCore.QSize(40, 35))
font = QtGui.QFont()
font.setFamily("Arial")
font.setPointSize(11)
font.setBold(True)
font.setItalic(True)
font.setWeight(75)
self.label_18.setFont(font)
self.label_18.setAlignment(QtCore.Qt.AlignCenter)
self.label_18.setObjectName("label_18")
self.verticalLayout_14.addWidget(self.label_18)
self.label_19 = QtWidgets.QLabel(self.gridLayoutWidget_5)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_19.sizePolicy().hasHeightForWidth())
self.label_19.setSizePolicy(sizePolicy)
self.label_19.setMinimumSize(QtCore.QSize(40, 35))
self.label_19.setMaximumSize(QtCore.QSize(40, 35))
font = QtGui.QFont()
font.setFamily("Arial")
font.setPointSize(11)
font.setBold(True)
font.setItalic(True)
font.setWeight(75)
self.label_19.setFont(font)
self.label_19.setAlignment(QtCore.Qt.AlignCenter)
self.label_19.setObjectName("label_19")
self.verticalLayout_14.addWidget(self.label_19)
self.gridLayout_5.addLayout(self.verticalLayout_14, 0, 2, 1, 1)
self.line = QtWidgets.QFrame(self.frame_2)
self.line.setGeometry(QtCore.QRect(0, 230, 401, 20))
self.line.setFrameShape(QtWidgets.QFrame.HLine)
self.line.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line.setObjectName("line")
self.line_2 = QtWidgets.QFrame(self.frame_2)
self.line_2.setGeometry(QtCore.QRect(0, 370, 401, 20))
self.line_2.setFrameShape(QtWidgets.QFrame.HLine)
self.line_2.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_2.setObjectName("line_2")
self.pushButton_close = QtWidgets.QPushButton(self.frame_2)
self.pushButton_close.setGeometry(QtCore.QRect(150, 570, 102, 32))
self.pushButton_close.setMinimumSize(QtCore.QSize(102, 32))
self.pushButton_close.setMaximumSize(QtCore.QSize(102, 32))
font = QtGui.QFont()
font.setFamily("Arial")
font.setPointSize(14)
font.setBold(True)
font.setItalic(True)
font.setWeight(75)
self.pushButton_close.setFont(font)
self.pushButton_close.setObjectName("pushButton_close")
self.gridLayoutWidget_7 = QtWidgets.QWidget(self.frame_2)
self.gridLayoutWidget_7.setGeometry(QtCore.QRect(10, 490, 381, 41))
self.gridLayoutWidget_7.setObjectName("gridLayoutWidget_7")
self.gridLayout_7 = QtWidgets.QGridLayout(self.gridLayoutWidget_7)
self.gridLayout_7.setSizeConstraint(QtWidgets.QLayout.SetNoConstraint)
self.gridLayout_7.setContentsMargins(0, 0, 0, 0)
self.gridLayout_7.setVerticalSpacing(0)
self.gridLayout_7.setObjectName("gridLayout_7")
self.verticalLayout_20 = QtWidgets.QVBoxLayout()
self.verticalLayout_20.setSizeConstraint(QtWidgets.QLayout.SetDefaultConstraint)
self.verticalLayout_20.setObjectName("verticalLayout_20")
self.label_29 = QtWidgets.QLabel(self.gridLayoutWidget_7)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_29.sizePolicy().hasHeightForWidth())
self.label_29.setSizePolicy(sizePolicy)
self.label_29.setMinimumSize(QtCore.QSize(40, 35))
self.label_29.setMaximumSize(QtCore.QSize(40, 35))
font = QtGui.QFont()
font.setFamily("Arial")
font.setPointSize(11)
font.setBold(True)
font.setItalic(True)
font.setWeight(75)
self.label_29.setFont(font)
self.label_29.setAlignment(QtCore.Qt.AlignCenter)
self.label_29.setObjectName("label_29")
self.verticalLayout_20.addWidget(self.label_29)
self.gridLayout_7.addLayout(self.verticalLayout_20, 0, 2, 1, 1)
self.verticalLayout_21 = QtWidgets.QVBoxLayout()
self.verticalLayout_21.setSizeConstraint(QtWidgets.QLayout.SetDefaultConstraint)
self.verticalLayout_21.setContentsMargins(6, -1, 6, -1)
self.verticalLayout_21.setSpacing(6)
self.verticalLayout_21.setObjectName("verticalLayout_21")
self.label_bias_2 = QtWidgets.QLabel(self.gridLayoutWidget_7)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_bias_2.sizePolicy().hasHeightForWidth())
self.label_bias_2.setSizePolicy(sizePolicy)
self.label_bias_2.setMinimumSize(QtCore.QSize(160, 35))
self.label_bias_2.setMaximumSize(QtCore.QSize(160, 35))
font = QtGui.QFont()
font.setFamily("Arial")
font.setPointSize(11)
font.setBold(True)
font.setItalic(True)
font.setWeight(75)
self.label_bias_2.setFont(font)
self.label_bias_2.setAlignment(QtCore.Qt.AlignCenter)
self.label_bias_2.setObjectName("label_bias_2")
self.verticalLayout_21.addWidget(self.label_bias_2)
self.gridLayout_7.addLayout(self.verticalLayout_21, 0, 0, 1, 1)
self.verticalLayout_22 = QtWidgets.QVBoxLayout()
self.verticalLayout_22.setSizeConstraint(QtWidgets.QLayout.SetNoConstraint)
self.verticalLayout_22.setObjectName("verticalLayout_22")
self.Label_dimensionless_impedance = QtWidgets.QLabel(self.gridLayoutWidget_7)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.Label_dimensionless_impedance.sizePolicy().hasHeightForWidth())
self.Label_dimensionless_impedance.setSizePolicy(sizePolicy)
self.Label_dimensionless_impedance.setMinimumSize(QtCore.QSize(140, 35))
self.Label_dimensionless_impedance.setMaximumSize(QtCore.QSize(140, 35))
font = QtGui.QFont()
font.setFamily("Arial")
font.setPointSize(11)
font.setBold(True)
font.setItalic(True)
font.setWeight(75)
self.Label_dimensionless_impedance.setFont(font)
self.Label_dimensionless_impedance.setAlignment(QtCore.Qt.AlignCenter)
self.Label_dimensionless_impedance.setObjectName("Label_dimensionless_impedance")
self.verticalLayout_22.addWidget(self.Label_dimensionless_impedance)
self.gridLayout_7.addLayout(self.verticalLayout_22, 0, 1, 1, 1)
self.line_3 = QtWidgets.QFrame(self.frame_2)
self.line_3.setGeometry(QtCore.QRect(0, 470, 401, 20))
self.line_3.setFrameShape(QtWidgets.QFrame.HLine)
self.line_3.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_3.setObjectName("line_3")
self.retranslateUi(Dialog)
QtCore.QMetaObject.connectSlotsByName(Dialog)
def retranslateUi(self, Dialog):
_translate = QtCore.QCoreApplication.translate
Dialog.setWindowTitle(_translate("Dialog", "Information of selected group"))
self.title_label.setText(_translate("Dialog", "<html><head/><body><p align=\"center\"><span style=\" font-size:12pt;\">Information of selected group</span></p></body></html>"))
self.treeWidget_group_info.headerItem().setText(0, _translate("Dialog", "Elements"))
self.Label_dh.setText(_translate("Dialog", "Label_dh"))
self.Label_tp.setText(_translate("Dialog", "Label_tp"))
self.Label_phi.setText(_translate("Dialog", "Label_phi"))
self.Label_sigma.setText(_translate("Dialog", "Label_sigma"))
self.label_17.setText(_translate("Dialog", "[m]"))
self.label_14.setText(_translate("Dialog", "[m]"))
self.label_15.setText(_translate("Dialog", "[-]"))
self.label_16.setText(_translate("Dialog", "[-]"))
self.label_HoleDiameter.setText(_translate("Dialog", "Hole diameter:"))
self.label_thickness.setText(_translate("Dialog", "Plate thickness:"))
self.label_porosity.setText(_translate("Dialog", "Area porosity:"))
self.label_discharge.setText(_translate("Dialog", "Discharge coefficient:"))
self.label_28.setText(_translate("Dialog", "[-]"))
self.label_correction_2.setText(_translate("Dialog", "Bias flow effects:"))
self.label_bias.setText(_translate("Dialog", "Bias flow \n"
" coefficient:"))
self.Label_bias_effects.setText(_translate("Dialog", "Label_gf_effects"))
self.Label_bias_coefficient.setText(_translate("Dialog", "Label_gf_coefficient"))
self.Label_nl_effects.setText(_translate("Dialog", "Label_nl_effects"))
self.Label_nl_sigma.setText(_translate("Dialog", "Label_nl_sigma"))
self.Label_correction.setText(_translate("Dialog", "Label_correction"))
self.label_porosity_2.setText(_translate("Dialog", " Nonlinear effects:"))
self.label_nonlinDischarge_2.setText(_translate("Dialog", "Nonlinear \n"
"discharge coefficient:"))
self.label_bias_3.setText(_translate("Dialog", "Correction factor:"))
self.label_18.setText(_translate("Dialog", "[-]"))
self.label_19.setText(_translate("Dialog", "[-]"))
self.pushButton_close.setText(_translate("Dialog", "Close"))
self.label_29.setText(_translate("Dialog", "[-]"))
self.label_bias_2.setText(_translate("Dialog", "Dimensionless \n"
"impedance"))
self.Label_dimensionless_impedance.setText(_translate("Dialog", "Label_dimensionless_impedance"))
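# Hedged usage sketch (not part of the pyuic5 output): the generated class is
# normally driven by a small launcher such as the one pyuic5 emits with the -x flag.
#
#     if __name__ == "__main__":
#         import sys
#         app = QtWidgets.QApplication(sys.argv)
#         Dialog = QtWidgets.QDialog()
#         ui = Ui_Dialog()
#         ui.setupUi(Dialog)
#         Dialog.show()
#         sys.exit(app.exec_())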
|
# Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: MIT-0
import moto
import pytest
from pipeline_control.entrypoints import lambda_entry_points
class TestNotifyUserHandler:
def test_1_notify_users(
self,
entrypoint_fake_notify_user_handler,
):
lambda_entry_points.handle_notify_user({}, {})
entrypoint_fake_notify_user_handler.assert_called_once()
|
"""IBM Middleware product installation. Supports for Installation manager, Packaging utility and other products.
"""
import logging
import sys
import os
import stat
from lib import *
import diomreader
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s',
datefmt="")
logger = logging.getLogger("ibm.ibm")
class InstallationManager():
"""Install/Remove Installation Manager.
"""
def __init__(self):
pass
@staticmethod
def install(exec_script, im_install_root, imdl_install_root, imshared_root, repository,
packagename, packagebuild=None, offering_features=None):
"""Installs installation manager
"""
logger.debug("Setting up imcl cmd")
offering_str = packagename
if packagebuild != None:
offering_str += '_' + packagebuild
if offering_features != None:
offering_str += ',' + offering_features
cmd = ( exec_script +
" install " + packagename +
" -repositories " + repository +
" -installationDirectory " + os.path.join(im_install_root, 'eclipse') +
" -dataLocation " + imdl_install_root +
" -sharedResourcesDirectory " + imshared_root +
" -acceptLicense"
)
(ret_code, output) = shell.Shell.runCmd(cmd)
@staticmethod
def uninstall(imdl_install_root):
"""Uninstalls installation manager
"""
cmd = ( os.path.join(imdl_install_root, 'uninstall', 'uninstallc') +
" -silent" )
(ret_code, output) = shell.Shell.runCmd(cmd)
class Package():
"""Install/rollback/uninstall Packages
For Packaging utility, copy or delete packages
"""
__profileIM = 'InstallationManager'
__repoOnline = 'Online_Repo'
__repoLocal = 'Local_Repo'
__pkg_nameIM = 'com.ibm.cic.agent'
__pkg_namePU = 'com.ibm.cic.packagingUtility'
def __init__(self, vendorname, profile, configFile, version=None):
self.profile = profile
self.configFile = configFile
self.version = version
self.setConfig(vendorname)
def setConfig(self, vendorname):
self.config = {}
self.config['vendorname'] = vendorname
(self.sysName, nodeName, release, version, self.machine) = os.uname()
logger.debug("System Information: %s(OS), %s(HOST), %s(Release), %s(ARCH)", self.sysName, nodeName, release, self.machine)
self.config.update(propsparser.ini(self.configFile, scope=['Root'])['Root'])
## load Root data if file found
if self.config.has_key('root_config_file'):
self.config['root_config_file'] = os.path.join(os.path.dirname(self.configFile),
self.config['root_config_file'])
self.config.update(propsparser.ini(self.config['root_config_file'],
scope=['Root'])['Root'])
self.config.update(propsparser.ini(self.configFile, scope=['Root'])['Root'])
self.config['root_config_file'] = os.path.join(os.path.dirname(self.configFile),
self.config['root_config_file'])
self.config.update(propsparser.ini(self.configFile, scope=[self.profile])[self.profile])
## Load IM if Package is not IM
if self.config['pkg_name'] != Package.__pkg_nameIM:
self.config.update(propsparser.ini(self.configFile,
scope=[Package.__profileIM])[Package.__profileIM])
## load IM data if file found
if self.config.has_key('im_config_file'):
self.config['im_config_file'] = os.path.join(os.path.dirname(self.configFile),
self.config['im_config_file'])
self.config.update(propsparser.ini(self.config['im_config_file'],
scope=[Package.__profileIM])[Package.__profileIM])
self.config.update(propsparser.ini(self.configFile,
scope=[Package.__profileIM])[Package.__profileIM])
self.config['im_config_file'] = os.path.join(os.path.dirname(self.configFile),
self.config['im_config_file'])
self.config.update(propsparser.ini(self.configFile, scope=[self.profile])[self.profile])
self.config['profile'] = self.profile
self.config['imcl'] = os.path.join(self.config['im_install_root'],'eclipse','tools','imcl')
## Load local repo if package is IM or PU based on root data file
if self.config['pkg_name'] == Package.__pkg_nameIM or \
self.config['pkg_name'] == Package.__pkg_namePU:
if self.config.has_key('root_config_file'):
self.config.update(propsparser.ini(self.config['root_config_file'],
scope=[Package.__repoLocal])[Package.__repoLocal])
else:
self.config.update(propsparser.ini(self.configFile,
scope=[Package.__repoLocal])[Package.__repoLocal])
else:
if self.config.has_key('root_config_file'):
self.config.update(propsparser.ini(self.config['root_config_file'],
scope=[self.config['repo_option']])[self.config['repo_option']])
else:
self.config.update(propsparser.ini(self.configFile,
scope=[self.config['repo_option']])[self.config['repo_option']])
def install(self):
#print self.config
if self.config['packagename'] != Package.__pkg_nameIM and \
not os.path.isdir(self.config['im_install_root']):
            raise Exception('Installation Manager is not installed')
if self.config['repo_option'] == Package.__repoLocal:
download.Download(self.config['url'], self.config['fileName'],
self.config['target_loc'], realm=self.config['url_realm'],
user=self.config['url_user'], passwd=self.config['url_passwd'])
if self.config['packagename'] == Package.__pkg_nameIM:
InstallationManager.install(os.path.join(self.config['target_loc'],
self.config['fileName'].rstrip('.zip'),
'tools', 'imcl'),
self.config['im_install_root'],
self.config['imdl_install_root'],
self.config['imshared_root'],
os.path.join(self.config['target_loc'],
self.config['fileName'].rstrip('.zip')),
self.config['packagename'],
self.config['packagebuild']
)
elif self.config['packagename'] == Package.__pkg_namePU:
Package.imcl_install(self.config['imcl'], self.config['install_root'], self.config['imshared_root'],
os.path.join(self.config['target_loc'], self.config['fileName'].rstrip('.zip')),
self.config['packagename'], self.config['packagebuild']
)
else:
if not self.config.has_key('offering_properties') or self.config['offering_properties'] == "":
self.config['offering_properties'] = None
if not self.config.has_key('offering_preferences') or self.config['offering_preferences'] == "":
self.config['offering_preferences'] = None
if self.config['repo_option'] == Package.__repoLocal:
Package.imcl_install(self.config['imcl'], self.config['install_root'], self.config['imshared_root'],
os.path.join(self.config['target_loc'], self.config['fileName'].rstrip('.zip')),
self.config['packagename'], self.config['packagebuild'], self.config['offering_features'],
self.config['offering_properties'], self.config['offering_preferences']
)
elif self.config['repo_option'] == Package.__repoOnline:
Package.imcl_install(self.config['imcl'], self.config['install_root'], self.config['imshared_root'],
os.path.join(self.config['target_loc'], self.config['packagename']),
self.config['packagename'], self.config['packagebuild'], self.config['offering_features'],
self.config['offering_properties'], self.config['offering_preferences']
)
def uninstall(self):
if self.config['pkg_name'] == Package.__pkg_nameIM:
InstallationManager.uninstall(self.config['imdl_install_root'])
else:
cmd = (self.config['imcl'] +
" listInstalledPackages " +
" -long"
)
(ret_code, output) = shell.Shell.runCmd(cmd)
for line in output.split("\n"):
if line == '': continue
(installed_loc,packageid_ver,displayName,displayVersion) = line.split(" : ")
if installed_loc == self.config['install_root']:
self.config['pkg_name'], self.config['packagebuild'] = packageid_ver.split('_',1)
Package.imcl_uninstall(self.config['imcl'], self.config['install_root'],
self.config['pkg_name'], self.config['packagebuild']
)
def rollback(self):
##Get offering version to rollback to
if self.version != None:
if self.config['repo_option'] == Package.__repoLocal:
xml = diomreader.XMLReader(url=self.config['url'], file=self.config['dm_file'],
sysName=self.sysName,sysBit=self.machine,vendorName=self.config['vendorname'],
packageName=self.config['pkg_name'], version=self.version,
realm=self.config['realm'], user=self.config['url_user'],
passwd=self.config['url_passwd'])
self.config.update(xml.getSWDownloadDetails())
elif self.config['repo_option'] == Package.__repoOnline:
for line in self.imcl_getAvailablePackages(self.config['target_loc']).split("\n"):
if line == '': continue
(repo,packageid_ver,displayName,displayVersion) = line.split(" : ")
if displayVersion == self.version and str(displayName) == self.config['pkg_name']:
self.config['packagename'], self.config['packagebuild'] = packageid_ver.split('_',1)
else:
cmd = (self.config['imcl'] +
" listInstalledPackages " +
" -long"
)
(ret_code, output) = shell.Shell.runCmd(cmd)
for line in output.split("\n"):
if line == '': continue
(installed_loc,packageid_ver,displayName,displayVersion) = line.split(" : ")
if installed_loc == self.config['install_root']:
self.config['packagename'] = packageid_ver.split('_',1)[0]
self.config['packagebuild'] = None
Package.imcl_rollback(self.config['imcl'], self.config['install_root'],
self.config['packagename'], self.config['packagebuild']
)
def copy_package(self, packageName):
xml = diomreader.XMLReader(url=self.config['url'], file=self.config['dm_file'],
sysName=self.sysName,sysBit=self.machine,vendorName=self.config['vendorname'],
packageName=packageName, version=self.version,
realm=self.config['url_realm'], user=self.config['url_user'],
passwd=self.config['url_passwd'])
self.config.update(xml.getSWDownloadDetails())
download.Download(self.config['url'], self.config['fileName'], self.config['target_loc'],
realm=self.config['url_realm'],user=self.config['url_user'], passwd=self.config['url_passwd'])
Package.pucl_copy(os.path.join(self.config['install_root'], 'PUCL'), 'copy',
os.path.join(self.config['target_loc'],self.config['fileName'].rstrip('.zip')),
os.path.join(self.config['pu_local_target'],self.config['packagename']),
self.config['packagename'], self.config['packagebuild']
)
def delete_package(self, packageName):
for line in self.imcl_getAvailablePackages \
(os.path.join(self.config['pu_local_target'],
packageName)).split("\n"):
if line == '': continue
(repo,packageid_ver,displayName,displayVersion) = line.split(" : ")
if displayVersion == self.version:
self.config['packagename'], self.config['packagebuild'] = packageid_ver.split('_',1)
Package.pucl_delete(os.path.join(self.config['install_root'], 'PUCL'), 'delete',
os.path.join(self.config['pu_local_target'],self.config['packagename']),
self.config['packagename'], self.config['packagebuild']
)
def imcl_getAvailablePackages(self, repo):
cmd = (self.config['imcl'] +
" listAvailablePackages " +
" -repositories " + repo +
" -long"
)
(ret_code, output) = shell.Shell.runCmd(cmd)
return output
@staticmethod
def imcl_install(exec_script, install_root, imshared_root, repository,
packagename, packagebuild=None, offering_features=None,
offering_properties=None, offering_preferences=None):
logger.debug("Setting up cmd")
offering_str = packagename
if packagebuild != None:
offering_str += '_' + packagebuild
if offering_features != None:
offering_str += ',' + offering_features
cmd = ( exec_script +
" install " + offering_str +
" -repositories " + repository +
" -installationDirectory " + install_root +
" -sharedResourcesDirectory " + imshared_root +
" -acceptLicense"
)
if offering_properties != None:
cmd = cmd + ' -properties ' + offering_properties
if offering_preferences != None:
cmd = cmd + ' -preferences ' + offering_preferences
(ret_code, output) = shell.Shell.runCmd(cmd)
@staticmethod
def imcl_rollback(exec_script, install_root,
packagename, packagebuild=None, offering_features=None):
logger.debug("Setting up cmd")
offering_str = packagename
if packagebuild != None:
offering_str += '_' + packagebuild
if offering_features != None:
offering_str += ',' + offering_features
cmd = ( exec_script +
" rollback " + offering_str +
#" -repositories " + repository +
" -installationDirectory " + install_root +
#" -sharedResourcesDirectory " + imshared_root +
" -acceptLicense"
)
(ret_code, output) = shell.Shell.runCmd(cmd)
@staticmethod
def imcl_uninstall(exec_script, install_root, packagename, packagebuild=None, offering_features=None):
offering_str = packagename
if packagebuild != None:
offering_str += '_' + packagebuild
if offering_features != None:
offering_str += ',' + offering_features
cmd = (exec_script +
" uninstall " + offering_str +
" -installationDirectory " + install_root
)
(ret_code, output) = shell.Shell.runCmd(cmd)
@staticmethod
def pucl_copy(exec_script, command, repository, target, packagename=None, packagebuild=None):
offering_str = ""
if packagename != None:
offering_str = packagename
if packagebuild != None:
offering_str += '_' + packagebuild
cmd = (exec_script +
" " + command + " " + offering_str +
" -repositories " + repository +
" -target " + target +
" -acceptLicense"
)
(ret_code, output) = shell.Shell.runCmd(cmd)
@staticmethod
def pucl_delete(exec_script, command, target, packagename, packagebuild=None):
offering_str = packagename
if packagebuild != None:
offering_str += '_' + packagebuild
cmd = (exec_script +
" " + command + " " + offering_str +
" -target " + target
)
(ret_code, output) = shell.Shell.runCmd(cmd)
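# Hedged usage sketch (not part of the original module): the real profile names and
# ini layout come from the config files parsed above, so "WAS" and "product.ini"
# below are hypothetical placeholders.
#
#     if __name__ == '__main__':
#         pkg = Package('IBM', 'WAS', '/opt/install/product.ini')
#         pkg.install()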
|
from __future__ import annotations
from dataclasses import dataclass
from datetime import datetime
from typing import Any, Callable, Iterable, List, Mapping, NamedTuple, Optional, Tuple, Union
import ciso8601
from typing_extensions import TypeAlias, TypedDict
from aioinfluxdb import constants
from aioinfluxdb.flux_table import FluxRecord
TagType: TypeAlias = Tuple[str, str]
TagSetType: TypeAlias = Iterable[Tuple[str, str]]
FieldType: TypeAlias = Union[int, float, bool, str]
FieldSetType: TypeAlias = Iterable[Tuple[str, FieldType]]
TimestampType: TypeAlias = Union[datetime, int, float]
MinimalRecordTuple: TypeAlias = Tuple[str, FieldSetType]
""" measurement, field-set """
RecordTuple: TypeAlias = Tuple[str, Optional[TagSetType], FieldSetType, Optional[TimestampType]]
""" measurement, field_set, field-set, timestamp """
class Record(NamedTuple):
measurement: str
field_set: FieldSetType
tag_set: Optional[TagSetType] = None
timestamp: Optional[TimestampType] = None
def __repr__(self) -> str:
body = ', '.join((f'{key}={getattr(self, key)}' for key in self._fields if getattr(self, key) is not None))
return f'<{self.__class__.__name__} {body}>'
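# Illustrative example (not in the original source): a point for a hypothetical
# "cpu" measurement with one tag and one field, leaving the timestamp unset.
#
#     Record(measurement='cpu', field_set=[('usage', 0.42)], tag_set=[('host', 'db-01')])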
class _RetentionRule(TypedDict, total=False):
everySeconds: int
shardGroupDurationSeconds: int
type: str
@dataclass(frozen=True)
class RetentionRule:
every_seconds: int
shard_group_duration_seconds: Optional[int]
type: str
@classmethod
def from_json(cls, data: _RetentionRule) -> RetentionRule:
return cls(
every_seconds=data['everySeconds'],
shard_group_duration_seconds=data.get('shardGroupDurationSeconds'),
type=data['type'],
)
def to_json(self) -> _RetentionRule:
ret = _RetentionRule(
everySeconds=self.every_seconds,
type=self.type,
)
if self.shard_group_duration_seconds is not None:
ret['shardGroupDurationSeconds'] = self.shard_group_duration_seconds
return ret
class _Label(TypedDict):
id: str
name: str
orgID: str
properties: Mapping[str, Any]
@dataclass(frozen=True)
class Label:
id: str
name: str
organization_id: str
properties: Mapping[str, Any]
@classmethod
def from_json(cls, data: _Label) -> Label:
return cls(
id=data['id'],
name=data['name'],
organization_id=data['orgID'],
properties=data['properties'],
)
class _Bucket(TypedDict, total=False):
createdAt: str
description: str
id: str
labels: Iterable[_Label]
name: str
orgID: str
retentionRules: Iterable[_RetentionRule]
rp: str
schemaType: str
type: str
updatedAt: str
@dataclass(frozen=True)
class Bucket:
created_at: Optional[datetime]
description: Optional[str]
id: Optional[str]
labels: Tuple[Label, ...]
name: str
organization_id: Optional[str]
retention_rules: Tuple[RetentionRule, ...]
rp: Optional[str]
schema_type: Optional[str]
type: str
updated_at: Optional[datetime]
@classmethod
def from_json(cls, data: _Bucket) -> Bucket:
return cls(
created_at=ciso8601.parse_datetime(data['createdAt']) if 'createdAt' in data else None,
description=data.get('description'),
id=data.get('id'),
labels=tuple(map(Label.from_json, data.get('labels', ()))),
name=data['name'],
organization_id=data.get('orgID'),
retention_rules=tuple(map(RetentionRule.from_json, data['retentionRules'])),
rp=data.get('rp'),
schema_type=constants.BucketSchemaType(data['schemaType']) if 'schemaType' in data else None,
type=constants.BucketType(data.get('type', constants.BucketType.User)),
updated_at=ciso8601.parse_datetime(data['updatedAt']) if 'updatedAt' in data else None,
)
class _Organization(TypedDict, total=False):
createdAt: str
description: str
id: str
name: str
status: str
updatedAt: str
@dataclass(frozen=True)
class Organization:
created_at: Optional[datetime]
description: Optional[str]
id: Optional[str]
name: str
status: constants.OrganizationStatus
updated_at: Optional[datetime]
@classmethod
def from_json(cls, data: _Organization) -> Organization:
return cls(
created_at=ciso8601.parse_datetime(data['createdAt']) if 'createdAt' in data else None,
description=data.get('description'),
id=data.get('id'),
name=data['name'],
status=constants.OrganizationStatus(data.get('status', constants.OrganizationStatus.Active)),
updated_at=ciso8601.parse_datetime(data['updatedAt']) if 'updatedAt' in data else None,
)
class QueryOptions:
"""Query options."""
profilers: Optional[List[str]]
profiler_callback: Optional[Callable[[FluxRecord], Any]]
def __init__(
self,
profilers: Optional[List[str]] = None,
profiler_callback: Optional[Callable[[FluxRecord], Any]] = None,
) -> None:
"""
Initialize query options.
:param profilers: list of enabled flux profilers
:param profiler_callback: callback function return profilers (FluxRecord)
"""
self.profilers = profilers
self.profiler_callback = profiler_callback
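# Illustrative example (assumption: "query" and "operator" are Flux profiler names
# supported by the target InfluxDB server; substitute whichever profilers apply):
#
#     opts = QueryOptions(profilers=['query', 'operator'],
#                         profiler_callback=lambda record: print(record))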
|
from unittest import TestCase
from webassets.filter import register_filter
from webassets.test import TempEnvironmentHelper
from webassets_webpack import Webpack
register_filter(Webpack)
class WebpackFilterTestCase(TempEnvironmentHelper, TestCase):
default_files = {
'main.js': """var odds = evens.map(v => v + 1)"""
}
def setUp(self):
super(WebpackFilterTestCase, self).setup()
def test_webpack_filter(self):
self.env.config['WEBPACK_BIN'] = './node_modules/.bin/webpack'
self.env.config['WEBPACK_CONFIG'] = './webpack.config.js'
self.env.config['WEBPACK_TEMP'] = 'temp.js'
# if not find_executable('webpack'):
# raise SkipTest()
self.mkbundle('main.js', filters='webpack',
output='bundle.js').build()
# print(self.get('bundle.js'))
with open("bundle-read.js","w+") as read_file:
read_file.write(self.get('bundle.js'))
|
# Copyright 2021 The WAX-ML Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import jax.numpy as jnp
from wax.modules import VMap
from wax.unroll import unroll
def test_vmap_without_prng_key():
"""Test that VMap module work without PRNG key specified"""
x = jnp.arange(10).reshape(2, 5)
def outer_fun(x):
def fun(x):
return x
return VMap(fun)(x)
unroll(outer_fun)(x)
|
from enum import Enum
class ColorSpace(Enum):
BGR = 1
RGB = 2
HSV = 3
|
import streamlit as st
from WebScraping.raiffeisen import get_raiffaisen_balance
from WebScraping.flatex import get_flatex_balance
from WebScraping.bitpanda import get_bitpanda_balance
from WebScraping.bank99 import get_bank99_balance
from WebScraping.dvag import get_dvag_balance
from mystreamlitapp import render_web_data, render_df
from API.n26 import get_n26_balance
def main():
# call functions
    total_flatex_value, absolute_delta_day_before = get_flatex_balance()
    total_raiffeisen_giro_value, total_raiffeisen_creditcard_value = get_raiffaisen_balance()
    n26_balance, n26_last_transaction = get_n26_balance()
    total_bank99_balance = get_bank99_balance()
    #total_dvag_value = get_dvag_balance()
    #get_bitpanda_balance()
    # render overview of account balances
    render_web_data(
        total_flatex_value=total_flatex_value,
        absolute_delta_day_before=absolute_delta_day_before,
        total_raiffeisen_giro_value=total_raiffeisen_giro_value,
        total_raiffeisen_creditcard_value=total_raiffeisen_creditcard_value,
        n26_balance=n26_balance,
        n26_last_transaction=n26_last_transaction,
        total_bank99_balance=total_bank99_balance,
        total_dvag_value=0,
    )
# render dataframe from money control
render_df('../../data/MoneyControl/MoneyControl-CSVExport_2022-03-18.csv')
# execute main function
if __name__ == "__main__":
main()
|
import sys
import re
import string
import os
import numpy as np
import codecs
from collections import defaultdict
from sklearn.naive_bayes import GaussianNB
from sklearn.model_selection import KFold
from typing import Sequence
import time
# From scikit learn that got words from:
# http://ir.dcs.gla.ac.uk/resources/linguistic_utils/stop_words
# ENGLISH_STOP_WORDS lives in sklearn.feature_extraction.text in current scikit-learn;
# fall back to the old location for older releases.
try:
    from sklearn.feature_extraction.text import ENGLISH_STOP_WORDS
except ImportError:
    from sklearn.feature_extraction.stop_words import ENGLISH_STOP_WORDS
class defaultintdict(dict):
"""
Behaves exactly like defaultdict(int) except d['foo'] does NOT
add 'foo' to dictionary d. (Booo for that default behavior in
defaultdict!)
"""
def __init__(self):
self._factory=int
super().__init__()
def __missing__(self, key):
return 0
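# Behaviour sketch (illustration only): unlike collections.defaultdict(int),
# reading a missing key returns 0 without inserting it:
#     d = defaultintdict()
#     d['absent']        # -> 0
#     'absent' in d      # -> False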
def filelist(root) -> Sequence[str]:
"""Return a fully-qualified list of filenames under root directory; sort names alphabetically."""
allfiles = []
for path, subdirs, files in os.walk(root):
for name in files:
allfiles.append(os.path.join(path, name))
return sorted(allfiles)
def get_text(filename:str) -> str:
"""
Load and return the text of a text file, assuming latin-1 encoding as that
is what the BBC corpus uses. Use codecs.open() function not open().
"""
f = codecs.open(filename, encoding='latin-1', mode='r')
s = f.read()
f.close()
return s
def words(text:str) -> Sequence[str]:
"""
Given a string, return a list of words normalized as follows.
Split the string to make words first by using regex compile() function
and string.punctuation + '0-9\\r\\t\\n]' to replace all those
char with a space character.
Split on space to get word list.
Ignore words < 3 char long.
Lowercase all words
Remove English stop words
"""
ctrl_chars = '\x00-\x1f'
regex = re.compile(r'[' + ctrl_chars + string.punctuation + '0-9\r\t\n]')
nopunct = regex.sub(" ", text) # delete stuff but leave at least a space to avoid clumping together
words = nopunct.split(" ")
words = [w for w in words if len(w) > 2] # ignore a, an, to, at, be, ...
words = [w.lower() for w in words]
words = [w for w in words if w not in ENGLISH_STOP_WORDS]
return words
def load_docs(docs_dirname:str) -> Sequence[Sequence]:
"""
Load all .txt files under docs_dirname and return a list of word lists, one per doc.
Ignore empty and non ".txt" files.
"""
    # Sketch implementation following the docstring: tokenize every non-empty
    # .txt file found under docs_dirname with words().
    docs = []
    for filename in filelist(docs_dirname):
        if not filename.endswith('.txt'):
            continue
        wordlist = words(get_text(filename))
        if len(wordlist) > 0:
            docs.append(wordlist)
    return docs
def vocab(neg:Sequence[Sequence], pos:Sequence[Sequence]) -> dict:
"""
Given neg and pos lists of word lists, construct a mapping from word to word index.
Use index 0 to mean unknown word, '__unknown__'. The real words start from index one.
The words should be sorted so the first vocabulary word is index one.
The length of the dictionary is |uniquewords|+1 because of "unknown word".
|V| is the length of the vocabulary including the unknown word slot.
Sort the unique words in the vocab alphabetically so we standardize which
word is associated with which word vector index.
E.g., given neg = [['hi']] and pos=[['mom']], return:
V = {'__unknown__':0, 'hi':1, 'mom:2}
and so |V| is 3
"""
    # Sketch implementation: index 0 is the unknown-word slot, real words are
    # sorted alphabetically and numbered from 1.
    V = defaultintdict()
    V['__unknown__'] = 0
    uniquewords = sorted({w for doc in neg for w in doc} | {w for doc in pos for w in doc})
    for i, w in enumerate(uniquewords, start=1):
        V[w] = i
    return V
def vectorize(V:dict, docwords:Sequence) -> np.ndarray:
"""
Return a row vector (based upon V) for docwords. The first element of the
returned vector is the count of unknown words. So |V| is |uniquewords|+1.
"""
    # Sketch implementation: a |V|-long count vector; words missing from V map to
    # index 0 because defaultintdict returns 0 for unknown keys.
    vec = np.zeros((len(V),), dtype=np.int64)
    for w in docwords:
        vec[V[w]] += 1
    return vec
def vectorize_docs(docs:Sequence, V:dict) -> np.ndarray:
"""
    Return a matrix where each row represents a document's word vector.
    Each column represents a single word feature. There are |V| columns
    (|uniquewords|+1) because the slot in position 0 is reserved for the unknown word.
    Invoke vectorize(V, docwords) to vectorize each doc for each row of the matrix.
    :param docs: list of word lists, one per doc
    :param V: Mapping from word to index; e.g., first word -> index 1
    :return: numpy 2D matrix with word counts per doc: ndocs x nwords
    """
    # Sketch implementation: one vectorized row per document.
    D = np.zeros((len(docs), len(V)), dtype=np.int64)
    for i, docwords in enumerate(docs):
        D[i] = vectorize(V, docwords)
    return D
class NaiveBayes621:
"""
This object behaves like a sklearn model with fit(X,y) and predict(X) functions.
Limited to two classes, 0 and 1 in the y target.
"""
def fit(self, X:np.ndarray, y:np.ndarray) -> None:
"""
Given 2D word vector matrix X, one row per document, and 1D binary vector y
train a Naive Bayes classifier assuming a multinomial distribution for
p(w,c), the probability of word exists in class c. p(w,c) is estimated by
the number of times w occurs in all documents of class c divided by the
total words in class c. p(c) is estimated by the number of documents
in c divided by the total number of documents.
The first column of X is a column of zeros to represent missing vocab words.
"""
        # Sketch implementation (assumption: Laplace/add-one smoothing, which the
        # docstring does not mandate but which keeps unseen words from zeroing the product).
        ndocs = len(y)
        nwords = X.shape[1]
        self.p_c = np.array([np.sum(y == 0) / ndocs, np.sum(y == 1) / ndocs])
        counts = np.vstack([X[y == 0].sum(axis=0), X[y == 1].sum(axis=0)])
        self.p_w_c = (counts + 1) / (counts.sum(axis=1, keepdims=True) + nwords)
def predict(self, X:np.ndarray) -> np.ndarray:
"""
Given 2D word vector matrix X, one row per document, return binary vector
indicating class 0 or 1 for each row of X.
"""
        # Sketch implementation: argmax over the per-class log-posterior for each row.
        log_prior = np.log(self.p_c)
        log_likelihood = X @ np.log(self.p_w_c).T  # shape: ndocs x 2
        return np.argmax(log_likelihood + log_prior, axis=1)
def kfold_CV(model, X:np.ndarray, y:np.ndarray, k=4) -> np.ndarray:
"""
Run k-fold cross validation using model and 2D word vector matrix X and binary
y class vector. Return a 1D numpy vector of length k with the accuracies, the
    ratios of correctly-identified documents to the total number of documents. You
    can use KFold from sklearn to get the splits, but loop over the splits yourself
    to implement the cross-fold testing. Pass random_state=999 to KFold so we always
    get the same sequence (not something you would do in practice) and the student
    evaluation unit tests stay consistent. Shuffle the elements before walking the folds.
    """
    # Sketch implementation following the docstring above.
    accuracies = []
    kf = KFold(n_splits=k, shuffle=True, random_state=999)
    for train_index, test_index in kf.split(X):
        model.fit(X[train_index], y[train_index])
        y_pred = model.predict(X[test_index])
        accuracies.append(np.mean(y_pred == y[test_index]))
    return np.array(accuracies)
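# --- Usage sketch (illustration only; 'review_data/neg' and 'review_data/pos' are
# --- hypothetical directories of .txt documents, not paths from this project):
#
#   neg = load_docs('review_data/neg')
#   pos = load_docs('review_data/pos')
#   V = vocab(neg, pos)
#   X = np.vstack([vectorize_docs(neg, V), vectorize_docs(pos, V)])
#   y = np.concatenate([np.zeros(len(neg), dtype=int), np.ones(len(pos), dtype=int)])
#   accuracies = kfold_CV(NaiveBayes621(), X, y, k=4)
#   print(accuracies.mean())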
|
from setuptools import setup
# execfile() no longer exists on Python 3; exec(open(...).read()) works on 2 and 3.
with open('respite/version.py') as f:
    exec(f.read())
setup(
name = 'django-respite',
version = __version__,
description = "Respite conforms Django to Representational State Transfer (REST)",
long_description = open('README.rst').read(),
author = "Johannes Gorset",
author_email = "jgorset@gmail.com",
url = "http://github.com/jgorset/django-respite",
packages = ['respite', 'respite.lib', 'respite.serializers', 'respite.urls', 'respite.views', 'respite.utils']
)
|
"""
Copyright (c) 2020 COTOBA DESIGN, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import unittest
import os
from programy.config.file.xml_file import XMLConfigurationFile
from programy.clients.events.console.config import ConsoleConfiguration
from programy.utils.substitutions.substitues import Substitutions
from programytest.config.file.base_file_tests import ConfigurationBaseFileTests
class XMLConfigurationFileTests(ConfigurationBaseFileTests):
    @unittest.skip('Requires standalone execution')
def test_get_methods(self):
config_data = XMLConfigurationFile()
self.assertIsNotNone(config_data)
configuration = config_data.load_from_text("""
<root>
<brain>
<overrides>
<allow_system_aiml>true</allow_system_aiml>
<allow_learn_aiml>true</allow_learn_aiml>
<allow_learnf_aiml>true</allow_learnf_aiml>
</overrides>
</brain>
</root>
""", ConsoleConfiguration(), ".")
self.assertIsNotNone(configuration)
section = config_data.get_section("brainx")
self.assertIsNone(section)
section = config_data.get_section("brain")
self.assertIsNotNone(section)
child_section = config_data.get_section("overrides", section)
self.assertIsNotNone(child_section)
keys = list(config_data.get_child_section_keys("overrides", section))
self.assertIsNotNone(keys)
self.assertEqual(3, len(keys))
self.assertTrue("allow_system_aiml" in keys)
self.assertTrue("allow_learn_aiml" in keys)
self.assertTrue("allow_learnf_aiml" in keys)
self.assertIsNone(config_data.get_child_section_keys("missing", section))
self.assertEqual(True, config_data.get_option(child_section, "allow_system_aiml"))
self.assertEqual(True, config_data.get_option(child_section, "missing", missing_value=True))
self.assertEqual(True, config_data.get_bool_option(child_section, "allow_system_aiml"))
self.assertEqual(False, config_data.get_bool_option(child_section, "other_value"))
self.assertEqual(0, config_data.get_int_option(child_section, "other_value"))
    @unittest.skip('Requires standalone execution')
def test_load_from_file(self):
xml = XMLConfigurationFile()
self.assertIsNotNone(xml)
configuration = xml.load_from_file(os.path.dirname(__file__) + os.sep + "test_xml.xml", ConsoleConfiguration(), ".")
self.assertIsNotNone(configuration)
self.assert_configuration(configuration)
def test_load_from_text_multis_one_value(self):
xml = XMLConfigurationFile()
self.assertIsNotNone(xml)
configuration = xml.load_from_text("""
<root>
<bot>
<brain>bot1</brain>
</bot>
</root>
""", ConsoleConfiguration(), ".")
self.assertIsNotNone(configuration)
self.assertEqual(1, len(configuration.client_configuration.configurations[0].configurations))
    @unittest.skip('Requires standalone execution')
def test_load_from_text_multis_multiple_values(self):
xml = XMLConfigurationFile()
self.assertIsNotNone(xml)
configuration = xml.load_from_text("""
<root>
<console>
<bot>bot</bot>
</console>
<bot>
<brain>bot1 bot2</brain>
</bot>
</root>
""", ConsoleConfiguration(), ".")
self.assertIsNotNone(configuration)
self.assertEqual(2, len(configuration.client_configuration.configurations[0].configurations))
    @unittest.skip('Requires standalone execution')
def test_load_from_text(self):
xml = XMLConfigurationFile()
self.assertIsNotNone(xml)
configuration = xml.load_from_text("""
<root>
<console>
<bot>bot</bot>
<prompt>>>></prompt>
<scheduler>
<name>Scheduler1</name>
<debug_level>50</debug_level>
<add_listeners>false</add_listeners>
<remove_all_jobs>false</remove_all_jobs>
</scheduler>
<storage>
<entities>
<users>sql</users>
<linked_accounts>sql</linked_accounts>
<links>sql</links>
<properties>file</properties>
<conversations>file</conversations>
<categories>file</categories>
<maps>file</maps>
<sets>file</sets>
<rdf>file</rdf>
<denormal>file</denormal>
<normal>file</normal>
<gender>file</gender>
<person>file</person>
<person2>file</person2>
<spelling_corpus>file</spelling_corpus>
<license_keys>file</license_keys>
<nodes>file</nodes>
<binaries>file</binaries>
<braintree>file</braintree>
<preprocessors>file</preprocessors>
<postprocessors>file</postprocessors>
<regex_templates>file</regex_templates>
<usergroups>file</usergroups>
<learnf>file</learnf>
</entities>
<stores>
<sql>
<type>sql</type>
<config>
<url>sqlite:///:memory</url>
<echo>false</echo>
<encoding>utf-8</encoding>
<create_db>true</create_db>
<drop_all_first>true</drop_all_first>
</config>
</sql>
<mongo>
<type>mongo</type>
<config>
<url>mongodb://localhost:27017/</url>
<database>programy</database>
<drop_all_first>true</drop_all_first>
</config>
</mongo>
<redis>
<type>redis</type>
<config>
<host>localhost</host>
<port>6379</port>
<password>null</password>
<db>0</db>
<prefix>programy</prefix>
<drop_all_first>True</drop_all_first>
</config>
</redis>
<file>
<type>file</type>
<config>
<category_storage>
<files>./storage/categories</files>
</category_storage>
<conversations_storage>
<files>./storage/conversations</files>
</conversations_storage>
<sets_storage>
<files>./storage/sets</files>
<extension>.txt</extension>
<directories>false</directories>
</sets_storage>
<maps_storage>
<files>./storage/maps</files>
<extension>.txt</extension>
<directories>false</directories>
</maps_storage>
<regex_templates>
<files>./storage/regex</files>
</regex_templates>
<lookups_storage>
<files>./storage/lookups</files>
<extension>.txt</extension>
<directories>false</directories>
</lookups_storage>
<properties_storage>
<file>./storage/properties.txt</file>
</properties_storage>
<defaults_storage>
<file>./storage/defaults.txt</file>
</defaults_storage>
<rdf_storage>
<files>./storage/rdfs</files>
<extension>.txt</extension>
<directories>true</directories>
</rdf_storage>
<spelling_corpus>
<file>./storage/spelling/corpus.txt</file>
</spelling_corpus>
<license_keys>
<file>./storage/license.keys</file>
</license_keys>
<nodes>
<files>./storage/nodes</files>
</nodes>
<binaries>
<files>./storage/binaries</files>
</binaries>
<braintree>
<file>./storage/braintree/braintree.xml</file>
<format>xml</format>
</braintree>
<preprocessors>
<file>./storage/processing/preprocessors.txt</file>
</preprocessors>
<postprocessors>
<file>./storage/processing/postprocessing.txt</file>
</postprocessors>
<usergroups>
<files>./storage/security/usergroups.txt</files>
</usergroups>
<learnf>
<files>./storage/categories/learnf</files>
</learnf>
</config>
</file>
</stores>
<logger>
<type>logger</type>
<config>
<conversation_logger>conversation</conversation_logger>
</config>
</logger>
</storage>
</console>
<voice>
<license_keys>$BOT_ROOT/config/license.keys</license_keys>
<tts>osx</tts>
<stt>azhang</stt>
<osx>
<classname>talky.clients.voice.tts.osxsay.OSXSayTextToSpeach</classname>
</osx>
<pytts>
<classname>talky.clients.voice.tts.pyttssay.PyTTSSayTextToSpeach</classname>
<rate_adjust>10</rate_adjust>
</pytts>
<azhang>
<classname>talky.clients.voice.stt.azhang.AnthonyZhangSpeechToText</classname>
<ambient_adjust>3</ambient_adjust>
<service>ibm</service>
</azhang>
</voice>
<rest>
<host>0.0.0.0</host>
<port>8989</port>
<debug>false</debug>
<workers>4</workers>
<license_keys>$BOT_ROOT/config/license.keys</license_keys>
</rest>
<webchat>
<host>0.0.0.0</host>
<port>8090</port>
<debug>false</debug>
<license_keys>$BOT_ROOT/config/license.keys</license_keys>
<api>/api/web/v1.0/ask</api>
</webchat>
<twitter>
<polling>true</polling>
<polling_interval>49</polling_interval>
<streaming>false</streaming>
<use_status>true</use_status>
<use_direct_message>true</use_direct_message>
<auto_follow>true</auto_follow>
<storage>file</storage>
<welcome_message>Thanks for following me, send me a message and I'll try and help</welcome_message>
<license_keys>file</license_keys>
</twitter>
<xmpp>
<server>talk.google.com</server>
<port>5222</port>
<xep_0030>true</xep_0030>
<xep_0004>true</xep_0004>
<xep_0060>true</xep_0060>
<xep_0199>true</xep_0199>
<license_keys>file</license_keys>
</xmpp>
<socket>
<host>127.0.0.1</host>
<port>9999</port>
<queue>5</queue>
<debug>true</debug>
<license_keys>file</license_keys>
</socket>
<telegram>
<unknown_command>Sorry, that is not a command I have been taught yet!</unknown_command>
<license_keys>file</license_keys>
</telegram>
<facebook>
<host>127.0.0.1</host>
<port>5000</port>
<debug>false</debug>
<license_keys>file</license_keys>
</facebook>
<twilio>
<host>127.0.0.1</host>
<port>5000</port>
<debug>false</debug>
<license_keys>file</license_keys>
</twilio>
<slack>
<polling_interval>1</polling_interval>
<license_keys>file</license_keys>
</slack>
<viber>
<name>Servusai</name>
<avatar>http://viber.com/avatar.jpg</avatar>
<license_keys>file</license_keys>
</viber>
<line>
<host>127.0.0.1</host>
<port>8084</port>
<debug>false</debug>
<license_keys>file</license_keys>
</line>
<kik>
<bot_name>servusai</bot_name>
<webhook>https://93638f7a.ngrok.io/api/kik/v1.0/ask</webhook>
<host>127.0.0.1</host>
<port>8082</port>
<debug>false</debug>
<license_keys>file</license_keys>
</kik>
<bot>
<brain>brain</brain>
<initial_question>Hi, how can I help you today?</initial_question>
<initial_question_srai>YINITIALQUESTION</initial_question_srai>
<default_response>Sorry, I don't have an answer for that!</default_response>
<default_response_srai>YEMPTY</default_response_srai>
<empty_string>YEMPTY</empty_string>
<exit_response>So long, and thanks for the fish!</exit_response>
<exit_response_srai>YEXITRESPONSE</exit_response_srai>
<override_properties>true</override_properties>
<max_question_recursion>1000</max_question_recursion>
<max_question_timeout>60</max_question_timeout>
<max_search_depth>100</max_search_depth>
<max_search_timeout>60</max_search_timeout>
<spelling>
<load>true</load>
<classname>programy.spelling.norvig.NorvigSpellingChecker</classname>
<alphabet>ABCDEFGHIJKLMNOPQRSTUVWXYZ</alphabet>
<check_before>true</check_before>
<check_and_retry>true</check_and_retry>
</spelling>
<conversations>
<save>true</save>
<load>false</load>
<max_histories>100</max_histories>
<restore_last_topic>false</restore_last_topic>
<initial_topic>TOPIC1</initial_topic>
<empty_on_start>false</empty_on_start>
</conversations>
</bot>
<brain>
<overrides>
<allow_system_aiml>true</allow_system_aiml>
<allow_learn_aiml>true</allow_learn_aiml>
<allow_learnf_aiml>true</allow_learnf_aiml>
</overrides>
<defaults>
<default-get>unknown</default-get>
<default-property>unknown</default-property>
<default-map>unknown</default-map>
<learnf-path>file</learnf-path>
</defaults>
<binaries>
<save_binary>true</save_binary>
<load_binary>true</load_binary>
<load_aiml_on_binary_fail>true</load_aiml_on_binary_fail>
</binaries>
<braintree>
<create>true</create>
</braintree>
<services>
<REST>
<classname>programy.services.rest.GenericRESTService</classname>
<method>GET</method>
<host>0.0.0.0</host>
<port>8080</port>
</REST>
<Pannous>
<classname>programy.services.pannous.PannousService</classname>
<url>http://weannie.pannous.com/api</url>
</Pannous>
</services>
<security>
<authentication>
<classname>programy.security.authenticate.passthrough.BasicPassThroughAuthenticationService</classname>
<denied_srai>AUTHENTICATION_FAILED</denied_srai>
</authentication>
<authorisation>
<classname>programy.security.authorise.usergroupsauthorisor.BasicUserGroupAuthorisationService</classname>
<denied_srai>AUTHORISATION_FAILED</denied_srai>
<usergroups>
<storage>file</storage>
</usergroups>
</authorisation>
</security>
<oob>
<default>
<classname>programy.oob.defaults.default.DefaultOutOfBandProcessor</classname>
</default>
<alarm>
<classname>programy.oob.defaults.alarm.AlarmOutOfBandProcessor</classname>
</alarm>
<camera>
<classname>programy.oob.defaults.camera.CameraOutOfBandProcessor</classname>
</camera>
<clear>
<classname>programy.oob.defaults.clear.ClearOutOfBandProcessor</classname>
</clear>
<dial>
<classname>programy.oob.defaults.dial.DialOutOfBandProcessor</classname>
</dial>
<dialog>
<classname>programy.oob.defaults.dialog.DialogOutOfBandProcessor</classname>
</dialog>
<email>
<classname>programy.oob.defaults.email.EmailOutOfBandProcessor</classname>
</email>
<geomap>
<classname>programy.oob.defaults.map.MapOutOfBandProcessor</classname>
</geomap>
<schedule>
<classname>programy.oob.defaults.schedule.ScheduleOutOfBandProcessor</classname>
</schedule>
<search>
<classname>programy.oob.defaults.search.SearchOutOfBandProcessor</classname>
</search>
<sms>
<classname>programy.oob.defaults.sms.SMSOutOfBandProcessor</classname>
</sms>
<url>
<classname>programy.oob.defaults.url.URLOutOfBandProcessor</classname>
</url>
<wifi>
<classname>programy.oob.defaults.wifi.WifiOutOfBandProcessor</classname>
</wifi>
</oob>
<dynamic>
<variables>
<gettime>programy.dynamic.variables.datetime.GetTime</gettime>
</variables>
<sets>
<numeric>programy.dynamic.sets.numeric.IsNumeric</numeric>
<roman>programy.dynamic.sets.roman.IsRomanNumeral</roman>
</sets>
<maps>
<romantodec>programy.dynamic.maps.roman.MapRomanToDecimal</romantodec>
<dectoroman>programy.dynamic.maps.roman.MapDecimalToRoman</dectoroman>
</maps>
</dynamic>
</brain>
</root>
""", ConsoleConfiguration(), ".")
self.assertIsNotNone(configuration)
self.assert_configuration(configuration)
    @unittest.skip('Requires standalone execution')
def test_load_additionals(self):
xml = XMLConfigurationFile()
self.assertIsNotNone(xml)
configuration = xml.load_from_text("""
<root>
<console>
<bot>bot</bot>
</console>
<bot>
<brain>brain</brain>
</bot>
<brain>
<security>
<authentication>
<classname>programy.security.authenticate.passthrough.PassThroughAuthenticationService</classname>
<denied_srai>ACCESS_DENIED</denied_srai>
</authentication>
</security>
</brain>
</root>
""", ConsoleConfiguration(), ".")
self.assertIsNotNone(configuration)
auth_service = configuration.client_configuration.configurations[0].configurations[0].security.authentication
self.assertIsNotNone(auth_service)
self.assertEqual("ACCESS_DENIED", auth_service.denied_srai)
    @unittest.skip('Requires standalone execution')
def test_load_with_subs(self):
subs = Substitutions()
subs.add_substitute("$ALLOW_SYSTEM", True)
config_data = XMLConfigurationFile()
self.assertIsNotNone(config_data)
configuration = config_data.load_from_text("""
<root>
<brain>
<overrides>
<allow_system_aiml>true</allow_system_aiml>
<allow_learn_aiml>true</allow_learn_aiml>
<allow_learnf_aiml>true</allow_learnf_aiml>
</overrides>
</brain>
</root>
""", ConsoleConfiguration(), ".")
self.assertIsNotNone(configuration)
section = config_data.get_section("brainx")
self.assertIsNone(section)
section = config_data.get_section("brain")
self.assertIsNotNone(section)
child_section = config_data.get_section("overrides", section)
self.assertIsNotNone(child_section)
self.assertEqual(True, config_data.get_option(child_section, "allow_system_aiml"))
self.assertEqual(True, config_data.get_bool_option(child_section, "allow_system_aiml"))
self.assertEqual(False, config_data.get_bool_option(child_section, "other_value"))
|
from enum import Enum
from typing import Any, List
import h5py
import numpy as np
H5File = h5py.File
Group = h5py.Group
Dataset = h5py.Dataset
Array = np.ndarray
class InputChannel(Enum):
time = 'time'
perp = 'perp'
par = 'par'
ref = 'ref'
class DeltaAChannel(Enum):
time = 'time'
perp = 'perp'
par = 'par'
cd = 'cd'
class PumpStatus(Enum):
present = 'pump'
absent = 'nopump'
def subgroups(group: Group, fullpath=False) -> List[str]:
"""Returns the groups that are directly below the specified group as a list of strings
Args:
group (Group): The group whose subgroups you want to query
fullpath (bool): A boolean parameter indicating that the full paths of the
groups should be returned i.e. '/foo/bar/baz' vs. 'baz'
Note:
This may become a generator rather than a function in the future
Returns:
The names of any groups that are directly below the specified group
"""
subgroups = [key for key in group.keys() if isinstance(group[key], Group)]
if fullpath:
full_paths = [f'{group.name}/{s}' for s in subgroups]
subgroups = full_paths
return subgroups
def datasets(group: Group, fullpath=False) -> List[str]:
"""Returns the datasets that are directly below the specified group as a list of strings
Args:
group (Group): The group whose datasets you want to query
fullpath (bool): A boolean parameter indicating that the full paths of the
datasets should be returned i.e. '/spectrum6/FMO_dataset' vs. 'FMO_dataset'
Note:
This may become a generator in the future
Returns:
The names of the datasets contained in the group
"""
datasets = [k for k in group.keys() if isinstance(group[k], Dataset)]
if fullpath:
full_paths = [f'{group.name}/{d}' for d in datasets]
datasets = full_paths
return datasets
def add_name_attributes(file: H5File) -> None:
"""Adds the filename as an attribute to the root group, then adds the group or dataset
name as an attribute to the corresponding objects recursively
Args:
        file (H5File): The file to add attributes to
Note:
Not covered by tests yet
"""
filename = file.filename[0:-3]
file.attrs['name'] = filename
file.attrs['filename'] = filename
recursive_add_name_attribute(file)
return
def recursive_add_name_attribute(group: Group) -> None:
"""Adds the name of the group as an attribute called 'name' to each group or dataset
The Group.name and Dataset.name properties return the full path to the object, rather than
just the name of the object. Setting the name as an attribute of the object makes it trivial
to programmatically generate names of plots, log messages about certain objects, etc.
Args:
group (Group): The group to recursively add name attributes to
Note:
Not covered by tests yet
"""
for dataset_name in datasets(group):
group[dataset_name].attrs['name'] = dataset_name
for group_name in subgroups(group):
group[group_name].attrs['name'] = group_name
recursive_add_name_attribute(group[group_name])
return
def recursive_set_attribute(group: Group, attr: str, value: Any) -> None:
"""Sets the specified attribute to the provided value for all descendants
of the group (groups and datasets alike)
Args:
group (Group): The group whose descendants will have their attributes set
attr (str): The attribute to set
value (Any): The value to set the attribute to
Note:
Not covered by tests yet
"""
for d in datasets(group):
group[d].attrs[attr] = value
for sg in subgroups(group):
group[sg].attrs[attr] = value
recursive_set_attribute(group[sg], attr, value)
return
def copy_all_attributes(old_file: H5File, new_file: H5File) -> None:
"""Recursively copies all of the attributes from the old file to the new file
Args:
        old_file (H5File): the source file
        new_file (H5File): the destination file
Note:
The two files must have the same group and dataset structure
Note:
Not covered by tests yet
"""
def copy_func(_: str, src: Group) -> None:
src_path = src.name
for item in src.attrs.keys():
if item == 'filename':
continue
new_file[src_path].attrs[item] = src.attrs[item]
return
old_file.visititems(copy_func)
recursive_set_attribute(new_file, 'filename', new_file.filename[0:-3])
return
def count_signal_datasets(group: Group) -> int:
"""Recursively counts the number of signal, i.e. not time, datasets in the group
Args:
group (Group): A group potentially containing a mix of datasets and subgroups
Returns:
The total number of signal datasets contained by the group at any depth
"""
count = 0
for d_name in datasets(group):
if 'time' in d_name:
continue
count += 1
for g_name in subgroups(group):
count += count_signal_datasets(group[g_name])
return count
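if __name__ == "__main__":
    # Usage sketch (illustration only): exercise the helpers on a small in-memory
    # file; the group/dataset names below are made up for demonstration.
    with h5py.File("demo.h5", "w", driver="core", backing_store=False) as f:
        f.create_group("spectrum1")
        f["spectrum1"].create_dataset("time", data=np.arange(5))
        f["spectrum1"].create_dataset("perp", data=np.zeros(5))
        print(subgroups(f))              # ['spectrum1']
        print(datasets(f["spectrum1"]))  # ['perp', 'time']
        print(count_signal_datasets(f))  # 1 -- the 'time' dataset is excluded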
|
module_id = 'gl'
report_name = 'tb_bf_maj'
table_name = 'gl_totals'
report_type = 'bf_cf'
groups = []
groups.append([
'code', # dim
['code_maj', []], # grp_name, filter
])
include_zeros = True
# allow_select_loc_fun = True
expand_subledg = True
columns = [
['op_date', 'op_date', 'Op date', 'DTE', 85, None, 'Total:'],
['cl_date', 'cl_date', 'Cl date', 'DTE', 85, None, False],
['code_maj', 'code_maj', 'Maj', 'TEXT', 80, None, False],
['op_bal', 'op_bal', 'Op bal', 'DEC', 100, None, True],
['mvmt', 'cl_bal - op_bal', 'Mvmt', 'DEC', 100, None, True],
['cl_bal', 'cl_bal', 'Cl bal', 'DEC', 100, None, True],
]
|
#!/usr/bin/env python
from distutils.core import setup
setup(name="Systori", packages=["systori"])
|
# Copyright MelisaDev 2022 - Present
# Full MIT License can be found in `LICENSE.txt` at the project root.
from __future__ import annotations
from typing import Type, TypeVar, Any
def remove_none(obj):
if isinstance(obj, list):
return [i for i in obj if i is not None]
elif isinstance(obj, tuple):
return tuple(i for i in obj if i is not None)
elif isinstance(obj, set):
return obj - {None}
elif isinstance(obj, dict):
return {k: v for k, v in obj.items() if None not in (k, v)}
T = TypeVar("T")
def try_enum(cls: Type[T], val: Any) -> T:
try:
return cls(val)
except (KeyError, TypeError, AttributeError, ValueError):
return val
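if __name__ == "__main__":
    # Usage sketch (illustration only, not part of the original module).
    from enum import Enum

    class _Color(Enum):  # hypothetical enum used only for this demo
        RED = 1

    print(remove_none([1, None, 2]))         # [1, 2]
    print(remove_none({"a": 1, "b": None}))  # {'a': 1}
    print(try_enum(_Color, 1))               # _Color.RED
    print(try_enum(_Color, 99))              # 99 (falls back to the raw value)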
|
# To build out the data you'll need to jump into the Django shell
#
# $ python manage.py shell
#
# and run the build script with
#
# $ from data.v2.build import build_all
# $ build_all()
#
# Each time the build script is run it will iterate over each table in the database,
# wipe it and rewrite each row using the data found in data/v2/csv.
# If you don't need all of the data just go into data/v2/build.py and
# just call one of the build functions found in this script
# support python3
from __future__ import print_function
import csv
import os
import os.path
import re
import json
from django.db import connection
from pokemon_v2.models import * # NOQA
# why this way? how about use `__file__`
DATA_LOCATION = 'data/v2/csv/'
DATA_LOCATION2 = os.path.join(os.path.dirname(__file__), 'csv')
GROUP_RGX = r"\[(.*?)\]\{(.*?)\}"
SUB_RGX = r"\[.*?\]\{.*?\}"
db_cursor = connection.cursor()
DB_VENDOR = connection.vendor
imageDir = os.getcwd() + '/data/v2/sprites/'
resourceImages = []
for root, dirs, files in os.walk(imageDir):
for file in files:
resourceImages.append(os.path.join(root.replace(imageDir, ""), file))
mediaDir = '/media/sprites/{0}'
def filePathOrNone(fileName):
return mediaDir.format(fileName) if fileName in resourceImages else None
def with_iter(context, iterable=None):
if iterable is None:
iterable = context
with context:
for value in iterable:
yield value
def load_data(fileName):
# with_iter closes the file when it has finished
return csv.reader(with_iter(open(DATA_LOCATION + fileName, 'rt')), delimiter=',')
def clear_table(model):
table_name = model._meta.db_table
model.objects.all().delete()
print('building ' + table_name)
# Reset DB auto increments to start at 1
if DB_VENDOR == 'sqlite':
db_cursor.execute("DELETE FROM sqlite_sequence WHERE name = " + "'" + table_name + "'")
else:
db_cursor.execute(
"SELECT setval(pg_get_serial_sequence(" + "'" + table_name + "'" + ",'id'), 1, false);")
def process_csv(file_name, data_to_models):
daten = load_data(file_name)
next(daten, None) # skip header
for data in daten:
for model in data_to_models(data):
model.save()
def build_generic(model_classes, file_name, data_to_models):
for model_class in model_classes:
clear_table(model_class)
process_csv(file_name, data_to_models)
def scrubStr(str):
"""
The purpose of this function is to scrub the weird template mark-up out of strings
that Veekun is using for their pokedex.
Example:
        []{move:dragon-tail} will affect the opponent's [HP]{mechanic:hp}.
    Becomes:
        dragon tail will affect the opponent's HP.
If you find this results in weird strings please take a stab at improving or re-writing.
"""
groups = re.findall(GROUP_RGX, str)
for group in groups:
if group[0]:
sub = group[0]
else:
sub = group[1].split(":")[1]
sub = sub.replace("-", " ")
str = re.sub(SUB_RGX, sub, str, 1)
return str
##############
# LANGUAGE #
##############
def _build_languages():
def data_to_language(info):
yield Language(
id=int(info[0]),
iso639=info[1],
iso3166=info[2],
name=info[3],
official=bool(int(info[4])),
order=info[5],
)
build_generic((Language,), 'languages.csv', data_to_language)
def build_languages():
_build_languages()
clear_table(LanguageName)
data = load_data('language_names.csv')
for index, info in enumerate(data):
if index > 0:
languageName = LanguageName(
language=Language.objects.get(pk=int(info[0])),
local_language=Language.objects.get(pk=int(info[1])),
name=info[2]
)
languageName.save()
############
# REGION #
############
def build_regions():
clear_table(Region)
data = load_data('regions.csv')
for index, info in enumerate(data):
if index > 0:
model = Region(
id=int(info[0]),
name=info[1]
)
model.save()
clear_table(RegionName)
data = load_data('region_names.csv')
for index, info in enumerate(data):
if index > 0:
model = RegionName (
region = Region.objects.get(pk = int(info[0])),
language = Language.objects.get(pk = int(info[1])),
name = info[2]
)
model.save()
################
# GENERATION #
################
def build_generations():
clear_table(Generation)
data = load_data('generations.csv')
for index, info in enumerate(data):
if index > 0:
model = Generation (
id = int(info[0]),
region = Region.objects.get(pk = int(info[1])),
name = info[2]
)
model.save()
clear_table(GenerationName)
data = load_data('generation_names.csv')
for index, info in enumerate(data):
if index > 0:
model = GenerationName (
generation = Generation.objects.get(pk = int(info[0])),
language = Language.objects.get(pk = int(info[1])),
name = info[2]
)
model.save()
#############
# VERSION #
#############
def build_versions():
clear_table(VersionGroup)
data = load_data('version_groups.csv')
for index, info in enumerate(data):
if index > 0:
versionGroup = VersionGroup (
id = int(info[0]),
name = info[1],
generation = Generation.objects.get(pk = int(info[2])),
order = int(info[3])
)
versionGroup.save()
clear_table(VersionGroupRegion)
data = load_data('version_group_regions.csv')
for index, info in enumerate(data):
if index > 0:
versionGroupRegion = VersionGroupRegion (
version_group = VersionGroup.objects.get(pk = int(info[0])),
region = Region.objects.get(pk = int(info[1])),
)
versionGroupRegion.save()
clear_table(Version)
data = load_data('versions.csv')
for index, info in enumerate(data):
if index > 0:
version = Version (
id = int(info[0]),
version_group = VersionGroup.objects.get(pk = int(info[1])),
name = info[2]
)
version.save()
clear_table(VersionName)
data = load_data('version_names.csv')
for index, info in enumerate(data):
if index > 0:
versionName = VersionName (
version = Version.objects.get(pk = int(info[0])),
language = Language.objects.get(pk = int(info[1])),
name = info[2]
)
versionName.save()
##################
# DAMAGE CLASS #
##################
def build_damage_classes():
clear_table(MoveDamageClass)
data = load_data('move_damage_classes.csv')
for index, info in enumerate(data):
if index > 0:
model = MoveDamageClass (
id = int(info[0]),
name = info[1]
)
model.save()
clear_table(MoveDamageClassName)
clear_table(MoveDamageClassDescription)
data = load_data('move_damage_class_prose.csv')
for index, info in enumerate(data):
if index > 0:
model_name = MoveDamageClassName (
move_damage_class = MoveDamageClass.objects.get(pk = int(info[0])),
language = Language.objects.get(pk = int(info[1])),
name = info[2]
)
model_name.save()
model_description = MoveDamageClassDescription (
move_damage_class = MoveDamageClass.objects.get(pk = int(info[0])),
language = Language.objects.get(pk = int(info[1])),
description = info[3]
)
model_description.save()
###########
# STATS #
###########
def build_stats():
clear_table(Stat)
data = load_data('stats.csv')
for index, info in enumerate(data):
if index > 0:
stat = Stat (
id = int(info[0]),
move_damage_class = MoveDamageClass.objects.get(pk = int(info[1])) if info[1] != '' else None,
name = info[2],
is_battle_only = bool(int(info[3])),
game_index = int(info[4]) if info[4] else 0,
)
stat.save()
clear_table(StatName)
data = load_data('stat_names.csv')
for index, info in enumerate(data):
if index > 0:
statName = StatName (
stat = Stat.objects.get(pk = int(info[0])),
language = Language.objects.get(pk = int(info[1])),
name = info[2]
)
statName.save()
clear_table(PokeathlonStat)
data = load_data('pokeathlon_stats.csv')
for index, info in enumerate(data):
if index > 0:
stat = PokeathlonStat (
id = int(info[0]),
name = info[1],
)
stat.save()
clear_table(PokeathlonStatName)
data = load_data('pokeathlon_stat_names.csv')
for index, info in enumerate(data):
if index > 0:
statName = PokeathlonStatName (
pokeathlon_stat = PokeathlonStat.objects.get(pk = int(info[0])),
language = Language.objects.get(pk = int(info[1])),
name = info[2]
)
statName.save()
# ###############
# # ABILITIES #
# ###############
def build_abilities():
clear_table(Ability)
data = load_data('abilities.csv')
for index, info in enumerate(data):
if index > 0:
ability = Ability (
id = int(info[0]),
name = info[1],
generation = Generation.objects.get(pk = int(info[2])),
is_main_series = bool(int(info[3]))
)
ability.save()
clear_table(AbilityName)
data = load_data('ability_names.csv')
for index, info in enumerate(data):
if index > 0:
abilityName = AbilityName (
ability = Ability.objects.get(pk = int(info[0])),
language = Language.objects.get(pk = int(info[1])),
name = info[2]
)
abilityName.save()
clear_table(AbilityChange)
data = load_data('ability_changelog.csv')
for index, info in enumerate(data):
if index > 0:
abilityName = AbilityChange (
id = int(info[0]),
ability = Ability.objects.get(pk = int(info[1])),
version_group = VersionGroup.objects.get(pk = int(info[2]))
)
abilityName.save()
clear_table(AbilityEffectText)
data = load_data('ability_prose.csv')
for index, info in enumerate(data):
if index > 0:
abilityDesc = AbilityEffectText (
ability = Ability.objects.get(pk = int(info[0])),
language = Language.objects.get(pk = int(info[1])),
short_effect = scrubStr(info[2]),
effect = scrubStr(info[3])
)
abilityDesc.save()
clear_table(AbilityChangeEffectText)
data = load_data('ability_changelog_prose.csv')
for index, info in enumerate(data):
if index > 0:
abilityChangeEffectText = AbilityChangeEffectText (
ability_change = AbilityChange.objects.get(pk = int(info[0])),
language = Language.objects.get(pk = int(info[1])),
effect = scrubStr(info[2])
)
abilityChangeEffectText.save()
clear_table(AbilityFlavorText)
data = load_data('ability_flavor_text.csv')
for index, info in enumerate(data):
if index > 0:
abilityFlavorText = AbilityFlavorText (
ability = Ability.objects.get(pk = int(info[0])),
version_group = VersionGroup.objects.get(pk = int(info[1])),
language = Language.objects.get(pk = int(info[2])),
flavor_text = info[3]
)
abilityFlavorText.save()
####################
# CHARACTERISTIC #
####################
def build_characteristics():
clear_table(Characteristic)
data = load_data('characteristics.csv')
for index, info in enumerate(data):
if index > 0:
model = Characteristic (
id = int(info[0]),
stat = Stat.objects.get(pk = int(info[1])),
gene_mod_5 = int(info[2])
)
model.save()
clear_table(CharacteristicDescription)
data = load_data('characteristic_text.csv')
for index, info in enumerate(data):
if index > 0:
model = CharacteristicDescription (
characteristic = Characteristic.objects.get(pk = int(info[0])),
language = Language.objects.get(pk = int(info[1])),
description = info[2]
)
model.save()
###############
# EGG GROUP #
###############
def build_egg_groups():
clear_table(EggGroup)
data = load_data('egg_groups.csv')
for index, info in enumerate(data):
if index > 0:
model = EggGroup (
id = int(info[0]),
name = info[1]
)
model.save()
clear_table(EggGroupName)
data = load_data('egg_group_prose.csv')
for index, info in enumerate(data):
if index > 0:
model = EggGroupName (
egg_group = EggGroup.objects.get(pk = int(info[0])),
language = Language.objects.get(pk = int(info[1])),
name = info[2]
)
model.save()
#################
# GROWTH RATE #
#################
def build_growth_rates():
clear_table(GrowthRate)
data = load_data('growth_rates.csv')
for index, info in enumerate(data):
if index > 0:
model = GrowthRate (
id = int(info[0]),
name = info[1],
formula = info[2]
)
model.save()
clear_table(GrowthRateDescription)
data = load_data('growth_rate_prose.csv')
for index, info in enumerate(data):
if index > 0:
model = GrowthRateDescription (
growth_rate = GrowthRate.objects.get(pk = int(info[0])),
language = Language.objects.get(pk = int(info[1])),
description = info[2]
)
model.save()
# ###########
# # ITEMS #
# ###########
def build_items():
clear_table(ItemPocket)
data = load_data('item_pockets.csv')
for index, info in enumerate(data):
if index > 0:
model = ItemPocket (
id = int(info[0]),
name = info[1]
)
model.save()
clear_table(ItemPocketName)
data = load_data('item_pocket_names.csv')
for index, info in enumerate(data):
if index > 0:
model = ItemPocketName (
item_pocket = ItemPocket.objects.get(pk = int(info[0])),
language = Language.objects.get(pk = int(info[1])),
name = info[2]
)
model.save()
clear_table(ItemFlingEffect)
data = load_data('item_fling_effects.csv')
for index, info in enumerate(data):
if index > 0:
model = ItemFlingEffect (
id = int(info[0]),
name = info[1]
)
model.save()
clear_table(ItemFlingEffectEffectText)
data = load_data('item_fling_effect_prose.csv')
for index, info in enumerate(data):
if index > 0:
model = ItemFlingEffectEffectText (
item_fling_effect = ItemFlingEffect.objects.get(pk = int(info[0])),
language = Language.objects.get(pk = int(info[1])),
effect = scrubStr(info[2])
)
model.save()
clear_table(ItemCategory)
data = load_data('item_categories.csv')
for index, info in enumerate(data):
if index > 0:
model = ItemCategory (
id = int(info[0]),
item_pocket = ItemPocket.objects.get(pk = int(info[1])),
name = info[2]
)
model.save()
clear_table(ItemCategoryName)
data = load_data('item_category_prose.csv')
for index, info in enumerate(data):
if index > 0:
model = ItemCategoryName (
item_category = ItemCategory.objects.get(pk = int(info[0])),
language = Language.objects.get(pk = int(info[1])),
name = info[2]
)
model.save()
clear_table(Item)
clear_table(ItemSprites)
data = load_data('items.csv')
for index, info in enumerate(data):
if index > 0:
model = Item (
id = int(info[0]),
name = info[1],
item_category = ItemCategory.objects.get(pk = int(info[2])),
cost = int(info[3]),
fling_power = int(info[4]) if info[4] != '' else None,
item_fling_effect = ItemFlingEffect.objects.get(pk = int(info[5])) if info[5] != '' else None
)
model.save()
if re.search(r"^data-card", info[1]):
fileName = 'data-card.png'
elif re.search(r"^tm[0-9]", info[1]):
fileName = 'tm-normal.png'
elif re.search(r"^hm[0-9]", info[1]):
fileName = 'hm-normal.png'
else:
fileName = '%s.png' % info[1]
            itemSprites = 'items/{0}'
sprites = {
'default': filePathOrNone(itemSprites.format(fileName)),
}
imageModel = ItemSprites (
id = index,
item = Item.objects.get(pk=int(info[0])),
sprites = json.dumps(sprites)
)
imageModel.save()
clear_table(ItemName)
data = load_data('item_names.csv')
for index, info in enumerate(data):
if index > 0:
model = ItemName (
item = Item.objects.get(pk = int(info[0])),
language = Language.objects.get(pk = int(info[1])),
name = info[2]
)
model.save()
clear_table(ItemEffectText)
data = load_data('item_prose.csv')
for index, info in enumerate(data):
if index > 0:
model = ItemEffectText (
item = Item.objects.get(pk = int(info[0])),
language = Language.objects.get(pk = int(info[1])),
short_effect = scrubStr(info[2]),
effect = scrubStr(info[3])
)
model.save()
clear_table(ItemGameIndex)
data = load_data('item_game_indices.csv')
for index, info in enumerate(data):
if index > 0:
model = ItemGameIndex (
item = Item.objects.get(pk = int(info[0])),
generation = Generation.objects.get(pk = int(info[1])),
game_index = int(info[2])
)
model.save()
clear_table(ItemFlavorText)
data = load_data('item_flavor_text.csv')
for index, info in enumerate(data):
if index > 0:
model = ItemFlavorText (
item = Item.objects.get(pk = int(info[0])),
version_group = VersionGroup.objects.get(pk = int(info[1])),
language = Language.objects.get(pk = int(info[2])),
flavor_text = info[3]
)
model.save()
clear_table(ItemAttribute)
data = load_data('item_flags.csv')
for index, info in enumerate(data):
if index > 0:
model = ItemAttribute (
id = int(info[0]),
name = info[1]
)
model.save()
clear_table(ItemAttributeName)
clear_table(ItemAttributeDescription)
data = load_data('item_flag_prose.csv')
for index, info in enumerate(data):
if index > 0:
model_name = ItemAttributeName (
item_attribute = ItemAttribute.objects.get(pk = int(info[0])),
language = Language.objects.get(pk = int(info[1])),
name = info[2]
)
model_name.save()
model_description = ItemAttributeDescription (
item_attribute = ItemAttribute.objects.get(pk = int(info[0])),
language = Language.objects.get(pk = int(info[1])),
description = info[3]
)
model_description.save()
clear_table(ItemAttributeMap)
data = load_data('item_flag_map.csv')
for index, info in enumerate(data):
if index > 0:
model = ItemAttributeMap (
item = Item.objects.get(pk = int(info[0])),
item_attribute = ItemAttribute.objects.get(pk = int(info[1]))
)
model.save()
###########
# TYPES #
###########
def build_types():
clear_table(Type)
data = load_data('types.csv')
for index, info in enumerate(data):
if index > 0:
type = Type (
id = int(info[0]),
name = info[1],
generation = Generation.objects.get(pk = int(info[2])),
move_damage_class = MoveDamageClass.objects.get(pk = int(info[3])) if info[3] != '' else None
)
type.save()
clear_table(TypeName)
data = load_data('type_names.csv')
for index, info in enumerate(data):
if index > 0:
typeName = TypeName (
type = Type.objects.get(pk = int(info[0])),
language = Language.objects.get(pk = int(info[1])),
name = info[2]
)
typeName.save()
clear_table(TypeGameIndex)
data = load_data('type_game_indices.csv')
for index, info in enumerate(data):
if index > 0:
typeGameIndex = TypeGameIndex (
type = Type.objects.get(pk = int(info[0])),
generation = Generation.objects.get(pk = int(info[1])),
game_index = int(info[2])
)
typeGameIndex.save()
clear_table(TypeEfficacy)
data = load_data('type_efficacy.csv')
for index, info in enumerate(data):
if index > 0:
typeEfficacy = TypeEfficacy (
damage_type = Type.objects.get(pk = int(info[0])),
target_type = Type.objects.get(pk = int(info[1])),
damage_factor = int(info[2])
)
typeEfficacy.save()
#############
# CONTEST #
#############
def build_contests():
clear_table(ContestType)
data = load_data('contest_types.csv')
for index, info in enumerate(data):
if index > 0:
model = ContestType (
id = int(info[0]),
name = info[1]
)
model.save()
clear_table(ContestTypeName)
data = load_data('contest_type_names.csv')
for index, info in enumerate(data):
if index > 0:
model = ContestTypeName (
contest_type = ContestType.objects.get(pk = int(info[0])),
language = Language.objects.get(pk = int(info[1])),
name = info[2],
flavor = info[3],
color = info[4]
)
model.save()
clear_table(ContestEffect)
data = load_data('contest_effects.csv')
for index, info in enumerate(data):
if index > 0:
model = ContestEffect (
id = int(info[0]),
appeal = int(info[1]),
jam = int(info[2])
)
model.save()
clear_table(ContestEffectEffectText)
data = load_data('contest_effect_prose.csv')
for index, info in enumerate(data):
if index > 0:
model = ContestEffectEffectText (
contest_effect = ContestEffect.objects.get(pk = int(info[0])),
language = Language.objects.get(pk = int(info[1])),
effect = info[3]
)
model.save()
model = ContestEffectFlavorText (
contest_effect = ContestEffect.objects.get(pk = int(info[0])),
language = Language.objects.get(pk = int(info[1])),
flavor_text = info[2]
)
model.save()
clear_table(SuperContestEffect)
data = load_data('super_contest_effects.csv')
for index, info in enumerate(data):
if index > 0:
model = SuperContestEffect (
id = int(info[0]),
appeal = int(info[1])
)
model.save()
clear_table(SuperContestEffectFlavorText)
data = load_data('super_contest_effect_prose.csv')
for index, info in enumerate(data):
if index > 0:
model = SuperContestEffectFlavorText (
super_contest_effect = SuperContestEffect.objects.get(pk = int(info[0])),
language = Language.objects.get(pk = int(info[1])),
flavor_text = info[2]
)
model.save()
###########
# MOVES #
###########
def build_moves():
clear_table(MoveEffect)
data = load_data('move_effects.csv')
for index, info in enumerate(data):
if index > 0:
model = MoveEffect (
id = int(info[0])
)
model.save()
clear_table(MoveEffectEffectText)
data = load_data('move_effect_prose.csv')
for index, info in enumerate(data):
if index > 0:
model = MoveEffectEffectText (
move_effect = MoveEffect.objects.get(pk = int(info[0])),
language = Language.objects.get(pk = int(info[1])),
short_effect = scrubStr(info[2]),
effect = scrubStr(info[3])
)
model.save()
clear_table(MoveEffectChange)
data = load_data('move_effect_changelog.csv')
for index, info in enumerate(data):
if index > 0:
model = MoveEffectChange (
id = int(info[0]),
move_effect = MoveEffect.objects.get(pk = int(info[1])),
version_group = VersionGroup.objects.get(pk = int(info[2]))
)
model.save()
clear_table(MoveEffectChangeEffectText)
data = load_data('move_effect_changelog_prose.csv')
for index, info in enumerate(data):
if index > 0:
model = MoveEffectChangeEffectText (
move_effect_change = MoveEffectChange.objects.get(pk = int(info[0])),
language = Language.objects.get(pk = int(info[1])),
effect = scrubStr(info[2])
)
model.save()
clear_table(MoveLearnMethod)
data = load_data('pokemon_move_methods.csv')
for index, info in enumerate(data):
if index > 0:
model = MoveLearnMethod (
id = int(info[0]),
name = info[1]
)
model.save()
clear_table(VersionGroupMoveLearnMethod)
data = load_data('version_group_pokemon_move_methods.csv')
for index, info in enumerate(data):
if index > 0:
versionGroupMoveLearnMethod = VersionGroupMoveLearnMethod (
version_group = VersionGroup.objects.get(pk = int(info[0])),
move_learn_method = MoveLearnMethod.objects.get(pk = int(info[1])),
)
versionGroupMoveLearnMethod.save()
clear_table(MoveLearnMethodName)
clear_table(MoveLearnMethodDescription)
data = load_data('pokemon_move_method_prose.csv')
for index, info in enumerate(data):
if index > 0:
model_name = MoveLearnMethodName (
move_learn_method = MoveLearnMethod.objects.get(pk = int(info[0])),
language = Language.objects.get(pk = int(info[1])),
name = info[2]
)
model_name.save()
model_description = MoveLearnMethodDescription (
move_learn_method = MoveLearnMethod.objects.get(pk = int(info[0])),
language = Language.objects.get(pk = int(info[1])),
description = info[3]
)
model_description.save()
clear_table(MoveTarget)
data = load_data('move_targets.csv')
for index, info in enumerate(data):
if index > 0:
model = MoveTarget (
id = int(info[0]),
name = info[1]
)
model.save()
clear_table(MoveTargetName)
clear_table(MoveTargetDescription)
data = load_data('move_target_prose.csv')
for index, info in enumerate(data):
if index > 0:
model_name = MoveTargetName (
move_target = MoveTarget.objects.get(pk = int(info[0])),
language = Language.objects.get(pk = int(info[1])),
name = info[2]
)
model_name.save()
model_description = MoveTargetDescription (
move_target = MoveTarget.objects.get(pk = int(info[0])),
language = Language.objects.get(pk = int(info[1])),
description = info[3]
)
model_description.save()
clear_table(Move)
data = load_data('moves.csv')
for index, info in enumerate(data):
if index > 0:
model = Move (
id = int(info[0]),
name = info[1],
generation = Generation.objects.get(pk = int(info[2])),
type = Type.objects.get(pk = int(info[3])),
power = int(info[4]) if info[4] != '' else None,
pp = int(info[5]) if info[5] != '' else None,
accuracy = int(info[6]) if info[6] != '' else None,
priority = int(info[7]) if info[7] != '' else None,
move_target = MoveTarget.objects.get(pk = int(info[8])),
move_damage_class = MoveDamageClass.objects.get(pk = int(info[9])),
move_effect = MoveEffect.objects.get(pk = int(info[10])),
move_effect_chance = int(info[11]) if info[11] != '' else None,
contest_type = ContestType.objects.get(pk = int(info[12])) if info[12] != '' else None,
contest_effect = ContestEffect.objects.get(pk = int(info[13])) if info[13] != '' else None,
super_contest_effect = SuperContestEffect.objects.get(pk = int(info[14])) if info[14] != '' else None
)
model.save()
clear_table(MoveName)
data = load_data('move_names.csv')
for index, info in enumerate(data):
if index > 0:
model = MoveName (
move = Move.objects.get(pk = int(info[0])),
language = Language.objects.get(pk = int(info[1])),
name = info[2]
)
model.save()
clear_table(MoveFlavorText)
data = load_data('move_flavor_text.csv')
for index, info in enumerate(data):
if index > 0:
model = MoveFlavorText (
move = Move.objects.get(pk = int(info[0])),
version_group = VersionGroup.objects.get(pk = int(info[1])),
language = Language.objects.get(pk = int(info[2])),
flavor_text = info[3]
)
model.save()
clear_table(MoveChange)
data = load_data('move_changelog.csv')
for index, info in enumerate(data):
if index > 0:
model = MoveChange (
move = Move.objects.get(pk = int(info[0])),
version_group = VersionGroup.objects.get(pk = int(info[1])),
type = Type.objects.get(pk = int(info[2])) if info[2] != '' else None,
power = int(info[3]) if info[3] != '' else None,
pp = int(info[4]) if info[4] != '' else None,
accuracy = int(info[5]) if info[5] != '' else None,
move_effect = MoveEffect.objects.get(pk = int(info[6])) if info[6] != '' else None,
move_effect_chance = int(info[7]) if info[7] != '' else None
)
model.save()
clear_table(MoveBattleStyle)
data = load_data('move_battle_styles.csv')
for index, info in enumerate(data):
if index > 0:
model = MoveBattleStyle (
id = int(info[0]),
name = info[1]
)
model.save()
clear_table(MoveBattleStyleName)
data = load_data('move_battle_style_prose.csv')
for index, info in enumerate(data):
if index > 0:
model = MoveBattleStyleName (
move_battle_style = MoveBattleStyle.objects.get(pk = int(info[0])),
language = Language.objects.get(pk = int(info[1])),
name = info[2]
)
model.save()
clear_table(MoveAttribute)
data = load_data('move_flags.csv')
for index, info in enumerate(data):
if index > 0:
model = MoveAttribute (
id = int(info[0]),
name = info[1]
)
model.save()
clear_table(MoveAttributeMap)
data = load_data('move_flag_map.csv')
for index, info in enumerate(data):
if index > 0:
model = MoveAttributeMap (
move = Move.objects.get(pk = int(info[0])),
move_attribute = MoveAttribute.objects.get(pk = int(info[1])),
)
model.save()
clear_table(MoveAttributeName)
clear_table(MoveAttributeDescription)
data = load_data('move_flag_prose.csv')
for index, info in enumerate(data):
if index > 0:
name_model = MoveAttributeName (
move_attribute = MoveAttribute.objects.get(pk = int(info[0])),
language = Language.objects.get(pk = int(info[1])),
name = info[2]
)
name_model.save()
description_model = MoveAttributeDescription (
move_attribute = MoveAttribute.objects.get(pk = int(info[0])),
language = Language.objects.get(pk = int(info[1])),
description = scrubStr(info[3])
)
description_model.save()
clear_table(MoveMetaAilment)
data = load_data('move_meta_ailments.csv')
for index, info in enumerate(data):
if index > 0:
model = MoveMetaAilment (
id = int(info[0]),
name = info[1]
)
model.save()
clear_table(MoveMetaAilmentName)
data = load_data('move_meta_ailment_names.csv')
for index, info in enumerate(data):
if index > 0:
model = MoveMetaAilmentName (
move_meta_ailment = MoveMetaAilment.objects.get(pk = int(info[0])),
language = Language.objects.get(pk = int(info[1])),
name = info[2]
)
model.save()
clear_table(MoveMetaCategory)
data = load_data('move_meta_categories.csv')
for index, info in enumerate(data):
if index > 0:
model = MoveMetaCategory (
id = int(info[0]),
name = info[1]
)
model.save()
clear_table(MoveMetaCategoryDescription)
data = load_data('move_meta_category_prose.csv')
for index, info in enumerate(data):
if index > 0:
model = MoveMetaCategoryDescription (
move_meta_category = MoveMetaCategory.objects.get(pk = int(info[0])),
language = Language.objects.get(pk = int(info[1])),
description = info[2]
)
model.save()
clear_table(MoveMeta)
data = load_data('move_meta.csv')
for index, info in enumerate(data):
if index > 0:
model = MoveMeta (
move = Move.objects.get(pk = int(info[0])),
move_meta_category = MoveMetaCategory.objects.get(pk = int(info[1])),
move_meta_ailment = MoveMetaAilment.objects.get(pk = int(info[2])),
min_hits = int(info[3]) if info[3] != '' else None,
max_hits = int(info[4]) if info[4] != '' else None,
min_turns = int(info[5]) if info[5] != '' else None,
max_turns = int(info[6]) if info[6] != '' else None,
drain = int(info[7]) if info[7] != '' else None,
healing = int(info[8]) if info[8] != '' else None,
crit_rate = int(info[9]) if info[9] != '' else None,
ailment_chance = int(info[10]) if info[10] != '' else None,
flinch_chance = int(info[11]) if info[11] != '' else None,
stat_chance = int(info[12]) if info[12] != '' else None
)
model.save()
clear_table(MoveMetaStatChange)
data = load_data('move_meta_stat_changes.csv')
for index, info in enumerate(data):
if index > 0:
model = MoveMetaStatChange (
move = Move.objects.get(pk = int(info[0])),
stat = Stat.objects.get(pk = int(info[1])),
change = int(info[2])
)
model.save()
clear_table(ContestCombo)
data = load_data('contest_combos.csv')
for index, info in enumerate(data):
if index > 0:
model = ContestCombo (
first_move = Move.objects.get(pk = int(info[0])),
second_move = Move.objects.get(pk = int(info[1]))
)
model.save()
clear_table(SuperContestCombo)
data = load_data('super_contest_combos.csv')
for index, info in enumerate(data):
if index > 0:
model = SuperContestCombo (
first_move = Move.objects.get(pk = int(info[0])),
second_move = Move.objects.get(pk = int(info[1]))
)
model.save()
#############
# BERRIES #
#############
def build_berries():
clear_table(BerryFirmness)
data = load_data('berry_firmness.csv')
for index, info in enumerate(data):
if index > 0:
model = BerryFirmness (
id = int(info[0]),
name = info[1]
)
model.save()
clear_table(BerryFirmnessName)
data = load_data('berry_firmness_names.csv')
for index, info in enumerate(data):
if index > 0:
model = BerryFirmnessName (
berry_firmness = BerryFirmness.objects.get(pk = int(info[0])),
language = Language.objects.get(pk = int(info[1])),
name = info[2]
)
model.save()
clear_table(Berry)
data = load_data('berries.csv')
for index, info in enumerate(data):
if index > 0:
item = Item.objects.get(pk = int(info[1]))
model = Berry (
id = int(info[0]),
item = item,
name = item.name[:item.name.index('-')],
berry_firmness = BerryFirmness.objects.get(pk = int(info[2])),
natural_gift_power = int(info[3]),
natural_gift_type = Type.objects.get(pk = int(info[4])),
size = int(info[5]),
max_harvest = int(info[6]),
growth_time = int(info[7]),
soil_dryness = int(info[8]),
smoothness = int(info[9])
)
model.save()
clear_table(BerryFlavor)
data = load_data('contest_types.csv') #this is not an error
for index, info in enumerate(data):
if index > 0:
# get the english name for this contest type
contest_type_name = ContestTypeName.objects.get(contest_type_id=int(info[0]), language_id=9)
model = BerryFlavor (
id = int(info[0]),
name = contest_type_name.flavor.lower(),
contest_type = ContestType.objects.get(pk = int(info[0]))
)
model.save()
clear_table(BerryFlavorName)
data = load_data('contest_type_names.csv') #this is not an error
for index, info in enumerate(data):
if index > 0:
model = BerryFlavorName (
berry_flavor = BerryFlavor.objects.get(pk = int(info[0])),
language = Language.objects.get(pk = int(info[1])),
name = info[3]
)
model.save()
clear_table(BerryFlavorMap)
data = load_data('berry_flavors.csv') #this is not an error
for index, info in enumerate(data):
if index > 0:
model = BerryFlavorMap (
berry = Berry.objects.get(pk = int(info[0])),
berry_flavor = BerryFlavor.objects.get(pk = int(info[1])),
potency = int(info[2])
)
model.save()
############
# NATURE #
############
def build_natures():
clear_table(Nature)
data = load_data('natures.csv')
for index, info in enumerate(data):
if index > 0:
decreased_stat = None
increased_stat = None
hates_flavor = None
likes_flavor = None
if (info[2] != info[3]):
decreased_stat = Stat.objects.get(pk = int(info[2]))
increased_stat = Stat.objects.get(pk = int(info[3]))
if (info[4] != info[5]):
hates_flavor = BerryFlavor.objects.get(pk = int(info[4]))
likes_flavor = BerryFlavor.objects.get(pk = int(info[5]))
nature = Nature (
id = int(info[0]),
name = info[1],
decreased_stat = decreased_stat,
increased_stat = increased_stat,
hates_flavor = hates_flavor,
likes_flavor = likes_flavor,
game_index = info[6]
)
nature.save()
clear_table(NatureName)
data = load_data('nature_names.csv')
for index, info in enumerate(data):
if index > 0:
natureName = NatureName (
nature = Nature.objects.get(pk = int(info[0])),
language = Language.objects.get(pk = int(info[1])),
name = info[2]
)
natureName.save()
clear_table(NaturePokeathlonStat)
data = load_data('nature_pokeathlon_stats.csv')
for index, info in enumerate(data):
if index > 0:
naturePokeathlonStat = NaturePokeathlonStat (
nature = Nature.objects.get(pk = int(info[0])),
pokeathlon_stat = PokeathlonStat.objects.get(pk = int(info[1])),
max_change = info[2]
)
naturePokeathlonStat.save()
clear_table(NatureBattleStylePreference)
data = load_data('nature_battle_style_preferences.csv')
for index, info in enumerate(data):
if index > 0:
model = NatureBattleStylePreference (
nature = Nature.objects.get(pk = int(info[0])),
move_battle_style = MoveBattleStyle.objects.get(pk = int(info[1])),
low_hp_preference = info[2],
high_hp_preference = info[3]
)
model.save()
###########
# GENDER #
###########
def build_genders():
clear_table(Gender)
data = load_data('genders.csv')
for index, info in enumerate(data):
if index > 0:
model = Gender (
id = int(info[0]),
name = info[1]
)
model.save()
################
# EXPERIENCE #
################
def build_experiences():
clear_table(Experience)
data = load_data('experience.csv')
for index, info in enumerate(data):
if index > 0:
model = Experience (
growth_rate = GrowthRate.objects.get(pk = int(info[0])),
level = int(info[1]),
experience = int(info[2])
)
model.save()
##############
# MACHINES #
##############
def build_machines():
clear_table(Machine)
data = load_data('machines.csv')
for index, info in enumerate(data):
if index > 0:
model = Machine (
machine_number = int(info[0]),
version_group = VersionGroup.objects.get(pk = int(info[1])),
item = Item.objects.get(pk = int(info[2])),
move = Move.objects.get(pk = int(info[3])),
)
model.save()
###############
# EVOLUTION #
###############
def build_evolutions():
clear_table(EvolutionChain)
data = load_data('evolution_chains.csv')
for index, info in enumerate(data):
if index > 0:
model = EvolutionChain (
id = int(info[0]),
baby_trigger_item = Item.objects.get(pk = int(info[1])) if info[1] != '' else None,
)
model.save()
clear_table(EvolutionTrigger)
data = load_data('evolution_triggers.csv')
for index, info in enumerate(data):
if index > 0:
model = EvolutionTrigger (
id = int(info[0]),
name = info[1]
)
model.save()
clear_table(EvolutionTriggerName)
data = load_data('evolution_trigger_prose.csv')
for index, info in enumerate(data):
if index > 0:
model = EvolutionTriggerName (
evolution_trigger = EvolutionTrigger.objects.get(pk = int(info[0])),
language = Language.objects.get(pk = int(info[1])),
name = info[2]
)
model.save()
#############
# POKEDEX #
#############
def build_pokedexes():
clear_table(Pokedex)
data = load_data('pokedexes.csv')
for index, info in enumerate(data):
if index > 0:
model = Pokedex (
id = int(info[0]),
region = Region.objects.get(pk = int(info[1])) if info[1] != '' else None,
name = info[2],
is_main_series = bool(int(info[3]))
)
model.save()
clear_table(PokedexName)
clear_table(PokedexDescription)
data = load_data('pokedex_prose.csv')
for index, info in enumerate(data):
if index > 0:
name_model = PokedexName (
pokedex = Pokedex.objects.get(pk = int(info[0])),
language = Language.objects.get(pk = int(info[1])),
name = info[2],
)
name_model.save()
description_model = PokedexDescription (
pokedex = Pokedex.objects.get(pk = int(info[0])),
language = Language.objects.get(pk = int(info[1])),
description = info[3]
)
description_model.save()
clear_table(PokedexVersionGroup)
data = load_data('pokedex_version_groups.csv')
for index, info in enumerate(data):
if index > 0:
model = PokedexVersionGroup (
pokedex = Pokedex.objects.get(pk = int(info[0])),
version_group = VersionGroup.objects.get(pk = int(info[1]))
)
model.save()
##############
# LOCATION #
##############
def build_locations():
clear_table(Location)
data = load_data('locations.csv')
for index, info in enumerate(data):
if index > 0:
model = Location (
id = int(info[0]),
region = Region.objects.get(pk = int(info[1])) if info[1] != '' else None,
name = info[2]
)
model.save()
clear_table(LocationName)
data = load_data('location_names.csv')
for index, info in enumerate(data):
if index > 0:
model = LocationName (
location = Location.objects.get(pk = int(info[0])),
language = Language.objects.get(pk = int(info[1])),
name = info[2]
)
model.save()
clear_table(LocationGameIndex)
data = load_data('location_game_indices.csv')
for index, info in enumerate(data):
if index > 0:
model = LocationGameIndex (
location = Location.objects.get(pk = int(info[0])),
generation = Generation.objects.get(pk = int(info[1])),
game_index = int(info[2])
)
model.save()
clear_table(LocationArea)
data = load_data('location_areas.csv')
for index, info in enumerate(data):
if index > 0:
location = Location.objects.get(pk = int(info[1]))
model = LocationArea (
id = int(info[0]),
location = location,
game_index = int(info[2]),
name = '{}-{}'.format(location.name, info[3]) if info[3] else '{}-{}'.format(location.name, 'area')
)
model.save()
clear_table(LocationAreaName)
data = load_data('location_area_prose.csv')
for index, info in enumerate(data):
if index > 0:
model = LocationAreaName (
location_area = LocationArea.objects.get(pk = int(info[0])),
language = Language.objects.get(pk = int(info[1])),
name = info[2]
)
model.save()
#############
# POKEMON #
#############
def build_pokemons():
clear_table(PokemonColor)
data = load_data('pokemon_colors.csv')
for index, info in enumerate(data):
if index > 0:
model = PokemonColor (
id = int(info[0]),
name = info[1]
)
model.save()
clear_table(PokemonColorName)
data = load_data('pokemon_color_names.csv')
for index, info in enumerate(data):
if index > 0:
model = PokemonColorName (
pokemon_color = PokemonColor.objects.get(pk = int(info[0])),
language = Language.objects.get(pk = int(info[1])),
name = info[2]
)
model.save()
clear_table(PokemonShape)
data = load_data('pokemon_shapes.csv')
for index, info in enumerate(data):
if index > 0:
model = PokemonShape (
id = int(info[0]),
name = info[1]
)
model.save()
clear_table(PokemonShapeName)
data = load_data('pokemon_shape_prose.csv')
for index, info in enumerate(data):
if index > 0:
model = PokemonShapeName (
pokemon_shape = PokemonShape.objects.get(pk = int(info[0])),
language = Language.objects.get(pk = int(info[1])),
name = info[2],
awesome_name = info[3]
)
model.save()
clear_table(PokemonHabitat)
data = load_data('pokemon_habitats.csv')
for index, info in enumerate(data):
if index > 0:
model = PokemonHabitat (
id = int(info[0]),
name = info[1]
)
model.save()
clear_table(PokemonSpecies)
data = load_data('pokemon_species.csv')
for index, info in enumerate(data):
if index > 0:
model = PokemonSpecies (
id = int(info[0]),
name = info[1],
generation = Generation.objects.get(pk = int(info[2])),
evolves_from_species = None,
evolution_chain = EvolutionChain.objects.get(pk = int(info[4])),
pokemon_color = PokemonColor.objects.get(pk = int(info[5])),
pokemon_shape = PokemonShape.objects.get(pk = int(info[6])),
pokemon_habitat = PokemonHabitat.objects.get(pk = int(info[7])) if info[7] != '' else None,
gender_rate = int(info[8]),
capture_rate = int(info[9]),
base_happiness = int(info[10]),
is_baby = bool(int(info[11])),
hatch_counter = int(info[12]),
has_gender_differences = bool(int(info[13])),
growth_rate = GrowthRate.objects.get(pk = int(info[14])),
forms_switchable = bool(int(info[15])),
order = int(info[16])
)
model.save()
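    # Second pass over the same CSV: link evolves_from_species now that every
    # species row exists (a row may reference a species that appears later in the file).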
data = load_data('pokemon_species.csv')
for index, info in enumerate(data):
if index > 0:
evolves = PokemonSpecies.objects.get(pk = int(info[3])) if info[3] != '' else None
if evolves:
species = PokemonSpecies.objects.get(pk = int(info[0]))
species.evolves_from_species = evolves
species.save()
clear_table(PokemonSpeciesName)
data = load_data('pokemon_species_names.csv')
for index, info in enumerate(data):
if index > 0:
model = PokemonSpeciesName (
pokemon_species = PokemonSpecies.objects.get(pk = int(info[0])),
language = Language.objects.get(pk = int(info[1])),
name = info[2],
genus = info[3]
)
model.save()
clear_table(PokemonSpeciesDescription)
data = load_data('pokemon_species_prose.csv')
for index, info in enumerate(data):
if index > 0:
model = PokemonSpeciesDescription (
pokemon_species = PokemonSpecies.objects.get(pk = int(info[0])),
language = Language.objects.get(pk = int(info[1])),
description = scrubStr(info[2])
)
model.save()
clear_table(PokemonSpeciesFlavorText)
data = load_data('pokemon_species_flavor_text.csv')
for index, info in enumerate(data):
if index > 0:
model = PokemonSpeciesFlavorText (
pokemon_species = PokemonSpecies.objects.get(pk = int(info[0])),
version = Version.objects.get(pk = int(info[1])),
language = Language.objects.get(pk = int(info[2])),
flavor_text = info[3]
)
model.save()
clear_table(Pokemon)
clear_table(PokemonSprites)
data = load_data('pokemon.csv')
for index, info in enumerate(data):
if index > 0:
model = Pokemon (
id = int(info[0]),
name = info[1],
pokemon_species = PokemonSpecies.objects.get(pk = int(info[2])),
height = int(info[3]),
weight = int(info[4]),
base_experience = int(info[5]),
order = int(info[6]),
is_default = bool(int(info[7]))
)
model.save()
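            # Build the sprite file paths for every variant; filePathOrNone presumably
            # stores None when a variant image does not exist on disk.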
fileName = '%s.png' % info[0]
            pokeSprites = 'pokemon/{0}'
sprites = {
'front_default' : filePathOrNone(pokeSprites.format(fileName)),
'front_female' : filePathOrNone(pokeSprites.format('female/'+fileName)),
'front_shiny' : filePathOrNone(pokeSprites.format('shiny/'+fileName)),
'front_shiny_female' : filePathOrNone(pokeSprites.format('shiny/female/'+fileName)),
'back_default' : filePathOrNone(pokeSprites.format('back/'+fileName)),
'back_female' : filePathOrNone(pokeSprites.format('back/female/'+fileName)),
'back_shiny' : filePathOrNone(pokeSprites.format('back/shiny/'+fileName)),
'back_shiny_female' : filePathOrNone(pokeSprites.format('back/shiny/female/'+fileName)),
}
imageModel = PokemonSprites (
id = index,
pokemon = Pokemon.objects.get(pk=int(info[0])),
sprites = json.dumps(sprites)
)
imageModel.save()
clear_table(PokemonAbility)
data = load_data('pokemon_abilities.csv')
for index, info in enumerate(data):
if index > 0:
model = PokemonAbility (
pokemon = Pokemon.objects.get(pk = int(info[0])),
ability = Ability.objects.get(pk = int(info[1])),
is_hidden = bool(int(info[2])),
slot = int(info[3])
)
model.save()
clear_table(PokemonDexNumber)
data = load_data('pokemon_dex_numbers.csv')
for index, info in enumerate(data):
if index > 0:
model = PokemonDexNumber (
pokemon_species = PokemonSpecies.objects.get(pk = int(info[0])),
pokedex = Pokedex.objects.get(pk = int(info[1])),
pokedex_number = int(info[2])
)
model.save()
clear_table(PokemonEggGroup)
data = load_data('pokemon_egg_groups.csv')
for index, info in enumerate(data):
if index > 0:
model = PokemonEggGroup (
pokemon_species = PokemonSpecies.objects.get(pk = int(info[0])),
egg_group = EggGroup.objects.get(pk = int(info[1]))
)
model.save()
clear_table(PokemonEvolution)
data = load_data('pokemon_evolution.csv')
for index, info in enumerate(data):
if index > 0:
model = PokemonEvolution (
id = int(info[0]),
evolved_species = PokemonSpecies.objects.get(pk = int(info[1])),
evolution_trigger = EvolutionTrigger.objects.get(pk = int(info[2])),
evolution_item = Item.objects.get(pk = int(info[3])) if info[3] != '' else None,
min_level = int(info[4]) if info[4] != '' else None,
gender = Gender.objects.get(pk = int(info[5])) if info[5] != '' else None,
location = Location.objects.get(pk = int(info[6])) if info[6] != '' else None,
held_item = Item.objects.get(pk = int(info[7])) if info[7] != '' else None,
time_of_day = info[8],
known_move = Move.objects.get(pk = int(info[9])) if info[9] != '' else None,
known_move_type = Type.objects.get(pk = int(info[10])) if info[10] != '' else None,
min_happiness = int(info[11]) if info[11] != '' else None,
min_beauty = int(info[12]) if info[12] != '' else None,
min_affection = int(info[13]) if info[13] != '' else None,
relative_physical_stats = int(info[14]) if info[14] != '' else None,
party_species = PokemonSpecies.objects.get(pk = int(info[15])) if info[15] != '' else None,
party_type = Type.objects.get(pk = int(info[16])) if info[16] != '' else None,
trade_species = PokemonSpecies.objects.get(pk = int(info[17])) if info[17] != '' else None,
needs_overworld_rain = bool(int(info[18])),
turn_upside_down = bool(int(info[19]))
)
model.save()
clear_table(PokemonForm)
clear_table(PokemonFormSprites)
data = load_data('pokemon_forms.csv')
for index, info in enumerate(data):
if index > 0:
pokemon = Pokemon.objects.get(pk = int(info[3]))
model = PokemonForm (
id = int(info[0]),
name = info[1],
form_name = info[2],
pokemon = pokemon,
version_group = VersionGroup.objects.get(pk = int(info[4])),
is_default = bool(int(info[5])),
is_battle_only = bool(int(info[6])),
is_mega = bool(int(info[7])),
form_order = int(info[8]),
order = int(info[9])
)
model.save()
if info[2]:
if re.search(r"^mega", info[2]):
fileName = '%s.png' % info[3]
else:
fileName = '%s-%s.png' % (getattr(pokemon, 'pokemon_species_id'), info[2])
else:
fileName = '%s.png' % getattr(pokemon, 'pokemon_species_id')
pokeSprites = 'pokemon/{0}'
sprites = {
'front_default' : filePathOrNone(pokeSprites.format(fileName)),
'front_shiny' : filePathOrNone(pokeSprites.format('shiny/'+fileName)),
'back_default' : filePathOrNone(pokeSprites.format('back/'+fileName)),
'back_shiny' : filePathOrNone(pokeSprites.format('back/shiny/'+fileName)),
}
imageModel = PokemonFormSprites (
id = index,
pokemon_form = PokemonForm.objects.get(pk=int(info[0])),
sprites = json.dumps(sprites)
)
imageModel.save()
clear_table(PokemonFormName)
data = load_data('pokemon_form_names.csv')
for index, info in enumerate(data):
if index > 0:
model = PokemonFormName (
pokemon_form = PokemonForm.objects.get(pk = int(info[0])),
language = Language.objects.get(pk = int(info[1])),
name = info[2],
pokemon_name = info[3]
)
model.save()
clear_table(PokemonFormGeneration)
data = load_data('pokemon_form_generations.csv')
for index, info in enumerate(data):
if index > 0:
model = PokemonFormGeneration (
pokemon_form = PokemonForm.objects.get(pk = int(info[0])),
generation = Generation.objects.get(pk = int(info[1])),
game_index = int(info[2])
)
model.save()
clear_table(PokemonGameIndex)
data = load_data('pokemon_game_indices.csv')
for index, info in enumerate(data):
if index > 0:
model = PokemonGameIndex (
pokemon = Pokemon.objects.get(pk = int(info[0])),
version = Version.objects.get(pk = int(info[1])),
game_index = int(info[2])
)
model.save()
clear_table(PokemonHabitatName)
data = load_data('pokemon_habitat_names.csv')
for index, info in enumerate(data):
if index > 0:
model = PokemonHabitatName (
pokemon_habitat = PokemonHabitat.objects.get(pk = int(info[0])),
language = Language.objects.get(pk = int(info[1])),
name = info[2]
)
model.save()
clear_table(PokemonItem)
data = load_data('pokemon_items.csv')
for index, info in enumerate(data):
if index > 0:
model = PokemonItem (
pokemon = Pokemon.objects.get(pk = int(info[0])),
version = Version.objects.get(pk = int(info[1])),
item = Item.objects.get(pk = int(info[2])),
rarity = int(info[3])
)
model.save()
clear_table(PokemonMove)
data = load_data('pokemon_moves.csv')
for index, info in enumerate(data):
if index > 0:
model = PokemonMove (
pokemon = Pokemon.objects.get(pk = int(info[0])),
version_group = VersionGroup.objects.get(pk = int(info[1])),
move = Move.objects.get(pk = int(info[2])),
move_learn_method = MoveLearnMethod.objects.get(pk = int(info[3])),
level = int(info[4]),
order = int(info[5]) if info[5] != '' else None,
)
model.save()
clear_table(PokemonStat)
data = load_data('pokemon_stats.csv')
for index, info in enumerate(data):
if index > 0:
model = PokemonStat (
pokemon = Pokemon.objects.get(pk = int(info[0])),
stat = Stat.objects.get(pk = int(info[1])),
base_stat = int(info[2]),
effort = int(info[3])
)
model.save()
clear_table(PokemonType)
data = load_data('pokemon_types.csv')
for index, info in enumerate(data):
if index > 0:
model = PokemonType (
pokemon = Pokemon.objects.get(pk = int(info[0])),
type = Type.objects.get(pk = int(info[1])),
slot = int(info[2])
)
model.save()
###############
# ENCOUNTER #
###############
def build_encounters():
clear_table(EncounterMethod)
data = load_data('encounter_methods.csv')
for index, info in enumerate(data):
if index > 0:
model = EncounterMethod (
id = int(info[0]),
name = info[1],
order = int(info[2])
)
model.save()
# LocationAreaEncounterRate/EncounterMethod associations
"""
    I tried handling this the same way Berry/Natures are handled,
    but for some odd reason it resulted in a ton of db table issues.
    It was easy enough to move LocationAreaEncounterRates below
    Encounter population, and for some reason things work now.
"""
clear_table(LocationAreaEncounterRate)
data = load_data('location_area_encounter_rates.csv')
for index, info in enumerate(data):
if index > 0:
model = LocationAreaEncounterRate (
location_area = LocationArea.objects.get(pk = int(info[0])),
                encounter_method = EncounterMethod.objects.get(pk = int(info[1])),
version = Version.objects.get(pk = int(info[2])),
rate = int(info[3])
)
model.save()
clear_table(EncounterMethodName)
data = load_data('encounter_method_prose.csv')
for index, info in enumerate(data):
if index > 0:
model = EncounterMethodName (
encounter_method = EncounterMethod.objects.get(pk = int(info[0])),
language = Language.objects.get(pk = int(info[1])),
name = info[2]
)
model.save()
clear_table(EncounterSlot)
data = load_data('encounter_slots.csv')
for index, info in enumerate(data):
if index > 0:
model = EncounterSlot (
id = int(info[0]),
version_group = VersionGroup.objects.get(pk = int(info[1])),
encounter_method = EncounterMethod.objects.get(pk = int(info[2])),
slot = int(info[3]) if info[3] != '' else None,
rarity = int(info[4])
)
model.save()
clear_table(EncounterCondition)
data = load_data('encounter_conditions.csv')
for index, info in enumerate(data):
if index > 0:
model = EncounterCondition (
id = int(info[0]),
name = info[1]
)
model.save()
clear_table(EncounterConditionName)
data = load_data('encounter_condition_prose.csv')
for index, info in enumerate(data):
if index > 0:
model = EncounterConditionName (
encounter_condition = EncounterCondition.objects.get(pk = int(info[0])),
language = Language.objects.get(pk = int(info[1])),
name = info[2]
)
model.save()
clear_table(Encounter)
data = load_data('encounters.csv')
for index, info in enumerate(data):
if index > 0:
model = Encounter (
id = int(info[0]),
version = Version.objects.get(pk = int(info[1])),
location_area = LocationArea.objects.get(pk = int(info[2])),
encounter_slot = EncounterSlot.objects.get(pk = int(info[3])),
pokemon = Pokemon.objects.get(pk = int(info[4])),
min_level = int(info[5]),
max_level = int(info[6])
)
model.save()
clear_table(EncounterConditionValue)
data = load_data('encounter_condition_values.csv')
for index, info in enumerate(data):
if index > 0:
model = EncounterConditionValue (
id = int(info[0]),
encounter_condition = EncounterCondition.objects.get(pk = int(info[1])),
name = info[2],
is_default = bool(int(info[3]))
)
model.save()
clear_table(EncounterConditionValueName)
data = load_data('encounter_condition_value_prose.csv')
for index, info in enumerate(data):
if index > 0:
model = EncounterConditionValueName (
encounter_condition_value = EncounterConditionValue.objects.get(pk = int(info[0])),
language = Language.objects.get(pk = int(info[1])),
name = info[2],
)
model.save()
clear_table(EncounterConditionValueMap)
data = load_data('encounter_condition_value_map.csv')
for index, info in enumerate(data):
if index > 0:
model = EncounterConditionValueMap (
encounter = Encounter.objects.get(pk = int(info[0])),
encounter_condition_value = EncounterConditionValue.objects.get(pk = int(info[1]))
)
model.save()
##############
# PAL PARK #
##############
def build_pal_parks():
clear_table(PalParkArea)
data = load_data('pal_park_areas.csv')
for index, info in enumerate(data):
if index > 0:
model = PalParkArea (
id = int(info[0]),
name = info[1]
)
model.save()
clear_table(PalParkAreaName)
data = load_data('pal_park_area_names.csv')
for index, info in enumerate(data):
if index > 0:
model = PalParkAreaName (
pal_park_area = PalParkArea.objects.get(pk = int(info[0])),
language = Language.objects.get(pk = int(info[1])),
name = info[2]
)
model.save()
clear_table(PalPark)
data = load_data('pal_park.csv')
for index, info in enumerate(data):
if index > 0:
model = PalPark (
pokemon_species = PokemonSpecies.objects.get(pk = int(info[0])),
pal_park_area = PalParkArea.objects.get(pk = int(info[1])),
base_score = int(info[2]),
rate = int(info[3])
)
model.save()
def build_all():
build_languages()
build_regions()
build_generations()
build_versions()
build_damage_classes()
build_stats()
build_abilities()
build_characteristics()
build_egg_groups()
build_growth_rates()
build_items()
build_types()
build_contests()
build_moves()
build_berries()
build_natures()
build_genders()
build_experiences()
build_machines()
build_evolutions()
build_pokedexes()
build_locations()
build_pokemons()
build_encounters()
build_pal_parks()
if __name__ == '__main__':
build_all()
|
# coding: utf-8
sfx_names = [
'DexFanfare5079',
'Item',
'CaughtMon',
'PokeballsPlacedOnTable',
'Potion',
'FullHeal',
'Menu',
'ReadText',
'ReadText2',
'DexFanfare2049',
'DexFanfare80109',
'Poison',
'GotSafariBalls',
'BootPc',
'ShutDownPc',
'ChoosePcOption',
'EscapeRope',
'PushButton',
'SecondPartOfItemfinder',
'WarpTo',
'WarpFrom',
'ChangeDexMode',
'JumpOverLedge',
'GrassRustle',
'Fly',
'Wrong',
'Squeak',
'Strength',
'Boat',
'WallOpen',
'PlacePuzzlePieceDown',
'EnterDoor',
'SwitchPokemon',
'Tally',
'Transaction',
'ExitBuilding',
'Bump',
'Save',
'Pokeflute',
'ElevatorEnd',
'ThrowBall',
'BallPoof',
'Unknown3A',
'Run',
'SlotMachineStart',
'Fanfare',
'Peck',
'Kinesis',
'Lick',
'Pound',
'MovePuzzlePiece',
'CometPunch',
'MegaPunch',
'Scratch',
'Vicegrip',
'RazorWind',
'Cut',
'WingAttack',
'Whirlwind',
'Bind',
'VineWhip',
'DoubleKick',
'MegaKick',
'Headbutt',
'HornAttack',
'Tackle',
'PoisonSting',
'Powder',
'Doubleslap',
'Bite',
'JumpKick',
'Stomp',
'TailWhip',
'KarateChop',
'Submission',
'WaterGun',
'SwordsDance',
'Thunder',
'Supersonic',
'Leer',
'Ember',
'Bubblebeam',
'HydroPump',
'Surf',
'Psybeam',
'Charge',
'Thundershock',
'Psychic',
'Screech',
'BoneClub',
'Sharpen',
'EggBomb',
'Sing',
'HyperBeam',
'Shine',
'Unknown5F',
'Unknown60',
'Unknown61',
'Unknown62',
'Unknown63',
'Burn',
'TitleScreenEntrance',
'Unknown66',
'GetCoinFromSlots',
'PayDay',
'Metronome',
'Call',
'HangUp',
'NoSignal',
'Sandstorm',
'Elevator',
'Protect',
'Sketch',
'RainDance',
'Aeroblast',
'Spark',
'Curse',
'Rage',
'Thief',
'Thief2',
'SpiderWeb',
'MindReader',
'Nightmare',
'Snore',
'SweetKiss',
'SweetKiss2',
'BellyDrum',
'Unknown7F',
'SludgeBomb',
'Foresight',
'Spite',
'Outrage',
'PerishSong',
'GigaDrain',
'Attract',
'Kinesis2',
'ZapCannon',
'MeanLook',
'HealBell',
'Return',
'ExpBar',
'MilkDrink',
'Present',
'MorningSun',
'LevelUp',
'KeyItem',
'Fanfare2',
'RegisterPhoneNumber',
'3RdPlace',
'GetEggFromDaycareMan',
'GetEggFromDaycareLady',
'MoveDeleted',
'2ndPlace',
'1stPlace',
'ChooseACard',
'GetTm',
'GetBadge',
'QuitSlots',
'EggCrack',
'DexFanfareLessThan20',
'DexFanfare140169',
'DexFanfare170199',
'DexFanfare200229',
'DexFanfare230Plus',
'Evolved',
'MasterBall',
'EggHatch',
'GsIntroCharizardFireball',
'GsIntroPokemonAppears',
'Flash',
'GameFreakLogoGs',
'NotVeryEffective',
'Damage',
'SuperEffective',
'BallBounce',
'Moonlight',
'Encore',
'BeatUp',
'BatonPass',
'BallWiggle',
'SweetScent',
'SweetScent2',
'HitEndOfExpBar',
'GiveTrademon',
'GetTrademon',
'TrainArrived',
'StopSlot',
'2Boops',
'GlassTing',
'GlassTing2',
'IntroUnown1',
'IntroUnown2',
'IntroUnown3',
'DittoPopUp',
'DittoTransform',
'IntroSuicune1',
'IntroPichu',
'IntroSuicune2',
'IntroSuicune3',
'DittoBounce',
'IntroSuicune4',
'GameFreakPresents',
'Tingle',
'UnknownCB',
'TwoPcBeeps',
'4NoteDitty',
'Twinkle',
]
|
import os
import numpy as np
import argparse
from ensemble_utils import *
from CNN import *
import keras
import pickle
from tensorflow.python.client import device_lib
import tensorflow as tf
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "4"
gpus = tf.config.experimental.list_physical_devices('GPU')
if gpus:
try:
for gpu in gpus:
tf.config.experimental.set_memory_growth(gpu, True)
except RuntimeError as e:
print(e)
print(device_lib.list_local_devices())
parser = argparse.ArgumentParser()
parser.add_argument('testfasta')
parser.add_argument('model',help="model folder")
parser.add_argument('outfile')
parser.add_argument('-ioutfile','--ioutfile',default=None)
opts=parser.parse_args()
X = fa_to_onehot(opts.testfasta)
model_folders = [opts.model+"/"+d for d in os.listdir(opts.model) if os.path.isdir(opts.model+"/"+d)]
with open(opts.model+"/model_acc.pkl","rb") as f:
accuracies = pickle.load(f)
print(X.shape)
total_pred = []
for mi,model in enumerate(model_folders):
cnn = keras.models.load_model(model+"/model.h5")
print(cnn.summary())
pred=cnn.predict(X)
total_pred.append(pred)
    if opts.ioutfile is not None:
np.savetxt(model+"/"+opts.ioutfile,pred)
del cnn
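# Accuracy-weighted ensemble: sum each model's predictions scaled by its stored
# accuracy, then normalise by the total accuracy mass.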
pred_mat = np.zeros(total_pred[0].shape)
print(accuracies)
for mi,model in enumerate(model_folders):
pred_mat += accuracies[model]*total_pred[mi]
pred_mat = pred_mat/sum(accuracies.values())
np.savetxt(opts.outfile,pred_mat)
|
#!/usr/bin/python
class Employee:
'Common base class for all employees'
empCount = 0
def __init__(self, name, salary):
self.name = name
self.salary = salary
Employee.empCount += 1
def displayCount(self):
print("Total Employee %d" % self.empCount)
def displayEmployee(self):
print("Name : %s, Salary: %d" % (self.name, self.salary))
"This would create first object of Employee class"
emp1 = Employee("Zara", 2000)
"This would create second object of Employee class"
emp2 = Employee("Manni", 5000)
emp1.displayEmployee()
emp2.displayEmployee()
print("Total Employee %d" % Employee.empCount)
print("Employee.__doc__:", Employee.__doc__)
print("Employee.__name__:", Employee.__name__)
print("Employee.__module__:", Employee.__module__)
print("Employee.__bases__:", Employee.__bases__)
print("Employee.__dict__:", Employee.__dict__)
|
import pytest
from tests.common import data_file
from inputs import format_kml
@pytest.fixture
def kml_file():
return data_file("kml/kml-with-extended-data.kml")
def test_open(kml_file):
data_source = format_kml.open(kml_file)
assert data_source
def test_get_nonspatial_field_names(kml_file):
data_source = format_kml.open(kml_file)
names = list(data_source.get_non_spatial_field_names())
assert set([
'Name',
'description',
# 'timestamp',
# 'begin',
# 'end',
'altitudeMode',
'tessellate',
'extrude',
'visibility',
'drawOrder',
'icon',
'edited',
'blueyardage',
'whiteyardage',
'menshandicap',
'menspar',
'redyardage',
'womenshandicap',
'womenspar']) == set(names)
def test_iterate_features(kml_file):
data_source = format_kml.open(kml_file)
assert 3 == len(list(data_source.get_features()))
def test_get_features(kml_file):
data_source = format_kml.open(kml_file)
field_names = set(data_source.get_non_spatial_field_names())
for f in data_source.get_features():
values = data_source.get_non_spatial_field_values_for_feature(f)
assert set(values.keys()) == field_names
assert not f.GetGeometryRef().IsEmpty()
|
import os, slackclient
import glob
# delay in seconds before checking for new events
SocketDelay= 1
# slackbot environment variables
SlackToken = ''
Slack = slackclient.SlackClient(SlackToken)
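# Delete every previously uploaded Slack file listed in images_uploaded.txt, then
# remove the tracking file and any local PNG files.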
try:
f = open("images_uploaded.txt", "rt")
ax = f.readlines()
for line in ax:
print (line.strip())
print(Slack.api_call('files.delete', file = line.strip())['ok'])
f.close()
os.remove("images_uploaded.txt")
    filelist = glob.glob('*.png')
for png in filelist:
try:
os.remove(png)
except OSError:
print ('Error deleting %s' %png)
except OSError:
    print('Error opening the images file')
|
import requests
from django.conf import settings
from django.shortcuts import render
from django.http import HttpResponse
def error404(request):
if '/play/' in request.path:
return render(request, 'play_404.html', {'play_404': True}, status=404)
else:
return render(request, '404.html', status=404)
def newsletter_subsribe(request):
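    # Subscribe the submitted email address to the configured Mailchimp list via the v2.0 API.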
if request.is_ajax() and request.GET.get('email'):
requests.post(
"https://us10.api.mailchimp.com/2.0/lists/subscribe",
json={'apikey': settings.MAILCHIMP_KEY,
'id': settings.MAILING_LIST_ID,
'email': {'email': request.GET.get('email')}}
)
return HttpResponse()
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import copy
from ...ops.pointnet2.pointnet2_stack import pointnet2_modules as pointnet2_stack_modules
from ...utils import common_utils, loss_utils
from .roi_head_template import RoIHeadTemplate
from .target_assigner.center_target_layer_mtasks import CenterTargetLayerMTasks
class CenterRCNNTasks(RoIHeadTemplate):
def __init__(self, input_channels, model_cfg, num_class=1):
super().__init__(num_class=num_class, model_cfg=model_cfg)
self.model_cfg = model_cfg
self.tasks = self.model_cfg.TASKS
self.num_tasks = len(self.tasks)
self.num_rois = self.model_cfg.TARGET_CONFIG.ROI_PER_IMAGE
self.proposal_target_layer = CenterTargetLayerMTasks(roi_sampler_cfg=self.model_cfg.TARGET_CONFIG, task_cfg=self.model_cfg.TASKS)
mlps = self.model_cfg.ROI_GRID_POOL.MLPS
for k in range(len(mlps)):
mlps[k] = [input_channels] + mlps[k]
GRID_SIZE = self.model_cfg.ROI_GRID_POOL.GRID_SIZE
c_out = sum([x[-1] for x in mlps])
self.roi_grid_pool_layers = nn.ModuleList()
self.shared_fc_layers = nn.ModuleList()
self.cls_layers = nn.ModuleList()
self.reg_layers = nn.ModuleList()
for task in self.tasks:
self.roi_grid_pool_layers.append(
pointnet2_stack_modules.StackSAModuleMSG(
radii=self.model_cfg.ROI_GRID_POOL.POOL_RADIUS,
nsamples=self.model_cfg.ROI_GRID_POOL.NSAMPLE,
mlps=copy.deepcopy(mlps),
use_xyz=True,
pool_method=self.model_cfg.ROI_GRID_POOL.POOL_METHOD,
)
)
shared_fc_list = []
pre_channel = GRID_SIZE * GRID_SIZE * GRID_SIZE * c_out
for k in range(0, self.model_cfg.SHARED_FC.__len__()):
shared_fc_list.extend([
nn.Conv1d(pre_channel, self.model_cfg.SHARED_FC[k], kernel_size=1, bias=False),
nn.BatchNorm1d(self.model_cfg.SHARED_FC[k]),
nn.ReLU()
])
pre_channel = self.model_cfg.SHARED_FC[k]
if k != self.model_cfg.SHARED_FC.__len__() - 1 and self.model_cfg.DP_RATIO > 0:
shared_fc_list.append(nn.Dropout(self.model_cfg.DP_RATIO))
self.shared_fc_layers.append(nn.Sequential(*shared_fc_list))
self.cls_layers.append(
self.make_fc_layers(
input_channels=pre_channel, output_channels=self.num_class, fc_list=self.model_cfg.CLS_FC
)
)
self.reg_layers.append(
self.make_fc_layers(
input_channels=pre_channel,
output_channels=self.box_coder.code_size * self.num_class,
fc_list=self.model_cfg.REG_FC
)
)
self.init_weights(weight_init='xavier')
def init_weights(self, weight_init='xavier'):
if weight_init == 'kaiming':
init_func = nn.init.kaiming_normal_
elif weight_init == 'xavier':
init_func = nn.init.xavier_normal_
elif weight_init == 'normal':
init_func = nn.init.normal_
else:
raise NotImplementedError
for m in self.modules():
if isinstance(m, nn.Conv2d) or isinstance(m, nn.Conv1d):
if weight_init == 'normal':
init_func(m.weight, mean=0, std=0.001)
else:
init_func(m.weight)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
for i in range(len(self.tasks)):
nn.init.normal_(self.reg_layers[i][-1].weight, mean=0, std=0.001)
def get_global_grid_points_of_roi(self, rois, grid_size):
rois = rois.reshape(-1, rois.shape[-1])
batch_size_rcnn = rois.shape[0]
local_roi_grid_points = self.get_dense_grid_points(rois, batch_size_rcnn, grid_size) # (B, 6x6x6, 3)
global_roi_grid_points = common_utils.rotate_points_along_z(
local_roi_grid_points.clone(), rois[:, 6]
).squeeze(dim=1)
global_center = rois[:, 0:3].clone()
global_roi_grid_points += global_center.unsqueeze(dim=1)
return global_roi_grid_points, local_roi_grid_points
@staticmethod
def get_dense_grid_points(rois, batch_size_rcnn, grid_size):
faked_features = rois.new_ones((grid_size, grid_size, grid_size))
dense_idx = faked_features.nonzero() # (N, 3) [x_idx, y_idx, z_idx]
dense_idx = dense_idx.repeat(batch_size_rcnn, 1, 1).float() # (B, 6x6x6, 3)
local_roi_size = rois.view(batch_size_rcnn, -1)[:, 3:6]
roi_grid_points = (dense_idx + 0.5) / grid_size * local_roi_size.unsqueeze(dim=1) \
- (local_roi_size.unsqueeze(dim=1) / 2) # (B, 6x6x6, 3)
return roi_grid_points
def forward(self, batch_dict, targets_dict):
if self.training:
batch_dict = self.train_rcnn(batch_dict, targets_dict)
else:
batch_dict = self.test_rcnn(batch_dict, targets_dict)
return batch_dict
def train_rcnn(self, batch_dict, targets_dict):
rois = batch_dict['rois'] # (B, ntask * N, code_size)
point_coords = batch_dict['point_coords']
point_features = batch_dict['point_features']
point_features = point_features * batch_dict['point_cls_scores'].view(-1, 1)
batch_size, _, code_size = rois.shape
num_grid_points = self.model_cfg.ROI_GRID_POOL.GRID_SIZE ** 3
xyz = point_coords[:, 1:4]
xyz_batch_cnt = xyz.new_zeros(batch_size).int()
batch_idx = point_coords[:, 0]
for k in range(batch_size):
xyz_batch_cnt[k] = (batch_idx == k).sum()
rois = rois.reshape(batch_size, self.num_tasks, self.num_rois, code_size)
rcnn_cls = []
rcnn_reg = []
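        # For each task, pool RoI-grid point features and run that task's own
        # shared FC, classification and regression heads over its RoIs.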
for task_id, task in enumerate(self.tasks):
roi_single = rois[:, task_id, :, :]
global_roi_grid_points, local_roi_grid_points = self.get_global_grid_points_of_roi(
roi_single, grid_size=self.model_cfg.ROI_GRID_POOL.GRID_SIZE
) # (BxN, 6x6x6, 3)
global_roi_grid_points = global_roi_grid_points.view(batch_size, -1, 3) # (B, Nx6x6x6, 3)
new_xyz = global_roi_grid_points.view(-1, 3)
new_xyz_batch_cnt = xyz.new_zeros(batch_size).int().fill_(global_roi_grid_points.shape[1])
pooled_points, pooled_features = self.roi_grid_pool_layers[task_id](
xyz=xyz.contiguous(),
xyz_batch_cnt=xyz_batch_cnt,
new_xyz=new_xyz,
new_xyz_batch_cnt=new_xyz_batch_cnt,
features=point_features.contiguous(),
) # (M1 + M2 ..., C)
num_features = pooled_features.shape[-1]
pooled_features = pooled_features.reshape(-1, num_grid_points, num_features) # (BN, 6x6x6, C)
pooled_features = pooled_features.transpose(1, 2).contiguous()
pooled_features = pooled_features.reshape(-1, num_features * num_grid_points, 1)
shared_features = self.shared_fc_layers[task_id](pooled_features)
rcnn_cls_single = self.cls_layers[task_id](shared_features).transpose(1, 2).contiguous().squeeze(
dim=1) # (BN, 1 or 2)
rcnn_reg_single = self.reg_layers[task_id](shared_features).transpose(1, 2).contiguous().squeeze(
dim=1) # (BN, C)
rcnn_cls_single = rcnn_cls_single.reshape(batch_size, self.num_rois, 1)
rcnn_reg_single = rcnn_reg_single.reshape(batch_size, self.num_rois, rcnn_reg_single.shape[-1])
rcnn_cls.append(rcnn_cls_single)
rcnn_reg.append(rcnn_reg_single)
rcnn_cls = torch.cat(rcnn_cls, dim = 1)
rcnn_reg = torch.cat(rcnn_reg, dim = 1)
targets_dict['rcnn_cls'] = rcnn_cls
targets_dict['rcnn_reg'] = rcnn_reg
self.forward_ret_dict = targets_dict
return batch_dict
def test_rcnn(self, batch_dict, targets_dict):
batch_size = batch_dict['batch_size']
rois = batch_dict['rois'] # (B, N, code_size)
roi_labels = batch_dict['roi_labels'] # (B, N)
point_coords = batch_dict['point_coords']
point_features = batch_dict['point_features']
point_features = point_features * batch_dict['point_cls_scores'].view(-1, 1)
num_rois = rois.shape[1]
num_grid_points = self.model_cfg.ROI_GRID_POOL.GRID_SIZE ** 3
global_roi_grid_points, local_roi_grid_points = self.get_global_grid_points_of_roi(
rois, grid_size=self.model_cfg.ROI_GRID_POOL.GRID_SIZE
) # (BxN, 6x6x6, 3)
xyz = point_coords[:, 1:4]
xyz_batch_cnt = xyz.new_zeros(batch_size).int()
batch_idx = point_coords[:, 0]
for k in range(batch_size):
xyz_batch_cnt[k] = (batch_idx == k).sum()
rcnn_cls = rois.new_zeros((batch_size * num_rois, 1))
rcnn_reg = rois.new_zeros((batch_size * num_rois, rois.shape[-1]))
rcnn_dummy_cls, rcnn_dummy_reg = [], []
for task_id, task in enumerate(self.tasks):
mask = roi_labels.new_zeros(roi_labels.shape, dtype=bool)
for cls_id in task['class_ids']:
mask |= (roi_labels == cls_id)
roi_cnt = mask.sum(1) # (B) num_rois selected in each batch
mask = mask.reshape(-1) # (BxN)
if mask.sum().item() == 0: # this task does not exist, set dummy to avoid bugs
new_xyz = torch.zeros((batch_size * num_grid_points, 3)).to(mask.device)
new_xyz_batch_cnt = torch.ones(batch_size).to(mask.device) * num_grid_points
pooled_points, pooled_features = self.roi_grid_pool_layers[task_id](
xyz=xyz.contiguous(),
xyz_batch_cnt=xyz_batch_cnt,
new_xyz=new_xyz,
new_xyz_batch_cnt=new_xyz_batch_cnt.int(),
features=point_features.contiguous(),
) # (M1 + M2 ..., C)
num_features = pooled_features.shape[-1]
pooled_features = pooled_features.reshape(-1, num_grid_points, num_features) # (M, 6x6x6, C)
pooled_features = pooled_features.transpose(1, 2).contiguous()
pooled_features = pooled_features.reshape(-1, num_features * num_grid_points, 1)
shared_features = self.shared_fc_layers[task_id](pooled_features)
rcnn_cls_single = self.cls_layers[task_id](shared_features).transpose(1, 2).contiguous().squeeze(dim=1) # (B, 1 or 2)
rcnn_reg_single = self.reg_layers[task_id](shared_features).transpose(1, 2).contiguous().squeeze(dim=1) # (B, C)
rcnn_dummy_cls.append(rcnn_cls_single)
rcnn_dummy_reg.append(rcnn_reg_single)
else:
assert global_roi_grid_points.shape[0] == batch_size * num_rois
new_xyz = global_roi_grid_points[mask].reshape(-1, 3)
new_xyz_batch_cnt = roi_cnt.int() * num_grid_points
pooled_points, pooled_features = self.roi_grid_pool_layers[task_id](
xyz=xyz.contiguous(),
xyz_batch_cnt=xyz_batch_cnt,
new_xyz=new_xyz,
new_xyz_batch_cnt=new_xyz_batch_cnt,
features=point_features.contiguous(),
) # (M1 + M2 ..., C)
num_features = pooled_features.shape[-1]
pooled_features = pooled_features.reshape(-1, num_grid_points, num_features) # (M, 6x6x6, C)
pooled_features = pooled_features.transpose(1, 2).contiguous()
pooled_features = pooled_features.reshape(-1, num_features * num_grid_points, 1)
shared_features = self.shared_fc_layers[task_id](pooled_features)
rcnn_cls_single = self.cls_layers[task_id](shared_features).transpose(1, 2).contiguous().squeeze(dim=1) # (M, 1 or 2)
rcnn_reg_single = self.reg_layers[task_id](shared_features).transpose(1, 2).contiguous().squeeze(dim=1) # (M, C)
rcnn_cls[mask] = rcnn_cls_single
rcnn_reg[mask] = rcnn_reg_single
dummy_flag = rcnn_dummy_cls or rcnn_dummy_reg
if dummy_flag:
rcnn_dummy_cls = torch.cat(rcnn_dummy_cls, dim = 0)
rcnn_dummy_reg = torch.cat(rcnn_dummy_reg, dim = 0)
batch_cls_preds, batch_box_preds = self.generate_predicted_boxes(
batch_size=batch_dict['batch_size'], rois=batch_dict['rois'], cls_preds=rcnn_cls, box_preds=rcnn_reg
)
batch_dict['batch_cls_preds'] = batch_cls_preds
batch_dict['batch_box_preds'] = batch_box_preds
batch_dict['cls_preds_normalized'] = False
return batch_dict
def get_box_reg_layer_loss(self, forward_ret_dict):
loss_cfgs = self.model_cfg.LOSS_CONFIG
code_size = self.box_coder.code_size
task_mask = forward_ret_dict['task_mask'].view(-1)
reg_valid_mask = forward_ret_dict['reg_valid_mask'].view(-1)
gt_boxes3d_ct = forward_ret_dict['gt_of_rois'][..., 0:code_size]
gt_of_rois_src = forward_ret_dict['gt_of_rois_src'][..., 0:code_size].view(-1, code_size)
rcnn_reg = forward_ret_dict['rcnn_reg'] # (rcnn_batch_size, C)
roi_boxes3d = forward_ret_dict['rois']
rcnn_batch_size = gt_boxes3d_ct.view(-1, code_size).shape[0]
# skip empty task
reg_valid_mask = reg_valid_mask * task_mask
fg_mask = (reg_valid_mask > 0)
fg_sum = fg_mask.long().sum().item()
tb_dict = {}
if loss_cfgs.REG_LOSS == 'smooth-l1':
rois_anchor = roi_boxes3d.clone().detach().view(-1, code_size)
rois_anchor[:, 0:3] = 0
rois_anchor[:, 6] = 0
reg_targets = self.box_coder.encode_torch(
gt_boxes3d_ct.view(rcnn_batch_size, code_size), rois_anchor
)
rcnn_loss_reg = self.reg_loss_func(
rcnn_reg.view(rcnn_batch_size, -1).unsqueeze(dim=0),
reg_targets.unsqueeze(dim=0),
) # [B, M, 7]
rcnn_loss_reg = (rcnn_loss_reg.view(rcnn_batch_size, -1) * fg_mask.unsqueeze(dim=-1).float()).sum() / max(fg_sum, 1)
rcnn_loss_reg = rcnn_loss_reg * loss_cfgs.LOSS_WEIGHTS['rcnn_reg_weight']
tb_dict['rcnn_loss_reg'] = rcnn_loss_reg.item()
if loss_cfgs.CORNER_LOSS_REGULARIZATION and fg_sum > 0:
                # TODO: needs to be checked
fg_rcnn_reg = rcnn_reg.view(rcnn_batch_size, -1)[fg_mask]
fg_roi_boxes3d = roi_boxes3d.view(-1, code_size)[fg_mask]
fg_roi_boxes3d = fg_roi_boxes3d.view(1, -1, code_size)
batch_anchors = fg_roi_boxes3d.clone().detach()
roi_ry = fg_roi_boxes3d[:, :, 6].view(-1)
roi_xyz = fg_roi_boxes3d[:, :, 0:3].view(-1, 3)
batch_anchors[:, :, 0:3] = 0
rcnn_boxes3d = self.box_coder.decode_torch(
fg_rcnn_reg.view(batch_anchors.shape[0], -1, code_size), batch_anchors
).view(-1, code_size)
rcnn_boxes3d = common_utils.rotate_points_along_z(
rcnn_boxes3d.unsqueeze(dim=1), roi_ry
).squeeze(dim=1)
rcnn_boxes3d[:, 0:3] += roi_xyz
loss_corner = loss_utils.get_corner_loss_lidar(
rcnn_boxes3d[:, 0:7],
gt_of_rois_src[fg_mask][:, 0:7]
)
loss_corner = loss_corner.mean()
loss_corner = loss_corner * loss_cfgs.LOSS_WEIGHTS['rcnn_corner_weight']
rcnn_loss_reg += loss_corner
tb_dict['rcnn_loss_corner'] = loss_corner.item()
else:
raise NotImplementedError
return rcnn_loss_reg, tb_dict
def get_box_cls_layer_loss(self, forward_ret_dict):
loss_cfgs = self.model_cfg.LOSS_CONFIG
rcnn_cls = forward_ret_dict['rcnn_cls']
task_mask = forward_ret_dict['task_mask'].view(-1)
rcnn_cls_labels = forward_ret_dict['rcnn_cls_labels'].view(-1)
if loss_cfgs.CLS_LOSS == 'BinaryCrossEntropy':
rcnn_cls_flat = rcnn_cls.view(-1)
batch_loss_cls = F.binary_cross_entropy(torch.sigmoid(rcnn_cls_flat), rcnn_cls_labels.float(), reduction='none')
cls_valid_mask = (rcnn_cls_labels >= 0).float()
cls_valid_mask = cls_valid_mask * task_mask
rcnn_loss_cls = (batch_loss_cls * cls_valid_mask).sum() / torch.clamp(cls_valid_mask.sum(), min=1.0)
elif loss_cfgs.CLS_LOSS == 'CrossEntropy':
batch_loss_cls = F.cross_entropy(rcnn_cls, rcnn_cls_labels, reduction='none', ignore_index=-1)
cls_valid_mask = (rcnn_cls_labels >= 0).float()
cls_valid_mask = cls_valid_mask * task_mask
rcnn_loss_cls = (batch_loss_cls * cls_valid_mask).sum() / torch.clamp(cls_valid_mask.sum(), min=1.0)
else:
raise NotImplementedError
rcnn_loss_cls = rcnn_loss_cls * loss_cfgs.LOSS_WEIGHTS['rcnn_cls_weight']
tb_dict = {'rcnn_loss_cls': rcnn_loss_cls.item()}
return rcnn_loss_cls, tb_dict
def get_loss(self, tb_dict=None):
tb_dict = {} if tb_dict is None else tb_dict
rcnn_loss = 0
rcnn_loss_cls, cls_tb_dict = self.get_box_cls_layer_loss(self.forward_ret_dict)
rcnn_loss += rcnn_loss_cls
tb_dict.update(cls_tb_dict)
rcnn_loss_reg, reg_tb_dict = self.get_box_reg_layer_loss(self.forward_ret_dict)
rcnn_loss += rcnn_loss_reg
tb_dict.update(reg_tb_dict)
tb_dict['rcnn_loss'] = rcnn_loss.item()
return rcnn_loss, tb_dict
|
import torch
import torch.nn.functional as F
from modules import nn_utils, losses, pretrained_models, baseline_utils
from nnlib.nnlib import utils
from methods import BaseClassifier
class StandardClassifier(BaseClassifier):
""" Standard classifier trained with cross-entropy loss.
    Has an option to work on a pretrained representation of x.
    Optionally, can add noise to the gradient w.r.t. the output logit.
"""
@utils.capture_arguments_of_init
def __init__(self, input_shape, architecture_args, pretrained_arg=None,
device='cuda', loss_function='ce', add_noise=False, noise_type='Gaussian',
noise_std=0.0, loss_function_param=None, load_from=None, **kwargs):
super(StandardClassifier, self).__init__(**kwargs)
self.args = None # this will be modified by the decorator
self.input_shape = [None] + list(input_shape)
self.architecture_args = architecture_args
self.pretrained_arg = pretrained_arg
self.loss_function = loss_function
self.add_noise = add_noise
self.noise_type = noise_type
self.noise_std = noise_std
self.loss_function_param = loss_function_param
self.load_from = load_from
# initialize the network
self.repr_net = pretrained_models.get_pretrained_model(self.pretrained_arg, self.input_shape, device)
self.repr_shape = self.repr_net.output_shape
self.classifier, output_shape = nn_utils.parse_network_from_config(args=self.architecture_args['classifier'],
input_shape=self.repr_shape)
self.num_classes = output_shape[-1]
self.classifier = self.classifier.to(device)
self.grad_noise_class = nn_utils.get_grad_noise_class(standard_dev=noise_std, q_dist=noise_type)
if self.load_from is not None:
print("Loading the classifier model from {}".format(load_from))
import methods
stored_net = utils.load(load_from, methods=methods, device='cpu')
stored_net_params = dict(stored_net.classifier.named_parameters())
for key, param in self.classifier.named_parameters():
param.data = stored_net_params[key].data.to(device)
def on_epoch_start(self, partition, epoch, loader, **kwargs):
super(StandardClassifier, self).on_epoch_start(partition=partition, epoch=epoch,
loader=loader, **kwargs)
# In case of FW model, estimate the transition matrix and pass it to the model
if partition == 'train' and epoch == 0 and self.loss_function == 'fw':
T_est = baseline_utils.estimate_transition(load_from=self.load_from, data_loader=loader,
device=self.device)
self.loss_function_param = T_est
def forward(self, inputs, grad_enabled=False, **kwargs):
torch.set_grad_enabled(grad_enabled)
x = inputs[0].to(self.device)
pred = self.classifier(self.repr_net(x))
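        # Optionally route the logits through the gradient-noise function (per the class
        # docstring, this adds noise to the gradient w.r.t. the output logit).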
if self.add_noise:
pred = self.grad_noise_class.apply(pred)
out = {
'pred': pred
}
return out
def compute_loss(self, inputs, labels, outputs, grad_enabled, **kwargs):
torch.set_grad_enabled(grad_enabled)
pred = outputs['pred']
y = labels[0].to(self.device)
# classification loss
y_one_hot = F.one_hot(y, num_classes=self.num_classes).float()
classifier_loss = losses.get_classification_loss(target=y_one_hot, pred=pred,
loss_function=self.loss_function,
loss_function_param=self.loss_function_param)
batch_losses = {
'classifier': classifier_loss,
}
return batch_losses, outputs
class StandardClassifierWithNoise(BaseClassifier):
""" Standard classifier trained with cross-entropy loss and noisy gradients.
    Has an option to work on a pretrained representation of x.
"""
@utils.capture_arguments_of_init
def __init__(self, input_shape, architecture_args, pretrained_arg=None,
device='cuda', loss_function='ce', add_noise=False, noise_type='Gaussian',
noise_std=0.0, loss_function_param=None, **kwargs):
super(StandardClassifierWithNoise, self).__init__(**kwargs)
self.args = None # this will be modified by the decorator
self.input_shape = [None] + list(input_shape)
self.architecture_args = architecture_args
self.pretrained_arg = pretrained_arg
self.loss_function = loss_function
self.add_noise = add_noise
self.noise_type = noise_type
self.noise_std = noise_std
self.loss_function_param = loss_function_param
# initialize the network
self.repr_net = pretrained_models.get_pretrained_model(self.pretrained_arg, self.input_shape, device)
self.repr_shape = self.repr_net.output_shape
self.classifier, output_shape = nn_utils.parse_network_from_config(args=self.architecture_args['classifier'],
input_shape=self.repr_shape)
self.num_classes = output_shape[-1]
self.classifier = self.classifier.to(device)
def forward(self, inputs, grad_enabled=False, **kwargs):
torch.set_grad_enabled(grad_enabled)
x = inputs[0].to(self.device)
pred = self.classifier(self.repr_net(x))
out = {
'pred': pred
}
return out
def compute_loss(self, inputs, labels, outputs, grad_enabled, **kwargs):
torch.set_grad_enabled(grad_enabled)
pred = outputs['pred']
y = labels[0].to(self.device)
# classification loss
y_one_hot = F.one_hot(y, num_classes=self.num_classes).float()
classifier_loss = losses.get_classification_loss(target=y_one_hot, pred=pred,
loss_function=self.loss_function,
loss_function_param=self.loss_function_param)
batch_losses = {
'classifier': classifier_loss,
}
return batch_losses, outputs
def before_weight_update(self, **kwargs):
if not self.add_noise:
return
for param in self.parameters():
if param.requires_grad:
if self.noise_type == 'Gaussian':
param.grad += self.noise_std * torch.randn(size=param.shape, device=self.device)
else:
raise NotImplementedError()
|
import time
import requests
from hookee.conf import Config
from hookee import HookeeManager
from tests.testcase import HookeeTestCase
__author__ = "Alex Laird"
__copyright__ = "Copyright 2020, Alex Laird"
__version__ = "1.2.0"
class TestHookeeManagerEdges(HookeeTestCase):
def test_not_click_logging(self):
self.assertFalse(self.config.click_logging)
def test_hookee_manager(self):
# GIVEN
hookee_manager = HookeeManager()
hookee_manager._init_server_and_tunnel()
self.assertIsNotNone(hookee_manager.server._thread)
self.assertIsNotNone(hookee_manager.tunnel._thread)
# WHEN
hookee_manager.stop()
# Wait for things to tear down
time.sleep(2)
# THEN
self.assertIsNone(hookee_manager.server._thread)
self.assertIsNone(hookee_manager.tunnel._thread)
def test_custom_response(self):
# GIVEN
response_body = "<Response>Ok</Response>"
config = Config(response=response_body)
hookee_manager = HookeeManager(config=config)
hookee_manager._init_server_and_tunnel()
webhook_url = "{}/webhook".format(hookee_manager.tunnel.public_url)
# WHEN
response = requests.get(webhook_url)
# THEN
self.assertEqual(response.content.decode("utf-8"), response_body)
# WHEN
hookee_manager.stop()
# Wait for things to tear down
time.sleep(2)
|
#!/ufs/cluster/yao/bin/python
import re, os
import string
import sys
import array
import random
import shutil
from os import walk
def genTrace(input_filename):
print "Parsing " + input_filename
# output filename
output_filename = input_filename.split('.')[0] + "_total_misses.trc"
# open input file
inputfile = open(input_filename, "r")
# open output file
outputfile = open(output_filename, "w")
# read lines from input file
linePattern = "Miss count"
lineCount = 0
for line in inputfile:
searchResult = line.find(linePattern)
if searchResult != -1:
missCount = line.split(' ')[-1]
try:
int(missCount)
lineCount += 1
outputfile.write("%s %s" % (lineCount, missCount))
except ValueError:
                print(missCount)
inputfile.close()
outputfile.close()
# # create output folder if not exist
# dn = os.path.dirname(input_filename)
# print dn
# output_folder = dn + "/traces"
# print output_folder
# if not os.path.exists(output_folder):
# os.makedirs(output_folder)
# # move the output file to output folder
# shutil.move(output_filename, output_folder)
if len(sys.argv) != 2:
sys.exit("Must specify folder")
folder = sys.argv[1]
# get file names in the folder
f = []
for (dirpath, dirnames, filenames) in walk(folder):
f.extend(filenames)
break
# generate traces for all the files
for file in f:
if file[-3:] != 'trc':
genTrace(folder+file)
|
# -*- coding: utf-8 -*-
#
# Copyright © 2021–2022 martin f. krafft <tctools@pobox.madduck.net>
# Released under the MIT Licence
#
GAME_NAMES_BY_DRAW_SIZE = {
6: {
"401": ("Championship Final", "Final"),
"402": ("Plate Final", "Plate"),
},
8: {
"301": ("Championship Final", "Final"),
"302": ("Special Plate Final", "Sp.Plate"),
"303": ("Plate Final", "Plate"),
"304": ("Consolation Plate Final", "Co.Plate"),
},
}
def get_game_name(drawsize, gamecode, *, short=False):
names = GAME_NAMES_BY_DRAW_SIZE.get(drawsize)
if not names:
return gamecode
name = names.get(gamecode)
if name:
return name[1 if short else 0]
else:
return gamecode
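# Example usage (based on the tables above):
#   get_game_name(8, "302")              -> "Special Plate Final"
#   get_game_name(8, "302", short=True)  -> "Sp.Plate"
#   get_game_name(10, "302")             -> "302" (unknown draw size falls back to the code)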
|
"""
Order data for ticket purchases and hotel bookings from the various travel websites.
created by judy 2018/10/18
"""
from commonbaby.helpers import helper_crypto
from datacontract.idowndataset import Task
from datacontract.outputdata import EStandardDataType
from .feedbackbase import FeedDataBase, InnerDataBase, OrderData
class ITRAVELORDER_ONE(InnerDataBase, OrderData):
"""表示一个行程订单"""
def __init__(self, task: Task, apptype: int, userid, orderid):
super(ITRAVELORDER_ONE, self).__init__(task, apptype)
OrderData.__init__(self)
if userid is None:
            raise Exception('Userid cannot be None.')
if orderid is None:
raise Exception("Orderid cannot be None.")
self._userid = userid
self._orderid = orderid
        # When writing this field, the value must be a standard time string:
# 2019-01-01 00:00:00
self.ordertime: str = None
self.host = None
def _get_output_fields(self) -> dict:
""""""
self.append_to_fields('userid', self._userid)
self.append_to_fields('orderid', self._orderid)
self.append_to_fields('ordertime', self.ordertime)
self.append_to_fields('host', self.host)
self.append_to_fields('order', self._format_order())
# self.append_to_fields('order', json.dumps(self._order, ensure_ascii=False))
return self._fields
# def _get_write_lines(self):
# lines = ''
# lines += 'userid:{}\r\n'.format(self._userid)
# # if self.orderid is not None:
# lines += 'orderid:{}\r\n'.format(self._orderid)
# if self.ordertime is not None:
# lines += 'ordertime:{}\r\n'.format(self.ordertime)
# if self.host is not None:
# lines += 'host:{}\r\n'.format(helper_str.base64format(self.host))
# if isinstance(self._order, dict) and len(self._order) > 0:
# lines += 'order:{}\r\n'.format(
# helper_str.base64format(
# json.dumps(self._order).encode().decode('unicode_escape')))
# return lines
def get_display_name(self):
return self._orderid
def get_uniqueid(self):
return helper_crypto.get_md5_from_str("{}{}{}".format(self._apptype, self._userid, self._orderid))
class ITRAVELORDER(FeedDataBase):
"""表示一个账号的行程订单数据"""
def __init__(self, clientid: str, tsk: Task, apptype: int):
FeedDataBase.__init__(self, '.itravel_order',
EStandardDataType.TravelOrder, tsk, apptype,
clientid, True)
|
import numpy as np
from sklearn.preprocessing import MinMaxScaler
class Processor:
def __init__(self, data):
self.data = [list(item) for item in data]
self.attr_count = len(self.data[0])
    # # Used to map strings to numbers
    # def apply_nominal_conversion(self):
# for i in range(0, self.attr_count):
# if(type(self.data[0][i]) is np.string_):
# strings = set([x[i] for x in self.data])
# nums = range(0, len(strings))
# table = dict(zip(strings, nums))
# for j, item in enumerate(self.data):
# item[i] = table[item[i]]
def apply_scaling(self):
scaler = MinMaxScaler()
scaler.fit(self.data)
self.data = scaler.transform(self.data)
def get_data(self):
return self.data
# # Only for classification or clustering comparison
# def get_structured_data(self):
# return StructuredData(self.data[:, :self.attr_count - 1], self.data[:, self.attr_count - 1])
class StructuredData:
def __init__(self, X, y):
self.data = (X)
self.target = (y)
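# Minimal usage sketch (hypothetical data):
#   p = Processor([(1.0, 10.0), (2.0, 30.0), (3.0, 20.0)])
#   p.apply_scaling()
#   scaled = p.get_data()  # each column rescaled to the [0, 1] range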
|
# Generated by Django 2.1.1 on 2019-02-09 10:09
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('log', '0002_auto_20190209_0933'),
]
operations = [
migrations.RemoveField(
model_name='log',
name='obj',
),
migrations.AddField(
model_name='log',
name='_id',
field=models.IntegerField(default=0, verbose_name='ID'),
),
migrations.AddField(
model_name='log',
name='category',
field=models.TextField(blank=True, null=True, verbose_name='类型'),
),
]
|
#!/usr/bin/python
########################################################################################
#
# |B|I|G| |F|A|C|E| |R|O|B|O|T|I|C|S|
#
# HSV Colour selector for object detection using OpenCV
#
#
# Author : Peter Neal
#
# Date : 17 March 2015
# Last Update : 17 March 2015
#
########################################################################################
import cv2
import numpy as np
from colorama import init,Fore
init(autoreset=True)
def nothing(x):
pass
print(Fore.GREEN + "Starting OpenCV")
capture = cv2.VideoCapture(0)
capture.set(3,640)
capture.set(4,480)
cv2.namedWindow("camera", 0)
print (Fore.GREEN + "Creating OpenCV windows")
cv2.resizeWindow("camera", 640,480)
cv2.moveWindow("camera", 400,30)
print (Fore.GREEN + "Moving OpenCV window")
cv2.waitKey(50)
# create trackbars for HSV Selection
cv2.createTrackbar('HLow','camera',0,179,nothing)
cv2.createTrackbar('SLow','camera',0,255,nothing)
cv2.createTrackbar('VLow','camera',0,255,nothing)
cv2.createTrackbar('HHigh','camera',0,179,nothing)
cv2.createTrackbar('SHigh','camera',0,255,nothing)
cv2.createTrackbar('VHigh','camera',0,255,nothing)
while True:
HLow = cv2.getTrackbarPos('HLow','camera')
SLow = cv2.getTrackbarPos('SLow','camera')
VLow = cv2.getTrackbarPos('VLow','camera')
HHigh = cv2.getTrackbarPos('HHigh','camera')
SHigh = cv2.getTrackbarPos('SHigh','camera')
VHigh = cv2.getTrackbarPos('VHigh','camera')
ret,img = capture.read() #get a bunch of frames to make sure current frame is the most recent
ret,img = capture.read()
ret,img = capture.read()
ret,img = capture.read()
ret,img = capture.read() #5 seems to be enough
imgHSV = cv2.cvtColor(img,cv2.COLOR_BGR2HSV) #convert img to HSV and store result in imgHSV
lower = np.array([HLow, SLow, VLow]) #np arrays for upper and lower thresholds
upper = np.array([HHigh, SHigh, VHigh])
imgthreshed = cv2.inRange(imgHSV, lower, upper) #threshold imgHSV
imgthreshed = cv2.blur(imgthreshed,(3,3))
cv2.imshow("camera", imgthreshed)
cv2.waitKey(10)
cv2.destroyAllWindows()
|
# -*- coding: utf-8 -*-
from functools import partial
import dask.bag as db
from kartothek.core import naming
from kartothek.core.factory import _ensure_factory
from kartothek.core.utils import _check_callable
from kartothek.core.uuid import gen_uuid
from kartothek.io.eager import read_dataset_as_metapartitions
from kartothek.io_components.docs import default_docs
from kartothek.io_components.index import update_indices_from_partitions
from kartothek.io_components.metapartition import (
MetaPartition,
parse_input_to_metapartition,
)
from kartothek.io_components.utils import normalize_args
from kartothek.io_components.write import (
raise_if_dataset_exists,
store_dataset_from_partitions,
)
def _store_dataset_from_partitions_flat(mpss, *args, **kwargs):
return store_dataset_from_partitions(
[mp for sublist in mpss for mp in sublist], *args, **kwargs
)
@default_docs
@normalize_args
def store_bag_as_dataset(
bag,
store,
dataset_uuid=None,
metadata=None,
df_serializer=None,
overwrite=False,
metadata_merger=None,
metadata_version=naming.DEFAULT_METADATA_VERSION,
partition_on=None,
metadata_storage_format=naming.DEFAULT_METADATA_STORAGE_FORMAT,
secondary_indices=None,
):
"""
Transform and store a dask.bag of dictionaries containing
dataframes to a kartothek dataset in store.
This is the dask.bag-equivalent of
:func:`store_delayed_as_dataset`. See there
for more detailed documentation on the different possible input types.
Parameters
----------
bag: dask.bag
A dask bag containing dictionaries of dataframes or dataframes.
Returns
-------
A dask.bag.Item dataset object.
"""
_check_callable(store)
if dataset_uuid is None:
dataset_uuid = gen_uuid()
if not overwrite:
raise_if_dataset_exists(dataset_uuid=dataset_uuid, store=store)
input_to_mps = partial(
parse_input_to_metapartition, metadata_version=metadata_version
)
mps = bag.map(input_to_mps)
if partition_on:
mps = mps.map(MetaPartition.partition_on, partition_on=partition_on)
if secondary_indices:
mps = mps.map(MetaPartition.build_indices, columns=secondary_indices)
mps = mps.map(
MetaPartition.store_dataframes,
store=store,
df_serializer=df_serializer,
dataset_uuid=dataset_uuid,
)
aggregate = partial(
_store_dataset_from_partitions_flat,
dataset_uuid=dataset_uuid,
store=store,
dataset_metadata=metadata,
metadata_merger=metadata_merger,
metadata_storage_format=metadata_storage_format,
)
return mps.reduction(perpartition=list, aggregate=aggregate, split_every=False)
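# Rough usage sketch: the store factory and path below are placeholders, not
# part of this module, and `storefact` is only one possible way to build a
# store callable.
# >>> import dask.bag as db
# >>> import pandas as pd
# >>> bag = db.from_sequence([pd.DataFrame({"x": [1, 2]})])
# >>> graph = store_bag_as_dataset(
# ...     bag,
# ...     store=lambda: storefact.get_store_from_url("hfs:///tmp/ktk_data"),
# ...     dataset_uuid="example_dataset",
# ... )
# >>> graph.compute()  # executes the writes and returns the stored dataset metadata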
@default_docs
def build_dataset_indices__bag(
store, dataset_uuid, columns, partition_size=None, factory=None
):
"""
Function which builds a :class:`~kartothek.core.index.ExplicitSecondaryIndex`.
This function loads the dataset, computes the requested indices and writes
the indices to the dataset. The dataset partitions themselves are not mutated.
Parameters
----------
partition_size: Optional[int]
Dask bag partition size. Use larger numbers to decrease scheduler load and overhead; use smaller numbers for
more fine-grained scheduling and better resilience against worker errors.
Returns
-------
A dask.bag.Bag computation object.
"""
ds_factory = _ensure_factory(
dataset_uuid=dataset_uuid,
store=store,
factory=factory,
load_dataset_metadata=False,
)
mps = read_dataset_as_metapartitions(factory=ds_factory)
return (
db.from_sequence(seq=mps, partition_size=partition_size)
.map(MetaPartition.build_indices, columns=columns)
.map(MetaPartition.remove_dataframes)
.reduction(list, list, split_every=False, out_type=db.Bag)
.flatten()
.map_partitions(list)
.map_partitions(
update_indices_from_partitions, dataset_metadata_factory=ds_factory
)
)
|
from dialect import *
from xdsl.pattern_rewriter import RewritePattern, GreedyRewritePatternApplier, op_type_rewrite_pattern, PatternRewriter, PatternRewriteWalker
from xdsl.ir import MLContext, Operation, SSAValue, Region, Block, Attribute
from dataclasses import dataclass
import xdsl.dialects.memref as memref
import xdsl.dialects.arith as arith
import xdsl.dialects.scf as scf
import xdsl.dialects.builtin as builtin
tensor_shape: dict[str, int] = {}
tensor_shape["P"] = 3
tensor_shape["Q"] = 4
tensor_type = builtin.f32
output_buf = 1
@dataclass
class IndexRewriter(RewritePattern):
@op_type_rewrite_pattern
def match_and_rewrite(self, index_op: IndexOp, rewriter: PatternRewriter):
load_op = memref.Load.get(index_op.tensor, index_op.indices)
store_op = memref.Store.get(load_op, index_op.tensor, index_op.indices)
id_op = arith.Constant.from_int_constant(3, 32)
rewriter.replace_op(index_op, [load_op, store_op, id_op])
@dataclass
class DeIndexOpRewriter(RewritePattern):
@op_type_rewrite_pattern
def match_and_rewrite(self, deindex_op: DeIndexOp,
rewriter: PatternRewriter):
new_ops = []
outer_len = tensor_shape[
deindex_op.body.blocks[0].args[0].typ.parameters[0].data]
inner_len = tensor_shape[
deindex_op.body.blocks[0].args[1].typ.parameters[0].data]
output = memref.Alloca.get(tensor_type, 4, [outer_len, inner_len])
output_buf = output
new_ops.append(output)
outer_ind_op = arith.Constant.from_int_constant(0, 32)
new_ops.append(outer_ind_op)
outer_len_op = arith.Constant.from_int_constant(outer_len, 32)
new_ops.append(outer_len_op)
inner_ind_op = arith.Constant.from_int_constant(0, 32)
new_ops.append(inner_ind_op)
inner_len_op = arith.Constant.from_int_constant(inner_len, 32)
new_ops.append(inner_len_op)
one_op = arith.Constant.from_int_constant(1, 32)
new_ops.append(one_op)
outer_comp_op = arith.Cmpi.get(outer_ind_op, outer_len_op, 6)
outer_inc_op = arith.Addi.get(outer_ind_op, one_op)
outer_comp_ops = [outer_comp_op]
inner_comp_op = arith.Cmpi.get(inner_ind_op, inner_len_op, 6)
inner_inc_op = arith.Addi.get(inner_ind_op, one_op)
inner_comp_ops = [inner_comp_op]
inner_while = scf.While.build(
operands=[[]],
result_types=[[
memref.MemRefType.from_type_and_list(IntAttr.from_int(3),
[outer_len, inner_len])
]],
regions=[
Region.from_operation_list(inner_comp_ops),
Region.from_operation_list([])
])
block = deindex_op.body.detach_block(deindex_op.body.blocks[0])
inner_while.after_region.insert_block(block, 0)
inner_while.after_region.blocks[0].add_op(inner_inc_op)
outer_while = scf.While.build(
operands=[[]],
result_types=[[
memref.MemRefType.from_type_and_list(IntAttr.from_int(3),
[outer_len, inner_len])
]],
regions=[
Region.from_operation_list(outer_comp_ops),
Region.from_operation_list([inner_while])
])
outer_while.after_region.blocks[0].add_op(outer_inc_op)
new_ops.append(outer_while)
rewriter.replace_op(deindex_op, new_ops)
@dataclass
class LambdaRewriter():
@op_type_rewrite_pattern
def match_and_rewrite(self, lambda_op: LambdaOp,
rewriter: PatternRewriter):
outer_len = tensor_shape[
lambda_op.body.blocks[0].args[0].typ.parameters[0].data[0].data]
inner_len = tensor_shape[
lambda_op.body.blocks[0].args[0].typ.parameters[0].data[1].data]
type_ = memref.MemRefType.from_type_and_list(IntAttr.from_int(2),
[outer_len, inner_len])
lambda_op.body.blocks[0].args[0].typ = type_
def transform_dtl(ctx: MLContext, op: Operation):
applier = PatternRewriteWalker(GreedyRewritePatternApplier(
[DeIndexOpRewriter(), LambdaRewriter(), IndexRewriter()]),
walk_regions_first=False)
applier.rewrite_module(op)
|
import numpy as np
import cv2
import tensorflow as tf
def get_clean_name(string):
if "depth" in string.lower() and "kernel" in string.lower():
return string.split('/')[0] + '/' + 'Kernel'
elif "depth" in string.lower() and "bias" in string.lower():
return string.split('/')[0] + '/' + 'Bias'
elif "conv2d" in string.lower() and "kernel" in string.lower():
return string.split('/')[0] + '/' + 'Kernel'
elif "conv2d" in string.lower() and "bias" in string.lower():
return string.split('/')[0] + '/' + 'Bias'
elif "lu" in string.lower():
return string.split('/')[0] + '/' + "Alpha"
elif "kernel" in string.lower():
return string.split('/')[0] + '/' + "Kernel"
elif "bias" in string.lower():
return string.split('/')[0] + '/' + "Bias"
else:
raise ValueError("Input string not understood")
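# Examples of the renaming performed by the rules above:
# get_clean_name("conv2d_21/kernel:0")        -> "conv2d_21/Kernel"
# get_clean_name("depthwise_conv2d_18/bias")  -> "depthwise_conv2d_18/Bias"
# get_clean_name("p_re_lu_20/alpha")          -> "p_re_lu_20/Alpha"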
exception_mapping = {
"depthwise_conv2d_18/depthwise_kernel" : "depthwise_conv2d_22/Kernel",
"depthwise_conv2d_18/bias": "depthwise_conv2d_22/Bias",
"conv2d_21/kernel" : "conv2d_27/Kernel",
"conv2d_21/bias": "conv2d_27/Bias",
"p_re_lu_20/alpha": "p_re_lu_25/Alpha"
}
def restore_variables(model,tf_lite_mapping, data_format):
channels_first = True if data_format == "channels_first" else False
restored = 0
total_params = 0
for var in model.variables:
try:
name = get_clean_name(var.name)
weight = tf_lite_mapping[name]
except KeyError:
map_string = exception_mapping[var.name[:-2]]
name = get_clean_name(map_string)
weight = tf_lite_mapping[name]
if weight.ndim == 4:
weight = np.transpose(weight, (1,2,3,0)) # conv transpose
elif weight.ndim ==3:
if channels_first: weight = np.transpose(weight, (2, 0, 1)) #prelu_transpose
total_params += np.product(weight.shape)
var.assign(weight)
print("{} assigned with {}".format(var.name, name))
restored += 1
print("Restored {} variables from tflite file".format(restored))
print("Restore {} float values".format(total_params))
def xywh_to_tlbr(boxes, y_first=False):
"""
boxes - (N, 4)
"""
final_boxes = boxes.copy()
if not y_first:
final_boxes[:, 0:2] = np.clip(boxes[:, 0:2] - (boxes[:, 2:4]/2), 0, None) #clipping at 0 since image dim starts at 0
final_boxes[:, 2:4] = boxes[:, 0:2] + (boxes[:, 2:4]/2)
else:
final_boxes[:, 0:2] = np.clip(boxes[:, [1,0]] - (boxes[:, [3,2]]/2), 0, None)
final_boxes[:, 2:4] = boxes[:, [1,0]] + (boxes[:, [3,2]]/2)
return final_boxes
def create_letterbox_image(frame, dim):
h, w = frame.shape[0:2]
scale = min(dim/h, dim/w)
nh, nw = int(scale*h), int(scale*w)
resized = cv2.resize(frame, (nw, nh))
new_image = np.zeros((dim, dim, 3), np.uint8)
new_image.fill(255)  # white padding; 255 is the uint8 maximum (256 would overflow)
dx = (dim-nw)//2
dy = (dim-nh)//2
new_image[dy:dy+nh, dx:dx+nw,:] = resized
return new_image
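# Worked example (numbers chosen only for illustration): letterboxing a 480x640
# (HxW) frame into dim=128 gives scale = min(128/480, 128/640) = 0.2, so the
# resized frame is 96x128 and is pasted at offsets dx = 0, dy = (128 - 96)//2 = 16.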
#takes the letterbox dimensions and the original dimensions to map the results in letterbox image coordinates
#to original image coordinates
def convert_to_orig_points(results, orig_dim, letter_dim):
if results.ndim == 1: results = np.expand_dims(results, 0)
inter_scale = min(letter_dim/orig_dim[0], letter_dim/orig_dim[1])
inter_h, inter_w = int(inter_scale*orig_dim[0]), int(inter_scale*orig_dim[1])
offset_x, offset_y = (letter_dim - inter_w)/2.0/letter_dim, (letter_dim - inter_h)/2.0/letter_dim
scale_x, scale_y = letter_dim/inter_w, letter_dim/inter_h
results[:, 0:2] = (results[:, 0:2] - [offset_x, offset_y]) * [scale_x, scale_y]
results[:, 2:4] = results[:, 2:4] * [scale_x, scale_y]
results[:, 4:16:2] = (results[:, 4:16:2] - offset_x) * scale_x
results[:, 5:17:2] = (results[:, 5:17:2] - offset_y) * scale_y
#converting from 0-1 range to (orign_dim) range
results[:, 0:16:2] *= orig_dim[1]
results[:, 1:17:2] *= orig_dim[0]
return results.astype(np.int32)
def process_detections(results, orig_dim, max_boxes=5, score_threshold=0.75, iou_threshold=0.5, pad_ratio=0.5):
box_tlbr = xywh_to_tlbr(results[:, 0:4], y_first=True)
out_boxes = tf.image.non_max_suppression(box_tlbr, results[:, -1], max_boxes,
score_threshold=score_threshold, iou_threshold=iou_threshold)
filter_boxes = results[out_boxes.numpy(), :-1]
orig_points = convert_to_orig_points(filter_boxes, orig_dim, 128)
landmarks_xywh = orig_points.copy()
landmarks_xywh[:, 2:4] += (landmarks_xywh[:, 2:4] * pad_ratio).astype(np.int32) #adding some padding around detection for landmark detection step.
landmarks_xywh[:, 1:2] -= (landmarks_xywh[:, 3:4]*0.08).astype(np.int32) #adjusting center_y since the detector outputs boxes from forehead and to account for that bias
final_boxes = xywh_to_tlbr(orig_points).astype(np.int32)
return final_boxes, landmarks_xywh
def get_landmarks_crop(orig_frame, landmarks_proposals, input_dim):
landmarks_proposals = xywh_to_tlbr(landmarks_proposals).astype(np.int32)
proposals = []
for prop in landmarks_proposals:
proposals.append(cv2.cvtColor(cv2.resize(orig_frame[prop[1]:prop[3], prop[0]:prop[2], :], (input_dim[1], input_dim[0])), cv2.COLOR_BGR2RGB))
proposals = np.array(proposals).astype(np.float32)/127.5 - 1
return proposals
def process_landmarks(landmarks_result, landmarks_proposals, orig_dim, land_dim):
landmarks_result = np.reshape(landmarks_result, (-1, 468, 3))[:, :, :2]
proposal_orig_scale = landmarks_proposals[:, 2:4] / land_dim
landmarks_result[:, :, :2] *= proposal_orig_scale[:, np.newaxis, :]
landmarks_prop_tlwh = xywh_to_tlbr(landmarks_proposals)
landmarks_result += landmarks_prop_tlwh[:,np.newaxis, 0:2]
landmarks_result = landmarks_result.astype(np.int32)
return landmarks_result
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright Kitware Inc.
#
# Licensed under the Apache License, Version 2.0 ( the "License" );
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
from girder import events
from girder.models.model_base import ValidationException
from . import constants
def validateSettings(event):
key, val = event.info['key'], event.info['value']
if key == constants.PluginSettings.GOOGLE_ANALYTICS_TRACKING_ID:
if not val:
raise ValidationException(
'Google Analytics Tracking ID must not be empty.', 'value')
event.preventDefault().stopPropagation()
def load(info):
events.bind('model.setting.validate', 'google_analytics', validateSettings)
|
#!/usr/bin/env python
# coding=utf-8
import os
import logging
import datetime
from keras.utils.visualize_util import plot
from model_io import read_model
module_dir = os.path.dirname(os.path.abspath(__file__))
module_name = os.path.basename(__file__).split('.')[0]
log_path = os.path.join(module_dir, os.path.pardir, 'logs', module_name + '_' + datetime.date.today().strftime('%Y%m%d') + '.log')
logger = logging.getLogger(module_name)
logger.setLevel(logging.DEBUG)
fh = logging.FileHandler(log_path)
ch = logging.StreamHandler()
fh.setLevel(logging.DEBUG)
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter('[%(asctime)s][%(name)s][%(levelname)s]: %(message)s')
fh.setFormatter(formatter)
ch.setFormatter(formatter)
logger.addHandler(fh)
logger.addHandler(ch)
if __name__ == "__main__":
# load model
model = read_model(os.path.join(module_dir, os.path.pardir, 'models', 'distracted_driver'))
plot(model, to_file = 'model.png')
|
from multiprocessing import Process,Pool
def f(x):
return x*x
if __name__ == "__main__":
try:
p = Pool(5)
result = p.map(f,[1,2,3,4,5])
print(result)
except Exception as e:
print("The Actual Error is:",e)
|
import os
import json
import copy
import logging
import collections
import datetime
from abc import ABCMeta, abstractmethod
import six
import openpype
from .constants import (
GLOBAL_SETTINGS_KEY,
SYSTEM_SETTINGS_KEY,
PROJECT_SETTINGS_KEY,
PROJECT_ANATOMY_KEY,
LOCAL_SETTING_KEY,
M_OVERRIDEN_KEY
)
from .lib import load_json_file
JSON_EXC = getattr(json.decoder, "JSONDecodeError", ValueError)
@six.add_metaclass(ABCMeta)
class SettingsHandler:
@abstractmethod
def save_studio_settings(self, data):
"""Save studio overrides of system settings.
Do not use to store whole system settings data with defaults, but only
its overrides with metadata defining how the overrides should be applied
in the load function. For loading, the function
`studio_system_settings` should be used.
Args:
data(dict): Data of studio overrides with override metadata.
"""
pass
@abstractmethod
def save_project_settings(self, project_name, overrides):
"""Save studio overrides of project settings.
Data are saved for specific project or as defaults for all projects.
Do not use to store whole project settings data with defaults, but only
its overrides with metadata defining how the overrides should be applied
in the load function. For loading, use
`get_studio_project_settings_overrides` for global project settings
and `get_project_settings_overrides` for project specific settings.
Args:
project_name(str, null): Project name for which overrides are saved,
or None for global settings.
data(dict): Data of project overrides with override metadata.
"""
pass
@abstractmethod
def save_project_anatomy(self, project_name, anatomy_data):
"""Save studio overrides of project anatomy data.
Args:
project_name(str, null): Project name for which overrides are saved,
or None for global settings.
data(dict): Data of project overrides with override metadata.
"""
pass
@abstractmethod
def get_studio_system_settings_overrides(self):
"""Studio overrides of system settings."""
pass
@abstractmethod
def get_studio_project_settings_overrides(self):
"""Studio overrides of default project settings."""
pass
@abstractmethod
def get_studio_project_anatomy_overrides(self):
"""Studio overrides of default project anatomy data."""
pass
@abstractmethod
def get_project_settings_overrides(self, project_name):
"""Studio overrides of project settings for specific project.
Args:
project_name(str): Name of project for which data should be loaded.
Returns:
dict: Only overrides for entered project, may be empty dictionary.
"""
pass
@abstractmethod
def get_project_anatomy_overrides(self, project_name):
"""Studio overrides of project anatomy for specific project.
Args:
project_name(str): Name of project for which data should be loaded.
Returns:
dict: Only overrides for entered project, may be empty dictionary.
"""
pass
@six.add_metaclass(ABCMeta)
class LocalSettingsHandler:
"""Handler responsible for storing and loading local settings.
Local settings are "workstation" specific modifications that modify how
system and project settings look on the workstation and only there.
"""
@abstractmethod
def save_local_settings(self, data):
"""Save local data of local settings.
Args:
data(dict): Data of local data with override metadata.
"""
pass
@abstractmethod
def get_local_settings(self):
"""Studio overrides of system settings."""
pass
class CacheValues:
cache_lifetime = 10
def __init__(self):
self.data = None
self.creation_time = None
def data_copy(self):
if not self.data:
return {}
return copy.deepcopy(self.data)
def update_data(self, data):
self.data = data
self.creation_time = datetime.datetime.now()
def update_from_document(self, document):
data = {}
if document:
if "data" in document:
data = document["data"]
elif "value" in document:
value = document["value"]
if value:
data = json.loads(value)
self.data = data
def to_json_string(self):
return json.dumps(self.data or {})
@property
def is_outdated(self):
if self.creation_time is None:
return True
delta = (datetime.datetime.now() - self.creation_time).seconds
return delta > self.cache_lifetime
class MongoSettingsHandler(SettingsHandler):
"""Settings handler that use mongo for storing and loading of settings."""
global_general_keys = ("openpype_path", "admin_password", "disk_mapping")
def __init__(self):
# Get mongo connection
from openpype.lib import OpenPypeMongoConnection
from avalon.api import AvalonMongoDB
settings_collection = OpenPypeMongoConnection.get_mongo_client()
self._anatomy_keys = None
self._attribute_keys = None
# TODO prepare version of pype
# - pype version should define how are settings saved and loaded
database_name = os.environ["OPENPYPE_DATABASE_NAME"]
# TODO modify to not use hardcoded keys
collection_name = "settings"
self.settings_collection = settings_collection
self.database_name = database_name
self.collection_name = collection_name
self.collection = settings_collection[database_name][collection_name]
self.avalon_db = AvalonMongoDB()
self.system_settings_cache = CacheValues()
self.project_settings_cache = collections.defaultdict(CacheValues)
self.project_anatomy_cache = collections.defaultdict(CacheValues)
def _prepare_project_settings_keys(self):
from .entities import ProjectSettings
# Prepare anatomy keys and attribute keys
# NOTE this is cached on first import
# - keys may change only on schema change which should not happen
# during production
project_settings_root = ProjectSettings(
reset=False, change_state=False
)
anatomy_entity = project_settings_root["project_anatomy"]
anatomy_keys = set(anatomy_entity.keys())
anatomy_keys.remove("attributes")
attribute_keys = set(anatomy_entity["attributes"].keys())
self._anatomy_keys = anatomy_keys
self._attribute_keys = attribute_keys
@property
def anatomy_keys(self):
if self._anatomy_keys is None:
self._prepare_project_settings_keys()
return self._anatomy_keys
@property
def attribute_keys(self):
if self._attribute_keys is None:
self._prepare_project_settings_keys()
return self._attribute_keys
def _extract_global_settings(self, data):
"""Extract global settings data from system settings overrides.
This is now limited to "general" key in system settings which must be
set as group in schemas.
Returns:
dict: Global settings extracted from system settings data.
"""
output = {}
if "general" not in data:
return output
general_data = data["general"]
# Add predefined keys to global settings if are set
for key in self.global_general_keys:
if key not in general_data:
continue
# Pop key from values
output[key] = general_data.pop(key)
# Pop key from overriden metadata
if (
M_OVERRIDEN_KEY in general_data
and key in general_data[M_OVERRIDEN_KEY]
):
general_data[M_OVERRIDEN_KEY].remove(key)
return output
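# Illustration of _extract_global_settings (the values are made up):
# given data = {"general": {"openpype_path": "/mnt/pype", "studio_name": "X"}}
# it returns {"openpype_path": "/mnt/pype"} and leaves
# data["general"] == {"studio_name": "X"}; only the predefined global keys are
# popped out of the system settings overrides.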
def _apply_global_settings(
self, system_settings_document, globals_document
):
"""Apply global settings data to system settings.
Application of the global settings is skipped if the document with global
settings is not available or has no data set.
The system settings document is "faked" as if it exists when the global
document has values set.
Args:
system_settings_document (dict): System settings document from
MongoDB.
globals_document (dict): Global settings document from MongoDB.
Returns:
Merged document which has applied global settings data.
"""
# Skip if globals document is not available
if (
not globals_document
or "data" not in globals_document
or not globals_document["data"]
):
return system_settings_document
globals_data = globals_document["data"]
# Check if data contain any key from predefined keys
any_key_found = False
if globals_data:
for key in self.global_general_keys:
if key in globals_data:
any_key_found = True
break
# Skip if any key from predefined key was not found in globals
if not any_key_found:
return system_settings_document
# "Fake" system settings document if document does not exist
# - global settings document may exist but system settings not yet
if not system_settings_document:
system_settings_document = {}
if "data" in system_settings_document:
system_settings_data = system_settings_document["data"]
else:
system_settings_data = {}
system_settings_document["data"] = system_settings_data
if "general" in system_settings_data:
system_general = system_settings_data["general"]
else:
system_general = {}
system_settings_data["general"] = system_general
overriden_keys = system_general.get(M_OVERRIDEN_KEY) or []
for key in self.global_general_keys:
if key not in globals_data:
continue
system_general[key] = globals_data[key]
if key not in overriden_keys:
overriden_keys.append(key)
if overriden_keys:
system_general[M_OVERRIDEN_KEY] = overriden_keys
return system_settings_document
def save_studio_settings(self, data):
"""Save studio overrides of system settings.
Do not use to store whole system settings data with defaults, but only
its overrides with metadata defining how the overrides should be applied
in the load function. For loading, the function
`studio_system_settings` should be used.
Args:
data(dict): Data of studio overrides with override metadata.
"""
# Update cache
self.system_settings_cache.update_data(data)
# Get copy of just updated cache
system_settings_data = self.system_settings_cache.data_copy()
# Extract global settings from system settings
global_settings = self._extract_global_settings(
system_settings_data
)
# Store system settings
self.collection.replace_one(
{
"type": SYSTEM_SETTINGS_KEY
},
{
"type": SYSTEM_SETTINGS_KEY,
"data": system_settings_data
},
upsert=True
)
# Store global settings
self.collection.replace_one(
{
"type": GLOBAL_SETTINGS_KEY
},
{
"type": GLOBAL_SETTINGS_KEY,
"data": global_settings
},
upsert=True
)
def save_project_settings(self, project_name, overrides):
"""Save studio overrides of project settings.
Data are saved for specific project or as defaults for all projects.
Do not use to store whole project settings data with defaults, but only
its overrides with metadata defining how the overrides should be applied
in the load function. For loading, use
`get_studio_project_settings_overrides` for global project settings
and `get_project_settings_overrides` for project specific settings.
Args:
project_name(str, null): Project name for which overrides are saved,
or None for global settings.
data(dict): Data of project overrides with override metadata.
"""
data_cache = self.project_settings_cache[project_name]
data_cache.update_data(overrides)
self._save_project_data(
project_name, PROJECT_SETTINGS_KEY, data_cache
)
def save_project_anatomy(self, project_name, anatomy_data):
"""Save studio overrides of project anatomy data.
Args:
project_name(str, null): Project name for which overrides are saved,
or None for global settings.
data(dict): Data of project overrides with override metadata.
"""
data_cache = self.project_anatomy_cache[project_name]
data_cache.update_data(anatomy_data)
if project_name is not None:
self._save_project_anatomy_data(project_name, data_cache)
else:
self._save_project_data(
project_name, PROJECT_ANATOMY_KEY, data_cache
)
@classmethod
def prepare_mongo_update_dict(cls, in_data):
data = {}
for key, value in in_data.items():
if not isinstance(value, dict):
data[key] = value
continue
new_value = cls.prepare_mongo_update_dict(value)
for _key, _value in new_value.items():
new_key = ".".join((key, _key))
data[new_key] = _value
return data
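# Example of the flattening performed by prepare_mongo_update_dict
# (illustrative input):
# prepare_mongo_update_dict({"data": {"fps": 25, "resolution": {"w": 1920}}})
#   -> {"data.fps": 25, "data.resolution.w": 1920}
# Nested dictionaries become dotted keys suitable for a Mongo "$set" update.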
def _save_project_anatomy_data(self, project_name, data_cache):
# Create copy of data as they will be modified during save
new_data = data_cache.data_copy()
# Prepare avalon project document
collection = self.avalon_db.database[project_name]
project_doc = collection.find_one({
"type": "project"
})
if not project_doc:
raise ValueError((
"Project document of project \"{}\" does not exist."
" Create project first."
).format(project_name))
# Project's data
update_dict_data = {}
project_doc_data = project_doc.get("data") or {}
attributes = new_data.pop("attributes")
_applications = attributes.pop("applications", None) or []
for key, value in attributes.items():
if (
key in project_doc_data
and project_doc_data[key] == value
):
continue
update_dict_data[key] = value
update_dict_config = {}
applications = []
for application in _applications:
if not application:
continue
if isinstance(application, six.string_types):
applications.append({"name": application})
new_data["apps"] = applications
for key, value in new_data.items():
project_doc_value = project_doc.get(key)
if key in project_doc and project_doc_value == value:
continue
update_dict_config[key] = value
if not update_dict_data and not update_dict_config:
return
data_changes = self.prepare_mongo_update_dict(update_dict_data)
# Update dictionary of changes that will be changed in mongo
update_dict = {}
for key, value in data_changes.items():
new_key = "data.{}".format(key)
update_dict[new_key] = value
for key, value in update_dict_config.items():
new_key = "config.{}".format(key)
update_dict[new_key] = value
collection.update_one(
{"type": "project"},
{"$set": update_dict}
)
def _save_project_data(self, project_name, doc_type, data_cache):
is_default = bool(project_name is None)
replace_filter = {
"type": doc_type,
"is_default": is_default
}
replace_data = {
"type": doc_type,
"data": data_cache.data,
"is_default": is_default
}
if not is_default:
replace_filter["project_name"] = project_name
replace_data["project_name"] = project_name
self.collection.replace_one(
replace_filter,
replace_data,
upsert=True
)
def get_studio_system_settings_overrides(self):
"""Studio overrides of system settings."""
if self.system_settings_cache.is_outdated:
system_settings_document = None
globals_document = None
docs = self.collection.find({
# Use `$or` as system settings may have more filters in future
"$or": [
{"type": GLOBAL_SETTINGS_KEY},
{"type": SYSTEM_SETTINGS_KEY},
]
})
for doc in docs:
doc_type = doc["type"]
if doc_type == GLOBAL_SETTINGS_KEY:
globals_document = doc
elif doc_type == SYSTEM_SETTINGS_KEY:
system_settings_document = doc
merged_document = self._apply_global_settings(
system_settings_document, globals_document
)
self.system_settings_cache.update_from_document(merged_document)
return self.system_settings_cache.data_copy()
def _get_project_settings_overrides(self, project_name):
if self.project_settings_cache[project_name].is_outdated:
document_filter = {
"type": PROJECT_SETTINGS_KEY,
}
if project_name is None:
document_filter["is_default"] = True
else:
document_filter["project_name"] = project_name
document = self.collection.find_one(document_filter)
self.project_settings_cache[project_name].update_from_document(
document
)
return self.project_settings_cache[project_name].data_copy()
def get_studio_project_settings_overrides(self):
"""Studio overrides of default project settings."""
return self._get_project_settings_overrides(None)
def get_project_settings_overrides(self, project_name):
"""Studio overrides of project settings for specific project.
Args:
project_name(str): Name of project for which data should be loaded.
Returns:
dict: Only overrides for entered project, may be empty dictionary.
"""
if not project_name:
return {}
return self._get_project_settings_overrides(project_name)
def project_doc_to_anatomy_data(self, project_doc):
"""Convert project document to anatomy data.
Probably should fill missing keys and values.
"""
if not project_doc:
return {}
attributes = {}
project_doc_data = project_doc.get("data") or {}
for key in self.attribute_keys:
value = project_doc_data.get(key)
if value is not None:
attributes[key] = value
project_doc_config = project_doc.get("config") or {}
app_names = set()
if not project_doc_config or "apps" not in project_doc_config:
set_applications = False
else:
set_applications = True
for app_item in project_doc_config["apps"]:
if not app_item:
continue
app_name = app_item.get("name")
if app_name:
app_names.add(app_name)
if set_applications:
attributes["applications"] = list(app_names)
output = {"attributes": attributes}
for key in self.anatomy_keys:
value = project_doc_config.get(key)
if value is not None:
output[key] = value
return output
def _get_project_anatomy_overrides(self, project_name):
if self.project_anatomy_cache[project_name].is_outdated:
if project_name is None:
document_filter = {
"type": PROJECT_ANATOMY_KEY,
"is_default": True
}
document = self.collection.find_one(document_filter)
self.project_anatomy_cache[project_name].update_from_document(
document
)
else:
collection = self.avalon_db.database[project_name]
project_doc = collection.find_one({"type": "project"})
self.project_anatomy_cache[project_name].update_data(
self.project_doc_to_anatomy_data(project_doc)
)
return self.project_anatomy_cache[project_name].data_copy()
def get_studio_project_anatomy_overrides(self):
"""Studio overrides of default project anatomy data."""
return self._get_project_anatomy_overrides(None)
def get_project_anatomy_overrides(self, project_name):
"""Studio overrides of project anatomy for specific project.
Args:
project_name(str): Name of project for which data should be loaded.
Returns:
dict: Only overrides for entered project, may be empty dictionary.
"""
if not project_name:
return {}
return self._get_project_anatomy_overrides(project_name)
class MongoLocalSettingsHandler(LocalSettingsHandler):
"""Settings handler that use mongo for store and load local settings.
Data have 2 query criteria. First is key "type" stored in constant
`LOCAL_SETTING_KEY`. Second is key "site_id" whose value can be obtained
with `get_local_site_id` function.
"""
def __init__(self, local_site_id=None):
# Get mongo connection
from openpype.lib import (
OpenPypeMongoConnection,
get_local_site_id
)
if local_site_id is None:
local_site_id = get_local_site_id()
settings_collection = OpenPypeMongoConnection.get_mongo_client()
# TODO prepare version of pype
# - pype version should define how are settings saved and loaded
database_name = os.environ["OPENPYPE_DATABASE_NAME"]
# TODO modify to not use hardcoded keys
collection_name = "settings"
self.settings_collection = settings_collection
self.database_name = database_name
self.collection_name = collection_name
self.collection = settings_collection[database_name][collection_name]
self.local_site_id = local_site_id
self.local_settings_cache = CacheValues()
def save_local_settings(self, data):
"""Save local settings.
Args:
data(dict): Data of studio overrides with override metadata.
"""
data = data or {}
self.local_settings_cache.update_data(data)
self.collection.replace_one(
{
"type": LOCAL_SETTING_KEY,
"site_id": self.local_site_id
},
{
"type": LOCAL_SETTING_KEY,
"site_id": self.local_site_id,
"data": self.local_settings_cache.data
},
upsert=True
)
def get_local_settings(self):
"""Local settings for local site id."""
if self.local_settings_cache.is_outdated:
document = self.collection.find_one({
"type": LOCAL_SETTING_KEY,
"site_id": self.local_site_id
})
self.local_settings_cache.update_from_document(document)
return self.local_settings_cache.data_copy()
|
from qpsolvers import *
from operators import *
import numpy as np
def w_init(w0, Sinv):
"""
Initialize w0, the vectorized upper triangular coefficients of the adjacency matrix
"""
if type(w0) is str:
if (w0 == "qp"):
R = vecLmat(Sinv.shape[1])
qp = 0
assert False, "QP-based initialization (see the R quadprog call below) is not implemented yet"
#quadprog::solve.QP(crossprod(R), t(R) %*% vec(Sinv), diag(ncol(R)))
w0 = qp#qp$solution
elif (w0 == "naive"):
w0 = Linv(Sinv)
w0[w0 < 0] = 0 # Should not happen
return w0
def laplacian_w_update(w, Lw, U, beta, lambd, K, p):
"""
Update w according to equation 38
"""
t = lambd[:, None]**0.5 * U.T
c = Lstar(t.T@t - K / beta)
grad_f = Lstar(Lw) - c
if 1:
M_grad_f = - Lstar(La(grad_f))
wT_M_grad_f = np.sum(w * M_grad_f)
dwT_M_dw = np.sum(grad_f * M_grad_f)
# exact line search
t = (wT_M_grad_f - np.sum(c * grad_f)) / dwT_M_dw
else:
t=1/(2*p)
w_update = w - t * grad_f
w_update[w_update < 0] = 0
return w_update
def joint_w_update(w, Lw, Aw, U, V, lambd, psi, beta, nu, K):
t=lambd[:, None]**0.5*U.T
ULmdUT = t.T@t
VPsiVT = V @ np.diag(psi) @ V.T
c1 = Lstar(beta * ULmdUT - K)
c2 = nu * Astar(VPsiVT)
Mw = Lstar(Lw)
Pw = 2 * w
grad_f1 = beta * Mw - c1
M_grad_f1 = Lstar(La(grad_f1))
grad_f2 = nu * Pw - c2
P_grad_f2 = 2 * grad_f2
grad_f = grad_f1 + grad_f2
t = np.sum((beta * Mw + nu * Pw - (c1 + c2)) * grad_f) / np.sum(grad_f * (beta * M_grad_f1 + nu * P_grad_f2))
w_update = w - t * (grad_f1 + grad_f2)
w_update[w_update < 0] = 0
return w_update
def bipartite_w_update(w, Aw, V, nu, psi, K, J, Lips):
reg_eps = 0
grad_h = 2 * w - Astar(V @ np.diag(psi) @ V.T) #+ Lstar(K) / beta#
w_update = w - (Lstar(np.linalg.inv(La(w) + J+np.eye(J.shape[0])*reg_eps) + K) + nu * grad_h) / (2 * nu + Lips)
w_update[w_update < 0] = 0  # TODO: make the regularization on the previous line unnecessary
return w_update
def laplacian_U_update(Lw, k):
"""
Return all but the k first eigenvectors of the Laplacian Lw
"""
return np.linalg.eigh(Lw)[1][:, k:]
def bipartite_V_update(Aw, z):
n = Aw.shape[1]
V = np.linalg.eigh(Aw)[1]
return np.concatenate([V[:, :(n - z)//2], V[:,(n + z)//2:n]],axis=1)
def joint_U_update(Lw,k):
return np.linalg.eigh(Lw)[1][:, k:]
def joint_V_update(Aw,z):
return bipartite_V_update(Aw,z)
def laplacian_lambda_update(lb, ub, beta, U, Lw, k):
"""
Update lambda according to algorithm 1
"""
q = Lw.shape[1] - k
d = np.diagonal(U.T @ Lw @ U)
# unconstrained solution as initial point
lambd = .5 * (d + (d**2 + 4 / beta)**0.5)
eps = 1e-9
condition_ub = np.array([(lambd[q-1] - ub) <= eps])
condition_lb = np.array([(lambd[0] - lb) >= -eps])
condition_ordered = (lambd[1:q] - lambd[0:(q-1)]) >= -eps
condition = np.concatenate([condition_ub,\
condition_lb,\
condition_ordered])
if np.all(condition):
return lambd
else:
greater_ub = lambd > ub
lesser_lb = lambd < lb
lambd[greater_ub] = ub
lambd[lesser_lb] = lb
condition_ub = np.array([(lambd[q-1] - ub) <= eps])
condition_lb = np.array([(lambd[0] - lb) >= -eps])
condition_ordered = (lambd[1:q] - lambd[:(q-1)]) >= -eps
condition = np.concatenate([condition_ub,\
condition_lb,\
condition_ordered])
if np.all(condition):
return (lambd)
else:
print(lambd)
raise ValueError('eigenvalues are not in increasing order; consider increasing the value of beta')
def bipartite_psi_update(V, Aw, lb = -np.inf, ub = np.inf):
c = np.diagonal(V.T @ Aw @ V)
n = c.shape[0]
c_tilde = .5 * (c[(n//2):][::-1] - c[:(n//2)])
x = isoreg(c_tilde[::-1])
#x <- stats::isoreg(rev(c_tilde))$yf # R
x = np.concatenate((-x[::-1], x))
#x <- c(-rev(x), x) # R
x[x < lb] = lb
x[x > ub] = ub
return x
"""joint.lambda_update <- function(...) {
return(laplacian.lambda_update(...))
}
def joint_psi_update(...):
return(bipartite.psi_update(...))
"""
|
from sklearn.datasets import make_blobs
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
from LogisticRegression import LogisticRegression
X, y = make_blobs(n_samples=100, centers=2, n_features=2, random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=0)
model = LogisticRegression()
model.fit(X_train, y_train)
print('Accuracy score for train data: {}'.format(accuracy_score(y_train, model.predict(X_train))))
print('Accuracy score for test data: {}'.format(accuracy_score(y_test, model.predict(X_test))))
|
import os.path
def sdss_band_name(b):
if b in ['u','g','r','i','z']:
return b
if b in [0,1,2,3,4]:
return 'ugriz'[b]
raise Exception('Invalid SDSS band: "' + str(b) + '"')
def sdss_band_index(b):
if b in ['u','g','r','i','z']:
return 'ugriz'.index(b)
if b in [0,1,2,3,4]:
return b
raise Exception('Invalid SDSS band: "' + str(b) + '"')
def sdss_filename(filetype, run, camcol, field, band=None, rerun=0):
if band is not None:
band = sdss_band_name(band)
x = dict(run=run, band=band, camcol=camcol, field=field, rerun=rerun)
ftmap = {
'fpC': 'fpC-%(run)06i-%(band)s%(camcol)i-%(field)04i.fit',
'fpAtlas': 'fpAtlas-%(run)06i-%(camcol)i-%(field)04i.fit',
'fpM': 'fpM-%(run)06i-%(band)s%(camcol)i-%(field)04i.fit',
'fpObjc': 'fpObjc-%(run)06i-%(camcol)i-%(field)04i.fit',
'psField': 'psField-%(run)06i-%(camcol)i-%(field)04i.fit',
'tsObj': 'tsObj-%(run)06i-%(camcol)i-%(rerun)i-%(field)04i.fit',
'tsField': 'tsField-%(run)06i-%(camcol)i-%(rerun)i-%(field)04i.fit',
#'http://das.sdss.org/imaging/125/40/astrom/asTrans-000125.fit'
# http://das.sdss.org/imaging/125/40/calibChunks/3/tsField-000125-3-40-0196.fit
}
format = ftmap.get(filetype, None)
if format is None:
return None
#print 'format', format, 'x', x
return format % x
def sdss_path(filetype, run, camcol, field, band=None, rerun=None):
x = dict(run=run, band=band, camcol=camcol, field=field, rerun=rerun)
y = (run, camcol, field, band, rerun)
if filetype in ['fpC']:
return '%(run)i/%(rerun)i/corr/%(camcol)i/' % x + sdss_filename(filetype, *y)
elif filetype in ['psField', 'fpAtlas', 'fpObjc', 'fpM']:
return '%(run)i/%(rerun)i/objcs/%(camcol)i/' % x + sdss_filename(filetype, *y)
elif filetype in ['tsObj', 'tsField']:
return '%(run)i/%(rerun)i/calibChunks/%(camcol)i/' % x + sdss_filename(filetype, *y)
else:
return None
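# Examples (consistent with the DAS URL quoted in the comment above):
# sdss_path('tsField', 125, 3, 196, rerun=40)
#   -> '125/40/calibChunks/3/tsField-000125-3-40-0196.fit'
# sdss_path('fpC', 125, 3, 196, band='r', rerun=40)
#   -> '125/40/corr/3/fpC-000125-r3-0196.fit'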
def sdss_find_file(filetype, run, camcol, field, band=None, reruns=None, datadir=None, reduxdir=None):
if filetype == 'psField':
basedir = datadir
for rerun in reruns:
pth = os.path.join(basedir, sdss_path(filetype, run, camcol, field, band=band, rerun=rerun))
print 'trying path', pth
if os.path.exists(pth):
return pth
return None
|
# -*- coding: utf-8 -*-
"""
Created on Sun Oct 27 00:30:31 2019
dogName1 = "One"
dogName2 = "Two"
dogName3 = "Three"
dogName4 = "Four"
dogName5 = "Five"
dogName6 = "Six"
@author: Gunardi Saputra
"""
print("Enter the name of dog 1: ")
dogName1 = input()
print("Enter the name of dog 2: ")
dogName2 = input()
print("Enter the name of dog 3: ")
dogName3 = input()
print("Enter the name of dog 4: ")
dogName4 = input()
print("Enter the name of dog 5: ")
dogName5 = input()
print("Enter the name of dog 6: ")
dogName6 = input()
print("The dog names are: " + dogName1 + " "
+ dogName2 + " "
+ dogName3 + " "
+ dogName4 + " "
+ dogName5 + " "
+ dogName6 + " ")
|
/*
sum_(k=1)^t (2^k - 1) (n + 1 - (k + 1) a) (n + 1 - (k + 1) b) = 1/6 (a (b (-2 t^3 + 3 (2^(t + 2) - 3) t^2 - 13 t + 24 (2^t - 1)) + 3 (n + 1) t (t - 2^(t + 2) + 3)) + 3 (n + 1) (b t^2 - t (b (2^(t + 2) - 3) + 2) + n (-2 t + 2^(t + 2) - 4) + 4 (2^t - 1)))
vertical and horizontal : warasumm(1, 0, n, n-1, p)
cross : warasumm(1, 1, n, n-1, p)
other (gradient (a, b)) : 2*warasumm(a, b, n, floor(n/a)-1, p)
*/
warasum(a, b, n, t) = (a * (b * (-2 * t^3 + 3 * (2^(t + 2) - 3) * t^2 - 13 * t + 24 * (2^t - 1)) + 3 * (n + 1) * t * (t - 2^(t + 2) + 3)) + 3 * (n + 1) * (b * t^2 - t * (b * (2^(t + 2) - 3) + 2) + n * (-2 * t + 2^(t + 2) - 4) + 4 * (2^t - 1))) / 6
warasumm(a, b, n, t, p) = (a* (b* (-2* t^3 + 3* (Mod(2, p)^(t + 2) - 3)* t^2 - 13 *t + 24* (Mod(2, p)^t - 1)) + 3* (n + 1)* t* (t - Mod(2, p)^(t + 2) + 3)) + 3* (n + 1)* (b* t^2 - t *(b* (Mod(2, p)^(t + 2) - 3) + 2) + n *(-2* t + Mod(2, p)^(t + 2) - 4) + 4* (Mod(2, p)^t - 1)))/3
/* unit a, unit b, n, iteration, modulo sympy: Sum((n+1-m*a)*(n+1-m*b)*(2**(m-1)-1), (m, 2, s)).doit()*2 */
/* ratsum(a, b, n, s, p) = -(-6*Mod(2, p)^s*a*b*s^2 + 12*Mod(2, p)^s*a*b*s - 18*Mod(2, p)^s*a*b + 6*Mod(2, p)^s*a*n*s - 6*Mod(2, p)^s*a*n + 6*Mod(2, p)^s*a*s - 6*Mod(2, p)^s*a + 6*Mod(2, p)^s*b*n*s - 6*Mod(2, p)^s*b*n + 6*Mod(2, p)^s*b*s - 6*Mod(2, p)^s*b - 6*Mod(2, p)^s*n^2 - 12*Mod(2, p)^s*n - 6*Mod(2, p)^s + 2*a*b*s^3 + 3*a*b*s^2 + a*b*s + 18*a*b - 3*a*n*s^2 - 3*a*n*s + 6*a*n - 3*a*s^2 - 3*a*s + 6*a - 3*b*n*s^2 - 3*b*n*s + 6*b*n - 3*b*s^2 - 3*b*s + 6*b + 6*n^2*s + 6*n^2 + 12*n*s + 12*n + 6*s + 6)/3 */
/* rat(a, b, n, m, p) = (Mod(2, p)^(m - 1) - 1)*(n + 1 - m*a)*(n + 1 - m*b) */
/* Sum((n+1-m*a)*(n+1-m*b)*(2**(m-1)-1), (b, 1, s)).doit()*4 */
karasum(a, n, m, s, p) = s*(Mod(2, p)^m*a*m^2*(s + 1) - Mod(2, p)^m*m*n*(s + 1) - Mod(2, p)^m*m*(s + 1) - Mod(2, p)^(m + 1)*a*m*n - Mod(2, p)^(m + 1)*a*m + Mod(2, p)^(m + 1)*n^2 + Mod(2, p)^(m + 1) + Mod(2, p)^(m + 2)*n - 2*a*m^2*(s + 1) + 4*a*m*n + 4*a*m + 2*m*n*(s + 1) + 2*m*(s + 1) - 4*n^2 - 8*n - 4)
n = 111
p = 10^8
complement = 0
{
for(a=2, n, fact = factor(a);
fact = factor(a)[,1];
apply(x->complement += rat(), fact)
print(fact)
);
}
/* vertical and horizontal */
complement += warasumm(1, 0, n, n-1, p) + warasumm(1, 1, n, n-1, p)
/* empty set and sets with size 1 */
complement += 1 + (n+1)^2
/* final result */
result = Mod(2, p)^((n+1)^2) - complement
print(result)
|
"""This module provides the XML code for some basic example networks."""
NET_ELECTRICAL_PIECEWISE = """<network>
<edges>
<edge from="s" to="v">
<cost>
<piecewisequadratic>
<functionpart a=".5" b="0" c="0" tau="-inf"/>
<functionpart a="2.5" b="-12" c="18" tau="3"/>
</piecewisequadratic>
</cost>
</edge>
<edge from="v" to="t">
<cost>
<piecewisequadratic>
<functionpart a=".5" b="0" c="0" tau="-inf"/>
<functionpart a="1.5" b="-4" c="4" tau="2"/>
</piecewisequadratic>
</cost>
</edge>
<edge from="s" to="t">
<cost>
<piecewisequadratic>
<functionpart a=".5" b="0" c="0" tau="-inf"/>
<functionpart a="2" b="-3" c="1.5" tau="1"/>
</piecewisequadratic>
</cost>
</edge>
</edges>
<commodities>
<commodity from="s" to="t" rate="1.0" />
</commodities>
<nodes>
<node node="s" x="0" y="0" />
<node node="v" x="1" y="1" />
<node node="t" x="2" y="0" />
</nodes>
</network>"""
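# The constants in this module are plain XML strings; a minimal sketch of
# inspecting one with the standard library (listing the edges only):
# >>> import xml.etree.ElementTree as ET
# >>> root = ET.fromstring(NET_ELECTRICAL_PIECEWISE)
# >>> [(e.get("from"), e.get("to")) for e in root.iter("edge")]
# [('s', 'v'), ('v', 't'), ('s', 't')]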
NET_ELECTRICAL_BRAESS = """<network>
<edges>
<edge from="s" to="v">
<cost>
<piecewisequadratic>
<functionpart a="1" b="0" c="0" tau="-inf"/>
</piecewisequadratic>
</cost>
</edge>
<edge from="s" to="w">
<cost>
<piecewisequadratic>
<functionpart a="2.5" b="0" c="0" tau="-inf"/>
<functionpart a="0.5" b="4" c="0" tau="1"/>
</piecewisequadratic>
</cost>
</edge>
<edge from="v" to="w">
<cost>
<piecewisequadratic>
<functionpart a="2.25" b="0" c="0" tau="-inf"/>
<functionpart a="0.5" b="0" c="0" tau="0"/>
</piecewisequadratic>
</cost>
</edge>
<edge from="v" to="t">
<cost>
<piecewisequadratic>
<functionpart a="2.5" b="0" c="0" tau="-inf"/>
<functionpart a="0.5" b="4" c="0" tau="1"/>
</piecewisequadratic>
</cost>
</edge>
<edge from="w" to="t">
<cost>
<piecewisequadratic>
<functionpart a="1" b="0" c="0" tau="-inf"/>
</piecewisequadratic>
</cost>
</edge>
</edges>
<commodities>
<commodity from="s" to="t" rate="1.0" />
</commodities>
<nodes>
<node node="s" x="0" y="0" />
<node node="v" x="1" y="1" />
<node node="w" x="1" y="-1" />
<node node="t" x="2" y="0" />
</nodes>
</network>
"""
NET_DISCONTINUOUS_COST = """<network>
<edges>
<edge from="s" to="v">
<cost>
<piecewisequadratic>
<functionpart a="0.5" b="0" c="0" tau="-inf"/>
<functionpart a="inf" b="2" c="0" tau="2"/>
<functionpart a="0.5" b="1" c="0" tau="2"/>
</piecewisequadratic>
</cost>
</edge>
<edge from="v" to="t">
<cost>
<piecewisequadratic>
<functionpart a="0.5" b="0" c="0" tau="-inf"/>
<functionpart a="inf" b="1" c="0" tau="1"/>
<functionpart a="1" b="1" c="0" tau="1"/>
</piecewisequadratic>
</cost>
</edge>
<edge from="s" to="t">
<cost>
<piecewisequadratic>
<functionpart a="0.5" b="0" c="0" tau="-inf"/>
<functionpart a="inf" b="3" c="0" tau="3"/>
<functionpart a="1.5" b="-4" c="0" tau="3"/>
</piecewisequadratic>
</cost>
</edge>
</edges>
<commodities>
<commodity from="s" to="t" rate="1.0" />
</commodities>
<nodes>
<node node="s" x="0" y="0" />
<node node="v" x="1" y="1" />
<node node="t" x="2" y="0" />
</nodes>
</network>"""
NET_SIMPLE_POLYNOMIAL = """<network>
<edges>
<edge from="s" to="t">
<cost>
<polynomial>
<coefficient i="2">3.0</coefficient>
<coefficient i="3">1.0</coefficient>
</polynomial>
</cost>
</edge>
<edge from="t" to="v" lb="0" ub="inf">
<cost>
<polynomial signed="True">
<coefficient i="3">1</coefficient>
</polynomial>
</cost>
</edge>
<edge from="v" to="w" lb="-inf" ub="inf">
<cost>
<polynomial signed="True">
<coefficient i="3">1</coefficient>
</polynomial>
</cost>
</edge>
<edge from="w" to="u" lb="-1000" ub="inf">
<cost>
<polynomial signed="True">
<coefficient i="3">1</coefficient>
</polynomial>
</cost>
</edge>
<edge from="u" to="u1" lb="-inf" ub="1000">
<cost>
<polynomial signed="True">
<coefficient i="3">1</coefficient>
</polynomial>
</cost>
</edge>
<edge from="u1" to="u2" lb="-1000" ub="1000">
<cost>
<polynomial signed="True">
<coefficient i="3">1</coefficient>
</polynomial>
</cost>
</edge>
</edges>
<commodities>
<commodity from="s" to="t" rate="1.0" />
</commodities>
<nodes>
<node node="s" x="0" y="0" />
<node node="t" x="2" y="0" />
<node node="v" x="4" y="0" />
<node node="w" x="6" y="0" />
<node node="u" x="8" y="0" />
<node node="u1" x="10" y="0" />
<node node="u2" x="12" y="0" />
</nodes>
</network>
"""
|
#!/usr/bin/python
#
# A script to convert blob from the MS spec to array of byte to use in unitary tests
#
# 00000000 c7 01 00 01 20 54 e2
# 00000008 c7 01 00 01 20 54 e2
# taken from the spec, will give:
# 0xc7, 0x01, 0x00, 0x01, 0x20, 0x54, 0xe2,
# 0xc7, 0x01, 0x00, 0x01, 0x20, 0x54, 0xe2,
#
# Notes:
# * the script reads the two first lines to detect the number of items per lines, so you need a blob with at least 2 lines
# * the script detects if items are hex values by searching for + or -
#
# sample usage:
# $ python scripts/specBytesToCode.py < image.txt > image.c
# then go edit image.c and paste that in your code
import sys
def getOffset(l):
token = l.split(' ')[0]
return int(token, 16)
def isHex(l):
return l.find('+') == -1 and l.find('-') == -1
if __name__ == '__main__':
lines = []
itemsPerLine = 16
doHex = True
# parse the offset to know how many items per line we have
l1 = sys.stdin.readline().strip()
l2 = sys.stdin.readline().strip()
itemsPerLine = getOffset(l2) - getOffset(l1)
#
doHex = isHex(l1)
for l in [l1, l2] + sys.stdin.readlines():
# 00000000 c7 01 00 01 20 54 e2 cc 00 jh.kjkjhkhk
l = l.strip() # in case we have spaces before the offset
pos = l.find(' ')
l = l[pos+1:]
items = []
tokens = l.strip().split(' ')
ntokens = 0
for t in tokens:
if not t: # empty token
continue
if ntokens == itemsPerLine:
break
item = ''
if doHex:
item += '0x'
item += t
items.append(item)
ntokens += 1
lines.append(', '.join(items))
print(",\n".join(lines))
|
#!/bin/python
from setuptools import setup
extensions=[]
setup(
name='gmt-extensions',
packages=['gmt_extensions'],
# requires=['numpy (>=1.8)', 'scipy (>=0.14)',
# 'pythonigraph (>=0.7)'],
provides=['gmt_extensions'],
scripts=[],
license='BSD',
)
|
"""
Harness for running sql commands against a spatialite db from python.
This is work in progress towards a schema translation task.
From the project directory run:
./manage.py runscript spatialite_test --settings=hot_exports.settings -v2
Depends on django-extensions.
"""
import os
from string import Template
import sqlite3
def run(*script_args):
path = os.path.dirname(os.path.realpath(__file__))
translations = open(path + '/translations.txt')
trans = []
for line in translations.readlines():
if line.strip() == '':
continue
pair = line.strip().split(',')
trans.append(pair)
for entry in trans:
print(entry[0], entry[1])
print(trans)
conn = sqlite3.connect('/home/ubuntu/export_downloads/0c937545-cb43-4f9a-97b4-6e90e0c791a7/query.sqlite')
# load spatialite extension
conn.enable_load_extension(True)
cmd = "SELECT load_extension('libspatialite')"
cur = conn.cursor()
cur.execute(cmd)
# drop the planet_osm_point table and related indexes
cur.execute("SELECT DiscardGeometryColumn('planet_osm_point', 'GEOMETRY')")
cur.execute("DROP TABLE idx_planet_osm_point_GEOMETRY")
cur.execute('DROP TABLE planet_osm_point')
# get column info
cur.execute('PRAGMA table_info(planet_osm_point_temp)')
point_columns = cur.fetchall()
new_columns = []
for column in point_columns:
column_name = column[1]
column_type = column[2]
# translate column
new_columns.append('{0} {1}'.format(column_name, column_type))
# create the new table
sql_templ = Template("""
CREATE TABLE planet_osm_point(
$columns
);
""")
colstr = ','.join(new_columns)
sql = sql_templ.safe_substitute({'columns': colstr})
# cursor = conn.execute('ALTER TABLE planet_osm_point RENAME TO planet_osm_point_temp;')
cur.execute(sql)
# add the geometry column and spatial index
cur.execute("SELECT RecoverGeometryColumn('planet_osm_point', 'GEOMETRY', 4326, 'POINT', 'XY')")
cur.execute("SELECT CreateSpatialIndex('planet_osm_point', 'GEOMETRY')")
# copy data from planet_osm_point_temp to planet_osm_point
cur.execute('INSERT INTO planet_osm_point SELECT * FROM planet_osm_point_temp;')
conn.commit()
cur.close()
|
#!/usr/bin/env python3
# coding: utf-8
import torch
import torch.nn as nn
import torch.utils.data as data
import torchvision.transforms as transforms
import torch.backends.cudnn as cudnn
import time, os
import numpy as np
import matplotlib.pyplot as plt
os.environ['CUDA_VISIBLE_DEVICES']='1'
#from benchmark_aflw1998 import calc_nme as calc_nme_alfw1998
#from benchmark_aflw1998 import ana as ana_alfw1998
from benchmark_aflw import calc_nme as calc_nme_alfw
from benchmark_aflw import ana as ana_aflw
from resnet_xgtu_4chls import resnet50
from ddfa_utils2 import ToTensorGjz, NormalizeGjz, DDFATestDataset, reconstruct_vertex
import argparse
from io_utils import _load, _numpy_to_cuda, _numpy_to_tensor
import os.path as osp
from math import sqrt
from io_utils import _load
d = 'test.configs'
fail_detect = [1082-1, 1799-1]
yaws_list = _load(osp.join(d, 'AFLW2000-3D.pose.npy'))
yaws_list = [yaws_list[idx] for idx in range(len(yaws_list)) if idx not in fail_detect]
# pts21 = _load(osp.join(d, 'AFLW2000-3D.pts21.npy'))
# origin
pts68_all_ori = _load(osp.join(d, 'AFLW2000-3D.pts68.npy'))
pts68_all_ori = [pts68_all_ori[idx] for idx in range(len(pts68_all_ori)) if idx not in fail_detect]
# reannonated
pts68_all_re = _load(osp.join(d, 'AFLW2000-3D-Reannotated.pts68.npy'))
pts68_all_re = [pts68_all_re[idx] for idx in range(len(pts68_all_re)) if idx not in fail_detect]
roi_boxs = _load(osp.join(d, 'AFLW2000-3D_crop.roi_box.npy'))
roi_boxs = [roi_boxs[idx] for idx in range(len(roi_boxs)) if idx not in fail_detect]
def ana_alfw1998(nme_list):
yaw_list_abs = np.abs(yaws_list)
ind_yaw_1 = yaw_list_abs <= 30
ind_yaw_2 = np.bitwise_and(yaw_list_abs > 30, yaw_list_abs <= 60)
ind_yaw_3 = yaw_list_abs > 60
nme_1 = nme_list[ind_yaw_1]
nme_2 = nme_list[ind_yaw_2]
nme_3 = nme_list[ind_yaw_3]
mean_nme_1 = np.mean(nme_1) * 100
mean_nme_2 = np.mean(nme_2) * 100
mean_nme_3 = np.mean(nme_3) * 100
# mean_nme_all = np.mean(nme_list) * 100
std_nme_1 = np.std(nme_1) * 100
std_nme_2 = np.std(nme_2) * 100
std_nme_3 = np.std(nme_3) * 100
# std_nme_all = np.std(nme_list) * 100
mean_all = [mean_nme_1, mean_nme_2, mean_nme_3]
mean = np.mean(mean_all)
std = np.std(mean_all)
s1 = '[ 0, 30]\tMean: \x1b[32m{:.3f}\x1b[0m, Std: {:.3f}'.format(mean_nme_1, std_nme_1)
s2 = '[30, 60]\tMean: \x1b[32m{:.3f}\x1b[0m, Std: {:.3f}'.format(mean_nme_2, std_nme_2)
s3 = '[60, 90]\tMean: \x1b[32m{:.3f}\x1b[0m, Std: {:.3f}'.format(mean_nme_3, std_nme_3)
# s4 = '[ 0, 90]\tMean: \x1b[31m{:.3f}\x1b[0m, Std: {:.3f}'.format(mean_nme_all, std_nme_all)
s5 = '[ 0, 90]\tMean: \x1b[31m{:.3f}\x1b[0m, Std: \x1b[31m{:.3f}\x1b[0m'.format(mean, std)
s = '\n'.join([s1, s2, s3, s5])
print(s)
return mean_nme_1, mean_nme_2, mean_nme_3, mean, std
def convert_to_ori(lms, i):
std_size = 120
sx, sy, ex, ey = roi_boxs[i]
scale_x = (ex - sx) / std_size
scale_y = (ey - sy) / std_size
lms[0, :] = lms[0, :] * scale_x + sx
lms[1, :] = lms[1, :] * scale_y + sy
return lms
def calc_nme_alfw1998(pts68_fit_all, option='ori'):
if option == 'ori':
pts68_all = pts68_all_ori
elif option == 're':
pts68_all = pts68_all_re
std_size = 120
nme_list = []
for i in range(len(roi_boxs)):
pts68_fit = pts68_fit_all[i]
pts68_gt = pts68_all[i]
sx, sy, ex, ey = roi_boxs[i]
scale_x = (ex - sx) / std_size
scale_y = (ey - sy) / std_size
pts68_fit[0, :] = pts68_fit[0, :] * scale_x + sx
pts68_fit[1, :] = pts68_fit[1, :] * scale_y + sy
# build bbox
minx, maxx = np.min(pts68_gt[0, :]), np.max(pts68_gt[0, :])
miny, maxy = np.min(pts68_gt[1, :]), np.max(pts68_gt[1, :])
llength = sqrt((maxx - minx) * (maxy - miny))
#
dis = pts68_fit - pts68_gt[:2, :]
dis = np.sqrt(np.sum(np.power(dis, 2), 0))
dis = np.mean(dis)
nme = dis / llength
nme_list.append(nme)
nme_list = np.array(nme_list, dtype=np.float32)
return nme_list
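# Illustrative check of the normalisation used above (toy numbers, not part of the
# benchmark): NME = mean point-to-point distance / sqrt(bbox_width * bbox_height),
# with the bbox taken from the ground-truth landmarks.
#
#   gt  = np.zeros((2, 68), dtype=np.float32)
#   gt[:, 1:] = 10.0                      # degenerate 10x10 bbox for the example
#   fit = gt + np.array([[3.0], [4.0]])   # every point off by a 3-4-5 triangle
#   llength = sqrt(10 * 10)               # = 10
#   nme = np.mean(np.sqrt(((fit - gt) ** 2).sum(0))) / llength   # = 0.5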
def get_lms_crop_pts(anno):
xx = anno.split(',')[1:1+68*2]
pts = np.empty([2, 68])
pts[0,:] = [xx[i] for i in range(len(xx)) if i % 2 == 0]
pts[1,:] = [xx[i] for i in range(len(xx)) if i % 2 == 1]
return pts
def map_2d_18pts(lms68, _18_indx_3d22d):
lms18 = lms68[:,:,_18_indx_3d22d][:,:2,:]
lms18[:,:,7] = (lms68[:,:2,37] + lms68[:,:2,40])/2
lms18[:,:,10] = (lms68[:,:2,43] + lms68[:,:2,46])/2
lms18[:,:,16] = (lms68[:,:2,62] + lms68[:,:2,66])/2
return lms18
def map_2d_18pts_2d(lms2d_68):
_18_indx_3d22d = [17, 19, 21, 22, 24, 26, 36, 40, 39, 42, 46, 45, 31, 30, 35, 48, 66, 54]
lms2d = lms2d_68[:,_18_indx_3d22d]
lms2d[:,7] = (lms2d_68[:,37] + lms2d_68[:,40])/2
lms2d[:,10] = (lms2d_68[:,43] + lms2d_68[:,46])/2
lms2d[:,16] = (lms2d_68[:,62] + lms2d_68[:,66])/2
return lms2d
def obtain_18pts_map(pts):
pts = map_2d_18pts_2d(pts)
ptsMap = np.zeros([120, 120]) - 1
indx = np.int32(np.floor(pts))
# print(pts)
ptsMap[indx[1], indx[0]] = 1
'''
aa = ptsMap
fig = plt.figure(figsize=plt.figaspect(.5))
ax = fig.add_subplot(1, 2, 1)
ax.imshow(aa)
for ind in range(18):
ax.plot(indx[0, ind], indx[1, ind], marker='o', linestyle='None', markersize=4, color='w', markeredgecolor='black', alpha=0.8)
ax.axis('off')
cv2.imwrite(('./imgs/lms_18pts/' + lms.split(',')[0]), ptsMap*255)
'''
return ptsMap
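# The helpers above reduce the 68 landmarks to the 18-point subset used as an attention
# map: brow, nose and mouth-corner points are taken directly, while indices 7, 10 and 16
# (eye centres and mouth centre) are midpoints of two opposing eyelid / inner-lip points.
# obtain_18pts_map then rasterises them into a 120x120 map that is -1 everywhere and +1
# at the floored landmark pixels, e.g.:
#
#   dummy = np.tile(np.arange(68, dtype=np.float32), (2, 1))   # fake (2, 68) landmarks
#   m = obtain_18pts_map(dummy)
#   assert m.shape == (120, 120) and (m == 1).sum() == 18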
def comb_inputs(imgs, lmsMaps, permu=False):
lmsMaps = np.array(lmsMaps).astype(np.float32)
    if permu:
        # NCHW -> NHWC so the landmark map can be stacked as a 4th channel below
        imgs = imgs.permute(0, 2, 3, 1)
outputs = [np.dstack((imgs[idx].cpu().numpy(),lmsMaps[idx])) for idx in range(imgs.shape[0])]
outputs = np.array(outputs).astype(np.float32)
return outputs
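# comb_inputs glues each 120x120 RGB crop to its landmark map, producing an
# (N, 120, 120, 4) float32 batch; extract_param below permutes this back to NCHW
# before the forward pass. The extra channel presumably matches the 4-channel stem
# suggested by the resnet_xgtu_4chls import.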
def extract_param(checkpoint_fp, root='', filelists=None, arch='resnet50', num_classes=62, device_ids=[0],
batch_size=1, num_workers=4):
map_location = {f'cuda:{i}': 'cuda:0' for i in range(8)}
checkpoint = torch.load(checkpoint_fp, map_location=map_location)['res_state_dict']
torch.cuda.set_device(device_ids[0])
model = resnet50(pretrained=False, num_classes=num_classes)
model = nn.DataParallel(model, device_ids=device_ids).cuda()
model.load_state_dict(checkpoint)
dataset = DDFATestDataset(filelists=filelists, root=root,
transform=transforms.Compose([ToTensorGjz(), NormalizeGjz(mean=127.5, std=128)]))
data_loader = data.DataLoader(dataset, batch_size=batch_size, shuffle=False, num_workers=num_workers)
cudnn.benchmark = True
model.eval()
end = time.time()
outputs = []
ff = open('./test.data/detect_testData_1998.txt')
# ff = open('./test.data/AFLW2000-3D_crop/list_landmarks_align_AFLW2000_3D_crop_pts_xgtu_21.txt')
lmsList = ff.readlines()
ff.close()
with torch.no_grad():
for idx, inputs in enumerate(data_loader):
# print(idx)
batLms_files = lmsList[idx * batch_size:(idx + 1) * batch_size]
pts68 = [get_lms_crop_pts(bb) for bb in batLms_files]
pts68 = np.array(pts68).astype(np.float32)
pts68 = pts68[:,:2,:]
pts68[pts68>119] = 119
# print(pts68)
inputs = inputs.cuda()
'''
img = inputs.data.cpu().numpy()[0]
img = img.transpose(1,2,0)
fig = plt.figure(figsize=plt.figaspect(.5))
ax = fig.add_subplot(1, 2, 1)
ax.imshow(img)
lms = pts68[0]
nums = [0, 17, 22, 27, 31, 36, 42, 48, 60, 68]
for ind in range(len(nums) - 1):
l, r = nums[ind], nums[ind + 1]
ax.plot(lms[0, l:r], lms[1, l:r], color='w', lw=1.5, alpha=0.7)
ax.plot(lms[0, l:r], lms[1, l:r], marker='o', linestyle='None', markersize=4, color='w', markeredgecolor='black', alpha=0.8)
ax.axis('off')
lms = pts68[0]
for ind in range(18):
ax.plot(lms[0, ind], lms[1, ind], marker='o', linestyle='None', markersize=4, color='w', markeredgecolor='black', alpha=0.8)
ax.axis('off')
'''
lmsMap = [obtain_18pts_map(aa) for aa in pts68]
comInput1 = comb_inputs(inputs, lmsMap, permu=True)
comInput1 = _numpy_to_cuda(comInput1)
comInput1 = comInput1.permute(0, 3, 1, 2)
output = model(comInput1)
for i in range(output.shape[0]):
param_prediction = output[i].cpu().numpy().flatten()
outputs.append(param_prediction)
outputs = np.array(outputs, dtype=np.float32)
print(f'Extracting params take {time.time() - end: .3f}s')
return outputs
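# extract_param runs the whole crop list through the network and collects one flattened
# parameter vector per image. num_classes=62 presumably corresponds to the usual 3DDFA
# parameterisation (12 pose + 40 shape + 10 expression coefficients), which
# reconstruct_vertex below turns back into 68 2D landmarks. Note that the arch argument
# is accepted for CLI compatibility but the backbone is fixed to the 4-channel resnet50
# loaded above.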
def _benchmark_aflw(outputs):
return ana_aflw(calc_nme_alfw(outputs))
def _benchmark_aflw1998(outputs):
return ana_alfw1998(calc_nme_alfw1998(outputs))
def benchmark_alfw_params(params):
outputs = []
for i in range(params.shape[0]):
lm = reconstruct_vertex(params[i])
outputs.append(lm[:2, :])
return _benchmark_aflw(outputs)
def benchmark_aflw1998_params(params):
outputs = []
for i in range(params.shape[0]):
lm = reconstruct_vertex(params[i])
outputs.append(lm[:2, :])
return _benchmark_aflw1998(outputs)
def benchmark_pipeline(arch, checkpoint_fp):
device_ids = [0]
def aflw1998():
params = extract_param(
checkpoint_fp=checkpoint_fp,
root='test.data/AFLW2000-3D_crop',
filelists='test.data/AFLW1998-3D_crop.list',
arch=arch,
device_ids=device_ids,
batch_size=1)
benchmark_aflw1998_params(params)
aflw1998()
def main():
preMol = '../models/2DASL_checkpoint_epoch_allParams_stage2.pth.tar'
parser = argparse.ArgumentParser(description='3DDFA Benchmark')
parser.add_argument('--arch', default='mobilenet_v2', type=str)
parser.add_argument('-c', '--checkpoint-fp', default=preMol, type=str)
args = parser.parse_args()
benchmark_pipeline(args.arch, args.checkpoint_fp)
if __name__ == '__main__':
main()
|
import RPi.GPIO as GPIO
import time
PWMFrequency = 50
InitialDutyCycle = 7.5
class OutputClock:
pin = None
frequency = None
pwm = None
name = None
def stop(self):
self.pwm.stop()
def __init__(self, pin, name, frequency):
print("Initializing clock on pin " + str(pin) + " with output frequency of " + str(frequency) + "Hz")
self.pin = pin
GPIO.setup(self.pin, GPIO.OUT)
self.frequency = frequency
self.name = name
self.pwm = GPIO.PWM(self.pin, frequency)
self.pwm.start(50)
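# OutputClock uses RPi.GPIO software PWM at a fixed 50 % duty cycle, i.e. the pin simply
# emits a square wave at the requested frequency until stop() is called.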
class Input:
function = None
pin = None
name = None
def __init__(self, pin, name, function):
print("Initializing Input Listener on pin " + str(pin) + " named: " + name)
self.pin = pin
self.name = name
GPIO.setup(pin, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
self.function = function
GPIO.add_event_detect(pin, GPIO.RISING, callback=self.function, bouncetime=300)
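# Input wires a GPIO pin (pulled down) to an edge-triggered callback with a 300 ms
# software debounce; RPi.GPIO invokes the callback with the channel number on each
# rising edge.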
class Servo:
pwm = None
pin = None
def rotate(self, dutyCycle):
print("Rotating servo to duty cycle " + str(dutyCycle))
self.pwm = GPIO.PWM(self.pin, PWMFrequency)
self.pwm.start(dutyCycle)
time.sleep(0.5)
self.pwm.stop()
def __init__(self, pin):
print("Initializing Servo on pin " + str(pin) + " with frequency " + str(PWMFrequency) + "Hz")
self.pin = pin
GPIO.setup(self.pin, GPIO.OUT)
self.rotate(InitialDutyCycle)
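# Servo pulses the pin at 50 Hz and maps position to duty cycle. For a typical hobby
# servo, roughly 2.5 % duty (0.5 ms pulse) is one end stop, 7.5 % (1.5 ms) is centre
# (the InitialDutyCycle above) and 12.5 % (2.5 ms) is the other end stop, though the
# exact range varies by servo. rotate() recreates the PWM object on every call and stops
# it after 0.5 s, so the servo is only actively driven during that window.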
class IO:
__instance = None
panServo = None
tiltServo = None
@staticmethod
def getInstance():
""" Static access method. """
        if IO.__instance is None:
IO()
return IO.__instance
def __init__(self):
print("Initializing IO")
        if IO.__instance is not None:
print("IO was already initialized, throwing exception")
raise Exception("This class is a singleton!")
else:
print("Setting board mode")
GPIO.setmode(GPIO.BCM)
IO.__instance = self
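# Minimal usage sketch (pin numbers are placeholders, BCM numbering as set above):
#
#   io = IO.getInstance()
#   io.panServo = Servo(17)
#   io.tiltServo = Servo(27)
#   clock = OutputClock(18, "sample clock", 1000)
#   button = Input(23, "shutter", lambda channel: print("pressed on", channel))
#   ...
#   clock.stop()
#   GPIO.cleanup()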
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.18 on 2019-07-02 01:47
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('bims', '0148_auto_20190630_1300'),
]
operations = [
migrations.AlterField(
model_name='biologicalcollectionrecord',
name='category',
field=models.CharField(blank=True, choices=[(b'alien', b'Non-Native'), (b'indigenous', b'Native')], max_length=50, null=True),
),
]
|
import requests
class HttpSession(requests.Session):
def __init__(self, *args, **kwargs):
self.timeout = kwargs.pop("timeout", None)
super().__init__(*args, **kwargs)
def request(self, *args, **kwargs):
kwargs.setdefault("timeout", self.timeout)
return super().request(*args, **kwargs)
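# Example usage (URL is a placeholder): every request made through the session gets the
# default timeout unless the caller overrides it per call.
#
#   session = HttpSession(timeout=5)
#   r1 = session.get("https://example.com/api")              # uses the 5 s default
#   r2 = session.get("https://example.com/api", timeout=30)  # per-call override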
|