code stringlengths 2 1.05M | repo_name stringlengths 5 104 | path stringlengths 4 251 | language stringclasses 1 value | license stringclasses 15 values | size int32 2 1.05M |
|---|---|---|---|---|---|
import unittest
from convert import convert
class TestConvert(unittest.TestCase):
def testEmptyJsonParse(self):
generated = convert.parse(convert._load_json_files("./jsonSamples/minimal.json")[0])
def testGlossaryJsonParse(self):
generated = convert.parse(convert._load_json_files("./jsonSamples/Glossary.json")[0])
generated = convert.generate("Test", ["cs"], generated)
for f in generated:
print "".join(f["content"]) | HenrikPoulsen/Json2Class | test/testConvert.py | Python | mit | 475 |
#! /usr/bin/python2
# vim: set fileencoding=utf-8
from dateutil.parser import parse
from subprocess import check_output
from shutil import copy
import datetime
import sys
import os.path
import isoweek
DATE_FORMAT = '%Y%m%d'
START = """\documentclass[a4paper,oneside,draft,
notitlepage,11pt,svgnames]{scrreprt}
\\newcommand{\workingDate}{\\today}
\input{preambule}
\\begin{document}
"""
END = """
\printbibliography{}
\end{document}"""
MD_ACTIVITY = """# Activity {.unnumbered}
~~~~
"""
def create(date):
    """Create today's LaTeX wrapper (current.tex) and a fresh markdown note.

    Fills the MONTH/DAY/content placeholders of template.tex for *date*,
    copies content.md to <YYYYMMDD>.md and prints the editor command to run.
    """
    filename = date.strftime(DATE_FORMAT)
    month = date.strftime('%B')
    day = date.strftime('%d')
    with open('template.tex', 'r') as t:
        content = t.read()
    content = content.replace('MONTH', month)
    content = content.replace('DAY', day)
    # 'content' placeholder points the wrapper at today's entry file.
    content = content.replace('content', filename+'.tex')
    with open('current.tex', 'w') as f:
        f.write(content)
    copy('content.md', filename+'.md')
    print('gvim {}'.format(filename+'.md'))
def week(date):
    """Bundle the seven daily entries of *date*'s ISO week into w<N>.tex."""
    week = isoweek.Week.withdate(date)
    name = 'w{}.tex'.format(week.week)
    together([week.day(d) for d in range(7)], name)
def together(dates, name):
    """Write a standalone LaTeX document *name* with one chapter per day.

    Days in *dates* without an existing <YYYYMMDD>.tex entry are skipped.
    Prints the rename command for the build step.
    """
    include = '\chapter{{{}}}\n\input{{{}}}'
    res = [include.format(d.strftime('%B %d'),
                          d.strftime(DATE_FORMAT)) for d in dates
           if os.path.exists(d.strftime(DATE_FORMAT)+'.tex')]
    with open(name, 'w') as f:
        f.write(START+'\n'.join(res)+END)
    print('mv {} w.tex'.format(name))
def log(date):
    """Return git commit activity since *date* 04:00 as a markdown block.

    Shells out to `git whatchanged`, dropping empty lines and the
    file-status prefix from each line, and wraps the result in the
    MD_ACTIVITY fenced block.
    """
    cmd = "git whatchanged --since='{}' --pretty=format:'%B'"
    cmd += "|sed '/^$/d'|sed 's/^.*\.\.\. //'"
    # 04:00 is treated as the start of the working day.
    since = date.replace(hour=4)
    log = check_output(cmd.format(str(since)),
                       shell=True).strip()+"\n\n~~~~"
    log = MD_ACTIVITY + log
    print(log)
    return log.replace('\t', ' ')
def since(date):
    """Bundle every daily entry from the day after *date* through today
    into a <start>_<today>.tex document."""
    today = datetime.datetime.now()
    name = date.strftime(DATE_FORMAT) + '_' + today.strftime(DATE_FORMAT)
    days = [(date + datetime.timedelta(days=i)).date()
            for i in range(1, (today-date).days+1)]
    together(days, name+'.tex')
def finish(date):
    """Append today's git log to today's note and convert it to LaTeX.

    NOTE(review): the *date* argument is ignored; the entry is always built
    for datetime.now() — confirm this is intentional for the CLI dispatch.
    """
    today = datetime.datetime.now()
    name = today.strftime(DATE_FORMAT)
    with open(name+'.md', 'a') as f:
        f.write(log(today))
    # Convert markdown -> latex, stripping TOC entries and section labels.
    cmd = 'pandoc -f markdown -t latex {}.md'
    cmd += " |grep -v addcontent|sed -e '/^\\\\sec/ s/\\\\label.*$//'"
    print(cmd.format(name))
    latex = check_output(cmd.format(name), shell=True)
    with open(name+'.tex', 'w') as today_log:
        today_log.write(latex)
    print('latexmk -pdf -pvc current')
    print('mv current.pdf {}.pdf'.format(name))
if __name__ == '__main__':
    # CLI: manage.py [command] [date] — dispatches to the module function
    # named *command* (default: create) with an optional day-first date.
    date = datetime.datetime.now()
    command = 'create'
    if len(sys.argv) > 1:
        command = sys.argv[1].strip()
    if len(sys.argv) > 2:
        date = parse(sys.argv[2], dayfirst=True)
    globals()[command](date)
| daureg/illalla | diary/manage.py | Python | mit | 2,888 |
"""Single slice vgg with normalised scale.
"""
import functools
import lasagne as nn
import numpy as np
import theano
import theano.tensor as T
import data_loader
import deep_learning_layers
import layers
import preprocess
import postprocess
import objectives
import theano_printer
import updates
# Random params
rng = np.random
take_a_dump = False # dump a lot of data in a pkl-dump file. (for debugging)
dump_network_loaded_data = False # dump the outputs from the dataloader (for debugging)
# Memory usage scheme
caching = None
# Save and validation frequency
validate_every = 10
validate_train_set = True
save_every = 10
restart_from_save = False
dump_network_loaded_data = False
# Training (schedule) parameters
# - batch sizes
batch_size = 32
sunny_batch_size = 4
batches_per_chunk = 16
AV_SLICE_PER_PAT = 11
num_epochs_train = 50 * AV_SLICE_PER_PAT
# - learning rate and method
base_lr = .0001
learning_rate_schedule = {
0: base_lr,
4*num_epochs_train/5: base_lr/10,
}
momentum = 0.9
build_updates = updates.build_adam_updates
# Preprocessing stuff
cleaning_processes = [
preprocess.set_upside_up,]
cleaning_processes_post = [
functools.partial(preprocess.normalize_contrast_zmuv, z=2)]
augmentation_params = {
"rotation": (-16, 16),
"shear": (0, 0),
"translation": (-8, 8),
"flip_vert": (0, 1),
"roll_time": (0, 0),
"flip_time": (0, 0),
}
preprocess_train = preprocess.preprocess_normscale
preprocess_validation = functools.partial(preprocess_train, augment=False)
preprocess_test = preprocess_train
sunny_preprocess_train = preprocess.sunny_preprocess_with_augmentation
sunny_preprocess_validation = preprocess.sunny_preprocess_validation
sunny_preprocess_test = preprocess.sunny_preprocess_validation
# Data generators
create_train_gen = data_loader.generate_train_batch
create_eval_valid_gen = functools.partial(data_loader.generate_validation_batch, set="validation")
create_eval_train_gen = functools.partial(data_loader.generate_validation_batch, set="train")
create_test_gen = functools.partial(data_loader.generate_test_batch, set=["validation", "test"])
# Input sizes
image_size = 128
data_sizes = {
"sliced:data:singleslice:difference:middle": (batch_size, 29, image_size, image_size), # 30 time steps, 30 mri_slices, 100 px wide, 100 px high,
"sliced:data:singleslice:difference": (batch_size, 29, image_size, image_size), # 30 time steps, 30 mri_slices, 100 px wide, 100 px high,
"sliced:data:singleslice": (batch_size, 30, image_size, image_size), # 30 time steps, 30 mri_slices, 100 px wide, 100 px high,
"sliced:data:ax": (batch_size, 30, 15, image_size, image_size), # 30 time steps, 30 mri_slices, 100 px wide, 100 px high,
"sliced:data:shape": (batch_size, 2,),
"sunny": (sunny_batch_size, 1, image_size, image_size)
# TBC with the metadata
}
# Objective
l2_weight = 0.000
l2_weight_out = 0.000
def build_objective(interface_layers):
    """Build the Kaggle CRPS objective with an L2 weight-decay penalty on
    the layers listed under interface_layers["regularizable"]."""
    # l2 regu on certain layers
    l2_penalty = nn.regularization.regularize_layer_params_weighted(
        interface_layers["regularizable"], nn.regularization.l2)
    # build objective
    return objectives.KaggleObjective(interface_layers["outputs"], penalty=l2_penalty)
# Testing
postprocess = postprocess.postprocess
test_time_augmentations = 100 * AV_SLICE_PER_PAT # More augmentations since a we only use single slices
# Architecture
def build_model():
    """Build the single-slice VGG-16-style network.

    Five conv/pool stages feed two parallel dense heads (systole and
    diastole), each a 600-way softmax turned into a CDF by CumSumLayer.
    Returns the inputs/outputs/regularizable dict consumed by the trainer.
    """
    #################
    # Regular model #
    #################
    input_size = data_sizes["sliced:data:singleslice"]

    l0 = nn.layers.InputLayer(input_size)
    l1a = nn.layers.dnn.Conv2DDNNLayer(l0, W=nn.init.Orthogonal("relu"), filter_size=(3,3), num_filters=64, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.rectify)
    l1b = nn.layers.dnn.Conv2DDNNLayer(l1a, W=nn.init.Orthogonal("relu"), filter_size=(3,3), num_filters=64, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.rectify)
    l1 = nn.layers.dnn.MaxPool2DDNNLayer(l1b, pool_size=(2,2), stride=(2,2))
    l2a = nn.layers.dnn.Conv2DDNNLayer(l1, W=nn.init.Orthogonal("relu"), filter_size=(3,3), num_filters=128, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.rectify)
    l2b = nn.layers.dnn.Conv2DDNNLayer(l2a, W=nn.init.Orthogonal("relu"), filter_size=(3,3), num_filters=128, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.rectify)
    l2 = nn.layers.dnn.MaxPool2DDNNLayer(l2b, pool_size=(2,2), stride=(2,2))
    l3a = nn.layers.dnn.Conv2DDNNLayer(l2, W=nn.init.Orthogonal("relu"), filter_size=(3,3), num_filters=256, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.rectify)
    l3b = nn.layers.dnn.Conv2DDNNLayer(l3a, W=nn.init.Orthogonal("relu"), filter_size=(3,3), num_filters=256, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.rectify)
    l3c = nn.layers.dnn.Conv2DDNNLayer(l3b, W=nn.init.Orthogonal("relu"), filter_size=(3,3), num_filters=256, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.rectify)
    l3 = nn.layers.dnn.MaxPool2DDNNLayer(l3c, pool_size=(2,2), stride=(2,2))
    l4a = nn.layers.dnn.Conv2DDNNLayer(l3, W=nn.init.Orthogonal("relu"), filter_size=(3,3), num_filters=512, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.rectify)
    l4b = nn.layers.dnn.Conv2DDNNLayer(l4a, W=nn.init.Orthogonal("relu"), filter_size=(3,3), num_filters=512, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.rectify)
    l4c = nn.layers.dnn.Conv2DDNNLayer(l4b, W=nn.init.Orthogonal("relu"), filter_size=(3,3), num_filters=512, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.rectify)
    l4 = nn.layers.dnn.MaxPool2DDNNLayer(l4c, pool_size=(2,2), stride=(2,2))
    l5a = nn.layers.dnn.Conv2DDNNLayer(l4, W=nn.init.Orthogonal("relu"), filter_size=(3,3), num_filters=512, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.rectify)
    l5b = nn.layers.dnn.Conv2DDNNLayer(l5a, W=nn.init.Orthogonal("relu"), filter_size=(3,3), num_filters=512, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.rectify)
    l5c = nn.layers.dnn.Conv2DDNNLayer(l5b, W=nn.init.Orthogonal("relu"), filter_size=(3,3), num_filters=512, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.rectify)
    l5 = nn.layers.dnn.MaxPool2DDNNLayer(l5c, pool_size=(2,2), stride=(2,2))
    # Systole Dense layers
    ldsys1 = nn.layers.DenseLayer(l5, num_units=1024, W=nn.init.Orthogonal("relu"), b=nn.init.Constant(0.1), nonlinearity=nn.nonlinearities.rectify)
    ldsys1drop = nn.layers.dropout(ldsys1, p=0.5)
    ldsys2 = nn.layers.DenseLayer(ldsys1drop, num_units=1024, W=nn.init.Orthogonal("relu"),b=nn.init.Constant(0.1), nonlinearity=nn.nonlinearities.rectify)
    ldsys2drop = nn.layers.dropout(ldsys2, p=0.5)
    ldsys3 = nn.layers.DenseLayer(ldsys2drop, num_units=600, W=nn.init.Orthogonal("relu"), b=nn.init.Constant(0.1), nonlinearity=nn.nonlinearities.softmax)
    ldsys3drop = nn.layers.dropout(ldsys3, p=0.5) # dropout at the output might encourage adjacent neurons to correllate
    # NOTE(review): l_systole is built from ldsys3 (pre-dropout) while
    # l_diastole below uses lddia3drop — confirm this asymmetry is intended;
    # the file name ("dropoutput") suggests both heads should use dropout.
    l_systole = layers.CumSumLayer(ldsys3)
    # Diastole Dense layers
    lddia1 = nn.layers.DenseLayer(l5, num_units=1024, W=nn.init.Orthogonal("relu"), b=nn.init.Constant(0.1), nonlinearity=nn.nonlinearities.rectify)
    lddia1drop = nn.layers.dropout(lddia1, p=0.5)
    lddia2 = nn.layers.DenseLayer(lddia1drop, num_units=1024, W=nn.init.Orthogonal("relu"),b=nn.init.Constant(0.1), nonlinearity=nn.nonlinearities.rectify)
    lddia2drop = nn.layers.dropout(lddia2, p=0.5)
    lddia3 = nn.layers.DenseLayer(lddia2drop, num_units=600, W=nn.init.Orthogonal("relu"), b=nn.init.Constant(0.1), nonlinearity=nn.nonlinearities.softmax)
    lddia3drop = nn.layers.dropout(lddia3, p=0.5) # dropout at the output might encourage adjacent neurons to correllate
    l_diastole = layers.CumSumLayer(lddia3drop)

    return {
        "inputs":{
            "sliced:data:singleslice": l0
        },
        "outputs": {
            "systole": l_systole,
            "diastole": l_diastole,
        },
        "regularizable": {
            ldsys1: l2_weight,
            ldsys2: l2_weight,
            ldsys3: l2_weight_out,
            lddia1: l2_weight,
            lddia2: l2_weight,
            lddia3: l2_weight_out,
        },
    }
| 317070/kaggle-heart | configurations/je_ss_smcrps_nrmsc_dropoutput.py | Python | mit | 8,198 |
"""
"""
import sys
import re
import keyword
import logging
import traceback
from functools import partial
from PySide import QtCore
from PySide.QtCore import QObject
from maya import cmds
from mampy.pyside.utils import get_qt_object
from mamprefs import config
from mamprefs.base import BaseManager, deleteUI, file_to_pyobject
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
ACTIVE_MENU = None
def get_parent_panel():
    """
    Return current panels parent.

    Walks up the layout hierarchy from the panel under the pointer until a
    paneLayout is found; falls back to 'viewPanes' when the panel does not
    exist.
    """
    panel = cmds.getPanel(up=True)
    if cmds.panel(panel, q=True, ex=True):
        panel_layout = cmds.layout(panel, q=True, p=True)
        while not cmds.paneLayout(panel_layout, q=True, ex=True):
            panel_layout = cmds.control(panel_layout, q=True, p=True)
        if cmds.paneLayout(panel_layout, q=True, ex=True):
            return panel_layout
    else:
        return 'viewPanes'
class MarkingMenuManager(BaseManager):
    """
    Manages .markingmenu preference files: parses them into MarkingMenu
    objects and exposes them in Maya's preference menu UI.
    """
    def __init__(self):
        super(MarkingMenuManager, self).__init__('.markingmenu')

    def __getitem__(self, key):
        # Look up a menu by name across every parsed preference file.
        for menu_list in self.map.itervalues():
            for menu in menu_list:
                if menu.name == key:
                    return menu
        else:
            raise KeyError('"{}" is not in menu map.'.format(key))

    def add_menu_items(self):
        """
        Create menu items for every existing marking menu.
        """
        cmds.menuItem(divider=True)
        for file_name, menu_list in self.map.iteritems():
            for menu in menu_list:
                cmds.menuItem(
                    l=menu.name.title(),
                    c=partial(self.output, menu),
                )
                # Option box opens the source file for editing.
                cmds.menuItem(ob=True, c=partial(self.edit, file_name))
        cmds.menuItem(divider=True)

    def initUI(self):
        """
        Creates the user interface, can be used to update it aswell.
        """
        super(MarkingMenuManager, self).initUI()
        # UI element names
        main_menu = config['MENU_MAIN_NAME']
        marking_menu = config['MENU_MARKING_NAME']
        layout_menu = config['MENU_LAYOUT_NAME']
        # Delete UI elements if they exists.
        deleteUI(marking_menu)
        # Create the UI
        cmds.menuItem(
            marking_menu,
            label='Marking Menus',
            subMenu=True,
            allowOptionBoxes=True,
            insertAfter=layout_menu,
            parent=main_menu,
            tearOff=True,
        )
        cmds.menuItem(l='Update', c=lambda *args: self.reload_marking_menus())
        if self.map:
            self.add_menu_items()
        else:
            cmds.menuItem(l='No Marking Menus', enable=False)
        cmds.menuItem(l='Clean Scene', c=lambda *args: self.clean_menu())

    def parse_files(self):
        # Build {file_name: [MarkingMenu, ...]} from every preference file.
        for file_name, f in self.files.iteritems():
            file_map = file_to_pyobject(f)
            self.map[file_name] = [
                MarkingMenu(**menu)
                for menu in file_map
                # for name, item in menu.iteritems()
            ]

    def reload_marking_menus(self):
        """
        Rebuild menus and re-parse files. Then rebuild the UI.
        """
        self.reload()
        self.initUI()

    def clean_menu(self):
        """
        .. note:: Might be redundant.
        """
        deleteUI(config['MENU_MARKING_POPUP_NAME'])

    def output(self, menu, *args):
        """
        Outputs to script editor.
        """
        # Menus without radial positions print linearly; radial menus print
        # grouped by compass direction.
        if not any('radialPosition' in item for item in menu.items):
            for item in menu.items:
                print item
        else:
            for radial in ["N", "NW", "W", "SW", "S", "SE", "E", "NE"]:
                for item in menu.items:
                    try:
                        if radial == item['radialPosition']:
                            print '{}: {}'.format(radial, item)
                    except KeyError:
                        pass
class MarkingMenu(object):
    """
    A popup marking menu bound to a mouse button + modifier combination.

    Items are parsed from preference data into MarkingMenuItem objects;
    show()/hide() install a MarkingMenuEventFilter on the owning panel
    widget so the popup is torn down like a native Maya marking menu.
    """
    def __init__(self, name, button, marking_menu, modifiers, items,
                 option_boxes=False):
        self.name = name
        self.marking_menu = marking_menu
        self.button = button
        self.option_boxes = option_boxes
        self.items = list()
        # e.g. ['ctrl', 'alt'] -> {'ctrlModifier': True, 'altModifier': True}
        self.modifiers = {'{}Modifier'.format(i): True for i in modifiers}
        self.pane_widget = None
        self.closing_event = MarkingMenuEventFilter()
        self.parse_items(items)
        logger.debug([name, button, marking_menu, modifiers, items])

    def __str__(self):
        return '{}({})'.format(self.__class__.__name__, self.name)

    __repr__ = __str__

    def parse_items(self, items):
        # Recursively flatten items; a 'sub_menu' entry opens a subMenu and
        # the appended {'set_parent': True} sentinel closes it again.
        logger.debug('New menu.')
        for item in items:
            logger.debug(item)
            if 'sub_menu' in item:
                logging.debug('building sub menu')
                sub_list = item.pop('sub_menu', [])
                sub_list.append({'set_parent': True})
                logging.debug(sub_list)
                item['subMenu'] = True
                self.items.append(MarkingMenuItem(**item))
                self.parse_items(sub_list)
            else:
                self.items.append(MarkingMenuItem(**item))

    def build_menu(self):
        """
        Creates menu items.
        """
        try:
            cmds.popupMenu(
                config['MENU_MARKING_POPUP_NAME'],
                button=self.button,
                allowOptionBoxes=self.option_boxes,
                markingMenu=self.marking_menu,
                parent=get_parent_panel(),
                **self.modifiers
            )
            logger.debug('building menu items:')
            for item in self.items:
                logger.debug(item)
                if 'set_parent' in item:
                    cmds.setParent('..', m=True)
                else:
                    cmds.menuItem(**item.unpack())
        except:
            traceback.print_exc(file=sys.stdout)

    def show(self):
        """
        Shows marking menu on hotkey press.
        """
        try:
            self.pane_widget.removeEventFilter(self.closing_event)
        except AttributeError:
            # No widget installed yet (first invocation).
            pass
        deleteUI(config['MENU_MARKING_POPUP_NAME'])
        self.build_menu()
        self.pane_widget = get_qt_object(get_parent_panel())
        self.pane_widget.installEventFilter(self.closing_event)

    def hide(self):
        # Tear down the popup and detach the closing event filter.
        try:
            self.pane_widget.removeEventFilter(self.closing_event)
        except AttributeError:
            pass
        deleteUI(config['MENU_MARKING_POPUP_NAME'])
class MarkingMenuEventFilter(QObject):
    """
    Filter to handle events when building and hiding marking menus.
    """
    # State flags shared across successive eventFilter calls.
    key_release = False   # hotkey released while a child (sub menu) was open
    is_child = False      # a child widget is currently shown
    destroy = False       # tear the menu down on the next event cycle

    def eventFilter(self, obj, event):
        """Make marking menus behave like other maya marking menus."""
        # Destroy the menu in a new event cycle. If we don't do this we will
        # delete the menu before the commands or sub menus are shown and crash
        # maya.
        if self.destroy:
            self.destroy = False
            hide_menu()
        etype = event.type()
        if etype == QtCore.QEvent.ChildRemoved:
            self.is_child = False
            if self.key_release:
                # Hotkey already released: schedule teardown for next cycle.
                self.destroy = True
        if etype == QtCore.QEvent.ChildAdded:
            self.is_child = True
        else:
            if etype == QtCore.QEvent.ShortcutOverride:
                if event.isAutoRepeat():
                    # Held key auto-repeat: keep the menu alive.
                    self.key_release = False
                    return True
            elif etype == QtCore.QEvent.KeyRelease:
                if not self.is_child:
                    hide_menu()
                self.key_release = True
        return super(MarkingMenuEventFilter, self).eventFilter(obj, event)
class MarkingMenuItem(object):
    """
    Kwargs wrapper for one cmds.menuItem call: fills in defaults, maps the
    'position' shorthand to radialPosition and wraps 'command' strings in
    Command so their module import is prepended when needed.
    """
    default_menu = {
        # Requiered
        # 'label': None,
        # 'command': None,
        # 'radialPosition': None,
        # Optional
        'divider': False,
        'subMenu': False,
        'tearOff': False,
        'altModifier': False,
        'ctrlModifier': False,
        'shiftModifier': False,
        'optionModifier': False,
        'commandModifier': False,
        'optionBox': False,
        'enable': True,
        'data': False,
        'allowOptionBoxes': True,
        'postMenuCommandOnce': False,
        'enableCommandRepeat': True,
        'echoCommand': False,
        'italicized': False,
        'boldFont': True,
        'sourceType': 'python',
    }

    def __init__(self, **kwargs):
        self.menu_kwargs = {}
        if 'divider' in kwargs:
            # Dividers carry no other options.
            self.menu_kwargs = {'divider': True}
        elif 'set_parent' in kwargs:
            # Sentinel item: closes the current sub menu (see build_menu).
            self.menu_kwargs['set_parent'] = '..'
            # self.menu_kwargs['label'] = 'set_parent'
        else:
            self.menu_kwargs = self.default_menu.copy()
            if 'position' in kwargs:
                kwargs['radialPosition'] = kwargs.pop('position', None)
            if 'command' in kwargs:
                kwargs['command'] = str(Command(kwargs['command']))
            self.menu_kwargs.update(kwargs)

    def __str__(self):
        if 'label' not in self.menu_kwargs:
            return '{}()'.format(self.__class__.__name__)
        return '{}({})'.format(self.__class__.__name__, self.menu_kwargs['label'])

    def __getitem__(self, key):
        return self.menu_kwargs[key]

    def __contains__(self, key):
        return key in self.menu_kwargs

    __repr__ = __str__

    def unpack(self):
        """Return the kwargs dict ready to splat into cmds.menuItem."""
        return self.menu_kwargs
class Command(object):
regex = re.compile(ur'^\w+')
def __init__(self, command_string):
self.command_string = command_string
self._module = None
self._parsed_command = None
def __str__(self):
return '{}'.format(self.parsed_command)
@property
def module(self):
if self._module is None:
try:
_module = re.findall(self.regex, self.command_string)[0]
except IndexError:
_module = None
return _module
@property
def is_module_keyword(self):
return keyword.iskeyword(self.module)
@property
def is_maya_keyword(self):
return self.module in ['cmds', 'mel']
@property
def parsed_command(self):
if self._parsed_command is None:
self._parsed_command = self.parse()
return self._parsed_command
def parse(self):
tmpcommand = ''
if self.module is None:
return 'null'
if self.is_module_keyword or self.is_maya_keyword:
tmpcommand = self.command_string
else:
tmpcommand = 'import {0.module}; {0.command_string}'.format(self)
logger.debug('parsed command to: {}'.format(tmpcommand))
return tmpcommand
MARKING_MENU_MANAGER = MarkingMenuManager()
def init():
    """Build the preference-menu UI for the module-level manager."""
    MARKING_MENU_MANAGER.initUI()
def show_menu(menu):
    """Display the named marking menu and remember it as the active one."""
    global ACTIVE_MENU
    ACTIVE_MENU = menu
    # NOTE(review): this debug lookup is outside the try, so an unknown
    # menu name raises KeyError here before the handler below — confirm.
    logger.debug(MARKING_MENU_MANAGER[menu])
    try:
        MARKING_MENU_MANAGER[menu].show()
    except KeyError:
        logger.exception(traceback.format_exc())
def hide_menu():
    """Hide the currently active marking menu, if any."""
    # NOTE(review): as in show_menu, the debug lookup outside the try can
    # still raise KeyError for an unknown ACTIVE_MENU — confirm.
    logger.debug(MARKING_MENU_MANAGER[ACTIVE_MENU])
    try:
        MARKING_MENU_MANAGER[ACTIVE_MENU].hide()
    except KeyError:
        logger.exception(traceback.format_exc())
if __name__ == '__main__':
pass
| arubertoson/maya-mamprefs | mamprefs/markingmenus.py | Python | mit | 11,879 |
import argparse
import sys
import rospy
from geometry_msgs.msg import (
PoseStamped,
Pose,
Point,
Quaternion,
)
from std_msgs.msg import Header
from baxter_core_msgs.srv import (
SolvePositionIK,
SolvePositionIKRequest,
)
def ik_solve(limb, pos, orient):
    """Query Baxter's IK service for *limb* ('left' or 'right').

    pos/orient are geometry_msgs Point and Quaternion in the 'base' frame.
    Returns a {joint_name: position} dict on success, 1 when the service
    call fails, and -1 when no valid joint solution exists.
    """
    #~ rospy.init_node("rsdk_ik_service_client")
    ns = "ExternalTools/" + limb + "/PositionKinematicsNode/IKService"
    iksvc = rospy.ServiceProxy(ns, SolvePositionIK)
    ikreq = SolvePositionIKRequest()
    print "iksvc: ", iksvc
    print "ikreq: ", ikreq
    hdr = Header(stamp=rospy.Time.now(), frame_id='base')
    poses = {
        str(limb): PoseStamped(header=hdr,
                               pose=Pose(position=pos, orientation=orient))}
    ikreq.pose_stamp.append(poses[limb])
    try:
        # Wait up to 5 s for the IK service before calling it.
        rospy.wait_for_service(ns, 5.0)
        resp = iksvc(ikreq)
    except (rospy.ServiceException, rospy.ROSException), e:
        rospy.logerr("Service call failed: %s" % (e,))
        return 1
    if (resp.isValid[0]):
        print("SUCCESS - Valid Joint Solution Found:")
        # Format solution into Limb API-compatible dictionary
        limb_joints = dict(zip(resp.joints[0].name, resp.joints[0].position))
        print limb_joints
        return limb_joints
    else:
        print("INVALID POSE - No Valid Joint Solution Found.")
        return -1
| calumk/ROS-Blocks | ros_blocks/scripts/ik_solver.py | Python | mit | 1,346 |
from classytags.helpers import InclusionTag
from django import template
from django.conf import settings
from django.template.loader import render_to_string
register = template.Library()
class Banner(InclusionTag):
    """
    Displays a checkout mode banner.

    Renders sagepay/checkout_mode_banner.html unless SAGEPAY_MODE is
    "Live", in which case nothing is shown.
    """
    template = 'sagepay/checkout_mode_banner.html'

    def render_tag(self, context, **kwargs):
        template = self.get_template(context, **kwargs)
        # Suppress the banner entirely in live mode.
        if settings.SAGEPAY_MODE == "Live":
            return ''
        data = self.get_context(context, **kwargs)
        return render_to_string(template, data)
register.tag(Banner) | glynjackson/django-oscar-sagepay | sagepay/templatetags/checkout_mode_tag.py | Python | mit | 613 |
#!/usr/bin/env python
import sys, inspect, cgi, os, subprocess, pipes, json, select, time, threading
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
from urlparse import parse_qs
from threading import Thread
from gpiozero import RGBLED
from gpiozero import PWMLED
# SETTINGS
serverPort = 40001
baseDir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
secret = '4468e5deabf5e6d0740cd1a77df56f67093ec943'
# TOOLS
def cleanColor(color):
    """Strip any '#' characters from *color* and truncate to six hex digits."""
    return color.replace("#", "")[:6]
# CLASSES
class ledControl(threading.Thread):
    """Drives the RGB status LED on GPIO pins 22/23/24 via gpiozero.

    Colours are handed in as hex strings ('#RRGGBB') and converted to the
    0..1 floats gpiozero expects.  The green channel is deliberately damped
    in convertColor() to balance the LED's brightness.  (bRed, bGreen,
    bBlue) is the dim-red standby colour shown when idle or blinking off.
    """

    def __init__(self):
        threading.Thread.__init__(self)
        self.color = '000000'   # last hex colour requested
        self.red = 0            # converted channel values, 0..1 floats
        self.green = 0
        self.blue = 0
        self.bRed = 0.4         # standby ("off") colour components
        self.bGreen = 0
        self.bBlue = 0
        self.on = True
        self.blink = False      # guards against overlapping blink threads
        self.RGB = RGBLED(22, 23, 24, False, (self.bRed, self.bGreen, self.bBlue))

    def convertColor(self):
        """Convert self.color ('RRGGBB') into self.red/green/blue floats.

        Returns True on success, False when the string is not valid hex.
        Green is damped to 60% to compensate for the LED's brighter green
        die; every channel is clamped to the 256 divider before scaling.
        """
        try:
            divider = 256
            red = float(int("0x" + self.color[0:2], 0))
            green = float(int("0x" + self.color[2:4], 0))
            green = green - (green / 2.5)
            blue = float(int("0x" + self.color[4:6], 0))
            red = min(red, divider)
            green = min(green, divider)
            blue = min(blue, divider)
            self.red = red / divider
            self.green = green / divider
            self.blue = blue / divider
            return True
        except ValueError:
            return False

    def setColor(self, red, green, blue):
        """Push raw 0..1 channel values straight to the hardware."""
        self.RGB.color = (red, green, blue)

    def setHexColor(self, color):
        """Store *color* (hex string) and convert it; does not light the LED."""
        self.color = cleanColor(color)
        return self.convertColor()

    def loadAndSetColor(self, color):
        """Convert *color* and immediately display it."""
        self.color = cleanColor(color)
        self.convertColor()
        self.setColor(self.red, self.green, self.blue)
        return

    def blinkLeds(self, interval, fadeTime, blinks, on):
        """Start a background blink unless one is already running.

        Returns True when the blink thread was started, False otherwise.
        """
        if not self.blink:
            Thread(target=self.blinkingThread,
                   args=(interval, fadeTime, blinks, on)).start()
            return True
        return False

    def blinkingThread(self, interval, fadeTime, blinks, on):
        """Worker: blink between the current colour and the standby colour,
        then restore either the current (*on*) or standby colour."""
        fInterval = float(interval)
        fFadeTime = float(fadeTime)
        fBlinks = int(blinks)
        if not self.blink:
            self.blink = True
            blinker = self.RGB.blink(fInterval, fInterval, fFadeTime, fFadeTime,
                                     (self.red, self.green, self.blue),
                                     (self.bRed, self.bGreen, self.bBlue),
                                     fBlinks, False)
            self.blink = False
            blinker = None
        if on:
            self.setColor(self.red, self.green, self.blue)
        else:
            self.setColor(self.bRed, self.bGreen, self.bBlue)

    def _rainbow_colors(self):
        """Yield hex colours walking the hue wheel once in 0x11 steps.

        Replaces the original ~120 hand-written colour literals with
        generated ramps; this also fixes two typos in the original table
        (a duplicated '#AAFF00' and a skipped '#00FFDD').
        """
        up = [i * 0x11 for i in range(1, 16)]          # 0x11 .. 0xFF
        down = [i * 0x11 for i in range(14, -1, -1)]   # 0xEE .. 0x00
        fade_out = down[:-1]                           # 0xEE .. 0x11
        ramps = [
            (lambda v: (v, 0, 0), up),        # black   -> red
            (lambda v: (0xFF, v, 0), up),     # red     -> yellow
            (lambda v: (v, 0xFF, 0), down),   # yellow  -> green
            (lambda v: (0, 0xFF, v), up),     # green   -> cyan
            (lambda v: (0, v, 0xFF), down),   # cyan    -> blue
            (lambda v: (v, 0, 0xFF), up),     # blue    -> magenta
            (lambda v: (0xFF, 0, v), down),   # magenta -> red
            (lambda v: (v, 0, 0), fade_out),  # red     -> nearly black
        ]
        for channels, values in ramps:
            for value in values:
                yield '#{:02X}{:02X}{:02X}'.format(*channels(value))

    def rainbow(self):
        """Sweep the LED once around the hue wheel (50 ms per step), then
        return to the dim red standby colour."""
        self.loadAndSetColor('#000000')
        time.sleep(0.5)
        for color in self._rainbow_colors():
            self.loadAndSetColor(color)
            time.sleep(0.05)
        self.loadAndSetColor('#000000')
        time.sleep(0.5)
        self.setColor(0.4, 0, 0)
#SETUP
leds = ledControl()
leds.daemon = True
leds.setHexColor('#00ff00')
leds.blinkLeds(0,0.3,5,False)
# SERVER
def handleRequest(request):
    """Dispatch a parsed query-string dict to the LED controller.

    Supported commands: loadSetColor, setHexColor, blinkLeds (JSON value
    with interval/fadeTime/blinks/on keys) and rainbow (runs in its own
    thread).  Returns False on malformed blink parameters.
    """
    command = ''
    if 'command' in request:
        command = request['command'][0]
    value = ''
    if 'value' in request:
        value = request['value'][0]
    if command == 'loadSetColor':
        return leds.loadAndSetColor(value)
    elif command == 'setHexColor':
        return leds.setHexColor(value)
    elif command == 'blinkLeds':
        try:
            values = json.loads(value)
            interval = float(values['interval'])
            fadeTime = float(values['fadeTime'])
            blinks = int(values['blinks'])
            if values['on']:
                on = True
            elif not values['on']:
                on = False
            return leds.blinkLeds(interval, fadeTime, blinks, on)
        except KeyError:
            # Missing a required blink parameter.
            return False
    elif command == 'rainbow':
        # Rainbow takes ~7 s; run it off the request thread.
        Thread(target=leds.rainbow, args=()).start()
    return
class S(BaseHTTPRequestHandler):
    """HTTP handler: commands arrive as a query string on the request path
    (?command=...&value=...&secret=...) for both GET and POST; requests are
    authenticated against the module-level shared secret."""

    def do_GET(self):
        try:
            requestData = parse_qs(self.path[2:])  # strip leading '/?'
            returnData = None
            if requestData:
                if requestData['secret'][0] == secret:
                    returnData = handleRequest(requestData)
                    self.send_response(200)
                else:
                    self.send_response(401)
            else:
                self.send_response(404)
        except KeyError:
            # Query string present but missing the 'secret' key.
            self.send_response(500)
        self.send_header("Content-type", "application/json")
        self.send_header("Access-Control-Allow-Origin", "*")
        self.end_headers()
        if returnData:
            self.wfile.write(returnData)

    def do_POST(self):
        # Same flow as do_GET but never writes a response body.
        try:
            requestData = parse_qs(self.path[2:])
            if requestData:
                if requestData['secret'][0] == secret:
                    handleRequest(requestData)
                    self.send_response(200)
                else:
                    self.send_response(401)
            else:
                self.send_response(404)
        except KeyError:
            self.send_response(500)
        self.send_header("Content-type", "application/json")
        self.send_header("Access-Control-Allow-Origin", "*")
        self.end_headers()
def run(server_class=HTTPServer, handler_class=S, port=serverPort):
    """Start the (blocking) LED control HTTP server on *port*."""
    server_address = ('', port)
    httpd = server_class(server_address, handler_class)
    print('Starting server')
    httpd.serve_forever()
# MAIN LOOP
def main(args):
    """Entry point: an optional single CLI argument overrides the port."""
    from sys import argv
    if len(argv) == 2:
        run(port=int(argv[1]))
    else:
        run()


if __name__ == "__main__":
    main(sys.argv)
| fedtemis/Fonograf | servers/ledServer.py | Python | mit | 13,546 |
'''
Convert a table from a nested list to a nested dictionary and back.
-----------------------------------------------------------
(c) 2013 Allegra Via and Kristian Rother
Licensed under the conditions of the Python License
This code appears in section 7.4.3 of the book
"Managing Biological Data with Python".
-----------------------------------------------------------
'''
# Calibration table: the first row is the header, the rest are readings.
table = [
    ['protein', 'ext1', 'ext2', 'ext3'],
    [0.16, 0.038, 0.044, 0.040],
    [0.33, 0.089, 0.095, 0.091],
    [0.66, 0.184, 0.191, 0.191],
    [1.00, 0.280, 0.292, 0.283],
    [1.32, 0.365, 0.367, 0.365],
    [1.66, 0.441, 0.443, 0.444]
    ]

# --- nested list -> nested dict ---------------------------------------
# Each data row becomes a {column-name: value} dict stored under 'rowN'.
header = table[0]
nested_dict = {}
for row_number, row in enumerate(table[1:], start=1):
    nested_dict['row' + str(row_number)] = dict(zip(header, row))
print(nested_dict)

# --- nested dict -> nested list ---------------------------------------
# Rebuild the data rows (without the header) in column order.
nested_list = []
for row_key in nested_dict:
    record = nested_dict[row_key]
    nested_list.append([record['protein'], record['ext1'], record['ext2'],
                        record['ext3']])
print(nested_list)
| raymonwu/Managing_Your_Biological_Data_with_Python_3 | 07-tabular_data/7.4.3_convert_table.py | Python | mit | 1,222 |
#!/usr/bin/env python
import commands, sys
# Get path of the toolbox
status, path_sct = commands.getstatusoutput('echo $SCT_DIR')
# Append path that contains scripts, to be able to load modules
sys.path.append(path_sct + '/scripts')
from msct_parser import Parser
from nibabel import load, save, Nifti1Image
import os
import time
import sct_utils as sct
from sct_process_segmentation import extract_centerline
from sct_orientation import get_orientation
# DEFAULT PARAMETERS
class Param:
    """Default processing parameters for the centerline extraction."""
    ## The constructor
    def __init__(self):
        self.debug = 0
        self.verbose = 1 # verbose
        self.remove_temp_files = 1 # delete the temporary working folder when done
        self.type_window = 'hanning' # for smooth_centerline @sct_straighten_spinalcord
        self.window_length = 80 # for smooth_centerline @sct_straighten_spinalcord
        self.algo_fitting = 'nurbs' # curve-fitting algorithm used on the centerline
        # self.parameter = "binary_centerline"
        self.list_file = [] # input segmentation/label file names
        self.output_file_name = '' # output NIFTI name; empty means use the default
def main(list_file, param, output_file_name=None, remove_temp_files = 1, verbose = 0):
    """Sum the input segmentation/label NIFTI volumes (all must share the
    orientation of the first file), fit a smooth centerline through the
    summed volume, and copy the resulting NIFTI image and text file of
    centerline coordinates back to the working directory. Exits with
    status 2 on an orientation mismatch."""
    path, file, ext = sct.extract_fname(list_file[0])
    # create temporary folder
    path_tmp = 'tmp.'+time.strftime("%y%m%d%H%M%S")
    sct.run('mkdir '+path_tmp)
    # copy files into tmp folder
    sct.printv('\nCopy files into tmp folder...', verbose)
    for i in range(len(list_file)):
        file_temp = os.path.abspath(list_file[i])
        sct.run('cp '+file_temp+' '+path_tmp)
    # go to tmp folder
    os.chdir(path_tmp)
    ## Concatenation of the files
    # Concatenation : sum of matrices
    file_0 = load(file+ext)
    data_concatenation = file_0.get_data()
    hdr_0 = file_0.get_header()
    orientation_file_0 = get_orientation(list_file[0])
    if len(list_file)>0:
        for i in range(1, len(list_file)):
            # Every additional input must match the first file's orientation.
            orientation_file_temp = get_orientation(list_file[i])
            if orientation_file_0 != orientation_file_temp :
                print "ERROR: The files ", list_file[0], " and ", list_file[i], " are not in the same orientation. Use sct_orientation to change the orientation of a file."
                sys.exit(2)
            file_temp = load(list_file[i])
            data_temp = file_temp.get_data()
            data_concatenation = data_concatenation + data_temp
    # Save concatenation as a file
    print '\nWrite NIFTI volumes...'
    img = Nifti1Image(data_concatenation, None, hdr_0)
    save(img,'concatenation_file.nii.gz')
    # Applying nurbs to the concatenation and save file as binary file
    fname_output = extract_centerline('concatenation_file.nii.gz', remove_temp_files = remove_temp_files, verbose = verbose, algo_fitting=param.algo_fitting, type_window=param.type_window, window_length=param.window_length)
    # Rename files after processing
    if output_file_name != None:
        output_file_name = output_file_name
    else : output_file_name = "generated_centerline.nii.gz"
    os.rename(fname_output, output_file_name)
    path_binary, file_binary, ext_binary = sct.extract_fname(output_file_name)
    os.rename('concatenation_file_centerline.txt', file_binary+'.txt')
    # Process for a binary file as output:
    sct.run('cp '+output_file_name+' ../')
    # Process for a text file as output:
    sct.run('cp '+file_binary+ '.txt'+ ' ../')
    os.chdir('../')
    # Remove temporary files
    if remove_temp_files:
        print('\nRemove temporary files...')
        sct.run('rm -rf '+path_tmp)
    # Display results
    # The concatenate centerline and its fitted curve are displayed whithin extract_centerline
#=======================================================================================================================
# Start program
#=======================================================================================================================
if __name__ == "__main__":
# initialize parameters
# Initialize the parser
parser = Parser(__file__)
parser.usage.set_description('Compute a centerline from a list of segmentation and label files. It concatenates the parts, then extract the centerline. The output is a NIFTI image and a text file with the float coordinates (z, x, y) of the centerline.')
parser.add_option(name="-i",
type_value=[[','],'file'],
description="List containing segmentation NIFTI file and label NIFTI files. They must be 3D. Names must be separated by commas without spaces.",
mandatory=True,
example= "data_seg.nii.gz,label1.nii.gz,label2.nii.gz")
parser.add_option(name="-o",
type_value="file_output",
description="Name of the output NIFTI image with the centerline and of the output text file with the coordinates (z, x, y) (but text file will have '.txt' extension).",
mandatory=False,
default_value='generated_centerline.nii.gz')
parser.add_option(name="-r",
type_value="multiple_choice",
description="Remove temporary files. Specify 0 to get access to temporary files.",
mandatory=False,
example=['0','1'],
default_value="1")
parser.add_option(name="-v",
type_value="multiple_choice",
description="Verbose. 0: nothing. 1: basic. 2: extended.",
mandatory=False,
default_value='0',
example=['0', '1', '2'])
arguments = parser.parse(sys.argv[1:])
remove_temp_files = int(arguments["-r"])
verbose = int(arguments["-v"])
if "-i" in arguments:
list_file = arguments["-i"]
else: list_file = None
if "-o" in arguments:
output_file_name = arguments["-o"]
else: output_file_name = None
param = Param()
param.verbose = verbose
param.remove_temp_files =remove_temp_files
main(list_file, param, output_file_name, remove_temp_files, verbose)
| 3324fr/spinalcordtoolbox | dev/sct_detect_spinalcord/sct_get_centerline_from_labels.py | Python | mit | 5,974 |
#!/usr/bin/env python2
#
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
from .mininode import *
from .blockstore import BlockStore, TxStore
from .util import p2p_port
'''
This is a tool for comparing two or more mocacoinds to each other
using a script provided.
To use, create a class that implements get_tests(), and pass it in
as the test generator to TestManager. get_tests() should be a python
generator that returns TestInstance objects. See below for definition.
'''
# TestNode behaves as follows:
# Configure with a BlockStore and TxStore
# on_inv: log the message but don't request
# on_headers: log the chain tip
# on_pong: update ping response map (for synchronization)
# on_getheaders: provide headers via BlockStore
# on_getdata: provide blocks via BlockStore
# NOTE(review): a module-level ``global`` statement is a no-op (names bound at
# module scope are already global); kept unchanged for fidelity with upstream.
global mininode_lock
class RejectResult(object):
    """Expected rejection outcome for a submitted transaction or block.

    Two reject results match when their codes are equal and the observed
    reason starts with this result's (possibly empty) reason prefix.
    """
    def __init__(self, code, reason=b''):
        self.code = code
        self.reason = reason
    def match(self, other):
        # Codes must agree; the stored reason acts as a prefix filter.
        return self.code == other.code and other.reason.startswith(self.reason)
    def __repr__(self):
        return '%i:%s' % (self.code,self.reason or '*')
class TestNode(NodeConnCB):
    """P2P callback handler attached to each node under comparison.

    Serves blocks/headers from a shared BlockStore and transactions from a
    shared TxStore, and records inv/reject/pong traffic so TestManager can
    synchronize the nodes and compare their state.
    """
    def __init__(self, block_store, tx_store):
        NodeConnCB.__init__(self)
        self.conn = None
        self.bestblockhash = None       # peer's chain tip (from headers msgs)
        self.block_store = block_store
        self.block_request_map = {}     # block hash -> was it requested via getdata
        self.tx_store = tx_store
        self.tx_request_map = {}        # tx hash -> was it requested via getdata
        self.block_reject_map = {}      # block hash -> RejectResult received
        self.tx_reject_map = {}         # tx hash -> RejectResult received
        # When the pingmap is non-empty we're waiting for
        # a response
        self.pingMap = {}
        self.lastInv = []
        self.closed = False
    def on_close(self, conn):
        # Polled by TestManager.wait_for_disconnections().
        self.closed = True
    def add_connection(self, conn):
        self.conn = conn
    def on_headers(self, conn, message):
        # Track the peer's tip from the last header it announced.
        if len(message.headers) > 0:
            best_header = message.headers[-1]
            best_header.calc_sha256()
            self.bestblockhash = best_header.sha256
    def on_getheaders(self, conn, message):
        # Answer getheaders requests out of the shared block store.
        response = self.block_store.headers_for(message.locator, message.hashstop)
        if response is not None:
            conn.send_message(response)
    def on_getdata(self, conn, message):
        # Deliver requested blocks/transactions and record which hashes the
        # node asked for (used by the sync_* methods for synchronization).
        [conn.send_message(r) for r in self.block_store.get_blocks(message.inv)]
        [conn.send_message(r) for r in self.tx_store.get_transactions(message.inv)]
        for i in message.inv:
            if i.type == 1:
                self.tx_request_map[i.hash] = True
            elif i.type == 2:
                self.block_request_map[i.hash] = True
    def on_inv(self, conn, message):
        self.lastInv = [x.hash for x in message.inv]
    def on_pong(self, conn, message):
        try:
            del self.pingMap[message.nonce]
        except KeyError:
            raise AssertionError("Got pong for unknown ping [%s]" % repr(message))
    def on_reject(self, conn, message):
        # Remember the rejection code/reason so expected outcomes can be matched.
        if message.message == b'tx':
            self.tx_reject_map[message.data] = RejectResult(message.code, message.reason)
        if message.message == b'block':
            self.block_reject_map[message.data] = RejectResult(message.code, message.reason)
    def send_inv(self, obj):
        # inv type 2 = block, 1 = transaction.
        mtype = 2 if isinstance(obj, CBlock) else 1
        self.conn.send_message(msg_inv([CInv(mtype, obj.sha256)]))
    def send_getheaders(self):
        # We ask for headers from their last tip.
        m = msg_getheaders()
        m.locator = self.block_store.get_locator(self.bestblockhash)
        self.conn.send_message(m)
    # This assumes BIP31
    def send_ping(self, nonce):
        self.pingMap[nonce] = True
        self.conn.send_message(msg_ping(nonce))
    def received_ping_response(self, nonce):
        return nonce not in self.pingMap
    def send_mempool(self):
        self.lastInv = []
        self.conn.send_message(msg_mempool())
# TestInstance:
#
# Instances of these are generated by the test generator, and fed into the
# comptool.
#
# "blocks_and_transactions" should be an array of
# [obj, True/False/None, hash/None]:
# - obj is either a CBlock, CBlockHeader, or a CTransaction, and
# - the second value indicates whether the object should be accepted
# into the blockchain or mempool (for tests where we expect a certain
# answer), or "None" if we don't expect a certain answer and are just
# comparing the behavior of the nodes being tested.
# - the third value is the hash to test the tip against (if None or omitted,
# use the hash of the block)
# - NOTE: if a block header, no test is performed; instead the header is
# just added to the block_store. This is to facilitate block delivery
# when communicating with headers-first clients (when withholding an
# intermediate block).
# sync_every_block: if True, then each block will be inv'ed, synced, and
# nodes will be tested based on the outcome for the block. If False,
# then inv's accumulate until all blocks are processed (or max inv size
# is reached) and then sent out in one inv message. Then the final block
# will be synced across all connections, and the outcome of the final
# block will be tested.
# sync_every_tx: analogous to behavior for sync_every_block, except if outcome
# on the final tx is None, then contents of entire mempool are compared
# across all connections. (If outcome of final tx is specified as true
# or false, then only the last tx is tested against outcome.)
class TestInstance(object):
    """One batch of test objects plus its synchronization policy.

    blocks_and_transactions holds [obj, expected_outcome, (optional tip)]
    entries; the sync flags control whether nodes are synchronized and
    checked after every block / transaction or only at the end of the batch
    (see the module-level comment above for the full semantics).
    """
    def __init__(self, objects=None, sync_every_block=True, sync_every_tx=False):
        # A missing/empty objects argument yields a fresh empty list.
        self.blocks_and_transactions = objects or []
        self.sync_every_block = sync_every_block
        self.sync_every_tx = sync_every_tx
class TestManager(object):
    """Feeds TestInstances from a test generator to every connected node over
    P2P and verifies that all nodes agree on chain tip / mempool contents
    (and on any explicitly expected outcome)."""
    def __init__(self, testgen, datadir):
        # testgen must provide get_tests() yielding TestInstance objects.
        self.test_generator = testgen
        self.connections = []
        self.test_nodes = []
        self.block_store = BlockStore(datadir)
        self.tx_store = TxStore(datadir)
        self.ping_counter = 1
    def add_all_connections(self, nodes):
        """Open one P2P connection (with a TestNode callback) per node."""
        for i in range(len(nodes)):
            # Create a p2p connection to each node
            test_node = TestNode(self.block_store, self.tx_store)
            self.test_nodes.append(test_node)
            self.connections.append(NodeConn('127.0.0.1', p2p_port(i), nodes[i], test_node))
            # Make sure the TestNode (callback class) has a reference to its
            # associated NodeConn
            test_node.add_connection(self.connections[-1])
    def clear_all_connections(self):
        """Drop all connection/callback references (does not close sockets)."""
        self.connections = []
        self.test_nodes = []
    def wait_for_disconnections(self):
        """Block until every peer connection reports closed."""
        def disconnected():
            return all(node.closed for node in self.test_nodes)
        return wait_until(disconnected, timeout=10)
    def wait_for_verack(self):
        """Block until every peer completes the version handshake."""
        def veracked():
            return all(node.verack_received for node in self.test_nodes)
        return wait_until(veracked, timeout=10)
    def wait_for_pings(self, counter):
        """Block until every peer has answered ping nonce *counter*."""
        def received_pongs():
            return all(node.received_ping_response(counter) for node in self.test_nodes)
        return wait_until(received_pongs)
    # sync_blocks: Wait for all connections to request the blockhash given
    # then send get_headers to find out the tip of each node, and synchronize
    # the response by using a ping (and waiting for pong with same nonce).
    def sync_blocks(self, blockhash, num_blocks):
        def blocks_requested():
            return all(
                blockhash in node.block_request_map and node.block_request_map[blockhash]
                for node in self.test_nodes
            )
        # --> error if not requested
        if not wait_until(blocks_requested, attempts=20*num_blocks):
            # print [ c.cb.block_request_map for c in self.connections ]
            raise AssertionError("Not all nodes requested block")
        # Send getheaders message
        [ c.cb.send_getheaders() for c in self.connections ]
        # Send ping and wait for response -- synchronization hack
        [ c.cb.send_ping(self.ping_counter) for c in self.connections ]
        self.wait_for_pings(self.ping_counter)
        self.ping_counter += 1
    # Analogous to sync_block (see above)
    def sync_transaction(self, txhash, num_events):
        # Wait for nodes to request transaction (50ms sleep * 20 tries * num_events)
        def transaction_requested():
            return all(
                txhash in node.tx_request_map and node.tx_request_map[txhash]
                for node in self.test_nodes
            )
        # --> error if not requested
        if not wait_until(transaction_requested, attempts=20*num_events):
            # print [ c.cb.tx_request_map for c in self.connections ]
            raise AssertionError("Not all nodes requested transaction")
        # Get the mempool
        [ c.cb.send_mempool() for c in self.connections ]
        # Send ping and wait for response -- synchronization hack
        [ c.cb.send_ping(self.ping_counter) for c in self.connections ]
        self.wait_for_pings(self.ping_counter)
        self.ping_counter += 1
        # Sort inv responses from each node
        with mininode_lock:
            [ c.cb.lastInv.sort() for c in self.connections ]
    # Verify that the tip of each connection all agree with each other, and
    # with the expected outcome (if given)
    def check_results(self, blockhash, outcome):
        """Compare every node's tip against *outcome* (None = nodes must
        merely agree with each other; RejectResult = block must have been
        rejected with a matching code/reason; bool = tip equality)."""
        with mininode_lock:
            for c in self.connections:
                if outcome is None:
                    if c.cb.bestblockhash != self.connections[0].cb.bestblockhash:
                        return False
                elif isinstance(outcome, RejectResult): # Check that block was rejected w/ code
                    if c.cb.bestblockhash == blockhash:
                        return False
                    if blockhash not in c.cb.block_reject_map:
                        print 'Block not in reject map: %064x' % (blockhash)
                        return False
                    if not outcome.match(c.cb.block_reject_map[blockhash]):
                        print 'Block rejected with %s instead of expected %s: %064x' % (c.cb.block_reject_map[blockhash], outcome, blockhash)
                        return False
                elif ((c.cb.bestblockhash == blockhash) != outcome):
                    # print c.cb.bestblockhash, blockhash, outcome
                    return False
            return True
    # Either check that the mempools all agree with each other, or that
    # txhash's presence in the mempool matches the outcome specified.
    # This is somewhat of a strange comparison, in that we're either comparing
    # a particular tx to an outcome, or the entire mempools altogether;
    # perhaps it would be useful to add the ability to check explicitly that
    # a particular tx's existence in the mempool is the same across all nodes.
    def check_mempool(self, txhash, outcome):
        with mininode_lock:
            for c in self.connections:
                if outcome is None:
                    # Make sure the mempools agree with each other
                    if c.cb.lastInv != self.connections[0].cb.lastInv:
                        # print c.rpc.getrawmempool()
                        return False
                elif isinstance(outcome, RejectResult): # Check that tx was rejected w/ code
                    if txhash in c.cb.lastInv:
                        return False
                    if txhash not in c.cb.tx_reject_map:
                        print 'Tx not in reject map: %064x' % (txhash)
                        return False
                    if not outcome.match(c.cb.tx_reject_map[txhash]):
                        print 'Tx rejected with %s instead of expected %s: %064x' % (c.cb.tx_reject_map[txhash], outcome, txhash)
                        return False
                elif ((txhash in c.cb.lastInv) != outcome):
                    # print c.rpc.getrawmempool(), c.cb.lastInv
                    return False
            return True
    def run(self):
        """Execute every TestInstance from the generator, delivering each
        block/transaction to all nodes and checking outcomes as configured
        by the instance's sync flags. Raises AssertionError on the first
        failed comparison; disconnects and closes the stores when done."""
        # Wait until verack is received
        self.wait_for_verack()
        test_number = 1
        for test_instance in self.test_generator.get_tests():
            # We use these variables to keep track of the last block
            # and last transaction in the tests, which are used
            # if we're not syncing on every block or every tx.
            [ block, block_outcome, tip ] = [ None, None, None ]
            [ tx, tx_outcome ] = [ None, None ]
            invqueue = []
            for test_obj in test_instance.blocks_and_transactions:
                b_or_t = test_obj[0]
                outcome = test_obj[1]
                # Determine if we're dealing with a block or tx
                if isinstance(b_or_t, CBlock):  # Block test runner
                    block = b_or_t
                    block_outcome = outcome
                    tip = block.sha256
                    # each test_obj can have an optional third argument
                    # to specify the tip we should compare with
                    # (default is to use the block being tested)
                    if len(test_obj) >= 3:
                        tip = test_obj[2]
                    # Add to shared block_store, set as current block
                    # If there was an open getdata request for the block
                    # previously, and we didn't have an entry in the
                    # block_store, then immediately deliver, because the
                    # node wouldn't send another getdata request while
                    # the earlier one is outstanding.
                    first_block_with_hash = True
                    if self.block_store.get(block.sha256) is not None:
                        first_block_with_hash = False
                    with mininode_lock:
                        self.block_store.add_block(block)
                        for c in self.connections:
                            if first_block_with_hash and block.sha256 in c.cb.block_request_map and c.cb.block_request_map[block.sha256] == True:
                                # There was a previous request for this block hash
                                # Most likely, we delivered a header for this block
                                # but never had the block to respond to the getdata
                                c.send_message(msg_block(block))
                            else:
                                c.cb.block_request_map[block.sha256] = False
                    # Either send inv's to each node and sync, or add
                    # to invqueue for later inv'ing.
                    if (test_instance.sync_every_block):
                        [ c.cb.send_inv(block) for c in self.connections ]
                        self.sync_blocks(block.sha256, 1)
                        if (not self.check_results(tip, outcome)):
                            raise AssertionError("Test failed at test %d" % test_number)
                    else:
                        invqueue.append(CInv(2, block.sha256))
                elif isinstance(b_or_t, CBlockHeader):
                    # Headers are only stored (no outcome test) -- supports
                    # headers-first delivery with a withheld block.
                    block_header = b_or_t
                    self.block_store.add_header(block_header)
                else:  # Tx test runner
                    assert(isinstance(b_or_t, CTransaction))
                    tx = b_or_t
                    tx_outcome = outcome
                    # Add to shared tx store and clear map entry
                    with mininode_lock:
                        self.tx_store.add_transaction(tx)
                        for c in self.connections:
                            c.cb.tx_request_map[tx.sha256] = False
                    # Again, either inv to all nodes or save for later
                    if (test_instance.sync_every_tx):
                        [ c.cb.send_inv(tx) for c in self.connections ]
                        self.sync_transaction(tx.sha256, 1)
                        if (not self.check_mempool(tx.sha256, outcome)):
                            raise AssertionError("Test failed at test %d" % test_number)
                    else:
                        invqueue.append(CInv(1, tx.sha256))
                # Ensure we're not overflowing the inv queue
                if len(invqueue) == MAX_INV_SZ:
                    [ c.send_message(msg_inv(invqueue)) for c in self.connections ]
                    invqueue = []
            # Do final sync if we weren't syncing on every block or every tx.
            if (not test_instance.sync_every_block and block is not None):
                if len(invqueue) > 0:
                    [ c.send_message(msg_inv(invqueue)) for c in self.connections ]
                    invqueue = []
                self.sync_blocks(block.sha256, len(test_instance.blocks_and_transactions))
                if (not self.check_results(tip, block_outcome)):
                    raise AssertionError("Block test failed at test %d" % test_number)
            if (not test_instance.sync_every_tx and tx is not None):
                if len(invqueue) > 0:
                    [ c.send_message(msg_inv(invqueue)) for c in self.connections ]
                    invqueue = []
                self.sync_transaction(tx.sha256, len(test_instance.blocks_and_transactions))
                if (not self.check_mempool(tx.sha256, tx_outcome)):
                    raise AssertionError("Mempool test failed at test %d" % test_number)
            print "Test %d: PASS" % test_number, [ c.rpc.getblockcount() for c in self.connections ]
            test_number += 1
        [ c.disconnect_node() for c in self.connections ]
        self.wait_for_disconnections()
        self.block_store.close()
        self.tx_store.close()
| mocacinno/mocacoin | qa/rpc-tests/test_framework/comptool.py | Python | mit | 18,040 |
class BaseWidget(object):
    """Abstract base class for widgets backed by a filesystem path."""

    def __init__(self, path):
        # Path the concrete widget expands into output.
        self._path = path

    def expand(self):
        """Render this widget; concrete subclasses must override.

        Raises
        ------
        NotImplementedError
            Always, when called on the base class.
        """
        # NotImplementedError is the idiomatic signal for an abstract method.
        # It subclasses Exception, so existing callers that caught the old
        # generic Exception keep working.
        raise NotImplementedError("Not implemented in Base class.")
#!/usr/bin/env python
# mongodb_replicaset_status.py
# Author: Tyler Stroud <ststroud@gmail.com>
# Date: 2012-11-06
"""
This script monitors replication status of a replicaset
"""
from daemon import runner
import logging
from pymongo import Connection
from pymongo.errors import AutoReconnect
from time import sleep
import smtplib
from email.mime.text import MIMEText
import sys
from argparse import ArgumentParser
from ConfigParser import RawConfigParser, NoOptionError
class MongoDBReplicationStatus(object):
    """Daemon that monitors replication lag in a MongoDB replica set.

    Polls replSetGetStatus() on the primary every poll_interval seconds and
    alerts (via the attached Notifier) whenever any member lags the primary
    by more than lag_threshold seconds.
    """
    # Hostname of the most recently observed primary; tried first on the next
    # poll so we usually avoid scanning the whole member list.
    last_primary = None

    def __init__(self, host, poll_interval=5, lag_threshold=30,
                 max_connect_retries=5, log_level=logging.INFO,
                 pidfile='/tmp/mongodb_replication_status.pid',
                 logfile='/var/log/mongodb_replication_status.log'):
        self.poll_interval = poll_interval
        self.lag_threshold = lag_threshold
        self.max_connect_retries = max_connect_retries
        # Paths consumed by daemon.runner.DaemonRunner.
        self.stdin_path = '/dev/null'
        self.stdout_path = logfile
        self.stderr_path = logfile
        self.pidfile_path = pidfile
        self.pidfile_timeout = 5
        self.hostnames = host  # list of replica-set member hostnames
        self.logger = logging.getLogger(__name__)
        self.logger.setLevel(logging.getLevelName(log_level))
        self.logger_handler = logging.FileHandler(logfile)
        self.logger_handler.setFormatter(logging.Formatter('[%(asctime)s] %(message)s'))
        self.logger.addHandler(self.logger_handler)

    def set_notifier(self, notifier):
        """Attach the Notifier used for alert emails (must be set before run())."""
        assert isinstance(notifier, Notifier), ('"notifier" must be an instance'
                                                'of "Notifier"')
        self.notifier = notifier

    def get_members(self):
        """Return replSetGetStatus()['members'], waiting until a primary exists."""
        while True:
            # Fast path: ask the last known primary first.
            if self.last_primary is not None:
                connection = self.get_connection(self.last_primary)
                if connection is not None and connection.is_primary:
                    return connection['admin'].command('replSetGetStatus')['members']
            for hostname in [h for h in self.hostnames if h != self.last_primary]:
                connection = self.get_connection(hostname)
                if not isinstance(connection, Connection):
                    continue  # could not connect; try the next member
                if connection.is_primary:
                    self.last_primary = hostname
                    return connection['admin'].command('replSetGetStatus')['members']
            # No primary right now: wait and retry. (Iterative, unlike the
            # original recursive version, so a long election cannot overflow
            # the call stack.)
            sleep(5)

    def get_primary_optime(self, members):
        """Return the optime timestamp of the primary member (None if absent)."""
        for member in members:
            if 'PRIMARY' == member['stateStr']:
                return member['optime'].time

    def get_connection(self, hostname):
        """Attempt to create a mongodb Connection to the given hostname.

        Retries up to max_connect_retries times; on total failure, logs an
        error, emails an alert, and returns None.
        """
        retries = self.max_connect_retries
        while retries > 0:
            try:
                return Connection(hostname)
            except AutoReconnect:
                self.logger.warning(
                    'WARNING: Failed to connect to hostname "%s". Trying again in 5 seconds. (%s tries left).'
                    % (hostname, retries))
                retries -= 1
                sleep(5)
        errmsg = 'ERROR: All %s attempts to connect to hostname "%s" failed. Host may be down.'\
                 % (self.max_connect_retries, hostname)
        self.logger.error(errmsg)
        self.notifier.send_to_all(errmsg, '[ALERT] Host %s may be down' % hostname)

    def run(self):
        """Main monitoring loop (blocks forever)."""
        while True:
            members = self.get_members()
            message = ''
            # The primary's optime is the same for every member; compute once.
            primary_optime = self.get_primary_optime(members)
            for member in members:
                lag = primary_optime - member['optime'].time
                if lag > self.lag_threshold:
                    message += 'WARNING: Member "%s" is %s seconds behind the primary\n' % (member['name'], lag)
                self.logger.debug('DEBUG: Member "%s" is %s seconds behind the primary' % (member['name'], lag))
            # BUG FIX: the original tested ``message is not ''`` (identity,
            # not equality -- only accidentally correct via string interning,
            # and a SyntaxWarning on Python 3.8+) and logged the
            # partially-built message once per member. Log and notify once,
            # and only when there is something to report.
            if message:
                self.logger.warning(message)
                self.notifier.send_to_all(message)
            sleep(self.poll_interval)
class Notifier(object):
    """Sends plain-text alert emails through an SMTP relay."""

    def __init__(self, from_email, recipient_emails, smtp_host='localhost'):
        self.from_email = from_email
        self.recipient_emails = recipient_emails
        self.smtp_host = smtp_host

    def send_to_all(self, message, subject='[ALERT] Replication Status Warning'):
        """Email *message* to every configured recipient; returns sendmail()'s result."""
        mail = MIMEText(message)
        mail['Subject'] = subject
        return smtplib.SMTP(self.smtp_host).sendmail(
            self.from_email, self.recipient_emails, str(mail))
if __name__ == '__main__':
    # Parse the CLI: a required config file plus a daemon action.
    parser = ArgumentParser()
    parser.add_argument('-c', '--config',help='The path to the configuration file', dest='FILE', required=True)
    parser.add_argument('action', choices=('start', 'stop', 'restart'))
    args = parser.parse_args()
    config_parser = RawConfigParser()
    config_file = open(args.FILE)
    try:
        config_parser.readfp(config_file)
    finally:
        config_file.close()
    # Build the monitor from the [main] section of the config file.
    status = MongoDBReplicationStatus(
        config_parser.get('main', 'host').split(','),
        config_parser.getint('main', 'poll_interval'),
        config_parser.getint('main', 'lag_threshold'),
        config_parser.getint('main', 'max_connect_retries'),
        config_parser.get('main', 'log_level'),
        config_parser.get('main', 'pidfile'),
        config_parser.get('main', 'logfile'),
    )
    notifier = Notifier(config_parser.get('main', 'from_email'),
                        config_parser.get('main', 'recipients'),
                        config_parser.get('main', 'smtp_host'))
    status.set_notifier(notifier)
    sys.argv = sys.argv[0], args.action # overwrite sys.argv to be what daemon_runner expects
    daemon_runner = runner.DaemonRunner(status)
    # Keep the log file handle open across the daemon fork.
    daemon_runner.daemon_context.files_preserve = [status.logger_handler.stream]
    daemon_runner.do_action()
| tystr/mongodb-replication-status | mongodb_replication_status.py | Python | mit | 6,156 |
import numpy as np
from matplotlib import pyplot as plt
from astropy.io import fits
from glob import glob
import os
#Author: Trevor Dorn-Wallenstein
#Date: 9.20.17
#Author's note: the data reduction portion of this does almost exactly what acronym
#(Weisenburger et al. 2017, https://github.com/kweis/acronym) does, with the exception of
#performing an overscan subtraction before trimming. I wrote this to be slightly more explicit
#(read: slower) so that a PreMAP student can run any portion of the reduction and see what it does.
#If you want to reduce ARCTIC data and you are not my PreMAP student, PLEASE use Kolby's code!
#Hell, even if you are my PreMAP student, try Kolby's code too!
#If you want to run the pipeline from the command line without any customization,
#do python adapt.py datadir caldir reddir
#where datadir is where the data are, caldir is where master cals go, and reddir is
#where the reduced data go
def split_data_overscan(hdu):
    """
    Split a raw ARCTIC image into its science pixels and overscan region.

    The data (DSEC11) and overscan (BSEC11) sections are read from the image
    header. FITS section strings are 1-indexed, inclusive, and ordered
    [columns, rows]; they are converted here to 0-indexed, end-exclusive
    numpy slices with the axes swapped.

    Parameters
    ---------
    hdu : `~astropy.io.fits.hdu.image.PrimaryHDU`
        HDU object containing the raw FITS image from which overscan and data
        arrays are generated.

    Returns
    -------
    data : `~numpy.ndarray`
        Trimmed data
    overscan : `~numpy.ndarray`
        Overscan region
    """
    image = hdu.data
    # Section strings look like '[c_min:c_max,r_min:r_max]'.
    # (Removed an unused ``header = hdu.header`` local from the original.)
    overscan_str = hdu.header['BSEC11'].lstrip('[').rstrip(']').split(',')
    data_str = hdu.header['DSEC11'].lstrip('[').rstrip(']').split(',')
    overscan_x_min,overscan_x_max = overscan_str[1].split(':')
    overscan_y_min,overscan_y_max = overscan_str[0].split(':')
    data_x_min,data_x_max = data_str[1].split(':')
    data_y_min,data_y_max = data_str[0].split(':')
    # Remember, FITS is 1-indexed, and sections are inclusive,
    # whereas python is 0-indexed, and exclusive to the end of a section
    data = image[int(data_x_min)-1:int(data_x_max),int(data_y_min)-1:int(data_y_max)]
    overscan = image[int(overscan_x_min)-1:int(overscan_x_max),int(overscan_y_min)-1:int(overscan_y_max)]
    return data, overscan
def trim_subtract_overscan(hdu, fit_degree=8):
    """
    Trim the overscan region from a raw image and subtract a polynomial fit
    of the overscan level from the science pixels.

    Parameters
    ---------
    hdu : `~astropy.io.fits.hdu.image.PrimaryHDU`
        HDU object containing the raw FITS image.
    fit_degree : int, optional
        Order of the polynomial used to fit the overscan.

    Returns
    -------
    data_subtracted : `~numpy.ndarray`
        Trimmed and overscan-subtracted data.
    header : `~astropy.io.fits.header.Header`
        Header of the original file, annotated to record the trimming and
        overscan subtraction.
    """
    data, overscan = split_data_overscan(hdu)
    header = hdu.header
    # Collapse the overscan along rows, fit a polynomial to the column
    # average, and evaluate the fit at every row index.
    avg_overscan = overscan.mean(axis=1)
    idx = np.arange(len(avg_overscan))
    coeffs = np.polyfit(idx, avg_overscan, deg=fit_degree)
    fitted_level = np.polyval(coeffs, idx)
    # Broadcast the fitted level as a column and subtract it row by row.
    data_subtracted = data - fitted_level[:, np.newaxis]
    # Record the processing steps in the header.
    header.set('COMMENT','Overscan Subtracted + Trimmed')
    header.set('COMMENT','Overscan Fit Order = {0}'.format(fit_degree))
    return data_subtracted, header
def master_bias(biaslist,overscan_fit_degree = 8, caldir = None, overwrite = False):
    """
    Construct a master bias using median combination
    Parameters
    ---------
    biaslist : list
        List of filenames, should be complete filenames. Use glob to construct.
        If the list is empty, nothing will happen
    overscan_fit_degree : int, optional
        Order of polynomial to fit overscan with
    caldir : str, optional
        Directory to place master bias into. Must end in a path separator if
        given (the filename is appended by string concatenation).
    overwrite : bool, optional
        If True, and caldir/master_bias.fits exists, it will be overwritten
    Returns
    -------
    masterbias : `~numpy.ndarray`
        Bias array. Note: this will be saved as outdir/master_bias.fits
    """
    if len(biaslist) == 0:
        print('feed me biases!')
        return None
    master_biases =[]
    # Overscan-correct and trim every raw bias frame.
    for bias_name in biaslist:
        hdu = fits.open(bias_name)[0]
        data,header = trim_subtract_overscan(hdu,fit_degree=overscan_fit_degree)
        master_biases.append(data)
    master_biases = np.array(master_biases)
    # Median combination rejects outliers (e.g. cosmic rays).
    master_bias = np.median(master_biases,axis=0)
    # NOTE: 'header' is the header of the *last* bias in the list; it becomes
    # the header of the combined product.
    header.set('COMMENT','Biases median-combined')
    header.set('COMMENT','Composed of raw bias frames:')
    for bias_name in biaslist:
        header.set('COMMENT',bias_name)
    bias_hdu = fits.PrimaryHDU(master_bias,header)
    if caldir == None:
        bias_hdu.writeto('master_bias.fits', overwrite=overwrite)
    else:
        bias_hdu.writeto(caldir+'master_bias.fits', overwrite=overwrite)
    print('Master bias constructed')
    return master_bias
def master_dark(darklist,exptime,overscan_fit_degree = 8, caldir = None, overwrite = False):
    """
    Construct a master dark frame using median combination
    Parameters
    ---------
    darklist : list
        List of filenames, should be complete filenames. Use glob to construct.
        If the list is empty, nothing will happen
    exptime : float
        float of exposure time for the dark in seconds. Will be appended to the filename
    overscan_fit_degree : int, optional
        Order of polynomial to fit overscan with
    caldir : str, optional
        Directory to place master dark into. The master bias is also read
        from here; must end in a path separator if given.
    overwrite : bool, optional
        If True, and caldir/master_dark_exptime.fits exists, it will be overwritten
    Returns
    -------
    masterdark : `~numpy.ndarray`
        dark array. Note: this will be saved as outdir/master_dark_exptime.fits
    """
    if len(darklist) == 0:
        print('feed me darks!')
        return None
    # The master bias must already exist (see master_bias()).
    if caldir == None:
        bias = fits.getdata('master_bias.fits')
    else:
        bias = fits.getdata(caldir+'master_bias.fits')
    master_darks = []
    # Overscan-correct, trim, and bias-subtract every raw dark frame.
    for dark_name in darklist:
        hdu = fits.open(dark_name)[0]
        data,header = trim_subtract_overscan(hdu,fit_degree=overscan_fit_degree)
        data -= bias
        master_darks.append(data)
    master_darks = np.array(master_darks)
    master_dark = np.median(master_darks,axis=0)
    #Some bookkeeping
    # NOTE: 'header' is the header of the *last* dark in the list; it becomes
    # the header of the combined product.
    header.set('COMMENT','Darks median-combined')
    header.set('COMMENT','Composed of raw dark frames:')
    for dark_name in darklist:
        header.set('COMMENT',dark_name)
    if caldir == None:
        header.set('COMMENT', 'Bias subtraction done with master_bias.fits')
    else:
        header.set('COMMENT', 'Bias subtraction done with {0}master_bias.fits'.format(caldir))
    dark_hdu = fits.PrimaryHDU(master_dark,header)
    if caldir == None:
        dark_hdu.writeto('master_dark_{0}.fits'.format(exptime), overwrite=overwrite)
    else:
        dark_hdu.writeto(caldir+'master_dark_{0}.fits'.format(exptime), overwrite=overwrite)
    print('Master dark for {0}s constructed'.format(exptime))
    return master_dark
def get_dark(exptime, caldir=None):
    """
    Fetch the master dark matching an exposure time. If no dark matches,
    linearly scale the longest available dark to the requested time.

    Parameters
    ----------
    exptime : float
        Exposure time in seconds to find a dark for.
    caldir : str, optional
        Directory to search for master darks. If None, the current
        working directory is searched.

    Returns
    -------
    dark : `~numpy.ndarray`
        Master dark array (possibly scaled).
    darkname : str
        Name of the file the dark came from, for later reference.

    Raises
    ------
    FileNotFoundError
        If no master dark frames are found at all.
    """
    # caldir may legitimately be None (the pipeline default); the original
    # crashed with a TypeError on `caldir + 'master_dark*'` in that case.
    prefix = '' if caldir is None else caldir
    available_darks = glob(prefix + 'master_dark*')
    if not available_darks:
        # Fail loudly instead of letting np.argmax choke on an empty list.
        raise FileNotFoundError('No master dark frames found in {0!r}'.format(prefix or '.'))
    available_times = []
    # If any dark's exposure time matches exactly, use it as-is.
    for darkname in available_darks:
        dark_hdu = fits.open(darkname)[0]
        dark_time = dark_hdu.header['EXPTIME']
        available_times.append(dark_time)
        if exptime == dark_time:
            return dark_hdu.data, darkname
    # No exact match: scale the longest dark to the requested exposure time
    # (dark current is assumed linear in time).
    max_dark_idx = np.argmax(available_times)
    max_dark_time = available_times[max_dark_idx]
    darkname = available_darks[max_dark_idx]
    dark = fits.getdata(darkname) * exptime / max_dark_time
    return dark, darkname
def master_flat(flatlist, filt, overscan_fit_degree=8, caldir=None, overwrite=False):
    """
    Construct a master flat using median combination.

    Each raw flat is overscan-corrected, trimmed, bias- and dark-subtracted;
    the median-combined stack is then normalized by its maximum.

    Parameters
    ----------
    flatlist : list
        List of filenames, should be complete filenames. Use glob to construct.
        If the list is empty, nothing will happen.
    filt : str
        Name of filter that you're constructing a flat field for.
    overscan_fit_degree : int, optional
        Order of polynomial to fit overscan with.
    caldir : str, optional
        Directory holding the master bias/darks and receiving the master
        flat. Should end in the path separator. If None, the current
        working directory is used.
    overwrite : bool, optional
        If True, and caldir/master_flat_filt.fits exists, it will be overwritten.

    Returns
    -------
    masterflat : `~numpy.ndarray` or None
        Normalized flat array, saved as caldir/master_flat_filt.fits.
        None if flatlist is empty.
    """
    if not flatlist:
        print('feed me flats!')
        return None
    prefix = '' if caldir is None else caldir
    bias_path = prefix + 'master_bias.fits'
    bias = fits.getdata(bias_path)
    corrected = []
    for flat_name in flatlist:
        hdu = fits.open(flat_name)[0]
        data, header = trim_subtract_overscan(hdu, fit_degree=overscan_fit_degree)
        # Each flat may have its own exposure time, so fetch a matching dark.
        dark, darkname = get_dark(hdu.header['EXPTIME'], caldir=caldir)
        data -= bias
        data -= dark
        corrected.append(data)
    master_flat = np.median(np.array(corrected), axis=0)
    master_flat /= np.max(master_flat)  # normalize so the peak is 1.0
    # Bookkeeping: record provenance in the (last frame's) header.
    header.set('COMMENT', 'Flats median-combined')
    header.set('COMMENT', 'Composed of raw flat frames:')
    for flat_name in flatlist:
        header.set('COMMENT', flat_name)
    header.set('COMMENT', 'Bias subtraction done with {0}'.format(bias_path))
    # NOTE: this records only the dark used for the *last* flat in the stack.
    header.set('COMMENT', 'Dark subtraction done with {0}'.format(darkname))
    flat_hdu = fits.PrimaryHDU(master_flat, header)
    flat_hdu.writeto(prefix + 'master_flat_{0}.fits'.format(filt), overwrite=overwrite)
    print('Master flat for {0} filter constructed'.format(filt))
    return master_flat
def get_flat(hdu, caldir=None):
    """
    Fetch the master flat matching an image's filter. If none exists,
    return 1.0 so that flat-fielding becomes a no-op.

    Parameters
    ----------
    hdu : `~astropy.io.fits.hdu.image.PrimaryHDU`
        HDU to find a flat for; its 'FILTER' header card is matched.
    caldir : str, optional
        Directory to search for master flats. If None, the current
        working directory is searched.

    Returns
    -------
    flat : `~numpy.ndarray` or float
        Master flat array, or 1.0 if no matching flat was found.
    flatname : str
        Name of the flat file used, or 'NONE FOUND'.
    """
    our_filt = hdu.header['FILTER']
    # caldir may legitimately be None (the pipeline default); the original
    # crashed with a TypeError on `caldir + 'master_flat*'` in that case.
    prefix = '' if caldir is None else caldir
    # Use the first master flat whose FILTER card matches ours.
    for flatname in glob(prefix + 'master_flat*'):
        flat_hdu = fits.open(flatname)[0]
        if our_filt == flat_hdu.header['FILTER']:
            return flat_hdu.data, flatname
    # No matching master flat was found; division by 1.0 leaves data unchanged.
    print('No flat for {0} found! Setting flat = 1'.format(hdu.header['FILENAME']))
    return 1.0, 'NONE FOUND'
def reduce_science(sciencelist, overscan_fit_degree=8, caldir=None, reddir=None, overwrite=False, out_pref='red_'):
    """
    Reduce the science frames: overscan-correct, trim, bias- and
    dark-subtract, then flat-field, writing each result to disk.

    Parameters
    ----------
    sciencelist : list
        List of filenames to reduce.
    overscan_fit_degree : int, optional
        Order of polynomial to fit overscan with.
    caldir : str, optional
        Directory holding the master calibration frames. If None, the
        current working directory is used.
    reddir : str, optional
        Directory to place reduced science images into.
    overwrite : bool, optional
        If True, existing reduced files will be overwritten.
    out_pref : str, optional
        Appends this string to the beginning of each output filename.

    Returns
    -------
    str
        Completion message ('Complete!! Hooray!').
    """
    print('Reducing {0} science frames!'.format(len(sciencelist)))
    # The master bias is identical for every frame: load it once here
    # instead of re-reading it from disk inside the loop.
    cal_prefix = '' if caldir is None else caldir
    bias_path = cal_prefix + 'master_bias.fits'
    bias = fits.getdata(bias_path)
    for filename in sciencelist:
        hdu = fits.open(filename)[0]
        # Trim and subtract overscan
        data, header = trim_subtract_overscan(hdu, fit_degree=overscan_fit_degree)
        # Bias subtract
        header.set('COMMENT', 'Bias subtraction done with {0}'.format(bias_path))
        data -= bias
        # Dark subtract, matched (or scaled) to this frame's exposure time
        dark, darkname = get_dark(header['EXPTIME'], caldir=caldir)
        header.set('COMMENT', 'Dark subtraction done with {0}'.format(darkname))
        data -= dark
        # Flat field, matched to this frame's filter
        flat, flatname = get_flat(hdu, caldir=caldir)
        header.set('COMMENT', 'Flat fielding done with {0}'.format(flatname))
        data /= flat
        # Strip any directory component so the output lands in reddir.
        just_filename = os.path.basename(filename)
        if reddir is None:
            outname = out_pref + just_filename
        else:
            outname = reddir + out_pref + just_filename
        fits.PrimaryHDU(data, header).writeto(outname, overwrite=overwrite)
    return 'Complete!! Hooray!'
def generate_lists(datadir='./', bias_keyword='Bias', dark_keyword='Dark', flat_keyword='Flat', science_keyword='Object'):
    """
    Generates lists of filetypes to feed into the pipeline.

    Parameters
    ----------
    datadir : str, optional
        Directory where the data are stored. Should end in /
    bias_keyword : str, optional
        IMAGETYP header value identifying bias frames.
    dark_keyword : str, optional
        IMAGETYP header value identifying dark frames.
    flat_keyword : str, optional
        IMAGETYP header value identifying flat frames.
    science_keyword : str, optional
        IMAGETYP header value identifying science frames.

    Returns
    -------
    biaslist : list
        List of biases.
    darklists : list
        List of lists of darks, one list for each dark exposure time taken.
    exptimes : list
        List of exposure times, one for each list of darks in darklists.
    flatlists : list
        List of lists of flats, one for each filter taken.
    filters : list
        List of filter names, one for each list of flats in flatlists. Note:
        because filter names are messy, this just uses the last letter of the
        filter. So SDSS g -> g, but CU Ha -> a. Sorry...
    sciencelist : list
        List of science images.
    """
    files = glob(datadir + '*.fits')
    biaslist = []
    tmp_darklist = []
    dark_times = []
    tmp_flatlist = []
    flat_filts = []
    sciencelist = []
    # Sort files into categories by their IMAGETYP header keyword.
    for fname in files:
        hdu = fits.open(fname)[0]
        filetype = hdu.header['IMAGETYP']
        if filetype == bias_keyword:
            biaslist.append(fname)
        elif filetype == dark_keyword:
            tmp_darklist.append(fname)
            dark_times.append(hdu.header['EXPTIME'])
        elif filetype == flat_keyword:
            tmp_flatlist.append(fname)
            flat_filts.append(hdu.header['FILTER'])
        elif filetype == science_keyword:
            sciencelist.append(fname)
    # Group darks by exposure time. Build explicit arrays for the mask: the
    # original compared a plain Python list to a scalar, which only produced
    # an element-wise mask by accident of NumPy's reflected comparison on
    # the np.unique scalar.
    darklists = []
    exptimes = []
    dark_times_arr = np.array(dark_times)
    tmp_darks_arr = np.array(tmp_darklist)
    for dark_time in np.unique(dark_times_arr):
        darklists.append(list(tmp_darks_arr[dark_times_arr == dark_time]))
        exptimes.append(dark_time)
    # Group flats by filter; only the last character of the filter name is kept.
    flatlists = []
    filters = []
    flat_filts_arr = np.array(flat_filts)
    tmp_flats_arr = np.array(tmp_flatlist)
    for filt in np.unique(flat_filts_arr):
        flatlists.append(list(tmp_flats_arr[flat_filts_arr == filt]))
        filters.append(filt[-1])
    return biaslist, darklists, exptimes, flatlists, filters, sciencelist
def run_pipeline_run(datadir='./', caldir=None, reddir=None, overscan_fit_degree=8, overwrite=True, out_pref='red_'):
    """
    Runs the entire pipeline. Overscan subtract, trim, bias, dark, flat, you name it.

    Parameters
    ----------
    datadir : str, optional
        Directory where the data are stored. Should end in /
    caldir : str, optional
        Directory where the master calibration data should go.
    reddir : str, optional
        Directory where reduced data should go.
    overscan_fit_degree : int, optional
        Order of polynomial used to fit overscan region.
    overwrite : bool, optional
        If output files exist already and overwrite = True, overwrites files.
        Otherwise, exits with an error.
    out_pref : str, optional
        String attached to filenames of reduced science data.

    Returns
    -------
    str
        Completion message ('Complete!!').
    """
    biaslist, darklists, exptimes, flatlists, filters, sciencelist = generate_lists(datadir=datadir)
    # Create requested output directories up front if they do not exist.
    for outdir in (caldir, reddir):
        if outdir is not None and not os.path.isdir(outdir):
            os.makedirs(outdir)
    print('Making Biases...')
    master_bias(biaslist=biaslist, overscan_fit_degree=overscan_fit_degree, caldir=caldir, overwrite=overwrite)
    print('Making Darks...')
    for darklist, exptime in zip(darklists, exptimes):
        master_dark(darklist=darklist, exptime=exptime, overscan_fit_degree=overscan_fit_degree, caldir=caldir, overwrite=overwrite)
    print('Making Flats')
    for flatlist, filt in zip(flatlists, filters):
        master_flat(flatlist=flatlist, filt=filt, overscan_fit_degree=overscan_fit_degree, caldir=caldir, overwrite=overwrite)
    print('Reducing Science!')
    reduce_science(sciencelist=sciencelist, overscan_fit_degree=overscan_fit_degree, caldir=caldir, reddir=reddir, overwrite=overwrite, out_pref=out_pref)
    return 'Complete!!'
if __name__ == '__main__':
    import sys
    # Usage: script.py <datadir> <caldir> <reddir>; with anything else,
    # fall back to reducing the current directory in place.
    if len(sys.argv) == 4:
        datadir, caldir, reddir = sys.argv[1], sys.argv[2], sys.argv[3]
    else:
        print('Wrong number of arguments, using defaults')
        datadir, caldir, reddir = './', None, None
    run_pipeline_run(datadir=datadir, caldir=caldir, reddir=reddir)
| tzdwi/adapt | adapt.py | Python | mit | 20,553 |
'''
Created on Dec 3, 2014
@author: gearsad
'''
import sys
from roverpylot import rover
from bot_update_t import bot_update_t
from bot_control_command_t import bot_control_command_t
import lcm
# Try to start OpenCV for video
try:
import cv
except:
cv = None
class LCMRover(rover.Rover):
'''
A rover using LCM for control and camera feed upstream
'''
def Initialize(self, botname):
'''
Init the rover and store the name
'''
self.__botname = botname
self.__lcm = lcm.LCM("udpm://239.255.76.67:7667?ttl=1")
self.__controlSubscription = self.__lcm.subscribe("ARNerve_Bot_Control_" + self.__botname, self.UpdateBotControlHandler)
self.__lightsOn = 0
self.__infraredOn = 0
def processVideo(self, jpegbytes):
#try:
camUpdate = bot_update_t()
camUpdate.name = self.__botname
camUpdate.numBytes_cameraFrameJpeg = len(jpegbytes)
camUpdate.cameraFrameJpeg = jpegbytes
# Get the battery health as well
battery = self.getBatteryPercentage()
camUpdate.batteryPercentage = battery
self.__lcm.publish("ARNerve_Bot_Update_" + self.__botname, camUpdate.encode())
#except:
# print "Exception", sys.exc_info()[0]
# pass
def Update(self):
'''
Update the LCM
'''
self.__lcm.handle()
def Disconnect(self):
self.lc.unsubscribe(self.__controlSubscription)
def UpdateBotControlHandler(self, channel, data):
'''
Get the updated bot parameters and send them to the bot.
'''
controlParams = bot_control_command_t.decode(data)
# Check if it is the right bot.
if self.__botname != controlParams.name:
return
self.setTreads(controlParams.botTreadVelLeft, controlParams.botTreadVelright)
print "Setting the treads to {0}, {1}".format(controlParams.botTreadVelLeft, controlParams.botTreadVelright)
if self.__lightsOn != controlParams.isLightsOn:
if controlParams.isLightsOn != 0:
self.turnLightsOn()
else:
self.turnLightsOff()
self.__lightsOn = controlParams.isLightsOn
if self.__infraredOn != controlParams.isInfraredOn:
if controlParams.isInfraredOn != 0:
self.turnInfraredOn()
else:
self.turnInfraredOff()
self.__infraredOn = controlParams.isInfraredOn
| GearsAD/semisorted_arnerve | arnerve_bot/arnerve_bot/LCMRover.py | Python | mit | 2,720 |
'''
Created on 10/5/2015
@author: johnPortella
@version: 1.0
'''
import paramiko, os
from ftplib import FTP
#from config_parser import ConfigUtils
class PyFileTransfer(object):
    """
    Simple FTP/SFTP file-transfer wrapper with a common interface.

    The protocol is chosen at construction time ('ftp' via ftplib, 'sftp'
    via paramiko); connection(), get(), put() and the directory helpers
    then behave the same regardless of the underlying protocol.
    """

    def __init__(self, typeProtocol='ftp', hostname='localhost', so='unix', port=None, timeout=None):
        # Protocol: 'ftp' or 'sftp'
        self.__typeProtocol = typeProtocol
        self.__hostname = hostname
        # Remote path separator, chosen from the remote OS flavour.
        if so == 'unix':
            self.__SEP = '/'
        elif so == 'win':
            self.__SEP = chr(92)  # backslash
        self.__timeout = timeout
        # `is not None` instead of truthiness so an explicit port is honored.
        if port is not None:
            self.__port = port
        # Open the transport, defaulting the port per protocol.
        if self.__typeProtocol == 'ftp':
            if port is None:
                self.__port = 21
            self.__t = FTP()
            self.__t.connect(self.__hostname, self.__port, self.__timeout)
        elif self.__typeProtocol == 'sftp':
            if port is None:
                self.__port = 22
            self.__ssh = paramiko.Transport((self.__hostname, self.__port))

    def connection(self, username, password):
        """Log in with the given credentials and remember the landing directory."""
        if self.__typeProtocol == 'ftp':
            self.__t.login(username, password)
            # Default directory to return to in setDefaultDirectory().
            self.__defaultDirectory = self.__t.pwd()
        elif self.__typeProtocol == 'sftp':
            self.__ssh.connect(username=username, password=password)
            self.__t = paramiko.SFTPClient.from_transport(self.__ssh)
            self.__t.sock.settimeout(self.__timeout)
            # paramiko uses None to mean "the session's starting directory".
            self.__defaultDirectory = None

    def get(self, filename, remoteDirectory=None, localDirectory=None):
        """Download filename from remoteDirectory into localDirectory."""
        if localDirectory is None:
            localDirectory = os.path.dirname(os.path.realpath(__file__))
        if self.__typeProtocol == 'ftp':
            pwdAux = self.__t.pwd()
            if remoteDirectory is not None:
                self.__t.cwd(remoteDirectory)
            # BUG FIX: the original bound the file's write method and then
            # called .close() on that bound method (AttributeError), leaking
            # the file handle. A with-block closes it deterministically.
            with open(os.path.join(localDirectory, filename), 'wb') as local_file:
                self.__t.retrbinary("RETR " + filename, local_file.write)
            self.__t.cwd(pwdAux)
        elif self.__typeProtocol == 'sftp':
            if remoteDirectory is not None:
                self.__t.chdir(remoteDirectory)
            self.__t.get(filename, os.path.join(localDirectory, filename))
            self.__t.chdir(None)

    def put(self, filename, remoteDirectory=None, localDirectory=None):
        """Upload filename from localDirectory into remoteDirectory."""
        if localDirectory is None:
            localDirectory = os.path.dirname(os.path.realpath(__file__))
        if self.__typeProtocol == 'ftp':
            pwdAux = self.__t.pwd()
            if remoteDirectory is not None:
                self.__t.cwd(remoteDirectory)
            # BUG FIX: the original issued 'RETR' (a download command) and
            # passed the file's write method; uploading requires 'STOR' and a
            # readable *binary* file object. It also ignored localDirectory.
            with open(os.path.join(localDirectory, filename), 'rb') as local_file:
                self.__t.storbinary('STOR %s' % filename, local_file)
            self.__t.cwd(pwdAux)
        elif self.__typeProtocol == 'sftp':
            if remoteDirectory is not None:
                self.__t.chdir(remoteDirectory)
            self.__t.put(os.path.join(localDirectory, filename), filename)
            self.__t.chdir(None)

    def disconnect(self):
        """Close the session (and, for SFTP, the underlying transport)."""
        if self.__typeProtocol == 'ftp':
            self.__t.quit()
        elif self.__typeProtocol == 'sftp':
            self.__t.close()
            self.__ssh.close()

    def pwd(self):
        """Return the current remote working directory."""
        if self.__typeProtocol == 'ftp':
            return self.__t.pwd()
        elif self.__typeProtocol == 'sftp':
            return self.__t.getcwd()

    def cwd(self, remoteDirectory=None):
        """Change the remote working directory."""
        if self.__typeProtocol == 'ftp':
            self.__t.cwd(remoteDirectory)
        elif self.__typeProtocol == 'sftp':
            self.__t.chdir(remoteDirectory)

    def setDefaultDirectory(self):
        """Return to the directory recorded at login time."""
        if self.__typeProtocol == 'ftp':
            self.__t.cwd(self.__defaultDirectory)
        elif self.__typeProtocol == 'sftp':
            self.__t.chdir(None)

    def remotePathJoin(self, *paths):
        """Join path components with the remote OS separator (None if empty)."""
        if not paths:
            return None
        return self.__SEP.join(paths)
# --- Module-level smoke test ----------------------------------------------
# NOTE(review): this runs on *import*, opening a live SFTP session to the
# public Rebex demo server; consider guarding it with
# `if __name__ == '__main__':` so importing the module has no side effects.
t = PyFileTransfer('sftp', 'test.rebex.net', 'unix')
t.connection("demo", "password")
#t.get("WinFormClient.png", '/pub/example')
# Show the landing directory, move into pub/example, then return to the
# default directory recorded at login, printing pwd at each step.
print t.pwd()
t.cwd('pub/example')
print t.pwd()
t.setDefaultDirectory()
print t.pwd()
t.disconnect()
| JohnPortella/pyfile_transfer | pyfile_transfer.py | Python | mit | 6,740 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class AzureFirewallsOperations(object):
"""AzureFirewallsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2019_08_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
    def _delete_initial(
        self,
        resource_group_name, # type: str
        azure_firewall_name, # type: str
        **kwargs # type: Any
    ):
        # type: (...) -> None
        """Send the initial DELETE request of the long-running delete operation.

        Internal helper for :meth:`begin_delete`. Accepts HTTP 200/202/204 and
        returns ``None`` (or ``cls(pipeline_response, None, {})`` when a custom
        ``cls`` callback was supplied); any other status raises
        ``HttpResponseError``.
        """
        cls = kwargs.pop('cls', None) # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        # Caller-supplied status-code -> exception overrides take precedence.
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-08-01"
        # Construct URL
        url = self._delete_initial.metadata['url'] # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'azureFirewallName': self._serialize.url("azure_firewall_name", azure_firewall_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {} # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {} # type: Dict[str, Any]
        request = self._client.delete(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 202, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        if cls:
            return cls(pipeline_response, None, {})
    # Template URL consumed above when building the request.
    _delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/azureFirewalls/{azureFirewallName}'}  # type: ignore
    def begin_delete(
        self,
        resource_group_name, # type: str
        azure_firewall_name, # type: str
        **kwargs # type: Any
    ):
        # type: (...) -> LROPoller[None]
        """Deletes the specified Azure Firewall.

        Starts the long-running delete and returns a poller for its completion.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param azure_firewall_name: The name of the Azure Firewall.
        :type azure_firewall_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        # Pull the LRO knobs out of kwargs before the initial request.
        polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None) # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
        # No continuation token: fire the initial DELETE now. cls=lambda keeps
        # the raw pipeline response so the poller can read its headers.
        if cont_token is None:
            raw_result = self._delete_initial(
                resource_group_name=resource_group_name,
                azure_firewall_name=azure_firewall_name,
                cls=lambda x,y,z: x,
                **kwargs
            )
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        # Final-result callback invoked by the poller once the LRO completes.
        def get_long_running_output(pipeline_response):
            if cls:
                return cls(pipeline_response, None, {})
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'azureFirewallName': self._serialize.url("azure_firewall_name", azure_firewall_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        # polling may be True (default ARM poller), False (no polling) or a
        # caller-provided PollingMethod instance.
        if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/azureFirewalls/{azureFirewallName}'}  # type: ignore
    def get(
        self,
        resource_group_name, # type: str
        azure_firewall_name, # type: str
        **kwargs # type: Any
    ):
        # type: (...) -> "_models.AzureFirewall"
        """Gets the specified Azure Firewall.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param azure_firewall_name: The name of the Azure Firewall.
        :type azure_firewall_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: AzureFirewall, or the result of cls(response)
        :rtype: ~azure.mgmt.network.v2019_08_01.models.AzureFirewall
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None) # type: ClsType["_models.AzureFirewall"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        # Caller-supplied status-code -> exception overrides take precedence.
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-08-01"
        accept = "application/json"
        # Construct URL
        url = self.get.metadata['url'] # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'azureFirewallName': self._serialize.url("azure_firewall_name", azure_firewall_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {} # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {} # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        # Only 200 reaches this point; deserialize the firewall model.
        deserialized = self._deserialize('AzureFirewall', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/azureFirewalls/{azureFirewallName}'}  # type: ignore
    def _create_or_update_initial(
        self,
        resource_group_name, # type: str
        azure_firewall_name, # type: str
        parameters, # type: "_models.AzureFirewall"
        **kwargs # type: Any
    ):
        # type: (...) -> "_models.AzureFirewall"
        """Send the initial PUT request of the long-running create-or-update.

        Internal helper for :meth:`begin_create_or_update`. Serializes
        ``parameters`` as the request body, accepts HTTP 200/201, and returns
        the deserialized AzureFirewall (or ``cls(...)`` when a custom ``cls``
        callback was supplied); any other status raises ``HttpResponseError``.
        """
        cls = kwargs.pop('cls', None) # type: ClsType["_models.AzureFirewall"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        # Caller-supplied status-code -> exception overrides take precedence.
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-08-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"
        # Construct URL
        url = self._create_or_update_initial.metadata['url'] # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'azureFirewallName': self._serialize.url("azure_firewall_name", azure_firewall_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {} # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {} # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        # Serialize the AzureFirewall model as the PUT body.
        body_content_kwargs = {} # type: Dict[str, Any]
        body_content = self._serialize.body(parameters, 'AzureFirewall')
        body_content_kwargs['content'] = body_content
        request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        # 200 = updated existing resource, 201 = created new one; the payload
        # shape is the same in both cases.
        if response.status_code == 200:
            deserialized = self._deserialize('AzureFirewall', pipeline_response)
        if response.status_code == 201:
            deserialized = self._deserialize('AzureFirewall', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    # Template URL consumed above when building the request.
    _create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/azureFirewalls/{azureFirewallName}'}  # type: ignore
    def begin_create_or_update(
        self,
        resource_group_name, # type: str
        azure_firewall_name, # type: str
        parameters, # type: "_models.AzureFirewall"
        **kwargs # type: Any
    ):
        # type: (...) -> LROPoller["_models.AzureFirewall"]
        """Creates or updates the specified Azure Firewall.

        Starts the long-running PUT and returns a poller for its completion.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param azure_firewall_name: The name of the Azure Firewall.
        :type azure_firewall_name: str
        :param parameters: Parameters supplied to the create or update Azure Firewall operation.
        :type parameters: ~azure.mgmt.network.v2019_08_01.models.AzureFirewall
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either AzureFirewall or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2019_08_01.models.AzureFirewall]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        # Pull the LRO knobs out of kwargs before the initial request.
        polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None) # type: ClsType["_models.AzureFirewall"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
        # No continuation token: fire the initial PUT now. cls=lambda keeps
        # the raw pipeline response so the poller can read its headers.
        if cont_token is None:
            raw_result = self._create_or_update_initial(
                resource_group_name=resource_group_name,
                azure_firewall_name=azure_firewall_name,
                parameters=parameters,
                cls=lambda x,y,z: x,
                **kwargs
            )
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        # Final-result callback invoked by the poller once the LRO completes.
        def get_long_running_output(pipeline_response):
            deserialized = self._deserialize('AzureFirewall', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'azureFirewallName': self._serialize.url("azure_firewall_name", azure_firewall_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        # polling may be True (default ARM poller), False (no polling) or a
        # caller-provided PollingMethod instance.
        if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/azureFirewalls/{azureFirewallName}'}  # type: ignore
    def update_tags(
        self,
        resource_group_name, # type: str
        azure_firewall_name, # type: str
        parameters, # type: "_models.AzureFirewall"
        **kwargs # type: Any
    ):
        # type: (...) -> "_models.AzureFirewall"
        """Updates tags for an Azure Firewall resource.

        Sends a single PATCH request (not an LRO) and returns the
        deserialized firewall from the response.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param azure_firewall_name: The name of the Azure Firewall.
        :type azure_firewall_name: str
        :param parameters: Parameters supplied to the create or update Azure Firewall operation.
        :type parameters: ~azure.mgmt.network.v2019_08_01.models.AzureFirewall
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: AzureFirewall, or the result of cls(response)
        :rtype: ~azure.mgmt.network.v2019_08_01.models.AzureFirewall
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None) # type: ClsType["_models.AzureFirewall"]
        # Map well-known HTTP failures to SDK exception types; callers may
        # extend or override the mapping via the 'error_map' kwarg.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-08-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"
        # Construct URL
        url = self.update_tags.metadata['url'] # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'azureFirewallName': self._serialize.url("azure_firewall_name", azure_firewall_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {} # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {} # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        # Serialize the body and send a PATCH so only provided fields change.
        body_content_kwargs = {} # type: Dict[str, Any]
        body_content = self._serialize.body(parameters, 'AzureFirewall')
        body_content_kwargs['content'] = body_content
        request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        # Only 200 is a documented success for this operation.
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize('AzureFirewall', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/azureFirewalls/{azureFirewallName}'} # type: ignore
    def list(
        self,
        resource_group_name, # type: str
        **kwargs # type: Any
    ):
        # type: (...) -> Iterable["_models.AzureFirewallListResult"]
        """Lists all Azure Firewalls in a resource group.

        Returns a lazily-paged iterator; each page is fetched on demand by
        following the service-provided next link.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either AzureFirewallListResult or the result of cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2019_08_01.models.AzureFirewallListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None) # type: ClsType["_models.AzureFirewallListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-08-01"
        accept = "application/json"
        def prepare_request(next_link=None):
            # Build the GET for one page: the first page uses the templated
            # URL plus api-version; later pages follow next_link verbatim
            # (it already embeds the full query string).
            # Construct headers
            header_parameters = {} # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                # Construct URL
                url = self.list.metadata['url'] # type: ignore
                path_format_arguments = {
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {} # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {} # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request
        def extract_data(pipeline_response):
            # Deserialize one page and hand ItemPaged the (next_link, items) pair.
            deserialized = self._deserialize('AzureFirewallListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)
        def get_next(next_link=None):
            # Fetch one page, failing fast on any non-200 status.
            request = prepare_request(next_link)
            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response
        return ItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/azureFirewalls'} # type: ignore
    def list_all(
        self,
        **kwargs # type: Any
    ):
        # type: (...) -> Iterable["_models.AzureFirewallListResult"]
        """Gets all the Azure Firewalls in a subscription.

        Same paging machinery as :meth:`list`, but scoped to the whole
        subscription rather than a single resource group.

        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either AzureFirewallListResult or the result of cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2019_08_01.models.AzureFirewallListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None) # type: ClsType["_models.AzureFirewallListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-08-01"
        accept = "application/json"
        def prepare_request(next_link=None):
            # First page: templated URL + api-version; later pages follow the
            # service-provided next_link verbatim.
            # Construct headers
            header_parameters = {} # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                # Construct URL
                url = self.list_all.metadata['url'] # type: ignore
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {} # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {} # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request
        def extract_data(pipeline_response):
            # Deserialize one page into (next_link, iterator of firewalls).
            deserialized = self._deserialize('AzureFirewallListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)
        def get_next(next_link=None):
            # Fetch one page, failing fast on any non-200 status.
            request = prepare_request(next_link)
            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response
        return ItemPaged(
            get_next, extract_data
        )
    list_all.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/azureFirewalls'} # type: ignore
| Azure/azure-sdk-for-python | sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_08_01/operations/_azure_firewalls_operations.py | Python | mit | 26,909 |
import unittest
def rotate_matrix(matrix):
    """Rotate a square matrix 90 degrees clockwise, in place.

    Works layer by layer: for each concentric ring it cycles the values
    between the four sides using one temporary per position.
    Time O(n^2), extra space O(1). An empty matrix is a no-op.
    """
    end = len(matrix) - 1  # The index of the last row
    # Rotating the matrix layer by layer. Use floor division so the loop
    # bound stays an int on Python 3 (plain `/` yields a float there,
    # which range() rejects).
    for row in range((end + 1) // 2):
        for col in range(row, end - row):
            temp = matrix[row][col]
            matrix[row][col] = matrix[end - col][row]
            matrix[end - col][row] = matrix[end - row][end - col]
            matrix[end - row][end - col] = matrix[col][end - row]
            matrix[col][end - row] = temp
class TestRotateMatrix(unittest.TestCase):
    """In-place clockwise rotation tests for 0x0, 1x1, 2x2, 4x4 and 6x6
    matrices. Uses assertEqual throughout: assertEquals is a deprecated
    alias that was removed in Python 3.12."""

    def test_empty_matrix(self):
        matrix = []
        rotate_matrix(matrix)
        self.assertEqual(matrix, [])

    def test_rotate_matrix_1(self):
        matrix = [[1]]
        rotate_matrix(matrix)
        self.assertEqual(matrix, [[1]])

    def test_rotate_matrix_2(self):
        matrix = [[1, 2],
                  [2, 1]]
        rotated = [[2, 1],
                   [1, 2]]
        rotate_matrix(matrix)
        self.assertEqual(matrix, rotated)

    def test_rotate_matrix_4(self):
        matrix = [[1, 2, 3, 4],
                  [5, 6, 7, 8],
                  [9, 10, 11, 12],
                  [13, 14, 15, 16]]
        rotated = [[13, 9, 5, 1],
                   [14, 10, 6, 2],
                   [15, 11, 7, 3],
                   [16, 12, 8, 4]]
        rotate_matrix(matrix)
        self.assertEqual(matrix, rotated)

    def test_rotate_matrix_6(self):
        matrix = [[1, 2, 3, 4, 5, 6],
                  [7, 8, 9, 10, 11, 12],
                  [13, 14, 15, 16, 17, 18],
                  [19, 20, 21, 22, 23, 24],
                  [25, 26, 27, 28, 29, 30],
                  [31, 32, 33, 34, 35, 36]]
        rotated = [[31, 25, 19, 13, 7, 1],
                   [32, 26, 20, 14, 8, 2],
                   [33, 27, 21, 15, 9, 3],
                   [34, 28, 22, 16, 10, 4],
                   [35, 29, 23, 17, 11, 5],
                   [36, 30, 24, 18, 12, 6]]
        rotate_matrix(matrix)
        self.assertEqual(matrix, rotated)
# Run the test suite when this file is executed directly.
if __name__ == '__main__':
    unittest.main()
| heitorschueroff/ctci | ch1/1.07_Rotate_Matrix/rotate_matrix.py | Python | mit | 2,061 |
#!/usr/bin/env python
import logging
import sys
from urllib.parse import urljoin
import requests
from lxml import html
from six.moves.urllib.parse import urlparse
from django_docutils.exc import BasedException
logger = logging.getLogger(__name__)
def _request_favicon(url):
    """Download *url* and return the raw bytes if the server serves an image.

    :param url: absolute URL of a candidate favicon
    :returns: response body bytes
    :raises BasedException: if the response is not declared as an image
    :raises requests.HTTPError: on a non-2xx status code
    """
    r = requests.get(url)
    r.raise_for_status()
    # Some servers omit Content-Type entirely; .get() treats a missing
    # header the same as a non-image one instead of raising KeyError.
    if 'image' not in r.headers.get('Content-Type', ''):
        raise BasedException('Not an image')
    return r.content
def get_favicon(url):
    """Fetch the favicon bytes for *url*.

    Strategy 1: download the page and look for <link rel="shortcut icon">
    or <link rel="icon"> tags. Strategy 2: fall back to /favicon.ico at
    the site root. Raises BasedException if the site is unreachable or
    both strategies fail.
    """
    try:
        r = requests.get(url)
        r.raise_for_status()
        # update url if redirected
        if r.url != url:
            url = r.url
        doc = html.fromstring(r.content)
    except requests.exceptions.ConnectionError as e:
        raise BasedException(f"The website {url} isn't connecting:", e)
    paths = ['//link[@rel="shortcut icon"]/@href', '//link[@rel="icon"]/@href']
    for path in paths:
        # Method 1: to find favicon via "shortcut icon"
        favicons = doc.xpath(path)
        if len(favicons): # Is pattern found?
            try:
                # Hrefs may be relative; resolve against the (possibly
                # redirected) page URL before fetching.
                favicon_url = favicons[0]
                favicon_url = urljoin(url, favicon_url)
                return _request_favicon(favicon_url)
            except Exception as e:
                # Best-effort: log and fall through to the next strategy.
                logger.debug(
                    'Could not retrieve {favicon_url}: \n{e}'.format(
                        favicon_url=favicon_url, e=e
                    )
                )
    # Method 2: site root/favicon.ico
    try:
        parsed = urlparse(url)
        parsed = parsed._replace(path='/favicon.ico')
        favicon_url = parsed.geturl()
        return _request_favicon(favicon_url)
    except Exception as e:
        logger.debug(
            'Could not retrieve {favicon_url}.\n{e}'.format(
                favicon_url=favicon_url, e=e
            )
        )
    raise BasedException(
        """
    Could not retrieve favicon for {url}. Both strategies failed
    """.format(
            url=url
        )
    )
if __name__ == '__main__':
    # CLI usage: scrape.py <url> — fetch the favicon and write it to disk.
    favicon = get_favicon(sys.argv[1])
    # NOTE(review): destination path is hard-coded (developer convenience?);
    # consider taking it as a second argument.
    # 'with' guarantees the handle is closed even if the write fails.
    with open('/Users/me/favicon.ico', 'wb') as file_:
        file_.write(favicon)
| tony/django-docutils | django_docutils/favicon/scrape.py | Python | mit | 2,207 |
import re
import pytest
from flask import url_for
from freezegun import freeze_time
from app.formatters import normalize_spaces
from tests.conftest import (
SERVICE_ONE_ID,
create_active_caseworking_user,
create_active_user_with_permissions,
create_platform_admin_user,
)
@pytest.mark.parametrize('extra_permissions', (
    pytest.param(
        [],
        marks=pytest.mark.xfail(raises=AssertionError),
    ),
    pytest.param(
        ['upload_letters'],
        marks=pytest.mark.xfail(raises=AssertionError),
    ),
    ['letter'],
    ['letter', 'upload_letters'],
))
def test_upload_letters_button_only_with_letters_permission(
    client_request,
    service_one,
    mock_get_uploads,
    mock_get_jobs,
    mock_get_no_contact_lists,
    extra_permissions,
):
    # The 'Upload a letter' button requires the 'letter' service permission;
    # 'upload_letters' alone is not enough (hence the xfail params above).
    service_one['permissions'] += extra_permissions
    page = client_request.get('main.uploads', service_id=SERVICE_ONE_ID)
    assert page.find('a', text=re.compile('Upload a letter'))
@pytest.mark.parametrize('user', (
    create_platform_admin_user(),
    create_active_user_with_permissions(),
))
def test_all_users_have_upload_contact_list(
    client_request,
    mock_get_uploads,
    mock_get_jobs,
    mock_get_no_contact_lists,
    user,
):
    # Both platform admins and regular permissioned users should see the
    # emergency contact list button, linking to the contact-list upload page.
    client_request.login(user)
    page = client_request.get('main.uploads', service_id=SERVICE_ONE_ID)
    button = page.find('a', text=re.compile('Upload an emergency contact list'))
    assert button
    assert button['href'] == url_for(
        'main.upload_contact_list', service_id=SERVICE_ONE_ID,
    )
@pytest.mark.parametrize('extra_permissions, expected_empty_message', (
    ([], (
        'You have not uploaded any files recently.'
    )),
    (['letter'], (
        'You have not uploaded any files recently. '
        'Upload a letter and Notify will print, pack and post it for you.'
    )),
))
def test_get_upload_hub_with_no_uploads(
    mocker,
    client_request,
    service_one,
    mock_get_no_uploads,
    mock_get_no_contact_lists,
    extra_permissions,
    expected_empty_message,
):
    # With no uploads or jobs, the hub shows an empty-state message that
    # mentions letters only when the service has the 'letter' permission.
    mocker.patch('app.job_api_client.get_jobs', return_value={'data': []})
    service_one['permissions'] += extra_permissions
    page = client_request.get('main.uploads', service_id=SERVICE_ONE_ID)
    assert normalize_spaces(' '.join(
        paragraph.text for paragraph in page.select('main p')
    )) == expected_empty_message
    assert not page.select('.file-list-filename')
@freeze_time('2017-10-10 10:10:10')
def test_get_upload_hub_page(
    mocker,
    client_request,
    service_one,
    mock_get_uploads,
    mock_get_no_contact_lists,
):
    # Happy path: uploaded letters, a CSV job and a single uploaded letter
    # all appear as rows, each linking to its own detail page.
    mocker.patch('app.job_api_client.get_jobs', return_value={'data': []})
    service_one['permissions'] += ['letter', 'upload_letters']
    page = client_request.get('main.uploads', service_id=SERVICE_ONE_ID)
    assert page.find('h1').text == 'Uploads'
    assert page.find('a', text=re.compile('Upload a letter')).attrs['href'] == url_for(
        'main.upload_letter', service_id=SERVICE_ONE_ID
    )
    uploads = page.select('tbody tr')
    assert len(uploads) == 3
    assert normalize_spaces(uploads[0].text.strip()) == (
        'Uploaded letters '
        'Printing today at 5:30pm '
        '33 letters'
    )
    assert uploads[0].select_one('a.file-list-filename-large')['href'] == url_for(
        'main.uploaded_letters',
        service_id=SERVICE_ONE_ID,
        letter_print_day='2017-10-10',
    )
    assert normalize_spaces(uploads[1].text.strip()) == (
        'some.csv '
        'Sent 1 January 2016 at 11:09am '
        '0 sending 8 delivered 2 failed'
    )
    assert uploads[1].select_one('a.file-list-filename-large')['href'] == (
        '/services/{}/jobs/job_id_1'.format(SERVICE_ONE_ID)
    )
    assert normalize_spaces(uploads[2].text.strip()) == (
        'some.pdf '
        'Sent 1 January 2016 at 11:09am '
        'Firstname Lastname '
        '123 Example Street'
    )
    # The letter row also shows a recipient summary paragraph.
    assert normalize_spaces(str(uploads[2].select_one('.govuk-body'))) == (
        '<p class="govuk-body letter-recipient-summary"> '
        'Firstname Lastname<br/> '
        '123 Example Street<br/> '
        '</p>'
    )
    assert uploads[2].select_one('a.file-list-filename-large')['href'] == (
        '/services/{}/notification/letter_id_1'.format(SERVICE_ONE_ID)
    )
@freeze_time('2020-02-02 14:00')
def test_get_uploaded_letters(
    mocker,
    client_request,
    service_one,
    mock_get_uploaded_letters,
):
    # The uploaded-letters page lists each letter with recipient and time,
    # links back to the hub, and paginates via rel=next/previous links.
    page = client_request.get(
        'main.uploaded_letters',
        service_id=SERVICE_ONE_ID,
        letter_print_day='2020-02-02'
    )
    assert page.select_one('.govuk-back-link')['href'] == url_for(
        'main.uploads',
        service_id=SERVICE_ONE_ID,
    )
    assert normalize_spaces(
        page.select_one('h1').text
    ) == (
        'Uploaded letters'
    )
    assert normalize_spaces(
        page.select('main p')[0].text
    ) == (
        '1,234 letters'
    )
    assert normalize_spaces(
        page.select('main p')[1].text
    ) == (
        'Printing starts today at 5:30pm'
    )
    assert [
        normalize_spaces(row.text)
        for row in page.select('tbody tr')
    ] == [
        (
            'Homer-Simpson.pdf '
            '742 Evergreen Terrace '
            '2 February at 1:59pm'
        ),
        (
            'Kevin-McCallister.pdf '
            '671 Lincoln Avenue, Winnetka '
            '2 February at 12:59pm'
        ),
    ]
    # Each row links to the notification detail page, carrying the
    # from_uploaded_letters date so the back link returns here.
    assert [
        link['href'] for link in page.select('tbody tr a')
    ] == [
        url_for(
            'main.view_notification',
            service_id=SERVICE_ONE_ID,
            notification_id='03e34025-be54-4d43-8e6a-fb1ea0fd1f29',
            from_uploaded_letters='2020-02-02',
        ),
        url_for(
            'main.view_notification',
            service_id=SERVICE_ONE_ID,
            notification_id='fc090d91-e761-4464-9041-9c4594c96a35',
            from_uploaded_letters='2020-02-02',
        ),
    ]
    next_page_link = page.select_one('a[rel=next]')
    prev_page_link = page.select_one('a[rel=previous]')
    assert next_page_link['href'] == url_for(
        'main.uploaded_letters', service_id=SERVICE_ONE_ID, letter_print_day='2020-02-02', page=2
    )
    assert normalize_spaces(next_page_link.text) == (
        'Next page '
        'page 2'
    )
    assert prev_page_link['href'] == url_for(
        'main.uploaded_letters', service_id=SERVICE_ONE_ID, letter_print_day='2020-02-02', page=0
    )
    assert normalize_spaces(prev_page_link.text) == (
        'Previous page '
        'page 0'
    )
    mock_get_uploaded_letters.assert_called_once_with(
        SERVICE_ONE_ID,
        letter_print_day='2020-02-02',
        page=1,
    )
@freeze_time('2020-02-02 14:00')
def test_get_empty_uploaded_letters_page(
    mocker,
    client_request,
    service_one,
    mock_get_no_uploaded_letters,
):
    # No letters: empty table, and no pagination links at all.
    page = client_request.get(
        'main.uploaded_letters',
        service_id=SERVICE_ONE_ID,
        letter_print_day='2020-02-02'
    )
    page.select_one('main table')
    assert not page.select('tbody tr')
    assert not page.select_one('a[rel=next]')
    assert not page.select_one('a[rel=previous]')
@freeze_time('2020-02-02')
def test_get_uploaded_letters_passes_through_page_argument(
    mocker,
    client_request,
    service_one,
    mock_get_uploaded_letters,
):
    # The ?page= query argument is forwarded verbatim to the API client.
    client_request.get(
        'main.uploaded_letters',
        service_id=SERVICE_ONE_ID,
        letter_print_day='2020-02-02',
        page=99,
    )
    mock_get_uploaded_letters.assert_called_once_with(
        SERVICE_ONE_ID,
        letter_print_day='2020-02-02',
        page=99,
    )
def test_get_uploaded_letters_404s_for_bad_page_arguments(
    mocker,
    client_request,
):
    # Non-integer page numbers are rejected with a 404.
    client_request.get(
        'main.uploaded_letters',
        service_id=SERVICE_ONE_ID,
        letter_print_day='2020-02-02',
        page='one',
        _expected_status=404,
    )
def test_get_uploaded_letters_404s_for_invalid_date(
    mocker,
    client_request,
):
    # Impossible dates in the URL are rejected with a 404.
    client_request.get(
        'main.uploaded_letters',
        service_id=SERVICE_ONE_ID,
        letter_print_day='1234-56-78',
        _expected_status=404,
    )
@pytest.mark.parametrize('user', (
    create_active_caseworking_user(),
    create_active_user_with_permissions(),
))
@freeze_time("2012-12-12 12:12")
def test_uploads_page_shows_scheduled_jobs(
    mocker,
    client_request,
    mock_get_no_uploads,
    mock_get_jobs,
    mock_get_no_contact_lists,
    user,
):
    # Scheduled (not-yet-sent) jobs appear on the hub for both caseworkers
    # and regular users, ordered with the later send time first.
    client_request.login(user)
    page = client_request.get('main.uploads', service_id=SERVICE_ONE_ID)
    assert [
        normalize_spaces(row.text) for row in page.select('tr')
    ] == [
        (
            'File Status'
        ),
        (
            'even_later.csv '
            'Sending 1 January 2016 at 11:09pm '
            '1 text message waiting to send'
        ),
        (
            'send_me_later.csv '
            'Sending 1 January 2016 at 11:09am '
            '1 text message waiting to send'
        ),
    ]
    assert not page.select('.table-empty-message')
@freeze_time('2020-03-15')
def test_uploads_page_shows_contact_lists_first(
    mocker,
    client_request,
    mock_get_no_uploads,
    mock_get_jobs,
    mock_get_contact_lists,
    mock_get_service_data_retention,
):
    # Saved contact lists are listed above scheduled jobs, with usage
    # within the retention window summarised per list.
    page = client_request.get('main.uploads', service_id=SERVICE_ONE_ID)
    assert [
        normalize_spaces(row.text) for row in page.select('tr')
    ] == [
        (
            'File Status'
        ),
        (
            'phone number list.csv '
            'Used twice in the last 7 days '
            '123 saved phone numbers'
        ),
        (
            'EmergencyContactList.xls '
            'Not used in the last 7 days '
            '100 saved email addresses'
        ),
        (
            'UnusedList.tsv '
            'Not used yet '
            '1 saved phone number'
        ),
        (
            'even_later.csv '
            'Sending 1 January 2016 at 11:09pm '
            '1 text message waiting to send'
        ),
        (
            'send_me_later.csv '
            'Sending 1 January 2016 at 11:09am '
            '1 text message waiting to send'
        ),
    ]
    assert page.select_one('.file-list-filename-large')['href'] == url_for(
        'main.contact_list',
        service_id=SERVICE_ONE_ID,
        contact_list_id='d7b0bd1a-d1c7-4621-be5c-3c1b4278a2ad',
    )
def test_get_uploads_shows_pagination(
    client_request,
    active_user_with_permissions,
    mock_get_jobs,
    mock_get_uploads,
    mock_get_no_contact_lists,
):
    # The hub paginates uploads with next/previous page links.
    page = client_request.get('main.uploads', service_id=SERVICE_ONE_ID)
    assert normalize_spaces(page.select_one('.next-page').text) == (
        'Next page '
        'page 2'
    )
    assert normalize_spaces(page.select_one('.previous-page').text) == (
        'Previous page '
        'page 0'
    )
| alphagov/notifications-admin | tests/app/main/views/uploads/test_upload_hub.py | Python | mit | 10,939 |
import numpy as np
import fnmatch, os
import h5py
class Hdf5Loader():
    """Loads hdf5 files from a directory and merges their datasets."""

    def loadDirectory(self, dirname):
        """
        Loads all hdf5 files in the directory dirname
        @param dirname: The directory which contains the files to load
        @returns: list of h5py File objects (caller is responsible for
            closing them)
        """
        cachelist = os.listdir(dirname)
        testlist = fnmatch.filter(cachelist, '*.hdf5')
        for file_ in testlist:
            print("Using {0}".format(file_))
        files = [h5py.File(os.path.join(dirname, fn), 'r') for fn in testlist]
        return files

    def getDatasets(self, dirname, dataset_list):
        """
        Loads all hdf5 files in a given directory. It extracts all datasets
        which are specified in :dataset_list and merges the datasets from
        all files.
        Finally it returns a numpy array for each dataset in the :dataset_list
        @param dirname: The directory containing the hdf5 files
        @param dataset_list: List of datasets to load
        @returns: A list of numpy arrays loaded from the dataset files
        """
        files = self.loadDirectory(dirname)
        try:
            result = []
            for dataset_name in dataset_list:
                arr = np.concatenate([f[dataset_name] for f in files])
                result.append(arr)
        finally:
            # np.concatenate copies the data into memory, so the file
            # handles are no longer needed; previously they were leaked.
            for f in files:
                f.close()
        return result
class LoadData():
    """
    This class extracts data from features and corresponding powervalues and returns them as array
    """

    def __init__(self, sep=";", groundtruth_elements=2, skiprows=1, skipcols=1):
        self.sep = sep
        self.num_groundtruth_elements = groundtruth_elements
        # Bug fix: this was hard-coded to 1, silently ignoring the
        # caller-supplied value. The default (1) keeps old behaviour.
        self.skiprows = skiprows
        self.skipcols = skipcols

    def getFeatureCount(self, file_):
        """Number of feature columns: total columns in the first data row
        minus the trailing ground-truth columns."""
        # 'with' guarantees the handle is closed (it used to leak).
        with open(file_, 'r') as fd:
            fd.readline()  # skip the header row
            count = len(fd.readline().split(self.sep))
        return count - self.num_groundtruth_elements

    def getFeaturesData(self, csvname):
        """Load the feature columns (skipcols .. featureCount-1) as an array."""
        cols = range(self.skipcols, self.getFeatureCount(csvname))
        log = np.loadtxt(csvname, delimiter=self.sep, skiprows=self.skiprows, usecols=cols)
        return log

    def getPowerData(self, csvname):
        """Load the power column (the column right after the features)."""
        cols = [self.getFeatureCount(csvname)]
        power = np.loadtxt(csvname, delimiter=self.sep, skiprows=self.skiprows, usecols=cols)
        return power

    def load_dir(self, dirname):
        """
        Loads all files of a directory to a single feature and power data set
        """
        cachelist = os.listdir(dirname)
        testlist = fnmatch.filter(cachelist, '*.csv')
        testFeatureDataLst = []
        testPowerDataLst = []
        # Load and merge every csv file found in the directory.
        for file_ in testlist:
            testFeatureDataLst.append(self.getFeaturesData(os.path.join(dirname, file_)))
            testPowerDataLst.append(self.getPowerData(os.path.join(dirname, file_)))
        testFeatureData = np.concatenate(testFeatureDataLst)
        testPowerData = np.concatenate(testPowerDataLst)
        return testPowerData, testFeatureData
"""
This script investigates how calculating phasic currents from voltage clamp
recordings may benefit from subtracting-out the "noise" determined from a
subset of the quietest pieces of the recording, rather than using smoothing
or curve fitting to guess a guassian-like RMS noise function.
"""
import os
import sys
sys.path.append("../")
sys.path.append("../../")
sys.path.append("../../../")
sys.path.append("../../../../")
import swhlab
import matplotlib.pyplot as plt
import numpy as np
import matplotlib.mlab as mlab
POINTS_PER_SEC=20000 # sample rate of the source recordings (Hz)
POINTS_PER_MS=int(POINTS_PER_SEC/1000) # samples per millisecond (20)
CHUNK_POINTS=POINTS_PER_MS*10 # size of Y pieces to calculate variance from
PERCENT_STEP=10 # percentile steps to display
HIST_RESOLUTION=.1 # pA per bin
COLORMAP=plt.get_cmap('jet') # which color scheme do we want to use?
#COLORMAP=plt.get_cmap('winter') # which color scheme do we want to use?
def quietParts(data, percentile=10, chunk_points=None):
    """
    Given some data (Y) break it into chunks and return just the quiet ones.

    Returns data where the variance for its chunk size is below the given
    percentile. chunk_points defaults to the module-level CHUNK_POINTS
    (which should be adjusted so it's about 10ms of data) but may be
    overridden per call.

    Bug fix: the body previously operated on the module-level global Y
    instead of the ``data`` argument, so the parameter was ignored.
    """
    if chunk_points is None:
        chunk_points = CHUNK_POINTS
    nChunks = int(len(data) / chunk_points)
    chunks = np.reshape(data[:nChunks * chunk_points], (nChunks, chunk_points))
    variances = np.var(chunks, axis=1)
    # Rank each chunk's variance as a percentile of all chunk variances.
    percentiles = np.empty(len(variances))
    for i, variance in enumerate(variances):
        percentiles[i] = sorted(variances).index(variance) / len(variances) * 100
    selected = chunks[np.where(percentiles <= percentile)[0]].flatten()
    return selected
def ndist(data, Xs, hist_resolution=None):
    """
    given some data and a list of X posistions, return the normal
    distribution curve as a Y point at each of those Xs.

    The curve is scaled so its area matches a histogram of ``data`` binned
    at ``hist_resolution`` (defaults to the module-level HIST_RESOLUTION).
    """
    if hist_resolution is None:
        hist_resolution = HIST_RESOLUTION
    sigma = np.sqrt(np.var(data))
    center = np.average(data)
    # Gaussian PDF evaluated at Xs, computed directly with numpy because
    # matplotlib.mlab.normpdf was deprecated and removed (matplotlib 3.1).
    Xs = np.asarray(Xs, dtype=float)
    curve = np.exp(-((Xs - center) ** 2) / (2 * sigma ** 2)) / (sigma * np.sqrt(2 * np.pi))
    curve *= len(data) * hist_resolution
    return curve
if __name__=="__main__":
Y=np.load("sweepdata.npy")
baseline=swhlab.common.lowpass(Y,POINTS_PER_MS*250)
plt.figure(figsize=(15,5))
plt.plot(Y)
plt.plot(baseline,color='r',alpha=.5,lw=5)
plt.savefig("baseline.png")
plt.figure(figsize=(15,5))
plt.plot(Y-baseline)
plt.axhline(0,color='r',alpha=.5,lw=5)
plt.savefig("baseline2.png")
plt.show()
if __name__=="__main__" and False:
# apply baseline
Y=Y-baseline
# predict what our histogram will look like
padding=50
histCenter=int(np.average(Y))
histRange=(histCenter-padding,histCenter+padding)
histBins=int(abs(histRange[0]-histRange[1])/HIST_RESOLUTION)
# FIRST CALCULATE THE 10-PERCENTILE CURVE
data=quietParts(Y,10) # assume 10% is a good percentile to use
hist,bins=np.histogram(data,bins=histBins,range=histRange,density=False)
hist=hist.astype(np.float) # histogram of data values
curve=ndist(data,bins[:-1]) # normal distribution curve
hist[hist == 0] = np.nan
histValidIs=np.where(~np.isnan(hist))
histX,histY=bins[:-1][histValidIs],hist[histValidIs] # remove nans
baselineCurve=curve/np.max(curve) # max is good for smooth curve
# THEN CALCULATE THE WHOLE-SWEEP HISTOGRAM
hist,bins=np.histogram(Y,bins=histBins,range=histRange,density=False)
hist=hist.astype(np.float) # histogram of data values
hist[hist == 0] = np.nan
histValidIs=np.where(~np.isnan(hist))
histX,histY=bins[:-1][histValidIs],hist[histValidIs] # remove nans
histY/=np.percentile(histY,98) # percentile is needed for noisy data
# DETERMINE THE DIFFERENCE
diffX=bins[:-1][histValidIs]
diffY=histY-baselineCurve[histValidIs]
diffY[diffY<0]=np.nan
# NOW PLOT THE DIFFERENCE
plt.figure(figsize=(10,10))
plt.subplot(211)
plt.grid()
plt.plot(histX,histY,'b.',ms=10,alpha=.5,label="data points")
plt.plot(bins[:-1],baselineCurve,'r-',lw=3,alpha=.5,label="10% distribution")
plt.legend(loc='upper left',shadow=True)
plt.ylabel("normalized distribution")
plt.axis([histCenter-20,histCenter+20,0,1.5])
plt.subplot(212)
plt.grid()
plt.plot(diffX,diffY,'.',ms=10,alpha=.5,color='b')
plt.axvline(histCenter,color='r',lw=3,alpha=.5,ls='--')
plt.legend(loc='upper left',shadow=True)
plt.ylabel("difference")
plt.xlabel("histogram data points (pA)")
plt.margins(0,.1)
plt.axis([histCenter-20,histCenter+20,0,None])
plt.tight_layout()
plt.savefig("2016-12-16-tryout-yesSub.png")
plt.show()
print("DONE") | swharden/SWHLab | doc/uses/EPSCs-and-IPSCs/variance method/2016-12-16 tryout2.py | Python | mit | 4,500 |
from __future__ import print_function
import re
import traceback
class MiniObject(object):
    """Boxes a Python value as a mini-language object.

    Python-to-mini type mapping:
      bool -> boolean, str -> string, int -> integer, float -> float,
      tuple -> list (may contain different types),
      list -> vector (may only contain one type), dict -> map,
      MiniSymbol -> symbol, Pair -> pair.
    Mini vectors and maps should be treated as though immutable;
    s-expressions should be parsed as tuples.
    """

    def __init__(self, py_object, **meta):
        self.py_object = py_object
        self.meta = meta

    def __repr__(self):
        boxed = self.py_object
        if boxed == None:
            return 'nil'
        if isinstance(boxed, bool):
            return 'true' if boxed else 'false'
        return repr(boxed)

    def __str__(self):
        boxed = self.py_object
        return boxed if isinstance(boxed, str) else repr(self)
class Identifier(object):
    """A name appearing in source text, with optional source-span metadata."""

    def __init__(self, symbol, **kwargs):
        assert isinstance(symbol, str)
        self.symbol = symbol
        # Source positions default to None when not supplied by the parser.
        self.start = kwargs.get('start')
        self.end = kwargs.get('end')

    def __repr__(self):
        return '<identifier ' + self.symbol + '>'
def is_identifier(mini_object):
    """Mini predicate: TRUE when the boxed value is an Identifier."""
    assert isinstance(mini_object, MiniObject)
    return TRUE if isinstance(mini_object.py_object, Identifier) else FALSE
SYMBOLS = {}  # interning table: symbol string -> its unique MiniObject
class MiniSymbol(object):
    """An interned symbol.

    Equality is identity: two symbols compare equal only when they are
    the very same object (interning is handled by create_symbol).
    """

    def __init__(self, string):
        self.string = string

    def __eq__(self, other):
        return self is other

    def __repr__(self):
        return '<symbol :' + self.string + '>'
class MiniPair(object):
    """A cons cell holding two MiniObjects (car, cdr)."""

    def __init__(self, car, cdr):
        # Both halves must already be boxed mini objects.
        assert isinstance(car, MiniObject)
        assert isinstance(cdr, MiniObject)
        self.car = car
        self.cdr = cdr

    def __repr__(self):
        return '<pair %s, %s>' % (self.car, self.cdr)
def evaluate_arguments(arguments_cons_list, environment):
    """Evaluate every element of a cons list of arguments, in order,
    returning a new cons list of the results."""
    if arguments_cons_list == NIL:
        return NIL
    head = evaluate(car(arguments_cons_list), environment)
    tail = evaluate_arguments(cdr(arguments_cons_list), environment)
    return cons(head, tail)
class MiniEnvironment(MiniObject):
    'This acts like a dict in Python code and a cons-dict in mini code'

    def __init__(self):
        # Name the class explicitly: super(self.__class__, ...) recurses
        # forever if this class is ever subclassed.
        super(MiniEnvironment, self).__init__(None)

    def __getitem__(self, key):
        """Look up *key* (a Python str) via the interned symbol table."""
        assert isinstance(key, str)
        key_symbol = create_symbol(key)
        return cons_dict_get(self, key_symbol)

    def __setitem__(self, key, value):
        """Bind *key* to a MiniObject; rebuilds the backing cons-dict."""
        assert isinstance(key, str)
        key_symbol = create_symbol(key)
        assert isinstance(value, MiniObject)
        self.py_object = cons_dict_set(self, key_symbol, value).py_object

    def __contains__(self, key):
        assert isinstance(key, str)
        key_symbol = create_symbol(key)
        return cons_dict_has_key(self, key_symbol) == TRUE

    def get(self, key):
        """dict.get-like helper: returns Python None (not NIL) when absent."""
        assert isinstance(key, str)
        if key in self:
            return self[key]
        return None
def dict_to_environment(dictionary):
    """Copy a plain dict (str keys -> MiniObject values) into a fresh
    MiniEnvironment."""
    result = MiniEnvironment()
    # .items() works on both Python 2 and 3; .iteritems() is Python-2-only.
    for key, value in dictionary.items():
        result[key] = value
    return result
class MiniApplicative(object):
    """Wraps a Python callable (pattern, environment) -> result so it can
    serve as a mini operative. Arguments are passed through unevaluated."""

    def __init__(self, operative):
        assert callable(operative)
        self.operative = operative

    def __call__(self, pattern, environment):
        assert isinstance(pattern, MiniObject)
        return self.operative(pattern, environment)
class MiniWrapper(object):
    """Wraps an operative so its arguments are evaluated before application
    (i.e. turns an operative into an applicative)."""

    def __init__(self, operative):
        assert isinstance(operative, MiniObject)
        inner = operative.py_object
        assert isinstance(inner, MiniApplicative) or isinstance(inner, MiniWrapper)
        self.operative = operative

    def __call__(self, pattern, environment):
        assert isinstance(pattern, MiniObject)
        evaluated = evaluate_arguments(pattern, environment)
        return self.operative.py_object(evaluated, environment)

    def __repr__(self):
        return "<wrapper {}>".format(repr(self.operative))
def wrap(thing):
    """Add one evaluation layer: lift an operative into an applicative."""
    return MiniObject(MiniWrapper(thing))

def unwrap(thing):
    """Strip one wrapper layer; UnwrapError for anything not wrapped."""
    if not isinstance(thing.py_object, MiniWrapper):
        raise Exception('UnwrapError')
    return thing.py_object.operative
def create_symbol(string, **kwargs):
    """Intern a symbol: at most one MiniObject per symbol name, so
    symbols can be compared cheaply."""
    interned = SYMBOLS.get(string)
    if interned is not None:
        return interned
    symbol = MiniObject(MiniSymbol(string), **kwargs)
    SYMBOLS[string] = symbol
    return symbol
def create_cons_collection(py_collection):
    """Build a cons list (right-nested pairs ending in NIL) from any
    reversible Python sequence."""
    cons_list = NIL
    for element in reversed(py_collection):
        cons_list = MiniObject(MiniPair(element, cons_list))
    return cons_list

def cons_collection_to_py_collection(cons_collection):
    """Lazily yield the elements of a cons list, front to back."""
    node = cons_collection
    while node != NIL:
        yield car(node)
        node = cdr(node)
token_regex = re.compile(r'''(?mx)
(\s*|\#.*?\n)*(?:
(?P<open_parenthese>\()|
(?P<close_parenthese>\))|
(?P<number>\-?\d+\.\d+|\-?\d+)|
(?P<string>"[^"]*")|
(?P<identifier>[_A-Za-z\?\-\+\*/=\>\<]+)|
(?P<symbol>\:[_A-Za-z\?\-\+\*/=\>\<]*)
)''')
def parse_all(source):
    """Tokenize and parse an entire source string, returning a cons list
    of the top-level expressions."""
    def parse(matches, index_holder):
        # index_holder is a one-element list used as a mutable cursor
        # shared across recursive calls.
        match = matches[index_holder[0]]
        index_holder[0] += 1
        if match.group('open_parenthese'):
            r = []
            while index_holder[0] < len(matches) and not matches[index_holder[0]].group('close_parenthese'):
                r.append(parse(matches, index_holder))
            if index_holder[0] == len(matches):
                raise Exception('Unmatched parenthese (')
            index_holder[0] += 1  # consume the ')'
            return create_cons_collection(r)
        if match.group('close_parenthese'):
            raise Exception("Unmatched parenthese )")
        if match.group('number'):
            # Parse as float, then demote to int when exact.
            v = float(match.group('number'))
            if v.is_integer(): v = int(v)
            return MiniObject(
                v,
                start = match.start('number'),
                end = match.end('number'))
        if match.group('string'):
            # Strip the surrounding quotes; no escape-sequence handling.
            return MiniObject(
                match.group('string')[1:-1],
                start = match.start('string'),
                end = match.end('string'))
        if match.group('identifier'):
            return MiniObject(Identifier(
                match.group('identifier'),
                start = match.start('identifier'),
                end = match.end('identifier')))
        if match.group('symbol'):
            # Drop the leading ':' from symbol literals.
            return create_symbol(
                match.group('symbol')[1:],
                start = match.start('symbol'),
                end = match.end('symbol'))
        assert False, "I'm not sure how this happened"
    def parse_all_internal(matches, index_holder):
        # Recursively consume top-level forms until the tokens run out.
        if index_holder[0] == len(matches):
            return NIL
        parsed_atom = parse(matches, index_holder)
        return cons(parsed_atom, parse_all_internal(matches, index_holder))
    matches = list(token_regex.finditer(source))
    match_index_wrapped = [0]
    return parse_all_internal(matches, match_index_wrapped)
# The empty list / "nothing" value; compared with == and `is` elsewhere.
NIL = MiniObject(None)

class Boolean(MiniObject):
    """Distinct MiniObject subclass so mini booleans are not mistaken
    for numbers (Python's bool subclasses int)."""
    def __init__(self, py_object, **kwargs):
        super(Boolean,self).__init__(py_object, **kwargs)

# Singleton truth values; code elsewhere compares with `is TRUE`/`is FALSE`.
TRUE = Boolean(True)
FALSE = Boolean(False)
def is_number(arg):
    """True for int/float values; bool is rejected even though it
    subclasses int in Python."""
    if isinstance(arg, bool):
        return False
    return isinstance(arg, (int, float))
def py_to_mini(py_object):
    """Expose a Python callable as a mini applicative.

    Arguments are evaluated (MiniWrapper), converted from a cons list to
    Python positional args, and the Python return value is converted
    back to a MiniObject.
    """
    assert callable(py_object)
    def wrapped(pattern, environment):
        result = py_object(*cons_collection_to_py_collection(pattern))
        # Numbers, pairs and strings get boxed; True/False/None map to the
        # singletons; anything else is assumed to already be a MiniObject.
        if is_number(result) or isinstance(result,MiniPair):
            return MiniObject(result)
        if isinstance(result,str):
            return MiniObject(result)
        return {
            True : TRUE,
            False : FALSE,
            None : NIL,
        }.get(result, result)
    return MiniObject(MiniWrapper(MiniObject(MiniApplicative(wrapped))))
# NOTE: intentionally shadows the Python 2 builtin `apply` within this module.
def apply(applicative, pattern, environment):
    """Invoke a boxed combiner with the raw operand list and environment."""
    assert isinstance(applicative, MiniObject)
    return applicative.py_object(pattern, environment)
def evaluate(expression, environment):
    """Core eval: literals and symbols self-evaluate, pairs are
    combinations, identifiers are looked up through the environment chain."""
    assert isinstance(expression, MiniObject)
    if isinstance(expression.py_object, str) or is_number(expression.py_object):
        return expression
    if isinstance(expression.py_object, MiniSymbol):
        return expression
    if isinstance(expression.py_object, MiniPair):
        # A combination: evaluate the head, then let the combiner decide
        # whether the operands get evaluated (wrapper vs. bare operative).
        applicative = evaluate(car(expression), environment)
        arguments = cdr(expression)
        assert isinstance(applicative, MiniObject)
        assert isinstance(arguments, MiniObject)
        if isinstance(applicative.py_object, MiniApplicative) or isinstance(applicative.py_object, MiniWrapper):
            return apply(applicative, arguments, environment)
        raise Exception("Expected applicative, got {}".format(applicative.py_object))
    if isinstance(expression.py_object, Identifier):
        parent_symbol = create_symbol('__parent__')
        # Walk the lexical chain via each environment's __parent__ entry.
        while environment != None:
            if cons_dict_has_key(environment, create_symbol(expression.py_object.symbol)) == TRUE:
                return cons_dict_get(environment, create_symbol(expression.py_object.symbol))
            if cons_dict_has_key(environment, parent_symbol) == TRUE:
                environment = cons_dict_get(environment, create_symbol('__parent__'))
            else:
                raise Exception('UndefinedIdentifierError: Undefined identifier {}'.format(expression.py_object.symbol))
    # NOTE(review): any other py_object type (e.g. None/NIL, booleans)
    # falls through and implicitly returns Python None -- confirm intended.
def length(string):
    """Length of a mini string; TypeError for any non-string payload."""
    assert isinstance(string, MiniObject)
    if not isinstance(string.py_object, str):
        raise Exception("TypeError")
    return len(string.py_object)
def concatenate(l, r):
    """Concatenate two mini strings into a new boxed string."""
    # TODO Implement ropes: http://citeseer.ist.psu.edu/viewdoc/download?doi=10.1.1.14.9450&rep=rep1&type=pdf
    # TODO Apply this to other collection types
    if not (isinstance(l.py_object, str) and isinstance(r.py_object, str)):
        raise Exception('TypeError')
    return MiniObject(l.py_object + r.py_object)
def is_integer(arg):
    """True only for genuine ints; bool is excluded despite subclassing int."""
    if isinstance(arg, bool):
        return False
    return isinstance(arg, int)
# NOTE: intentionally shadows the Python builtin `slice` within this module.
def slice(string, start, end):
    """Substring of `string`; a NIL bound defaults to the corresponding
    end of the string."""
    if not isinstance(string.py_object, str):
        raise Exception('TypeError')
    py_string = string.py_object
    def bound(obj, default):
        # Accept an int payload or None (NIL); anything else is an error.
        if is_integer(obj.py_object):
            return obj.py_object
        if obj.py_object == None:
            return default
        raise Exception('TypeError')
    py_start = bound(start, 0)
    py_end = bound(end, len(py_string))
    return MiniObject(py_string[py_start:py_end])
def _assert(pattern, environment):
    """Special form (assert [description] exprs...): evaluate the
    arguments in a nested scope and fail unless the final value is TRUE."""
    def assert_internal(*arguments):
        if len(arguments) == 0:
            raise Exception("ArgumentError: assert expects 1 or more arguments, received none")
        if len(arguments) == 1:
            description = 'assertion failed'
            # The whole tuple is kept; only assertion[-1] is inspected below.
            assertion = arguments
        else:
            # First argument is a description string for the failure message.
            description = arguments[0].py_object
            assertion = arguments[1:]
        if not isinstance(assertion[-1].py_object, bool):
            raise Exception("TypeError: `assert` expected Boolean assertion but received {} {}".format(type(assertion[-1].py_object), assertion[-1]))
        if assertion[-1] is TRUE:
            return None
        if assertion[-1] is FALSE:
            raise Exception("AssertionError: {}".format(description))
        assert False
    # Execute in nested scope
    return py_to_mini(assert_internal).py_object(pattern, nest(environment))
def throws(pattern, environment):
    """Special form (throws? expression "ExceptionType"): evaluate the
    expression and report whether it raises the named exception type.

    Exception "types" are encoded as the text before the first ':' of
    the Python exception message.
    """
    if cons_collection_len(pattern) != 2:
        # cons_collection_len, not len(): pattern is a cons-list MiniObject,
        # and the original len(pattern) raised TypeError while *reporting*
        # the arity error.
        raise Exception("throws? expects 2 argument, received {}".format(cons_collection_len(pattern)))
    expression = car(pattern)
    exception = evaluate(car(cdr(pattern)), environment)
    if not isinstance(exception.py_object, str):
        raise Exception('throws? expects a string as the second argument')
    try:
        evaluate(expression, environment)
        return FALSE
    except Exception as e:
        # e.message is Python 2 only.
        if ':' in e.message:
            exception_type, message = e.message.split(':',1)
        else:
            exception_type = e.message
        if exception_type == exception.py_object:
            return TRUE
        # Not the exception we were asked about: propagate it.
        raise
def _not(argument):
    """Boolean negation over the TRUE/FALSE singletons."""
    if not isinstance(argument, Boolean):
        raise Exception('TypeError: Expected Boolean but received {}'.format(type(argument)))
    if argument == TRUE:
        return FALSE
    assert argument == FALSE
    return TRUE
def evaluate_expressions(expressions, environment):
    """Evaluate a cons list of expressions in order; return the last
    result, or NIL for an empty list."""
    result = NIL
    for expression in cons_collection_to_py_collection(expressions):
        result = evaluate(expression, environment)
    return result
def cons_collection_len(cons_collection):
    """Number of elements in a cons list (O(n) walk)."""
    return sum(1 for _ in cons_collection_to_py_collection(cons_collection))
def define(pattern, environment):
    """Special form (define name exprs...): bind a new identifier in the
    current environment.  Redefinition is an error."""
    if cons_collection_len(pattern) < 2:
        raise Exception('DefineError: `define` expected two arguments, received {}'.format(cons_collection_len(pattern)))
    head = car(pattern)
    body = cdr(pattern)
    if isinstance(head.py_object, Identifier):
        identifier = head.py_object.symbol
        if is_defined(head, environment) == TRUE:
            raise Exception('AlreadyDefinedError: the identifier {} is already defined'.format(identifier))
        # The body is a sequence; the bound value is the last expression's result.
        environment[identifier] = evaluate_expressions(body, environment)
        return NIL
    elif isinstance(head.py_object, MiniPair):
        raise Exception('NotImplementedError: Defining patterns is not yet implemented')
    else:
        raise Exception("DefineError")
def defined_p(pattern, environment):
    """Special form (defined? identifier): TRUE iff the identifier is
    bound in `environment` or any ancestor."""
    if cons_collection_len(pattern) != 1:
        # cons_collection_len, not len(): pattern is a cons-list MiniObject,
        # and the original len(pattern) raised TypeError while *reporting*
        # the arity error.
        raise Exception("ArgumentError: `defined?` expects 1 argument, received {}".format(cons_collection_len(pattern)))
    if not isinstance(car(pattern).py_object, Identifier):
        raise Exception("TypeError: Expected Identifier but got {}".format(type(car(pattern).py_object)))
    return is_defined(car(pattern), environment)
def is_defined(identifier, environment):
    """TRUE iff `identifier` is bound in `environment` or any ancestor
    reachable through the __parent__ chain."""
    assert isinstance(identifier, MiniObject)
    assert isinstance(environment, MiniObject)
    target_symbol = identifier_to_symbol(identifier)
    parent_symbol = create_symbol('__parent__')
    current = environment
    while True:
        if cons_dict_has_key(current, target_symbol) == TRUE:
            return TRUE
        if cons_dict_has_key(current, parent_symbol) != TRUE:
            return FALSE
        current = cons_dict_get(current, parent_symbol)
def _if(pattern, environment):
    """Special form (if condition then [else]).

    Only the selected branch is evaluated.  The two-argument form
    returns NIL when the condition is FALSE.  (The original eagerly took
    car(cdr(cdr(pattern))) -- car of NIL -- so the 2-arg form always
    crashed with an AttributeError before even evaluating the condition.)
    """
    arity = cons_collection_len(pattern)
    if arity not in [2, 3]:
        raise Exception("ArgumentError")
    condition = car(pattern)
    if_result_true = car(cdr(pattern))
    result = evaluate(condition, environment)
    if result is TRUE:
        return evaluate(if_result_true, environment)
    if result is FALSE:
        if arity == 2:
            return NIL
        return evaluate(car(cdr(cdr(pattern))), environment)
    raise Exception("TypeError: `if` expects boolean, received {}".format(type(result)))
def nest(environment):
    """Return a child environment whose lookups fall back to `environment`."""
    # The original was a bare `isinstance(...)` expression whose result was
    # discarded; it was clearly meant as an assertion.
    assert isinstance(environment, MiniEnvironment)
    result = MiniEnvironment()
    result['__parent__'] = environment
    return result
# This is vau from John N. Shutt's seminal paper
# https://www.wpi.edu/Pubs/ETD/Available/etd-090110-124904/unrestricted/jshutt.pdf
# While Greek letters are appropriate for an academic, theoretical context, they make for
# poor variable names, so this is tentatively named `operative`
def operative(pattern, defining_environment):
    """Special form: (operative params env-id body...).

    Builds a combiner that receives its operands *unevaluated* plus the
    caller's environment.  `params` is either one identifier (bound to
    the whole operand list) or a cons list of distinct identifiers.
    """
    # Validate the environment identifier before dereferencing it (the
    # original read .py_object.symbol first, so a bad second argument
    # produced an AttributeError instead of this ArgumentError).
    if not isinstance(car(cdr(pattern)).py_object, Identifier):
        raise Exception("ArgumentError: The second argument to `operative` should be an identifer")
    calling_environment_identifier = car(cdr(pattern)).py_object.symbol
    argument_list_identifier = None
    argument_identifiers = None
    if isinstance(car(pattern).py_object, Identifier):
        argument_list_identifier = car(pattern).py_object.symbol
        if calling_environment_identifier == argument_list_identifier:
            # Fixed: the original formatted the undefined name `ai` here,
            # raising NameError instead of this ArgumentError.
            raise Exception("ArgumentError: Argument list identifier `{}` may not be the same as calling environment identifier".format(argument_list_identifier))
    elif car(pattern).py_object == None or isinstance(car(pattern).py_object, MiniPair):
        # Checked element-by-element so the error can name the offending
        # argument (the original referenced the comprehension variable
        # after the all(), relying on Python 2 scope leakage).
        for arg in cons_collection_to_py_collection(car(pattern)):
            if not isinstance(arg.py_object, Identifier):
                raise Exception("ArgumentError: Unexpected {} {}".format(type(arg),arg))
        argument_identifiers = [ai.py_object.symbol for ai in cons_collection_to_py_collection(car(pattern))]
        existing = set()
        for ai in argument_identifiers:
            if ai in existing:
                raise Exception("ArgumentError: Argument `{}` already defined".format(ai))
            if calling_environment_identifier == ai:
                raise Exception("ArgumentError: Argument `{}` may not be the same as calling environment identifier".format(ai))
            existing.add(ai)
    else:
        raise Exception("ArgumentError: `operative` expected identifier or cons-list as first argument, received {}".format(type(car(pattern).py_object)))
    def result(calling_pattern, calling_environment):
        # Static scope: the body runs in a fresh child of the *defining*
        # environment, with the operands bound unevaluated.
        local_environment = nest(defining_environment)
        assert (argument_list_identifier == None) != (argument_identifiers == None)
        if argument_list_identifier != None:
            local_environment[argument_list_identifier] = calling_pattern
        if argument_identifiers != None:
            if not cons_collection_len(calling_pattern) == len(argument_identifiers):
                # Fixed: the original called len() on the cons-list MiniObject
                # while formatting this error, raising TypeError instead.
                raise Exception("ArgumentError: operative expected {} arguments, received {}".format(len(argument_identifiers), cons_collection_len(calling_pattern)))
            calling_pattern = list(cons_collection_to_py_collection(calling_pattern))
            for i in range(len(argument_identifiers)):
                local_environment[argument_identifiers[i]] = calling_pattern[i]
        local_environment[calling_environment_identifier] = calling_environment
        return evaluate_expressions(cdr(cdr(pattern)), local_environment)
    return MiniObject(MiniApplicative(result))
def read_file(filename):
    """Return the entire contents of the named file as a Python string."""
    assert isinstance(filename, MiniObject)
    with open(filename.py_object, 'r') as f:
        return f.read()
def write_file(filename, string):
    """Replace the named file's contents with `string`."""
    assert isinstance(filename, MiniObject)
    assert isinstance(string, MiniObject)
    with open(filename.py_object, 'w') as f:
        f.write(string.py_object)
def _numeric_binop(l, r, op):
    """Shared guts of the arithmetic builtins.

    Unwraps two MiniObject operands, checks both are numbers, and applies
    `op` to the raw Python values.  The six originals each raised
    `Excepion(...)` -- a typo -- so bad operands surfaced as a NameError
    instead of the intended TypeError signal.
    """
    if isinstance(l, MiniObject) and isinstance(r, MiniObject):
        l = l.py_object
        r = r.py_object
    if is_number(l) and is_number(r):
        return op(l, r)
    raise Exception('TypeError')

def add(l, r):
    """Return l + r (numbers only)."""
    return _numeric_binop(l, r, lambda a, b: a + b)

def subtract(l, r):
    """Return l - r (numbers only)."""
    return _numeric_binop(l, r, lambda a, b: a - b)

def multiply(l, r):
    """Return l * r (numbers only)."""
    return _numeric_binop(l, r, lambda a, b: a * b)

def divide(l, r):
    """Return l / r, promoting to float when int division would truncate."""
    def _div(a, b):
        if isinstance(a, int) and isinstance(b, int) and a % b != 0:
            a = float(a)
        return a / b
    return _numeric_binop(l, r, _div)

def idivide(l, r):
    """Return floor division l // r (numbers only)."""
    return _numeric_binop(l, r, lambda a, b: a // b)

def mod(l, r):
    """Return l % r (numbers only)."""
    return _numeric_binop(l, r, lambda a, b: a % b)
def eq(l,r):
    """Python-level equality of the underlying payloads (returns a Python bool)."""
    assert isinstance(l,MiniObject)
    assert isinstance(r,MiniObject)
    return l.py_object == r.py_object
def lt(l,r):
    """Strict ordering for numbers, strings and symbols (returns a Python bool)."""
    assert isinstance(l,MiniObject)
    assert isinstance(r,MiniObject)
    if is_number(l.py_object) and is_number(r.py_object):
        return l.py_object < r.py_object
    if isinstance(l.py_object,str) and isinstance(r.py_object,str):
        return l.py_object < r.py_object
    if isinstance(l.py_object,MiniSymbol) and isinstance(r.py_object,MiniSymbol):
        return l.py_object.string < r.py_object.string
    # NOTE(review): raises a bare Python TypeError, unlike the
    # Exception('TypeError: ...') convention elsewhere in this file, so
    # `throws?` cannot match it by name -- confirm before unifying.
    raise TypeError('`<` expected number or string, received {} and {}'.format(l.py_object, r.py_object))
def gt(l,r):
    """Mirror of lt()."""
    assert isinstance(l,MiniObject)
    assert isinstance(r,MiniObject)
    if is_number(l.py_object) and is_number(r.py_object):
        return l.py_object > r.py_object
    if isinstance(l.py_object,str) and isinstance(r.py_object,str):
        return l.py_object > r.py_object
    if isinstance(l.py_object,MiniSymbol) and isinstance(r.py_object,MiniSymbol):
        return l.py_object.string > r.py_object.string
    raise TypeError('`>` expected number or string, received {} and {}'.format(l.py_object, r.py_object))
def le(l,r):
    """Non-strict <= built from lt/eq."""
    return lt(l,r) or eq(l,r)
def ge(l,r):
    """Non-strict >= built from gt/eq."""
    return gt(l,r) or eq(l,r)
def cons(l,r):
    """Allocate a new boxed pair."""
    return MiniObject(MiniPair(l,r))
def car(p):
    """First element of a pair (no type check: fails on non-pairs)."""
    return p.py_object.car
def cdr(p):
    """Rest of a pair (no type check: fails on non-pairs)."""
    return p.py_object.cdr
def is_cons_list(mini_object):
    """TRUE for NIL or any pair.  Only the head is inspected; the chain
    is not verified to be NIL-terminated."""
    assert isinstance(mini_object,MiniObject)
    if eq(mini_object,NIL) or isinstance(mini_object.py_object,MiniPair):
        return TRUE
    return FALSE
# The cons-dict is an (unbalanced) binary search tree built from pairs:
# each node is cons(cons(key, value), cons(left-subtree, right-subtree)),
# ordered by lt/gt on the keys.  Operations are persistent: setters return
# a new tree and never mutate the argument.
def cons_dict_set(dictionary,key,value):
    """Return a new tree equal to `dictionary` with key bound to value."""
    assert isinstance(dictionary,MiniObject)
    assert isinstance(key,MiniObject)
    assert isinstance(value,MiniObject)
    if eq(dictionary,NIL):
        # Fresh leaf node with empty subtrees.
        return cons(cons(key,value),cons(NIL,NIL))
    current_node_key = car(car(dictionary))
    if lt(key,current_node_key):
        # Rebuild with the new binding inserted into the left subtree.
        return cons(
            car(dictionary),
            cons(
                cons_dict_set(car(cdr(dictionary)), key, value),
                cdr(cdr(dictionary))))
    if gt(key,current_node_key):
        # Rebuild with the new binding inserted into the right subtree.
        return cons(
            car(dictionary),
            cons(
                car(cdr(dictionary)),
                cons_dict_set(cdr(cdr(dictionary)), key, value)))
    if eq(key,current_node_key):
        # Same key: replace this node's binding, keep both subtrees.
        return cons(cons(key,value), cdr(dictionary))
    assert False
def cons_dict_get(dictionary,key):
    """Look up `key`; KeyError (as a message-encoded Exception) when absent."""
    assert isinstance(dictionary, MiniObject)
    assert isinstance(key, MiniObject)
    if eq(dictionary,NIL):
        raise Exception('KeyError: Dictionary does not contain key "{}"'.format(key))
    current_node_key = car(car(dictionary))
    if lt(key, current_node_key):
        return cons_dict_get(car(cdr(dictionary)), key)
    if gt(key, current_node_key):
        return cons_dict_get(cdr(cdr(dictionary)), key)
    if eq(key, current_node_key):
        return cdr(car(dictionary))
def cons_dict_has_key(dictionary,key):
    """TRUE/FALSE membership test mirroring cons_dict_get's traversal."""
    assert isinstance(dictionary, MiniObject)
    assert isinstance(key, MiniObject)
    if eq(dictionary,NIL):
        return FALSE
    current_node_key = car(car(dictionary))
    if lt(key, current_node_key):
        return cons_dict_has_key(car(cdr(dictionary)), key)
    if gt(key, current_node_key):
        return cons_dict_has_key(cdr(cdr(dictionary)), key)
    if eq(key, current_node_key):
        return TRUE
def identifier_to_symbol(identifier):
    """Convert an Identifier MiniObject to the interned symbol of the same name."""
    assert isinstance(identifier, MiniObject)
    py_value = identifier.py_object
    if not isinstance(py_value, Identifier):
        raise Exception('`identifier->symbol` expected identifier, received {}'.format(type(py_value)))
    return create_symbol(py_value.symbol)
def read(string):
    """Parse exactly one expression from a mini source string."""
    assert isinstance(string,MiniObject)
    if not isinstance(string.py_object,str):
        # Fixed: the original formatted `strin.py_object` (a typo), raising
        # NameError instead of this TypeError message.
        raise Exception("TypeError: `read` expected string, got {}".format(type(string.py_object)))
    result = parse_all(string.py_object)
    # Exactly one top-level expression is allowed.
    assert cdr(result) == NIL
    return car(result)
# Root environment exposed to mini code.  Entries built with py_to_mini
# receive *evaluated* arguments; entries built directly from
# MiniApplicative are special forms and receive raw operands.
builtins = {
    # Builtin constants
    'true' : TRUE,
    'false' : FALSE,
    'nil' : NIL,
    # Builtin comparison functions
    '=' : py_to_mini(eq),
    '<' : py_to_mini(lt),
    '>' : py_to_mini(gt),
    '<=' : py_to_mini(le),
    '>=' : py_to_mini(ge),
    # Builtin conversion functions
    'identifier->symbol' : py_to_mini(identifier_to_symbol),
    # Builtin type test functions
    # NOTE(review): is_identifier is not defined in this chunk; presumably
    # defined earlier in the file alongside MiniObject/Identifier.
    'cons-list?' : py_to_mini(is_cons_list),
    'identifier?' : py_to_mini(is_identifier),
    # Builtin general functions
    'evaluate' : py_to_mini(evaluate),
    'evaluate-expressions' : py_to_mini(evaluate_expressions),
    'print' : py_to_mini(print),
    'prompt' : py_to_mini(raw_input),
    'read-file' : py_to_mini(read_file),
    'write-file' : py_to_mini(write_file),
    'read' : py_to_mini(read),
    'wrap' : py_to_mini(wrap),
    'unwrap' : py_to_mini(unwrap),
    # Builtin number functions
    '+' : py_to_mini(add),
    '-' : py_to_mini(subtract),
    '*' : py_to_mini(multiply),
    '/' : py_to_mini(divide),
    '//' : py_to_mini(idivide),
    'mod' : py_to_mini(mod),
    # Builtin pair functions
    'cons' : py_to_mini(cons),
    'car' : py_to_mini(car),
    'cdr' : py_to_mini(cdr),
    # Builtin cons dictionary functions
    'cons-dict-set' : py_to_mini(cons_dict_set),
    'cons-dict-get' : py_to_mini(cons_dict_get),
    # Builtin string functions
    'concatenate' : py_to_mini(concatenate),
    'length' : py_to_mini(length),
    'slice' : py_to_mini(slice),
    # Builtin boolean functions
    'not' : py_to_mini(_not),
    # Builtin special forms
    'assert' : MiniObject(MiniApplicative(_assert)),
    'define' : MiniObject(MiniApplicative(define)),
    'defined?' : MiniObject(MiniApplicative(defined_p)),
    'if' : MiniObject(MiniApplicative(_if)),
    'operative' : MiniObject(MiniApplicative(operative)),
    'throws?' : MiniObject(MiniApplicative(throws)),
}
# Convert the plain dict into a MiniEnvironment so mini code can walk it.
builtins = dict_to_environment(builtins)
if __name__ == '__main__':
    import os.path
    import sys
    arguments = sys.argv[1:]
    # Load the mini-language prelude that ships next to this script.
    predefineds = nest(builtins)
    predefineds_filename = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'predefineds.mini')
    with open(predefineds_filename, 'r') as predefineds_file:
        predefineds_source = predefineds_file.read()
    try:
        evaluate_expressions(parse_all(predefineds_source), predefineds)
    except:
        # NOTE(review): `traceback` is used but not imported in this chunk
        # -- presumably imported earlier in the file; verify.
        traceback.print_exc()
    if len(arguments) == 0:
        # REPL mode (raw_input: Python 2).
        environment = nest(predefineds)
        while True:
            source = raw_input('>>> ')
            try:
                print(evaluate_expressions(parse_all(source), environment))
            except:
                traceback.print_exc()
    else:
        # Script mode: first argument is the source file, the rest are
        # exposed to the script as __arguments__.
        filename = arguments[0]
        arguments = arguments[1:]
        environment = nest(predefineds)
        environment['__file__'] = MiniObject(os.path.join(os.path.realpath(filename)))
        environment['__arguments__'] = create_cons_collection(map(MiniObject,arguments))
        with open(filename,'r') as f:
            source = f.read()
        try:
            print(evaluate_expressions(parse_all(source), environment))
        except:
            traceback.print_exc()
| kerkeslager/sandbox | mini/mini.py | Python | mit | 27,970 |
from qtpy.QtWidgets import QFileDialog
import logging
import pandas as pd
class Export:
    """Export the fitting table to a user-chosen CSV file."""

    # CSV column order; must match the rows built by FormatTableForExport.
    header = ['x0', 'y0', 'x1', 'y1', 'row_index', 'column_index',
              'lock', 'active',
              'fitting_confidence', 'd_spacing_value', 'd_spacing_err',
              'sigma_value', 'sigma_err',
              'intensity_value', 'intensity_err',
              'alpha_value', 'alpha_err',
              'a1_value', 'a1_err',
              'a2_value', 'a2_err',
              'a5_value', 'a5_err',
              'a6_value', 'a6_err']

    def __init__(self, parent=None, grand_parent=None):
        # grand_parent is the main window: it holds the normalized-folder
        # widget and the march_table_dictionary exported below.
        self.parent = parent
        self.grand_parent = grand_parent

    def run(self):
        """Prompt for a CSV path and write the table; no-op when the user cancels."""
        logging.info("Exporting table")
        default_file_name = str(self.grand_parent.ui.normalized_folder.text()) + '_fitting_table.csv'
        # QFileDialog returns (filename, selected_filter); filename is ''
        # when the dialog is cancelled.
        table_file = QFileDialog.getSaveFileName(self.grand_parent,
                                                 'Select or Define Name of File!',
                                                 default_file_name,
                                                 "CSV (*.csv)")
        if table_file[0]:
            table_file = table_file[0]
            logging.info(f" table file selected: {table_file}")
            table_dictionary = self.grand_parent.march_table_dictionary
            o_table_formatted = FormatTableForExport(table=table_dictionary)
            pandas_data_frame = o_table_formatted.pandas_data_frame
            header = self.header
            pandas_data_frame.to_csv(table_file, header=header)
            logging.info(f" Table has been exporting in {table_file}")
        else:
            logging.info(" User canceled exporting table!")
class FormatTableForExport(object):
    """Flatten a fitting-table dictionary into a pandas DataFrame.

    Row column order must match Export.header (x0, y0, x1, y1, ...).
    The original emitted [x0, x1, y0, y1], so the y0 and x1 columns were
    swapped relative to the CSV header; fixed below.
    """

    # Overwritten per instance in __init__; kept for interface compatibility.
    pandas_data_frame = []

    def __init__(self, table={}):
        """Build self.pandas_data_frame from `table` (bin id -> entry dict)."""
        pandas_table = []
        for _key in table:
            _entry = table[_key]
            x0 = _entry['bin_coordinates']['x0']
            y0 = _entry['bin_coordinates']['y0']
            x1 = _entry['bin_coordinates']['x1']
            y1 = _entry['bin_coordinates']['y1']
            row_index = _entry['row_index']
            column_index = _entry['column_index']
            lock = _entry['lock']
            active = _entry['active']
            fitting_confidence = _entry['fitting_confidence']
            [d_spacing_val,
             d_spacing_err] = FormatTableForExport.get_val_err_fixed(_entry['d_spacing'])
            [sigma_val,
             sigma_err] = FormatTableForExport.get_val_err_fixed(_entry['sigma'])
            [intensity_val,
             intensity_err] = FormatTableForExport.get_val_err_fixed(_entry['intensity'])
            [alpha_val,
             alpha_err] = FormatTableForExport.get_val_err_fixed(_entry['alpha'])
            [a1_val,
             a1_err] = FormatTableForExport.get_val_err_fixed(_entry['a1'])
            [a2_val,
             a2_err] = FormatTableForExport.get_val_err_fixed(_entry['a2'])
            [a5_val,
             a5_err] = FormatTableForExport.get_val_err_fixed(_entry['a5'])
            [a6_val,
             a6_err] = FormatTableForExport.get_val_err_fixed(_entry['a6'])
            # x0, y0, x1, y1 -- matches Export.header (was x0, x1, y0, y1).
            _row = [x0, y0, x1, y1,
                    row_index, column_index,
                    lock, active,
                    fitting_confidence,
                    d_spacing_val, d_spacing_err,
                    sigma_val, sigma_err,
                    intensity_val, intensity_err,
                    alpha_val, alpha_err,
                    a1_val, a1_err,
                    a2_val, a2_err,
                    a5_val, a5_err,
                    a6_val, a6_err,
                    ]
            pandas_table.append(_row)
        pandas_data_frame = pd.DataFrame.from_dict(pandas_table)
        self.pandas_data_frame = pandas_data_frame

    @staticmethod
    def get_val_err_fixed(item):
        """Return [value, error] from a {'val': ..., 'err': ...} dict."""
        return [item['val'], item['err']]
| ornlneutronimaging/iBeatles | src/iBeatles/table_dictionary/export.py | Python | mit | 3,916 |
from contextional import GCM
# Each scenario below registers a small test tree with the Group Context
# Manager (GCM): group A with fixtures, child group B with one test.
# Together they exercise every combination of described / undescribed
# setups and teardowns, passing and failing.  `expected_stream_output`
# at the bottom is the exact verbose-runner output, one section per
# scenario, in the same order.

# Scenario 1: undescribed fixtures, everything passes.
with GCM("A") as A:
    @GCM.add_setup
    def setUp():
        pass
    @GCM.add_teardown
    def tearDown():
        pass
    with GCM.add_group("B"):
        @GCM.add_setup
        def setUp():
            pass
        @GCM.add_test("some test")
        def test(case):
            pass
        @GCM.add_teardown
        def tearDown():
            pass
A.create_tests()
# Scenario 2: all fixtures described, everything passes.
with GCM("A") as A:
    @GCM.add_setup("setup w/ description")
    def setUp():
        pass
    @GCM.add_teardown("teardown w/ description")
    def tearDown():
        pass
    with GCM.add_group("B"):
        @GCM.add_setup("setup w/ description")
        def setUp():
            pass
        @GCM.add_test("some test")
        def test(case):
            pass
        @GCM.add_teardown("teardown w/ description")
        def tearDown():
            pass
A.create_tests()
# Scenario 3: described setup of A raises -> setup ERROR, test FAILs.
with GCM("A") as A:
    @GCM.add_setup("setup w/ description")
    def setUp():
        raise Exception
        pass
    @GCM.add_teardown("teardown w/ description")
    def tearDown():
        pass
    with GCM.add_group("B"):
        @GCM.add_setup("setup w/ description")
        def setUp():
            pass
        @GCM.add_test("some test")
        def test(case):
            pass
        @GCM.add_teardown("teardown w/ description")
        def tearDown():
            pass
A.create_tests()
# Scenario 4: *undescribed* setup of A raises -> reported as "setup (1/1)".
with GCM("A") as A:
    @GCM.add_setup
    def setUp():
        raise Exception
        pass
    @GCM.add_teardown("teardown w/ description")
    def tearDown():
        pass
    with GCM.add_group("B"):
        @GCM.add_setup("setup w/ description")
        def setUp():
            pass
        @GCM.add_test("some test")
        def test(case):
            pass
        @GCM.add_teardown("teardown w/ description")
        def tearDown():
            pass
A.create_tests()
# Scenario 5: first (undescribed) of B's two teardowns raises.
with GCM("A") as A:
    @GCM.add_setup("setup w/ description")
    def setUp():
        pass
    @GCM.add_teardown("teardown w/ description")
    def tearDown():
        pass
    with GCM.add_group("B"):
        @GCM.add_setup("setup w/ description")
        def setUp():
            pass
        @GCM.add_test("some test")
        def test(case):
            pass
        @GCM.add_teardown
        def tearDown():
            raise Exception()
        @GCM.add_teardown("teardown w/ description")
        def tearDown():
            pass
A.create_tests()
# Scenario 6: same as 5 but group A has no teardown at all.
with GCM("A") as A:
    @GCM.add_setup("setup w/ description")
    def setUp():
        pass
    with GCM.add_group("B"):
        @GCM.add_setup("setup w/ description")
        def setUp():
            pass
        @GCM.add_test("some test")
        def test(case):
            pass
        @GCM.add_teardown
        def tearDown():
            raise Exception()
        @GCM.add_teardown("teardown w/ description")
        def tearDown():
            pass
A.create_tests()
# Scenario 7: A has a passing undescribed teardown; B's first teardown raises.
with GCM("A") as A:
    @GCM.add_setup("setup w/ description")
    def setUp():
        pass
    @GCM.add_teardown
    def tearDown():
        pass
    with GCM.add_group("B"):
        @GCM.add_setup("setup w/ description")
        def setUp():
            pass
        @GCM.add_test("some test")
        def test(case):
            pass
        @GCM.add_teardown
        def tearDown():
            raise Exception()
        @GCM.add_teardown("teardown w/ description")
        def tearDown():
            pass
A.create_tests()
# Scenario 8: undescribed teardowns raise in both A and B.
with GCM("A") as A:
    @GCM.add_setup("setup w/ description")
    def setUp():
        pass
    @GCM.add_teardown
    def tearDown():
        raise Exception()
        pass
    @GCM.add_teardown("teardown w/ description")
    def tearDown():
        pass
    with GCM.add_group("B"):
        @GCM.add_setup("setup w/ description")
        def setUp():
            pass
        @GCM.add_test("some test")
        def test(case):
            pass
        @GCM.add_teardown
        def tearDown():
            raise Exception()
        @GCM.add_teardown("teardown w/ description")
        def tearDown():
            pass
A.create_tests()
# Scenario 9: B has three teardowns; the middle (undescribed) one raises
# -> reported as "teardown (2/3)".
with GCM("A") as A:
    @GCM.add_setup("setup w/ description")
    def setUp():
        pass
    @GCM.add_teardown("teardown w/ description")
    def tearDown():
        pass
    with GCM.add_group("B"):
        @GCM.add_setup("setup w/ description")
        def setUp():
            pass
        @GCM.add_test("some test")
        def test(case):
            pass
        @GCM.add_teardown("teardown w/ description")
        def tearDown():
            pass
        @GCM.add_teardown
        def tearDown():
            raise Exception()
        @GCM.add_teardown("teardown w/ description")
        def tearDown():
            pass
A.create_tests()
# Scenario 10: a *described* teardown of B raises.
with GCM("A") as A:
    @GCM.add_setup("setup w/ description")
    def setUp():
        pass
    @GCM.add_teardown("teardown w/ description")
    def tearDown():
        pass
    with GCM.add_group("B"):
        @GCM.add_setup("setup w/ description")
        def setUp():
            pass
        @GCM.add_test("some test")
        def test(case):
            pass
        @GCM.add_teardown("teardown w/ description")
        def tearDown():
            raise Exception()
        @GCM.add_teardown("teardown w/ description")
        def tearDown():
            pass
A.create_tests()
# Scenario 11: undescribed fixtures again, everything passes.
with GCM("A") as A:
    @GCM.add_setup
    def setUp():
        pass
    @GCM.add_teardown
    def tearDown():
        pass
    with GCM.add_group("B"):
        @GCM.add_setup
        def setUp():
            pass
        @GCM.add_test("some test")
        def test(case):
            pass
        @GCM.add_teardown
        def tearDown():
            pass
A.create_tests()
# Exact verbose-runner output for the eleven scenarios above, in order.
# Undescribed fixtures print nothing unless they fail, in which case they
# appear as "# setup (i/n)" / "# teardown (i/n)".
expected_stream_output = [
    "A",
    "  B",
    "    some test ... ok",
    "A",
    "  # setup w/ description ",
    "  B",
    "    # setup w/ description ",
    "    some test ... ok",
    "    # teardown w/ description ",
    "  # teardown w/ description ",
    "A",
    "  # setup w/ description ERROR",
    "  B",
    "    some test ... FAIL",
    "  # teardown w/ description ",
    "A",
    "  # setup (1/1) ERROR",
    "  B",
    "    some test ... FAIL",
    "  # teardown w/ description ",
    "A",
    "  # setup w/ description ",
    "  B",
    "    # setup w/ description ",
    "    some test ... ok",
    "    # teardown (1/2) ERROR",
    "  # teardown w/ description ",
    "A",
    "  # setup w/ description ",
    "  B",
    "    # setup w/ description ",
    "    some test ... ok",
    "    # teardown (1/2) ERROR",
    "A",
    "  # setup w/ description ",
    "  B",
    "    # setup w/ description ",
    "    some test ... ok",
    "    # teardown (1/2) ERROR",
    "A",
    "  # setup w/ description ",
    "  B",
    "    # setup w/ description ",
    "    some test ... ok",
    "    # teardown (1/2) ERROR",
    "  # teardown (1/2) ERROR",
    "A",
    "  # setup w/ description ",
    "  B",
    "    # setup w/ description ",
    "    some test ... ok",
    "    # teardown w/ description ",
    "    # teardown (2/3) ERROR",
    "  # teardown w/ description ",
    "A",
    "  # setup w/ description ",
    "  B",
    "    # setup w/ description ",
    "    some test ... ok",
    "    # teardown w/ description ERROR",
    "  # teardown w/ description ",
    "A",
    "  B",
    "    some test ... ok",
]
| SalmonMode/contextional | contextional/test_resources/verbose_fixtures.py | Python | mit | 7,418 |
#!/usr/bin/env python
import sys
import os
import logging
from extensions import valid_tagging_extensions
from readSettings import ReadSettings
from mkvtomp4 import MkvtoMp4
from tmdb_mp4 import tmdb_mp4
from autoprocess import plex
from post_processor import PostProcessor
from logging.config import fileConfig
# Pick a writable log directory: the system path on POSIX, falling back to
# the script's own directory on Windows or when the path cannot be created.
logpath = '/var/log/sickbeard_mp4_automator'
if os.name == 'nt':
    logpath = os.path.dirname(sys.argv[0])
elif not os.path.isdir(logpath):
    try:
        os.mkdir(logpath)
    except:
        logpath = os.path.dirname(sys.argv[0])
# Backslashes are doubled because fileConfig treats its values as
# %-interpolated strings.
configPath = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), 'logging.ini')).replace("\\", "\\\\")
logPath = os.path.abspath(os.path.join(logpath, 'index.log')).replace("\\", "\\\\")
fileConfig(configPath, defaults={'logfilename': logPath})
log = logging.getLogger("CouchPotatoPostConversion")
log.info('MP4 Automator - Post processing script initialized')
settings = ReadSettings(os.path.dirname(sys.argv[0]), "autoProcess.ini")
converter = MkvtoMp4(settings)
# CouchPotato invokes this script with: IMDB id, converted file path,
# original release file name.
imdbid = sys.argv[1]
inputfile = sys.argv[2]
original = sys.argv[3]
log.debug("IMDBID: %s" % imdbid)
log.debug("Input file path: %s" % inputfile)
log.debug("Original file name: %s" % original)
try:
    log.info('Processing file: %s', inputfile)
    if MkvtoMp4(settings).validSource(inputfile):
        log.info('File is valid')
        output = converter.process(inputfile, original=original)
        if output:
            # Tag with metadata
            if settings.tagfile and output['output_extension'] in valid_tagging_extensions:
                log.info('Tagging file with IMDB ID %s', imdbid)
                try:
                    tagmp4 = tmdb_mp4(imdbid, original=original, language=settings.taglanguage)
                    tagmp4.setHD(output['x'], output['y'])
                    tagmp4.writeTags(output['output'], settings.artwork)
                except:
                    # Tagging is best-effort; conversion output is kept.
                    log.error("Unable to tag file")
            # Copy to additional locations
            output_files = converter.replicate(output['output'])
            # Run any post process scripts
            if settings.postprocess:
                post_processor = PostProcessor(output_files, log)
                post_processor.setMovie(imdbid)
                post_processor.run_scripts()
            # Ask Plex to pick up the new file.
            plex.refreshPlex(settings, 'movie', log)
    else:
        log.info('File %s is invalid, ignoring' % inputfile)
except:
    log.exception('File processing failed: %s' % inputfile)
| Collisionc/sickbeard_mp4_automator | postCouchPotato.py | Python | mit | 2,496 |
# http://codingbat.com/prob/p145834
def last2(str):
    """Count how many times the final two characters of `str` appear
    earlier in the string (the trailing pair itself is not counted)."""
    if len(str) < 2:
        return 0
    tail = str[-2:]
    return sum(1 for i in range(len(str) - 2) if str[i:i + 2] == tail)
| dvt32/cpp-journey | Python/CodingBat/last2.py | Python | mit | 213 |
# Long description shown on PyPI / by setuptools.
DESCRIPTION = """\
ACQ4 is a python-based platform for experimental neurophysiology.
It includes support for standard electrophysiology, multiphoton imaging,
scanning laser photostimulation, and many other experimental techniques. ACQ4 is
highly modular and extensible, allowing support to be added for new types of
devices, techniques, user-interface modules, and analyses.
"""
# Keyword arguments passed through to the setup() call below.
setupOpts = dict(
    name='acq4',
    description='Neurophysiology acquisition and analysis platform',
    long_description=DESCRIPTION,
    license='MIT',
    url='http://www.acq4.org',
    author='Luke Campagnola',
    author_email='luke.campagnola@gmail.com',
)
from setuptools import setup
import distutils.dir_util
import distutils.sysconfig
import os, sys, re
from subprocess import check_output
## generate list of all sub-packages
# Walk the acq4/ tree and turn every directory containing __init__.py
# into a dotted package name for setup().
path = os.path.abspath(os.path.dirname(__file__))
n = len(path.split(os.path.sep))
subdirs = [i[0].split(os.path.sep)[n:] for i in os.walk(os.path.join(path, 'acq4')) if '__init__.py' in i[2]]
allPackages = ['.'.join(p) for p in subdirs]
## Make sure build directory is clean before installing
buildPath = os.path.join(path, 'build')
if os.path.isdir(buildPath):
    distutils.dir_util.remove_tree(buildPath)
## Determine current version string
# The base version always comes from acq4/__init__.py.
initfile = os.path.join(path, 'acq4', '__init__.py')
init = open(initfile).read()
m = re.search(r'__version__ = (\S+)\n', init)
if m is None or len(m.groups()) != 1:
    raise Exception("Cannot determine __version__ from init file: '%s'!" % initfile)
version = m.group(1).strip('\'\"')
initVersion = version
# If this is a git checkout, try to generate a more decriptive version string
try:
    if os.path.isdir(os.path.join(path, '.git')):
        def gitCommit(name):
            # Resolve a ref name to its full commit hash via `git show`.
            commit = check_output(['git', 'show', name], universal_newlines=True).split('\n')[0]
            assert commit[:7] == 'commit '
            return commit[7:]
        # Find last tag matching "acq4-.*"
        tagNames = check_output(['git', 'tag'], universal_newlines=True).strip().split('\n')
        while True:
            if len(tagNames) == 0:
                raise Exception("Could not determine last tagged version.")
            lastTagName = tagNames.pop()
            if re.match(r'acq4-.*', lastTagName):
                break
        # is this commit an unchanged checkout of the last tagged version?
        lastTag = gitCommit(lastTagName)
        head = gitCommit('HEAD')
        if head != lastTag:
            # Not on the tag: append branch name and short commit hash.
            branch = re.search(r'\* (.*)', check_output(['git', 'branch'], universal_newlines=True)).group(1)
            version = version + "-%s-%s" % (branch, head[:10])
        # any uncommitted modifications?
        modified = False
        status = check_output(['git', 'status', '-s'], universal_newlines=True).strip().split('\n')
        for line in status:
            # '??' lines are untracked files and do not count as modifications.
            if line.strip() != '' and line[:2] != '??':
                modified = True
                break
        if modified:
            version = version + '+'
        sys.stderr.write("Detected git commit; will use version string: '%s'\n" % version)
except:
    # Any git failure falls back to the plain __init__ version.
    version = initVersion
    sys.stderr.write("This appears to be a git checkout, but an error occurred "
                     "while attempting to determine a version string for the "
                     "current commit.\nUsing the unmodified version string "
                     "instead: '%s'\n" % version)
    sys.excepthook(*sys.exc_info())
print("__init__ version: %s current version: %s" % (initVersion, version))
# Refuse to upload anything whose version string was locally modified.
if 'upload' in sys.argv and version != initVersion:
    print("Base version does not match current; stubbornly refusing to upload.")
    exit()
import distutils.command.build
class Build(distutils.command.build.build):
    """Custom build command that patches the git-derived version string
    into the built package, and fixes the Windows launcher script."""
    def run(self):
        # Run the normal distutils build first.
        ret = distutils.command.build.build.run(self)

        # If the version in __init__ is different from the automatically-generated
        # version string, then we will update __init__ in the build directory
        global path, version, initVersion
        if initVersion == version:
            return ret

        initfile = os.path.join(path, self.build_lib, 'acq4', '__init__.py')
        if not os.path.isfile(initfile):
            # Warn but do not fail the build; the original version string remains.
            sys.stderr.write("Warning: setup detected a git install and attempted "
                             "to generate a descriptive version string; however, "
                             "the expected build file at %s was not found. "
                             "Installation will use the original version string "
                             "%s instead.\n" % (initfile, initVersion)
                             )
        else:
            # Rewrite __version__ in the built copy only (source tree untouched).
            data = open(initfile, 'r').read()
            open(initfile, 'w').write(re.sub(r"__version__ = .*", "__version__ = '%s'" % version, data))

        # If this is windows, we need to update acq4.bat to reference the correct python executable.
        if sys.platform == 'win32':
            runner = os.path.join(path, self.build_scripts, 'acq4.bat')
            runcmd = "%s -m acq4" % sys.executable
            data = open(runner, 'r').read()
            open(runner, 'w').write(re.sub(r'python -m acq4', runcmd, data))

        return ret
# copy config tree to system location
# if sys.platform == 'win32':
#     dataRoot = os.path.join(os.environ['ProgramFiles'], 'acq4')
# elif sys.platform == 'darwin':
#     dataRoot = 'Library/Application Support/acq4'
# else:
#     dataRoot = '/etc/acq4'
# instead, just install config example to same path as package.
# dataRoot must be *relative* so distutils installs below sys.prefix.
if sys.platform == 'win32':
    #dataRoot = distutils.sysconfig.get_python_lib().replace(sys.prefix, '')
    dataRoot = 'Lib/site-packages/acq4'
else:
    #dataRoot = 'python%d.%d/site-packages/acq4' % (sys.version_info.major, sys.version_info.minor)
    dataRoot = distutils.sysconfig.get_python_lib().replace(sys.prefix+'/', '') + '/acq4'

# Map every directory under config/ to its install destination below
# dataRoot, producing setup(data_files=...) entries.
dataFiles = []
configRoot = os.path.join(path, 'config')
for subpath, _, files in os.walk(configRoot):
    endPath = subpath[len(path):].lstrip(os.path.sep)
    files = [os.path.join(endPath, f) for f in files]
    dataFiles.append((os.path.join(dataRoot, endPath), files))
    # print dataFiles[-1]
# Collect non-Python resource files shipped inside the acq4 package
# (icons, cached data, C headers, DLLs), as paths relative to the
# package root for setup(package_data=...).
packageData = []
pkgRoot = os.path.join(path, 'acq4')
for subpath, _, files in os.walk(pkgRoot):
    for f in files:
        # str.endswith accepts a tuple of suffixes; this also removes the
        # unused 'addTo' local that the original loop assigned and never read.
        if f.endswith(('.png', '.cache', '.h', '.hpp', '.dll')):
            packageData.append(os.path.join(subpath, f)[len(pkgRoot):].lstrip(os.path.sep))

# Platform-specific launcher script installed by setup(scripts=...).
if sys.platform == 'win32':
    scripts = ['bin/acq4.bat']
else:
    scripts = ['bin/acq4']
# Final setup() invocation; static metadata comes from setupOpts at the
# top of this file, everything else was computed above.
setup(
    version=version,
    cmdclass={'build': Build},
    packages=allPackages,
    package_dir={},
    package_data={'acq4': packageData},
    data_files=dataFiles,
    classifiers = [
        "Programming Language :: Python",
        "Programming Language :: Python :: 2",
        "Programming Language :: Python :: 2.7",
        "Development Status :: 4 - Beta",
        "Environment :: Other Environment",
        "Intended Audience :: Science/Research",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
        "Topic :: Software Development :: Libraries :: Python Modules",
        "Topic :: Scientific/Engineering",
    ],
    install_requires = [
        'numpy',
        'scipy',
        'h5py',
        'pillow',
    ],
    scripts = scripts,
    **setupOpts
)
| mgraupe/acq4 | setup.py | Python | mit | 7,496 |
# -*- coding: utf-8 -*-
"""Unit test suite for the models of the application."""
from nose.tools import eq_
from tg2express.model import DBSession
from tg2express.tests import load_app
from tg2express.tests import setup_db, teardown_db
__all__ = ['ModelTest']
def setup():
    """Setup test fixture for all model tests.

    Runs once per test module (nose module-level setup): loads the
    application configuration, then creates the database schema.
    """
    load_app()
    setup_db()
def teardown():
    """Tear down test fixture for all model tests.

    Runs once per test module (nose module-level teardown): drops the
    database schema created by setup().
    """
    teardown_db()
class ModelTest(object):
    """Base unit test case for the models.

    Subclasses set ``klass`` to the model class under test and ``attrs``
    to the constructor keyword arguments; each test method gets a fresh
    instance flushed to the session, rolled back afterwards.
    """

    # Model class under test; set by subclasses.
    klass = None
    # Constructor keyword arguments for the test instance.
    attrs = {}

    def setUp(self):
        """Setup test fixture for each model test method."""
        try:
            new_attrs = {}
            new_attrs.update(self.attrs)
            # Subclass-provided related objects override/extend attrs.
            new_attrs.update(self.do_get_dependencies())
            self.obj = self.klass(**new_attrs)
            DBSession.add(self.obj)
            DBSession.flush()
            return self.obj
        except:
            # Re-raised after rollback, so the bare except is safe here.
            DBSession.rollback()
            raise

    def tearDown(self):
        """Tear down test fixture for each model test method."""
        DBSession.rollback()

    def do_get_dependencies(self):
        """Get model test dependencies.

        Use this method to pull in other objects that need to be created
        for this object to be build properly.
        """
        return {}

    def test_create_obj(self):
        """Model objects can be created"""
        pass

    def test_query_obj(self):
        """Model objects can be queried"""
        obj = DBSession.query(self.klass).one()
        # Every attribute passed to the constructor must round-trip.
        for key, value in self.attrs.items():
            eq_(getattr(obj, key), value)
| archsh/tg2ext.express | example/tg2express/tests/models/__init__.py | Python | mit | 1,626 |
import datetime
import sys
import os.path
import pprint
from inspect import getmembers, isclass
from collections import defaultdict
from optparse import make_option
from django.core.management.base import BaseCommand, CommandError
from django.contrib.contenttypes.models import ContentType
from django.conf import settings
from django.db.models.base import ModelBase
from jinja2 import FileSystemLoader, Environment, PackageLoader, ChoiceLoader
from operis.log import log
from operis.utils import clean, convert, convert_friendly, underscore, firstLower
class Command(BaseCommand):
    """Django (Python 2) management command that inspects the models listed
    in settings.EMBER_MODELS and renders generic API scaffolding
    (serializers, views, urls, tests) from Jinja templates."""
    help = 'Creates Generic API Scaffolding'
    logger = None
    option_list = BaseCommand.option_list + (
        make_option('--regenerate',
            action='store_true',
            dest='regenerate',
            default=False,
            help='Wipe Prior instances'),
        )

    def handle(self, *args, **options):
        """Collect field metadata for every exposed model, then render the
        API templates into <project>/../<API_APP_NAME>."""
        self.logger = log( self )
        wipe = False
        if options['regenerate']:
            # NOTE(review): 'wipe' is set here but never used below —
            # --regenerate currently has no effect; TODO confirm intent.
            wipe = True
        modules = map(__import__, settings.EMBER_MODELS)
        model_history = []
        model_instances = []
        # Walk every model class in every configured module.
        for model in modules:
            for name, obj in getmembers(model.models):
                if isclass(obj):
                    if isinstance(obj, ModelBase):
                        self.logger.log("Object Name is: %s",[obj.__name__],"notice")
                        if obj.__name__ in settings.EMBER_MODEL_EXCLUSIONS:
                            continue
                        # Build a metadata dict per field, skipping fields
                        # without an internal type (e.g. reverse relations).
                        field_history = []
                        fields_extended = []
                        for aname in obj._meta.get_all_field_names():
                            f, modelItem, direct, m2m = obj._meta.get_field_by_name(aname)
                            #self.logger.log("Field Name :: %s",[f.name],"info")
                            #self.logger.log("Field Class :: %s",[type(f).__name__],"info")
                            try:
                                assert(f.get_internal_type())
                                #self.logger.log("Field Type :: %s",[f.get_internal_type()],"debug")
                            except:
                                #for a in dir(f):
                                #    print "%s = %s" % (a,getattr(f,a))
                                continue
                            #sys.exit(0)
                            # An optional inner Ember class can whitelist fields.
                            if hasattr(obj ,"Ember") and hasattr(obj.Ember,'fields'):
                                if f.name not in obj.Ember.fields:
                                    continue
                            if convert(f.name) not in field_history:
                                field = {}
                                field['name'] = convert(f.name)
                                field['name_underscore'] = underscore(f.name)
                                field['name_friendly'] = convert_friendly(f.name)
                                field['type'] = f.get_internal_type()
                                field['class'] = type(f).__name__
                                # Relations also record the target model.
                                if field['type'] == "ForeignKey" or field['type'] == "ManyToManyField":
                                    has_parent = True
                                    field['parent'] = underscore(f.rel.to.__name__)
                                    field['parent_class'] = f.rel.to.__name__
                                    field['parent_class_app'] = str(f.rel.to._meta.app_label)
                                field_history.append(convert(f.name))
                                fields_extended.append(field)
                        #print fields_extended
                        # Defaults, optionally overridden via obj.Ember below.
                        index_list = ['id']
                        index_converted = ''
                        fields = ['id']
                        fields_converted = ''
                        filter_fields = ['id']
                        filter_fields_converted = ''
                        search_fields = ['id']
                        search_fields_converted = ''
                        singular = None
                        plural = None
                        fixture_seed = 1
                        if hasattr(obj._meta ,"verbose_name"):
                            singular = str(obj._meta.verbose_name)
                        else:
                            singular = obj.__name__.title()
                        # Skip models whose singular name was already seen.
                        if singular.lower() in model_history:
                            continue
                        if hasattr(obj._meta ,"verbose_name_plural"):
                            plural = str(obj._meta.verbose_name_plural)
                        else:
                            plural = obj.__name__
                        #Add to our Plural-Item Controllers
                        if hasattr(obj ,"Ember"):
                            if hasattr(obj.Ember,'fields'):
                                fields = []
                                for f in obj.Ember.fields:
                                    fields.append(convert(f))
                            if hasattr(obj.Ember,'index_list'):
                                index_list = []
                                for f in obj.Ember.index_list:
                                    index_list.append(convert(f))
                            if hasattr(obj.Ember,'filter_fields'):
                                filter_fields = obj.Ember.filter_fields
                            if hasattr(obj.Ember,'search_fields'):
                                search_fields = obj.Ember.search_fields
                            if hasattr(obj.Ember,'fixture_seed'):
                                fixture_seed = obj.Ember.fixture_seed
                        # Pre-render the python source fragments the
                        # templates will splice into the generated files.
                        fields_converted = "fields = ('" + "','".join(fields) + "')"
                        index_converted = "fields = ('" + "','".join(index_list) + "')"
                        filter_fields_converted = "filter_fields = ['" + "','".join(filter_fields) + "']"
                        search_fields_converted = "search_fields = ['" + "','".join(search_fields) + "']"
                        item = { "model": name,
                                "fixture_seed": fixture_seed,
                                "singular": clean(singular),
                                "singular_converted": convert(singular),
                                "plural": clean(plural),
                                "plural_converted": convert(plural),
                                "emberCase": firstLower(plural),
                                "index_converted": index_converted,
                                "fields": fields_extended,
                                "fields_converted": fields_converted,
                                "filter_fields_converted": filter_fields_converted,
                                "search_fields_converted": search_fields_converted,
                                }
                        model_history.append(singular.lower())
                        model_instances.append(item)
                        print "=============================="
                        #print obj.__name__
        #sys.exit(0)
        global_exts = getattr(settings, 'JINJA_EXTS', ())
        #env = Environment(extensions=global_exts,loader=FileSystemLoader('templates'))
        env = Environment(extensions=global_exts,loader=PackageLoader('operis','templates'))
        basedir = settings.PROJECT_DIR + "/../" + settings.API_APP_NAME
        #Create Operis Subdirectories
        if not os.path.exists(basedir):
            os.makedirs(basedir)
        if not os.path.exists(basedir+ "/django_operis"):
            os.makedirs(basedir+ "/django_operis")
        if not os.path.isfile(basedir+ "/django_operis/__init__.py"):
            # NOTE: 'file' shadows the builtin throughout; harmless in py2.
            file = open(basedir+ "/django_operis/__init__.py", "w")
            file.write('')
            file.close()
        if not os.path.exists(basedir+ "/tests"):
            os.makedirs(basedir+ "/tests")
        if not os.path.isfile(basedir+ "/tests/__init__.py"):
            file = open(basedir+ "/tests/__init__.py", "w")
            file.write('')
            file.close()
        self.logger.log("Directory is %s",[basedir],"notice")
        source = "%s/../../templates/api" % (os.path.dirname(__file__))
        self.logger.log("Source is %s",[source],"notice")
        # Render each template with the collected model metadata.
        self.logger.log("Creating Base Serializers",[],"success")
        template = env.get_template('api/serializers.py')
        filename = basedir + "/django_operis/serializers.py"
        args = {"app":settings.API_APP_NAME,"imports":settings.EMBER_MODELS,"models":model_instances,"app_name":settings.API_APP_NAME}
        output = template.render(args)
        file = open(filename, "w")
        file.write(output)
        file.close()
        self.logger.log("Creating Base Views",[],"success")
        template = env.get_template('api/views.py')
        filename = basedir + "/django_operis/views.py"
        args = {"app":settings.API_APP_NAME,"imports":settings.EMBER_MODELS,"models":model_instances,"app_name":settings.API_APP_NAME}
        output = template.render(args)
        file = open(filename, "w")
        file.write(output)
        file.close()
        self.logger.log("Creating Base URLs",[],"success")
        template = env.get_template('api/urls.py')
        filename = basedir + "/django_operis/urls.py"
        args = {"app":settings.API_APP_NAME,"imports":settings.EMBER_MODELS,"models":model_instances,"app_name":settings.API_APP_NAME}
        output = template.render(args)
        file = open(filename, "w")
        file.write(output)
        file.close()
        self.logger.log("Creating Base Tests",[],"success")
        template = env.get_template('api/test.py')
        filename = basedir + "/tests/test.py"
        # NOTE(review): "fields" here is whatever value the variable held
        # after the *last* model processed above — looks unintentional;
        # TODO confirm against the api/test.py template.
        args = {"app":settings.API_APP_NAME,"imports":settings.EMBER_MODELS,"fields":fields, "models":model_instances,"app_name":settings.API_APP_NAME}
        output = template.render(args)
        file = open(filename, "w")
        file.write(output)
        file.close()
        self.logger.log("Done, templates are in %s",[settings.EMBER_APP_NAME],"info")
| Goldcap/django-operis | operis/management/commands/generate-api.py | Python | mit | 11,299 |
# -*- coding: utf-8 -*-
# Copyright (c) 2019, Frappe Technologies and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from frappe.model.document import Document
from frappe.desk.doctype.notification_settings.notification_settings import (is_notifications_enabled, is_email_notifications_enabled_for_type, set_seen_value)
class NotificationLog(Document):
	def after_insert(self):
		"""Fan out a freshly inserted notification: realtime UI event,
		unseen flag, and (if enabled for this type) an email."""
		# Realtime event lets any open desk session refresh its bell icon.
		frappe.publish_realtime('notification', after_commit=True, user=self.for_user)
		set_notifications_as_unseen(self.for_user)
		if is_email_notifications_enabled_for_type(self.for_user, self.type):
			send_notification_email(self)
def get_permission_query_conditions(for_user):
	"""Return a SQL WHERE fragment restricting Notification Logs to the
	given user (or the session user); Administrator sees everything."""
	user = for_user or frappe.session.user
	if user == 'Administrator':
		return
	return "(`tabNotification Log`.for_user = '{0}')".format(user)
def get_title(doctype, docname, title_field=None):
	"""Resolve a document's display title.

	Falls back to the doctype's configured title field; when that field
	is "name", the docname itself is the title.
	"""
	field = title_field or frappe.get_meta(doctype).get_title_field()
	if field == "name":
		return docname
	return frappe.db.get_value(doctype, docname, field)
def get_title_html(title):
	"""Wrap *title* in the bold markup used in notification subjects."""
	return '<b class="subject-title">{}</b>'.format(title)
def enqueue_create_notification(users, doc):
	"""Queue creation of Notification Log records for *users*.

	During installation of a new site this is a no-op: enqueueing would
	try to connect to Redis, which may not be running yet, and a fresh
	install needs no notifications anyway.
	"""
	if frappe.flags.in_install:
		return

	doc = frappe._dict(doc)

	# Accept either a list of users or a comma-separated string.
	if isinstance(users, str):
		users = [part.strip() for part in users.split(',') if part.strip()]
	users = list(set(users))  # de-duplicate

	frappe.enqueue(
		'frappe.desk.doctype.notification_log.notification_log.make_notification_logs',
		doc=doc,
		users=users,
		now=frappe.flags.in_test
	)
def make_notification_logs(doc, users):
	"""Create one Notification Log per eligible user in *users*."""
	from frappe.social.doctype.energy_point_settings.energy_point_settings import is_energy_point_enabled

	for user in users:
		# Only notify existing, enabled users who have notifications on.
		if not frappe.db.exists('User', {"email": user, "enabled": 1}):
			continue
		if not is_notifications_enabled(user):
			continue
		# Energy point logs are pointless when the feature is disabled;
		# the check is user-independent, so stop for everyone.
		if doc.type == 'Energy Point' and not is_energy_point_enabled():
			return

		log = frappe.new_doc('Notification Log')
		log.update(doc)
		log.for_user = user
		# Skip self-notifications except for energy points and alerts.
		if log.for_user != log.from_user or doc.type in ('Energy Point', 'Alert'):
			log.insert(ignore_permissions=True)
def send_notification_email(doc):
	"""Send the email corresponding to a Notification Log entry."""
	# Energy-point logs without explicit email content are never emailed.
	if doc.type == 'Energy Point' and doc.email_content is None:
		return

	from frappe.utils import get_url_to_form, strip_html

	frappe.sendmail(
		recipients = doc.for_user,
		subject = strip_html(doc.subject),
		template = "new_notification",
		args = {
			'body_content': doc.subject,
			'description': doc.email_content,
			'document_type': doc.document_type,
			'document_name': doc.document_name,
			'doc_link': get_url_to_form(doc.document_type, doc.document_name)
		},
		header = [get_email_header(doc), 'orange'],
		now=frappe.flags.in_test
	)
def get_email_header(doc):
	"""Return the translated email header line for a notification log.

	An unrecognised doc.type raises KeyError.
	"""
	docname = doc.document_name
	headers = {
		'Default': _('New Notification'),
		'Mention': _('New Mention on {0}').format(docname),
		'Assignment': _('Assignment Update on {0}').format(docname),
		'Share': _('New Document Shared {0}').format(docname),
		'Energy Point': _('Energy Point Update on {0}').format(docname),
	}
	return headers[doc.type or 'Default']
@frappe.whitelist()
def mark_all_as_read():
	"""Mark every unread notification of the session user as read."""
	unread = frappe.db.get_all('Notification Log',
		filters={'read': 0, 'for_user': frappe.session.user})
	names = [entry.name for entry in unread]
	if names:
		# Single bulk update; modified timestamp intentionally untouched.
		frappe.db.set_value('Notification Log', {'name': ['in', names]}, 'read', 1, update_modified=False)
@frappe.whitelist()
def mark_as_read(docname):
	"""Mark a single notification as read; no-op on an empty docname."""
	if not docname:
		return
	frappe.db.set_value('Notification Log', docname, 'read', 1, update_modified=False)
@frappe.whitelist()
def trigger_indicator_hide():
	"""Tell the session user's open desk clients to hide the notification indicator."""
	frappe.publish_realtime('indicator_hide', user=frappe.session.user)
def set_notifications_as_unseen(user):
	"""Flag the user's Notification Settings as having unseen items.

	Silently does nothing when the user has no Notification Settings doc.
	"""
	try:
		frappe.db.set_value('Notification Settings', user, 'seen', 0)
	except frappe.DoesNotExistError:
		pass
| saurabh6790/frappe | frappe/desk/doctype/notification_log/notification_log.py | Python | mit | 4,285 |
'''
File: input.py
Author: Tristan van Vaalen
Handles user input
'''
import signal
import sys
import verbose
v = verbose.Verbose()
class InputHandler():
    """Interactive command loop (Python 2).

    SIGINT is intercepted: the first Ctrl+C requests a graceful stop of
    the loop, any further Ctrl+C exits the process immediately.
    """

    def __init__(self):
        v.debug('Initializing input handler').indent()
        self.running = True
        self.signal_level = 0  # number of SIGINTs received so far
        v.debug('Registering signal handler').unindent()
        signal.signal(signal.SIGINT, self.signal_handler)

    def test(self):
        # Placeholder for the 'test' command ("magic").
        pass

    def signal_handler(self, signal, frame):
        """First Ctrl+C stops the loop; the second exits hard."""
        self.signal_level += 1

        if self.signal_level == 1:
            self.running = False
        else:
            sys.exit(0)

    def output_options(self):
        # BUG FIX: the original concatenated the 'exit' and 'test' entries
        # without a separator ("' - exit: exit program' ' - test: magic'"),
        # printing them mashed together on one line.
        v.write(
            'Available options:\n'
            ' - help: prints this message\n'
            ' - exit: exit program\n'
            ' - test: magic'
        )

    def get(self):
        """Run the input loop until EOF (Ctrl+D), 'exit', or Ctrl+C."""
        v.debug('Entering input loop')
        v.write('AUDIOLYZE v0.01\nPress ctrl+D to exit')

        while self.running:
            try:
                self._parse_input(raw_input('>>> '))
            except EOFError:
                v.write('EOF received')
                self.running = False

        v.write('Goodbye')

    def _parse_input(self, raw):
        """Dispatch one stripped command line to its handler."""
        raw = raw.strip()

        if raw in ['help', 'h', '?']:
            self.output_options()
        elif raw in ['quit', 'exit', 'stop', 'abort']:
            self.running = False
        elif raw in ['test']:
            self.test()
        else:
            v.write(
                'Invalid command \'{}\'. Try \'help\' for a list of commands'
                .format(raw)
            )
| Scoudem/audiolyze | inputhandler.py | Python | mit | 1,620 |
# Generates VHDL case-statement fragments that render a roulette bet list
# (money amount + bet-kind name) into a character framebuffer.
index = 20   # running "when <n>" case index in the generated VHDL
bets = 25    # number of bet rows to render
names = ("Plain", "Cheval H", "Cheval V", "Trans", "Trans S", "Carre", "Colonne", "Simple")
for bet in range(bets):
    col = 40  # framebuffer column where this bet row starts
    # --------------------------------------- money
    print("""
when %d =>
    if bets_index > %d then
        fb_a_addr <= std_logic_vector(to_unsigned(COLS*21 + COLS*%d + %d, 14));
        fb_a_dat_in <= x"24"; -- $
        end if;""" % (index, bet, bet, col))
    index += 1
    col += 2 # extra space
    # One case per digit, most significant first (m = digit position).
    for m in range(5, -1, -1):
        print("""when %d =>
    if bets_index > %d then
        fb_a_addr <= std_logic_vector(to_unsigned(COLS*21 + COLS*%d + %d, 14));
        fb_a_dat_in <= ascii_i(bets(%d).money, %d);
        end if;""" % (index, bet, bet, col, bet, m))
        index += 1
        col += 1
        if m == 5:
            col += 1 # extra space
        if m == 2:
            # Insert the decimal point between digit positions 2 and 1.
            print("""when %d =>
    if bets_index > %d then
        fb_a_addr <= std_logic_vector(to_unsigned(COLS*21 + COLS*%d + %d, 14));
        fb_a_dat_in <= x"2e"; -- .
        end if;""" % (index, bet, bet, col))
            index += 1
            col += 1
    # --------------------------------------- name
    col += 1
    for n in range(8): # n = index of letter
        print("""when %d =>
    if bets_index > %d then
        fb_a_addr <= std_logic_vector(to_unsigned(COLS*21 + COLS*%d + %d, 14));
        case bets(%d).kind is""" % (index, bet, bet, col, bet))
        for kind in range(1, 9):
            if n < len(names[kind-1]) and names[kind-1][n] != ' ':
                print("""            when %d => fb_a_dat_in <= x"%02x"; -- %c""" % (kind, ord(names[kind-1][n]), names[kind-1][n]))
        # NOTE(review): the unconditional `fb_a_dat_in <= x"2e"` emitted
        # after `end case;` appears to overwrite the per-kind assignment
        # in the generated VHDL — looks like a copy/paste leftover from
        # the decimal-point block above; TODO confirm against the design.
        print("""            when others => fb_a_dat_in <= x"20"; -- space
        end case;
        fb_a_dat_in <= x"2e"; -- .
        end if;""")
        index += 1
        col += 1
| dries007/Basys3 | python/RouletteMaker.py | Python | mit | 1,814 |
from django import forms
from .models import Author
class RegisterNewAuthorForm(forms.ModelForm):
    """Registration form for a new Author with password confirmation."""
    password = forms.CharField(widget=forms.PasswordInput())
    repeated_password = forms.CharField(widget=forms.PasswordInput())

    class Meta:
        model = Author
        fields = ['email', 'first_name', 'last_name']

    def __init__(self, *args, **kwargs):
        super(RegisterNewAuthorForm, self).__init__(*args, **kwargs)
        # Apply Bootstrap styling to every visible widget.
        for visible in self.visible_fields():
            visible.field.widget.attrs['class'] = 'form-control'

    def clean(self):
        """Cross-field validation: both passwords must match.

        Raises:
            forms.ValidationError: when both passwords were supplied but differ.
        """
        cleaned_data = super(RegisterNewAuthorForm, self).clean()
        password = cleaned_data.get("password")
        repeated_password = cleaned_data.get("repeated_password")
        # Missing values are already reported by the per-field "required"
        # validation; only compare when both values were actually supplied.
        # (The original compared None == None and silently "passed".)
        if password is None or repeated_password is None:
            return cleaned_data
        if password != repeated_password:
            # BUG FIX: original message read "Passwords does not match! ".
            raise forms.ValidationError("Passwords do not match!")
        return cleaned_data
class LoginForm(forms.Form):
    """Simple email + password login form."""
    email = forms.CharField() if False else forms.EmailField()
    password = forms.CharField(widget=forms.PasswordInput())

    def __init__(self, *args, **kwargs):
        super(LoginForm, self).__init__(*args, **kwargs)
        # Apply Bootstrap styling to every visible widget.
        for field in self.visible_fields():
            field.field.widget.attrs['class'] = 'form-control'

    def clean(self):
        """Validate that the user exists and the password is correct."""
        cleaned_data = super(LoginForm, self).clean()
        email = cleaned_data.get("email")
        password = cleaned_data.get("password")
        try:
            author = Author.objects.get(email=email)
        except Author.DoesNotExist:
            error_msg = "User with email {} does not exist".format(email)
            raise forms.ValidationError(error_msg)
        if not author.check_password(password):
            raise forms.ValidationError("Wrong password")
        return cleaned_data
| jokuf/hack-blog | users/forms.py | Python | mit | 1,762 |
# Read the number of students in a class, then for each student read the
# weight and height and print the computed BMI ("IMC").
# (Original Portuguese spec: "Faça um programa que o usuário informe a
# quantidade de alunos de uma turma, o sistema deve ler o peso e altura
# de cada aluno, ao final informar o imc.")
# NOTE(review): the spec says "ao final informar o imc" (report at the
# end), but the program prints each student's IMC immediately; kept as-is.
# BUG FIX: removed the accumulators soma/somapesos/somaltura, which were
# initialized but never updated or printed (dead code).
T = int(input("entre com a quantidade de alunos"))
x = 0
while (x < T):
    P = float(input("entre com o peso do aluno"))
    A = float(input("entre com a altura do aluno"))
    # BMI = weight / height^2
    IMC = P / (A * A)
    print("IMC é ", IMC)
    x = x + 1
| erikaklein/algoritmo---programas-em-Python | AlunosPesoAlturaImc.py | Python | mit | 439 |
# This file is part of Indico.
# Copyright (C) 2002 - 2020 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from indico.util.mimetypes import register_custom_mimetypes
# Current Indico version string.
__version__ = '2.3-dev'

# Import-time side effect: register Indico-specific MIME types with the
# stdlib mimetypes registry.
register_custom_mimetypes()
| mic4ael/indico | indico/__init__.py | Python | mit | 329 |
# Compute the greatest common divisor of two fixed integers using the
# subtraction-based Euclidean algorithm, then print it.
a = 1100087778366101931
b = 7540113804746346429

# Repeatedly subtract the smaller from the larger until both values
# converge on the GCD.  (The original loop condition
# `not(a <= b and b <= a)` is simply `a != b`.)
while a != b:
    if a > b:
        a = a - b
    else:
        b = b - a

print(a)
| ellisonch/kinc | languages/imp/programs/prog4.py | Python | mit | 134 |
import warnings
from sqlalchemy.testing import eq_, is_, assert_raises, assert_raises_message
from sqlalchemy import *
from sqlalchemy import exc as sa_exc, util, event
from sqlalchemy.orm import *
from sqlalchemy.orm.util import instance_str
from sqlalchemy.orm import exc as orm_exc, attributes
from sqlalchemy.testing.assertsql import AllOf, CompiledSQL, RegexSQL, Or
from sqlalchemy.sql import table, column
from sqlalchemy import testing
from sqlalchemy.testing import engines
from sqlalchemy.testing import fixtures
from test.orm import _fixtures
from sqlalchemy.testing.schema import Table, Column
from sqlalchemy import inspect
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.testing.util import gc_collect
class O2MTest(fixtures.MappedTest):
    """deals with inheritance and one-to-many relationships"""
    @classmethod
    def define_tables(cls, metadata):
        # Three-level joined-table inheritance: foo <- bar <- blub, plus a
        # separate FK from blub back to foo for the 'parent_foo' relationship.
        global foo, bar, blub
        foo = Table('foo', metadata,
            Column('id', Integer, primary_key=True,
                test_needs_autoincrement=True),
            Column('data', String(20)))

        bar = Table('bar', metadata,
            Column('id', Integer, ForeignKey('foo.id'), primary_key=True),
            Column('bar_data', String(20)))

        blub = Table('blub', metadata,
            Column('id', Integer, ForeignKey('bar.id'), primary_key=True),
            Column('foo_id', Integer, ForeignKey('foo.id'), nullable=False),
            Column('blub_data', String(20)))

    def test_basic(self):
        # Classic-style mappings built inline; Blub gets a many-to-one
        # relationship to the inheritance base class Foo.
        class Foo(object):
            def __init__(self, data=None):
                self.data = data
            def __repr__(self):
                return "Foo id %d, data %s" % (self.id, self.data)
        mapper(Foo, foo)

        class Bar(Foo):
            def __repr__(self):
                return "Bar id %d, data %s" % (self.id, self.data)

        mapper(Bar, bar, inherits=Foo)

        class Blub(Bar):
            def __repr__(self):
                return "Blub id %d, data %s" % (self.id, self.data)

        mapper(Blub, blub, inherits=Bar, properties={
            'parent_foo':relationship(Foo)
        })

        sess = create_session()
        b1 = Blub("blub #1")
        b2 = Blub("blub #2")
        f = Foo("foo #1")
        sess.add(b1)
        sess.add(b2)
        sess.add(f)
        b1.parent_foo = f
        b2.parent_foo = f
        sess.flush()
        # Snapshot the reprs before expiring, then verify the same state
        # comes back from a fresh query.
        compare = ','.join([repr(b1), repr(b2), repr(b1.parent_foo),
                           repr(b2.parent_foo)])
        sess.expunge_all()
        l = sess.query(Blub).all()
        result = ','.join([repr(l[0]), repr(l[1]),
                          repr(l[0].parent_foo), repr(l[1].parent_foo)])
        eq_(compare, result)
        eq_(l[0].parent_foo.data, 'foo #1')
        eq_(l[1].parent_foo.data, 'foo #1')
class PolyExpressionEagerLoad(fixtures.DeclarativeMappedTest):
    # Single-table inheritance where polymorphic_on is a SQL CASE
    # expression rather than a plain column; verifies eager loading works.
    run_setup_mappers = 'once'
    __dialect__ = 'default'

    @classmethod
    def setup_classes(cls):
        Base = cls.DeclarativeBasic

        class A(fixtures.ComparableEntity, Base):
            __tablename__ = 'a'

            id = Column(Integer, primary_key=True,
                        test_needs_autoincrement=True)
            discriminator = Column(String(50), nullable=False)
            child_id = Column(Integer, ForeignKey('a.id'))
            child = relationship('A')

            # Any discriminator other than "a" maps to identity "b".
            p_a = case([
                (discriminator == "a", "a"),
            ], else_="b")

            __mapper_args__ = {
                'polymorphic_identity': 'a',
                "polymorphic_on": p_a,
            }

        class B(A):
            __mapper_args__ = {
                'polymorphic_identity': 'b'
            }

    @classmethod
    def insert_data(cls):
        A = cls.classes.A

        session = Session(testing.db)
        # Row 1 is an A; rows 2 and 3 ('b' and 'c') both resolve to B
        # through the CASE expression's else_ branch.
        session.add_all([
            A(id=1, discriminator='a', ),
            A(id=2, discriminator='b', child_id=1),
            A(id=3, discriminator='c', child_id=1),
        ])
        session.commit()

    def test_joinedload(self):
        A = self.classes.A
        B = self.classes.B

        session = Session(testing.db)
        result = session.query(A).filter_by(child_id=None).\
            options(joinedload('child')).one()
        eq_(
            result,
            A(id=1, discriminator='a', child=[B(id=2), B(id=3)]),
        )
class PolymorphicResolutionMultiLevel(fixtures.DeclarativeMappedTest,
                                      testing.AssertsCompiledSQL):
    # Exercises Mapper._mappers_from_spec() / _with_polymorphic_args():
    # given a with_polymorphic spec over the hierarchy A <- (B, C), B <- D,
    # intermediate mappers must be filled in and ordered topologically.
    run_setup_mappers = 'once'
    __dialect__ = 'default'

    @classmethod
    def setup_classes(cls):
        Base = cls.DeclarativeBasic
        class A(Base):
            __tablename__ = 'a'
            id = Column(Integer, primary_key=True)

        class B(A):
            __tablename__ = 'b'
            id = Column(Integer, ForeignKey('a.id'), primary_key=True)

        class C(A):
            __tablename__ = 'c'
            id = Column(Integer, ForeignKey('a.id'), primary_key=True)

        class D(B):
            __tablename__ = 'd'
            id = Column(Integer, ForeignKey('b.id'), primary_key=True)

    def test_ordered_b_d(self):
        a_mapper = inspect(self.classes.A)
        eq_(
            a_mapper._mappers_from_spec(
                [self.classes.B, self.classes.D], None),
            [a_mapper, inspect(self.classes.B), inspect(self.classes.D)]
        )

    def test_a(self):
        a_mapper = inspect(self.classes.A)
        eq_(
            a_mapper._mappers_from_spec(
                [self.classes.A], None),
            [a_mapper]
        )

    def test_b_d_selectable(self):
        # With an explicit selectable, only mappers whose tables appear in
        # it are returned (the base A is omitted).
        a_mapper = inspect(self.classes.A)
        spec = [self.classes.D, self.classes.B]
        eq_(
            a_mapper._mappers_from_spec(
                spec,
                self.classes.B.__table__.join(self.classes.D.__table__)
            ),
            [inspect(self.classes.B), inspect(self.classes.D)]
        )

    def test_d_selectable(self):
        a_mapper = inspect(self.classes.A)
        spec = [self.classes.D]
        eq_(
            a_mapper._mappers_from_spec(
                spec,
                self.classes.B.__table__.join(self.classes.D.__table__)
            ),
            [inspect(self.classes.D)]
        )

    def test_reverse_d_b(self):
        # Spec order is normalized: D listed before B still yields A, B, D.
        a_mapper = inspect(self.classes.A)
        spec = [self.classes.D, self.classes.B]
        eq_(
            a_mapper._mappers_from_spec(
                spec, None),
            [a_mapper, inspect(self.classes.B), inspect(self.classes.D)]
        )
        mappers, selectable = a_mapper._with_polymorphic_args(spec=spec)
        self.assert_compile(selectable,
                "a LEFT OUTER JOIN b ON a.id = b.id "
                "LEFT OUTER JOIN d ON b.id = d.id")

    def test_d_b_missing(self):
        # Intermediate mapper B is filled in automatically for spec [D].
        a_mapper = inspect(self.classes.A)
        spec = [self.classes.D]
        eq_(
            a_mapper._mappers_from_spec(
                spec, None),
            [a_mapper, inspect(self.classes.B), inspect(self.classes.D)]
        )
        mappers, selectable = a_mapper._with_polymorphic_args(spec=spec)
        self.assert_compile(selectable,
                "a LEFT OUTER JOIN b ON a.id = b.id "
                "LEFT OUTER JOIN d ON b.id = d.id")

    def test_d_c_b(self):
        a_mapper = inspect(self.classes.A)
        spec = [self.classes.D, self.classes.C, self.classes.B]
        ms = a_mapper._mappers_from_spec(spec, None)

        eq_(
            ms[-1], inspect(self.classes.D)
        )
        eq_(ms[0], a_mapper)
        # B and C are siblings; their relative order is unspecified.
        eq_(
            set(ms[1:3]), set(a_mapper._inheriting_mappers)
        )
class PolymorphicOnNotLocalTest(fixtures.MappedTest):
@classmethod
def define_tables(cls, metadata):
t1 = Table('t1', metadata,
Column('id', Integer, primary_key=True,
test_needs_autoincrement=True),
Column('x', String(10)),
Column('q', String(10)))
t2 = Table('t2', metadata,
Column('t2id', Integer, primary_key=True,
test_needs_autoincrement=True),
Column('y', String(10)),
Column('xid', ForeignKey('t1.id')))
@classmethod
def setup_classes(cls):
class Parent(cls.Comparable):
pass
class Child(Parent):
pass
def test_non_col_polymorphic_on(self):
Parent = self.classes.Parent
t2 = self.tables.t2
assert_raises_message(
sa_exc.ArgumentError,
"Can't determine polymorphic_on "
"value 'im not a column' - no "
"attribute is mapped to this name.",
mapper,
Parent, t2, polymorphic_on="im not a column"
)
def test_polymorphic_on_non_expr_prop(self):
t2, t1 = self.tables.t2, self.tables.t1
Parent = self.classes.Parent
t1t2_join = select([t1.c.x], from_obj=[t1.join(t2)]).alias()
def go():
interface_m = mapper(Parent, t2,
polymorphic_on=lambda:"hi",
polymorphic_identity=0)
assert_raises_message(
sa_exc.ArgumentError,
"Only direct column-mapped property or "
"SQL expression can be passed for polymorphic_on",
go
)
def test_polymorphic_on_not_present_col(self):
t2, t1 = self.tables.t2, self.tables.t1
Parent = self.classes.Parent
t1t2_join = select([t1.c.x], from_obj=[t1.join(t2)]).alias()
def go():
t1t2_join_2 = select([t1.c.q], from_obj=[t1.join(t2)]).alias()
interface_m = mapper(Parent, t2,
polymorphic_on=t1t2_join.c.x,
with_polymorphic=('*', t1t2_join_2),
polymorphic_identity=0)
assert_raises_message(
sa_exc.InvalidRequestError,
"Could not map polymorphic_on column 'x' to the mapped table - "
"polymorphic loads will not function properly",
go
)
def test_polymorphic_on_only_in_with_poly(self):
t2, t1 = self.tables.t2, self.tables.t1
Parent = self.classes.Parent
t1t2_join = select([t1.c.x], from_obj=[t1.join(t2)]).alias()
# if its in the with_polymorphic, then its OK
mapper(Parent, t2,
polymorphic_on=t1t2_join.c.x,
with_polymorphic=('*', t1t2_join),
polymorphic_identity=0)
def test_polymorpic_on_not_in_with_poly(self):
t2, t1 = self.tables.t2, self.tables.t1
Parent = self.classes.Parent
t1t2_join = select([t1.c.x], from_obj=[t1.join(t2)]).alias()
# if with_polymorphic, but its not present, not OK
def go():
t1t2_join_2 = select([t1.c.q], from_obj=[t1.join(t2)]).alias()
interface_m = mapper(Parent, t2,
polymorphic_on=t1t2_join.c.x,
with_polymorphic=('*', t1t2_join_2),
polymorphic_identity=0)
assert_raises_message(
sa_exc.InvalidRequestError,
"Could not map polymorphic_on column 'x' "
"to the mapped table - "
"polymorphic loads will not function properly",
go
)
def test_polymorphic_on_expr_explicit_map(self):
t2, t1 = self.tables.t2, self.tables.t1
Parent, Child = self.classes.Parent, self.classes.Child
expr = case([
(t1.c.x=="p", "parent"),
(t1.c.x=="c", "child"),
])
mapper(Parent, t1, properties={
"discriminator":column_property(expr)
}, polymorphic_identity="parent",
polymorphic_on=expr)
mapper(Child, t2, inherits=Parent,
polymorphic_identity="child")
self._roundtrip(parent_ident='p', child_ident='c')
def test_polymorphic_on_expr_implicit_map_no_label_joined(self):
t2, t1 = self.tables.t2, self.tables.t1
Parent, Child = self.classes.Parent, self.classes.Child
expr = case([
(t1.c.x=="p", "parent"),
(t1.c.x=="c", "child"),
])
mapper(Parent, t1, polymorphic_identity="parent",
polymorphic_on=expr)
mapper(Child, t2, inherits=Parent, polymorphic_identity="child")
self._roundtrip(parent_ident='p', child_ident='c')
def test_polymorphic_on_expr_implicit_map_w_label_joined(self):
    """Same as the no-label variant, but the case() expression carries
    an anonymous label (.label(None)); joined-table inheritance."""
    t2, t1 = self.tables.t2, self.tables.t1
    Parent, Child = self.classes.Parent, self.classes.Child
    expr = case([
        (t1.c.x=="p", "parent"),
        (t1.c.x=="c", "child"),
    ]).label(None)
    mapper(Parent, t1, polymorphic_identity="parent",
        polymorphic_on=expr)
    mapper(Child, t2, inherits=Parent, polymorphic_identity="child")
    self._roundtrip(parent_ident='p', child_ident='c')
def test_polymorphic_on_expr_implicit_map_no_label_single(self):
    """test that single_table_criterion is propagated
    with a standalone expr"""
    # Child is mapped without its own table, making it single-table
    # inheritance against t1.
    t2, t1 = self.tables.t2, self.tables.t1
    Parent, Child = self.classes.Parent, self.classes.Child
    expr = case([
        (t1.c.x=="p", "parent"),
        (t1.c.x=="c", "child"),
    ])
    mapper(Parent, t1, polymorphic_identity="parent",
        polymorphic_on=expr)
    mapper(Child, inherits=Parent, polymorphic_identity="child")
    self._roundtrip(parent_ident='p', child_ident='c')
def test_polymorphic_on_expr_implicit_map_w_label_single(self):
    """test that single_table_criterion is propagated
    with a standalone expr"""
    # same as the no-label single-table variant, but the expression
    # carries an anonymous label (.label(None)).
    t2, t1 = self.tables.t2, self.tables.t1
    Parent, Child = self.classes.Parent, self.classes.Child
    expr = case([
        (t1.c.x=="p", "parent"),
        (t1.c.x=="c", "child"),
    ]).label(None)
    mapper(Parent, t1, polymorphic_identity="parent",
        polymorphic_on=expr)
    mapper(Child, inherits=Parent, polymorphic_identity="child")
    self._roundtrip(parent_ident='p', child_ident='c')
def test_polymorphic_on_column_prop(self):
    """polymorphic_on may be the column_property object itself."""
    t2, t1 = self.tables.t2, self.tables.t1
    Parent, Child = self.classes.Parent, self.classes.Child
    expr = case([
        (t1.c.x=="p", "parent"),
        (t1.c.x=="c", "child"),
    ])
    cprop = column_property(expr)
    mapper(Parent, t1, properties={
        "discriminator":cprop
    }, polymorphic_identity="parent",
        polymorphic_on=cprop)
    mapper(Child, t2, inherits=Parent,
                polymorphic_identity="child")
    self._roundtrip(parent_ident='p', child_ident='c')
def test_polymorphic_on_column_str_prop(self):
    """polymorphic_on may be the string name of a mapped
    column_property."""
    t2, t1 = self.tables.t2, self.tables.t1
    Parent, Child = self.classes.Parent, self.classes.Child
    expr = case([
        (t1.c.x=="p", "parent"),
        (t1.c.x=="c", "child"),
    ])
    cprop = column_property(expr)
    mapper(Parent, t1, properties={
        "discriminator":cprop
    }, polymorphic_identity="parent",
        polymorphic_on="discriminator")
    mapper(Child, t2, inherits=Parent,
                polymorphic_identity="child")
    self._roundtrip(parent_ident='p', child_ident='c')
def test_polymorphic_on_synonym(self):
    """A synonym() is rejected as polymorphic_on with ArgumentError;
    only direct column properties or SQL expressions are allowed."""
    t2, t1 = self.tables.t2, self.tables.t1
    Parent, Child = self.classes.Parent, self.classes.Child
    cprop = column_property(t1.c.x)
    assert_raises_message(
        sa_exc.ArgumentError,
        "Only direct column-mapped property or "
        "SQL expression can be passed for polymorphic_on",
        mapper, Parent, t1, properties={
            "discriminator":cprop,
            "discrim_syn":synonym(cprop)
        }, polymorphic_identity="parent",
        polymorphic_on="discrim_syn")
def _roundtrip(self, set_event=True, parent_ident='parent', child_ident='child'):
    """Shared round-trip: persist Parent/Child rows and verify that
    polymorphic loading resolves each row to the correct class.

    :param set_event: when True, install an "init" event that assigns
        the discriminator value (``x``) based on the instance's
        polymorphic identity.
    :param parent_ident: value written to ``x`` for Parent instances.
    :param child_ident: value written to ``x`` for Child instances.
    """
    Parent, Child = self.classes.Parent, self.classes.Child
    # locate the "polymorphic_on" ColumnProperty. This isn't
    # "officially" stored at the moment so do some heuristics to find it.
    parent_mapper = inspect(Parent)
    for prop in parent_mapper.column_attrs:
        if not prop.instrument:
            break
    else:
        # no un-instrumented attribute found; fall back to the mapper's
        # internal column->property lookup for the discriminator.
        prop = parent_mapper._columntoproperty[
                        parent_mapper.polymorphic_on]
    # then make sure the column we will query on matches.
    is_(
        parent_mapper.polymorphic_on,
        prop.columns[0]
    )
    if set_event:
        @event.listens_for(Parent, "init", propagate=True)
        def set_identity(instance, *arg, **kw):
            ident = object_mapper(instance).polymorphic_identity
            if ident == 'parent':
                instance.x = parent_ident
            elif ident == 'child':
                instance.x = child_ident
            else:
                assert False, "Got unexpected identity %r" % ident
    s = Session(testing.db)
    s.add_all([
        Parent(q="p1"),
        Child(q="c1", y="c1"),
        Parent(q="p2"),
    ])
    s.commit()
    s.close()
    # rows must come back as their mapped subclasses, in insert order
    eq_(
        [type(t) for t in s.query(Parent).order_by(Parent.id)],
        [Parent, Child, Parent]
    )
    eq_(
        [type(t) for t in s.query(Child).all()],
        [Child]
    )
class SortOnlyOnImportantFKsTest(fixtures.MappedTest):
    """Flush ordering with a mutual FK cycle between 'a' and 'b':
    only the inherit-condition FK (b.id -> a.id) should drive the
    insert sort; the use_alter FK (a.b_id -> b.id) must not."""
    @classmethod
    def define_tables(cls, metadata):
        Table('a', metadata,
            Column('id', Integer, primary_key=True,
                        test_needs_autoincrement=True),
            # use_alter breaks the FK cycle at DDL time
            Column('b_id', Integer,
                        ForeignKey('b.id', use_alter=True, name='b'))
        )
        Table('b', metadata,
            Column('id', Integer, ForeignKey('a.id'), primary_key=True)
        )
    @classmethod
    def setup_classes(cls):
        Base = declarative_base()
        class A(Base):
            __tablename__ = "a"
            id = Column(Integer, primary_key=True,
                            test_needs_autoincrement=True)
            b_id = Column(Integer, ForeignKey('b.id'))
        class B(A):
            __tablename__ = "b"
            id = Column(Integer, ForeignKey('a.id'), primary_key=True)
            # disambiguates which FK joins the inheritance hierarchy
            __mapper_args__ = {'inherit_condition': id == A.id}
        cls.classes.A = A
        cls.classes.B = B
    def test_flush(self):
        # a successful flush proves the unit of work sorted the inserts
        s = Session(testing.db)
        s.add(self.classes.B())
        s.flush()
class FalseDiscriminatorTest(fixtures.MappedTest):
    """Falsy polymorphic identities (False on a Boolean discriminator)
    must not be confused with a missing identity."""
    @classmethod
    def define_tables(cls, metadata):
        global t1
        t1 = Table('t1', metadata,
            Column('id', Integer, primary_key=True, test_needs_autoincrement=True),
            Column('type', Boolean, nullable=False))
    def test_false_on_sub(self):
        # identity False on the single-table subclass
        class Foo(object):
            pass
        class Bar(Foo):
            pass
        mapper(Foo, t1, polymorphic_on=t1.c.type, polymorphic_identity=True)
        mapper(Bar, inherits=Foo, polymorphic_identity=False)
        sess = create_session()
        b1 = Bar()
        sess.add(b1)
        sess.flush()
        assert b1.type is False
        sess.expunge_all()
        # the False discriminator must load back as Bar
        assert isinstance(sess.query(Foo).one(), Bar)
    def test_false_on_base(self):
        # identity False on the base class
        class Ding(object):pass
        class Bat(Ding):pass
        mapper(Ding, t1, polymorphic_on=t1.c.type, polymorphic_identity=False)
        mapper(Bat, inherits=Ding, polymorphic_identity=True)
        sess = create_session()
        d1 = Ding()
        sess.add(d1)
        sess.flush()
        assert d1.type is False
        sess.expunge_all()
        assert sess.query(Ding).one() is not None
class PolymorphicSynonymTest(fixtures.MappedTest):
    """A synonym with map_column=True works across a polymorphic
    joined-inheritance hierarchy, both for filtering and access."""
    @classmethod
    def define_tables(cls, metadata):
        global t1, t2
        t1 = Table('t1', metadata,
                   Column('id', Integer, primary_key=True,
                            test_needs_autoincrement=True),
                   Column('type', String(10), nullable=False),
                   Column('info', String(255)))
        t2 = Table('t2', metadata,
                   Column('id', Integer, ForeignKey('t1.id'),
                            primary_key=True),
                   Column('data', String(10), nullable=False))
    def test_polymorphic_synonym(self):
        class T1(fixtures.ComparableEntity):
            # python-side descriptor wrapping the mapped '_info' column
            def info(self):
                return "THE INFO IS:" + self._info
            def _set_info(self, x):
                self._info = x
            info = property(info, _set_info)
        class T2(T1):pass
        mapper(T1, t1, polymorphic_on=t1.c.type, polymorphic_identity='t1',
                properties={
                    'info':synonym('_info', map_column=True)
                })
        mapper(T2, t2, inherits=T1, polymorphic_identity='t2')
        sess = create_session()
        at1 = T1(info='at1')
        at2 = T2(info='at2', data='t2 data')
        sess.add(at1)
        sess.add(at2)
        sess.flush()
        sess.expunge_all()
        # filtering on the synonym targets the column; access goes
        # through the property getter
        eq_(sess.query(T2).filter(T2.info=='at2').one(), at2)
        eq_(at2.info, "THE INFO IS:at2")
class PolymorphicAttributeManagementTest(fixtures.MappedTest):
    """Test polymorphic_on can be assigned, can be mirrored, etc."""
    run_setup_mappers = 'once'
    @classmethod
    def define_tables(cls, metadata):
        # three-level joined hierarchy; 'class_name' is mirrored on
        # both table_a and table_b
        Table('table_a', metadata,
            Column('id', Integer, primary_key=True,
                            test_needs_autoincrement=True),
            Column('class_name', String(50))
        )
        Table('table_b', metadata,
            Column('id', Integer, ForeignKey('table_a.id'),
                            primary_key=True),
            Column('class_name', String(50)),
        )
        Table('table_c', metadata,
            Column('id', Integer, ForeignKey('table_b.id'),
                            primary_key=True),
            Column('data', String(10))
        )
    @classmethod
    def setup_classes(cls):
        table_b, table_c, table_a = (cls.tables.table_b,
                                cls.tables.table_c,
                                cls.tables.table_a)
        class A(cls.Basic):
            pass
        class B(A):
            pass
        class C(B):
            pass
        class D(B):
            pass
        mapper(A, table_a,
                        polymorphic_on=table_a.c.class_name,
                        polymorphic_identity='a')
        # B maps 'class_name' to both columns so assignment mirrors
        # into table_a and table_b
        mapper(B, table_b, inherits=A,
                        polymorphic_on=table_b.c.class_name,
                        polymorphic_identity='b',
                        properties=dict(class_name=[table_a.c.class_name, table_b.c.class_name]))
        mapper(C, table_c, inherits=B,
                        polymorphic_identity='c')
        # D is single-table against B's tables
        mapper(D, inherits=B,
                        polymorphic_identity='d')
    def test_poly_configured_immediate(self):
        # the discriminator attribute is populated on construction
        A, C, B = (self.classes.A,
                                self.classes.C,
                                self.classes.B)
        a = A()
        b = B()
        c = C()
        eq_(a.class_name, 'a')
        eq_(b.class_name, 'b')
        eq_(c.class_name, 'c')
    def test_base_class(self):
        # querying any superclass returns the most-derived class
        A, C, B = (self.classes.A,
                                self.classes.C,
                                self.classes.B)
        sess = Session()
        c1 = C()
        sess.add(c1)
        sess.commit()
        assert isinstance(sess.query(B).first(), C)
        sess.close()
        assert isinstance(sess.query(A).first(), C)
    def test_valid_assignment_upwards(self):
        """test that we can assign 'd' to a B, since B/D
        both involve the same set of tables.
        """
        D, B = self.classes.D, self.classes.B
        sess = Session()
        b1 = B()
        b1.class_name = 'd'
        sess.add(b1)
        sess.commit()
        sess.close()
        assert isinstance(sess.query(B).first(), D)
    def test_invalid_assignment_downwards(self):
        """test that we warn on assign of 'b' to a C, since this adds
        a row to the C table we'd never load.
        """
        C = self.classes.C
        sess = Session()
        c1 = C()
        c1.class_name = 'b'
        sess.add(c1)
        assert_raises_message(
            sa_exc.SAWarning,
            "Flushing object %s with incompatible "
            "polymorphic identity 'b'; the object may not "
            "refresh and/or load correctly" % instance_str(c1),
            sess.flush
        )
    def test_invalid_assignment_upwards(self):
        """test that we warn on assign of 'c' to a B, since we will have a
        "C" row that has no joined row, which will cause object
        deleted errors.
        """
        B = self.classes.B
        sess = Session()
        b1 = B()
        b1.class_name = 'c'
        sess.add(b1)
        assert_raises_message(
            sa_exc.SAWarning,
            "Flushing object %s with incompatible "
            "polymorphic identity 'c'; the object may not "
            "refresh and/or load correctly" % instance_str(b1),
            sess.flush
        )
    def test_entirely_oob_assignment(self):
        """test warn on an unknown polymorphic identity.
        """
        B = self.classes.B
        sess = Session()
        b1 = B()
        b1.class_name = 'xyz'
        sess.add(b1)
        assert_raises_message(
            sa_exc.SAWarning,
            "Flushing object %s with incompatible "
            "polymorphic identity 'xyz'; the object may not "
            "refresh and/or load correctly" % instance_str(b1),
            sess.flush
        )
    def test_not_set_on_upate(self):
        # NOTE(review): "upate" in this and the next test name looks
        # like a typo for "update"; verify before renaming.
        # an UPDATE of an unrelated column must not warn about identity
        C = self.classes.C
        sess = Session()
        c1 = C()
        sess.add(c1)
        sess.commit()
        sess.expire(c1)
        c1.data = 'foo'
        sess.flush()
    def test_validate_on_upate(self):
        # explicitly changing the discriminator on UPDATE still warns
        C = self.classes.C
        sess = Session()
        c1 = C()
        sess.add(c1)
        sess.commit()
        sess.expire(c1)
        c1.class_name = 'b'
        assert_raises_message(
            sa_exc.SAWarning,
            "Flushing object %s with incompatible "
            "polymorphic identity 'b'; the object may not "
            "refresh and/or load correctly" % instance_str(c1),
            sess.flush
        )
class CascadeTest(fixtures.MappedTest):
    """that cascades on polymorphic relationships continue
    cascading along the path of the instance's mapper, not
    the base mapper."""
    @classmethod
    def define_tables(cls, metadata):
        global t1, t2, t3, t4
        t1= Table('t1', metadata,
            Column('id', Integer, primary_key=True,
                                    test_needs_autoincrement=True),
            Column('data', String(30))
            )
        t2 = Table('t2', metadata,
            Column('id', Integer, primary_key=True,
                                    test_needs_autoincrement=True),
            Column('t1id', Integer, ForeignKey('t1.id')),
            Column('type', String(30)),
            Column('data', String(30))
        )
        t3 = Table('t3', metadata,
            Column('id', Integer, ForeignKey('t2.id'),
                                    primary_key=True),
            Column('moredata', String(30)))
        t4 = Table('t4', metadata,
            Column('id', Integer, primary_key=True,
                                    test_needs_autoincrement=True),
            Column('t3id', Integer, ForeignKey('t3.id')),
            Column('data', String(30)))
    def test_cascade(self):
        class T1(fixtures.BasicEntity):
            pass
        class T2(fixtures.BasicEntity):
            pass
        class T3(T2):
            pass
        class T4(fixtures.BasicEntity):
            pass
        mapper(T1, t1, properties={
            't2s':relationship(T2, cascade="all")
        })
        mapper(T2, t2, polymorphic_on=t2.c.type, polymorphic_identity='t2')
        # the 't4s' relationship exists only on the T3 subclass mapper
        mapper(T3, t3, inherits=T2, polymorphic_identity='t3', properties={
            't4s':relationship(T4, cascade="all")
        })
        mapper(T4, t4)
        sess = create_session()
        t1_1 = T1(data='t1')
        t3_1 = T3(data ='t3', moredata='t3')
        t2_1 = T2(data='t2')
        t1_1.t2s.append(t2_1)
        t1_1.t2s.append(t3_1)
        t4_1 = T4(data='t4')
        t3_1.t4s.append(t4_1)
        sess.add(t1_1)
        # save-update cascades T1 -> T3 -> T4 via the subclass mapper
        assert t4_1 in sess.new
        sess.flush()
        sess.delete(t1_1)
        # delete cascade likewise reaches T4 through T3
        assert t4_1 in sess.deleted
        sess.flush()
class M2OUseGetTest(fixtures.MappedTest):
    """A many-to-one to a joined-inheritance subclass can use the
    identity-map "get" strategy without extra SQL (ticket #1186)."""
    @classmethod
    def define_tables(cls, metadata):
        Table('base', metadata,
            Column('id', Integer, primary_key=True, test_needs_autoincrement=True),
            Column('type', String(30))
        )
        Table('sub', metadata,
            Column('id', Integer, ForeignKey('base.id'), primary_key=True),
        )
        Table('related', metadata,
            Column('id', Integer, primary_key=True, test_needs_autoincrement=True),
            Column('sub_id', Integer, ForeignKey('sub.id')),
        )
    def test_use_get(self):
        base, sub, related = (self.tables.base,
                                self.tables.sub,
                                self.tables.related)
        # test [ticket:1186]
        class Base(fixtures.BasicEntity):
            pass
        class Sub(Base):
            pass
        class Related(Base):
            pass
        mapper(Base, base, polymorphic_on=base.c.type, polymorphic_identity='b')
        mapper(Sub, sub, inherits=Base, polymorphic_identity='s')
        mapper(Related, related, properties={
            # previously, this was needed for the comparison to occur:
            # the 'primaryjoin' looks just like "Sub"'s "get" clause (based on the Base id),
            # and foreign_keys since that join condition doesn't actually have any fks in it
            #'sub':relationship(Sub, primaryjoin=base.c.id==related.c.sub_id, foreign_keys=related.c.sub_id)
            # now we can use this:
            'sub':relationship(Sub)
        })
        # the lazy loader must have selected the "use_get" fast path
        assert class_mapper(Related).get_property('sub').strategy.use_get
        sess = create_session()
        s1 = Sub()
        r1 = Related(sub=s1)
        sess.add(r1)
        sess.flush()
        sess.expunge_all()
        r1 = sess.query(Related).first()
        s1 = sess.query(Sub).first()
        def go():
            assert r1.sub
        # Sub is already in the identity map: zero additional queries
        self.assert_sql_count(testing.db, go, 0)
class GetTest(fixtures.MappedTest):
    """Query.get() against a three-level joined hierarchy
    (Foo -> Bar -> Blub), with and without polymorphic loading."""
    @classmethod
    def define_tables(cls, metadata):
        global foo, bar, blub
        foo = Table('foo', metadata,
            Column('id', Integer, primary_key=True, test_needs_autoincrement=True),
            Column('type', String(30)),
            Column('data', String(20)))
        bar = Table('bar', metadata,
            Column('id', Integer, ForeignKey('foo.id'), primary_key=True),
            Column('bar_data', String(20)))
        blub = Table('blub', metadata,
            Column('blub_id', Integer, primary_key=True, test_needs_autoincrement=True),
            Column('foo_id', Integer, ForeignKey('foo.id')),
            Column('bar_id', Integer, ForeignKey('bar.id')),
            Column('blub_data', String(20)))
    @classmethod
    def setup_classes(cls):
        class Foo(cls.Basic):
            pass
        class Bar(Foo):
            pass
        class Blub(Bar):
            pass
    def test_get_polymorphic(self):
        self._do_get_test(True)
    def test_get_nonpolymorphic(self):
        self._do_get_test(False)
    def _do_get_test(self, polymorphic):
        """Persist one instance per class, then exercise get().

        :param polymorphic: when True, mappers carry a discriminator and
            get() is expected to resolve entirely from the identity map
            (zero SQL); when False, get() re-queries per lookup.
        """
        foo, Bar, Blub, blub, bar, Foo = (self.tables.foo,
                                self.classes.Bar,
                                self.classes.Blub,
                                self.tables.blub,
                                self.tables.bar,
                                self.classes.Foo)
        if polymorphic:
            mapper(Foo, foo, polymorphic_on=foo.c.type, polymorphic_identity='foo')
            mapper(Bar, bar, inherits=Foo, polymorphic_identity='bar')
            mapper(Blub, blub, inherits=Bar, polymorphic_identity='blub')
        else:
            mapper(Foo, foo)
            mapper(Bar, bar, inherits=Foo)
            mapper(Blub, blub, inherits=Bar)
        sess = create_session()
        f = Foo()
        b = Bar()
        bl = Blub()
        sess.add(f)
        sess.add(b)
        sess.add(bl)
        sess.flush()
        if polymorphic:
            def go():
                assert sess.query(Foo).get(f.id) is f
                assert sess.query(Foo).get(b.id) is b
                assert sess.query(Foo).get(bl.id) is bl
                assert sess.query(Bar).get(b.id) is b
                assert sess.query(Bar).get(bl.id) is bl
                assert sess.query(Blub).get(bl.id) is bl
                # test class mismatches - item is present
                # in the identity map but we requested a subclass
                assert sess.query(Blub).get(f.id) is None
                assert sess.query(Blub).get(b.id) is None
                assert sess.query(Bar).get(f.id) is None
            self.assert_sql_count(testing.db, go, 0)
        else:
            # this is testing the 'wrong' behavior of using get()
            # polymorphically with mappers that are not configured to be
            # polymorphic. the important part being that get() always
            # returns an instance of the query's type.
            def go():
                assert sess.query(Foo).get(f.id) is f
                bb = sess.query(Foo).get(b.id)
                # bugfix: previously asserted isinstance(b, Foo), which
                # is trivially true; the fetched object is what must be
                # checked, matching the 'bll' assertions below.
                assert isinstance(bb, Foo) and bb.id==b.id
                bll = sess.query(Foo).get(bl.id)
                assert isinstance(bll, Foo) and bll.id==bl.id
                assert sess.query(Bar).get(b.id) is b
                bll = sess.query(Bar).get(bl.id)
                assert isinstance(bll, Bar) and bll.id == bl.id
                assert sess.query(Blub).get(bl.id) is bl
            self.assert_sql_count(testing.db, go, 3)
class EagerLazyTest(fixtures.MappedTest):
    """tests eager load/lazy load of child items off inheritance mappers, tests that
    LazyLoader constructs the right query condition."""
    @classmethod
    def define_tables(cls, metadata):
        global foo, bar, bar_foo
        foo = Table('foo', metadata,
                    Column('id', Integer, primary_key=True, test_needs_autoincrement=True),
                    Column('data', String(30)))
        bar = Table('bar', metadata,
                    Column('id', Integer, ForeignKey('foo.id'), primary_key=True),
                    Column('bar_data', String(30)))
        # association table joining Bar rows to extra Foo rows
        bar_foo = Table('bar_foo', metadata,
                        Column('bar_id', Integer, ForeignKey('bar.id')),
                        Column('foo_id', Integer, ForeignKey('foo.id'))
        )
    def test_basic(self):
        class Foo(object): pass
        class Bar(Foo): pass
        foos = mapper(Foo, foo)
        bars = mapper(Bar, bar, inherits=foos)
        # same secondary relationship mapped twice: once lazy, once eager
        bars.add_property('lazy', relationship(foos, bar_foo, lazy='select'))
        bars.add_property('eager', relationship(foos, bar_foo, lazy='joined'))
        foo.insert().execute(data='foo1')
        # bugfix: these inserts previously passed data='bar1'/'bar2', but
        # 'data' is not a column of the bar table (its column is
        # 'bar_data'); older SQLAlchemy silently dropped the unknown
        # parameter, newer versions raise "Unconsumed column names".
        bar.insert().execute(id=1, bar_data='bar1')
        foo.insert().execute(data='foo2')
        bar.insert().execute(id=2, bar_data='bar2')
        foo.insert().execute(data='foo3') #3
        foo.insert().execute(data='foo4') #4
        bar_foo.insert().execute(bar_id=1, foo_id=3)
        bar_foo.insert().execute(bar_id=2, foo_id=4)
        sess = create_session()
        q = sess.query(Bar)
        # each Bar links to exactly one secondary Foo via bar_foo
        self.assert_(len(q.first().lazy) == 1)
        self.assert_(len(q.first().eager) == 1)
class EagerTargetingTest(fixtures.MappedTest):
    """test a scenario where joined table inheritance might be
    confused as an eagerly loaded joined table."""
    @classmethod
    def define_tables(cls, metadata):
        # self-referential hierarchy: a_table.parent_id -> a_table.id
        Table('a_table', metadata,
           Column('id', Integer, primary_key=True),
           Column('name', String(50)),
           Column('type', String(30), nullable=False),
           Column('parent_id', Integer, ForeignKey('a_table.id'))
        )
        Table('b_table', metadata,
           Column('id', Integer, ForeignKey('a_table.id'), primary_key=True),
           Column('b_data', String(50)),
        )
    def test_adapt_stringency(self):
        b_table, a_table = self.tables.b_table, self.tables.a_table
        class A(fixtures.ComparableEntity):
            pass
        class B(A):
            pass
        mapper(A, a_table, polymorphic_on=a_table.c.type, polymorphic_identity='A',
                properties={
                    'children': relationship(A, order_by=a_table.c.name)
            })
        mapper(B, b_table, inherits=A, polymorphic_identity='B', properties={
                'b_derived':column_property(b_table.c.b_data + "DATA")
                })
        sess=create_session()
        b1=B(id=1, name='b1',b_data='i')
        sess.add(b1)
        sess.flush()
        b2=B(id=2, name='b2', b_data='l', parent_id=1)
        sess.add(b2)
        sess.flush()
        bid=b1.id
        sess.expunge_all()
        # lazy load of 'children' must target the right joined rows
        node = sess.query(B).filter(B.id==bid).all()[0]
        eq_(node, B(id=1, name='b1',b_data='i'))
        eq_(node.children[0], B(id=2, name='b2',b_data='l'))
        sess.expunge_all()
        # same result with the relationship eagerly joined
        node = sess.query(B).options(joinedload(B.children)).filter(B.id==bid).all()[0]
        eq_(node, B(id=1, name='b1',b_data='i'))
        eq_(node.children[0], B(id=2, name='b2',b_data='l'))
class FlushTest(fixtures.MappedTest):
    """test dependency sorting among inheriting mappers"""
    @classmethod
    def define_tables(cls, metadata):
        Table('users', metadata,
            Column('id', Integer, primary_key=True, test_needs_autoincrement=True),
            Column('email', String(128)),
            Column('password', String(16)),
        )
        Table('roles', metadata,
            Column('id', Integer, primary_key=True, test_needs_autoincrement=True),
            Column('description', String(32))
        )
        # many-to-many association between users and roles
        Table('user_roles', metadata,
            Column('user_id', Integer, ForeignKey('users.id'), primary_key=True),
            Column('role_id', Integer, ForeignKey('roles.id'), primary_key=True)
        )
        Table('admins', metadata,
            Column('admin_id', Integer, primary_key=True, test_needs_autoincrement=True),
            Column('user_id', Integer, ForeignKey('users.id'))
        )
    def test_one(self):
        admins, users, roles, user_roles = (self.tables.admins,
                                self.tables.users,
                                self.tables.roles,
                                self.tables.user_roles)
        class User(object):pass
        class Role(object):pass
        class Admin(User):pass
        role_mapper = mapper(Role, roles)
        user_mapper = mapper(User, users, properties = {
                'roles' : relationship(Role, secondary=user_roles, lazy='joined')
            }
        )
        admin_mapper = mapper(Admin, admins, inherits=user_mapper)
        sess = create_session()
        adminrole = Role()
        sess.add(adminrole)
        sess.flush()
        # create an Admin, and append a Role.  the dependency processors
        # corresponding to the "roles" attribute for the Admin mapper and the User mapper
        # have to ensure that two dependency processors don't fire off and insert the
        # many to many row twice.
        a = Admin()
        a.roles.append(adminrole)
        a.password = 'admin'
        sess.add(a)
        sess.flush()
        eq_(select([func.count('*')]).select_from(user_roles).scalar(), 1)
    def test_two(self):
        # same scenario as test_one, but also flushes a subsequent UPDATE
        # on the inherited row; the m2m row still must exist exactly once
        admins, users, roles, user_roles = (self.tables.admins,
                                self.tables.users,
                                self.tables.roles,
                                self.tables.user_roles)
        class User(object):
            def __init__(self, email=None, password=None):
                self.email = email
                self.password = password
        class Role(object):
            def __init__(self, description=None):
                self.description = description
        class Admin(User):pass
        role_mapper = mapper(Role, roles)
        user_mapper = mapper(User, users, properties = {
                'roles' : relationship(Role, secondary=user_roles, lazy='joined')
            }
        )
        admin_mapper = mapper(Admin, admins, inherits=user_mapper)
        # create roles
        adminrole = Role('admin')
        sess = create_session()
        sess.add(adminrole)
        sess.flush()
        # create admin user
        a = Admin(email='tim', password='admin')
        a.roles.append(adminrole)
        sess.add(a)
        sess.flush()
        a.password = 'sadmin'
        sess.flush()
        eq_(select([func.count('*')]).select_from(user_roles).scalar(), 1)
class PassiveDeletesTest(fixtures.MappedTest):
    """mapper-level passive_deletes across a three-level joined
    hierarchy (A <- B <- C) with ON DELETE CASCADE foreign keys:
    each test asserts exactly which DELETE/SELECT statements are
    emitted depending on which mappers set passive_deletes=True."""
    __requires__ = ('foreign_keys',)
    @classmethod
    def define_tables(cls, metadata):
        Table(
            "a", metadata,
            Column('id', Integer, primary_key=True),
            Column('type', String(30))
        )
        Table(
            "b", metadata,
            Column(
                'id', Integer, ForeignKey('a.id', ondelete="CASCADE"),
                primary_key=True),
            Column('data', String(10))
        )
        Table(
            "c", metadata,
            Column('cid', Integer, primary_key=True),
            Column('bid', ForeignKey('b.id', ondelete="CASCADE"))
        )
    @classmethod
    def setup_classes(cls):
        class A(cls.Basic):
            pass
        class B(A):
            pass
        class C(B):
            pass
    def _fixture(self, a_p=False, b_p=False, c_p=False):
        # maps the hierarchy with passive_deletes toggled per level
        A, B, C = self.classes("A", "B", "C")
        a, b, c = self.tables("a", "b", "c")
        mapper(
            A, a, passive_deletes=a_p,
            polymorphic_on=a.c.type, polymorphic_identity='a')
        mapper(
            B, b, inherits=A, passive_deletes=b_p, polymorphic_identity='b')
        mapper(
            C, c, inherits=B, passive_deletes=c_p, polymorphic_identity='c')
    def test_none(self):
        # no passive deletes anywhere: every table is deleted explicitly
        A, B, C = self.classes("A", "B", "C")
        self._fixture()
        s = Session()
        a1, b1, c1 = A(id=1), B(id=2), C(cid=1, id=3)
        s.add_all([a1, b1, c1])
        s.commit()
        # want to see if the 'C' table loads even though
        # a and b are loaded
        c1 = s.query(B).filter_by(id=3).first()
        s.delete(c1)
        with self.sql_execution_asserter(testing.db) as asserter:
            s.flush()
        asserter.assert_(
            RegexSQL(
                "SELECT .* "
                "FROM c WHERE :param_1 = c.bid",
                [{'param_1': 3}]
            ),
            CompiledSQL(
                "DELETE FROM c WHERE c.cid = :cid",
                [{'cid': 1}]
            ),
            CompiledSQL(
                "DELETE FROM b WHERE b.id = :id",
                [{'id': 3}]
            ),
            CompiledSQL(
                "DELETE FROM a WHERE a.id = :id",
                [{'id': 3}]
            )
        )
    def test_c_only(self):
        # passive_deletes on C only: the 'c' row is left to the FK cascade
        A, B, C = self.classes("A", "B", "C")
        self._fixture(c_p=True)
        s = Session()
        a1, b1, c1 = A(id=1), B(id=2), C(cid=1, id=3)
        s.add_all([a1, b1, c1])
        s.commit()
        s.delete(a1)
        with self.sql_execution_asserter(testing.db) as asserter:
            s.flush()
        asserter.assert_(
            CompiledSQL(
                "SELECT a.id AS a_id, a.type AS a_type "
                "FROM a WHERE a.id = :param_1",
                [{'param_1': 1}]
            ),
            CompiledSQL(
                "DELETE FROM a WHERE a.id = :id",
                [{'id': 1}]
            )
        )
        # touch the attribute so it is loaded before the delete
        b1.id
        s.delete(b1)
        with self.sql_execution_asserter(testing.db) as asserter:
            s.flush()
        asserter.assert_(
            CompiledSQL(
                "DELETE FROM b WHERE b.id = :id",
                [{'id': 2}]
            ),
            CompiledSQL(
                "DELETE FROM a WHERE a.id = :id",
                [{'id': 2}]
            )
        )
        # want to see if the 'C' table loads even though
        # a and b are loaded
        c1 = s.query(A).filter_by(id=3).first()
        s.delete(c1)
        with self.sql_execution_asserter(testing.db) as asserter:
            s.flush()
        asserter.assert_(
            CompiledSQL(
                "DELETE FROM b WHERE b.id = :id",
                [{'id': 3}]
            ),
            CompiledSQL(
                "DELETE FROM a WHERE a.id = :id",
                [{'id': 3}]
            )
        )
    def test_b_only(self):
        # passive_deletes on B: 'b' and 'c' rows are left to the cascade
        A, B, C = self.classes("A", "B", "C")
        self._fixture(b_p=True)
        s = Session()
        a1, b1, c1 = A(id=1), B(id=2), C(cid=1, id=3)
        s.add_all([a1, b1, c1])
        s.commit()
        s.delete(a1)
        with self.sql_execution_asserter(testing.db) as asserter:
            s.flush()
        asserter.assert_(
            CompiledSQL(
                "SELECT a.id AS a_id, a.type AS a_type "
                "FROM a WHERE a.id = :param_1",
                [{'param_1': 1}]
            ),
            CompiledSQL(
                "DELETE FROM a WHERE a.id = :id",
                [{'id': 1}]
            )
        )
        b1.id
        s.delete(b1)
        with self.sql_execution_asserter(testing.db) as asserter:
            s.flush()
        asserter.assert_(
            CompiledSQL(
                "DELETE FROM a WHERE a.id = :id",
                [{'id': 2}]
            )
        )
        c1.id
        s.delete(c1)
        with self.sql_execution_asserter(testing.db) as asserter:
            s.flush()
        asserter.assert_(
            CompiledSQL(
                "DELETE FROM a WHERE a.id = :id",
                [{'id': 3}]
            )
        )
    def test_a_only(self):
        # passive_deletes on the base: one DELETE against 'a' suffices
        A, B, C = self.classes("A", "B", "C")
        self._fixture(a_p=True)
        s = Session()
        a1, b1, c1 = A(id=1), B(id=2), C(cid=1, id=3)
        s.add_all([a1, b1, c1])
        s.commit()
        s.delete(a1)
        with self.sql_execution_asserter(testing.db) as asserter:
            s.flush()
        asserter.assert_(
            CompiledSQL(
                "SELECT a.id AS a_id, a.type AS a_type "
                "FROM a WHERE a.id = :param_1",
                [{'param_1': 1}]
            ),
            CompiledSQL(
                "DELETE FROM a WHERE a.id = :id",
                [{'id': 1}]
            )
        )
        b1.id
        s.delete(b1)
        with self.sql_execution_asserter(testing.db) as asserter:
            s.flush()
        asserter.assert_(
            CompiledSQL(
                "DELETE FROM a WHERE a.id = :id",
                [{'id': 2}]
            )
        )
        # want to see if the 'C' table loads even though
        # a and b are loaded
        c1 = s.query(A).filter_by(id=3).first()
        s.delete(c1)
        with self.sql_execution_asserter(testing.db) as asserter:
            s.flush()
        asserter.assert_(
            CompiledSQL(
                "DELETE FROM a WHERE a.id = :id",
                [{'id': 3}]
            )
        )
class OptimizedGetOnDeferredTest(fixtures.MappedTest):
    """test that the 'optimized get' path accommodates deferred columns."""
    @classmethod
    def define_tables(cls, metadata):
        Table(
            "a", metadata,
            Column('id', Integer, primary_key=True,
                   test_needs_autoincrement=True)
        )
        Table(
            "b", metadata,
            Column('id', Integer, ForeignKey('a.id'), primary_key=True),
            Column('data', String(10))
        )
    @classmethod
    def setup_classes(cls):
        class A(cls.Basic):
            pass
        class B(A):
            pass
    @classmethod
    def setup_mappers(cls):
        A, B = cls.classes("A", "B")
        a, b = cls.tables("a", "b")
        mapper(A, a)
        mapper(B, b, inherits=A, properties={
            # both the plain column and a derived expression are deferred
            'data': deferred(b.c.data),
            'expr': column_property(b.c.data + 'q', deferred=True)
        })
    def test_column_property(self):
        # deferred expression loads on first access after flush
        A, B = self.classes("A", "B")
        sess = Session()
        b1 = B(data='x')
        sess.add(b1)
        sess.flush()
        eq_(b1.expr, 'xq')
    def test_expired_column(self):
        # expired deferred column refreshes to its flushed value
        A, B = self.classes("A", "B")
        sess = Session()
        b1 = B(data='x')
        sess.add(b1)
        sess.flush()
        sess.expire(b1, ['data'])
        eq_(b1.data, 'x')
class JoinedNoFKSortingTest(fixtures.MappedTest):
    """Joined inheritance with no actual FK between base and subclass
    tables (inherit_condition/inherit_foreign_keys supplied manually):
    base-table inserts must still precede subclass-table inserts."""
    @classmethod
    def define_tables(cls, metadata):
        Table("a", metadata,
            Column('id', Integer, primary_key=True,
                        test_needs_autoincrement=True)
        )
        Table("b", metadata,
            Column('id', Integer, primary_key=True)
        )
        Table("c", metadata,
            Column('id', Integer, primary_key=True)
        )
    @classmethod
    def setup_classes(cls):
        class A(cls.Basic):
            pass
        class B(A):
            pass
        class C(A):
            pass
    @classmethod
    def setup_mappers(cls):
        A, B, C = cls.classes.A, cls.classes.B, cls.classes.C
        mapper(A, cls.tables.a)
        # no schema-level FKs; the join + foreign-keys hints are explicit
        mapper(B, cls.tables.b, inherits=A,
                    inherit_condition=cls.tables.a.c.id == cls.tables.b.c.id,
                    inherit_foreign_keys=cls.tables.b.c.id)
        mapper(C, cls.tables.c, inherits=A,
                    inherit_condition=cls.tables.a.c.id == cls.tables.c.c.id,
                    inherit_foreign_keys=cls.tables.c.c.id)
    def test_ordering(self):
        B, C = self.classes.B, self.classes.C
        sess = Session()
        sess.add_all([B(), C(), B(), C()])
        # four base-row inserts first, then the subclass rows in any
        # relative order (AllOf)
        self.assert_sql_execution(
                testing.db,
                sess.flush,
                CompiledSQL(
                    "INSERT INTO a () VALUES ()",
                    {}
                ),
                CompiledSQL(
                    "INSERT INTO a () VALUES ()",
                    {}
                ),
                CompiledSQL(
                    "INSERT INTO a () VALUES ()",
                    {}
                ),
                CompiledSQL(
                    "INSERT INTO a () VALUES ()",
                    {}
                ),
                AllOf(
                    CompiledSQL(
                        "INSERT INTO b (id) VALUES (:id)",
                        [{"id": 1}, {"id": 3}]
                    ),
                    CompiledSQL(
                        "INSERT INTO c (id) VALUES (:id)",
                        [{"id": 2}, {"id": 4}]
                    )
                )
        )
class VersioningTest(fixtures.MappedTest):
    """version_id_col on the base of a joined-inheritance hierarchy:
    concurrent sessions updating/deleting the same row must raise
    StaleDataError when the dialect reports accurate rowcounts."""
    @classmethod
    def define_tables(cls, metadata):
        Table('base', metadata,
            Column('id', Integer, primary_key=True, test_needs_autoincrement=True),
            Column('version_id', Integer, nullable=False),
            Column('value', String(40)),
            Column('discriminator', Integer, nullable=False)
        )
        Table('subtable', metadata,
            Column('id', None, ForeignKey('base.id'), primary_key=True),
            Column('subdata', String(50))
        )
        Table('stuff', metadata,
            Column('id', Integer, primary_key=True, test_needs_autoincrement=True),
            Column('parent', Integer, ForeignKey('base.id'))
        )
    @testing.emits_warning(r".*updated rowcount")
    @engines.close_open_connections
    def test_save_update(self):
        subtable, base, stuff = (self.tables.subtable,
                                self.tables.base,
                                self.tables.stuff)
        class Base(fixtures.BasicEntity):
            pass
        class Sub(Base):
            pass
        class Stuff(Base):
            pass
        mapper(Stuff, stuff)
        mapper(Base, base,
                    polymorphic_on=base.c.discriminator,
                    version_id_col=base.c.version_id,
                    polymorphic_identity=1, properties={
            'stuff':relationship(Stuff)
        })
        mapper(Sub, subtable, inherits=Base, polymorphic_identity=2)
        sess = create_session()
        b1 = Base(value='b1')
        s1 = Sub(value='sub1', subdata='some subdata')
        sess.add(b1)
        sess.add(s1)
        sess.flush()
        sess2 = create_session()
        s2 = sess2.query(Base).get(s1.id)
        s2.subdata = 'sess2 subdata'
        # sess flushes first, bumping the version; sess2 is now stale
        s1.subdata = 'sess1 subdata'
        sess.flush()
        # NOTE(review): with_lockmode() is a legacy API — presumably
        # kept here for version-check-on-read coverage; confirm before
        # migrating to with_for_update().
        assert_raises(orm_exc.StaleDataError,
                        sess2.query(Base).with_lockmode('read').get,
                        s1.id)
        if not testing.db.dialect.supports_sane_rowcount:
            sess2.flush()
        else:
            assert_raises(orm_exc.StaleDataError, sess2.flush)
        sess2.refresh(s2)
        if testing.db.dialect.supports_sane_rowcount:
            assert s2.subdata == 'sess1 subdata'
        s2.subdata = 'sess2 subdata'
        sess2.flush()
    @testing.emits_warning(r".*(update|delete)d rowcount")
    def test_delete(self):
        subtable, base = self.tables.subtable, self.tables.base
        class Base(fixtures.BasicEntity):
            pass
        class Sub(Base):
            pass
        mapper(Base, base,
                    polymorphic_on=base.c.discriminator,
                    version_id_col=base.c.version_id, polymorphic_identity=1)
        mapper(Sub, subtable, inherits=Base, polymorphic_identity=2)
        sess = create_session()
        b1 = Base(value='b1')
        s1 = Sub(value='sub1', subdata='some subdata')
        s2 = Sub(value='sub2', subdata='some other subdata')
        sess.add(b1)
        sess.add(s1)
        sess.add(s2)
        sess.flush()
        sess2 = create_session()
        s3 = sess2.query(Base).get(s1.id)
        sess2.delete(s3)
        sess2.flush()
        s2.subdata = 'some new subdata'
        sess.flush()
        # s1 was deleted by sess2; updating it from sess must go stale
        s1.subdata = 'some new subdata'
        if testing.db.dialect.supports_sane_rowcount:
            assert_raises(
                orm_exc.StaleDataError,
                sess.flush
            )
        else:
            sess.flush()
class DistinctPKTest(fixtures.MappedTest):
    """test the construction of mapper.primary_key when an inheriting relationship
    joins on a column other than primary key column."""
    run_inserts = 'once'
    run_deletes = None
    @classmethod
    def define_tables(cls, metadata):
        global person_table, employee_table, Person, Employee
        person_table = Table("persons", metadata,
                Column("id", Integer, primary_key=True, test_needs_autoincrement=True),
                Column("name", String(80)),
                )
        employee_table = Table("employees", metadata,
                Column("eid", Integer, primary_key=True, test_needs_autoincrement=True),
                Column("salary", Integer),
                Column("person_id", Integer, ForeignKey("persons.id")),
                )
        class Person(object):
            def __init__(self, name):
                self.name = name
        class Employee(Person): pass
    @classmethod
    def insert_data(cls):
        person_insert = person_table.insert()
        person_insert.execute(id=1, name='alice')
        person_insert.execute(id=2, name='bob')
        employee_insert = employee_table.insert()
        # bugfix: these previously passed id=2/id=3, but the employees
        # table's PK column is named 'eid'; older SQLAlchemy silently
        # dropped the unknown parameter (leaving eid to autoincrement),
        # newer versions raise "Unconsumed column names".  The eid
        # values 2 and 3 match the composite get([1,2]) / get([2,3])
        # lookups in _do_test.
        employee_insert.execute(eid=2, salary=250, person_id=1) # alice
        employee_insert.execute(eid=3, salary=200, person_id=2) # bob
    def test_implicit(self):
        # primary key reduces to the base table's id
        person_mapper = mapper(Person, person_table)
        mapper(Employee, employee_table, inherits=person_mapper)
        assert list(class_mapper(Employee).primary_key) == [person_table.c.id]
    def test_explicit_props(self):
        # both pk columns mapped under distinct attribute names: no warning
        person_mapper = mapper(Person, person_table)
        mapper(Employee, employee_table, inherits=person_mapper,
                        properties={'pid':person_table.c.id,
                                    'eid':employee_table.c.eid})
        self._do_test(False)
    def test_explicit_composite_pk(self):
        # distinct pk columns combined under one attribute: warns
        person_mapper = mapper(Person, person_table)
        mapper(Employee, employee_table,
                    inherits=person_mapper,
                    properties=dict(id=[employee_table.c.eid, person_table.c.id]),
                    primary_key=[person_table.c.id, employee_table.c.eid])
        assert_raises_message(sa_exc.SAWarning,
                                    r"On mapper Mapper\|Employee\|employees, "
                                    "primary key column 'persons.id' is being "
                                    "combined with distinct primary key column 'employees.eid' "
                                    "in attribute 'id'. Use explicit properties to give "
                                    "each column its own mapped attribute name.",
            self._do_test, True
        )
    def test_explicit_pk(self):
        person_mapper = mapper(Person, person_table)
        mapper(Employee, employee_table, inherits=person_mapper, primary_key=[person_table.c.id])
        self._do_test(False)
    def _do_test(self, composite):
        """Fetch alice/bob by pk; composite=True uses two-column keys."""
        session = create_session()
        query = session.query(Employee)
        if composite:
            alice1 = query.get([1,2])
            bob = query.get([2,3])
            alice2 = query.get([1,2])
        else:
            alice1 = query.get(1)
            bob = query.get(2)
            alice2 = query.get(1)
        assert alice1.name == alice2.name == 'alice'
        assert bob.name == 'bob'
class SyncCompileTest(fixtures.MappedTest):
    """test that syncrules compile properly on custom inherit conds"""

    @classmethod
    def define_tables(cls, metadata):
        # three-level chain a <- b <- c; "c" joins to "b" on a renamed FK column
        global _a_table, _b_table, _c_table
        _a_table = Table('a', metadata,
            Column('id', Integer, primary_key=True, test_needs_autoincrement=True),
            Column('data1', String(128))
        )
        _b_table = Table('b', metadata,
            Column('a_id', Integer, ForeignKey('a.id'), primary_key=True),
            Column('data2', String(128))
        )
        _c_table = Table('c', metadata,
            # Column('a_id', Integer, ForeignKey('b.a_id'), primary_key=True), #works
            Column('b_a_id', Integer, ForeignKey('b.a_id'), primary_key=True),
            Column('data3', String(128))
        )

    def test_joins(self):
        # exercise every combination of implicit (None) and explicit
        # inherit conditions, in both operand orders; tables are wiped
        # between runs so each combination starts clean
        for j1 in (None, _b_table.c.a_id==_a_table.c.id, _a_table.c.id==_b_table.c.a_id):
            for j2 in (None, _b_table.c.a_id==_c_table.c.b_a_id,
                            _c_table.c.b_a_id==_b_table.c.a_id):
                self._do_test(j1, j2)
                for t in reversed(_a_table.metadata.sorted_tables):
                    t.delete().execute().close()

    def _do_test(self, j1, j2):
        """Map A/B/C with the given inherit conditions and round-trip one row of each."""
        class A(object):
            def __init__(self, **kwargs):
                for key, value in list(kwargs.items()):
                    setattr(self, key, value)

        class B(A):
            pass

        class C(B):
            pass

        mapper(A, _a_table)
        mapper(B, _b_table, inherits=A,
                inherit_condition=j1
                )
        mapper(C, _c_table, inherits=B,
                inherit_condition=j2
                )

        session = create_session()
        a = A(data1='a1')
        session.add(a)
        b = B(data1='b1', data2='b2')
        session.add(b)
        c = C(data1='c1', data2='c2', data3='c3')
        session.add(c)
        session.flush()
        session.expunge_all()

        # polymorphic counts: A sees all three, B sees b+c, C only c
        assert len(session.query(A).all()) == 3
        assert len(session.query(B).all()) == 2
        assert len(session.query(C).all()) == 1
class OverrideColKeyTest(fixtures.MappedTest):
    """test overriding of column attributes."""

    @classmethod
    def define_tables(cls, metadata):
        global base, subtable, subtable_two
        base = Table('base', metadata,
            Column('base_id', Integer, primary_key=True, test_needs_autoincrement=True),
            Column('data', String(255)),
            Column('sqlite_fixer', String(10))
        )
        subtable = Table('subtable', metadata,
            Column('base_id', Integer, ForeignKey('base.base_id'), primary_key=True),
            Column('subdata', String(255))
        )
        # subtable_two has its own PK; the FK to base lives on a separate column
        subtable_two = Table('subtable_two', metadata,
            Column('base_id', Integer, primary_key=True),
            Column('fk_base_id', Integer, ForeignKey('base.base_id')),
            Column('subdata', String(255))
        )

    def test_plain(self):
        # control case
        class Base(object):
            pass
        class Sub(Base):
            pass

        mapper(Base, base)
        mapper(Sub, subtable, inherits=Base)

        # Sub gets a "base_id" property using the "base_id"
        # column of both tables.
        eq_(
            class_mapper(Sub).get_property('base_id').columns,
            [subtable.c.base_id, base.c.base_id]
        )

    def test_override_explicit(self):
        # this pattern is what you see when using declarative
        # in particular, here we do a "manual" version of
        # what we'd like the mapper to do.
        class Base(object):
            pass
        class Sub(Base):
            pass

        mapper(Base, base, properties={
            'id':base.c.base_id
        })
        mapper(Sub, subtable, inherits=Base, properties={
            # this is the manual way to do it, is not really
            # possible in declarative
            'id':[base.c.base_id, subtable.c.base_id]
        })

        eq_(
            class_mapper(Sub).get_property('id').columns,
            [base.c.base_id, subtable.c.base_id]
        )

        s1 = Sub()
        s1.id = 10
        sess = create_session()
        sess.add(s1)
        sess.flush()
        assert sess.query(Sub).get(10) is s1

    def test_override_onlyinparent(self):
        # renaming only in the parent leaves Sub with two separate attributes
        class Base(object):
            pass
        class Sub(Base):
            pass

        mapper(Base, base, properties={
            'id':base.c.base_id
        })
        mapper(Sub, subtable, inherits=Base)

        eq_(
            class_mapper(Sub).get_property('id').columns,
            [base.c.base_id]
        )
        eq_(
            class_mapper(Sub).get_property('base_id').columns,
            [subtable.c.base_id]
        )

        s1 = Sub()
        s1.id = 10
        s2 = Sub()
        s2.base_id = 15
        sess = create_session()
        sess.add_all([s1, s2])
        sess.flush()

        # s1 gets '10'
        assert sess.query(Sub).get(10) is s1
        # s2 gets a new id, base_id is overwritten by the ultimate
        # PK col
        assert s2.id == s2.base_id != 15

    def test_override_implicit(self):
        # this is originally [ticket:1111].
        # the pattern here is now disallowed by [ticket:1892]
        class Base(object):
            pass
        class Sub(Base):
            pass

        mapper(Base, base, properties={
            'id':base.c.base_id
        })
        def go():
            mapper(Sub, subtable, inherits=Base, properties={
                'id':subtable.c.base_id
            })
        # Sub mapper compilation needs to detect that "base.c.base_id"
        # is renamed in the inherited mapper as "id", even though
        # it has its own "id" property.  It then generates
        # an exception in 0.7 due to the implicit conflict.
        assert_raises(sa_exc.InvalidRequestError, go)

    def test_pk_fk_different(self):
        # a subtable PK that is not also the FK to the parent triggers a warning
        class Base(object):
            pass
        class Sub(Base):
            pass

        mapper(Base, base)
        def go():
            mapper(Sub, subtable_two, inherits=Base)
        assert_raises_message(
            sa_exc.SAWarning,
            "Implicitly combining column base.base_id with "
            "column subtable_two.base_id under attribute 'base_id'",
            go
        )

    def test_plain_descriptor(self):
        """test that descriptors prevent inheritance from propagating properties to subclasses."""
        class Base(object):
            pass
        class Sub(Base):
            @property
            def data(self):
                return "im the data"

        mapper(Base, base)
        mapper(Sub, subtable, inherits=Base)

        s1 = Sub()
        sess = create_session()
        sess.add(s1)
        sess.flush()
        assert sess.query(Sub).one().data == "im the data"

    def test_custom_descriptor(self):
        """test that descriptors prevent inheritance from propagating properties to subclasses."""
        class MyDesc(object):
            def __get__(self, instance, owner):
                if instance is None:
                    return self
                return "im the data"
        class Base(object):
            pass
        class Sub(Base):
            data = MyDesc()

        mapper(Base, base)
        mapper(Sub, subtable, inherits=Base)

        s1 = Sub()
        sess = create_session()
        sess.add(s1)
        sess.flush()
        assert sess.query(Sub).one().data == "im the data"

    def test_sub_columns_over_base_descriptors(self):
        # a mapped column on the subtable shadows a plain descriptor on Base
        class Base(object):
            @property
            def subdata(self):
                return "this is base"
        class Sub(Base):
            pass

        mapper(Base, base)
        mapper(Sub, subtable, inherits=Base)

        sess = create_session()
        b1 = Base()
        assert b1.subdata == "this is base"
        s1 = Sub()
        s1.subdata = "this is sub"
        assert s1.subdata == "this is sub"
        sess.add_all([s1, b1])
        sess.flush()
        sess.expunge_all()

        assert sess.query(Base).get(b1.base_id).subdata == "this is base"
        assert sess.query(Sub).get(s1.base_id).subdata == "this is sub"

    def test_base_descriptors_over_base_cols(self):
        # a descriptor defined on Base wins over Base's own mapped column
        class Base(object):
            @property
            def data(self):
                return "this is base"
        class Sub(Base):
            pass

        mapper(Base, base)
        mapper(Sub, subtable, inherits=Base)

        sess = create_session()
        b1 = Base()
        assert b1.data == "this is base"
        s1 = Sub()
        assert s1.data == "this is base"
        sess.add_all([s1, b1])
        sess.flush()
        sess.expunge_all()

        assert sess.query(Base).get(b1.base_id).data == "this is base"
        assert sess.query(Sub).get(s1.base_id).data == "this is base"
class OptimizedLoadTest(fixtures.MappedTest):
    """tests for the "optimized load" routine."""

    @classmethod
    def define_tables(cls, metadata):
        # base <- sub <- subsub joined-inheritance chain, plus a composite
        # table; the server_default columns are the ones the optimized
        # load has to fetch after INSERT.
        Table('base', metadata,
            Column('id', Integer, primary_key=True, test_needs_autoincrement=True),
            Column('data', String(50)),
            Column('type', String(50)),
            Column('counter', Integer, server_default="1")
        )
        Table('sub', metadata,
            Column('id', Integer, ForeignKey('base.id'), primary_key=True),
            Column('sub', String(50)),
            Column('subcounter', Integer, server_default="1"),
            Column('subcounter2', Integer, server_default="1")
        )
        Table('subsub', metadata,
            Column('id', Integer, ForeignKey('sub.id'), primary_key=True),
            Column('subsubcounter2', Integer, server_default="1")
        )
        Table('with_comp', metadata,
            Column('id', Integer, ForeignKey('base.id'), primary_key=True),
            Column('a', String(10)),
            Column('b', String(10))
        )

    def test_no_optimize_on_map_to_join(self):
        base, sub = self.tables.base, self.tables.sub

        class Base(fixtures.ComparableEntity):
            pass
        class JoinBase(fixtures.ComparableEntity):
            pass
        class SubJoinBase(JoinBase):
            pass

        mapper(Base, base)
        # JoinBase is mapped against an outer join, not discrete tables
        mapper(JoinBase, base.outerjoin(sub), properties=util.OrderedDict(
            [('id', [base.c.id, sub.c.id]),
            ('counter', [base.c.counter, sub.c.subcounter])])
        )
        mapper(SubJoinBase, inherits=JoinBase)

        sess = Session()
        sess.add(Base(data='data'))
        sess.commit()

        sjb = sess.query(SubJoinBase).one()
        sjb_id = sjb.id
        sess.expire(sjb)

        # this should not use the optimized load,
        # which assumes discrete tables
        def go():
            eq_(sjb.data, 'data')
        self.assert_sql_execution(
            testing.db,
            go,
            CompiledSQL(
                "SELECT base.id AS base_id, sub.id AS sub_id, "
                "base.counter AS base_counter, sub.subcounter AS sub_subcounter, "
                "base.data AS base_data, base.type AS base_type, "
                "sub.sub AS sub_sub, sub.subcounter2 AS sub_subcounter2 "
                "FROM base LEFT OUTER JOIN sub ON base.id = sub.id "
                "WHERE base.id = :param_1",
                {'param_1': sjb_id}
            ),
        )

    def test_optimized_passes(self):
        """test that the 'optimized load' routine doesn't crash when
        a column in the join condition is not available."""
        base, sub = self.tables.base, self.tables.sub

        class Base(fixtures.ComparableEntity):
            pass
        class Sub(Base):
            pass

        mapper(Base, base, polymorphic_on=base.c.type, polymorphic_identity='base')

        # redefine Sub's "id" to favor the "id" col in the subtable.
        # "id" is also part of the primary join condition
        mapper(Sub, sub, inherits=Base,
                        polymorphic_identity='sub',
                        properties={'id':[sub.c.id, base.c.id]})
        sess = sessionmaker()()
        s1 = Sub(data='s1data', sub='s1sub')
        sess.add(s1)
        sess.commit()
        sess.expunge_all()

        # load s1 via Base.  s1.id won't populate since it's relative to
        # the "sub" table.  The optimized load kicks in and tries to
        # generate on the primary join, but cannot since "id" is itself unloaded.
        # the optimized load needs to return "None" so regular full-row loading proceeds
        s1 = sess.query(Base).first()
        assert s1.sub == 's1sub'

    def test_column_expression(self):
        # a column_property over subtable columns loads via the optimized path
        base, sub = self.tables.base, self.tables.sub

        class Base(fixtures.ComparableEntity):
            pass
        class Sub(Base):
            pass

        mapper(Base, base, polymorphic_on=base.c.type, polymorphic_identity='base')
        mapper(Sub, sub, inherits=Base, polymorphic_identity='sub', properties={
            'concat': column_property(sub.c.sub + "|" + sub.c.sub)
        })
        sess = sessionmaker()()
        s1 = Sub(data='s1data', sub='s1sub')
        sess.add(s1)
        sess.commit()
        sess.expunge_all()
        s1 = sess.query(Base).first()
        assert s1.concat == 's1sub|s1sub'

    def test_column_expression_joined(self):
        # the column_property spans base AND sub columns here
        base, sub = self.tables.base, self.tables.sub

        class Base(fixtures.ComparableEntity):
            pass
        class Sub(Base):
            pass

        mapper(Base, base, polymorphic_on=base.c.type, polymorphic_identity='base')
        mapper(Sub, sub, inherits=Base, polymorphic_identity='sub', properties={
            'concat': column_property(base.c.data + "|" + sub.c.sub)
        })
        sess = sessionmaker()()
        s1 = Sub(data='s1data', sub='s1sub')
        s2 = Sub(data='s2data', sub='s2sub')
        s3 = Sub(data='s3data', sub='s3sub')
        sess.add_all([s1, s2, s3])
        sess.commit()
        sess.expunge_all()
        # query a bunch of rows to ensure there's no cartesian
        # product against "base" occurring, it is in fact
        # detecting that "base" needs to be in the join
        # criterion
        eq_(
            sess.query(Base).order_by(Base.id).all(),
            [
                Sub(data='s1data', sub='s1sub', concat='s1data|s1sub'),
                Sub(data='s2data', sub='s2sub', concat='s2data|s2sub'),
                Sub(data='s3data', sub='s3sub', concat='s3data|s3sub')
            ]
        )

    def test_composite_column_joined(self):
        # composite() attributes also survive the optimized load
        base, with_comp = self.tables.base, self.tables.with_comp

        class Base(fixtures.BasicEntity):
            pass
        class WithComp(Base):
            pass
        class Comp(object):
            def __init__(self, a, b):
                self.a = a
                self.b = b
            def __composite_values__(self):
                return self.a, self.b
            def __eq__(self, other):
                return (self.a == other.a) and (self.b == other.b)

        mapper(Base, base, polymorphic_on=base.c.type, polymorphic_identity='base')
        mapper(WithComp, with_comp, inherits=Base, polymorphic_identity='wc', properties={
            'comp': composite(Comp, with_comp.c.a, with_comp.c.b)
        })
        sess = sessionmaker()()
        s1 = WithComp(data='s1data', comp=Comp('ham', 'cheese'))
        s2 = WithComp(data='s2data', comp=Comp('bacon', 'eggs'))
        sess.add_all([s1, s2])
        sess.commit()
        sess.expunge_all()
        s1test, s2test = sess.query(Base).order_by(Base.id).all()
        assert s1test.comp
        assert s2test.comp
        eq_(s1test.comp, Comp('ham', 'cheese'))
        eq_(s2test.comp, Comp('bacon', 'eggs'))

    def test_load_expired_on_pending(self):
        # accessing an expired server-default attribute on a pending
        # object triggers the optimized two-table SELECT
        base, sub = self.tables.base, self.tables.sub

        class Base(fixtures.BasicEntity):
            pass
        class Sub(Base):
            pass

        mapper(Base, base, polymorphic_on=base.c.type, polymorphic_identity='base')
        mapper(Sub, sub, inherits=Base, polymorphic_identity='sub')
        sess = Session()
        s1 = Sub(data='s1')
        sess.add(s1)
        self.assert_sql_execution(
            testing.db,
            sess.flush,
            CompiledSQL(
                "INSERT INTO base (data, type) VALUES (:data, :type)",
                [{'data':'s1','type':'sub'}]
            ),
            CompiledSQL(
                "INSERT INTO sub (id, sub) VALUES (:id, :sub)",
                lambda ctx:{'id':s1.id, 'sub':None}
            ),
        )
        def go():
            eq_( s1.subcounter2, 1 )
        self.assert_sql_execution(
            testing.db,
            go,
            CompiledSQL(
                "SELECT base.counter AS base_counter, sub.subcounter AS sub_subcounter, "
                "sub.subcounter2 AS sub_subcounter2 FROM base JOIN sub "
                "ON base.id = sub.id WHERE base.id = :param_1",
                lambda ctx:{'param_1': s1.id}
            ),
        )

    def test_dont_generate_on_none(self):
        base, sub = self.tables.base, self.tables.sub

        class Base(fixtures.BasicEntity):
            pass
        class Sub(Base):
            pass

        mapper(Base, base, polymorphic_on=base.c.type,
                    polymorphic_identity='base')
        m = mapper(Sub, sub, inherits=Base, polymorphic_identity='sub')

        s1 = Sub()
        assert m._optimized_get_statement(attributes.instance_state(s1),
                                ['subcounter2']) is None

        # loads s1.id as None
        eq_(s1.id, None)

        # this now will come up with a value of None for id - should reject
        assert m._optimized_get_statement(attributes.instance_state(s1),
                                ['subcounter2']) is None

        s1.id = 1
        attributes.instance_state(s1)._commit_all(s1.__dict__, None)
        assert m._optimized_get_statement(attributes.instance_state(s1),
                                ['subcounter2']) is not None

    def test_load_expired_on_pending_twolevel(self):
        # same as test_load_expired_on_pending but across a three-level
        # chain; the SELECT may list the two tables in either order
        base, sub, subsub = (self.tables.base,
                                self.tables.sub,
                                self.tables.subsub)

        class Base(fixtures.BasicEntity):
            pass
        class Sub(Base):
            pass
        class SubSub(Sub):
            pass

        mapper(Base, base, polymorphic_on=base.c.type,
                    polymorphic_identity='base')
        mapper(Sub, sub, inherits=Base, polymorphic_identity='sub')
        mapper(SubSub, subsub, inherits=Sub, polymorphic_identity='subsub')
        sess = Session()
        s1 = SubSub(data='s1', counter=1, subcounter=2)
        sess.add(s1)
        self.assert_sql_execution(
            testing.db,
            sess.flush,
            CompiledSQL(
                "INSERT INTO base (data, type, counter) VALUES "
                "(:data, :type, :counter)",
                [{'data':'s1','type':'subsub','counter':1}]
            ),
            CompiledSQL(
                "INSERT INTO sub (id, sub, subcounter) VALUES "
                "(:id, :sub, :subcounter)",
                lambda ctx:[{'subcounter': 2, 'sub': None, 'id': s1.id}]
            ),
            CompiledSQL(
                "INSERT INTO subsub (id) VALUES (:id)",
                lambda ctx:{'id':s1.id}
            ),
        )
        def go():
            eq_(
                s1.subcounter2, 1
            )
        self.assert_sql_execution(
            testing.db,
            go,
            Or(
                CompiledSQL(
                    "SELECT subsub.subsubcounter2 AS subsub_subsubcounter2, "
                    "sub.subcounter2 AS sub_subcounter2 FROM subsub, sub "
                    "WHERE :param_1 = sub.id AND sub.id = subsub.id",
                    lambda ctx: {'param_1': s1.id}
                ),
                CompiledSQL(
                    "SELECT sub.subcounter2 AS sub_subcounter2, "
                    "subsub.subsubcounter2 AS subsub_subsubcounter2 "
                    "FROM sub, subsub "
                    "WHERE :param_1 = sub.id AND sub.id = subsub.id",
                    lambda ctx: {'param_1': s1.id}
                ),
            )
        )
class TransientInheritingGCTest(fixtures.TestBase):
    """Verify that a transient mapped subclass is garbage collected
    once all references to it are dropped (checked via
    A.__subclasses__()); depends on CPython refcounting."""

    __requires__ = ('cpython', 'no_coverage')

    def _fixture(self):
        # build a fresh declarative Base with one mapped class A;
        # A is stashed on self so tests can subclass it
        Base = declarative_base()

        class A(Base):
            __tablename__ = 'a'
            id = Column(Integer, primary_key=True,
                                test_needs_autoincrement=True)
            data = Column(String(10))
        self.A = A
        return Base

    def setUp(self):
        self.Base = self._fixture()

    def tearDown(self):
        self.Base.metadata.drop_all(testing.db)
        #clear_mappers()
        self.Base = None

    def _do_test(self, go):
        """Create the subclass via go(), round-trip a row, then drop all
        references and assert the subclass has been collected."""
        B = go()
        self.Base.metadata.create_all(testing.db)
        sess = Session(testing.db)
        sess.add(B(data='some b'))
        sess.commit()

        b1 = sess.query(B).one()
        assert isinstance(b1, B)
        sess.close()
        del sess
        del b1
        del B
        gc_collect()
        eq_(
            len(self.A.__subclasses__()),
            0)

    def test_single(self):
        def go():
            class B(self.A):
                pass
            return B
        self._do_test(go)

    @testing.fails_if(lambda: True,
            "not supported for joined inh right now.")
    def test_joined(self):
        def go():
            class B(self.A):
                __tablename__ = 'b'
                id = Column(Integer, ForeignKey('a.id'),
                        primary_key=True)
            return B
        self._do_test(go)
class NoPKOnSubTableWarningTest(fixtures.TestBase):
    """Warning behavior when a joined-inheritance subtable defines no
    primary key columns of its own."""

    def _fixture(self):
        # parent has a real PK; child only has an un-keyed FK to it
        md = MetaData()
        parent_t = Table(
            'parent', md,
            Column('id', Integer, primary_key=True),
        )
        child_t = Table(
            'child', md,
            Column('id', Integer, ForeignKey('parent.id')),
        )
        return parent_t, child_t

    def tearDown(self):
        clear_mappers()

    def test_warning_on_sub(self):
        parent_t, child_t = self._fixture()

        class P(object):
            pass

        class C(P):
            pass

        mapper(P, parent_t)
        # mapping C against the keyless child table must emit SAWarning
        assert_raises_message(
            sa_exc.SAWarning,
            "Could not assemble any primary keys for locally mapped "
            "table 'child' - no rows will be persisted in this Table.",
            mapper, C, child_t, inherits=P
        )

    def test_no_warning_with_explicit(self):
        parent_t, child_t = self._fixture()

        class P(object):
            pass

        class C(P):
            pass

        mapper(P, parent_t)
        # supplying primary_key explicitly silences the warning
        child_mapper = mapper(C, child_t, inherits=P,
                              primary_key=[parent_t.c.id])
        eq_(child_mapper.primary_key, (parent_t.c.id,))
class InhCondTest(fixtures.TestBase):
    """Tests for determination of the inherit condition, in particular
    tolerance of unrelated unresolvable ForeignKeys versus hard errors
    when the inheriting FK itself cannot resolve."""

    def test_inh_cond_nonexistent_table_unrelated(self):
        metadata = MetaData()
        base_table = Table("base", metadata,
            Column("id", Integer, primary_key=True)
        )
        derived_table = Table("derived", metadata,
            Column("id", Integer, ForeignKey("base.id"), primary_key=True),
            Column("owner_id", Integer, ForeignKey("owner.owner_id"))
        )

        class Base(object):
            pass

        class Derived(Base):
            pass

        mapper(Base, base_table)
        # succeeds, despite "owner" table not configured yet
        m2 = mapper(Derived, derived_table,
                inherits=Base)

        assert m2.inherit_condition.compare(
                    base_table.c.id==derived_table.c.id
                )

    def test_inh_cond_nonexistent_col_unrelated(self):
        m = MetaData()
        base_table = Table("base", m,
            Column("id", Integer, primary_key=True)
        )
        derived_table = Table("derived", m,
            Column("id", Integer, ForeignKey('base.id'),
                primary_key=True),
            Column('order_id', Integer, ForeignKey('order.foo'))
        )
        order_table = Table('order', m, Column('id', Integer, primary_key=True))

        class Base(object):
            pass

        class Derived(Base):
            pass

        mapper(Base, base_table)

        # succeeds, despite "order.foo" doesn't exist
        m2 = mapper(Derived, derived_table, inherits=Base)
        assert m2.inherit_condition.compare(
                    base_table.c.id==derived_table.c.id
                )

    def test_inh_cond_no_fk(self):
        # no FK at all between the two tables: hard error
        metadata = MetaData()
        base_table = Table("base", metadata,
            Column("id", Integer, primary_key=True)
        )
        derived_table = Table("derived", metadata,
            Column("id", Integer, primary_key=True),
        )

        class Base(object):
            pass

        class Derived(Base):
            pass

        mapper(Base, base_table)
        assert_raises_message(
            sa_exc.ArgumentError,
            "Can't find any foreign key relationships between "
            "'base' and 'derived'.",
            mapper,
            Derived, derived_table, inherits=Base
        )

    def test_inh_cond_nonexistent_table_related(self):
        m1 = MetaData()
        m2 = MetaData()
        base_table = Table("base", m1,
            Column("id", Integer, primary_key=True)
        )
        derived_table = Table("derived", m2,
            Column("id", Integer, ForeignKey('base.id'),
                primary_key=True),
        )

        class Base(object):
            pass

        class Derived(Base):
            pass

        mapper(Base, base_table)

        # the ForeignKey def is correct but there are two
        # different metadatas.  Would like the traditional
        # "noreferencedtable" error to raise so that the
        # user is directed towards the FK definition in question.
        assert_raises_message(
            sa_exc.NoReferencedTableError,
            "Foreign key associated with column 'derived.id' "
            "could not find table 'base' with which to generate "
            "a foreign key to target column 'id'",
            mapper,
            Derived, derived_table, inherits=Base
        )

    def test_inh_cond_nonexistent_col_related(self):
        # FK references a column missing from the (present) target table
        m = MetaData()
        base_table = Table("base", m,
            Column("id", Integer, primary_key=True)
        )
        derived_table = Table("derived", m,
            Column("id", Integer, ForeignKey('base.q'),
                primary_key=True),
        )

        class Base(object):
            pass

        class Derived(Base):
            pass

        mapper(Base, base_table)

        assert_raises_message(
            sa_exc.NoReferencedColumnError,
            "Could not initialize target column for ForeignKey "
            "'base.q' on table "
            "'derived': table 'base' has no column named 'q'",
            mapper,
            Derived, derived_table, inherits=Base
        )
class PKDiscriminatorTest(fixtures.MappedTest):
    """The polymorphic discriminator column may itself be part of the
    primary key ('children.type' here)."""

    @classmethod
    def define_tables(cls, metadata):
        parents = Table('parents', metadata,
                    Column('id', Integer, primary_key=True,
                            test_needs_autoincrement=True),
                    Column('name', String(60)))

        children = Table('children', metadata,
                    Column('id', Integer, ForeignKey('parents.id'),
                        primary_key=True),
                    Column('type', Integer,primary_key=True),
                    Column('name', String(60)))

    def test_pk_as_discriminator(self):
        parents, children = self.tables.parents, self.tables.children

        class Parent(object):
            def __init__(self, name=None):
                self.name = name

        class Child(object):
            def __init__(self, name=None):
                self.name = name

        class A(Child):
            pass

        mapper(Parent, parents, properties={
            'children': relationship(Child, backref='parent'),
        })
        mapper(Child, children, polymorphic_on=children.c.type,
            polymorphic_identity=1)

        # A is single-table inheritance on "children"
        mapper(A, inherits=Child, polymorphic_identity=2)

        s = create_session()
        p = Parent('p1')
        a = A('a1')
        p.children.append(a)
        s.add(p)
        s.flush()

        assert a.id
        assert a.type == 2

        # updates must work even though part of the PK is the discriminator
        p.name='p1new'
        a.name='a1new'
        s.flush()

        s.expire_all()
        assert a.name=='a1new'
        assert p.name=='p1new'
class NoPolyIdentInMiddleTest(fixtures.MappedTest):
    """Single-table hierarchy A <- B <- (C, D), A <- E, where the
    intermediate class B declares no polymorphic_identity of its own."""

    @classmethod
    def define_tables(cls, metadata):
        Table('base', metadata,
            Column('id', Integer, primary_key=True,
                    test_needs_autoincrement=True),
            Column('type', String(50), nullable=False),
        )

    @classmethod
    def setup_classes(cls):
        class A(cls.Comparable):
            pass
        class B(A):
            pass
        class C(B):
            pass
        class D(B):
            pass
        class E(A):
            pass

    @classmethod
    def setup_mappers(cls):
        A, C, B, E, D, base = (cls.classes.A,
                                cls.classes.C,
                                cls.classes.B,
                                cls.classes.E,
                                cls.classes.D,
                                cls.tables.base)
        mapper(A, base, polymorphic_on=base.c.type)
        # B intentionally has no polymorphic_identity
        mapper(B, inherits=A, )
        mapper(C, inherits=B, polymorphic_identity='c')
        mapper(D, inherits=B, polymorphic_identity='d')
        mapper(E, inherits=A, polymorphic_identity='e')

    def test_load_from_middle(self):
        # querying B still yields the concrete C instance
        C, B = self.classes.C, self.classes.B
        s = Session()
        s.add(C())
        o = s.query(B).first()
        eq_(o.type, 'c')
        assert isinstance(o, C)

    def test_load_from_base(self):
        A, C = self.classes.A, self.classes.C
        s = Session()
        s.add(C())
        o = s.query(A).first()
        eq_(o.type, 'c')
        assert isinstance(o, C)

    def test_discriminator(self):
        # the discriminator column propagates down from A
        C, B, base = (self.classes.C,
                        self.classes.B,
                        self.tables.base)
        assert class_mapper(B).polymorphic_on is base.c.type
        assert class_mapper(C).polymorphic_on is base.c.type

    def test_load_multiple_from_middle(self):
        # a query against B filters to B's subtree only (excludes E)
        C, B, E, D, base = (self.classes.C,
                            self.classes.B,
                            self.classes.E,
                            self.classes.D,
                            self.tables.base)
        s = Session()
        s.add_all([C(), D(), E()])
        eq_(
            s.query(B).order_by(base.c.type).all(),
            [C(), D()]
        )
class DeleteOrphanTest(fixtures.MappedTest):
    """Test the fairly obvious, that an error is raised
    when attempting to insert an orphan.

    Previous SQLA versions would check this constraint
    in memory which is the original rationale for this test.
    """

    @classmethod
    def define_tables(cls, metadata):
        global single, parent
        # parent_id is NOT NULL: the database itself rejects the orphan
        single = Table('single', metadata,
            Column('id', Integer, primary_key=True, test_needs_autoincrement=True),
            Column('type', String(50), nullable=False),
            Column('data', String(50)),
            Column('parent_id', Integer, ForeignKey('parent.id'), nullable=False),
            )

        parent = Table('parent', metadata,
                Column('id', Integer, primary_key=True, test_needs_autoincrement=True),
                Column('data', String(50))
            )

    def test_orphan_message(self):
        class Base(fixtures.BasicEntity):
            pass

        class SubClass(Base):
            pass

        class Parent(fixtures.BasicEntity):
            pass

        mapper(Base, single, polymorphic_on=single.c.type, polymorphic_identity='base')
        mapper(SubClass, inherits=Base, polymorphic_identity='sub')
        mapper(Parent, parent, properties={
            'related':relationship(Base, cascade="all, delete-orphan")
        })

        sess = create_session()
        # flushing the SubClass with no Parent fails at the DB level
        s1 = SubClass(data='s1')
        sess.add(s1)
        assert_raises(sa_exc.DBAPIError, sess.flush)
class PolymorphicUnionTest(fixtures.TestBase, testing.AssertsCompiledSQL):
    """SQL-compilation tests for the polymorphic_union() helper:
    missing columns are padded with NULL (cast or not), and an optional
    discriminator column is appended."""

    __dialect__ = 'default'

    def _fixture(self):
        # three lightweight table constructs with partially overlapping columns
        t1 = table('t1', column('c1', Integer),
                    column('c2', Integer),
                    column('c3', Integer))
        t2 = table('t2', column('c1', Integer), column('c2', Integer),
                    column('c3', Integer),
                    column('c4', Integer))
        t3 = table('t3', column('c1', Integer),
                    column('c3', Integer),
                    column('c5', Integer))
        return t1, t2, t3

    def test_type_col_present(self):
        t1, t2, t3 = self._fixture()
        self.assert_compile(
            polymorphic_union(
                util.OrderedDict([("a", t1), ("b", t2), ("c", t3)]),
                'q1'
            ),
            "SELECT t1.c1, t1.c2, t1.c3, CAST(NULL AS INTEGER) AS c4, "
            "CAST(NULL AS INTEGER) AS c5, 'a' AS q1 FROM t1 UNION ALL "
            "SELECT t2.c1, t2.c2, t2.c3, t2.c4, CAST(NULL AS INTEGER) AS c5, "
            "'b' AS q1 FROM t2 UNION ALL SELECT t3.c1, "
            "CAST(NULL AS INTEGER) AS c2, t3.c3, CAST(NULL AS INTEGER) AS c4, "
            "t3.c5, 'c' AS q1 FROM t3"
        )

    def test_type_col_non_present(self):
        # passing None omits the discriminator column entirely
        t1, t2, t3 = self._fixture()
        self.assert_compile(
            polymorphic_union(
                util.OrderedDict([("a", t1), ("b", t2), ("c", t3)]),
                None
            ),
            "SELECT t1.c1, t1.c2, t1.c3, CAST(NULL AS INTEGER) AS c4, "
            "CAST(NULL AS INTEGER) AS c5 FROM t1 UNION ALL SELECT t2.c1, "
            "t2.c2, t2.c3, t2.c4, CAST(NULL AS INTEGER) AS c5 FROM t2 "
            "UNION ALL SELECT t3.c1, CAST(NULL AS INTEGER) AS c2, t3.c3, "
            "CAST(NULL AS INTEGER) AS c4, t3.c5 FROM t3"
        )

    def test_no_cast_null(self):
        # cast_nulls=False emits bare NULL instead of CAST(NULL AS ...)
        t1, t2, t3 = self._fixture()
        self.assert_compile(
            polymorphic_union(
                util.OrderedDict([("a", t1), ("b", t2), ("c", t3)]),
                'q1', cast_nulls=False
            ),
            "SELECT t1.c1, t1.c2, t1.c3, NULL AS c4, NULL AS c5, 'a' AS q1 "
            "FROM t1 UNION ALL SELECT t2.c1, t2.c2, t2.c3, t2.c4, NULL AS c5, "
            "'b' AS q1 FROM t2 UNION ALL SELECT t3.c1, NULL AS c2, t3.c3, "
            "NULL AS c4, t3.c5, 'c' AS q1 FROM t3"
        )
class NameConflictTest(fixtures.MappedTest):
    """A joined subclass may map a column ('content_type') whose name
    collides with naming used by the base 'content' table."""

    @classmethod
    def define_tables(cls, metadata):
        Table(
            'content', metadata,
            Column('id', Integer, primary_key=True,
                   test_needs_autoincrement=True),
            Column('type', String(30)),
        )
        Table(
            'foo', metadata,
            Column('id', Integer, ForeignKey('content.id'),
                   primary_key=True),
            Column('content_type', String(30)),
        )

    def test_name_conflict(self):
        class Content(object):
            pass

        class Foo(Content):
            pass

        content_t = self.tables.content
        foo_t = self.tables.foo
        mapper(Content, content_t, polymorphic_on=content_t.c.type)
        mapper(Foo, foo_t, inherits=Content, polymorphic_identity='foo')

        session = create_session()
        item = Foo()
        item.content_type = 'bar'
        session.add(item)
        session.flush()
        persisted_id = item.id
        session.expunge_all()

        # round-trip: the conflicting column still loads correctly
        assert session.query(Content).get(persisted_id).content_type == 'bar'
| robin900/sqlalchemy | test/orm/inheritance/test_basic.py | Python | mit | 94,636 |
# NOTE(review): Python 2 demo script (print statements) exercising the
# legacy PyCrypto package; PyCrypto is unmaintained — modern code should
# use `cryptography` or PyCryptodome instead.
#
#Encryption algorithms transform their input data, or plaintext, in some way that
#is dependent on a variable key, producing ciphertext. This transformation can
#easily be reversed, if (and, hopefully, only if) one knows the key.
#The key can be varied by the user or application and chosen from some very large space of possible keys.
#Private key ciphers: the same key is used for both encryption and decryption, so all correspondents must know it.
#Block ciphers take multibyte inputs of a fixed size (frequently 8 or 16 bytes long) and encrypt them
#Electronic Code Book (ECB mode)
#Cipher Block Chaining (CBC mode)
#Cipher FeedBack (CFB mode)
#PGP mode
#           Cipher              Key Size/Block Size
#           ARC2                Variable/8 bytes
#           Blowfish            Variable/8 bytes
#           CAST                Variable/8 bytes
#           DES                 8 bytes/8 bytes
#           DES3 (Triple DES)   16 bytes/8 bytes
#           IDEA                16 bytes/8 bytes
#           RC5                 Variable/8 bytes
#Stream ciphers encrypt data bit-by-bit; practically, stream ciphers work on a character-by-character basis.
#Stream ciphers use exactly the same interface as block ciphers, with a block length that will always be 1;
#this is how block and stream ciphers can be distinguished. The only feedback mode available for stream ciphers is ECB mode.
#           Cipher              Key Size
#           ARC4(Alleged RC4)   Variable
#An all-or-nothing package transformation is one in which some text is transformed into message blocks, such that all blocks must be obtained before the reverse transformation can be applied. Thus, if any blocks are corrupted or lost, the original message cannot be reproduced. An all-or-nothing package transformation is not encryption, although a block cipher algorithm is used. The encryption key is randomly generated and is extractable from the message blocks.
#Winnowing and chaffing is a technique for enhancing privacy without requiring strong encryption. In short, the technique takes a set of authenticated message blocks (the wheat) and adds a number of chaff blocks which have randomly chosen data and MAC(message authentication code) fields. This means that to an adversary, the chaff blocks look as valid as the wheat blocks, and so the authentication would have to be performed on every block. By tailoring the number of chaff blocks added to the message, the sender can make breaking the message computationally infeasible. There are many other interesting properties of the winnow/chaff technique.

# --- DES block-cipher round trip (ECB mode) --------------------------------
from Crypto.Cipher import DES
print DES.block_size, DES.key_size
obj=DES.new('abcdefgh', DES.MODE_ECB)
plain="Guido van Rossum is a space alien."
# plaintext is 34 bytes; pad with 6 'X' bytes to reach a multiple of the
# 8-byte DES block size (ECB requires exact block multiples)
ciph=obj.encrypt(plain+'XXXXXX')
print ciph
print obj.decrypt(ciph)

#Public key cryptography
#In a public key system, there are two different keys: one for encryption and one for decryption. The encryption key can be made public by listing it in a directory or mailing it to your correspondent, while you keep the decryption key secret. Your correspondent then sends you data encrypted with your public key, and you use the private key to decrypt it.
#The currently available public key algorithms are listed in the following table:
#           Algorithm           Capabilities
#           RSA                 Encryption, authentication/signatures
#           ElGamal             Encryption, authentication/signatures
#           DSA                 Authentication/signatures
#           qNEW                Authentication/signatures
#An example of using the RSA module to sign a message:
from Crypto.Hash import MD5
from Crypto.PublicKey import RSA
from Crypto.Util.randpool import RandomPool

# sign the MD5 digest of the plaintext with a freshly generated RSA key
# (384 bits is far too small for real use; it keeps the demo fast)
RSAkey=RSA.generate(384, RandomPool().get_bytes)   # This will take a while...
hash=MD5.new(plain).digest()
signature=RSAkey.sign(hash, "")
print signature   # Print what an RSA sig looks like--you don't really care.
RSAkey.verify(hash, signature)     # This sig will check out
| jingriver/stocktracker | pytoolkit/cryptography/testCipher.py | Python | mit | 3,810 |
from test import test_support
from test.test_support import bigmemtest, _1G, _2G, _4G, precisionbigmemtest
import unittest
import operator
import string
import sys
# Bigmem testing house rules:
#
# - Try not to allocate too many large objects. It's okay to rely on
# refcounting semantics, but don't forget that 's = create_largestring()'
# doesn't release the old 's' (if it exists) until well after its new
# value has been created. Use 'del s' before the create_largestring call.
#
# - Do *not* compare large objects using assertEquals or similar. It's a
# lengthy operation and the error message will be utterly useless due to
# its size. To make sure whether a result has the right contents, better
# to use the strip or count methods, or compare meaningful slices.
#
# - Don't forget to test for large indices, offsets and results and such,
# in addition to large sizes.
#
# - When repeating an object (say, a substring, or a small list) to create
# a large object, make the subobject of a length that is not a power of
# 2. That way, int-wrapping problems are more easily detected.
#
# - While the bigmemtest decorator speaks of 'minsize', all tests will
# actually be called with a much smaller number too, in the normal
# test run (5Kb currently.) This is so the tests themselves get frequent
# testing. Consequently, always make all large allocations based on the
# passed-in 'size', and don't rely on the size being very large. Also,
# memuse-per-size should remain sane (less than a few thousand); if your
# test uses more, adjust 'size' upward, instead.
class StrTest(unittest.TestCase):
    """Exercise str and unicode methods on strings in the multi-gigabyte
    range, checking 64-bit index/length handling.

    Each test receives its working ``size`` from the bigmem decorators;
    the decorators also run the tests at a small size in normal runs.
    NOTE(review): uses the pre-2.7 unittest spellings (assertEquals,
    failUnless, failIf), consistent with the rest of this Python 2 suite.
    """
    @bigmemtest(minsize=_2G, memuse=2)
    def test_capitalize(self, size):
        SUBSTR = ' abc def ghi'
        s = '-' * size + SUBSTR
        caps = s.capitalize()
        self.assertEquals(caps[-len(SUBSTR):],
                          SUBSTR.capitalize())
        self.assertEquals(caps.lstrip('-'), SUBSTR)
    @bigmemtest(minsize=_2G + 10, memuse=1)
    def test_center(self, size):
        SUBSTR = ' abc def ghi'
        s = SUBSTR.center(size)
        self.assertEquals(len(s), size)
        lpadsize = rpadsize = (len(s) - len(SUBSTR)) // 2
        if len(s) % 2:
            lpadsize += 1
        self.assertEquals(s[lpadsize:-rpadsize], SUBSTR)
        self.assertEquals(s.strip(), SUBSTR.strip())
    @precisionbigmemtest(size=_2G - 1, memuse=1)
    def test_center_unicode(self, size):
        SUBSTR = u' abc def ghi'
        try:
            s = SUBSTR.center(size)
        except OverflowError:
            pass # acceptable on 32-bit
        else:
            self.assertEquals(len(s), size)
            lpadsize = rpadsize = (len(s) - len(SUBSTR)) // 2
            if len(s) % 2:
                lpadsize += 1
            self.assertEquals(s[lpadsize:-rpadsize], SUBSTR)
            self.assertEquals(s.strip(), SUBSTR.strip())
            del s
    @bigmemtest(minsize=_2G, memuse=2)
    def test_count(self, size):
        SUBSTR = ' abc def ghi'
        s = '.' * size + SUBSTR
        self.assertEquals(s.count('.'), size)
        s += '.'
        self.assertEquals(s.count('.'), size + 1)
        self.assertEquals(s.count(' '), 3)
        self.assertEquals(s.count('i'), 1)
        self.assertEquals(s.count('j'), 0)
    @bigmemtest(minsize=_2G + 2, memuse=3)
    def test_decode(self, size):
        s = '.' * size
        self.assertEquals(len(s.decode('utf-8')), size)
    def basic_encode_test(self, size, enc, c=u'.', expectedsize=None):
        # Shared helper: encode `size` repetitions of `c` and check the
        # encoded length (defaults to `size` for 1-byte-per-char codecs).
        if expectedsize is None:
            expectedsize = size
        s = c * size
        self.assertEquals(len(s.encode(enc)), expectedsize)
    @bigmemtest(minsize=_2G + 2, memuse=3)
    def test_encode(self, size):
        return self.basic_encode_test(size, 'utf-8')
    @precisionbigmemtest(size=_4G // 6 + 2, memuse=2)
    def test_encode_raw_unicode_escape(self, size):
        try:
            return self.basic_encode_test(size, 'raw_unicode_escape')
        except MemoryError:
            pass # acceptable on 32-bit
    @precisionbigmemtest(size=_4G // 5 + 70, memuse=3)
    def test_encode_utf7(self, size):
        try:
            return self.basic_encode_test(size, 'utf7')
        except MemoryError:
            pass # acceptable on 32-bit
    @precisionbigmemtest(size=_4G // 4 + 5, memuse=6)
    def test_encode_utf32(self, size):
        try:
            return self.basic_encode_test(size, 'utf32', expectedsize=4*size+4)
        except MemoryError:
            pass # acceptable on 32-bit
    @precisionbigmemtest(size=_2G-1, memuse=2)
    def test_decodeascii(self, size):
        return self.basic_encode_test(size, 'ascii', c='A')
    @precisionbigmemtest(size=_4G // 5, memuse=6+2)
    def test_unicode_repr_oflw(self, size):
        try:
            s = u"\uAAAA"*size
            r = repr(s)
        except MemoryError:
            pass # acceptable on 32-bit
        else:
            self.failUnless(s == eval(r))
    @bigmemtest(minsize=_2G, memuse=2)
    def test_endswith(self, size):
        SUBSTR = ' abc def ghi'
        s = '-' * size + SUBSTR
        self.failUnless(s.endswith(SUBSTR))
        self.failUnless(s.endswith(s))
        s2 = '...' + s
        self.failUnless(s2.endswith(s))
        self.failIf(s.endswith('a' + SUBSTR))
        self.failIf(SUBSTR.endswith(s))
    @bigmemtest(minsize=_2G + 10, memuse=2)
    def test_expandtabs(self, size):
        s = '-' * size
        tabsize = 8
        self.assertEquals(s.expandtabs(), s)
        del s
        slen, remainder = divmod(size, tabsize)
        s = ' \t' * slen
        s = s.expandtabs(tabsize)
        self.assertEquals(len(s), size - remainder)
        self.assertEquals(len(s.strip(' ')), 0)
    @bigmemtest(minsize=_2G, memuse=2)
    def test_find(self, size):
        SUBSTR = ' abc def ghi'
        sublen = len(SUBSTR)
        s = ''.join([SUBSTR, '-' * size, SUBSTR])
        self.assertEquals(s.find(' '), 0)
        self.assertEquals(s.find(SUBSTR), 0)
        self.assertEquals(s.find(' ', sublen), sublen + size)
        self.assertEquals(s.find(SUBSTR, len(SUBSTR)), sublen + size)
        self.assertEquals(s.find('i'), SUBSTR.find('i'))
        self.assertEquals(s.find('i', sublen),
                          sublen + size + SUBSTR.find('i'))
        self.assertEquals(s.find('i', size),
                          sublen + size + SUBSTR.find('i'))
        self.assertEquals(s.find('j'), -1)
    @bigmemtest(minsize=_2G, memuse=2)
    def test_index(self, size):
        SUBSTR = ' abc def ghi'
        sublen = len(SUBSTR)
        s = ''.join([SUBSTR, '-' * size, SUBSTR])
        self.assertEquals(s.index(' '), 0)
        self.assertEquals(s.index(SUBSTR), 0)
        self.assertEquals(s.index(' ', sublen), sublen + size)
        self.assertEquals(s.index(SUBSTR, sublen), sublen + size)
        self.assertEquals(s.index('i'), SUBSTR.index('i'))
        self.assertEquals(s.index('i', sublen),
                          sublen + size + SUBSTR.index('i'))
        self.assertEquals(s.index('i', size),
                          sublen + size + SUBSTR.index('i'))
        self.assertRaises(ValueError, s.index, 'j')
    @bigmemtest(minsize=_2G, memuse=2)
    def test_isalnum(self, size):
        SUBSTR = '123456'
        s = 'a' * size + SUBSTR
        self.failUnless(s.isalnum())
        s += '.'
        self.failIf(s.isalnum())
    @bigmemtest(minsize=_2G, memuse=2)
    def test_isalpha(self, size):
        SUBSTR = 'zzzzzzz'
        s = 'a' * size + SUBSTR
        self.failUnless(s.isalpha())
        s += '.'
        self.failIf(s.isalpha())
    @bigmemtest(minsize=_2G, memuse=2)
    def test_isdigit(self, size):
        SUBSTR = '123456'
        s = '9' * size + SUBSTR
        self.failUnless(s.isdigit())
        s += 'z'
        self.failIf(s.isdigit())
    @bigmemtest(minsize=_2G, memuse=2)
    def test_islower(self, size):
        chars = ''.join([ chr(c) for c in range(255) if not chr(c).isupper() ])
        repeats = size // len(chars) + 2
        s = chars * repeats
        self.failUnless(s.islower())
        s += 'A'
        self.failIf(s.islower())
    @bigmemtest(minsize=_2G, memuse=2)
    def test_isspace(self, size):
        whitespace = ' \f\n\r\t\v'
        repeats = size // len(whitespace) + 2
        s = whitespace * repeats
        self.failUnless(s.isspace())
        s += 'j'
        self.failIf(s.isspace())
    @bigmemtest(minsize=_2G, memuse=2)
    def test_istitle(self, size):
        SUBSTR = '123456'
        s = ''.join(['A', 'a' * size, SUBSTR])
        self.failUnless(s.istitle())
        s += 'A'
        self.failUnless(s.istitle())
        s += 'aA'
        self.failIf(s.istitle())
    @bigmemtest(minsize=_2G, memuse=2)
    def test_isupper(self, size):
        chars = ''.join([ chr(c) for c in range(255) if not chr(c).islower() ])
        repeats = size // len(chars) + 2
        s = chars * repeats
        self.failUnless(s.isupper())
        s += 'a'
        self.failIf(s.isupper())
    @bigmemtest(minsize=_2G, memuse=2)
    def test_join(self, size):
        s = 'A' * size
        x = s.join(['aaaaa', 'bbbbb'])
        self.assertEquals(x.count('a'), 5)
        self.assertEquals(x.count('b'), 5)
        self.failUnless(x.startswith('aaaaaA'))
        self.failUnless(x.endswith('Abbbbb'))
    @bigmemtest(minsize=_2G + 10, memuse=1)
    def test_ljust(self, size):
        SUBSTR = ' abc def ghi'
        s = SUBSTR.ljust(size)
        self.failUnless(s.startswith(SUBSTR + ' '))
        self.assertEquals(len(s), size)
        self.assertEquals(s.strip(), SUBSTR.strip())
    @bigmemtest(minsize=_2G + 10, memuse=2)
    def test_lower(self, size):
        s = 'A' * size
        s = s.lower()
        self.assertEquals(len(s), size)
        self.assertEquals(s.count('a'), size)
    @bigmemtest(minsize=_2G + 10, memuse=1)
    def test_lstrip(self, size):
        SUBSTR = 'abc def ghi'
        s = SUBSTR.rjust(size)
        self.assertEquals(len(s), size)
        self.assertEquals(s.lstrip(), SUBSTR.lstrip())
        del s
        s = SUBSTR.ljust(size)
        self.assertEquals(len(s), size)
        stripped = s.lstrip()
        self.failUnless(stripped is s)
    @bigmemtest(minsize=_2G + 10, memuse=2)
    def test_replace(self, size):
        replacement = 'a'
        s = ' ' * size
        s = s.replace(' ', replacement)
        self.assertEquals(len(s), size)
        self.assertEquals(s.count(replacement), size)
        s = s.replace(replacement, ' ', size - 4)
        self.assertEquals(len(s), size)
        self.assertEquals(s.count(replacement), 4)
        self.assertEquals(s[-10:], '      aaaa')
    @bigmemtest(minsize=_2G, memuse=2)
    def test_rfind(self, size):
        SUBSTR = ' abc def ghi'
        sublen = len(SUBSTR)
        s = ''.join([SUBSTR, '-' * size, SUBSTR])
        self.assertEquals(s.rfind(' '), sublen + size + SUBSTR.rfind(' '))
        self.assertEquals(s.rfind(SUBSTR), sublen + size)
        self.assertEquals(s.rfind(' ', 0, size), SUBSTR.rfind(' '))
        self.assertEquals(s.rfind(SUBSTR, 0, sublen + size), 0)
        self.assertEquals(s.rfind('i'), sublen + size + SUBSTR.rfind('i'))
        self.assertEquals(s.rfind('i', 0, sublen), SUBSTR.rfind('i'))
        self.assertEquals(s.rfind('i', 0, sublen + size),
                          SUBSTR.rfind('i'))
        self.assertEquals(s.rfind('j'), -1)
    @bigmemtest(minsize=_2G, memuse=2)
    def test_rindex(self, size):
        SUBSTR = ' abc def ghi'
        sublen = len(SUBSTR)
        s = ''.join([SUBSTR, '-' * size, SUBSTR])
        self.assertEquals(s.rindex(' '),
                          sublen + size + SUBSTR.rindex(' '))
        self.assertEquals(s.rindex(SUBSTR), sublen + size)
        self.assertEquals(s.rindex(' ', 0, sublen + size - 1),
                          SUBSTR.rindex(' '))
        self.assertEquals(s.rindex(SUBSTR, 0, sublen + size), 0)
        self.assertEquals(s.rindex('i'),
                          sublen + size + SUBSTR.rindex('i'))
        self.assertEquals(s.rindex('i', 0, sublen), SUBSTR.rindex('i'))
        self.assertEquals(s.rindex('i', 0, sublen + size),
                          SUBSTR.rindex('i'))
        self.assertRaises(ValueError, s.rindex, 'j')
    @bigmemtest(minsize=_2G + 10, memuse=1)
    def test_rjust(self, size):
        SUBSTR = ' abc def ghi'
        s = SUBSTR.ljust(size)
        self.failUnless(s.startswith(SUBSTR + ' '))
        self.assertEquals(len(s), size)
        self.assertEquals(s.strip(), SUBSTR.strip())
    @bigmemtest(minsize=_2G + 10, memuse=1)
    def test_rstrip(self, size):
        SUBSTR = ' abc def ghi'
        s = SUBSTR.ljust(size)
        self.assertEquals(len(s), size)
        self.assertEquals(s.rstrip(), SUBSTR.rstrip())
        del s
        s = SUBSTR.rjust(size)
        self.assertEquals(len(s), size)
        stripped = s.rstrip()
        self.failUnless(stripped is s)
    # The test takes about size bytes to build a string, and then about
    # sqrt(size) substrings of sqrt(size) in size and a list to
    # hold sqrt(size) items. It's close but just over 2x size.
    @bigmemtest(minsize=_2G, memuse=2.1)
    def test_split_small(self, size):
        # Crudely calculate an estimate so that the result of s.split won't
        # take up an inordinate amount of memory
        chunksize = int(size ** 0.5 + 2)
        SUBSTR = 'a' + ' ' * chunksize
        s = SUBSTR * chunksize
        l = s.split()
        self.assertEquals(len(l), chunksize)
        self.assertEquals(set(l), set(['a']))
        del l
        l = s.split('a')
        self.assertEquals(len(l), chunksize + 1)
        self.assertEquals(set(l), set(['', ' ' * chunksize]))
    # Allocates a string of twice size (and briefly two) and a list of
    # size.  Because of internal affairs, the s.split() call produces a
    # list of size times the same one-character string, so we only
    # suffer for the list size. (Otherwise, it'd cost another 48 times
    # size in bytes!) Nevertheless, a list of size takes
    # 8*size bytes.
    @bigmemtest(minsize=_2G + 5, memuse=10)
    def test_split_large(self, size):
        s = ' a' * size + ' '
        l = s.split()
        self.assertEquals(len(l), size)
        self.assertEquals(set(l), set(['a']))
        del l
        l = s.split('a')
        self.assertEquals(len(l), size + 1)
        self.assertEquals(set(l), set([' ']))
    @bigmemtest(minsize=_2G, memuse=2.1)
    def test_splitlines(self, size):
        # Crudely calculate an estimate so that the result of s.split won't
        # take up an inordinate amount of memory
        chunksize = int(size ** 0.5 + 2) // 2
        SUBSTR = ' ' * chunksize + '\n' + ' ' * chunksize + '\r\n'
        s = SUBSTR * chunksize
        l = s.splitlines()
        self.assertEquals(len(l), chunksize * 2)
        self.assertEquals(set(l), set([' ' * chunksize]))
    @bigmemtest(minsize=_2G, memuse=2)
    def test_startswith(self, size):
        SUBSTR = ' abc def ghi'
        s = '-' * size + SUBSTR
        self.failUnless(s.startswith(s))
        self.failUnless(s.startswith('-' * size))
        self.failIf(s.startswith(SUBSTR))
    @bigmemtest(minsize=_2G, memuse=1)
    def test_strip(self, size):
        SUBSTR = '   abc def ghi   '
        s = SUBSTR.rjust(size)
        self.assertEquals(len(s), size)
        self.assertEquals(s.strip(), SUBSTR.strip())
        del s
        s = SUBSTR.ljust(size)
        self.assertEquals(len(s), size)
        self.assertEquals(s.strip(), SUBSTR.strip())
    @bigmemtest(minsize=_2G, memuse=2)
    def test_swapcase(self, size):
        SUBSTR = "aBcDeFG12.'\xa9\x00"
        sublen = len(SUBSTR)
        repeats = size // sublen + 2
        s = SUBSTR * repeats
        s = s.swapcase()
        self.assertEquals(len(s), sublen * repeats)
        self.assertEquals(s[:sublen * 3], SUBSTR.swapcase() * 3)
        self.assertEquals(s[-sublen * 3:], SUBSTR.swapcase() * 3)
    @bigmemtest(minsize=_2G, memuse=2)
    def test_title(self, size):
        SUBSTR = 'SpaaHAaaAaham'
        s = SUBSTR * (size // len(SUBSTR) + 2)
        s = s.title()
        self.failUnless(s.startswith((SUBSTR * 3).title()))
        self.failUnless(s.endswith(SUBSTR.lower() * 3))
    @bigmemtest(minsize=_2G, memuse=2)
    def test_translate(self, size):
        trans = string.maketrans('.aZ', '-!$')
        SUBSTR = 'aZz.z.Aaz.'
        sublen = len(SUBSTR)
        repeats = size // sublen + 2
        s = SUBSTR * repeats
        s = s.translate(trans)
        self.assertEquals(len(s), repeats * sublen)
        self.assertEquals(s[:sublen], SUBSTR.translate(trans))
        self.assertEquals(s[-sublen:], SUBSTR.translate(trans))
        self.assertEquals(s.count('.'), 0)
        self.assertEquals(s.count('!'), repeats * 2)
        self.assertEquals(s.count('z'), repeats * 3)
    @bigmemtest(minsize=_2G + 5, memuse=2)
    def test_upper(self, size):
        s = 'a' * size
        s = s.upper()
        self.assertEquals(len(s), size)
        self.assertEquals(s.count('A'), size)
    @bigmemtest(minsize=_2G + 20, memuse=1)
    def test_zfill(self, size):
        SUBSTR = '-568324723598234'
        s = SUBSTR.zfill(size)
        self.failUnless(s.endswith('0' + SUBSTR[1:]))
        self.failUnless(s.startswith('-0'))
        self.assertEquals(len(s), size)
        self.assertEquals(s.count('0'), size - len(SUBSTR))
    @bigmemtest(minsize=_2G + 10, memuse=2)
    def test_format(self, size):
        s = '-' * size
        sf = '%s' % (s,)
        self.failUnless(s == sf)
        del sf
        sf = '..%s..' % (s,)
        self.assertEquals(len(sf), len(s) + 4)
        self.failUnless(sf.startswith('..-'))
        self.failUnless(sf.endswith('-..'))
        del s, sf
        size //= 2
        edge = '-' * size
        s = ''.join([edge, '%s', edge])
        del edge
        s = s % '...'
        self.assertEquals(len(s), size * 2 + 3)
        self.assertEquals(s.count('.'), 3)
        self.assertEquals(s.count('-'), size * 2)
    @bigmemtest(minsize=_2G + 10, memuse=2)
    def test_repr_small(self, size):
        s = '-' * size
        s = repr(s)
        self.assertEquals(len(s), size + 2)
        self.assertEquals(s[0], "'")
        self.assertEquals(s[-1], "'")
        self.assertEquals(s.count('-'), size)
        del s
        # repr() will create a string four times as large as this 'binary
        # string', but we don't want to allocate much more than twice
        # size in total.  (We do extra testing in test_repr_large())
        size = size // 5 * 2
        s = '\x00' * size
        s = repr(s)
        self.assertEquals(len(s), size * 4 + 2)
        self.assertEquals(s[0], "'")
        self.assertEquals(s[-1], "'")
        self.assertEquals(s.count('\\'), size)
        self.assertEquals(s.count('0'), size * 2)
    @bigmemtest(minsize=_2G + 10, memuse=5)
    def test_repr_large(self, size):
        s = '\x00' * size
        s = repr(s)
        self.assertEquals(len(s), size * 4 + 2)
        self.assertEquals(s[0], "'")
        self.assertEquals(s[-1], "'")
        self.assertEquals(s.count('\\'), size)
        self.assertEquals(s.count('0'), size * 2)
    @bigmemtest(minsize=2**32 // 5, memuse=6+2)
    def test_unicode_repr(self, size):
        s = u"\uAAAA" * size
        self.failUnless(len(repr(s)) > size)
    # This test is meaningful even with size < 2G, as long as the
    # doubled string is > 2G (but it tests more if both are > 2G :)
    @bigmemtest(minsize=_1G + 2, memuse=3)
    def test_concat(self, size):
        s = '.' * size
        self.assertEquals(len(s), size)
        s = s + s
        self.assertEquals(len(s), size * 2)
        self.assertEquals(s.count('.'), size * 2)
    # This test is meaningful even with size < 2G, as long as the
    # repeated string is > 2G (but it tests more if both are > 2G :)
    @bigmemtest(minsize=_1G + 2, memuse=3)
    def test_repeat(self, size):
        s = '.' * size
        self.assertEquals(len(s), size)
        s = s * 2
        self.assertEquals(len(s), size * 2)
        self.assertEquals(s.count('.'), size * 2)
    @bigmemtest(minsize=_2G + 20, memuse=1)
    def test_slice_and_getitem(self, size):
        SUBSTR = '0123456789'
        sublen = len(SUBSTR)
        s = SUBSTR * (size // sublen)
        stepsize = len(s) // 100
        stepsize = stepsize - (stepsize % sublen)
        for i in range(0, len(s) - stepsize, stepsize):
            self.assertEquals(s[i], SUBSTR[0])
            self.assertEquals(s[i:i + sublen], SUBSTR)
            self.assertEquals(s[i:i + sublen:2], SUBSTR[::2])
            if i > 0:
                self.assertEquals(s[i + sublen - 1:i - 1:-3],
                                  SUBSTR[sublen::-3])
        # Make sure we do some slicing and indexing near the end of the
        # string, too.
        self.assertEquals(s[len(s) - 1], SUBSTR[-1])
        self.assertEquals(s[-1], SUBSTR[-1])
        self.assertEquals(s[len(s) - 10], SUBSTR[0])
        self.assertEquals(s[-sublen], SUBSTR[0])
        self.assertEquals(s[len(s):], '')
        self.assertEquals(s[len(s) - 1:], SUBSTR[-1])
        self.assertEquals(s[-1:], SUBSTR[-1])
        self.assertEquals(s[len(s) - sublen:], SUBSTR)
        self.assertEquals(s[-sublen:], SUBSTR)
        self.assertEquals(len(s[:]), len(s))
        self.assertEquals(len(s[:len(s) - 5]), len(s) - 5)
        self.assertEquals(len(s[5:-5]), len(s) - 10)
        self.assertRaises(IndexError, operator.getitem, s, len(s))
        self.assertRaises(IndexError, operator.getitem, s, len(s) + 1)
        self.assertRaises(IndexError, operator.getitem, s, len(s) + 1<<31)
    @bigmemtest(minsize=_2G, memuse=2)
    def test_contains(self, size):
        SUBSTR = '0123456789'
        edge = '-' * (size // 2)
        s = ''.join([edge, SUBSTR, edge])
        del edge
        self.failUnless(SUBSTR in s)
        self.failIf(SUBSTR * 2 in s)
        self.failUnless('-' in s)
        self.failIf('a' in s)
        s += 'a'
        self.failUnless('a' in s)
    @bigmemtest(minsize=_2G + 10, memuse=2)
    def test_compare(self, size):
        s1 = '-' * size
        s2 = '-' * size
        self.failUnless(s1 == s2)
        del s2
        s2 = s1 + 'a'
        self.failIf(s1 == s2)
        del s2
        s2 = '.' * size
        self.failIf(s1 == s2)
    @bigmemtest(minsize=_2G + 10, memuse=1)
    def test_hash(self, size):
        # Not sure if we can do any meaningful tests here...  Even if we
        # start relying on the exact algorithm used, the result will be
        # different depending on the size of the C 'long int'.  Even this
        # test is dodgy (there's no *guarantee* that the two things should
        # have a different hash, even if they, in the current
        # implementation, almost always do.)
        s = '\x00' * size
        h1 = hash(s)
        del s
        s = '\x00' * (size + 1)
        self.failIf(h1 == hash(s))
class TupleTest(unittest.TestCase):
    """Exercise tuples with element counts in the multi-gigabyte range."""
    # Tuples have a small, fixed-sized head and an array of pointers to
    # data.  Since we're testing 64-bit addressing, we can assume that the
    # pointers are 8 bytes, and that thus that the tuples take up 8 bytes
    # per size.
    # As a side-effect of testing long tuples, these tests happen to test
    # having more than 2<<31 references to any given object. Hence the
    # use of different types of objects as contents in different tests.
    @bigmemtest(minsize=_2G + 2, memuse=16)
    def test_compare(self, size):
        t1 = (u'',) * size
        t2 = (u'',) * size
        self.failUnless(t1 == t2)
        del t2
        t2 = (u'',) * (size + 1)
        self.failIf(t1 == t2)
        del t2
        t2 = (1,) * size
        self.failIf(t1 == t2)
    # Test concatenating into a single tuple of more than 2G in length,
    # and concatenating a tuple of more than 2G in length separately, so
    # the smaller test still gets run even if there isn't memory for the
    # larger test (but we still let the tester know the larger test is
    # skipped, in verbose mode.)
    def basic_concat_test(self, size):
        t = ((),) * size
        self.assertEquals(len(t), size)
        t = t + t
        self.assertEquals(len(t), size * 2)
    @bigmemtest(minsize=_2G // 2 + 2, memuse=24)
    def test_concat_small(self, size):
        return self.basic_concat_test(size)
    @bigmemtest(minsize=_2G + 2, memuse=24)
    def test_concat_large(self, size):
        return self.basic_concat_test(size)
    @bigmemtest(minsize=_2G // 5 + 10, memuse=8 * 5)
    def test_contains(self, size):
        t = (1, 2, 3, 4, 5) * size
        self.assertEquals(len(t), size * 5)
        self.failUnless(5 in t)
        self.failIf((1, 2, 3, 4, 5) in t)
        self.failIf(0 in t)
    @bigmemtest(minsize=_2G + 10, memuse=8)
    def test_hash(self, size):
        t1 = (0,) * size
        h1 = hash(t1)
        del t1
        t2 = (0,) * (size + 1)
        self.failIf(h1 == hash(t2))
    @bigmemtest(minsize=_2G + 10, memuse=8)
    def test_index_and_slice(self, size):
        t = (None,) * size
        self.assertEquals(len(t), size)
        self.assertEquals(t[-1], None)
        self.assertEquals(t[5], None)
        self.assertEquals(t[size - 1], None)
        self.assertRaises(IndexError, operator.getitem, t, size)
        self.assertEquals(t[:5], (None,) * 5)
        self.assertEquals(t[-5:], (None,) * 5)
        self.assertEquals(t[20:25], (None,) * 5)
        self.assertEquals(t[-25:-20], (None,) * 5)
        self.assertEquals(t[size - 5:], (None,) * 5)
        self.assertEquals(t[size - 5:size], (None,) * 5)
        self.assertEquals(t[size - 6:size - 2], (None,) * 4)
        self.assertEquals(t[size:size], ())
        self.assertEquals(t[size:size+5], ())
    # Like test_concat, split in two.
    def basic_test_repeat(self, size):
        t = ('',) * size
        self.assertEquals(len(t), size)
        t = t * 2
        self.assertEquals(len(t), size * 2)
    @bigmemtest(minsize=_2G // 2 + 2, memuse=24)
    def test_repeat_small(self, size):
        return self.basic_test_repeat(size)
    @bigmemtest(minsize=_2G + 2, memuse=24)
    def test_repeat_large(self, size):
        return self.basic_test_repeat(size)
    @bigmemtest(minsize=_1G - 1, memuse=12)
    def test_repeat_large_2(self, size):
        return self.basic_test_repeat(size)
    @precisionbigmemtest(size=_1G - 1, memuse=9)
    def test_from_2G_generator(self, size):
        try:
            t = tuple(xrange(size))
        except MemoryError:
            pass # acceptable on 32-bit
        else:
            count = 0
            for item in t:
                self.assertEquals(item, count)
                count += 1
            self.assertEquals(count, size)
    @precisionbigmemtest(size=_1G - 25, memuse=9)
    def test_from_almost_2G_generator(self, size):
        try:
            t = tuple(xrange(size))
            count = 0
            for item in t:
                self.assertEquals(item, count)
                count += 1
            self.assertEquals(count, size)
        except MemoryError:
            pass # acceptable, expected on 32-bit
    # Like test_concat, split in two.
    def basic_test_repr(self, size):
        t = (0,) * size
        s = repr(t)
        # The repr of a tuple of 0's is exactly three times the tuple length.
        self.assertEquals(len(s), size * 3)
        self.assertEquals(s[:5], '(0, 0')
        self.assertEquals(s[-5:], '0, 0)')
        self.assertEquals(s.count('0'), size)
    @bigmemtest(minsize=_2G // 3 + 2, memuse=8 + 3)
    def test_repr_small(self, size):
        return self.basic_test_repr(size)
    @bigmemtest(minsize=_2G + 2, memuse=8 + 3)
    def test_repr_large(self, size):
        return self.basic_test_repr(size)
class ListTest(unittest.TestCase):
    """Exercise lists with element counts in the multi-gigabyte range,
    including in-place mutation (slice assignment, append, pop, remove).
    """
    # Like tuples, lists have a small, fixed-sized head and an array of
    # pointers to data, so 8 bytes per size. Also like tuples, we make the
    # lists hold references to various objects to test their refcount
    # limits.
    @bigmemtest(minsize=_2G + 2, memuse=16)
    def test_compare(self, size):
        l1 = [u''] * size
        l2 = [u''] * size
        self.failUnless(l1 == l2)
        del l2
        l2 = [u''] * (size + 1)
        self.failIf(l1 == l2)
        del l2
        l2 = [2] * size
        self.failIf(l1 == l2)
    # Test concatenating into a single list of more than 2G in length,
    # and concatenating a list of more than 2G in length separately, so
    # the smaller test still gets run even if there isn't memory for the
    # larger test (but we still let the tester know the larger test is
    # skipped, in verbose mode.)
    def basic_test_concat(self, size):
        l = [[]] * size
        self.assertEquals(len(l), size)
        l = l + l
        self.assertEquals(len(l), size * 2)
    @bigmemtest(minsize=_2G // 2 + 2, memuse=24)
    def test_concat_small(self, size):
        return self.basic_test_concat(size)
    @bigmemtest(minsize=_2G + 2, memuse=24)
    def test_concat_large(self, size):
        return self.basic_test_concat(size)
    def basic_test_inplace_concat(self, size):
        l = [sys.stdout] * size
        l += l
        self.assertEquals(len(l), size * 2)
        self.failUnless(l[0] is l[-1])
        self.failUnless(l[size - 1] is l[size + 1])
    @bigmemtest(minsize=_2G // 2 + 2, memuse=24)
    def test_inplace_concat_small(self, size):
        return self.basic_test_inplace_concat(size)
    @bigmemtest(minsize=_2G + 2, memuse=24)
    def test_inplace_concat_large(self, size):
        return self.basic_test_inplace_concat(size)
    @bigmemtest(minsize=_2G // 5 + 10, memuse=8 * 5)
    def test_contains(self, size):
        l = [1, 2, 3, 4, 5] * size
        self.assertEquals(len(l), size * 5)
        self.failUnless(5 in l)
        self.failIf([1, 2, 3, 4, 5] in l)
        self.failIf(0 in l)
    @bigmemtest(minsize=_2G + 10, memuse=8)
    def test_hash(self, size):
        l = [0] * size
        self.failUnlessRaises(TypeError, hash, l)
    @bigmemtest(minsize=_2G + 10, memuse=8)
    def test_index_and_slice(self, size):
        l = [None] * size
        self.assertEquals(len(l), size)
        self.assertEquals(l[-1], None)
        self.assertEquals(l[5], None)
        self.assertEquals(l[size - 1], None)
        self.assertRaises(IndexError, operator.getitem, l, size)
        self.assertEquals(l[:5], [None] * 5)
        self.assertEquals(l[-5:], [None] * 5)
        self.assertEquals(l[20:25], [None] * 5)
        self.assertEquals(l[-25:-20], [None] * 5)
        self.assertEquals(l[size - 5:], [None] * 5)
        self.assertEquals(l[size - 5:size], [None] * 5)
        self.assertEquals(l[size - 6:size - 2], [None] * 4)
        self.assertEquals(l[size:size], [])
        self.assertEquals(l[size:size+5], [])
        l[size - 2] = 5
        self.assertEquals(len(l), size)
        self.assertEquals(l[-3:], [None, 5, None])
        self.assertEquals(l.count(5), 1)
        self.assertRaises(IndexError, operator.setitem, l, size, 6)
        self.assertEquals(len(l), size)
        l[size - 7:] = [1, 2, 3, 4, 5]
        size -= 2
        self.assertEquals(len(l), size)
        self.assertEquals(l[-7:], [None, None, 1, 2, 3, 4, 5])
        l[:7] = [1, 2, 3, 4, 5]
        size -= 2
        self.assertEquals(len(l), size)
        self.assertEquals(l[:7], [1, 2, 3, 4, 5, None, None])
        del l[size - 1]
        size -= 1
        self.assertEquals(len(l), size)
        self.assertEquals(l[-1], 4)
        del l[-2:]
        size -= 2
        self.assertEquals(len(l), size)
        self.assertEquals(l[-1], 2)
        del l[0]
        size -= 1
        self.assertEquals(len(l), size)
        self.assertEquals(l[0], 2)
        del l[:2]
        size -= 2
        self.assertEquals(len(l), size)
        self.assertEquals(l[0], 4)
    # Like test_concat, split in two.
    def basic_test_repeat(self, size):
        l = [] * size
        self.failIf(l)
        l = [''] * size
        self.assertEquals(len(l), size)
        l = l * 2
        self.assertEquals(len(l), size * 2)
    @bigmemtest(minsize=_2G // 2 + 2, memuse=24)
    def test_repeat_small(self, size):
        return self.basic_test_repeat(size)
    @bigmemtest(minsize=_2G + 2, memuse=24)
    def test_repeat_large(self, size):
        return self.basic_test_repeat(size)
    def basic_test_inplace_repeat(self, size):
        l = ['']
        l *= size
        self.assertEquals(len(l), size)
        self.failUnless(l[0] is l[-1])
        del l
        l = [''] * size
        l *= 2
        self.assertEquals(len(l), size * 2)
        self.failUnless(l[size - 1] is l[-1])
    @bigmemtest(minsize=_2G // 2 + 2, memuse=16)
    def test_inplace_repeat_small(self, size):
        return self.basic_test_inplace_repeat(size)
    @bigmemtest(minsize=_2G + 2, memuse=16)
    def test_inplace_repeat_large(self, size):
        return self.basic_test_inplace_repeat(size)
    def basic_test_repr(self, size):
        l = [0] * size
        s = repr(l)
        # The repr of a list of 0's is exactly three times the list length.
        self.assertEquals(len(s), size * 3)
        self.assertEquals(s[:5], '[0, 0')
        self.assertEquals(s[-5:], '0, 0]')
        self.assertEquals(s.count('0'), size)
    @bigmemtest(minsize=_2G // 3 + 2, memuse=8 + 3)
    def test_repr_small(self, size):
        return self.basic_test_repr(size)
    @bigmemtest(minsize=_2G + 2, memuse=8 + 3)
    def test_repr_large(self, size):
        return self.basic_test_repr(size)
    # list overallocates ~1/8th of the total size (on first expansion) so
    # the single list.append call puts memuse at 9 bytes per size.
    @bigmemtest(minsize=_2G, memuse=9)
    def test_append(self, size):
        l = [object()] * size
        l.append(object())
        self.assertEquals(len(l), size+1)
        self.failUnless(l[-3] is l[-2])
        self.failIf(l[-2] is l[-1])
    @bigmemtest(minsize=_2G // 5 + 2, memuse=8 * 5)
    def test_count(self, size):
        l = [1, 2, 3, 4, 5] * size
        self.assertEquals(l.count(1), size)
        self.assertEquals(l.count("1"), 0)
    def basic_test_extend(self, size):
        l = [file] * size
        l.extend(l)
        self.assertEquals(len(l), size * 2)
        self.failUnless(l[0] is l[-1])
        self.failUnless(l[size - 1] is l[size + 1])
    @bigmemtest(minsize=_2G // 2 + 2, memuse=16)
    def test_extend_small(self, size):
        return self.basic_test_extend(size)
    @bigmemtest(minsize=_2G + 2, memuse=16)
    def test_extend_large(self, size):
        return self.basic_test_extend(size)
    @bigmemtest(minsize=_2G // 5 + 2, memuse=8 * 5)
    def test_index(self, size):
        l = [1L, 2L, 3L, 4L, 5L] * size
        size *= 5
        self.assertEquals(l.index(1), 0)
        self.assertEquals(l.index(5, size - 5), size - 1)
        self.assertEquals(l.index(5, size - 5, size), size - 1)
        self.assertRaises(ValueError, l.index, 1, size - 4, size)
        self.assertRaises(ValueError, l.index, 6L)
    # This tests suffers from overallocation, just like test_append.
    @bigmemtest(minsize=_2G + 10, memuse=9)
    def test_insert(self, size):
        l = [1.0] * size
        l.insert(size - 1, "A")
        size += 1
        self.assertEquals(len(l), size)
        self.assertEquals(l[-3:], [1.0, "A", 1.0])
        l.insert(size + 1, "B")
        size += 1
        self.assertEquals(len(l), size)
        self.assertEquals(l[-3:], ["A", 1.0, "B"])
        l.insert(1, "C")
        size += 1
        self.assertEquals(len(l), size)
        self.assertEquals(l[:3], [1.0, "C", 1.0])
        self.assertEquals(l[size - 3:], ["A", 1.0, "B"])
    @bigmemtest(minsize=_2G // 5 + 4, memuse=8 * 5)
    def test_pop(self, size):
        l = [u"a", u"b", u"c", u"d", u"e"] * size
        size *= 5
        self.assertEquals(len(l), size)
        item = l.pop()
        size -= 1
        self.assertEquals(len(l), size)
        self.assertEquals(item, u"e")
        self.assertEquals(l[-2:], [u"c", u"d"])
        item = l.pop(0)
        size -= 1
        self.assertEquals(len(l), size)
        self.assertEquals(item, u"a")
        self.assertEquals(l[:2], [u"b", u"c"])
        item = l.pop(size - 2)
        size -= 1
        self.assertEquals(len(l), size)
        self.assertEquals(item, u"c")
        self.assertEquals(l[-2:], [u"b", u"d"])
    @bigmemtest(minsize=_2G + 10, memuse=8)
    def test_remove(self, size):
        l = [10] * size
        self.assertEquals(len(l), size)
        l.remove(10)
        size -= 1
        self.assertEquals(len(l), size)
        # Because of the earlier l.remove(), this append doesn't trigger
        # a resize.
        l.append(5)
        size += 1
        self.assertEquals(len(l), size)
        self.assertEquals(l[-2:], [10, 5])
        l.remove(5)
        size -= 1
        self.assertEquals(len(l), size)
        self.assertEquals(l[-2:], [10, 10])
    @bigmemtest(minsize=_2G // 5 + 2, memuse=8 * 5)
    def test_reverse(self, size):
        l = [1, 2, 3, 4, 5] * size
        l.reverse()
        self.assertEquals(len(l), size * 5)
        self.assertEquals(l[-5:], [5, 4, 3, 2, 1])
        self.assertEquals(l[:5], [5, 4, 3, 2, 1])
    @bigmemtest(minsize=_2G // 5 + 2, memuse=8 * 5)
    def test_sort(self, size):
        l = [1, 2, 3, 4, 5] * size
        l.sort()
        self.assertEquals(len(l), size * 5)
        self.assertEquals(l.count(1), size)
        self.assertEquals(l[:10], [1] * 10)
        self.assertEquals(l[-10:], [5] * 10)
class BufferTest(unittest.TestCase):
    """Exercise the (Python 2) buffer type at multi-gigabyte sizes."""
    @precisionbigmemtest(size=_1G, memuse=4)
    def test_repeat(self, size):
        try:
            # buffer() is removed in Python 3; the py3k-warning guard
            # suppresses the deprecation warning during the test.
            with test_support._check_py3k_warnings():
                b = buffer("AAAA")*size
        except MemoryError:
            pass # acceptable on 32-bit
        else:
            count = 0
            for c in b:
                self.assertEquals(c, 'A')
                count += 1
            self.assertEquals(count, size*4)
def test_main():
    """Entry point used by regrtest: run every bigmem suite in this module."""
    suites = (StrTest, TupleTest, ListTest, BufferTest)
    test_support.run_unittest(*suites)
if __name__ == '__main__':
    # An optional command-line argument sets the memory limit used by the
    # bigmem decorators (e.g. "4G"); without it only the small sizes run.
    if len(sys.argv) > 1:
        test_support.set_memlimit(sys.argv[1])
    test_main()
| babyliynfg/cross | tools/project-creator/Python2.6.6/Lib/test/test_bigmem.py | Python | mit | 39,354 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2016-10-19 20:25
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the ``file_uploaded`` FileField to the ``printjob`` model."""

    dependencies = [
        ('api', '0003_auto_20161017_1814'),
    ]

    operations = [
        migrations.AddField(
            model_name='printjob',
            name='file_uploaded',
            # 'default.png' only backfills rows that already exist when the
            # migration runs; preserve_default=False removes the default from
            # the field definition afterwards.
            field=models.FileField(default='default.png', upload_to=''),
            preserve_default=False,
        ),
    ]
| aabmass/print-web | print_web_django/api/migrations/0004_printjob_file_uploaded.py | Python | mit | 512 |
import numpy as np
from cs231n.classifiers.linear_svm import *
from cs231n.classifiers.softmax import *
class LinearClassifier(object):
def __init__(self):
self.W = None
def train(self, X, y, learning_rate=1e-3, reg=1e-5, num_iters=100,
batch_size=200, verbose=False):
"""
Train this linear classifier using stochastic gradient descent.
Inputs:
- X: A numpy array of shape (N, D) containing training data; there are N
training samples each of dimension D.
- y: A numpy array of shape (N,) containing training labels; y[i] = c
means that X[i] has label 0 <= c < C for C classes.
- learning_rate: (float) learning rate for optimization.
- reg: (float) regularization strength.
- num_iters: (integer) number of steps to take when optimizing
- batch_size: (integer) number of training examples to use at each step.
- verbose: (boolean) If true, print progress during optimization.
Outputs:
A list containing the value of the loss function at each training iteration.
"""
num_train, dim = X.shape
num_classes = np.max(y) + 1 # assume y takes values 0...K-1 where K is number of classes
if self.W is None:
# lazily initialize W
self.W = 0.001 * np.random.randn(dim, num_classes)
# Run stochastic gradient descent to optimize W
loss_history = []
for it in xrange(num_iters):
X_batch = None
y_batch = None
#########################################################################
# TODO: #
# Sample batch_size elements from the training data and their #
# corresponding labels to use in this round of gradient descent. #
# Store the data in X_batch and their corresponding labels in #
# y_batch; after sampling X_batch should have shape (dim, batch_size)??? #
# and y_batch should have shape (batch_size,) #
# #
# Hint: Use np.random.choice to generate indices. Sampling with #
# replacement is faster than sampling without replacement. #
#########################################################################
pass
indices = np.random.choice(num_train, batch_size, replace=False)
X_batch = X[indices]
y_batch = y[indices]
#########################################################################
# END OF YOUR CODE #
#########################################################################
# evaluate loss and gradient
loss, grad = self.loss(X_batch, y_batch, reg)
loss_history.append(loss)
# perform parameter update
#########################################################################
# TODO: #
# Update the weights using the gradient and the learning rate. #
#########################################################################
self.W += - learning_rate * grad
#########################################################################
# END OF YOUR CODE #
#########################################################################
if verbose and it % 100 == 0:
print 'iteration %d / %d: loss %f' % (it, num_iters, loss)
return loss_history
def predict(self, X):
    """
    Use the trained weights of this linear classifier to predict labels for
    data points.

    Inputs:
    - X: A numpy array of shape (N, D) containing N data points, each of
      dimension D.  (Scores are computed as X.dot(self.W), so samples are
      rows — the same layout train() uses, not the D x N layout the old
      docstring claimed.)

    Returns:
    - y_pred: 1-dimensional array of length N; each element is the integer
      class label predicted for the corresponding row of X.
    """
    # Class scores for every sample: shape (N, C) where C = number of classes.
    scores = X.dot(self.W)
    # Predicted class = index of the highest-scoring column in each row.
    # (The old code first built a zeros array sized by X.shape[1] — the
    # feature axis, not the sample axis — and then overwrote it; both the
    # dead init and the leftover `pass` have been removed.)
    y_pred = scores.argmax(axis=1)
    return y_pred
def loss(self, X_batch, y_batch, reg):
    """
    Compute the loss function and its derivative.
    Subclasses will override this.

    Inputs:
    - X_batch: A numpy array of shape (N, D) containing a minibatch of N
      data points; each point has dimension D.
    - y_batch: A numpy array of shape (N,) containing labels for the minibatch.
    - reg: (float) regularization strength.

    Returns: A tuple containing:
    - loss as a single float
    - gradient with respect to self.W; an array of the same shape as W
    """
    # Abstract hook: the base implementation intentionally does nothing
    # (and thus returns None); LinearSVM and Softmax supply the real loss.
    pass
class LinearSVM(LinearClassifier):
    """ A subclass that uses the Multiclass SVM loss function """

    def loss(self, X_batch, y_batch, reg):
        # Delegate to the fully vectorized hinge-loss implementation,
        # which returns the (loss, gradient) pair the base class documents.
        hinge_loss, gradient = svm_loss_vectorized(self.W, X_batch, y_batch, reg)
        return hinge_loss, gradient
class Softmax(LinearClassifier):
    """ A subclass that uses the Softmax + Cross-entropy loss function """

    def loss(self, X_batch, y_batch, reg):
        # Delegate to the vectorized softmax/cross-entropy implementation,
        # which returns the (loss, gradient) pair the base class documents.
        ce_loss, gradient = softmax_loss_vectorized(self.W, X_batch, y_batch, reg)
        return ce_loss, gradient
| hanezu/cs231n-assignment | assignment1/cs231n/classifiers/linear_classifier.py | Python | mit | 5,634 |
import pandas as pd
from typing import List
from datetime import datetime
from .clean_list import clean_dates
class CleanDates():
    """Applies the imported ``clean_dates`` routine to a whole DataFrame."""

    def clean_dates(self, inputs: pd.DataFrame) -> pd.DataFrame:
        """Return ``inputs`` with every column run through ``clean_dates``."""
        # DataFrame.apply hands each column to the module-level helper.
        cleaned = inputs.apply(clean_dates)
        return cleaned
| NewKnowledge/punk | punk/preppy/cleanDates.py | Python | mit | 238 |
#!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2014 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import socket
import threading
import time
import xmlrpclib
from PyQt4.QtGui import *
from PyQt4.QtCore import *
from electrum_dgb import bitcoin, util
from electrum_dgb import transaction
from electrum_dgb.plugins import BasePlugin, hook
from electrum_dgb.i18n import _
from electrum_dgb.wallet import Multisig_Wallet
from electrum_dgb_gui.qt.transaction_dialog import show_transaction
import sys
import traceback
# Cosigner-pool relay server.  HOST and PORT *must* be defined: the
# ServerProxy URL below interpolates them at import time, so leaving them
# commented out (as before) raised a NameError as soon as this plugin
# module was loaded.
PORT = 12344
HOST = 'cosigner.electrum.org'  # TODO(review): confirm the relay host for this fork
server = xmlrpclib.ServerProxy('http://%s:%d'%(HOST,PORT), allow_none=True)
class Listener(util.DaemonThread):
    """Background thread that polls the cosigner-pool server for messages
    addressed to any of our key hashes and forwards them to the Qt side
    via the parent plugin's ``cosigner:receive`` signal."""

    def __init__(self, parent):
        util.DaemonThread.__init__(self)
        self.daemon = True
        self.parent = parent          # the Plugin instance owning this listener
        self.received = set()         # keyhashes already fetched (avoid re-delivery)
        self.keyhashes = []           # mailbox ids to poll

    def set_keyhashes(self, keyhashes):
        # Replace the polled mailbox list wholesale (called from Plugin.update).
        self.keyhashes = keyhashes

    def clear(self, keyhash):
        # Delete the message from the server and allow future re-delivery.
        server.delete(keyhash)
        self.received.remove(keyhash)

    def run(self):
        while self.running:
            if not self.keyhashes:
                # Nothing to poll yet; re-check for keys every 2 seconds.
                time.sleep(2)
                continue
            for keyhash in self.keyhashes:
                if keyhash in self.received:
                    continue
                try:
                    message = server.get(keyhash)
                except Exception as e:
                    # Server unreachable: back off before trying the next hash.
                    self.print_error("cannot contact cosigner pool")
                    time.sleep(30)
                    continue
                if message:
                    self.received.add(keyhash)
                    self.print_error("received message for", keyhash)
                    # Emit across threads; the Qt main loop runs on_receive.
                    self.parent.obj.emit(SIGNAL("cosigner:receive"), keyhash,
                                         message)
            # poll every 30 seconds
            time.sleep(30)
class Plugin(BasePlugin):
    """Cosigner-pool plugin: relays partially signed multisig transactions
    between cosigners through a shared XML-RPC server."""

    def __init__(self, parent, config, name):
        BasePlugin.__init__(self, parent, config, name)
        self.listener = None
        # QObject used purely as a signal bridge from the listener thread
        # back into the Qt main thread.
        self.obj = QObject()
        self.obj.connect(self.obj, SIGNAL('cosigner:receive'), self.on_receive)
        # (key, keyhash, window) tuples for keystores we can sign with.
        self.keys = []
        # (window, xpub, K, keyhash) tuples for watching-only cosigners.
        self.cosigner_list = []

    @hook
    def init_qt(self, gui):
        # Register every window that already exists when the plugin loads.
        for window in gui.windows:
            self.on_new_window(window)

    @hook
    def on_new_window(self, window):
        self.update(window)

    @hook
    def on_close_window(self, window):
        self.update(window)

    def is_available(self):
        return True

    def update(self, window):
        """Rebuild key/cosigner lists for ``window`` and toggle the listener.

        NOTE(review): the if/elif below *toggles* the listener on every call
        (start when absent, stop when present), so a second call — e.g. a
        second multisig window opening — stops polling.  Looks suspicious;
        confirm against upstream Electrum before changing.
        """
        wallet = window.wallet
        if type(wallet) != Multisig_Wallet:
            return
        if self.listener is None:
            self.print_error("starting listener")
            self.listener = Listener(self)
            self.listener.start()
        elif self.listener:
            self.print_error("shutting down listener")
            self.listener.stop()
            self.listener = None
        self.keys = []
        self.cosigner_list = []
        for key, keystore in wallet.keystores.items():
            xpub = keystore.get_master_public_key()
            # K: hex public-key part of the xpub; its double-SHA256 hash is
            # the mailbox id used on the cosigner-pool server.
            K = bitcoin.deserialize_xkey(xpub)[-1].encode('hex')
            _hash = bitcoin.Hash(K).encode('hex')
            if not keystore.is_watching_only():
                self.keys.append((key, _hash, window))
            else:
                self.cosigner_list.append((window, xpub, K, _hash))
        if self.listener:
            self.listener.set_keyhashes([t[1] for t in self.keys])

    @hook
    def transaction_dialog(self, d):
        # Add a "Send to cosigner" button to every transaction dialog.
        d.cosigner_send_button = b = QPushButton(_("Send to cosigner"))
        b.clicked.connect(lambda: self.do_send(d.tx))
        d.buttons.insert(0, b)
        self.transaction_dialog_update(d)

    @hook
    def transaction_dialog_update(self, d):
        # Show the button only for incomplete transactions that we cannot
        # sign ourselves but some known cosigner can.
        if d.tx.is_complete() or d.wallet.can_sign(d.tx):
            d.cosigner_send_button.hide()
            return
        for window, xpub, K, _hash in self.cosigner_list:
            if window.wallet == d.wallet and self.cosigner_can_sign(d.tx, xpub):
                d.cosigner_send_button.show()
                break
        else:
            d.cosigner_send_button.hide()

    def cosigner_can_sign(self, tx, cosigner_xpub):
        """Return True if any input of ``tx`` references ``cosigner_xpub``."""
        from electrum_dgb.keystore import is_xpubkey, parse_xpubkey
        xpub_set = set([])
        for txin in tx.inputs():
            for x_pubkey in txin['x_pubkeys']:
                if is_xpubkey(x_pubkey):
                    xpub, s = parse_xpubkey(x_pubkey)
                    xpub_set.add(xpub)
        return cosigner_xpub in xpub_set

    def do_send(self, tx):
        """Encrypt ``tx`` to each eligible cosigner's key and upload it."""
        for window, xpub, K, _hash in self.cosigner_list:
            if not self.cosigner_can_sign(tx, xpub):
                continue
            message = bitcoin.encrypt_message(tx.raw, K)
            try:
                server.put(_hash, message)
            except Exception as e:
                traceback.print_exc(file=sys.stdout)
                window.show_message("Failed to send transaction to cosigning pool.")
                return
            window.show_message("Your transaction was sent to the cosigning pool.\nOpen your cosigner wallet to retrieve it.")

    def on_receive(self, keyhash, message):
        """Handle a message the listener fetched for one of our keys:
        decrypt it with the matching master key and open the transaction."""
        self.print_error("signal arrived for", keyhash)
        # Find the window owning this keyhash; bail out if unknown.
        for key, _hash, window in self.keys:
            if _hash == keyhash:
                break
        else:
            self.print_error("keyhash not found")
            return
        wallet = window.wallet
        if wallet.has_password():
            password = window.password_dialog('An encrypted transaction was retrieved from cosigning pool.\nPlease enter your password to decrypt it.')
            if not password:
                return
        else:
            password = None
            if not window.question(_("An encrypted transaction was retrieved from cosigning pool.\nDo you want to open it now?")):
                return
        xprv = wallet.keystore.get_master_private_key(password)
        if not xprv:
            return
        try:
            # Decrypt with the EC key derived from our master private key.
            k = bitcoin.deserialize_xkey(xprv)[-1].encode('hex')
            EC = bitcoin.EC_KEY(k.decode('hex'))
            message = EC.decrypt_message(message)
        except Exception as e:
            traceback.print_exc(file=sys.stdout)
            window.show_message(str(e))
            return
        # Remove the message from the pool, then show the transaction.
        self.listener.clear(keyhash)
        tx = transaction.Transaction(message)
        show_transaction(tx, window, prompt_if_unsaved=True)
| protonn/Electrum-Cash | plugins/cosigner_pool/qt.py | Python | mit | 7,561 |
from django.db import models
from django.utils import timezone
from pytz import common_timezones
from .validators import YoutubeValidator
class TimezoneField(models.CharField):
    """
    A CharField whose choices are the pytz common timezones.

    Display labels have underscores replaced by spaces
    (e.g. "America/New_York" is shown as "America/New York").
    """

    def __init__(self, *args, **kwargs):
        common_timezone_names = [tz.replace('_', ' ') for tz in common_timezones]
        # Bug fix: on Python 3, zip() returns a one-shot iterator.  Django
        # iterates `choices` repeatedly (validation, form rendering,
        # migrations), so it must be materialised into a list of
        # (value, label) 2-tuples or every pass after the first sees
        # an empty choice set.
        the_kwargs = {
            'choices': list(zip(common_timezones, common_timezone_names)),
            'default': timezone.get_default_timezone_name(),
            'max_length': 50,
        }
        # Explicit caller-supplied kwargs override the defaults above.
        the_kwargs.update(kwargs)
        super().__init__(*args, **the_kwargs)
class YoutubeField(models.CharField):
    """
    Field representing a YouTube video, essentially just a text field
    but with automatic validation that given values are valid YouTube URLs
    """
    # Runs during full_clean()/form validation, not on raw Model.save().
    default_validators = [YoutubeValidator()]
| gamernetwork/gn-django | gn_django/fields.py | Python | mit | 909 |
'''
urlresolver XBMC Addon
Copyright (C) 2017
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
from urlresolver.plugins.__generic_resolver__ import GenericResolver
class BitPornoResolver(GenericResolver):
    """URL resolver for bitporno.com watch/embed links.

    Matches both ``/?v=<id>`` and ``/embed/<id>`` forms and rebuilds the
    canonical watch-page URL for the generic resolver to scrape.
    """

    name = 'BitPorno'
    domains = ['bitporno.com']
    pattern = '(?://|\.)(bitporno\.com)/(?:\?v=|embed/)([a-zA-Z0-9]+)'

    def get_url(self, host, media_id):
        # Cleanup: the old body contained leftover debug prints and an
        # unreachable hard-coded second return statement; both removed.
        return self._default_get_url(host, media_id, template='http://{host}/?v={media_id}')

    @classmethod
    def _is_enabled(cls):
        # Always enabled; this resolver exposes no user-facing settings.
        return True
| dbiesecke/dbiesecke.github.io | repo/script.module.urlresolver/lib/urlresolver/plugins/bitporno.py | Python | mit | 1,372 |
"""
@name: PyHouse/Project/src/Modules/Computer/Web/web_rootMenu.py
@author: D. Brian Kimmel
@contact: D.BrianKimmel@gmail.com
@copyright: (c) 2013-2019 by D. Brian Kimmel
@license: MIT License
@note: Created on May 30, 2013
@summary: Handle the Main menu.
"""
__updated__ = '2019-10-31'
# Import system type stuff
from twisted.web._element import renderer, Element
# Import PyMh files and modules.
from Modules.Core import logging_pyh as Logger
LOG = Logger.getLogger('PyHouse.webRootMenu ')
class RootMenuElement(Element):
    """Twisted web Element backing the root (main) menu widget of the
    PyHouse browser UI."""

    # docFactory = loaders.xmlfile(os.path.join(templatepath, 'rootMenuElement.html'))
    # Client-side JavaScript class bound to this element.
    jsClass = u'rootMenu.RootMenuWidget'

    def __init__(self, p_workspace_obj):
        # Keep a handle on the shared PyHouse data object for the renderers.
        self.m_pyhouse_obj = p_workspace_obj.m_pyhouse_obj

    # NOTE(review): the "XX" prefix marks this renderer as disabled; it
    # references XXPyHouseMainApi, which does not look like a live API name.
    @renderer
    def XXdoRootMenuReload(self, _p_json):
        """ Process a message for a XML save/reload from the browser/client.
        """
        LOG.info("Self: {}".format(self))
        self.m_pyhouse_obj.XXPyHouseMainApi.SaveXml(self.m_pyhouse_obj)

    @renderer
    def doRootMenuQuit(self, p_json):
        """ Process a message for a browser logoff and quit that came from the browser/client.
        """
        # Currently only logs the request; no shutdown is performed here.
        LOG.info("Self: {}; JSON: {}".format(self, p_json))
# ## END DBK
| DBrianKimmel/PyHouse | Project/src/Modules/Computer/Web/web_rootMenu.py | Python | mit | 1,294 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
JS and CSS minification
============================
Author: Toni Heittola (toni.heittola@gmail.com)
This plugin will create dynamic datatable with charting features from given yaml-datafile.
"""
import os
import sys
import io
import argparse
import textwrap
from IPython import embed
__version_info__ = ('0', '1', '0')
__version__ = '.'.join(__version_info__)
def main(argv):
    """Entry point: parse command-line flags, then minify the css/ and js/
    trees into css.min/ and js.min/ (including bundle files).

    ``argv`` is accepted for symmetry with ``sys.argv`` but argparse reads
    ``sys.argv`` itself.
    """
    parser = argparse.ArgumentParser(
        prefix_chars='-+',
        formatter_class=argparse.RawDescriptionHelpFormatter,
        description=textwrap.dedent('''\
            JS and CSS minification
            ---------------------------------------------
            Author: Toni Heittola ( toni.heittola@gmail.com )
            '''))
    # -v/--version prints the module version and exits (argparse handles it).
    parser.add_argument('-v', '--version', action='version', version='%(prog)s ' + __version__)
    args = parser.parse_args()
    print("JS and CSS minification")
    print("-----------------------")
    # NOTE(review): minify_css_directory (the csscompressor variant) is not
    # called; the rcssmin variant below is the active implementation.
    minify_css_directory2(source='css', target='css.min')
    minify_js_directory(source='js', target='js.min')
def minify_css_directory(source, target):
    """
    Minify every ``*.css`` file under ``source`` into ``target`` as
    ``*.min.css``, using csscompressor.  Does nothing when ``source``
    is not a directory.
    """
    from csscompressor import compress

    if not os.path.isdir(source):
        return
    if not os.path.exists(target):
        os.makedirs(target)
    for root, dirs, files in os.walk(source):
        for filename in files:
            if not filename.endswith(".css"):
                continue
            source_path = os.path.join(root, filename)
            print(" " + source_path)
            minified_path = os.path.join(target, filename.replace('.css', '.min.css'))
            with open(source_path) as css_file:
                with open(minified_path, "w") as minified_file:
                    minified_file.write(compress(css_file.read()))
def minify_css_directory2(source, target):
    """
    Minify every ``*.css`` file under ``source`` into ``target`` as
    ``*.min.css`` using rcssmin, then concatenate all minified files into
    ``target``/datatable.bundle.min.css.
    """
    import rcssmin

    if os.path.isdir(source):
        if not os.path.exists(target):
            os.makedirs(target)
        for root, dirs, files in os.walk(source):
            for current_file in files:
                if current_file.endswith(".css"):
                    current_file_path = os.path.join(root, current_file)
                    print(" " + current_file_path)
                    minified_path = os.path.join(target, current_file.replace('.css', '.min.css'))
                    with open(current_file_path) as css_file:
                        with open(minified_path, "w") as minified_file:
                            minified_file.write(rcssmin.cssmin(css_file.read(), keep_bang_comments=True))
        # Build the bundle.  Fixes over the original: all bundle files are
        # opened with context managers (the originals leaked handles on
        # error), and bundling is guarded by the isdir() check above so a
        # missing source directory no longer attempts to write into a
        # nonexistent target.
        bundle_data = []
        for root, dirs, files in os.walk(target):
            for current_file in files:
                # Skip a previously generated bundle so it is not self-appended.
                if current_file.endswith(".css") and current_file != 'datatable.bundle.min.css':
                    with open(os.path.join(root, current_file), "r") as css_file:
                        bundle_data += css_file.readlines()
        bundle_filename = os.path.join(target, 'datatable.bundle.min.css')
        with open(bundle_filename, 'w') as bundle_file:
            bundle_file.write(''.join(bundle_data))
        print(" " + bundle_filename)
def minify_js_directory(source, target):
    """
    Minify every ``*.js`` file under ``source`` into ``target`` as
    ``*.min.js`` using jsmin, then concatenate all minified files into
    ``target``/datatable.bundle.min.js.
    """
    from jsmin import jsmin

    if os.path.isdir(source):
        if not os.path.exists(target):
            os.makedirs(target)
        for root, dirs, files in os.walk(source):
            for current_file in files:
                if current_file.endswith(".js"):
                    current_file_path = os.path.join(root, current_file)
                    print(" " + current_file_path)
                    minified_path = os.path.join(target, current_file.replace('.js', '.min.js'))
                    with open(current_file_path) as js_file:
                        with open(minified_path, "w") as minified_file:
                            minified_file.write(jsmin(js_file.read()))
        # Build the bundle.  Fixes over the original: context-managed file
        # handles, and bundling guarded by the isdir() check above (see
        # minify_css_directory2 for the same rationale).
        bundle_data = []
        for root, dirs, files in os.walk(target):
            for current_file in files:
                # Skip a previously generated bundle so it is not self-appended.
                if current_file.endswith(".js") and current_file != 'datatable.bundle.min.js':
                    with open(os.path.join(root, current_file), "r") as js_file:
                        bundle_data += js_file.readlines()
        bundle_filename = os.path.join(target, 'datatable.bundle.min.js')
        with open(bundle_filename, 'w') as bundle_file:
            bundle_file.write(''.join(bundle_data))
        print(" " + bundle_filename)
if __name__ == "__main__":
try:
sys.exit(main(sys.argv))
except (ValueError, IOError) as e:
sys.exit(e) | toni-heittola/js-datatable | minification.py | Python | mit | 4,998 |
#!/usr/bin/env python
# encoding: utf-8
# Copyright (C) Alibaba Cloud Computing
# All rights reserved.
import unittest
from aliyun.log.es_migration.mapping_index_converter import \
MappingIndexConverter
class TestMappingIndexConverter(unittest.TestCase):
    """Unit tests for MappingIndexConverter.to_index_config."""

    def test_to_index_config(self):
        # An Elasticsearch mapping exercising every supported field type:
        # text/keyword, all numeric widths, scaled floats, dates with custom
        # formats, boolean/binary, range types, nested objects (two levels
        # deep) and geo types.
        mapping = {
            "properties": {
                "es_text": {
                    "type": "text"
                },
                "es_keyword": {
                    "type": "keyword"
                },
                "es_long": {
                    "type": "long"
                },
                "es_integer": {
                    "type": "integer"
                },
                "es_short": {
                    "type": "short"
                },
                "es_byte": {
                    "type": "byte"
                },
                "es_double": {
                    "type": "double"
                },
                "es_float": {
                    "type": "float"
                },
                "es_half_float": {
                    "type": "half_float"
                },
                "es_scaled_float": {
                    "type": "scaled_float",
                    "scaling_factor": 100
                },
                "es_date": {
                    "type": "date",
                    "format": "yyyy-MM-dd HH:mm:ss||yyyy-MM-dd||epoch_millis"
                },
                "es_boolean": {
                    "type": "boolean"
                },
                "es_binary": {
                    "type": "binary"
                },
                "es_integer_range": {
                    "type": "integer_range"
                },
                "es_float_range": {
                    "type": "float_range"
                },
                "es_long_range": {
                    "type": "long_range"
                },
                "es_double_range": {
                    "type": "double_range"
                },
                "es_date_range": {
                    "type": "date_range",
                    "format": "yyyy-MM-dd HH:mm:ss||yyyy-MM-dd||epoch_millis"
                },
                "es_ip_range": {
                    "type": "ip_range"
                },
                "es_object": {
                    "properties": {
                        "sub_text": {"type": "text"},
                        "sub_long": {"type": "long"},
                        "sub_double": {"type": "double"},
                        "sub_boolean": {"type": "boolean"},
                        "sub_date": {
                            "type": "date",
                            "format": "yyyy-MM-dd HH:mm:ss||yyyy-MM-dd||epoch_millis"
                        },
                        "sub_byte": {"type": "byte"},
                        "sub_double_range": {
                            "type": "double_range"
                        },
                        "sub_object": {
                            "properties": {
                                "sub_text": {"type": "text"},
                                "sub_boolean": {"type": "boolean"}
                            }
                        }
                    }
                },
                "es_geo_point": {
                    "type": "geo_point"
                },
                "es_geo_shape": {
                    "type": "geo_shape"
                }
            }
        }
        index_config = MappingIndexConverter.to_index_config(mapping)
        line_config = index_config.line_config
        # The generated full-text line config should use the converter's
        # default token list and have Chinese tokenization enabled.
        self.assertEqual(MappingIndexConverter.DEFAULT_TOKEN_LIST, line_config.token_list)
        self.assertTrue(line_config.chn)

    def test_to_index_config_with_none(self):
        # A missing mapping yields no index config at all.
        index_config = MappingIndexConverter.to_index_config(None)
        self.assertEqual(None, index_config)
if __name__ == '__main__':
    # Allow running this test module directly with the unittest runner.
    unittest.main()
| wjo1212/aliyun-log-python-sdk | tests/es_migration/test_mapping_index_converter.py | Python | mit | 3,908 |
#!/usr/bin/env python3
import os
import errno
import requests
# Dealer-locator endpoint: all dealers within 2500 miles of ZIP 60202.
url = 'http://www.jeep.com/hostd/getlocatedealers.json?zipCode=60202&zipDistance=2500'

# Resolve <repo-root>/data relative to this file's location.
directory_name = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', '..', 'data'))

# Python 3: exist_ok replaces the manual errno.EEXIST try/except dance.
os.makedirs(directory_name, exist_ok=True)

file_name = os.path.join(directory_name, 'jeep.json')

# Stream the response to disk in 1 KiB chunks so large payloads are not
# held in memory.  raise_for_status() makes HTTP errors fail loudly
# instead of silently saving an error page as jeep.json.
response = requests.get(url, stream=True)
response.raise_for_status()
with open(file_name, 'wb') as fd:
    for chunk in response.iter_content(chunk_size=1024):
        fd.write(chunk)
| simon-wenmouth/dealerships | dealerships/manufacturers/jeep/download_data.py | Python | mit | 628 |
#!/usr/bin/env python
"""
$ python cmdln_main2.py
This is my shell.
$ python cmdln_main2.py foo
hello from foo
"""
import sys
import cmdln
class Shell(cmdln.RawCmdln):
    "This is my shell."
    # cmdln uses this as the program name in help/usage output.
    name = "shell"

    def do_foo(self, argv):
        """Demo command: `foo` just prints a greeting."""
        print("hello from foo")
if __name__ == "__main__":
shell = Shell()
retval = shell.cmd(sys.argv[1:]) # just run one command
sys.exit(retval)
| hfeeki/cmdln | test/cmdln_main2.py | Python | mit | 421 |
# -*- coding: utf-8 -*-
import re
RATEBEER_BASE_URL = 'http://www.ratebeer.com/beer'


def ratebeer_url(ratebeer_id, short_name):
    """Build the canonical RateBeer URL for a beer.

    Spaces in ``short_name`` become hyphens; every other character that is
    not alphanumeric or a hyphen is stripped (matching RateBeer's slugs).

    :param ratebeer_id: numeric RateBeer beer id
    :param short_name: human-readable beer name
    :return: URL string ``.../<slug>/<id>/``
    """
    # Fix: the character-class pattern is now a raw string so the \- escape
    # is passed to the regex engine verbatim instead of relying on Python
    # leaving an unrecognised string escape intact (a DeprecationWarning,
    # and a SyntaxError in future Python versions).
    fixed_name = re.sub(
        r'[^A-Za-z0-9\-]+',
        '',
        short_name.replace(' ', '-')
    )
    return "%s/%s/%s/" % (RATEBEER_BASE_URL, fixed_name, ratebeer_id)
| atlefren/beerdatabase | web/util.py | Python | mit | 309 |
class Medicine(object):
    """A named medicine together with the prescriptions issued for it."""

    def __init__(self, name):
        self.name = name            # display name of the medicine
        self.prescriptions = list() # prescriptions recorded so far, in order

    def add_prescription(self, prescription):
        """Record one more prescription for this medicine."""
        self.prescriptions.append(prescription)
| emilybache/KataMedicineClash | Python/medicine.py | Python | mit | 228 |
import unittest
from ebird.api.validation import clean_provisional
class CleanProvisionalTests(unittest.TestCase):
    """Tests for the clean_provisional validation function."""

    def test_converts_bool(self):
        # Booleans are converted to their lowercase string form.
        for value, expected in ((True, "true"), (False, "false")):
            self.assertEqual(expected, clean_provisional(value))

    def test_converts_integer(self):
        # Integers are treated as truthy/falsy flags.
        for value, expected in ((1, "true"), (0, "false")):
            self.assertEqual(expected, clean_provisional(value))
| ProjectBabbler/ebird-api | tests/validation/test_clean_provisional.py | Python | mit | 483 |
import _plotly_utils.basevalidators
class XsrcValidator(_plotly_utils.basevalidators.SrcValidator):
    """Validator for the ``xsrc`` attribute of ``histogram2d`` traces."""

    def __init__(self, plotly_name="xsrc", parent_name="histogram2d", **kwargs):
        # Pop edit_type first so an explicit caller override wins over the
        # "none" default before the remaining kwargs are forwarded.
        edit_type = kwargs.pop("edit_type", "none")
        super(XsrcValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            **kwargs
        )
| plotly/plotly.py | packages/python/plotly/plotly/validators/histogram2d/_xsrc.py | Python | mit | 388 |
import numpy as np
class Basis(object):
    """An affine coordinate basis: an absolute origin plus a 3x3 matrix
    whose *rows* are the basis vectors (e_i, e_j, e_k).

    Relative coordinates are row vectors; conversion uses
    abs = origin + rel * matrix.  All tolerance helpers compare against the
    class-wide ``__resolution`` (name-mangled to ``_Basis__resolution``).
    """

    # Tolerance used by the sign/negligibility helpers below.
    __resolution = 1E-9
    __eqdecimals = 9 # how many decimals to round off to for relative coordinates

    def __init__(self, basis=None, transform=None):
        """Copy ``basis`` (optionally left-multiplying its matrix by
        ``transform``); with no arguments, build the standard basis at the
        absolute origin."""
        if basis is not None:
            self.origin = basis.origin.copy()
            if transform is not None:
                # New rows are linear combinations of the old basis rows.
                self.matrix = np.asmatrix(transform) * basis.matrix
            else:
                self.matrix = basis.matrix.copy()
        else:
            # Identity basis at the absolute origin.
            # NOTE(review): np.matrix is deprecated in modern NumPy; the
            # whole class is built on it, so any migration must be global.
            self.origin = np.matrix([[0,0,0]], dtype='float64')
            self.matrix = np.matrix([[1,0,0],[0,1,0],[0,0,1]], dtype='float64')

    @staticmethod
    def using(origin_abs, ei, ej, ek):
        """Build a basis from an absolute origin and three basis vectors."""
        basis = Basis()
        basis.origin = np.matrix(origin_abs, dtype='float64')
        basis.matrix = np.matrix([ei, ej, ek], dtype='float64')
        return basis

    @staticmethod
    def set_resolution(decimals):
        """Set the class-wide tolerance to 10**-decimals."""
        Basis.__eqdecimals = decimals
        Basis.__resolution = 10**(-decimals)

    @staticmethod
    def zero_if_negligible(value):
        """Snap ``value`` to exactly 0 when within the tolerance band."""
        if (value > -Basis.__resolution) and (value < Basis.__resolution):
            return 0
        return value

    # NOTE(review): is_positive/is_negative both return True for values
    # inside the +/- resolution band ("non-negative"/"non-positive" within
    # tolerance); only the *strict* variants exclude the band.
    @staticmethod
    def is_positive(value):
        return value > -Basis.__resolution

    @staticmethod
    def is_negative(value):
        return value < Basis.__resolution

    @staticmethod
    def is_strictly_positive(value):
        return value > Basis.__resolution

    @staticmethod
    def is_strictly_negative(value):
        return value < -Basis.__resolution

    def rel_to_abs(self, coord_rel):
        """Convert relative coordinates (single (3,) vector or an N x 3
        array of row vectors) to absolute coordinates."""
        coord_rel = np.asarray(coord_rel)
        mat_rel = np.asarray(self.origin + np.asmatrix(coord_rel) * self.matrix)
        if coord_rel.shape == (3,):
            # Single point in -> single point out.
            return mat_rel[0]
        return mat_rel

    def abs_to_rel(self, coord_abs):
        """Convert absolute coordinates back to this basis (uses the
        transpose, i.e. assumes the basis matrix is orthonormal)."""
        mat_abs = (np.asmatrix(coord_abs) - self.origin) * self.matrix.getT()
        #if Basis.__eqdecimals > 0:
        #    mat_abs = np.around(mat_abs, decimals=Basis.__eqdecimals)
        if coord_abs.shape == (3,):
            return np.asarray(mat_abs)[0]
        return np.asarray(mat_abs)

    # Accessors for the three basis row vectors as plain (3,) arrays.
    def e_i(self):
        return np.asarray(self.matrix[0,:])[0]

    def e_j(self):
        return np.asarray(self.matrix[1,:])[0]

    def e_k(self):
        return np.asarray(self.matrix[2,:])[0]

    @staticmethod
    def separation(coord_1, coord_2): # distance between two points; can be abs or rel, so long as the bases are the same
        dp = np.linalg.norm(coord_2 - coord_1)
        if Basis.__resolution > 0:
            # Collapse sub-tolerance distances to exactly zero.
            if dp < 2 * Basis.__resolution:
                dp = 0
        return dp

    def offset(self, offset_origin_rel):
        """Copy of this basis with the origin moved to a point given in
        *this* basis's relative coordinates."""
        basis = Basis(self)
        basis.origin = self.rel_to_abs(offset_origin_rel)
        return basis

    def jki(self, offset_origin_rel=None):
        """Cyclic permutation of the axes (new i,j,k = old j,k,i), with an
        optional origin offset."""
        basis = Basis(self, [[0,1,0],[0,0,1],[1,0,0]])
        if offset_origin_rel is not None:
            basis.origin = self.rel_to_abs(offset_origin_rel)
        return basis

    def rotate_i(self, angle, offset_origin_rel=None): # angle [degrees] anti-clockwise rotation of self about self.i; optionally offset the origin
        # 90/180/270 use exact integer matrices to avoid trig round-off.
        if angle == 90:
            basis = Basis(self, [[1,0,0],[0,0,1],[0,-1,0]])
        elif angle == 180:
            basis = Basis(self, [[1,0,0],[0,-1,0],[0,0,-1]])
        elif angle == 270:
            basis = Basis(self, [[1,0,0],[0,0,-1],[0,1,0]])
        elif angle:
            sin_theta = np.sin(angle * np.pi / 180)
            cos_theta = np.cos(angle * np.pi / 180)
            basis = Basis(self, [[1,0,0],[0,cos_theta,sin_theta],[0,-sin_theta,cos_theta]])
        else:
            # angle == 0: plain copy.
            basis = Basis(self)
        if offset_origin_rel is not None:
            basis.origin = self.rel_to_abs(offset_origin_rel)
        return basis

    def rotate_j(self, angle, offset_origin_rel=None): # angle [degrees] anti-clockwise rotation of self about self.j; optionally offset the origin
        if angle == 90:
            basis = Basis(self, [[0,0,-1],[0,1,0],[1,0,0]])
        elif angle == 180:
            basis = Basis(self, [[-1,0,0],[0,1,0],[0,0,-1]])
        elif angle == 270:
            basis = Basis(self, [[0,0,1],[0,1,0],[-1,0,0]])
        elif angle:
            sin_theta = np.sin(angle * np.pi / 180)
            cos_theta = np.cos(angle * np.pi / 180)
            basis = Basis(self, [[cos_theta,0,-sin_theta],[0,1,0],[sin_theta,0,cos_theta]])
        else:
            basis = Basis(self)
        if offset_origin_rel is not None:
            basis.origin = self.rel_to_abs(offset_origin_rel)
        return basis

    def rotate_k(self, angle, offset_origin_rel=None): # angle [degrees] anti-clockwise rotation of self about self.k; optionally offset the origin
        if angle == 90:
            basis = Basis(self, [[0,1,0],[-1,0,0],[0,0,1]])
        elif angle == 180:
            basis = Basis(self, [[-1,0,0],[0,-1,0],[0,0,1]])
        elif angle == 270:
            basis = Basis(self, [[0,-1,0],[1,0,0],[0,0,1]])
        elif angle:
            sin_theta = np.sin(angle * np.pi / 180)
            cos_theta = np.cos(angle * np.pi / 180)
            basis = Basis(self, [[cos_theta,sin_theta,0],[-sin_theta,cos_theta,0],[0,0,1]])
        else:
            basis = Basis(self)
        if offset_origin_rel is not None:
            basis.origin = self.rel_to_abs(offset_origin_rel)
        return basis
| FJFranklin/wifi-py-rpi-car-controller | noise/Noise/Basis.py | Python | mit | 5,396 |
"""
WSGI config for my_doku_application project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/howto/deployment/wsgi/
"""
import os
# Point Django at the project settings before anything imports them.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "my_doku_application.settings")

from django.core.wsgi import get_wsgi_application

# WSGI servers (gunicorn, uwsgi, mod_wsgi, ...) look up this module-level
# ``application`` callable by convention.
application = get_wsgi_application()
| jhcodeh/my-doku | my_doku_application/my_doku_application/wsgi.py | Python | mit | 413 |
"""
RUN FROM THIS FILE
Alexandre Yang
ITP 115
Final Project
05/08/2014
Description:
Refer to readme.txt
"""
import pygame
from Oto import Oto
from Button import Button
from Label import Label
# Input: pygame.Surface, tuple, int, int, int, int
# Output: none
# Side-effect: Draws the grid on the screen
def drawBoard(surface, color, w, h, tileWidth, tileHeight):
    """Draw the board grid: one vertical line per tile column, then one
    horizontal line per tile row."""
    for column_x in range(tileWidth, w + 1, tileWidth):
        pygame.draw.line(surface, color, (column_x, 0), (column_x, h))
    for row_y in range(tileHeight, h + 1, tileHeight):
        pygame.draw.line(surface, color, (0, row_y), (w, row_y))
# Input: int, int
# Output: pygame.sprite.Sprite
# Side-effect: none
# Description: Creates a sprite to represent the position of the mouse-click
def createMouseClick(mouseX, mouseY):
    """Return a 1x1 sprite placed at the click position, used purely as a
    probe for pygame's sprite collision tests."""
    click_sprite = pygame.sprite.Sprite()
    click_sprite.image = pygame.Surface((1, 1))
    click_sprite.rect = click_sprite.image.get_rect()
    click_sprite.rect.x = mouseX
    click_sprite.rect.y = mouseY
    return click_sprite
def main():
    """Set up the board and menu, then run the Otomata event/update loop
    until the window is closed."""
    # Set general variables
    screenW = 850
    screenH = 775
    boardW = 675
    boardH = 675
    tileWidth = 75
    tileHeight = 75
    running = True
    screen = pygame.display.set_mode((screenW, screenH)) # Create pygame Surface
    clock = pygame.time.Clock() # Create pygame Clock
    # BPM doubles as the frame rate: one simulation step per beat.
    BPM = 4
    # active: True while the simulation is playing (Play/Pause buttons).
    active = False
    bgColor = 0, 0, 0
    lineColor = 255, 255, 255
    # Create sprite groups (necessary to call draw() method)
    otoList = pygame.sprite.Group()
    buttonList = pygame.sprite.Group()
    labelList = pygame.sprite.Group()
    # Create Menu Buttons and add them to buttonList sprite group
    playButton = Button(screen, 100, boardH+40, 50, 50, "Play")
    buttonList.add(playButton)
    pauseButton = Button(screen, 200, boardH+40, 75, 50, "Pause")
    buttonList.add(pauseButton)
    clearButton = Button(screen, 320, boardH+40, 70, 50, "Clear")
    buttonList.add(clearButton)
    plusBPMButton = Button(screen, 430, boardH+40, 65, 50, "BPM+")
    buttonList.add(plusBPMButton)
    minusBPMButton = Button(screen, 530, boardH+40, 65, 50, "BPM-")
    buttonList.add(minusBPMButton)
    originalButton = Button(screen, 700, 30, 140, 50, "Original")
    buttonList.add(originalButton)
    clarinetButton = Button(screen, 700, 130, 140, 50, "Clarinet")
    buttonList.add(clarinetButton)
    guitarButton = Button(screen, 700, 220, 140, 50, "Guitar")
    buttonList.add(guitarButton)
    synthButton = Button(screen, 700, 320, 140, 50, "Synth")
    buttonList.add(synthButton)
    pianoButton = Button(screen, 700, 420, 140, 50, "Piano")
    buttonList.add(pianoButton)
    piano2Button = Button(screen, 700, 520, 140, 50, "Piano2")
    buttonList.add(piano2Button)
    trumpetButton = Button(screen, 700, 620, 140, 50, "Trumpet")
    buttonList.add(trumpetButton)
    # main Pygame loop
    while running:
        # Resets the screen
        screen.fill(bgColor)
        # Draws the grid
        drawBoard(screen, lineColor, boardW, boardH, tileWidth, tileHeight)
        # Draw menu
        buttonList.draw(screen)
        # Listen for events
        for event in pygame.event.get():
            # If user closes window
            if event.type == pygame.QUIT:
                running = False
            # If user clicks mouse
            elif event.type == pygame.MOUSEBUTTONDOWN:
                mouseX, mouseY = pygame.mouse.get_pos()
                # Rounds mouse positions down to nearest hundred (Used to position the cells and for simplicity)
                otoPosX = (mouseX // tileWidth) * tileWidth
                otoPosY = (mouseY//tileHeight) * tileHeight
                # Create a tiny sprite where the mouse was clicked to use in collision detection
                mouseClick = createMouseClick(mouseX, mouseY)
                # If left button was clicked
                if event.button == 1:
                    # Check to see if mouseClick collided with any sprite in the otoList
                    clickedBlock = pygame.sprite.spritecollide(mouseClick, otoList, False)
                    # Check to see if mouseClick collided with any menu button
                    clickedMenu = pygame.sprite.spritecollide(mouseClick, buttonList, False)
                    # If a cell was clicked, then delete it
                    if clickedBlock:
                        otoList.remove(clickedBlock[0])
                    # Handle the menu button click events
                    elif clickedMenu:
                        if clickedMenu[0] == playButton:
                            active = True
                        elif clickedMenu[0] == pauseButton:
                            active = False
                        elif clickedMenu[0] == clearButton:
                            otoList.empty()
                        elif clickedMenu[0] == plusBPMButton:
                            BPM += 1
                        elif clickedMenu[0] == minusBPMButton and BPM != 1:
                            BPM -= 1
                        elif clickedMenu[0] == originalButton:
                            Oto.changeInstrument("")
                        elif clickedMenu[0] == clarinetButton:
                            Oto.changeInstrument("clarinet")
                        elif clickedMenu[0] == guitarButton:
                            Oto.changeInstrument("Guitar")
                        elif clickedMenu[0] == synthButton:
                            Oto.changeInstrument("Synth")
                        elif clickedMenu[0] == pianoButton:
                            Oto.changeInstrument("Piano")
                        elif clickedMenu[0] == piano2Button:
                            Oto.changeInstrument("Piano2")
                        elif clickedMenu[0] == trumpetButton:
                            Oto.changeInstrument("trumpet")
                    # If the grid was clicked then create a new cell at the position (an 'Oto' object)
                    else:
                        if mouseY < boardH and mouseX < boardW:
                            oto = Oto(screen, tileWidth, tileHeight, boardW, boardH)
                            oto.rect.x = otoPosX
                            oto.rect.y = otoPosY
                            otoList.add(oto)
                # if right button was clicked
                elif event.button == 3:
                    clickedBlock = pygame.sprite.spritecollide(mouseClick, otoList, False)
                    # Rotate cell clockwise
                    if clickedBlock:
                        clickedBlock[0].changeState()
        # Draw every cell to the screen
        otoList.draw(screen)
        # Move the cells
        if active:
            otoList.update()
            # Check to see if any cells collided
            for oto in otoList:
                oto.checkCollision(otoList)
        # Draw and update BPM label
        BPMLabel = Label(screen, 620, boardH+40, 50, 50, str(BPM))
        labelList.empty()
        labelList.add(BPMLabel)
        labelList.draw(screen)
        # Update the screen
        pygame.display.flip()
        # Set the Frames Per Second
        clock.tick(BPM)


# Launch the game when the module is executed.
main()
# -*- coding: utf-8 -*-
"""
flango.template
~~~~~~~~~~~~~~
template module provides a simple template system that compiles
templates to Python code, much like the Django and Tornado template
modules.
Usage
-----
Well, you can view the tests file directly for the usage under tests.
Basically::
>>> import template
>>> template.Template('Hello, {{ name }}').render(name='flango')
Hello, flango
If, else, for...::
>>> template.Template('''
... {% for i in l %}
... {% if i > 3 %}
... {{ i }}
... {% else %}
... less than 3
... {% endif %}
... {% endfor %})
... ''' ).render(l=[2, 4])
less than 3
4
Then, user define class object maybe also works well::
>>> class A(object):
...
... def __init__(self, a, b):
... self.a = a
... self.b = b
...
>>> o = A("I am o.a", [1, 2, 3])
>>> template.Template('''
... {{ o.a }}
... {% for i in o.b %}
... {{ i }}
... {% endfor %}
... ''').render(o=o)
I am o.a
1
2
3
and Wow, function maybe suprise you::
>>> template.Template('{{ abs(-3) }}').render()
'3'
>>> template.Template('{{ len([1, 2, 3]) }}').render()
'3'
>>> template.Template('{{ [1, 2, 3].index(2) }}').render()
'1'
and complex function like lambda expression maybe works::
>>> template.Template('{{ list(map(lambda x: x * 2, [1, 2, 3])) }}').render()
'[2, 4, 6]'
and lastly, inheritance of template, extends and include::
{% extends 'base.html' %}
{% include 'included.html' %}
Hacking with fun and joy.
"""
import re
import os
import collections
# LRU Cache capacity:
_CACHE_CAPACITY = 128
class Scanner(object):
    """Cursor over template source that yields syntax tokens.

    Splits the input on ``{{ ... }}`` variable tags, ``{% end... %}``
    closing tags and ``{% keyword ... %}`` statement tags, remembering
    the plain text before each token and the unprocessed remainder.
    """

    # One regex, three alternatives: variable, closing tag, statement.
    _TOKEN_RE = re.compile(r'''
        {{\s*(?P<var>.+?)\s*}}                              # variable: {{ name }} or function like: {{ abs(-2) }}
        |                                                   # or
        {%\s*(?P<endtag>end(if|for|while|block))\s*%}       # endtag: {% endfor %}
        |                                                   # or
        {%\s*(?P<statement>(?P<keyword>\w+)\s*(.+?))\s*%}   # statement: {% for i in range(10) %}
        ''', re.VERBOSE)

    def __init__(self, source):
        self.pattern = self._TOKEN_RE
        # Plain text seen before the most recent token.
        self.pretext = ''
        # Unprocessed tail of the source.
        self.remain = source

    def next_token(self):
        """Advance past the next token and return its match object.

        Returns ``None`` when no further token exists.  On success,
        ``pretext`` holds the literal text before the token and
        ``remain`` the text after it.
        """
        match = self.pattern.search(self.remain)
        if match is None:
            return None
        self.pretext, self.remain = (
            self.remain[:match.start()], self.remain[match.end():])
        return match

    @property
    def empty(self):
        """True once the whole source has been consumed."""
        return not self.remain
class BaseNode(object):
    """Abstract parse-tree node.

    Stores the node's raw text, the indentation depth of the generated
    Python code, and the name of the ``{% block %}`` the node belongs
    to (``None`` outside any block).  Concrete subclasses implement
    :meth:`generate` to emit one line of intermediate Python code.
    """

    def __init__(self, text, indent, block):
        self.text, self.indent, self.block = text, indent, block

    def generate(self):
        """Emit intermediate Python code for this node (subclass hook)."""
        raise NotImplementedError()
class TextNode(BaseNode):
    """Literal template text, appended to the output verbatim."""

    def generate(self):
        pad = ' ' * self.indent
        return "{0}_stdout.append('''{1}''')\n".format(pad, self.text)
class VariableNode(BaseNode):
    """An expression tag such as ``{{ name }}`` or ``{{ len(x) }}``."""

    def generate(self):
        pad = ' ' * self.indent
        return pad + '_stdout.append(' + self.text + ')\n'
class KeyNode(BaseNode):
    """A keyword line (``if``/``for``/... headers or block markers)."""

    def generate(self):
        pad = ' ' * self.indent
        return pad + self.text + '\n'
class TemplateException(Exception):
    """Raised for template syntax errors, invalid keywords and missing files."""
    pass
class Template(object):
    """ Main class for compiled template instance.
    A initialized template instance will parse and compile
    all the template source to Python intermediate code,
    and instance function `render` will use Python builtin function
    `exec` to execute the intermediate code in Python
    runtime.
    As function `exec` own very strong power and the ability to
    execute all the python code in the runtime with given
    namespace dict, so this template engine can perform all
    the python features even lambda function. But, function
    `exec` also has a huge problem in security, so be careful
    and be serious, and I am very serious too.
    """
    def __init__(self, source, path='', autoescape=False):
        if not source:
            raise ValueError('Invalid parameter')
        self.scanner = Scanner(source)
        # path for extends and include
        self.path = path
        # Parsed node list (TextNode/VariableNode/KeyNode instances).
        self.nodes = []
        # parent template
        self.parent = None
        self.autoescape = autoescape
        self._parse()
        # compiled intermediate code.
        self.intermediate = self._compile()

    def _parse(self):
        """Tokenize the source and populate ``self.nodes``."""
        python_keywords = ['if', 'for', 'while', 'try', 'else', 'elif', 'except', 'finally']
        # Current indentation depth of the generated Python code.
        indent = 0
        # Stack of currently-open {% block %} names.
        block_stack = []

        def block_stack_top():
            return block_stack[-1] if block_stack else None

        while not self.scanner.empty:
            token = self.scanner.next_token()
            if not token:
                # No more tags: the rest of the source is literal text.
                self.nodes.append(TextNode(self.scanner.remain, indent, block_stack_top()))
                break
            # get the pre-text before token.
            if self.scanner.pretext:
                self.nodes.append(TextNode(self.scanner.pretext, indent, block_stack_top()))
            variable, endtag, tag, statement, keyword, suffix = token.groups()
            if variable:
                node_text = 'escape(str({0}))'.format(variable) if self.autoescape else variable
                self.nodes.append(VariableNode(node_text, indent, block_stack_top()))
            elif endtag:
                if tag != 'block':
                    # endif/endfor/endwhile close one indentation level.
                    indent -= 1
                    continue
                # block placeholder in parent template nodes
                if not self.parent:
                    node_text = 'endblock%{0}'.format(block_stack_top())
                    self.nodes.append(KeyNode(node_text, indent, block_stack_top()))
                block_stack.pop()
            elif statement:
                if keyword == 'include':
                    # Inline the included template's nodes, shifted to the
                    # current indentation.
                    filename = re.sub(r'\'|\"', '', suffix)
                    nodes = Loader(self.path).load(filename).nodes
                    for node in nodes:
                        node.indent += indent
                    self.nodes.extend(nodes)
                elif keyword == 'extends':
                    if self.nodes:
                        raise TemplateException('Template syntax error: extends tag must be '
                                                'at the beginning of the file.')
                    filename = re.sub(r'\'|\"', '', suffix)
                    self.parent = Loader(self.path).load(filename)
                elif keyword == 'block':
                    block_stack.append(suffix)
                    if not self.parent:
                        # Emit marker text that _compile later replaces with
                        # the child template's block content.
                        node_text = 'block%{0}'.format(suffix)
                        self.nodes.append(KeyNode(node_text, indent, block_stack_top()))
                elif keyword in python_keywords:
                    node_text = '{0}:'.format(statement)
                    # else/elif/except/finally attach to the enclosing
                    # statement, so they are emitted one level shallower.
                    if keyword in ['else', 'elif', 'except', 'finally']:
                        key_indent = indent - 1
                    else:
                        key_indent = indent
                    indent += 1
                    self.nodes.append(KeyNode(node_text, key_indent, block_stack_top()))
                else:
                    raise TemplateException('Invalid keyword: {0}.'.format(keyword))
            else:
                raise TemplateException('Template syntax error.')

    def _compile(self):
        """Generate Python code from the node list and compile it.

        When this template extends a parent, the parent's generated code
        is the base and every ``block%name ... endblock%name`` marker is
        replaced with this template's nodes for that block (falling back
        to the parent's own block content when not overridden).
        """
        block = {}
        if self.parent:
            generate_code = ''.join(node.generate() for node in self.parent.nodes)
            pattern = re.compile(r'block%(?P<start_block>\w+)(?P<block_code>.*?)endblock%(?P<end_block>\w+)', re.S)
            # Group this template's generated lines by their block name.
            for node in self.nodes:
                block.setdefault(node.block, []).append(node.generate())
            for token in pattern.finditer(generate_code):
                block_name = token.group('start_block')
                if block_name != token.group('end_block'):
                    raise TemplateException('Template syntax error.')
                block_code = ''.join(block[block_name]) if block_name in block.keys() else token.group('block_code')
                generate_code = generate_code.replace(token.group(), block_code)
        else:
            generate_code = ''.join(node.generate() for node in self.nodes)
        return compile(generate_code, '<string>', 'exec')

    def render(self, **context):
        # `context['_stdout']`: Compiled template source code
        # which is a Python list, contain all the output
        # statement of Python code.
        context.update({'_stdout': [], 'escape': escape})
        exec(self.intermediate, context)
        # Collapse runs of whitespace-only lines left behind by stripped tags.
        return re.sub(r'(\s+\n)+', r'\n', ''.join(map(str, context['_stdout'])))
class LRUCache(object):
    """Least-recently-used cache of bounded size.

    Backed by an ``OrderedDict``: every hit re-inserts the entry at the
    back, and inserting into a full cache evicts the entry at the front
    (the least recently used one).  ``collections.OrderedDict`` alone or
    ``functools.lru_cache`` would work just as well.
    """

    def __init__(self, capacity):
        self.capacity = capacity
        self.cache = collections.OrderedDict()

    def get(self, key):
        """Return the cached value for *key*, or ``-1`` on a miss."""
        if key not in self.cache:
            return -1
        # Re-insert to mark the entry as most recently used.
        value = self.cache.pop(key)
        self.cache[key] = value
        return value

    def set(self, key, value):
        """Store *key*, evicting the stalest entry when over capacity."""
        if key in self.cache:
            del self.cache[key]
        elif len(self.cache) >= self.capacity:
            # Drop the least recently used entry (front of the dict).
            self.cache.popitem(last=False)
        self.cache[key] = value
class Loader(object):
    """ A template Loader which loads the environments of
    main application, or just give the template system a root
    directory to search the template files.
    loader = template.Loader("home/to/root/of/templates/")
    loader.load("index.html").render()
    Loader class use a LRU cache system to cache the recently used
    templates for performance consideration.
    """
    def __init__(self, path='', engine=Template, cache_capacity=_CACHE_CAPACITY):
        # Root directory template file names are resolved against.
        self.path = path
        # Template class used to compile loaded sources.
        self.engine = engine
        self.cache = LRUCache(capacity=cache_capacity)

    def load(self, filename):
        """Return a compiled template for *filename*, using the LRU cache.

        Raises TemplateException when the file does not exist.
        """
        # Normalize the root once so plain concatenation yields a valid path.
        if not self.path.endswith(os.sep) and self.path != '':
            self.path = self.path + os.sep
        p = ''.join([self.path, filename])
        cache_instance = self.cache.get(p)
        if cache_instance != -1:
            return cache_instance
        if not os.path.isfile(p):
            raise TemplateException('Template file {0} is not exist.'.format(p))
        with open(p) as f:
            # Compile, cache, then serve the cached instance.
            self.cache.set(p, self.engine(f.read(), path=self.path))
        return self.cache.get(p)
def escape(content):
    """Escape the five HTML special characters in *content*.

    ``&`` is rewritten first so the entities produced by the later
    substitutions are not escaped a second time.
    """
    for char, entity in (('&', '&amp;'), ('<', '&lt;'), ('>', '&gt;'),
                         ('"', '&quot;'), ("'", '&#039;')):
        content = content.replace(char, entity)
    return content
| hziling/template | template.py | Python | mit | 11,977 |
import sqlite3
import sys
import os
def menu():
    # Prompt (Python 2) until the user supplies a path that does not already
    # exist; an existing path is rejected so a previous database is never
    # clobbered.
    # NOTE(review): the "Invalid path" message is printed for paths that DO
    # exist -- misleading wording, confirm intent.
    sane = 1
    while sane == 1:
        print "[ - ] Please enter absolute path to cred. database to be created: "
        in_path = raw_input()
        if os.path.exists(in_path):
            # Clear the terminal before re-prompting.
            os.system('cls' if os.name == 'nt' else 'clear')
            print "[ - ] Invalid path, try again."
        else:
            sane = 0
    return(in_path)
def main(dbPath):
    # Create the SQLite credential database with a single "main" table:
    # cleartext password plus source-hash and rainbow-table columns per row.
    createQ = "CREATE TABLE "+'"main"'+" ('pri_Index' INTEGER PRIMARY KEY AUTOINCREMENT, 'identifier' TEXT , 'clearTextP' TEXT , 'srcMD5' TEXT , 'srcSHA1' TEXT , 'srcBCRYPT' TEXT , 'rainTableMD5' TEXT , 'rainTableSHA1' TEXT , 'rainTableBCRYPT' TEXT)"
    try:
        db_conn = sqlite3.connect(dbPath)
    except:
        # NOTE(review): bare except hides the real error, and failures of
        # cur.execute() below are not caught at all -- confirm intent.
        print "[ - ] Unable to create, check path and try again."
        sys.exit(1)
    cur = db_conn.cursor()
    cur.execute(createQ)
    # Pause so the user sees the result before the terminal closes.
    print "[ - ] DB created at "+dbPath+"\nPress enter to exit."
    end = raw_input()
# Script entry point: build the database at the user-chosen path,
# exiting quietly on Ctrl+C.
try:
    main(menu())
except KeyboardInterrupt:
    print "[ - ] CTRL+C caught, exiting."
| 0xhughes/credential_db | createDB.py | Python | mit | 975 |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'exampleLoaderTemplate.ui'
#
# Created: Sat Dec 17 23:46:27 2011
# by: PyQt4 UI code generator 4.8.3
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
# QString.fromUtf8 only exists under PyQt4 API v1; fall back to an
# identity function where QString has been removed.
try:
    _fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
    _fromUtf8 = lambda s: s
class Ui_Form(object):
    """pyuic4-generated UI definition for the example-loader window.

    setupUi() builds a horizontal splitter: example tree plus a
    "Load Example" button on the left, a monospace code browser on the
    right.  Regenerating from the .ui file overwrites manual edits.
    """
    def setupUi(self, Form):
        Form.setObjectName(_fromUtf8("Form"))
        Form.resize(762, 302)
        self.gridLayout = QtGui.QGridLayout(Form)
        self.gridLayout.setMargin(0)
        self.gridLayout.setSpacing(0)
        self.gridLayout.setObjectName(_fromUtf8("gridLayout"))
        self.splitter = QtGui.QSplitter(Form)
        self.splitter.setOrientation(QtCore.Qt.Horizontal)
        self.splitter.setObjectName(_fromUtf8("splitter"))
        # Left-hand pane: tree of examples over the load button.
        self.layoutWidget = QtGui.QWidget(self.splitter)
        self.layoutWidget.setObjectName(_fromUtf8("layoutWidget"))
        self.verticalLayout = QtGui.QVBoxLayout(self.layoutWidget)
        self.verticalLayout.setMargin(0)
        self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
        self.exampleTree = QtGui.QTreeWidget(self.layoutWidget)
        self.exampleTree.setObjectName(_fromUtf8("exampleTree"))
        self.exampleTree.headerItem().setText(0, _fromUtf8("1"))
        self.exampleTree.header().setVisible(False)
        self.verticalLayout.addWidget(self.exampleTree)
        self.loadBtn = QtGui.QPushButton(self.layoutWidget)
        self.loadBtn.setObjectName(_fromUtf8("loadBtn"))
        self.verticalLayout.addWidget(self.loadBtn)
        # Right-hand pane: monospace source-code browser.
        self.codeView = QtGui.QTextBrowser(self.splitter)
        font = QtGui.QFont()
        font.setFamily(_fromUtf8("Monospace"))
        font.setPointSize(10)
        self.codeView.setFont(font)
        self.codeView.setObjectName(_fromUtf8("codeView"))
        self.gridLayout.addWidget(self.splitter, 0, 0, 1, 1)

        self.retranslateUi(Form)
        QtCore.QMetaObject.connectSlotsByName(Form)

    def retranslateUi(self, Form):
        # Translatable UI strings.
        Form.setWindowTitle(QtGui.QApplication.translate("Form", "Form", None, QtGui.QApplication.UnicodeUTF8))
        self.loadBtn.setText(QtGui.QApplication.translate("Form", "Load Example", None, QtGui.QApplication.UnicodeUTF8))
| robertsj/poropy | pyqtgraph/examples/exampleLoaderTemplate.py | Python | mit | 2,302 |
"""
Nonnegative CP decomposition by Hierarchical alternating least squares (HALS).
Author: N. Benjamin Erichson <erichson@uw.edu>
"""
import numpy as np
import numba
from tensortools.operations import unfold, khatri_rao
from tensortools.tensors import KTensor
from tensortools.optimize import FitResult, optim_utils
def ncp_hals(
        X, rank, mask=None, random_state=None, init='rand',
        skip_modes=(), negative_modes=(), **options):
    """
    Fits nonnegative CP Decomposition using the Hierarchical Alternating Least
    Squares (HALS) Method.

    Parameters
    ----------
    X : (I_1, ..., I_N) array_like
        A real array with nonnegative entries and ``X.ndim >= 3``.

    rank : integer
        The `rank` sets the number of components to be computed.

    mask : (I_1, ..., I_N) array_like
        Binary tensor, same shape as X, specifying censored or missing data values
        at locations where (mask == 0) and observed data where (mask == 1).

    random_state : integer, RandomState instance or None, optional (default ``None``)
        If integer, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used by np.random.

    init : str, or KTensor, optional (default ``'rand'``).
        Specifies initial guess for KTensor factor matrices.
        If ``'randn'``, Gaussian random numbers are used to initialize.
        If ``'rand'``, uniform random numbers are used to initialize.
        If KTensor instance, a copy is made to initialize the optimization.

    skip_modes : iterable, optional (default ``()``).
        Specifies modes of the tensor that are not fit. This can be
        used to fix certain factor matrices that have been previously
        fit.

    negative_modes : iterable, optional (default ``()``).
        Specifies modes of the tensor whose factors are not constrained
        to be nonnegative.

    options : dict, specifying fitting options.

        tol : float, optional (default ``tol=1E-5``)
            Stopping tolerance for reconstruction error.

        max_iter : integer, optional (default ``max_iter = 500``)
            Maximum number of iterations to perform before exiting.

        min_iter : integer, optional (default ``min_iter = 1``)
            Minimum number of iterations to perform before exiting.

        max_time : integer, optional (default ``max_time = np.inf``)
            Maximum computational time before exiting.

        verbose : bool ``{'True', 'False'}``, optional (default ``verbose=True``)
            Display progress.

    Returns
    -------
    result : FitResult instance
        Object which holds the fitted results. It provides the factor matrices
        in form of a KTensor, ``result.factors``.

    Notes
    -----
    This implementation is using the Hierarchical Alternating Least Squares
    Method.  ``skip_modes`` and ``negative_modes`` previously defaulted to
    mutable lists; empty tuples behave identically for the membership tests
    performed here and avoid the shared-mutable-default pitfall.

    References
    ----------
    Cichocki, Andrzej, and P. H. A. N. Anh-Huy. "Fast local algorithms for
    large scale nonnegative matrix and tensor factorizations."
    IEICE transactions on fundamentals of electronics, communications and
    computer sciences 92.3: 708-721, 2009.

    Examples
    --------
    """

    # Mask missing elements: fill censored entries with the mean of the
    # observed data so they do not bias the initialization.
    if mask is not None:
        X = np.copy(X)
        X[~mask] = np.mean(X[mask])

    # Check inputs.
    optim_utils._check_cpd_inputs(X, rank)

    # Initialize problem.
    U, normX = optim_utils._get_initial_ktensor(init, X, rank, random_state)
    result = FitResult(U, 'NCP_HALS', **options)

    # Store problem dimensions.
    # NOTE(review): this overwrites the normX returned by
    # _get_initial_ktensor; X is unchanged in between, so the recomputation
    # appears redundant -- confirm before removing.
    normX = np.linalg.norm(X)

    # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    # Iterate the HALS algorithm until convergence or maxiter is reached
    # i)   compute the N gram matrices and multiply
    # ii)  Compute Khatri-Rao product
    # iii) Update component U_1, U_2, ... U_N
    # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    while result.still_optimizing:

        for n in range(X.ndim):

            # Skip modes that are specified as fixed.
            if n in skip_modes:
                continue

            # Select all components, but U_n
            components = [U[j] for j in range(X.ndim) if j != n]

            # i) compute the N-1 gram matrices
            grams = np.prod([arr.T @ arr for arr in components], axis=0)

            # ii)  Compute Khatri-Rao product
            kr = khatri_rao(components)
            Xmkr = unfold(X, n).dot(kr)

            # iii) Update component U_n
            _hals_update(U[n], grams, Xmkr, n not in negative_modes)

            # iv) Update masked elements with the current reconstruction.
            if mask is not None:
                pred = U.full()
                X[~mask] = pred[~mask]

        # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
        # Update the optimization result, checks for convergence.
        # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
        if mask is None:

            # Determine mode that was fit last.
            n = np.setdiff1d(np.arange(X.ndim), skip_modes).max()

            # Add contribution of last fit factors to gram matrix.
            grams *= U[n].T @ U[n]

            # Residual via the Gram-matrix identity
            # ||X - Xhat||^2 = ||Xhat||^2 - 2<X, Xhat> + ||X||^2,
            # avoiding materialization of the full reconstruction.
            residsq = np.sum(grams) - 2 * np.sum(U[n] * Xmkr) + (normX ** 2)
            result.update(np.sqrt(residsq) / normX)

        else:
            result.update(np.linalg.norm(X - pred) / normX)

    # end optimization loop, return result.
    return result.finalize()
@numba.jit(nopython=True)
def _hals_update(factors, grams, Xmkr, nonneg):
    """In-place HALS update of a single factor matrix.

    Parameters
    ----------
    factors : (dim, rank) ndarray
        Factor matrix of the mode being updated; overwritten in place.
    grams : (rank, rank) ndarray
        Elementwise product of the other factors' Gram matrices.
    Xmkr : (dim, rank) ndarray
        Unfolded data tensor times the Khatri-Rao product of the other
        factors.
    nonneg : bool
        When True, negative entries are clipped to zero after each update.
    """
    # (The unused `dim = factors.shape[0]` local was removed.)
    rank = factors.shape[1]
    indices = np.arange(rank)

    # Handle special case of rank-1 model.
    if rank == 1:
        if nonneg:
            factors[:] = np.maximum(0.0, Xmkr / grams[0, 0])
        else:
            factors[:] = Xmkr / grams[0, 0]

    # Do a few inner iterations, cycling over the columns.
    else:
        for itr in range(3):
            for p in range(rank):
                # Contribution of all the other columns to column p.
                idx = (indices != p)
                Cp = factors[:, idx] @ grams[idx][:, p]
                # Guard the diagonal against division by (near-)zero.
                r = (Xmkr[:, p] - Cp) / np.maximum(grams[p, p], 1e-6)

                if nonneg:
                    factors[:, p] = np.maximum(r, 0.0)
                else:
                    factors[:, p] = r
| ahwillia/tensortools | tensortools/optimize/ncp_hals.py | Python | mit | 6,382 |
"""
Lingui docs extensions
Inspired by Django Docs
https://github.com/django/django/blob/main/docs/_ext/djangodocs.py
"""
from docutils import nodes
from sphinx import addnodes
from sphinx.domains.std import Cmdoption
from sphinx.locale import _
from sphinx.util.docfields import TypedField
class jsxmacro(nodes.Inline, nodes.TextElement):
    """Inline docutils node representing a documented JSX macro."""
    pass
def visit_react_macro_html(self, node):
    """HTML writer hook: open a JSX macro reference with ``<``."""
    self.body.extend(['<'])
def depart_react_macro_html(self, node):
    """HTML writer hook: close a JSX macro reference with ``>``."""
    self.body.extend(['>'])
class react_component(nodes.Inline, nodes.TextElement):
    """Inline docutils node representing a documented React component."""
    pass
def visit_react_component_html(self, node):
    """HTML writer hook: open a component reference with ``<``."""
    self.body.extend(['<'])
def depart_react_component_html(self, node):
    """HTML writer hook: close a component reference with ``>``."""
    self.body.extend(['>'])
class jsmacro(nodes.Inline, nodes.TextElement):
    """Inline docutils node representing a documented JS macro."""
    pass
def visit_jsmacro_html(self, node):
    # JS macros render with no extra markup around the node text.
    pass
def depart_jsmacro_html(self, node):
    # No markup was opened in visit_jsmacro_html, so nothing to close.
    pass
def parse_lingui_cli_node(env, sig, signode):
    """Parse a ``lingui-cli`` directive signature.

    The first word of the signature is the sub-command; it becomes the
    current std-domain program and the cross-reference target, while the
    rendered title is the full signature prefixed with ``lingui``.
    """
    sub_command = sig.split(' ')[0]
    env.ref_context['std:program'] = sub_command
    title = "lingui %s" % sig
    signode += addnodes.desc_name(title, title)
    return sub_command
def setup(app):
    """Sphinx extension entry point: register Lingui object types and nodes."""
    # ``jsxmacro`` directive/role with a typed "props" field list.
    app.add_object_type(
        directivename='jsxmacro',
        rolename='jsxmacro',
        indextemplate="pair: %s; jsxmacro",
        ref_nodeclass=jsxmacro,
        objname='React macro',
        doc_field_types=[
            TypedField('props', label=_('Props'),
                       names=('prop',),
                       typerolename='jsxmacro',
                       typenames=('proptype', 'type')),
        ]
    )
    app.add_node(jsxmacro,
                 html=(visit_react_macro_html, depart_react_macro_html))

    # ``component`` directive/role for React components, also with props.
    app.add_object_type(
        directivename='component',
        rolename='component',
        indextemplate="pair: %s; component",
        ref_nodeclass=react_component,
        objname='Component',
        doc_field_types=[
            TypedField('props', label=_('Props'),
                       names=('prop',),
                       typerolename='component',
                       typenames=('proptype', 'type')),
        ]
    )
    app.add_node(react_component,
                 html=(visit_react_component_html, depart_react_component_html))

    # ``jsmacro`` directive/role; its visitors emit no extra markup.
    app.add_object_type(
        directivename='jsmacro',
        rolename='jsmacro',
        indextemplate="pair: %s; jsmacro",
        ref_nodeclass=jsmacro,
        objname='JS Macro'
    )
    app.add_node(jsmacro,
                 html=(visit_jsmacro_html, depart_jsmacro_html))

    # Plain cross-reference targets for config options and ICU notes.
    app.add_crossref_type('config', 'conf')
    app.add_crossref_type('icu', 'icu')

    # ``lingui-cli`` directive documenting CLI sub-commands; signatures are
    # parsed by parse_lingui_cli_node above.
    app.add_object_type(
        directivename="lingui-cli",
        rolename="cli",
        indextemplate="pair: %s; lingui-cli command",
        parse_node=parse_lingui_cli_node,
    )
    app.add_directive('lingui-cli-option', Cmdoption)
| lingui/js-lingui | docs/_ext/linguidocs.py | Python | mit | 2,809 |
import logging
import time
import cStringIO
from PIL import Image
from libmproxy.protocol.http import decoded
import re
def request(context, flow):
    # mitmproxy inline-script hook (Python 2): rewrite Google Docs
    # spreadsheet update requests so the submitted cell value is replaced.
    try:
        logging.debug("request")
        if (flow.request.pretty_host(hostheader=True).endswith("docs.google.com")):
            #logging.debug("Before:")
            #logging.debug(flow.request.content)
            # Payload presumably looks like <prefix>[null,<n>,<value>]<suffix>;
            # capture the parts around the cell-update triple -- confirm
            # against a live capture.
            m = re.match(r'(?P<msg_start>[\w\W]+)(?P<msg_info>\[null,\d+,[^\]]+\])(?P<msg_end>[\w\W]+)', flow.request.content)
            if not m:
                # logging.debug("Match failed")
                # Not a cell-update request; leave the body untouched.
                return 0
            replace = (m.group('msg_start') + '[null,2, "You have been pwned!!!"]'+m.group('msg_end'))
            flow.request.content = replace
            logging.debug("Google table request was changed!")
            #logging.debug(flow.request.content)
    except Exception as e:
        # Catch-all so a scripting error never kills the proxy; details go
        # to the debug log.
        logging.debug("CHECK CODE, IDIOT!!!!!!!!!!!")
        logging.debug(type(e))
        logging.debug(e)
def start (context, argv):
    # mitmproxy script entry point: route debug logging to log.log and
    # mark the start of a session.
    logging.basicConfig(filename="log.log",level=logging.DEBUG)
    logging.debug("============================================\n")
    logging.debug(time.time())
    logging.debug("Startup:\n")
    context.log("start")
| infosec-216/alice | google_table_repl.py | Python | mit | 1,227 |
from django.shortcuts import render_to_response, get_object_or_404
from django.http import JsonResponse, HttpResponseRedirect
from django.template import RequestContext
from django.core.urlresolvers import reverse
from django.views.decorators.csrf import csrf_exempt
from django.db.models import Avg
from django.db import IntegrityError
from .models import (Category,
TemplateFormat,
TemplateElement,
Document,
ExtractedElements)
from .forms import CreateCategoryForm, TemplateFormatForm, DocumentForm
from .serializers import get_templates_for_category
import json
def home(request):
    """
    View for home page.
    """
    context = RequestContext(request)
    return render_to_response("home.html", {}, context_instance=context)
def get_all_formats(request, category):
    """
    View to get all template formats that exist for the particular category.
    Responds with ``error: true`` when the slug matches no category.
    """
    try:
        category_obj = Category.objects.get(slug=category)
    except Category.DoesNotExist:
        return JsonResponse({
            "error": "true",
            "message": "Invalid category selected",
        })
    return JsonResponse({
        "error": "false",
        "data": get_templates_for_category(category_obj),
    })
def create_category(request):
    """
    View to create category.

    GET renders the empty form; POST validates it, creates the Category
    and redirects to template creation pre-filled with the new slug.
    """
    if request.method == "GET":
        form = CreateCategoryForm()
        return render_to_response("create_category.html", {"form": form},
                                  context_instance=RequestContext(request))
    elif request.method == "POST":
        form = CreateCategoryForm(data=request.POST)
        if not form.is_valid():
            return render_to_response("create_category.html",
                                      {"form": form, "errors": form.errors},
                                      context_instance=RequestContext(request))
        try:
            category = Category.objects.create(
                category_name=form.cleaned_data['category_name'],
                description=form.cleaned_data['description'])
        except IntegrityError:
            # An IntegrityError here presumably means a uniqueness
            # constraint on the name fired; report it on the same form.
            message = "Category with the same name already exist"
            return render_to_response("create_category.html",
                                      {"form": form, "errors": message},
                                      context_instance=RequestContext(request))
        # Hand off to template creation with the category preselected.
        redirect_url = reverse('create-template')
        redirect_url += "?categ=%s" %(category.slug)
        return HttpResponseRedirect(redirect_url)
def create_template_format(request):
    """
    View to create new template format.

    GET renders the empty form; POST validates it, creates the
    TemplateFormat under the selected category and redirects to document
    upload with both slugs preselected.
    """
    if request.method == "GET":
        form = TemplateFormatForm()
        return render_to_response("create_format.html",
                                  {"form": form},
                                  context_instance=RequestContext(request))
    elif request.method == "POST":
        form = TemplateFormatForm(data=request.POST)
        if not form.is_valid():
            return render_to_response("create_format.html",
                                      {"form": form, "errors": form.errors},
                                      context_instance=RequestContext(request))
        # The form carries the category slug; 404 on an unknown one.
        category = get_object_or_404(Category,
                                     slug=form.cleaned_data['category'])
        try:
            template = TemplateFormat.objects.create(
                category=category,
                template_name=form.cleaned_data['template_name']
            )
        except IntegrityError:
            # Presumably a uniqueness constraint on the template name.
            message = "Template Name Already exist"
            return render_to_response("create_format.html",
                                      {"form": form, "errors": message},
                                      context_instance=RequestContext(request))
        redirect_url = reverse('upload_document')
        redirect_url += "?categ=%s&format=%s" %(category.slug,
                                                template.slug)
        return HttpResponseRedirect(redirect_url)
def upload_document(request):
    """
    View for handling document upload.

    GET renders the upload form; POST validates it, stores the uploaded
    file under the selected template format and redirects to the element
    selector for the new document.
    """
    if request.method == "GET":
        form = DocumentForm()
        return render_to_response("upload_document.html",
                                  {"form": form},
                                  context_instance=RequestContext(request))
    elif request.method == "POST":
        form = DocumentForm(request.POST, request.FILES)
        if not form.is_valid():
            return render_to_response("upload_document.html",
                                      {"form": form, "errors": form.errors},
                                      context_instance=RequestContext(request))
        # The form carries the template slug; 404 on an unknown one.
        template = get_object_or_404(TemplateFormat,
                                     slug=form.cleaned_data['template'])
        document = Document.objects.create(
            template_format=template,
            document_name=form.cleaned_data['document_name'],
            document=request.FILES['document']
        )
        return HttpResponseRedirect(
            reverse('particular_document',
                    kwargs={"unique_id": document.id}
                    ))
@csrf_exempt
def particular_document(request, unique_id):
    """
    View to display a particular document and let the end user to select
    elements from it on the frontend and save them.

    GET: redirect to the preview page when elements were already
    extracted, otherwise render the selector UI.
    POST: persist the element coordinates posted by the frontend.
    """
    document = get_object_or_404(Document, id=unique_id)
    all_elements = document.template_format.templateelement_set.all()
    if request.method == "GET":
        if document.extractedelements_set.all().count() > 0:
            return HttpResponseRedirect(reverse('preview_document',
                                        kwargs={"unique_id": document.id}))
        return render_to_response('document_selector.html',
                                  {"document": document,
                                   "elements": all_elements},
                                  context_instance=RequestContext(request))
    elif request.method == "POST":
        # NOTE(review): the payload appears to be double-encoded by the
        # frontend, hence the nested json.loads -- confirm against the
        # client code.
        data = json.loads(json.loads(request.POST['data']))
        # Record the image resolution once, on the first save.
        # (Replaces the original `if cond: pass else: ...` construct.)
        if not (document.image_resolution_x and document.image_resolution_y):
            document.image_resolution_x = data["image_width"]
            document.image_resolution_y = data["image_height"]
            document.save()
        template = document.template_format
        # Replace any previously extracted elements for this document.
        document.extractedelements_set.all().delete()
        for element_name in data["elements"]:
            element = TemplateElement.objects.get_or_create(
                template=template, element_name=element_name)[0]
            extracted_element = ExtractedElements.objects.get_or_create(
                document=document, element=element)[0]
            extracted_element.x1_coordinate = data[element_name]["x"]
            extracted_element.y1_coordinate = data[element_name]["y"]
            extracted_element.block_width = data[element_name]["width"]
            extracted_element.block_height = data[element_name]["height"]
            extracted_element.save()
        return JsonResponse({"error": "false",
                             "message": "Successfully saved elements"})
def all_documents(request):
    """
    View to display all documents.

    NOTE(review): non-GET requests fall through and return None, which
    Django rejects with an error -- confirm whether other methods should
    be handled or rejected explicitly.
    """
    documents = Document.objects.all()
    if request.method == "GET":
        return render_to_response("all_documents.html",
                                  {"documents": documents},
                                  context_instance=RequestContext(request))
def document_preview(request, unique_id):
    """
    View to preview/ update a document. Any document for which the elements
    have been created is eligible for preview/ update.
    """
    document = get_object_or_404(Document, id=unique_id)
    template_elements = document.template_format.templateelement_set.all()
    context = {"document": document, "elements": template_elements}
    return render_to_response("document_elements.html", context,
                              context_instance=RequestContext(request))
def get_element_coordinates(request, unique_id, element):
    """
    Get approx coordinates of a particular element for a given template format.
    Average of all values of the particular element for various documents is
    considered.
    """
    try:
        document = Document.objects.get(id=unique_id)
    except Document.DoesNotExist:
        return JsonResponse({
            "error": "true",
            "message": "Document Does not exist"
        })
    # The element is looked up on the document's template, case-insensitively.
    template = document.template_format
    try:
        element = TemplateElement.objects.get(template=template,
                                              element_name__iexact=element)
    except TemplateElement.DoesNotExist:
        return JsonResponse({"error": "true",
                             "message": "Element Does not exist"})
    # Averages span every document that recorded this element.  Each
    # aggregate is a dict of the form {'<field>__avg': value-or-None} and
    # is returned to the client as-is -- the frontend must unwrap it.
    avg_x = ExtractedElements.objects.filter(
        element=element).aggregate(Avg('x1_coordinate'))
    avg_y = ExtractedElements.objects.filter(
        element=element).aggregate(Avg('y1_coordinate'))
    avg_height = ExtractedElements.objects.filter(
        element=element).aggregate(Avg('block_height'))
    avg_width = ExtractedElements.objects.filter(
        element=element).aggregate(Avg('block_width'))
    return JsonResponse({"error": "false", "x": avg_x, "y": avg_y,
                         "height": avg_height, "width": avg_width})
| pulkitpahwa/smart-image-coordinates | smarter/base/views.py | Python | mit | 9,603 |
# EJS template for the generated project's URLconf.  The <% %> branches
# select Django 1.x routing (django.conf.urls.url) or Django 2.0 routing
# (django.urls.path) and optionally append auth login/logout routes.
<% if (djangoVersion != '2.0') { %>
from django.conf.urls import url, include
<% } else { %>
from django.urls import path, include
<% } %>
from django.contrib import admin
from <%= appName %> import views
<% if (includeLoginPage == true) { %>
from <%= appName %>.forms import AuthenticationForm
from django.contrib.auth.views import login, logout
<% } %>

<% if (djangoVersion != '2.0') { %>
urlpatterns = [
    url(r'^$', views.index, name="index"),
    url(r'^', include('<%= appName %>.urls')),
    url(r'^admin/', include(admin.site.urls)),
]
<% if (includeLoginPage == true) { %>
urlpatterns += [
    url(r'^accounts/login/$', login, {
        'template_name': 'login.html',
        'authentication_form': AuthenticationForm
    }, name='login'),
    url(r'^accounts/logout/$', logout, {
        'next_page': '/'
    }, name='logout'),
]
<% } %>
<% } else { %>
urlpatterns = [
    path(r'', views.index, name="index"),
    path(r'', include('<%= appName %>.urls')),
    path(r'admin/', admin.site.urls),
]
<% if (includeLoginPage == true) { %>
urlpatterns += [
    path(r'accounts/login', login, {
        'template_name': 'login.html',
        'authentication_form': AuthenticationForm
    }, name='login'),
    path(r'accounts/logout', logout, {
        'next_page': '/'
    }, name='logout'),
]
<% } %>
<% } %>
| dfurtado/generator-djangospa | generators/app/templates/root/appfiles/urls.py | Python | mit | 1,320 |
import json
import unittest
import billboard
class TestCurrentGreatestHot100Singles(unittest.TestCase):
    """Checks that the ChartData object for the current Greatest Hot 100
    Singles chart has entries and instance variables that are valid and
    reasonable. Does not test whether the data is actually correct.

    The Greatest Hot 100 Singles chart is special in that there are no past
    charts (i.e., there is no historical data).
    """

    def setUp(self):
        # A fresh ChartData is constructed for every test method.
        self.chart = billboard.ChartData('greatest-hot-100-singles')

    def test_date(self):
        self.assertIsNone(self.chart.date)  # This chart has no dates

    def test_ranks(self):
        # Ranks must be exactly 1..100, in order.
        ranks = list(entry.rank for entry in self.chart)
        self.assertEqual(ranks, list(range(1, 101)))

    def test_entries_validity(self):
        self.assertEqual(len(self.chart), 100)
        for entry in self.chart:
            self.assertGreater(len(entry.title), 0)
            self.assertGreater(len(entry.artist), 0)
            # This all-time chart carries no week-to-week statistics.
            self.assertIsNone(entry.peakPos)
            self.assertIsNone(entry.lastPos)
            self.assertIsNone(entry.weeks)
            # Redundant because of test_ranks
            self.assertTrue(1 <= entry.rank <= 100)
            self.assertIsInstance(entry.isNew, bool)

    def test_json(self):
        # The chart and each entry must serialize to parseable JSON.
        self.assertTrue(json.loads(self.chart.json()))
        for entry in self.chart:
            self.assertTrue(json.loads(entry.json()))
| jameswenzel/billboard-charts | tests/test_greatest_hot_100_singles.py | Python | mit | 1,451 |
#!/usr/bin/env python
"""
A toolkit for identifying and advertising service resources.
Uses a specific naming convention for the Task Definition of services. If you
name the Task Definition ending with "-service", no configuration is needed.
This also requires that you not use that naming convention for task definitions
that are not services.
For example:
A Task Definition with the family name of 'cache-service' will have its
hosting Container Instance's internal ip added to a Route53 private Zone as
cache.local and other machines on the same subnet can address it that way.
"""
import argparse
import logging
import os
import re
import json
import boto
import boto.ec2
import boto.route53
import requests
from etcd.client import Client
from time import sleep
# AWS clients built once at import time; region comes from the environment
# with a us-east-1 default.
region = os.environ.get('ECS_REGION', 'us-east-1')

ecs = boto.connect_ec2containerservice(
    host='ecs.{0}.amazonaws.com'.format(region))
ec2 = boto.ec2.connect_to_region(region)
route53 = boto.route53.connect_to_region(region)

logging.basicConfig(format='%(asctime)s %(message)s',
                    datefmt='%Y/%m/%d/ %I:%M:%S %p')

# Resolve the ECS cluster name: environment first, then the last
# ECS_CLUSTER assignment in the agent config file, else None.
if 'ECS_CLUSTER' in os.environ:
    cluster = os.environ['ECS_CLUSTER']
elif os.path.exists('/etc/ecs/ecs.config'):
    pat = re.compile(r'\bECS_CLUSTER\b\s*=\s*(\w*)')
    # NOTE(review): file handle is never closed explicitly.
    cluster = pat.findall(open('/etc/ecs/ecs.config').read())[-1]
else:
    cluster = None
def get_task_arns(family):
    """
    Get the ARN of running task, given the family name.
    Returns None when no task of that family is running.
    """
    listing = ecs.list_tasks(cluster=cluster, family=family)
    task_arns = listing['ListTasksResponse']['ListTasksResult']['taskArns']
    return task_arns if task_arns else None
def get_ec2_interface(container_instance_arn):
    """
    Get the ec2 interface from an container instance ARN.
    Resolves the container instance to its EC2 instance id, then returns
    that instance's first network interface.
    """
    described = ecs.describe_container_instances(container_instance_arn,
                                                 cluster=cluster)
    instances = described['DescribeContainerInstancesResponse'] \
        ['DescribeContainerInstancesResult']['containerInstances']
    instance_id = instances[0]['ec2InstanceId']
    reservations = ec2.get_all_instances(filters={'instance-id': instance_id})
    return reservations[0].instances[0].interfaces[0]
def get_zone_for_vpc(vpc_id):
    """
    Identify the Hosted Zone for the given VPC.

    Returns a dict with 'zone_id' and 'zone_name' keys, or implicitly
    ``None`` when no hosted zone is associated with *vpc_id*.

    Assumes a 1 to 1 relationship.

    NOTE: There is an existing bug.
    https://github.com/boto/boto/issues/3061
    When that changes, I expect to have to search ['VPCs'] as a list of
    dictionaries rather than a dictionary. This has the unfortunate side
    effect of not working for Hosted Zones that are associated with more than
    one VPC. (But, why would you expect internal DNS for 2 different private
    networks to be the same anyway?)
    """
    response = route53.get_all_hosted_zones()['ListHostedZonesResponse']
    for zone in response['HostedZones']:
        # Zone 'Id' looks like '/hostedzone/Z123...'; keep only the last part.
        zone_id = zone['Id'].split('/')[-1]
        detail = route53.get_hosted_zone(zone_id)['GetHostedZoneResponse']
        try:
            if detail['VPCs']['VPC']['VPCId'] == vpc_id:
                return {'zone_id': zone_id, 'zone_name': zone['Name']}
        except KeyError:
            # Zone has no associated VPC (public zone); skip it.
            pass
def get_service_info(service_name):
    """
    Collect IP/port info for every running task of *service_name*.

    Returns a dict with 'name' (the service name with any '-service'
    suffix stripped), 'tasks' (list of {'ip', 'ports'} dicts) and
    'vpc_id', or ``None`` when the service has no running tasks.
    """
    info = {
        "name": service_name,
        "tasks": []
    }
    # Strip the '-service' naming-convention suffix for the advertised name.
    if service_name[-8:] == '-service':
        info['name'] = service_name[:-8]
    task_arns = get_task_arns(service_name)
    if not task_arns:
        logging.info('{0} is NOT RUNNING'.format(service_name))
        return None
    else:
        logging.info('{0} is RUNNING'.format(service_name))
    data = ecs.describe_tasks(task_arns, cluster=cluster)
    tasks = data['DescribeTasksResponse']['DescribeTasksResult']['tasks']
    for task in tasks:
        interface = get_ec2_interface(task['containerInstanceArn'])
        task_info = {
            'ip': interface.private_ip_address,
            'ports': {}
        }
        # Only TCP port mappings are recorded (containerPort -> hostPort).
        for container in task['containers']:
            if container['networkBindings']:
                for port in container['networkBindings']:
                    if port['protocol'] == 'tcp':
                        task_info['ports'][port['containerPort']] = port['hostPort']
        info['tasks'].append(task_info)
    # NOTE(review): uses the *last* loop iteration's interface; raises
    # NameError if 'tasks' is empty — presumably impossible because
    # get_task_arns returned ARNs above, but confirm.
    info['vpc_id'] = interface.vpc_id
    return info
def update_dns(zone_id, zone_name, service_name, service_ips, ttl=20):
    """Upsert an A record mapping service_name.zone_name to *service_ips*."""
    fqdn = '.'.join((service_name, zone_name))
    changes = boto.route53.record.ResourceRecordSets(route53, zone_id)
    change = changes.add_change('UPSERT', fqdn, 'A', ttl)
    for ip in service_ips:
        change.add_value(ip)
    changes.commit()
    return changes
def update_service(service_name, method, prefix):
    """
    Update DNS to allow discovery of properly named task definitions.

    method: 'dns' upserts a Route53 A record in the VPC's private zone;
            'etcd' writes the task list as JSON under /tasks[/prefix]/name.
    prefix: optional etcd key prefix (falsy values are omitted).
    """
    info = get_service_info(service_name)
    if not info:
        return None
    if method == 'dns':
        network = get_zone_for_vpc(info["vpc_id"])
        ips = [t['ip'] for t in info['tasks']]
        logging.info('Registering {0}.{1} as {2}'.format(
            info['name'], network['zone_name'], ','.join(ips)))
        update_dns(network['zone_id'], network['zone_name'],
                   info['name'], ips)
    elif method == 'etcd':
        data = json.dumps(info['tasks'])
        logging.info('Registering {0} as {1}'.format(
            info['name'], data))
        # 169.254.169.254 is the EC2 instance metadata endpoint; this
        # fetches the local host's private IPv4 to reach its etcd node.
        host = requests.get("http://169.254.169.254/latest/meta-data/local-ipv4").content
        client = Client(host=host, port=4001)
        # Falsy parts (e.g. prefix=False) are filtered out of the key path.
        key = '/' + '/'.join([i for i in ['tasks', prefix, info['name']] if i])
        client.node.set(key, data)
def main():
    """
    Main function that handles running the command.

    Parses CLI arguments, registers the service once, and optionally
    registers it a second time after 60 seconds (--rerun).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('service_name', nargs=1,
                        help='list of services to start')
    parser.add_argument('method', nargs=1,
                        help='method of registering service')
    parser.add_argument('-p', '--prefix', action='store', default=False,
                        help='prefix when saving to etcd')
    parser.add_argument('-q', '--quiet', action='store_true',
                        help='suppress output')
    parser.add_argument('-r', '--rerun', action='store_true',
                        help='run again after a 60 second pause')
    args = parser.parse_args()
    if not args.quiet:
        logging.getLogger().setLevel(logging.INFO)
    update_service(args.service_name[0], args.method[0], args.prefix)
    if args.rerun:
        sleep(60)
        update_service(args.service_name[0], args.method[0], args.prefix)
if __name__ == '__main__':
main()
| simonluijk/aws-ecs-service-discovery | register.py | Python | mit | 6,803 |
#!/usr/bin/env python
# Data manager for reference data for the QIIME Galaxy tools
import argparse
import ftplib
import json
import os
import tarfile
import zipfile
import requests
# Transfer protocol used to fetch each reference database.
protocol = {
    "unite": "http",
    "greengenes": "ftp",
    "silva": "http",
    "img": "ftp"
}
# Base URL (for HTTP sources) or host name (for FTP sources) per database.
baseUrl = {
    "unite": "http://unite.ut.ee/sh_files/sh_qiime_release_",
    "greengenes": "greengenes.microbio.me",
    "silva": "http://www.arb-silva.de/fileadmin/silva_databases/qiime/Silva_",
    "img": "ftp.microbio.me"
}
# FTP-only lookup tables: remote directory prefix and remote file-name parts.
ftp_dir = {
    "greengenes": "/greengenes_release/gg_",
    "img": ""
}
ftp_file_prefix = {
    "greengenes": "gg_",
    "img": ""
}
ftp_file_suffix = {
    "greengenes": "_otus",
    "img": ""
}
# Archive extension per database; SILVA varies per release.
extension = {
    "unite": "zip",
    "greengenes": "tar.gz",
    "silva": {
        "104_release": "tgz",
        "108_release": "tgz",
        "108_release_curated": "tgz",
        "111_release": "tgz",
        "119_consensus_majority_taxonomy": "zip",
        "119_release": "zip",
        "119_release_aligned_rep_files": "tar.gz",
        "123_release": "zip",
        "128_release": "tgz"},
    "img": "tgz"
}
# Reference-data categories: also the sub-directory and data-table names.
filetypes = ["rep_set", "rep_set_aligned", "taxonomy", "trees"]
# Utility functions for interacting with Galaxy JSON
def read_input_json(jsonfile):
    """Read the JSON file supplied by the data manager tool.

    Returns a tuple ``(param_dict, extra_files_path)``: *param_dict* is an
    arbitrary dictionary of tool input parameters; *extra_files_path* is
    the directory where output files must be placed for the receiving data
    manager.  NB the directory pointed to by *extra_files_path* does not
    exist initially; creating it is the caller's job.
    """
    with open(jsonfile) as handle:
        payload = json.load(handle)
    return (payload['param_dict'],
            payload['output_data'][0]['extra_files_path'])
# Utility functions for creating data table dictionaries
#
# Example usage:
# >>> d = create_data_tables_dict()
# >>> add_data_table(d,'my_data')
# >>> add_data_table_entry(dict(dbkey='hg19',value='human'))
# >>> add_data_table_entry(dict(dbkey='mm9',value='mouse'))
# >>> print str(json.dumps(d))
def create_data_tables_dict():
    """Return a fresh dictionary for storing data table information.

    The result is meant to be populated via :func:`add_data_table` and
    :func:`add_data_table_entry`, then serialized to JSON for the data
    manager.
    """
    return {'data_tables': {}}
def add_data_table(d, table):
    """Register an empty data table named *table* in *d*."""
    d['data_tables'][table] = []
def add_data_table_entry(d, table, entry):
    """Append *entry* (a dict keyed by data table column names) to *table*.

    Raises an Exception if the named data table has not been registered.
    """
    try:
        entries = d['data_tables'][table]
    except KeyError:
        raise Exception("add_data_table_entry: no table '%s'" % table)
    entries.append(entry)
def get_ftp_file(ftp, filename):
    """Download *filename* from the connected *ftp* server into the CWD.

    Failures are reported on stdout but not raised, preserving the
    original best-effort behaviour; unlike the original, only FTP/OS
    errors are swallowed (not e.g. KeyboardInterrupt) and the local
    file handle is always closed.
    """
    try:
        # Context manager guarantees the local file is closed even on error.
        with open(filename, 'wb') as fd:
            ftp.retrbinary("RETR " + filename, fd.write)
    except ftplib.all_errors as err:
        # ftplib.all_errors covers ftplib.Error, OSError and EOFError.
        print("Error: %s" % err)
def download_archive(db, version, ext):
    """Download the archive for *db*/*version* and return its local filename.

    HTTP sources stream into '<db>_<version>.<ext>'; FTP sources keep the
    remote naming scheme ('<prefix><version><suffix>.<ext>').
    """
    filepath = "%s_%s.%s" % (db, version, ext)
    if protocol[db] == "http":
        url = "%s%s.%s" % (baseUrl[db], version, ext)
        r = requests.get(url, stream=True)
        r.raise_for_status()
        with open(filepath, "wb") as fd:
            for chunk in r.iter_content(chunk_size=128):
                fd.write(chunk)
    elif protocol[db] == "ftp":
        ftp = ftplib.FTP(baseUrl[db])
        ftp.login("anonymous", "ftplib-example-1")
        # Greengenes 13_8 is fetched from the 13_5 release directory.
        if db == "greengenes" and version == "13_8":
            ftp.cwd("%s%s" % (ftp_dir[db], "13_5"))
        else:
            ftp.cwd("%s%s" % (ftp_dir[db], version))
        # NOTE: replaces the HTTP-style filename computed above.
        filepath = "%s%s%s.%s" % (
            ftp_file_prefix[db],
            version,
            ftp_file_suffix[db],
            ext)
        get_ftp_file(ftp, filepath)
        ftp.quit()
    return filepath
def find_archive_content_path(archive_content_path):
    """Return the content root of an extracted archive.

    If the directory holds exactly one visible entry (ignoring names that
    start with '.' or '_'), descend into it; otherwise return the
    directory itself.
    """
    visible = [
        entry for entry in os.listdir(archive_content_path)
        if not entry.startswith(".") and not entry.startswith("_")
    ]
    if len(visible) == 1:
        return os.path.join(archive_content_path, visible[0])
    return archive_content_path
def extract_archive(filepath, ext, db):
    """Extract *filepath* into ./tmp and return the content root path.

    Supports 'tar.gz'/'tgz' and 'zip' archives; any other extension
    returns 'tmp' untouched.  *db* is accepted for interface
    compatibility but is not used.
    """
    archive_content_path = "tmp"
    if ext == "tar.gz" or ext == "tgz":
        # Context manager closes the archive even if extraction fails
        # (the original leaked the handle on error).
        with tarfile.open(filepath) as tar:
            tar.extractall(path=archive_content_path)
        archive_content_path = find_archive_content_path(archive_content_path)
    elif ext == "zip":
        with zipfile.ZipFile(filepath, 'r') as zip_ref:
            zip_ref.extractall(archive_content_path)
        archive_content_path = find_archive_content_path(archive_content_path)
    return archive_content_path
def move_unite_files(archive_content_path, filename_prefix, name_prefix, data_tables, target_dir):
    """Dispatch UNITE archive files into place and register them.

    Files whose names contain 'refs' go to rep_set/; files whose names
    contain 'taxonomy' go to taxonomy/.  Anything else is ignored.
    """
    archive_content = os.listdir(archive_content_path)
    for content in archive_content:
        content_filepath = os.path.join(archive_content_path, content)
        content_name_prefix = "%s - %s" % (name_prefix, content.split(".")[0])
        content_filename_prefix = "%s_%s" % (filename_prefix, content)
        if content.find("refs") != -1:
            move_file(
                content_filepath,
                content_filename_prefix,
                content_name_prefix,
                data_tables,
                os.path.join(target_dir, "rep_set"),
                "rep_set")
        elif content.find("taxonomy") != -1:
            move_file(
                content_filepath,
                content_filename_prefix,
                content_name_prefix,
                data_tables,
                os.path.join(target_dir, "taxonomy"),
                "taxonomy")
def move_file(input_filepath, filename, name, data_tables, target_dir, filetype):
    """Move a reference file into *target_dir* and register it in the
    'qiime_<filetype>' data table (dbkey/value/name/path columns).
    """
    output_filepath = os.path.join(target_dir, filename)
    # NOTE(review): os.rename cannot move across filesystems — assumes
    # source and target share one; confirm or switch to shutil.move.
    os.rename(input_filepath, output_filepath)
    add_data_table_entry(
        data_tables,
        "qiime_%s" % (filetype),
        dict(
            dbkey=filename,
            value=os.path.splitext(filename)[0],
            name=name,
            path=output_filepath))
def move_dir_content(input_path, filename_prefix, name_prefix, data_tables, target_dir, filetype):
    """Recursively move every non-hidden file under *input_path* into
    *target_dir*, registering each one via :func:`move_file`.

    Directory names encountered on the way are accumulated into the
    filename/name prefixes.
    """
    for content in os.listdir(input_path):
        if content.startswith("."):
            continue
        content_path = os.path.join(input_path, content)
        content_name_prefix = "%s - %s" % (name_prefix, content.split(".")[0])
        content_filename_prefix = "%s_%s" % (filename_prefix, content)
        if os.path.isdir(content_path):
            # Recurse into sub-directories with the extended prefixes.
            move_dir_content(
                content_path,
                content_filename_prefix,
                content_name_prefix,
                data_tables,
                target_dir,
                filetype)
        else:
            move_file(
                content_path,
                content_filename_prefix,
                content_name_prefix,
                data_tables,
                target_dir,
                filetype)
def move_files(archive_content_path, filename_prefix, name_prefix, data_tables, target_dir, db, version):
    """Move the four standard reference sub-directories (rep_set,
    rep_set_aligned, taxonomy, trees) from the extracted archive into
    *target_dir*, registering their files in the data tables.
    """
    for filetype in filetypes:
        # 'rep_set_aligned' is skipped for greengenes 12_10.
        if filetype == "rep_set_aligned":
            if db == "greengenes" and version == "12_10":
                continue
        filetype_target_dir = os.path.join(
            target_dir,
            filetype)
        filetype_path = os.path.join(
            archive_content_path,
            filetype)
        move_dir_content(
            filetype_path,
            filename_prefix,
            name_prefix,
            data_tables,
            filetype_target_dir,
            filetype)
def download_db(data_tables, db, version, target_dir):
    """Download QIIME database

    Creates references to the specified file(s) on the Galaxy
    server in the appropriate data table (determined from the
    file extension).

    The 'data_tables' dictionary should have been created using
    the 'create_data_tables_dict' and 'add_data_table' functions.

    Arguments:
      data_tables: a dictionary containing the data table info
      db: name of the database ('unite', 'greengenes', 'silva' or 'img')
      version: version of the database
      target_dir: directory to put copy or link to the data file
    """
    ext = extension[db]
    if db == "silva":
        # SILVA uses a per-release extension (see the 'extension' table).
        ext = ext[version]
    print("Download archive")
    filepath = download_archive(db, version, ext)
    print("Extract archive %s" % filepath)
    archive_content_path = extract_archive(filepath, ext, db)
    print("Moving file from %s" % archive_content_path)
    filename_prefix = "%s_%s" % (db, version)
    name_prefix = "%s (%s)" % (db, version)
    if db == "greengenes" or db == "silva":
        move_files(
            archive_content_path,
            filename_prefix,
            name_prefix,
            data_tables,
            target_dir,
            db,
            version)
    elif db == "unite":
        move_unite_files(
            archive_content_path,
            filename_prefix,
            name_prefix,
            data_tables,
            target_dir)
if __name__ == "__main__":
    print("Starting...")

    # Read command line
    parser = argparse.ArgumentParser(
        description='Download QIIME reference database')
    parser.add_argument('--database', help="Database name")
    parser.add_argument('--version', help="Database version")
    parser.add_argument('--jsonfile', help="Output JSON file")
    args = parser.parse_args()

    jsonfile = args.jsonfile

    # Read the input JSON supplied by Galaxy.
    params, target_dir = read_input_json(jsonfile)

    # Make the target directory and its four category sub-directories.
    print("Making %s" % target_dir)
    os.mkdir(target_dir)
    os.mkdir(os.path.join(target_dir, "rep_set"))
    os.mkdir(os.path.join(target_dir, "rep_set_aligned"))
    os.mkdir(os.path.join(target_dir, "taxonomy"))
    os.mkdir(os.path.join(target_dir, "trees"))

    # Set up data tables dictionary
    data_tables = create_data_tables_dict()
    add_data_table(data_tables, "qiime_rep_set")
    add_data_table(data_tables, "qiime_rep_set_aligned")
    add_data_table(data_tables, "qiime_taxonomy")
    add_data_table(data_tables, "qiime_trees")

    # Fetch data from specified data sources
    download_db(
        data_tables,
        args.database,
        args.version,
        target_dir)

    # Write the collected data-table entries back to the JSON file.
    print("Outputting JSON")
    print(str(json.dumps(data_tables)))
    with open(jsonfile, 'w') as out:
        json.dump(data_tables, out)
    print("Done.")
| blankclemens/tools-iuc | data_managers/data_manager_qiime_database_downloader/data_manager/data_manager_qiime_download.py | Python | mit | 11,147 |
#-*- encoding: utf-8 -*-
"""
Right triangles with integer coordinates
The points P (x1, y1) and Q (x2, y2) are plotted at integer co-ordinates and are joined to the origin, O(0,0), to form ΔOPQ.
There are exactly fourteen triangles containing a right angle that can be formed when each co-ordinate lies between 0 and 2 inclusive; that is, 0 ≤ x1, y1, x2, y2 ≤ 2.
Given that 0 ≤ x1, y1, x2, y2 ≤ 50, how many right triangles can be formed?
"""
from utils import *
#
| zlsun/ProjectEuler | 091.py | Python | mit | 484 |
"""LaTeX Exporter class"""
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
import os
from traitlets import Unicode, default
from traitlets.config import Config
from nbconvert.filters.highlight import Highlight2Latex
from nbconvert.filters.filter_links import resolve_references
from .templateexporter import TemplateExporter
class LatexExporter(TemplateExporter):
    """
    Exports to a LaTeX template. Inherit from this class if your template is
    LaTeX based and you need custom transformers/filters.
    If you don't need custom transformers/filters, just change the
    'template_file' config option. Place your template in the special "/latex"
    subfolder of the "../templates" folder.
    """

    @default('file_extension')
    def _file_extension_default(self):
        return '.tex'

    @default('template_file')
    def _template_file_default(self):
        return 'article.tplx'

    # Latex constants
    @default('default_template_path')
    def _default_template_path_default(self):
        return os.path.join("..", "templates", "latex")

    @default('template_skeleton_path')
    def _template_skeleton_path_default(self):
        return os.path.join("..", "templates", "latex", "skeleton")

    # Extension that the template files use.
    template_extension = Unicode(".tplx").tag(config=True)

    output_mimetype = 'text/latex'

    def default_filters(self):
        # Extend the inherited filters with LaTeX link resolution.
        for x in super(LatexExporter, self).default_filters():
            yield x
        yield ('resolve_references', resolve_references)

    @property
    def default_config(self):
        c = Config({
            'NbConvertBase': {
                'display_data_priority' : ['text/latex', 'application/pdf', 'image/png', 'image/jpeg', 'image/svg+xml', 'text/markdown', 'text/plain']
            },
            'ExtractOutputPreprocessor': {
                'enabled':True
            },
            'SVG2PDFPreprocessor': {
                'enabled':True
            },
            'LatexPreprocessor': {
                'enabled':True
            },
            'SphinxPreprocessor': {
                'enabled':True
            },
            'HighlightMagicsPreprocessor': {
                'enabled':True
            }
        })
        c.merge(super(LatexExporter,self).default_config)
        return c

    def from_notebook_node(self, nb, resources=None, **kw):
        # Pick the pygments lexer from notebook metadata so code cells are
        # highlighted for the notebook's kernel language.
        langinfo = nb.metadata.get('language_info', {})
        lexer = langinfo.get('pygments_lexer', langinfo.get('name', None))
        self.register_filter('highlight_code',
                             Highlight2Latex(pygments_lexer=lexer, parent=self))
        return super(LatexExporter, self).from_notebook_node(nb, resources, **kw)

    def _create_environment(self):
        environment = super(LatexExporter, self)._create_environment()

        # Set special Jinja2 syntax that will not conflict with latex.
        environment.block_start_string = "((*"
        environment.block_end_string = "*))"
        environment.variable_start_string = "((("
        environment.variable_end_string = ")))"
        environment.comment_start_string = "((="
        environment.comment_end_string = "=))"

        return environment
| nitin-cherian/LifeLongLearning | Python/PythonProgrammingLanguage/Encapsulation/encap_env/lib/python3.5/site-packages/nbconvert/exporters/latex.py | Python | mit | 3,419 |
'''
Opcode d(11:8) Operand d(7:0) Operation
0 8 bits representing a constant Load constant to Reg0
1 8 bits representing a constant Load constant to Reg1
2 d7 selects register Reg0 or Reg1 Load value of selected register to the ALU accumulator
3 d7 selects register Reg0 or Reg1 Add value of selected register to ALU accumulator and store result in accumulator
4 d7 selects register Reg0 or Reg1 Subtract value of selected register to ALU accumulator and store result in accumulator
5 Not used Accumulator shift right
6 Not used Accumulator shift left
7 d7 selects register Reg0 or Reg1 AND accumulator with selected register and store result in accumulator
8 d7 selects register Reg0 or Reg1 OR accumulator with selected register and store result in accumulator
9 Not used Invert Accumulator bits
A 8 bits represent address in
instruction memory Jump to address
B 8 bits represent address in
instruction memory Jump to address if Accumulator is all zeros
C 8 bits represent address in
instruction memory Jump subroutine (program counter value is stored for the subroutine return)
D Not used Return from subroutine (restore value of program counter)
E D(3:0) selects either Reg0 '000' or
Reg1 '001' or output port P1 '010' or
output port P2 '011' or UART transmit
register '100' Write value in accumulator to selected destination
F d7 selects register Reg0 or Reg1 Store UART received byte into selected register
'''
# Imports
import argparse;
import os;
# Constants and Globals
# Output file extension for assembled programs.
FILE_EXTENSION = str('ass');
DOT_FILE_EXTENSION = str('.{}'.format(FILE_EXTENSION));
CONSTANT_MAX = 255; # Largest number in 8 unsigned bits
CONSTANT_MIN = 0; # Only using unsigned bits
ADDRESS_MAX = 255; # Largest address
ADDRESS_MIN = 0; # Can't go lower than zero
# Expected token count (mnemonic + operands) for each instruction.
INSTRUCTION_LENGTH_DICT = {
    'load' : 3,
    'move' : 2,
    'add' : 2,
    'sub' : 2,
    'sr' : 1,
    'sl' : 1,
    'and' : 2,
    'or' : 2,
    'inv' : 1,
    'j' : 2,
    'jaz' : 2,
    'jal' : 2,
    'jr' : 1,
    'wri' : 2,
    'str' : 2
};
# Helper functions
def doExit(error):
    '''Print *error* and terminate the assembler with exit status 1.'''
    print "ERROR: {}".format(error);
    print "Exiting..."
    os.sys.exit(1);
def printInstructions():
    '''Print the assembler's instruction-set reference to stdout.'''
    print """Instruction Set:
load [Constant] [Reg0, Reg1] {load constant to register}
move [Reg0, Reg1] {To Accum}
add [Reg0, Reg1] {To Accum}
sub [Reg0, Reg1] {From Accum}
sl {Shift accum left}
sr {Shift accum right}
and [Reg0, Reg1] {With Accum}
or [Reg0, Reg1] {With Accum}
inv {Invert Accum}
j [Address] {Jump to address}
jaz [Address] {Jump to address if accum zero}
jal [Address] {Jump and link (sub routine)}
jr {Jump return (From sub routine)}
wri [Reg0, Reg1, P1, P2, Tx] {Write accum to register}
str [Reg0, Reg1] {Store Rx from UART into register}
Examples:
load 25 Reg0 #Comment, Comment, Comment
load 1 Reg1
move Reg0
add Reg1
sub Reg0
wri P1 # p+++++++
List of Register:
Reg0: General Purpose Register
Reg1: General Purpose Register
P1: Register reading into first digit of seven segment display
P2: Register reading into second digit of seven segment display
UART: UART send register
Rx: UART receive register"""
    return;
def getInstructionCode(lineList, LineCount):
    '''Return the instruction as a 12-bit binary string.

    lineList: tokenized source line (mnemonic first), as produced by
    fixString.  LineCount: 1-based source line number, used in error
    messages.  Any invalid instruction terminates the assembler via
    doExit.

    NOTE(review): the bare "except:" below also catches the SystemExit
    raised by doExit calls made *inside* the try block, so those errors
    are re-reported as "Unkown code" before exiting — confirm this is
    intended.
    '''
    instruction = '{0:012b}'.format(0);
    if(len(lineList) != INSTRUCTION_LENGTH_DICT[lineList[0]]):
        doExit("Invalid number of arguments to {0} instruciton on line {1}".format(lineList[0], LineCount));
    code = lineList[0];
    try:
        if(code == 'load'):
            # Test bounds of constant
            if(int(lineList[1]) > CONSTANT_MAX or int(lineList[1]) < CONSTANT_MIN):
                doExit("Invalid constant range for instruction on line {}".format(LineCount));
            else:
                #Determine which register we are storing it in
                if(lineList[2] == 'reg0'):
                    instruction = '0000' + '{:08b}'.format(int(lineList[1]));
                elif(lineList[2] == 'reg1'):
                    instruction = '0001' + '{:08b}'.format(int(lineList[1]));
                else:
                    doExit("Unkown register {0} for load instruciton on line {1}".format(lineList[2], LineCount));
        elif(code == 'move'):
            #Determine which register we are storing it in
            if(lineList[1] == 'reg0'):
                instruction = '0010' + '00000000';
            elif(lineList[1] == 'reg1'):
                instruction = '0010' + '10000000';
            else:
                doExit("Unkown register {0} for move instruciton on line {1}".format(lineList[1], LineCount));
        elif(code == 'add'):
            if(lineList[1] == 'reg0'):
                instruction = '0011' + '00000000';
            elif(lineList[1] == 'reg1'):
                instruction = '0011' + '10000000';
            else:
                doExit("Unkown register {0} for add instruciton on line {1}".format(lineList[1], LineCount));
        elif(code == 'sub'):
            if(lineList[1] == 'reg0'):
                instruction = '0100' + '00000000';
            elif(lineList[1] == 'reg1'):
                instruction = '0100' + '10000000';
            else:
                doExit("Unkown register {0} for sub instruciton on line {1}".format(lineList[1], LineCount));
        elif(code == 'sr'):
            instruction = '0101' + '00000000';
        elif(code == 'sl'):
            instruction = '0110' + '00000000';
        elif(code == 'and'):
            if(lineList[1] == 'reg0'):
                instruction = '0111' + '00000000';
            elif(lineList[1] == 'reg1'):
                instruction = '0111' + '10000000';
            else:
                doExit("Unkown register {0} for and instruciton on line {1}".format(lineList[1], LineCount));
        elif(code == 'or'):
            if(lineList[1] == 'reg0'):
                instruction = '1000' + '00000000';
            elif(lineList[1] == 'reg1'):
                instruction = '1000' + '10000000';
            else:
                doExit("Unkown register {0} for or instruciton on line {1}".format(lineList[1], LineCount));
        elif(code == 'inv'):
            instruction = '1001' + '00000000';
        elif(code == 'j'):
            if(int(lineList[1]) > ADDRESS_MAX or int(lineList[1]) < ADDRESS_MIN):
                doExit("Invalid address range for instruction on line {}".format(LineCount));
            else:
                instruction = '1010' + '{:08b}'.format(int(lineList[1]));
        elif(code == 'jaz'):
            if(int(lineList[1]) > ADDRESS_MAX or int(lineList[1]) < ADDRESS_MIN):
                doExit("Invalid address range for instruction on line {}".format(LineCount));
            else:
                instruction = '1011' + '{:08b}'.format(int(lineList[1]));
        elif(code == 'jal'):
            if(int(lineList[1]) > ADDRESS_MAX or int(lineList[1]) < ADDRESS_MIN):
                doExit("Invalid address range for instruction on line {}".format(LineCount));
            else:
                instruction = '1100' + '{:08b}'.format(int(lineList[1]));
        elif(code == 'jr'):
            instruction = '1101' + '00000000';
        elif(code == 'wri'):
            if(lineList[1] == 'reg0'):
                instruction = '1110' + '00000' + '000';
            elif(lineList[1] == 'reg1'):
                instruction = '1110' + '00000' + '001';
            elif(lineList[1] == 'p1'):
                instruction = '1110' + '00000' + '010';
            elif(lineList[1] == 'p2'):
                instruction = '1110' + '00000' + '011';
            elif(lineList[1] == 'tx'):
                instruction = '1110' + '00000' + '100';
            else:
                # NOTE(review): indexes lineList[2], but 'wri' lines have
                # only 2 tokens — the IndexError is masked by the bare
                # except below; confirm it should be lineList[1].
                doExit("Unkown register {0} for wri instruciton on line {1}".format(lineList[2], LineCount));
        elif(code == 'str'):
            if(lineList[1] == 'reg0'):
                instruction = '1111' + '00000000';
            elif(lineList[1] == 'reg1'):
                instruction = '1111' + '10000000';
            else:
                doExit("Unkown register {0} for str instruciton on line {1}".format(lineList[1], LineCount));
    except:
        doExit("Unkown code {0} occured on line {1}".format(lineList, LineCount));
    return instruction;
def fixString(line):
    '''Normalize a raw source line into a list of lowercase tokens.

    Strips comments (everything from '#'), surrounding whitespace and
    repeated spaces.  Returns [] for blank or comment-only lines.
    '''
    # Drop trailing comments. Example ' LoAd 123 Reg0 #Mean #Comment'
    if '#' in line:
        line = line[:line.index('#')]
    line = line.strip()
    if not line:
        return []
    # Lowercase, split on spaces and discard empty tokens caused by
    # runs of spaces between operands.
    tokens = [tok for tok in line.lower().split(' ') if tok != '']
    return tokens
# Parse Arguments
parser = argparse.ArgumentParser( description='An assembler for a small processor build in ENEL453',
version=0.1
);
parser.add_argument( 'filename',
help='File name of the file you would like to convert to assembly',
nargs='*',
);
parser.add_argument( '-i', '--instructions',
action='store_true',
help='Print Instructions and exit'
);
parser.add_argument( '-b', '--binary',
help='Output in binary, default is hex',
action='store_true',
default=False
);
args = parser.parse_args();
if args.instructions:
printInstructions();
if len(args.filename) == 0:
parser.print_help();
doExit("Did not receive a file to assemble");
for name in args.filename:
# Attempt to open output file
try:
out = open(os.path.splitext(name)[0] + DOT_FILE_EXTENSION, 'w');
except IOError as e:
doExit("I/O error({0}): {1}".format(e.errno, e.strerror));
except ValueError:
doExit("Unable to convert file names to strings");
# Read line by line to parse
with open(name) as f:
# Keep a line counter for error handling
LineCount = int(0);
# Loop though lines handling them all
for line in f.readlines():
LineCount += 1;
lineList = fixString(line);
if(lineList == []):
continue;
instructionCode = getInstructionCode(lineList, LineCount);
if(args.binary == False):
instructionCode = "{0:#05X}".format(int(instructionCode,2));
out.write(instructionCode);
out.write('\n');
out.close();
| BryceBesler/ENEL453-Assembler | testCases/TestCasesRan/14-03-14--21-39/assembly.py | Python | mit | 11,254 |
from persistence.models import Agent, BaseModel
from peewee import *
class Message(BaseModel):
    """A message exchanged between two agents, optionally stored in parts."""
    correlationid = CharField()  # external id used to correlate message parts
    category = IntegerField()
    body = CharField(null=True)  # inline body; None when the body is split into parts
    sender = ForeignKeyField(Agent, related_name='send_messages')
    receiver = ForeignKeyField(Agent, related_name='received_messages')

    # flags
    complete = BooleanField(default=False)
    processed = BooleanField(default=False)

    # computed
    def get_body(self):
        """Return the body, reassembling ordered parts when not stored inline.

        Returns None while the message is marked incomplete.
        """
        if self.body is not None:
            return self.body
        if not self.complete:
            return None
        # NOTE(review): 'self.parts' is presumably a backref from a
        # message-part model defined elsewhere — confirm.
        messageparts = sorted(self.parts, key=lambda x: x.position)
        body = ''.join([part.body for part in messageparts])
        return body

    def as_dict(self):
        """Serialize the message to a plain dict (e.g. for JSON transport)."""
        return {
            'id': self.correlationid,
            'sender': self.sender.name,
            # NOTE(review): key spelling 'reciever' is part of the external
            # format; renaming it would break existing consumers.
            'reciever': self.receiver.name,
            'category': self.category,
            'body': self.get_body()
        }
| onnovalkering/sparql-over-sms | sos-service/src/persistence/models/message.py | Python | mit | 1,021 |
# Read marks until the user presses Enter, then report the average.
# NOTE: eval() is kept for parity with the exercise, but it executes
# arbitrary expressions — fine for a classroom script, unsafe otherwise.
total = 0
n = 0
nextMark = input('Type in a mark: ')
# Check for the empty sentinel BEFORE evaluating: the original eval'd
# first, so pressing Enter at the very first prompt crashed on eval("").
while nextMark != "":
    total = total + eval(nextMark)
    n = n + 1
    nextMark = input('Hit enter to stop, or type in a mark: ')
if n > 0:
    print("You entered", n, 'marks. The average is:', total/n)
else:
    # Guard against ZeroDivisionError when no marks were entered.
    print("You entered no marks, so there is no average.")
| MrColwell/PythonProfessionalLearning | PythonForTeachers/studentExercises/8_2_average.py | Python | mit | 339 |
from iota.crypto import FRAGMENT_LENGTH
from iota.exceptions import with_context
from iota.types import Hash, TryteString, TrytesCompatible
# Public API of this module.
__all__ = [
    'BundleHash',
    'Fragment',
    'TransactionHash',
    'TransactionTrytes',
    'Nonce'
]
class BundleHash(Hash):
    """
    An :py:class:`TryteString` (:py:class:`Hash`) that acts as a bundle hash.
    """
    # Marker subclass: adds type information only, no extra behaviour.
    pass
class TransactionHash(Hash):
    """
    An :py:class:`TryteString` (:py:class:`Hash`) that acts as a transaction hash.
    """
    # Marker subclass: adds type information only, no extra behaviour.
    pass
class Fragment(TryteString):
    """
    A :py:class:`TryteString` representing a single signature/message
    fragment of a transaction.

    :raises ValueError: if ``trytes`` is longer than 2187 trytes in length.
    """

    LEN = FRAGMENT_LENGTH
    """
    Length of a fragment in trytes.
    """

    def __init__(self, trytes: TrytesCompatible) -> None:
        # Shorter values are padded up to LEN; longer values are rejected.
        super().__init__(trytes, pad=self.LEN)

        if len(self._trytes) > self.LEN:
            message = '{cls} values must be {len} trytes long.'.format(
                cls=type(self).__name__,
                len=self.LEN,
            )
            raise with_context(
                exc=ValueError(message),
                context={
                    'trytes': trytes,
                },
            )
class TransactionTrytes(TryteString):
    """
    A :py:class:`TryteString` representation of a whole transaction.

    :raises ValueError: if ``trytes`` is longer than 2673 trytes in length.
    """

    LEN = 2673
    """
    Length of a transaction in trytes.
    """

    def __init__(self, trytes: TrytesCompatible) -> None:
        # Shorter values are padded up to LEN; longer values are rejected.
        super().__init__(trytes, pad=self.LEN)

        if len(self._trytes) > self.LEN:
            message = '{cls} values must be {len} trytes long.'.format(
                cls=type(self).__name__,
                len=self.LEN,
            )
            raise with_context(
                exc=ValueError(message),
                context={
                    'trytes': trytes,
                },
            )
class Nonce(TryteString):
    """
    A :py:class:`TryteString` that acts as a transaction nonce.

    :raises ValueError: if ``trytes`` is longer than 27 trytes in length.
    """

    LEN = 27
    """
    Length of a nonce in trytes.
    """

    def __init__(self, trytes: TrytesCompatible) -> None:
        # Shorter values are padded up to LEN; longer values are rejected.
        super().__init__(trytes, pad=self.LEN)

        if len(self._trytes) > self.LEN:
            message = '{cls} values must be {len} trytes long.'.format(
                cls=type(self).__name__,
                len=self.LEN,
            )
            raise with_context(
                exc=ValueError(message),
                context={
                    'trytes': trytes,
                },
            )
| iotaledger/iota.lib.py | iota/transaction/types.py | Python | mit | 2,751 |
import re
from time import sleep
from .settings import settings
def get_parsed_mentions(raw_text):
    """Return the list of @mentions (without the '@') found in *raw_text*.

    The original compiled the pattern and ran findall twice, discarding
    the first result; re.findall (with the module's internal pattern
    cache) does the same work once.
    """
    return re.findall(r"@([\w\.]+)", raw_text)
def get_parsed_hashtags(raw_text):
    """Return the list of #hashtags (without the '#') found in *raw_text*.

    The original compiled the pattern and ran findall twice, discarding
    the first result; re.findall (with the module's internal pattern
    cache) does the same work once.
    """
    return re.findall(r"#(\w+)", raw_text)
def fetch_mentions(raw_test, dict_obj):
    """When enabled in settings, attach parsed @mentions to *dict_obj*."""
    if not settings.fetch_mentions:
        return
    found = get_parsed_mentions(raw_test)
    if found:
        dict_obj["mentions"] = found
def fetch_hashtags(raw_test, dict_obj):
    """When enabled in settings, attach parsed #hashtags to *dict_obj*."""
    if not settings.fetch_hashtags:
        return
    found = get_parsed_hashtags(raw_test)
    if found:
        dict_obj["hashtags"] = found
def fetch_datetime(browser, dict_post):
    """Read the post timestamp from the element's 'datetime' attribute."""
    ele_datetime = browser.find_one(".eo2As .c-Yi7 ._1o9PC")
    datetime = ele_datetime.get_attribute("datetime")
    dict_post["datetime"] = datetime
def fetch_imgs(browser, dict_post):
    """Collect all image URLs of a (possibly multi-photo) post into
    dict_post['img_urls'], clicking through the carousel's right arrow.
    """
    img_urls = set()
    while True:
        ele_imgs = browser.find("._97aPb img", waittime=10)
        if isinstance(ele_imgs, list):
            for ele_img in ele_imgs:
                img_urls.add(ele_img.get_attribute("src"))
        else:
            # No image elements found: stop collecting.
            break
        next_photo_btn = browser.find_one("._6CZji .coreSpriteRightChevron")
        if next_photo_btn:
            next_photo_btn.click()
            sleep(0.3)
        else:
            # No right-chevron means the last carousel slide was reached.
            break
    dict_post["img_urls"] = list(img_urls)
def fetch_likes_plays(browser, dict_post):
    """Record the like count (and view count, for videos) on *dict_post*."""
    if not settings.fetch_likes_plays:
        return
    likes = None
    el_likes = browser.find_one(".Nm9Fw > * > span")
    el_see_likes = browser.find_one(".vcOH2")
    if el_see_likes is not None:
        # Video post: read the play count, then open and close the
        # likes dialog to read the like count.
        el_plays = browser.find_one(".vcOH2 > span")
        dict_post["views"] = int(el_plays.text.replace(",", "").replace(".", ""))
        el_see_likes.click()
        el_likes = browser.find_one(".vJRqr > span")
        likes = el_likes.text
        browser.find_one(".QhbhU").click()
    elif el_likes is not None:
        likes = el_likes.text
    # Strip thousands separators; default to 0 when no count was found.
    dict_post["likes"] = (
        int(likes.replace(",", "").replace(".", "")) if likes is not None else 0
    )
def fetch_likers(browser, dict_post):
    """Open the likers dialog and collect liker names by scrolling it."""
    if not settings.fetch_likers:
        return
    like_info_btn = browser.find_one(".EDfFK ._0mzm-.sqdOP")
    like_info_btn.click()
    # Keyed by profile href so re-seen entries are de-duplicated.
    likers = {}
    liker_elems_css_selector = ".Igw0E ._7UhW9.xLCgt a"
    likers_elems = list(browser.find(liker_elems_css_selector))
    last_liker = None
    while likers_elems:
        for ele in likers_elems:
            likers[ele.get_attribute("href")] = ele.get_attribute("title")
        # Stop once scrolling no longer reveals a new last element.
        if last_liker == likers_elems[-1]:
            break
        last_liker = likers_elems[-1]
        last_liker.location_once_scrolled_into_view
        sleep(0.6)
        likers_elems = list(browser.find(liker_elems_css_selector))
    dict_post["likers"] = list(likers.values())
    close_btn = browser.find_one(".WaOAr button")
    close_btn.click()
def fetch_caption(browser, dict_post):
    """Extract the post caption plus its mentions and hashtags.

    The first '.gElp9' element holds the caption; 'Verified' badges and
    empty spans are skipped, and only the first real text is kept.
    """
    ele_comments = browser.find(".eo2As .gElp9")
    if len(ele_comments) > 0:
        temp_element = browser.find("span",ele_comments[0])
        for element in temp_element:
            if element.text not in ['Verified',''] and 'caption' not in dict_post:
                dict_post["caption"] = element.text
        fetch_mentions(dict_post.get("caption",""), dict_post)
        fetch_hashtags(dict_post.get("caption",""), dict_post)
def fetch_comments(browser, dict_post):
    """Collect all comments on the post into dict_post["comments"].

    First expands the comment list by repeatedly clicking the
    "load more" button, then expands collapsed replies, and finally
    parses every comment element except the first (which is the
    caption). Each comment is stored as {"author", "comment"} plus
    any mentions/hashtags found in its text.
    """
    if not settings.fetch_comments:
        return
    show_more_selector = "button .glyphsSpriteCircle_add__outline__24__grey_9"
    show_more = browser.find_one(show_more_selector)
    while show_more:
        # Property access scrolls the button into view before clicking.
        show_more.location_once_scrolled_into_view
        show_more.click()
        sleep(0.3)
        show_more = browser.find_one(show_more_selector)
    # Expand any "view replies" toggles.
    show_comment_btns = browser.find(".EizgU")
    for show_comment_btn in show_comment_btns:
        show_comment_btn.location_once_scrolled_into_view
        show_comment_btn.click()
        sleep(0.3)
    ele_comments = browser.find(".eo2As .gElp9")
    comments = []
    # ele_comments[0] is the caption, so start at index 1.
    for els_comment in ele_comments[1:]:
        author = browser.find_one(".FPmhX", els_comment).text
        temp_element = browser.find("span", els_comment)
        for element in temp_element:
            # Ignore the "Verified" badge and empty spans.
            if element.text not in ['Verified','']:
                comment = element.text
                comment_obj = {"author": author, "comment": comment}
                fetch_mentions(comment, comment_obj)
                fetch_hashtags(comment, comment_obj)
                comments.append(comment_obj)
    if comments:
        dict_post["comments"] = comments
def fetch_initial_comment(browser, dict_post):
    """Store the first entry of the comment list (the post's own caption
    text) under dict_post["description"], if present.
    """
    comments_elem = browser.find_one("ul.XQXOT")
    first_post_elem = browser.find_one(".ZyFrc", comments_elem)
    caption = browser.find_one("span", first_post_elem)
    if caption:
        dict_post["description"] = caption.text
def fetch_details(browser, dict_post):
    """Open the post's permalink (dict_post["key"]) in a new tab, scrape
    the poster's username, the optional location tag and the caption,
    then close the tab again.
    """
    if not settings.fetch_details:
        return
    browser.open_new_tab(dict_post["key"])
    username = browser.find_one("a.ZIAjV")
    location = browser.find_one("a.O4GlU")
    if username:
        dict_post["username"] = username.text
    if location:
        # Location is optional on posts; only set when the tag exists.
        dict_post["location"] = location.text
    fetch_initial_comment(browser, dict_post)
    browser.close_current_tab()
| huaying/ins-crawler | inscrawler/fetch.py | Python | mit | 5,418 |
"""
Tests for core and utility classes
"""
import unittest
from pandas_accounting import Company, Subsidiary
class TestCompany(unittest.TestCase):
    """Unit tests for the Company model."""
    def setUp(self):
        # Fresh company with a fixed share count for each test.
        self.C = Company(shares=100)
    def test_company_shares(self):
        self.assertEqual(self.C.shares, 100)
class TestSubsidiary(unittest.TestCase):
    """Unit tests for the Subsidiary model."""
    def setUp(self):
        # Fresh subsidiary instance for each test.
        self.S = Subsidiary()
    def test_sub(self):
        # The previous assertion was assertEqual(0, 0), which can never
        # fail and therefore tested nothing. Assert instead that the
        # fixture actually constructed a Subsidiary.
        self.assertIsInstance(self.S, Subsidiary)
| davidastephens/pandas-accounting | pandas_accounting/core/tests/test_core.py | Python | mit | 439 |
#!/usr/bin/env python
import click
from aeriscloud.cli.helpers import standard_options, Command
@click.command(cls=Command)
@standard_options(start_prompt=False)
def cli(box):
    """
    Destroy a box.

    The ``box`` argument is resolved and injected by the
    ``standard_options`` decorator; its ``destroy()`` method tears the
    box down (presumably the underlying VM resources -- confirm in the
    Box implementation).
    """
    box.destroy()
if __name__ == '__main__':
    cli()
| AerisCloud/AerisCloud | aeriscloud/cli/aeris/destroy.py | Python | mit | 271 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-04-11 03:42
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Replace the old JobApply model with JobApplication (a through
    model linking Job and Student), add a Location model, and drop
    Job.location.
    """
    dependencies = [
        ('students', '0007_auto_20170410_0523'),
        ('job', '0015_auto_20170410_0523'),
    ]
    operations = [
        migrations.CreateModel(
            name='JobApplication',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                # NOTE(review): updated_at uses auto_now_add, so it is set
                # once at creation and never updated -- auto_now may have
                # been intended; confirm against the model definition.
                ('updated_at', models.DateTimeField(auto_now_add=True)),
            ],
        ),
        migrations.CreateModel(
            name='Location',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('city', models.CharField(max_length=200)),
                ('state', models.CharField(max_length=200)),
                ('country', models.CharField(max_length=200)),
            ],
        ),
        # Tear down the old JobApply through model before deleting it.
        migrations.RemoveField(
            model_name='jobapply',
            name='job_id',
        ),
        migrations.RemoveField(
            model_name='jobapply',
            name='user_id',
        ),
        migrations.RemoveField(
            model_name='job',
            name='location',
        ),
        migrations.AlterField(
            model_name='job',
            name='skills',
            # NOTE(review): null=True has no effect on ManyToManyField
            # (per Django docs) -- harmless but redundant.
            field=models.ManyToManyField(null=True, related_name='reqskills', to='users.Skill'),
        ),
        migrations.DeleteModel(
            name='JobApply',
        ),
        # Wire up the new JobApplication through model.
        migrations.AddField(
            model_name='jobapplication',
            name='job',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='job.Job'),
        ),
        migrations.AddField(
            model_name='jobapplication',
            name='student',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='students.Student'),
        ),
        migrations.AddField(
            model_name='job',
            name='applications',
            field=models.ManyToManyField(related_name='applications', through='job.JobApplication', to='students.Student'),
        ),
    ]
| jamesaud/se1-group4 | jmatcher/job/migrations/0016_auto_20170411_0342.py | Python | mit | 2,381 |
# Re-export the package's public helpers so callers can import them
# directly from the package.
from .db_utils import PostgresController
from .enums import Action, Change
# Explicit public API of this package.
__all__ = ['PostgresController', 'Action', 'Change']
| dashwav/nano-chan | cogs/utils/__init__.py | Python | mit | 129 |
"""
bamboo.globals
~~~~~~~~~~~~~
"""
from peak.util.proxies import CallbackProxy
from bamboo.context import context
# Lazy proxy: each use resolves to whatever database object is
# registered in the active application context at that moment
# (Flask-style context-bound global).
db = CallbackProxy(lambda: context["db"])
| hahnicity/bamboo | bamboo/globals.py | Python | mit | 159 |
"""Plot spatial P-E"""
import re
import sys
script_dir = sys.path[0]
import os
import pdb
import argparse
import numpy as np
import matplotlib.pyplot as plt
import iris
from iris.experimental.equalise_cubes import equalise_attributes
import cartopy.crs as ccrs
import cmdline_provenance as cmdprov
repo_dir = '/'.join(script_dir.split('/')[:-2])
module_dir = repo_dir + '/modules'
sys.path.append(module_dir)
try:
import general_io as gio
import timeseries
except ImportError:
raise ImportError('Script and modules in wrong directories')
def regrid(cube):
    """Interpolate a cube onto a regular 2-degree global lat/lon grid.

    The horizontal coordinates are linearly interpolated onto a common
    grid and their metadata rewritten to standard CF names/units so
    that cubes from different models can later be merged.
    """
    target_lats = list(np.arange(-89, 90, 2))
    target_lons = list(np.arange(1, 360, 2))
    # Build the interpolation sample points from the non-time dim coords.
    sample_points = []
    for name in (c.name() for c in cube.dim_coords):
        if name == 'time':
            continue
        if 'lat' in name:
            sample_points.append((name, target_lats))
        elif 'lon' in name:
            sample_points.append((name, target_lons))
    cube = cube.interpolate(sample_points, iris.analysis.Linear())
    cube.coord('latitude').guess_bounds()
    cube.coord('longitude').guess_bounds()
    # Normalise coordinate metadata on the regridded cube.
    lat = cube.coord('latitude')
    lat.standard_name = 'latitude'
    lat.long_name = 'latitude'
    lat.var_name = 'lat'
    lat.units = 'degrees_north'
    lat.attributes = {}
    lon = cube.coord('longitude')
    lon.standard_name = 'longitude'
    lon.long_name = 'longitude'
    lon.var_name = 'lon'
    lon.units = 'degrees_east'
    lon.circular = True
    lon.attributes = {}
    return cube
def get_cube_list(infiles, agg, time_bounds=None, quick=False):
    """Read P-E data for each ensemble member and reduce it.

    Args:
        infiles: list of per-member input file groups.
        agg: 'clim' for the time-mean climatology, 'anom' for the
            last-minus-first timestep anomaly.
        time_bounds: optional (start, end) date strings to constrain time.
        quick: when no time_bounds are given, use only the first 120
            timesteps (for fast test runs).

    Returns:
        (CubeList of one regridded cube per member, history of the last
        file read).

    NOTE(review): ``history`` is only bound inside the loop, so an empty
    ``infiles`` would raise NameError at the return -- confirm callers
    never pass an empty list.
    """
    assert agg in ['clim', 'anom']
    ensemble_cube_list = iris.cube.CubeList([])
    for ensnum, ensemble_member in enumerate(infiles):
        print(ensemble_member)
        cube, history = gio.combine_files(ensemble_member,
                                          'precipitation_minus_evaporation_flux',
                                          new_calendar='365_day')
        cube = gio.check_time_units(cube)
        if time_bounds:
            time_constraint = gio.get_time_constraint(time_bounds)
            cube = cube.extract(time_constraint)
        elif quick:
            # First 120 monthly steps = first 10 years.
            cube = cube[0:120, ::]
        if agg == 'clim':
            cube = timeseries.convert_to_annual(cube, aggregation='mean', days_in_month=True)
            cube = cube.collapsed('time', iris.analysis.MEAN)
        elif agg == 'anom':
            # Last timestep minus first timestep.
            start_data = cube.data[0, ::]
            cube = cube[-1, ::]
            cube.data = cube.data - start_data
        cube.remove_coord('time')
        cube = regrid(cube)
        # Tag each member so the list can later be merged along a new
        # 'ensemble_member' dimension.
        new_aux_coord = iris.coords.AuxCoord(ensnum, long_name='ensemble_member', units='no_unit')
        cube.add_aux_coord(new_aux_coord)
        cube.cell_methods = ()
        ensemble_cube_list.append(cube)
    print("Total number of models:", len(ensemble_cube_list))
    return ensemble_cube_list, history
def ensemble_stats(cube_list):
    """Compute the ensemble mean and the sign-agreement fraction.

    The agreement cube holds, at every grid point, the fraction of
    ensemble members with a positive value there.
    """
    equalise_attributes(cube_list)
    merged = cube_list.merge_cube()
    mean_cube = merged.collapsed('ensemble_member', iris.analysis.MEAN, mdtol=0)
    mean_cube.remove_coord('ensemble_member')
    # Fraction of members that are positive at each point.
    n_members = merged.shape[0]
    agreement_cube = mean_cube.copy()
    agreement_cube.data = (merged.data > 0.0).sum(axis=0) / n_members
    return mean_cube, agreement_cube
def plot_data(ax, ensemble_mean, ensemble_agreement, agg, title,
              agreement_bounds=None, clim=None):
    """Plot an ensemble-mean field with optional agreement hatching.

    Args:
        ax: target matplotlib axes (cartopy projection).
        ensemble_mean: cube with the field to shade.
        ensemble_agreement: cube with the positive-sign fraction.
        agg: 'clim' (annual-mean shading) or 'anom' (anomaly shading).
        title: subplot title.
        agreement_bounds: [lower, upper] agreement fractions to hatch.
        clim: optional climatology cube to overlay as contours.
    """
    assert agg in ['clim', 'anom']
    inproj = ccrs.PlateCarree()
    plt.sca(ax)
    plt.gca().set_global()
    if agg == 'clim':
        cmap = 'BrBG'
        levels = np.arange(-7, 8, 1)
        cbar_label = 'Annual mean P-E (mm/day)'
    else:
        cmap = 'RdBu'
        levels = np.arange(-9000, 9100, 1500)
        cbar_label = 'Time-integrated P-E anomaly, 1861-2005 (kg m-2)'
    x = ensemble_mean.coord('longitude').points
    y = ensemble_mean.coord('latitude').points
    cf = ax.contourf(x, y, ensemble_mean.data,
                     transform=inproj,
                     cmap=cmap,
                     levels=levels,
                     extend='both')
    if agreement_bounds:
        # Hatch where the agreement fraction lies inside the bounds
        # (i.e. where the ensemble disagrees on the sign).
        hatch_data = ensemble_agreement.data
        ax.contourf(x, y, hatch_data,
                    transform=inproj,
                    colors='none',
                    levels=agreement_bounds,
                    hatches=['\\\\'],)  # other hatch options: '.', '/', '\\', '*'
    if clim:
        # Overlay climatology contours at -2, 0 and +2 mm/day.
        ce = ax.contour(x, y, clim.data,
                        transform=inproj,
                        colors=['goldenrod', 'black', 'green'],
                        levels=np.array([-2, 0, 2]))
    cbar = plt.colorbar(cf)
    cbar.set_label(cbar_label) #, fontsize=label_size)
#    cbar.ax.tick_params(labelsize=number_size)
    plt.gca().coastlines()
    ax.set_title(title)
    if agg == 'clim':
        # Mark the 20S and 20N tropics boundaries.
        lons = np.arange(-180, 180, 0.5)
        lats_sh = np.repeat(-20, len(lons))
        lats_nh = np.repeat(20, len(lons))
        plt.plot(lons, lats_sh, color='0.5') # linestyle, linewidth
        plt.plot(lons, lats_nh, color='0.5')
def main(args):
    """Run the program.

    Builds a 2x2 panel: piControl climatology plus time-integrated P-E
    anomalies for the GHG-only, AA-only and historical experiments,
    each with sign-agreement hatching and climatology contours, then
    writes the figure and a provenance .met file alongside it.
    """
    # Panel (a): piControl climatology, converted from kg m-2 s-1 to mm/day.
    clim_cube_list, clim_history = get_cube_list(args.clim_files, 'clim', quick=args.quick)
    clim_ensemble_mean, clim_ensemble_agreement = ensemble_stats(clim_cube_list)
    clim_ensemble_mean.data = clim_ensemble_mean.data * 86400
    # Panels (b)-(d): anomalies for each forcing experiment.
    ghg_cube_list, ghg_history = get_cube_list(args.ghg_files, 'anom', time_bounds=args.time_bounds)
    ghg_ensemble_mean, ghg_ensemble_agreement = ensemble_stats(ghg_cube_list)
    aa_cube_list, aa_history = get_cube_list(args.aa_files, 'anom', time_bounds=args.time_bounds)
    aa_ensemble_mean, aa_ensemble_agreement = ensemble_stats(aa_cube_list)
    hist_cube_list, hist_history = get_cube_list(args.hist_files, 'anom', time_bounds=args.time_bounds)
    hist_ensemble_mean, hist_ensemble_agreement = ensemble_stats(hist_cube_list)
    width = 25
    height = 10
    fig = plt.figure(figsize=[width, height])
    outproj = ccrs.PlateCarree(central_longitude=180.0)
    nrows = 2
    ncols = 2
    ax1 = plt.subplot(nrows, ncols, 1, projection=outproj)
    plot_data(ax1,
              clim_ensemble_mean,
              clim_ensemble_agreement,
              'clim',
              '(a) piControl',
              agreement_bounds=[0.33, 0.66])
    ax2 = plt.subplot(nrows, ncols, 2, projection=outproj)
    plot_data(ax2,
              ghg_ensemble_mean,
              ghg_ensemble_agreement,
              'anom',
              '(b) GHG-only',
              agreement_bounds=[0.33, 0.66],
              clim=clim_ensemble_mean)
    ax3 = plt.subplot(nrows, ncols, 3, projection=outproj)
    plot_data(ax3,
              aa_ensemble_mean,
              aa_ensemble_agreement,
              'anom',
              '(c) AA-only',
              agreement_bounds=[0.33, 0.66],
              clim=clim_ensemble_mean)
    ax4 = plt.subplot(nrows, ncols, 4, projection=outproj)
    plot_data(ax4,
              hist_ensemble_mean,
              hist_ensemble_agreement,
              'anom',
              '(d) historical',
              agreement_bounds=[0.33, 0.66],
              clim=clim_ensemble_mean)
    fig.tight_layout()
    fig.subplots_adjust(wspace=-0.15, hspace=0.2)
    plt.savefig(args.outfile, bbox_inches='tight', dpi=300)
    # Write provenance metadata next to the figure.
    metadata_dict = {args.ghg_files[-1]: ghg_history[-1],
                     args.clim_files[-1]: clim_history[-1]}
    log_text = cmdprov.new_log(infile_history=metadata_dict, git_repo=repo_dir)
    log_file = re.sub('.png', '.met', args.outfile)
    cmdprov.write_log(log_file, log_text)
# Command-line interface: one output figure plus file lists per experiment.
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description=__doc__,
                                     argument_default=argparse.SUPPRESS,
                                     formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument("outfile", type=str, help="output file")
    parser.add_argument("--clim_files", type=str, nargs='*', help="climatology files")
    parser.add_argument("--ghg_files", type=str, nargs='*', help="time-integrated anomaly files for GHG-only experiment")
    parser.add_argument("--aa_files", type=str, nargs='*', help="time-integrated anomaly files for AA-only experiment")
    parser.add_argument("--hist_files", type=str, nargs='*', help="time-integrated anomaly files for historical experiment")
    parser.add_argument("--time_bounds", type=str, nargs=2, metavar=('START_DATE', 'END_DATE'),
                        default=['1861-01-01', '2005-12-31'],
                        help="Time period")
    parser.add_argument("--quick", action="store_true", default=False,
                        help="Use only first 10 years of clim files")
    args = parser.parse_args()
    main(args)
| DamienIrving/ocean-analysis | visualisation/water_cycle/plot_pe_spatial.py | Python | mit | 9,330 |
from flask.ext.admin.contrib.mongoengine import ModelView
from .models import Prediction
from app.management.mixins import AdminMixin
class PredictionView(AdminMixin, ModelView):
    """Flask-Admin view for Prediction documents; the list view is
    searchable on the 'text' field."""
    column_searchable_list = ('text',)
| piotrdubiel/scribeserver | app/recognition/admin.py | Python | mit | 220 |
from django.db import models
class EventTeam(models.Model):
    """A team that can appear as an opponent in an Event."""
    name = models.CharField(max_length=100)
class Event(models.Model):
    """A single match between two teams on a championship day.

    NOTE(review): ForeignKey without on_delete implies Django < 2.0
    (implicit CASCADE) -- confirm the project's Django version.
    """
    team1 = models.ForeignKey(EventTeam, null=True, related_name='opponent1')
    team2 = models.ForeignKey(EventTeam, null=True, related_name='opponent2')
    start_at = models.DateTimeField(null=True, blank=True)
    day = models.ForeignKey('ChampionshipDay', related_name='events', null=True)
class ChampionshipDay(models.Model):
    """A numbered round/matchday within a championship."""
    day = models.PositiveIntegerField()
    championship = models.ForeignKey('Championship', related_name='days')
class Championship(models.Model):
    """A championship (league/competition) made up of ChampionshipDays."""
    name = models.CharField(max_length=100)
| sporteasy/sporteasy_breaking_mvc | sporteasy_breaking_mvc/core/models.py | Python | mit | 665 |
"""
Django settings for urlscript project.
Generated by 'django-admin startproject' using Django 1.9.7.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '+^d@j=txhj+yu39&c(!^#w177dj$-si2*lhtho-53)g-5l(w%p'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites',
'core',
]
# NOTE(review): the file header says this project was generated with
# Django 1.9, which reads MIDDLEWARE_CLASSES; the MIDDLEWARE setting is
# only honoured from Django 1.10 onward -- confirm the installed Django
# version actually picks this list up.
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'urlscript.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'urlscript.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
SITE_ID = 1
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
# The path where the bwrap (bubblewrap sandbox) executable is located.
BUBBLEWRAP_PATH = ""
# Custom args that can be sent to the bwrap executable e.g --dir /abc /abc etc
BWRAP_CUSTOM_OPTIONS = ""
# Working directory for user scripts. Preferably a RAMfs to make it faster.
SCRIPTS_TMP_DIR = ""
# The max time (seconds) a sandboxed script is allowed to run.
SCRIPT_TIMEOUT = 30
# Maps a script's file extension to the interpreter used to run it.
LANGUAGE_EXECUTABLE = {
    'py': 'python3',
    'js': 'node',
}
# Optional machine-local overrides (settings/local.py); silently
# skipped when the module is absent.
try:
    from .local import *
except ImportError:
    pass
| theju/urlscript | urlscript/settings.py | Python | mit | 3,699 |
from bs4 import BeautifulSoup
from django.conf import settings
from django.contrib.gis import admin
from django.core.urlresolvers import reverse
from django.core.urlresolvers import reverse_lazy
from django.forms.formsets import formset_factory
from django.http import HttpResponseRedirect
from django.utils.translation import ugettext as _
from geoforms.forms import CheckboxElementForm
from geoforms.forms import CheckboxElementFormSet
from geoforms.forms import DrawbuttonForm
from geoforms.forms import NumberElementForm
from geoforms.forms import ParagraphForm
from geoforms.forms import RadioElementForm
from geoforms.forms import RadioElementFormSet
from geoforms.forms import TextareaForm
from geoforms.forms import TextElementForm
from geoforms.forms import QuestionForm
from geoforms.forms import RangeElementForm
from geoforms.forms import SelectElementForm
from geoforms.forms import SelectElementFormSet
from geoforms.models import SelectElementModel
from geoforms.models import CheckboxElementModel
from geoforms.models import DrawbuttonElementModel
from geoforms.models import GeoformElement
from geoforms.models import FormElement
from geoforms.models import ParagraphElementModel
from geoforms.models import Questionnaire
from geoforms.models import QuestionnaireForm
from geoforms.models import NumberElementModel
from geoforms.models import RadioElementModel
from geoforms.models import TextElementModel
from geoforms.models import TextareaModel
from geoforms.models import RangeElementModel
from geoforms.models import PopupModel
from geoforms.models import PageModel
from geoforms.models import GeoJSONPopupModel
from geoforms.models import Lottery
from modeltranslation.admin import TranslationAdmin
from modeltranslation.admin import TranslationTabularInline
admin.site.register(Lottery, TranslationAdmin)
class GeoformElementAdmin(TranslationAdmin, admin.ModelAdmin):
    """Base admin for questionnaire form elements.

    Search fields are built dynamically so that every per-language
    ``html_<lang>`` column created by django-modeltranslation is
    searchable, whatever languages the site is configured with.
    """
    list_display = ('name',
                    'element_type',
                    'id',
                    'html')
    ordering = ['name']
    def __init__(self, *args, **kwargs):
        super(GeoformElementAdmin, self).__init__(*args, **kwargs)
        # One search field per configured language, plus the element type.
        # Plain attribute assignment replaces the needlessly indirect
        # setattr(self, 'search_fields', ...) of the original.
        self.search_fields = ['element_type'] + [
            'html_%s' % lang[0] for lang in settings.LANGUAGES
        ]
class FormElementAdmin(admin.ModelAdmin):
    """Admin for the element<->form through model, ordered by form then order."""
    ordering = ['geoform', 'order']
class ElementInline(TranslationTabularInline):
    """Inline editor for a form's elements inside the form admin."""
    model = FormElement
    extra = 0
class GeoformAdmin(TranslationAdmin, admin.ModelAdmin):
    """Base admin for geoforms, with the form's elements edited inline."""
    list_display = ('name', 'id')
    inlines = [
        ElementInline
    ]
class PageAdmin(GeoformAdmin):
    """
    Admin for ordinary questionnaire pages (page_type 'form').
    """
    def queryset(self, request):
        # Restrict the changelist to form pages only.
        return self.model.objects.filter(page_type = 'form')
admin.site.register(PageModel, PageAdmin)
class PopupAdmin(GeoformAdmin):
    """
    Admin for popup pages (page_type 'popup').
    """
    def queryset(self, request):
        # Restrict the changelist to popup pages only.
        return self.model.objects.filter(page_type = 'popup')
admin.site.register(PopupModel, PopupAdmin)
class GeoJSONPopupAdmin(GeoformAdmin):
    """
    Admin for GeoJSON popup pages (page_type 'gpop').
    """
    def queryset(self, request):
        # Restrict the changelist to GeoJSON popup pages only.
        return self.model.objects.filter(page_type = 'gpop')
admin.site.register(GeoJSONPopupModel, GeoJSONPopupAdmin)
class QuestionnaireFormAdmin(admin.ModelAdmin):
    """Admin for the questionnaire<->form through model, ordered by position."""
    ordering = ['questionnaire', 'order']
class GeoformInline(TranslationTabularInline):
    """Inline editor for a questionnaire's forms inside the questionnaire admin."""
    model = QuestionnaireForm
    extra = 0
class QuestionnaireAdmin(admin.OSMGeoAdmin, TranslationAdmin):
    """Map-enabled admin for questionnaires.

    The default map view (lon/lat/zoom) comes from the optional
    ORGANIZATION_ADMIN_DEFAULT_MAP_SETTINGS setting.
    """
    list_display = ('name',)
    ordering = ['name']
    inlines = [
        GeoformInline
    ]
    # NOTE(review): if ORGANIZATION_ADMIN_DEFAULT_MAP_SETTINGS exists but
    # lacks one of these keys, the indexing below raises KeyError -- the
    # fallback dict is only used when the setting is absent entirely.
    default_lon = getattr(settings,
                          'ORGANIZATION_ADMIN_DEFAULT_MAP_SETTINGS',
                          {'default_lon': 0})['default_lon']
    default_lat = getattr(settings,
                          'ORGANIZATION_ADMIN_DEFAULT_MAP_SETTINGS',
                          {'default_lat': 0})['default_lat']
    default_zoom = getattr(settings,
                           'ORGANIZATION_ADMIN_DEFAULT_MAP_SETTINGS',
                           {'default_zoom': 4})['default_zoom']
    fieldsets = (
        (None, {
            'fields': ('name', 'description', ('start_date', 'end_date'), 'area',)
        }),
        (_('Advanced options'), {
            'classes': ('collapse',),
            'fields': ('show_area', 'scale_visible_area',)
        }),
    )
    # Map widget assets: OpenLayers plus the project's OSM extras script.
    openlayers_url = '%s%s' % (getattr(settings, 'STATIC_URL', '/'), 'js/libs/OpenLayers.js')
    extra_js = (reverse_lazy('osmextra'),)
    def change_view(self, request, object_id, form_url='', extra_context=None):
        # Expose the questionnaire's slug to the change template.
        extra_context = extra_context or {}
        extra_context['slug'] = Questionnaire.on_site.get(pk = object_id).slug
        return super(QuestionnaireAdmin, self).change_view(request, object_id,
                                                           form_url, extra_context=extra_context)
    class Media:
        css = {
            "all": ("css/questionnaire_admin.css",)
        }
admin.site.register(GeoformElement, GeoformElementAdmin)
admin.site.register(Questionnaire, QuestionnaireAdmin)
class TextElementAdmin(GeoformElementAdmin):
    """
    This is the admin for text inputs.
    """
    form = TextElementForm
    def queryset(self, request):
        # Only show elements of type 'text' in the changelist.
        return self.model.objects.filter(element_type = 'text')
admin.site.register(TextElementModel, TextElementAdmin)
class TextareaAdmin(GeoformElementAdmin):
    """
    This is the admin for adding textareas.
    """
    form = TextareaForm
    def queryset(self, request):
        # Only show elements of type 'textarea' in the changelist.
        return self.model.objects.filter(element_type = 'textarea')
admin.site.register(TextareaModel, TextareaAdmin)
class NumberElementAdmin(GeoformElementAdmin):
    """Admin for numeric input elements; min/max/step are collapsed
    under 'Advanced options'."""
    form = NumberElementForm
    fieldsets = (
        (None, {
            'fields': ('question',)
        }),
        (_('Advanced options'), {
            'classes': ('collapse',),
            'fields': ('min_value',
                       'max_value',
                       'step')
        }),
    )
    def queryset(self, request):
        # Only show elements of type 'number' in the changelist.
        return self.model.objects.filter(element_type = 'number')
admin.site.register(NumberElementModel, NumberElementAdmin)
class RangeElementAdmin(GeoformElementAdmin):
    """Admin for range-slider elements; numeric bounds are collapsed
    under 'Advanced options'."""
    form = RangeElementForm
    fieldsets = (
        (None, {
            'fields': ('question',
                       'min_label',
                       'max_label',)
        }),
        (_('Advanced options'), {
            'classes': ('collapse',),
            'fields': ('min_value',
                       'max_value',
                       'step',
                       'initial_value',)
        }),
    )
    def queryset(self, request):
        # Only show elements of type 'range' in the changelist.
        return self.model.objects.filter(element_type = 'range')
admin.site.register(RangeElementModel, RangeElementAdmin)
class ParagraphElementAdmin(GeoformElementAdmin):
    """Admin for static paragraph (free text) elements."""
    form = ParagraphForm
    def queryset(self, request):
        return self.model.objects.filter(element_type = 'paragraph')
admin.site.register(ParagraphElementModel, ParagraphElementAdmin)
class DrawbuttonElementAdmin(GeoformElementAdmin):
    """Admin for map draw-button elements."""
    form = DrawbuttonForm
    def queryset(self, request):
        return self.model.objects.filter(element_type = 'drawbutton')
admin.site.register(DrawbuttonElementModel, DrawbuttonElementAdmin)
class CheckboxElementAdmin(GeoformElementAdmin):
    """Admin for checkbox-group elements.

    The element is stored as rendered HTML (one variant per language),
    so add/change views use a formset of labels instead of the default
    model form, and the change view reconstructs the formset's initial
    data by parsing the stored HTML with BeautifulSoup.
    """
    form = CheckboxElementForm
    add_form_template = 'admin/geoforms/geoformelement/create_element.html'
    change_form_template = add_form_template
    def queryset(self, request):
        return self.model.objects.filter(element_type = 'checkbox')
    def add_view(self, request, form_url='', extra_context=None):
        if request.method == 'POST':
            # The formset itself renders and saves the HTML element.
            ces = formset_factory(CheckboxElementForm,
                                  formset=CheckboxElementFormSet)
            cs = ces(request.POST)
            cs.save()
            return HttpResponseRedirect(reverse('admin:geoforms_checkboxelementmodel_changelist'))
        else:
            return super(CheckboxElementAdmin, self).add_view(request,
                                                              form_url = '',
                                                              extra_context = {
                        'current_app': self.admin_site.name,
                        'form': QuestionForm(),
                        'formset': formset_factory(CheckboxElementForm)})
    def change_view(self, request, object_id, form_url='', extra_context=None):
        if request.method == 'POST':
            ces = formset_factory(CheckboxElementForm,
                                  formset=CheckboxElementFormSet)
            cs = ces(request.POST)
            cs.save()
            return HttpResponseRedirect(reverse('admin:geoforms_checkboxelementmodel_changelist'))
        else:
            # Rebuild the per-language question/labels from the saved HTML.
            # NOTE(review): initial_data[j] assumes every language variant
            # has the same number of <label> tags -- an inconsistent
            # variant would raise IndexError; confirm upstream guarantees.
            initial_data = []
            question_data = {'question': []}
            checkboxelement = CheckboxElementModel.objects.get(id = object_id)
            for i, lang in enumerate(settings.LANGUAGES):
                html = getattr(checkboxelement,'html_%s' % lang[0])
                if html == None:
                    # Fall back to the first configured language.
                    html = getattr(checkboxelement,'html_%s' % settings.LANGUAGES[0][0])
                soup = BeautifulSoup(html)
                question_data['question'].append(soup.p.text.strip())
                if soup.find(attrs={'data-random': 'true'}):
                    question_data['randomize'] = True
                labels = soup.find_all('label')
                for j, label in enumerate(labels):
                    if i == 0:
                        initial_data.append({u'label': [label.text.strip()]})
                    else:
                        initial_data[j]['label'].append(label.text.strip())
            return super(CheckboxElementAdmin, self).change_view(request,
                                                                 object_id,
                                                                 form_url = '',
                                                                 extra_context = {
                        'current_app': self.admin_site.name,
                        'form': QuestionForm(initial = question_data),
                        'formset': formset_factory(CheckboxElementForm,
                                                   extra = 0)(initial = initial_data)})
admin.site.register(CheckboxElementModel, CheckboxElementAdmin)
class RadioElementAdmin(GeoformElementAdmin):
    """Admin for radio-button-group elements.

    Mirrors CheckboxElementAdmin: elements are stored as rendered HTML
    per language, so the add/change views use a label formset and the
    change view reparses the stored HTML for its initial data.
    """
    form = RadioElementForm
    add_form_template = 'admin/geoforms/geoformelement/create_element.html'
    change_form_template = add_form_template
    def queryset(self, request):
        return self.model.objects.filter(element_type = 'radio')
    def add_view(self, request, form_url='', extra_context=None):
        if request.method == 'POST':
            res = formset_factory(RadioElementForm,
                                  formset=RadioElementFormSet)
            rs = res(request.POST)
            rs.save()
            return HttpResponseRedirect(reverse('admin:geoforms_radioelementmodel_changelist'))
        else:
            return super(RadioElementAdmin, self).add_view(request,
                                                           form_url = '',
                                                           extra_context = {
                        'current_app': self.admin_site.name,
                        'form': QuestionForm(),
                        'formset': formset_factory(RadioElementForm)})
    def change_view(self, request, object_id, form_url='', extra_context=None):
        if request.method == 'POST':
            res = formset_factory(RadioElementForm,
                                  formset=RadioElementFormSet)
            rs = res(request.POST)
            rs.save()
            return HttpResponseRedirect(reverse('admin:geoforms_radioelementmodel_changelist'))
        else:
            # Rebuild the per-language question/labels from the saved HTML.
            # NOTE(review): initial_data[j] assumes every language variant
            # has the same number of <label> tags -- confirm upstream.
            initial_data = []
            question_data = {'question': []}
            radioelement = RadioElementModel.objects.get(id = object_id)
            for i, lang in enumerate(settings.LANGUAGES):
                html = getattr(radioelement,'html_%s' % lang[0])
                if html == None:
                    # Fall back to the first configured language.
                    html = getattr(radioelement,'html_%s' % settings.LANGUAGES[0][0])
                soup = BeautifulSoup(html)
                question_data['question'].append(soup.p.text)
                if soup.find(attrs={'data-random': 'true'}):
                    question_data['randomize'] = True
                labels = soup.find_all('label')
                for j, label in enumerate(labels):
                    if i == 0:
                        initial_data.append({u'label': [label.text.strip()]})
                    else:
                        initial_data[j]['label'].append(label.text.strip())
            return super(RadioElementAdmin, self).change_view(request,
                                                              object_id,
                                                              form_url = '',
                                                              extra_context = {
                        'current_app': self.admin_site.name,
                        'form': QuestionForm(initial = question_data),
                        'formset': formset_factory(RadioElementForm,
                                                   extra = 0)(initial = initial_data)})
admin.site.register(RadioElementModel, RadioElementAdmin)
class SelectElementAdmin(GeoformElementAdmin):
    """Admin for <select> dropdown elements.

    Mirrors CheckboxElementAdmin/RadioElementAdmin: elements are stored
    as rendered HTML per language; the change view reparses the stored
    HTML's <option> tags for its initial data.
    """
    form = SelectElementForm
    add_form_template = 'admin/geoforms/geoformelement/create_element.html'
    change_form_template = add_form_template
    def queryset(self, request):
        return self.model.objects.filter(element_type = 'select')
    def add_view(self, request, form_url='', extra_context=None):
        if request.method == 'POST':
            res = formset_factory(SelectElementForm,
                                  formset=SelectElementFormSet)
            rs = res(request.POST)
            rs.save()
            return HttpResponseRedirect(reverse('admin:geoforms_selectelementmodel_changelist'))
        else:
            return super(SelectElementAdmin, self).add_view(request,
                                                            form_url = '',
                                                            extra_context = {
                        'current_app': self.admin_site.name,
                        'form': QuestionForm(),
                        'formset': formset_factory(SelectElementForm)})
    def change_view(self, request, object_id, form_url='', extra_context=None):
        if request.method == 'POST':
            res = formset_factory(SelectElementForm,
                                  formset=SelectElementFormSet)
            rs = res(request.POST)
            rs.save()
            return HttpResponseRedirect(reverse('admin:geoforms_selectelementmodel_changelist'))
        else:
            # Rebuild the per-language question/options from the saved HTML.
            initial_data = []
            question_data = {'question': []}
            selectelement = SelectElementModel.objects.get(id = object_id)
            for i, lang in enumerate(settings.LANGUAGES):
                html = getattr(selectelement,'html_%s' % lang[0])
                if html == None:
                    # Fall back to the first configured language.
                    html = getattr(selectelement,'html_%s' % settings.LANGUAGES[0][0])
                soup = BeautifulSoup(html)
                question_data['question'].append(soup.p.contents[0])
                if soup.find(attrs={'data-random': 'true'}):
                    question_data['randomize'] = True
                options = soup.find_all('option')
                for j, option in enumerate(options):
                    # Don't add empty values
                    if option.text == '':
                        continue
                    # NOTE(review): the j-1 index assumes the single empty
                    # placeholder <option> is always first in the list --
                    # confirm against the rendered element HTML.
                    if i == 0:
                        initial_data.append({u'label': [option.text.strip()]})
                    else:
                        initial_data[j-1]['label'].append(option.text.strip())
            return super(SelectElementAdmin, self).change_view(request,
                                                               object_id,
                                                               form_url = '',
                                                               extra_context = {
                        'current_app': self.admin_site.name,
                        'form': QuestionForm(initial = question_data),
                        'formset': formset_factory(SelectElementForm,
                                                   extra = 0)(initial = initial_data)})
admin.site.register(SelectElementModel, SelectElementAdmin)
| geonition/geoforms | geoforms/admin.py | Python | mit | 17,493 |
import pandas as pd
import numpy as np
import scipy as sp
import sys
import warnings
import copy
import operator
import sklearn
from slicer import Slicer, Alias, Obj
# from ._order import Order
from .utils._general import OpChain
# slicer confuses pylint...
# pylint: disable=no-member
op_chain_root = OpChain("shap.Explanation")
class MetaExplanation(type):
    """ This metaclass exposes the Explanation object's methods for creating template op chains.

    Accessing these class-level attributes (e.g. ``Explanation.abs``)
    yields an OpChain template rather than executing anything; the chain
    is applied to a concrete Explanation later.
    """
    def __getitem__(cls, item):
        # Class-level indexing (Explanation[...]) also builds a template.
        return op_chain_root.__getitem__(item)
    @property
    def abs(cls):
        """ Element-wize absolute value op.
        """
        return op_chain_root.abs
    @property
    def identity(cls):
        """ A no-op.
        """
        return op_chain_root.identity
    @property
    def argsort(cls):
        """ Numpy style argsort.
        """
        return op_chain_root.argsort
    @property
    def sum(cls):
        """ Numpy style sum.
        """
        return op_chain_root.sum
    @property
    def max(cls):
        """ Numpy style max.
        """
        return op_chain_root.max
    @property
    def min(cls):
        """ Numpy style min.
        """
        return op_chain_root.min
    @property
    def mean(cls):
        """ Numpy style mean.
        """
        return op_chain_root.mean
    @property
    def sample(cls):
        """ Numpy style sample.
        """
        return op_chain_root.sample
    @property
    def hclust(cls):
        """ Hierarchial clustering op.
        """
        return op_chain_root.hclust
class Explanation(metaclass=MetaExplanation):
""" A slicable set of parallel arrays representing a SHAP explanation.
"""
    def __init__( # pylint: disable=too-many-arguments
        self,
        values,
        base_values=None,
        data=None,
        display_data=None,
        instance_names=None,
        feature_names=None,
        output_names=None,
        output_indexes=None,
        lower_bounds=None,
        upper_bounds=None,
        error_std=None,
        main_effects=None,
        hierarchical_values=None,
        clustering=None,
        compute_time=None
    ):
        """Wrap the parallel arrays of a SHAP explanation in a Slicer.

        ``values`` may also be another Explanation, in which case its
        values/base_values/data are cloned (shallowly). Name arrays are
        wrapped as slicer Alias/Obj objects so they follow the right
        dimensions when the Explanation is sliced.
        """
        self.op_history = []
        self.compute_time = compute_time
        # cloning. TODOsomeday: better cloning :)
        if issubclass(type(values), Explanation):
            e = values
            values = e.values
            base_values = e.base_values
            data = e.data
        # Which dims of `values` are output dims (helper defined elsewhere).
        self.output_dims = compute_output_dims(values, base_values, data, output_names)
        values_shape = _compute_shape(values)
        # Synthesize default output names when there is one output dim.
        if output_names is None and len(self.output_dims) == 1:
            output_names = [f"Output {i}" for i in range(values_shape[self.output_dims[0]])]
        # Decide whether a flat feature_names list names rows (dim 0) or
        # columns (dim 1) by matching its length against values' shape.
        if len(_compute_shape(feature_names)) == 1: # TODOsomeday: should always be an alias once slicer supports per-row aliases
            if len(values_shape) >= 1 and len(feature_names) == values_shape[0]:
                feature_names = Alias(list(feature_names), 0)
            elif len(values_shape) >= 2 and len(feature_names) == values_shape[1]:
                feature_names = Alias(list(feature_names), 1)
        if len(_compute_shape(output_names)) == 1: # TODOsomeday: should always be an alias once slicer supports per-row aliases
            output_names = Alias(list(output_names), self.output_dims[0])
            # if len(values_shape) >= 1 and len(output_names) == values_shape[0]:
            #     output_names = Alias(list(output_names), 0)
            # elif len(values_shape) >= 2 and len(output_names) == values_shape[1]:
            #     output_names = Alias(list(output_names), 1)
        # Higher-order output_names are bound to their dims via Obj.
        if output_names is not None and not isinstance(output_names, Alias):
            l = len(_compute_shape(output_names))
            if l == 0:
                pass
            elif l == 1:
                output_names = Obj(output_names, self.output_dims)
            elif l == 2:
                output_names = Obj(output_names, [0] + list(self.output_dims))
            else:
                raise ValueError("shap.Explanation does not yet support output_names of order greater than 3!")
        # base_values: bind to output dims only, or rows + output dims.
        if not hasattr(base_values, "__len__") or len(base_values) == 0:
            pass
        elif len(_compute_shape(base_values)) == len(self.output_dims):
            base_values = Obj(base_values, list(self.output_dims))
        else:
            base_values = Obj(base_values, [0] + list(self.output_dims))
        # All parallel arrays are held and co-sliced by a single Slicer.
        self._s = Slicer(
            values=values,
            base_values=base_values,
            data=list_wrap(data),
            display_data=list_wrap(display_data),
            instance_names=None if instance_names is None else Alias(instance_names, 0),
            feature_names=feature_names,
            output_names=output_names,
            output_indexes=None if output_indexes is None else (self.output_dims, output_indexes),
            lower_bounds=list_wrap(lower_bounds),
            upper_bounds=list_wrap(upper_bounds),
            error_std=list_wrap(error_std),
            main_effects=list_wrap(main_effects),
            hierarchical_values=list_wrap(hierarchical_values),
            clustering=None if clustering is None else Obj(clustering, [0])
        )
    # --- Thin pass-through accessors ---------------------------------------
    # The real storage lives in the internal Slicer object self._s; these
    # properties keep the public attribute API (exp.values, exp.data, ...)
    # stable while delegating all slicing bookkeeping to the slicer.
    @property
    def shape(self):
        """ Compute the shape over potentially complex data nesting.
        """
        return _compute_shape(self._s.values)
    @property
    def values(self):
        """ Pass-through from the underlying slicer object.
        """
        return self._s.values
    @values.setter
    def values(self, new_values):
        self._s.values = new_values
    @property
    def base_values(self):
        """ Pass-through from the underlying slicer object.
        """
        return self._s.base_values
    @base_values.setter
    def base_values(self, new_base_values):
        self._s.base_values = new_base_values
    @property
    def data(self):
        """ Pass-through from the underlying slicer object.
        """
        return self._s.data
    @data.setter
    def data(self, new_data):
        self._s.data = new_data
    @property
    def display_data(self):
        """ Pass-through from the underlying slicer object.
        """
        return self._s.display_data
    @display_data.setter
    def display_data(self, new_display_data):
        # DataFrames are unwrapped to their raw ndarray before storage
        if issubclass(type(new_display_data), pd.DataFrame):
            new_display_data = new_display_data.values
        self._s.display_data = new_display_data
    # instance_names is read-only (no setter)
    @property
    def instance_names(self):
        """ Pass-through from the underlying slicer object.
        """
        return self._s.instance_names
    @property
    def output_names(self):
        """ Pass-through from the underlying slicer object.
        """
        return self._s.output_names
    @output_names.setter
    def output_names(self, new_output_names):
        self._s.output_names = new_output_names
    @property
    def output_indexes(self):
        """ Pass-through from the underlying slicer object.
        """
        return self._s.output_indexes
    @property
    def feature_names(self):
        """ Pass-through from the underlying slicer object.
        """
        return self._s.feature_names
    @feature_names.setter
    def feature_names(self, new_feature_names):
        self._s.feature_names = new_feature_names
    @property
    def lower_bounds(self):
        """ Pass-through from the underlying slicer object.
        """
        return self._s.lower_bounds
    @property
    def upper_bounds(self):
        """ Pass-through from the underlying slicer object.
        """
        return self._s.upper_bounds
    @property
    def error_std(self):
        """ Pass-through from the underlying slicer object.
        """
        return self._s.error_std
    @property
    def main_effects(self):
        """ Pass-through from the underlying slicer object.
        """
        return self._s.main_effects
    @main_effects.setter
    def main_effects(self, new_main_effects):
        self._s.main_effects = new_main_effects
    @property
    def hierarchical_values(self):
        """ Pass-through from the underlying slicer object.
        """
        return self._s.hierarchical_values
    @hierarchical_values.setter
    def hierarchical_values(self, new_hierarchical_values):
        self._s.hierarchical_values = new_hierarchical_values
    @property
    def clustering(self):
        """ Pass-through from the underlying slicer object.
        """
        return self._s.clustering
    @clustering.setter
    def clustering(self, new_clustering):
        self._s.clustering = new_clustering
def cohorts(self, cohorts):
""" Split this explanation into several cohorts.
Parameters
----------
cohorts : int or array
If this is an integer then we auto build that many cohorts using a decision tree. If this is
an array then we treat that as an array of cohort names/ids for each instance.
"""
if isinstance(cohorts, int):
return _auto_cohorts(self, max_cohorts=cohorts)
if isinstance(cohorts, (list, tuple, np.ndarray)):
cohorts = np.array(cohorts)
return Cohorts(**{name: self[cohorts == name] for name in np.unique(cohorts)})
raise Exception("The given set of cohort indicators is not recognized! Please give an array or int.")
def __repr__(self):
""" Display some basic printable info, but not everything.
"""
out = ".values =\n"+self.values.__repr__()
if self.base_values is not None:
out += "\n\n.base_values =\n"+self.base_values.__repr__()
if self.data is not None:
out += "\n\n.data =\n"+self.data.__repr__()
return out
    def __getitem__(self, item):
        """ This adds support for OpChain indexing.

        Items may be plain indexes/slices, Ellipsis, OpChain objects (evaluated
        against self), numpy arrays / Explanations (converted to index lists),
        or strings (matched against output_names / feature_names).
        """
        new_self = None
        if not isinstance(item, tuple):
            item = (item,)
        # convert any OpChains or magic strings
        pos = -1
        for t in item: # pylint: disable=too-many-nested-blocks
            pos += 1
            # skip over Ellipsis
            if t == Ellipsis:
                pos += len(self.shape) - len(item)
                continue
            orig_t = t
            if issubclass(type(t), OpChain):
                t = t.apply(self)
                if issubclass(type(t), (np.int64, np.int32)): # because slicer does not like numpy indexes
                    t = int(t)
            elif issubclass(type(t), np.ndarray):
                t = [int(v) for v in t] # slicer wants lists not numpy arrays for indexing
            elif issubclass(type(t), Explanation):
                t = t.values
            elif isinstance(t, str):
                # work around for 2D output_names since they are not yet slicer supported
                output_names_dims = []
                if "output_names" in self._s._objects:
                    output_names_dims = self._s._objects["output_names"].dim
                elif "output_names" in self._s._aliases:
                    output_names_dims = self._s._aliases["output_names"].dim
                if pos != 0 and pos in output_names_dims:
                    if len(output_names_dims) == 1:
                        # 1D output names: replace the string with its column index
                        t = np.argwhere(np.array(self.output_names) == t)[0][0]
                    elif len(output_names_dims) == 2:
                        # 2D (per-sample) output names: manually gather the matching
                        # column of every row and rebuild a fresh Explanation
                        new_values = []
                        new_base_values = []
                        new_data = []
                        new_self = copy.deepcopy(self)
                        for i, v in enumerate(self.values):
                            for j, s in enumerate(self.output_names[i]):
                                if s == t:
                                    new_values.append(np.array(v[:,j]))
                                    new_data.append(np.array(self.data[i]))
                                    new_base_values.append(self.base_values[i][j])
                        # NOTE(review): arguments are positional; new_data is passed both
                        # as `data` and again in the `feature_names` position (this
                        # mirrors the commented-out assignments below) — confirm
                        # against Explanation.__init__'s parameter order.
                        new_self = Explanation(
                            np.array(new_values),
                            np.array(new_base_values),
                            np.array(new_data),
                            self.display_data,
                            self.instance_names,
                            np.array(new_data),
                            t, # output_names
                            self.output_indexes,
                            self.lower_bounds,
                            self.upper_bounds,
                            self.error_std,
                            self.main_effects,
                            self.hierarchical_values,
                            self.clustering
                        )
                        new_self.op_history = copy.copy(self.op_history)
                # new_self = copy.deepcopy(self)
                # new_self.values = np.array(new_values)
                # new_self.base_values = np.array(new_base_values)
                # new_self.data = np.array(new_data)
                # new_self.output_names = t
                # new_self.feature_names = np.array(new_data)
                # new_self.clustering = None
                # work around for 2D feature_names since they are not yet slicer supported
                feature_names_dims = []
                if "feature_names" in self._s._objects:
                    feature_names_dims = self._s._objects["feature_names"].dim
                if pos != 0 and pos in feature_names_dims and len(feature_names_dims) == 2:
                    new_values = []
                    new_data = []
                    for i, val_i in enumerate(self.values):
                        for s,v,d in zip(self.feature_names[i], val_i, self.data[i]):
                            if s == t:
                                new_values.append(v)
                                new_data.append(d)
                    new_self = copy.deepcopy(self)
                    new_self.values = new_values
                    new_self.data = new_data
                    new_self.feature_names = t
                    new_self.clustering = None
                # return new_self
            if issubclass(type(t), (np.int8, np.int16, np.int32, np.int64)):
                t = int(t)
            if t is not orig_t:
                # write the converted index back into the item tuple
                tmp = list(item)
                tmp[pos] = t
                item = tuple(tmp)
        # call slicer for the real work
        item = tuple(v for v in item) # SML I cut out: `if not isinstance(v, str)`
        if len(item) == 0:
            return new_self
        if new_self is None:
            new_self = copy.copy(self)
        new_self._s = new_self._s.__getitem__(item)
        new_self.op_history.append({
            "name": "__getitem__",
            "args": (item,),
            "prev_shape": self.shape
        })
        return new_self
    def __len__(self):
        """ Number of rows (size of the first dimension). """
        return self.shape[0]
    def __copy__(self):
        """ Shallow copy: rebuild a new Explanation from the current field values.

        NOTE(review): arguments are passed positionally, so this list must stay
        in the exact order of Explanation.__init__'s parameters.
        """
        new_exp = Explanation(
            self.values,
            self.base_values,
            self.data,
            self.display_data,
            self.instance_names,
            self.feature_names,
            self.output_names,
            self.output_indexes,
            self.lower_bounds,
            self.upper_bounds,
            self.error_std,
            self.main_effects,
            self.hierarchical_values,
            self.clustering
        )
        # op_history is copied separately since it is not a constructor argument
        new_exp.op_history = copy.copy(self.op_history)
        return new_exp
def _apply_binary_operator(self, other, binary_op, op_name):
new_exp = self.__copy__()
new_exp.op_history = copy.copy(self.op_history)
new_exp.op_history.append({
"name": op_name,
"args": (other,),
"prev_shape": self.shape
})
if isinstance(other, Explanation):
new_exp.values = binary_op(new_exp.values, other.values)
if new_exp.data is not None:
new_exp.data = binary_op(new_exp.data, other.data)
if new_exp.base_values is not None:
new_exp.base_values = binary_op(new_exp.base_values, other.base_values)
else:
new_exp.values = binary_op(new_exp.values, other)
if new_exp.data is not None:
new_exp.data = binary_op(new_exp.data, other)
if new_exp.base_values is not None:
new_exp.base_values = binary_op(new_exp.base_values, other)
return new_exp
def __add__(self, other):
return self._apply_binary_operator(other, operator.add, "__add__")
def __radd__(self, other):
return self._apply_binary_operator(other, operator.add, "__add__")
def __sub__(self, other):
return self._apply_binary_operator(other, operator.sub, "__sub__")
def __rsub__(self, other):
return self._apply_binary_operator(other, operator.sub, "__sub__")
def __mul__(self, other):
return self._apply_binary_operator(other, operator.mul, "__mul__")
def __rmul__(self, other):
return self._apply_binary_operator(other, operator.mul, "__mul__")
def __truediv__(self, other):
return self._apply_binary_operator(other, operator.truediv, "__truediv__")
# @property
# def abs(self):
# """ Element-size absolute value operator.
# """
# new_self = copy.copy(self)
# new_self.values = np.abs(new_self.values)
# new_self.op_history.append({
# "name": "abs",
# "prev_shape": self.shape
# })
# return new_self
def _numpy_func(self, fname, **kwargs):
""" Apply a numpy-style function to this Explanation.
"""
new_self = copy.copy(self)
axis = kwargs.get("axis", None)
# collapse the slicer to right shape
if axis == 0:
new_self = new_self[0]
elif axis == 1:
new_self = new_self[1]
elif axis == 2:
new_self = new_self[2]
if axis in [0,1,2]:
new_self.op_history = new_self.op_history[:-1] # pop off the slicing operation we just used
if self.feature_names is not None and not is_1d(self.feature_names) and axis == 0:
new_values = self._flatten_feature_names()
new_self.feature_names = np.array(list(new_values.keys()))
new_self.values = np.array([getattr(np, fname)(v,0) for v in new_values.values()])
new_self.clustering = None
else:
new_self.values = getattr(np, fname)(np.array(self.values), **kwargs)
if new_self.data is not None:
try:
new_self.data = getattr(np, fname)(np.array(self.data), **kwargs)
except:
new_self.data = None
if new_self.base_values is not None and issubclass(type(axis), int) and len(self.base_values.shape) > axis:
new_self.base_values = getattr(np, fname)(self.base_values, **kwargs)
elif issubclass(type(axis), int):
new_self.base_values = None
if axis == 0 and self.clustering is not None and len(self.clustering.shape) == 3:
if self.clustering.std(0).sum() < 1e-8:
new_self.clustering = self.clustering[0]
else:
new_self.clustering = None
new_self.op_history.append({
"name": fname,
"kwargs": kwargs,
"prev_shape": self.shape,
"collapsed_instances": axis == 0
})
return new_self
    def mean(self, axis):
        """ Numpy-style mean function.
        """
        return self._numpy_func("mean", axis=axis)
    def max(self, axis):
        """ Numpy-style max function.
        """
        return self._numpy_func("max", axis=axis)
    def min(self, axis):
        """ Numpy-style min function.
        """
        return self._numpy_func("min", axis=axis)
    def sum(self, axis=None, grouping=None):
        """ Numpy-style sum function, with optional column grouping.

        When `grouping` is given (and axis == 1 or the explanation is 1-D),
        columns are merged via group_features instead of a plain numpy sum.
        """
        if grouping is None:
            return self._numpy_func("sum", axis=axis)
        elif axis == 1 or len(self.shape) == 1:
            return group_features(self, grouping)
        else:
            raise Exception("Only axis = 1 is supported for grouping right now...")
def hstack(self, other):
""" Stack two explanations column-wise.
"""
assert self.shape[0] == other.shape[0], "Can't hstack explanations with different numbers of rows!"
assert np.max(np.abs(self.base_values - other.base_values)) < 1e-6, "Can't hstack explanations with different base values!"
new_exp = Explanation(
np.hstack([self.values, other.values]),
np.hstack([self.values, other.values]),
self.base_values,
self.data,
self.display_data,
self.instance_names,
self.feature_names,
self.output_names,
self.output_indexes,
self.lower_bounds,
self.upper_bounds,
self.error_std,
self.main_effects,
self.hierarchical_values,
self.clustering
)
return self._numpy_func("min", axis=axis)
# def reshape(self, *args):
# return self._numpy_func("reshape", newshape=args)
    @property
    def abs(self):
        """ Elementwise absolute value of .values (lazy OpChain-style property). """
        return self._numpy_func("abs")
    @property
    def identity(self):
        """ No-op; lets OpChains include an identity step. """
        return self
    @property
    def argsort(self):
        """ Indices that would sort .values (numpy argsort semantics). """
        return self._numpy_func("argsort")
    @property
    def flip(self):
        """ Reverse the order of .values (numpy flip semantics). """
        return self._numpy_func("flip")
def hclust(self, metric="sqeuclidean", axis=0):
""" Computes an optimal leaf ordering sort order using hclustering.
hclust(metric="sqeuclidean")
Parameters
----------
metric : string
A metric supported by scipy clustering.
axis : int
The axis to cluster along.
"""
values = self.values
if len(values.shape) != 2:
raise Exception("The hclust order only supports 2D arrays right now!")
if axis == 1:
values = values.T
# compute a hierarchical clustering and return the optimal leaf ordering
D = sp.spatial.distance.pdist(values, metric)
cluster_matrix = sp.cluster.hierarchy.complete(D)
inds = sp.cluster.hierarchy.leaves_list(sp.cluster.hierarchy.optimal_leaf_ordering(cluster_matrix, D))
return inds
def sample(self, max_samples, replace=False, random_state=0):
""" Randomly samples the instances (rows) of the Explanation object.
Parameters
----------
max_samples : int
The number of rows to sample. Note that if replace=False then less than
fewer than max_samples will be drawn if explanation.shape[0] < max_samples.
replace : bool
Sample with or without replacement.
"""
prev_seed = np.random.seed(random_state)
inds = np.random.choice(self.shape[0], min(max_samples, self.shape[0]), replace=replace)
np.random.seed(prev_seed)
return self[list(inds)]
def _flatten_feature_names(self):
new_values = {}
for i in range(len(self.values)):
for s,v in zip(self.feature_names[i], self.values[i]):
if s not in new_values:
new_values[s] = []
new_values[s].append(v)
return new_values
def _use_data_as_feature_names(self):
new_values = {}
for i in range(len(self.values)):
for s,v in zip(self.data[i], self.values[i]):
if s not in new_values:
new_values[s] = []
new_values[s].append(v)
return new_values
    def percentile(self, q, axis=None):
        """ Numpy-style percentile over the values (and data) of this Explanation.

        NOTE(review): unlike _numpy_func this reduces .data unconditionally,
        which assumes .data is numeric — confirm against callers.
        """
        new_self = copy.deepcopy(self)
        if self.feature_names is not None and not is_1d(self.feature_names) and axis == 0:
            # ragged per-row feature names: group values by name before reducing
            new_values = self._flatten_feature_names()
            new_self.feature_names = np.array(list(new_values.keys()))
            new_self.values = np.array([np.percentile(v, q) for v in new_values.values()])
            new_self.clustering = None
        else:
            new_self.values = np.percentile(new_self.values, q, axis)
            new_self.data = np.percentile(new_self.data, q, axis)
        #new_self.data = None
        new_self.op_history.append({
            "name": "percentile",
            "args": (axis,),
            "prev_shape": self.shape,
            "collapsed_instances": axis == 0
        })
        return new_self
def group_features(shap_values, feature_map):
    """ Collapse columns of an Explanation by summing groups of features together.

    Bug fixes vs. the original: the rank-1 display_data branch sliced with
    `[:,:i]` (an IndexError on 1-D data; now `[:i]`), a duplicated
    `new_name = feature_map.get(...)` line was removed, and feature names are
    materialized as a list so `.index()` also works for ndarray names.

    Parameters
    ----------
    shap_values : Explanation
        The explanation whose columns get merged.
    feature_map : dict
        Maps original feature names to their (possibly shared) group name.

    Returns
    -------
    Explanation
        A new explanation with one column per group (clustering is dropped).
    """
    # TODOsomeday: support and deal with clusterings
    reverse_map = {}
    for name in feature_map:
        reverse_map[feature_map[name]] = reverse_map.get(feature_map[name], []) + [name]
    curr_names = list(shap_values.feature_names)  # list() so .index() works for arrays too
    sv_new = copy.deepcopy(shap_values)
    found = {}
    i = 0
    rank1 = len(shap_values.shape) == 1
    for name in curr_names:
        new_name = feature_map.get(name, name)
        if new_name in found:
            continue  # this group was already emitted
        found[new_name] = True
        cols_to_sum = reverse_map.get(new_name, [new_name])
        old_inds = [curr_names.index(v) for v in cols_to_sum]
        if rank1:
            sv_new.values[i] = shap_values.values[old_inds].sum()
            sv_new.data[i] = shap_values.data[old_inds].sum()
        else:
            sv_new.values[:,i] = shap_values.values[:,old_inds].sum(1)
            sv_new.data[:,i] = shap_values.data[:,old_inds].sum(1)
        sv_new.feature_names[i] = new_name
        i += 1
    return Explanation(
        sv_new.values[:i] if rank1 else sv_new.values[:,:i],
        base_values = sv_new.base_values,
        data = sv_new.data[:i] if rank1 else sv_new.data[:,:i],
        display_data = None if sv_new.display_data is None else (sv_new.display_data[:i] if rank1 else sv_new.display_data[:,:i]),
        instance_names = None,
        feature_names = None if sv_new.feature_names is None else sv_new.feature_names[:i],
        output_names = None,
        output_indexes = None,
        lower_bounds = None,
        upper_bounds = None,
        error_std = None,
        main_effects = None,
        hierarchical_values = None,
        clustering = None
    )
def compute_output_dims(values, base_values, data, output_names):
    """ Uses the passed data to infer which dimensions correspond to the model's output.

    Parameters
    ----------
    values : array-like
        SHAP values whose trailing dimensions index model outputs.
    base_values : array-like or None
        Expected values; shape past the first axis gives the output shape when
        output_names is not provided.
    data : array-like or None
        Model input data; when None, the values' shape is used as a stand-in.
    output_names : array-like or None
        Names of the model outputs, possibly given per sample.

    Returns
    -------
    tuple of int
        Indexes of the dimensions of `values` that index model outputs.
    """
    values_shape = _compute_shape(values)
    # input shape matches the data shape
    if data is not None:
        data_shape = _compute_shape(data)
    # if we are not given any data we assume it would be the same shape as the given values
    else:
        data_shape = values_shape
    # output shape is known from the base values or output names
    if output_names is not None:
        output_shape = _compute_shape(output_names)
        # if our output_names are per sample then we need to drop the sample dimension here
        if values_shape[-len(output_shape):] != output_shape and \
                values_shape[-len(output_shape)+1:] == output_shape[1:] and values_shape[0] == output_shape[0]:
            output_shape = output_shape[1:]
    elif base_values is not None:
        output_shape = _compute_shape(base_values)[1:]
    else:
        output_shape = tuple()
    interaction_order = len(values_shape) - len(data_shape) - len(output_shape)
    # (the original also built an unused `values_dims` list here; removed)
    output_dims = range(len(data_shape) + interaction_order, len(values_shape))
    return tuple(output_dims)
def is_1d(val):
    """ Return True when val's first element is not itself a list or ndarray. """
    return not isinstance(val[0], (list, np.ndarray))
class Op():
    """ Marker base class for lazy operations (e.g. Percentile). """
    pass
class Percentile(Op):
    """ Lazy percentile operation marker carrying the percentile to apply. """
    def __init__(self, percentile):
        # the percentile value (0-100) applied when the op is evaluated
        self.percentile = percentile
    def add_repr(self, s, verbose=False):
        """ Wrap the string s in a percentile(...) call representation. """
        return f"percentile({s}, {self.percentile})"
def _first_item(x):
for item in x:
return item
return None
def _compute_shape(x):
if not hasattr(x, "__len__") or isinstance(x, str):
return tuple()
elif not sp.sparse.issparse(x) and len(x) > 0 and isinstance(_first_item(x), str):
return (None,)
else:
if isinstance(x, dict):
return (len(x),) + _compute_shape(x[next(iter(x))])
# 2D arrays we just take their shape as-is
if len(getattr(x, "shape", tuple())) > 1:
return x.shape
# 1D arrays we need to look inside
if len(x) == 0:
return (0,)
elif len(x) == 1:
return (1,) + _compute_shape(_first_item(x))
else:
first_shape = _compute_shape(_first_item(x))
if first_shape == tuple():
return (len(x),)
else: # we have an array of arrays...
matches = np.ones(len(first_shape), dtype=np.bool)
for i in range(1, len(x)):
shape = _compute_shape(x[i])
assert len(shape) == len(first_shape), "Arrays in Explanation objects must have consistent inner dimensions!"
for j in range(0, len(shape)):
matches[j] &= shape[j] == first_shape[j]
return (len(x),) + tuple(first_shape[j] if match else None for j, match in enumerate(matches))
class Cohorts():
    """ A named collection of Explanation objects that broadcasts operations to every member. """
    def __init__(self, **kwargs):
        # mapping of cohort name -> Explanation
        self.cohorts = kwargs
        for k in self.cohorts:
            assert isinstance(self.cohorts[k], Explanation), "All the arguments to a Cohorts set must be Explanation objects!"
    def __getitem__(self, item):
        # slice every member explanation with the same item
        new_cohorts = Cohorts()
        for k in self.cohorts:
            new_cohorts.cohorts[k] = self.cohorts[k].__getitem__(item)
        return new_cohorts
    def __getattr__(self, name):
        # NOTE: __getattr__ is only called for attributes NOT found normally,
        # so self.cohorts (set in __init__) does not recurse here
        new_cohorts = Cohorts()
        for k in self.cohorts:
            new_cohorts.cohorts[k] = getattr(self.cohorts[k], name)
        return new_cohorts
    def __call__(self, *args, **kwargs):
        # forward calls (e.g. bound methods returned by __getattr__) to every member
        new_cohorts = Cohorts()
        for k in self.cohorts:
            new_cohorts.cohorts[k] = self.cohorts[k].__call__(*args, **kwargs)
        return new_cohorts
    def __repr__(self):
        return f"<shap._explanation.Cohorts object with {len(self.cohorts)} cohorts of sizes: {[v.shape for v in self.cohorts.values()]}>"
def _auto_cohorts(shap_values, max_cohorts):
    """ This uses a DecisionTreeRegressor to build a group of cohorts with similar SHAP values.

    Parameters
    ----------
    shap_values : Explanation
        The explanation whose instances get grouped.
    max_cohorts : int
        Maximum number of tree leaves, i.e. the maximum number of cohorts.

    Returns
    -------
    Cohorts
        One cohort per unique decision path, keyed by a human-readable rule string.
    """
    # fit a decision tree that well separates the SHAP values
    m = sklearn.tree.DecisionTreeRegressor(max_leaf_nodes=max_cohorts)
    m.fit(shap_values.data, shap_values.values)
    # group instances by their decision paths
    # (the original also built an unused dense `unique_paths` matrix here; removed)
    paths = m.decision_path(shap_values.data).toarray()
    path_names = []
    # mark each instance with a path name
    for i in range(shap_values.shape[0]):
        name = ""
        for j in range(len(paths[i])):
            if paths[i,j] > 0:
                feature = m.tree_.feature[j]
                threshold = m.tree_.threshold[j]
                val = shap_values.data[i,feature]
                if feature >= 0:  # leaf nodes have feature == -2
                    name += str(shap_values.feature_names[feature])
                    if val < threshold:
                        name += " < "
                    else:
                        name += " >= "
                    name += str(threshold) + " & "
        path_names.append(name[:-3]) # the -3 strips off the last unneeded ' & '
    path_names = np.array(path_names)
    # split the instances into cohorts by their path names
    cohorts = {}
    for name in np.unique(path_names):
        cohorts[name] = shap_values[path_names == name]
    return Cohorts(**cohorts)
def list_wrap(x):
    """ A helper to patch things since slicer doesn't handle arrays of arrays (it does handle lists of arrays).

    Bug fix: the original indexed x[0] without checking the length, raising
    IndexError for empty 1-D arrays; such arrays are now returned unchanged.
    """
    if isinstance(x, np.ndarray) and len(x.shape) == 1 and len(x) > 0 and isinstance(x[0], np.ndarray):
        return list(x)
    return x
| slundberg/shap | shap/_explanation.py | Python | mit | 32,276 |
# WSGI entry point: wrap the Flask app so X-Forwarded-* headers set by a
# fronting reverse proxy are honored (correct remote addr / scheme / host).
# NOTE(review): werkzeug.contrib was removed in Werkzeug 1.0; newer releases
# ship this as werkzeug.middleware.proxy_fix.ProxyFix — confirm the pinned
# werkzeug version before upgrading.
from werkzeug.contrib.fixers import ProxyFix
from app import app
app.wsgi_app = ProxyFix(app.wsgi_app)
| ulope/nearest_pypi | wsgi.py | Python | mit | 104 |
# -*- coding: utf-8 -*-
from dp_tornado.engine.controller import Controller
class MmddController(Controller):
    """ Regression tests for the mmdd view ui_method (renders a date as MM.DD). """
    def get(self):
        # pin the timezone so the expected strings below are deterministic
        self.model.tests.helper_test.datetime.switch_timezone('Asia/Seoul')
        # 1451671445 corresponds to 2016-01-02 in Asia/Seoul
        ts = 1451671445
        ms = ts * 1000
        dt = self.helper.datetime.convert(timestamp=ts)
        args_dt = {'datetime': dt}
        args_ms = {'timestamp': ms, 'ms': True}
        args_ts = {'timestamp': ts}
        args_dt_cc = {'datetime': dt, 'concat': ''}
        args_ms_cc = {'timestamp': ms, 'ms': True, 'concat': '/'}
        args_ts_cc = {'timestamp': ts, 'concat': '/'}
        # default separator is '.', the `concat` argument overrides it
        assert(self.render_string('tests/view/ui_methods/mmdd.html', {'args': args_dt}) == '01.02')
        assert(self.render_string('tests/view/ui_methods/mmdd.html', {'args': args_ms}) == '01.02')
        assert(self.render_string('tests/view/ui_methods/mmdd.html', {'args': args_ts}) == '01.02')
        assert(self.render_string('tests/view/ui_methods/mmdd.html', {'args': args_dt_cc}) == '0102')
        assert(self.render_string('tests/view/ui_methods/mmdd.html', {'args': args_ms_cc}) == '01/02')
        assert(self.render_string('tests/view/ui_methods/mmdd.html', {'args': args_ts_cc}) == '01/02')
| why2pac/dp-tornado | example/controller/tests/view/ui_methods/mmdd.py | Python | mit | 1,216 |
from django.db import models
from fms import constants as FC
from django.utils import timezone
from django.core.files import File
from django.core.exceptions import ObjectDoesNotExist
class DocumentCategory(models.Model):
    """ Top-level document category; its code becomes the first segment of Document.address. """
    name = models.CharField(max_length=30, null=False, blank=False)
    code = models.CharField(max_length=5, null=False, blank=False)
    def __unicode__(self):
        return self.code+": "+self.name
class DocumentSubCategory1(models.Model):
    """ First-level sub category, scoped to a DocumentCategory. """
    cat = models.ForeignKey('DocumentCategory')
    name = models.CharField(max_length=30, null=False, blank=False)
    code = models.CharField(max_length=5, null=False, blank=False)
    def __unicode__(self):
        return self.cat.code+" / "+self.code
class DocumentSubCategory2(models.Model):
    """ Second-level sub category, scoped to a category and a first-level sub category. """
    cat = models.ForeignKey('DocumentCategory')
    subcat1 = models.ForeignKey('DocumentSubCategory1')
    name = models.CharField(max_length=30, null=False, blank=False)
    code = models.CharField(max_length=5, null=False, blank=False)
    def __unicode__(self):
        return self.cat.code+" / "+self.subcat1.code+" / "+self.code
# Document
class Document(models.Model):
    """ A physical document tracked by category/sub-category codes and a rack location.

    The composite `address` field ("CAT/SUB1/SUB2/NUMBER") is derived in save()
    from the code fields and is therefore not editable directly.
    """
    name = models.CharField(max_length=120, null=False, blank=False)
    cat = models.ForeignKey('DocumentCategory', null=True, blank=True) # Optional
    subcat1 = models.ForeignKey('DocumentSubCategory1', null=True, blank=True) # Optional
    subcat2 = models.ForeignKey('DocumentSubCategory2', null=True, blank=True) # Optional
    document_number = models.CharField(max_length=15, null=True, blank=True)
    address = models.CharField(max_length=30, null=False, blank=False, editable=False)
    rack = models.ForeignKey('DocumentRack', null=True, blank=True) # CR9 - C2
    avilability_status = models.BooleanField(default=True, null=False, blank=False) # NOTE(review): misspelled "availability"; renaming needs a migration
    added_on = models.DateTimeField(null=False, editable=False)
    last_updated = models.DateTimeField(null=False, editable=False)
    def __unicode__(self):
        return self.address +" : "+ self.name
    def save(self, *args, **kwargs):
        # stamp creation time only on first save (no primary key yet)
        if not self.id:
            self.added_on = timezone.now()
        # NOTE(review): the triple-quoted block below is dead code kept as a
        # bare string expression (it parsed the address back into categories)
        """
        if self.address is not None:
            _ad = self.address
            tmp = _ad.split('/')
            self.cat = DocumentCategory.objects.get(code = tmp[0])
            self.subcat1 = DocumentSubCategory1.objects.get(code = tmp[1])
            if len(tmp) == 4:
                self.subcat2 = DocumentSubCategory1.objects.get(code = tmp[2])
            self.document_nucodember = tmp[len(tmp)]
        """
        # rebuild the derived address from whichever codes are set
        _ad = ''
        if self.cat is not None:
            _ad += self.cat.code + "/"
        if self.subcat1 is not None:
            _ad += self.subcat1.code + "/"
        if self.subcat2 is not None:
            _ad += self.subcat2.code + "/"
        if self.document_number is not None:
            _ad += self.document_number
        self.address = _ad
        self.last_updated = timezone.now()
        return super(Document, self).save(*args, **kwargs)
class DocumentRack(models.Model):
    """ Physical rack location where documents are stored (e.g. "CR9 - C2"). """
    rack_name = models.CharField(max_length=20, null=False, blank=False) # CR9 - C2``
    document_type = models.CharField(max_length=30, null=True, blank=True)
    type = models.CharField(max_length=10, null=True, blank=True) # NOTE(review): shadows builtin `type`; renaming needs a migration
    image = models.ForeignKey('DocumentRackImage', null=True, blank=True)
    def __unicode__(self):
        return self.rack_name
class DocumentRackImage(models.Model):
    """ Image (floor plan) showing where a rack is located. """
    image = models.ImageField(upload_to = 'media/racks/', default = 'media/PLAN.png')
    def __unicode__(self):
        return self.image.url
# Book Category
class Category(models.Model):
    """ A genre/subject category a Book can belong to. """
    name = models.CharField(max_length=30, null=False, blank=False)
    def __unicode__(self):
        return self.name
# Book
class Book(models.Model):
    """ A library book with bibliographic info, shelf address and cover image. """
    name = models.CharField(max_length=100, null=False, blank=False)
    code = models.CharField(max_length=15, null=True, blank=True)
    categories = models.ManyToManyField('Category', related_name='category_books')
    author = models.CharField(max_length=60, null=True, blank=True)
    publisher = models.CharField(max_length=60, null=True, blank=True)
    isbn_number = models.CharField(max_length=25, null=True, blank=True)
    address = models.ForeignKey('BookAddress')
    avilability_status = models.BooleanField(default=True, null=False, blank=False) # NOTE(review): misspelled "availability"; renaming needs a migration
    added_on = models.DateTimeField(null=False, editable=False)
    last_updated = models.DateTimeField(null=False, editable=False)
    image = models.ImageField(upload_to = 'media/books/', default = 'media/thumbnail.png')
    #models.ForeignKey('Image', related_name='images', null=True)
    def __unicode__(self):
        return self.name
    def save(self, *args, **kwargs):
        # stamp creation time only on first save (no primary key yet)
        if not self.id:
            self.added_on = timezone.now()
        # NOTE(review): the quoted block below is dead code kept as a bare
        # string expression (a default-image fallback that was abandoned)
        """"
        if not self.image:
            try:
                print "try"
                default_image = Image.objects.get(img = File(open('media/books/thumbnail.png')))
            except ObjectDoesNotExist:
                print "except"
                default_image = Image(img = File(open('media/thumbnail.png')))
                default_image.save()
            self.image = default_image
        print self.image
        """
        self.last_updated = timezone.now()
        return super(Book, self).save(*args, **kwargs)
class BookAddress(models.Model):
    """ Shelf location of a book: rack / row / shelf set. """
    rack = models.CharField(max_length=15, null=True, blank=True)
    row = models.CharField(max_length=10, null=True, blank=True)
    shelf_set = models.CharField(max_length=10, null=True, blank=True)
    def __unicode__(self):
        return self.rack
"""
class Image(models.Model):
#name = models.CharField(max_length=10, null=False, blank=False)
img = models.ImageField(upload_to = 'media/books/', default = 'media/books/thumbnail.png')
def __unicode__(self):
return self.img.url
"""
def _issue_books_default_return_date():
    """ Default return date: one week after the moment the record is created. """
    return timezone.now() + timezone.timedelta(7)
class IssueBooks(models.Model):
    """ A record of books issued to a person, with issue and due dates. """
    person_name = models.CharField(max_length=60, null=False, blank=False)
    person_email = models.EmailField(null=True, blank=True)
    person_mobile_no = models.CharField(max_length=15, null=True, blank=True)
    person_group = models.CharField(max_length=10, choices=FC.PERSON_GROUPS, null=False, blank=False)
    # Bug fix: pass the callable, not timezone.now() — the original called it
    # at import time, freezing both defaults at server start-up.
    issue_date = models.DateField(null=False, blank=False, default=timezone.now)
    return_date = models.DateField(null=True, blank=True, default=_issue_books_default_return_date)
    # null=True has no effect on ManyToManyField, so it was dropped
    issued_books = models.ManyToManyField('Book')
    is_submitted_all_books = models.BooleanField(default=False)
    def __unicode__(self):
        return self.person_group+": "+self.person_name
| jagannath93/File-Management-System | fms/models.py | Python | mit | 6,299 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from accounts.models import Profile
from django import forms
from django.conf import settings
from django.contrib.auth.forms import AuthenticationForm, UserCreationForm
from django.contrib.auth.models import User
from django.utils import formats, timezone
from django.utils.translation import ugettext as _
class RegistrationForm(UserCreationForm):
    """Edit form for sign up"""
    email = forms.EmailField(label='email', required=True)
    code = forms.CharField(label='code', required=False)
    class Meta:
        """Meta for RegistrationForm"""
        model = User
        # Bug fix: use an ordered list, not a set — Meta.fields must be a
        # sequence, and a set made the rendered field order nondeterministic.
        fields = ["username", "email", "code"]
        if settings.ENABLE_NICKNAME:
            fields.append("first_name")
    def __init__(self, *args, **kwargs):
        """Override maxlength"""
        super(RegistrationForm, self).__init__(*args, **kwargs)
        self.fields['username'].widget.attrs['maxlength'] = settings.ID_MAX_LENGTH
        if settings.ENABLE_NICKNAME:
            self.fields['first_name'].widget.attrs['maxlength'] = settings.ID_MAX_LENGTH
class SettingForm(forms.ModelForm):
    """Edit form for setting"""
    class Meta:
        """Meta for SettingForm"""
        model = Profile
        # Bug fix: use an ordered list, not a set — Meta.fields must be a
        # sequence, and a set made the rendered field order nondeterministic.
        fields = [
            "alarm_interval", "alarm_board", "alarm_reply", "alarm_paper",
            "alarm_team", "alarm_full", "sense_client", "sense_slot"
        ]
    def __init__(self, *args, **kwargs):
        """Init"""
        super(SettingForm, self).__init__(*args, **kwargs)
        # alarm_reply / alarm_full are always on: rendered checked and disabled
        self.fields['alarm_reply'].widget.attrs['checked'] = 'checked'
        self.fields['alarm_reply'].widget.attrs['disabled'] = True
        self.fields['alarm_full'].widget.attrs['checked'] = 'checked'
        self.fields['alarm_full'].widget.attrs['disabled'] = True
class UserInfoForm(forms.ModelForm):
    """Edit form for user info"""
    email = forms.EmailField(label='email', required=True)
    code = forms.CharField(label='code', required=False)
    first_name = forms.CharField(max_length=12, required=False)
    class Meta:
        """Meta for UserInfoForm"""
        model = Profile
        # Bug fix: use an ordered list, not a set — Meta.fields must be a
        # sequence, and a set made the rendered field order nondeterministic.
        fields = [
            "portrait", "email", "code", "id1", "id2", "id3", "signature"
        ]
        if settings.ENABLE_NICKNAME:
            fields.append("first_name")
    def __init__(self, *args, **kwargs):
        """Init"""
        super(UserInfoForm, self).__init__(*args, **kwargs)
        # email / first_name live on the related User, so seed initial values manually
        self.fields['email'].initial = self.instance.user.email
        if settings.ENABLE_NICKNAME:
            self.fields['first_name'].initial = self.instance.user.first_name
            self.fields['first_name'].widget.attrs['maxlength'] = settings.ID_MAX_LENGTH
class LoginForm(AuthenticationForm):
    """Custom login form for suspension"""
    def confirm_login_allowed(self, user):
        """Reject suspended users; auto-reactivate once the suspension expires."""
        if user.is_active:
            return
        now = timezone.now()
        if now > user.profile.suspension_till:
            # suspension is over: reactivate and let the login proceed
            user.is_active = True
            user.save()
        else:
            formatted_date = formats.date_format(timezone.localtime(
                user.profile.suspension_till), "Y-m-d H:i:s")
            error = _('You have been suspended until %(date)s.') % {
                'date': formatted_date
            }
            raise forms.ValidationError(error, code='suspended')
| genonfire/bbgo | accounts/forms.py | Python | mit | 3,484 |
"""
automatic_questioner
--------------------
Module which serves as an interactor between the possible database with the
described structure and which contains information about functions and
variables of other packages.
Scheme of the db
----------------
# {'function_name':
# {'variables':
# {'variable_name':
# {'question_info':
# {'qtype': ['simple_input', 'confirmation_question',
# 'selection_options', 'selection_list_options'],
# 'question_spec': 'question_spec'},
# 'default': default}},
########
# 'descendants': [{'agg_description':
# {variable_name:
# {'variable_value': 'function_name'}
# },
# 'agg_name': 'aggregated_parameter_name'}]
# }}
######## OR
# 'descendants': [{'agg_description': 'function_name'
# 'agg_name': 'aggregated_parameter_name'}]
# }}
#TODO: checker 1 function with list of functions and dicts of dicts
"""
from tui_questioner import general_questioner
def check_quest_info(db):
"""Function which carry out the automatic checking of the database of
function and variables.
Parameters
----------
db: dict
the dictionary of all the information about the system with all its
functions and dependencies between them in order to ask for their
        variables automatically.
Returns
-------
check: boolean
returns the correctness of the database.
path: list
path of the possible error.
message: str
message of the error if it exists.
"""
## 0. Initial preset variables needed
# Function to compare lists
def equality_elements_list(a, b):
a = a.keys() if type(a) == dict else a
b = b.keys() if type(b) == dict else b
c = a[-1::-1]
return a == b or c == b
# List of elements available in some dicts at some levels
first_level = ['descendants', 'variables']
desc_2_level = ['agg_description', 'agg_name']
vars_2_level = ['question_info', 'default']
vars_3_level = ['qtype', 'question_spec']
# Messages of errors
m0 = "The given database of functions is not a dictionary."
m1 = "The function '%s' does not have "+str(first_level)+" as keys."
m2 = "The variables of function '%s' is not a dict."
m3 = "Incorrect keys "+str(vars_2_level)+" in function %s and variable %s."
m4 = "Incorrect question_info format for function %s and variable %s."
m5 = "Not a string the 'qtype' of function %s and variable %s."
m6 = "Incorrect 'question_spec' format for function %s and variable %s."
m7 = "Descendants of the function %s is not a list."
m8 = "Elements of the list of descendants not a dict for function %s."
m9 = "Incorrect structure of a dict in descendants for function %s."
m10 = "Incorrect type of agg_description for function %s and variable %s."
m11 = "Incorrect type of agg_description for function %s."
## Check db is a dict
if type(db) != dict:
return False, [], m0
## Loop for check each function in db
for funct in db.keys():
## Check main keys:
first_bl = equality_elements_list(db[funct], first_level)
if not first_bl:
return False, [funct], m1 % funct
## Check variables
if not type(db[funct]['variables']) == dict:
check = False
path = [funct, 'variables']
message = m2 % funct
return check, path, message
for var in db[funct]['variables']:
varsbles = db[funct]['variables']
v2_bl = equality_elements_list(varsbles[var], vars_2_level)
v3_bl = equality_elements_list(varsbles[var]['question_info'],
vars_3_level)
qtype_bl = db[funct]['variables'][var]['question_info']['qtype']
qtype_bl = type(qtype_bl) != str
qspec_bl = db[funct]['variables'][var]['question_info']
qspec_bl = type(qspec_bl['question_spec']) != dict
if not v2_bl:
check = False
path = [funct, 'variables', var]
message = m3 % (funct, var)
return check, path, message
### Check question_info
if not v3_bl:
check = False
path = [funct, 'variables', 'question_info']
message = m4 % (funct, var)
return check, path, message
if qtype_bl:
check = False
path = [funct, 'variables', 'question_info', 'qtype']
message = m5 % (funct, var)
return check, path, message
if qspec_bl:
check = False
path = [funct, 'variables', 'question_info', 'question_spec']
message = m6 % (funct, var)
return check, path, message
## Check descendants
if not type(db[funct]['descendants']) == list:
check = False
path = [funct, 'descendants']
message = m7 % funct
return check, path, message
for var_desc in db[funct]['descendants']:
if not type(var_desc) == dict:
check = False
path = [funct, 'descendants']
message = m8 % funct
return check, path, message
d2_bl = equality_elements_list(var_desc.keys(), desc_2_level)
if not d2_bl:
check = False
path = [funct, 'descendants']
message = m9 % funct
return check, path, message
if type(var_desc['agg_description']) == str:
pass
elif type(var_desc['agg_description']) == dict:
for varname in var_desc['agg_description']:
if not type(var_desc['agg_description'][varname]) == dict:
check = False
path = [funct, 'descendants', 'agg_description']
message = m10 % (funct, varname)
return check, path, message
else:
check = False
path = [funct, 'descendants', 'agg_description']
message = m11 % funct
return check, path, message
return True, [], ''
def automatic_questioner(function_name, db, choosen=None):
    """Function which carries out the automatic questioning task.

    Parameters
    ----------
    function_name: str
        the function for which we are interested in their params in order to
        call it.
    db: dict
        the dictionary of all the information about the system with all its
        functions and dependencies between them in order to ask for their
        variables automatically.
    choosen: dict or None
        previous choosen parameters. The function will avoid to ask for the
        pre-set parameters. ``None`` (the default) creates a fresh dict on
        every call, avoiding the shared mutable-default-argument pitfall.

    Returns
    -------
    choosen_values: dict
        the selected values which are disposed to input in the function we
        want to call.
    """
    ## Initialize variables needed
    m1 = "Not value for a variables in order to create aggregate variables."
    choosen_values = {} if choosen is None else choosen
    if function_name in db.keys():
        data_f = db[function_name]
    else:
        # Unknown function: nothing to ask, return the values untouched.
        return choosen_values
    # Put the variables
    for var in data_f['variables'].keys():
        # Put the variables if there are still not selected
        if var not in choosen_values.keys():
            question = data_f['variables'][var]['question_info']
            choosen_values[var] = general_questioner(**question)
    # Put aggregated variables (descendants)
    for var_desc in data_f['descendants']:
        # Possible variables and aggregated parameter name
        agg_description = var_desc['agg_description']
        agg_param = var_desc['agg_name']
        # prepare possible input for existant aggregated value in choosen
        ifaggvar = agg_param in choosen_values
        aggvarval = choosen_values[agg_param] if ifaggvar else {}
        ## Without dependant variable
        if type(agg_description) == str:
            # Obtain function name
            fn = choosen_values[agg_param]
            # Recurrent call
            aux = automatic_questioner(fn, db, aggvarval)
            # Aggregate to our values
            choosen_values[agg_param] = aux
        ## With dependant variable
        elif type(agg_description) == dict:
            for var in var_desc['agg_description']:
                if var not in choosen_values:
                    raise Exception(m1)
                ## Given a str value, return a dict in the aggparam variable
                elif type(choosen_values[var]) == str:
                    # Obtain function name
                    fn = var_desc['agg_description'][var][choosen_values[var]]
                    # Recurrent call
                    aux = automatic_questioner(fn, db, aggvarval)
                    # Aggregate to our values
                    choosen_values[agg_param] = aux
                ## Given a list value, return a list in the aggparam variable
                elif type(choosen_values[var]) == list:
                    choosen_values[agg_param] = []
                    aggvarval = [] if type(aggvarval) != list else aggvarval
                    for i in range(len(choosen_values[var])):
                        val = choosen_values[var][i]
                        fn = var_desc['agg_description'][var][val]
                        # Previously choosen values for this position, if any
                        # (the old code indexed aggvarval unconditionally and
                        # raised IndexError when it was empty).
                        prev = aggvarval[i] if i < len(aggvarval) else {}
                        aux = automatic_questioner(fn, db, prev)
                        # BUGFIX: append to the aggregated *list*; the old
                        # code called .append() on the dict of values.
                        choosen_values[agg_param].append(aux)
    return choosen_values
def get_default(function_name, db, choosen=None):
    """Function which returns a dictionary of choosen values by default.

    Parameters
    ----------
    function_name: str
        the function for which we are interested in their params in order to
        call it.
    db: dict
        the dictionary of all the information about the system with all its
        functions and dependencies between them in order to ask for their
        variables automatically.
    choosen: dict or None
        previous choosen parameters. The function will avoid to ask for the
        pre-set parameters. ``None`` (the default) creates a fresh dict on
        every call, avoiding the shared mutable-default-argument pitfall.

    Returns
    -------
    choosen_values: dict
        the selected values which are disposed to input in the function we
        want to call.

    -----
    TODO: Possibility of being integrated with automatic_questioner after
    testing.
    """
    ## Initialize variables needed
    m1 = "Not value for a variables in order to create aggregate variables."
    choosen_values = {} if choosen is None else choosen
    if function_name in db.keys():
        data_f = db[function_name]
    else:
        # Unknown function: nothing to resolve, return the values untouched.
        return choosen_values
    # Get the variables
    for var in data_f['variables'].keys():
        # Put the variables if there are still not selected
        if var not in choosen_values.keys():
            default = data_f['variables'][var]['default']
            choosen_values[var] = default
    # Get aggregated variables (descendants)
    for var_desc in data_f['descendants']:
        # Possible variables and aggregated parameter name
        agg_description = var_desc['agg_description']
        agg_param = var_desc['agg_name']
        # prepare possible input for existant aggregated value in choosen
        ifaggvar = agg_param in choosen_values
        aggvarval = choosen_values[agg_param] if ifaggvar else {}
        ## Without dependant variable
        if type(agg_description) == str:
            # Obtain function name
            fn = choosen_values[agg_param]
            # Recurrent call
            aux = get_default(fn, db, aggvarval)
            # Aggregate to our values
            choosen_values[agg_param] = aux
        ## With dependant variable
        elif type(agg_description) == dict:
            for var in var_desc['agg_description']:
                if var not in choosen_values:
                    raise Exception(m1)
                ## Given a str value, return a dict in the aggparam variable
                elif type(choosen_values[var]) == str:
                    # Obtain function name
                    fn = var_desc['agg_description'][var][choosen_values[var]]
                    # Recurrent call
                    aux = get_default(fn, db, aggvarval)
                    # Aggregate to our values
                    choosen_values[agg_param] = aux
                ## Given a list value, return a list in the aggparam variable
                elif type(choosen_values[var]) == list:
                    choosen_values[agg_param] = []
                    aggvarval = [] if type(aggvarval) != list else aggvarval
                    for i in range(len(choosen_values[var])):
                        val = choosen_values[var][i]
                        fn = var_desc['agg_description'][var][val]
                        # Previously choosen values for this position, if any
                        # (the old code indexed aggvarval unconditionally and
                        # raised IndexError when it was empty).
                        prev = aggvarval[i] if i < len(aggvarval) else {}
                        aux = get_default(fn, db, prev)
                        # BUGFIX: append to the aggregated *list*; the old
                        # code called .append() on the dict of values.
                        choosen_values[agg_param].append(aux)
    return choosen_values
###############################################################################
###############################################################################
###############################################################################
# NOTE(review): legacy variant kept below the separator; it expects the *old*
# descendants scheme ('variable_values' / 'parameters' keys) and appears
# unused -- confirm before relying on it.
def get_default3(function_name, db, choosen={}):
    """Function which returns a dictionary of choosen values by default.
    Parameters
    ----------
    function_name: str
        the function for which we are interested in their params in order to
        call it.
    db: dict
        the dictionary of all the information about the system with all its
        functions and dependencies between them in order to ask for their
        variables authomatically.
    choosen: dict
        previous choosen parameters. The function will avoid to ask for the
        pre-set parameters.
    Returns
    -------
    choosen_values: dict
        the selected values which are disposed to input in the function we want
        to call.
    -----
    TODO: Possibility of being integrated with authomatic_questioner after
    testing.
    """
    # NOTE(review): mutable default argument `choosen={}` is shared across
    # calls -- results of one call leak into the next.
    choosen_values = choosen
    if function_name in db.keys():
        data_f = db[function_name]
    else:
        # Better raise error?
        return choosen_values
    # Get the variables
    for var in data_f['variables'].keys():
        # Put the variables if there are still not selected
        if var not in choosen_values.keys():
            default = data_f['variables'][var]['default']
            choosen_values[var] = default
    # Get the aggregated variables (descendants)
    for i in range(len(data_f['descendants'])):
        # Possible variables and aggregated parameter name
        vars_values = data_f['descendants'][i]['variable_values']
        agg_param = data_f['descendants'][i]['parameters']
        variables = vars_values.keys()
        # prepare possible input for existant aggregated value in choosen
        ifaggvar = agg_param in choosen_values
        aggvarval = choosen_values[agg_param] if ifaggvar else {}
        for var in variables:
            # boolean variables
            value = choosen_values[var]
            iflist = type(value) == list
            ifvars = var in choosen_values.keys()
            # if we have to return a list
            if ifvars and iflist:
                # Initialization values
                n = len(value)
                aggvarval = aggvarval if ifaggvar else [{} for i in range(n)]
                results = []
                # NOTE(review): `i` shadows the outer descendants index here.
                i = 0
                for val in value:
                    # Obtain function_name
                    # NOTE(review): indexing with `value` (the whole list)
                    # looks wrong -- presumably `val` was intended; verify.
                    f_name = vars_values[var][value]
                    # Recurrent call
                    aux = get_default(f_name, db, aggvarval[i])
                    # Insert in the correspondent list
                    results.append(aux)
                    i += 1
                # NOTE(review): `results` is built but never stored into
                # choosen_values[agg_param] -- the list branch has no effect.
            # if we have to return a dict
            elif ifvars and not iflist:
                # Obtain function_name
                f_name = vars_values[var][value]
                # Recurrent call
                choosen_values[agg_param] = get_default(f_name, db, aggvarval)
    return choosen_values
# NOTE(review): legacy variant expecting the old descendants scheme
# ('variable_values' / 'parameters' keys); appears unused -- confirm.
def automatic_questioner3(function_name, db, choosen={}):
    """Function which carry out the automatic questioning task.
    Parameters
    ----------
    function_name: str
        the function for which we are interested in their params in order to
        call it.
    db: dict
        the dictionary of all the information about the system with all its
        functions and dependencies between them in order to ask for their
        variables authomatically.
    choosen: dict
        previous choosen parameters. The function will avoid to ask for the
        pre-set parameters.
    Returns
    -------
    choosen_values: dict
        the selected values which are disposed to input in the function we want
        to call.
    """
    ## Initialize variables needed
    # NOTE(review): m1 is defined but never raised in this variant.
    m1 = "Not value for a variables in order to create aggregate variables."
    # NOTE(review): mutable default argument `choosen={}` is shared across
    # calls -- results of one call leak into the next.
    choosen_values = choosen
    if function_name in db.keys():
        data_f = db[function_name]
    else:
        # Better raise error?
        return choosen_values
    # Put the variables
    for var in data_f['variables'].keys():
        # Put the variables if there are still not selected
        if var not in choosen_values.keys():
            question = data_f['variables'][var]['question_info']
            choosen_values[var] = general_questioner(**question)
    # Put aggregated variables (descendants)
    for i in range(len(data_f['descendants'])):
        # Possible variables and aggregated parameter name
        vars_values = data_f['descendants'][i]['variable_values']
        agg_param = data_f['descendants'][i]['parameters']
        variables = vars_values.keys()
        # prepare possible input for existant aggregated value in choosen
        ifaggvar = agg_param in choosen_values
        aggvarval = choosen_values[agg_param] if ifaggvar else {}
        for var in variables:
            # boolean variables
            value = choosen_values[var]
            iflist = type(value) == list
            ifvars = var in choosen_values.keys()
            # if we have to return a list
            if ifvars and iflist:
                # Initialization values
                n = len(value)
                aggvarval = aggvarval if ifaggvar else [{} for i in range(n)]
                results = []
                # NOTE(review): `i` shadows the outer descendants index here.
                i = 0
                for val in value:
                    # Obtain function_name
                    # NOTE(review): indexing with `value` (the whole list)
                    # looks wrong -- presumably `val` was intended; verify.
                    f_name = vars_values[var][value]
                    # Recurrent call
                    # NOTE(review): `authomatic_questioner` is not defined
                    # anywhere (typo) -- this branch raises NameError if run.
                    aux = authomatic_questioner(f_name, db, aggvarval[i])
                    # Insert in the correspondent list
                    results.append(aux)
                    i += 1
                # NOTE(review): `results` is built but never stored into
                # choosen_values[agg_param] -- the list branch has no effect.
            # if we have to return a dict
            elif ifvars and not iflist:
                # Obtain function_name
                f_name = vars_values[var][value]
                # Recurrent call
                # NOTE(review): same undefined `authomatic_questioner` typo.
                choosen_values[agg_param] = authomatic_questioner(f_name, db,
                                                                  aggvarval)
    return choosen_values
| tgquintela/pythonUtils | pythonUtils/TUI_tools/automatic_questioner.py | Python | mit | 19,433 |
import logging
import time
import os
# will change these to specific imports once code is more final
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
from PyQt5 import uic
# Pre-built solid brushes reused when painting device states in the tree.
BRUSH_WHITE = QBrush(QColor(255, 255, 255), Qt.SolidPattern)
BRUSH_GREEN = QBrush(QColor(0, 255, 0), Qt.SolidPattern)
BRUSH_BLACK = QBrush(QColor(0, 0, 0), Qt.SolidPattern)
BRUSH_DARK_PURPLE = QBrush(QColor(128, 0, 255), Qt.SolidPattern)
class DeviceNode:
    """Model-side representation of one device: its name, latest state dict
    and the QStandardItems used to display it in the device tree."""

    __slots__ = ["_callback", "_name", "_data", "_type", "_brush", "q_name", "q_state", "sub_properties",
                 "sub_properties_appended", "q_time_added", "log"]

    def __init__(self):
        self._callback = None
        self._name = ""
        self._data = {}
        self._type = ""
        self._brush = BRUSH_BLACK
        self.q_name = QStandardItem()
        self.q_state = QStandardItem()
        self.sub_properties = {}
        self.sub_properties_appended = False
        # Timestamp column used for "received" sorting in the tree view.
        self.q_time_added = QStandardItem()
        self.q_time_added.setData(time.perf_counter(), Qt.DisplayRole)
        self.q_name.setDragEnabled(True)
        self.q_state.setData("", Qt.DisplayRole)
        self.log = logging.getLogger('Device')

    def setName(self, name):
        """Set the device name and refresh the name column and logger."""
        self._name = name
        self.q_name.setData(str(self._name), Qt.DisplayRole)
        self.log = logging.getLogger('Device {}'.format(self._name))
        self.q_state.emitDataChanged()

    def setData(self, data):
        """Set data of device and refresh the state column and brush."""
        if data == self._data:
            # do nothing if data did not change
            return
        if not isinstance(data, dict):
            data = {}
        if self._callback:
            # NOTE: fired *before* the new data is stored, so the callback
            # still observes the previous state -- confirm this is intended.
            self._callback()
        self._data = data
        if self._data:
            state_str = str(list(self._data.values())[0])
            if len(self._data) > 1:
                state_str = state_str + " {…}"
        else:
            # Guard: a non-dict payload was coerced to {} above; the old
            # code raised IndexError on the empty dict here.
            state_str = ""
        self.q_state.setData(state_str, Qt.DisplayRole)
        for row in self._data:
            if not self.sub_properties_appended:
                q_property = QStandardItem()
                q_value = QStandardItem()
                self.sub_properties.update({row: [q_property, q_value]})
                self.q_name.appendRow(self.sub_properties.get(row))
            self.sub_properties.get(row)[0].setData(str(row), Qt.DisplayRole)
            self.sub_properties.get(row)[1].setData(str(self._data.get(row)), Qt.DisplayRole)
        # NOTE(review): keys appearing only in *later* updates will not get
        # sub-rows once this flag is set -- verify updates keep a stable key set.
        self.sub_properties_appended = True
        self.q_state.emitDataChanged()
        self._brush = self._calculate_colored_brush()

    def setType(self, type):
        """Set the device type (e.g. 'light', 'switch') and recolor."""
        self._type = type
        self._brush = self._calculate_colored_brush()
        self.q_state.emitDataChanged()

    def get_row(self):
        """Return the three items forming this device's row in the tree."""
        return [self.q_name, self.q_state, self.q_time_added]

    def data(self):
        return self._data

    def type(self):
        return self._type

    def get_colored_brush(self) -> QBrush:
        """Return colored brush for device."""
        return self._brush

    def _calculate_color_gamma_correction(self, color):
        """Perform gamma correction.
        Feel free to fiddle with these constants until it feels right
        With gamma = 0.5 and constant a = 18, the top 54 values are lost,
        but the bottom 25% feels much more normal.
        """
        gamma = 0.5
        a = 18
        corrected = []
        for value in color:
            if value < 0 or value > 255:
                self.log.warning("Got value %s for brightness which outside the expected range", value)
                value = 0
            value = int(pow(value, gamma) * a)
            if value > 255:
                value = 255
            corrected.append(value)
        return corrected

    def _calculate_colored_brush(self):
        """Pick the brush color from the current type and state."""
        if self._type == 'light':
            # assumes light data carries a 'color' [r, g, b] entry -- TODO confirm
            color = self.data()['color']
            if color == [0, 0, 0]:
                # shortcut for black
                return BRUSH_BLACK
            color = self._calculate_color_gamma_correction(color)
        elif self._type == 'switch':
            state = self.data()['state']
            if state:
                return BRUSH_GREEN
            else:
                return BRUSH_BLACK
        elif self._type == 'diverter':
            state = self.data()['active']
            if state:
                return BRUSH_DARK_PURPLE
            else:
                return BRUSH_BLACK
        else:
            # Get first parameter and draw as white if it evaluates True.
            # next(..., False) guards against an empty data dict, which made
            # the old list(...)[0] raise IndexError.
            state = bool(next(iter(self.data().values()), False))
            if state:
                return BRUSH_WHITE
            else:
                return BRUSH_BLACK
        return QBrush(QColor(*color), Qt.SolidPattern)

    def set_change_callback(self, callback):
        """Register a change callback; returns the previous one, if any."""
        if self._callback:
            # raise AssertionError("Can only have one callback")
            old_callback = self._callback
            self._callback = callback
            return old_callback
        else:
            self._callback = callback
            self.q_state.emitDataChanged()
class DeviceDelegate(QStyledItemDelegate):
    """Item delegate painting a device's state as colored circles plus text."""

    def __init__(self):
        self.size = None
        super().__init__()

    def paint(self, painter, view, index):
        """Custom-paint column 1 (the state column) for a device row."""
        super().paint(painter, view, index)
        color = None
        state = None
        balls = None
        found = False
        text = ''
        num_circles = 1
        # Column 0 holds the device name; only the state column is custom.
        if index.column() == 0:
            return
        data = []
        try:
            data = index.model().itemFromIndex(index).data()
        except Exception:
            # Proxy models may hand us indexes without a backing item; just
            # skip custom painting. (Was a bare `except:`, which also
            # swallowed KeyboardInterrupt/SystemExit.)
            pass
        # The custom painting only understands dict payloads; anything else
        # (None, plain strings, empty default) gets the default rendering.
        # This one guard replaces nine identical try/except TypeError blocks.
        if not isinstance(data, dict):
            return
        if 'color' in data:
            color = data['color']
            found = True
        if 'brightness' in data:
            color = [data['brightness']] * 3
            found = True
        if 'state' in data:
            text = str(data['state'])
            found = True
        if 'complete' in data:
            state = not data['complete']
            found = True
        if 'enabled' in data:
            state = data['enabled']
            found = True
        if 'balls' in data:
            balls = data['balls']
            found = True
        if 'balls_locked' in data:
            balls = data['balls_locked']
            found = True
        if 'num_balls_requested' in data:
            text += 'Requested: {} '.format(
                data['num_balls_requested'])
            found = True
        if 'unexpected_balls' in data:
            text += 'Unexpected: {} '.format(
                data['unexpected_balls'])
            found = True
        if not found:
            return
        text += " " + str(data)
        painter.save()
        painter.setRenderHint(QPainter.Antialiasing, True)
        painter.setPen(QPen(QColor(100, 100, 100), 1, Qt.SolidLine))
        if color:
            painter.setBrush(QBrush(QColor(*color), Qt.SolidPattern))
        elif state is True:
            painter.setBrush(QBrush(QColor(0, 255, 0), Qt.SolidPattern))
        elif state is False:
            painter.setBrush(QBrush(QColor(255, 255, 255), Qt.SolidPattern))
        elif isinstance(balls, int):
            # One circle per ball.
            painter.setBrush(QBrush(QColor(0, 255, 0), Qt.SolidPattern))
            num_circles = balls
        x_offset = 0
        for _ in range(num_circles):
            painter.drawEllipse(
                view.rect.x() + x_offset, view.rect.y(), 14, 14)
            x_offset += 20
        if text:
            painter.drawText(view.rect.x() + x_offset, view.rect.y() + 12,
                             str(text))
            self.size = QSize(len(text) * 10, 20)
        painter.restore()

    def sizeHint(self, QStyleOptionViewItem, QModelIndex):
        """Return the size of the last painted cell, or a fixed fallback."""
        if self.size:
            return self.size
        else:
            # Calling super() here seems to result in a segfault on close sometimes.
            return QSize(80, 20)
class DeviceWindow(QWidget):
    """Searchable, sortable tree window listing all devices and their state."""

    # NOTE: QWidget already provides a __dict__, so __slots__ is effectively
    # documentation here. Fixed the "mpfmn" typo (the code assigns
    # self.mpfmon) and listed the attributes set outside __init__.
    __slots__ = ["mpfmon", "ui", "model", "treeview", "filtered_model", "log",
                 "already_hidden", "added_index", "device_states",
                 "device_type_widgets", "_debug_enabled"]

    def __init__(self, mpfmon):
        self.mpfmon = mpfmon
        super().__init__()
        self.ui = None
        self.model = None
        self.draw_ui()
        self.attach_model()
        self.attach_signals()
        self.log = logging.getLogger('Core')
        self.already_hidden = False
        self.added_index = 0
        # {device_type: {device_name: DeviceNode}}
        self.device_states = dict()
        # {device_type: QStandardItem group row}
        self.device_type_widgets = dict()
        self._debug_enabled = self.log.isEnabledFor(logging.DEBUG)

    def draw_ui(self):
        """Load the window layout from ui/searchable_tree.ui and restore
        the saved geometry."""
        # Load ui file from ./ui/
        ui_path = os.path.join(os.path.dirname(__file__), "ui", "searchable_tree.ui")
        self.ui = uic.loadUi(ui_path, self)
        self.ui.setWindowTitle('Devices')
        self.ui.move(self.mpfmon.local_settings.value('windows/devices/pos',
                                                      QPoint(200, 200)))
        self.ui.resize(self.mpfmon.local_settings.value('windows/devices/size',
                                                        QSize(300, 600)))
        # Disable option "Sort", select first item.
        # TODO: Store and load selected sort index to local_settings
        self.ui.sortComboBox.model().item(0).setEnabled(False)
        self.ui.sortComboBox.setCurrentIndex(1)
        self.ui.treeView.setAlternatingRowColors(True)

    def attach_signals(self):
        """Wire up tree expansion, the filter box and the sort selector."""
        assert (self.ui is not None)
        self.ui.treeView.expanded.connect(self.resize_columns_to_content)
        self.ui.treeView.collapsed.connect(self.resize_columns_to_content)
        self.ui.filterLineEdit.textChanged.connect(self.filter_text)
        self.ui.sortComboBox.currentIndexChanged.connect(self.change_sort)

    def attach_model(self):
        """Create the item model and the filter/sort proxy for the tree."""
        assert (self.ui is not None)
        self.treeview = self.ui.treeView
        self.model = QStandardItemModel()
        self.model.setHorizontalHeaderLabels(["Device", "Data"])
        self.treeview.setDragDropMode(QAbstractItemView.DragOnly)
        # Resizing to contents causes huge performance losses. Only resize
        # when rows are expanded or collapsed (see attach_signals).
        self.filtered_model = QSortFilterProxyModel(self)
        self.filtered_model.setSourceModel(self.model)
        self.filtered_model.setRecursiveFilteringEnabled(True)
        self.filtered_model.setFilterCaseSensitivity(False)
        self.treeview.setModel(self.filtered_model)

    def resize_columns_to_content(self):
        """Fit both visible columns to their content."""
        self.ui.treeView.resizeColumnToContents(0)
        self.ui.treeView.resizeColumnToContents(1)

    def process_device_update(self, name, state, changes, type):
        """Create or update the tree node for device `type`.`name`."""
        del changes
        if self._debug_enabled:
            self.log.debug("Device Update: %s.%s: %s", type, name, state)
        if type not in self.device_states:
            # First device of this type: add a group row.
            self.device_states[type] = dict()
            item = QStandardItem(type)
            self.device_type_widgets[type] = item
            self.model.appendRow([item, QStandardItem(), QStandardItem(str(time.perf_counter()))])
        if name not in self.device_states[type]:
            node = DeviceNode()
            node.setName(name)
            node.setData(state)
            node.setType(type)
            self.device_states[type][name] = node
            self.device_type_widgets[type].appendRow(node.get_row())
            self.mpfmon.pf.create_widget_from_config(node, type, name)
        else:
            self.device_states[type][name].setData(state)
        # The timestamp column is only used for sorting; keep it hidden.
        self.ui.treeView.setColumnHidden(2, True)

    def filter_text(self, string):
        """Filter the tree by the given substring (wildcard match)."""
        wc_string = "*" + str(string) + "*"
        self.filtered_model.setFilterWildcard(wc_string)
        self.ui.treeView.resizeColumnToContents(0)
        self.ui.treeView.resizeColumnToContents(1)

    def change_sort(self, index=1):
        """Re-sort the tree according to the sort combo box selection."""
        self.model.layoutAboutToBeChanged.emit()
        self.filtered_model.beginResetModel()
        # This is a bit sloppy and probably should be reworked.
        if index == 1:  # Received up
            self.filtered_model.sort(2, Qt.AscendingOrder)
        elif index == 2:  # Received down
            self.filtered_model.sort(2, Qt.DescendingOrder)
        elif index == 3:  # Name up
            self.filtered_model.sort(0, Qt.AscendingOrder)
        elif index == 4:  # Name down
            self.filtered_model.sort(0, Qt.DescendingOrder)
        self.filtered_model.endResetModel()
        self.model.layoutChanged.emit()

    def closeEvent(self, event):
        """Persist window geometry and let the monitor decide about quitting."""
        super().closeEvent(event)
        self.mpfmon.write_local_settings()
        event.accept()
        self.mpfmon.check_if_quit()
| missionpinball/mpf-monitor | mpfmonitor/core/devices.py | Python | mit | 13,716 |
import os
from select import select
from subprocess import PIPE
import sys
import time
from itertools import chain
from plumbum.commands.processes import run_proc, ProcessExecutionError
from plumbum.commands.processes import BY_TYPE
import plumbum.commands.base
from plumbum.lib import read_fd_decode_safely
class Future(object):
    """A "future result" of a process that is still running. Wraps a ``Popen``
    object together with the exit code expected from it, exposing ``poll()``,
    ``wait()`` and the lazy ``returncode``/``stdout``/``stderr`` properties.
    """

    def __init__(self, proc, expected_retcode, timeout=None):
        self.proc = proc
        self._expected_retcode = expected_retcode
        self._timeout = timeout
        # Filled in lazily by wait().
        self._returncode = None
        self._stdout = None
        self._stderr = None

    def __repr__(self):
        status = self._returncode if self.ready() else "running"
        return "<Future %r (%s)>" % (self.proc.argv, status)

    def poll(self):
        """Polls the underlying process for termination; returns ``False`` if still running,
        or ``True`` if terminated"""
        terminated = self.proc.poll() is not None
        if terminated:
            self.wait()
        return self._returncode is not None

    ready = poll

    def wait(self):
        """Waits for the process to terminate; will raise a
        :class:`plumbum.commands.ProcessExecutionError` in case of failure"""
        if self._returncode is not None:
            # Already collected -- wait() is idempotent.
            return
        result = run_proc(self.proc, self._expected_retcode, self._timeout)
        self._returncode, self._stdout, self._stderr = result

    @property
    def returncode(self):
        """The process' returncode; accessing this property will wait for the process to finish"""
        self.wait()
        return self._returncode

    @property
    def stdout(self):
        """The process' stdout; accessing this property will wait for the process to finish"""
        self.wait()
        return self._stdout

    @property
    def stderr(self):
        """The process' stderr; accessing this property will wait for the process to finish"""
        self.wait()
        return self._stderr
#===================================================================================================
# execution modifiers
#===================================================================================================
class ExecutionModifier(object):
    __slots__ = ("__weakref__", )

    def __repr__(self):
        """Automatically creates a representation for given subclass with slots.
        Ignore hidden properties."""
        public = {}
        for klass in type(self).__mro__:
            declared = getattr(klass, "__slots__", ())
            if isinstance(declared, str):
                declared = (declared, )
            public.update((attr, getattr(self, attr))
                          for attr in declared if not attr.startswith('_'))
        rendered = ("{0} = {1}".format(attr, val)
                    for attr, val in public.items())
        return "{0}({1})".format(type(self).__name__, ", ".join(rendered))

    @classmethod
    def __call__(cls, *args, **kwargs):
        # Allows modifier *instances* (BG, FG, ...) to be called like the
        # class itself, producing a configured copy.
        return cls(*args, **kwargs)
class _BG(ExecutionModifier):
    """
    An execution modifier that runs the given command in the background, returning a
    :class:`Future <plumbum.commands.Future>` object. In order to mimic shell syntax, it applies
    when you right-and it with a command. If you wish to expect a different return code
    (other than the normal success indicate by 0), use ``BG(retcode)``. Example::

        future = sleep[5] & BG       # a future expecting an exit code of 0
        future = sleep[5] & BG(7)    # a future expecting an exit code of 7

    .. note::
       When processes run in the **background** (either via ``popen`` or
       :class:`& BG <plumbum.commands.BG>`), their stdout/stderr pipes might fill up,
       causing them to hang. If you know a process produces output, be sure to consume it
       every once in a while, using a monitoring thread/reactor in the background.
       For more info, see `#48 <https://github.com/tomerfiliba/plumbum/issues/48>`_
    """
    __slots__ = ("retcode", "kargs", "timeout")

    def __init__(self, retcode=0, timeout=None, **kargs):
        self.retcode = retcode
        self.kargs = kargs
        self.timeout = timeout

    def __rand__(self, cmd):
        # Spawn the process now; the Future collects the result on demand.
        proc = cmd.popen(**self.kargs)
        return Future(proc, self.retcode, timeout=self.timeout)


BG = _BG()
class _FG(ExecutionModifier):
    """
    An execution modifier that runs the given command in the foreground, passing it the
    current process' stdin, stdout and stderr. Useful for interactive programs that require
    a TTY. There is no return value.

    In order to mimic shell syntax, it applies when you right-and it with a command.
    If you wish to expect a different return code (other than the normal success indicate by 0),
    use ``FG(retcode)``. Example::

        vim & FG       # run vim in the foreground, expecting an exit code of 0
        vim & FG(7)    # run vim in the foreground, expecting an exit code of 7
    """
    __slots__ = ("retcode", "timeout")

    def __init__(self, retcode=0, timeout=None):
        self.retcode = retcode
        self.timeout = timeout

    def __rand__(self, cmd):
        # Passing None for each stream attaches the parent's stdin/out/err.
        cmd(retcode=self.retcode, stdin=None, stdout=None, stderr=None,
            timeout=self.timeout)


FG = _FG()
class _TEE(ExecutionModifier):
    """Run a command, dumping its stdout/stderr to the current process's stdout
    and stderr, but ALSO return them. Useful for interactive programs that
    expect a TTY but also have valuable output.

    Use as:

        ls["-l"] & TEE

    Returns a tuple of (return code, stdout, stderr), just like ``run()``.
    """
    __slots__ = ("retcode", "buffered", "timeout")

    def __init__(self, retcode=0, buffered=True, timeout=None):
        """`retcode` is the return code to expect to mean "success". Set
        `buffered` to False to disable line-buffering the output, which may
        cause stdout and stderr to become more entangled than usual.
        """
        self.retcode = retcode
        self.buffered = buffered
        self.timeout = timeout

    def __rand__(self, cmd):
        with cmd.bgrun(
                retcode=self.retcode,
                stdin=None,
                stdout=PIPE,
                stderr=PIPE,
                timeout=self.timeout) as p:
            outbuf = []
            errbuf = []
            out = p.stdout
            err = p.stderr
            # Map each pipe to the buffer that accumulates its raw bytes and
            # to the local stream it is mirrored ("teed") to.
            buffers = {out: outbuf, err: errbuf}
            tee_to = {out: sys.stdout, err: sys.stderr}
            done = False
            while not done:
                # After the process exits, we have to do one more
                # round of reading in order to drain any data in the
                # pipe buffer. Thus, we check poll() here,
                # unconditionally enter the read loop, and only then
                # break out of the outer loop if the process has
                # exited.
                done = (p.poll() is not None)

                # We continue this loop until we've done a full
                # `select()` call without collecting any input. This
                # ensures that our final pass -- after process exit --
                # actually drains the pipe buffers, even if it takes
                # multiple calls to read().
                progress = True
                while progress:
                    progress = False
                    ready, _, _ = select((out, err), (), ())
                    for fd in ready:
                        buf = buffers[fd]
                        data, text = read_fd_decode_safely(fd, 4096)
                        if not data:  # eof
                            continue
                        progress = True
                        # Python conveniently line-buffers stdout and stderr for
                        # us, so all we need to do is write to them

                        # This will automatically add up to three bytes if it cannot be decoded
                        tee_to[fd].write(text)

                        # And then "unbuffered" is just flushing after each write
                        if not self.buffered:
                            tee_to[fd].flush()

                        buf.append(data)

            # Decode the accumulated raw chunks once, at the end.
            stdout = ''.join([x.decode('utf-8') for x in outbuf])
            stderr = ''.join([x.decode('utf-8') for x in errbuf])
            return p.returncode, stdout, stderr


TEE = _TEE()
class _TF(ExecutionModifier):
    """
    An execution modifier that runs the given command and returns True/False
    depending on the retcode: True when the expected exit code is returned,
    False otherwise. This is useful for checking true/false bash commands.

    Use ``TF(retcode)`` to expect a return code other than the normal success
    (0), and ``TF(FG=True)`` to run the process in the foreground. Example::

        local['touch']['/root/test'] & TF          # False, since this cannot be touched
        local['touch']['/root/test'] & TF(1)       # True
        local['touch']['/root/test'] & TF(FG=True) # False, will show error message
    """
    __slots__ = ("retcode", "FG", "timeout")

    def __init__(self, retcode=0, FG=False, timeout=None):
        """`retcode` is the return code to expect to mean "success". Set
        `FG` to True to run in the foreground.
        """
        self.retcode = retcode
        self.FG = FG
        self.timeout = timeout

    @classmethod
    def __call__(cls, *args, **kwargs):
        return cls(*args, **kwargs)

    def __rand__(self, cmd):
        run_kwargs = dict(retcode=self.retcode, timeout=self.timeout)
        if self.FG:
            # Foreground mode attaches the parent's std streams.
            run_kwargs.update(stdin=None, stdout=None, stderr=None)
        try:
            cmd(**run_kwargs)
        except ProcessExecutionError:
            return False
        else:
            return True


TF = _TF()
class _RETCODE(ExecutionModifier):
    """
    Execution modifier that runs a command and evaluates to its exit status.

    ``cmd & RETCODE`` disables the usual retcode check and returns the
    process's numeric return code -- useful for commands whose exit code is
    more informative than their output.  ``RETCODE(FG=True)`` runs the command
    in the foreground instead.

    Example::
        local['touch']['/root/test'] & RETCODE           # 1, cannot be touched
        local['touch']['/root/test'] & RETCODE(FG=True)  # 1, error shown on screen
    """
    __slots__ = ("foreground", "timeout")
    def __init__(self, FG=False, timeout=None):
        """Set `FG` to True to run in the foreground."""
        self.foreground = FG
        self.timeout = timeout
    @classmethod
    def __call__(cls, *args, **kwargs):
        # Lets the module-level RETCODE *instance* act as a factory:
        # RETCODE(FG=True) -> _RETCODE(FG=True).
        return cls(*args, **kwargs)
    def __rand__(self, cmd):
        # Triggered by `cmd & RETCODE`: run with retcode checking disabled
        # (retcode=None) and return only the exit status, which is the first
        # element of cmd.run()'s result tuple.
        run_kwargs = {"retcode": None, "timeout": self.timeout}
        if self.foreground:
            # Foreground run: inherit the parent's stdin/stdout/stderr.
            run_kwargs.update(stdin=None, stdout=None, stderr=None)
        return cmd.run(**run_kwargs)[0]
RETCODE = _RETCODE()
class _NOHUP(ExecutionModifier):
    """
    Execution modifier that launches the command detached from the current
    process (like the shell's ``nohup ... &``), returning a standard popen
    object.  The child keeps running even after this process exits.

    Defaults are ``NOHUP(cwd=local.cwd, stdout='nohup.out', stderr=None)``.
    A ``stderr`` of None sends stderr into stdout; use ``os.devnull`` to
    discard output.  Explicit ``>`` / ``>>`` redirections attached to the
    command take precedence over the modifier's own stdout/append settings.

    Example::
        sleep[5] & NOHUP                     # Outputs to nohup.out
        sleep[5] & NOHUP(stdout=os.devnull)  # No output
    The equivalent bash command would be
    .. code-block:: bash
        nohup sleep 5 &
    """
    __slots__ = ('cwd', 'stdout', 'stderr', 'append')
    def __init__(self, cwd='.', stdout='nohup.out', stderr=None, append=True):
        """ Set ``cwd``, ``stdout``, or ``stderr``.
        Runs as a forked process. You can set ``append=False``, too.
        """
        self.cwd = cwd
        self.stdout = stdout
        self.stderr = stderr
        self.append = append
    def __rand__(self, cmd):
        # Triggered by `cmd & NOHUP`.  A redirected command (`cmd > file` or
        # `cmd >> file`) carries its own target file: unwrap it and honour
        # that file (with the matching append mode) instead of our defaults.
        stdout = self.stdout
        append = self.append
        if isinstance(cmd, plumbum.commands.base.StdoutRedirection):
            stdout, append, cmd = cmd.file, False, cmd.cmd
        elif isinstance(cmd, plumbum.commands.base.AppendingStdoutRedirection):
            stdout, append, cmd = cmd.file, True, cmd.cmd
        # NOTE(review): `cmd` is passed both as the receiver and as the first
        # argument -- presumably nohup()'s signature expects the command
        # object explicitly; confirm against the command base class.
        return cmd.nohup(cmd, self.cwd, stdout, self.stderr, append)
NOHUP = _NOHUP()
class PipeToLoggerMixin():
    """
    This mixin allows piping plumbum commands' output into a logger.
    The logger must implement a ``log(level, msg)`` method, as in ``logging.Logger``
    Example::
        class MyLogger(logging.Logger, PipeToLoggerMixin):
            pass
        logger = MyLogger("example.app")
    Here we send the output of an install.sh script into our log::
        local['./install.sh'] & logger
    We can choose the log-level for each stream::
        local['./install.sh'] & logger.pipe(out_level=logging.DEBUG, err_level=logging.DEBUG)
    Or use a convenience method for it::
        local['./install.sh'] & logger.pipe_debug()
    A prefix can be added to each line::
        local['./install.sh'] & logger.pipe(prefix="install.sh: ")
    If the command fails, an exception is raised as usual. This can be modified::
        local['install.sh'] & logger.pipe_debug(retcode=None)
    An exception is also raised if too much time (``DEFAULT_LINE_TIMEOUT``) passed between lines in the stream,
    This can also be modified::
        local['install.sh'] & logger.pipe(line_timeout=10)
    If we happen to use logbook::
        class MyLogger(logbook.Logger, PipeToLoggerMixin):
            from logbook import DEBUG, INFO  # hook up with logbook's levels
    """
    # Default level constants; a logbook-based subclass shadows these with its
    # own DEBUG/INFO (see the class docstring).
    from logging import DEBUG, INFO
    # Maximum seconds allowed between output lines before the pipe errors out.
    DEFAULT_LINE_TIMEOUT = 10 * 60
    # Names of the level attributes used for stdout / stderr respectively.
    DEFAULT_STDOUT = "INFO"
    DEFAULT_STDERR = "DEBUG"
    def pipe(self, out_level=None, err_level=None, prefix=None, line_timeout=None, **kw):
        """
        Pipe a command's stdout and stderr lines into this logger.
        :param out_level: the log level for lines coming from stdout
        :param err_level: the log level for lines coming from stderr
        Optionally use `prefix` for each line.
        """
        class LogPipe(object):
            # `_` stands in for the usual `self`; the logger instance is
            # reached through the enclosing method's closure instead.
            def __rand__(_, cmd):
                # Accept either an already-started popen-like object (has
                # iter_lines) or a plain command, which we start ourselves.
                popen = cmd if hasattr(cmd, "iter_lines") else cmd.popen()
                for typ, lines in popen.iter_lines(line_timeout=line_timeout, mode=BY_TYPE, **kw):
                    if not lines:
                        continue
                    # `typ` keys into `levels` below: 1 -> stdout, 2 -> stderr.
                    level = levels[typ]
                    for line in lines.splitlines():
                        if prefix:
                            line = "%s: %s" % (prefix, line)
                        self.log(level, line)
                return popen.returncode
        # Resolve defaults at call time so subclasses can override the class
        # attributes; explicit arguments then take precedence.
        levels = {1: getattr(self, self.DEFAULT_STDOUT), 2: getattr(self, self.DEFAULT_STDERR)}
        if line_timeout is None:
            line_timeout = self.DEFAULT_LINE_TIMEOUT
        if out_level is not None:
            levels[1] = out_level
        if err_level is not None:
            levels[2] = err_level
        return LogPipe()
    def pipe_info(self, prefix=None, **kw):
        """
        Pipe a command's stdout and stderr lines into this logger (both at level INFO)
        """
        return self.pipe(self.INFO, self.INFO, prefix=prefix, **kw)
    def pipe_debug(self, prefix=None, **kw):
        """
        Pipe a command's stdout and stderr lines into this logger (both at level DEBUG)
        """
        return self.pipe(self.DEBUG, self.DEBUG, prefix=prefix, **kw)
    def __rand__(self, cmd):
        """
        Pipe a command's stdout and stderr lines into this logger.
        Log levels for each stream are determined by ``DEFAULT_STDOUT`` and ``DEFAULT_STDERR``.
        """
        return cmd & self.pipe(getattr(self, self.DEFAULT_STDOUT), getattr(self, self.DEFAULT_STDERR))
| AndydeCleyre/plumbum | plumbum/commands/modifiers.py | Python | mit | 16,698 |
# -*- coding: utf-8 -*-
"""
Clase C{TypeDeclarationNode} del árbol de sintáxis abstracta.
"""
from pytiger2c.ast.declarationnode import DeclarationNode
class TypeDeclarationNode(DeclarationNode):
    """
    C{TypeDeclarationNode} class of the abstract syntax tree.
    Represents the different type declarations available in the Tiger
    language.  The declarations of records, arrays and aliases inherit from
    this class as valid Tiger types.
    """
    def _get_name(self):
        """
        Getter method for the C{name} property.
        """
        return self._name
    def _set_name(self, name):
        """
        Setter method for the C{name} property.
        """
        self._name = name
    # Name under which this type is declared.
    name = property(_get_name, _set_name)
    def _get_type(self):
        """
        Getter method for the C{type} property.
        """
        return self._type
    # Read-only property: the resolved type of this declaration.  Initialized
    # to None here; presumably filled in by subclasses -- confirm in callers.
    type = property(_get_type)
    def __init__(self, name):
        """
        Initializes the C{TypeDeclarationNode} class.
        @type name: C{str}
        @param name: Name that will be assigned to this new type.
        """
        super(TypeDeclarationNode, self).__init__()
        self._name = name
        self._type = None
| yasserglez/pytiger2c | packages/pytiger2c/ast/typedeclarationnode.py | Python | mit | 1,319 |
import sdl2
from mgl2d.input.game_controller import GameController
class JoystickController(GameController):
    """SDL2-backed GameController implementation for physical joysticks/gamepads."""
    _DEBUG_CONTROLLER = False
    # Axis readings below this magnitude are treated as 0 (stick jitter).
    AXIS_DEAD_ZONE = 4000
    # Raw SDL axis range (signed 16-bit, asymmetric around 0).
    AXIS_MIN_VALUE = -32768
    AXIS_MAX_VALUE = 32767

    def __init__(self):
        super().__init__()
        self._sdl_controller = None
        self._sdl_joystick = None
        self._sdl_joystick_id = None

    def open(self, device_index):
        """Open the SDL game controller at `device_index` and cache its properties."""
        self._sdl_controller = sdl2.SDL_GameControllerOpen(device_index)
        self._sdl_joystick = sdl2.SDL_GameControllerGetJoystick(self._sdl_controller)
        self._sdl_joystick_id = sdl2.SDL_JoystickInstanceID(self._sdl_joystick)
        self._controller_name = sdl2.SDL_JoystickName(self._sdl_joystick)
        # Bug fix: the original line ended with a trailing comma, which made
        # _num_axis a 1-element tuple instead of an int.
        self._num_axis = sdl2.SDL_JoystickNumAxes(self._sdl_joystick)
        self._num_buttons = sdl2.SDL_JoystickNumButtons(self._sdl_joystick)
        self._num_balls = sdl2.SDL_JoystickNumBalls(self._sdl_joystick)
        # Reset button-state tracking (MAX_BUTTONS comes from GameController).
        for btn_index in range(0, self.MAX_BUTTONS):
            self._button_down[btn_index] = 0
            self._button_pressed[btn_index] = 0
            self._button_released[btn_index] = 0
        # SDL uses -1 as the invalid joystick instance id.
        if self._sdl_joystick_id != -1:
            self._connected = True

    def close(self):
        """Release the SDL controller and mark this controller disconnected."""
        sdl2.SDL_GameControllerClose(self._sdl_controller)
        self._sdl_controller = None
        self._sdl_joystick = None
        self._sdl_joystick_id = None
        self._connected = False

    def update(self):
        """Poll per-frame button state, deriving edge-triggered 'pressed' flags."""
        if not self._connected:
            return
        for btn_index in range(0, self._num_buttons):
            self._button_pressed[btn_index] = 0
            is_down = sdl2.SDL_GameControllerGetButton(self._sdl_controller, btn_index)
            # "Pressed" fires only on the down transition (was up, now down).
            if is_down and not self._button_down[btn_index]:
                self._button_pressed[btn_index] = True
            self._button_down[btn_index] = is_down

    def get_axis(self, axis_index):
        """Return the axis value scaled to [-1.0, 1.0], applying a dead zone to sticks."""
        axis_value = sdl2.SDL_GameControllerGetAxis(self._sdl_controller, axis_index)
        # Sticks have a dead zone; triggers do not.
        if axis_index != self.AXIS_TRIGGER_LEFT and axis_index != self.AXIS_TRIGGER_RIGHT:
            if abs(axis_value) < self.AXIS_DEAD_ZONE:
                return 0
        # Scale by the matching bound of the asymmetric raw range.
        return axis_value / self.AXIS_MAX_VALUE if axis_value > 0 else -axis_value / self.AXIS_MIN_VALUE

    def get_axis_digital_value(self, axis_name):
        # Not implemented
        return 0

    def to_string(self):
        # NOTE(review): _controller_name is whatever SDL_JoystickName returns
        # (likely bytes); it renders as b'...' in this f-string -- confirm.
        return f'[\'{self._controller_name}\',axis:{self._num_axis},buttons:{self._num_buttons}]'
| maxfish/mgl2d | mgl2d/input/joystick_controller.py | Python | mit | 2,557 |
# -*- coding: utf-8 -*-
#
# MIT License
#
# Copyright (c) 2017 Michal Stefanik <stefanik dot m@mail.muni.cz>, Tomas Jirsik <jirsik@ics.muni.cz>
# Institute of Computer Science, Masaryk University
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
"""
Description: A method for computing statistics for hosts in network. Computed statistics
for each host each window contain:
- a list of top n most active ports as sorted by a number of flows on a given port
Usage:
host_daily_profile.py -iz <input-zookeeper-hostname>:<input-zookeeper-port> -it <input-topic>
-oz <output-zookeeper-hostname>:<output-zookeeper-port> -ot <output-topic>
To run this on the Stream4Flow, you need to receive flows by IPFIXCol and make them available via Kafka topic. Then
you can run the example
$ ./run-application.sh .statistics/hosts_profiling/host_daily_profile.py -iz producer:2181 -it host.stats
-oz producer:9092 -ot results.daily
"""
import sys # Common system functions
import os # Common operating system functions
import argparse # Arguments parser
import ujson as json # Fast JSON parser
import socket # Socket interface
import time # Time handling
import ipaddress # IP address handling
from termcolor import cprint # Colors in the console output
from pyspark import SparkContext # Spark API
from pyspark.streaming import StreamingContext # Spark streaming API
from pyspark.streaming.kafka import KafkaUtils # Spark streaming Kafka receiver
from kafka import KafkaProducer # Kafka Python client
from collections import namedtuple
# casting structures
IPStats = namedtuple('IPStats', 'ports dst_ips http_hosts')
StatsItem = namedtuple('StatsItem', 'packets bytes flows')
ZERO_ITEM = StatsItem(0, 0, 0) # neutral item used if no new data about the IP was collected in recent interval
# temporal constants
# default values are overridden from input params if available
hourly_interval = 3600 # aggregation interval for one item of temporal array
daily_interval = 86400 # collection interval of all aggregations as items in temporal array
# Global hour counter, bumped once per hourly window by increment(); used by
# modulate_position() to rotate slots inside each host's temporal array.
INCREMENT = 0
# temporal methods resolving the temporal array consistency and construction:
def increment():
    """
    Increment the global hour counter.

    Called once per hourly window so that INCREMENT stays consistent with the
    number of hourly intervals elapsed since the application started.
    """
    global INCREMENT
    INCREMENT += 1
def modulate_position(timestamp):
    """
    Compute the slot index for an entry inside the time-sorted activity array
    of an IP, based on the hour-counter value attached to the entry and the
    current global INCREMENT.

    :param timestamp: hour-counter value recorded with the entry
    :return: index in the range [0, time_dimension)
    """
    age = INCREMENT - timestamp
    return age % time_dimension
def update_array(array, position, value):
    """
    Store `value` at index `position` of `array` (in place) and return the
    array.  Exists because an assignment statement cannot appear inside a
    lambda; this wraps the assignment as an expression.

    :param array: list to mutate
    :param position: target index (coerced to int)
    :param value: value to store
    :return: the same, mutated list
    """
    index = int(position)
    array[index] = value
    return array
def initialize_array(value, timestamp):
    """
    Build a fresh temporal array of length time_dimension, filled with
    ZERO_ITEM, carrying `value` at the slot derived from `timestamp`.

    :param value: value to insert
    :param timestamp: hour-counter value used to locate the slot
    :return: the initialized list
    """
    blank = list([ZERO_ITEM] * time_dimension)
    return update_array(blank, modulate_position(timestamp), value)
def merge_init_arrays(a1, a2):
    """
    Combine two equally sized sparse temporal arrays, keeping the non-zero
    entry of either input at each position.  The inputs are expected to be
    disjoint: if both hold data at the same index, -1 is stored there so the
    inconsistency remains traceable.

    :param a1: array of the size of a2
    :param a2: array of the size of a1
    :return: merged array
    """
    merged = []
    for index, left in enumerate(a1):
        right = a2[index]
        if left != ZERO_ITEM and right != ZERO_ITEM:
            # both sides filled -- should not happen; flag the conflict
            merged.append(-1)
        else:
            merged.append(left if left != ZERO_ITEM else right)
    return merged
# post-processing methods for resulting temporal arrays:
def send_to_kafka(data, producer, topic):
    """
    Publish the given data on the specified kafka topic.

    :param data: payload to send (stringified before sending)
    :param producer: producer that delivers the data
    :param topic: name of the receiving kafka topic
    """
    # Debug print - data to be sent to kafka in resulting format
    # print data
    message = str(data)
    producer.send(topic, message)
def process_results(json_rdd, producer, topic):
    """
    Transform given computation results into the JSON format and send them to the specified kafka instance.
    JSON format:
    {"src_ipv4":"<host src IPv4 address>",
     "@type":"host_stats_temporal_profile",
     "stats":{
        <t=1>: {"packets": <val>, "bytes": <val>, "flows": <val>},
        <t=2>: {"packets": <val>, "bytes": <val>, "flows": <val>},
        ...
        <t=time_dimension>: {"packets": <val>, "bytes": <val>, "flows": <val>}
     }
    }
    Where <val> is aggregated sum of the specified attribute in an interval of hourly_interval length
    that has started in time: <current time> - (<entry's t> * hourly_interval) and ended roughly in a time of send
    :param json_rdd: Map in a format: (src IP , [ StatsItem(packets, bytes, flows), ..., StatsItem(packets, bytes, flows) ])
    :param producer: producer that sends the data
    :param topic: name of the receiving kafka topic
    :return: None
    """
    # Python 2 dict iteration (this application targets Spark's Py2 runtime).
    for ip, ip_stats in json_rdd.iteritems():
        stats_dict = dict()
        # Re-key the temporal array by its slot index t = 0..time_dimension-1.
        for stat_idx in range(len(ip_stats)):
            temporal_stats = {"packets": ip_stats[stat_idx].packets,
                              "bytes": ip_stats[stat_idx].bytes,
                              "flows": ip_stats[stat_idx].flows}
            stats_dict[stat_idx] = temporal_stats
        # construct the output object in predefined format
        result_dict = {"@type": "host_stats_temporal_profile",
                       "src_ipv4": ip,
                       "stats": stats_dict}
        # send the processed data in json form (newline-terminated record)
        send_to_kafka(json.dumps(result_dict)+"\n", producer, topic)
    # logging terminal output
    print("%s: Stats of %s IPs parsed and sent" % (time.strftime("%c"), len(json_rdd.keys())))
# main computation methods:
def collect_hourly_stats(stats_json):
    """
    Performs a hourly aggregation on input data, which result is to be collected as items of daily aggregation
    :param stats_json: RDDs of stats in json format matching the output format of host_stats.py application
    :type stats_json: Initialized spark streaming context, with data in json format as in host_stats application
    :return: DStream of (src_ipv4, (StatsItem, INCREMENT)) pairs, one per host per hourly window
    """
    # Tumbling window: length and slide both equal hourly_interval.
    stats_windowed = stats_json.window(hourly_interval, hourly_interval)
    # Key each record by source IP; value is the (packets, bytes, flows) triple.
    stats_windowed_keyed = stats_windowed.map(lambda json_rdd: (json_rdd["src_ipv4"],
                                                                (json_rdd["stats"]["total"]["packets"],
                                                                 json_rdd["stats"]["total"]["bytes"],
                                                                 json_rdd["stats"]["total"]["flow"])
                                                                ))
    # Element-wise sum of the triples for each IP over the hour.
    ip_stats_summed = stats_windowed_keyed.reduceByKey(lambda current, update: (current[0] + update[0],
                                                                                current[1] + update[1],
                                                                                current[2] + update[2]))
    # Tag each aggregate with the current hour counter so collect_daily_stats
    # can place it in the correct slot of the host's temporal array.
    ip_stats_objected = ip_stats_summed.mapValues(lambda summed_values: (StatsItem(*summed_values), INCREMENT))
    return ip_stats_objected
def collect_daily_stats(hourly_stats):
    """
    Aggregation of the time stats of _small_window_data_ in a tuple format (data, timestamp) into a log vector
    in format [data_t_n, data_t_n-1, ... , data_t_n-k] containing the entries of the most k recent
    _small_window_data_ rdd-s where k = time_dimension (= daily_interval/hourly_interval)
    :param hourly_stats: _hourly_stats_ aggregated in hourly_interval window
    :return: DStream of (src_ipv4, [StatsItem x time_dimension]) emitted once per daily_interval
    """
    global INCREMENT
    # set a window of DAY_WINDOW_INTERVAL on small-windowed RDDs
    long_window_base = hourly_stats.window(daily_interval, hourly_interval)
    # Debug print - see how recent incoming RDDs are transformed after each HOUR_WINDOW_INTERVAL
    # long_window_debug = long_window_base.map(lambda rdd: {"ip": rdd[0]
    #                                                       "rdd_timestamp": rdd[1][1],
    #                                                       "current_inc": INCREMENT,
    #                                                       "mod_pos": modulate_position(int(rdd[1][1])),
    #                                                       "value": rdd[1][0]})
    # long_window_debug.pprint()
    # Here, RDDs of small window in format (IP: (data, timestamp)) are mapped into sparse vector=[0, 0, .. , volume, 0]
    # where vector has a size of time_dimension and data inserted on modulated position (see modulate_position())
    # then sparse vectors are combined/merged: "summing-up" nonzero positions (see merge_init_arrays())
    long_window_data_stream = long_window_base.map(lambda rdd: (rdd[0], initialize_array(rdd[1][0], rdd[1][1]))) \
        .reduceByKey(lambda current, update: merge_init_arrays(current, update))
    # position counter is consistent with small window length cycle - here increments on each new data from hourly_stats
    # (the reduce-to-1 collapses the stream so increment() fires exactly once per batch)
    long_window_data_stream.reduce(lambda current, update: 1).foreachRDD(lambda rdd: increment())
    # Debug print of target temporal arrays in interval of a small window
    long_window_data_stream.pprint(5)
    # return the temporal arrays windowed in a daily interval
    return long_window_data_stream.window(hourly_interval, daily_interval)
if __name__ == "__main__":
    # Prepare arguments parser (automatically creates -h argument).
    parser = argparse.ArgumentParser()
    parser.add_argument("-iz", "--input_zookeeper", help="input zookeeper hostname:port", type=str, required=True)
    parser.add_argument("-it", "--input_topic", help="input kafka topic", type=str, required=True)
    parser.add_argument("-oz", "--output_zookeeper", help="output zookeeper hostname:port", type=str, required=True)
    parser.add_argument("-ot", "--output_topic", help="output kafka topic", type=str, required=True)
    parser.add_argument("-sw", "--short_window", help="small window duration", type=int, required=False)
    parser.add_argument("-lw", "--long_window", help="long window duration", type=int, required=False)
    # Parse arguments.
    args = parser.parse_args()
    # if input arguments are filled, override the default temporal values
    if args.short_window and args.long_window:
        hourly_interval = args.short_window
        daily_interval = args.long_window
    # NOTE(review): integer division under Python 2 -- daily_interval is
    # presumably a multiple of hourly_interval; confirm for custom windows.
    time_dimension = daily_interval / hourly_interval # set a number of aggregation entries in temporal array
    print("Time dimension set to %s" % time_dimension)
    # Set variables
    application_name = os.path.basename(sys.argv[0]) # Application name used as identifier
    kafka_partitions = 1 # Number of partitions of the input Kafka topic
    # Spark context initialization
    sc = SparkContext(appName=application_name + " " + " ".join(sys.argv[1:])) # Application name used as the appName
    ssc = StreamingContext(sc, 1) # Spark micro batch is 1 second
    # Initialize input DStream of flows from specified Zookeeper server and Kafka topic
    input_stream = KafkaUtils.createStream(ssc, args.input_zookeeper, "spark-consumer-" + application_name,
                                           {args.input_topic: kafka_partitions})
    # Parse flows in the JSON format
    input_stream_json = input_stream.map(lambda x: json.loads(x[1]))
    # Process data to the defined function.
    hourly_host_statistics = collect_hourly_stats(input_stream_json)
    daily_host_statistics = collect_daily_stats(hourly_host_statistics)
    kafka_producer = KafkaProducer(bootstrap_servers=args.output_zookeeper,
                                   client_id="spark-producer-" + application_name)
    # Transform computed statistics into desired json format and send it to output_host as given in -oh input param
    daily_host_statistics.foreachRDD(lambda rdd: process_results(rdd.collectAsMap(), kafka_producer, args.output_topic))
    # drop the processed RDDs to balance the memory usage
    daily_host_statistics.foreachRDD(lambda rdd: rdd.unpersist())
    # Start input data processing
    ssc.start()
    ssc.awaitTermination()
| CSIRT-MU/Stream4Flow | applications/statistics/hosts_profiling/spark/host_daily_profile.py | Python | mit | 13,684 |
#!/usr/bin/env python
import unittest
import securetrading
from securetrading.test import abstract_test_stobjects
import six
class Test_Request(abstract_test_stobjects.Abstract_Test_StObjects):
    """Unit tests for securetrading.Request."""
    def setUp(self):
        # The abstract base prepares shared fixtures; each test constructs
        # instances of the class under test through self.class_.
        super(Test_Request, self).setUp()
        self.class_ = securetrading.Request
    def test___init__(self):
        # A fresh request carries a unique reference beginning with "A".
        request = self.class_()
        six.assertRegex(self, request["requestreference"], "A[a-z0-9]+")
        self.assertEqual(securetrading.version_info, self.version_info)
    def test__set_cachetoken(self):
        # Expected outcomes: a base64-encoded JSON cachetoken unpacks into
        # datacenterurl/datacenterpath/cachetoken keys, while a plain or
        # undecodable token is stored as the cachetoken verbatim.
        exp1 = self.get_securetrading_request(
            {"datacenterurl": "https://webservices.securetrading.net",
             "datacenterpath": "/json/",
             "cachetoken":
             "17-ae7e511172a07c2fb45db4c73388087e4d850777386a5d72029aaf895\
87f3cf0"})
        exp2 = self.get_securetrading_request(
            {"datacenterurl": "https://webservices.securetrading.net",
             "cachetoken": "17-6a0287dd04497ba8dab257acbd983741f55410b5c709463\
7d8c3f0fb57bd25ec"})
        exp3 = self.get_securetrading_request(
            {"cachetoken": "17-6a0287dd04497ba8dab257acbd983741f55410b5c709463\
7d8c3f0fb57bd25ec"})
        # Test below treats invalid base64 string as cachetoken
        exp4 = self.get_securetrading_request(
            {"cachetoken": "eyJkYXRhY2VudGVydXJsIjogImh0dHBzOi8vd2Vic2VydmljZX\
Muc2VjdXJldHJhZGluZy5uZXQiLCAiY2FjaGV0b2tlbiI6ICIxNy1hZTdlNTExMTcy"})
        tests = [('eyJkYXRhY2VudGVycGF0aCI6ICIvanNvbi8iLCAiZGF0YWNlbnRlcnVybCI\
6ICJodHRwczovL3dlYnNlcnZpY2VzLnNlY3VyZXRyYWRpbmcubmV0IiwgImNhY2hldG9rZW4iOiAiM\
TctYWU3ZTUxMTE3MmEwN2MyZmI0NWRiNGM3MzM4ODA4N2U0ZDg1MDc3NzM4NmE1ZDcyMDI5YWFmODk\
1ODdmM2NmMCJ9', exp1),
                 ('"eyJkYXRhY2VudGVydXJsIjogImh0dHBzOi8vd2Vic2VydmljZXMuc2VjdX\
JldHJhZGluZy5uZXQiLCAiY2FjaGV0b2tlbiI6ICIxNy02YTAyODdkZDA0NDk3YmE4ZGFiMjU3YWNi\
ZDk4Mzc0MWY1NTQxMGI1YzcwOTQ2MzdkOGMzZjBmYjU3YmQyNWVjIn0=', exp2),
                 ('17-6a0287dd04497ba8dab257acbd983741f55410b5c7094637d8c3f0fb\
57bd25ec', exp3),
                 ('eyJkYXRhY2VudGVydXJsIjogImh0dHBzOi8vd2Vic2VydmljZXMuc2VjdXJ\
ldHJhZGluZy5uZXQiLCAiY2FjaGV0b2tlbiI6ICIxNy1hZTdlNTExMTcy', exp4),
                 ]
        for cachetoken, expected in tests:
            request = self.class_()
            request._set_cachetoken(cachetoken)
            for obj in [expected, request]:
                del obj["requestreference"] # Unique for every request object
            self.assertEqual(request, expected)
class Test_Requests(Test_Request):
    """Unit tests for securetrading.Requests (also re-runs the Request tests)."""
    def setUp(self):
        super(Test_Requests, self).setUp()
        self.class_ = securetrading.Requests
    def test_verify(self):
        # Local aliases keep the fixture construction below readable.
        get_requests = self.get_securetrading_requests
        get_request = self.get_securetrading_request
        requests1 = get_requests([])
        requests2 = get_requests(
            [get_request({"a": "b"})])
        requests3 = get_requests(
            [get_request({"a": "b"}),
             get_request({"c": "d"})])
        datacenter_url_dict = {"datacenterurl": "url"}
        requests4 = get_requests(
            [get_request({"a": "b"}),
             get_request(datacenter_url_dict)])
        datacenter_path_dict = {"datacenterpath": "path"}
        requests5 = get_requests(
            [get_request({"a": "b"}),
             get_request(datacenter_path_dict)])
        # Each entry: (fixture, expected exception class, expected error code,
        # expected English message, expected error data).  A None exception
        # means verify() must pass silently.
        tests = [(requests1, None, None, None, None),
                 (requests2, None, None, None, None),
                 (requests3, None, None, None, None),
                 (requests4, securetrading.ApiError,
                  "10", "10 The key 'datacenterurl' must be specifed \
in the outer 'securetrading.Requests' object",
                  ["The key 'datacenterurl' must be specifed in the \
outer 'securetrading.Requests' object"]),
                 (requests5, securetrading.ApiError,
                  "10", "10 The key 'datacenterpath' must be specifed \
in the outer 'securetrading.Requests' object",
                  ["The key 'datacenterpath' must be specifed in the \
outer 'securetrading.Requests' object"]),
                 ]
        for requests, exp_exception, exp_code, exp_english, exp_data in tests:
            if exp_exception is None:
                requests.verify()
            else:
                self.check_st_exception(exp_exception, exp_data, exp_english,
                                        exp_code, requests.verify)
    def test__validate_requests(self):
        get_requests = self.get_securetrading_requests
        get_request = self.get_securetrading_request
        # Only securetrading request objects may appear in the list; a bare
        # dict triggers an AssertionError from the internal validation.
        tests = [([], None, None),
                 ([get_request({"a": "b"})], None, None),
                 ([{"a": "b"}], AssertionError, "Invalid requests specified")
                 ]
        for requests_list, exp_exception, exp_message in tests:
            if exp_exception is None:
                requests = get_requests(requests_list)
            else:
                six.assertRaisesRegex(self, exp_exception, exp_message,
                                      get_requests,
                                      requests_list)
# Allow running this test module directly: `python test_requestobject.py`.
if __name__ == "__main__":
    unittest.main()
| Secure-Trading/PythonAPI | securetrading/test/test_requestobject.py | Python | mit | 5,171 |
# coding: utf-8
from users import models as m
# write your business logic here
| asyncee/django-project-template | src/users/services.py | Python | mit | 81 |
from django.contrib import admin
from .models import Catalog, Item
# Register the tree-traversal models with the default ModelAdmin so they are
# manageable through the Django admin site.
admin.site.register(Catalog)
admin.site.register(Item)
| JacekKarnasiewicz/HomePage | apps/tree_traversal/admin.py | Python | mit | 124 |
"""
A simple HTTP interface for making GET, PUT and POST requests.
"""
import http.client
import json
from urllib.parse import urlparse, urlencode # NOQA
from base64 import b64encode
from functools import partial
from collections import namedtuple
# Result of an HTTP request: payload (raw bytes or, when is_json, the decoded
# object), header dict, numeric HTTP status, and whether payload is JSON.
Response = namedtuple("Response", ("payload", "headers", "status", "is_json"))
def request(verb, host, port, path, payload=None, https=False, headers=None, auth=None, redirect=True):
    """
    Make an HTTP(S) request with the provided HTTP verb, host FQDN, port number, path,
    payload, protocol, headers, and auth information.  Return a `Response` with the
    payload, headers, HTTP status number, and a JSON flag.

    :param verb: HTTP method, e.g. "GET" or "POST".
    :param payload: request body, or None.
    :param headers: optional dict of extra request headers (not mutated).
    :param auth: optional (username, password) pair for HTTP Basic auth.
    :param redirect: when True, a GET answered with 301/302 is re-issued
                     against the Location header.
    """
    # Copy so the caller's dict is not mutated by the header injection below.
    headers = dict(headers) if headers else {}
    headers["User-Agent"] = "GitSavvy Sublime Plug-in"
    if auth:
        username_password = "{}:{}".format(*auth).encode("ascii")
        headers["Authorization"] = "Basic {}".format(b64encode(username_password).decode("ascii"))

    connection = (http.client.HTTPSConnection(host, port)
                  if https
                  else http.client.HTTPConnection(host, port))
    connection.request(verb, path, body=payload, headers=headers)

    response = connection.getresponse()
    response_payload = response.read()
    response_headers = dict(response.getheaders())
    status = response.status
    # .get() guards against responses that omit the Content-Type header.
    is_json = "application/json" in response_headers.get("Content-Type", "")

    if is_json:
        response_payload = json.loads(response_payload.decode("utf-8"))

    response.close()
    connection.close()

    # Bug fix: the original condition `... and status == 301 or status == 302`
    # bound as `(redirect and verb == "GET" and status == 301) or status == 302`
    # due to operator precedence, following ANY 302 regardless of `redirect`
    # or the verb.  Redirect only when requested and the verb is GET.
    if redirect and verb == "GET" and status in (301, 302):
        return request_url(
            verb,
            response_headers["Location"],
            headers=headers,
            auth=auth
        )

    return Response(response_payload, response_headers, status, is_json)
def request_url(verb, url, payload=None, headers=None, auth=None):
    """
    Make an HTTP(S) request against a full URL string, deriving host, port,
    path, protocol, and embedded basic-auth credentials from the URL itself.
    Return the `Response` produced by `request`.
    """
    parsed = urlparse(url)
    https = parsed.scheme == "https"
    # Bug fix: the original `parsed.port or 443 if https else 80` parsed as
    # `(parsed.port or 443) if https else 80`, silently dropping an explicit
    # port on plain-HTTP URLs.  Fall back to the scheme default only when the
    # URL specifies no port.
    default_port = 443 if https else 80
    return request(
        verb,
        parsed.hostname,
        parsed.port or default_port,
        parsed.path,
        payload=payload,
        https=https,
        headers=headers,
        auth=([parsed.username, parsed.password]
              if parsed.username and parsed.password
              else None)
    )
get = partial(request, "GET")
post = partial(request, "POST")
put = partial(request, "PUT")
get_url = partial(request_url, "GET")
post_url = partial(request_url, "POST")
put_url = partial(request_url, "PUT")
| ddevlin/GitSavvy | common/interwebs.py | Python | mit | 2,509 |