text stringlengths 4 1.02M | meta dict |
|---|---|
import rest_framework.serializers
from rest_framework_nested.relations import NestedHyperlinkedIdentityField, NestedHyperlinkedRelatedField
try:
from rest_framework.utils.field_mapping import get_nested_relation_kwargs
except ImportError:
pass
# passing because NestedHyperlinkedModelSerializer can't be used anyway
# if version too old.
class NestedHyperlinkedModelSerializer(rest_framework.serializers.HyperlinkedModelSerializer):
    """
    A ``HyperlinkedModelSerializer`` variant whose hyperlinks carry compound
    (nested) lookup keys instead of a single primary key.  Specifically:

    * a 'url' field is included instead of the 'id' field;
    * relationships to other instances are nested hyperlinks, not primary keys.

    NOTE: this only works with DRF 3.1.0 and above.
    """
    parent_lookup_kwargs = {'parent_pk': 'parent__pk'}
    serializer_url_field = NestedHyperlinkedIdentityField
    serializer_related_field = NestedHyperlinkedRelatedField

    def __init__(self, *args, **kwargs):
        # A caller-supplied mapping overrides the class-level default.
        overridden = kwargs.pop('parent_lookup_kwargs', self.parent_lookup_kwargs)
        self.parent_lookup_kwargs = overridden
        super(NestedHyperlinkedModelSerializer, self).__init__(*args, **kwargs)

    def build_url_field(self, field_name, model_class):
        """Attach the parent lookup mapping to the generated 'url' field."""
        field_class, field_kwargs = super(NestedHyperlinkedModelSerializer, self).build_url_field(
            field_name,
            model_class
        )
        field_kwargs['parent_lookup_kwargs'] = self.parent_lookup_kwargs
        return field_class, field_kwargs

    def build_nested_field(self, field_name, relation_info, nested_depth):
        """
        Create nested serializer fields for forward and reverse relationships,
        one depth level shallower than the current serializer.
        """
        class NestedSerializer(NestedHyperlinkedModelSerializer):
            class Meta:
                model = relation_info.related_model
                depth = nested_depth - 1
                fields = '__all__'

        return NestedSerializer, get_nested_relation_kwargs(relation_info)
| {
"content_hash": "3cf918f31fcdc2d3000c6bf48de4b96c",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 105,
"avg_line_length": 38.48148148148148,
"alnum_prop": 0.6929740134744947,
"repo_name": "alanjds/drf-nested-routers",
"id": "38c50a800d40a3babb47dca3dfc7f45a916cfd19",
"size": "2078",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rest_framework_nested/serializers.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "67210"
}
],
"symlink_target": ""
} |
"""Tests for dopamine.logger."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import pickle
import shutil
from absl import flags
from absl.testing import absltest
from absl.testing import parameterized
from dopamine.discrete_domains import logger
import tensorflow as tf
# absl command-line flag registry handle (conventional in absl test modules).
FLAGS = flags.FLAGS
class LoggerTest(parameterized.TestCase):
    """Unit tests for the dopamine experiment Logger."""

    def setUp(self):
        super(LoggerTest, self).setUp()
        # Start each test with a clean logging directory.
        self._test_subdir = os.path.join('/tmp/dopamine_tests', 'logging')
        shutil.rmtree(self._test_subdir, ignore_errors=True)
        os.makedirs(self._test_subdir)

    def _store_sample_entry(self, exp_logger):
        """Write one key/value pair into exp_logger and return the expected dict."""
        key = 'key'
        val = [1, 2, 3, 4]
        exp_logger[key] = val
        expected_dictionary = {key: val}
        self.assertEqual(expected_dictionary, exp_logger.data)
        return expected_dictionary

    def testLoggingDisabledWithEmptyDirectory(self):
        self.assertFalse(logger.Logger('').is_logging_enabled())

    def testLoggingDisabledWithInvalidDirectory(self):
        self.assertFalse(logger.Logger('/does/not/exist').is_logging_enabled())

    def testLoggingEnabledWithValidDirectory(self):
        self.assertTrue(logger.Logger('/tmp/dopamine_tests').is_logging_enabled())

    def testSetEntry(self):
        exp_logger = logger.Logger('/tmp/dopamine_tests')
        self.assertEmpty(exp_logger.data.keys())
        expected_dictionary = self._store_sample_entry(exp_logger)
        # Calling __setitem__ with the same key must overwrite the previous
        # value.
        new_val = 'new value'
        exp_logger['key'] = new_val
        expected_dictionary['key'] = new_val
        self.assertEqual(expected_dictionary, exp_logger.data)

    def testLogToFileWithInvalidDirectory(self):
        exp_logger = logger.Logger('/does/not/exist')
        self.assertFalse(exp_logger.is_logging_enabled())
        # Logging to a disabled logger must be a silent no-op.
        exp_logger.log_to_file(None, None)

    def testLogToFileWithValidDirectory(self):
        exp_logger = logger.Logger(self._test_subdir)
        self.assertTrue(exp_logger.is_logging_enabled())
        expected_dictionary = self._store_sample_entry(exp_logger)
        iteration_number = 7
        exp_logger.log_to_file('log', iteration_number)
        # The on-disk file must contain the pickled data dictionary.
        log_file = os.path.join(self._test_subdir,
                                'log_{}'.format(iteration_number))
        with tf.io.gfile.GFile(log_file, 'rb') as f:
            contents = f.read()
        self.assertEqual(
            contents,
            pickle.dumps(expected_dictionary, protocol=pickle.HIGHEST_PROTOCOL))

    @parameterized.parameters((2), (4))
    def testGarbageCollectionWithDefaults(self, logs_duration):
        exp_logger = logger.Logger(self._test_subdir, logs_duration=logs_duration)
        self.assertTrue(exp_logger.is_logging_enabled())
        self._store_sample_entry(exp_logger)
        deleted_log_files = 7
        total_log_files = logs_duration + deleted_log_files
        for iteration_number in range(total_log_files):
            exp_logger.log_to_file('log', iteration_number)
        # Only the newest logs_duration files should survive collection.
        for iteration_number in range(total_log_files):
            log_file = os.path.join(self._test_subdir,
                                    'log_{}'.format(iteration_number))
            if iteration_number < deleted_log_files:
                self.assertFalse(tf.io.gfile.exists(log_file))
            else:
                self.assertTrue(tf.io.gfile.exists(log_file))
if __name__ == '__main__':
    # Run the tests with TF2 behavior disabled (TF1 compatibility mode).
    tf.compat.v1.disable_v2_behavior()
    absltest.main()
| {
"content_hash": "b5f5a47c1a4231784447f40ddce3dcf9",
"timestamp": "",
"source": "github",
"line_count": 104,
"max_line_length": 78,
"avg_line_length": 34.56730769230769,
"alnum_prop": 0.6820584144645341,
"repo_name": "google/dopamine",
"id": "955a4a639d8920a23e974a2c6f3df411bc99d5e5",
"size": "4196",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/dopamine/discrete_domains/logger_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2666"
},
{
"name": "HTML",
"bytes": "6143"
},
{
"name": "Jupyter Notebook",
"bytes": "6671613"
},
{
"name": "Python",
"bytes": "791424"
},
{
"name": "Shell",
"bytes": "383"
}
],
"symlink_target": ""
} |
import logging
# Module-level logger; the NullHandler keeps this library silent unless the
# host application configures logging itself.
log = logging.getLogger(__name__)
log.addHandler(logging.NullHandler())
from pymeasure.instruments import Instrument
from pymeasure.adapters import VISAAdapter
from pymeasure.instruments.validators import (
truncated_discrete_set, strict_discrete_set,
truncated_range
)
from time import sleep, time
import numpy as np
import re
class AMI430(Instrument):
    """ Represents the AMI 430 Power supply
    and provides a high-level for interacting with the instrument.

    .. code-block:: python

        magnet = AMI430("TCPIP::web.address.com::7180::SOCKET")

        magnet.coilconst = 1.182         # kGauss/A
        magnet.voltage_limit = 2.2       # Sets the voltage limit in V

        magnet.target_current = 10       # Sets the target current to 10 A
        magnet.target_field = 1          # Sets target field to 1 kGauss

        magnet.ramp_rate_current = 0.0357  # Sets the ramp rate in A/s
        magnet.ramp_rate_field = 0.0422    # Sets the ramp rate in kGauss/s
        magnet.ramp()                    # Initiates the ramping
        magnet.pause()                   # Pauses the ramping
        magnet.magnet_status             # Returns the status of the magnet

        magnet.ramp_to_current(5, 0.05)  # Ramps the current to 5 A at 0.05 A/s
        magnet.shutdown()                # Ramps the current to zero and disables output
    """
def __init__(self, resourceName, **kwargs):
adapter = VISAAdapter(resourceName, read_termination='\n')
super(ami430, self).__init__(
adapter,
"AMI superconducting magnet power supply.",
includeSCPI=True,
**kwargs
)
# Read twice in order to remove welcome/connect message
self.read()
self.read()
maximumfield = 1.00
maximumcurrent = 50.63
coilconst = Instrument.control(
"COIL?", "CONF:COIL %g",
""" A floating point property that sets the coil contant
in kGauss/A. """
)
voltage_limit = Instrument.control(
"VOLT:LIM?", "CONF:VOLT:LIM %g",
""" A floating point property that sets the voltage limit
for charging/discharging the magnet. """
)
target_current = Instrument.control(
"CURR:TARG?", "CONF:CURR:TARG %g",
""" A floating point property that sets the target current
in A for the magnet. """
)
target_field = Instrument.control(
"FIELD:TARG?", "CONF:FIELD:TARG %g",
""" A floating point property that sets the target field
in kGauss for the magnet. """
)
ramp_rate_current = Instrument.control(
"RAMP:RATE:CURR:1?", "CONF:RAMP:RATE:CURR 1,%g",
""" A floating point property that sets the current ramping
rate in A/s. """
)
ramp_rate_field = Instrument.control(
"RAMP:RATE:FIELD:1?", "CONF:RAMP:RATE:FIELD 1,%g,1.00",
""" A floating point property that sets the field ramping
rate in kGauss/s. """
)
magnet_current = Instrument.measurement("CURR:MAG?",
""" Reads the current in Amps of the magnet.
"""
)
supply_current = Instrument.measurement("CURR:SUPP?",
""" Reads the current in Amps of the power supply.
"""
)
field = Instrument.measurement("FIELD:MAG?",
""" Reads the field in kGauss of the magnet.
"""
)
state = Instrument.measurement("STATE?",
""" Reads the field in kGauss of the magnet.
"""
)
def zero(self):
""" Initiates the ramping of the magnetic field to zero
current/field with ramping rate previously set. """
self.write("ZERO")
def pause(self):
""" Pauses the ramping of the magnetic field. """
self.write("PAUSE")
def ramp(self):
""" Initiates the ramping of the magnetic field to set
current/field with ramping rate previously set.
"""
self.write("RAMP")
def has_persistent_switch_enabled(self):
""" Returns a boolean if the persistent switch is enabled. """
return bool(self.ask("PSwitch?"))
def enable_persistent_switch(self):
""" Enables the persistent switch. """
self.write("PSwitch 1")
def disable_persistent_switch(self):
""" Disables the persistent switch. """
self.write("PSwitch 0")
@property
def magnet_status(self):
STATES = {
1: "RAMPING",
2: "HOLDING",
3: "PAUSED",
4: "Ramping in MANUAL UP",
5: "Ramping in MANUAL DOWN",
6: "ZEROING CURRENT in progress",
7: "QUENCH!!!",
8: "AT ZERO CURRENT",
9: "Heating Persistent Switch",
10: "Cooling Persistent Switch"
}
return STATES[self.state]
def ramp_to_current(self, current, rate):
""" Heats up the persistent switch and
ramps the current with set ramp rate.
"""
self.enable_persistent_switch()
self.target_current = current
self.ramp_rate_current = rate
self.wait_for_holding()
self.ramp()
def ramp_to_field(self, field, rate):
""" Heats up the persistent switch and
ramps the current with set ramp rate.
"""
self.enable_persistent_switch()
self.target_field = field
self.ramp_rate_field = rate
self.wait_for_holding()
self.ramp()
    def wait_for_holding(self, should_stop=lambda: False,
                         timeout=800, interval=0.1):
        """
        Block until the supply reports a settled state -- HOLDING (2),
        PAUSED (3) or AT ZERO CURRENT (8) -- polling every ``interval``
        seconds.

        :param should_stop: callable checked on each poll; returning True
            aborts the wait early without raising
        :param timeout: seconds to wait before raising an Exception
        :param interval: polling period in seconds
        """
        t = time()
        # 2 = HOLDING, 3 = PAUSED, 8 = AT ZERO CURRENT (see magnet_status)
        while self.state != 2 and self.state != 3 and self.state != 8:
            sleep(interval)
            if should_stop():
                return
            if (time()-t) > timeout:
                raise Exception("Timed out waiting for AMI430 switch to warm up.")
    def shutdown(self, ramp_rate=0.0357):
        """ Turns on the persistent switch,
        ramps down the current to zero, and turns off the persistent switch.

        :param ramp_rate: current ramp-down rate in A/s
        """
        self.enable_persistent_switch()
        # Wait for the switch heater to settle before ramping down.
        self.wait_for_holding()
        self.ramp_rate_current = ramp_rate
        self.zero()
        self.wait_for_holding()
self.disable_persistent_switch() | {
"content_hash": "d9ea97e30ca2c9ec193a5618b618a295",
"timestamp": "",
"source": "github",
"line_count": 204,
"max_line_length": 93,
"avg_line_length": 31.102941176470587,
"alnum_prop": 0.571946414499606,
"repo_name": "dvspirito/pymeasure",
"id": "e98fa659796c6f0c5e43fac027f194ccffc1900b",
"size": "7502",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pymeasure/instruments/ami/ami430.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "458273"
}
],
"symlink_target": ""
} |
from xml.etree.ElementTree import XML
import os
import logging
from metashare.settings import LOG_HANDLER
import pycountry
# Setup logging support.
LOGGER = logging.getLogger(__name__)
LOGGER.addHandler(LOG_HANDLER)
def read_langs(filename):
    """ Parse the language XML file and return its root element.

    Returns None (after logging an error) when ``filename`` does not exist.
    """
    if not os.path.isfile(filename):
        LOGGER.error('read_langs: {0} not found'.format(filename))
        return None
    # BUG FIX: the original used os.open/os.read(fd, 10000), which leaked the
    # file descriptor (never closed) and silently truncated any file larger
    # than 10 kB.  A context-managed full read avoids both problems.
    with open(filename, 'rb') as file_hnd:
        return XML(file_hnd.read())
def read_languages():
    """ Return (index, alpha2, name) tuples for every pycountry language
    that has a two-letter (alpha2) code.

    Languages without an alpha2 attribute are skipped, exactly as before;
    the index still reflects the position in ``pycountry.languages.objects``.
    """
    langs = pycountry.languages
    lang_list = []
    # enumerate() replaces the original range(len(...)) index loop and the
    # dead commented-out else branch.
    for index, lang in enumerate(langs.objects):
        if hasattr(lang, 'alpha2'):
            lang_list.append((index, lang.alpha2, lang.name))
    return lang_list
def read_lang_alpha2():
    """ Return the alpha2 code of every pycountry language that has one. """
    # The original iterated by index (the index was unused) and wrapped
    # lang.alpha2 in redundant parentheses -- not a tuple, just the value.
    # A comprehension states the intent directly.
    return [lang.alpha2
            for lang in pycountry.languages.objects
            if hasattr(lang, 'alpha2')]
def get_lang_list(xml_tree):
    """ Extract (id, name) pairs from every <lang> element of ``xml_tree``. """
    return [(element.find('id').text, element.find('name').text)
            for element in xml_tree.findall('lang')]
| {
"content_hash": "d11543b97d93ad8f32beaac6008de1a2",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 66,
"avg_line_length": 28.274509803921568,
"alnum_prop": 0.6178918169209431,
"repo_name": "zeehio/META-SHARE",
"id": "37f29cd54c53a2b03b3fbfcac05d149ded14d54c",
"size": "1443",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "metashare/repository/editor/lang.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "7362"
},
{
"name": "C",
"bytes": "321"
},
{
"name": "C++",
"bytes": "112277"
},
{
"name": "CSS",
"bytes": "125117"
},
{
"name": "HTML",
"bytes": "2956138"
},
{
"name": "Java",
"bytes": "12780"
},
{
"name": "JavaScript",
"bytes": "201032"
},
{
"name": "M4",
"bytes": "8416"
},
{
"name": "Makefile",
"bytes": "26172"
},
{
"name": "Python",
"bytes": "4084877"
},
{
"name": "Shell",
"bytes": "121386"
},
{
"name": "XSLT",
"bytes": "473763"
}
],
"symlink_target": ""
} |
import responses
from requests.exceptions import HTTPError
from gitlab_tests.base_test import BaseTest
from response_data.deploy_keys import *
class TestGetAllDeployKeys(BaseTest):
    """Tests for Gitlab.get_all_deploy_keys()."""

    def _stub_deploy_keys(self, status, **payload):
        """Register a canned GET /deploy_keys response."""
        responses.add(
            responses.GET,
            self.gitlab.api_url + '/deploy_keys',
            status=status,
            content_type='application/json',
            **payload)

    @responses.activate
    def test_get_all_deploy_keys(self):
        self._stub_deploy_keys(200, json=get_deploy_keys)
        self.assertEqual(get_deploy_keys, self.gitlab.get_all_deploy_keys())

    @responses.activate
    def test_get_all_deploy_keys_empty_list(self):
        self._stub_deploy_keys(200, json=[])
        self.assertEqual([], self.gitlab.get_all_deploy_keys())

    @responses.activate
    def test_get_all_deploy_keys_exception(self):
        self._stub_deploy_keys(404, body='{"error":"404 Not Found"}')
        # HTTP errors must surface when suppression is turned off.
        self.gitlab.suppress_http_error = False
        self.assertRaises(HTTPError, self.gitlab.get_all_deploy_keys)
        self.gitlab.suppress_http_error = True
class TestEnableDeployKeys(BaseTest):
    """Tests for Gitlab.enable_deploy_key()."""

    @responses.activate
    def test_enable_deploy_key(self):
        responses.add(
            responses.POST,
            self.gitlab.api_url + '/projects/5/deploy_keys/1/enable',
            status=201,
            content_type='application/json',
            json=get_deploy_keys[0])
        self.assertEqual(get_deploy_keys[0], self.gitlab.enable_deploy_key(5, 1))

    @responses.activate
    def test_enable_deploy_key_exception(self):
        responses.add(
            responses.POST,
            self.gitlab.api_url + '/projects/5/deploy_keys/2/enable',
            status=500,
            content_type='application/json',
            body='{"message": "500 Internal Server Error"}')
        # HTTP errors must surface when suppression is turned off.
        self.gitlab.suppress_http_error = False
        self.assertRaises(HTTPError, self.gitlab.enable_deploy_key, 5, 2)
        self.gitlab.suppress_http_error = True
| {
"content_hash": "f9a269a0a4306f11909a293b0d0f6b25",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 81,
"avg_line_length": 32.6764705882353,
"alnum_prop": 0.6134113411341134,
"repo_name": "pyapi-gitlab/pyapi-gitlab",
"id": "20d40b6fd8ed149d60a8ddfa295f11f0953ac6e7",
"size": "2222",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "gitlab_tests/test_deploy_keys.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "120571"
},
{
"name": "Shell",
"bytes": "3576"
}
],
"symlink_target": ""
} |
import math
import string
import sys
import struct
import matplotlib
import matplotlib.pyplot as pyplot
#import utils
import numpy as np
#import array
#import astLib.astStats as astStats
import cPickle
import asciitable
import scipy.ndimage
import scipy.stats as ss
import scipy as sp
import pyfits
import make_color_image
import os
# The camera class performs the basic operations.
# It takes as input 10 parameters from the CAMERAX-PARAMETERS HDUs created by Sunrise
# The position and FOV units are in KPC
# It returns an object containing these data plus methods for converting generic
# x,y,z coordinates from the simulation frame (in Physical kpc!!) into a camera-based coordinate system.
# The camera coordinates are defined such that the axis ranges are [-1,1].
# The return coordinates can be modified to use a pixel-based grid instead, but this more generic function can be used for both the
# CANDELized and perfect images (i.e., on the same axis extent, in matplotlib terms)
# There is one remaining uncertainty -- the sense of the rows and columns in the stored images (and how they are read into a given language).
# In Python:pyfits/astropy, given simulation coordinates, the returned pixel values correspond to the location on the image given the following assumptions:
# The "imshow" command was run with origin='lower' and extent=(-1,1,-1,1)
# The images returned by pyfits from the broadband.fits or _candelized_noise.fits must be **TRANSPOSED** first
# Presumably there are other iterations of these two settings (or other ways to manipulate the images) that will be satisfactory (or better, if the reason they work is known).
# -Greg Snyder, 8/21/2014
class camera:
    """Reconstructs the Sunrise pinhole camera model.

    Takes the 10 parameters stored in a CAMERAX-PARAMETERS HDU: the camera
    position (x, y, z) in physical kpc, the viewing-direction and "up" unit
    vectors, the linear field of view in kpc, and the image shape.  Converts
    simulation-frame coordinates into camera-plane coordinates spanning
    [-1, 1] on both axes.
    """
    def __init__(self, x, y, z, dirx, diry, dirz, upx, upy, upz, fov, shape):
        self.x = x
        self.y = y
        self.z = z
        self.dirx = dirx  # x3 unit vector w/ ref to lab frame
        self.diry = diry
        self.dirz = dirz
        self.upx = upx  # x2 unit vector
        self.upy = upy
        self.upz = upz
        self.fov = fov
        # These vectors follow the convention at
        # http://en.wikipedia.org/wiki/Pinhole_camera_model
        self.x3vector = np.asarray([self.dirx, self.diry, self.dirz])
        self.x2vector = np.asarray([self.upx, self.upy, self.upz])
        self.x1vector = np.cross(self.x2vector, self.x3vector)
        self.x1vector = self.x1vector / np.linalg.norm(self.x1vector)
        self.shape = shape

    def express_in_camcoords(self, x, y, z):
        """Rotate simulation-frame coordinates into the camera frame.

        Computes A' = M A, where the rows of M are the camera's unit vectors
        expressed in the (cartesian) simulation frame -- i.e. dot products of
        the camera axes with the original axes.
        """
        new_x = x*self.x1vector[0] + y*self.x1vector[1] + z*self.x1vector[2]
        new_y = x*self.x2vector[0] + y*self.x2vector[1] + z*self.x2vector[2]
        new_z = x*self.x3vector[0] + y*self.x3vector[1] + z*self.x3vector[2]
        return np.asarray([new_x, new_y, new_z])

    def xyz_to_pixelvals(self, x, y, z):
        """Project a simulation-frame point onto the image plane.

        Returns (y1, y2), each spanning [-1, 1] across the field of view.
        See http://en.wikipedia.org/wiki/Pinhole_camera_model
        """
        camdist = (self.x**2 + self.y**2 + self.z**2)**0.5
        camvec = self.express_in_camcoords(x, y, z)
        # Define the focal length such that image values span from -1 to 1.
        f = camdist/(0.5*self.fov)
        y1 = (f/camdist)*camvec[0]*1.0
        y2 = (f/camdist)*camvec[1]*1.0
        return y1, y2

    def process_clump_data(self, clumpdata):
        """Convert a table of Mandelker clump data into image-plane arrays.

        ``clumpdata`` must expose 'col4'/'col5'/'col6' arrays (x, y, z in
        physical kpc); returns (y1_array, y2_array).
        """
        x = clumpdata['col4']
        y = clumpdata['col5']
        z = clumpdata['col6']
        N = x.shape[0]
        # BUG FIX: the original used np.ndarray(shape=(N))*0.0, which reads
        # uninitialized memory -- NaN/Inf garbage survives multiplication by
        # zero.  np.zeros guarantees a clean buffer.
        y1_array = np.zeros(N)
        y2_array = np.zeros(N)
        for i in range(N):
            y1_array[i], y2_array[i] = self.xyz_to_pixelvals(x[i], y[i], z[i])
        return y1_array, y2_array
#set_camobj_from_hdu: Helper function taking a pyfits HDU, the hdu['CAMERAX-PARAMETERS'], and returning an initialized camera object.
def set_camobj_from_hdu(camparhdu_1):
    """Build an initialized camera object from a CAMERAX-PARAMETERS HDU."""
    header = camparhdu_1.header
    position = [header.get(k) for k in ('CAMPOSX', 'CAMPOSY', 'CAMPOSZ')]
    direction = [header.get(k) for k in ('CAMDIRX', 'CAMDIRY', 'CAMDIRZ')]
    up = [header.get(k) for k in ('CAMUPX', 'CAMUPY', 'CAMUPZ')]
    fov_kpc = header.get('linear_fov')
    args = position + direction + up + [fov_kpc, camparhdu_1.data.shape]
    return camera(*args)
#The main example script demonstrating the use of the camera object.
if __name__=="__main__":
    # Example driver: overlay Mandelker clump-catalog positions on both the
    # raw Sunrise images and their CANDELized counterparts, for 3 cameras.
    #Note: You'll need to edit these paths to get the example working.
    clumpcat = '/Users/gfs/Documents/Professional/HydroART_Morphology/VELA26_clumps/clump_catalogue/Nir_clump_cat_a0.420.txt'
    clumpdat = asciitable.read(clumpcat,data_start=6) #this call skips the "Bulge" clump because there is no whitespace between the first two entries of that line.
    imagefile = '/Users/gfs/Documents/Professional/HydroART_Morphology/VELA26/VELA26_a0.420_0003014__skipir/broadbandz.fits'
    esclump = clumpdat[0] #for the example above, this one is the "ex-situ" clump
    print esclump
    # Galaxy-frame (col4-6) and box-frame (col7-9) clump coordinates.
    galx = esclump['col4'] ; galy = esclump['col5'] ; galz = esclump['col6']
    boxx = esclump['col7'] ; boxy = esclump['col8'] ; boxz = esclump['col9']
    print galx, galy, galz
    print boxx, boxy, boxz
    next10 = clumpdat[1:]
    #simply the rest of the example catalog entries
    #Open the broadbandz.fits file corresponding to this snapshot
    bb = pyfits.open(imagefile)
    bb.info()
    #Load in the images of interest, but especially the CAMERAX-PARAMETERS HDU
    camparhdu_1 = bb['CAMERA1-PARAMETERS'] #CAMERA1
    camhdu_1 = bb['CAMERA1-BROADBAND']
    #12,15,17 == F606, F850, F125... just for example
    F606_1 = camhdu_1.data[12,:,:]
    F850_1 = camhdu_1.data[15,:,:]
    F125_1 = camhdu_1.data[17,:,:]
    #Create camera object from campar.
    camobj_1 = set_camobj_from_hdu(camparhdu_1)
    #This shows how the object works on a single entry of x,y,z (can be anything).
    y1_1,y2_1 = camobj_1.xyz_to_pixelvals(galx,galy,galz)
    #Return values span -1 to 1.
    #This now does the same as the previous line, but for an entire asciitable full of Mandelker clump catalog lines.
    y1_array_1, y2_array_1 = camobj_1.process_clump_data(next10)
    #repeat for cameras of interest
    camparhdu_0 = bb['CAMERA0-PARAMETERS'] #CAMERA1
    camhdu_0 = bb['CAMERA0-BROADBAND']
    F606_0 = camhdu_0.data[12,:,:]
    F850_0 = camhdu_0.data[15,:,:]
    F125_0 = camhdu_0.data[17,:,:]
    camobj_0 = set_camobj_from_hdu(camparhdu_0)
    y1_0,y2_0 = camobj_0.xyz_to_pixelvals(galx,galy,galz) #these span -1 to 1
    y1_array_0, y2_array_0 = camobj_0.process_clump_data(next10)
    camparhdu_6 = bb['CAMERA6-PARAMETERS'] #CAMERA1
    camhdu_6 = bb['CAMERA6-BROADBAND']
    F606_6 = camhdu_6.data[12,:,:]
    F850_6 = camhdu_6.data[15,:,:]
    F125_6 = camhdu_6.data[17,:,:]
    camobj_6 = set_camobj_from_hdu(camparhdu_6)
    y1_6,y2_6 = camobj_6.xyz_to_pixelvals(galx,galy,galz) #these span -1 to 1
    y1_array_6, y2_array_6 = camobj_6.process_clump_data(next10)
    #Grab corresponding CANDELized images
    candels_dir = '/Users/gfs/Documents/Professional/HydroART_Morphology/VELA26/VELA26_a0.420_0003014__skipir/images/'
    c_0_f606 = pyfits.open(os.path.join(candels_dir,'VELA26_a0.420_0003014__skipir_CAMERA0-BROADBAND_F606W_candelized_noise.fits'))[0].data
    c_0_f850 = pyfits.open(os.path.join(candels_dir,'VELA26_a0.420_0003014__skipir_CAMERA0-BROADBAND_F850LP_candelized_noise.fits'))[0].data
    c_0_f125 = pyfits.open(os.path.join(candels_dir,'VELA26_a0.420_0003014__skipir_CAMERA0-BROADBAND_F125W_candelized_noise.fits'))[0].data
    c_1_f606 = pyfits.open(os.path.join(candels_dir,'VELA26_a0.420_0003014__skipir_CAMERA1-BROADBAND_F606W_candelized_noise.fits'))[0].data
    c_1_f850 = pyfits.open(os.path.join(candels_dir,'VELA26_a0.420_0003014__skipir_CAMERA1-BROADBAND_F850LP_candelized_noise.fits'))[0].data
    c_1_f125 = pyfits.open(os.path.join(candels_dir,'VELA26_a0.420_0003014__skipir_CAMERA1-BROADBAND_F125W_candelized_noise.fits'))[0].data
    c_6_f606 = pyfits.open(os.path.join(candels_dir,'VELA26_a0.420_0003014__skipir_CAMERA6-BROADBAND_F606W_candelized_noise.fits'))[0].data
    c_6_f850 = pyfits.open(os.path.join(candels_dir,'VELA26_a0.420_0003014__skipir_CAMERA6-BROADBAND_F850LP_candelized_noise.fits'))[0].data
    c_6_f125 = pyfits.open(os.path.join(candels_dir,'VELA26_a0.420_0003014__skipir_CAMERA6-BROADBAND_F125W_candelized_noise.fits'))[0].data
    #must convert to reasonable flux units (!!)
    b_ZP = pyfits.open(os.path.join(candels_dir,'VELA26_a0.420_0003014__skipir_CAMERA0-BROADBAND_F606W_candelized_noise.fits'))[0].header.get('AB_ZP')
    g_ZP = pyfits.open(os.path.join(candels_dir,'VELA26_a0.420_0003014__skipir_CAMERA0-BROADBAND_F850LP_candelized_noise.fits'))[0].header.get('AB_ZP')
    r_ZP = pyfits.open(os.path.join(candels_dir,'VELA26_a0.420_0003014__skipir_CAMERA0-BROADBAND_F125W_candelized_noise.fits'))[0].header.get('AB_ZP')
    # AB zero-points -> multiplicative factors (scaled by 1e14 for display).
    b_ZPfact = 10.0**(-0.4*np.float(b_ZP)) *1.0e14
    g_ZPfact = 10.0**(-0.4*np.float(g_ZP)) *1.0e14
    r_ZPfact = 10.0**(-0.4*np.float(r_ZP)) *1.0e14
    #Create example plot.
    fDISK = pyplot.figure(figsize=(12,8))
    pyplot.subplots_adjust(left=0, right=0.999, bottom=0, top=0.999,wspace=0.0,hspace=0.0)
    DISKnum = fDISK.number
    #########
    #THIS PART I JUST PLAYED AROUND WITH UNTIL IT WORKED
    #IMPORTANT
    # Images are TRANSPOSED so simulation coordinates line up with
    # imshow(origin='lower', extent=(-1,1,-1,1)) -- see header notes above.
    bSIM_1 = np.transpose(F606_1) ; gSIM_1 = np.transpose(F850_1) ; rSIM_1 = np.transpose(F125_1)
    bSIM_0 = np.transpose(F606_0) ; gSIM_0 = np.transpose(F850_0) ; rSIM_0 = np.transpose(F125_0)
    bSIM_6 = np.transpose(F606_6) ; gSIM_6 = np.transpose(F850_6) ; rSIM_6 = np.transpose(F125_6)
    #multiply by a factor to convert into nu-based flux units from counts
    bHST_1 = np.transpose(c_1_f606)*b_ZPfact ; gHST_1 = np.transpose(c_1_f850)*g_ZPfact ; rHST_1 = np.transpose(c_1_f125)*r_ZPfact
    bHST_0 = np.transpose(c_0_f606)*b_ZPfact ; gHST_0 = np.transpose(c_0_f850)*g_ZPfact ; rHST_0 = np.transpose(c_0_f125)*r_ZPfact
    bHST_6 = np.transpose(c_6_f606)*b_ZPfact ; gHST_6 = np.transpose(c_6_f850)*g_ZPfact ; rHST_6 = np.transpose(c_6_f125)*r_ZPfact
    #IMPORTANT
    #HERE THERE BE TRANPOSES
    #########
    #to get the colors to look reasonable (started from lambda ratios and modify until happy)
    bscale = 1.0 ; gscale = 6.0/10.0 ; rscale = 6.0/12.5
    #Create RGB images in Lupton+ 04 Scheme
    # NOTE(review): the hires images DIVIDE by gscale/rscale while the lores
    # images MULTIPLY -- looks intentional (tuned by eye) but worth confirming.
    rgbdata_edge_hires = make_color_image.make_interactive(bSIM_1/bscale,gSIM_1/gscale,rSIM_1/rscale,10.0,9.0)
    rgbdata_face_hires = make_color_image.make_interactive(bSIM_0/bscale,gSIM_0/gscale,rSIM_0/rscale,10.0,9.0)
    rgbdata_6_hires = make_color_image.make_interactive(bSIM_6/bscale,gSIM_6/gscale,rSIM_6/rscale,10.0,9.0)
    rgbdata_edge_lores = make_color_image.make_interactive(bHST_1/bscale,gHST_1*gscale,rHST_1*rscale,0.006,4.0)
    rgbdata_face_lores = make_color_image.make_interactive(bHST_0/bscale,gHST_0*gscale,rHST_0*rscale,0.006,4.0)
    rgbdata_6_lores = make_color_image.make_interactive(bHST_6/bscale,gHST_6*gscale,rHST_6*rscale,0.006,4.0)
    #plot 'em.
    # 2x3 grid: bottom row = hires (raw Sunrise), top row = CANDELized.
    # Green circle = example clump, yellow circles = remaining catalog clumps.
    axiHST = pyplot.axes([0.0,0.0,0.333,0.5],frameon=True,axisbg='black')
    axiHST.set_xticks([]) ; axiHST.set_yticks([])
    axiHST.imshow(rgbdata_edge_hires,interpolation='nearest',origin='lower',extent=[-1,1,-1,1])
    axiHST.plot([y1_1],[y2_1],'ok',markerfacecolor='None',markeredgecolor='Lime',markersize=10,alpha=0.75)
    axiHST.plot(y1_array_1,y2_array_1,'ok',markerfacecolor='None',markeredgecolor='Yellow',markersize=5,alpha=0.75)
    axiHST = pyplot.axes([0.333,0.0,0.333,0.5],frameon=True,axisbg='black')
    axiHST.set_xticks([]) ; axiHST.set_yticks([])
    axiHST.imshow(rgbdata_face_hires,interpolation='nearest',origin='lower',extent=[-1,1,-1,1])
    axiHST.plot([y1_0],[y2_0],'ok',markerfacecolor='None',markeredgecolor='Lime',markersize=10,alpha=0.75)
    axiHST.plot(y1_array_0,y2_array_0,'ok',markerfacecolor='None',markeredgecolor='Yellow',markersize=5,alpha=0.75)
    axiHST = pyplot.axes([0.666,0.0,0.333,0.5],frameon=True,axisbg='black')
    axiHST.set_xticks([]) ; axiHST.set_yticks([])
    axiHST.imshow(rgbdata_6_hires,interpolation='nearest',origin='lower',extent=[-1,1,-1,1])
    axiHST.plot([y1_6],[y2_6],'ok',markerfacecolor='None',markeredgecolor='Lime',markersize=10,alpha=0.75)
    axiHST.plot(y1_array_6,y2_array_6,'ok',markerfacecolor='None',markeredgecolor='Yellow',markersize=5,alpha=0.75)
    axiHST = pyplot.axes([0.0,0.5,0.333,0.5],frameon=True,axisbg='black')
    axiHST.set_xticks([]) ; axiHST.set_yticks([])
    axiHST.imshow(rgbdata_edge_lores,interpolation='nearest',origin='lower',extent=[-1,1,-1,1])
    axiHST.plot([y1_1],[y2_1],'ok',markerfacecolor='None',markeredgecolor='Lime',markersize=10,alpha=0.75)
    axiHST.plot(y1_array_1,y2_array_1,'ok',markerfacecolor='None',markeredgecolor='Yellow',markersize=5,alpha=0.75)
    axiHST = pyplot.axes([0.333,0.5,0.333,0.5],frameon=True,axisbg='black')
    axiHST.set_xticks([]) ; axiHST.set_yticks([])
    axiHST.imshow(rgbdata_face_lores,interpolation='nearest',origin='lower',extent=[-1,1,-1,1])
    axiHST.plot([y1_0],[y2_0],'ok',markerfacecolor='None',markeredgecolor='Lime',markersize=10,alpha=0.75)
    axiHST.plot(y1_array_0,y2_array_0,'ok',markerfacecolor='None',markeredgecolor='Yellow',markersize=5,alpha=0.75)
    axiHST = pyplot.axes([0.666,0.5,0.333,0.5],frameon=True,axisbg='black')
    axiHST.set_xticks([]) ; axiHST.set_yticks([])
    axiHST.imshow(rgbdata_6_lores,interpolation='nearest',origin='lower',extent=[-1,1,-1,1])
    axiHST.plot([y1_6],[y2_6],'ok',markerfacecolor='None',markeredgecolor='Lime',markersize=10,alpha=0.75)
    axiHST.plot(y1_array_6,y2_array_6,'ok',markerfacecolor='None',markeredgecolor='Yellow',markersize=5,alpha=0.75)
    #save.
    fDISK.savefig('ClumpExample.pdf',format='pdf',dpi=500)
    pyplot.close(fDISK)
| {
"content_hash": "070cc311a27dec4d92d914a29b62383a",
"timestamp": "",
"source": "github",
"line_count": 269,
"max_line_length": 179,
"avg_line_length": 55.520446096654275,
"alnum_prop": 0.690592567793773,
"repo_name": "gsnyder206/mock-surveys",
"id": "41cf2716126b05e5239362d33b3e4fb6db3d78dd",
"size": "14953",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "original_illustris/vela_clumps_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "IDL",
"bytes": "42610"
},
{
"name": "Jupyter Notebook",
"bytes": "7514654"
},
{
"name": "Prolog",
"bytes": "24960"
},
{
"name": "Python",
"bytes": "664895"
},
{
"name": "Roff",
"bytes": "19364"
},
{
"name": "Shell",
"bytes": "465"
}
],
"symlink_target": ""
} |
import abc
import copy
import os
import oslo_messaging
import six
from neutron.agent.linux import ip_lib
from neutron.common import rpc as n_rpc
from neutron import context
from neutron_lib import constants
from neutron_lib.plugins import directory
from neutron_vpnaas.services.vpn import device_drivers
from neutron_vpnaas.services.vpn.device_drivers import fedora_strongswan_ipsec
from neutron_vpnaas.services.vpn.device_drivers import ipsec
from neutron_vpnaas.services.vpn.device_drivers import strongswan_ipsec
from nuage_neutron.vpnaas.common import topics
from nuage_neutron.vpnaas.nuage_interface import NuageInterfaceDriver
from oslo_concurrency import lockutils
from oslo_config import cfg
from oslo_log import log as logging
from oslo_service import loopingcall
LOG = logging.getLogger(__name__)
# Directory containing this module (used as a base for template lookups).
TEMPLATE_PATH = os.path.dirname(os.path.abspath(__file__))
# Key under which ipsec site connections appear in vpnservice dicts.
IPSEC_CONNS = 'ipsec_site_connections'
class NuageIPsecVpnDriverApi(object):
    """IPSecVpnDriver RPC api."""

    def __init__(self, topic):
        self.client = n_rpc.get_client(
            oslo_messaging.Target(topic=topic, version='1.0'))

    def get_vpn_services_on_host(self, context, host):
        """Fetch the vpnservices configured on ``host``.

        Each returned service includes its related ipsec_site_connection,
        ikepolicy and ipsecpolicy.
        """
        return self.client.prepare().call(
            context, 'get_vpn_services_on_host', host=host)

    def update_status(self, context, status):
        """Push a status update for VPNServices back to the server."""
        return self.client.prepare().call(context, 'update_status', status=status)
@six.add_metaclass(abc.ABCMeta)
class NuageIPsecDriver(device_drivers.DeviceDriver):
    # Abstract agent-side VPN device driver for Nuage.  Subclasses provide
    # create_process() selecting the concrete swan backend (OpenSwan,
    # StrongSwan, Fedora StrongSwan).  Keeps per-router process and status
    # caches and reports status changes back to the server over RPC.
    def __init__(self, vpn_service, host):
        self.conf = vpn_service.conf
        self.host = host
        # RPC server side: consume on the agent topic so the server-side
        # driver can push vpnservice / router events to this agent.
        self.conn = n_rpc.create_connection(new=True)
        self.context = context.get_admin_context_without_session()
        self.topic = topics.NUAGE_IPSEC_AGENT_TOPIC
        self.processes = {}            # process_id -> swan process manager
        self.routers = {}              # router_id -> router_id (tracked set)
        self.process_status_cache = {} # process_id -> last reported status
        self.endpoints = [self]
        self.conn.create_consumer(self.topic, self.endpoints)
        self.conn.consume_in_threads()
        # RPC client towards the server-side driver.
        self.agent_rpc = NuageIPsecVpnDriverApi(
            topics.NUAGE_IPSEC_DRIVER_TOPIC)
        # Push status changes to the server every 20 seconds.
        self.process_status_cache_check = loopingcall.FixedIntervalLoopingCall(
            self.report_status, self.context)
        self.process_status_cache_check.start(
            interval=20)
        self.nuage_if_driver = NuageInterfaceDriver(cfg.CONF)
    def _get_l3_plugin(self):
        # Look up the core L3 service plugin from the plugin directory.
        return directory.get_plugin(constants.L3)
    def get_namespace(self, router_id):
        """Get namespace of router.
        :router_id: router_id
        :returns: namespace string.
        """
        return 'vpn-' + router_id
    def vpnservice_updated(self, context, **kwargs):
        """Vpnservice updated rpc handler
        VPN Service Driver will call this method
        when vpnservices updated.
        Then this method start sync with server.
        """
        router = kwargs.get('router', None)
        self.sync(context, [router] if router else [])
    def tracking(self, context, **kwargs):
        """Handling create router event.
        Agent calls this method, when the process namespace is ready.
        Note: process_id == router_id == vpnservice_id
        """
        router = kwargs.get('router', None)
        process_id = router['id']
        self.routers[process_id] = process_id
        if process_id in self.processes:
            # In case of vpnservice is created
            # before vpn service namespace
            process = self.processes[process_id]
            process.enable()
    def non_tracking(self, context, **kwargs):
        # Router namespace going away: stop the swan process and forget
        # the router.
        router = kwargs.get('router', None)
        process_id = router['id']
        self.destroy_process(process_id)
        if process_id in self.routers:
            del self.routers[process_id]
    def ensure_process(self, process_id, vpnservice=None):
        """Ensuring process.
        If the process doesn't exist, it will create process
        and store it in self.processs
        """
        process = self.processes.get(process_id)
        if not process or not process.namespace:
            namespace = self.get_namespace(process_id)
            process = self.create_process(
                process_id,
                vpnservice,
                namespace)
            self.processes[process_id] = process
        elif vpnservice:
            # Process already exists; refresh its service definition.
            process.update_vpnservice(vpnservice)
        return process
    @lockutils.synchronized('vpn-agent', 'neutron-')
    def sync(self, context, routers):
        """Sync status with server side.
        :param context: context object for RPC call
        :param routers: Router objects which is created in this sync event
        There could be many failure cases should be
        considered including the followings.
        1) Agent class restarted
        2) Failure on process creation
        3) VpnService is deleted during agent down
        4) RPC failure
        In order to handle, these failure cases,
        the driver needs to take sync strategies.
        """
        vpnservices = self.agent_rpc.get_vpn_services_on_host(
            context, self.host)
        router_ids = [vpnservice['router_id'] for vpnservice in vpnservices]
        sync_router_ids = [router['id'] for router in routers]
        self._sync_vpn_processes(vpnservices, sync_router_ids)
        self._delete_vpn_processes(sync_router_ids, router_ids)
        self._cleanup_stale_vpn_processes(router_ids)
        self.report_status(context)
    def get_process_status_cache(self, process):
        # Lazily create a default cache entry for this process.
        if not self.process_status_cache.get(process.id):
            self.process_status_cache[process.id] = {
                'status': None,
                'id': process.vpnservice['id'],
                'updated_pending_status': False,
                'ipsec_site_connections': {}}
        return self.process_status_cache[process.id]
    def report_status(self, context):
        # Collect processes whose status changed since the last report and
        # push them to the server in one RPC call.
        status_changed_vpn_services = []
        for process in self.processes.values():
            previous_status = self.get_process_status_cache(process)
            if self.is_status_updated(process, previous_status):
                new_status = self.copy_process_status(process)
                self.update_downed_connections(process.id, new_status)
                status_changed_vpn_services.append(new_status)
                self.process_status_cache[process.id] = (
                    self.copy_process_status(process))
                # We need unset updated_pending status after it
                # is reported to the server side
                self.unset_updated_pending_status(process)
        if status_changed_vpn_services:
            self.agent_rpc.update_status(context,
                                         status_changed_vpn_services)
    def _sync_vpn_processes(self, vpnservices, sync_router_ids):
        # (Re)build processes for services that are new or explicitly part
        # of this sync; only call update() once the router is tracked.
        for vpnservice in vpnservices:
            if vpnservice['router_id'] not in self.processes or (
                    vpnservice['router_id'] in sync_router_ids):
                process = self.ensure_process(vpnservice['router_id'],
                                              vpnservice=vpnservice)
                router = self.routers.get(vpnservice['router_id'])
                if not router:
                    continue
                process.update()
    def _delete_vpn_processes(self, sync_router_ids, vpn_router_ids):
        # Destroy processes for synced routers that no longer have a service.
        for process_id in sync_router_ids:
            if process_id not in vpn_router_ids:
                self.destroy_process(process_id)
    def _cleanup_stale_vpn_processes(self, vpn_router_ids):
        # Destroy any leftover process with no backing vpnservice.
        process_ids = [pid for pid in self.processes
                       if pid not in vpn_router_ids]
        for process_id in process_ids:
            self.destroy_process(process_id)
    def is_status_updated(self, process, previous_status):
        # Returns True on any change; falls through (None, falsy) otherwise.
        if process.updated_pending_status:
            return True
        if process.status != previous_status['status']:
            return True
        if (process.connection_status !=
                previous_status['ipsec_site_connections']):
            return True
    def unset_updated_pending_status(self, process):
        process.updated_pending_status = False
        for connection_status in process.connection_status.values():
            connection_status['updated_pending_status'] = False
    def copy_process_status(self, process):
        # Deep-copy connection status so later mutation of the live process
        # state does not alter the cached/reported snapshot.
        return {
            'id': process.vpnservice['id'],
            'status': process.status,
            'updated_pending_status': process.updated_pending_status,
            'ipsec_site_connections': copy.deepcopy(process.connection_status)
        }
    def update_downed_connections(self, process_id, new_status):
        """Update info to be reported, if connections just went down.
        If there is no longer any information for a connection, because it
        has been removed (e.g. due to an admin down of VPN service or IPSec
        connection), but there was previous status information for the
        connection, mark the connection as down for reporting purposes.
        """
        if process_id in self.process_status_cache:
            for conn in self.process_status_cache[process_id][IPSEC_CONNS]:
                if conn not in new_status[IPSEC_CONNS]:
                    new_status[IPSEC_CONNS][conn] = {
                        'status': constants.DOWN,
                        'updated_pending_status': True
                    }
    def create_router(self, router):
        """Handling create router event."""
        pass
    def destroy_router(self, process_id):
        # No-op: process teardown is driven by non_tracking/destroy_process.
        pass
    def destroy_process(self, process_id):
        """Destroy process.
        Disable the process and remove the process
        manager for the processes that no longer are running vpn service.
        """
        if process_id in self.processes:
            process = self.processes[process_id]
            process.disable()
            if process_id in self.processes:
                del self.processes[process_id]
    def plug_to_ovs(self, context, **kwargs):
        # Plug the VPN port into the Nuage bridge 'alubr0', configure L3 on
        # the device, and add default route(s) via the supplied gateway IPs.
        self.nuage_if_driver.plug(kwargs['network_id'], kwargs['port_id'],
                                  kwargs['device_name'], kwargs['mac'],
                                  'alubr0', kwargs['ns_name'])
        self.nuage_if_driver.init_l3(kwargs['device_name'], kwargs['cidr'],
                                     kwargs['ns_name'])
        device = ip_lib.IPDevice(kwargs['device_name'],
                                 namespace=kwargs['ns_name'])
        for gateway_ip in kwargs['gw_ip']:
            device.route.add_gateway(gateway_ip)
    def unplug_from_ovs(self, context, **kwargs):
        self.nuage_if_driver.unplug(kwargs['device_name'], 'alubr0',
                                    kwargs['ns_name'])
        ip = ip_lib.IPWrapper(kwargs['ns_name'])
        ip.garbage_collect_namespace()
        # On Redhat deployments an additional directory is created named
        # 'ip_vti0' in the namespace which prevents the cleanup
        # of namespace by the neutron agent in 'ip_lib.py' which we clean.
        if kwargs['ns_name'] in ip.get_namespaces():
            ip.netns.delete(kwargs['ns_name'])
class NuageOpenSwanDriver(NuageIPsecDriver):
    """Nuage IPsec driver backed by OpenSwan processes."""
    def create_process(self, process_id, vpnservice, namespace):
        """Build an OpenSwanProcess for the given service and namespace."""
        process = ipsec.OpenSwanProcess(self.conf, process_id, vpnservice,
                                        namespace)
        return process
class NuageStrongSwanDriver(NuageIPsecDriver):
    """Nuage IPsec driver backed by StrongSwan processes."""
    def create_process(self, process_id, vpnservice, namespace):
        """Build a StrongSwanProcess for the given service and namespace."""
        process = strongswan_ipsec.StrongSwanProcess(self.conf, process_id,
                                                     vpnservice, namespace)
        return process
class NuageStrongSwanDriverFedora(NuageIPsecDriver):
    """Nuage IPsec driver backed by Fedora StrongSwan processes."""
    def create_process(self, process_id, vpnservice, namespace):
        """Build a FedoraStrongSwanProcess for the service and namespace."""
        process = fedora_strongswan_ipsec.FedoraStrongSwanProcess(
            self.conf, process_id, vpnservice, namespace)
        return process
| {
"content_hash": "8e71532212c1e45171a6dce38bacab6d",
"timestamp": "",
"source": "github",
"line_count": 320,
"max_line_length": 79,
"avg_line_length": 38.490625,
"alnum_prop": 0.6162214825038564,
"repo_name": "naveensan1/nuage-openstack-neutron",
"id": "c67a5b32cc16e82519deee31ae8cd2d354d05579",
"size": "12933",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nuage_neutron/vpnaas/device_drivers/driver.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "1048"
},
{
"name": "Python",
"bytes": "1120262"
},
{
"name": "Shell",
"bytes": "13430"
}
],
"symlink_target": ""
} |
import numpy as np
from scipy.integrate import odeint
# Set constant 2 * pi.
PI2 = 2 * np.pi
class Cantilever(object):
    """Damped Driven Harmonic Oscillator Simulator for AFM Cantilevers.
    Simulates a DDHO under excitation with given parameters.
    Parameters
    ----------
    can_params : dict
        Parameters for cantilever properties. The dictionary contains:
        amp_invols = float (in m/V)
        def_invols = float (in m/V)
        soft_amp = float (in V)
        drive_freq = float (in Hz)
        res_freq = float (in Hz)
        k = float (in N/m)
        q_factor = float
    force_params : dict
        Parameters for forces. The dictionary contains:
        es_force = float (in N)
        delta_freq = float (in Hz)
        tau = float (in seconds)
    sim_params : dict
        Parameters for simulation. The dictionary contains:
        trigger = float (in seconds)
        total_time = float (in seconds)
        sampling_rate = int (in Hz)
    Attributes
    ----------
    amp : float
        Amplitude of the cantilever in meters.
    beta : float
        Damping factor of the cantilever in rad/s.
    delta : float
        Initial phase of the cantilever in radians.
    delta_freq : float
        Frequency shift of the cantilever under excitation.
    mass : float
        Mass of the cantilever in kilograms.
    Method
    ------
    simulate(trigger_phase=180)
        Simulates the cantilever motion with excitation happening
        at the given phase.
    See Also
    --------
    pixel: Pixel processing for FF-trEFM data.
    """
    def __init__(self, can_params, force_params, sim_params):
        # Initialize cantilever parameters and calculate some others.
        for key, value in can_params.items():
            setattr(self, key, value)
        self.w0 = PI2 * self.res_freq  # Radial resonance frequency.
        self.wd = PI2 * self.drive_freq  # Radial drive frequency.
        self.beta = self.w0 / (2 * self.q_factor)  # Damping factor.
        self.mass = self.k / (self.w0 ** 2)  # Mass of the cantilever in kg.
        self.amp = self.soft_amp * self.amp_invols  # Amplitude in meters.
        # Calculate reduced driving force and phase in equilibrium.
        self.f0 = self.amp * np.sqrt((self.w0 ** 2 - self.wd ** 2) ** 2 +
                                     4 * self.beta ** 2 * self.wd ** 2)
        self.delta = np.abs(np.arctan(np.divide(2 * self.wd * self.beta,
                                                self.w0 ** 2 - self.wd ** 2)))
        # Initialize force parameters and calculate some others.
        for key, value in force_params.items():
            setattr(self, key, value)
        self.delta_w = PI2 * self.delta_freq  # Frequency shift in radians.
        self.fe = self.es_force / self.mass  # Reduced electrostatic force.
        # Initialize simulation parameters.
        for key, value in sim_params.items():
            setattr(self, key, value)
        return
    def set_conditions(self, trigger_phase=180):
        """
        Sets initial conditions and other simulation parameters.
        Parameters
        ----------
        trigger_phase: float, optional
            Trigger phase is in degrees and wrt cosine. Default value is 180.
        """
        self.trigger_phase = np.mod(np.pi * trigger_phase / 180, PI2)
        # FIX: sample counts must be integers -- float counts break slicing
        # and reshape in simulate() (NumPy requires integer indices).
        self.n_points = int(round(self.total_time * 1e8))
        # Add extra cycles to the simulation to find correct phase at trigger.
        cycle_points = int(2 * 1e8 / self.res_freq)
        self.n_points_sim = cycle_points + self.n_points
        # Create time vector (100 MHz base rate) and find the trigger wrt
        # phase.
        self.t = np.arange(self.n_points_sim) / 1e8
        # Current phase at trigger.
        current_phase = np.mod(self.wd * self.trigger - self.delta, PI2)
        phase_diff = np.mod(self.trigger_phase - current_phase, PI2)
        self.t0 = self.trigger + phase_diff / self.wd
        # Set the initial conditions at t=0 (steady-state position/velocity).
        z0 = self.amp * np.sin(-self.delta)
        v0 = self.amp * self.wd * np.cos(-self.delta)
        self.Z0 = np.array([z0, v0])
        return
    @staticmethod
    def __gamma__(t, t0, tau):
        """
        Exponential decay function for force and resonance frequency.
        Parameters
        ----------
        t : float
            Time in seconds.
        t0: float
            Event time in seconds.
        tau : float
            Decay constant in the exponential function, in seconds.
        Returns
        -------
        value : float
            Value of the function at the given time; zero before the event.
        """
        if t >= t0:
            # -expm1(-x) == 1 - exp(-x), numerically stable for small x.
            return -np.expm1(-(t - t0) / tau)
        else:
            return 0
    def omega(self, t, t0, tau):
        """
        Exponentially decaying resonance frequency.
        Parameters
        ----------
        t : float
            Time in seconds.
        t0: float
            Event time in seconds.
        tau : float
            Decay constant in the exponential function, in seconds.
        Returns
        -------
        w : float
            Resonance frequency of the cantilever at a given time, in rad/s.
        """
        return self.w0 + self.delta_w * self.__gamma__(t, t0, tau)
    def force(self, t, t0, tau):
        """
        Force on the cantilever at a given time. It contains driving force
        and electrostatic force.
        Parameters
        ----------
        t : float
            Time in seconds.
        t0: float
            Event time in seconds.
        tau : float
            Decay constant in the exponential function, in seconds.
        Returns
        -------
        f : float
            Force on the cantilever at a given time, in N/kg.
        """
        driving_force = self.f0 * np.sin(self.wd * t)
        electro_force = self.fe * self.__gamma__(t, t0, tau)
        return driving_force - electro_force
    def dZ_dt(self, Z, t=0):
        """
        Takes the derivative of the given Z with respect to time.
        Parameters
        ----------
        Z : (2, ) array_like
            Z[0] is the cantilever position, and Z[1] is the cantilever
            velocity.
        t : float
            Time.
        Returns
        -------
        Zdot : (2, ) array_like
            Zdot[0] is the cantilever velocity, and Zdot[1] is the cantilever
            acceleration.
        """
        t0 = self.t0
        tau = self.tau
        v = Z[1]
        # DDHO equation of motion with time-dependent resonance frequency.
        vdot = (self.force(t, t0, tau) -
                self.omega(t, t0, tau) * Z[1] / self.q_factor -
                self.omega(t, t0, tau) ** 2 * Z[0])
        return np.array([v, vdot])
    def simulate(self, trigger_phase=180):
        """
        Simulates the cantilever motion.
        Parameters
        ----------
        trigger_phase: float, optional
            Trigger phase is in degrees and wrt cosine. Default value is 180.
        Returns
        -------
        Z : (n_points, 1) array_like
            Cantilever position in Volts.
        infodict : dict
            Information about the ODE solver.
        """
        self.set_conditions(trigger_phase)
        Z, infodict = odeint(self.dZ_dt, self.Z0, self.t, full_output=True)
        # Drop the warm-up cycles so the output starts `trigger` seconds
        # before the (phase-aligned) event at t0.
        t0_idx = int(self.t0 * 1e8)
        tidx = int(self.trigger * 1e8)
        Z_cut = Z[(t0_idx - tidx):(t0_idx + self.n_points - tidx), 0]
        # Downsample from the 100 MHz simulation rate to sampling_rate.
        step = int(1e8 / self.sampling_rate)
        # FIX: reshape requires an integer length (was a float product).
        n_points = int(round(self.total_time * self.sampling_rate))
        self.Z = Z_cut[0::step].reshape(n_points, 1) / self.def_invols
        self.infodict = infodict
        return self.Z, self.infodict
| {
"content_hash": "30a499698ba29fb851989f6afab1ecad",
"timestamp": "",
"source": "github",
"line_count": 271,
"max_line_length": 78,
"avg_line_length": 28.136531365313655,
"alnum_prop": 0.5489836065573771,
"repo_name": "jarrison/trEFM-learn",
"id": "530acd982f4dafd757f279ae922f8cee21a9444a",
"size": "7625",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "trEFMlearn/simulate.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "18048"
}
],
"symlink_target": ""
} |
from . import core, utils
import cgt
import ctypes, os.path as osp, hashlib, numpy as np, sys, subprocess, string, os, time, traceback, cPickle
from collections import defaultdict, namedtuple
from StringIO import StringIO
import logging
def function(inputs, outputs, dbg=None, updates=None, givens=None):
    """Compile `outputs` (a Node or list of Nodes) into a callable taking
    numeric values for `inputs`.  A single Node yields a callable returning
    one value; a list yields a callable returning a list."""
    assert isinstance(inputs, list), "Inputs must be a list"
    assert all(el.is_argument() for el in inputs), "Invalid input: should be a list of Argument nodes"
    if isinstance(outputs, core.Node):
        # Compile as a one-element list and unwrap the result.
        f_listout = _function_listout(inputs, [outputs], dbg, updates, givens)
        def f_single(*args):
            return f_listout(*args)[0]
        return f_single
    if isinstance(outputs, list):
        assert all(isinstance(el, core.Node) for el in outputs), "Invalid output: should all be symbolic variables"
        return _function_listout(inputs, outputs, dbg, updates, givens)
    raise ValueError("Expected `outputs` to be a Node or a list of Nodes. Got an object of type %s"%type(outputs))
def _function_listout(inputs, outputs, dbg = None, updates=None, givens=None):
    # Normalize/validate `updates` and `givens`, wrap tuple outputs into
    # Tuple nodes, then run the compilation pipeline to get an interpreter.
    if isinstance(updates,dict): updates=updates.items()
    if updates is None: updates = []
    else: assert (isinstance(updates, list) and
        all(isinstance(a,tuple) and len(a)==2
            and isinstance(a[0], core.Node) and isinstance(a[1], core.Node)
            for a in updates)), "updates should be a list of pairs (before, after)"
    if givens is None: givens = []
    # NOTE(review): this else-branch validates `updates`, not `givens` --
    # looks like a misplaced assert; confirm intended behavior upstream.
    else: assert all(before.is_data() for (before,_) in updates), "lhs of updates must be Data instances"
    if dbg: raise core.Todo("debug functionality is broken")
    # A python tuple of outputs is packaged into a single Tuple node.
    outputs = [cgt.make_tuple(*x) if isinstance(x, tuple) else x for x in outputs]
    interp = run_compilation_pipeline(inputs, outputs, updates, givens)
    return interp
# ================================================================
# Execution
# ================================================================
def python_only():
    """Return True when the native (cython) backend `cgt.cycgt` is absent."""
    has_native = hasattr(cgt, "cycgt")
    return not has_native
def determine_devices(nodes_sorted, updatetarg2src):
    # Op definitions (available impls, inplace-ness, etc) define constraints
    # on possible devices for a node
    #
    # Maps every node in `nodes_sorted` to a core.Device.  `updatetarg2src`
    # ties each update target to its source so both land on the same device.
    if python_only():
        # Pure-python backend: everything gets the default Device.
        return {node:core.Device() for node in nodes_sorted}
    # (1) Get available devices for nodes, determined by which impls are available and node types
    compile_info = get_compile_info()
    cuda_enabled = compile_info["CGT_ENABLE_CUDA"]
    node2dev = {}
    home_device = core.Device(devtype="cpu", idx=0)
    for node in nodes_sorted:
        default_device = node.props.get("default_device", home_device)
        if node.is_scalar():
            device = home_device
        elif node in updatetarg2src:
            # Update target inherits the device of the node it overwrites.
            device = node2dev[updatetarg2src[node]]
            assert "native_"+device.devtype in node.op.available_impls, "XXX bug: update only works if final operation can be performed on target device"
        elif node.is_data():
            device = node.op.device
        elif node.is_argument():
            device = home_device
        else:
            # Prefer the GPU when the op has a gpu impl and either the user
            # requested gpu or no native cpu impl exists.
            if ("native_gpu" in node.op.available_impls) and ((default_device.devtype == "gpu") or ("native_cpu" not in node.op.available_impls)):
                assert cuda_enabled, "trying to put op on gpu but cuda is disabled"
                device = core.Device("gpu", default_device.idx)
            else:
                device = core.Device(devtype="cpu", idx=default_device.idx)
        node2dev[node] = device
    return node2dev
def is_tensor(x):
    """Return True when node *x*'s type is a TensorType."""
    typ = x.typ
    return isinstance(typ, core.TensorType)
def is_tuple(x):
    """Return True when node *x*'s type is a TupleType."""
    typ = x.typ
    return isinstance(typ, core.TupleType)
def create_interpreter(inputs, outputs, eg, node2memloc):
    # Pick an interpreter implementation for ExecutionGraph `eg` based on the
    # configured backend ("python" vs "native") and the `parallel` flag.
    assert isinstance(eg, ExecutionGraph)
    input_types = [input.typ for input in inputs] #pylint: disable=W0622
    output_locs = [node2memloc[node] for node in outputs]
    config = cgt.get_config()
    backend = config["backend"]
    parallel = config["parallel"]
    if backend == "python":
        if parallel:
            # Parallel interpretation is only implemented natively.
            raise NotImplementedError("For parallel=True, set backend=native")
            # return ParallelInterpreter(eg, output_locs, input_types)
        else:
            return SequentialInterpreter(eg, output_locs, input_types)
    elif backend == "native":
        if parallel:
            return cgt.cycgt.CppInterpreterWrapper(eg, input_types, output_locs, config["num_threads"])
        else:
            # 0 threads -- presumably selects the serial native path; confirm.
            return cgt.cycgt.CppInterpreterWrapper(eg, input_types, output_locs, 0)
    else:
        raise NotImplementedError("invalid backend %s"%backend)
def topsorted_shapes_first(outputs, node2shape):
    # Almost identical to topsorted(...) function
    # But we also need to visit the shape elements of an in-place node
    # before visiting that node
    #
    # Iterative DFS; marks: 0/absent = unvisited, 1 = on stack (in progress),
    # 2 = done.  Seeing a mark of 1 again means the graph has a cycle.
    marks = {}
    out = []
    stack = []
    for x in outputs:
        stack.append((x,0))
    while stack:
        # (node, index of next dependency to visit)
        (i,jidx) = stack.pop()
        if jidx == 0:
            m = marks.get(i,0)
            if m == 0:
                marks[i] = 1
            elif m == 1:
                raise ValueError("not a dag")
            else:
                continue
        ps = i.parents
        ###### Changed part ######
        if i.ndim > 0 and not i.is_input() and i.op.return_type=="byref":
            # By-ref results need their shape-element nodes ordered first.
            if i in node2shape:
                shpels = node2shape[i]
            else:
                raise core.Unreachable
            # shpels = i.op.shp_apply(i.parents)
            ps = ps + shpels
        elif is_tuple(i):
            for arrshp in node2shape[i]:
                ps = ps + arrshp
        ##########################
        if jidx == len(ps):
            # All dependencies emitted; node is finished.
            marks[i] = 2
            out.append(i)
        else:
            stack.append((i,jidx+1))
            j = ps[jidx]
            stack.append((j,0))
    return out
def determine_memowner(nodes_sorted, updates, node2dev):
    # Decide, for each node, which node's memory its result is written into
    # (enabling in-place reuse).  Returns node -> owning node.
    # First determine how many "child" nodes each node has
    node2child = defaultdict(list)
    for node in nodes_sorted:
        for parent in node.parents:
            node2child[parent].append(node)
    # Now traverse graph again and see where we can use the same memory
    node2memowner = {} # mapping node x -> the node that owns its memory
    # For updates, memlocation(RHS) = memlocation(LHS)
    after2before = {after:before for (before,after) in updates}
    enable_inplace_opt = core.get_config()["enable_inplace_opt"]
    for node in nodes_sorted:
        base = node # by default,
        if node.is_argument():
            pass
        elif node.op.writes_to_input >= 0:
            # Op explicitly overwrites one of its inputs.
            base = node2memowner[node.parents[node.op.writes_to_input]]
        elif node in after2before:
            base = after2before[node]
        elif enable_inplace_opt and node.op.return_type == "byref": # TODO think about if we need any other conditions
            # Reuse a parent's buffer when this is the parent's only child
            # and shape/dtype match.
            nodeshape = node.op.shp_apply(node.parents)
            for parent in node.parents:
                if (len(node2child[parent])==1
                        and nodeshape==cgt.shape(parent) # XXX not a very robust way to check
                        and node.dtype == parent.dtype
                        and _is_data_mutable(parent)):
                    base = parent
                    break
        # TODO: add optimization for in-place incrementing
        node2memowner[node] = base
    return node2memowner
class MemCounter(object):
    """
    returns `MemLocation`s with indices 0,1,...
    `count` member indicates how many have been returned thus far
    """
    def __init__(self):
        # Next index to hand out / number handed out so far.
        self.count = 0
    def new_memloc(self, devtype):
        """Allocate the next MemLocation on device type *devtype*."""
        loc = MemLocation(self.count, devtype)
        self.count = self.count + 1
        return loc
def create_execution_graph(inputs, nodes_sorted, node2shape, node2memowner, node2dev):
    # Lower the (topsorted) expression graph into a flat instruction list
    # plus a node -> MemLocation mapping.  Returns (ExecutionGraph, dict).
    # node2impltype = copy.copy(node2impltype) # we'll insert transport ops
    instrs = []
    counter = MemCounter()
    node2memloc = {}
    for node in nodes_sorted:
        # Fall back to the first parent's cpu device for unplaced nodes.
        if node not in node2dev: node2dev[node] = core.Device(devtype="cpu",idx=node2dev[node.parents[0]].idx if len(node.parents)>0 else 0)
        if node.is_argument():
            write_loc = counter.new_memloc(node2dev[node].devtype)
            node2memloc[node] = write_loc
            i = inputs.index(node)
            instrs.append(LoadArgument(i, write_loc))
        else:
            read_locs = [node2memloc[parent] for parent in node.parents]
            if node.op.return_type == "byref":
                if node2memowner[node] is node:
                    if is_tensor(node): # just make one memory location for output
                        nodeshape = node2shape[node] if node.ndim > 0 else []
                        shape_locs = [node2memloc[shpel] for shpel in nodeshape]
                        write_loc = counter.new_memloc(node2dev[node].devtype)
                        instrs.append(Alloc(node.dtype, shape_locs, write_loc))
                    else: # if it's a tuple, we need to allocate all of the components, then build tuple
                        nodeshape = node2shape[node]
                        assert isinstance(nodeshape, tuple)
                        arr_locs = []
                        for (arrshp, arrtyp) in utils.safezip(nodeshape, node.typ):
                            arr_loc = counter.new_memloc(node2dev[node].devtype)
                            shape_locs = [node2memloc[shpel] for shpel in arrshp]
                            instrs.append(Alloc(arrtyp.dtype, shape_locs, arr_loc))
                            arr_locs.append(arr_loc)
                        write_loc = counter.new_memloc(node2dev[node].devtype)
                        instrs.append(BuildTup(node.typ, arr_locs, write_loc))
                else:
                    # If this node writes to another node's memory, the devices must be the same
                    # this should have been enforced in determine_devices()
                    assert node2dev[node] == node2dev[node2memowner[node]]
                    write_loc = node2memloc[node2memowner[node]]
                instrs.append(ReturnByRef(node.op, [par.typ for par in node.parents], read_locs, write_loc, node_props=node.props))
            else:
                assert node.op.return_type == "byval"
                write_loc = counter.new_memloc(node2dev[node].devtype)
                instrs.append(ReturnByVal(node.op, [par.typ for par in node.parents], read_locs, write_loc, node_props=node.props))
        node2memloc[node] = write_loc
    return ExecutionGraph(instrs, len(inputs), counter.count), node2memloc
def get_callable(op, input_types, devtype, prefer_python=False):
    # Select a python or native callable for `op`, honoring config overrides
    # ("force_python_impl", "backend") and the op's available_impls.
    assert op.available_impls, "need to set op.available_impls"
    config = core.get_config()
    if (prefer_python or config["force_python_impl"]) and "python" in op.available_impls:
        return op.get_py_callable(input_types)
    elif config["backend"] == "python":
        if "python" in op.available_impls:
            return op.get_py_callable(input_types)
        else:
            # Python backend can still use native cpu impls as a fallback.
            assert devtype=="cpu", "can't use devtype=gpu with python backend"
            if "native_cpu" in op.available_impls:
                return get_native_callable(op, input_types, "cpu")
            else:
                raise RuntimeError("Can't find an implementation of %s suitable for python backend. Just have available_impls=%s"%(op,op.available_impls))
    else: # backend = native
        if devtype == "cpu":
            if "native_cpu" in op.available_impls:
                return get_native_callable(op, input_types, "cpu")
            else:
                print "using python impl for",op
                return op.get_py_callable(input_types)
        else:
            if "native_gpu" in op.available_impls:
                return get_native_callable(op, input_types, "gpu")
            else:
                raise RuntimeError("Tried to put Op %s on the GPU but I only have a python impl :("%op)
def get_native_callable(op, input_types, devtype):
    """Build a native callable for *op*: fetch its compile info, annotate it
    with op metadata, and hand it to nci2callable."""
    info = op.get_native_compile_info(input_types, devtype)
    info.op_str = str(op)
    info.return_type = op.return_type
    info.n_in = len(input_types)
    return nci2callable(info)
def add_transports(nodelist, node2dev, node2shape):
    # Insert Transport nodes wherever a child lives on a different device
    # than its parent; mutates node2dev/node2shape and child parent lists.
    node2child = defaultdict(list)
    for node in nodelist:
        for par in node.parents:
            node2child[par].append(node)
    # XXX look at native compilation info, gpu deref mask
    for node in nodelist:
        dev = node2dev[node]
        # One transport copy per distinct destination device, shared by all
        # children on that device.
        dev2copy = {}
        for child in node2child[node]:
            childdev = node2dev[child]
            if not childdev == dev:
                if childdev not in dev2copy:
                    nodecopy = core.Result(core.Transport(childdev), [node])
                    node2dev[nodecopy] = childdev
                    dev2copy[childdev] = nodecopy
                    node2shape[nodecopy] = node2shape[node]
                replace_parents(child, node, dev2copy[childdev])
def replace_parents(node, before, after):
    """Rewrite every occurrence (by identity) of *before* in node.parents
    with *after*, in place."""
    parents = node.parents
    for idx in range(len(parents)):
        if parents[idx] is before:
            parents[idx] = after
def run_compilation_pipeline(inputs, outputs, updates, givens):
    """
    Compiles the expression graph into an execution graph.
    """
    config = core.get_config()
    # Phase 1: simplification and analysis of expression graph
    # ------------------------------------------------------
    # Add add update targets to outputs
    outputs_updatetargs = outputs + [after for (_before, after) in updates]
    if givens: outputs_updatetargs = core.clone(outputs_updatetargs, dict(givens))
    # Do simplification + analysis pass on expression graph
    outputs_updatetargs_simple, analysis, _ = \
        core.simplify_and_analyze(outputs_updatetargs) if config["enable_simplification"] \
        else (outputs_updatetargs, core.analyze(outputs_updatetargs), {})
    # Phase 2: device targeting
    # ------------------------------------------------------
    outputs_updatetargs_simple = cgt.core.clone(outputs_updatetargs_simple)
    analysis = core.analyze(outputs_updatetargs_simple)
    # XXX inefficient to just copy the graph and redo analysis
    nodelist = core.topsorted(outputs_updatetargs_simple)
    updatesrcs = [before for (before, _) in updates]
    # Update targets sit after the real outputs in the combined list.
    updatetargs_simple = outputs_updatetargs_simple[len(outputs):]
    node2dev = determine_devices(nodelist, {targ:src for (src,targ) in zip(updatesrcs, updatetargs_simple)})
    add_transports(nodelist, node2dev, analysis["node2shape"])
    # XXX we're missing stuff used for shape computation
    # XXX i think we might also have unnecessary stuff from shape comp in exe graph
    # Phase 3: build execution graph
    # ------------------------------------------------------
    # Sort nodes so that shape elements appear before a given node
    nodes_sorted = topsorted_shapes_first(outputs_updatetargs_simple, analysis["node2shape"]) # XXX don't need shapes for byval ops
    # For each node, figure out if its output should be written to a previous node's memory
    # (memowner : "memory owner")
    updatetargs_simple = outputs_updatetargs_simple[len(outputs):]
    node2memowner = determine_memowner(nodes_sorted, zip(updatesrcs, updatetargs_simple), node2dev)
    # Find the outputs we want to return
    outputs_simple = outputs_updatetargs_simple[:len(outputs)] # get rid
    # Generate execution graph
    eg, node2memloc = create_execution_graph(
        inputs, nodes_sorted, analysis["node2shape"], node2memowner, node2dev)
    # print execution graph
    if config["verbose"]:
        print 'begin'
        print '\n'.join(str(i)+'.) \t'+repr(instr) for (i,instr) in enumerate(eg.instrs))
        print 'end'
    # Phase 3: create C or Python interpreter for graph
    # ------------------------------------------------------
    interp = create_interpreter(inputs, outputs_simple, eg, node2memloc)
    # Done!
    return interp
# ================================================================
# Simple numeric eval via traversal
# ================================================================
def numeric_eval(output, arg2val):
    """
    Numerically evaluates symbolic variable without any compilation,
    by associating each argument with a value (via `arg2val`) and traversing
    the computation graph
    Inputs
    ------
    output: symbolic variable or list of variables we would like to evaluate
    arg2val: dictionary assigning each argument that output depends on to a numerical value
    Returns
    -------
    Numeric value or list of numeric values of variables corresponding to output
    """
    if isinstance(output, core.Node):
        # Single node: evaluate as a one-element list and unwrap.
        return _numeric_eval_listout([output], arg2val)[0]
    if isinstance(output, list):
        assert all(isinstance(x, core.Node) for x in output), "expected a list of Nodes"
        return _numeric_eval_listout(output, arg2val)
    raise ValueError("expected `output` to be a Node or a list of Nodes. Got an object of type %s"%type(output))
def _numeric_eval_listout(outputs, arg2val):
    """
    Evaluate outputs numerically. arg2val is a dictionary mapping arguments to numerical values
    """
    assert isinstance(outputs, list)
    assert isinstance(arg2val, dict)
    # Topological order guarantees parents are evaluated before children.
    nodes = list(core.topsorted(outputs))
    node2val = {}
    for node in nodes:
        if node.is_argument():
            node2val[node] = core.as_valid_array(arg2val[node])
        elif node.is_data():
            node2val[node] = node.op.get_value()
        else:
            # Apply the op's python implementation to the parent values.
            parentvals = [node2val[par] for par in node.parents]
            node2val[node] = core.py_numeric_apply(node, parentvals)
        # assert node.get_ndim() == np.array(node2val[node]).ndim
    numeric_outputs = [node2val[node] for node in outputs]
    return numeric_outputs
################################################################
### Execution graph
################################################################
MemInfo = namedtuple("MemInfo",["loc","access"])
MEM_OVERWRITE = "overwrite"
MEM_INCREMENT = "increment"
class ExecutionGraph(object):
    """Flat list of instructions plus the argument and memory-slot counts
    needed to interpret it."""
    def __init__(self, instrs, n_args, n_locs):
        self.n_args = n_args
        self.n_locs = n_locs
        self.instrs = instrs
class MemLocation(object):
    """A typed slot in the interpreter's memory table, identified by index
    and device type ("cpu" or "gpu")."""
    def __init__(self, idx, devtype):
        assert isinstance(idx, int)
        assert devtype in ("cpu", "gpu")
        self.index = idx
        self.devtype = devtype
        # TODO: dtype
    def __repr__(self):
        return "%%%i/%s" % (self.index, self.devtype)
# ================================================================
# Instructions
# ================================================================
class Instr(object):
    """Abstract base for execution-graph instructions."""
    def fire(self, interp):
        """Execute this instruction against interpreter *interp*."""
        raise NotImplementedError
class LoadArgument(Instr):
    """Copy the ind-th user-supplied argument into write_loc."""
    def __init__(self, ind, write_loc):
        self.ind = ind
        self.write_loc = write_loc
    def fire(self, interp):
        value = interp.getarg(self.ind)
        interp.set(self.write_loc, value)
    def __repr__(self):
        return "%s = LoadArg ind:%i" % (self.write_loc, self.ind)
class Alloc(Instr):
    """Ensure write_loc holds an array of the requested dtype whose shape is
    read from the locations in read_locs; reuses the existing array when the
    shape already matches."""
    def __init__(self, dtype, read_locs, write_loc):
        self.dtype = dtype
        self.read_locs = read_locs
        self.write_loc = write_loc
    def fire(self, interp):
        shp = tuple(interp.get(loc) for loc in self.read_locs)
        existing = interp.get(self.write_loc)
        if existing is None or existing.shape != shp:
            fresh = np.ones(shp, self.dtype)
            interp.set(self.write_loc, fresh)
    def __repr__(self):
        return "%s = Alloc shp:%s dtype:%s" % (self.write_loc, str(self.read_locs), self.dtype)
class BuildTup(Instr):
    """Gather the values at `read_locs` into a Python tuple stored at `write_loc`."""
    def __init__(self, typ, read_locs, write_loc):
        self.typ = typ
        self.read_locs = read_locs
        self.write_loc = write_loc
    def fire(self, interp):
        members = tuple(interp.get(loc) for loc in self.read_locs)
        interp.set(self.write_loc, members)
    def __repr__(self):
        return "%s = BuildTup args:%s" % (self.write_loc, str(self.read_locs))
class ReturnByRef(Instr):
    """Apply `op` in place: inputs are read from `read_locs` and the result
    is written into the preallocated storage at `write_loc`."""
    def __init__(self, op, input_types, read_locs, write_loc, node_props=None):
        self.op = op
        self.input_types = input_types
        self.read_locs = read_locs
        self.write_loc = write_loc
        self._callable = None
        self.node_props = node_props
    def fire(self, interp):
        # Build (and cache) the compiled callable on first use.
        if self._callable is None:
            self._callable = self.get_callable()
        inputs = [interp.get(loc) for loc in self.read_locs]
        self._callable.call(inputs, interp.get(self.write_loc))
    def get_callable(self):
        return get_callable(self.op, self.input_types, self.write_loc.devtype)
    def __repr__(self):
        return "%s = ReturnByRef op:%s args:%s" % (self.write_loc, str(self.op), str(self.read_locs))
class ReturnByVal(Instr):
    """Apply `op` functionally: read inputs from `read_locs` and store the
    value returned by the callable into `write_loc`."""
    def __init__(self, op, input_types, read_locs, write_loc, node_props=None):
        self.op = op
        self.input_types = input_types
        self.read_locs = read_locs
        self.write_loc = write_loc
        self._callable = None
        self.node_props = node_props
    def fire(self, interp):
        # Build (and cache) the compiled callable on first use.
        if self._callable is None:
            self._callable = self.get_callable()
        inputs = [interp.get(loc) for loc in self.read_locs]
        interp.set(self.write_loc, self._callable.call(inputs))
    def get_callable(self):
        return get_callable(self.op, self.input_types, self.write_loc.devtype)
    def __repr__(self):
        return "%s = ReturnByVal op:%s args:%s" % (self.write_loc, str(self.op), str(self.read_locs))
# ================================================================
# Compiling native code
# ================================================================
def nci2callable(nci):
    """Turn a native-compile-info object into a NativeCallable.

    Renders the op's templated source, hashes source + compiler flags into a
    cache key, compiles a shared library on cache miss, loads the function
    pointers, and wraps them with the op's closure data.
    `nci` provides: includes, closure_triples, func_code, extra_srcs,
    link_flags, setup, teardown, n_in, return_type, op_str, store_objects.
    """
    template_code = gen_templated_code(nci.includes, nci.closure_triples, nci.func_code)
    compile_info = get_compile_info()
    # Hash source text and CPP flags so any change yields a new cache entry.
    prefix = utils.hash_seq1(template_code, compile_info["CPP_FLAGS"], *(src.code for src in nci.extra_srcs))
    # Substitute unique symbol names into the $function/$closure/... templates.
    d = dict(function=_funcname(prefix), closure=_closurename(prefix),setup=_setupname(prefix),teardown=_teardownname(prefix))
    fn_srcfile = core.SrcFile("c++",string.Template(template_code).substitute(d))
    srcfiles = [fn_srcfile]
    srcfiles.extend(core.SrcFile(sf.lang, string.Template(sf.code).substitute(d)) for sf in nci.extra_srcs)
    CACHE_ROOT = compile_info["CACHE_ROOT"]
    libpath = osp.join(CACHE_ROOT, prefix+".so")
    if not osp.exists(libpath):
        # Cache miss: compile and link into the cache directory.
        tu = TranslationUnit(srcfiles, nci.link_flags)
        tu.compile(prefix, libpath)
    lib = get_or_load_lib(libpath)
    fptr = getattr(lib, _funcname(prefix))
    # Optional setup/teardown entry points are only looked up when declared.
    setup_fptr = getattr(lib, _setupname(prefix)) if nci.setup else None
    teardown_fptr = getattr(lib, _teardownname(prefix)) if nci.teardown else None
    cldata = _build_closure(nci.closure_triples)
    return core.NativeCallable(nci.n_in, nci.return_type, nci.op_str, fptr, cldata=cldata, setup_fptr=setup_fptr, teardown_fptr=teardown_fptr,
        store_objects=nci.store_objects)
def _funcname(prefix):
    """Symbol name of the compiled call entry point for `prefix`."""
    return "call_%s" % prefix
def _setupname(prefix):
    """Symbol name of the compiled setup entry point for `prefix`."""
    return "setup_%s" % prefix
def _teardownname(prefix):
    """Symbol name of the compiled teardown entry point for `prefix`."""
    return "teardown_%s" % prefix
def _closurename(prefix):
    """Name of the generated closure struct type for `prefix`."""
    return "closure_%s" % prefix
def gen_templated_code(includes, closure_info, func_code):
    """Assemble one translation unit: #include lines (cgt_common.h first),
    the closure struct definition (if any), then the op's function code."""
    out = StringIO()
    for header in ["cgt_common.h"] + includes:
        out.write('#include "%s"\n' % header)
    gen_struct_code(closure_info, out)
    out.write(func_code)
    return out.getvalue()
def gen_struct_code(triples, outstream):
    """Write a C `typedef struct $closure {...}` for the closure fields.

    `triples` is a list of (fieldname, ctypes type, value); only the name
    and type are used here. Does nothing when `triples` is None.
    """
    if triples is None:
        return
    outstream.write("typedef struct $closure {\n")
    for (fieldname, fieldtype, _val) in triples:
        outstream.write("%s %s;\n" % (_ctypes2str[fieldtype], fieldname))
    outstream.write("} $closure;\n")
_LIBRARIES = {}  # libname -> loaded CDLL, so each shared library loads once
def get_or_load_lib(libname):
    """Load a shared library with ctypes, memoizing by path."""
    lib = _LIBRARIES.get(libname)
    if lib is None:
        lib = ctypes.cdll.LoadLibrary(libname)
        _LIBRARIES[libname] = lib
    return lib
class TranslationUnit(object):
    """All the input that goes into building a native binary for one or more ops"""
    def __init__(self, srcfiles, link_flags):
        # srcfiles: sequence of (lang, code) pairs; lang is "c++" or "cuda".
        # link_flags: extra flags appended at link time.
        self.srcfiles = srcfiles
        self.link_flags = link_flags
    def compile(self, prefix, libpath):
        """
        Compiles all of the files, places them in the cache directory
        Then links them creating prefix.so
        """
        CACHE_ROOT = get_compile_info()["CACHE_ROOT"]
        cmds = ["cd %s"%CACHE_ROOT]
        objs = []
        for (i,(lang,code)) in enumerate(self.srcfiles):
            # Choose the compiler by source language; the index i keeps
            # multiple sources of one unit from colliding on disk.
            if lang=="c++":
                srcpath = osp.join(CACHE_ROOT, prefix+"_%i.cpp"%i)
                cmds.append(_make_cpp_compile_cmd(srcpath))
            elif lang=="cuda":
                srcpath = osp.join(CACHE_ROOT, prefix+"_%i.cu"%i)
                cmds.append(_make_cuda_compile_cmd(srcpath))
            else:
                raise NotImplementedError
            # Write the source file before the queued commands execute.
            with open(srcpath,"w") as fh: fh.write(code)
            objs.append(srcpath+".o")
        cmds.append(_make_link_cmd(objs, self.link_flags, libpath))
        # One shell invocation joined with && so any failure aborts the chain.
        bigcmd = " && ".join(cmds)
        call_and_print(bigcmd)
_COMPILE_CONFIG = None
def get_compile_info():
global _COMPILE_CONFIG
if _COMPILE_CONFIG is None:
config = core.get_config()
CGT_BUILD_ROOT = cgt.cycgt.cgt_build_root() #pylint: disable=E1101
cmake_info = {}
with open(osp.join(CGT_BUILD_ROOT,"build_info.txt")) as fh:
lines = fh.readlines()
for line in lines:
if ":=" not in line: print "skipping",line
lhs,rhs = line.split(":=")
lhs = lhs.strip()
rhs = rhs.strip()
cmake_info[lhs] = rhs
CUDA_ROOT = cmake_info["CUDA_ROOT"]
CGT_ENABLE_CUDA = cmake_info["CGT_ENABLE_CUDA"] in ["1","ON"]
CGT_ENABLE_CUDNN = cmake_info["CGT_ENABLE_CUDNN"] in ["1","ON"]
DEFINITIONS = "-DENABLE_CUDA" if CGT_ENABLE_CUDA else ""
CUDNN_ROOT = cmake_info["CUDNN_ROOT"]
cuda_library_dir = osp.join(CUDA_ROOT,"lib64") if osp.exists(osp.join(CUDA_ROOT,"lib64")) else osp.join(CUDA_ROOT,"lib")
_COMPILE_CONFIG = dict(
OPENBLAS_INCLUDE_DIR = osp.join(CGT_BUILD_ROOT,"OpenBLAS"),
CGT_INCLUDE_DIR = cmake_info["CGT_INCLUDE_DIR"],
CGT_LIBRARY_DIR = osp.join(CGT_BUILD_ROOT,"lib"),
CUDA_LIBRARY_DIR = cuda_library_dir,
CUDA_INCLUDE_DIR = osp.join(CUDA_ROOT,"include"),
CUDA_LIBRARIES = cmake_info["CUDA_LIBRARIES"],
DEFINITIONS = DEFINITIONS,
CUDA_ROOT = CUDA_ROOT,
CUDNN_ROOT = CUDNN_ROOT,
CACHE_ROOT = osp.expanduser(config["cache_dir"]),
CGT_ENABLE_CUDA = CGT_ENABLE_CUDA,
CGT_ENABLE_CUDNN = CGT_ENABLE_CUDNN,
# CGT_LIBRARY = cmake_info["CGT_LIBRARY"],
)
includes = "-I"+_COMPILE_CONFIG["CGT_INCLUDE_DIR"]
includes += " -I"+_COMPILE_CONFIG["OPENBLAS_INCLUDE_DIR"]
link_flags = ""
if _COMPILE_CONFIG["CGT_ENABLE_CUDA"]: includes += " -I"+_COMPILE_CONFIG["CUDA_INCLUDE_DIR"]
if _COMPILE_CONFIG["CGT_ENABLE_CUDNN"]: includes += " -I"+_COMPILE_CONFIG["CUDNN_ROOT"]
_COMPILE_CONFIG["INCLUDES"] = includes
link_flags = "-lcgt -L%s -L%s"%(_COMPILE_CONFIG["CGT_LIBRARY_DIR"], _COMPILE_CONFIG["OPENBLAS_INCLUDE_DIR"])
link_flags += " -Wl,-rpath,%s -Wl,-rpath,%s"%(_COMPILE_CONFIG["CGT_LIBRARY_DIR"], _COMPILE_CONFIG["OPENBLAS_INCLUDE_DIR"])
if _COMPILE_CONFIG["CGT_ENABLE_CUDA"]: link_flags += " -L"+_COMPILE_CONFIG["CUDA_LIBRARY_DIR"]
if _COMPILE_CONFIG["CGT_ENABLE_CUDNN"]:
link_flags += " -L"+_COMPILE_CONFIG["CUDNN_ROOT"]
link_flags += " -Wl,-rpath,"+_COMPILE_CONFIG["CUDNN_ROOT"]
if sys.platform == "darwin":
link_flags += " -dynamiclib -Wl,-headerpad_max_install_names"
else:
link_flags += " -shared -rdynamic"
_COMPILE_CONFIG["LINK_FLAGS"] = link_flags
cpp_flags = "-fvisibility=hidden -std=c++11 -fPIC" + (" -O0 -g" if config["debug_cpp"] else " -O3 -DNDEBUG")
if sys.platform == "darwin": cpp_flags += " -stdlib=libc++"
_COMPILE_CONFIG["CPP_FLAGS"] = cpp_flags
CACHE_ROOT = _COMPILE_CONFIG["CACHE_ROOT"]
if not osp.exists(CACHE_ROOT):
os.makedirs(CACHE_ROOT)
return _COMPILE_CONFIG
def _make_cpp_compile_cmd(srcpath):
    """Shell command compiling one C++ source file to `srcpath`.o,
    using the globally configured flags, includes, and definitions."""
    d = get_compile_info()
    # Dropped the unused 'cacheroot' mapping key: the format string only
    # references cpp_flags, srcpath, includes, and definitions.
    return "c++ %(cpp_flags)s %(srcpath)s -c -o %(srcpath)s.o %(includes)s %(definitions)s"%dict(
        srcpath = srcpath, includes=d["INCLUDES"], definitions=d["DEFINITIONS"],
        cpp_flags=d["CPP_FLAGS"])
def _make_cuda_compile_cmd(srcpath):
    """Shell command compiling one CUDA source file to `srcpath`.o with nvcc."""
    info = get_compile_info()
    template = ("nvcc %(srcpath)s -c -o %(srcpath)s.o -ccbin cc -m64 "
                "-Xcompiler -fPIC -Xcompiler -O3 %(includes)s %(definitions)s")
    return template % dict(
        srcpath=srcpath, includes=info["INCLUDES"], definitions=info["DEFINITIONS"])
def _make_link_cmd(objs, extra_link_flags, libpath):
    """Shell command linking object files into the shared library `libpath`.

    On darwin, sets -install_name to the library basename so it can be
    located at load time.
    """
    d = get_compile_info()
    iname = "-install_name %s"%osp.basename(libpath) if sys.platform=="darwin" else ""
    # Dropped unused 'includes' and 'cacheroot' mapping keys: the format
    # string only uses cpp_flags, objnames, link_flags, iname, libpath.
    return r"c++ %(cpp_flags)s %(objnames)s %(link_flags)s %(iname)s -o %(libpath)s"%dict(
        objnames=" ".join(objs), cpp_flags=d["CPP_FLAGS"], libpath=libpath,
        link_flags=d["LINK_FLAGS"]+" "+extra_link_flags, iname=iname)
def call_and_print(cmd):
print "\x1b[32m%s\x1b[0m"%cmd
subprocess.check_call(cmd,shell=True)
# Maps ctypes scalar types to the C type names emitted for closure struct
# fields by gen_struct_code.
# NOTE(review): ctypes.c_byte is signed but maps to "uint8_t" here —
# confirm closures only ever store non-negative byte values.
_ctypes2str = {
    ctypes.c_byte : "uint8_t",
    ctypes.c_bool : "bool",
    ctypes.c_char : "char",
    ctypes.c_int : "int",
    ctypes.c_long : "long",
    ctypes.c_void_p : "void*",
    ctypes.c_double : "double",
    ctypes.c_float : "float"
}
_struct_cache = {} # because creating ctypes.Structure class is slow for some reason
def _build_closure(triples):
    """Build a ctypes Structure instance holding the closure values.

    `triples` is a list of (fieldname, ctypes type, value). Returns a null
    void pointer when there is no closure. Structure classes are cached by
    their pickled field layout.
    """
    if triples is None:
        return ctypes.c_void_p(0)
    fields = [(name, ctype) for (name, ctype, _val) in triples]
    vals = [val for (_name, _ctype, val) in triples]
    key = cPickle.dumps(fields)
    S = _struct_cache.get(key)
    if S is None:
        class S(ctypes.Structure):
            _fields_ = fields
        _struct_cache[key] = S
    return S(*vals)
################################################################
### Interpreters
################################################################
class Interpreter(object):
def __call__(self, args):
raise NotImplementedError
def get(self, mem):
raise NotImplementedError
def set(self, mem, val):
raise NotImplementedError
def getarg(self, i):
raise NotImplementedError
class SequentialInterpreter(Interpreter):
    """
    Runs an execution graph by firing its instructions one at a time,
    in order, keeping intermediate values in a flat storage list.
    """
    def __init__(self, eg, output_locs, input_types, copy_outputs=True):
        # eg: ExecutionGraph to run.
        # output_locs: memory locations returned by __call__.
        # input_types: declared types of the numeric arguments.
        # copy_outputs: copy returned values so a later call cannot clobber
        #   data the caller still holds.
        self.eg = eg
        self.input_types = input_types
        self.output_locs = output_locs
        self.storage = [None for _ in xrange(self.eg.n_locs)]
        self.args = None
        self.copy_outputs = copy_outputs
    def __call__(self, *args):
        """Run the graph on numeric `args`; return the list of outputs."""
        assert len(args) == len(self.input_types), "Wrong number of inputs provided"
        self.args = tuple(core.as_valid_array(arg, intype) for (arg, intype) in zip(args, self.input_types))
        for instr in self.eg.instrs:
            if profiler.on: tstart = time.time()
            try:
                instr.fire(self)
            except Exception as e:
                # Print the immediate traceback, then try to point at the
                # graph node whose instruction failed before re-raising.
                traceback.print_exc()
                if isinstance(instr, (ReturnByRef,ReturnByVal)):
                    if core.get_config()["debug"]:
                        # In debug mode the node-creation stack was recorded.
                        assert "stack" in instr.node_props
                        utils.colorprint(utils.Color.MAGENTA, "HERE'S THE STACK WHEN THE OFFENDING NODE WAS CREATED\n",o=sys.stderr)
                        print>>sys.stderr, ">>>>>>>>>>>>>>>>>>>>>>>>>>"
                        traceback.print_list(instr.node_props["stack"])
                        print>>sys.stderr, "<<<<<<<<<<<<<<<<<<<<<<<<<<"
                        raise e
                    else:
                        utils.error("Didn't save the stack so I can't give you a nice traceback :(. Try running with CGT_FLAGS=debug=True")
                        raise e
                else:
                    utils.error("Oy vey, an exception occurred in a %s Instruction. I don't know how to help you debug this one right now :(."%type(instr))
                    raise e
            if profiler.on: profiler.update(instr, time.time()-tstart)
        outputs = [self.get(loc) for loc in self.output_locs]
        if self.copy_outputs: outputs = map(_copy, outputs)
        return outputs
        # need to copy because otherwise we might mess up the data when we call func again
        # todo: add option that prevents this behavior
    def get(self, mem):
        """Read the current value stored at memory location `mem`."""
        return self.storage[mem.index]
    def set(self, mem, val):
        """Write `val` into memory location `mem`."""
        self.storage[mem.index] = val
    def getarg(self, i):
        """Return the i-th (converted) argument of the current call."""
        return self.args[i]
# ================================================================
# Profiler
# ================================================================
class _Profiler(object):
    """
    Profiler for Python backend, i.e. Interpreter.
    Accumulates per-instruction fire counts and wall-clock time.
    """
    def __init__(self):
        # instr2stats maps instruction -> (fire count, total seconds)
        self.instr2stats = {}
        self.on = False
        self.t_total = 0.0
    def start(self): self.on = True  # begin collecting timings
    def stop(self): self.on = False  # pause collection; stats are kept
    def update(self, instr, elapsed):
        # Record one firing of `instr` that took `elapsed` seconds.
        (prevcount, prevtime) = self.instr2stats.get(instr, (0,0.0))
        self.instr2stats[instr] = (prevcount+1, prevtime+elapsed)
        self.t_total += elapsed
    def print_stats(self):
        # Aggregate per Op key (many instructions can share an Op), then
        # print a time-sorted table.
        op2stats = {}
        # Collapse by Op, rather than instruction
        for (instr,(count,t)) in self.instr2stats.iteritems():
            if isinstance(instr, (ReturnByRef, ReturnByVal)):
                opkey = str(instr.op)
            elif isinstance(instr, Alloc):
                opkey = "Alloc{dtype=%s,ndim=%i}"%(instr.dtype, len(instr.read_locs))
            else:
                opkey = instr.__class__.__name__
            (prevcount, prevtime) = op2stats.get(opkey, (0, 0.0))
            op2stats[opkey] = (prevcount+count, prevtime+t)
        print "Total time elapsed: %.3g seconds"%self.t_total
        # _print_heading("By instruction")
        # _print_stats(self.instr2stats, self.t_total)
        _print_heading("By Op")
        _print_stats(op2stats, self.t_total)
    def clear_stats(self):
        # Drop collected data but keep the on/off state.
        self.instr2stats = {}
        self.t_total = 0.0
# Module-level singleton profiler consulted by the interpreters above.
profiler = _Profiler()
def _print_heading(heading):
heading = " " + heading + " "
width = 60
assert len(heading) < width-10
print
print "*"*width
padleft = (width-len(heading))//2
padright = width-len(heading)-padleft
print "*"*padleft + heading + "*"*padright
print "*"*width
def _print_stats(key2stats, t_total):
    """Print a table of (count, time, time fraction, cumulative fraction)
    per key, sorted by time descending.
    key2stats maps key -> (count, total_time); t_total is the grand total."""
    rows = []
    for (key, (count,t)) in key2stats.iteritems():
        rows.append([str(key), count, t, t/t_total])
    rows = sorted(rows, key=lambda row: row[2], reverse=True)
    # Append a running sum of the time fractions to each row.
    cumsum = 0
    for row in rows:
        cumsum += row[3]
        row.append(cumsum)
    from thirdparty.tabulate import tabulate
    print tabulate(rows, headers=["Instruction","Count","Time","Frac","Frac cumsum"])
def _copy(x):
    """Copy an interpreter output so a later call cannot clobber it.

    Arrays are copied; tuples are rebuilt from copies of their members
    (each member must support .copy()); scalars pass through unchanged.
    """
    if isinstance(x, np.ndarray):
        return x.copy()
    if isinstance(x, tuple):
        return tuple(member.copy() for member in x)
    if np.isscalar(x):
        return x  # xxx is this case ok?
    raise NotImplementedError
def typecheck_args(numargs, types):
    """Assert that each numeric argument matches its declared type.

    Checks argument count, then dtype and ndim for tensor-typed entries.
    """
    actual = len(numargs)
    expected = len(types)
    assert actual == expected, "wrong number of arguments. got %i, expected %i" % (actual, expected)
    for (numarg, typ) in zip(numargs, types):
        if isinstance(typ, core.TensorType):
            assert numarg.dtype == typ.dtype
            assert numarg.ndim == typ.ndim
# ================================================================
# Utils
# ================================================================
def _list_to_json(xs):
    """Serialize each element via its to_json() method, returning a list."""
    return [item.to_json() for item in xs]
def _is_data_mutable(node):
    """A node's data may be mutated in place unless it is an input node
    or its op is a Constant."""
    if node.is_input():
        return False
    return not isinstance(node.op, core.Constant)
| {
"content_hash": "35b03755c3d288a8c9f45a652881f87a",
"timestamp": "",
"source": "github",
"line_count": 900,
"max_line_length": 162,
"avg_line_length": 40.66888888888889,
"alnum_prop": 0.5859789082563794,
"repo_name": "RichardWarfield/cgt",
"id": "b9466f0d00c8d9b7a314a512f331357ea2003dd5",
"size": "36602",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "cgt/compilation.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "3225"
},
{
"name": "C++",
"bytes": "49432"
},
{
"name": "CMake",
"bytes": "21526"
},
{
"name": "Cuda",
"bytes": "5004"
},
{
"name": "Python",
"bytes": "300501"
}
],
"symlink_target": ""
} |
"""
EasyBuild support for Python packages that are configured with CMake, implemented as an easyblock
@author: Stijn De Weirdt (Ghent University)
@author: Dries Verdegem (Ghent University)
@author: Kenneth Hoste (Ghent University)
@author: Pieter De Baets (Ghent University)
@author: Jens Timmerman (Ghent University)
"""
from easybuild.easyblocks.generic.cmakemake import CMakeMake
from easybuild.easyblocks.generic.pythonpackage import PythonPackage
class CMakePythonPackage(CMakeMake, PythonPackage):
    """Build a Python package and module with cmake.
    Some packages use cmake to first build and install C Python packages
    and then put the Python package in lib/pythonX.Y/site-packages.
    We install this in a separate location and generate a module file
    which sets the PYTHONPATH.
    We use the default CMake implementation, and use make_module_extra from PythonPackage.
    """
    def __init__(self, *args, **kwargs):
        """Initialize with PythonPackage."""
        PythonPackage.__init__(self, *args, **kwargs)
    def configure_step(self, *args, **kwargs):
        """Main configuration using cmake, after PythonPackage's Python setup."""
        PythonPackage.configure_step(self, *args, **kwargs)
        return CMakeMake.configure_step(self, *args, **kwargs)
    def build_step(self, *args, **kwargs):
        """Build Python package with cmake"""
        return CMakeMake.build_step(self, *args, **kwargs)
    def install_step(self):
        """Install with cmake install"""
        return CMakeMake.install_step(self)
    def sanity_check_step(self, *args, **kwargs):
        """
        Custom sanity check for Python packages (delegates to PythonPackage)
        """
        return PythonPackage.sanity_check_step(self, *args, **kwargs)
    def make_module_extra(self):
        """Add extra Python package module parameters (e.g. PYTHONPATH)"""
        return PythonPackage.make_module_extra(self)
| {
"content_hash": "e0fbd1ed9dc671497f4aa485bd07624b",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 97,
"avg_line_length": 35.16981132075472,
"alnum_prop": 0.6958154506437768,
"repo_name": "ULHPC/modules",
"id": "6a83197b3c980cd1e8f3283412e1d0b9feadb1fd",
"size": "2922",
"binary": false,
"copies": "10",
"ref": "refs/heads/devel",
"path": "easybuild/easybuild-easyblocks/easybuild/easyblocks/generic/cmakepythonpackage.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Groff",
"bytes": "36174"
},
{
"name": "Perl",
"bytes": "34780"
},
{
"name": "Python",
"bytes": "2711250"
},
{
"name": "Ruby",
"bytes": "932"
},
{
"name": "Shell",
"bytes": "51560"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from ircb.models import create_tables
from .network import NetworkStore
from .client import ClientStore
from .network_message_store import NetworkMessageStore
from .channel import ChannelStore
def initialize():
    """Create the database tables, then initialize the individual stores.

    NOTE(review): NetworkMessageStore is imported and exported by this
    package but not initialized here — confirm whether that is intentional.
    """
    create_tables()
    NetworkStore.initialize()
    ClientStore.initialize()
    ChannelStore.initialize()
# Public API of the stores package.
__all__ = [
    'ClientStore',
    'NetworkStore',
    'NetworkMessageStore',
    'ChannelStore'
]
| {
"content_hash": "35252bfea8036d5e20b7b4dab8a82291",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 54,
"avg_line_length": 21.045454545454547,
"alnum_prop": 0.7386609071274298,
"repo_name": "Ghost-script/ircb",
"id": "d16ffcb1e13b94b45f932b131c0c349246f79cdf",
"size": "463",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "ircb/stores/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "45337"
}
],
"symlink_target": ""
} |
"""
This script runs the application using a development server.
It contains the definition of routes and views for the application.
"""
from flask import Flask
import ast
app = Flask(__name__)
# routes.py after app is created, circular references
# noinspection PyPep8
from routes import *
# noinspection PyPep8
import globes as gb
gb.global_init()
# App settings: folder locations come from the globes module.
app.config['UPLOAD_FOLDER'] = gb.UPLOAD_FOLDER
app.config['STATIC_FOLDER'] = gb.STATIC_FOLDER
app.config['USER_FOLDER'] = gb.USER_FOLDER
# NOTE(review): hard-coded secret key checked into source — this should be
# loaded from configuration/environment for any non-development deployment.
app.secret_key = 'A0Zr98j/3yX R~XHH!jmN]LWX/,?RT'
# Make the WSGI interface available at the top level so wfastcgi can get it.
wsgi_app = app.wsgi_app
# launch server (development entry point)
if __name__ == '__main__':
    import os
    # Host/port come from the environment, with localhost:5555 defaults.
    HOST = os.environ.get('SERVER_HOST', 'localhost')
    try:
        PORT = int(os.environ.get('SERVER_PORT', '5555'))
    except ValueError:
        # Non-numeric SERVER_PORT falls back to the default port.
        PORT = 5555
    # Some config testing for lists
    style = ast.literal_eval(GlobalConfig.get('DefaultStyles', '0!default'))
    print(style)
    import mapping.gdal_mapping as gdm
    gdm.hello()
    # Bind to localhost only unless config explicitly allows remote access.
    local_only = GlobalConfig.getboolean('ApplicationConfig', 'local_access_only')
    if local_only:
        app.run(HOST, PORT, threaded=True)
    else:
        accept_remote_connection = "0.0.0.0"
        app.run(accept_remote_connection, PORT, threaded=True)
| {
"content_hash": "a5f8181bc8433cef4969e8490e32cb8b",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 82,
"avg_line_length": 26.176470588235293,
"alnum_prop": 0.6891385767790262,
"repo_name": "cliftbar/FlaPyDisaster",
"id": "863310b9582fc8b99ec48a7c634c90df3a3b075b",
"size": "1337",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "FlaPyDisaster/app.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "14648"
},
{
"name": "HTML",
"bytes": "69626"
},
{
"name": "JavaScript",
"bytes": "399244"
},
{
"name": "Python",
"bytes": "149891"
}
],
"symlink_target": ""
} |
"""
Diagonal single qubit gate.
"""
from qiskit import CompositeGate
from qiskit import Gate
from qiskit import InstructionSet
from qiskit import QuantumCircuit
from qiskit import QuantumRegister
from qiskit.extensions.standard import header # pylint: disable=unused-import
class U1Gate(Gate):
    """Diagonal single-qubit gate."""
    def __init__(self, theta, qubit, circ=None):
        """Create new diagonal single-qubit gate.

        theta -- the gate's angle parameter
        qubit -- (QuantumRegister, index) pair the gate acts on
        circ -- circuit this gate belongs to (optional)
        """
        super().__init__("u1", [theta], [qubit], circ)
    def qasm(self):
        """Return OPENQASM string."""
        qubit = self.arg[0]
        theta = self.param[0]
        return self._qasmif("u1(%s) %s[%d];" % (
            theta, qubit[0].openqasm_name, qubit[1]))
    def inverse(self):
        """Invert this gate.

        NOTE: mutates this gate in place (negates theta) and returns self.
        """
        self.param[0] = -self.param[0]
        return self
    def reapply(self, circ):
        """Reapply this gate to corresponding qubits in circ."""
        self._modifiers(circ.u1(self.param[0], self.arg[0]))
def u1(self, theta, q):
    """Apply u1 with angle theta to q.

    q may be a single (register, index) qubit or a whole QuantumRegister,
    in which case the gate is applied to every qubit in the register and
    an InstructionSet covering them is returned.
    """
    if isinstance(q, QuantumRegister):
        instructions = InstructionSet()
        for j in range(q.size):
            instructions.add(self.u1(theta, (q, j)))
        return instructions
    self._check_qubit(q)
    return self._attach(U1Gate(theta, q, self))
# Attach u1 as a method on circuits and composite gates.
QuantumCircuit.u1 = u1
CompositeGate.u1 = u1
| {
"content_hash": "47ffeea0d91b9e13ff9e5f5011ad26b5",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 78,
"avg_line_length": 27.857142857142858,
"alnum_prop": 0.6249084249084249,
"repo_name": "atilag/qiskit-sdk-py",
"id": "62cd54b38d0af50a3c76b60f20b1336f7404d59d",
"size": "2100",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "qiskit/extensions/standard/u1.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "2582"
},
{
"name": "C++",
"bytes": "328941"
},
{
"name": "CMake",
"bytes": "18631"
},
{
"name": "Makefile",
"bytes": "5608"
},
{
"name": "Python",
"bytes": "1237474"
},
{
"name": "Shell",
"bytes": "8385"
}
],
"symlink_target": ""
} |
"""Tests for the experimental input pipeline ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import namedtuple
import threading
import time
import warnings
from absl.testing import parameterized
import numpy as np
from tensorflow.core.framework import attr_value_pb2
from tensorflow.python.client import session
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import lookup_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import script_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import test
class MapDatasetTest(test.TestCase, parameterized.TestCase):
  def _buildMapDataset(self, components, count):
    """Pipeline: tensor slices of `components` -> map(square each) -> repeat(count)."""
    def _map_fn(x, y, z):
      return math_ops.square(x), math_ops.square(y), math_ops.square(z)
    return (dataset_ops.Dataset.from_tensor_slices(components).map(_map_fn)
            .repeat(count))
  def testMapDataset(self):
    """Test a dataset that maps a TF function across its input elements."""
    # The pipeline is TensorSliceDataset -> MapDataset(square_3) ->
    # RepeatDataset(count).
    components = (np.arange(7),
                  np.array([[1, 2, 3]]) * np.arange(7)[:, np.newaxis],
                  np.array(37.0) * np.arange(7))
    count = array_ops.placeholder(dtypes.int64, shape=[])
    dataset = self._buildMapDataset(components, count)
    iterator = dataset.make_initializable_iterator()
    init_op = iterator.initializer
    get_next = iterator.get_next()
    # Static shapes of the mapped elements match the per-slice input shapes.
    self.assertEqual([c.shape[1:] for c in components],
                     [t.shape for t in get_next])
    with self.cached_session() as sess:
      # Test single-threaded access to the iterator.
      sess.run(init_op, feed_dict={count: 14})
      for _ in range(14):
        for i in range(7):
          result = sess.run(get_next)
          for component, result_component in zip(components, result):
            self.assertAllEqual(component[i]**2, result_component)
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(get_next)
      # Test multi-threaded access to the same iterator.
      sess.run(init_op, feed_dict={count: 18})
      results = []
      def iterator_thread():
        while True:
          try:
            results.append(sess.run(get_next))
          except errors.OutOfRangeError:
            return
      threads = [self.checkedThread(target=iterator_thread) for _ in range(8)]
      for t in threads:
        t.start()
      for t in threads:
        t.join()
      # `results` will contain the same elements components**2
      # repeated 18 times, but in a non-deterministic order. Sort the
      # results, and assert that each element of components**2 is
      # produced 18 times.
      results.sort(key=lambda x: x[0])
      for i in range(7):
        for j in range(18):
          for component, result_component in zip(components,
                                                 results[i * 18 + j]):
            self.assertAllEqual(component[i]**2, result_component)
  def _buildParallelMapDataset(self, components, count, num_parallel_calls,
                               output_buffer_size):
    """Like _buildMapDataset, but with parallel map calls and a prefetch buffer."""
    def _map_fn(x, y, z):
      return math_ops.square(x), math_ops.square(y), math_ops.square(z)
    return (dataset_ops.Dataset.from_tensor_slices(components)
            .map(_map_fn, num_parallel_calls=num_parallel_calls)
            .prefetch(output_buffer_size)
            .repeat(count))
  def testParallelMapDataset(self):
    """Test a dataset that maps a TF function across its input elements."""
    # The pipeline is TensorSliceDataset -> ParallelMapDataset(square_3) ->
    # RepeatDataset(count).
    components = (np.arange(7),
                  np.array([[1, 2, 3]]) * np.arange(7)[:, np.newaxis],
                  np.array(37.0) * np.arange(7))
    count = array_ops.placeholder(dtypes.int64, shape=[])
    num_parallel_calls = array_ops.placeholder(dtypes.int32, shape=[])
    output_buffer_size = array_ops.placeholder(dtypes.int64, shape=[])
    dataset = self._buildParallelMapDataset(
        components, count, num_parallel_calls, output_buffer_size)
    iterator = dataset.make_initializable_iterator()
    init_op = iterator.initializer
    get_next = iterator.get_next()
    self.assertEqual([c.shape[1:] for c in components],
                     [t.shape for t in get_next])
    with self.cached_session() as sess:
      def do_test(num_parallel_calls_val, output_buffer_size_val):
        # Test single-threaded access to the iterator.
        sess.run(init_op, feed_dict={
            count: 14,
            num_parallel_calls: num_parallel_calls_val,
            output_buffer_size: output_buffer_size_val})
        for _ in range(14):
          for i in range(7):
            result = sess.run(get_next)
            for component, result_component in zip(components, result):
              self.assertAllEqual(component[i]**2, result_component)
        with self.assertRaises(errors.OutOfRangeError):
          sess.run(get_next)
        # Test multi-threaded access to the same iterator.
        sess.run(init_op, feed_dict={
            count: 18,
            num_parallel_calls: num_parallel_calls_val,
            output_buffer_size: output_buffer_size_val})
        results = []
        def iterator_thread():
          while True:
            try:
              results.append(sess.run(get_next))
            except errors.OutOfRangeError:
              return
        threads = [self.checkedThread(target=iterator_thread)
                   for _ in range(64)]
        for t in threads:
          t.start()
        for t in threads:
          t.join()
        # `results` will contain the same elements components**2
        # repeated 18 times, but in a non-deterministic order. Sort the
        # results, and assert that each element of components**2 is
        # produced 18 times.
        results.sort(key=lambda x: x[0])
        for i in range(7):
          for j in range(18):
            for component, result_component in zip(components,
                                                   results[i * 18 + j]):
              self.assertAllEqual(component[i]**2, result_component)
      # Exercise several parallelism / buffering configurations.
      for num_parallel_calls_val, output_buffer_size_val in [
          (1, 1), (1, 2), (2, 2), (2, 4), (8, 8), (8, 16)]:
        do_test(num_parallel_calls_val, output_buffer_size_val)
  def testImplicitDisposeParallelMapDataset(self):
    # Tests whether a parallel map dataset will be cleaned up correctly when
    # the pipeline does not run it until exhaustion.
    # The pipeline is TensorSliceDataset -> MapDataset(square_3) ->
    # RepeatDataset(1000).
    components = (np.arange(1000),
                  np.array([[1, 2, 3]]) * np.arange(1000)[:, np.newaxis],
                  np.array(37.0) * np.arange(1000))
    dataset = self._buildParallelMapDataset(components, 1000, 100, 100)
    # NOTE(mrry): Also test that the prefetching thread is cancelled correctly.
    dataset = dataset.prefetch(100)
    iterator = dataset.make_initializable_iterator()
    init_op = iterator.initializer
    get_next = iterator.get_next()
    with self.cached_session() as sess:
      sess.run(init_op)
      # Consume only a few elements; teardown must not hang or leak.
      for _ in range(3):
        sess.run(get_next)
  def testParallelMapUnspecifiedOutputSize(self):
    """map() with num_parallel_calls but no explicit output buffer runs cleanly."""
    components = np.array([1., 2., 3., np.nan, 5.]).astype(np.float32)
    dataset = (dataset_ops.Dataset.from_tensor_slices(components)
               .map(lambda x: array_ops.check_numerics(x, "message"),
                    num_parallel_calls=2))
    iterator = dataset.make_initializable_iterator()
    init_op = iterator.initializer
    get_next = iterator.get_next()
    with self.cached_session() as sess:
      sess.run(init_op)
      # Only the first 3 elements are consumed; the NaN at index 3 is
      # never evaluated by this test.
      for _ in range(3):
        sess.run(get_next)
  def testParallelMapError(self):
    """A failing map function surfaces its error through the parallel iterator."""
    components = np.array([1., 2., 3., np.nan, 5.]).astype(np.float32)
    dataset = (dataset_ops.Dataset.from_tensor_slices(components)
               .map(lambda x: array_ops.check_numerics(x, "message"),
                    num_parallel_calls=2))
    iterator = dataset.make_initializable_iterator()
    init_op = iterator.initializer
    get_next = iterator.get_next()
    with self.cached_session() as sess:
      sess.run(init_op)
      for _ in range(3):
        sess.run(get_next)
      # The 4th element is NaN, so `array_ops.check_numerics()` should fail.
      with self.assertRaises(errors.InvalidArgumentError):
        sess.run(get_next)
      # The element after the failing one is still produced, then the end
      # of the sequence is reached.
      sess.run(get_next)
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(get_next)
  def testPrefetchError(self):
    """A failing map function surfaces its error through a prefetch buffer."""
    components = np.array([1., 2., 3., np.nan, 5.]).astype(np.float32)
    dataset = (dataset_ops.Dataset.from_tensor_slices(components)
               .map(lambda x: array_ops.check_numerics(x, "message"))
               .prefetch(2))
    iterator = dataset.make_initializable_iterator()
    init_op = iterator.initializer
    get_next = iterator.get_next()
    with self.cached_session() as sess:
      sess.run(init_op)
      for _ in range(3):
        sess.run(get_next)
      # The 4th element is NaN, so `array_ops.check_numerics()` should fail.
      with self.assertRaises(errors.InvalidArgumentError):
        sess.run(get_next)
      # The element after the failing one is still produced, then the end
      # of the sequence is reached.
      sess.run(get_next)
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(get_next)
  def testCaptureHashTable(self):
    """A map function may capture and use a lookup-table resource."""
    # NOTE(mrry): We must use the V2 variants of `HashTable`
    # etc. because these produce a `tf.resource`-typed output that is
    # compatible with the in-graph function implementation.
    default_val = -1
    keys = constant_op.constant(["brain", "salad", "surgery"])
    values = constant_op.constant([0, 1, 2], dtypes.int64)
    table = lookup_ops.HashTable(
        lookup_ops.KeyValueTensorInitializer(keys, values), default_val)
    input_sentences = dataset_ops.Dataset.from_tensor_slices(
        ["brain brain tank salad surgery", "surgery brain"])
    # Tokenize each sentence, then look up every token in the table.
    iterator = (input_sentences
                .map(lambda x: string_ops.string_split([x]).values)
                .map(table.lookup)
                .make_initializable_iterator())
    init_op = iterator.initializer
    get_next = iterator.get_next()
    with self.cached_session() as sess:
      sess.run(table.init)
      sess.run(init_op)
      sess.run(get_next)
      sess.run(get_next)
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(get_next)
  def testCaptureQueue(self):
    """A map function may capture a queue and dequeue from it."""
    elements = np.random.randint(100, size=[200])
    queue = data_flow_ops.FIFOQueue(200, dtypes.int64, shapes=[])
    enqueue_op = queue.enqueue_many(elements)
    close_op = queue.close()
    iterator = (dataset_ops.Dataset.from_tensors(0).repeat(-1)
                .map(lambda _: queue.dequeue()).make_initializable_iterator())
    init_op = iterator.initializer
    get_next = iterator.get_next()
    with self.cached_session() as sess:
      sess.run(enqueue_op)
      sess.run(close_op)
      sess.run(init_op)
      for element in elements:
        self.assertEqual(element, sess.run(get_next))
      # The queue is closed, so draining it ends the otherwise-infinite dataset.
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(get_next)
  def testCaptureSameResourceMultipleTimes(self):
    """A map function can capture two handles created with the same shared_name."""
    elements = np.random.randint(100, size=[200])
    # Both queue objects are created with the same shared_name, so the map
    # function captures two distinct handles to (presumably) the same
    # underlying queue resource.
    queue = data_flow_ops.FIFOQueue(
        200, dtypes.int64, shapes=[], shared_name="shared_queue")
    queue_2 = data_flow_ops.FIFOQueue(
        200, dtypes.int64, shapes=[], shared_name="shared_queue")
    enqueue_op = queue.enqueue_many(elements)
    close_op = queue.close()
    iterator = (dataset_ops.Dataset.from_tensors(0).repeat(-1)
                .map(lambda _: (queue.dequeue(), queue_2.dequeue()))
                .make_initializable_iterator())
    init_op = iterator.initializer
    get_next = iterator.get_next()
    with self.cached_session() as sess:
      sess.run(enqueue_op)
      sess.run(close_op)
      sess.run(init_op)
      for i in range(100):
        # Each element holds two consecutive values; the order of the two
        # dequeues within an element is not fixed, so compare sorted.
        self.assertEqual(sorted([elements[i * 2], elements[i * 2 + 1]]),
                         sorted(sess.run(get_next)))
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(get_next)
  def testCaptureVariable(self):
    """A map function can capture and mutate a resource variable."""
    counter_var = variable_scope.get_variable(
        "counter", (), dtypes.int32, use_resource=True)
    iterator = (dataset_ops.Dataset.from_tensors(0).repeat(10)
                .map(lambda _: counter_var.assign_add(1))
                .make_initializable_iterator())
    init_op = iterator.initializer
    get_next = iterator.get_next()
    with self.cached_session() as sess:
      sess.run(counter_var.initializer)
      sess.run(init_op)
      for i in range(10):
        # The variable increments exactly once per element produced.
        self.assertEqual(i, sess.run(counter_var))
        self.assertEqual(i + 1, sess.run(get_next))
      self.assertEqual(10, sess.run(counter_var))
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(get_next)
      # Exhausting the iterator must not perform an extra increment.
      self.assertEqual(10, sess.run(counter_var))
  def testCaptureUninitializedVariableError(self):
    """Using an uninitialized captured variable fails with NotFoundError."""
    counter_var = variable_scope.get_variable(
        "counter", (), dtypes.int32, use_resource=True)
    iterator = (dataset_ops.Dataset.from_tensors(0).repeat(10)
                .map(lambda _: counter_var.assign_add(1))
                .make_initializable_iterator())
    init_op = iterator.initializer
    get_next = iterator.get_next()
    with self.cached_session() as sess:
      # Note: counter_var.initializer is deliberately NOT run here.
      sess.run(init_op)
      with self.assertRaises(errors.NotFoundError):
        sess.run(get_next)
  def testSeededStatefulOperatorIsProperlyStateful(self):
    """A seeded random op in a map is stateful within a run, repeatable across runs."""
    iterator = (dataset_ops.Dataset.from_tensors(0).repeat(10)
                .map(lambda _: random_ops.random_uniform((), seed=11)).batch(2)
                .make_initializable_iterator())
    init_op = iterator.initializer
    get_next = iterator.get_next()
    with self.cached_session() as sess:
      sess.run(init_op)
      random_values = []
      with self.assertRaises(errors.OutOfRangeError):
        while True:
          random_values.extend(sess.run(get_next))
      self.assertEqual(10, len(random_values))
      # Successive draws within one run must differ (the op is stateful).
      self.assertGreater(np.abs(np.diff(random_values)).max(), 1e-6)
      sess.run(init_op)
      random_values_2 = []
      with self.assertRaises(errors.OutOfRangeError):
        while True:
          random_values_2.extend(sess.run(get_next))
      # Randomness is repeatable given same seed
      self.assertAllClose(random_values, random_values_2)
  def testStatefulMapKeepsStateAcrossIterators(self):
    """The random op's state advances across repeated passes over the data."""
    iterator = (dataset_ops.Dataset.from_tensors(0).repeat(10)
                .map(lambda _: random_ops.random_uniform((), seed=11))
                .repeat(1000)
                .batch(10)
                .make_initializable_iterator())
    init_op = iterator.initializer
    get_next = iterator.get_next()
    with self.cached_session() as sess:
      sess.run(init_op)
      random_values = sess.run(get_next)
      # Assert that one of the next 99 batches yielded by the iterator is
      # different from the first.
      i = 0
      while i < 99:
        if np.any(random_values != sess.run(get_next)):
          break
        i += 1
      # If all 99 follow-up batches matched, i == 99 and this fails.
      self.assertLess(i, 99)
def testMapDict(self):
iterator = (dataset_ops.Dataset.range(10)
.map(lambda x: {"foo": x * 2, "bar": x ** 2})
.map(lambda d: d["foo"] + d["bar"])
.make_initializable_iterator())
init_op = iterator.initializer
get_next = iterator.get_next()
with self.cached_session() as sess:
sess.run(init_op)
for i in range(10):
self.assertEqual(i * 2 + i ** 2, sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
  def testMapNamedtuple(self, count=10):
    """Mapping over namedtuple elements matches mapping over plain tuples.

    Args:
      count: Number of elements in each constructed dataset.
    """
    # construct dataset of tuples
    labels = dataset_ops.Dataset.range(count)
    images = labels.map(lambda l: -l)
    dataset_tuple = dataset_ops.Dataset.zip((labels, images))
    # convert dataset of tuples to dataset of namedtuples
    example = namedtuple("Example", ["label", "image"])
    dataset_namedtuple = dataset_tuple.map(example)
    def preprocess_tuple(label, image):
      image = 2 * image
      return label, image
    def preprocess_namedtuple(example):
      return example._replace(image=2 * example.image)
    # preprocess both datasets
    dataset_tuple = dataset_tuple.map(preprocess_tuple)
    dataset_namedtuple = dataset_namedtuple.map(preprocess_namedtuple)
    next_tuple = dataset_tuple.make_one_shot_iterator().get_next()
    next_namedtuple = dataset_namedtuple.make_one_shot_iterator().get_next()
    # make sure both datasets contain the same data
    with self.cached_session() as sess:
      for i in range(count):
        tuple_, namedtuple_ = sess.run([next_tuple, next_namedtuple])
        self.assertEqual(tuple_, namedtuple_)
        self.assertEqual(tuple_, (i, -2 * i))
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(next_namedtuple)
  def testUseStepContainerInMap(self):
    """A map function may itself invoke `map_fn` (nested functional op)."""
    row = np.arange(6)
    iterator = (
        dataset_ops.Dataset.from_tensors(row)
        .map(lambda elems: functional_ops.map_fn(lambda x: x * x, elems))
        .make_initializable_iterator())
    init_op = iterator.initializer
    get_next = iterator.get_next()
    with self.cached_session() as sess:
      sess.run(init_op)
      # Element-wise squaring of the single row.
      self.assertAllEqual(row ** 2, sess.run(get_next))
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(get_next)
  def testPrefetch(self):
    """Prefetching yields correct values and actually buffers ahead."""
    # We will use this event to test that `_map_py_func()` has been
    # invoked a certain number of times (6 times, to be exact) after
    # consuming fewer elements from the iterator.
    ev = threading.Event()
    set_event_during_invocation = 5
    def _map_py_func(x):
      if x == set_event_during_invocation:
        ev.set()
      return x * x
    def _map_fn(x):
      return script_ops.py_func(_map_py_func, [x], x.dtype)
    # The buffer size is fed at initialization time so all configurations
    # share one graph.
    buffer_size_placeholder = array_ops.placeholder(dtypes.int64, shape=[])
    iterator = (
        dataset_ops.Dataset.range(100)
        .map(_map_fn)
        .prefetch(buffer_size_placeholder)
        .make_initializable_iterator())
    init_op = iterator.initializer
    get_next = iterator.get_next()
    with self.cached_session() as sess:
      # Simple test that prefetch yields the expected values in the
      # expected order.
      for buffer_size in [1, 10, 100, 1000]:
        sess.run(init_op, feed_dict={buffer_size_placeholder: buffer_size})
        for i in range(100):
          self.assertEqual(i * i, sess.run(get_next))
        with self.assertRaises(errors.OutOfRangeError):
          sess.run(get_next)
      # We can indirectly observe that varying the buffer size has the
      # intended effect by observing when `ev` is set (on the 6th
      # invocation of `_map_py_func()`).
      # NOTE(mrry): We do not test with `buffer_size ==
      # set_event_during_invocation`, because we must consume at least
      # one element to start the prefetching.
      for buffer_size in range(1, set_event_during_invocation):
        event_will_be_set_after_consuming = (
            set_event_during_invocation - buffer_size + 1)
        ev.clear()
        sess.run(init_op, feed_dict={buffer_size_placeholder: buffer_size})
        for i in range(event_will_be_set_after_consuming):
          self.assertFalse(ev.is_set())
          self.assertEqual(i * i, sess.run(get_next))
        ev.wait()
        for i in range(event_will_be_set_after_consuming, 100):
          self.assertEqual(i * i, sess.run(get_next))
        with self.assertRaises(errors.OutOfRangeError):
          sess.run(get_next)
def testReturnList(self):
iterator = (dataset_ops.Dataset.range(10)
.map(lambda x: [x, constant_op.constant(37.0)])
.make_initializable_iterator())
init_op = iterator.initializer
get_next = iterator.get_next()
with self.cached_session() as sess:
sess.run(init_op)
for i in range(10):
self.assertEqual((i, 37.0), sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
  def testMultiOutputPyFunc(self):
    """A map function may wrap a py_func that returns multiple tensors."""
    # The `tf.py_func()` op returns a list of tensors for its outputs.
    def _map_fn(x_tensor):
      def _map_py_func(x):
        return x, np.array(37.0, dtype=np.float64)
      return script_ops.py_func(
          _map_py_func, [x_tensor], [dtypes.int64, dtypes.float64])
    iterator = (dataset_ops.Dataset.range(10)
                .map(_map_fn)
                .make_initializable_iterator())
    init_op = iterator.initializer
    get_next = iterator.get_next()
    with self.cached_session() as sess:
      sess.run(init_op)
      for i in range(10):
        self.assertEqual((i, 37.0), sess.run(get_next))
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(get_next)
  def assertSparseValuesEqual(self, a, b):
    """Asserts that two SparseTensorValues have identical components."""
    self.assertAllEqual(a.indices, b.indices)
    self.assertAllEqual(a.values, b.values)
    self.assertAllEqual(a.dense_shape, b.dense_shape)
  def testSparse(self):
    """A map function may return SparseTensor-structured elements."""
    def _sparse(i):
      # A 1x1 sparse value whose single entry equals `i`.
      return sparse_tensor.SparseTensorValue(
          indices=np.array([[0, 0]]),
          values=(i * np.array([1])),
          dense_shape=np.array([1, 1]))
    iterator = (dataset_ops.Dataset.range(10)
                .map(_sparse)
                .make_initializable_iterator())
    init_op = iterator.initializer
    get_next = iterator.get_next()
    with self.cached_session() as sess:
      sess.run(init_op)
      for i in range(10):
        actual = sess.run(get_next)
        self.assertTrue(isinstance(actual, sparse_tensor.SparseTensorValue))
        self.assertSparseValuesEqual(actual, _sparse(i))
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(get_next)
  def testSparseChain(self):
    """Chained maps preserve sparse structure between stages."""
    def _sparse(i):
      return sparse_tensor.SparseTensorValue(
          indices=np.array([[0, 0]]),
          values=(i * np.array([1])),
          dense_shape=np.array([1, 1]))
    def _check(i):
      # The second map must receive a sparse tensor from the first.
      self.assertTrue(sparse_tensor.is_sparse(i))
      return sparse_ops.sparse_concat(0, [i, i])
    iterator = (
        dataset_ops.Dataset.range(10).map(_sparse).map(_check)
        .make_initializable_iterator())
    init_op = iterator.initializer
    get_next = iterator.get_next()
    with self.cached_session() as sess:
      sess.run(init_op)
      for i in range(10):
        actual = sess.run(get_next)
        self.assertTrue(isinstance(actual, sparse_tensor.SparseTensorValue))
        # Compare against an eager evaluation of the same transformation.
        self.assertSparseValuesEqual(actual, _check(_sparse(i)).eval())
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(get_next)
  def testParallelMapOutOfRangeError(self):
    """A StopIteration raised inside a parallel map ends iteration cleanly."""
    def raising_py_func(i):
      # Raising StopIteration from the py_func signals end-of-sequence, so
      # elements 100..104 are never produced.
      if i == 100:
        raise StopIteration()
      else:
        return i
    iterator = (
        dataset_ops.Dataset.range(105)
        .map(lambda x: script_ops.py_func(raising_py_func, [x], dtypes.int64),
             num_parallel_calls=2)
        .make_initializable_iterator())
    init_op = iterator.initializer
    get_next = iterator.get_next()
    with self.cached_session() as sess:
      sess.run(init_op)
      for i in range(100):
        self.assertEqual(i, sess.run(get_next))
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(get_next)
  def testConstantOutput(self):
    """A map function may mix tensors with Python constants in its output."""
    iterator = (
        dataset_ops.Dataset.range(10).map(lambda x: [x, "hello", 10])
        .make_initializable_iterator())
    init_op = iterator.initializer
    get_next = iterator.get_next()
    with self.cached_session() as sess:
      sess.run(init_op)
      for i in range(10):
        # String constants come back as bytes in the session result.
        self.assertEqual((i, b"hello", 10), sess.run(get_next))
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(get_next)
def testWarnOnLookupTable(self):
def collecting_function(x):
_ = lookup_ops.HashTable(
lookup_ops.KeyValueTensorInitializer([], []), 0.0, name="t1")
return x
warnings.simplefilter("always")
with warnings.catch_warnings(record=True) as w:
_ = dataset_ops.Dataset.range(10).map(collecting_function)
# NOTE(mrry): Python 3 prints other warnings in addition to the one we are
# testing, so we search for the expected warning.
self.assertGreaterEqual(len(w), 1)
found_warning = False
for warning in w:
if ("Creating lookup tables inside a function passed to Dataset.map() is "
"not supported." in str(warning)):
found_warning = True
break
self.assertTrue(found_warning)
  def testNestedDatasetError(self):
    """Returning a Dataset from a map function raises NotImplementedError."""
    dataset = dataset_ops.Dataset.from_tensors([1.0, 2.0, 3.0])
    with self.assertRaisesRegexp(
        NotImplementedError, r"The Dataset.map\(\) transformation does not "
        "currently support nested datasets as outputs."):
      _ = dataset.map(dataset_ops.Dataset.from_tensor_slices)
  def testReturnValueError(self):
    """Returning None from a map function raises a TypeError."""
    dataset = dataset_ops.Dataset.from_tensors([1.0, 2.0, 3.0])
    with self.assertRaisesRegexp(
        TypeError, r"Unsupported return value from function passed to "
        r"Dataset.map\(\): None."):
      _ = dataset.map(lambda x: None)
  def testBrokenFunctionErrorOnInitialization(self):
    """A map function that fails to instantiate surfaces the error at init time."""
    dataset = dataset_ops.Dataset.from_tensor_slices([1.0, 2.0, 3.0])
    def broken_function(_):
      """A function deliberately designed to fail on instantiation."""
      value = []
      tensor_value = attr_value_pb2.AttrValue()
      tensor_value.tensor.CopyFrom(
          tensor_util.make_tensor_proto(
              value, dtype=dtypes.float32, shape=[0], verify_shape=False))
      dtype_value = attr_value_pb2.AttrValue(type=dtypes.int32.as_datatype_enum)
      # Create a "Const" op with a `tf.float32` value and a `tf.int32` type
      # attr.
      const_tensor = ops.get_default_graph().create_op(
          "Const", [], [dtypes.int32],
          attrs={
              "value": tensor_value,
              "dtype": dtype_value
          },
          name="BrokenConst").outputs[0]
      return const_tensor
    dataset = dataset.map(broken_function)
    iterator = dataset.make_initializable_iterator()
    with self.cached_session() as sess:
      # The mismatched value/dtype attrs are rejected when the iterator
      # (and hence the function) is initialized, not at graph-build time.
      with self.assertRaisesRegexp(errors.InvalidArgumentError, "BrokenConst"):
        sess.run(iterator.initializer)
  # pylint: disable=g-long-lambda
  @parameterized.named_parameters(
      ("Map", lambda dataset, func:
       dataset_ops.MapDataset(dataset, func, use_inter_op_parallelism=False)),
      ("ParallelMap", lambda dataset, func:
       dataset_ops.ParallelMapDataset(dataset, func, num_parallel_calls=1,
                                      use_inter_op_parallelism=False)),
  )
  def testNoInterOpParallelism(self, make_dataset_fn):
    """With inter-op parallelism disabled, the map body runs on one thread."""
    dataset = dataset_ops.Dataset.from_tensors(0)
    def _get_tid():
      return np.int64(threading.current_thread().ident)
    def _map_fn(_):
      # Ten py_funcs whose results record which thread executed them.
      tids = []
      for _ in range(10):
        tids.append(script_ops.py_func(_get_tid, [], dtypes.int64))
      return tids
    dataset = make_dataset_fn(dataset, _map_fn)
    iterator = dataset.make_one_shot_iterator()
    get_next = iterator.get_next()
    with self.cached_session() as sess:
      tids = sess.run(get_next)
      # All ten recorded thread ids must be identical.
      self.assertTrue(all(tids[0] == tid for tid in tids))
# pylint: enable=g-long-lambda
class MapDatasetBenchmark(test.Benchmark):
  """Microbenchmarks for `MapDataset`, with and without inter-op parallelism."""

  def _run_benchmark(self, op, sess):
    """Returns the median wall time of a single `sess.run(op)` call.

    Performs 5 warmup runs, then times 100 batches of 100 runs each and
    reports the per-run median across batches.
    """
    for _ in range(5):
      sess.run(op)
    deltas = []
    for _ in range(100):
      start = time.time()
      for _ in range(100):
        sess.run(op)
      end = time.time()
      deltas.append(end - start)
    return np.median(deltas) / 100

  def benchmarkChainOfMaps(self):
    """Measures latency of chains of identity map transformations."""
    chain_lengths = [0, 1, 2, 5, 10, 20, 50]
    for chain_length in chain_lengths:
      for use_inter_op_parallelism in [False, True]:
        with ops.Graph().as_default():
          dataset = dataset_ops.Dataset.from_tensors(0).repeat(None)
          for _ in range(chain_length):
            dataset = dataset_ops.MapDataset(
                dataset,
                lambda x: x,
                use_inter_op_parallelism=use_inter_op_parallelism)
          iterator = dataset.make_one_shot_iterator()
          next_element = iterator.get_next()
          with session.Session() as sess:
            median_wall_time = self._run_benchmark(next_element.op, sess)
            print("Map dataset chain length%s: %d Median wall time: %f" %
                  (" (single threaded mode)" if not use_inter_op_parallelism
                   else "", chain_length, median_wall_time))
            self.report_benchmark(
                iters=1000,
                wall_time=median_wall_time,
                name="benchmark_map_dataset_chain_latency_%d%s" %
                (chain_length, "_single_threaded"
                 if not use_inter_op_parallelism else ""))

  def benchmarkMapFanOut(self):
    """Measures latency of one map over tuple elements of varying width."""
    fan_outs = [1, 2, 5, 10, 20, 50, 100]
    for fan_out in fan_outs:
      for use_inter_op_parallelism in [False, True]:
        with ops.Graph().as_default():
          dataset = dataset_ops.Dataset.from_tensors(
              tuple(0 for _ in range(fan_out))).repeat(None)
          dataset = dataset_ops.MapDataset(
              dataset,
              lambda *xs: xs,
              use_inter_op_parallelism=use_inter_op_parallelism)
          iterator = dataset.make_one_shot_iterator()
          next_element = iterator.get_next()
          with session.Session() as sess:
            # Only the first component's op is run, as in the original.
            median_wall_time = self._run_benchmark(next_element[0].op, sess)
            print("Map dataset fan out%s: %d Median wall time: %f" %
                  (" (single threaded mode)" if not use_inter_op_parallelism
                   else "", fan_out, median_wall_time))
            self.report_benchmark(
                iters=1000,
                wall_time=median_wall_time,
                name="benchmark_map_dataset_fan_out_%d%s" %
                (fan_out, "_single_threaded"
                 if not use_inter_op_parallelism else ""))
# Standard TensorFlow test entry point.
if __name__ == "__main__":
  test.main()
| {
"content_hash": "a8faa31c1b3e1019d9422a5b4a58edf7",
"timestamp": "",
"source": "github",
"line_count": 824,
"max_line_length": 80,
"avg_line_length": 37.61407766990291,
"alnum_prop": 0.6263147706007615,
"repo_name": "xodus7/tensorflow",
"id": "ae04995436995c66a053784c40dc8c56bb59cff1",
"size": "31683",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow/python/data/kernel_tests/map_dataset_op_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "1286"
},
{
"name": "Batchfile",
"bytes": "9258"
},
{
"name": "C",
"bytes": "340946"
},
{
"name": "C#",
"bytes": "8446"
},
{
"name": "C++",
"bytes": "48861698"
},
{
"name": "CMake",
"bytes": "195699"
},
{
"name": "Dockerfile",
"bytes": "36400"
},
{
"name": "Go",
"bytes": "1240309"
},
{
"name": "HTML",
"bytes": "4681865"
},
{
"name": "Java",
"bytes": "834061"
},
{
"name": "Jupyter Notebook",
"bytes": "2604756"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "52618"
},
{
"name": "Objective-C",
"bytes": "15650"
},
{
"name": "Objective-C++",
"bytes": "99243"
},
{
"name": "PHP",
"bytes": "1357"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "PureBasic",
"bytes": "25356"
},
{
"name": "Python",
"bytes": "40952138"
},
{
"name": "Ruby",
"bytes": "553"
},
{
"name": "Shell",
"bytes": "459258"
},
{
"name": "Smarty",
"bytes": "6976"
}
],
"symlink_target": ""
} |
"""
Tests for the repo manager tool
"""
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import glob
import shutil
import tempfile
import unittest
import ga4gh.exceptions as exceptions
import ga4gh.datarepo as datarepo
import ga4gh.cli as cli
import ga4gh.datamodel as datamodel
import tests.paths as paths
class TestGetNameFromPath(unittest.TestCase):
    """
    Tests the method for deriving the default name of objects from file
    paths.
    """
    def verifyName(self, path, expected):
        # Small helper so each case reads as a (path, expected) pair.
        self.assertEqual(cli.getNameFromPath(path), expected)

    def testError(self):
        with self.assertRaises(ValueError):
            cli.getNameFromPath("")

    def testLocalDirectory(self):
        for path, expected in [
                ("no_extension", "no_extension"),
                ("x.y", "x"),
                ("x.y.z", "x")]:
            self.verifyName(path, expected)

    def testFullPaths(self):
        for path, expected in [
                ("/no_ext", "no_ext"),
                ("/x.y", "x"),
                ("/x.y.z", "x"),
                ("/a/no_ext", "no_ext"),
                ("/a/x.y", "x"),
                ("/a/x.y.z", "x")]:
            self.verifyName(path, expected)

    def testUrls(self):
        for path, expected in [
                ("file:///no_ext", "no_ext"),
                ("http://example.com/x.y", "x"),
                ("ftp://x.y.z", "x")]:
            self.verifyName(path, expected)

    def testDirectoryName(self):
        for path, expected in [
                ("/a/xy", "xy"),
                ("/a/xy/", "xy"),
                ("xy/", "xy"),
                ("xy", "xy")]:
            self.verifyName(path, expected)
class AbstractRepoManagerTest(unittest.TestCase):
    """
    Base class for repo manager tests
    """
    def setUp(self):
        # mkstemp returns an open OS-level file descriptor in addition to
        # the path; close it so each test does not leak a descriptor.
        fd, self._repoPath = tempfile.mkstemp(prefix="ga4gh_repoman_test")
        os.close(fd)
        # Remove the file so that `init` can create the repo from scratch.
        os.unlink(self._repoPath)

    def runCommand(self, cmd):
        """
        Runs the specified repo manager command line.
        """
        cli.RepoManager.runCommand(cmd.split())

    def tearDown(self):
        # Guard the unlink: if a test failed before the repo file was
        # created, an unconditional unlink would mask the original error.
        if os.path.exists(self._repoPath):
            os.unlink(self._repoPath)

    def readRepo(self):
        """
        Opens the repository under test in read mode and returns it.
        """
        repo = datarepo.SqlDataRepository(self._repoPath)
        repo.open(datarepo.MODE_READ)
        return repo

    def init(self):
        """
        Initialises an empty repository at the test path.
        """
        self.runCommand("init {}".format(self._repoPath))

    def addOntology(self):
        """
        Adds the test ontology, recording its name on the instance.
        """
        self._ontologyName = paths.ontologyName
        cmd = "add-ontology {} {}".format(self._repoPath, paths.ontologyPath)
        self.runCommand(cmd)

    def addDataset(self, datasetName=None):
        """
        Adds a dataset (default name "test_dataset") to the repository.
        """
        if datasetName is None:
            datasetName = "test_dataset"
        self._datasetName = datasetName
        cmd = "add-dataset {} {}".format(self._repoPath, datasetName)
        self.runCommand(cmd)

    def addReferenceSet(self):
        """
        Adds the test FASTA as a reference set named "test_rs".
        """
        self._referenceSetName = "test_rs"
        fastaFile = paths.faPath
        self.runCommand("add-referenceset {} {} --name={}".format(
            self._repoPath, fastaFile, self._referenceSetName))

    def addReadGroupSet(self):
        """
        Adds the test BAM as a read group set named "test_rgs".
        """
        bamFile = paths.bamPath
        self._readGroupSetName = "test_rgs"
        cmd = (
            "add-readgroupset {} {} {} --referenceSetName={} "
            "--name={}").format(
                self._repoPath, self._datasetName, bamFile,
                self._referenceSetName, self._readGroupSetName)
        self.runCommand(cmd)

    def addVariantSet(self):
        """
        Adds the test VCF directory as a variant set named "test_vs".
        """
        vcfDir = paths.vcfDirPath
        self._variantSetName = "test_vs"
        cmd = (
            "add-variantset {} {} {} --referenceSetName={} "
            "--name={}").format(
                self._repoPath, self._datasetName, vcfDir,
                self._referenceSetName, self._variantSetName)
        self.runCommand(cmd)

    def addFeatureSet(self):
        """
        Adds the test features file as a feature set. Requires a dataset,
        reference set and ontology to have been added first.
        """
        featuresPath = paths.featuresPath
        self._featureSetName = paths.featureSetName
        cmd = (
            "add-featureset {} {} {} --referenceSetName={} "
            "--ontologyName={}").format(
                self._repoPath, self._datasetName, featuresPath,
                self._referenceSetName, self._ontologyName)
        self.runCommand(cmd)

    def addPhenotypeAssociationSet(self):
        """
        Adds the test phenotype association set to the current dataset.
        """
        phenotypeAssociationSetPath = paths.phenotypeAssociationSetPath
        self._phenotypeAssociationSetName = "test_phenotypeAssociationSet"
        cmd = (
            "add-phenotypeassociationset {} {} {} -n {}").format(
                self._repoPath,
                self._datasetName,
                phenotypeAssociationSetPath,
                self._phenotypeAssociationSetName)
        self.runCommand(cmd)

    def getFeatureSet(self):
        """
        Returns the feature set added by addFeatureSet, re-read from disk.
        """
        repo = self.readRepo()
        dataset = repo.getDatasetByName(self._datasetName)
        featureSet = dataset.getFeatureSetByName(self._featureSetName)
        return featureSet
class TestAddFeatureSet(AbstractRepoManagerTest):
    """
    Tests for the add-featureset command.
    """
    def setUp(self):
        super(TestAddFeatureSet, self).setUp()
        self.init()
        self.addDataset()
        self.addOntology()
        self.addReferenceSet()

    def testAddFeatureSet(self):
        self.addFeatureSet()
        featureSet = self.getFeatureSet()
        self.assertEqual(featureSet.getLocalId(), self._featureSetName)
        self.assertEqual(
            featureSet._parentContainer.getLocalId(), self._datasetName)
        self.assertEqual(
            featureSet.getReferenceSet().getLocalId(),
            self._referenceSetName)
        # TODO not clear these fields get populated now
        # self.assertEqual(featureSet.getInfo(), "TODO")
        # self.assertEqual(featureSet.getSourceUrl(), "TODO")

    def testAddFeatureSetNoReferenceSet(self):
        # Omitting --referenceSetName must be rejected.
        featuresPath = paths.featuresPath
        cmd = "add-featureset {} {} {} --ontologyName={}".format(
            self._repoPath, self._datasetName, featuresPath,
            self._ontologyName)
        self.assertRaises(
            exceptions.RepoManagerException, self.runCommand, cmd)

    def testAddFeatureSetBadReferenceSet(self):
        # A reference set name that does not exist must be rejected.
        featuresPath = paths.featuresPath
        cmd = (
            "add-featureset {} {} {} --referenceSetName=notafefset "
            "--ontologyName={}").format(
                self._repoPath, self._datasetName, featuresPath,
                self._ontologyName)
        self.assertRaises(
            exceptions.ReferenceSetNameNotFoundException,
            self.runCommand, cmd)

    def testAddFeatureSetNoOntology(self):
        # Omitting --ontologyName must be rejected.
        featuresPath = paths.featuresPath
        cmd = "add-featureset {} {} {} --referenceSetName={} ".format(
            self._repoPath, self._datasetName, featuresPath,
            self._referenceSetName)
        self.assertRaises(
            exceptions.RepoManagerException, self.runCommand, cmd)

    def testAddFeatureSetBadOntology(self):
        # This test was previously a byte-for-byte duplicate of
        # testAddFeatureSetNoOntology and never exercised the bad-name
        # path. Pass a nonexistent ontology name, mirroring
        # testAddFeatureSetBadReferenceSet above.
        featuresPath = paths.featuresPath
        cmd = (
            "add-featureset {} {} {} --referenceSetName={} "
            "--ontologyName=notanontology").format(
                self._repoPath, self._datasetName, featuresPath,
                self._referenceSetName)
        # NOTE(review): expected exception chosen by analogy with the bad
        # reference set case -- confirm the cli raises
        # OntologyNameNotFoundException for an unknown ontology name.
        self.assertRaises(
            exceptions.OntologyNameNotFoundException,
            self.runCommand, cmd)
class TestRemoveFeatureSet(AbstractRepoManagerTest):
    """
    Tests for the remove-featureset command.
    """
    def setUp(self):
        super(TestRemoveFeatureSet, self).setUp()
        self.init()
        self.addDataset()
        self.addOntology()
        self.addReferenceSet()
        self.addFeatureSet()

    def testRemoveFeatureSet(self):
        localId = self.getFeatureSet().getLocalId()
        self.runCommand("remove-featureset {} {} {} -f".format(
            self._repoPath, self._datasetName, localId))
        # After removal, looking the feature set up by name must fail.
        with self.assertRaises(exceptions.FeatureSetNameNotFoundException):
            self.getFeatureSet()
class TestAddDataset(AbstractRepoManagerTest):
    """
    Tests for the add-dataset command.
    """
    def setUp(self):
        super(TestAddDataset, self).setUp()
        self.init()

    def testDefaults(self):
        datasetName = "test_dataset"
        self.runCommand(
            "add-dataset {} {}".format(self._repoPath, datasetName))
        # The new dataset must be retrievable by name from the repo.
        dataset = self.readRepo().getDatasetByName(datasetName)
        self.assertEqual(dataset.getLocalId(), datasetName)

    def testSameName(self):
        command = "add-dataset {} {}".format(self._repoPath, "test_dataset")
        self.runCommand(command)
        # Adding a second dataset with the same name must be rejected.
        with self.assertRaises(exceptions.DuplicateNameException):
            self.runCommand(command)
class TestAddPhenotypeAssociationSet(AbstractRepoManagerTest):
    """
    Tests for the add-phenotypeassociationset command.
    """
    def setUp(self):
        super(TestAddPhenotypeAssociationSet, self).setUp()
        self.init()

    def testDefaults(self):
        self.addDataset()
        self.addPhenotypeAssociationSet()

    def testSameName(self):
        self.addDataset()
        self.addPhenotypeAssociationSet()
        # A second set with the same name must be rejected.
        with self.assertRaises(exceptions.DuplicateNameException):
            self.addPhenotypeAssociationSet()
class TestRemovePhenotypeAssociationSet(AbstractRepoManagerTest):
    """
    Tests for the remove-phenotypeassociationset command.
    """
    def setUp(self):
        super(TestRemovePhenotypeAssociationSet, self).setUp()
        self.init()

    def _assertPhenotypeAssociationSetRemoved(self):
        # Looking the set up by name in a fresh read of the repo must fail.
        dataset = self.readRepo().getDatasetByName(self._datasetName)
        with self.assertRaises(
                exceptions.PhenotypeAssociationSetNotFoundException):
            dataset.getPhenotypeAssociationSetByName(
                self._phenotypeAssociationSetName)

    def _removePhenotypeAssociationSet(self):
        self.runCommand(
            "remove-phenotypeassociationset {} {} {} -f".format(
                self._repoPath, self._datasetName,
                self._phenotypeAssociationSetName))

    def testDefaults(self):
        self.addDataset()
        self.addPhenotypeAssociationSet()
        self._removePhenotypeAssociationSet()
        self._assertPhenotypeAssociationSetRemoved()

    def testDuplicateDelete(self):
        self.addDataset()
        self.addPhenotypeAssociationSet()
        self._removePhenotypeAssociationSet()
        self._assertPhenotypeAssociationSetRemoved()
        # Deleting the same set a second time must fail cleanly.
        with self.assertRaises(
                exceptions.PhenotypeAssociationSetNotFoundException):
            self._removePhenotypeAssociationSet()
class TestAddReferenceSet(AbstractRepoManagerTest):
    """
    Tests for the add-referenceset command.
    """
    def setUp(self):
        super(TestAddReferenceSet, self).setUp()
        self.init()

    def _verifyReferenceSet(self, name, fastaFile):
        # The reference set must be retrievable by name with an absolute
        # data URL.
        referenceSet = self.readRepo().getReferenceSetByName(name)
        self.assertEqual(referenceSet.getLocalId(), name)
        self.assertEqual(referenceSet.getDataUrl(), os.path.abspath(fastaFile))

    def testDefaults(self):
        fastaFile = paths.ncbi37FaPath
        # The default name is the file's basename with extensions stripped.
        name = os.path.basename(fastaFile).split(".")[0]
        self.runCommand("add-referenceset {} {}".format(
            self._repoPath, fastaFile))
        self._verifyReferenceSet(name, fastaFile)
        # TODO check that the default values for all fields are set correctly.

    def testWithName(self):
        name = "test_reference_set"
        fastaFile = paths.ncbi37FaPath
        self.runCommand("add-referenceset {} {} --name={}".format(
            self._repoPath, fastaFile, name))
        self._verifyReferenceSet(name, fastaFile)

    def testWithSameName(self):
        fastaFile = paths.ncbi37FaPath
        # Default name
        command = "add-referenceset {} {}".format(self._repoPath, fastaFile)
        self.runCommand(command)
        with self.assertRaises(exceptions.RepoManagerException):
            self.runCommand(command)
        # Specified name
        command = "add-referenceset {} {} --name=testname".format(
            self._repoPath, fastaFile)
        self.runCommand(command)
        with self.assertRaises(exceptions.DuplicateNameException):
            self.runCommand(command)
class TestAddOntology(AbstractRepoManagerTest):
    """
    Tests for the add-ontology command.
    """
    def setUp(self):
        super(TestAddOntology, self).setUp()
        self.init()

    def _verifyOntology(self, name, ontologyFile):
        ontology = self.readRepo().getOntologyByName(name)
        self.assertEqual(ontology.getName(), name)
        self.assertEqual(ontology.getDataUrl(), os.path.abspath(ontologyFile))

    def testDefaults(self):
        ontologyFile = paths.ontologyPath
        # The default name is the file's basename with extensions stripped.
        name = os.path.basename(ontologyFile).split(".")[0]
        self.runCommand("add-ontology {} {}".format(
            self._repoPath, ontologyFile))
        self._verifyOntology(name, ontologyFile)

    def testWithName(self):
        ontologyFile = paths.ontologyPath
        name = "test_name"
        self.runCommand("add-ontology {} {} --name={}".format(
            self._repoPath, ontologyFile, name))
        self._verifyOntology(name, ontologyFile)

    def testWithSameName(self):
        ontologyFile = paths.ontologyPath
        # Default name
        command = "add-ontology {} {}".format(self._repoPath, ontologyFile)
        self.runCommand(command)
        with self.assertRaises(exceptions.RepoManagerException):
            self.runCommand(command)
        # Specified name
        command = "add-ontology {} {} --name=testname".format(
            self._repoPath, ontologyFile)
        self.runCommand(command)
        with self.assertRaises(exceptions.DuplicateNameException):
            self.runCommand(command)

    def testMissingFile(self):
        command = "add-ontology {} {}".format(self._repoPath, "/no/such/file")
        with self.assertRaises(exceptions.FileOpenFailedException):
            self.runCommand(command)

    def testNonOboTextFile(self):
        # A text file that is not OBO format must be rejected.
        command = "add-ontology {} {}".format(
            self._repoPath, paths.landingMessageHtml)
        with self.assertRaises(exceptions.OntologyFileFormatException):
            self.runCommand(command)

    def testNonOboBinaryFile(self):
        # A binary file must also be rejected.
        command = "add-ontology {} {}".format(self._repoPath, paths.bamPath)
        with self.assertRaises(exceptions.OntologyFileFormatException):
            self.runCommand(command)
class TestRemoveDataset(AbstractRepoManagerTest):
    """
    Tests for the remove-dataset command.
    """
    def setUp(self):
        super(TestRemoveDataset, self).setUp()
        self.init()
        self.addDataset()
        self.addReferenceSet()

    def assertDatasetRemoved(self):
        # Lookup by name must now fail.
        with self.assertRaises(exceptions.DatasetNameNotFoundException):
            self.readRepo().getDatasetByName(self._datasetName)

    def _removeDataset(self):
        self.runCommand("remove-dataset {} {} -f".format(
            self._repoPath, self._datasetName))

    def testEmptyDatasetForce(self):
        self._removeDataset()
        self.assertDatasetRemoved()

    def testContainsReadGroupSet(self):
        # A dataset containing a read group set can still be force-removed.
        self.addReadGroupSet()
        self._removeDataset()
        self.assertDatasetRemoved()
class TestRemoveReadGroupSet(AbstractRepoManagerTest):
    """
    Tests for the remove-readgroupset command.
    """
    def setUp(self):
        super(TestRemoveReadGroupSet, self).setUp()
        self.init()
        self.addDataset()
        self.addReferenceSet()
        self.addReadGroupSet()

    def assertReadGroupSetRemoved(self):
        # Lookup by name in a fresh read of the repo must now fail.
        dataset = self.readRepo().getDatasetByName(self._datasetName)
        with self.assertRaises(
                exceptions.ReadGroupSetNameNotFoundException):
            dataset.getReadGroupSetByName(self._readGroupSetName)

    def testWithForce(self):
        self.runCommand("remove-readgroupset {} {} {} -f".format(
            self._repoPath, self._datasetName, self._readGroupSetName))
        self.assertReadGroupSetRemoved()
class TestRemoveVariantSet(AbstractRepoManagerTest):
    """
    Tests for the remove-variantset command.
    """
    def setUp(self):
        super(TestRemoveVariantSet, self).setUp()
        self.init()
        self.addDataset()
        self.addReferenceSet()
        self.addVariantSet()

    def assertVariantSetRemoved(self):
        # Lookup by name in a fresh read of the repo must now fail.
        dataset = self.readRepo().getDatasetByName(self._datasetName)
        with self.assertRaises(
                exceptions.VariantSetNameNotFoundException):
            dataset.getVariantSetByName(self._variantSetName)

    def testWithForce(self):
        self.runCommand("remove-variantset {} {} {} -f".format(
            self._repoPath, self._datasetName, self._variantSetName))
        self.assertVariantSetRemoved()

    # TODO test when we have a variant set with the same name in
    # a different dataset. This should be unaffected.
class TestRemoveReferenceSet(AbstractRepoManagerTest):
    """
    Tests for the remove-referenceset command.
    """
    def setUp(self):
        super(TestRemoveReferenceSet, self).setUp()
        self.init()
        self.addReferenceSet()

    def assertReferenceSetRemoved(self):
        # Lookup by name must now fail.
        with self.assertRaises(exceptions.ReferenceSetNameNotFoundException):
            self.readRepo().getReferenceSetByName(self._referenceSetName)

    def testDefaults(self):
        self.runCommand("remove-referenceset {} {} -f".format(
            self._repoPath, self._referenceSetName))
        self.assertReferenceSetRemoved()
class TestVerify(AbstractRepoManagerTest):
    """
    Tests that the verify command runs cleanly on a populated repository.
    """
    # The previous setUp override only called super() and has been removed;
    # the inherited AbstractRepoManagerTest.setUp is sufficient.

    def testVerify(self):
        # Populate the repository with one of every object type, then
        # check that `verify` completes without raising.
        self.init()
        self.addDataset()
        self.addOntology()
        self.addReferenceSet()
        self.addReadGroupSet()
        self.addFeatureSet()
        self.addVariantSet()
        cmd = "verify {}".format(self._repoPath)
        self.runCommand(cmd)
class TestRemoveOntology(AbstractRepoManagerTest):
    """
    Tests for the remove-ontology command.
    """
    def setUp(self):
        super(TestRemoveOntology, self).setUp()
        self.init()
        self.addOntology()

    def assertOntologyRemoved(self):
        # Lookup by name must now fail.
        with self.assertRaises(exceptions.OntologyNameNotFoundException):
            self.readRepo().getOntologyByName(self._ontologyName)

    def testDefaults(self):
        self.runCommand("remove-ontology {} {} -f".format(
            self._repoPath, self._ontologyName))
        self.assertOntologyRemoved()
class TestAddReadGroupSet(AbstractRepoManagerTest):
    """
    Tests for the add-readgroupset repo manager command.
    """
    def setUp(self):
        super(TestAddReadGroupSet, self).setUp()
        self.init()
        self.addDataset()
        self.addReferenceSet()
    def verifyReadGroupSet(self, name, dataUrl, indexFile):
        # Re-read the repo from disk and check that the read group set was
        # registered with the expected name, reference set and absolute
        # data/index file paths.
        repo = self.readRepo()
        dataset = repo.getDatasetByName(self._datasetName)
        referenceSet = repo.getReferenceSetByName(self._referenceSetName)
        readGroupSet = dataset.getReadGroupSetByName(name)
        self.assertEqual(readGroupSet.getLocalId(), name)
        self.assertEqual(readGroupSet.getReferenceSet(), referenceSet)
        self.assertEqual(readGroupSet.getDataUrl(), os.path.abspath(dataUrl))
        self.assertEqual(
            readGroupSet.getIndexFile(), os.path.abspath(indexFile))
    def testDefaultsLocalFile(self):
        # With no --name the name defaults to the BAM file's basename, and
        # the index defaults to <bamFile>.bai.
        bamFile = paths.bamPath
        name = os.path.split(bamFile)[1].split(".")[0]
        cmd = "add-readgroupset {} {} {} --referenceSetName={}".format(
            self._repoPath, self._datasetName, bamFile,
            self._referenceSetName)
        self.runCommand(cmd)
        self.verifyReadGroupSet(name, bamFile, bamFile + ".bai")
    def testLocalFileWithIndex(self):
        # An explicit -I index path should be stored instead of the default.
        bamFile = paths.bamPath
        name = os.path.split(bamFile)[1].split(".")[0]
        with tempfile.NamedTemporaryFile() as temp:
            indexFile = temp.name
            shutil.copyfile(bamFile + ".bai", indexFile)
            cmd = (
                "add-readgroupset {} {} {} -I {} "
                "--referenceSetName={}").format(
                self._repoPath, self._datasetName, bamFile,
                indexFile, self._referenceSetName)
            self.runCommand(cmd)
            self.verifyReadGroupSet(name, bamFile, indexFile)
    def testLocalFileWithName(self):
        # An explicit --name overrides the basename-derived default.
        bamFile = paths.bamPath
        name = "test_rgs"
        cmd = (
            "add-readgroupset {} {} {} --referenceSetName={} "
            "--name={}").format(
            self._repoPath, self._datasetName, bamFile,
            self._referenceSetName, name)
        self.runCommand(cmd)
        self.verifyReadGroupSet(name, bamFile, bamFile + ".bai")
    def testAddReadGroupSetWithSameName(self):
        # Adding a read group set twice under the same name must fail,
        # whether the name is defaulted or given explicitly.
        # Default name
        bamFile = paths.bamPath
        name = os.path.split(bamFile)[1].split(".")[0]
        cmd = "add-readgroupset {} {} {} --referenceSetName={}".format(
            self._repoPath, self._datasetName, bamFile,
            self._referenceSetName)
        self.runCommand(cmd)
        self.assertRaises(
            exceptions.DuplicateNameException, self.runCommand, cmd)
        # Specified name
        name = "test_rgs"
        cmd = (
            "add-readgroupset {} {} {} --referenceSetName={} "
            "--name={}").format(
            self._repoPath, self._datasetName, bamFile,
            self._referenceSetName, name)
        self.runCommand(cmd)
        self.assertRaises(
            exceptions.DuplicateNameException, self.runCommand, cmd)
    def testUrlWithMissingIndex(self):
        # Remote data URLs require an explicit index file.
        bamFile = "http://example.com/example.bam"
        cmd = "add-readgroupset {} {} {} --referenceSetName={}".format(
            self._repoPath, self._datasetName, bamFile,
            self._referenceSetName)
        self.assertRaises(
            exceptions.MissingIndexException, self.runCommand, cmd)
    def testMissingDataset(self):
        bamFile = paths.bamPath
        cmd = "add-readgroupset {} {} {} --referenceSetName={}".format(
            self._repoPath, "not_a_dataset_name", bamFile,
            self._referenceSetName)
        self.assertRaises(
            exceptions.DatasetNameNotFoundException, self.runCommand, cmd)
    def testMissingReferenceSet(self):
        bamFile = paths.bamPath
        cmd = "add-readgroupset {} {} {} --referenceSetName={}".format(
            self._repoPath, self._datasetName, bamFile,
            "not_a_referenceset_name")
        self.assertRaises(
            exceptions.ReferenceSetNameNotFoundException, self.runCommand, cmd)
class TestAddVariantSet(AbstractRepoManagerTest):
    """
    Tests for the add-variantset repo manager command.
    """
    def setUp(self):
        super(TestAddVariantSet, self).setUp()
        self.init()
        self.addDataset()
        self.addReferenceSet()
        self.vcfDir = paths.vcfDirPath
        self.vcfFiles = glob.glob(os.path.join(paths.vcfDirPath, "*.vcf.gz"))
        # Each vcf.gz is expected to have a tabix index alongside it.
        self.indexFiles = [vcfFile + ".tbi" for vcfFile in self.vcfFiles]
    def verifyVariantSet(self, name, dataUrls, indexFiles):
        # Re-read the repo from disk and check that the variant set was
        # registered with the expected name, reference set and
        # (data URL, index file) pairs.
        repo = self.readRepo()
        dataset = repo.getDatasetByName(self._datasetName)
        referenceSet = repo.getReferenceSetByName(self._referenceSetName)
        variantSet = dataset.getVariantSetByName(name)
        self.assertEqual(variantSet.getLocalId(), name)
        self.assertEqual(variantSet.getReferenceSet(), referenceSet)
        # Paths are stored as absolute; compare as sorted pairs so ordering
        # is irrelevant.
        dataUrls = map(lambda x: os.path.abspath(x), dataUrls)
        indexFiles = map(lambda x: os.path.abspath(x), indexFiles)
        pairs = sorted(zip(dataUrls, indexFiles))
        self.assertEqual(pairs, sorted(variantSet.getDataUrlIndexPairs()))
    def testDefaultsLocalFiles(self):
        dataFiles = self.vcfFiles
        name = "test_name"
        cmd = "add-variantset {} {} {} --name={} --referenceSetName={}".format(
            self._repoPath, self._datasetName, " ".join(dataFiles),
            name, self._referenceSetName)
        self.runCommand(cmd)
        self.verifyVariantSet(name, dataFiles, self.indexFiles)
    def testDefaultsLocalDirectory(self):
        # With no --name, a directory argument defaults the name to the
        # directory's basename.
        vcfDir = self.vcfDir
        name = os.path.split(vcfDir)[1]
        cmd = "add-variantset {} {} {} --referenceSetName={}".format(
            self._repoPath, self._datasetName, vcfDir,
            self._referenceSetName)
        self.runCommand(cmd)
        self.verifyVariantSet(name, self.vcfFiles, self.indexFiles)
    def testLocalFilesWithIndexes(self):
        # Explicit -I index files (copied to a temp dir so they differ from
        # the defaults) should be stored instead of the <file>.tbi defaults.
        dataFiles = self.vcfFiles
        tempdir = tempfile.mkdtemp(prefix="ga4gh_test_add_variantset")
        name = "test_name"
        try:
            indexFiles = []
            for indexFile in self.indexFiles:
                indexFileCopy = os.path.join(
                    tempdir, os.path.split(indexFile)[1])
                shutil.copyfile(indexFile, indexFileCopy)
                indexFiles.append(indexFileCopy)
            cmd = (
                "add-variantset {} {} {} -I {} --name={} "
                "--referenceSetName={}".format(
                    self._repoPath, self._datasetName, " ".join(dataFiles),
                    " ".join(indexFiles), name, self._referenceSetName))
            self.runCommand(cmd)
            self.verifyVariantSet(name, dataFiles, indexFiles)
        finally:
            shutil.rmtree(tempdir)
    def testAddVariantSetWithSameName(self):
        # Adding a variant set twice under the same name must fail, whether
        # the name is defaulted or given explicitly.
        # Default name
        vcfDir = self.vcfDir
        name = os.path.split(vcfDir)[1]
        cmd = "add-variantset {} {} {} --referenceSetName={}".format(
            self._repoPath, self._datasetName, vcfDir,
            self._referenceSetName)
        self.runCommand(cmd)
        self.assertRaises(
            exceptions.DuplicateNameException, self.runCommand, cmd)
        # Specified name
        name = "test_vs"
        cmd = (
            "add-variantset {} {} {} --referenceSetName={} "
            "--name={}").format(
            self._repoPath, self._datasetName, vcfDir,
            self._referenceSetName, name)
        self.runCommand(cmd)
        self.assertRaises(
            exceptions.DuplicateNameException, self.runCommand, cmd)
    def testUrlWithMissingIndex(self):
        # Remote data URLs require an explicit index file.
        dataFile = "http://example.com/example.vcf.gz"
        cmd = "add-variantset {} {} {} --referenceSetName={}".format(
            self._repoPath, self._datasetName, dataFile,
            self._referenceSetName)
        self.assertRaises(
            exceptions.MissingIndexException, self.runCommand, cmd)
    def testMissingDataset(self):
        cmd = "add-variantset {} {} {} --referenceSetName={}".format(
            self._repoPath, "not_a_dataset_name", self.vcfDir,
            self._referenceSetName)
        self.assertRaises(
            exceptions.DatasetNameNotFoundException, self.runCommand, cmd)
    def testMissingReferenceSet(self):
        cmd = "add-variantset {} {} {} --referenceSetName={}".format(
            self._repoPath, self._datasetName, self.vcfDir,
            "not_a_referenceset_name")
        self.assertRaises(
            exceptions.ReferenceSetNameNotFoundException, self.runCommand, cmd)
    # TODO add more tests for to verify that errors are correctly thrown
    # when incorrect indexes are passed, mixtures of directories and URLS
    # for the dataFiles argument, and other common error cases in the UI.
class TestAddAnnotatedVariantSet(AbstractRepoManagerTest):
    """
    Tests add-variantset with the annotation flags (-a / -aO) against an
    annotated VCF.
    """
    def setUp(self):
        super(TestAddAnnotatedVariantSet, self).setUp()
        self.init()
        self.addDataset()
        self.addReferenceSet()
        self.addOntology()
        self.vcfDir = paths.annotatedVcfPath
    def testNoAnnotations(self):
        # Without -a, no variant annotation sets are created even though the
        # source VCF carries annotations.
        name = "test_vs_no_annotations"
        cmd = "add-variantset {} {} {} -R {} -n {}".format(
            self._repoPath, self._datasetName, self.vcfDir,
            self._referenceSetName, name)
        self.runCommand(cmd)
        repo = self.readRepo()
        dataset = repo.getDatasetByName(self._datasetName)
        variantSet = dataset.getVariantSetByName(name)
        self.assertEqual(len(variantSet.getVariantAnnotationSets()), 0)
    def testAnnotations(self):
        # With -a and a valid ontology (-O), one annotation set is created.
        name = "test_vs_annotations"
        cmd = "add-variantset {} {} {} -R {} -n {} -aO {}".format(
            self._repoPath, self._datasetName, self.vcfDir,
            self._referenceSetName, name, self._ontologyName)
        self.runCommand(cmd)
        repo = self.readRepo()
        dataset = repo.getDatasetByName(self._datasetName)
        variantSet = dataset.getVariantSetByName(name)
        self.assertEqual(len(variantSet.getVariantAnnotationSets()), 1)
    def testAnnotationsNoOntology(self):
        # -a without -O is an error: annotations need an ontology.
        name = "test_vs_annotations"
        cmd = "add-variantset {} {} {} -R {} -n {} -a".format(
            self._repoPath, self._datasetName, self.vcfDir,
            self._referenceSetName, name)
        self.assertRaises(
            exceptions.RepoManagerException, self.runCommand, cmd)
    def testAnnotationsBadOntology(self):
        # An ontology name not present in the repo is an error.
        name = "test_vs_annotations"
        cmd = "add-variantset {} {} {} -R {} -n {} -aO {}".format(
            self._repoPath, self._datasetName, self.vcfDir,
            self._referenceSetName, name, "not_an_ontology")
        self.assertRaises(
            exceptions.OntologyNameNotFoundException, self.runCommand, cmd)
class TestDuplicateNameDelete(AbstractRepoManagerTest):
    """
    If two objects exist with the same name in different datasets,
    ensure that only one is deleted on a delete call
    """
    def setUp(self):
        super(TestDuplicateNameDelete, self).setUp()
        self.init()
        self.dataset1Name = "dataset1"
        self.dataset2Name = "dataset2"
        self.addDataset(self.dataset1Name)
        self.addDataset(self.dataset2Name)
        self.addOntology()
        self.addReferenceSet()
    def readDatasets(self):
        # Refresh self.dataset1/self.dataset2 from the on-disk repo.
        repo = self.readRepo()
        self.dataset1 = repo.getDatasetByName(self.dataset1Name)
        self.dataset2 = repo.getDatasetByName(self.dataset2Name)
    def testReadGroupSetDelete(self):
        # Add the same-named read group set to both datasets, remove it from
        # dataset1 only, and check dataset2's copy survives.
        readGroupSetName = "test_rgs"
        cmdString = (
            "add-readgroupset {} {} {} --referenceSetName={} "
            "--name={}")
        addReadGroupSetCmd1 = cmdString.format(
            self._repoPath, self.dataset1Name, paths.bamPath,
            self._referenceSetName, readGroupSetName)
        self.runCommand(addReadGroupSetCmd1)
        addReadGroupSetCmd2 = cmdString.format(
            self._repoPath, self.dataset2Name, paths.bamPath,
            self._referenceSetName, readGroupSetName)
        self.runCommand(addReadGroupSetCmd2)
        removeCmd = "remove-readgroupset {} {} {} -f".format(
            self._repoPath, self.dataset1Name, readGroupSetName)
        self.runCommand(removeCmd)
        self.readDatasets()
        self.assertEqual(len(self.dataset1.getReadGroupSets()), 0)
        self.assertEqual(len(self.dataset2.getReadGroupSets()), 1)
    def testVariantSetDelete(self):
        # Same pattern as testReadGroupSetDelete, for variant sets.
        vcfDir = paths.vcfDirPath
        variantSetName = "test_vs"
        cmdString = "add-variantset {} {} {} --referenceSetName={} --name={}"
        addVariantSetCmd1 = cmdString.format(
            self._repoPath, self.dataset1Name, vcfDir,
            self._referenceSetName, variantSetName)
        self.runCommand(addVariantSetCmd1)
        addVariantSetCmd2 = cmdString.format(
            self._repoPath, self.dataset2Name, vcfDir,
            self._referenceSetName, variantSetName)
        self.runCommand(addVariantSetCmd2)
        removeCmd = "remove-variantset {} {} {} -f".format(
            self._repoPath, self.dataset1Name, variantSetName)
        self.runCommand(removeCmd)
        self.readDatasets()
        self.assertEqual(len(self.dataset1.getVariantSets()), 0)
        self.assertEqual(len(self.dataset2.getVariantSets()), 1)
    def testFeatureSetDelete(self):
        # Same pattern, for feature sets.
        # NOTE(review): removal uses paths.featureSetName while the add uses
        # paths.featuresPath -- presumably the set's name is derived from the
        # features file; confirm against the paths module.
        cmdString = "add-featureset {} {} {} -R {} -O {}"
        addFeatureSetCmd1 = cmdString.format(
            self._repoPath, self.dataset1Name, paths.featuresPath,
            self._referenceSetName, self._ontologyName)
        self.runCommand(addFeatureSetCmd1)
        addFeatureSetCmd2 = cmdString.format(
            self._repoPath, self.dataset2Name, paths.featuresPath,
            self._referenceSetName, self._ontologyName)
        self.runCommand(addFeatureSetCmd2)
        removeCmd = "remove-featureset {} {} {} -f".format(
            self._repoPath, self.dataset1Name, paths.featureSetName)
        self.runCommand(removeCmd)
        self.readDatasets()
        self.assertEqual(len(self.dataset1.getFeatureSets()), 0)
        self.assertEqual(len(self.dataset2.getFeatureSets()), 1)
class TestInvalidVariantIndexFile(AbstractRepoManagerTest):
    """
    Test that the repo manager throws exceptions when invalid index
    files are provided for vcf files.
    """
    def setUp(self):
        super(TestInvalidVariantIndexFile, self).setUp()
        self.init()
        self.addDataset()
        self.addReferenceSet()

    def _testWithIndexPath(self, indexPath):
        # Attempting to add a variant set with a bad index must raise
        # NotIndexedException.
        template = "add-variantset {} {} {} --referenceSetName={} -I {}"
        cmd = template.format(
            self._repoPath, self._datasetName, paths.vcfPath1,
            self._referenceSetName, indexPath)
        with self.assertRaises(exceptions.NotIndexedException):
            self.runCommand(cmd)

    def testNonexistentIndexFile(self):
        self._testWithIndexPath('/path/does/not/exist')

    def testIndexFileNotAnIndexFile(self):
        # A real file, but not an index file.
        self._testWithIndexPath(paths.vcfPath2)

    @unittest.skip("Skipping until we can detect incorrect indexes")
    def testWrongIndexFile(self):
        # An index file, but for a different vcf.
        self._testWithIndexPath(paths.vcfIndexPath2)
class TestInvalidReadGroupSetIndexFile(AbstractRepoManagerTest):
    """
    Test that the repo manager throws exceptions when invalid index
    files are provided for bam files.
    """
    @classmethod
    def setUpClass(cls):
        # clear the file handle cache because if the data file we are
        # testing with an invalid index is already in the cache, the
        # index will not be opened during the test; without this line
        # the below tests will succeed when the test class is run but
        # fail when the file's tests are run
        datamodel.fileHandleCache = datamodel.PysamFileHandleCache()
    def setUp(self):
        super(TestInvalidReadGroupSetIndexFile, self).setUp()
        self.init()
        self.addDataset()
        self.addReferenceSet()
    def _testWithIndexPath(self, indexPath):
        # Runs the add-readgroupset command with the given -I index path.
        # Unlike the variant set equivalent, this helper does not assert;
        # callers wrap it in the assertRaises they expect.
        cmd = (
            "add-readgroupset {} {} {} --referenceSetName={} "
            "-I {}").format(
            self._repoPath, self._datasetName, paths.bamPath,
            self._referenceSetName, indexPath)
        self.runCommand(cmd)
    def testNonexistentIndexFile(self):
        indexPath = '/path/does/not/exist'
        with self.assertRaises(exceptions.FileOpenFailedException):
            self._testWithIndexPath(indexPath)
    def testIndexFileNotAnIndexFile(self):
        indexPath = paths.bamPath2  # not an index file
        with self.assertRaises(exceptions.DataException):
            self._testWithIndexPath(indexPath)
    @unittest.skip("Skipping until we can detect incorrect indexes")
    def testWrongIndexFile(self):
        indexPath = paths.bamIndexPath2  # incorrect index
        self._testWithIndexPath(indexPath)
| {
"content_hash": "bccc7227c3c65f96f822c2f3ca95c556",
"timestamp": "",
"source": "github",
"line_count": 930,
"max_line_length": 79,
"avg_line_length": 37.575268817204304,
"alnum_prop": 0.6359708112748605,
"repo_name": "ohsu-computational-biology/server",
"id": "ec397ce9b28829f9fe8e51b6f4f8fbd8964d1bb6",
"size": "34945",
"binary": false,
"copies": "1",
"ref": "refs/heads/g2p-2.5",
"path": "tests/unit/test_repo_manager.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "7380"
},
{
"name": "Jupyter Notebook",
"bytes": "41926"
},
{
"name": "Python",
"bytes": "1238265"
},
{
"name": "Shell",
"bytes": "973"
}
],
"symlink_target": ""
} |
"""
This file emits the list of reasons why a particular build needs to be clobbered
(or a list of 'landmines').
"""
import optparse
import sys
import landmine_utils
# Convenience aliases for the landmine_utils helpers used below.
# Fixed: ``builder`` was aliased to ``landmine_utils.platform``, so
# ``builder() == 'ninja'`` could never be true and several landmines were
# unreachable.
builder = landmine_utils.builder
distributor = landmine_utils.distributor
gyp_defines = landmine_utils.gyp_defines
gyp_msvs_version = landmine_utils.gyp_msvs_version
platform = landmine_utils.platform
def print_landmines(target):
    """
    ALL LANDMINES ARE EMITTED FROM HERE.
    target can be one of {'Release', 'Debug', 'Debug_x64', 'Release_x64'}.
    """
    # Python 2 print statements: every line printed here is one "landmine";
    # any change to the emitted set of lines triggers a clobber build.
    if (distributor() == 'goma' and platform() == 'win32' and
        builder() == 'ninja'):
        print 'Need to clobber winja goma due to backend cwd cache fix.'
    if platform() == 'android':
        print 'Clobber: Resources removed in r195014 require clobber.'
    if platform() == 'win' and builder() == 'ninja':
        print 'Compile on cc_unittests fails due to symbols removed in r185063.'
    if platform() == 'linux' and builder() == 'ninja':
        print 'Builders switching from make to ninja will clobber on this.'
    if platform() == 'mac':
        print 'Switching from bundle to unbundled dylib (issue 14743002).'
    if (platform() == 'win' and builder() == 'ninja' and
        gyp_msvs_version() == '2012' and
        gyp_defines().get('target_arch') == 'x64' and
        gyp_defines().get('dcheck_always_on') == '1'):
        print "Switched win x64 trybots from VS2010 to VS2012."
    # Unconditional landmine: emitted for every target.
    print 'Need to clobber everything due to an IDL change in r154579 (blink)'
def main():
    """Parse command-line options and emit the landmine list for the target.

    Returns 0 on success (used as the process exit status).
    """
    parser = optparse.OptionParser()
    # Fixed: the original passed ``help=='...'`` (a comparison expression,
    # evaluating to False, handed to add_option positionally) instead of the
    # ``help=`` keyword argument, which made add_option fail.
    parser.add_option('-t', '--target',
                      help='Target for which the landmines have to be emitted')
    options, args = parser.parse_args()
    if args:
        parser.error('Unknown arguments %s' % args)
    print_landmines(options.target)
    return 0
# Standard script entry point: propagate main()'s return as the exit status.
if __name__ == '__main__':
    sys.exit(main())
| {
"content_hash": "3b11bc45cf5bc77f10400568db4f7157",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 80,
"avg_line_length": 31.5,
"alnum_prop": 0.6694033935413246,
"repo_name": "cvsuser-chromium/chromium",
"id": "05c9de6962846a3f2280b5a3f5f6b68d2b91b5d2",
"size": "2012",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "build/get_landmines.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "AppleScript",
"bytes": "6973"
},
{
"name": "Assembly",
"bytes": "36421"
},
{
"name": "C",
"bytes": "6924841"
},
{
"name": "C++",
"bytes": "179649999"
},
{
"name": "CSS",
"bytes": "812951"
},
{
"name": "Java",
"bytes": "3768838"
},
{
"name": "JavaScript",
"bytes": "8338074"
},
{
"name": "Makefile",
"bytes": "52980"
},
{
"name": "Objective-C",
"bytes": "819293"
},
{
"name": "Objective-C++",
"bytes": "6453781"
},
{
"name": "PHP",
"bytes": "61320"
},
{
"name": "Perl",
"bytes": "17897"
},
{
"name": "Python",
"bytes": "5640877"
},
{
"name": "Rebol",
"bytes": "262"
},
{
"name": "Shell",
"bytes": "648699"
},
{
"name": "XSLT",
"bytes": "418"
},
{
"name": "nesC",
"bytes": "15926"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from jinja2 import Markup
from changes.buildfailures.base import BuildFailure
class MissingArtifact(BuildFailure):
    """Build failure shown when an expected artifact was never collected."""

    def get_html_label(self, build):
        # TODO(dcramer): we need arbitrary data with build failures so this can
        # say *what* artifact
        label = 'A critical artifact was expected, but was not collected.'
        return Markup(label)
| {
"content_hash": "3568a87df71d4b141fa20402bef1817f",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 81,
"avg_line_length": 32.25,
"alnum_prop": 0.7312661498708011,
"repo_name": "bowlofstew/changes",
"id": "cacd2daf2e2a05e9350b4322dd397be2ca4179cc",
"size": "387",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "changes/buildfailures/missingartifact.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "87142"
},
{
"name": "HTML",
"bytes": "137437"
},
{
"name": "JavaScript",
"bytes": "385108"
},
{
"name": "Makefile",
"bytes": "6212"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "1546048"
},
{
"name": "Shell",
"bytes": "868"
}
],
"symlink_target": ""
} |
import setuptools
# All these imported to be added to our distribution
import conjureup
# Alias setuptools' PEP 420-aware finder, which also discovers namespace
# packages that have no __init__.py files.
find_420_friendly_packages = setuptools.PEP420PackageFinder.find

setuptools.setup(
    name="conjure-up",
    # Version is single-sourced from the conjureup package.
    version=conjureup.__version__,
    author="Adam Stokes",
    author_email="adam.stokes@ubuntu.com",
    description="conjure-up a power tool for installing big software",
    url="https://github.com/conjure-up/conjure-up",
    packages=find_420_friendly_packages(),
    entry_points={
        # Executables installed on PATH, mapped to their entry functions.
        "console_scripts": [
            "conjure-up = conjureup.app:main",
            "conjure-down = conjureup.destroy:main"
        ]
    }
)
| {
"content_hash": "3753b35af533ab689321f8d9ec778b95",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 70,
"avg_line_length": 28.727272727272727,
"alnum_prop": 0.6756329113924051,
"repo_name": "ubuntu/conjure-up",
"id": "1c6bb9471b8113d8f12ecf536327572d650276a5",
"size": "632",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "2552"
},
{
"name": "Python",
"bytes": "470520"
},
{
"name": "Shell",
"bytes": "4588"
}
],
"symlink_target": ""
} |
from datetime import datetime, timedelta
from sita.appointments.models import Appointment
from calendar import monthrange
def construct_query_view_month(month, year):
    """Return the appointments falling within the given calendar month.

    :param month: month number (1-12)
    :param year: four-digit year
    :return: QuerySet of Appointment, ordered by date_appointment, whose
        date_appointment lies in [1st 00:00:00, last day 23:59:59] of the
        month (same inclusive bounds as the original implementation).
    """
    _, days_in_month = monthrange(year, month)
    first_moment = datetime(year=year, month=month, day=1)
    last_moment = first_moment + timedelta(
        days=days_in_month - 1, hours=23, minutes=59, seconds=59)
    # Use the ORM's parameterized __range lookup (SQL BETWEEN, inclusive)
    # instead of the original string-interpolated QuerySet.extra(where=...),
    # which is deprecated and bypasses query parameterization.
    return Appointment.objects.filter(
        date_appointment__range=(first_moment, last_moment)
    ).order_by("date_appointment")
| {
"content_hash": "a7c840602c9d455aab326b3d3d128c01",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 173,
"avg_line_length": 57.7,
"alnum_prop": 0.7400346620450606,
"repo_name": "Fabfm4/Sita-BackEnd",
"id": "dd1972b14369d5704592537caa87b8860ee5581a",
"size": "577",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/sita/utils/appointmentQuery.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "239234"
},
{
"name": "Shell",
"bytes": "5693"
}
],
"symlink_target": ""
} |
from django import forms
from django.contrib import admin
from jmbo.admin import ModelBaseAdmin
from poll.models import Poll, PollOption
# Widget attrs that render a text input as read-only display-only text
# (no border, not editable) in the admin change form.
readonly_attrs = {
    'readonly': 'readonly',
    'style': 'border-width: 0px;',
}
class PollOptionForm(forms.ModelForm):
    """Admin form for PollOption exposing read-only vote statistics."""

    # Display-only statistics; not model fields, so ModelForm never saves
    # them back.
    votes = forms.IntegerField(
        required=False,
        help_text='Number of votes cast for this option.',
        widget=forms.TextInput(attrs=readonly_attrs),
        initial='',
    )
    percentage = forms.FloatField(
        required=False,
        help_text='Percentage of votes cast for this option in '
                  'relation to all of the other options.',
        widget=forms.TextInput(attrs=readonly_attrs),
    )

    class Meta:
        model = PollOption

    def __init__(self, *args, **kwargs):
        super(PollOptionForm, self).__init__(*args, **kwargs)
        # Fixed: the original mutated the class-level ``base_fields`` before
        # calling super(). ``base_fields`` is shared by every instance of the
        # form class, so one instance's initial values leaked into all others.
        # ``self.fields`` is a per-instance deep copy made by super().__init__,
        # so it is safe to customise here.
        instance = kwargs.get('instance')
        self.fields['votes'].initial = instance.vote_count if instance else ''
        self.fields['percentage'].initial = (
            instance.percentage if instance else '')
class PollOptionInline(admin.StackedInline):
    """Inline editor so poll options are edited on the Poll change page."""
    model = PollOption
    form = PollOptionForm
class PollAdmin(ModelBaseAdmin):
    """Jmbo admin for Poll, with its options editable inline."""
    inlines = [PollOptionInline]

# Register Poll with the custom admin class on the default admin site.
admin.site.register(Poll, PollAdmin)
| {
"content_hash": "b106b9d497421bc9152b66dccfd27da6",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 73,
"avg_line_length": 28.130434782608695,
"alnum_prop": 0.6530139103554868,
"repo_name": "praekelt/jmbo-poll",
"id": "530568059084e3d229117a990775476725aa7f68",
"size": "1294",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "poll/admin.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "5632"
},
{
"name": "Python",
"bytes": "39762"
}
],
"symlink_target": ""
} |
"""create laptime view
Revision ID: 4f8fc8f7c68d
Revises: 4d73c542db51
Create Date: 2014-05-01 21:45:38.669382
"""
# revision identifiers, used by Alembic.
revision = '4f8fc8f7c68d'
down_revision = '4d73c542db51'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.ext.compiler import compiles
from sqlalchemy.sql.expression import literal_column, cast, select
from sqlalchemy.schema import DDLElement
from sqlalchemy.sql import table
from sqlalchemy.orm import Query
from n64_storage import models
from n64_storage.models import db
event = models.Event.metadata.tables['event']
class CreateView(DDLElement):
    """DDL element representing ``CREATE VIEW <name> AS <selectable>``."""

    def __init__(self, name, selectable):
        self.name, self.selectable = name, selectable
class DropView(DDLElement):
    """DDL element representing ``DROP VIEW IF EXISTS <name>``."""

    def __init__(self, name):
        self.name = name
@compiles(CreateView)
def compile_create_view(element, compiler, **kw):
    """Compile a CreateView element to its CREATE VIEW SQL string."""
    # Fixed: removed a leftover ``import ipdb; ipdb.set_trace()`` debugger
    # breakpoint, which would hang any migration run (and fail outright
    # where ipdb is not installed).
    return "CREATE VIEW %s AS %s" % (
        element.name, compiler.sql_compiler.process(element.selectable))
@compiles(DropView)
def compile_drop_view(element, compiler, **kw):
    """Compile a DropView element to its DROP VIEW SQL string."""
    return "DROP VIEW IF EXISTS {0}".format(element.name)
def View(name, selectable):
    """
    `View` support for SQLAlchemy
    See: http://www.sqlalchemy.org/trac/wiki/UsageRecipes/Views

    Returns a selectable ``table`` object named *name* whose columns proxy
    those of *selectable*, and registers CREATE VIEW / DROP VIEW DDL hooks
    on the metadata's create/drop events.
    """
    t = table(name)

    # ORM Query objects must be converted to a core selectable first.
    if isinstance(selectable, Query):
        selectable = selectable.subquery()

    # Expose each column of the select as a column of the view table.
    for c in selectable.c:
        c._make_proxy(t)

    CreateView(name, selectable).execute_at('after-create', db.metadata)
    DropView(name).execute_at('before-drop', db.metadata)
    return t
def upgrade():
    """Create the ``laptime`` view.

    The view derives one row per completed lap by self-joining the event
    table: each "Lap/New" event (e) is paired with the previous lap's
    "Lap/New" event (e1) for the same player in the same race, and the lap
    time is the difference between their timestamps.
    """
    # Two aliases of the same event table for the self-join.
    e = event.alias()
    e1 = event.alias()
    create_laptime_view = CreateView("laptime",
        select([e.c.id.label('id'),
                e.c.race_id.label('race_id'),
                e.c.player.label('player'),
                e.c.timestamp.label('timestamp'),
                # Lap numbers in the view are zero-based.
                (e.c.lap - 1).label('lap'),
                e.c.place.label('place'),
                e.c.event_number.label('event_number'),
                e.c.event_type.label('event_type'),
                literal_column("Time").label('event_subtype'),
                # Lap duration: current lap start minus previous lap start,
                # rendered as a string.
                cast(e.c.timestamp - e1.c.timestamp, db.String).label('event_info'),
                e.c.linked_event_id.label('linked_event_id'),
                e.c.image_url.label('image_url')]).\
        select_from(e.join(e1, e1.c.race_id == e.c.race_id)).\
        where((e.c.event_type == "Lap")
              & (e.c.event_subtype == "New")
              & (e1.c.event_type == "Lap")
              & (e1.c.event_subtype == "New")
              & (e1.c.player == e.c.player)
              # e1 is exactly one lap before e (event_info holds the lap
              # number as text).
              & (cast(e.c.event_info, db.Integer) == (cast(e1.c.event_info, db.Integer) + 1))))
    op.execute(create_laptime_view)
def downgrade():
    """Drop the ``laptime`` view created by upgrade()."""
    op.execute(DropView('laptime'))
| {
"content_hash": "3c3fff327260743f08a51a14d5f26759",
"timestamp": "",
"source": "github",
"line_count": 102,
"max_line_length": 101,
"avg_line_length": 28.294117647058822,
"alnum_prop": 0.613998613998614,
"repo_name": "kartyboyz/n64-storage-flask",
"id": "1ad19c4fec54ab35766cf7ca63bd8ccda11fc1f0",
"size": "2886",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "migrations/versions/4f8fc8f7c68d_create_laptime_view.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "64381"
}
],
"symlink_target": ""
} |
import socket
import datetime
import time
from aeon.exceptions import ProbeError
class BaseDevice(object):
    """Common base for network device drivers.

    Wraps a transport connector (``self.api``), probes the device's TCP
    port for reachability, and exposes a ``facts`` dict that subclasses
    populate via :meth:`gather_facts`.
    """
    DEFAULT_PROBE_TIMEOUT = 10

    def __init__(self, target, connector, **kwargs):
        """
        :param target: hostname or ipaddr of target device
        :param connector: connector class used to open the API session
        :param kwargs:
            'user' : login user-name, defaults to "admin"
            'passwd': login password, defaults to "admin"
            'port': TCP port to probe; defaults to the connector protocol's
                well-known port
            'timeout': probe timeout in seconds, defaults to 10
            'no_probe': if present, skip the reachability probe
            'no_gather_facts': if present, skip fact gathering
        """
        self.target = target
        self.port = kwargs.get('port')
        self.user = kwargs.get('user', 'admin')
        self.passwd = kwargs.get('passwd', 'admin')
        self.timeout = kwargs.get('timeout', self.DEFAULT_PROBE_TIMEOUT)
        self.facts = {}
        self.api = connector(hostname=target, **kwargs)

        if 'no_probe' not in kwargs:
            self.probe()
        if 'no_gather_facts' not in kwargs:
            self.gather_facts()

    def gather_facts(self):
        """
        Populate :attr:`facts`; overridden by subclass.
        :return: None
        """
        pass

    def probe(self):
        """Poll the device's TCP port until it answers or the timeout expires.

        :return: tuple ``(True, elapsed)`` once the port accepts a connection,
            where ``elapsed`` is a :class:`datetime.timedelta`
        :raises ProbeError: if unreachable within ``self.timeout`` seconds
        """
        interval = 1
        start = datetime.datetime.now()
        end = start + datetime.timedelta(seconds=self.timeout)
        port = self.port or socket.getservbyname(self.api.proto)

        while datetime.datetime.now() < end:
            s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            s.settimeout(interval)
            try:
                s.connect((self.target, int(port)))
                s.shutdown(socket.SHUT_RDWR)
                elapsed = datetime.datetime.now() - start
                return True, elapsed
            except OSError:
                # Not reachable yet; wait before retrying.  Catching OSError
                # (which covers socket errors and timeouts) instead of the
                # original bare ``except`` avoids swallowing
                # KeyboardInterrupt/SystemExit.
                time.sleep(interval)
            finally:
                # Always release the socket; the original leaked it when the
                # connect attempt failed.
                s.close()

        # Raise ProbeError if unable to reach in time allotted
        raise ProbeError('Unable to reach device within %s seconds' % self.timeout)

    def __repr__(self):
        return 'Device(%r)' % self.target

    def __str__(self):
        return '{vendor} {os} at {target}'.format(vendor=self.facts['vendor'],
                                                  os=self.facts['os'],
                                                  target=self.target)
| {
"content_hash": "fb1e5fd321d1e6d080e8a37e8ab11528",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 83,
"avg_line_length": 31.970149253731343,
"alnum_prop": 0.5410830999066293,
"repo_name": "Apstra/aeon-venos",
"id": "1a7a083f4f05afc170ccdd97077322306265aedc",
"size": "2337",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pylib/aeon/base/device.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "130"
},
{
"name": "Python",
"bytes": "91545"
}
],
"symlink_target": ""
} |
import logging
import typing
import discord
from asyncpg.exceptions import PostgresError
from discord.ext import commands
from discord.ext.commands import BucketType, has_permissions
from bot.bot import command, group, cooldown
from bot.converters import CommandConverter
from bot.formatter import Paginator
from bot.globals import BlacklistTypes, PermValues
from cogs.cog import Cog
from utils.utilities import (split_string, send_paged_message)
# Module logger; 'terminal' is the logger name used for console output here.
logger = logging.getLogger('terminal')
# Permissions object with raw value 8 (the administrator permission bit).
perms = discord.Permissions(8)
class CommandBlacklist(Cog):
    def __init__(self, bot):
        # Cog boilerplate: hand the bot instance to the Cog base class.
        super().__init__(bot)
    @group(no_pm=True, invoke_without_command=True)
    @has_permissions(administrator=True)
    @cooldown(1, 5, type=BucketType.guild)
    async def blacklist(self, ctx, commands_: commands.Greedy[CommandConverter] = None,
                        *, mention: typing.Union[discord.TextChannel, discord.Role, discord.User] = None):
        """Blacklist a command for a user, role or channel
        To blacklist multiple commands at the same time wrap the command names in quotes
        like this {prefix}{name} \"command1 command2 command3\" #channel
        The hierarchy of `blacklist` and `whitelist` is as follows
        Whitelist always overrides blacklist of the same level
        Then levels of scope it can have are as follows
        `User` > `Role` > `Channel` > `Server` where each level overrides every scope perm after it
        e.g. Blacklisting command ping for role Member and whitelisting it for role Mod
        would make it so people with Member role wouldn't be able to use it unless they had Mod role
        Also if you further whitelisted ping from a single member
        that user would be able to use the command always
        since user whitelist overrides every other scope
        To blacklist a command server wide specify the commands and don't specify the mention param
        like this `{prefix}blacklist "cmd1 cmd2 etc"` which would blacklist those commands
        for everyone in the server unless they have it whitelisted
        Whitelisting server wide isn't possible
        For dangers of whitelisting see `{prefix}help whitelist`"""
        guild = ctx.guild
        if not commands_ and mention is None:
            return await ctx.send('No parameters given')

        async def _blacklist(name):
            # Toggle the blacklist row for one command at the requested
            # scope and return a status message (or None on failure).
            if mention is None:
                # Server-wide scope: rows with channel/role/uid all NULL.
                whereclause = "guild=$1 AND type IN (%s, %s) AND command=$2 AND channel IS NULL AND role IS NULL AND uid IS NULL" % (
                    BlacklistTypes.BLACKLIST, BlacklistTypes.WHITELIST)
                success = await self._set_blacklist(ctx, whereclause, guild=guild.id, command=name)
                if success:
                    return 'Blacklisted command {} from this server'.format(name)
                elif success is None:
                    # None from _set_blacklist means an existing row was
                    # removed, i.e. the blacklist was toggled off.
                    return 'Removed blacklist for command {} on this server'.format(name)
            elif isinstance(mention, discord.User):
                return await self._add_user_blacklist(ctx, name, mention, guild)
            elif isinstance(mention, discord.Role):
                return await self._add_role_blacklist(ctx, name, mention, guild)
            elif isinstance(mention, discord.TextChannel):
                return await self._add_channel_blacklist(ctx, name, mention, guild)

        # Accumulate one status line per processed command.
        s = ''
        if commands_ is None:
            # No command names given: apply to all commands for the scope.
            val = await self._set_all_commands(ctx, mention)
            if isinstance(val, str):
                s += val
        else:
            for command_ in commands_:
                # privacy must stay usable so anyone can read the policy.
                if command_.name == 'privacy':
                    await ctx.send("Cannot blacklist privacy command as it's required that anyone can see it")
                    continue

                val = await _blacklist(command_.name)
                if isinstance(val, str):
                    s += val + '\n'

        if not s:
            return

        # Split the summary to fit Discord's message length limit.
        for msg in split_string(s, splitter='\n'):
            await ctx.send(msg)
    @blacklist.command(no_pm=True)
    @has_permissions(administrator=True)
    async def toggle(self, ctx):
        """
        Disable all commands on this server (owner will still be able to use them)
        Whitelisting commands also overrides this rule
        Won't override existing commands that have been blacklisted so when you toggle
        again the commands that have been specifically blacklisted are still blacklisted
        """
        guild = ctx.guild
        # A row with command IS NULL marks "all commands"; GLOBAL-type rows
        # are excluded so bot-owner level settings are untouched.
        values = {'command': None, 'guild': guild.id, 'type': BlacklistTypes.BLACKLIST}
        where = 'guild=%s AND command IS NULL AND NOT type=%s AND uid IS NULL AND role IS NULL AND channel IS NULL' % (guild.id, BlacklistTypes.GLOBAL)
        success = await self._set_blacklist(ctx, where, (), **values)
        # NOTE(review): _set_blacklist appears to return truthy when a row was
        # inserted and None when an existing row was removed (toggled off) --
        # confirm against its implementation.
        if success:
            msg = 'All commands disabled on this server for non whitelisted users'
        elif success is None:
            msg = 'Commands are usable on this server again'
        else:
            return

        await ctx.send(msg)
async def _set_all_commands(self, ctx, scope, type=BlacklistTypes.BLACKLIST):
guild = ctx.guild
values = {'command': None, 'guild': guild.id, 'type': type}
where = 'guild=%s AND command IS NULL AND NOT type=%s AND ' % (guild.id, BlacklistTypes.GLOBAL)
type_string = 'Blacklisted' if type == BlacklistTypes.BLACKLIST else 'Whitelisted'
type_string2 = 'blacklist' if type == BlacklistTypes.BLACKLIST else 'whitelist'
message = None
if isinstance(scope, discord.User):
userid = scope.id
success = await self._set_blacklist(ctx, where + 'uid=%s' % userid, (), uid=userid, **values)
if success:
message = f'{type_string} all commands for user {scope} `{userid}`'
elif success is None:
message = f'removed {type_string2} from user {scope}, `{userid}`'
elif isinstance(scope, discord.Role):
success = await self._set_blacklist(ctx, where + 'role=%s' % scope.id, (), role=scope.id, **values)
if success:
message = '{0} all commands from role {1} `{1.id}`'.format(type_string, scope)
elif success is None:
message = 'Removed {0} from role {1} `{1.id}`'.format(type_string2, scope)
elif isinstance(scope, discord.TextChannel):
success = await self._set_blacklist(ctx, where + 'channel=%s' % scope.id, (),
channel=scope.id, **values)
if success:
message = '{0} all commands from channel {1} `{1.id}`'.format(type_string, scope)
elif success is None:
message = 'Removed {0} from channel {1} `{1.id}`'.format(type_string2, scope)
else:
return 'No valid mentions'
return message
@command(no_pm=True)
@has_permissions(administrator=True)
@cooldown(1, 5, type=BucketType.guild)
async def whitelist(self, ctx, commands_: commands.Greedy[CommandConverter], *, mention: typing.Union[discord.TextChannel, discord.Role, discord.User]):
"""Whitelist a command for a user, role or channel
To whitelist multiple commands at the same time wrap the command names in quotes
like this {prefix}{name} \"command1 command2 command3\" #channel
To see specifics on the hierarchy of whitelist/blacklist see `{prefix}help blacklist`
**WHITELISTING COULD BE DANGEROUS IF YOU DON'T KNOW WHAT YOU ARE DOING!**
Before whitelisting read the following
Whitelisting WILL OVERRIDE ANY REQUIRED PERMS for the command being called
If a command requires ban perms and you whitelist it for a role
everyone with that role can use that command even when they don't have ban perms
Due to safety reasons whitelisting commands from this module is not allowed.
Give the users correct discord perms instead
"""
msg = ctx.message
guild = msg.guild
async def _whitelist(_command):
name = _command.name
if _command.cog_name == self.__class__.__name__:
return f"Due to safety reasons commands from {_command.cog_name} module can't be whitelisted"
elif isinstance(mention, discord.User):
return await self._add_user_whitelist(ctx, name, mention, guild)
elif isinstance(mention, discord.Role):
return await self._add_role_whitelist(ctx, name, mention, guild)
elif isinstance(mention, discord.TextChannel):
return await self._add_channel_whitelist(ctx, name, mention, guild)
s = ''
for command_ in commands_:
val = await _whitelist(command_)
if isinstance(val, str):
s += val + '\n'
if not s:
return
for msg in split_string(s, splitter='\n'):
await ctx.send(msg)
    async def _set_blacklist(self, ctx, whereclause, whereargs=None, type_=BlacklistTypes.BLACKLIST, **values):
        """
        Toggle or flip a permission row in command_blacklist.

        Args:
            ctx: Context object that error messages can be sent to
            whereclause: The whereclause to search records
            whereargs: Args that are used in the whereclause. If not defined
                values.values() will be used
            type_: BlacklistTypes.BLACKLIST or BlacklistTypes.WHITELIST
            **values: Keyword values that are used to set database columns
                Uses the format column name=column value
        Returns:
            True when new permissions are set
            None when permission is removed. e.g. blacklist to no blacklist
            False when operation failed
        """
        type_string = 'blacklist' if type_ == BlacklistTypes.BLACKLIST else 'whitelist'
        sql = 'SELECT id, type FROM command_blacklist WHERE %s' % whereclause
        # Fall back to the column values as query args. NOTE(review): this
        # assumes the whereclause placeholders line up with values' insertion
        # order — callers must keep them in sync.
        whereargs = whereargs if whereargs is not None else values.values()
        try:
            row = await self.bot.dbutil.fetch(sql, whereargs, fetchmany=False)
        except PostgresError:
            logger.exception('Failed to remove blacklist')
            await ctx.send('Failed to remove %s' % type_string)
            return False
        if row:
            if row['type'] == type_:
                # Identical entry already exists -> toggle it off by deleting it
                sql = 'DELETE FROM command_blacklist WHERE %s' % whereclause
                try:
                    await self.bot.dbutil.execute(sql, whereargs)
                except PostgresError:
                    logger.exception(f'Could not update {type_string} with whereclause {whereclause}')
                    await ctx.send(f'Failed to remove {type_string}')
                    return False
                else:
                    # Entry removed -> signal with None
                    return
            else:
                # Opposite entry exists -> flip its type in place instead of inserting
                sql = 'UPDATE command_blacklist SET type=$1 WHERE id=$2'
                try:
                    await self.bot.dbutil.execute(sql, (type_, row['id']))
                except PostgresError:
                    logger.exception(f'Could not update {type_string} with whereclause {whereclause}')
                    await ctx.send(f'Failed to remove {type_string}')
                    return False
                else:
                    return True
        else:
            # Dynamically create a insert that looks like this
            # INSERT INTO command_blacklist (v1, v2, v3) VALUES ($1, $2, $3)
            sql = 'INSERT INTO command_blacklist ('
            values['type'] = type_
            keys = values.keys()
            val = '('
            l = len(keys)
            for idx, k in enumerate(keys):
                # Column names are quoted; placeholders are asyncpg style $1..$n
                sql += '"%s"' % k
                val += '$%s' % (idx + 1)
                if idx != l - 1:
                    sql += ', '
                    val += ', '
            sql += ') VALUES ' + val + ')'
            try:
                await self.bot.dbutil.execute(sql, values.values())
            except PostgresError:
                logger.exception(f'Could not set values {values}')
                await ctx.send(f'Failed to set {type_string}')
                return False
        return True
async def _add_user_blacklist(self, ctx, command_name, user, guild):
whereclause = 'guild=$1 AND command=$2 AND uid=$3 AND NOT type=$4'
success = await self._set_blacklist(ctx, whereclause, guild=guild.id,
command=command_name,
uid=user.id,
type=BlacklistTypes.GLOBAL)
if success:
return 'Blacklisted command {0} from user {1} `{1.id}`'.format(command_name, user)
elif success is None:
return 'Removed command {0} blacklist from user {1} `{1.id}`'.format(command_name, user)
async def _add_role_blacklist(self, ctx, command_name, role, guild):
whereclause = 'guild=$1 AND command=$2 AND role=$3 AND NOT type=$4'
success = await self._set_blacklist(ctx, whereclause, guild=guild.id,
command=command_name,
role=role.id,
type=BlacklistTypes.GLOBAL)
if success:
return 'Blacklisted command {0} from role {1} `{1.id}`'.format(command_name, role)
elif success is None:
return 'Removed command {0} blacklist from role {1} `{1.id}`'.format(command_name, role)
async def _add_channel_blacklist(self, ctx, command_name, channel, guild):
whereclause = 'guild=$1 AND command=$2 AND channel=$3 AND NOT type=$4'
success = await self._set_blacklist(ctx, whereclause, guild=guild.id,
command=command_name,
channel=channel.id,
type=BlacklistTypes.GLOBAL)
if success:
return 'Blacklisted command {0} from channel {1} `{1.id}`'.format(command_name, channel)
elif success is None:
return 'Removed command {0} blacklist from channel {1} `{1.id}`'.format(command_name, channel)
async def _add_user_whitelist(self, ctx, command_name, user, guild):
whereclause = 'guild=$1 AND command=$2 AND uid=$3 AND NOT type=$4'
success = await self._set_blacklist(ctx, whereclause,
type_=BlacklistTypes.WHITELIST,
guild=guild.id,
command=command_name,
uid=user.id,
type=BlacklistTypes.GLOBAL)
if success:
return 'Whitelisted command {0} from user {1} `{1.id}`'.format(command_name, user)
elif success is None:
return 'Removed command {0} whitelist from user {1} `{1.id}`'.format(command_name, user)
async def _add_role_whitelist(self, ctx, command_name, role, guild):
whereclause = 'guild=$1 AND command=$2 AND role=$3 AND NOT type=$4'
success = await self._set_blacklist(ctx, whereclause,
type_=BlacklistTypes.WHITELIST,
guild=guild.id,
command=command_name,
role=role.id,
type=BlacklistTypes.GLOBAL)
if success:
return 'Whitelisted command {0} from role {1} `{1.id}`'.format(command_name, role)
elif success is None:
return 'Removed command {0} whitelist from role {1} `{1.id}`'.format(command_name, role)
async def _add_channel_whitelist(self, ctx, command_name, channel, guild):
whereclause = 'guild=$1 AND command=$2 AND channel=$3 AND NOT type=$4'
success = await self._set_blacklist(ctx, whereclause,
type_=BlacklistTypes.WHITELIST,
guild=guild.id,
command=command_name,
channel=channel.id,
type=BlacklistTypes.GLOBAL)
if success:
return 'Whitelisted command {0} from channel {1} `{1.id}`'.format(command_name, channel)
elif success is None:
return 'Removed command {0} whitelist from channel {1} `{1.id}`'.format(command_name, channel)
@command(owner_only=True)
async def test_perms(self, ctx, user: discord.Member, command_):
value = await self.bot.dbutil.check_blacklist(f"(command='{command_}' OR command IS NULL)", user, ctx, True)
await ctx.send(value or 'No special perms')
async def get_rows(self, whereclause, select='*'):
sql = 'SELECT %s FROM command_blacklist WHERE %s' % (select, whereclause)
rows = await self.bot.dbutil.fetch(sql)
return rows
@staticmethod
def get_applying_perm(command_rows, return_type=False):
smallest = 18
smallest_row = None
perm_type = 0x10 # guild
for row in command_rows:
if row['type'] == BlacklistTypes.GLOBAL:
return False
if row['type'] == BlacklistTypes.WHITELIST:
v1 = PermValues.VALUES['whitelist']
else:
v1 = PermValues.VALUES['blacklist']
if row['uid'] is not None:
v2 = PermValues.VALUES['user']
elif row['role'] is not None:
v2 = PermValues.VALUES['role']
else:
continue
v = v1 | v2
if v < smallest:
smallest = v
perm_type = v2
smallest_row = row
if return_type:
return smallest_row, perm_type
return smallest_row
    @command(no_pm=True)
    @cooldown(1, 30, BucketType.user)
    async def role_perms(self, ctx, *, role: discord.Role=None):
        """Show white- and blacklist for all or specified role"""
        guild = ctx.guild
        if role:
            # Only the perms of the requested role
            where = 'guild={} AND uid IS NULL AND channel IS NULL AND role={}'.format(guild.id, role.id)
        else:
            # All role perms, ordered so rows of one role (and type) are adjacent
            where = 'guild={} AND uid IS NULL AND channel IS NULL AND NOT role IS NULL ORDER BY role, type'.format(guild.id)
        rows = await self.get_rows(where)
        if not rows:
            return await ctx.send('No perms found')
        paginator = Paginator('Role perms')
        # Track previous row's role and perm type so consecutive rows of the
        # same role/type are appended to the same embed field
        last = None
        last_type = None
        def get_command(row):
            # NULL command means the entry applies to every command
            return 'All commands' if row['command'] is None else row['command']
        for row in rows:
            if row['role'] != last:
                # New role encountered -> start a new field
                last = row['role']
                role = guild.get_role(row['role'])
                if role is None:
                    # Role was deleted but its perms still linger in the db
                    logger.warning('Role {} has been deleted and it has perms'.format(row['role']))
                    last = None
                    continue
                last_type = row['type']
                perm_type = 'Whitelisted:\n' if last_type == BlacklistTypes.WHITELIST else 'Blacklisted:\n'
                paginator.add_field('{0.name} {0.id}'.format(role), perm_type + get_command(row) + '\n')
            else:
                s = ''
                if row['type'] != last_type:
                    # Perm type flipped within the same role -> new subheader
                    last_type = row['type']
                    s = '\nWhitelisted:\n' if last_type == BlacklistTypes.WHITELIST else '\nBlacklisted:\n'
                s += get_command(row) + '\n'
                paginator.add_to_field(s)
        paginator.finalize()
        pages = paginator.pages
        for idx, page in enumerate(pages):
            page.set_footer(text='Page {}/{}'.format(idx + 1, len(pages)))
        await send_paged_message(ctx, pages, embed=True)
@command(no_pm=True, aliases=['sp'])
@cooldown(1, 10, BucketType.guild)
async def show_perms(self, ctx, *, type_: typing.Union[discord.Role, discord.User, discord.TextChannel]=None):
"""
Shows all server perms in one paged embed.
If type is a role, user or a text channel will only show permissions for that.
"""
sql = f'SELECT command, type, uid, role, channel FROM command_blacklist WHERE guild={ctx.guild.id}'
# Add extra filters to sql
if isinstance(type_, discord.Role):
sql += f" AND role={type_.id}"
elif isinstance(type_, discord.User):
sql += f" AND uid={type_.id}"
elif isinstance(type_, discord.TextChannel):
sql += f" AND channel={type_.id}"
sql += " ORDER BY uid, role, channel"
rows = await self.bot.dbutil.fetch(sql)
if not rows:
await ctx.send(f'No perms found for {type_ or "guild"}')
return
perms = {'guild': [], 'channel': [], 'role': [], 'user': []}
for row in rows:
if row['uid']:
perms['user'].append(row)
elif row['channel']:
perms['channel'].append(row)
elif row['role']:
perms['role'].append(row)
else:
perms['guild'].append(row)
ITEMS_PER_PAGE = 10
# Flatten dict to key value pairs
newperms = []
for k in perms:
newperms.extend([(perm, k) for perm in sorted(perms[k], key=lambda r: r['type'])])
paginator = Paginator(title=f"Permissions for guild {ctx.guild.name}", init_page=False)
for i in range(0, len(newperms), ITEMS_PER_PAGE):
s = ''
for row, type_ in newperms[i:i+ITEMS_PER_PAGE]:
t, e = ('whitelisted', '✅') if row['type'] == BlacklistTypes.WHITELIST else ('disabled', '❌')
cmd = f'Command `{row["command"]}`' if row["command"] else 'All commands'
if type_ == 'guild':
s += f'🖥{e} {cmd} {t} for this guild\n'
elif type_ == 'channel':
s += f'📝{e} {cmd} {t} in channel <#{row["channel"]}>\n'
elif type_ == 'role':
role = '<@&{0}> {0}'.format(row['role'])
s += f'⚙{e} {cmd} {t} for role {role}\n'
elif type_ == 'user':
user = self.bot.get_user(row['uid']) or ''
s += f'👤{e} {cmd} {t} for user <@{row["uid"]}> {user}\n'
paginator.add_page(description=s)
paginator.finalize()
await send_paged_message(ctx, paginator.pages, embed=True)
@command(name='commands', no_pm=True)
@cooldown(1, 30, type=BucketType.user)
async def commands_(self, ctx, user: discord.Member=None):
"""Get your or the specified users white- and blacklisted commands on this server"""
guild = ctx.guild
if not user:
user = ctx.author
if user.roles:
roles = '(role IS NULL OR role IN ({}))'.format(', '.join(map(lambda r: str(r.id), user.roles)))
else:
roles = 'role IS NULL'
where = f'guild={guild.id} AND (uid={user.id} or uid IS NULL) AND channel IS NULL AND {roles}'
rows = await self.get_rows(where)
commands = {}
for row in rows:
name = row['command']
if name in commands:
commands[name].append(row)
else:
commands[name] = [row]
whitelist = []
blacklist = []
global_blacklist = []
for name, rows in commands.items():
row = self.get_applying_perm(rows)
name = f'`{name}`'
if row is False:
global_blacklist.append(name)
continue
# Don't want channel or server specific blacklists
if row is None:
continue
if row['type'] == BlacklistTypes.WHITELIST:
whitelist.append(name)
elif row['type'] == BlacklistTypes.BLACKLIST:
blacklist.append(name)
s = ''
if whitelist:
s += f'{user}s whitelisted commands\n' + '\n'.join(whitelist) + '\n\n'
if blacklist:
s += f'Commands blacklisted fom {user}\n' + '\n'.join(blacklist) + '\n\n'
if global_blacklist:
s += f'Commands globally blacklisted for {user}\n' + '\n'.join(global_blacklist) + '\n\n'
if not s:
s = '{0} has no special perms set up on the server {1}'.format(user, guild.name)
else:
s += '{}s perms on server {}\nChannel specific perms are not checked'.format(user, guild.name)
s = split_string(s, maxlen=2000, splitter='\n')
for ss in s:
await ctx.author.send(ss)
def setup(bot):
    # Extension entry point used by discord.py's loader
    cog = CommandBlacklist(bot)
    bot.add_cog(cog)
| {
"content_hash": "0dc6e8ef04735df5f57cf50521370ef2",
"timestamp": "",
"source": "github",
"line_count": 577,
"max_line_length": 156,
"avg_line_length": 43.114384748700175,
"alnum_prop": 0.5559352011898541,
"repo_name": "s0hvaperuna/Not-a-bot",
"id": "a70e28c64302610cace00ce56e7650854abe9aef",
"size": "24892",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cogs/command_blacklist.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "13"
},
{
"name": "Python",
"bytes": "682402"
}
],
"symlink_target": ""
} |
"""
This module implements unit tests for the item-based checks of the core layer. For now, only
checks done by L{SanityCheckTreeWalker<datafinder.core.item.visitor.checks.SanityCheckTreeWalker>}
are tested.
"""
import unittest
from datafinder.core.item.collection import ItemRoot, ItemCollection
from datafinder.core.item.data_persister import constants
from datafinder.core.item.leaf import ItemLeaf
from datafinder.core.item.link import ItemLink
from datafinder.core.item.privileges.privilege import ALL_PRIVILEGE
from datafinder.core.item.visitor.checks import ActionCheckVisitor, ActionCheckTreeWalker
from datafinder_test.mocks import SimpleMock
__version__ = "$Revision-Id:$"
class ActionCheckTestCase(unittest.TestCase):
    """
    Abstract base test class for both checkers of L{datafinder.core.item.visitor.checks}.

    Subclasses configure the checker under test via the C{__checker__} attribute.
    """

    __checker__ = object

    def __init__(self, name):
        """ Constructor. """
        unittest.TestCase.__init__(self, name)

    def setUp(self):
        """
        Builds a minimal item tree (root -> collection -> [leaf, link]) backed
        by simple mocks and instantiates C{__checker__}.
        """
        self.checker = self.__checker__(False, True, True)

        # Root item: accessible and able to take children
        root = ItemRoot("root")
        root._privileges = [ALL_PRIVILEGE]
        root._fileStorer = SimpleMock(list(), canAddChildren=True)
        root._dataPersister = SimpleMock(root.fileStorer, state=constants.ITEM_STATE_ACCESSIBLE)
        root.itemFactory = SimpleMock(root.fileStorer)
        root.path = "/"
        self.testRoot = root

        # Collection below the root, flagged as archived
        node = ItemCollection("collection")
        node._privileges = [ALL_PRIVILEGE]
        node._fileStorer = SimpleMock(list(), state=constants.ITEM_STATE_ARCHIVED)
        node.itemFactory = SimpleMock(node.fileStorer)
        node.parent = root
        self.testNode = node

        # Accessible leaf inside the collection
        leaf = ItemLeaf("leaf")
        leaf._privileges = [ALL_PRIVILEGE]
        leaf._fileStorer = SimpleMock(list(), state=constants.ITEM_STATE_ACCESSIBLE)
        leaf.itemFactory = SimpleMock(leaf.fileStorer)
        leaf.parent = node
        self.testLeaf = leaf

        # Link inside the collection pointing back at the root
        link = ItemLink("link")
        link._privileges = [ALL_PRIVILEGE]
        link._fileStorer = SimpleMock(list())
        link.itemFactory = SimpleMock(link.fileStorer)
        link.parent = node
        link._linkTarget = root
        self.testLink = link
class ActionCheckVisitorTestCase(ActionCheckTestCase):
    """
    Test case for L{ActionCheckVisitor<datafinder.core.item.visitor.checks.ActionCheckVisitor>}.

    Uses C{assertEqual} instead of the deprecated C{assertEquals} alias.
    """

    __checker__ = ActionCheckVisitor

    def testAllFine(self):
        """
        Simply compares if the resulting constraints match the expectations.
        """
        # Root
        self.checker.check(self.testRoot)
        self.assertTrue(self.checker.capabilities[ActionCheckTreeWalker.CAPABILITY_ADD_CHILDREN])
        self.assertFalse(self.checker.capabilities[ActionCheckTreeWalker.CAPABILITY_DELETE])
        self.assertFalse(self.checker.capabilities[ActionCheckTreeWalker.CAPABILITY_COPY])
        self.assertFalse(self.checker.capabilities[ActionCheckTreeWalker.CAPABILITY_MOVE])
        self.assertFalse(self.checker.capabilities[ActionCheckTreeWalker.CAPABILITY_STORE])
        self.assertFalse(self.checker.capabilities[ActionCheckTreeWalker.CAPABILITY_RETRIEVE])
        self.assertFalse(self.checker.capabilities[ActionCheckTreeWalker.CAPABILITY_ARCHIVE])
        self.assertEqual(self.checker.capabilities[ActionCheckTreeWalker.CAPABILITY_SEARCH], self.checker._hasSearchSupport)
        self.assertFalse(self.checker.capabilities[ActionCheckTreeWalker.CAPABILITY_RETRIEVE_PROPERTIES])
        self.assertFalse(self.checker.capabilities[ActionCheckTreeWalker.CAPABILITY_STORE_PROPERTIES])
        # Collection is archived
        self.checker.check(self.testNode)
        self.assertTrue(self.checker.capabilities[ActionCheckTreeWalker.CAPABILITY_ADD_CHILDREN])  # it is an archive
        self.assertTrue(self.checker.capabilities[ActionCheckTreeWalker.CAPABILITY_DELETE])
        self.assertTrue(self.checker.capabilities[ActionCheckTreeWalker.CAPABILITY_COPY])
        self.assertTrue(self.checker.capabilities[ActionCheckTreeWalker.CAPABILITY_MOVE])
        self.assertTrue(self.checker.capabilities[ActionCheckTreeWalker.CAPABILITY_STORE])
        self.assertTrue(self.checker.capabilities[ActionCheckTreeWalker.CAPABILITY_RETRIEVE])
        self.assertFalse(self.checker.capabilities[ActionCheckTreeWalker.CAPABILITY_ARCHIVE])
        self.assertEqual(self.checker.capabilities[ActionCheckTreeWalker.CAPABILITY_SEARCH], self.checker._hasSearchSupport)
        self.assertEqual(self.checker.capabilities[ActionCheckTreeWalker.CAPABILITY_RETRIEVE_PROPERTIES],
                         self.checker._hasCustomMetadataSupport)
        self.assertFalse(self.checker.capabilities[ActionCheckTreeWalker.CAPABILITY_STORE_PROPERTIES])  # it is an archive

    def testCanCreate(self):
        """
        Tests the C{canAddChildren} check.
        """
        self.assertTrue(self.checker.canAddChildren(self.testRoot))
        self.assertTrue(self.checker.canAddChildren(self.testNode))  # it is an archive
        self.assertTrue(self.checker.canAddChildren(self.testLeaf))

    def testCanDelete(self):
        """ Tests the C{canDelete} check. """
        self.assertFalse(self.checker.canDelete(self.testRoot))
        self.assertTrue(self.checker.canDelete(self.testNode))
        self.assertTrue(self.checker.canDelete(self.testLeaf))

    def testCanCopy(self):
        """
        Tests the C{canCopy} check.
        """
        self.assertFalse(self.checker.canCopy(self.testRoot))
        self.assertTrue(self.checker.canCopy(self.testNode))
        self.assertTrue(self.checker.canCopy(self.testLeaf))

    def testCanMove(self):
        """
        Tests the C{canMove} check.
        """
        self.assertFalse(self.checker.canMove(self.testRoot))
        self.assertTrue(self.checker.canMove(self.testNode))
        self.assertTrue(self.checker.canMove(self.testLeaf))

    def testCanStoreData(self):
        """
        Tests the C{canStoreData} check.
        """
        self.assertFalse(self.checker.canStoreData(self.testRoot))
        self.assertTrue(self.checker.canStoreData(self.testNode))
        self.assertTrue(self.checker.canStoreData(self.testLeaf))

    def testCanRetrieveData(self):
        """
        Tests the C{canRetrieveData} check.
        """
        self.assertFalse(self.checker.canRetrieveData(self.testRoot))
        self.assertTrue(self.checker.canRetrieveData(self.testNode))
        self.assertTrue(self.checker.canRetrieveData(self.testLeaf))

    def testCanArchive(self):
        """
        Tests the C{canArchive} check.
        """
        self.assertFalse(self.checker.canArchive(self.testRoot))
        self.assertFalse(self.checker.canArchive(self.testNode))
        self.assertFalse(self.checker.canArchive(self.testLeaf))  # only collections can be archived

    def testCanSearch(self):
        """
        Tests the C{canSearch} check.
        """
        self.assertEqual(self.checker.canSearch(self.testRoot), self.checker._hasSearchSupport)
        self.assertEqual(self.checker.canSearch(self.testNode), self.checker._hasSearchSupport)
        self.assertEqual(self.checker.canSearch(self.testLeaf), self.checker._hasSearchSupport)

    def testCanRetrieveProperties(self):
        """
        Tests the C{canRetrieveProperties} check.
        """
        self.assertFalse(self.checker.canRetrieveProperties(self.testRoot))
        self.assertEqual(self.checker.canRetrieveProperties(self.testNode), self.checker._hasCustomMetadataSupport)
        self.assertEqual(self.checker.canRetrieveProperties(self.testLeaf), self.checker._hasCustomMetadataSupport)

    def testCanStoreProperties(self):
        """
        Tests the C{canStoreProperties} check.
        """
        self.assertFalse(self.checker.canStoreProperties(self.testRoot))
        self.assertEqual(self.checker.canStoreProperties(self.testNode), False)  # it is an archive
        self.assertEqual(self.checker.canStoreProperties(self.testLeaf), self.checker._hasCustomMetadataSupport)
class ActionCheckTreeWalkerTestCase(ActionCheckTestCase):
    """
    Test case for L{ActionCheckTreeWalker<datafinder.core.item.visitor.checks.ActionCheckTreeWalker>}.

    Uses C{assertEqual} instead of the deprecated C{assertEquals} alias.
    """

    __checker__ = ActionCheckTreeWalker

    def testAllFine(self):
        """ Simply compares if the resulting constraints match the expectations. """
        # Only check root as it inherits attributes from children
        self.checker.check(self.testRoot)
        self.assertTrue(self.checker.capabilities[ActionCheckTreeWalker.CAPABILITY_ADD_CHILDREN])
        self.assertFalse(self.checker.capabilities[ActionCheckTreeWalker.CAPABILITY_DELETE])
        self.assertFalse(self.checker.capabilities[ActionCheckTreeWalker.CAPABILITY_COPY])
        self.assertFalse(self.checker.capabilities[ActionCheckTreeWalker.CAPABILITY_MOVE])
        self.assertFalse(self.checker.capabilities[ActionCheckTreeWalker.CAPABILITY_STORE])
        self.assertFalse(self.checker.capabilities[ActionCheckTreeWalker.CAPABILITY_RETRIEVE])
        self.assertFalse(self.checker.capabilities[ActionCheckTreeWalker.CAPABILITY_ARCHIVE])
        self.assertEqual(self.checker.capabilities[ActionCheckTreeWalker.CAPABILITY_SEARCH], self.checker._hasSearchSupport)
        self.assertFalse(self.checker.capabilities[ActionCheckTreeWalker.CAPABILITY_RETRIEVE_PROPERTIES])
        self.assertFalse(self.checker.capabilities[ActionCheckTreeWalker.CAPABILITY_STORE_PROPERTIES])

    def testCanCreate(self):
        """ Tests the C{canAddChildren} check. """
        self.assertTrue(self.checker.canAddChildren(self.testRoot))
        self.assertTrue(self.checker.canAddChildren(self.testNode))  # it is an archive
        self.assertTrue(self.checker.canAddChildren(self.testLeaf))

    def testCanDelete(self):
        """ Tests the C{canDelete} check. """
        self.assertFalse(self.checker.canDelete(self.testRoot))
        self.assertTrue(self.checker.canDelete(self.testNode))
        self.assertTrue(self.checker.canDelete(self.testLeaf))

    def testCanCopy(self):
        """ Tests the C{canCopy} check. """
        self.assertFalse(self.checker.canCopy(self.testRoot))
        self.assertTrue(self.checker.canCopy(self.testNode))
        self.assertTrue(self.checker.canCopy(self.testLeaf))

    def testCanMove(self):
        """ Tests the C{canMove} check. """
        self.assertFalse(self.checker.canMove(self.testRoot))
        self.assertTrue(self.checker.canMove(self.testNode))
        self.assertTrue(self.checker.canMove(self.testLeaf))

    def testCanStoreData(self):
        """ Tests the C{canStoreData} check. """
        self.assertFalse(self.checker.canStoreData(self.testRoot))
        self.assertTrue(self.checker.canStoreData(self.testNode))
        self.assertTrue(self.checker.canStoreData(self.testLeaf))

    def testCanRetrieveData(self):
        """ Tests the C{canRetrieveData} check. """
        self.assertFalse(self.checker.canRetrieveData(self.testRoot))
        self.assertTrue(self.checker.canRetrieveData(self.testNode))
        self.assertTrue(self.checker.canRetrieveData(self.testLeaf))

    def testCanArchive(self):
        """ Tests the C{canArchive} check. """
        self.assertFalse(self.checker.canArchive(self.testRoot))
        self.assertFalse(self.checker.canArchive(self.testNode))
        self.assertFalse(self.checker.canArchive(self.testLeaf))  # only collections can be archived

    def testCanSearch(self):
        """
        Tests the C{canSearch} check.
        """
        self.assertEqual(self.checker.canSearch(self.testRoot), self.checker._hasSearchSupport)
        self.assertEqual(self.checker.canSearch(self.testNode), self.checker._hasSearchSupport)
        self.assertEqual(self.checker.canSearch(self.testLeaf), self.checker._hasSearchSupport)

    def testCanRetrieveProperties(self):
        """
        Tests the C{canRetrieveProperties} check.
        """
        self.assertFalse(self.checker.canRetrieveProperties(self.testRoot))
        self.assertEqual(self.checker.canRetrieveProperties(self.testNode), self.checker._hasCustomMetadataSupport)
        self.assertEqual(self.checker.canRetrieveProperties(self.testLeaf), self.checker._hasCustomMetadataSupport)

    def testCanStoreProperties(self):
        """
        Tests the C{canStoreProperties} check.
        """
        self.assertFalse(self.checker.canStoreProperties(self.testRoot))
        self.assertEqual(self.checker.canStoreProperties(self.testNode), self.checker._hasCustomMetadataSupport)
        self.assertEqual(self.checker.canStoreProperties(self.testLeaf), self.checker._hasCustomMetadataSupport)

    def testAffectedItems(self):
        """ Checks the the C{affectedItems} attribute. """
        self.checker.check(self.testRoot)
        self.assertEqual(len(self.checker.affectedItems), 3)
        self.checker.check(self.testNode)
        self.assertEqual(len(self.checker.affectedItems), 2)
        self.checker.check(self.testLeaf)
        self.assertEqual(len(self.checker.affectedItems), 0)
        self.checker.check(self.testLink)
        self.assertEqual(len(self.checker.affectedItems), 0)
| {
"content_hash": "31c7ae27b2111a5c1486c3437b485ba3",
"timestamp": "",
"source": "github",
"line_count": 311,
"max_line_length": 125,
"avg_line_length": 46.19292604501608,
"alnum_prop": 0.6850897953501323,
"repo_name": "DLR-SC/DataFinder",
"id": "2e69743ac0846dba4056cf59b11e61bdd037fb7d",
"size": "16064",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/unittest/datafinder_test/core/item/visitor/checks_test.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "NSIS",
"bytes": "7649"
},
{
"name": "Python",
"bytes": "7056802"
},
{
"name": "QMake",
"bytes": "1975"
}
],
"symlink_target": ""
} |
"""
BunnyFunctions contains a set of high level functions to manipulate experiment objects.
"""
__license__ = "MIT"
from Experiment import *
from Participant import *
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.pylab as pylab
import sys
import time
def Explore(Exp, lower=15, limit=35, filename=None):
"""
Plot an experiment's power as a function of the sample size.
Args:
Exp (Experiment): Experiment object to use (see Experiment class)
lower (int) : Smallest sample size to consider.
limit (int) : Highest sample size to consider.
filename (str) : Filename. If provided, the plot is saved instead of printed.
Returns:
None
>> Explore(MyExperiment)
>> Explore(MyExperiment,10,50,"Results.png")
"""
if not Exp.Validate():
print "Error: Experiment failed validation."
return None
print "Exploring sample sizes ... "
res = ExploreSampleSize(Exp, lower, limit)
PlotPowerSamples(res, filename)
def Hop(Exp, limit=100, power=None, samples=10000, Verbose=True):
"""
Determine an experiment's sample size through binary + local search.
An experiment's power is often only approximately monotonic as a function of the sample size (especially with NHST).
This function does binary search to find an approximate sample size, and then exhaustively checks
the nearby region ().
Args:
Exp (Experiment): Experiment object to use (see Experiment class)
limit (int) : Highest sample size to consider.
power (float) : Experiment's power. If none is provided then the experiment object's power is used. If neither are determined, power is set to 0.95
samples (int) : Number of simulations for each sample size proposal.
Verbose (bool) : Run function silently or not.
Returns:
[Sample size, Power] (list) : First item shows smallest sample size and second item shows the corresponding power.
>> Hop(MyExperiment)
>> Hop(MyExperiment, limit=30, power=0.99, samples=100, Verbose=False)
"""
if power is None:
if Exp.Power is None:
if Verbose:
print "Setting power to 0.95."
power = 0.95
else:
power = Exp.Power
if (power >= 1 or power <= 0):
if Verbose:
print "Error: Power has to be between 0 and 1."
return None
if Verbose:
sys.stdout.write("Power: " + str(power) + "\nLimit: " +
str(limit) + "\nReplications per proposal: " + str(samples) + "\n")
lower = 1
upper = limit
current = (upper - lower) / 2 + lower
if Verbose:
print "Searching for your sample size..."
underpowered = True
while True:
if Verbose:
sys.stdout.write(
"Simulating with " + str(current) + " participants per condition... ")
sys.stdout.flush()
Exp.SetSampleSize(current)
p = Exp.GetPower(samples)
if Verbose:
sys.stdout.write("Power=" + str(p) + "\n")
if p < power:
# If experiment is underpowered
if (upper - lower) <= 1:
Exp.SetSampleSize(upper)
Exp.UpdatePower()
# Check if Hopping worked
if underpowered:
print "Warning: Failed to converge. Bunny.Hop() assumes that the pattern your DataTest searches for exists.\nIf you're using a null model consider using Bunny.Explore() instead.\nIf you're using a non-random model then increase the search limit by sending a number greater than 100 as the second paramter of Bunny.Hop()"
return [upper, Exp.Power]
lower = current
current = (upper - lower) / 2 + lower
else:
underpowered = False
if (upper - lower) <= 1:
Exp.SetSampleSize(lower)
Exp.UpdatePower()
# If you're here then at least one instance was over.
if Verbose:
print "Binay search complete. Moving backwards ..."
return ReverseSearch(Exp, lower, power, 5)
upper = current
current = (upper - lower) / 2 + lower
def Inspect(Exp, RecomputePower=False):
    """
    Print experiment details. Automatically computes sample size or power if possible.
    Args:
        Exp (Experiment): Experiment object to use (see Experiment class)
        RecomputePower (bool) : If the experiment object has a power stored, RecomputePower determines if it should be recomputed.
    Returns:
        None
    >> Inspect(MyExperiment)
    >> Inspect(MyExperiment,True)
    """
    sys.stdout.write("\nValidating experiment...")
    # NOTE(review): when Exp.Validate() fails nothing below runs and no
    # failure message is printed here -- presumably Validate() reports its
    # own errors; confirm against the Experiment class.
    if Exp.Validate():
        sys.stdout.write(" SUCCESS\n\n")
        sys.stdout.write("Experiment name: " + str(Exp.Name) + "\n")
        sys.stdout.write("Statistical test: " + str(Exp.StatTest.Name) + "\n\n")
        # Report the sample size, estimating it from the stored power when possible.
        if not Exp.SampleSize is None:
            sys.stdout.write("Sample size: " + str(Exp.SampleSize) + "\n")
        else:
            sys.stdout.write(
                "No sample size associated. Checking if I can estimate it... ")
            if not Exp.Power is None:
                sys.stdout.write(
                    "Yes.\nComputing smallest sample size needed... \n\n")
                # Hop() mutates Exp's sample size as a side effect, which is
                # what makes the following write meaningful.
                Hop(Exp, limit=100, power=Exp.Power, samples=5000, Verbose=False)
                sys.stdout.write("Sample size: " + str(Exp.SampleSize) + "\n")
            else:
                sys.stdout.write(
                    "No.\nUse Bunny.Explore(Experiment) to see the relation between sampe size and power.\n")
        # Report the power, recomputing it only when explicitly requested.
        if not Exp.Power is None:
            if RecomputePower is True:
                Exp.UpdatePower()
                sys.stdout.write(
                    "Power: " + str(Exp.Power) + " (Freshly computed!)\n")
            else:
                sys.stdout.write(
                    "Power: " + str(Exp.Power) + " (Call Bunny.Inspect(True) to recompute power)\n")
        else:
            sys.stdout.write("No power. Checking if I can estimate it... ")
            if not Exp.SampleSize is None:
                sys.stdout.write("Yes.\n\n")
                Exp.UpdatePower()
                sys.stdout.write("Power: " + str(Exp.Power) + "\n")
            else:
                sys.stdout.write(
                    "No.\nUse Bunny.Explore(Experiment) to see the relation between sampe size and power.\n\n")
def Imagine(Exp, samples=10000):
    """
    Plot the key statistics of a simulation along with the decision.
    Args:
        Exp (Experiment): Experiment object to use (see Experiment class)
        samples (int) : Number of simulations to run
    Returns:
        None
    >> Imagine(MyExperiment)
    >> Imagine(MyExperiment,samples=10000)
    """
    usepvals = False
    if Exp.SampleSize is None:
        print "ERROR: Need a sample size! (Use SetSampleSize())"
        return None
    Res = Exp.Replicate(samples)
    # Fall back to p-values when the DataTest exposes no key statistics.
    if not (Res[0].HasKeyStats()):
        print "WARNING: DataTest has no key statistics to plot. Trying to use p-values instead..."
        if (Res[0].HasPvals()):
            usepvals = True
        else:
            print "ERROR: No p-values. Cannot plot."
            return None
    if len(Exp.Participants) == 1:
        # Single condition: stacked histogram of the statistic (or p-value),
        # colored by the aggregate decision of each simulation.
        if usepvals:
            Stats = [Res[i].pvals[0] for i in range(samples)]
        else:
            Stats = [Res[i].keystats[0] for i in range(samples)]
        Decisions = [Res[i].aggregatedecision for i in range(samples)]
        SuccessTrials_indices = [i for i, x in enumerate(Decisions) if x == 1]
        FailedTrials_indices = [i for i, x in enumerate(Decisions) if x == 0]
        SuccessTrials = [Stats[i] for i in SuccessTrials_indices]
        FailedTrials = [Stats[i] for i in FailedTrials_indices]
        # Power = fraction of simulations whose aggregate decision is 1.
        Power = sum(Decisions) * 1.0 / len(Decisions)
        pylab.figure()
        # One bin per distinct observed statistic value.
        binno = len(set(Stats))
        n, bins, patches = pylab.hist([SuccessTrials, FailedTrials], binno, histtype='bar', stacked=True, color=[
                                      'green', 'red'], label=['Success', 'Fail'])
        pylab.legend()
        if usepvals:
            pylab.xlabel('P-value')
        else:
            pylab.xlabel('Statistic')
        pylab.ylabel('Number of simulations')
        pylab.title(str(samples) + ' simulations with ' +
                    str(Exp.SampleSize) + ' participants. Power = ' + str(Power))
        pylab.show()
    else:
        # Two (or more) conditions: 3D scatter of joint statistic frequencies,
        # colored by the decision observed for that statistic pair.
        if len(Exp.Participants) > 2:
            print "WARNING: Experiment has more than two conditions. Only plotting first two"
        if usepvals:
            StatsDim0 = [Res[i].pvals[0] for i in range(samples)]
            StatsDim1 = [Res[i].pvals[1] for i in range(samples)]
        else:
            StatsDim0 = [Res[i].keystats[0] for i in range(samples)]
            StatsDim1 = [Res[i].keystats[1] for i in range(samples)]
        Decisions = [Res[i].aggregatedecision for i in range(samples)]
        Power = sum(Decisions) * 1.0 / len(Decisions)
        # Grid axes are the distinct observed values on each dimension.
        Domain0 = list(np.sort(list(set(StatsDim0))))
        Domain1 = list(np.sort(list(set(StatsDim1))))
        X, Y = np.meshgrid(Domain0, Domain1)
        X = X.flatten()
        Y = Y.flatten()
        hist, xedges, yedges = np.histogram2d(
            StatsDim0, StatsDim1, bins=[len(Domain0), len(Domain1)])
        Z = np.transpose(hist).flatten()
        C = [0] * len(X)
        # Now create the decision drawing.
        # For each grid point, find one simulation with exactly that pair of
        # statistics and use its decision to color the point.
        # NOTE(review): exact float equality is used; this only works because
        # the grid is built from the observed values themselves.
        for index in range(len(X)):
            indicesX = [i for i, x in enumerate(StatsDim0) if x == X[index]]
            indicesY = [i for i, x in enumerate(StatsDim1) if x == Y[index]]
            DecisionIndex = [i for i in indicesX if i in indicesY]
            if DecisionIndex == []:
                C[index] = 0
            else:
                C[index] = Decisions[DecisionIndex[0]]
        # Normalize Z dimension (counts -> percentage of simulations)
        Z = Z * 100.0 / sum(Z)
        # Convert color vector a color scheme (0 -> red/fail, else green/success)
        C = ['r' if i == 0 else 'g' for i in C]
        fig = plt.figure()
        ax = fig.gca(projection='3d')
        for i in range(len(X)):
            ax.scatter(X[i], Y[i], Z[i], c=C[i])
        ax.set_xlabel('Condition 1: ' + Exp.Participants[0].Name)
        ax.set_ylabel('Condition 2: ' + Exp.Participants[1].Name)
        ax.set_zlabel('Percentage of simulations')
        ax.set_title(str(samples) + ' simulations with ' +
                     str(Exp.SampleSize) + ' participnats. Power = ' + str(Power))
        plt.show()
def ExploreSampleSize(Exp, lower=1, limit=-1, samples=10000):
    """
    Calculate an experiment's power for a range of sample sizes.
    This is the main function that Explore() uses to generate the data.
    .. warning::
       This function is for internal use only.
    Args:
        Exp (Experiment): Experiment object to use (see Experiment class)
        lower (int) : Smallest sample size to consider.
        limit (int) : Highest sample size to consider. If limit=-1 then sample sizes between 15 and 35 are tested
        samples (int) : Number of simulations per proposal.
    Returns:
        [[Sample sizes], [Power]] (list) : [Sample sizes] is a list of sample sizes and [Power] has the power associated with each sample size.
    >> ExploreSampleSize(MyExperiment)
    >> ExploreSampleSize(MyExperiment,10,50,10000)
    """
    # Sentinel -1 means "use the default 15..35 window" (overrides lower too).
    if limit == -1:
        print "No limit specified. Testing samples between 15 and 35 ..."
        lower = 15
        limit = 35
    Power = []
    if lower > limit:
        print "Error: Lower limit is higher than upper limit"
        return None
    print "Estimating time ...."
    SampleSize = range(lower, limit + 1)
    # Remember the caller's sample size so it can be restored afterwards.
    CurrSampleSize = Exp.SampleSize
    for i in SampleSize:
        # Time the first proposal to extrapolate a total-runtime estimate.
        if i == lower:
            start = time.time()
        Exp.SetSampleSize(i)
        Power.append(Exp.GetPower(samples))
        if i == lower:
            end = time.time()
            secs = (end - start) * len(SampleSize)
            sys.stdout.write("This will take at least ")
            if (secs < 60):
                sys.stdout.write(str(round(secs, 2)) + " seconds.\n")
            else:
                mins = secs * 1.0 / 60
                if (mins < 60):
                    sys.stdout.write(str(round(mins, 2)) + " minutes.\n")
                else:
                    hours = mins * 1.0 / 60
                    sys.stdout.write(str(round(hours, 2)) + " hours.\n")
    print "Done!"
    # Restore experiment object.
    if CurrSampleSize is None:
        Exp.ResetSampleSize()
        Exp.ResetPower()
    else:
        Exp.SetSampleSize(CurrSampleSize)
        Exp.UpdatePower(samples)
    return [SampleSize, Power]
def ReverseSearch(Exp, lower, power, steps, samples=5000):
    """
    Check if there are any smaller sample-sizes in the vicinity with greater or equal power (this often happens with NHST).
    .. warning::
       This function is for internal use only.
    Args:
        Exp (Experiment): Experiment object to use.
        lower (int): Initial sample size.
        power (float): Initial sample size's power.
        steps (int): Number of steps to move.
        samples (int): Number of simulations used to estimate the power of each candidate sample size.
    Returns:
        [Sample size, Power] (list) : First item shows smallest sample size and second item shows the corresponding power.
    """
    CurrS = lower
    CurrP = power
    # Walk backwards from lower-1, keeping the smallest sample size whose
    # estimated power is at least as good as the best seen so far.
    for samplesize in range(lower - 1, lower - steps - 1, -1):
        # Sample sizes below 1 are meaningless; stop instead of simulating them.
        if samplesize < 1:
            break
        Exp.SetSampleSize(samplesize)
        # BUGFIX: 'samples' used to be an undefined name here (NameError on
        # every call); it is now an explicit parameter with a default, so the
        # existing 4-argument call in Hop() keeps working.
        p = Exp.GetPower(samples)
        if p >= CurrP:
            CurrP = p
            CurrS = samplesize
    # Leave the experiment configured with the winning sample size.
    Exp.SetSampleSize(CurrS)
    Exp.UpdatePower()
    return [CurrS, CurrP]
def PlotPowerSamples(Samples, Filename=None):
    """
    Plot the relation between sample size and power.
    This is the function Explore() uses to produce its plots.
    .. warning::
       This function is for internal use only.
    Args:
        Samples (list): Two lists: the first holds sample sizes, the second the power associated with each sample size (the output of ExploreSampleSize fits directly).
        Filename (str) : When a filename is given the plot is saved to it instead of being shown on screen.
    Returns:
        None
    >> res = ExploreSampleSize(Exp, 15, 20)
    >> PlotPowerSamples(res)
    """
    samplesizes = Samples[0]
    powers = Samples[1]
    plt.clf()
    # Blue dots for the data points plus a black connecting line.
    plt.plot(samplesizes, powers, 'bo', samplesizes, powers, 'k')
    plt.xlabel('Sample Size')
    plt.ylabel('Power')
    plt.title('Relation between sample size and power')
    plt.grid(True)
    if Filename is not None:
        plt.savefig(Filename)
    else:
        plt.show()
| {
"content_hash": "ca724a6702aa737ffef4dff5f2c29c79",
"timestamp": "",
"source": "github",
"line_count": 405,
"max_line_length": 340,
"avg_line_length": 36.08888888888889,
"alnum_prop": 0.5938697318007663,
"repo_name": "julianje/Bunny",
"id": "2226d9ea7d82fe04c17250379e20dd049ac3fd0c",
"size": "14641",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Bunny/BunnyFunctions.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "106850"
}
],
"symlink_target": ""
} |
import time
from django.test import TestCase
from selenium import webdriver
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from webdriver_manager.chrome import ChromeDriverManager
from .utils import element_has_css_class
import os
os.environ['WDM_LOG_LEVEL'] = '0'
class TestBatonIndex(TestCase):
    # End-to-end Selenium checks of the Django admin index page as restyled
    # by baton: navbar title, main content blocks, and footer links.
    # NOTE(review): assumes a dev server is already listening on
    # localhost:8000 with an admin/admin superuser.
    def setUp(self):
        # Headless Chrome configured for CI containers (no sandbox / no
        # /dev/shm), driver binary resolved by webdriver_manager.
        chrome_options = Options()
        chrome_options.add_argument("--headless")
        chrome_options.add_argument('--no-sandbox')
        chrome_options.add_argument('--disable-extensions')
        chrome_options.add_argument('--disable-dev-shm-usage')
        self.driver = webdriver.Chrome(
            ChromeDriverManager().install(),
            options=chrome_options,
        )
        self.driver.set_window_size(1920, 1080)
        self.driver.implicitly_wait(10)
        self.login()
    def tearDown(self):
        # Always release the browser, even if a test failed.
        self.driver.quit()
    def login(self):
        # Fill the admin login form; the sleeps give the page JS time to
        # settle between keystrokes.
        self.driver.get('http://localhost:8000/admin')
        username_field = self.driver.find_element_by_id("id_username")
        password_field = self.driver.find_element_by_id("id_password")
        button = self.driver.find_element_by_css_selector('input[type=submit]')
        username_field.send_keys('admin')
        time.sleep(1)
        password_field.send_keys('admin')
        time.sleep(1)
        button.click()
    def test_navbar(self):
        # Wait until baton is ready (the JS adds "baton-ready" to <body>)
        wait = WebDriverWait(self.driver, 10)
        wait.until(element_has_css_class((By.TAG_NAME, 'body'), "baton-ready"))
        # site title
        site_name = self.driver.find_element_by_css_selector("#site-name a")
        self.assertEqual(
            site_name.get_attribute('innerHTML'), 'Baton Test App')
    def test_content(self):
        # Wait until baton is ready
        wait = WebDriverWait(self.driver, 10)
        wait.until(element_has_css_class((By.TAG_NAME, 'body'), "baton-ready"))
        time.sleep(1)
        # page title
        page_title = self.driver.find_element_by_css_selector(
            "#content h1")
        self.assertEqual(page_title.get_attribute('innerHTML'), 'Baton administration')
        self.assertEqual(page_title.is_displayed(), True)
        # recent actions
        recent_actions = self.driver.find_element_by_id('recent-actions-module')
        self.assertEqual(recent_actions.is_displayed(), True)
        modules = self.driver.find_elements_by_css_selector(
            "#content-main .module")
        self.assertEqual(len(modules), 2)
    def test_footer(self):
        # Wait until baton is ready
        wait = WebDriverWait(self.driver, 10)
        wait.until(element_has_css_class((By.TAG_NAME, 'body'), "baton-ready"))
        links = self.driver.find_elements_by_css_selector(
            "#footer .col-sm-4 p")
        self.assertEqual(len(links), 3)
        # support
        self.assertEqual(links[0].find_element_by_css_selector('a').get_attribute('href'), 'mailto:mail@otto.to.it')
        self.assertEqual(links[0].get_attribute('innerText').strip(), 'Support')
        # copyright
        self.assertEqual(links[1].get_attribute('innerText').strip(), 'copyright © 2022 Otto srl')
        # powered by
        self.assertEqual(links[2].get_attribute('innerText').strip(), 'Baton Test App · Developed by Otto srl')
| {
"content_hash": "62fa54d22f0c047c1bda765b8b466df7",
"timestamp": "",
"source": "github",
"line_count": 92,
"max_line_length": 116,
"avg_line_length": 37.46739130434783,
"alnum_prop": 0.6483899042645779,
"repo_name": "otto-torino/django-baton",
"id": "d8ac6913871a1fadba57d5cb857b406941afe311",
"size": "3449",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "testapp/app/app/tests/test_e2e_index.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1384"
},
{
"name": "HTML",
"bytes": "16296"
},
{
"name": "JavaScript",
"bytes": "77826"
},
{
"name": "Python",
"bytes": "81288"
},
{
"name": "SCSS",
"bytes": "60222"
},
{
"name": "Vim Script",
"bytes": "120"
}
],
"symlink_target": ""
} |
"""\
=================
Pure Transformer component
=================
This component applies a function specified at its creation to messages
received (a filter). If the function returns None, no message is sent,
otherwise the result of the function is sent to "outbox".
Example Usage
-------------
To read in lines of text, convert to upper case and then write to the console.
pipeline(
ConsoleReader(),
PureTransformer(lambda x : x.upper()),
ConsoleEchoer()
).run()
"""
from Axon.Component import component
from Axon.Ipc import producerFinished, shutdown
class PureTransformer(component):
    """
    Applies a function to every message received on "inbox" and forwards the
    result to "outbox". A None result suppresses the outgoing message.
    Shuts down cleanly on producerFinished/shutdown control messages.
    """
    def __init__(self, function=None):
        super(PureTransformer, self).__init__()
        # Allow the transform to be supplied at construction time; otherwise
        # the (overridable) processMessage method is used.
        if function:
            self.processMessage = function
    def processMessage(self, msg):
        """Default transform: returns None, so nothing is forwarded."""
        pass
    def main(self):
        while 1:
            yield 1
            while self.dataReady("inbox"):
                returnval = self.processMessage(self.recv("inbox"))
                # Use an identity check: objects with exotic __ne__ (e.g.
                # numpy arrays) must still be forwarded correctly.
                if returnval is not None:
                    self.send(returnval, "outbox")
            while self.dataReady("control"):
                msg = self.recv("control")
                # BUGFIX: producerFinished/shutdown were referenced here but
                # never imported, raising NameError on the first control
                # message; they are now imported from Axon.Ipc at file level.
                if isinstance(msg, (producerFinished, shutdown)):
                    self.send(producerFinished(self), "signal")
                    return
            self.pause()
# Components exported by this module (used by Kamaelia's component registry).
__kamaelia_components__ = ( PureTransformer, )
# Demo: wrap each console line with "foo"/"bar!" and echo it back.
if __name__ == "__main__":
    from Kamaelia.Chassis.Pipeline import pipeline
    from Kamaelia.Util.Console import ConsoleReader, ConsoleEchoer
    # Example - display the contents of files whose names are entered
    pipeline(
        ConsoleReader(eol=""),
        PureTransformer(lambda x : "foo" + x + "bar!\n"),
        ConsoleEchoer()
    ).run()
| {
"content_hash": "82b890af57a0d3f99903dd56351a40ed",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 82,
"avg_line_length": 30.24561403508772,
"alnum_prop": 0.5968677494199536,
"repo_name": "sparkslabs/kamaelia_",
"id": "1f9e3512fade873fce34826a9ed3a75ddf62784f",
"size": "2629",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "Sketches/RJL/Kamaelia/Community/RJL/Kamaelia/Util/PureTransformer.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "3814"
},
{
"name": "C",
"bytes": "212854"
},
{
"name": "C++",
"bytes": "327546"
},
{
"name": "CSS",
"bytes": "114434"
},
{
"name": "ChucK",
"bytes": "422"
},
{
"name": "HTML",
"bytes": "1288960"
},
{
"name": "Java",
"bytes": "31832"
},
{
"name": "JavaScript",
"bytes": "829491"
},
{
"name": "Makefile",
"bytes": "5768"
},
{
"name": "NSIS",
"bytes": "18867"
},
{
"name": "PHP",
"bytes": "49059"
},
{
"name": "Perl",
"bytes": "504"
},
{
"name": "Processing",
"bytes": "2885"
},
{
"name": "Pure Data",
"bytes": "7485482"
},
{
"name": "Python",
"bytes": "18896248"
},
{
"name": "Ruby",
"bytes": "4165"
},
{
"name": "Shell",
"bytes": "707430"
}
],
"symlink_target": ""
} |
"""Trigger entity."""
from __future__ import annotations
import logging
from typing import Any
from homeassistant.const import (
CONF_DEVICE_CLASS,
CONF_ICON,
CONF_NAME,
CONF_UNIQUE_ID,
CONF_UNIT_OF_MEASUREMENT,
)
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers import template, update_coordinator
from . import TriggerUpdateCoordinator
from .const import CONF_ATTRIBUTES, CONF_AVAILABILITY, CONF_PICTURE
class TriggerEntity(update_coordinator.CoordinatorEntity):
    """Template entity based on trigger data.
    Static templates are rendered once at construction time; dynamic ones are
    re-rendered from the coordinator's ``run_variables`` on every update.
    """
    domain = ""
    # Subclasses may declare extra config keys to render on each update:
    # simple keys go through Template.async_render, complex ones through
    # template.render_complex.
    extra_template_keys: tuple | None = None
    extra_template_keys_complex: tuple | None = None
    def __init__(
        self,
        hass: HomeAssistant,
        coordinator: TriggerUpdateCoordinator,
        config: dict,
    ) -> None:
        """Initialize the entity."""
        # NOTE(review): hass is not used by this base class; kept for
        # signature compatibility with subclasses/callers.
        super().__init__(coordinator)
        entity_unique_id = config.get(CONF_UNIQUE_ID)
        # Namespace the unique id under the coordinator's id when both exist.
        if entity_unique_id and coordinator.unique_id:
            self._unique_id = f"{coordinator.unique_id}-{entity_unique_id}"
        else:
            self._unique_id = entity_unique_id
        self._config = config
        self._static_rendered = {}
        self._to_render_simple = []
        self._to_render_complex = []
        # Split the standard templated keys into static (render once) and
        # dynamic (render on every coordinator update).
        for itm in (
            CONF_NAME,
            CONF_ICON,
            CONF_PICTURE,
            CONF_AVAILABILITY,
        ):
            if itm not in config:
                continue
            if config[itm].is_static:
                self._static_rendered[itm] = config[itm].template
            else:
                self._to_render_simple.append(itm)
        if self.extra_template_keys is not None:
            self._to_render_simple.extend(self.extra_template_keys)
        if self.extra_template_keys_complex is not None:
            self._to_render_complex.extend(self.extra_template_keys_complex)
        # We make a copy so our initial render is 'unknown' and not 'unavailable'
        self._rendered = dict(self._static_rendered)
        # Keys whose rendered value should be parsed into native types.
        self._parse_result = {CONF_AVAILABILITY}
    @property
    def name(self):
        """Name of the entity."""
        return self._rendered.get(CONF_NAME)
    @property
    def unique_id(self):
        """Return unique ID of the entity."""
        return self._unique_id
    @property
    def device_class(self):
        """Return device class of the entity."""
        return self._config.get(CONF_DEVICE_CLASS)
    @property
    def unit_of_measurement(self) -> str | None:
        """Return unit of measurement."""
        return self._config.get(CONF_UNIT_OF_MEASUREMENT)
    @property
    def icon(self) -> str | None:
        """Return icon."""
        return self._rendered.get(CONF_ICON)
    @property
    def entity_picture(self) -> str | None:
        """Return entity picture."""
        return self._rendered.get(CONF_PICTURE)
    @property
    def available(self):
        """Return availability of the entity."""
        return (
            # Identity check: _rendered is reset to _static_rendered itself
            # when rendering failed, which marks the entity unavailable.
            self._rendered is not self._static_rendered
            and
            # Check against False so `None` is ok
            self._rendered.get(CONF_AVAILABILITY) is not False
        )
    @property
    def extra_state_attributes(self) -> dict[str, Any] | None:
        """Return extra attributes."""
        return self._rendered.get(CONF_ATTRIBUTES)
    async def async_added_to_hass(self) -> None:
        """Handle being added to Home Assistant."""
        template.attach(self.hass, self._config)
        await super().async_added_to_hass()
        # Render immediately if the coordinator already has trigger data.
        if self.coordinator.data is not None:
            self._process_data()
    @callback
    def _process_data(self) -> None:
        """Process new data."""
        run_variables = self.coordinator.data["run_variables"]
        # BUGFIX: 'key' is initialized (and set before the attributes render)
        # so the error log below cannot raise UnboundLocalError when the very
        # first render step fails.
        key = None
        try:
            rendered = dict(self._static_rendered)
            for key in self._to_render_simple:
                rendered[key] = self._config[key].async_render(
                    run_variables,
                    parse_result=key in self._parse_result,
                )
            for key in self._to_render_complex:
                rendered[key] = template.render_complex(
                    self._config[key],
                    run_variables,
                )
            if CONF_ATTRIBUTES in self._config:
                key = CONF_ATTRIBUTES
                rendered[CONF_ATTRIBUTES] = template.render_complex(
                    self._config[CONF_ATTRIBUTES],
                    run_variables,
                )
            self._rendered = rendered
        except template.TemplateError as err:
            logging.getLogger(f"{__package__}.{self.entity_id.split('.')[0]}").error(
                "Error rendering %s template for %s: %s", key, self.entity_id, err
            )
            # Fall back to the static values; 'available' detects this by identity.
            self._rendered = self._static_rendered
        self.async_set_context(self.coordinator.data["context"])
    @callback
    def _handle_coordinator_update(self) -> None:
        """Handle updated data from the coordinator."""
        self._process_data()
        self.async_write_ha_state()
| {
"content_hash": "763e57615dbb1c7c0ef0497d13effe51",
"timestamp": "",
"source": "github",
"line_count": 163,
"max_line_length": 85,
"avg_line_length": 31.21472392638037,
"alnum_prop": 0.5837264150943396,
"repo_name": "aronsky/home-assistant",
"id": "c80620b045343e8608f99b5a0962cdea7897f692",
"size": "5088",
"binary": false,
"copies": "3",
"ref": "refs/heads/dev",
"path": "homeassistant/components/template/trigger_entity.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2443"
},
{
"name": "Python",
"bytes": "38448521"
},
{
"name": "Shell",
"bytes": "4910"
}
],
"symlink_target": ""
} |
from typing import Union
from fastapi import Body, FastAPI
from pydantic import BaseModel
app = FastAPI()
class Item(BaseModel):
    # Request payload for item updates. name and price are required;
    # description and tax are optional (default None).
    name: str
    description: Union[str, None] = None
    price: float
    tax: Union[float, None] = None
@app.put("/items/{item_id}")
async def update_item(item_id: int, item: Item = Body(embed=True)):
    """Update an item; Body(embed=True) requires the payload nested under an "item" key."""
    return {"item_id": item_id, "item": item}
| {
"content_hash": "1640d408b61491d7fb59c27c1d17ec4f",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 67,
"avg_line_length": 21.42105263157895,
"alnum_prop": 0.6732186732186732,
"repo_name": "tiangolo/fastapi",
"id": "29e6e14b7e306571b35c99237165e1e06df9df2d",
"size": "407",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs_src/body_multiple_params/tutorial005.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "25"
},
{
"name": "HTML",
"bytes": "187"
},
{
"name": "Python",
"bytes": "1928986"
},
{
"name": "Shell",
"bytes": "1383"
}
],
"symlink_target": ""
} |
import tensorflow as tf
import numpy as np
from tensorflow.examples.tutorials.mnist import mnist
from tensorflow.examples.tutorials.mnist import input_data
import cdbn_backup as cdbn
""" --------------------------------------------
------------------- DATA -------------------
-------------------------------------------- """
class MNIST_HANDLER(object):
  # Wrapper around the tensorflow MNIST dataset that supports optional
  # whitening of the 784-pixel images and cyclic mini-batch iteration.
  def __init__(self, data):
    self.num_training_example = data.train.num_examples
    self.num_test_example     = data.test.num_examples
    # Materialize the full train/test splits up front.
    self.training_data , self.training_labels = data.train.next_batch(self.num_training_example)
    self.test_data , self.test_labels         = data.test.next_batch(self.num_test_example)
    self.whiten               = False
    # NOTE(review): batch cursors start at -20, so the first next_batch()
    # call starts 20 examples before index 0 (wrapping via modulo) --
    # presumably intentional; confirm.
    self.training_index       = -20
    self.test_index           = -20
  def do_whiten(self):
    # Compute a whitening matrix from the training data via SVD of the
    # covariance, then apply it to both splits (the test split reuses the
    # matrix estimated on the training split).
    self.whiten               = True
    data_to_be_whitened       = np.copy(self.training_data)
    mean                      = np.sum(data_to_be_whitened, axis = 0)/self.num_training_example
    mean                      = np.tile(mean,self.num_training_example)
    mean                      = np.reshape(mean,(self.num_training_example,784))
    centered_data             = data_to_be_whitened - mean
    covariance                = np.dot(centered_data.T,centered_data)/self.num_training_example
    U,S,V                     = np.linalg.svd(covariance)
    # epsilon regularizes near-zero singular values.
    epsilon = 1e-5
    lambda_square             = np.diag(1./np.sqrt(S+epsilon))
    self.whitening_mat        = np.dot(np.dot(U, lambda_square), V)
    self.whitened_training_data  = np.dot(centered_data,self.whitening_mat)
    data_to_be_whitened       = np.copy(self.test_data)
    mean                      = np.sum(data_to_be_whitened, axis = 0)/self.num_test_example
    mean                      = np.tile(mean,self.num_test_example)
    mean                      = np.reshape(mean,(self.num_test_example,784))
    centered_data             = data_to_be_whitened - mean
    self.whitened_test_data   = np.dot(centered_data,self.whitening_mat)
  def next_batch(self, batch_size, type = 'train'):
    # Return (data, labels) for the next cyclic batch of the requested split,
    # serving whitened data when do_whiten() has been called. Wraps around
    # the end of the split by concatenating tail and head slices.
    if type == 'train':
      if self.whiten:
        operand = self.whitened_training_data
      else:
        operand = self.training_data
      operand_bis = self.training_labels
      self.training_index = (batch_size + self.training_index) % self.num_training_example
      index = self.training_index
      number = self.num_training_example
    elif type == 'test':
      if self.whiten:
        operand = self.whitened_test_data
      else:
        operand = self.test_data
      operand_bis = self.test_labels
      self.test_index = (batch_size + self.test_index) % self.num_test_example
      index = self.test_index
      number = self.num_test_example
    if index + batch_size > number:
      # Wrap-around: stitch the tail of the array to its head.
      part1 = operand[index:,:]
      part2 = operand[:(index + batch_size)% number,:]
      result = np.concatenate([part1, part2])
      part1 = operand_bis[index:,:]
      part2 = operand_bis[:(index + batch_size)% number,:]
      result_bis = np.concatenate([part1, part2])
    else:
      result = operand[index:index + batch_size,:]
      result_bis = operand_bis[index:index + batch_size,:]
    return result, result_bis
# Load MNIST (one-hot labels); whitening is available but disabled by default.
mnist_dataset = MNIST_HANDLER(input_data.read_data_sets('data', one_hot=True))
#mnist_dataset.do_whiten()
sess = tf.Session()
""" ---------------------------------------------
    ------------------- MODEL -------------------
    --------------------------------------------- """
# Build a 3-layer convolutional DBN (two conv layers with probabilistic
# max-pooling, one fully connected layer) topped by a 10-way softmax.
my_cdbn = cdbn.CDBN('mnist_cdbn', 20, '/home/arthur/pedestrian_detection/log', mnist_dataset, sess, verbosity = 2)
# Layer 1: 28x28x1 input, 40 11x11 filters, Gaussian visible units.
my_cdbn.add_layer('layer_1', fully_connected = False, v_height = 28, v_width = 28, v_channels = 1, f_height = 11, f_width = 11, f_number = 40,
           init_biases_H = -3, init_biases_V = 0.01, init_weight_stddev = 0.01,
           gaussian_unit = True, gaussian_variance = 0.2,
           prob_maxpooling = True, padding = True,
           learning_rate = 0.00005, learning_rate_decay = 0.5, momentum = 0.9, decay_step = 50000,
           weight_decay = 2.0, sparsity_target = 0.003, sparsity_coef = 0.1)
# Layer 2: 14x14x40 input (after pooling), 40 7x7 filters, binary units.
my_cdbn.add_layer('layer_2', fully_connected = False, v_height = 14, v_width = 14, v_channels = 40, f_height = 7, f_width = 7, f_number = 40,
           init_biases_H = -3, init_biases_V = 0.025, init_weight_stddev = 0.025,
           gaussian_unit = False, gaussian_variance = 0.2,
           prob_maxpooling = True, padding = True,
           learning_rate = 0.0025, learning_rate_decay = 0.5, momentum = 0.9, decay_step = 50000,
           weight_decay = 0.1, sparsity_target = 0.1, sparsity_coef = 0.1)
# Layer 3: fully connected, 40*7*7 inputs -> 200 hidden units.
my_cdbn.add_layer('layer_3', fully_connected = True, v_height = 1, v_width = 1, v_channels = 40*7*7, f_height = 1, f_width = 1, f_number = 200,
           init_biases_H = -3, init_biases_V = 0.025, init_weight_stddev = 0.025,
           gaussian_unit = False, gaussian_variance = 0.2,
           prob_maxpooling = False, padding = False,
           learning_rate = 0.0025, learning_rate_decay = 0.5, momentum = 0.9, decay_step = 50000,
           weight_decay = 0.1, sparsity_target = 0.1, sparsity_coef = 0.1)
my_cdbn.add_softmax_layer(10, 0.1)
my_cdbn.lock_cdbn()
""" ---------------------------------------------
    ------------------ TRAINING -----------------
    --------------------------------------------- """
# Greedy layer-wise pretraining (10000 steps per layer), then 20000 steps of
# supervised fine-tuning, then evaluation on the test split.
my_cdbn.manage_layers(['layer_1','layer_2','layer_3'],[],[10000,10000,10000], [1,1,1], 20000, restore_softmax = False, fine_tune = True)
my_cdbn.do_eval()
"content_hash": "acf38f24d84c562ef507db676358a167",
"timestamp": "",
"source": "github",
"line_count": 124,
"max_line_length": 144,
"avg_line_length": 44.83064516129032,
"alnum_prop": 0.5634106853750674,
"repo_name": "arthurmeyer/Convolutional_Deep_Belief_Network",
"id": "de25ac275bf89e462e0087335489b47a2d8b8938",
"size": "5559",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "demo_cdbn_mnist.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "62756"
}
],
"symlink_target": ""
} |
"""
Tests for L{twisted.python.log}.
"""
from __future__ import division, absolute_import, print_function
from twisted.python.compat import _PY3, NativeStringIO as StringIO
import os
import sys
import time
import logging
import warnings
import calendar
from io import IOBase
from imp import reload
from twisted.trial import unittest
from twisted.python import log, failure
from twisted.logger.test.test_stdlib import handlerAndBytesIO
from twisted.python.log import LogPublisher
from twisted.logger import (
LoggingFile, LogLevel as NewLogLevel, LogBeginner,
LogPublisher as NewLogPublisher
)
class FakeWarning(Warning):
    """
    A distinct L{Warning} subclass raised by tests in this module when
    exercising how L{twisted.python.log} interacts with the L{warnings}
    module, so the tests can filter for exactly the warnings they emitted.
    """
class TextFromEventDictTests(unittest.SynchronousTestCase):
    """
    Tests for L{textFromEventDict}.
    """
    def test_message(self):
        """
        The C{"message"} value, when specified, is concatenated to generate the
        message.
        """
        eventDict = dict(message=("a", "b", "c"))
        text = log.textFromEventDict(eventDict)
        self.assertEqual(text, "a b c")
    def test_format(self):
        """
        The C{"format"} value, when specified, is used to format the message.
        """
        eventDict = dict(
            message=(), isError=0, format="Hello, %(foo)s!", foo="dude"
        )
        text = log.textFromEventDict(eventDict)
        self.assertEqual(text, "Hello, dude!")
    def test_noMessageNoFormat(self):
        """
        If C{"format"} is unspecified and C{"message"} is empty, return
        L{None}.
        """
        eventDict = dict(message=(), isError=0)
        text = log.textFromEventDict(eventDict)
        self.assertIsNone(text)
    def test_whySpecified(self):
        """
        The C{"why"} value, when specified, is first part of message.
        """
        # A real traceback is needed, so raise and capture a Failure in the
        # except block.
        try:
            raise RuntimeError()
        except:
            eventDict = dict(
                message=(), isError=1, failure=failure.Failure(), why="foo"
            )
            text = log.textFromEventDict(eventDict)
            self.assertTrue(text.startswith("foo\n"))
    def test_whyDefault(self):
        """
        The C{"why"} value, when unspecified, defaults to C{"Unhandled Error"}.
        """
        try:
            raise RuntimeError()
        except:
            eventDict = dict(message=(), isError=1, failure=failure.Failure())
            text = log.textFromEventDict(eventDict)
            self.assertTrue(text.startswith("Unhandled Error\n"))
    def test_noTracebackForYou(self):
        """
        If unable to obtain a traceback due to an exception, catch it and note
        the error.
        """
        # Invalid failure object doesn't implement .getTraceback()
        eventDict = dict(message=(), isError=1, failure=object())
        text = log.textFromEventDict(eventDict)
        self.assertIn("\n(unable to obtain traceback)", text)
class LogTests(unittest.SynchronousTestCase):
    """
    Tests for the global log publisher: observation, context propagation,
    error logging, broken-observer handling, and warning capture.
    """
    def setUp(self):
        # Capture every event published during a test in self.catcher.
        self.catcher = []
        self.observer = self.catcher.append
        log.addObserver(self.observer)
        self.addCleanup(log.removeObserver, self.observer)
    def testObservation(self):
        """
        A logged message reaches the observer with its text, keyword
        arguments, and a timestamp.
        """
        catcher = self.catcher
        log.msg("test", testShouldCatch=True)
        i = catcher.pop()
        self.assertEqual(i["message"][0], "test")
        self.assertTrue(i["testShouldCatch"])
        self.assertIn("time", i)
        self.assertEqual(len(catcher), 0)
    def testContext(self):
        """
        Nested callWithContext calls merge their contexts, with inner values
        and explicit msg() keywords overriding outer ones.
        """
        catcher = self.catcher
        log.callWithContext({"subsystem": "not the default",
                             "subsubsystem": "a",
                             "other": "c"},
                            log.callWithContext,
                            {"subsubsystem": "b"}, log.msg, "foo", other="d")
        i = catcher.pop()
        self.assertEqual(i['subsubsystem'], 'b')
        self.assertEqual(i['subsystem'], 'not the default')
        self.assertEqual(i['other'], 'd')
        self.assertEqual(i['message'][0], 'foo')
    def testErrors(self):
        """
        log.err accepts strings, exceptions, and Failures, always marking the
        event as an error.
        """
        for e, ig in [("hello world", "hello world"),
                      (KeyError(), KeyError),
                      (failure.Failure(RuntimeError()), RuntimeError)]:
            log.err(e)
            i = self.catcher.pop()
            self.assertEqual(i['isError'], 1)
            self.flushLoggedErrors(ig)
    def testErrorsWithWhy(self):
        """
        log.err propagates the 'why' description into the event dict.
        """
        for e, ig in [("hello world", "hello world"),
                      (KeyError(), KeyError),
                      (failure.Failure(RuntimeError()), RuntimeError)]:
            log.err(e, 'foobar')
            i = self.catcher.pop()
            self.assertEqual(i['isError'], 1)
            self.assertEqual(i['why'], 'foobar')
            self.flushLoggedErrors(ig)
    def test_erroneousErrors(self):
        """
        Exceptions raised by log observers are logged but the observer which
        raised the exception remains registered with the publisher.  These
        exceptions do not prevent the event from being sent to other observers
        registered with the publisher.
        """
        L1 = []
        L2 = []
        def broken(event):
            1 // 0
        for observer in [L1.append, broken, L2.append]:
            log.addObserver(observer)
            self.addCleanup(log.removeObserver, observer)
        for i in range(3):
            # Reset the lists for simpler comparison.
            L1[:] = []
            L2[:] = []
            # Send out the event which will break one of the observers.
            log.msg("Howdy, y'all.", log_trace=[])
            # The broken observer should have caused this to be logged.
            excs = self.flushLoggedErrors(ZeroDivisionError)
            del self.catcher[:]
            self.assertEqual(len(excs), 1)
            # Both other observers should have seen the message.
            self.assertEqual(len(L1), 2)
            self.assertEqual(len(L2), 2)
            # The first event is delivered to all observers; then, errors
            # are delivered.
            self.assertEqual(L1[0]['message'], ("Howdy, y'all.",))
            self.assertEqual(L2[0]['message'], ("Howdy, y'all.",))
    def test_showwarning(self):
        """
        L{twisted.python.log.showwarning} emits the warning as a message
        to the Twisted logging system.
        """
        publisher = log.LogPublisher()
        publisher.addObserver(self.observer)
        publisher.showwarning(
            FakeWarning("unique warning message"), FakeWarning,
            "warning-filename.py", 27)
        event = self.catcher.pop()
        self.assertEqual(
            event['format'] % event,
            'warning-filename.py:27: twisted.test.test_log.FakeWarning: '
            'unique warning message')
        self.assertEqual(self.catcher, [])
        # Python 2.6 requires that any function used to override the
        # warnings.showwarning API accept a "line" parameter or a
        # deprecation warning is emitted.
        publisher.showwarning(
            FakeWarning("unique warning message"), FakeWarning,
            "warning-filename.py", 27, line=object())
        event = self.catcher.pop()
        self.assertEqual(
            event['format'] % event,
            'warning-filename.py:27: twisted.test.test_log.FakeWarning: '
            'unique warning message')
        self.assertEqual(self.catcher, [])
    def test_warningToFile(self):
        """
        L{twisted.python.log.showwarning} passes warnings with an explicit file
        target on to the underlying Python warning system.
        """
        message = "another unique message"
        category = FakeWarning
        filename = "warning-filename.py"
        lineno = 31
        output = StringIO()
        log.showwarning(message, category, filename, lineno, file=output)
        self.assertEqual(
            output.getvalue(),
            warnings.formatwarning(message, category, filename, lineno))
        # In Python 2.6 and higher, warnings.showwarning accepts
        # a "line" argument which gives the source line the warning
        # message is to include.
        line = "hello world"
        output = StringIO()
        log.showwarning(message, category, filename, lineno, file=output,
                        line=line)
        self.assertEqual(
            output.getvalue(),
            warnings.formatwarning(message, category, filename, lineno,
                                   line))
    def test_publisherReportsBrokenObserversPrivately(self):
        """
        Log publisher does not use the global L{log.err} when reporting broken
        observers.
        """
        errors = []
        def logError(eventDict):
            if eventDict.get("isError"):
                errors.append(eventDict["failure"].value)
        def fail(eventDict):
            raise RuntimeError("test_publisherLocalyReportsBrokenObservers")
        publisher = log.LogPublisher()
        publisher.addObserver(logError)
        publisher.addObserver(fail)
        publisher.msg("Hello!")
        # The failing observer stays registered...
        self.assertEqual(set(publisher.observers), set([logError, fail]))
        # ...and its failure was delivered to the publisher's own observers.
        self.assertEqual(len(errors), 1)
        self.assertIsInstance(errors[0], RuntimeError)
class FakeFile(list):
    """
    A write-only file-like object that records each chunk written to it,
    in order, as list elements for later inspection by tests.
    """
    def write(self, bytes):
        # Remember the chunk; nothing is actually written anywhere.
        self.append(bytes)

    def flush(self):
        # Nothing is buffered, so there is nothing to flush.
        pass

# Register as a virtual subclass so isinstance(FakeFile(), IOBase) holds,
# letting code that type-checks its output stream accept this fake.
IOBase.register(FakeFile)
class EvilStr:
    """
    An object whose str() raises ZeroDivisionError, used to exercise the
    logging system's handling of unformattable messages.
    """
    def __str__(self):
        # Deliberately broken: raises ZeroDivisionError when stringified.
        1 // 0
class EvilRepr:
    """
    An object whose str() works but whose repr() raises ZeroDivisionError,
    used to exercise repr-based formatting paths in the logging system.
    """
    def __str__(self):
        return "Happy Evil Repr"

    def __repr__(self):
        # Deliberately broken: raises ZeroDivisionError.
        1 // 0
class EvilReprStr(EvilStr, EvilRepr):
    """
    An object for which both str() (from EvilStr) and repr() (from EvilRepr)
    raise, exercising the logging system's most pathological formatting path.
    """
    pass
class LogPublisherTestCaseMixin:
    def setUp(self):
        """
        Add a log observer which records log events in C{self.out}.  Also,
        make sure the default string encoding is ASCII so that
        L{testSingleUnicode} can test the behavior of logging unencodable
        unicode messages.
        """
        self.out = FakeFile()
        self.lp = log.LogPublisher()
        self.flo = log.FileLogObserver(self.out)
        self.lp.addObserver(self.flo.emit)

        try:
            str(u'\N{VULGAR FRACTION ONE HALF}')
        except UnicodeEncodeError:
            # This is the behavior we want - don't change anything.
            self._origEncoding = None
        else:
            if _PY3:
                # On Python 3 str is always unicode; nothing to adjust.
                self._origEncoding = None
                return
            # Python 2 only: reload(sys) restores sys.setdefaultencoding,
            # which site.py normally deletes, so the process-wide default
            # encoding can be forced to ASCII for the duration of the test.
            reload(sys)
            self._origEncoding = sys.getdefaultencoding()
            sys.setdefaultencoding('ascii')

    def tearDown(self):
        """
        Verify that everything written to the fake file C{self.out} was a
        C{str}.  Also, restore the default string encoding to its previous
        setting, if it was modified by L{setUp}.
        """
        for chunk in self.out:
            self.assertIsInstance(chunk, str,
                                  "%r was not a string" % (chunk,))
        if self._origEncoding is not None:
            sys.setdefaultencoding(self._origEncoding)
            # Re-hide setdefaultencoding again, as site.py does at startup.
            del sys.setdefaultencoding
class LogPublisherTests(LogPublisherTestCaseMixin,
                        unittest.SynchronousTestCase):
    """
    Tests for the basic message-publishing behavior of L{log.LogPublisher}.
    """
    def testSingleString(self):
        """
        Publishing a single string argument produces exactly one log event.
        """
        self.lp.msg("Hello, world.")
        self.assertEqual(1, len(self.out))

    def testMultipleString(self):
        """
        Publishing several string arguments still produces a single log
        event.  This is legacy behavior slated for deprecation; if you are
        reading this and trying to learn how the logging system works,
        *do not use this feature*.
        """
        self.lp.msg("Hello, ", "world.")
        self.assertEqual(1, len(self.out))

    def test_singleUnicode(self):
        """
        L{log.LogPublisher.msg} does not accept non-ASCII Unicode on Python 2,
        logging an error instead.

        On Python 3, where Unicode is default message type, the message is
        logged normally.
        """
        message = u"Hello, \N{VULGAR FRACTION ONE HALF} world."
        self.lp.msg(message)
        self.assertEqual(1, len(self.out))
        if _PY3:
            self.assertIn(message, self.out[0])
        else:
            for fragment in ['with str error', 'UnicodeEncodeError']:
                self.assertIn(fragment, self.out[0])
class FileObserverTests(LogPublisherTestCaseMixin,
                        unittest.SynchronousTestCase):
    """
    Tests for L{log.FileObserver}.
    """
    # Marker strings the logging system emits when formatting a message
    # fails in various ways; the tests below assert on their presence.
    ERROR_INVALID_FORMAT = 'Invalid format string'
    ERROR_UNFORMATTABLE_OBJECT = 'UNFORMATTABLE OBJECT'
    ERROR_FORMAT = (
        'Invalid format string or unformattable object in log message'
    )
    ERROR_PATHOLOGICAL = 'PATHOLOGICAL ERROR'
    ERROR_NO_FORMAT = 'Unable to format event'
    ERROR_UNFORMATTABLE_SYSTEM = '[UNFORMATTABLE]'
    ERROR_MESSAGE_LOST = 'MESSAGE LOST: unformattable object logged'

    def _getTimezoneOffsetTest(self, tzname, daylightOffset, standardOffset):
        """
        Verify that L{getTimezoneOffset} produces the expected offset for a
        certain timezone both when daylight saving time is in effect and when
        it is not.

        @param tzname: The name of a timezone to exercise.
        @type tzname: L{bytes}

        @param daylightOffset: The number of seconds west of UTC the timezone
            should be when daylight saving time is in effect.
        @type daylightOffset: L{int}

        @param standardOffset: The number of seconds west of UTC the timezone
            should be when daylight saving time is not in effect.
        @type standardOffset: L{int}
        """
        if getattr(time, 'tzset', None) is None:
            raise unittest.SkipTest(
                "Platform cannot change timezone, cannot verify correct "
                "offsets in well-known timezones.")
        originalTimezone = os.environ.get('TZ', None)
        try:
            os.environ['TZ'] = tzname
            time.tzset()
            # The behavior of mktime depends on the current timezone setting.
            # So only do this after changing the timezone.
            # Compute a POSIX timestamp for a certain date and time that is
            # known to occur at a time when daylight saving time is in effect.
            localDaylightTuple = (2006, 6, 30, 0, 0, 0, 4, 181, 1)
            daylight = time.mktime(localDaylightTuple)
            # Compute a POSIX timestamp for a certain date and time that is
            # known to occur at a time when daylight saving time is not in
            # effect.
            localStandardTuple = (2007, 1, 31, 0, 0, 0, 2, 31, 0)
            standard = time.mktime(localStandardTuple)
            self.assertEqual(
                (self.flo.getTimezoneOffset(daylight),
                 self.flo.getTimezoneOffset(standard)),
                (daylightOffset, standardOffset))
        finally:
            # Restore the original TZ setting regardless of the outcome.
            if originalTimezone is None:
                del os.environ['TZ']
            else:
                os.environ['TZ'] = originalTimezone
            time.tzset()

    def test_getTimezoneOffsetWestOfUTC(self):
        """
        Attempt to verify that L{FileLogObserver.getTimezoneOffset} returns
        correct values for the current C{TZ} environment setting for at least
        some cases.  This test method exercises a timezone that is west of UTC
        (and should produce positive results).
        """
        self._getTimezoneOffsetTest("America/New_York", 14400, 18000)

    def test_getTimezoneOffsetEastOfUTC(self):
        """
        Attempt to verify that L{FileLogObserver.getTimezoneOffset} returns
        correct values for the current C{TZ} environment setting for at least
        some cases.  This test method exercises a timezone that is east of UTC
        (and should produce negative results).
        """
        self._getTimezoneOffsetTest("Europe/Berlin", -7200, -3600)

    def test_getTimezoneOffsetWithoutDaylightSavingTime(self):
        """
        Attempt to verify that L{FileLogObserver.getTimezoneOffset} returns
        correct values for the current C{TZ} environment setting for at least
        some cases.  This test method exercises a timezone that does not use
        daylight saving time at all (so both summer and winter time test values
        should have the same offset).
        """
        # Test a timezone that doesn't have DST.  mktime() implementations
        # available for testing seem happy to produce results for this even
        # though it's not entirely valid.
        self._getTimezoneOffsetTest("Africa/Johannesburg", -7200, -7200)

    def test_timeFormatting(self):
        """
        Test the method of L{FileLogObserver} which turns a timestamp into a
        human-readable string.
        """
        when = calendar.timegm((2001, 2, 3, 4, 5, 6, 7, 8, 0))
        # Pretend to be in US/Eastern for a moment
        self.flo.getTimezoneOffset = lambda when: 18000
        self.assertEqual(self.flo.formatTime(when), '2001-02-02 23:05:06-0500')
        # Okay now we're in Eastern Europe somewhere
        self.flo.getTimezoneOffset = lambda when: -3600
        self.assertEqual(self.flo.formatTime(when), '2001-02-03 05:05:06+0100')
        # And off in the Pacific or someplace like that
        self.flo.getTimezoneOffset = lambda when: -39600
        self.assertEqual(self.flo.formatTime(when), '2001-02-03 15:05:06+1100')
        # One of those weird places with a half-hour offset timezone
        self.flo.getTimezoneOffset = lambda when: 5400
        self.assertEqual(self.flo.formatTime(when), '2001-02-03 02:35:06-0130')
        # Half-hour offset in the other direction
        self.flo.getTimezoneOffset = lambda when: -5400
        self.assertEqual(self.flo.formatTime(when), '2001-02-03 05:35:06+0130')
        # Test an offset which is between 0 and 60 minutes to make sure the
        # sign comes out properly in that case.
        self.flo.getTimezoneOffset = lambda when: 1800
        self.assertEqual(self.flo.formatTime(when), '2001-02-03 03:35:06-0030')
        # Test an offset between 0 and 60 minutes in the other direction.
        self.flo.getTimezoneOffset = lambda when: -1800
        self.assertEqual(self.flo.formatTime(when), '2001-02-03 04:35:06+0030')
        # If a strftime-format string is present on the logger, it should
        # use that instead.  Note we don't assert anything about day, hour
        # or minute because we cannot easily control what time.strftime()
        # thinks the local timezone is.
        self.flo.timeFormat = '%Y %m'
        self.assertEqual(self.flo.formatTime(when), '2001 02')

    def test_microsecondTimestampFormatting(self):
        """
        L{FileLogObserver.formatTime} supports a value of C{timeFormat} which
        includes C{"%f"}, a L{datetime}-only format specifier for microseconds.
        """
        self.flo.timeFormat = '%f'
        self.assertEqual("600000", self.flo.formatTime(12345.6))

    def test_loggingAnObjectWithBroken__str__(self):
        """
        Logging an object whose str() raises still produces one event and
        does not mark the message as unformattable.
        """
        # HELLO, MCFLY
        self.lp.msg(EvilStr())
        self.assertEqual(len(self.out), 1)
        # Logging system shouldn't need to crap itself for this trivial case
        self.assertNotIn(self.ERROR_UNFORMATTABLE_OBJECT, self.out[0])

    def test_formattingAnObjectWithBroken__str__(self):
        """
        Using an object with a broken str() as a format parameter is reported
        as an invalid format string.
        """
        self.lp.msg(format='%(blat)s', blat=EvilStr())
        self.assertEqual(len(self.out), 1)
        self.assertIn(self.ERROR_INVALID_FORMAT, self.out[0])

    def test_brokenSystem__str__(self):
        """
        A 'system' value with a broken str() is reported with the generic
        formatting-error marker.
        """
        self.lp.msg('huh', system=EvilStr())
        self.assertEqual(len(self.out), 1)
        self.assertIn(self.ERROR_FORMAT, self.out[0])

    def test_formattingAnObjectWithBroken__repr__Indirect(self):
        """
        A format parameter containing an object with a broken repr() is
        reported as an unformattable object.
        """
        self.lp.msg(format='%(blat)s', blat=[EvilRepr()])
        self.assertEqual(len(self.out), 1)
        self.assertIn(self.ERROR_UNFORMATTABLE_OBJECT, self.out[0])

    def test_systemWithBroker__repr__Indirect(self):
        """
        A 'system' value containing an object with a broken repr() is
        reported as an unformattable object.
        """
        self.lp.msg('huh', system=[EvilRepr()])
        self.assertEqual(len(self.out), 1)
        self.assertIn(self.ERROR_UNFORMATTABLE_OBJECT, self.out[0])

    def test_simpleBrokenFormat(self):
        """
        A format string whose placeholders don't match the supplied keywords
        is reported as an invalid format string.
        """
        self.lp.msg(format='hooj %s %s', blat=1)
        self.assertEqual(len(self.out), 1)
        self.assertIn(self.ERROR_INVALID_FORMAT, self.out[0])

    def test_ridiculousFormat(self):
        """
        A non-string format value is reported as an invalid format string.
        """
        self.lp.msg(format=42, blat=1)
        self.assertEqual(len(self.out), 1)
        self.assertIn(self.ERROR_INVALID_FORMAT, self.out[0])

    def test_evilFormat__repr__And__str__(self):
        """
        A format object whose str() and repr() both raise is reported with
        the pathological-error marker.
        """
        self.lp.msg(format=EvilReprStr(), blat=1)
        self.assertEqual(len(self.out), 1)
        self.assertIn(self.ERROR_PATHOLOGICAL, self.out[0])

    def test_strangeEventDict(self):
        """
        This kind of eventDict used to fail silently, so test it does.
        """
        self.lp.msg(message='', isError=False)
        self.assertEqual(len(self.out), 0)

    def _startLoggingCleanup(self):
        """
        Cleanup after a startLogging() call that mutates the hell out of some
        global state.
        """
        self.addCleanup(log.theLogPublisher._stopLogging)
        self.addCleanup(setattr, sys, 'stdout', sys.stdout)
        self.addCleanup(setattr, sys, 'stderr', sys.stderr)

    def test_printToStderrSetsIsError(self):
        """
        startLogging()'s overridden sys.stderr should consider everything
        written to it an error.
        """
        self._startLoggingCleanup()
        fakeFile = StringIO()
        log.startLogging(fakeFile)

        def observe(event):
            observed.append(event)
        observed = []
        log.addObserver(observe)

        print("Hello, world.", file=sys.stderr)
        self.assertEqual(observed[0]["isError"], 1)

    def test_startLogging(self):
        """
        startLogging() installs FileLogObserver and overrides sys.stdout and
        sys.stderr.
        """
        origStdout, origStderr = sys.stdout, sys.stderr
        self._startLoggingCleanup()
        # When done with test, reset stdout and stderr to current values:
        fakeFile = StringIO()
        observer = log.startLogging(fakeFile)
        self.addCleanup(observer.stop)
        log.msg("Hello!")
        self.assertIn("Hello!", fakeFile.getvalue())
        self.assertIsInstance(sys.stdout, LoggingFile)
        self.assertEqual(sys.stdout.level, NewLogLevel.info)
        encoding = getattr(origStdout, "encoding", None)
        if not encoding:
            encoding = sys.getdefaultencoding()
        self.assertEqual(sys.stdout.encoding.upper(), encoding.upper())
        self.assertIsInstance(sys.stderr, LoggingFile)
        self.assertEqual(sys.stderr.level, NewLogLevel.error)
        encoding = getattr(origStderr, "encoding", None)
        if not encoding:
            encoding = sys.getdefaultencoding()
        self.assertEqual(sys.stderr.encoding.upper(), encoding.upper())

    def test_startLoggingTwice(self):
        """
        There are some obscure error conditions that can occur when logging is
        started twice.  See http://twistedmatrix.com/trac/ticket/3289 for more
        information.
        """
        self._startLoggingCleanup()
        # The bug is particular to the way that the t.p.log 'global' function
        # handle stdout.  If we use our own stream, the error doesn't occur.  If
        # we use our own LogPublisher, the error doesn't occur.
        sys.stdout = StringIO()

        def showError(eventDict):
            if eventDict['isError']:
                sys.__stdout__.write(eventDict['failure'].getTraceback())

        log.addObserver(showError)
        self.addCleanup(log.removeObserver, showError)
        observer = log.startLogging(sys.stdout)
        self.addCleanup(observer.stop)
        # At this point, we expect that sys.stdout is a StdioOnnaStick object.
        self.assertIsInstance(sys.stdout, LoggingFile)
        fakeStdout = sys.stdout
        observer = log.startLogging(sys.stdout)
        self.assertIs(sys.stdout, fakeStdout)

    def test_startLoggingOverridesWarning(self):
        """
        startLogging() overrides global C{warnings.showwarning} such that
        warnings go to Twisted log observers.
        """
        self._startLoggingCleanup()
        newPublisher = NewLogPublisher()

        class SysModule(object):
            # Stand-ins for sys.stdout/sys.stderr so the real streams are
            # never touched by the temporary log beginner below.
            stdout = object()
            stderr = object()

        tempLogPublisher = LogPublisher(
            newPublisher, newPublisher,
            logBeginner=LogBeginner(newPublisher, StringIO(), SysModule,
                                    warnings)
        )
        # Trial reports warnings in two ways.  First, it intercepts the global
        # 'showwarning' function *itself*, after starting logging (by way of
        # the '_collectWarnings' function which collects all warnings as a
        # around the test's 'run' method).  Second, it has a log observer which
        # immediately reports warnings when they're propagated into the log
        # system (which, in normal operation, happens only at the end of the
        # test case).  In order to avoid printing a spurious warning in this
        # test, we first replace the global log publisher's 'showwarning' in
        # the module with our own.
        self.patch(log, "theLogPublisher", tempLogPublisher)
        # And, one last thing, pretend we're starting from a fresh import, or
        # warnings.warn won't be patched at all.
        log._oldshowwarning = None
        # Global mutable state is bad, kids.  Stay in school.
        fakeFile = StringIO()
        # We didn't previously save log messages, so let's make sure we don't
        # save them any more.
        evt = {"pre-start": "event"}
        received = []

        def preStartObserver(x):
            if 'pre-start' in x.keys():
                received.append(x)

        newPublisher(evt)
        newPublisher.addObserver(preStartObserver)
        log.startLogging(fakeFile, setStdout=False)
        self.addCleanup(tempLogPublisher._stopLogging)
        self.assertEqual(received, [])
        warnings.warn("hello!")
        output = fakeFile.getvalue()
        self.assertIn("UserWarning: hello!", output)

    def test_emitPrefix(self):
        """
        FileLogObserver.emit() will add a timestamp and system prefix to its
        file output.
        """
        output = StringIO()
        flo = log.FileLogObserver(output)
        events = []

        def observer(event):
            # Capture the event for reference and pass it along to flo
            events.append(event)
            flo.emit(event)

        publisher = log.LogPublisher()
        publisher.addObserver(observer)
        publisher.msg("Hello!")
        self.assertEqual(len(events), 1)
        event = events[0]
        result = output.getvalue()
        prefix = "{time} [{system}] ".format(
            time=flo.formatTime(event["time"]), system=event["system"],
        )
        self.assertTrue(
            result.startswith(prefix),
            "{0!r} does not start with {1!r}".format(result, prefix)
        )

    def test_emitNewline(self):
        """
        FileLogObserver.emit() will append a newline to its file output.
        """
        output = StringIO()
        flo = log.FileLogObserver(output)
        publisher = log.LogPublisher()
        publisher.addObserver(flo.emit)
        publisher.msg("Hello!")
        result = output.getvalue()
        suffix = "Hello!\n"
        self.assertTrue(
            result.endswith(suffix),
            "{0!r} does not end with {1!r}".format(result, suffix)
        )
class PythonLoggingObserverTests(unittest.SynchronousTestCase):
    """
    Test the bridge with python logging module.
    """
    def setUp(self):
        # Open the root logger up to DEBUG so every bridged event reaches
        # the handler, remembering the previous level for cleanup.
        rootLogger = logging.getLogger("")
        originalLevel = rootLogger.getEffectiveLevel()
        rootLogger.setLevel(logging.DEBUG)

        @self.addCleanup
        def restoreLevel():
            rootLogger.setLevel(originalLevel)

        # Route stdlib logging output into an in-memory byte buffer.
        self.hdlr, self.out = handlerAndBytesIO()
        rootLogger.addHandler(self.hdlr)

        @self.addCleanup
        def removeLogger():
            rootLogger.removeHandler(self.hdlr)
            self.hdlr.close()

        # A private publisher feeding the bridge observer under test.
        self.lp = log.LogPublisher()
        self.obs = log.PythonLoggingObserver()
        self.lp.addObserver(self.obs.emit)

    def test_singleString(self):
        """
        Test simple output, and default log level.
        """
        self.lp.msg("Hello, world.")
        self.assertIn(b"Hello, world.", self.out.getvalue())
        self.assertIn(b"INFO", self.out.getvalue())

    def test_errorString(self):
        """
        Test error output.
        """
        f = failure.Failure(ValueError("That is bad."))
        self.lp.msg(failure=f, isError=True)
        prefix = b"CRITICAL:"
        output = self.out.getvalue()
        self.assertTrue(
            output.startswith(prefix),
            "Does not start with {0!r}: {1!r}".format(prefix, output)
        )

    def test_formatString(self):
        """
        Test logging with a format.
        """
        self.lp.msg(format="%(bar)s oo %(foo)s", bar="Hello", foo="world")
        self.assertIn(b"Hello oo world", self.out.getvalue())

    def test_customLevel(self):
        """
        Test the logLevel keyword for customizing level used.
        """
        self.lp.msg("Spam egg.", logLevel=logging.ERROR)
        self.assertIn(b"Spam egg.", self.out.getvalue())
        self.assertIn(b"ERROR", self.out.getvalue())
        # Empty the buffer before exercising the second level.
        self.out.seek(0, 0)
        self.out.truncate()
        self.lp.msg("Foo bar.", logLevel=logging.WARNING)
        self.assertIn(b"Foo bar.", self.out.getvalue())
        self.assertIn(b"WARNING", self.out.getvalue())

    def test_strangeEventDict(self):
        """
        Verify that an event dictionary which is not an error and has an empty
        message isn't recorded.
        """
        self.lp.msg(message='', isError=False)
        self.assertEqual(self.out.getvalue(), b'')
class PythonLoggingIntegrationTests(unittest.SynchronousTestCase):
    """
    Test integration of python logging bridge.
    """
    def test_startStopObserver(self):
        """
        Test that start and stop methods of the observer actually register
        and unregister to the log system.
        """
        oldAddObserver = log.addObserver
        oldRemoveObserver = log.removeObserver
        l = []
        try:
            # Stub the module-level registration functions so observer
            # (un)registration is visible without touching global state.
            log.addObserver = l.append
            log.removeObserver = l.remove
            obs = log.PythonLoggingObserver()
            obs.start()
            self.assertEqual(l[0], obs.emit)
            obs.stop()
            self.assertEqual(len(l), 0)
        finally:
            log.addObserver = oldAddObserver
            log.removeObserver = oldRemoveObserver

    def test_inheritance(self):
        """
        Test that we can inherit L{log.PythonLoggingObserver} and use super:
        that's basically a validation that L{log.PythonLoggingObserver} is
        new-style class.
        """
        class MyObserver(log.PythonLoggingObserver):
            def emit(self, eventDict):
                super(MyObserver, self).emit(eventDict)

        obs = MyObserver()
        l = []
        oldEmit = log.PythonLoggingObserver.emit
        try:
            # Replace the base implementation so the super() call above is
            # observable.
            log.PythonLoggingObserver.emit = l.append
            obs.emit('foo')
            self.assertEqual(len(l), 1)
        finally:
            log.PythonLoggingObserver.emit = oldEmit
class DefaultObserverTests(unittest.SynchronousTestCase):
    """
    Test the default observer.
    """
    def test_failureLogger(self):
        """
        The reason argument passed to log.err() appears in the report
        generated by DefaultObserver.
        """
        self.catcher = []
        self.observer = self.catcher.append
        log.addObserver(self.observer)
        self.addCleanup(log.removeObserver, self.observer)

        obs = log.DefaultObserver()
        obs.stderr = StringIO()
        obs.start()
        self.addCleanup(obs.stop)

        reason = "The reason."
        log.err(Exception(), reason)
        errors = self.flushLoggedErrors()

        self.assertIn(reason, obs.stderr.getvalue())
        self.assertEqual(len(errors), 1)

    def test_emitEventWithBrokenRepr(self):
        """
        DefaultObserver.emit() does not raise when it observes an error event
        with a message that causes L{repr} to raise.
        """
        class Ouch(object):
            def __repr__(self):
                # Deliberately broken: repr() must raise for this test.
                return str(1 / 0)

        message = ("foo", Ouch())
        event = dict(message=message, isError=1)

        observer = log.DefaultObserver()
        with StringIO() as output:
            observer.stderr = output
            observer.emit(event)
            self.assertTrue(output.getvalue().startswith("foo <Ouch instance"))
class StdioOnnaStickTests(unittest.SynchronousTestCase):
    """
    StdioOnnaStick should act like the normal sys.stdout object.
    """
    def setUp(self):
        # Record every event the global log publisher emits during the test.
        self.resultLogs = []
        log.addObserver(self.resultLogs.append)

    def tearDown(self):
        log.removeObserver(self.resultLogs.append)

    def getLogMessages(self):
        """
        Return the flattened message text of each recorded log event.
        """
        return ["".join(d['message']) for d in self.resultLogs]

    def test_write(self):
        """
        Writing to a StdioOnnaStick instance results in Twisted log messages.

        Log messages are generated every time a '\\n' is encountered.
        """
        stdio = log.StdioOnnaStick()
        stdio.write("Hello there\nThis is a test")
        self.assertEqual(self.getLogMessages(), ["Hello there"])
        stdio.write("!\n")
        self.assertEqual(self.getLogMessages(),
                         ["Hello there", "This is a test!"])

    def test_metadata(self):
        """
        The log messages written by StdioOnnaStick have printed=1 keyword, and
        by default are not errors.
        """
        stdio = log.StdioOnnaStick()
        stdio.write("hello\n")
        self.assertFalse(self.resultLogs[0]['isError'])
        self.assertTrue(self.resultLogs[0]['printed'])

    def test_writeLines(self):
        """
        Writing lines to a StdioOnnaStick results in Twisted log messages.
        """
        stdio = log.StdioOnnaStick()
        stdio.writelines(["log 1", "log 2"])
        self.assertEqual(self.getLogMessages(), ["log 1", "log 2"])

    def test_print(self):
        """
        When StdioOnnaStick is set as sys.stdout, prints become log messages.
        """
        oldStdout = sys.stdout
        sys.stdout = log.StdioOnnaStick()
        self.addCleanup(setattr, sys, "stdout", oldStdout)
        print("This", end=" ")
        print("is a test")
        self.assertEqual(self.getLogMessages(), ["This is a test"])

    def test_error(self):
        """
        StdioOnnaStick created with isError=True log messages as errors.
        """
        stdio = log.StdioOnnaStick(isError=True)
        stdio.write("log 1\n")
        self.assertTrue(self.resultLogs[0]['isError'])

    def test_unicode(self):
        """
        StdioOnnaStick converts unicode prints to byte strings on Python 2, in
        order to be compatible with the normal stdout/stderr objects.

        On Python 3, the prints are left unmodified.
        """
        unicodeString = u"Hello, \N{VULGAR FRACTION ONE HALF} world."
        stdio = log.StdioOnnaStick(encoding="utf-8")
        self.assertEqual(stdio.encoding, "utf-8")
        stdio.write(unicodeString + u"\n")
        stdio.writelines([u"Also, " + unicodeString])
        oldStdout = sys.stdout
        sys.stdout = stdio
        self.addCleanup(setattr, sys, "stdout", oldStdout)
        # This should go to the log, utf-8 encoded too:
        print(unicodeString)
        if _PY3:
            self.assertEqual(self.getLogMessages(),
                             [unicodeString,
                              u"Also, " + unicodeString,
                              unicodeString])
        else:
            self.assertEqual(self.getLogMessages(),
                             [unicodeString.encode("utf-8"),
                              (u"Also, " + unicodeString).encode("utf-8"),
                              unicodeString.encode("utf-8")])
| {
"content_hash": "a577858ec1cc7187dfe504d9b8a7e870",
"timestamp": "",
"source": "github",
"line_count": 1083,
"max_line_length": 79,
"avg_line_length": 33.47737765466297,
"alnum_prop": 0.6045895851721095,
"repo_name": "whitehorse-io/encarnia",
"id": "a66cc342de92c8f51333426e5d6cfabf6665e28e",
"size": "36329",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyenv/lib/python2.7/site-packages/twisted/test/test_log.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "106"
},
{
"name": "C",
"bytes": "63966"
},
{
"name": "CSS",
"bytes": "87525"
},
{
"name": "Emacs Lisp",
"bytes": "2734"
},
{
"name": "GAP",
"bytes": "18122"
},
{
"name": "HTML",
"bytes": "91741"
},
{
"name": "JavaScript",
"bytes": "151335"
},
{
"name": "Objective-C",
"bytes": "1292"
},
{
"name": "Python",
"bytes": "24616242"
},
{
"name": "Shell",
"bytes": "8808"
}
],
"symlink_target": ""
} |
from django.shortcuts import render
from django.views.generic import CreateView
from django.contrib import messages
from datetimewidget.widgets import DateTimeWidget
from .models import Appointment
class AppointmentCreateMixin(object):
    """
    Mixin for appointment-creating views: declares the editable model fields
    and flashes a success message once the form validates.

    Subclasses must define ``success_msg``.
    """
    fields = ('patient_name',
              'patient_middlename',
              'patient_surname',
              'doctor',
              'appointment_time')

    @property
    def success_msg(self):
        # The original returned the undefined name ``NonImplemented``, which
        # raised a confusing NameError on access; raise the intended
        # exception explicitly instead.
        raise NotImplementedError(
            "Subclasses of AppointmentCreateMixin must define success_msg")

    def form_valid(self, form):
        # Flash the configured message, then defer to the normal CreateView
        # handling (object save + redirect).
        messages.success(self.request, self.success_msg)
        return super(AppointmentCreateMixin, self).form_valid(form)
class AppointmentCreateView(AppointmentCreateMixin, CreateView):
    """
    Create an Appointment, validating that the requested slot falls within
    business hours (Mon-Fri, 09:00-18:00) and is not already taken.
    """
    model = Appointment
    # template_name was previously stranded after the method definitions;
    # keep all class-level configuration together.
    template_name = 'appointments/appointment_form.html'
    fields = ('patient_name',
              'patient_middlename',
              'patient_surname',
              'doctor',
              'appointment_time')
    success_msg = "Спасибо! Вы записались на прием."

    def get_form(self, form_class=None):
        """
        Return the form with a bootstrap date/time picker on the
        appointment_time field.

        ``form_class`` defaults to None for compatibility with
        FormMixin.get_form(), which calls this method with no arguments on
        Django >= 1.10 (the old required parameter raised TypeError there).
        """
        form = super(AppointmentCreateView, self).get_form()
        dateTimeOptions = {
            'weekStart': '1',           # weeks start on Monday
            'format': 'dd/mm/yyyy HH',
            'daysOfWeekDisabled': "'0,6'",  # no Sundays/Saturdays
            'minuteStep': '60',         # whole-hour slots only
        }
        form.fields['appointment_time'].widget = DateTimeWidget(
            options=dateTimeOptions, usel10n=True, bootstrap_version=3)
        return form

    def form_valid(self, form):
        """
        Reject slots outside office hours, on weekends, or that overlap an
        existing appointment; otherwise create the appointment.
        """
        import datetime
        start_date = form.cleaned_data['appointment_time']
        end_date = start_date + datetime.timedelta(hours=1)
        # Office hours: 09:00 (inclusive) to 18:00 (exclusive).
        if not datetime.time(9, 0) \
                <= start_date.time() < datetime.time(18, 0):
            form.add_error('appointment_time', 'Часы приема — 09:00-18:00')
            return self.form_invalid(form)
        # Weekdays only (5 = Saturday, 6 = Sunday).
        if start_date.weekday() in (5, 6):
            form.add_error('appointment_time', 'Дни приема — ПН-ПТ')
            return self.form_invalid(form)
        # exists() avoids fetching rows just to test for a clash.
        if Appointment.objects.filter(
                appointment_time__range=(start_date, end_date)).exists():
            form.add_error('appointment_time', 'К сожалению, время занято!')
            return self.form_invalid(form)
        return super(AppointmentCreateView, self).form_valid(form)
| {
"content_hash": "ff4f6c5ea4b8eb8db0cc9ccee33ab373",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 76,
"avg_line_length": 34.970588235294116,
"alnum_prop": 0.6021867115222876,
"repo_name": "thefivekey/django-doctor-appointment",
"id": "8013a49048f96dc22ab675d307f02a2fc054a26e",
"size": "2452",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "polyclinic/appointments/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "686"
},
{
"name": "Python",
"bytes": "11149"
},
{
"name": "Shell",
"bytes": "568"
}
],
"symlink_target": ""
} |
import time
from pokemongo_bot import logger
from pokemongo_bot.human_behaviour import (normalized_reticle_size, sleep,
spin_modifier)
class PokemonCatchWorker(object):
    """
    Runs the catch sequence for a single encountered pokemon.
    """

    # Sentinel results returned by work().
    BAG_FULL = 'bag_full'
    NO_POKEBALLS = 'no_pokeballs'

    def __init__(self, pokemon, bot):
        self.pokemon = pokemon
        self.bot = bot
        # Mirror the pieces of bot state this worker uses as attributes.
        for attribute in ('api', 'position', 'config',
                          'pokemon_list', 'item_list', 'inventory'):
            setattr(self, attribute, getattr(bot, attribute))
        # Presumably filled in by create_encounter_api_call() (not shown
        # here) before work() consults them -- TODO confirm.
        self.spawn_point_guid = ''
        self.response_key = ''
        self.response_status_key = ''
    def work(self):
        """
        Run one catch attempt against the encountered pokemon: evaluate it,
        optionally feed a berry, pick a ball, and keep throwing until it is
        caught, flees, or pokeballs run out.

        Returns False when the catch configuration rejects the pokemon, or
        NO_POKEBALLS when out of balls; raises RuntimeError when the bag is
        full and release_pokemon is configured.
        """
        encounter_id = self.pokemon['encounter_id']
        response_dict = self.create_encounter_api_call()
        if response_dict and 'responses' in response_dict:
            if self.response_key in response_dict['responses']:
                if self.response_status_key in response_dict['responses'][self.response_key]:
                    # NOTE(review): ``is 7`` / ``is 1`` compare ints by
                    # identity; this only works because CPython caches small
                    # ints -- ``==`` would be the correct operator.
                    if response_dict['responses'][self.response_key][self.response_status_key] is 7:
                        if self.config.release_pokemon:
                            raise RuntimeError('Pokemon Bag is full!')
                    if response_dict['responses'][self.response_key][self.response_status_key] is 1:
                        cp = 0
                        if 'wild_pokemon' in response_dict['responses'][self.response_key] or 'pokemon_data' in \
                                response_dict['responses'][self.response_key]:
                            # ENCOUNTER responses nest the pokemon under
                            # 'wild_pokemon'; other encounter types put it at
                            # the top level of the response.
                            if self.response_key == 'ENCOUNTER':
                                pokemon = response_dict['responses'][self.response_key]['wild_pokemon']
                            else:
                                pokemon = response_dict['responses'][self.response_key]
                            catch_rate = response_dict['responses'][self.response_key]['capture_probability'][
                                'capture_probability']  # 0 = pokeballs, 1 great balls, 3 ultra balls
                            if 'pokemon_data' in pokemon and 'cp' in pokemon['pokemon_data']:
                                pokemon_data = pokemon['pokemon_data']
                                cp = pokemon_data['cp']
                                # Missing IV entries default to 0.
                                individual_attack = pokemon_data.get("individual_attack", 0)
                                individual_stamina = pokemon_data.get("individual_stamina", 0)
                                individual_defense = pokemon_data.get("individual_defense", 0)
                                iv_display = '{}/{}/{}'.format(
                                    individual_stamina,
                                    individual_attack,
                                    individual_defense
                                )
                                pokemon_potential = self.pokemon_potential(pokemon_data)
                                # pokemon_id is 1-based; pokemon_list is 0-based.
                                pokemon_num = int(pokemon_data['pokemon_id']) - 1
                                pokemon_name = self.pokemon_list[int(pokemon_num)]['Name']
                                logger.log('A Wild {} appeared! [CP {}] [Potential {}]'.format(
                                    pokemon_name, cp, pokemon_potential), 'yellow')
                                logger.log('IV [Stamina/Attack/Defense] = [{}]'.format(iv_display))
                                pokemon_data['name'] = pokemon_name
                                # Simulate app
                                sleep(3)
                                if not self.should_capture_pokemon(pokemon_name, cp, pokemon_potential, response_dict):
                                    # logger.log('[x] Rule prevents capture.')
                                    return False
                                items_stock = self.bot.current_inventory()
                                while True:
                                    # pick the most simple ball from stock
                                    pokeball = 1  # start from 1 - PokeBalls
                                    current_type = pokeball
                                    # if this type's stock = 0 and not top tier yet
                                    while items_stock[current_type] is 0 and current_type < 3:
                                        # progress to next tier
                                        current_type += 1
                                        # next tier's stock > 0
                                        if items_stock[current_type] > 0:
                                            pokeball = current_type
                                    # re-check stock again
                                    if items_stock[pokeball] is 0:
                                        logger.log('Out of pokeballs', 'red')
                                        return PokemonCatchWorker.NO_POKEBALLS
                                    # Use berry to increase success chance.
                                    berry_id = 701  # @ TODO: use better berries if possible
                                    berries_count = self.bot.item_inventory_count(berry_id)
                                    if catch_rate[pokeball-1] < 0.5 and berries_count > 0:  # and berry is in stock
                                        success_percentage = '{0:.2f}'.format(catch_rate[pokeball-1]*100)
                                        logger.log(
                                            'Catch Rate with normal Pokeball is low ({}%). '
                                            'Throwing {}... ({} left!)'.format(
                                                success_percentage,
                                                self.item_list[str(berry_id)],berries_count-1
                                            )
                                        )
                                        if items_stock[pokeball] is 0:
                                            break
                                        self.api.use_item_capture(
                                            item_id=berry_id,
                                            encounter_id=encounter_id,
                                            spawn_point_id=self.spawn_point_guid
                                        )
                                        response_dict = self.api.call()
                                        if response_dict and response_dict['status_code'] is 1 and 'item_capture_mult' in \
                                                response_dict['responses']['USE_ITEM_CAPTURE']:
                                            # Berry accepted: scale every ball's catch rate
                                            # by the server-provided multiplier.
                                            for i in range(len(catch_rate)):
                                                if 'item_capture_mult' in response_dict['responses']['USE_ITEM_CAPTURE']:
                                                    catch_rate[i] = catch_rate[i] * \
                                                                    response_dict['responses']['USE_ITEM_CAPTURE'][
                                                                        'item_capture_mult']
                                            success_percentage = '{0:.2f}'.format(catch_rate[pokeball - 1] * 100)
                                            logger.log('Catch Rate with normal Pokeball has increased to {}%'.format(
                                                success_percentage))
                                        else:
                                            if response_dict['status_code'] is 1:
                                                logger.log('Fail to use berry. Seem like you are softbanned.', 'red')
                                                self.bot.softban = True
                                            else:
                                                logger.log(
                                                    'Fail to use berry. Status Code: {}'.format(response_dict['status_code']),
                                                    'red')
                                    # change ball to next tier if catch rate is too low
                                    current_type = pokeball
                                    while current_type < 3:
                                        current_type += 1
                                        if catch_rate[pokeball - 1] < 0.35 and items_stock[current_type] > 0:
                                            # if current ball chance to catch is under 35%,
                                            # and player has better ball - then use it
                                            pokeball = current_type  # use better ball
                                    # @TODO, use the best ball in stock to catch VIP (Very Important Pokemon: Configurable)
                                    items_stock[pokeball] -= 1
                                    success_percentage = '{0:.2f}'.format(catch_rate[pokeball - 1] * 100)
                                    logger.log('Using {} (chance: {}%)... ({} left!)'.format(
                                        self.item_list[str(pokeball)],
                                        success_percentage,
                                        items_stock[pokeball]
                                    ))
                                    # Snapshot inventory ids so a newly caught
                                    # pokemon can be identified afterwards.
                                    id_list1 = self.count_pokemon_inventory()
                                    # Humanize the throw parameters.
                                    reticle_size_parameter = normalized_reticle_size(self.config.catch_randomize_reticle_factor)
                                    spin_modifier_parameter = spin_modifier(self.config.catch_randomize_spin_factor)
                                    self.api.catch_pokemon(encounter_id=encounter_id,
                                                           pokeball=pokeball,
                                                           normalized_reticle_size=reticle_size_parameter,
                                                           spawn_point_id=self.spawn_point_guid,
                                                           hit_pokemon=1,
                                                           spin_modifier=spin_modifier_parameter,
                                                           NormalizedHitPosition=1)
                                    response_dict = self.api.call()
                                    if response_dict and \
                                            'responses' in response_dict and \
                                            'CATCH_POKEMON' in response_dict['responses'] and \
                                            'status' in response_dict['responses']['CATCH_POKEMON']:
                                        status = response_dict['responses'][
                                            'CATCH_POKEMON']['status']
                                        if status is 2:
                                            # Broke free: loop and throw again.
                                            logger.log(
                                                '[-] Attempted to capture {} - failed.. trying again!'.format(pokemon_name),
                                                'red')
                                            sleep(2)
                                            continue
                                        if status is 3:
                                            logger.log(
                                                'Oh no! {} vanished! :('.format(pokemon_name), 'red')
                                            # NOTE(review): success_percentage is a
                                            # *string* like '100.00', so comparing it to
                                            # the int 100 is always False and this branch
                                            # never fires; it also assigns self.softban
                                            # instead of self.bot.softban, unlike the
                                            # rest of this method -- likely bugs.
                                            if success_percentage == 100:
                                                self.softban = True
                                        if status is 1:
                                            self.bot.metrics.captured_pokemon(pokemon_name, cp, iv_display, pokemon_potential)
                                            logger.log('Captured {}! [CP {}] [Potential {}] [{}] [+{} exp]'.format(
                                                pokemon_name,
                                                cp,
                                                pokemon_potential,
                                                iv_display,
                                                sum(response_dict['responses']['CATCH_POKEMON']['capture_award']['xp'])
                                            ), 'blue')
                                            self.bot.softban = False
                                            if (self.config.evolve_captured
                                                    and (self.config.evolve_captured[0] == 'all'
                                                         or pokemon_name in self.config.evolve_captured)):
                                                id_list2 = self.count_pokemon_inventory()
                                                # No need to capture this even for metrics, player stats includes it.
                                                pokemon_to_transfer = list(set(id_list2) - set(id_list1))
                                                # TODO dont throw RuntimeError, do something better
                                                if len(pokemon_to_transfer) == 0:
                                                    raise RuntimeError(
                                                        'Trying to evolve 0 pokemons!')
                                                self.api.evolve_pokemon(pokemon_id=pokemon_to_transfer[0])
                                                response_dict = self.api.call()
                                                status = response_dict['responses']['EVOLVE_POKEMON']['result']
                                                if status == 1:
                                                    logger.log(
                                                        '{} has been evolved!'.format(pokemon_name), 'green')
                                                else:
                                                    logger.log(
                                                        'Failed to evolve {}!'.format(pokemon_name))
                                    break
        # Brief pause before handing control back to the main loop.
        time.sleep(5)
def count_pokemon_inventory(self):
# don't use cached bot.get_inventory() here
# because we need to have actual information in capture logic
self.api.get_inventory()
response_dict = self.api.call()
id_list = []
callback = lambda pokemon: id_list.append(pokemon['id'])
self._foreach_pokemon_in_inventory(response_dict, callback)
return id_list
def _foreach_pokemon_in_inventory(self, response_dict, callback):
try:
reduce(dict.__getitem__, [
"responses", "GET_INVENTORY", "inventory_delta", "inventory_items"], response_dict)
except KeyError:
pass
else:
for item in response_dict['responses']['GET_INVENTORY']['inventory_delta']['inventory_items']:
try:
reduce(dict.__getitem__, [
"inventory_item_data", "pokemon_data"], item)
except KeyError:
pass
else:
pokemon = item['inventory_item_data']['pokemon_data']
if not pokemon.get('is_egg', False):
callback(pokemon)
def pokemon_potential(self, pokemon_data):
total_iv = 0
iv_stats = ['individual_attack', 'individual_defense', 'individual_stamina']
for individual_stat in iv_stats:
try:
total_iv += pokemon_data[individual_stat]
except:
pokemon_data[individual_stat] = 0
continue
return round((total_iv / 45.0), 2)
def should_capture_pokemon(self, pokemon_name, cp, iv, response_dict):
catch_config = self._get_catch_config_for(pokemon_name)
cp_iv_logic = catch_config.get('logic')
if not cp_iv_logic:
cp_iv_logic = self._get_catch_config_for('any').get('logic', 'and')
catch_results = {
'cp': False,
'iv': False,
}
if catch_config.get('never_catch', False):
return False
if catch_config.get('always_catch', False):
return True
catch_cp = catch_config.get('catch_above_cp', 0)
if cp > catch_cp:
catch_results['cp'] = True
catch_iv = catch_config.get('catch_above_iv', 0)
if iv > catch_iv:
catch_results['iv'] = True
logic_to_function = {
'or': lambda x, y: x or y,
'and': lambda x, y: x and y
}
# logger.log(
# "Catch config for {}: CP {} {} IV {}".format(
# pokemon_name,
# catch_cp,
# cp_iv_logic,
# catch_iv
# ), 'yellow'
# )
return logic_to_function[cp_iv_logic](*catch_results.values())
def _get_catch_config_for(self, pokemon):
catch_config = self.config.catch.get(pokemon)
if not catch_config:
catch_config = self.config.catch.get('any')
return catch_config
def create_encounter_api_call(self):
encounter_id = self.pokemon['encounter_id']
player_latitude = self.pokemon['latitude']
player_longitude = self.pokemon['longitude']
if 'spawn_point_id' in self.pokemon:
spawn_point_id = self.pokemon['spawn_point_id']
self.spawn_point_guid = spawn_point_id
self.response_key = 'ENCOUNTER'
self.response_status_key = 'status'
self.api.encounter(encounter_id=encounter_id, spawn_point_id=spawn_point_id,
player_latitude=player_latitude, player_longitude=player_longitude)
else:
fort_id = self.pokemon['fort_id']
self.spawn_point_guid = fort_id
self.response_key = 'DISK_ENCOUNTER'
self.response_status_key = 'result'
self.api.disk_encounter(encounter_id=encounter_id, fort_id=fort_id,
player_latitude=player_latitude, player_longitude=player_longitude)
return self.api.call()
| {
"content_hash": "5bd8b93dd51e06ee7d5a535d6d12d662",
"timestamp": "",
"source": "github",
"line_count": 333,
"max_line_length": 120,
"avg_line_length": 51.52252252252252,
"alnum_prop": 0.43270968117969344,
"repo_name": "codybaldwin/PokemonGo-Bot",
"id": "b2f57b503217f310b85e86710a470d38927b5d01",
"size": "17182",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "pokemongo_bot/cell_workers/pokemon_catch_worker.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Protocol Buffer",
"bytes": "43769"
},
{
"name": "Python",
"bytes": "141173"
},
{
"name": "Shell",
"bytes": "778"
}
],
"symlink_target": ""
} |
import itertools
from typing import Union, Sequence, Optional
import numpy as np
_RealArraylike = Union[np.ndarray, float]
def _single_qubit_unitary(
    theta: _RealArraylike, phi_d: _RealArraylike, phi_o: _RealArraylike
) -> np.ndarray:
    """Single qubit unitary matrix.

    Args:
        theta: cos(theta) is magnitude of 00 matrix element. May be a scalar
            or real ndarray (for broadcasting).
        phi_d: exp(i phi_d) is the phase of 00 matrix element. May be a scalar
            or real ndarray (for broadcasting).
        phi_o: i exp(i phi_o) is the phase of 10 matrix element. May be a scalar
            or real ndarray (for broadcasting).

    Notes:
        The output is vectorized with respect to the angles. I.e, if the angles
        are (broadcastable) arraylike objects whose sum would have shape (...),
        the output is an array of shape (...,2,2), where the final two indices
        correspond to unitary matrices.
    """
    # Matrix elements; broadcasting over the angle arguments happens here.
    diag = np.cos(theta) * np.exp(1j * np.asarray(phi_d))
    off = 1j * np.sin(theta) * np.exp(1j * np.asarray(phi_o))
    zero_d = np.zeros_like(diag)
    zero_o = np.zeros_like(off)
    # Build the diagonal and off-diagonal parts with the 2x2 axes leading,
    # then move those axes to the end so matrices occupy the trailing indices.
    diagonal_part = np.moveaxis(
        np.array([[diag, zero_d], [zero_d, diag.conj()]]), [0, 1], [-2, -1]
    )
    off_part = np.moveaxis(
        np.array([[zero_o, -off.conj()], [off, zero_o]]), [0, 1], [-2, -1]
    )
    return diagonal_part + off_part
def random_qubit_unitary(
    shape: Sequence[int] = (),
    randomize_global_phase: bool = False,
    rng: Optional[np.random.RandomState] = None,
) -> np.ndarray:
    """Random qubit unitary distributed over the Haar measure.

    The implementation is vectorized for speed.

    Args:
        shape: The broadcasted shape of the output. This is used to generate
            a tensor of random unitaries with dimensions tuple(shape) + (2,2).
        randomize_global_phase: (Default False) If True, a global phase is also
            sampled randomly. This corresponds to sampling over U(2) instead of
            SU(2).
        rng: Random number generator to be used in sampling. Default is
            numpy.random.
    """
    random_state = np.random if rng is None else rng
    # Draw the three Haar angles; the draw order matters for reproducibility
    # with a seeded generator, so it must not be rearranged.
    theta = np.arcsin(np.sqrt(random_state.rand(*shape)))
    phi_d = random_state.rand(*shape) * np.pi * 2
    phi_o = random_state.rand(*shape) * np.pi * 2
    unitaries = _single_qubit_unitary(theta, phi_d, phi_o)

    if randomize_global_phase:
        # Bring the matrix axes to the front so the sampled phase broadcasts
        # over them, then restore the trailing (2, 2) layout.
        unitaries = np.moveaxis(unitaries, (-2, -1), (0, 1))
        unitaries = unitaries * np.exp(1j * np.pi * 2 * random_state.rand(*shape))
        unitaries = np.moveaxis(unitaries, (0, 1), (-2, -1))
    return unitaries
def vector_kron(first: np.ndarray, second: np.ndarray) -> np.ndarray:
    """Vectorized implementation of kron for square matrices."""
    dim_a = first.shape[-1]
    dim_b = second.shape[-1]
    # Both inputs must be square in their trailing two axes.
    assert first.shape[-2] == dim_a
    assert second.shape[-2] == dim_b
    # Outer product with the row/column axes interleaved, then collapse the
    # paired axes into the (dim_a*dim_b, dim_a*dim_b) Kronecker layout.
    product = np.einsum('...ab,...cd->...acbd', first, second)
    batch_shape = product.shape[:-4]
    return product.reshape(batch_shape + (dim_a * dim_b,) * 2)
# Encode all possible local operations that produce equivalent KAK vectors
# and which can also be detected by the entanglement fidelity function
# These operations can be decomposed as s_x^a s_y^b s_z^c n_j p, where
# s_j denotes a pi/2 shift in index j (a,b,c are 0 or 1), n_j is a pi rotation
# about the j axis, and p is a permutation of the three indices.

# all permutations of (1,2,3)
# _perms_123[k] is the 3x3 permutation matrix for the k-th permutation of
# the coordinate axes, so einsum('pab,...b->...pa', _perms_123, v) yields
# all 6 axis-permuted copies of v.
_perms_123 = np.zeros((6, 3, 3), int)
for ind, perm in enumerate(itertools.permutations((0, 1, 2))):
    _perms_123[ind, (0, 1, 2), perm] = 1

# Diagonal sign matrices: the identity plus the three double sign flips
# (each of rows 1-3 negates exactly two of the coordinates).
_negations = np.zeros((4, 3, 3), int)
_negations[0, (0, 1, 2), (0, 1, 2)] = 1
_negations[1, (0, 1, 2), (0, 1, 2)] = (1, -1, -1)
_negations[2, (0, 1, 2), (0, 1, 2)] = (-1, 1, -1)
_negations[3, (0, 1, 2), (0, 1, 2)] = (-1, -1, 1)

# pi/2 shift vectors: _offsets[k] adds pi/2 to one of the 8 subsets of the
# three coordinates (row 0 is the empty subset, rows 1-3 singles, rows 4-6
# pairs, row 7 all three).
_offsets = np.zeros((8, 3))
_offsets[1, 0] = np.pi / 2
_offsets[2, 1] = np.pi / 2
_offsets[3, 2] = np.pi / 2
_offsets[4, (1, 2)] = np.pi / 2
_offsets[5, (0, 2)] = np.pi / 2
_offsets[6, (0, 1)] = np.pi / 2
_offsets[7, (0, 1, 2)] = np.pi / 2
def _kak_equivalent_vectors(kak_vec) -> np.ndarray:
    """Generates all KAK vectors equivalent under single qubit unitaries."""
    # Technically this is not all equivalent vectors, but a subset of vectors
    # which are not guaranteed to give the same answer under the infidelity
    # formula.
    vec = np.asarray(kak_vec, dtype=float)

    # Apply all permutations, then all negations, then all shifts.
    permuted = np.einsum('pab,...b->...pa', _perms_123, vec)  # (...,6,3)
    negated = np.einsum('nab,...b->...na', _negations, permuted)  # (...,6,4,3)
    # (...,8,6,4,3)
    shifted = negated[..., np.newaxis, :, :, :] + _offsets[:, np.newaxis, np.newaxis, :]

    # Merge the three equivalence-operation indices into one axis of
    # 8 * 6 * 4 = 192 candidate vectors.
    return shifted.reshape(shifted.shape[:-4] + (192, 3))
def kak_vector_infidelity(
    k_vec_a: np.ndarray, k_vec_b: np.ndarray, ignore_equivalent_vectors: bool = False
) -> np.ndarray:
    r"""The locally invariant infidelity between two KAK vectors.

    This is the quantity

    $$
    \min 1 - F_e( \exp(i k_a · (XX,YY,ZZ)) kL \exp(i k_b · (XX,YY,ZZ)) kR)
    $$

    where $F_e$ is the entanglement (process) fidelity and the minimum is taken
    over all 1-local unitaries kL, kR.

    Args:
        k_vec_a: A 3-vector or tensor of 3-vectors with shape (...,3).
        k_vec_b: A 3-vector or tensor of 3-vectors with shape (...,3). If both
            k_vec_a and k_vec_b are tensors, their shapes must be compatible
            for broadcasting.
        ignore_equivalent_vectors: If True, the calculation ignores any other
            KAK vectors that are equivalent to the inputs under local unitaries.
            The resulting infidelity is then only an upper bound to the true
            infidelity.

    Returns:
        An ndarray storing the locally invariant infidelity between the inputs.
        If k_vec_a or k_vec_b is a tensor, the result is vectorized.
    """
    k_vec_a, k_vec_b = np.asarray(k_vec_a), np.asarray(k_vec_b)

    if ignore_equivalent_vectors:
        k_diff = k_vec_a - k_vec_b
        # np.prod replaces the deprecated np.product alias, which was removed
        # in NumPy 2.0.
        out = 1 - np.prod(np.cos(k_diff), axis=-1) ** 2
        out -= np.prod(np.sin(k_diff), axis=-1) ** 2
        return out

    # We must take the minimum infidelity over all possible locally equivalent
    # and nontrivial KAK vectors. We need only consider equivalent vectors
    # of one input.

    # Ensure we consider equivalent vectors for only the smallest input.
    if k_vec_a.size < k_vec_b.size:
        k_vec_a, k_vec_b = k_vec_b, k_vec_a  # coverage: ignore

    k_vec_a = k_vec_a[..., np.newaxis, :]  # (...,1,3)
    k_vec_b = _kak_equivalent_vectors(k_vec_b)  # (...,192,3)

    k_diff = k_vec_a - k_vec_b

    out = 1 - np.prod(np.cos(k_diff), axis=-1) ** 2
    out -= np.prod(np.sin(k_diff), axis=-1) ** 2  # (...,192)

    return out.min(axis=-1)
def in_weyl_chamber(kak_vec: np.ndarray) -> np.ndarray:
    """Whether a given collection of coordinates is within the Weyl chamber.

    Args:
        kak_vec: A numpy.ndarray tensor encoding a KAK 3-vector. Input may be
            broadcastable with shape (...,3).

    Returns:
        np.ndarray of boolean values denoting whether the given coordinates
        are in the Weyl chamber.
    """
    vec = np.asarray(kak_vec)
    assert vec.shape[-1] == 3, 'Last index of input must represent a 3-vector.'

    x, y, z = vec[..., 0], vec[..., 1], vec[..., 2]
    quarter_pi = np.pi / 4

    # Chamber conditions: 0 <= x <= pi/4, 0 <= y <= min(x, pi/4), |z| <= y.
    x_ok = np.logical_and(0 <= x, x <= quarter_pi)
    y_ok = np.logical_and(np.logical_and(0 <= y, y <= quarter_pi), x >= y)
    z_ok = np.abs(z) <= y

    return np.logical_and.reduce((x_ok, y_ok, z_ok))
def weyl_chamber_mesh(spacing: float) -> np.ndarray:
    """Cubic mesh of points in the Weyl chamber.

    Args:
        spacing: Euclidean distance between neighboring KAK vectors.

    Returns:
        np.ndarray of shape (N,3) corresponding to the points in the Weyl
        chamber.

    Raises:
        ValueError: If the spacing is so small that the mesh would exhaust
            memory (roughly 1 GB at spacing == 1e-3).
    """
    if spacing < 1e-3:  # memory required ~ 1 GB
        raise ValueError(f'Generating a mesh with spacing {spacing} may cause system to crash.')

    # Uniform cubic grid covering [-pi/4, pi/4) along each coordinate.
    steps = np.arange(-np.pi / 4, np.pi / 4, step=spacing)
    grid = np.array(np.meshgrid(*(steps,) * 3))
    candidates = np.moveaxis(np.array([axis.ravel() for axis in grid]), 0, -1)

    # Keep only the grid points that actually lie inside the Weyl chamber.
    return candidates[in_weyl_chamber(candidates)]
# Two-qubit Pauli products (4x4 computational-basis matrices) used as the
# generators when exponentiating KAK vectors.
_XX = np.zeros((4, 4))
_XX[(0, 1, 2, 3), (3, 2, 1, 0)] = 1  # sigma_x (x) sigma_x: ones on the anti-diagonal
_ZZ = np.diag([1, -1, -1, 1])  # sigma_z (x) sigma_z
_YY = -_XX @ _ZZ  # sigma_y (x) sigma_y = -(XX)(ZZ)
_kak_gens = np.array([_XX, _YY, _ZZ])  # stacked generators, shape (3, 4, 4)
def kak_vector_to_unitary(vector: np.ndarray) -> np.ndarray:
    r"""Convert a KAK vector to its unitary matrix equivalent.

    Args:
        vector: A KAK vector shape (..., 3). (Input may be vectorized).

    Returns:
        unitary: Corresponding 2-qubit unitary, of the form
           $exp( i k_x \sigma_x \sigma_x + i k_y \sigma_y \sigma_y
                + i k_z \sigma_z \sigma_z)$.
           matrix or tensor of matrices of shape (..., 4,4).
    """
    coeffs = np.asarray(vector)

    # Hermitian generator k_x XX + k_y YY + k_z ZZ for each input vector.
    generator = np.einsum('...a,abc->...bc', coeffs, _kak_gens)

    # Exponentiate via the eigendecomposition: U = V exp(i diag(w)) V^dagger.
    eigenvalues, eigenvectors = np.linalg.eigh(generator)
    return np.einsum(
        '...ab,...b,...cb', eigenvectors, np.exp(1j * eigenvalues), eigenvectors.conj()
    )
def unitary_entanglement_fidelity(U_actual: np.ndarray, U_ideal: np.ndarray) -> np.ndarray:
    r"""Entanglement fidelity between two unitaries.

    For unitary matrices, this is related to the average unitary fidelity F
    as

    :math:`F = \frac{F_e d + 1}{d + 1}`

    where d is the matrix dimension.

    Args:
        U_actual : Matrix whose fidelity to U_ideal will be computed. This may
            be a non-unitary matrix, i.e. the projection of a larger unitary
            matrix into the computational subspace.
        U_ideal : Unitary matrix to which U_actual will be compared.

    Both arguments may be vectorized, in that their shapes may be of the form
    (...,M,M) (as long as both shapes can be broadcast together).

    Returns:
        The entanglement fidelity between the two unitaries. For inputs with
        shape (...,M,M), the output has shape (...).

    Raises:
        ValueError: If either input's trailing two dimensions are not square.
    """
    U_actual = np.asarray(U_actual)
    U_ideal = np.asarray(U_ideal)
    # Raise instead of assert: asserts are stripped when Python runs with -O,
    # which would silently disable this validation. Also validate U_ideal,
    # which the original never checked.
    if U_actual.shape[-1] != U_actual.shape[-2] or U_ideal.shape[-1] != U_ideal.shape[-2]:
        raise ValueError("Inputs' trailing dimensions must be equal (square).")

    dim = U_ideal.shape[-1]

    # Tr(U_actual^dagger @ U_ideal) computed without forming the product matrix.
    prod_trace = np.einsum('...ba,...ba->...', U_actual.conj(), U_ideal)

    # F_e = |Tr(U_actual^dagger U_ideal)|^2 / d^2.
    return np.real((np.abs(prod_trace)) / dim) ** 2
| {
"content_hash": "15bb489f888f3cb874fc8798142eccfa",
"timestamp": "",
"source": "github",
"line_count": 297,
"max_line_length": 96,
"avg_line_length": 35.48148148148148,
"alnum_prop": 0.6113114442968305,
"repo_name": "balopat/Cirq",
"id": "d4cd6904187816aadcf50036e362c017134280b4",
"size": "10540",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cirq-google/cirq_google/optimizers/two_qubit_gates/math_utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "5923"
},
{
"name": "HTML",
"bytes": "262"
},
{
"name": "Jupyter Notebook",
"bytes": "23905"
},
{
"name": "Makefile",
"bytes": "634"
},
{
"name": "Python",
"bytes": "6256825"
},
{
"name": "Shell",
"bytes": "50383"
},
{
"name": "Starlark",
"bytes": "5979"
}
],
"symlink_target": ""
} |
from mock import patch
from _common import unittest
from helper import TestHelper
from beets.library import Item
class KeyFinderTest(unittest.TestCase, TestHelper):
    """Tests for the beets `keyfinder` plugin.

    `beets.util.command_output` is patched in setUp so the real KeyFinder
    binary is never invoked; each test sets the fake binary output instead.
    """

    def setUp(self):
        # Fresh test library/config plus the plugin under test.
        self.setup_beets()
        self.load_plugins('keyfinder')
        # Patch the shell-out helper for the whole test; stopped in tearDown.
        self.patcher = patch('beets.util.command_output')
        self.command_output = self.patcher.start()

    def tearDown(self):
        self.teardown_beets()
        self.unload_plugins()
        self.patcher.stop()

    def test_add_key(self):
        """A key reported by the binary is written to the item.

        The raw output 'dbm' is expected to be stored normalized as 'C#m'.
        """
        item = Item(path='/file')
        item.add(self.lib)
        self.command_output.return_value = 'dbm'
        self.run_command('keyfinder')
        item.load()
        self.assertEqual(item['initial_key'], 'C#m')
        # The binary must be invoked with the item's path.
        self.command_output.assert_called_with(
            ['KeyFinder', '-f', item.path])

    def test_add_key_on_import(self):
        """Importing new items triggers key detection automatically."""
        self.command_output.return_value = 'dbm'
        importer = self.create_importer()
        importer.run()
        item = self.lib.items().get()
        self.assertEqual(item['initial_key'], 'C#m')

    def test_force_overwrite(self):
        """With the overwrite option set, an existing key is replaced."""
        self.config['keyfinder']['overwrite'] = True
        item = Item(path='/file', initial_key='F')
        item.add(self.lib)
        self.command_output.return_value = 'C#m'
        self.run_command('keyfinder')
        item.load()
        self.assertEqual(item['initial_key'], 'C#m')

    def test_do_not_overwrite(self):
        """By default an item's existing key is left untouched."""
        item = Item(path='/file', initial_key='F')
        item.add(self.lib)
        self.command_output.return_value = 'dbm'
        self.run_command('keyfinder')
        item.load()
        self.assertEqual(item['initial_key'], 'F')
def suite():
    """Return all tests in this module as a suite (used by the CLI runner)."""
    loader = unittest.TestLoader()
    return loader.loadTestsFromName(__name__)
# Allow running this test file directly: `python test_keyfinder.py`.
if __name__ == '__main__':
    unittest.main(defaultTest='suite')
| {
"content_hash": "a8e25174fed162db0d6c873bfbe85a28",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 60,
"avg_line_length": 26.720588235294116,
"alnum_prop": 0.6037424325811778,
"repo_name": "bj-yinyan/beets",
"id": "5795002dcde42ddf04c705aec80d43ee7a4abc43",
"size": "2465",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "test/test_keyfinder.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2951"
},
{
"name": "HTML",
"bytes": "404323"
},
{
"name": "JavaScript",
"bytes": "85858"
},
{
"name": "Python",
"bytes": "1278854"
},
{
"name": "Shell",
"bytes": "7413"
}
],
"symlink_target": ""
} |
import collections
import sys
def main():
lines = [[int(x) for x in line.split()] for line in sys.stdin]
[n, m], a, b, c = lines
def one():
return 1
# http://codereview.stackexchange.com/questions/62956/performance-in-hackerrank-challenge-sherlock-and-queries
factors = collections.defaultdict(one)
for i in range(0, m):
factors[b[i]] = factors[b[i]] * c[i] % 1000000007
for i, factor in factors.iteritems():
for idx in xrange(i-1, n, i):
a[idx] = a[idx] * factor % 1000000007
print ' '.join(map(str, a))
# Script entry point: the judge feeds the test case via stdin.
if __name__ == "__main__":
    main()
"content_hash": "7a937c46ba8954d701558a3464479dd6",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 114,
"avg_line_length": 26.782608695652176,
"alnum_prop": 0.5941558441558441,
"repo_name": "Dobiasd/HackerRank-solutions",
"id": "21c90500200507d79457e62e5b041ea1ef143457",
"size": "616",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Algorithms/Warmup/Sherlock_and_Queries/Main.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "1108"
},
{
"name": "Haskell",
"bytes": "20640"
},
{
"name": "Python",
"bytes": "616"
},
{
"name": "Shell",
"bytes": "375"
}
],
"symlink_target": ""
} |
from mock import patch, Mock, sentinel
from nose.tools import raises
from tests import FlexGetBase
from flexget.task import Task
from flexget.plugins.output import notify_sns
class TestNotifySNS(object):
    """Tests for the notify_sns plugin's SNSNotificationEmitter."""

    def _make_task(self, test_mode, entry):
        """Build a minimal mocked Task with one accepted entry."""
        manager = Mock()
        manager.config = {'tasks': {}}
        task = Mock(wraps=Task(manager, 'fake'))
        task.options.test = test_mode
        task.accepted = [entry]
        return task

    @patch('boto3.Session')
    def test_emitter_build_session_from_empty_config(self, Session):
        emitter = notify_sns.SNSNotificationEmitter({'aws_region': 'test'})
        emitter.build_session()

        # The boto3 session is stored and built with all-default credentials.
        assert emitter.session is Session.return_value
        Session.assert_called_once_with(
            region_name='test',
            aws_access_key_id=None,
            aws_secret_access_key=None,
            profile_name=None,
        )

    @patch('boto3.Session')
    def test_emitter_uses_config_credentials(self, Session):
        emitter = notify_sns.SNSNotificationEmitter({
            'aws_region': None,
            'aws_access_key_id': 'DUMMY',
            'aws_secret_access_key': 'DUMMYKEY',
            'profile_name': 'profile-name',
        })
        emitter.build_session()

        # Credentials from the plugin config are passed straight to boto3.
        Session.assert_called_once_with(
            region_name=None,
            aws_access_key_id='DUMMY',
            aws_secret_access_key='DUMMYKEY',
            profile_name='profile-name',
        )

    @patch('flexget.plugins.output.notify_sns.SNSNotificationEmitter.get_topic')
    def test_dry_run_does_not_send_message(self, get_topic):
        topic = get_topic.return_value
        entry = Mock()
        task = self._make_task(True, entry)

        emitter = notify_sns.SNSNotificationEmitter({'aws_region': 'test', 'sns_topic_arn': 'arn'})
        emitter.send_notifications(task)

        # The message is still rendered, but nothing is published in test mode.
        entry.render.assert_called_once_with(notify_sns.DEFAULT_TEMPLATE_VALUE)
        assert not topic.publish.called

    @patch('flexget.plugins.output.notify_sns.SNSNotificationEmitter.get_topic')
    def test_send_message_for_Event(self, get_topic):
        topic = get_topic.return_value
        entry = Mock()
        task = self._make_task(False, entry)

        emitter = notify_sns.SNSNotificationEmitter({'aws_region': 'test', 'sns_topic_arn': 'arn'})
        emitter.send_notifications(task)

        # The rendered entry text is published to the SNS topic.
        topic.publish.assert_called_once_with(Message=entry.render.return_value)
| {
"content_hash": "7089ac10f5a0c184bdf372882a9df94c",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 93,
"avg_line_length": 36.515151515151516,
"alnum_prop": 0.6203319502074689,
"repo_name": "tsnoam/Flexget",
"id": "1bc26c3bc326b46c794040098f81302426bcac5f",
"size": "2410",
"binary": false,
"copies": "3",
"ref": "refs/heads/develop",
"path": "tests/test_plugin_output_notify_sns.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4567"
},
{
"name": "HTML",
"bytes": "20672"
},
{
"name": "JavaScript",
"bytes": "36021"
},
{
"name": "Python",
"bytes": "2330178"
}
],
"symlink_target": ""
} |
__author__ = 'pc'
#a small server
import socket
s = socket.socket()
host = s.getpeername
port = 1234
s.bind(host, sort)
s.listen(5)
while True:
c, addr = s.accept()
print 'Got connection from ', addr
c.send('Thank you for connecting')
c.close()
import socket
s = socket.socket()
host = socket.gethostname()
port = 1234
s.connect(host, port)
print s.recv(1024)
# The same greeting server built on SocketServer's synchronous TCPServer.
from SocketServer import TCPServer, StreamRequestHandler

class Handler(StreamRequestHandler):
    # Called once per accepted client connection.
    def handle(self):
        addr = self.request.getpeername()
        print 'Got connection from :', addr
        self.wfile.write('Thank you for connecting')

# Bind to all interfaces on port 1234 and serve until interrupted.
server = TCPServer(('', 1234), Handler)
server.serve_forever()
from SocketServer import TCPServer, ThreadingMixIn, StreamRequestHandler
class Sever(ThreadingMixIn, TCPServer): pass
class Handle(StreamRequestHandler):
def handle(self):
addr = self.request.getpeername()
print 'Got connection from :', addr
self.wfile.write('Thank you for connecting')
server = Sever((''. 1234), Handler)
server.serve_forever( )
import socket, select
s = socket.socket()
host = socket.gethostname()
port = 1234
s.bind((host, port))
s.listen(5)
inputs=[s]
while True:
rs, ws, es = select.select(inputs, [], [])
for r in rs:
if r in s :
c, addr = s.accept()
print 'Got connection from ', addr
inputs.append(c)
else:
try:
data = r.recv(1024)
disconnected = not data
except socket.error:
disconnected = True
if disconnected:
print r.getpeername(), 'disconnected'
else:
print data
import socket, select
s = socket.socket()
host = socket.gethostname()
port = 1234
s.bind((host. port))
fdmap = {s.fileno(): s}
s.listen(5)
p = select.poll()
p.register(s)
while True:
events = p.poll()
for fd, event in events:
if fd = s.fileno():
c, addr = s.accept()
print 'Got connection from', addr
p.register(c)
fdmap[c.s.fileno()] = c
elif event & select.POllIN:
data = fdmap[fd].recv(1024)
if not data:
print fdmap[fd].getpeername, 'disconnected'
p.unregister[fd]
del fdmap[fd]
else:
print data | {
"content_hash": "172cc90e4fadeaabfd250e51186b5517",
"timestamp": "",
"source": "github",
"line_count": 116,
"max_line_length": 72,
"avg_line_length": 20.74137931034483,
"alnum_prop": 0.587281795511222,
"repo_name": "hisuley/sxj",
"id": "508a34ae8d666a179e5fae8d3e8e2f3060fb9e28",
"size": "2406",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "practice/test11.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "7449"
},
{
"name": "Python",
"bytes": "26786"
}
],
"symlink_target": ""
} |
"""
Created on Sat Aug 15 12:55:25 2015
@author: Ben
"""
# def write_IO(self):
# """write IO_table for all supply subsectors and solves for final demand"""
# #creates empty IO table with all subsectors
# IO_table = pd.DataFrame(0,index = self.subsectors.keys(),
# columns = self.subsectors.keys(),dtype=float)
# #writes IO table flows from database
# for column in self.subsectors.keys():
# for index in self.subsectors.keys():
# flow = util.sql_read_table('EnergyFlows', 'efficiency',subsector_ID=column, input_ID=index)
# if flow == []:
# pass
# else:
# IO_table.set_value(index,column,
# util.sql_read_table('EnergyFlows', 'efficiency',
# subsector_ID=column, input_ID=index))
# IO_array = np.matrix(IO_table.values)
# identity_matrix = np.matrix(np.eye(len(IO_array), len(IO_array)))
# #creates demand dataframe and sets it from database
# demand = pd.DataFrame(0,index=self.subsectors.keys(),columns=['value'],dtype=float)
# for s in self.subsectors.keys():
# demand.set_value(s,['value'],util.sql_read_table('Subsectors', 'demand',ID=s))
# #solves IO matrix and stores it in dataframe
# total_demand = pd.DataFrame(np.linalg.solve(identity_matrix - IO_array, demand),
# index=self.subsectors.keys(),columns=['value'])
# #sets supply attributes of IO_table and demand
# setattr(self,'IO_table',IO_table)
# setattr(self,'demand',total_demand)
#
# def link_subsector_input_shapes(self):
# """calculate shapes of subsector inputs and outputs"""
# #loop through all subsectors
# for ID in [i for i in self.subsectors.keys() if self.subsectors[i].input_shape == True]:
# #checks if the subsector has an endogenous input shape (i.e. load)
# #sets the initial list of subsectors to solve as ID
# subs = [ID]
# #while there is still a subsector to be solved that subsector has input subsectors
# while len(subs) and len(self.subsectors[ID].input_subsectors):
# for ID in subs:
# #loop though the subsector's input_subsectors
# for next_ID in self.subsectors[ID].input_subsectors:
# if self.subsectors[next_ID].input_shape!=True:
# #if the input_subsector doesn't have an endogenous input_shape then
# #we have to solve for the share of the active output subsector that is contributed to the input subsector shape
# #ex. how much of distribution delivery demand is fed by the transmission delivery subsector
# flow = self.subsectors[ID].input_shape_data * self.IO_table.loc[next_ID,ID]/self.IO_table[ID].sum()*self.IO_table.loc[next_ID,ID]
# self.subsectors[next_ID].input_shape_data += flow
# #remove the current subsector from the input subsector's list of unsolved output_subsectors
# self.subsectors[next_ID].unsolved_output_subsectors.remove(ID)
# else:
# #remove the current subsector from the input subsector's list of unsolved output_subsectors
# self.subsectors[next_ID].unsolved_output_subsectors.remove(ID)
# #if the subsector has an endogenous input_shape then skip it
# pass
# continuation_subs = []
# #if the subsector has remaining unsolved_output_subsectors, we have to break the loop
# #by not adding the next_id to the continuation_subs list
# if len(self.subsectors[next_ID].unsolved_output_subsectors):
# pass
# else:
# continuation_subs.append(next_ID)
# #subs is replaced with the list of continuation_subs, which continues movement through the matrix
# subs = continuation_subs
# #checks if the subsector has an endogenous output shape (i.e. solar pv gen shape)
#
# def link_subsector_output_shapes(self):
# for ID in self.subsectors.keys():
# if self.subsectors[ID].output_shape == True:
# #sets the initial list of subsectors to solve as ID
# subs = [ID]
# continuation_subs = []
# #while there is still a subsector to be solved that has output subsectors
# while len(subs) and len(self.subsectors[ID].output_subsectors):
# for ID in subs:
# #loop though the subsector's output_subsectors
# for next_ID in self.subsectors[ID].output_subsectors:
# if self.subsectors[next_ID].output_shape!=True:
# #if the output_subsector doesn't have an endogenous output_shape then
# #we have to solve for the share of the active input subsector that is contributed to the output subsector shape
# #ex. how much of transmission delivery supply is fed to the distribution delivery subsector
# output_share =(self.subsectors[ID].output_shape_data/self.subsectors[ID].output_shape_data.sum())
# output_share.fillna(0,inplace=True)
# flow = (self.subsectors[next_ID].demand * self.IO_table.loc[ID,next_ID]
# * output_share)
# self.subsectors[next_ID].output_shape_data += flow
# #remove the current input subsector from the output subsector's list of unsolved input_subsectors
# self.subsectors[next_ID].unsolved_input_subsectors.remove(ID)
# else:
# #remove the current input subsector from the output subsector's list of unsolved input_subsectors
# self.subsectors[next_ID].unsolved_input_subsectors.remove(ID)
# #if the subsector has an endogenous output_shape then skip it
# pass
# #if the subsector has remaining unsolved_input_subsectors, we have to break the loop
# #by not adding the next_id to the continuation_subs list
# if len(self.subsectors[next_ID].unsolved_input_subsectors):
# pass
# else:
# continuation_subs.append(next_ID)
# subs = continuation_subs
#
#
# def add_subsector_shapes(self):
# for ID in self.subsectors.keys():
# self.add_subsector_shape(ID)
#
# def add_subsector_shape(self,ID):
# subsector = self.subsectors[ID]
# #creates default blank and flat shapes
# blank_shape= pd.DataFrame(0,np.arange(1,25,1),
# columns = ['value'])
# flat_shape= pd.DataFrame(1/24.0,np.arange(1,25,1),
# columns = ['value'],dtype=float)
# #determines demand of subsector
# demand = self.demand.loc[ID,'value']
# subsector.demand = demand
# #if the subsector has a specified shape, shape is taken drom database
# if ID in util.sql_read_table('SubsectorShapes','ID'):
# shape = pd.DataFrame(util.sql_read_table('SubsectorShapes','value',ID=ID),
# index = util.sql_read_table('SubsectorShapes','time',ID=ID),
# columns = ['value'])
# #creates uni-directional input or demand shape for consumption subsectors
# if subsector.type=='Consumption':
# subsector.input_shape_data = shape.copy() * demand
# subsector.input_shape = True
# subsector.output_shape_data = blank_shape.copy()
# #creates bi-directional input/output shapes for all other subsectors
# else:
# subsector.input_shape_data = shape.copy() * demand
# subsector.output_shape_data = shape.copy() * demand
# subsector.input_shape = True
# subsector.output_shape = True
# #if subsector is linked to a dispatch, it must have a base shape for calculating its
# #input/output signal before it has been dispatched. In this case, it can be either
# #flat (i.e. hydro dispatch), none (i.e. thermal dispatch), or exogenous (i.e. load dispatch)
# elif ID in util.sql_read_table('DispatchSubsectors','subsector_ID'):
# shape_type = util.sql_read_table('DispatchSubsectors','base_shape',subsector_ID=ID)
# if shape_type == 'exogenous':
# shape = pd.DataFrame(util.sql_read_table('SubsectorShapes','value',ID=ID),
# index = util.sql_read_table('SubsectorShapes','time',ID=ID),
# columns = ['value'])
# elif shape_type == 'flat':
# shape = flat_shape.copy()
# else:
# shape = blank_shape.copy()
# if subsector.type=='Consumption':
# subsector.input_shape_data= shape.copy()*demand
# subsector.input_shape = True
# subsector.output_shape_data = blank_shape.copy()
# else:
# subsector.input_shape_data = shape.copy() * demand
# subsector.output_shape_data = shape.copy() * demand
# subsector.input_shape = True
# subsector.output_shape = True
# else:
# if subsector.type=='Consumption':
# subsector.input_shape_data,subsector.output_shape_data = flat_shape.copy()*demand,blank_shape.copy()*demand
# subsector.input_shape=True
# else:
# subsector.input_shape_data,subsector.output_shape_data = blank_shape.copy(), blank_shape.copy()
# def add_subsectors_io(self):
# for ID in self.subsectors.keys():
# self.add_subsector_io(ID)
# def add_subsector_io(self,ID):
# for sub in self.subsectors.keys():
# if self.IO_table.loc[ID,sub]!= 0:
# self.subsectors[ID].output_subsectors.append(sub)
# self.subsectors[ID].unsolved_output_subsectors.append(sub)
# elif self.IO_table.loc[sub,ID]!=0:
# self.subsectors[ID].input_subsectors.append(sub)
# self.subsectors[ID].unsolved_input_subsectors.append(sub) | {
"content_hash": "9f622b9f4011fdfffe2777de32ec4de7",
"timestamp": "",
"source": "github",
"line_count": 175,
"max_line_length": 208,
"avg_line_length": 65.31428571428572,
"alnum_prop": 0.5349956255468067,
"repo_name": "energyPATHWAYS/energyPATHWAYS",
"id": "5f5ee8fef7c09dd86b5ac52178fb528835cdb572",
"size": "11454",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "energyPATHWAYS/_obsolete/old_IO.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1073495"
}
],
"symlink_target": ""
} |
class ValidationError(Exception):
    """Raised when a value fails validation.

    Args:
        message: Optional human-readable description of the failure.
    """

    def __init__(self, message=None):
        # Forward the message to Exception so str(e) and e.args behave as
        # expected (the original never called the base initializer, leaving
        # str(e) empty). Only pass it through when present so the no-message
        # case still stringifies to ''.
        if message is None:
            super(ValidationError, self).__init__()
        else:
            super(ValidationError, self).__init__(message)
        self.message = message
| {
"content_hash": "94c190371316c9f742d3ad4a14fd36c7",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 35,
"avg_line_length": 32.333333333333336,
"alnum_prop": 0.711340206185567,
"repo_name": "sanjuro/RCJK",
"id": "02507c5a2c990c3132cb57beda0008e9b39a105c",
"size": "109",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "vendor/cleanliness/exception.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "26840"
},
{
"name": "Python",
"bytes": "1109105"
},
{
"name": "R",
"bytes": "1277"
},
{
"name": "Shell",
"bytes": "6923"
}
],
"symlink_target": ""
} |
import os
import shutil
import tempfile
import fiona
from . import fiona_dataset
from utils import get_compressed_file_wrapper
def read(
    fp, prop_map, filterer=None, source_filename=None, layer_name=None, merge_on=None
):
    """Read a GeoJSON file (optionally inside a zip archive) into a collection.

    :param fp: file-like object pointing at a .geojson/.json file or a
        compressed archive containing one
    :param prop_map: dictionary mapping source properties to output properties
    :param filterer: optional filter passed through to ``read_fiona``
    :param source_filename: filename to read, only applicable if fp is a
        zip file; when omitted, the archive is searched for exactly one
        ``.geojson`` member
    :param layer_name: unused; kept for interface compatibility
    :param merge_on: optional merge key passed through to ``read_fiona``
    :returns: the collection produced by ``fiona_dataset.read_fiona``
    :raises Exception: if the archive contains zero or multiple ``.geojson``
        members and ``source_filename`` was not given
    """
    filename = os.path.basename(fp.name)
    ext = os.path.splitext(filename)[1]
    unzip_dir = tempfile.mkdtemp()
    try:
        if ext == ".geojson" or ext == ".json":
            file_to_process = fp.name
        else:
            # Search the archive for a GeoJSON member, then extract it.
            geojson_name = source_filename
            zipped_file = get_compressed_file_wrapper(fp.name)
            try:
                if geojson_name is None:
                    for member in zipped_file.infolist():
                        member_ext = os.path.splitext(member.filename)[1]
                        if member_ext == ".geojson":
                            if geojson_name is not None:
                                raise Exception(
                                    "Found multiple GeoJSON files in zipfile")
                            geojson_name = member.filename
                if geojson_name is None:
                    raise Exception("Found 0 GeoJSON files in zipfile")
                zipped_file.extractall(unzip_dir)
            finally:
                # Close the archive even when the search/extract fails.
                zipped_file.close()
            file_to_process = os.path.join(unzip_dir, geojson_name)
        with fiona.open(file_to_process) as source:
            collection = fiona_dataset.read_fiona(
                source, prop_map, filterer, merge_on=merge_on
            )
    finally:
        # Always remove the scratch directory, even on error.
        shutil.rmtree(unzip_dir)
    return collection
| {
"content_hash": "9809d3860ab1db6216ac0a2c4c35508e",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 85,
"avg_line_length": 29.945454545454545,
"alnum_prop": 0.6211293260473588,
"repo_name": "OpenBounds/Processing",
"id": "2a0c7c8abc8462b1b4e8c1ca12e0187eab5fa082",
"size": "1647",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "adapters/geojson.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "50689"
}
],
"symlink_target": ""
} |
from gevent import monkey; monkey.patch_all()
import web
import os
from cerebro_model import CerebroModel
from experiment_runner import SimulationDataElement
import json
import pprint
import pymongo
from experiment_db import ExperimentDB
# Toggle MongoDB-backed persistence; checked by setup() and handed to
# CerebroModel by the page handlers below.
USE_MONGO = True
# web.py URL routing table: alternating (regex pattern, handler class name).
# NOTE(review): '/runCurrentExperiment' maps to 'runCurrentExperiment', but
# no class of that name is defined below -- looks like a stale route.
urls = (
    r'^/$', 'index',
    r'^/anomaly$','anomaly',
    r'^/anomaly/setThreshold$','setThreshold',
    r'^/loadDescriptionFile$', 'loadDescriptionFile',
    r'^/runCurrentExperiment$', 'runCurrentExperiment',
    r'^/stopCurrentExperiment$', 'stopCurrentExperiment',
    r'^/setPredictedField$', 'setPredictedField',
    r'^/getPredictions$', 'getPredictions',
    r'^/setModelParams$', 'setModelParams',
    r'^/runExperiment$', 'runExperiment',
    r'^/createDataset$', 'createDataset',
    r'^/saveDataset$', 'saveDataset',
    r'^/saveDescription$', 'saveDescriptionFile',
    r'^/getDataAtTime$', 'getDataAtTime',
    # Managing Experiments
    r'^/experiment/rename$', 'setExperimentName',
    r'^/experiment/list$', 'ExperimentList',
    r'^/experiment/load$', 'loadExperiment',
    r'^/experiment/delete$', 'deleteExperiment'
)
# Template renderer rooted at templates/ (handlers below read template files
# directly, so this appears unused in this module).
render = web.template.render("templates/")
# Use web.py's debug error page for unhandled exceptions.
web.webapi.internalerror = web.debugerror
class index:
    """GET /: serve the main UI page and select the Cerebro storage backend."""
    def GET(self):
        # with-block guarantees the file is closed even if read() raises
        # (the original leaked the handle on error).
        with open("templates/index.html") as f:
            s = f.read()
        CerebroModel.USE_MONGO = USE_MONGO
        return s
class anomaly:
    """GET /anomaly: serve the anomaly-detection UI page."""
    def GET(self):
        # with-block guarantees the file is closed even if read() raises
        # (the original leaked the handle on error).
        with open("templates/index_anomaly.html") as f:
            s = f.read()
        CerebroModel.USE_MONGO = USE_MONGO
        return s
class getEngineState:
    """POST /getEngineState: report the console options available to the UI."""
    def POST(self):
        # Expose the simulation record's field names as console options.
        return json.dumps({'consoleOptions': SimulationDataElement._fields})
class setPredictedField:
    """POST /setPredictedField: choose which field the model predicts."""
    def POST(self):
        predicted = web.input()['fieldname']
        CerebroModel.get().setPredictedField(fieldname=predicted)
        return ""
class setModelParams:
    """POST /setModelParams: replace the current model parameter dict."""
    def POST(self):
        cerebro = CerebroModel.get()
        # SECURITY: eval() executes arbitrary Python supplied by the client.
        # Tolerable only for a local dev tool; prefer ast.literal_eval or
        # json.loads if this is ever exposed beyond localhost.
        params = eval(web.input()['params'])
        cerebro.setModelParams(params)
        return
class runExperiment:
    """POST /runExperiment: run the current experiment under its default name."""
    def POST(self):
        experiment_type = web.input()["type"]
        cerebro = CerebroModel.get()
        # Reset to the default name so results aren't saved over a named run.
        cerebro.name = cerebro.default_name
        return json.dumps(cerebro.runCurrentExperiment(experiment_type))
class loadExperiment:
    """POST /experiment/load: re-run a previously saved experiment by name."""
    def POST(self):
        request = web.input()
        cerebro = CerebroModel.get()
        cerebro.name = request["name"]
        # Trailing True asks the model to load saved state rather than rerun.
        results = cerebro.runCurrentExperiment(request["type"], True)
        return json.dumps(results)
class deleteExperiment:
    """POST /experiment/delete: remove a saved experiment by name."""
    def POST(self):
        experiment_name = web.input()["name"]
        result = ExperimentDB.delete(experiment_name)
        return json.dumps(result)
class setThreshold:
    """POST /anomaly/setThreshold: update the anomaly classifier threshold."""
    def POST(self):
        threshold = web.input()["threshold"]
        result = CerebroModel.get().setClassifierThreshold(threshold)
        return json.dumps(result)
class stopCurrentExperiment:
    """POST /stopCurrentExperiment: abort the running experiment."""
    def POST(self):
        CerebroModel.get().stopCurrentExperiment()
        return ""
class getPredictions:
    """POST /getPredictions: return the model's latest predictions as JSON."""
    def POST(self):
        payload = json.dumps(CerebroModel.get().getLatestPredictions())
        web.header("Content-Type", "application/json")
        return payload
class getDataAtTime:
    """POST /getDataAtTime: report model state at a specific timestep."""
    def POST(self):
        request_params = dict(web.input())
        data = CerebroModel.get().getDataAtTime(request_params)
        web.header("Content-Type", "application/json")
        return json.dumps(data)
class loadDescriptionFile:
    """POST /loadDescriptionFile: load a dataset/model from a description.py."""
    def POST(self):
        params = web.input()
        cerebro = CerebroModel.get()
        cerebro.loadDescriptionFile(
            descriptionFile=params["experimentFile"],
            subDescriptionFile=params["subExperimentFile"])
        # Echo the resulting model parameters back for display in the UI.
        return pprint.pformat(cerebro.getCurrentModelParams()['modelParams'])
class createDataset:
    """POST /createDataset: build a procedural dataset from a function body."""
    def POST(self):
        request = web.input()
        cerebro = CerebroModel.get()
        cerebro.createProceduralDataset(request["text"],
                                        int(request["iterations"]))
        # Echo the resulting model parameters back for display in the UI.
        return pprint.pformat(cerebro.getCurrentModelParams()['modelParams'])
class saveDataset:
    """GET /saveDataset: download the current dataset as a CSV attachment."""
    def GET(self):
        # FIXME: serves the csv as a text attachment so the browser offers
        # "save file as".
        csv_text = CerebroModel.get().getDatasetText()
        web.header("Content-Type", "text/plain")
        web.header('Content-Disposition', "attachment; filename=data.csv")
        return csv_text
class saveDescriptionFile:
    """GET /saveDescription: download the description.py as an attachment."""
    def GET(self):
        # FIXME: serves the description as a text attachment so the browser
        # offers "save file as".
        description_text = CerebroModel.get().getDescriptionText()
        web.header("Content-Type", "text/plain")
        web.header('Content-Disposition', "attachment; filename=description.py")
        return description_text
# Managing Experiments
class setExperimentName:
    """POST /experiment/rename: persist the current experiment under a name."""
    def POST(self):
        new_name = web.input()["name"]
        result = CerebroModel.get().setExperimentName(new_name)
        return json.dumps(result)
class ExperimentList:
    """GET /experiment/list: enumerate saved experiments as JSON."""
    def GET(self):
        experiments = ExperimentDB.list()
        return json.dumps(experiments)
# WSGI application mapping the url table onto the handler classes above.
app = web.application(urls, globals())
def setup():
    """Ensure a local MongoDB instance is running when USE_MONGO is set.

    Probes the default MongoDB port; on connection failure, creates the
    data directory (~/nta/mongodb/) if needed and spawns a mongod process.
    """
    if USE_MONGO:
        import pymongo
        from subprocess import Popen
        # NOTE(review): `import subprocess` is unused here.
        import subprocess
        try:
            # Connection attempt is only a liveness probe; result unused.
            conn = pymongo.Connection()
        except pymongo.errors.AutoReconnect:
            print "MongoDB not running. Starting..."
            dbPath = os.path.expanduser('~/nta/mongodb/')
            if not os.path.exists(dbPath):
                print 'Directory for the MongoDB files does not exist. Creating ...'
                os.makedirs(dbPath)
            # shell=True so the single-string command is parsed by the shell.
            pid = Popen(["mongod --dbpath ~/nta/mongodb/"], shell=True).pid
            print "MongoDB running with process id", pid
# Script entry point: ensure MongoDB availability, then start the web server.
if __name__ == "__main__":
    setup()
    app.run()
| {
"content_hash": "b90346bf5ed511c4441ccbec57d56cff",
"timestamp": "",
"source": "github",
"line_count": 216,
"max_line_length": 80,
"avg_line_length": 27.50462962962963,
"alnum_prop": 0.6887729338495203,
"repo_name": "numenta/nupic.cerebro",
"id": "ba5eb72f0140ad57f506b0b24567fb3bf51e8855",
"size": "7060",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cerebro.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4365"
},
{
"name": "HTML",
"bytes": "12205"
},
{
"name": "JavaScript",
"bytes": "283905"
},
{
"name": "Python",
"bytes": "59022"
}
],
"symlink_target": ""
} |
"""
Pyfranca lexer tests.
"""
import unittest
from pyfranca import Lexer
class BaseTestCase(unittest.TestCase):
    """Shared helper base for lexer test cases."""
    @staticmethod
    def _tokenize(data):
        """Run *data* through a fresh Lexer and return the token list."""
        return Lexer().tokenize_data(data)
class TestCheckRegularExpressions(BaseTestCase):
    """Test regular expressions used by the lexer."""
    # Real-number literal bases shared by the float/double/real tests; the
    # concrete inputs/expectations append the type suffix ("f", "d", none).
    _REAL_BASES = [
        "1.1", "-2.2", "3.3e3", "-4.4e4", "5.5e-5",
        "-6.6e-6", "0.00001", "-0.000002", "1e4", "-1e4",
        ".1", "-.2", ".3e3", "-.4e4", ".5e-5",
        "-.6e-6", ".00001", "-.000002",
    ]

    def _assert_tokens(self, data, expected):
        """Tokenize *data* and check the leading (type, value) pairs."""
        tokens = self._tokenize(data)
        for position, (expected_type, expected_value) in enumerate(expected):
            self.assertEqual(tokens[position].type, expected_type)
            self.assertEqual(tokens[position].value, expected_value)

    def _assert_no_token_of_type(self, data, token_type):
        """Tokenize *data* and check that no token has *token_type*."""
        for token in self._tokenize(data):
            self.assertNotEqual(token.type, token_type)

    def _assert_real_tokens(self, suffix):
        """Check REAL_VAL tokenization of all real literals with *suffix*."""
        # NOTE: the template reproduces the original test data exactly,
        # including spots where two literals are separated only by the next
        # literal's sign instead of a newline.
        data = ("1.1{0}\n-2.2{0}\n3.3e3{0}\n-4.4e4{0}\n5.5e-5{0}"
                "-6.6e-6{0}\n0.00001{0}\n-0.000002{0}\n1e4{0}\n-1e4{0}"
                ".1{0}\n-.2{0}\n.3e3{0}\n-.4e4{0}\n.5e-5{0}"
                "-.6e-6{0}\n.00001{0}\n-.000002{0}").format(suffix)
        expected = [("REAL_VAL", base + suffix) for base in self._REAL_BASES]
        self._assert_tokens(data, expected)

    def test_integer_valid_syntax(self):
        """Decimal, hexadecimal and binary literals produce typed tokens."""
        self._assert_tokens(
            "1234\n2345\n0x1234\n0X56789\n0xabcdef\n0XABCDEF\n0b10\n0B101",
            [("INTEGER_VAL", 1234),
             ("INTEGER_VAL", 2345),
             ("HEXADECIMAL_VAL", 0x1234),
             ("HEXADECIMAL_VAL", 0x56789),
             ("HEXADECIMAL_VAL", 0xabcdef),
             ("HEXADECIMAL_VAL", 0xABCDEF),
             ("BINARY_VAL", 0b10),
             ("BINARY_VAL", 0b101)])

    def test_string_valid_syntax(self):
        """String literals, including embedded newlines, produce STRING_VAL."""
        self._assert_tokens("\"This is a string\"",
                            [("STRING_VAL", "This is a string")])
        self._assert_tokens(
            "\"This is a string \n with an newline\"",
            [("STRING_VAL", "This is a string \n with an newline")])

    def test_boolean_valid_syntax(self):
        """true/false produce BOOLEAN_VAL tokens."""
        self._assert_tokens("true\nfalse",
                            [("BOOLEAN_VAL", True), ("BOOLEAN_VAL", False)])

    def test_integerinvalid_syntax(self):
        """Malformed hexadecimal input must not produce HEXADECIMAL_VAL."""
        self._assert_no_token_of_type("0xgabcdefg", "HEXADECIMAL_VAL")

    def test_booleaninvalid_syntax(self):
        """Identifiers merely containing true/false are not BOOLEAN_VAL."""
        self._assert_no_token_of_type("istrue\nisfalse", "BOOLEAN_VAL")

    def test_float_valid_syntax(self):
        """Float literals (suffix "f") produce REAL_VAL tokens."""
        self._assert_real_tokens("f")

    def test_double_valid_syntax(self):
        """Double literals (suffix "d") produce REAL_VAL tokens."""
        self._assert_real_tokens("d")

    def test_real_valid_syntax(self):
        """Unsuffixed real literals produce REAL_VAL tokens."""
        self._assert_real_tokens("")

    def test_doublefloat_invalid_syntax(self):
        """Text containing .f-like fragments must not produce REAL_VAL."""
        self._assert_no_token_of_type("""
        package org.franca.examples
        0ef .ef -1ef ef ed .ed
        }
        """, "REAL_VAL")

    def test_type_valid_syntax(self):
        """`Boolean` is recognized as a type keyword in a const declaration."""
        self._assert_tokens(
            "const Boolean b1 = true",
            [("CONST", 'const'), ("BOOLEAN", 'Boolean'), ("ID", 'b1'),
             ("=", '='), ("BOOLEAN_VAL", True)])

    def test_type_invalid_syntax(self):
        """Lower-case `boolean` lexes as a plain identifier, not a keyword."""
        self._assert_tokens(
            "const boolean b1 = true",
            [("CONST", 'const'), ("ID", 'boolean'), ("ID", 'b1'),
             ("=", '='), ("BOOLEAN_VAL", True)])
| {
"content_hash": "2aaf424c1d443a6e5f15eba9bf572a20",
"timestamp": "",
"source": "github",
"line_count": 303,
"max_line_length": 103,
"avg_line_length": 44.71617161716171,
"alnum_prop": 0.5977562919772677,
"repo_name": "zayfod/pyfranca",
"id": "633895f6be3b4ecc8f55c5aea28b6e6dfaa22dec",
"size": "13549",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pyfranca/tests/test_franca_lexer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "154739"
}
],
"symlink_target": ""
} |
from S3 import S3 # NOQA
def config(parser):
    """Register the S3 uploader's command-line options on *parser*.

    All options live under the ``upload.s3.`` namespace; the parser is
    returned so calls can be chained.
    """
    parser.add_argument("--upload.s3.region", dest="upload.s3.region", default="us-east-1", type=str,
                        help="S3 Uploader AWS region to connect to (default: us-east-1)")
    parser.add_argument("--upload.s3.access_key", dest="upload.s3.access_key", type=str,
                        help="S3 Uploader AWS Access Key (required for S3 upload)")
    parser.add_argument("--upload.s3.secret_key", dest="upload.s3.secret_key", type=str,
                        help="S3 Uploader AWS Secret Key (required for S3 upload)")
    parser.add_argument("--upload.s3.bucket_name", dest="upload.s3.bucket_name", type=str,
                        help="S3 Uploader destination bucket name")
    parser.add_argument("--upload.s3.skip_bucket_validation", dest="upload.s3.skip_bucket_validation", default=False,
                        action="store_true",
                        help="S3 Upload will check upfront if the bucket exists. Skip this check if bucket "
                             "permissions don't allow access to the bucket's root. (default: false)")
    parser.add_argument("--upload.s3.bucket_prefix", dest="upload.s3.bucket_prefix", type=str,
                        help="S3 Uploader destination bucket path prefix")
    parser.add_argument("--upload.s3.bucket_explicit_key", dest="upload.s3.bucket_explicit_key", type=str,
                        help="S3 Uploader explicit storage key within the S3 bucket")
    parser.add_argument("--upload.s3.chunk_size_mb", dest="upload.s3.chunk_size_mb", default=50, type=int,
                        help="S3 Uploader upload chunk size, in megabytes (default: 50)")
    parser.add_argument("--upload.s3.target_mb_per_second", dest="upload.s3.target_mb_per_second", default=None,
                        type=int, help="S3 Uploader target bandwidth in MB/s per upload thread. (default: unlimited)")
    # NOTE(review): default=True with action="store_false" means passing
    # --upload.s3.secure *disables* SSL even though the flag name reads like
    # it would enable it -- confirm the intended CLI semantics.
    parser.add_argument("--upload.s3.secure", dest="upload.s3.secure", default=True, action="store_false",
                        help="S3 Uploader connect over SSL (default: true)")
    parser.add_argument("--upload.s3.acl", dest="upload.s3.acl", default=None, type=str,
                        help="S3 Uploader ACL associated with objects (default: none)")
    return parser
| {
"content_hash": "11f8c4a514b11bd02c9b635b5995415e",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 118,
"avg_line_length": 78.96551724137932,
"alnum_prop": 0.6283842794759825,
"repo_name": "Percona-Lab/mongodb_consistent_backup",
"id": "3c7b1d5fdb6908d1ec334d2358b22fe62520350a",
"size": "2290",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "mongodb_consistent_backup/Upload/S3/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "707"
},
{
"name": "JavaScript",
"bytes": "536"
},
{
"name": "Makefile",
"bytes": "5715"
},
{
"name": "Python",
"bytes": "235278"
},
{
"name": "Shell",
"bytes": "7177"
}
],
"symlink_target": ""
} |
import os
import string
import textwrap
import six
from six.moves.configparser import ConfigParser
from swift.common.utils import (
config_true_value, SWIFT_CONF_FILE, whataremyips, list_from_csv)
from swift.common.ring import Ring, RingData
from swift.common.utils import quorum_size
from swift.common.exceptions import RingLoadError
from pyeclib.ec_iface import ECDriver, ECDriverError, VALID_EC_TYPES
# Name reserved for the implicit pre-storage-policy policy (index 0).
LEGACY_POLICY_NAME = 'Policy-0'
# Characters allowed in policy names (names travel inside HTTP headers).
VALID_CHARS = '-' + string.ascii_letters + string.digits
# Policy type identifiers; replication is the default type.
DEFAULT_POLICY_TYPE = REPL_POLICY = 'replication'
EC_POLICY = 'erasure_coding'
# Default erasure-coding object segment size: 1 MiB.
DEFAULT_EC_OBJECT_SEGMENT_SIZE = 1048576
class BindPortsCache(object):
    """Cache of backend bind ports declared in the on-disk ring files.

    Ring files are re-read only when their mtime changes, so repeated
    calls stay cheap.
    """
    def __init__(self, swift_dir, bind_ip):
        # Directory holding the serialized <ring_name>.ring.gz files.
        self.swift_dir = swift_dir
        # Per-ring-file bookkeeping: last-seen mtime and extracted port set.
        self.mtimes_by_ring_path = {}
        self.portsets_by_ring_path = {}
        # All IPs this server answers on; used to pick out local devices.
        self.my_ips = set(whataremyips(bind_ip))
    def all_bind_ports_for_node(self):
        """
        Given an iterable of IP addresses identifying a storage backend server,
        return a set of all bind ports defined in all rings for this storage
        backend server.
        The caller is responsible for not calling this method (which performs
        at least a stat on all ring files) too frequently.
        """
        # NOTE: we don't worry about disappearing rings here because you can't
        # ever delete a storage policy.
        for policy in POLICIES:
            # NOTE: we must NOT use policy.load_ring to load the ring. Users
            # of this utility function will not need the actual ring data, just
            # the bind ports.
            #
            # This is duplicated with Ring.__init__ just a bit...
            serialized_path = os.path.join(self.swift_dir,
                                           policy.ring_name + '.ring.gz')
            try:
                new_mtime = os.path.getmtime(serialized_path)
            except OSError:
                # Ring file missing/unreadable; keep any cached data as-is.
                continue
            old_mtime = self.mtimes_by_ring_path.get(serialized_path)
            if not old_mtime or old_mtime != new_mtime:
                # Ring changed (or first sighting): extract ports of devices
                # whose IP belongs to this node.
                self.portsets_by_ring_path[serialized_path] = set(
                    dev['port']
                    for dev in RingData.load(serialized_path,
                                             metadata_only=True).devs
                    if dev and dev['ip'] in self.my_ips)
                self.mtimes_by_ring_path[serialized_path] = new_mtime
                # No "break" here so that the above line will update the
                # mtimes_by_ring_path entry for any ring that changes, not just
                # the first one we notice.
        # Return the requested set of ports from our (now-freshened) cache
        return six.moves.reduce(set.union,
                                self.portsets_by_ring_path.values(), set())
class PolicyError(ValueError):
    """Raised for invalid storage policy configuration or lookups."""
    def __init__(self, msg, index=None):
        # Append the offending policy index to the message when provided.
        if index is not None:
            msg = '%s, for index %r' % (msg, index)
        super(PolicyError, self).__init__(msg)
def _get_policy_string(base, policy_index):
if policy_index == 0 or policy_index is None:
return_string = base
else:
return_string = base + "-%d" % int(policy_index)
return return_string
def get_policy_string(base, policy_or_index):
    """
    Helper function to construct a string from a base and the policy.
    Used to encode the policy index into either a file name or a
    directory name by various modules.
    :param base: the base string
    :param policy_or_index: StoragePolicy instance, or an index
                            (string or int), if None the legacy
                            storage Policy-0 is assumed.
    :returns: base name with policy index added
    :raises: PolicyError if no policy exists with the given policy_index
    """
    if isinstance(policy_or_index, BaseStoragePolicy):
        policy = policy_or_index
    else:
        # Resolve the index through the global policy collection.
        policy = POLICIES.get_by_index(policy_or_index)
        if policy is None:
            raise PolicyError("Unknown policy", index=policy_or_index)
    return _get_policy_string(base, int(policy))
def split_policy_string(policy_string):
    """
    Helper function to convert a string representing a base and a
    policy. Used to decode the policy from either a file name or
    a directory name by various modules.
    :param policy_string: base name with policy index added
    :raises: PolicyError if given index does not map to a valid policy
    :returns: a tuple, in the form (base, policy) where base is the base
              string and policy is the StoragePolicy instance for the
              index encoded in the policy_string.
    """
    head, sep, tail = policy_string.rpartition('-')
    if sep:
        base, policy_index = head, tail
    else:
        # No '-' present: the whole string is the base, legacy policy.
        base, policy_index = policy_string, None
    policy = POLICIES.get_by_index(policy_index)
    # Round-trip check: re-encoding must reproduce the input exactly.
    if get_policy_string(base, policy) != policy_string:
        raise PolicyError("Unknown policy", index=policy_index)
    return base, policy
class BaseStoragePolicy(object):
"""
Represents a storage policy. Not meant to be instantiated directly;
implement a derived subclasses (e.g. StoragePolicy, ECStoragePolicy, etc)
or use :func:`~swift.common.storage_policy.reload_storage_policies` to
load POLICIES from ``swift.conf``.
The object_ring property is lazy loaded once the service's ``swift_dir``
is known via :meth:`~StoragePolicyCollection.get_object_ring`, but it may
be over-ridden via object_ring kwarg at create time for testing or
actively loaded with :meth:`~StoragePolicy.load_ring`.
"""
policy_type_to_policy_cls = {}
def __init__(self, idx, name='', is_default=False, is_deprecated=False,
object_ring=None, aliases=''):
# do not allow BaseStoragePolicy class to be instantiated directly
if type(self) == BaseStoragePolicy:
raise TypeError("Can't instantiate BaseStoragePolicy directly")
# policy parameter validation
try:
self.idx = int(idx)
except ValueError:
raise PolicyError('Invalid index', idx)
if self.idx < 0:
raise PolicyError('Invalid index', idx)
self.alias_list = []
self.add_name(name)
if aliases:
names_list = list_from_csv(aliases)
for alias in names_list:
if alias == name:
continue
self.add_name(alias)
self.is_deprecated = config_true_value(is_deprecated)
self.is_default = config_true_value(is_default)
if self.policy_type not in BaseStoragePolicy.policy_type_to_policy_cls:
raise PolicyError('Invalid type', self.policy_type)
if self.is_deprecated and self.is_default:
raise PolicyError('Deprecated policy can not be default. '
'Invalid config', self.idx)
self.ring_name = _get_policy_string('object', self.idx)
self.object_ring = object_ring
@property
def name(self):
return self.alias_list[0]
@name.setter
def name_setter(self, name):
self._validate_policy_name(name)
self.alias_list[0] = name
@property
def aliases(self):
return ", ".join(self.alias_list)
def __int__(self):
return self.idx
def __cmp__(self, other):
return cmp(self.idx, int(other))
def __repr__(self):
return ("%s(%d, %r, is_default=%s, "
"is_deprecated=%s, policy_type=%r)") % \
(self.__class__.__name__, self.idx, self.alias_list,
self.is_default, self.is_deprecated, self.policy_type)
@classmethod
def register(cls, policy_type):
"""
Decorator for Storage Policy implementations to register
their StoragePolicy class. This will also set the policy_type
attribute on the registered implementation.
"""
def register_wrapper(policy_cls):
if policy_type in cls.policy_type_to_policy_cls:
raise PolicyError(
'%r is already registered for the policy_type %r' % (
cls.policy_type_to_policy_cls[policy_type],
policy_type))
cls.policy_type_to_policy_cls[policy_type] = policy_cls
policy_cls.policy_type = policy_type
return policy_cls
return register_wrapper
@classmethod
def _config_options_map(cls):
"""
Map config option name to StoragePolicy parameter name.
"""
return {
'name': 'name',
'aliases': 'aliases',
'policy_type': 'policy_type',
'default': 'is_default',
'deprecated': 'is_deprecated',
}
@classmethod
def from_config(cls, policy_index, options):
config_to_policy_option_map = cls._config_options_map()
policy_options = {}
for config_option, value in options.items():
try:
policy_option = config_to_policy_option_map[config_option]
except KeyError:
raise PolicyError('Invalid option %r in '
'storage-policy section' % config_option,
index=policy_index)
policy_options[policy_option] = value
return cls(policy_index, **policy_options)
def get_info(self, config=False):
"""
Return the info dict and conf file options for this policy.
:param config: boolean, if True all config options are returned
"""
info = {}
for config_option, policy_attribute in \
self._config_options_map().items():
info[config_option] = getattr(self, policy_attribute)
if not config:
# remove some options for public consumption
if not self.is_default:
info.pop('default')
if not self.is_deprecated:
info.pop('deprecated')
info.pop('policy_type')
return info
def _validate_policy_name(self, name):
"""
Helper function to determine the validity of a policy name. Used
to check policy names before setting them.
:param name: a name string for a single policy name.
:raises: PolicyError if the policy name is invalid.
"""
if not name:
raise PolicyError('Invalid name %r' % name, self.idx)
# this is defensively restrictive, but could be expanded in the future
if not all(c in VALID_CHARS for c in name):
msg = 'Names are used as HTTP headers, and can not ' \
'reliably contain any characters not in %r. ' \
'Invalid name %r' % (VALID_CHARS, name)
raise PolicyError(msg, self.idx)
if name.upper() == LEGACY_POLICY_NAME.upper() and self.idx != 0:
msg = 'The name %s is reserved for policy index 0. ' \
'Invalid name %r' % (LEGACY_POLICY_NAME, name)
raise PolicyError(msg, self.idx)
if name.upper() in (existing_name.upper() for existing_name
in self.alias_list):
msg = 'The name %s is already assigned to this policy.' % name
raise PolicyError(msg, self.idx)
def add_name(self, name):
"""
Adds an alias name to the storage policy. Shouldn't be called
directly from the storage policy but instead through the
storage policy collection class, so lookups by name resolve
correctly.
:param name: a new alias for the storage policy
"""
self._validate_policy_name(name)
self.alias_list.append(name)
def remove_name(self, name):
"""
Removes an alias name from the storage policy. Shouldn't be called
directly from the storage policy but instead through the storage
policy collection class, so lookups by name resolve correctly. If
the name removed is the primary name then the next available alias
will be adopted as the new primary name.
:param name: a name assigned to the storage policy
"""
if name not in self.alias_list:
raise PolicyError("%s is not a name assigned to policy %s"
% (name, self.idx))
if len(self.alias_list) == 1:
raise PolicyError("Cannot remove only name %s from policy %s. "
"Policies must have at least one name."
% (name, self.idx))
else:
self.alias_list.remove(name)
def change_primary_name(self, name):
"""
Changes the primary/default name of the policy to a specified name.
:param name: a string name to replace the current primary name.
"""
if name == self.name:
return
elif name in self.alias_list:
self.remove_name(name)
else:
self._validate_policy_name(name)
self.alias_list.insert(0, name)
    def load_ring(self, swift_dir):
        """
        Load the ring for this policy immediately.

        No-op if the ring has already been loaded.

        :param swift_dir: path to rings
        """
        if self.object_ring:
            return
        self.object_ring = Ring(swift_dir, ring_name=self.ring_name)
    @property
    def quorum(self):
        """
        Number of successful backend requests needed for the proxy to
        consider the client request successful.

        Subclasses must override this; the base policy defines no quorum
        rule of its own.
        """
        raise NotImplementedError()
@BaseStoragePolicy.register(REPL_POLICY)
class StoragePolicy(BaseStoragePolicy):
    """
    Represents a storage policy of type 'replication'. Default storage policy
    class unless otherwise overridden from swift.conf.

    Not meant to be instantiated directly; use
    :func:`~swift.common.storage_policy.reload_storage_policies` to load
    POLICIES from ``swift.conf``.
    """

    @property
    def quorum(self):
        """
        Quorum concept in the replication case:
            floor(number of replica / 2) + 1

        :raises PolicyError: if the ring has not been loaded, since the
            replica count is only known from the ring
        """
        if not self.object_ring:
            raise PolicyError('Ring is not loaded')
        return quorum_size(self.object_ring.replica_count)
@BaseStoragePolicy.register(EC_POLICY)
class ECStoragePolicy(BaseStoragePolicy):
    """
    Represents a storage policy of type 'erasure_coding'.

    Not meant to be instantiated directly; use
    :func:`~swift.common.storage_policy.reload_storage_policies` to load
    POLICIES from ``swift.conf``.
    """

    def __init__(self, idx, name='', aliases='', is_default=False,
                 is_deprecated=False, object_ring=None,
                 ec_segment_size=DEFAULT_EC_OBJECT_SEGMENT_SIZE,
                 ec_type=None, ec_ndata=None, ec_nparity=None):
        super(ECStoragePolicy, self).__init__(
            idx=idx, name=name, aliases=aliases, is_default=is_default,
            is_deprecated=is_deprecated, object_ring=object_ring)

        # Validate erasure_coding policy specific members
        # ec_type is one of the EC implementations supported by PyEClib
        if ec_type is None:
            raise PolicyError('Missing ec_type')
        if ec_type not in VALID_EC_TYPES:
            raise PolicyError('Wrong ec_type %s for policy %s, should be one'
                              ' of "%s"' % (ec_type, self.name,
                                            ', '.join(VALID_EC_TYPES)))
        self._ec_type = ec_type

        # The three numeric config values share identical validation;
        # _parse_positive_int preserves the error messages of the
        # previously copy-pasted inline checks.
        # _ec_ndata: number of EC data fragments (property "ec_ndata")
        self._ec_ndata = self._parse_positive_int(
            ec_ndata, 'ec_num_data_fragments')
        # _ec_nparity: number of EC parity fragments (property "ec_nparity")
        self._ec_nparity = self._parse_positive_int(
            ec_nparity, 'ec_num_parity_fragments')
        # _ec_segment_size: encode segment unit size (property
        # "ec_segment_size")
        self._ec_segment_size = self._parse_positive_int(
            ec_segment_size, 'ec_object_segment_size')

        # Initialize PyECLib EC backend
        try:
            self.pyeclib_driver = \
                ECDriver(k=self._ec_ndata, m=self._ec_nparity,
                         ec_type=self._ec_type)
        except ECDriverError as e:
            raise PolicyError("Error creating EC policy (%s)" % e,
                              index=self.idx)

        # quorum size in the EC case depends on the choice of EC scheme.
        self._ec_quorum_size = \
            self._ec_ndata + self.pyeclib_driver.min_parity_fragments_needed()

        # computed lazily by the fragment_size property
        self._fragment_size = None

    def _parse_positive_int(self, value, config_name):
        """
        Return ``value`` coerced to a positive int.

        :param value: raw configuration value (any type)
        :param config_name: swift.conf option name, used in the error message
        :raises PolicyError: if ``value`` is not an integer > 0
        """
        try:
            parsed = int(value)
            if parsed <= 0:
                raise ValueError
            return parsed
        except (TypeError, ValueError):
            raise PolicyError('Invalid %s %r' % (config_name, value),
                              index=self.idx)

    @property
    def ec_type(self):
        """Name of the PyECLib EC backend in use."""
        return self._ec_type

    @property
    def ec_ndata(self):
        """Number of EC data fragments."""
        return self._ec_ndata

    @property
    def ec_nparity(self):
        """Number of EC parity fragments."""
        return self._ec_nparity

    @property
    def ec_segment_size(self):
        """Segment unit size used when encoding an object."""
        return self._ec_segment_size

    @property
    def fragment_size(self):
        """
        Maximum length of a fragment, including header.

        NB: a fragment archive is a sequence of 0 or more max-length
        fragments followed by one possibly-shorter fragment.
        """
        # Technically pyeclib's get_segment_info signature calls for
        # (data_len, segment_size) but on a ranged GET we don't know the
        # ec-content-length header before we need to compute where in the
        # object we should request to align with the fragment size. So we
        # tell pyeclib a lie - from its perspective, as long as data_len >=
        # segment_size it'll give us the answer we want. From our
        # perspective, because we only use this answer to calculate the
        # *minimum* size we should read from an object body even if data_len <
        # segment_size we'll still only read *the whole one and only last
        # fragment* and pass that into pyeclib who will know what to do with
        # it just as it always does when the last fragment is < fragment_size.
        if self._fragment_size is None:
            self._fragment_size = self.pyeclib_driver.get_segment_info(
                self.ec_segment_size, self.ec_segment_size)['fragment_size']
        return self._fragment_size

    @property
    def ec_scheme_description(self):
        """
        This short hand form of the important parts of the ec schema is stored
        in Object System Metadata on the EC Fragment Archives for debugging.
        """
        return "%s %d+%d" % (self._ec_type, self._ec_ndata, self._ec_nparity)

    def __repr__(self):
        return ("%s, EC config(ec_type=%s, ec_segment_size=%d, "
                "ec_ndata=%d, ec_nparity=%d)") % \
            (super(ECStoragePolicy, self).__repr__(), self.ec_type,
             self.ec_segment_size, self.ec_ndata, self.ec_nparity)

    @classmethod
    def _config_options_map(cls):
        """Map swift.conf option names to constructor keyword names."""
        options = super(ECStoragePolicy, cls)._config_options_map()
        options.update({
            'ec_type': 'ec_type',
            'ec_object_segment_size': 'ec_segment_size',
            'ec_num_data_fragments': 'ec_ndata',
            'ec_num_parity_fragments': 'ec_nparity',
        })
        return options

    def get_info(self, config=False):
        """
        Return the info dict for this policy; the EC internals are
        stripped unless full config info was requested.
        """
        info = super(ECStoragePolicy, self).get_info(config=config)
        if not config:
            info.pop('ec_object_segment_size')
            info.pop('ec_num_data_fragments')
            info.pop('ec_num_parity_fragments')
            info.pop('ec_type')
        return info

    @property
    def quorum(self):
        """
        Number of successful backend requests needed for the proxy to consider
        the client request successful.

        The quorum size for EC policies defines the minimum number
        of data + parity elements required to be able to guarantee
        the desired fault tolerance, which is the number of data
        elements supplemented by the minimum number of parity
        elements required by the chosen erasure coding scheme.

        For example, for Reed-Solomon, the minimum number parity
        elements required is 1, and thus the quorum_size requirement
        is ec_ndata + 1.

        Given the number of parity elements required is not the same
        for every erasure coding scheme, consult PyECLib for
        min_parity_fragments_needed()
        """
        return self._ec_quorum_size

    def load_ring(self, swift_dir):
        """
        Load the ring for this policy immediately.

        :param swift_dir: path to rings
        """
        if self.object_ring:
            return

        def validate_ring_data(ring_data):
            """
            EC specific validation

            Replica count check - we need _at_least_ (#data + #parity)
            replicas configured. Also if the replica count is larger than
            exactly that number there's a non-zero risk of error for code
            that is considering the number of nodes in the primary list
            from the ring.
            """
            nodes_configured = len(ring_data._replica2part2dev_id)
            if nodes_configured != (self.ec_ndata + self.ec_nparity):
                raise RingLoadError(
                    'EC ring for policy %s needs to be configured with '
                    'exactly %d replicas. Got %d.' % (
                        self.name, self.ec_ndata + self.ec_nparity,
                        nodes_configured))

        self.object_ring = Ring(
            swift_dir, ring_name=self.ring_name,
            validation_hook=validate_ring_data)
class StoragePolicyCollection(object):
    """
    This class represents the collection of valid storage policies for the
    cluster and is instantiated as :class:`StoragePolicy` objects are added to
    the collection when ``swift.conf`` is parsed by
    :func:`parse_storage_policies`.

    When a StoragePolicyCollection is created, the following validation
    is enforced:

    * If a policy with index 0 is not declared and no other policies defined,
      Swift will create one
    * The policy index must be a non-negative integer
    * If no policy is declared as the default and no other policies are
      defined, the policy with index 0 is set as the default
    * Policy indexes must be unique
    * Policy names are required
    * Policy names are case insensitive
    * Policy names must contain only letters, digits or a dash
    * Policy names must be unique
    * The policy name 'Policy-0' can only be used for the policy with index 0
    * If any policies are defined, exactly one policy must be declared default
    * Deprecated policies can not be declared the default
    """

    def __init__(self, pols):
        """
        :param pols: list of policies to validate and index
        """
        # primary policy; assigned during validation (or defaulted there)
        self.default = []
        # upper-cased name/alias -> policy
        self.by_name = {}
        # int index -> policy
        self.by_index = {}
        self._validate_policies(pols)

    def _add_policy(self, policy):
        """
        Add pre-validated policies to internal indexes.
        """
        for name in policy.alias_list:
            self.by_name[name.upper()] = policy
        self.by_index[int(policy)] = policy

    def __repr__(self):
        return (textwrap.dedent("""
    StoragePolicyCollection([
        %s
    ])
    """) % ',\n    '.join(repr(p) for p in self)).strip()

    def __len__(self):
        return len(self.by_index)

    def __getitem__(self, key):
        return self.by_index[key]

    def __iter__(self):
        return iter(self.by_index.values())

    def _validate_policies(self, policies):
        """
        Validate and index the given policies, enforcing the rules
        documented on the class.

        :param policies: list of policies
        :raises PolicyError: on duplicate indexes/names/defaults, a missing
            policy 0 amid other policies, or no enabled policy
        """
        for policy in policies:
            if int(policy) in self.by_index:
                raise PolicyError('Duplicate index %s conflicts with %s' % (
                    policy, self.get_by_index(int(policy))))
            for name in policy.alias_list:
                if name.upper() in self.by_name:
                    raise PolicyError('Duplicate name %s conflicts with %s' % (
                        policy, self.get_by_name(name)))
            if policy.is_default:
                if not self.default:
                    self.default = policy
                else:
                    raise PolicyError(
                        'Duplicate default %s conflicts with %s' % (
                            policy, self.default))
            self._add_policy(policy)

        # If a 0 policy wasn't explicitly given, or nothing was
        # provided, create the 0 policy now
        if 0 not in self.by_index:
            if len(self) != 0:
                raise PolicyError('You must specify a storage policy '
                                  'section for policy index 0 in order '
                                  'to define multiple policies')
            self._add_policy(StoragePolicy(0, name=LEGACY_POLICY_NAME))

        # at least one policy must be enabled
        enabled_policies = [p for p in self if not p.is_deprecated]
        if not enabled_policies:
            raise PolicyError("Unable to find policy that's not deprecated!")

        # if needed, specify default
        if not self.default:
            if len(self) > 1:
                raise PolicyError("Unable to find default policy")
            self.default = self[0]
            self.default.is_default = True

    def get_by_name(self, name):
        """
        Find a storage policy by its name.

        :param name: name of the policy
        :returns: storage policy, or None
        """
        return self.by_name.get(name.upper())

    def get_by_index(self, index):
        """
        Find a storage policy by its index.

        An index of None will be treated as 0.

        :param index: numeric index of the storage policy
        :returns: storage policy, or None if no such policy
        """
        # makes it easier for callers to just pass in a header value
        if index in ('', None):
            index = 0
        else:
            try:
                index = int(index)
            except ValueError:
                return None
        return self.by_index.get(index)

    @property
    def legacy(self):
        """The policy used for pre-policy (index 0) data."""
        return self.get_by_index(None)

    def get_object_ring(self, policy_idx, swift_dir):
        """
        Get the ring object to use to handle a request based on its policy.

        An index of None will be treated as 0.

        :param policy_idx: policy index as defined in swift.conf
        :param swift_dir: swift_dir used by the caller
        :returns: appropriate ring object
        :raises PolicyError: if no policy exists for ``policy_idx``
        """
        policy = self.get_by_index(policy_idx)
        if not policy:
            raise PolicyError("No policy with index %s" % policy_idx)
        if not policy.object_ring:
            policy.load_ring(swift_dir)
        return policy.object_ring

    def get_policy_info(self):
        """
        Build info about policies for the /info endpoint

        :returns: list of dicts containing relevant policy information
        """
        policy_info = []
        for pol in self:
            # delete from /info if deprecated
            if pol.is_deprecated:
                continue
            policy_entry = pol.get_info()
            policy_info.append(policy_entry)
        return policy_info

    def add_policy_alias(self, policy_index, *aliases):
        """
        Adds a new name or names to a policy

        :param policy_index: index of a policy in this policy collection.
        :param aliases: arbitrary number of string policy names to add.
        :raises PolicyError: if an alias is already in use by any policy
        """
        policy = self.get_by_index(policy_index)
        for alias in aliases:
            if alias.upper() in self.by_name:
                raise PolicyError('Duplicate name %s in use '
                                  'by policy %s' % (alias,
                                                    self.get_by_name(alias)))
            policy.add_name(alias)
            self.by_name[alias.upper()] = policy

    def remove_policy_alias(self, *aliases):
        """
        Removes a name or names from a policy. If the name removed is the
        primary name then the next available alias will be adopted
        as the new primary name.

        :param aliases: arbitrary number of existing policy names to remove.
        :raises PolicyError: if an alias is unknown or is a policy's only name
        """
        for alias in aliases:
            policy = self.get_by_name(alias)
            if not policy:
                raise PolicyError('No policy with name %s exists.' % alias)
            if len(policy.alias_list) == 1:
                raise PolicyError('Policy %s with name %s has only one name. '
                                  'Policies must have at least one name.' % (
                                      policy, alias))
            policy.remove_name(alias)
            del self.by_name[alias.upper()]

    def change_policy_primary_name(self, policy_index, new_name):
        """
        Changes the primary or default name of a policy. The new primary
        name can be an alias that already belongs to the policy or a
        completely new name.

        :param policy_index: index of a policy in this policy collection.
        :param new_name: a string name to set as the new default name.
        :raises PolicyError: if ``new_name`` belongs to another policy
        """
        policy = self.get_by_index(policy_index)
        name_taken = self.get_by_name(new_name)
        # if the name belongs to some other policy in the collection
        if name_taken and name_taken != policy:
            # reuse the lookup result instead of a second get_by_name call
            raise PolicyError('Other policy %s with name %s exists.' %
                              (name_taken.idx, new_name))
        policy.change_primary_name(new_name)
        self.by_name[new_name.upper()] = policy
def parse_storage_policies(conf):
    """
    Parse storage policies in ``swift.conf`` - note that validation
    is done when the :class:`StoragePolicyCollection` is instantiated.

    :param conf: ConfigParser parser object for swift.conf
    :returns: a :class:`StoragePolicyCollection` of the parsed policies
    """
    prefix = 'storage-policy:'
    policies = []
    for section in conf.sections():
        if not section.startswith(prefix):
            continue
        # everything after the first colon is the policy index
        index = section[len(prefix):]
        options = dict(conf.items(section))
        policy_type = options.pop('policy_type', DEFAULT_POLICY_TYPE)
        policy_cls = BaseStoragePolicy.policy_type_to_policy_cls[policy_type]
        policies.append(policy_cls.from_config(index, options))
    return StoragePolicyCollection(policies)
class StoragePolicySingleton(object):
    """
    An instance of this class is the primary interface to storage policies
    exposed as a module level global named ``POLICIES``. This global
    reference wraps ``_POLICIES`` which is normally instantiated by parsing
    ``swift.conf`` and will result in an instance of
    :class:`StoragePolicyCollection`.

    You should never patch this instance directly, instead patch the module
    level ``_POLICIES`` instance so that swift code which imported
    ``POLICIES`` directly will reference the patched
    :class:`StoragePolicyCollection`.
    """

    # NOTE: special methods are looked up on the type, bypassing
    # __getattribute__, so the dunders this proxy needs must be defined
    # explicitly even though __getattribute__ forwards everything else
    # to the current _POLICIES.

    def __iter__(self):
        return iter(_POLICIES)

    def __len__(self):
        return len(_POLICIES)

    def __getitem__(self, key):
        return _POLICIES[key]

    def __getattribute__(self, name):
        # forward all attribute access to the live collection
        return getattr(_POLICIES, name)

    def __repr__(self):
        return repr(_POLICIES)
def reload_storage_policies():
    """
    Reload POLICIES from ``swift.conf``.

    Replaces the module-level ``_POLICIES`` collection; exits the process
    with an error message if the configuration is invalid.
    """
    global _POLICIES
    parser = ConfigParser()
    parser.read(SWIFT_CONF_FILE)
    try:
        _POLICIES = parse_storage_policies(parser)
    except PolicyError as e:
        raise SystemExit('ERROR: Invalid Storage Policy Configuration '
                         'in %s (%s)' % (SWIFT_CONF_FILE, e))
# Parse swift.conf at import time and set up the module-level singleton;
# ``POLICIES`` proxies whatever ``_POLICIES`` currently references.
_POLICIES = None
reload_storage_policies()
POLICIES = StoragePolicySingleton()
| {
"content_hash": "49db54a8ac8a8fa1647a646ef1c15554",
"timestamp": "",
"source": "github",
"line_count": 873,
"max_line_length": 79,
"avg_line_length": 37.29896907216495,
"alnum_prop": 0.5927154351698298,
"repo_name": "prashanthpai/swift",
"id": "0714e51dab7d3947621c36d57445df9342914573",
"size": "33109",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "swift/common/storage_policy.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "6742828"
},
{
"name": "Shell",
"bytes": "1452"
}
],
"symlink_target": ""
} |
"""unit testing code for the SD file handling stuff
"""
import unittest,sys,os
from rdkit import RDConfig
from rdkit import Chem
import tempfile
from cStringIO import StringIO
class TestCase(unittest.TestCase):
    """Tests for SD file reading/writing with SDMolSupplier/SDWriter."""

    def setUp(self):
        # 200-molecule SD file (with properties) from the RDKit data dir
        self.fName = os.path.join(RDConfig.RDDataDir, 'NCI', 'first_200.props.sdf')

    def _readFile(self, fName):
        """ read a file's contents, making sure the handle is closed """
        with open(fName, 'r') as inF:
            return inF.read()

    def _tempSDFName(self):
        """ return the name of an (empty) temporary SD file; mkstemp avoids
        the filename race of the deprecated tempfile.mktemp """
        fd, fName = tempfile.mkstemp('.sdf')
        os.close(fd)
        return fName

    def _removeFile(self, fName):
        """ best-effort removal with one retry; the retry works around
        transient locking of just-written files (e.g. on Windows) """
        try:
            os.unlink(fName)
        except OSError:
            import time
            time.sleep(1)
            try:
                os.unlink(fName)
            except OSError:
                pass

    def _writeMols(self, supp, setProps=False):
        """ write the supplier's molecules to a temp SD file and return the
        file's contents; the temp file is removed before returning """
        outName = self._tempSDFName()
        writer = Chem.SDWriter(outName)
        m1 = supp.next()
        if setProps:
            writer.SetProps(m1.GetPropNames())
        # NOTE(review): the mol counts asserted by the callers match the
        # input, so iterating the supplier after next() presumably restarts
        # it from the first molecule -- confirm against SDMolSupplier docs.
        for m in supp:
            writer.write(m)
        writer.flush()
        writer = None
        outD = self._readFile(outName)
        self._removeFile(outName)
        return outD

    def _testReader(self):
        " tests reads using a file name "
        supp = Chem.SDMolSupplier(self.fName)
        for i in range(10):
            m = supp.next()
            assert m, 'read %d failed' % i
            assert m.GetNumAtoms(), 'no atoms in mol %d' % i
        i = 100
        m = supp[i - 1]
        assert m, 'read %d failed' % i
        assert m.GetNumAtoms(), 'no atoms in mol %d' % i
        l = len(supp)
        assert l == 200, 'bad supplier length: %d' % (l)
        # random access backwards after reaching the end
        i = 12
        m = supp[i - 1]
        assert m, 'back index %d failed' % i
        assert m.GetNumAtoms(), 'no atoms in mol %d' % i
        # indexing past the end must raise IndexError
        try:
            m = supp[201]
        except IndexError:
            fail = 1
        else:
            fail = 0
        assert fail, 'out of bound read did not fail'

    def test_Writer(self):
        " tests writes using a file name "
        inD = self._readFile(self.fName)
        supp = Chem.SDMolSupplier(self.fName)
        outD = self._writeMols(supp, setProps=True)
        assert inD.count('$$$$') == outD.count('$$$$'), 'bad nMols in output'

    def _testStreamRoundtrip(self):
        " write the mols out, then read them back from a stream "
        inD = self._readFile(self.fName)
        supp = Chem.SDMolSupplier(self.fName)
        outD = self._writeMols(supp)
        assert inD.count('$$$$') == outD.count('$$$$'), 'bad nMols in output'
        io = StringIO(outD)
        supp = Chem.SDMolSupplier(stream=io)
        outD2 = supp.Dump()
        assert outD2.count('$$$$') == len(supp), 'bad nMols in output'
        assert outD2.count('$$$$') == outD.count('$$$$'), 'bad nMols in output'
        assert outD2 == outD, 'bad outd'

    def _testLazyDataRoundtrip(self):
        # NOTE(review): SDMolSupplier is not imported at module level, so
        # this (disabled) test would raise NameError if re-enabled.
        inD = self._readFile(self.fName)
        supp = Chem.SDMolSupplier(self.fName)
        outD = self._writeMols(supp)
        assert inD.count('$$$$') == outD.count('$$$$'), 'bad nMols in output'
        supp = SDMolSupplier.LazySDMolSupplier(inD=outD)
        outD2 = supp.Dump()
        assert outD2.count('$$$$') == len(supp), 'bad nMols in output'
        assert outD2.count('$$$$') == outD.count('$$$$'), 'bad nMols in output'
        assert outD2 == outD, 'bad outd'

    def _testLazyIter(self):
        " tests lazy reads using the iterator interface "
        supp = SDMolSupplier.LazySDMolSupplier(fileN=self.fName)
        nDone = 0
        for mol in supp:
            # was '... %d' % i with i undefined here, which would raise
            # NameError instead of a useful message on failure
            assert mol, 'read %d failed' % nDone
            assert mol.GetNumAtoms(), 'no atoms in mol %d' % nDone
            nDone += 1
        assert nDone == 200, 'bad number of molecules: %d' % (nDone)
        l = len(supp)
        assert l == 200, 'bad supplier length: %d' % (l)
        i = 12
        m = supp[i - 1]
        assert m, 'back index %d failed' % i
        assert m.GetNumAtoms(), 'no atoms in mol %d' % i
        try:
            m = supp[201]
        except IndexError:
            fail = 1
        else:
            fail = 0
        assert fail, 'out of bound read did not fail'
# allow running this test module directly
if __name__ == '__main__':
    unittest.main()
| {
"content_hash": "3c783d922fc008b58f161644548c44bb",
"timestamp": "",
"source": "github",
"line_count": 157,
"max_line_length": 77,
"avg_line_length": 25.29299363057325,
"alnum_prop": 0.5948123898262403,
"repo_name": "rdkit/rdkit-orig",
"id": "47c835364d2801d7b31109af323bd04df58cc4db",
"size": "4271",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "rdkit/Chem/Suppliers/UnitTestSDMolSupplier.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "317825"
},
{
"name": "C#",
"bytes": "6745"
},
{
"name": "C++",
"bytes": "4903579"
},
{
"name": "FORTRAN",
"bytes": "7661"
},
{
"name": "Java",
"bytes": "232201"
},
{
"name": "JavaScript",
"bytes": "12734"
},
{
"name": "Objective-C",
"bytes": "26015"
},
{
"name": "Perl",
"bytes": "5919"
},
{
"name": "Prolog",
"bytes": "389"
},
{
"name": "Python",
"bytes": "2698794"
},
{
"name": "Shell",
"bytes": "10661"
}
],
"symlink_target": ""
} |
from typing import Any, List, Optional
import tensorflow as tf
import numpy as np
from yarll.memory.experiences_memory import ExperiencesMemory, Experience
from yarll.misc.utils import RunningMeanStd, normalize
class EnvRunner(object):
    """Environment runner using a policy: interacts with an environment and
    collects batches of experiences for training."""

    def __init__(self, env, policy, config: dict, normalize_states=False,
                 state_preprocessor=None, summary_writer=None) -> None:
        """
        :param env: gym-style environment (must provide reset/step and spec).
        :param policy: policy object providing choose_action, get_env_action
            and initial_features.
        :param config: overrides for the default runner configuration.
        :param normalize_states: normalize states using a running mean/std
            before handing them to the policy.
        :param state_preprocessor: optional callable applied to every raw
            state from the environment.
        :param summary_writer: optional TF summary writer for per-episode
            length/reward statistics.
        """
        super(EnvRunner, self).__init__()
        self.env = env
        self.policy = policy
        self.state: Optional[np.ndarray] = None
        self.features: Any = policy.initial_features
        # defaults; anything supplied in `config` overrides these
        self.config: dict = dict(
            batch_update="timesteps",
            episode_max_length=env.spec.max_episode_steps,
            timesteps_per_batch=10000,
            n_iter=100
        )
        self.episode_steps: int = 0
        self.episode_reward: float = 0.0
        self.n_episodes: int = 0
        self.config.update(config)
        self.state_preprocessor = state_preprocessor
        self.summary_writer = summary_writer
        # Normalize states before giving it as input to the network.
        # Mean and std are only updated at the end of `get_steps`.
        self.normalize_states = normalize_states
        if normalize_states:
            self.rms = RunningMeanStd(self.env.observation_space.shape)
        self.reset_env()

    def choose_action(self, state: np.ndarray):
        """Choose an action based on the current state in the environment."""
        return self.policy.choose_action(state, self.features)

    def normalize(self, state: np.ndarray) -> np.ndarray:
        """Normalize `state` using the current running mean and std."""
        return normalize(state, self.rms.mean, self.rms.std)

    def reset_env(self) -> None:
        """Reset the current environment and get the initial state"""
        self.state = self.env.reset()
        self.state = self.state if self.state_preprocessor is None else self.state_preprocessor(self.state)

    def step_env(self, action):
        """Execute an action in the current environment."""
        state, reward, done, info = self.env.step(self.policy.get_env_action(action))
        state = state if self.state_preprocessor is None else self.state_preprocessor(state)
        return state, reward, done, info

    def get_steps(self, n_steps: int, reset: bool = False, stop_at_trajectory_end: bool = True, render: bool = False) -> ExperiencesMemory:
        """
        Interact with the environment for up to `n_steps` steps and return
        the collected experiences.

        :param n_steps: maximum number of environment steps to take.
        :param reset: reset the environment (and trajectory) before starting.
        :param stop_at_trajectory_end: stop early when an episode ends.
        :param render: render the environment after every step.
        """
        if reset:
            self.reset_env()
            self.policy.new_trajectory()
        memory = ExperiencesMemory()
        for _ in range(n_steps):
            input_state = self.normalize(self.state) if self.normalize_states else self.state
            results = self.choose_action(input_state)
            action = results["action"]
            value = results.get("value", None)
            new_features = results.get("features", None)
            new_state, rew, done, _ = self.step_env(action)
            # store the *unnormalized* state; normalization of the whole
            # batch happens below once the running stats are updated
            memory.add(self.state, action, rew, value, terminal=done, features=self.features)
            self.state = new_state
            self.features = new_features
            self.episode_reward += rew
            self.episode_steps += 1
            if done or self.episode_steps >= self.config["episode_max_length"]:
                # episode finished: log stats and start a new trajectory
                if self.summary_writer is not None:
                    summary = tf.Summary()
                    summary.value.add(tag="env/Episode_length", simple_value=float(self.episode_steps))
                    summary.value.add(tag="env/Reward", simple_value=float(self.episode_reward))
                    self.summary_writer.add_summary(summary, self.n_episodes)
                    self.summary_writer.flush()
                self.episode_reward = 0.0  # keep the float type consistent
                self.episode_steps = 0
                self.n_episodes += 1
                self.reset_env()
                self.features = self.policy.initial_features
                self.policy.new_trajectory()
                # Decide whether to stop when the episode (=trajectory) is done
                # or to keep collecting until n_steps
                if stop_at_trajectory_end:
                    break
            if render:
                self.env.render()
        if self.normalize_states:
            # update running statistics, then normalize the stored states
            self.rms.add_values([exp.state for exp in memory.experiences])
            for i, exp in enumerate(memory.experiences):
                memory.experiences[i] = Experience(self.normalize(exp.state),
                                                   exp.action,
                                                   exp.reward,
                                                   exp.value,
                                                   exp.features,
                                                   exp.terminal)
        return memory

    def get_trajectory(self, stop_at_trajectory_end: bool = True, render: bool = False) -> ExperiencesMemory:
        """Collect experiences for (at most) one full episode."""
        return self.get_steps(self.config["episode_max_length"],
                              stop_at_trajectory_end=stop_at_trajectory_end,
                              render=render)

    def get_trajectories(self, stop_at_trajectory_end: bool = True, render: bool = False) -> List[ExperiencesMemory]:
        """Generate trajectories until a certain number of timesteps or trajectories."""
        use_timesteps = self.config["batch_update"] == "timesteps"
        trajectories = []
        timesteps_total = 0
        i = 0
        while (use_timesteps and timesteps_total < self.config["timesteps_per_batch"]) or \
                (not use_timesteps and i < self.config["trajectories_per_batch"]):
            i += 1
            trajectory = self.get_trajectory(stop_at_trajectory_end, render)
            trajectories.append(trajectory)
            timesteps_total += len(trajectory.rewards)
        return trajectories
| {
"content_hash": "2512833a4d65e0a63177a1565d7f9d5b",
"timestamp": "",
"source": "github",
"line_count": 119,
"max_line_length": 139,
"avg_line_length": 48.470588235294116,
"alnum_prop": 0.5884188626907073,
"repo_name": "arnomoonens/DeepRL",
"id": "26fb6f94dd9b37fb726b114fa26915c4c1a3b29f",
"size": "5791",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "yarll/agents/env_runner.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "6107"
},
{
"name": "Python",
"bytes": "236593"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    # Drops the per-user preference flag; presumably superseded by the
    # account-level field added in migration 0049 (see dependency below)
    # -- confirm against that migration before backporting.

    dependencies = [
        ('api', '0049_account_accept_friend_requests'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='userpreferences',
            name='accept_friend_requests',
        ),
    ]
| {
"content_hash": "982671db2fe0b20d2c906a269b67f549",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 55,
"avg_line_length": 21.235294117647058,
"alnum_prop": 0.6177285318559557,
"repo_name": "dburr/SchoolIdolAPI",
"id": "d878df559b888c8d87891787e98411688aedadf0",
"size": "385",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "api/migrations/0050_remove_userpreferences_accept_friend_requests.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "67801"
},
{
"name": "HTML",
"bytes": "473993"
},
{
"name": "JavaScript",
"bytes": "93928"
},
{
"name": "Python",
"bytes": "741798"
}
],
"symlink_target": ""
} |
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
"""
__author__ = 'Marko A. Rodriguez (http://markorodriguez.com)'
import datetime
import time
import json
import uuid
import math
from decimal import *
from mock import Mock
import six
from gremlin_python.statics import *
from gremlin_python.structure.graph import Vertex, Edge, Property, VertexProperty, Graph, Path
from gremlin_python.structure.io.graphsonV2d0 import GraphSONWriter, GraphSONReader, GraphSONUtil
import gremlin_python.structure.io.graphsonV2d0
from gremlin_python.process.traversal import P
from gremlin_python.process.strategies import SubgraphStrategy
from gremlin_python.process.graph_traversal import __
class TestGraphSONReader(object):
graphson_reader = GraphSONReader()
def test_number_input(self):
x = self.graphson_reader.readObject(json.dumps({
"@type": "gx:Byte",
"@value": 1
}))
assert isinstance(x, SingleByte)
assert 1 == x
x = self.graphson_reader.readObject(json.dumps({
"@type": "g:Int32",
"@value": 31
}))
assert isinstance(x, int)
assert 31 == x
##
x = self.graphson_reader.readObject(json.dumps({
"@type": "g:Int64",
"@value": 31
}))
assert isinstance(x, long)
assert long(31) == x
##
x = self.graphson_reader.readObject(json.dumps({
"@type": "g:Float",
"@value": 31.3
}))
assert isinstance(x, float)
assert 31.3 == x
##
x = self.graphson_reader.readObject(json.dumps({
"@type": "g:Double",
"@value": 31.2
}))
assert isinstance(x, float)
assert 31.2 == x
##
x = self.graphson_reader.readObject(json.dumps({
"@type": "g:Double",
"@value": "NaN"
}))
assert isinstance(x, float)
assert math.isnan(x)
##
x = self.graphson_reader.readObject(json.dumps({
"@type": "g:Double",
"@value": "Infinity"
}))
assert isinstance(x, float)
assert math.isinf(x) and x > 0
##
x = self.graphson_reader.readObject(json.dumps({
"@type": "g:Double",
"@value": "-Infinity"
}))
assert isinstance(x, float)
assert math.isinf(x) and x < 0
##
x = self.graphson_reader.readObject(json.dumps({
"@type": "gx:BigDecimal",
"@value": 31.2
}))
assert isinstance(x, Decimal)
assert Decimal(31.2) == x
##
x = self.graphson_reader.readObject(json.dumps({
"@type": "gx:BigDecimal",
"@value": 123456789987654321123456789987654321
}))
assert isinstance(x, Decimal)
assert Decimal('123456789987654321123456789987654321') == x
##
x = self.graphson_reader.readObject(json.dumps({
"@type": "gx:BigDecimal",
"@value": "NaN"
}))
assert isinstance(x, Decimal)
assert math.isnan(x)
##
x = self.graphson_reader.readObject(json.dumps({
"@type": "gx:BigDecimal",
"@value": "Infinity"
}))
assert isinstance(x, Decimal)
assert math.isinf(x) and x > 0
##
x = self.graphson_reader.readObject(json.dumps({
"@type": "gx:BigDecimal",
"@value": "-Infinity"
}))
assert isinstance(x, Decimal)
assert math.isinf(x) and x < 0
##
x = self.graphson_reader.readObject(json.dumps({
"@type": "gx:BigInteger",
"@value": 31
}))
assert isinstance(x, long)
assert 31 == x
##
x = self.graphson_reader.readObject(json.dumps({
"@type": "gx:BigInteger",
"@value": 123456789987654321123456789987654321
}))
assert isinstance(x, long)
assert 123456789987654321123456789987654321 == x
    def test_graph(self):
        """Deserialize graph elements (Vertex, VertexProperty, Edge,
        Property) from GraphSON 2.0 payloads."""
        # full vertex with edges and properties
        vertex = self.graphson_reader.readObject("""
        {"@type":"g:Vertex", "@value":{"id":{"@type":"g:Int32","@value":1},"label":"person","outE":{"created":[{"id":{"@type":"g:Int32","@value":9},"inV":{"@type":"g:Int32","@value":3},"properties":{"weight":{"@type":"g:Double","@value":0.4}}}],"knows":[{"id":{"@type":"g:Int32","@value":7},"inV":{"@type":"g:Int32","@value":2},"properties":{"weight":{"@type":"g:Double","@value":0.5}}},{"id":{"@type":"g:Int32","@value":8},"inV":{"@type":"g:Int32","@value":4},"properties":{"weight":{"@type":"g:Double","@value":1.0}}}]},"properties":{"name":[{"id":{"@type":"g:Int64","@value":0},"value":"marko"}],"age":[{"id":{"@type":"g:Int64","@value":1},"value":{"@type":"g:Int32","@value":29}}]}}}""")
        assert isinstance(vertex, Vertex)
        assert "person" == vertex.label
        assert 1 == vertex.id
        assert isinstance(vertex.id, int)
        assert vertex == Vertex(1)
        ## vertex with a non-integer id and no label (defaults to "vertex")
        vertex = self.graphson_reader.readObject("""
        {"@type":"g:Vertex", "@value":{"id":{"@type":"g:Float","@value":45.23}}}""")
        assert isinstance(vertex, Vertex)
        assert 45.23 == vertex.id
        assert isinstance(vertex.id, FloatType)
        assert "vertex" == vertex.label
        assert vertex == Vertex(45.23)
        ## vertex property carrying a reference to its owning vertex
        vertex_property = self.graphson_reader.readObject("""
        {"@type":"g:VertexProperty", "@value":{"id":"anId","label":"aKey","value":true,"vertex":{"@type":"g:Int32","@value":9}}}""")
        assert isinstance(vertex_property, VertexProperty)
        assert "anId" == vertex_property.id
        assert "aKey" == vertex_property.label
        assert vertex_property.value
        assert vertex_property.vertex == Vertex(9)
        ## vertex property without a vertex reference
        vertex_property = self.graphson_reader.readObject("""
        {"@type":"g:VertexProperty", "@value":{"id":{"@type":"g:Int32","@value":1},"label":"name","value":"marko"}}""")
        assert isinstance(vertex_property, VertexProperty)
        assert 1 == vertex_property.id
        assert "name" == vertex_property.label
        assert "marko" == vertex_property.value
        assert vertex_property.vertex is None
        ## edge; NOTE(review): the labels below ("xLabel"/"vertex") don't
        ## match the payload's "xLab", so equality here presumably compares
        ## ids only -- confirm against Vertex.__eq__.
        edge = self.graphson_reader.readObject("""
        {"@type":"g:Edge", "@value":{"id":{"@type":"g:Int64","@value":17},"label":"knows","inV":"x","outV":"y","inVLabel":"xLab","properties":{"aKey":"aValue","bKey":true}}}""")
        assert isinstance(edge, Edge)
        assert 17 == edge.id
        assert "knows" == edge.label
        assert edge.inV == Vertex("x", "xLabel")
        assert edge.outV == Vertex("y", "vertex")
        ## property attached to an edge element
        property = self.graphson_reader.readObject("""
        {"@type":"g:Property", "@value":{"key":"aKey","value":{"@type":"g:Int64","@value":17},"element":{"@type":"g:Edge","@value":{"id":{"@type":"g:Int64","@value":122},"label":"knows","inV":"x","outV":"y","inVLabel":"xLab"}}}}""")
        assert isinstance(property, Property)
        assert "aKey" == property.key
        assert 17 == property.value
        assert Edge(122, Vertex("x"), "knows", Vertex("y")) == property.element
    def test_path(self):
        """A g:Path payload deserializes to a Path supporting str(),
        positional indexing, label indexing, and len()."""
        path = self.graphson_reader.readObject(
            """{"@type":"g:Path","@value":{"labels":[["a"],["b","c"],[]],"objects":[{"@type":"g:Vertex","@value":{"id":{"@type":"g:Int32","@value":1},"label":"person","properties":{"name":[{"@type":"g:VertexProperty","@value":{"id":{"@type":"g:Int64","@value":0},"value":"marko","label":"name"}}],"age":[{"@type":"g:VertexProperty","@value":{"id":{"@type":"g:Int64","@value":1},"value":{"@type":"g:Int32","@value":29},"label":"age"}}]}}},{"@type":"g:Vertex","@value":{"id":{"@type":"g:Int32","@value":3},"label":"software","properties":{"name":[{"@type":"g:VertexProperty","@value":{"id":{"@type":"g:Int64","@value":4},"value":"lop","label":"name"}}],"lang":[{"@type":"g:VertexProperty","@value":{"id":{"@type":"g:Int64","@value":5},"value":"java","label":"lang"}}]}}},"lop"]}}"""
        )
        assert isinstance(path, Path)
        assert "path[v[1], v[3], lop]" == str(path)
        # Objects are addressable both by position and by step label.
        assert Vertex(1) == path[0]
        assert Vertex(1) == path["a"]
        assert "lop" == path[2]
        assert 3 == len(path)
    def test_custom_mapping(self):
        """Custom deserializers can extend or override the defaults without
        mutating the module-level deserializer registry."""
        # extended mapping
        class X(object):
            pass
        type_string = "test:Xtype"
        override_string = "g:Int64"
        serdes = Mock()
        reader = GraphSONReader(deserializer_map={type_string: serdes})
        assert type_string in reader.deserializers
        # base dicts are not modified
        assert type_string not in gremlin_python.structure.io.graphsonV2d0._deserializers
        x = X()
        o = reader.toObject({GraphSONUtil.TYPE_KEY: type_string, GraphSONUtil.VALUE_KEY: x})
        serdes.objectify.assert_called_once_with(x, reader)
        assert o is serdes.objectify()
        # overridden mapping: a custom entry for an existing type shadows the default
        type_string = "g:Int64"
        serdes = Mock()
        reader = GraphSONReader(deserializer_map={type_string: serdes, override_string: serdes})
        assert gremlin_python.structure.io.graphsonV2d0._deserializers[type_string] is not reader.deserializers[type_string]
        value = 3
        o = reader.toObject({GraphSONUtil.TYPE_KEY: type_string, GraphSONUtil.VALUE_KEY: value})
        serdes.objectify.assert_called_once_with(value, reader)
        assert o is serdes.objectify()
    def test_datetime(self):
        """g:Date (epoch milliseconds) deserializes to datetime.datetime."""
        expected = datetime.datetime(2016, 12, 14, 16, 14, 36, 295000)
        # Convert the expected local datetime to epoch seconds relative to 1970-01-01.
        pts = time.mktime(expected.timetuple()) + expected.microsecond / 1e6 - \
              (time.mktime(datetime.datetime(1970, 1, 1).timetuple()))
        ts = int(round(pts * 1000))
        dt = self.graphson_reader.readObject(json.dumps({"@type": "g:Date", "@value": ts}))
        assert isinstance(dt, datetime.datetime)
        # TINKERPOP-1848
        assert dt == expected
    def test_timestamp(self):
        """g:Timestamp (epoch milliseconds) deserializes to the timestamp type
        holding epoch seconds as a float."""
        dt = self.graphson_reader.readObject(json.dumps({"@type": "g:Timestamp", "@value": 1481750076295}))
        assert isinstance(dt, timestamp)
        assert float(dt) == 1481750076.295
    def test_duration(self):
        """gx:Duration (ISO-8601 duration string) deserializes to timedelta."""
        d = self.graphson_reader.readObject(json.dumps({"@type": "gx:Duration", "@value": "PT120H"}))
        assert isinstance(d, datetime.timedelta)
        assert d == datetime.timedelta(hours=120)
    def test_uuid(self):
        """g:UUID deserializes to uuid.UUID preserving the string form."""
        prop = self.graphson_reader.readObject(
            json.dumps({'@type': 'g:UUID', '@value': "41d2e28a-20a4-4ab0-b379-d810dede3786"}))
        assert isinstance(prop, uuid.UUID)
        assert str(prop) == '41d2e28a-20a4-4ab0-b379-d810dede3786'
    def test_metrics(self):
        """g:TraversalMetrics/g:Metrics payloads unwrap to plain dicts
        (the typed wrappers are stripped, values kept)."""
        prop = self.graphson_reader.readObject(
            json.dumps([{'@type': 'g:TraversalMetrics', '@value': {'dur': 1.468594, 'metrics': [
                {'@type': 'g:Metrics', '@value': {'dur': 1.380957, 'counts': {}, 'name': 'GraphStep(__.V())', 'annotations': {'percentDur': 94.03259171697556}, 'id': '4.0.0()'}},
                {'@type': 'g:Metrics', '@value': {'dur': 0.087637, 'counts': {}, 'name': 'ReferenceElementStep', 'annotations': {'percentDur': 5.967408283024444}, 'id': '3.0.0()'}}
            ]}}]))
        assert isinstance(prop, list)
        assert prop == [{'dur': 1.468594, 'metrics': [
            {'dur': 1.380957, 'counts': {}, 'name': 'GraphStep(__.V())', 'annotations': {'percentDur': 94.03259171697556}, 'id': '4.0.0()'},
            {'dur': 0.087637, 'counts': {}, 'name': 'ReferenceElementStep', 'annotations': {'percentDur': 5.967408283024444}, 'id': '3.0.0()'}
        ]}]
    def test_bytebuffer(self):
        """gx:ByteBuffer deserializes to the ByteBuffer type (base64 payload kept)."""
        bb = self.graphson_reader.readObject(
            json.dumps({"@type": "gx:ByteBuffer", "@value": "c29tZSBieXRlcyBmb3IgeW91"}))
        assert isinstance(bb, ByteBufferType)
        assert ByteBufferType("c29tZSBieXRlcyBmb3IgeW91", "utf8") == bb
    def test_char(self):
        """gx:Char deserializes to SingleChar comparable with a plain str."""
        c = self.graphson_reader.readObject(json.dumps({"@type": "gx:Char", "@value": "L"}))
        assert isinstance(c, SingleChar)
        assert chr(76) == c
class TestGraphSONWriter(object):
    """Unit tests for GraphSON V2d0 serialization (GraphSONWriter), plus a few
    writer->reader round trips."""
    # Shared across tests; both objects are stateless between calls.
    graphson_writer = GraphSONWriter()
    graphson_reader = GraphSONReader()
    def test_number_output(self):
        """Numbers get typed wrappers (g:Int64/g:Int32/g:Double); booleans
        serialize as bare JSON literals."""
        assert {"@type": "g:Int64", "@value": 2} == json.loads(self.graphson_writer.writeObject(long(2)))
        assert {"@type": "g:Int32", "@value": 1} == json.loads(self.graphson_writer.writeObject(1))
        assert {"@type": "g:Double", "@value": 3.2} == json.loads(self.graphson_writer.writeObject(3.2))
        assert """true""" == self.graphson_writer.writeObject(True)
    def test_numbers(self):
        """Extended numeric coverage: bytes, NaN/Infinity doubles, and
        BigDecimal/BigInteger for values beyond 64 bits."""
        assert {"@type": "gx:Byte", "@value": 1} == json.loads(self.graphson_writer.writeObject(int.__new__(SingleByte, 1)))
        assert {"@type": "g:Int64", "@value": 2} == json.loads(self.graphson_writer.writeObject(long(2)))
        assert {"@type": "g:Int32", "@value": 1} == json.loads(self.graphson_writer.writeObject(1))
        assert {"@type": "g:Double", "@value": 3.2} == json.loads(self.graphson_writer.writeObject(3.2))
        # Non-finite doubles are encoded as the strings NaN/Infinity/-Infinity.
        assert {"@type": "g:Double", "@value": "NaN"} == json.loads(self.graphson_writer.writeObject(float('nan')))
        assert {"@type": "g:Double", "@value": "Infinity"} == json.loads(self.graphson_writer.writeObject(float('inf')))
        assert {"@type": "g:Double", "@value": "-Infinity"} == json.loads(self.graphson_writer.writeObject(float('-inf')))
        assert {"@type": "gx:BigDecimal", "@value": "123456789987654321123456789987654321"} == json.loads(self.graphson_writer.writeObject(Decimal('123456789987654321123456789987654321')))
        assert {"@type": "gx:BigDecimal", "@value": "NaN"} == json.loads(self.graphson_writer.writeObject(Decimal('nan')))
        assert {"@type": "gx:BigDecimal", "@value": "Infinity"} == json.loads(self.graphson_writer.writeObject(Decimal('inf')))
        assert {"@type": "gx:BigDecimal", "@value": "-Infinity"} == json.loads(self.graphson_writer.writeObject(Decimal('-inf')))
        assert {"@type": "gx:BigInteger", "@value": "123456789987654321123456789987654321"} == json.loads(self.graphson_writer.writeObject(long(123456789987654321123456789987654321)))
        assert {"@type": "gx:BigInteger", "@value": "123456789987654321123456789987654321"} == json.loads(self.graphson_writer.writeObject(123456789987654321123456789987654321))
        assert """true""" == self.graphson_writer.writeObject(True)
    def test_P(self):
        """Predicates serialize to nested g:P structures; within() accepts a
        list or varargs equivalently."""
        result = {'@type': 'g:P',
                  '@value': {
                      'predicate': 'and',
                      'value': [{
                          '@type': 'g:P',
                          '@value': {
                              'predicate': 'or',
                              'value': [{
                                  '@type': 'g:P',
                                  '@value': {'predicate': 'lt', 'value': 'b'}
                              },
                                  {'@type': 'g:P', '@value': {'predicate': 'gt', 'value': 'c'}}
                              ]
                          }
                      },
                          {'@type': 'g:P', '@value': {'predicate': 'neq', 'value': 'd'}}]}}
        assert result == json.loads(
            self.graphson_writer.writeObject(P.lt("b").or_(P.gt("c")).and_(P.neq("d"))))
        result = {'@type': 'g:P', '@value': {'predicate':'within','value': [{"@type": "g:Int32", "@value": 1},{"@type": "g:Int32", "@value": 2}]}}
        assert result == json.loads(self.graphson_writer.writeObject(P.within([1, 2])))
        assert result == json.loads(self.graphson_writer.writeObject(P.within(1, 2)))
        result = {'@type': 'g:P', '@value': {'predicate':'within','value': [{"@type": "g:Int32", "@value": 1}]}}
        assert result == json.loads(self.graphson_writer.writeObject(P.within([1])))
        assert result == json.loads(self.graphson_writer.writeObject(P.within(1)))
    def test_strategies(self):
        """Traversal strategies serialize by name with optional bytecode config."""
        # we have a proxy model for now given that we don't want to have to have g:XXX all registered on the Gremlin traversal machine (yet)
        assert {"@type": "g:SubgraphStrategy", "@value": {}} == json.loads(
            self.graphson_writer.writeObject(SubgraphStrategy))
        assert {"@type": "g:SubgraphStrategy", "@value": {
            "vertices": {"@type": "g:Bytecode", "@value": {"step": [["has", "name", "marko"]]}}}} == json.loads(
            self.graphson_writer.writeObject(SubgraphStrategy(vertices=__.has("name", "marko"))))
    def test_graph(self):
        """Graph elements (Vertex/Edge/VertexProperty/Property) serialize with
        typed ids, and round-trip through the reader."""
        # TODO: this assert is not compatible with python 3 and now that we test with both 2 and 3 it fails
        assert {"@type": "g:Vertex", "@value": {"id": {"@type": "g:Int64", "@value": 12}, "label": "person"}} == json.loads(self.graphson_writer.writeObject(Vertex(long(12), "person")))
        assert {"@type": "g:Edge", "@value": {"id": {"@type": "g:Int32", "@value": 7},
                                              "outV": {"@type": "g:Int32", "@value": 0},
                                              "outVLabel": "person",
                                              "label": "knows",
                                              "inV": {"@type": "g:Int32", "@value": 1},
                                              "inVLabel": "dog"}} == json.loads(
            self.graphson_writer.writeObject(Edge(7, Vertex(0, "person"), "knows", Vertex(1, "dog"))))
        assert {"@type": "g:VertexProperty", "@value": {"id": "blah", "label": "keyA", "value": True,
                                                        "vertex": "stephen"}} == json.loads(
            self.graphson_writer.writeObject(VertexProperty("blah", "keyA", True, Vertex("stephen"))))
        assert {"@type": "g:Property",
                "@value": {"key": "name", "value": "marko", "element": {"@type": "g:VertexProperty",
                                                                       "@value": {
                                                                           "vertex": "vertexId",
                                                                           "id": {"@type": "g:Int32", "@value": 1234},
                                                                           "label": "aKey"}}}} == json.loads(
            self.graphson_writer.writeObject(
                Property("name", "marko", VertexProperty(1234, "aKey", 21345, Vertex("vertexId")))))
        # Writer->reader round trips preserve ids, labels, keys and values.
        vertex = self.graphson_reader.readObject(self.graphson_writer.writeObject(Vertex(1, "person")))
        assert 1 == vertex.id
        assert "person" == vertex.label
        edge = self.graphson_reader.readObject(
            self.graphson_writer.writeObject(Edge(3, Vertex(1, "person"), "knows", Vertex(2, "dog"))))
        assert "knows" == edge.label
        assert 3 == edge.id
        assert 1 == edge.outV.id
        assert 2 == edge.inV.id
        vertex_property = self.graphson_reader.readObject(
            self.graphson_writer.writeObject(VertexProperty(1, "age", 32, Vertex(1))))
        assert 1 == vertex_property.id
        assert "age" == vertex_property.key
        assert 32 == vertex_property.value
        property = self.graphson_reader.readObject(self.graphson_writer.writeObject(Property("age", 32.2, Edge(1,Vertex(2),"knows",Vertex(3)))))
        assert "age" == property.key
        assert 32.2 == property.value
    def test_custom_mapping(self):
        """Custom serializers can extend or override the defaults without
        mutating the module-level serializer registry."""
        # extended mapping
        class X(object):
            pass
        serdes = Mock()
        writer = GraphSONWriter(serializer_map={X: serdes})
        assert X in writer.serializers
        # base dicts are not modified
        assert X not in gremlin_python.structure.io.graphsonV2d0._serializers
        obj = X()
        d = writer.toDict(obj)
        serdes.dictify.assert_called_once_with(obj, writer)
        assert d is serdes.dictify()
        # overridden mapping
        serdes = Mock()
        writer = GraphSONWriter(serializer_map={int: serdes})
        assert gremlin_python.structure.io.graphsonV2d0._serializers[int] is not writer.serializers[int]
        value = 3
        d = writer.toDict(value)
        serdes.dictify.assert_called_once_with(value, writer)
        assert d is serdes.dictify()
    def test_write_long(self):
        """Plain ints map to g:Int32; the long helper maps to g:Int64."""
        mapping = self.graphson_writer.toDict(1)
        assert mapping['@type'] == 'g:Int32'
        assert mapping['@value'] == 1
        mapping = self.graphson_writer.toDict(long(1))
        assert mapping['@type'] == 'g:Int64'
        assert mapping['@value'] == 1
    def test_datetime(self):
        """datetime serializes to g:Date in epoch milliseconds."""
        expected = json.dumps({"@type": "g:Date", "@value": 1481750076295}, separators=(',', ':'))
        dt = datetime.datetime.utcfromtimestamp(1481750076295 / 1000.0)
        output = self.graphson_writer.writeObject(dt)
        assert expected == output
    def test_timestamp(self):
        """timestamp serializes to g:Timestamp in epoch milliseconds."""
        expected = json.dumps({"@type": "g:Timestamp", "@value": 1481750076295}, separators=(',', ':'))
        ts = timestamp(1481750076295 / 1000.0)
        output = self.graphson_writer.writeObject(ts)
        assert expected == output
    def test_duration(self):
        """timedelta serializes to gx:Duration as an ISO-8601 duration string."""
        expected = json.dumps({"@type": "gx:Duration", "@value": "P5D"}, separators=(',', ':'))
        d = datetime.timedelta(hours=120)
        output = self.graphson_writer.writeObject(d)
        assert expected == output
    def test_uuid(self):
        """uuid.UUID serializes to g:UUID with the canonical string form."""
        expected = json.dumps({'@type': 'g:UUID', '@value': "41d2e28a-20a4-4ab0-b379-d810dede3786"}, separators=(',', ':'))
        prop = uuid.UUID("41d2e28a-20a4-4ab0-b379-d810dede3786")
        output = self.graphson_writer.writeObject(prop)
        assert expected == output
    def test_bytebuffer(self):
        """ByteBuffer serializes to gx:ByteBuffer keeping the base64 payload."""
        expected = json.dumps({'@type': 'gx:ByteBuffer', '@value': 'c29tZSBieXRlcyBmb3IgeW91'}, separators=(',', ':'))
        bb = ByteBufferType("c29tZSBieXRlcyBmb3IgeW91", "utf8")
        output = self.graphson_writer.writeObject(bb)
        assert expected == output
    def test_char(self):
        """SingleChar serializes to gx:Char."""
        expected = json.dumps({'@type': 'gx:Char', '@value': 'L'}, separators=(',', ':'))
        c = str.__new__(SingleChar, chr(76))
        output = self.graphson_writer.writeObject(c)
        assert expected == output
class TestFunctionalGraphSONIO(object):
    """Functional IO tests.

    Each test writes a vertex property through a live remote connection
    (``remote_connection_v2`` fixture), reads it back, checks type and value
    round-tripping, and always drops the created vertex in ``finally``.
    """
    def test_timestamp(self, remote_connection_v2):
        """timestamp round-trips through the server unchanged."""
        g = Graph().traversal().withRemote(remote_connection_v2)
        ts = timestamp(1481750076295 / 1000)
        resp = g.addV('test_vertex').property('ts', ts)
        resp = resp.toList()
        vid = resp[0].id
        try:
            ts_prop = g.V(vid).properties('ts').toList()[0]
            assert isinstance(ts_prop.value, timestamp)
            assert ts_prop.value == ts
        except OSError:
            # Network failure is a test failure, not an error.
            assert False, "Error making request"
        finally:
            g.V(vid).drop().iterate()
    def test_datetime(self, remote_connection_v2):
        """datetime round-trips through the server unchanged."""
        g = Graph().traversal().withRemote(remote_connection_v2)
        dt = datetime.datetime.utcfromtimestamp(1481750076295 / 1000)
        resp = g.addV('test_vertex').property('dt', dt).toList()
        vid = resp[0].id
        try:
            dt_prop = g.V(vid).properties('dt').toList()[0]
            assert isinstance(dt_prop.value, datetime.datetime)
            assert dt_prop.value == dt
        except OSError:
            assert False, "Error making request"
        finally:
            g.V(vid).drop().iterate()
    def test_uuid(self, remote_connection_v2):
        """uuid.UUID round-trips through the server unchanged."""
        g = Graph().traversal().withRemote(remote_connection_v2)
        uid = uuid.UUID("41d2e28a-20a4-4ab0-b379-d810dede3786")
        resp = g.addV('test_vertex').property('uuid', uid).toList()
        vid = resp[0].id
        try:
            uid_prop = g.V(vid).properties('uuid').toList()[0]
            assert isinstance(uid_prop.value, uuid.UUID)
            assert uid_prop.value == uid
        except OSError:
            assert False, "Error making request"
        finally:
            g.V(vid).drop().iterate()
| {
"content_hash": "b9385c3af97fb5a60b0cbabd3b60b6f3",
"timestamp": "",
"source": "github",
"line_count": 518,
"max_line_length": 780,
"avg_line_length": 47.87644787644788,
"alnum_prop": 0.5668951612903226,
"repo_name": "pluradj/incubator-tinkerpop",
"id": "66c5ad1a98fd7e776d7e2c280438e2887286568a",
"size": "24800",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gremlin-python/src/main/jython/tests/structure/io/test_graphsonV2d0.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "4544"
},
{
"name": "Groovy",
"bytes": "369370"
},
{
"name": "Java",
"bytes": "6508842"
},
{
"name": "Python",
"bytes": "1481"
},
{
"name": "Shell",
"bytes": "24104"
}
],
"symlink_target": ""
} |
"""
django_localflavor_pt.forms
~~~~~~~~~~~~~~~~~~~~~~~~~~~
Contains PT-specific Django form helpers.
"""
from __future__ import unicode_literals
from .pt_regions import REGION_CHOICES
from django.core.validators import EMPTY_VALUES
from django.forms import ValidationError
from django.forms.fields import Field, RegexField, Select
from django.utils.encoding import smart_text
from django.utils.translation import ugettext_lazy as _
from re import compile as regex_compile, sub as regex_replace
# Citizen Card: 8 digits, an optional hyphen, then 4 check characters
# (first and last are digits, the middle two are alphanumeric).
CITIZEN_CARD_NUMBER_REGEX = regex_compile(r'^(\d{8})-?(\d[A-Z0-9]{2}\d)$')
# Phone numbers: 3 to 9 digits, optionally prefixed with '00351' or '+351'.
PHONE_NUMBER_REGEX = regex_compile(r'^((00|\+)351)?\d{3,9}$')
# Weights applied to the first ten digits when checking the NISS check digit.
SOCIAL_SECURITY_NUMBER_MULTIPLIERS = [29, 23, 19, 17, 13, 11, 7, 5, 3, 2]
# Social Security number (NISS): 11 digits starting with 1 or 2.
SOCIAL_SECURITY_NUMBER_REGEX = regex_compile(r'^[12]\d{10}$')
# Zip codes: XYYY-YYY where X is 1-9 and Y is any digit.
ZIP_CODE_REGEX = regex_compile(r'^[1-9]\d{3}-\d{3}$')
class PTCitizenCardNumberField(Field):
    """
    A field which validates Portuguese Citizen Card numbers (locally CC - 'Cartão do Cidadão').
    - Citizen Card numbers have the format XXXXXXXXXYYX or XXXXXXXX-XYYX (where X is a digit and Y is an alphanumeric character).
    - Each character is read as a base-36 value and checked with a mod-10
      doubling scheme (Luhn-style) over the whole encoded string.
    - The input may or may not contain a hyphen between the identity number
      and the check digits; the cleaned value is always hyphenated.
    - This field does NOT validate old ID card numbers (locally BI - 'Bilhete de Identidade').
    """
    default_error_messages = {
        'badchecksum': _('The specified value is not a valid Citizen Card number.'),
        'invalid': _('Citizen Card numbers have the format XXXXXXXXXYYX or XXXXXXXX-XYYX (where X is a digit and Y is an alphanumeric character).'),
    }
    def clean(self, value):
        """Validate *value* and return it normalized as 'XXXXXXXX-XYYX'."""
        super(PTCitizenCardNumberField, self).clean(value)
        if value in EMPTY_VALUES:
            return ''
        matched = CITIZEN_CARD_NUMBER_REGEX.match(value)
        if matched is None:
            raise ValidationError(self.error_messages['invalid'])
        number, checkdigits = matched.groups()
        # Every character (digit or letter) contributes its base-36 value.
        values = [int(char, 36) for char in number + checkdigits]
        total = sum(PTCitizenCardNumberField.compute(position, val)
                    for position, val in enumerate(values))
        if total % 10 != 0:
            raise ValidationError(self.error_messages['badchecksum'])
        return '{0}-{1}'.format(number, checkdigits)
    @staticmethod
    def compute(index, value):
        """Return the checksum contribution of *value* at position *index*:
        odd positions pass through, even positions are doubled with the
        subtract-9 digit-sum shortcut."""
        if index % 2:
            return value
        doubled = value * 2
        return doubled if doubled < 10 else doubled - 9
class PTPhoneNumberField(Field):
    """
    A field which validates Portuguese phone numbers.
    - Phone numbers have at least 3 and at most 9 digits and may optionally be prefixed with '00351' or '+351'.
    - The input string is allowed to contain spaces or dots (though they will be stripped).
    """
    default_error_messages = {
        'invalid': _('Phone numbers have at least 3 and at most 9 digits and may optionally be prefixed with \'00351\' or \'+351\'.'),
    }
    def clean(self, value):
        """Strip dot/space separators and validate; return the cleaned text.

        Returns '' for empty values; raises ValidationError otherwise.
        """
        super(PTPhoneNumberField, self).clean(value)
        if value in EMPTY_VALUES:
            return ''
        # Use a raw string: the original '(\.|\s)' relied on invalid string
        # escapes, which raise DeprecationWarning/SyntaxWarning on modern Python.
        value = regex_replace(r'(\.|\s)', '', smart_text(value))
        match = PHONE_NUMBER_REGEX.search(value)
        if not match:
            raise ValidationError(self.error_messages['invalid'])
        return '{0}'.format(value)
class PTRegionSelect(Select):
    """
    A select widget which uses a list of Portuguese regions as its choices.
    - Regions correspond to the Portuguese 'distritos' and 'regiões autónomas' as per ISO3166:2-PT.
    """
    def __init__(self, attrs=None):
        # Fixed choice list; callers may still pass widget attrs through.
        super(PTRegionSelect, self).__init__(attrs, choices=REGION_CHOICES)
class PTSocialSecurityNumberField(Field):
    """
    A field which validates Portuguese Social Security numbers (locally NISS - 'Número de Identificação na Segurança Social').
    - Social Security numbers must be in the format XYYYYYYYYYY (where X is either 1 or 2 and Y is any other digit).
    - The last digit is a check digit validated against a weighted sum of the
      first ten digits.
    """
    default_error_messages = {
        'badchecksum': _('The specified number is not a valid Social Security number.'),
        'invalid': _('Social Security numbers must be in the format XYYYYYYYYYY (where X is either 1 or 2 and Y is any other digit).'),
    }
    def clean(self, value):
        """Validate *value* and return it as an int ('' for empty input)."""
        super(PTSocialSecurityNumberField, self).clean(value)
        if value in EMPTY_VALUES:
            return ''
        if SOCIAL_SECURITY_NUMBER_REGEX.search(value) is None:
            raise ValidationError(self.error_messages['invalid'])
        # zip() stops after the ten multipliers, so the trailing check digit
        # is excluded from the weighted sum.
        weighted = sum(int(digit) * weight
                       for digit, weight in zip(value, SOCIAL_SECURITY_NUMBER_MULTIPLIERS))
        expected = 9 - weighted % 10
        if expected != int(value[-1]):
            raise ValidationError(self.error_messages['badchecksum'])
        return int(value)
class PTZipCodeField(RegexField):
    """
    A field which validates Portuguese zip codes.
    - Zip codes have the format XYYY-YYY (where X is a digit between 1 and 9 and Y is any other digit).
    """
    default_error_messages = {
        'invalid': _('Zip codes must be in the format XYYY-YYY (where X is a digit between 1 and 9 and Y is any other digit).'),
    }
    def __init__(self, max_length=None, min_length=None, *args, **kwargs):
        # All pattern checking is delegated to RegexField with the precompiled regex.
        super(PTZipCodeField, self).__init__(ZIP_CODE_REGEX, max_length, min_length, *args, **kwargs)
| {
"content_hash": "f78b2ccdffa93aaf4daf79778c363c4b",
"timestamp": "",
"source": "github",
"line_count": 156,
"max_line_length": 148,
"avg_line_length": 35.8525641025641,
"alnum_prop": 0.6551045950295011,
"repo_name": "M157q/django-localflavor",
"id": "75c900540a3b594e05ac4bf7669a7924ca21b52d",
"size": "5627",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "localflavor/pt/forms.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "766740"
}
],
"symlink_target": ""
} |
import logging
import json
import ast
from webob import Response
from ryu.base import app_manager
from ryu.controller import ofp_event
from ryu.controller import dpset
from ryu.controller.handler import MAIN_DISPATCHER
from ryu.controller.handler import set_ev_cls
from ryu.ofproto import ofproto_v1_0
from ryu.ofproto import ofproto_v1_2
from ryu.ofproto import ofproto_v1_3
from ryu.lib import ofctl_v1_0
from ryu.lib import ofctl_v1_2
from ryu.lib import ofctl_v1_3
from ryu.app.wsgi import ControllerBase, WSGIApplication
LOG = logging.getLogger('ryu.app.ofctl_rest')
# REST API
#
# Retrieve the switch stats
#
# get the list of all switches
# GET /stats/switches
#
# get the desc stats of the switch
# GET /stats/desc/<dpid>
#
# get flows stats of the switch
# GET /stats/flow/<dpid>
#
# get flows stats of the switch filtered by the fields
# POST /stats/flow/<dpid>
#
# get aggregate flows stats of the switch
# GET /stats/aggregateflow/<dpid>
#
# get aggregate flows stats of the switch filtered by the fields
# POST /stats/aggregateflow/<dpid>
#
# get ports stats of the switch
# GET /stats/port/<dpid>
#
# get queues stats of the switch
# GET /stats/queue/<dpid>
#
# get meter features stats of the switch
# GET /stats/meterfeatures/<dpid>
#
# get meter config stats of the switch
# GET /stats/meterconfig/<dpid>
#
# get meters stats of the switch
# GET /stats/meter/<dpid>
#
# get group features stats of the switch
# GET /stats/groupfeatures/<dpid>
#
# get groups desc stats of the switch
# GET /stats/groupdesc/<dpid>
#
# get groups stats of the switch
# GET /stats/group/<dpid>
#
# get ports description of the switch
# GET /stats/portdesc/<dpid>
# Update the switch stats
#
# add a flow entry
# POST /stats/flowentry/add
#
# modify all matching flow entries
# POST /stats/flowentry/modify
#
# modify flow entry strictly matching wildcards and priority
# POST /stats/flowentry/modify_strict
#
# delete all matching flow entries
# POST /stats/flowentry/delete
#
# delete flow entry strictly matching wildcards and priority
# POST /stats/flowentry/delete_strict
#
# delete all flow entries of the switch
# DELETE /stats/flowentry/clear/<dpid>
#
# add a meter entry
# POST /stats/meterentry/add
#
# modify a meter entry
# POST /stats/meterentry/modify
#
# delete a meter entry
# POST /stats/meterentry/delete
#
# add a group entry
# POST /stats/groupentry/add
#
# modify a group entry
# POST /stats/groupentry/modify
#
# delete a group entry
# POST /stats/groupentry/delete
#
# modify behavior of the physical port
# POST /stats/portdesc/modify
#
#
# send an experimenter message
# POST /stats/experimenter/<dpid>
class StatsController(ControllerBase):
    def __init__(self, req, link, data, **config):
        """Keep references to the shared dpset and waiters dict supplied by
        the WSGI application via *data*."""
        super(StatsController, self).__init__(req, link, data, **config)
        self.dpset = data['dpset']
        self.waiters = data['waiters']
def get_dpids(self, req, **_kwargs):
dps = self.dpset.dps.keys()
body = json.dumps(dps)
return Response(content_type='application/json', body=body)
    def get_desc_stats(self, req, dpid, **_kwargs):
        """Return the switch description stats as JSON (GET /stats/desc/<dpid>).

        404 if the datapath is unknown, 501 for an unsupported OF version.
        """
        dp = self.dpset.get(int(dpid))
        if dp is None:
            return Response(status=404)
        # Dispatch on the negotiated OpenFlow protocol version.
        if dp.ofproto.OFP_VERSION == ofproto_v1_0.OFP_VERSION:
            desc = ofctl_v1_0.get_desc_stats(dp, self.waiters)
        elif dp.ofproto.OFP_VERSION == ofproto_v1_2.OFP_VERSION:
            desc = ofctl_v1_2.get_desc_stats(dp, self.waiters)
        elif dp.ofproto.OFP_VERSION == ofproto_v1_3.OFP_VERSION:
            desc = ofctl_v1_3.get_desc_stats(dp, self.waiters)
        else:
            LOG.debug('Unsupported OF protocol')
            return Response(status=501)
        body = json.dumps(desc)
        return Response(content_type='application/json', body=body)
    def get_flow_stats(self, req, dpid, **_kwargs):
        """Return flow stats as JSON (GET/POST /stats/flow/<dpid>).

        A non-empty request body is parsed as a Python literal and used as a
        match filter. 400 on invalid body syntax, 404 on unknown dpid,
        501 for an unsupported OF version.
        """
        if req.body == '':
            flow = {}
        else:
            try:
                flow = ast.literal_eval(req.body)
            except SyntaxError:
                LOG.debug('invalid syntax %s', req.body)
                return Response(status=400)
        dp = self.dpset.get(int(dpid))
        if dp is None:
            return Response(status=404)
        if dp.ofproto.OFP_VERSION == ofproto_v1_0.OFP_VERSION:
            flows = ofctl_v1_0.get_flow_stats(dp, self.waiters, flow)
        elif dp.ofproto.OFP_VERSION == ofproto_v1_2.OFP_VERSION:
            flows = ofctl_v1_2.get_flow_stats(dp, self.waiters, flow)
        elif dp.ofproto.OFP_VERSION == ofproto_v1_3.OFP_VERSION:
            flows = ofctl_v1_3.get_flow_stats(dp, self.waiters, flow)
        else:
            LOG.debug('Unsupported OF protocol')
            return Response(status=501)
        body = json.dumps(flows)
        return Response(content_type='application/json', body=body)
    def get_aggregate_flow_stats(self, req, dpid, **_kwargs):
        """Return aggregate flow stats as JSON (GET/POST /stats/aggregateflow/<dpid>).

        A non-empty request body is parsed as a Python literal and used as a
        match filter. 400 on invalid body syntax, 404 on unknown dpid,
        501 for an unsupported OF version.
        """
        if req.body == '':
            flow = {}
        else:
            try:
                flow = ast.literal_eval(req.body)
            except SyntaxError:
                LOG.debug('invalid syntax %s', req.body)
                return Response(status=400)
        dp = self.dpset.get(int(dpid))
        if dp is None:
            return Response(status=404)
        if dp.ofproto.OFP_VERSION == ofproto_v1_0.OFP_VERSION:
            flows = ofctl_v1_0.get_aggregate_flow_stats(dp, self.waiters, flow)
        elif dp.ofproto.OFP_VERSION == ofproto_v1_2.OFP_VERSION:
            flows = ofctl_v1_2.get_aggregate_flow_stats(dp, self.waiters, flow)
        elif dp.ofproto.OFP_VERSION == ofproto_v1_3.OFP_VERSION:
            flows = ofctl_v1_3.get_aggregate_flow_stats(dp, self.waiters, flow)
        else:
            LOG.debug('Unsupported OF protocol')
            return Response(status=501)
        body = json.dumps(flows)
        return Response(content_type='application/json', body=body)
def get_port_stats(self, req, dpid, **_kwargs):
dp = self.dpset.get(int(dpid))
if dp is None:
return Response(status=404)
if dp.ofproto.OFP_VERSION == ofproto_v1_0.OFP_VERSION:
ports = ofctl_v1_0.get_port_stats(dp, self.waiters)
elif dp.ofproto.OFP_VERSION == ofproto_v1_2.OFP_VERSION:
ports = ofctl_v1_2.get_port_stats(dp, self.waiters)
elif dp.ofproto.OFP_VERSION == ofproto_v1_3.OFP_VERSION:
ports = ofctl_v1_3.get_port_stats(dp, self.waiters)
else:
LOG.debug('Unsupported OF protocol')
return Response(status=501)
body = json.dumps(ports)
return Response(content_type='application/json', body=body)
    def get_queue_stats(self, req, dpid, **_kwargs):
        """Return queue stats as JSON (GET /stats/queue/<dpid>).

        404 if the datapath is unknown, 501 for an unsupported OF version.
        """
        dp = self.dpset.get(int(dpid))
        if dp is None:
            return Response(status=404)
        if dp.ofproto.OFP_VERSION == ofproto_v1_0.OFP_VERSION:
            queues = ofctl_v1_0.get_queue_stats(dp, self.waiters)
        elif dp.ofproto.OFP_VERSION == ofproto_v1_2.OFP_VERSION:
            queues = ofctl_v1_2.get_queue_stats(dp, self.waiters)
        elif dp.ofproto.OFP_VERSION == ofproto_v1_3.OFP_VERSION:
            queues = ofctl_v1_3.get_queue_stats(dp, self.waiters)
        else:
            LOG.debug('Unsupported OF protocol')
            return Response(status=501)
        body = json.dumps(queues)
        return Response(content_type='application/json', body=body)
    def get_meter_features(self, req, dpid, **_kwargs):
        """Return meter feature stats as JSON (GET /stats/meterfeatures/<dpid>).

        Meters exist only in OF1.3: older versions get 501, as do unknown
        versions; unknown dpid gets 404.
        """
        dp = self.dpset.get(int(dpid))
        if dp is None:
            return Response(status=404)
        if dp.ofproto.OFP_VERSION == ofproto_v1_3.OFP_VERSION:
            meters = ofctl_v1_3.get_meter_features(dp, self.waiters)
        elif dp.ofproto.OFP_VERSION == ofproto_v1_0.OFP_VERSION or \
                dp.ofproto.OFP_VERSION == ofproto_v1_2.OFP_VERSION:
            LOG.debug('Request not supported in this OF protocol version')
            return Response(status=501)
        else:
            LOG.debug('Unsupported OF protocol')
            return Response(status=501)
        body = json.dumps(meters)
        return Response(content_type='application/json', body=body)
    def get_meter_config(self, req, dpid, **_kwargs):
        """Return meter config stats as JSON (GET /stats/meterconfig/<dpid>).

        OF1.3 only: OF1.0/1.2 and unknown versions get 501; unknown dpid 404.
        """
        dp = self.dpset.get(int(dpid))
        if dp is None:
            return Response(status=404)
        if dp.ofproto.OFP_VERSION == ofproto_v1_3.OFP_VERSION:
            meters = ofctl_v1_3.get_meter_config(dp, self.waiters)
        elif dp.ofproto.OFP_VERSION == ofproto_v1_0.OFP_VERSION or \
                dp.ofproto.OFP_VERSION == ofproto_v1_2.OFP_VERSION:
            LOG.debug('Request not supported in this OF protocol version')
            return Response(status=501)
        else:
            LOG.debug('Unsupported OF protocol')
            return Response(status=501)
        body = json.dumps(meters)
        return Response(content_type='application/json', body=body)
    def get_meter_stats(self, req, dpid, **_kwargs):
        """Return meter stats as JSON (GET /stats/meter/<dpid>).

        OF1.3 only: OF1.0/1.2 and unknown versions get 501; unknown dpid 404.
        """
        dp = self.dpset.get(int(dpid))
        if dp is None:
            return Response(status=404)
        if dp.ofproto.OFP_VERSION == ofproto_v1_3.OFP_VERSION:
            meters = ofctl_v1_3.get_meter_stats(dp, self.waiters)
        elif dp.ofproto.OFP_VERSION == ofproto_v1_0.OFP_VERSION or \
                dp.ofproto.OFP_VERSION == ofproto_v1_2.OFP_VERSION:
            LOG.debug('Request not supported in this OF protocol version')
            return Response(status=501)
        else:
            LOG.debug('Unsupported OF protocol')
            return Response(status=501)
        body = json.dumps(meters)
        return Response(content_type='application/json', body=body)
    def get_group_features(self, req, dpid, **_kwargs):
        """Return group feature stats as JSON (GET /stats/groupfeatures/<dpid>).

        Groups exist from OF1.2 onward: OF1.0 and unknown versions get 501;
        unknown dpid gets 404.
        """
        dp = self.dpset.get(int(dpid))
        if dp is None:
            return Response(status=404)
        if dp.ofproto.OFP_VERSION == ofproto_v1_2.OFP_VERSION:
            groups = ofctl_v1_2.get_group_features(dp, self.waiters)
        elif dp.ofproto.OFP_VERSION == ofproto_v1_3.OFP_VERSION:
            groups = ofctl_v1_3.get_group_features(dp, self.waiters)
        elif dp.ofproto.OFP_VERSION == ofproto_v1_0.OFP_VERSION:
            LOG.debug('Request not supported in this OF protocol version')
            return Response(status=501)
        else:
            LOG.debug('Unsupported OF protocol')
            return Response(status=501)
        body = json.dumps(groups)
        return Response(content_type='application/json', body=body)
    def get_group_desc(self, req, dpid, **_kwargs):
        """Return group description stats as JSON (GET /stats/groupdesc/<dpid>).

        Groups exist from OF1.2 onward: OF1.0 and unknown versions get 501;
        unknown dpid gets 404.
        """
        dp = self.dpset.get(int(dpid))
        if dp is None:
            return Response(status=404)
        if dp.ofproto.OFP_VERSION == ofproto_v1_2.OFP_VERSION:
            groups = ofctl_v1_2.get_group_desc(dp, self.waiters)
        elif dp.ofproto.OFP_VERSION == ofproto_v1_3.OFP_VERSION:
            groups = ofctl_v1_3.get_group_desc(dp, self.waiters)
        elif dp.ofproto.OFP_VERSION == ofproto_v1_0.OFP_VERSION:
            LOG.debug('Request not supported in this OF protocol version')
            return Response(status=501)
        else:
            LOG.debug('Unsupported OF protocol')
            return Response(status=501)
        body = json.dumps(groups)
        return Response(content_type='application/json', body=body)
    def get_group_stats(self, req, dpid, **_kwargs):
        """Return group stats as JSON (GET /stats/group/<dpid>).

        Groups exist from OF1.2 onward: OF1.0 and unknown versions get 501;
        unknown dpid gets 404.
        """
        dp = self.dpset.get(int(dpid))
        if dp is None:
            return Response(status=404)
        if dp.ofproto.OFP_VERSION == ofproto_v1_2.OFP_VERSION:
            groups = ofctl_v1_2.get_group_stats(dp, self.waiters)
        elif dp.ofproto.OFP_VERSION == ofproto_v1_3.OFP_VERSION:
            groups = ofctl_v1_3.get_group_stats(dp, self.waiters)
        elif dp.ofproto.OFP_VERSION == ofproto_v1_0.OFP_VERSION:
            LOG.debug('Request not supported in this OF protocol version')
            return Response(status=501)
        else:
            LOG.debug('Unsupported OF protocol')
            return Response(status=501)
        body = json.dumps(groups)
        return Response(content_type='application/json', body=body)
def get_port_desc(self, req, dpid, **_kwargs):
dp = self.dpset.get(int(dpid))
if dp is None:
return Response(status=404)
if dp.ofproto.OFP_VERSION == ofproto_v1_0.OFP_VERSION:
groups = ofctl_v1_0.get_port_desc(dp, self.waiters)
elif dp.ofproto.OFP_VERSION == ofproto_v1_2.OFP_VERSION:
groups = ofctl_v1_2.get_port_desc(dp, self.waiters)
elif dp.ofproto.OFP_VERSION == ofproto_v1_3.OFP_VERSION:
groups = ofctl_v1_3.get_port_desc(dp, self.waiters)
else:
LOG.debug('Unsupported OF protocol')
return Response(status=501)
body = json.dumps(groups)
return Response(content_type='application/json', body=body)
    def mod_flow_entry(self, req, cmd, **_kwargs):
        """Add/modify/delete a flow entry (POST /stats/flowentry/<cmd>).

        The request body is a Python literal describing the flow, including
        its 'dpid'. 400 on invalid body syntax, 404 on unknown dpid or
        unknown *cmd*, 501 for an unsupported OF version, 200 on success.
        """
        try:
            flow = ast.literal_eval(req.body)
        except SyntaxError:
            LOG.debug('invalid syntax %s', req.body)
            return Response(status=400)
        dpid = flow.get('dpid')
        dp = self.dpset.get(int(dpid))
        if dp is None:
            return Response(status=404)
        # Translate the URL command into the protocol's flow-mod constant.
        if cmd == 'add':
            cmd = dp.ofproto.OFPFC_ADD
        elif cmd == 'modify':
            cmd = dp.ofproto.OFPFC_MODIFY
        elif cmd == 'modify_strict':
            cmd = dp.ofproto.OFPFC_MODIFY_STRICT
        elif cmd == 'delete':
            cmd = dp.ofproto.OFPFC_DELETE
        elif cmd == 'delete_strict':
            cmd = dp.ofproto.OFPFC_DELETE_STRICT
        else:
            return Response(status=404)
        if dp.ofproto.OFP_VERSION == ofproto_v1_0.OFP_VERSION:
            ofctl_v1_0.mod_flow_entry(dp, flow, cmd)
        elif dp.ofproto.OFP_VERSION == ofproto_v1_2.OFP_VERSION:
            ofctl_v1_2.mod_flow_entry(dp, flow, cmd)
        elif dp.ofproto.OFP_VERSION == ofproto_v1_3.OFP_VERSION:
            ofctl_v1_3.mod_flow_entry(dp, flow, cmd)
        else:
            LOG.debug('Unsupported OF protocol')
            return Response(status=501)
        return Response(status=200)
    def delete_flow_entry(self, req, dpid, **_kwargs):
        """Delete ALL flow entries of a switch (DELETE /stats/flowentry/clear/<dpid>).

        On OF1.2/1.3 this issues a table-wide OFPFC_DELETE; OF1.0 has a
        dedicated helper. 404 on unknown dpid, 501 for an unsupported version.
        """
        dp = self.dpset.get(int(dpid))
        if dp is None:
            return Response(status=404)
        flow = {'table_id': dp.ofproto.OFPTT_ALL}
        if dp.ofproto.OFP_VERSION == ofproto_v1_0.OFP_VERSION:
            ofctl_v1_0.delete_flow_entry(dp)
        elif dp.ofproto.OFP_VERSION == ofproto_v1_2.OFP_VERSION:
            ofctl_v1_2.mod_flow_entry(dp, flow, dp.ofproto.OFPFC_DELETE)
        elif dp.ofproto.OFP_VERSION == ofproto_v1_3.OFP_VERSION:
            ofctl_v1_3.mod_flow_entry(dp, flow, dp.ofproto.OFPFC_DELETE)
        else:
            LOG.debug('Unsupported OF protocol')
            return Response(status=501)
        return Response(status=200)
def mod_meter_entry(self, req, cmd, **_kwargs):
try:
flow = ast.literal_eval(req.body)
except SyntaxError:
LOG.debug('invalid syntax %s', req.body)
return Response(status=400)
dpid = flow.get('dpid')
dp = self.dpset.get(int(dpid))
if dp is None:
return Response(status=404)
if cmd == 'add':
cmd = dp.ofproto.OFPMC_ADD
elif cmd == 'modify':
cmd = dp.ofproto.OFPMC_MODIFY
elif cmd == 'delete':
cmd = dp.ofproto.OFPMC_DELETE
else:
return Response(status=404)
if dp.ofproto.OFP_VERSION == ofproto_v1_3.OFP_VERSION:
ofctl_v1_3.mod_meter_entry(dp, flow, cmd)
elif dp.ofproto.OFP_VERSION == ofproto_v1_0.OFP_VERSION or \
dp.ofproto.OFP_VERSION == ofproto_v1_2.OFP_VERSION:
LOG.debug('Request not supported in this OF protocol version')
return Response(status=501)
else:
LOG.debug('Unsupported OF protocol')
return Response(status=501)
return Response(status=200)
def mod_group_entry(self, req, cmd, **_kwargs):
try:
group = ast.literal_eval(req.body)
except SyntaxError:
LOG.debug('invalid syntax %s', req.body)
return Response(status=400)
dpid = group.get('dpid')
dp = self.dpset.get(int(dpid))
if dp is None:
return Response(status=404)
if dp.ofproto.OFP_VERSION == ofproto_v1_0.OFP_VERSION:
LOG.debug('Request not supported in this OF protocol version')
return Response(status=501)
if cmd == 'add':
cmd = dp.ofproto.OFPGC_ADD
elif cmd == 'modify':
cmd = dp.ofproto.OFPGC_MODIFY
elif cmd == 'delete':
cmd = dp.ofproto.OFPGC_DELETE
else:
return Response(status=404)
if dp.ofproto.OFP_VERSION == ofproto_v1_2.OFP_VERSION:
ofctl_v1_2.mod_group_entry(dp, group, cmd)
elif dp.ofproto.OFP_VERSION == ofproto_v1_3.OFP_VERSION:
ofctl_v1_3.mod_group_entry(dp, group, cmd)
else:
LOG.debug('Unsupported OF protocol')
return Response(status=501)
return Response(status=200)
def mod_port_behavior(self, req, cmd, **_kwargs):
try:
port_config = ast.literal_eval(req.body)
except SyntaxError:
LOG.debug('invalid syntax %s', req.body)
return Response(status=400)
dpid = port_config.get('dpid')
port_no = int(port_config.get('port_no', 0))
port_info = self.dpset.port_state[int(dpid)].get(port_no)
if port_info:
port_config.setdefault('hw_addr', port_info.hw_addr)
port_config.setdefault('advertise', port_info.advertised)
else:
return Response(status=404)
dp = self.dpset.get(int(dpid))
if dp is None:
return Response(status=404)
if cmd != 'modify':
return Response(status=404)
if dp.ofproto.OFP_VERSION == ofproto_v1_0.OFP_VERSION:
ofctl_v1_0.mod_port_behavior(dp, port_config)
elif dp.ofproto.OFP_VERSION == ofproto_v1_2.OFP_VERSION:
ofctl_v1_2.mod_port_behavior(dp, port_config)
elif dp.ofproto.OFP_VERSION == ofproto_v1_3.OFP_VERSION:
ofctl_v1_3.mod_port_behavior(dp, port_config)
else:
LOG.debug('Unsupported OF protocol')
return Response(status=501)
def send_experimenter(self, req, dpid, **_kwargs):
dp = self.dpset.get(int(dpid))
if dp is None:
return Response(status=404)
try:
exp = ast.literal_eval(req.body)
except SyntaxError:
LOG.debug('invalid syntax %s', req.body)
return Response(status=400)
if dp.ofproto.OFP_VERSION == ofproto_v1_2.OFP_VERSION:
ofctl_v1_2.send_experimenter(dp, exp)
elif dp.ofproto.OFP_VERSION == ofproto_v1_3.OFP_VERSION:
ofctl_v1_3.send_experimenter(dp, exp)
elif dp.ofproto.OFP_VERSION == ofproto_v1_0.OFP_VERSION:
LOG.debug('Request not supported in this OF protocol version')
return Response(status=501)
else:
LOG.debug('Unsupported OF protocol')
return Response(status=501)
return Response(status=200)
class RestStatsApi(app_manager.RyuApp):
    """Ryu application exposing switch statistics and flow/meter/group
    management over a REST API rooted at /stats.

    It wires URL routes to StatsController actions and collects the
    asynchronous OpenFlow replies that those actions wait on via the
    shared ``waiters`` dict.
    """
    # Protocol versions this app speaks.
    OFP_VERSIONS = [ofproto_v1_0.OFP_VERSION,
                    ofproto_v1_2.OFP_VERSION,
                    ofproto_v1_3.OFP_VERSION]
    # Contexts injected by the app manager: the datapath registry and
    # the shared WSGI server.
    _CONTEXTS = {
        'dpset': dpset.DPSet,
        'wsgi': WSGIApplication
    }
    def __init__(self, *args, **kwargs):
        super(RestStatsApi, self).__init__(*args, **kwargs)
        self.dpset = kwargs['dpset']
        wsgi = kwargs['wsgi']
        # waiters[dpid][xid] = (event, collected_msgs); populated by the
        # controller actions, completed by the reply handlers below.
        self.waiters = {}
        # Shared state handed to every StatsController instance.
        self.data = {}
        self.data['dpset'] = self.dpset
        self.data['waiters'] = self.waiters
        mapper = wsgi.mapper
        wsgi.registory['StatsController'] = self.data
        path = '/stats'
        # --- read-only statistics endpoints (GET) ---
        uri = path + '/switches'
        mapper.connect('stats', uri,
                       controller=StatsController, action='get_dpids',
                       conditions=dict(method=['GET']))
        uri = path + '/desc/{dpid}'
        mapper.connect('stats', uri,
                       controller=StatsController, action='get_desc_stats',
                       conditions=dict(method=['GET']))
        uri = path + '/flow/{dpid}'
        mapper.connect('stats', uri,
                       controller=StatsController, action='get_flow_stats',
                       conditions=dict(method=['GET', 'POST']))
        uri = path + '/aggregateflow/{dpid}'
        mapper.connect('stats', uri,
                       controller=StatsController,
                       action='get_aggregate_flow_stats',
                       conditions=dict(method=['GET', 'POST']))
        uri = path + '/port/{dpid}'
        mapper.connect('stats', uri,
                       controller=StatsController, action='get_port_stats',
                       conditions=dict(method=['GET']))
        uri = path + '/queue/{dpid}'
        mapper.connect('stats', uri,
                       controller=StatsController, action='get_queue_stats',
                       conditions=dict(method=['GET']))
        uri = path + '/meterfeatures/{dpid}'
        mapper.connect('stats', uri,
                       controller=StatsController, action='get_meter_features',
                       conditions=dict(method=['GET']))
        uri = path + '/meterconfig/{dpid}'
        mapper.connect('stats', uri,
                       controller=StatsController, action='get_meter_config',
                       conditions=dict(method=['GET']))
        uri = path + '/meter/{dpid}'
        mapper.connect('stats', uri,
                       controller=StatsController, action='get_meter_stats',
                       conditions=dict(method=['GET']))
        uri = path + '/groupfeatures/{dpid}'
        mapper.connect('stats', uri,
                       controller=StatsController, action='get_group_features',
                       conditions=dict(method=['GET']))
        uri = path + '/groupdesc/{dpid}'
        mapper.connect('stats', uri,
                       controller=StatsController, action='get_group_desc',
                       conditions=dict(method=['GET']))
        uri = path + '/group/{dpid}'
        mapper.connect('stats', uri,
                       controller=StatsController, action='get_group_stats',
                       conditions=dict(method=['GET']))
        uri = path + '/portdesc/{dpid}'
        mapper.connect('stats', uri,
                       controller=StatsController, action='get_port_desc',
                       conditions=dict(method=['GET']))
        # --- mutating endpoints (POST/DELETE) ---
        uri = path + '/flowentry/{cmd}'
        mapper.connect('stats', uri,
                       controller=StatsController, action='mod_flow_entry',
                       conditions=dict(method=['POST']))
        uri = path + '/flowentry/clear/{dpid}'
        mapper.connect('stats', uri,
                       controller=StatsController, action='delete_flow_entry',
                       conditions=dict(method=['DELETE']))
        uri = path + '/meterentry/{cmd}'
        mapper.connect('stats', uri,
                       controller=StatsController, action='mod_meter_entry',
                       conditions=dict(method=['POST']))
        uri = path + '/groupentry/{cmd}'
        mapper.connect('stats', uri,
                       controller=StatsController, action='mod_group_entry',
                       conditions=dict(method=['POST']))
        uri = path + '/portdesc/{cmd}'
        mapper.connect('stats', uri,
                       controller=StatsController, action='mod_port_behavior',
                       conditions=dict(method=['POST']))
        uri = path + '/experimenter/{dpid}'
        mapper.connect('stats', uri,
                       controller=StatsController, action='send_experimenter',
                       conditions=dict(method=['POST']))
    @set_ev_cls([ofp_event.EventOFPStatsReply,
                 ofp_event.EventOFPDescStatsReply,
                 ofp_event.EventOFPFlowStatsReply,
                 ofp_event.EventOFPAggregateStatsReply,
                 ofp_event.EventOFPPortStatsReply,
                 ofp_event.EventOFPQueueStatsReply,
                 ofp_event.EventOFPMeterStatsReply,
                 ofp_event.EventOFPMeterFeaturesStatsReply,
                 ofp_event.EventOFPMeterConfigStatsReply,
                 ofp_event.EventOFPGroupStatsReply,
                 ofp_event.EventOFPGroupFeaturesStatsReply,
                 ofp_event.EventOFPGroupDescStatsReply,
                 ofp_event.EventOFPPortDescStatsReply
                 ], MAIN_DISPATCHER)
    def stats_reply_handler(self, ev):
        # Deliver a *StatsReply to whichever REST request is waiting on
        # this (dpid, xid); ignore replies nobody asked for.
        msg = ev.msg
        dp = msg.datapath
        if dp.id not in self.waiters:
            return
        if msg.xid not in self.waiters[dp.id]:
            return
        lock, msgs = self.waiters[dp.id][msg.xid]
        msgs.append(msg)
        # Multipart replies set a "more" flag until the last fragment.
        # OF1.0/1.2 call it OFPSF_REPLY_MORE, OF1.3 renamed it
        # OFPMPF_REPLY_MORE.
        flags = 0
        if dp.ofproto.OFP_VERSION == ofproto_v1_0.OFP_VERSION:
            flags = dp.ofproto.OFPSF_REPLY_MORE
        elif dp.ofproto.OFP_VERSION == ofproto_v1_2.OFP_VERSION:
            flags = dp.ofproto.OFPSF_REPLY_MORE
        elif dp.ofproto.OFP_VERSION == ofproto_v1_3.OFP_VERSION:
            flags = dp.ofproto.OFPMPF_REPLY_MORE
        if msg.flags & flags:
            # More fragments coming; keep the waiter registered.
            return
        del self.waiters[dp.id][msg.xid]
        # Wake the blocked REST handler.
        lock.set()
    @set_ev_cls([ofp_event.EventOFPSwitchFeatures], MAIN_DISPATCHER)
    def features_reply_handler(self, ev):
        # Same delivery scheme for SwitchFeatures replies, which are
        # single messages (no "more" flag handling needed).
        msg = ev.msg
        dp = msg.datapath
        if dp.id not in self.waiters:
            return
        if msg.xid not in self.waiters[dp.id]:
            return
        lock, msgs = self.waiters[dp.id][msg.xid]
        msgs.append(msg)
        del self.waiters[dp.id][msg.xid]
        lock.set()
| {
"content_hash": "e4ff6e9360da4273d33f2ba8fc0a282c",
"timestamp": "",
"source": "github",
"line_count": 718,
"max_line_length": 79,
"avg_line_length": 36.128133704735376,
"alnum_prop": 0.5888974556669236,
"repo_name": "StephenKing/summerschool-2015-ryu",
"id": "38301ca585f169f41f909d5eff42a45b6f79f8a9",
"size": "26553",
"binary": false,
"copies": "2",
"ref": "refs/heads/summerschool-step2-complete",
"path": "ryu/app/ofctl_rest.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "5444"
},
{
"name": "CSS",
"bytes": "306"
},
{
"name": "Erlang",
"bytes": "871862"
},
{
"name": "HTML",
"bytes": "306"
},
{
"name": "JavaScript",
"bytes": "8436"
},
{
"name": "Makefile",
"bytes": "1213"
},
{
"name": "Python",
"bytes": "5235248"
},
{
"name": "Shell",
"bytes": "14380"
}
],
"symlink_target": ""
} |
from requests.auth import HTTPDigestAuth
from requests.utils import dict_from_cookiejar
from log import LOG
from sys import stdout
from hashlib import sha1
from re import findall
import hmac
import utility
import state
# Credential pairs tried before any user-supplied auth; a successful
# brute-forced login is prepended to this list at runtime (see checkAuth).
default_credentials = [("admin", "admin")]
def _salt(url):
    """ ColdFusion requires a salt that it uses client-side and sends
    back to the server, which it is expecting. We can obtain the next salt
    by simply requesting it.

    Returns the salt string scraped from the login page, or None when
    the page does not return HTTP 200.
    """

    r = utility.requests_get(url)
    # use ==, not 'is': identity comparison against an int literal only
    # works by accident of CPython's small-integer caching
    if r.status_code == 200:
        salt = findall("name=\"salt\" type=\"hidden\" value=\"(.*?)\">", r.content)
        return salt[0]
def _auth(usr, pswd, url, version):
    """ Authenticate to the remote ColdFusion server; bit of a pain

    Builds the version-specific login POST: CF 9.x submits
    HMAC-SHA1(salt, SHA1(password)) with the per-request salt, while
    CF 10.x submits the bare uppercase SHA1 digest.  On success returns
    (cookie dict from the redirect, None); otherwise falls through to
    (None, None).  NOTE(review): the failure value (None, None) is a
    truthy tuple -- callers must test the first element, not the tuple.
    """
    if version in ['9.0']:
        # CF9 echoes a salt in its login form and expects the password
        # hash to be HMAC'd with it client-side.
        salt = _salt(url)
        hsh = hmac.new(salt, sha1(pswd).hexdigest().upper(), sha1).hexdigest().upper()
        data = {"cfadminPassword" : hsh,
                "requestedURL" : "/CFIDE/administrator/enter.cfm?",
                "cfadminUserId" : usr,
                "salt" : salt,
                "submit" : "Login"
                }
    elif version in ['10.0']:
        hsh = sha1(pswd).hexdigest().upper()
        data = {'cfadminPassword' : hsh,
                'requestedURL' : '/CFIDE/administrator/enter.cfm?',
                'cfadminUserId' : usr,
                'submit' : 'Login'
                }
    try:
        res = utility.requests_post(url, data=data)
        # a successful login bounces through a redirect, so history is
        # non-empty; a plain 200 with no history is the login page again
        if res.status_code is 200 and len(res.history) > 0:
            utility.Msg("Successfully authenticated with %s:%s" % (usr, pswd), LOG.DEBUG)
            return (dict_from_cookiejar(res.history[0].cookies), None)
    except Exception, e:
        utility.Msg("Error authenticating: %s" % e, LOG.ERROR)
    return (None, None)
def checkAuth(ip, port, title, version):
"""
"""
url = "http://{0}:{1}/CFIDE/administrator/enter.cfm".format(ip, port)
# check with given auth
if state.usr_auth:
(usr, pswd) = state.usr_auth.split(':')
return _auth(usr, pswd, url, version)
# else try default creds
for (usr, pswd) in default_credentials:
cook = _auth(usr, pswd, url, version)
if cook:
return cook
# if we're still here, check if they supplied a wordlist
if state.bf_wordlist and not state.hasbf:
state.hasbf = True
wordlist = []
try:
with open(state.bf_wordlist, 'r') as f:
# ensure everything is ascii or requests will explode
wordlist = [x.decode('ascii', 'ignore').rstrip() for x in f.readlines()]
except Exception, e:
utility.Msg("Failed to read wordlist (%s)" % e, LOG.ERROR)
return
utility.Msg("Brute forcing account %s with %d passwords..." %
(state.bf_user, len(wordlist)), LOG.DEBUG)
try:
for (idx, word) in enumerate(wordlist):
stdout.flush()
stdout.write("\r\033[32m [%s] Brute forcing password for %s [%d/%d]\033[0m"\
% (utility.timestamp(), state.bf_user, idx+1,
len(wordlist)))
cook = _auth(state.bf_user, word, url, version)
if cook:
print '' # newline
if not (state.bf_user, word) in default_credentials:
default_credentials.insert(0, (state.bf_user, word))
utility.Msg("Successful login %s:%s" %
(state.bf_user, word), LOG.SUCCESS)
return cook
print ''
except KeyboardInterrupt:
pass
| {
"content_hash": "d7d2bf872faac9a3cf86a72871a06b92",
"timestamp": "",
"source": "github",
"line_count": 115,
"max_line_length": 92,
"avg_line_length": 32.504347826086956,
"alnum_prop": 0.5417335473515249,
"repo_name": "jorik041/clusterd",
"id": "5f72a844c762fbbef3a0d2e69e8e09f86eb4cbbe",
"size": "3738",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/platform/coldfusion/authenticate.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ColdFusion",
"bytes": "56300"
},
{
"name": "Java",
"bytes": "12329"
},
{
"name": "Python",
"bytes": "175558"
},
{
"name": "Shell",
"bytes": "5889"
}
],
"symlink_target": ""
} |
"""Configuration.from_yaml() with environment variables interpolation tests."""
import os
import yaml
from pytest import mark, raises
def test_env_variable_interpolation(config, yaml_config_file_3):
    """Environment variables referenced in the YAML are substituted on load."""
    config.from_yaml(yaml_config_file_3)

    expected_section = {
        "value1": "test-value",
        "value2": "test-path/path",
    }
    assert config() == {"section1": expected_section}
    assert config.section1() == expected_section
    assert config.section1.value1() == "test-value"
    assert config.section1.value2() == "test-path/path"
def test_missing_envs_not_required(config, yaml_config_file_3):
    """Undefined env vars interpolate to None / empty string by default."""
    for env_name in ("CONFIG_TEST_ENV", "CONFIG_TEST_PATH"):
        del os.environ[env_name]

    config.from_yaml(yaml_config_file_3)

    expected_section = {
        "value1": None,
        "value2": "/path",
    }
    assert config() == {"section1": expected_section}
    assert config.section1() == expected_section
    assert config.section1.value1() is None
    assert config.section1.value2() == "/path"
def test_missing_envs_required(config, yaml_config_file_3):
    """envs_required=True raises ValueError for an undefined variable."""
    with open(yaml_config_file_3, "w") as fh:
        fh.write(
            "section:\n"
            " undefined: ${UNDEFINED}\n"
        )

    with raises(ValueError, match="Missing required environment variable \"UNDEFINED\""):
        config.from_yaml(yaml_config_file_3, envs_required=True)
@mark.parametrize("config_type", ["strict"])
def test_missing_envs_strict_mode(config, yaml_config_file_3):
    """Strict configurations require referenced env vars by default."""
    with open(yaml_config_file_3, "w") as fh:
        fh.write(
            "section:\n"
            " undefined: ${UNDEFINED}\n"
        )

    with raises(ValueError, match="Missing required environment variable \"UNDEFINED\""):
        config.from_yaml(yaml_config_file_3)
@mark.parametrize("config_type", ["strict"])
def test_missing_envs_not_required_in_strict_mode(config, yaml_config_file_3):
    """envs_required=False overrides strict mode's env requirement."""
    with open(yaml_config_file_3, "w") as fh:
        fh.write(
            "section:\n"
            " undefined: ${UNDEFINED}\n"
        )

    config.from_yaml(yaml_config_file_3, envs_required=False)

    assert config.section.undefined() is None
def test_option_missing_envs_not_required(config, yaml_config_file_3):
    """Same lenient env handling via a nested configuration option."""
    for env_name in ("CONFIG_TEST_ENV", "CONFIG_TEST_PATH"):
        del os.environ[env_name]

    config.option.from_yaml(yaml_config_file_3)

    expected_section = {
        "value1": None,
        "value2": "/path",
    }
    assert config.option() == {"section1": expected_section}
    assert config.option.section1() == expected_section
    assert config.option.section1.value1() is None
    assert config.option.section1.value2() == "/path"
def test_option_missing_envs_required(config, yaml_config_file_3):
    """envs_required=True raises on a nested option as well."""
    with open(yaml_config_file_3, "w") as fh:
        fh.write(
            "section:\n"
            " undefined: ${UNDEFINED}\n"
        )

    with raises(ValueError, match="Missing required environment variable \"UNDEFINED\""):
        config.option.from_yaml(yaml_config_file_3, envs_required=True)
@mark.parametrize("config_type", ["strict"])
def test_option_missing_envs_not_required_in_strict_mode(config, yaml_config_file_3):
    """envs_required=False on a nested option overrides strict mode."""
    config.override({"option": {}})
    with open(yaml_config_file_3, "w") as fh:
        fh.write(
            "section:\n"
            " undefined: ${UNDEFINED}\n"
        )

    config.option.from_yaml(yaml_config_file_3, envs_required=False)

    assert config.option.section.undefined() is None
@mark.parametrize("config_type", ["strict"])
def test_option_missing_envs_strict_mode(config, yaml_config_file_3):
    """Nested-option load in strict mode requires env vars by default."""
    with open(yaml_config_file_3, "w") as fh:
        fh.write(
            "section:\n"
            " undefined: ${UNDEFINED}\n"
        )

    with raises(ValueError, match="Missing required environment variable \"UNDEFINED\""):
        config.option.from_yaml(yaml_config_file_3)
def test_default_values(config, yaml_config_file_3):
    """${VAR:default} falls back to the default only when VAR is unset."""
    with open(yaml_config_file_3, "w") as fh:
        fh.write(
            "section:\n"
            " defined_with_default: ${DEFINED:default}\n"
            " undefined_with_default: ${UNDEFINED:default}\n"
            " complex: ${DEFINED}/path/${DEFINED:default}/${UNDEFINED}/${UNDEFINED:default}\n"
        )

    config.from_yaml(yaml_config_file_3)

    expected_section = {
        "defined_with_default": "defined",
        "undefined_with_default": "default",
        "complex": "defined/path/defined//default",
    }
    assert config.section() == expected_section
def test_option_env_variable_interpolation(config, yaml_config_file_3):
    """Env interpolation also works when loading into a nested option."""
    config.option.from_yaml(yaml_config_file_3)

    expected_section = {
        "value1": "test-value",
        "value2": "test-path/path",
    }
    assert config.option() == {"section1": expected_section}
    assert config.option.section1() == expected_section
    assert config.option.section1.value1() == "test-value"
    assert config.option.section1.value2() == "test-path/path"
def test_env_variable_interpolation_custom_loader(config, yaml_config_file_3):
    """A user-supplied YAML loader still performs env interpolation."""
    config.from_yaml(yaml_config_file_3, loader=yaml.UnsafeLoader)

    expected_section = {
        "value1": "test-value",
        "value2": "test-path/path",
    }
    assert config.section1() == expected_section
    assert config.section1.value1() == "test-value"
    assert config.section1.value2() == "test-path/path"
def test_option_env_variable_interpolation_custom_loader(config, yaml_config_file_3):
    """Custom loader + nested option: env interpolation still applies."""
    config.option.from_yaml(yaml_config_file_3, loader=yaml.UnsafeLoader)

    expected_section = {
        "value1": "test-value",
        "value2": "test-path/path",
    }
    assert config.option.section1() == expected_section
    assert config.option.section1.value1() == "test-value"
    assert config.option.section1.value2() == "test-path/path"
| {
"content_hash": "84704f38bde5835b33497b7bdc1e1bbb",
"timestamp": "",
"source": "github",
"line_count": 185,
"max_line_length": 95,
"avg_line_length": 31.57837837837838,
"alnum_prop": 0.6129750085587128,
"repo_name": "ets-labs/python-dependency-injector",
"id": "8e6e1c0dc54517cad8c1d0167f8aa3714941dd19",
"size": "5842",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tests/unit/providers/configuration/test_from_yaml_with_env_py2_py3.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Cython",
"bytes": "201812"
},
{
"name": "Makefile",
"bytes": "1942"
},
{
"name": "Python",
"bytes": "492977"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, division, print_function
import datetime
import logging
import os
import pprint
import numpy as np
import tensorflow as tf
from sklearn import metrics
from tensorflow.python.saved_model import (
signature_constants, signature_def_utils, tag_constants, utils)
import sparse_model
import util
# Module-wide logging setup: INFO level with timestamped records.
logging.basicConfig(
    format='%(asctime)s %(levelname)-8s %(message)s',
    level=logging.INFO,
    datefmt='%Y-%m-%d %H:%M:%S')
def define_flags():
  """
  Define all the command-line parameters.
  Return:
    The FLAGS object.
  """
  flags = tf.app.flags
  # Data / model-shape flags.
  flags.DEFINE_string("train_files", "./data/a8a/a8a_train.libsvm.tfrecords",
                      "The glob pattern of train TFRecords files")
  flags.DEFINE_string("validation_files",
                      "./data/a8a/a8a_test.libsvm.tfrecords",
                      "The glob pattern of train TFRecords files")
  flags.DEFINE_integer("feature_size", 124, "Number of feature size")
  flags.DEFINE_integer("label_size", 2, "Number of label size")
  flags.DEFINE_string("label_type", "int", "The type of label")
  # Training hyper-parameters.
  flags.DEFINE_float("learning_rate", 0.01, "The learning rate")
  flags.DEFINE_integer("epoch_number", 10, "Number of epochs to train")
  flags.DEFINE_integer("batch_size", 1024, "The batch size of training")
  flags.DEFINE_integer("train_batch_size", 64, "The batch size of training")
  flags.DEFINE_integer("validation_batch_size", 64,
                       "The batch size of training")
  flags.DEFINE_integer("validate_batch_size", 1024,
                       "The batch size of validation")
  flags.DEFINE_integer("batch_thread_number", 1,
                       "Number of threads to read data")
  flags.DEFINE_integer("min_after_dequeue", 100,
                       "The minimal number after dequeue")
  # Output locations.
  flags.DEFINE_string("checkpoint_path", "./sparse_checkpoint/",
                      "The path of checkpoint")
  flags.DEFINE_string("output_path", "./sparse_tensorboard/",
                      "The path of tensorboard event files")
  # Model architecture / regularization options.
  flags.DEFINE_string("model", "dnn", "Support dnn, lr, wide_and_deep")
  flags.DEFINE_string("model_network", "128 32 8",
                      "The neural network of model")
  flags.DEFINE_boolean("enable_bn", False, "Enable batch normalization or not")
  flags.DEFINE_float("bn_epsilon", 0.001, "The epsilon of batch normalization")
  flags.DEFINE_boolean("enable_dropout", False, "Enable dropout or not")
  flags.DEFINE_float("dropout_keep_prob", 0.5, "The dropout keep prob")
  flags.DEFINE_boolean("enable_lr_decay", False, "Enable learning rate decay")
  flags.DEFINE_float("lr_decay_rate", 0.96, "Learning rate decay rate")
  flags.DEFINE_string("optimizer", "adagrad", "The optimizer to train")
  flags.DEFINE_integer("steps_to_validate", 10,
                       "Steps to validate and print state")
  # Run mode and export/inference settings.
  flags.DEFINE_string("mode", "train", "Support train, export, inference")
  flags.DEFINE_string("saved_model_path", "./sparse_saved_model/",
                      "The path of the saved model")
  flags.DEFINE_string("model_path", "./sparse_model/", "The path of the model")
  flags.DEFINE_integer("model_version", 1, "The version of the model")
  flags.DEFINE_string("inference_test_file", "./data/a8a_test.libsvm",
                      "The test file for inference")
  flags.DEFINE_string("inference_result_file", "./inference_result.txt",
                      "The result file from inference")
  flags.DEFINE_boolean("benchmark_mode", False,
                       "Reduce extra computation in benchmark mode")
  FLAGS = flags.FLAGS
  # Check parameters
  assert (FLAGS.optimizer in [
      "sgd", "adadelta", "adagrad", "adam", "ftrl", "rmsprop"
  ])
  # Print flags
  # NOTE(review): bare attribute access -- presumably forces tf.app.flags
  # to parse argv before __flags is iterated below; confirm before removing.
  FLAGS.mode
  parameter_value_map = {}
  for key in FLAGS.__flags.keys():
    parameter_value_map[key] = FLAGS.__flags[key].value
  pprint.PrettyPrinter().pprint(parameter_value_map)
  return FLAGS
# Parse command-line flags once at import time; the module-level FLAGS
# object is read by every function below.
FLAGS = define_flags()
def parse_tfrecords_function(example_proto):
  """
  Decode TFRecords for Dataset.

  Args:
    example_proto: TensorFlow ExampleProto object.
  Return:
    The op of features and labels
  """
  base_spec = {
      "ids": tf.VarLenFeature(tf.int64),
      "values": tf.VarLenFeature(tf.float32),
  }
  if FLAGS.label_type == "int":
    spec = dict(base_spec)
    spec["label"] = tf.FixedLenFeature([], tf.int64, default_value=0)
    parsed = tf.parse_single_example(example_proto, spec)
    # int64 label passes through untouched
    labels = parsed["label"]
    ids = parsed["ids"]
    values = parsed["values"]
  elif FLAGS.label_type == "float":
    spec = dict(base_spec)
    spec["label"] = tf.FixedLenFeature([], tf.float32, default_value=0)
    parsed = tf.parse_single_example(example_proto, spec)
    # float label is truncated to int32 for classification
    labels = tf.cast(parsed["label"], tf.int32)
    ids = parsed["ids"]
    values = parsed["values"]
  return labels, ids, values
def inference(sparse_ids, sparse_values, is_train=True):
  """
  Define the model by model name.

  Dispatches FLAGS.model to the matching builder in sparse_model.
  Return:
    The logit of the model output (None for an unknown model name).
  """
  model_builders = {
      "dnn": sparse_model.dnn_inference,
      "lr": sparse_model.lr_inference,
      "wide_and_deep": sparse_model.wide_and_deep_inference,
      "customized": sparse_model.customized_inference,
  }
  builder = model_builders.get(FLAGS.model)
  if builder is None:
    # mirror the original fall-through behavior for unknown models
    return None
  return builder(sparse_ids, sparse_values, is_train, FLAGS)
def main():
  """Build the sparse-model graph and run the action selected by
  FLAGS.mode: "train", "save_model", "inference" or
  "inference_with_tfrecords".
  """
  # Ensure checkpoint/summary directories exist before Saver/FileWriter use.
  if os.path.exists(FLAGS.checkpoint_path) == False:
    os.makedirs(FLAGS.checkpoint_path)
  checkpoint_file_path = FLAGS.checkpoint_path + "/checkpoint.ckpt"
  latest_checkpoint_file_path = tf.train.latest_checkpoint(
      FLAGS.checkpoint_path)

  if os.path.exists(FLAGS.output_path) == False:
    os.makedirs(FLAGS.output_path)

  # Step 1: Construct the dataset op
  epoch_number = FLAGS.epoch_number
  if epoch_number <= 0:
    # repeat(-1) repeats the dataset indefinitely
    epoch_number = -1

  train_buffer_size = FLAGS.train_batch_size * 3
  validation_buffer_size = FLAGS.train_batch_size * 3

  train_filename_list = [filename for filename in FLAGS.train_files.split(",")]
  train_filename_placeholder = tf.placeholder(tf.string, shape=[None])
  train_dataset = tf.data.TFRecordDataset(train_filename_placeholder)
  # NOTE(review): shuffle() is applied after batch(), so whole batches are
  # shuffled rather than individual examples -- confirm this is intended.
  train_dataset = train_dataset.map(parse_tfrecords_function).repeat(
      epoch_number).batch(FLAGS.train_batch_size).shuffle(
          buffer_size=train_buffer_size)
  train_dataset_iterator = train_dataset.make_initializable_iterator()
  batch_labels, batch_ids, batch_values = train_dataset_iterator.get_next()

  validation_filename_list = [
      filename for filename in FLAGS.validation_files.split(",")
  ]
  validation_filename_placeholder = tf.placeholder(tf.string, shape=[None])
  validation_dataset = tf.data.TFRecordDataset(validation_filename_placeholder)
  validation_dataset = validation_dataset.map(parse_tfrecords_function).repeat(
  ).batch(FLAGS.validation_batch_size).shuffle(
      buffer_size=validation_buffer_size)
  validation_dataset_iterator = validation_dataset.make_initializable_iterator(
  )
  validation_labels, validation_ids, validation_values = validation_dataset_iterator.get_next(
  )

  # Define the model: loss + optimizer on the training batch.
  logits = inference(batch_ids, batch_values, True)
  batch_labels = tf.to_int64(batch_labels)
  cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
      logits=logits, labels=batch_labels)
  loss = tf.reduce_mean(cross_entropy, name="loss")
  global_step = tf.Variable(0, name="global_step", trainable=False)
  if FLAGS.enable_lr_decay:
    logging.info(
        "Enable learning rate decay rate: {}".format(FLAGS.lr_decay_rate))
    starter_learning_rate = FLAGS.learning_rate
    learning_rate = tf.train.exponential_decay(
        starter_learning_rate,
        global_step,
        100000,
        FLAGS.lr_decay_rate,
        staircase=True)
  else:
    learning_rate = FLAGS.learning_rate
  optimizer = util.get_optimizer_by_name(FLAGS.optimizer, learning_rate)
  train_op = optimizer.minimize(loss, global_step=global_step)
  # Share the model variables with the evaluation/inference towers below.
  tf.get_variable_scope().reuse_variables()

  # Define accuracy op for train data
  train_accuracy_logits = inference(batch_ids, batch_values, False)
  train_softmax = tf.nn.softmax(train_accuracy_logits)
  train_correct_prediction = tf.equal(
      tf.argmax(train_softmax, 1), batch_labels)
  train_accuracy = tf.reduce_mean(
      tf.cast(train_correct_prediction, tf.float32))

  # Define auc op for train data (one-hot encode labels for streaming_auc)
  batch_labels = tf.cast(batch_labels, tf.int32)
  sparse_labels = tf.reshape(batch_labels, [-1, 1])
  derived_size = tf.shape(batch_labels)[0]
  indices = tf.reshape(tf.range(0, derived_size, 1), [-1, 1])
  concated = tf.concat(axis=1, values=[indices, sparse_labels])
  outshape = tf.stack([derived_size, FLAGS.label_size])
  new_train_batch_labels = tf.sparse_to_dense(concated, outshape, 1.0, 0.0)
  _, train_auc = tf.contrib.metrics.streaming_auc(train_softmax,
                                                  new_train_batch_labels)

  # Define accuracy op for validate data
  validate_accuracy_logits = inference(validation_ids, validation_values,
                                       False)
  validate_softmax = tf.nn.softmax(validate_accuracy_logits)
  validate_batch_labels = tf.to_int64(validation_labels)
  validate_correct_prediction = tf.equal(
      tf.argmax(validate_softmax, 1), validate_batch_labels)
  validate_accuracy = tf.reduce_mean(
      tf.cast(validate_correct_prediction, tf.float32))

  # Define auc op for validate data
  validate_batch_labels = tf.cast(validate_batch_labels, tf.int32)
  sparse_labels = tf.reshape(validate_batch_labels, [-1, 1])
  derived_size = tf.shape(validate_batch_labels)[0]
  indices = tf.reshape(tf.range(0, derived_size, 1), [-1, 1])
  concated = tf.concat(axis=1, values=[indices, sparse_labels])
  outshape = tf.stack([derived_size, FLAGS.label_size])
  new_validate_batch_labels = tf.sparse_to_dense(concated, outshape, 1.0, 0.0)
  _, validate_auc = tf.contrib.metrics.streaming_auc(validate_softmax,
                                                     new_validate_batch_labels)

  # Define inference op: a SparseTensor fed from placeholders so arbitrary
  # batches can be scored without the dataset pipeline.
  sparse_index = tf.placeholder(tf.int64, [None, 2])
  sparse_ids = tf.placeholder(tf.int64, [None])
  sparse_values = tf.placeholder(tf.float32, [None])
  sparse_shape = tf.placeholder(tf.int64, [2])
  inference_ids = tf.SparseTensor(sparse_index, sparse_ids, sparse_shape)
  inference_values = tf.SparseTensor(sparse_index, sparse_values, sparse_shape)
  inference_logits = inference(inference_ids, inference_values, False)
  inference_softmax = tf.nn.softmax(inference_logits)
  inference_op = tf.argmax(inference_softmax, 1)
  keys_placeholder = tf.placeholder(tf.int32, shape=[None, 1])
  keys = tf.identity(keys_placeholder)

  # SavedModel serving signature used by "save_model" and end of training.
  signature_def_map = {
      signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
      signature_def_utils.build_signature_def(
          inputs={
              "keys": utils.build_tensor_info(keys_placeholder),
              "indexs": utils.build_tensor_info(sparse_index),
              "ids": utils.build_tensor_info(sparse_ids),
              "values": utils.build_tensor_info(sparse_values),
              "shape": utils.build_tensor_info(sparse_shape)
          },
          outputs={
              "keys": utils.build_tensor_info(keys),
              "softmax": utils.build_tensor_info(inference_softmax),
              "prediction": utils.build_tensor_info(inference_op)
          },
          method_name=signature_constants.PREDICT_METHOD_NAME)
  }

  # Initialize saver and summary
  saver = tf.train.Saver()
  tf.summary.scalar("loss", loss)
  tf.summary.scalar("train_accuracy", train_accuracy)
  tf.summary.scalar("train_auc", train_auc)
  tf.summary.scalar("validate_accuracy", validate_accuracy)
  tf.summary.scalar("validate_auc", validate_auc)
  summary_op = tf.summary.merge_all()
  init_op = [
      tf.global_variables_initializer(),
      tf.local_variables_initializer()
  ]

  # Create session to run
  with tf.Session() as sess:
    writer = tf.summary.FileWriter(FLAGS.output_path, sess.graph)
    sess.run(init_op)
    sess.run(
        train_dataset_iterator.initializer,
        feed_dict={train_filename_placeholder: train_filename_list})
    sess.run(
        validation_dataset_iterator.initializer,
        feed_dict={validation_filename_placeholder: validation_filename_list})

    if FLAGS.mode == "train":
      # Restore session and start queue runner
      util.restore_from_checkpoint(sess, saver, latest_checkpoint_file_path)
      coord = tf.train.Coordinator()
      threads = tf.train.start_queue_runners(coord=coord, sess=sess)
      start_time = datetime.datetime.now()

      try:
        while not coord.should_stop():
          if FLAGS.benchmark_mode:
            sess.run(train_op)
          else:
            _, step = sess.run([train_op, global_step])

            # Print state while training
            if step % FLAGS.steps_to_validate == 0:
              loss_value, train_accuracy_value, train_auc_value, validate_accuracy_value, auc_value, summary_value = sess.run(
                  [
                      loss, train_accuracy, train_auc, validate_accuracy,
                      validate_auc, summary_op
                  ])
              end_time = datetime.datetime.now()
              logging.info(
                  "[{}] Step: {}, loss: {}, train_acc: {}, train_auc: {}, valid_acc: {}, valid_auc: {}".
                  format(end_time - start_time, step, loss_value,
                         train_accuracy_value, train_auc_value,
                         validate_accuracy_value, auc_value))
              writer.add_summary(summary_value, step)
              saver.save(sess, checkpoint_file_path, global_step=step)
              start_time = end_time
      except tf.errors.OutOfRangeError:
        # Dataset exhausted: either finish the benchmark or export.
        if FLAGS.benchmark_mode:
          print("Finish training for benchmark")
          exit(0)
        else:
          # Export the model after training
          util.save_model(
              FLAGS.model_path,
              FLAGS.model_version,
              sess,
              signature_def_map,
              is_save_graph=False)
      finally:
        coord.request_stop()
        coord.join(threads)

    elif FLAGS.mode == "save_model":
      if not util.restore_from_checkpoint(sess, saver,
                                          latest_checkpoint_file_path):
        logging.error("No checkpoint found, exit now")
        exit(1)

      util.save_model(
          FLAGS.model_path,
          FLAGS.model_version,
          sess,
          signature_def_map,
          is_save_graph=False)

    elif FLAGS.mode == "inference":
      if not util.restore_from_checkpoint(sess, saver,
                                          latest_checkpoint_file_path):
        logging.error("No checkpoint found, exit now")
        exit(1)

      # Load inference test data (libsvm text format)
      inference_result_file_name = "./inference_result.txt"
      inference_test_file_name = "./data/a8a_test.libsvm"
      labels = []
      feature_ids = []
      feature_values = []
      feature_index = []
      ins_num = 0
      for line in open(inference_test_file_name, "r"):
        tokens = line.split(" ")
        labels.append(int(tokens[0]))
        feature_num = 0
        for feature in tokens[1:]:
          feature_id, feature_value = feature.split(":")
          feature_ids.append(int(feature_id))
          feature_values.append(float(feature_value))
          feature_index.append([ins_num, feature_num])
          feature_num += 1
        ins_num += 1

      # Run inference
      start_time = datetime.datetime.now()
      prediction, prediction_softmax = sess.run(
          [inference_op, inference_softmax],
          feed_dict={
              sparse_index: feature_index,
              sparse_ids: feature_ids,
              sparse_values: feature_values,
              sparse_shape: [ins_num, FLAGS.feature_size]
          })
      end_time = datetime.datetime.now()

      # Compute accuracy
      label_number = len(labels)
      correct_label_number = 0
      for i in range(label_number):
        if labels[i] == prediction[i]:
          correct_label_number += 1
      accuracy = float(correct_label_number) / label_number

      # Compute auc
      expected_labels = np.array(labels)
      predict_labels = prediction_softmax[:, 0]
      fpr, tpr, thresholds = metrics.roc_curve(
          expected_labels, predict_labels, pos_label=0)
      auc = metrics.auc(fpr, tpr)
      logging.info("[{}] Inference accuracy: {}, auc: {}".format(
          end_time - start_time, accuracy, auc))

      # Save result into the file
      np.savetxt(inference_result_file_name, prediction_softmax, delimiter=",")
      logging.info(
          "Save result to file: {}".format(inference_result_file_name))

    elif FLAGS.mode == "inference_with_tfrecords":
      if not util.restore_from_checkpoint(sess, saver,
                                          latest_checkpoint_file_path):
        logging.error("No checkpoint found, exit now")
        exit(1)

      # Load inference test data
      inference_result_file_name = "./inference_result.txt"
      inference_test_file_name = "./data/a8a/a8a_test.libsvm.tfrecords"
      batch_feature_index = []
      batch_labels = []
      batch_ids = []
      batch_values = []
      ins_num = 0

      # Read from TFRecords files
      for serialized_example in tf.python_io.tf_record_iterator(
          inference_test_file_name):
        # Get serialized example from file
        example = tf.train.Example()
        example.ParseFromString(serialized_example)
        # NOTE(review): the label is always read from float_list here even
        # though int-labelled records store it as int64 -- confirm the
        # record schema before trusting the accuracy numbers below.
        label = example.features.feature["label"].float_list.value
        ids = example.features.feature["ids"].int64_list.value
        values = example.features.feature["values"].float_list.value
        #print("label: {}, features: {}".format(label, " ".join([str(id) + ":" + str(value) for id, value in zip(ids, values)])))
        batch_labels.append(label)
        # Notice that using extend() instead of append() to flatten the values
        batch_ids.extend(ids)
        batch_values.extend(values)
        # Bug fix: xrange() does not exist on Python 3; range() is
        # equivalent here and the file targets py2/py3 via __future__.
        for i in range(len(ids)):
          batch_feature_index.append([ins_num, i])
        ins_num += 1

      # Run inference
      start_time = datetime.datetime.now()
      prediction, prediction_softmax = sess.run(
          [inference_op, inference_softmax],
          feed_dict={
              sparse_index: batch_feature_index,
              sparse_ids: batch_ids,
              sparse_values: batch_values,
              sparse_shape: [ins_num, FLAGS.feature_size]
          })
      end_time = datetime.datetime.now()

      # Compute accuracy
      label_number = len(batch_labels)
      correct_label_number = 0
      for i in range(label_number):
        if batch_labels[i] == prediction[i]:
          correct_label_number += 1
      accuracy = float(correct_label_number) / label_number

      # Compute auc
      expected_labels = np.array(batch_labels)
      predict_labels = prediction_softmax[:, 0]
      fpr, tpr, thresholds = metrics.roc_curve(
          expected_labels, predict_labels, pos_label=0)
      auc = metrics.auc(fpr, tpr)
      logging.info("[{}] Inference accuracy: {}, auc: {}".format(
          end_time - start_time, accuracy, auc))

      # Save result into the file
      np.savetxt(inference_result_file_name, prediction_softmax, delimiter=",")
      logging.info(
          "Save result to file: {}".format(inference_result_file_name))
# Script entry point: only run training/inference when executed directly.
# (The call was shown unindented, which is a SyntaxError; restore the indent.)
if __name__ == "__main__":
  main()
| {
"content_hash": "e742770bb83c21a851e58c7f66ca667f",
"timestamp": "",
"source": "github",
"line_count": 508,
"max_line_length": 129,
"avg_line_length": 39.303149606299215,
"alnum_prop": 0.6328258038665732,
"repo_name": "tobegit3hub/deep_recommend_system",
"id": "430e2e298aef472a5b561758bd3fb48aba47b1eb",
"size": "20015",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sparse_classifier.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "6416"
},
{
"name": "Go",
"bytes": "6298"
},
{
"name": "Java",
"bytes": "15516"
},
{
"name": "Protocol Buffer",
"bytes": "561281"
},
{
"name": "Python",
"bytes": "96803"
},
{
"name": "Scala",
"bytes": "6980"
},
{
"name": "Shell",
"bytes": "1464"
}
],
"symlink_target": ""
} |
from random import randint
''' Returns a prime greater than n '''
def nextPrime(n):
    ''' Return the smallest prime strictly greater than n.

    Accepts any integer: for n < 2 the answer is simply 2, the first prime.
    (The previous version rejected non-positive n with an assert, which is
    silently stripped under ``python -O``.)
    '''
    candidate = max(n + 1, 2)
    while not isPrime(candidate):
        candidate += 1
    return candidate
''' Returns True if n is prime False otherwise'''
def isPrime(n):
    ''' Return True if n is prime, False otherwise.

    Trial division by 2 and by odd numbers up to sqrt(n).
    Bugfix: the original returned True for n == 1 (and for negative odd
    numbers) because it never rejected values below 2.
    '''
    if n < 2:
        return False
    if n == 2:
        return True
    if n % 2 == 0:
        return False
    i = 3
    while i * i <= n:
        if n % i == 0:
            return False
        i += 2
    return True
''' Get large random prime '''
def getLargeRandomPrime():
    ''' Return a prime just above a uniformly random 10-digit number. '''
    # Pick a random starting point in [10^9, 10^10], then walk forward
    # to the first prime after it.
    start = randint(10 ** 9, 10 ** 10)
    return nextPrime(start)
| {
"content_hash": "b78658a92a869eb8caab9faa2c30640d",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 49,
"avg_line_length": 20.74074074074074,
"alnum_prop": 0.5410714285714285,
"repo_name": "2020saurav/DGA",
"id": "5e099d9e34b7b7c08bb5b6914b5fabcb0df855db",
"size": "560",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/util/primes.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "43379"
}
],
"symlink_target": ""
} |
"""Command line interface for testing commands."""
| {
"content_hash": "447f783c9d402bd424d909c05f650957",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 50,
"avg_line_length": 51,
"alnum_prop": 0.7450980392156863,
"repo_name": "nstoik/farm_monitor",
"id": "7639c88d3afafc3a83f80f23aab4e3988234c77e",
"size": "75",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "database/fm_database/cli/testing/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "18259"
},
{
"name": "HCL",
"bytes": "1534"
},
{
"name": "HTML",
"bytes": "611"
},
{
"name": "JavaScript",
"bytes": "268"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "242717"
},
{
"name": "Shell",
"bytes": "2937"
},
{
"name": "TypeScript",
"bytes": "18970"
},
{
"name": "Vue",
"bytes": "14394"
}
],
"symlink_target": ""
} |
from sqlalchemy import create_engine, and_
from datetime import datetime
import numpy as np
from models import (ActionMixin, UserMixin, ItemMixin, ComputationMixin,
COMPUTATION_SK_NAME,
ACTION_UPVOTE, ACTION_DOWNVOTE,
ACTION_FLAG_SPAM, ACTION_FLAG_HAM)
import spam_utils as su
import spam_detection_karger as sdk
import spam_detection_dirichlet as sdd
import hitsDB
SPAM_ALGO = su.ALGO_DIRICHLET
def bind_engine(engine, session, base, should_create=True):
    """ Bind *engine* to the session factory and to the declarative base's
    metadata; when should_create is true, create all known tables on it."""
    session.configure(bind=engine)
    base.metadata.bind = engine
    if should_create:
        base.metadata.create_all(engine)
def bootstrap(base, create_all=False):
    """ Engine should be binded before calling this function.

    Creates the concrete moderation model classes by mixing each mixin into
    the caller's declarative *base*, then publishes them on the mixins so the
    module-level helpers (add_item, upvote, ...) can reach them via
    ``Mixin.cls``. Optionally creates the tables.
    """
    class Computation(ComputationMixin, base):
        pass
    class ModerationAction(ActionMixin, base):
        pass
    class ModeratedAnnotation(ItemMixin, base):
        pass
    class ModerationUser(UserMixin, base):
        pass
    # Publish the concrete classes as module-wide singletons on the mixins.
    ActionMixin.cls = ModerationAction
    ItemMixin.cls = ModeratedAnnotation
    ComputationMixin.cls = Computation
    UserMixin.cls = ModerationUser
    if create_all:
        base.metadata.create_all(base.metadata.bind)
def run_offline_spam_detection(algo_name, session):
    """ Run offline (batch) spam detection.

    algo_name selects the detector: su.ALGO_KARGER runs the Karger
    algorithm; any other value falls through to the Dirichlet detector.
    (Removed a dead ``pass`` statement and a stale comment from the
    original body; behavior is unchanged.)
    """
    if algo_name == su.ALGO_KARGER:
        sdk.run_offline_computations(session)
    else:
        sdd.run_offline_computations(session)
    session.flush()
def raise_spam_flag(item, user, session, algo_name=su.ALGO_DIRICHLET):
    """ Record a spam flag on *item* by *user*, timestamped now (UTC)."""
    now = datetime.utcnow()
    if algo_name == su.ALGO_KARGER:
        sdk.flag_spam(item, user, now, session)
    else:
        sdd.flag_spam(item, user, now, session)
def raise_ham_flag(item, user, session, algo_name=su.ALGO_DIRICHLET):
    """ Record a ham (not-spam) flag on *item* by *user*, timestamped now (UTC)."""
    now = datetime.utcnow()
    if algo_name == su.ALGO_KARGER:
        sdk.flag_ham(item, user, now, session)
    else:
        sdd.flag_ham(item, user, now, session)
def suggest_n_users_to_review(item, n, session):
    """ Return up to n users suggested to review *item* (HITS-based ranking).

    Returns [] when the item is missing or has no page_url.
    """
    if item is None or item.page_url is None:
        return []
    reviewers = hitsDB.suggest_n_users_to_review(item, n, session)
    if len(reviewers) < n:
        # todo(michael): do random sampling (or some criteria)
        pass
    return reviewers
def get_n_items_for_spam_mm_randomly(n, session):
    """ Return n random items for spam metamoderation (delegates to the
    concrete item class bound by bootstrap())."""
    return ItemMixin.cls.get_n_items_for_spam_mm_randomly(n, session)
def delete_spam_item_by_author(item, session, algo_name=su.ALGO_DIRICHLET):
    """ If item is deleted by author then there is no reputation damage to the
    author, plus users who flagged it receive boost to base reliability.
    """
    if item.action_twin is not None:
        # If the item is also an action, delete the action first.
        # Rolling the action back means reversing the counter change it made
        # on the parent item's author.
        if item.action_twin.type == ACTION_UPVOTE:
            item.parent.author.mm_vote_counter -= 1
            item.parent.author.vote_counter -= 1
        elif item.action_twin.type == ACTION_DOWNVOTE:
            item.parent.author.mm_vote_counter += 1
            item.parent.author.vote_counter += 1
        else:
            raise Exception("Unknown action: %s" % item.action_twin)
        session.delete(item.action_twin)
        session.flush()
    # Okay, deletes the item.
    # The algorithm-specific routine performs the actual deletion plus the
    # reliability bookkeeping for users who flagged the item.
    if algo_name == su.ALGO_KARGER:
        sdk.delete_spam_item_by_author(item, session)
    elif algo_name == su.ALGO_DIRICHLET:
        sdd.delete_spam_item_by_author(item, session)
    else:
        raise Exception("Unknown algorithm!")
def add_item(page_url, item_id, user, session, parent_id=None, action_type=None,
             spam_detect_algo=su.ALGO_DIRICHLET):
    """ Creates an item and adds it to the db.

    When action_type is given (ACTION_UPVOTE / ACTION_DOWNVOTE) the new
    annotation doubles as a vote on its parent: a twin action row is created
    and the parent author's vote counters are adjusted. Raises if an action
    is requested without a parent_id, or for an unknown action_type.
    Returns the new annotation.
    """
    annot = ItemMixin.cls(page_url, item_id, user, parent_id=parent_id,
                          spam_detect_algo=spam_detect_algo)
    session.add(annot)
    # Flush so annot.id is populated before the twin action references it.
    session.flush()
    # If the annotation is action, then create and bind the action.
    if action_type is not None:
        if parent_id is None:
            raise Exception("New annotation which is action should have a parent!")
        act = ActionMixin.cls(parent_id, user.id, action_type,
                              datetime.utcnow(), item_twin_id=annot.id)
        item = ItemMixin.cls.get_item(parent_id, session)
        if action_type == ACTION_UPVOTE:
            item.author.mm_vote_counter += 1
            item.author.vote_counter += 1
        elif action_type == ACTION_DOWNVOTE:
            item.author.mm_vote_counter -= 1
            item.author.vote_counter -= 1
        else:
            raise Exception("Action should be whether upvote or donwvote!")
        session.add(act)
        session.flush()
    return annot
def get_add_item(page_url, item_id, user, session, parent_id=None,
                 action_type=None, spam_detect_algo=su.ALGO_DIRICHLET):
    """ Return the annotation with item_id, creating it first (via add_item)
    if it does not exist yet."""
    annot = ItemMixin.cls.get_item(item_id, session)
    # If annotation does not exist then create it.
    if annot is None:
        annot = add_item(page_url, item_id, user, session, parent_id=parent_id,
                         action_type=action_type, spam_detect_algo=spam_detect_algo)
    return annot
def delete_item(item, session):
# If the item is action, then delete this action and then delete the item.
if item.children is not None and len(item.children) != 0:
# We cannot delete the item, it has subitems
print 'childred', item.children
print 'inside'
return
if item.action_twin is not None:
if item.action_twin.type == ACTION_UPVOTE:
item.parent.author.mm_vote_counter -= 1
item.parent.author.vote_counter -= 1
elif item.action_twin.type == ACTION_DOWNVOTE:
item.parent.author.mm_vote_counter += 1
item.parent.author.vote_counter += 1
else:
raise Exception("Unknown action: %s" % item.action_twin)
session.delete(item.action_twin)
session.delete(item)
session.flush()
def get_add_user(user_id, session):
    """ Return the user with *user_id* (a string), creating the record
    first if it does not exist yet."""
    user = UserMixin.cls.get_user(user_id, session)
    if user is not None:
        return user
    # No record yet: create one and flush so it is queryable immediately.
    user = UserMixin.cls(user_id)
    session.add(user)
    session.flush()
    return user
def upvote(item, user, session):
    """ Register an upvote on *item* by *user*; idempotent, and replaces
    an existing downvote by the same user."""
    # Checks whether the user has upvoted the item
    upvote = ActionMixin.cls.get_action(item.id, user.id, ACTION_UPVOTE, session)
    if upvote is not None:
        # The item has been upvoted by the user.
        return
    # Undo downvote if it exists.
    undo_downvote(item, user, session)
    # Okay, upvoting fresh
    act = ActionMixin.cls(item.id, user.id, ACTION_UPVOTE, datetime.utcnow())
    # Increase item author's vote counter.
    item.author.vote_counter += 1
    # An upvote also counts as a ham (not-spam) signal for the item.
    raise_ham_flag(item, user, session)
    session.add(act)
    session.flush()
def downvote(item, user, session):
    """ Register a downvote on *item* by *user*; idempotent, and replaces
    an existing upvote by the same user."""
    downvote = ActionMixin.cls.get_action(item.id, user.id, ACTION_DOWNVOTE, session)
    if downvote is not None:
        # Already downvoted by this user; nothing to do.
        return
    # Undo upvote if it exists.
    undo_upvote(item, user, session)
    # Downvoting
    act = ActionMixin.cls(item.id, user.id, ACTION_DOWNVOTE, datetime.utcnow())
    # Decrease item author's vote counter
    item.author.vote_counter -= 1
    # NOTE(review): unlike upvote() (which raises a ham flag), no spam flag
    # is raised here -- confirm this asymmetry is intentional.
    session.add(act)
    session.flush()
def undo_upvote(item, user, session):
    """ Remove *user*'s upvote on *item*, if any, and roll back its effects
    (the author's vote counter and the implicit ham flag)."""
    upvote = ActionMixin.cls.get_action(item.id, user.id, ACTION_UPVOTE, session)
    if upvote is None:
        # Nothing to do
        return
    item.author.vote_counter -= 1
    # upvote() raised a ham flag, so that flag has to be undone as well;
    # the module-level SPAM_ALGO selects which detector recorded it.
    if SPAM_ALGO == su.ALGO_KARGER:
        sdk._undo_spam_ham_flag(item, user, session, spam_flag=False)
    elif SPAM_ALGO == su.ALGO_DIRICHLET:
        sdd._undo_spam_ham_flag(item, user, session, spam_flag=False)
    else:
        raise Exception("unknown algorithm")
    session.delete(upvote)
    session.flush()
def undo_downvote(item, user, session):
    """ Remove *user*'s downvote on *item*, if any, restoring the author's
    vote counter."""
    action = ActionMixin.cls.get_action(item.id, user.id, ACTION_DOWNVOTE,
                                        session)
    if action is None:
        # No downvote recorded; nothing to undo.
        return
    item.author.vote_counter += 1
    session.delete(action)
    session.flush()
| {
"content_hash": "13514822c307c0a7a68ea05eca6d564c",
"timestamp": "",
"source": "github",
"line_count": 236,
"max_line_length": 85,
"avg_line_length": 34.855932203389834,
"alnum_prop": 0.6511062484804279,
"repo_name": "mshavlovsky/mannord",
"id": "c35dff992a67fc858758c81ba0aedfbd94e8636a",
"size": "8226",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mannord/api.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "102440"
},
{
"name": "Shell",
"bytes": "146"
}
],
"symlink_target": ""
} |
"""Part of the Keras training engine related to distributed training.
"""
# pylint: disable=protected-access
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.distribute import distribute_coordinator as dc
from tensorflow.python.distribute import distribution_strategy_context
from tensorflow.python.distribute import input_lib
from tensorflow.python.distribute import reduce_util as ds_reduce_util
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.keras import backend as K
from tensorflow.python.keras import callbacks as cbks
from tensorflow.python.keras.distribute import distributed_training_utils as dist_utils
from tensorflow.python.keras.engine import partial_batch_padding_handler as padding_util
from tensorflow.python.keras.engine import training_arrays
from tensorflow.python.keras.engine import training_utils
from tensorflow.python.keras.utils.generic_utils import Progbar
from tensorflow.python.keras.utils.mode_keys import ModeKeys
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.platform import tf_logging as logging
def _per_replica_execution_function(model, mode):
exec_func = model._make_execution_function(mode)
return (exec_func.inputs, exec_func.outputs, exec_func.updates_op,
exec_func.session_kwargs)
def _build_model(strategy, model, mode, inputs, targets=None):
  """Materialize the distributed network for *model* under *strategy*.

  Compile-time distribution clones the model per replica; otherwise the
  distributed network is built in place.
  """
  if model._compile_distribution:
    dist_utils.clone_model_on_replicas(
        model, strategy, mode, inputs=inputs, targets=targets)
  else:
    dist_utils._build_distributed_network(
        model, strategy, mode, inputs, targets)
def _make_train_step_fn(model, mode, strategy, output_labels):
  """Create step fn.
  Arguments:
    model: a Keras Model instance.
    mode: One of ModeKeys.TRAIN/ModeKeys.TEST/ModeKeys.PREDICT.
    strategy: a `tf.distribute.Strategy` instance.
    output_labels: the output labels for the step function.
  Returns:
    A step function to run by `tf.distribute.Strategy`.
  """
  def _step_fn(ctx, inputs):
    """A step fn that returns update ops."""
    # `inputs` may be a (features, targets) pair or features only.
    if isinstance(inputs, (tuple, list)) and len(inputs) == 2:
      inputs, targets = inputs
    else:
      targets = None
    # When input feature is a dictionary of tensors, dictionary is flattended
    # to an array and passed as a model input. This results in input mismatch
    # when model input layer names are not sorted in alphabetical order as
    # `nest.flatten()`sorts dictionary elements by keys. As so, transform input
    # tensors into an array and order it along `model._feed_input_names`.
    if isinstance(inputs, dict):
      inputs = [inputs[input_name] for input_name in model._feed_input_names]
    _build_model(strategy, model, mode, inputs, targets)
    # Build the per-replica execution functions and merge their pieces into
    # a single Keras backend function spanning all replicas.
    (grouped_inputs, grouped_outputs, grouped_updates,
     grouped_session_args) = strategy.extended.call_for_each_replica(
         _per_replica_execution_function,
         args=(dist_utils.get_distributed_model(model, mode), mode))
    (all_inputs, all_outputs, all_updates,
     all_session_args) = dist_utils.unwrap_values(strategy, grouped_inputs,
                                                  grouped_outputs,
                                                  grouped_updates,
                                                  grouped_session_args)
    combined_fn = K.function(
        all_inputs,
        all_outputs,
        updates=all_updates,
        name='distributed_' + str(mode) + '_function',
        **all_session_args)
    # Register each labeled output with the appropriate cross-replica
    # reduction so the strategy can aggregate last-step outputs.
    for label, output in zip(output_labels, combined_fn.outputs):
      if label == 'loss':
        reduce_op = ds_reduce_util.ReduceOp.SUM
      else:
        # We reduce all other metrics using mean for now. This is temporary
        # workaround until new metrics are in place.
        reduce_op = ds_reduce_util.ReduceOp.MEAN
      ctx.set_last_step_output(label, output, reduce_op)
    # TODO(priyag, sourabhbajaj): Ignoring these things from the combined_fn:
    # feed_dict, session kwargs, run options, run_metadata for now. These should
    # be handled appropriately
    return combined_fn.updates_op
  return _step_fn
def experimental_tpu_fit_loop(model,
                              dataset,
                              epochs=100,
                              verbose=1,
                              callbacks=None,
                              initial_epoch=0,
                              steps_per_epoch=None,
                              val_dataset=None,
                              validation_steps=None,
                              validation_freq=1):
  """Fit loop for training with TPU tf.distribute.Strategy.
  Arguments:
    model: Keras Model instance.
    dataset: Dataset that returns inputs and targets
    epochs: Number of times to iterate over the data
    verbose: Integer, Verbosity mode, 0, 1 or 2
    callbacks: List of callbacks to be called during training
    initial_epoch: Epoch at which to start training
        (useful for resuming a previous training run)
    steps_per_epoch: Total number of steps (batches of samples)
        before declaring one epoch finished and starting the
        next epoch. Ignored with the default value of `None`.
    val_dataset: Dataset for validation data.
    validation_steps: Number of steps to run validation for
        (only if doing validation from data tensors).
        Ignored with the default value of `None`.
    validation_freq: Only relevant if validation data is provided. Integer or
        `collections.abc.Container` instance (e.g. list, tuple, etc.). If an
        integer, specifies how many training epochs to run before a new
        validation run is performed, e.g. `validation_freq=2` runs
        validation every 2 epochs. If a Container, specifies the epochs on
        which to run validation, e.g. `validation_freq=[1, 2, 10]` runs
        validation at the end of the 1st, 2nd, and 10th epochs.
  Returns:
    Returns `None`.
  Raises:
    ValueError: in case of invalid arguments.
  """
  mode = ModeKeys.TRAIN
  current_strategy = model._distribution_strategy
  iteration_value = min(steps_per_epoch,
                        current_strategy.extended.steps_per_run)
  steps_per_run = K.variable(
      value=iteration_value,
      dtype='int32',
      name='steps_per_run')
  # TODO(fchollet): add support for `steps_per_epoch=None` in TPU loops.
  iterator = dist_utils.get_iterator(dataset, current_strategy)
  scope = dist_utils.distributed_scope(
      strategy=current_strategy, learning_phase=1)
  scope.__enter__()
  out_labels = model.metrics_names or []
  step_fn = _make_train_step_fn(model, ModeKeys.TRAIN, current_strategy,
                                out_labels)
  # Add initial dummy values for loss and other metric tensors.
  initial_loop_values = {}
  initial_loop_values['loss'] = constant_op.constant(1e7)
  for m in model._get_training_eval_metrics():
    tensor = m.result()
    initial_loop_values[m.name] = array_ops.zeros(tensor.shape, tensor.dtype)
  ctx = current_strategy.extended.experimental_run_steps_on_iterator(
      step_fn, iterator, iterations=steps_per_run,
      initial_loop_values=initial_loop_values)
  train_op = ctx.run_op
  output_tensors = ctx.last_step_outputs
  do_validation = bool(validation_steps)
  if model._compile_distribution:
    dist_utils._copy_weights_to_distributed_model(model, mode)
  callbacks = cbks.configure_callbacks(
      callbacks,
      model,
      do_validation=do_validation,
      epochs=epochs,
      steps_per_epoch=steps_per_epoch,
      verbose=verbose,
      count_mode='steps',
      mode=mode)
  # Calculate the steps each time on the device.
  steps_to_run = ([current_strategy.extended.steps_per_run] *
                  (steps_per_epoch //
                   current_strategy.extended.steps_per_run))
  if steps_per_epoch % current_strategy.extended.steps_per_run:
    steps_to_run.append(
        steps_per_epoch % current_strategy.extended.steps_per_run)
  target_steps = len(steps_to_run)
  callbacks._call_begin_hook(mode)
  initial_epoch = model._maybe_load_initial_epoch_from_ckpt(initial_epoch, mode)
  for epoch in range(initial_epoch, epochs):
    dist_utils._reset_metrics(model)
    callbacks.on_epoch_begin(epoch)
    epoch_logs = {}
    step_index = 0
    prev_step_count = None
    current_step = 0
    while current_step < target_steps:
      step_count = steps_to_run[current_step]
      batch_logs = {'batch': step_index, 'size': 1, 'num_steps': step_count}
      callbacks._call_batch_hook(mode, 'begin', step_index, batch_logs)
      if prev_step_count is None or step_count != prev_step_count:
        K.get_session().run(steps_per_run.assign(step_count))
        prev_step_count = step_count
      try:
        _, outputs = K.batch_get_value([train_op, output_tensors])
      except errors.OutOfRangeError:
        # BUGFIX: `'... %d ...' % steps_per_epoch * epochs` first formatted
        # with `steps_per_epoch` alone and then *repeated the string* `epochs`
        # times, since `%` and `*` share precedence and associate left to
        # right. Parenthesize the product so the intended count is reported.
        logging.warning('Your dataset iterator ran out of data; '
                        'interrupting training. Make sure that your dataset '
                        'can generate at least `steps_per_epoch * epochs` '
                        'batches (in this case, %d batches).' %
                        (steps_per_epoch * epochs))
        break
      batch_logs.update(outputs)
      callbacks._call_batch_hook(mode, 'end', step_index, batch_logs)
      step_index = step_index + step_count
      current_step += 1
      if callbacks.model.stop_training:
        break
    if (do_validation and
        training_utils.should_run_validation(validation_freq, epoch)):
      logging.info('Running validation at fit epoch: %s', epoch)
      if model._compile_distribution:
        # Since we create a new clone from the original model we need to copy
        # the weights back to the original model before we can run validation.
        dist_utils._copy_weights_to_original_model(model, ModeKeys.TRAIN)
      val_outs = experimental_tpu_test_loop(  # pylint: disable=undefined-variable
          model,
          val_dataset,
          steps=validation_steps,
          verbose=verbose,
          callbacks=callbacks)
      if not isinstance(val_outs, list):
        val_outs = [val_outs]
      # Same labels assumed.
      for label, val_out in zip(out_labels, val_outs):
        epoch_logs['val_' + label] = val_out
    callbacks.on_epoch_end(epoch, epoch_logs)
    if callbacks.model.stop_training:
      break
  model._successful_loop_finish = True
  callbacks._call_end_hook(mode)
  if model._compile_distribution:
    # Copy the weights back from the replicated model to the original model.
    dist_utils._copy_weights_to_original_model(model, ModeKeys.TRAIN)
  scope.__exit__(None, None, None)
  return model.history
def experimental_tpu_test_loop(model,
                               dataset,
                               verbose=0,
                               steps=None,
                               callbacks=None):
  """Test loop for evaluating with TPU tf.distribute.Strategy.
  Arguments:
    model: Keras Model instance.
    dataset: Dataset for input data.
    verbose: Integer, Verbosity mode 0 or 1.
    steps: Total number of steps (batches of samples)
        before declaring predictions finished.
        Ignored with the default value of `None`.
    callbacks: List of callbacks to be called during training
  Returns:
    Scalar loss (if the model has a single output and no metrics)
    or list of scalars (if the model has multiple outputs
    and/or metrics). The attribute `model.metrics_names` will give you
    the display labels for the outputs.
  """
  mode = ModeKeys.TEST
  current_strategy = model._distribution_strategy
  iterator = dist_utils.get_iterator(dataset, current_strategy)
  scope = dist_utils.distributed_scope(
      strategy=current_strategy, learning_phase=0)
  scope.__enter__()
  out_labels = model.metrics_names
  def _test_step_fn(inputs):
    """A fn that returns output of single test step."""
    if isinstance(inputs, (tuple, list)) and len(inputs) == 2:
      inputs, targets = inputs
    else:
      targets = None
    (distribution_strategy_context.get_replica_context().merge_call(
        _build_model, args=(model, mode, inputs, targets)))
    (_, outputs, updates, _) = _per_replica_execution_function(
        dist_utils.get_distributed_model(model, mode), mode)
    with ops.control_dependencies([updates]):
      return [array_ops.identity(out) for out in outputs]
  test_input_data = iterator.get_next()
  per_replica_outputs = current_strategy.run(
      _test_step_fn, args=(test_input_data,))
  output_tensors = {}
  for label, output in zip(out_labels, per_replica_outputs):
    if label == 'loss':
      reduce_op = ds_reduce_util.ReduceOp.SUM
    else:
      # We reduce all other metrics using mean for now. This is temporary
      # workaround until new metrics are in place.
      reduce_op = ds_reduce_util.ReduceOp.MEAN
    output_tensors[label] = current_strategy.reduce(reduce_op, output,
                                                    axis=None)
  test_op = control_flow_ops.group(list(output_tensors.values()))
  if verbose >= 1:
    progbar = Progbar(target=steps)
  if model._compile_distribution:
    dist_utils._copy_weights_to_distributed_model(model, mode)
  dist_utils._reset_metrics(model)
  callbacks = cbks.configure_callbacks(
      callbacks,
      model,
      do_validation=False,
      epochs=1,
      steps_per_epoch=steps,
      verbose=verbose,
      count_mode='steps',
      mode=ModeKeys.TEST)
  callbacks._call_begin_hook(mode)
  outs = [0.] * len(model.metrics_names)
  if steps is not None:
    target_steps = steps
  else:
    raise ValueError('Number of steps could not be inferred from the data, '
                     'please pass the steps argument.')
  current_step = 0
  while current_step < target_steps:
    batch_logs = {'batch': current_step, 'size': 1}
    callbacks._call_batch_hook(mode, 'begin', current_step, batch_logs)
    try:
      _, batch_outs = K.batch_get_value([test_op, output_tensors])
    except errors.OutOfRangeError:
      # BUGFIX: the tail of this message used to be a separate, discarded
      # expression statement, so the logged warning was truncated.
      # Concatenate both pieces into one string.
      warning_msg = ('Make sure that your dataset can generate at least '
                     '`steps` batches (in this case, {} batches).'.format(steps))
      logging.warning('Your dataset iterator ran out of data; '
                      'interrupting evaluation. ' + warning_msg)
      target_steps = current_step
      break
    for i, label in enumerate(model.metrics_names):
      if i == 0:
        # Loss is stateless metrics.
        outs[i] += batch_outs[label]
      else:
        # For all stateful metrics, the aggregation is handled by mirrored vars.
        outs[i] = batch_outs[label]
    batch_logs = cbks.make_logs(model, batch_logs, outs, mode)
    callbacks._call_batch_hook(mode, 'end', current_step, batch_logs)
    if verbose == 1:
      progbar.update(current_step + 1)
    current_step += 1
  if verbose >= 1:
    # Progress bar finishes at the end.
    progbar.update(target_steps)
  callbacks._call_end_hook(mode)
  scope.__exit__(None, None, None)
  # BUGFIX: `len(outs) >= 0` is always true and would IndexError on an empty
  # list; guard on the list actually having a loss entry.
  if outs:
    outs[0] /= (target_steps)
  if len(outs) == 1:
    return outs[0]
  return outs
def experimental_tpu_predict_loop(model,
                                  dataset,
                                  verbose=0,
                                  steps=None,
                                  callbacks=None):
  """Predict loop for predicting with TPU tf.distribute.Strategy.
  Arguments:
    model: Keras Model instance.
    dataset: Dataset for input data.
    verbose: Integer, Verbosity mode 0 or 1.
    steps: Total number of steps (batches of samples)
        before declaring `_predict_loop` finished.
        Ignored with the default value of `None`.
    callbacks: List of callbacks to be called during training
  Returns:
    Array of predictions (if the model has a single output)
    or list of arrays of predictions
    (if the model has multiple outputs).
  """
  mode = ModeKeys.PREDICT
  dataset_fully_shaped = dist_utils.is_dataset_shape_fully_defined(dataset)
  padding_handler = None
  if not dataset_fully_shaped:
    # TODO(hongjunchoi): Investigate whether operations from
    # PartialBatchPaddingHandler are unnecessarily pruned out
    # during graph optimization.
    padding_handler = padding_util.PartialBatchPaddingHandler(
        model._feed_output_shapes)
    batch_size, _, prefetch_buffer = input_lib._get_dataset_attributes(dataset)
    padding_handler.padded_batch_size = batch_size
    padding_handler.padding_mask = dataset.reduce(padding_handler.padding_mask,
                                                  padding_handler.update_mask)
    dataset = dataset.map(padding_handler.pad_batch)
    dataset = dataset.unbatch()
    # Upon this point, it is guaranteed that the dataset does not
    # have partial batches. Thus, we set `drop_remainder=True` to
    # get static shape information about the elements in the dataset.
    dataset = dataset.batch(batch_size, drop_remainder=True)
    if prefetch_buffer is not None:
      dataset = dataset.prefetch(prefetch_buffer)
  current_strategy = model._distribution_strategy
  iterator = dist_utils.get_iterator(dataset, current_strategy)
  scope = dist_utils.distributed_scope(
      strategy=current_strategy, learning_phase=0)
  scope.__enter__()
  def _predict_step_fn(inputs):
    """A fn that returns output of single prediction step."""
    (distribution_strategy_context.get_replica_context().merge_call(
        _build_model, args=(model, mode, inputs)))
    (_, outputs, updates, _) = _per_replica_execution_function(
        dist_utils.get_distributed_model(model, mode), mode)
    with ops.control_dependencies([updates]):
      return [array_ops.identity(out) for out in outputs]
  # TODO(hongjunchoi): When numpy array is passed as an input to `predict()`
  # use numpy arrays directly to avoid cumulating unnecessary input pipeline
  # ops.
  predict_input_data = iterator.get_next()
  per_replica_outputs = current_strategy.run(
      _predict_step_fn, args=(predict_input_data,))
  output_tensors = dist_utils.flatten_per_replica_values(
      current_strategy, per_replica_outputs)
  if verbose >= 1:
    progbar = Progbar(target=steps)
  if model._compile_distribution:
    dist_utils._copy_weights_to_distributed_model(model, mode)
  dist_utils._reset_metrics(model)
  callbacks = cbks.configure_callbacks(
      callbacks,
      model,
      do_validation=False,
      epochs=1,
      steps_per_epoch=steps,
      verbose=verbose,
      count_mode='steps',
      mode=mode)
  callbacks._call_begin_hook(mode)
  # Since we do not know how many samples we will see, we cannot pre-allocate
  # the returned Numpy arrays. Instead, we store one array per batch seen
  # and concatenate them upon returning.
  num_model_outputs = len(model.output_names)
  unconcatenated_outs = [[] for _ in range(num_model_outputs)]
  if steps is not None:
    target_steps = steps
  else:
    raise ValueError('Number of steps could not be inferred from the data, '
                     'please pass the steps argument.')
  current_step = 0
  while current_step < target_steps:
    batch_logs = {'batch': current_step, 'size': 1}
    callbacks._call_batch_hook(mode, 'begin', current_step, batch_logs)
    try:
      predict_ops = control_flow_ops.group(output_tensors)
      _, batch_outs = K.batch_get_value([predict_ops, output_tensors])
    except errors.OutOfRangeError:
      # BUGFIX: the tail of this message used to be a separate, discarded
      # expression statement, so the logged warning was truncated.
      # Concatenate both pieces into one string.
      warning_msg = ('Make sure that your dataset can generate at least '
                     '`steps` batches (in this case, {} batches).'.format(steps))
      logging.warning('Your dataset iterator ran out of data; '
                      'interrupting evaluation. ' + warning_msg)
      break
    # TODO(priyag): maybe need to unwrap the outputs first for MirroredStrategy.
    for i in range(num_model_outputs):
      output_start_index = i * current_strategy.num_replicas_in_sync
      output_end_index = (
          output_start_index + current_strategy.num_replicas_in_sync)
      single_model_output = batch_outs[output_start_index:output_end_index]
      unconcatenated_outs[i].extend(single_model_output)
    batch_logs = cbks.make_logs(model, batch_logs, batch_outs, mode)
    callbacks._call_batch_hook(mode, 'end', current_step, batch_logs)
    if verbose == 1:
      progbar.update(current_step + 1)
    current_step += 1
  if verbose >= 1:
    # Progress bar finishes at the end.
    progbar.update(current_step)
  callbacks._call_end_hook(mode)
  scope.__exit__(None, None, None)
  if len(unconcatenated_outs) == 1:
    prediction_result = np.concatenate(unconcatenated_outs[0], axis=0)
  else:
    prediction_result = [
        np.concatenate(out, axis=0) for out in unconcatenated_outs
    ]
  if padding_handler:
    prediction_result = padding_handler.apply_mask(prediction_result)
  return prediction_result
class DistributionSingleWorkerTrainingLoop(training_utils.TrainingLoop):
"""Training loop for distribution strategy with single worker."""
  def fit(self,
          model,
          x=None,
          y=None,
          batch_size=None,
          epochs=1,
          verbose=1,
          callbacks=None,
          validation_split=0.,
          validation_data=None,
          shuffle=True,
          class_weight=None,
          sample_weight=None,
          initial_epoch=0,
          steps_per_epoch=None,
          validation_steps=None,
          validation_freq=1,
          **kwargs):
    """Fit loop for Distribution Strategies.

    Validates and standardizes the inputs for the model's distribution
    strategy, then dispatches either to the TPU graph-mode fit loop or to
    the generic array fit loop.
    """
    dist_utils.validate_callbacks(input_callbacks=callbacks,
                                  optimizer=model.optimizer)
    dist_utils.validate_inputs(x, y)
    batch_size, steps_per_epoch = dist_utils.process_batch_and_step_size(
        model._distribution_strategy,
        x,
        batch_size,
        steps_per_epoch,
        ModeKeys.TRAIN,
        validation_split=validation_split)
    batch_size = model._validate_or_infer_batch_size(
        batch_size, steps_per_epoch, x)
    dataset = model._distribution_standardize_user_data(
        x, y,
        sample_weight=sample_weight,
        class_weight=class_weight,
        batch_size=batch_size,
        validation_split=validation_split,
        shuffle=shuffle,
        epochs=epochs)
    if not dist_utils.is_distributing_by_cloning(model):
      with model._distribution_strategy.scope():
        (dataset, _, _) = model._standardize_user_data(
            dataset,
            sample_weight=sample_weight,
            class_weight=class_weight,
            batch_size=batch_size,
            validation_split=validation_split,
            shuffle=shuffle)
    # Prepare the (optional) validation dataset the same way as the
    # training data; validation_split itself is unsupported here.
    val_dataset = None
    if validation_data:
      val_x, val_y, val_sample_weights = training_utils.unpack_validation_data(
          validation_data)
      dist_utils.validate_inputs(val_x, val_y)
      _, validation_steps = dist_utils.process_batch_and_step_size(
          model._distribution_strategy, val_x, batch_size, validation_steps,
          ModeKeys.TEST)
      val_dataset = model._distribution_standardize_user_data(
          val_x, val_y,
          sample_weight=val_sample_weights,
          class_weight=None,
          batch_size=batch_size,
          validation_split=validation_split,
          shuffle=shuffle,
          allow_partial_batch=True)
    elif validation_split:
      raise ValueError('validation_split argument is not supported with '
                       'distribution strategies.')
    if dist_utils.is_tpu_strategy(model._distribution_strategy):
      steps_per_epoch = training_utils.infer_steps_for_dataset(
          model, dataset, steps_per_epoch, epochs, steps_name='steps_per_epoch')
      if steps_per_epoch is None:
        raise ValueError('Number of steps could not be inferred from the data, '
                         'please pass the steps_per_epoch argument.')
      if not context.executing_eagerly():
        # Run TPU training in a custom loop in graph mode.
        return experimental_tpu_fit_loop(
            model,
            dataset,
            epochs=epochs,
            verbose=verbose,
            callbacks=callbacks,
            val_dataset=val_dataset,
            initial_epoch=initial_epoch,
            steps_per_epoch=steps_per_epoch,
            validation_steps=validation_steps,
            validation_freq=validation_freq)
    # Non-TPU (or eager TPU) path: generic fit loop over the dataset.
    return training_arrays.fit_loop(
        model,
        dataset,
        batch_size=batch_size,
        epochs=epochs,
        verbose=verbose,
        callbacks=callbacks,
        val_inputs=val_dataset,
        shuffle=shuffle,
        initial_epoch=initial_epoch,
        steps_per_epoch=steps_per_epoch,
        validation_steps=validation_steps,
        validation_freq=validation_freq,
        steps_name='steps_per_epoch')
  def evaluate(self,
               model,
               x=None,
               y=None,
               batch_size=None,
               verbose=1,
               sample_weight=None,
               steps=None,
               callbacks=None,
               **kwargs):
    """Evaluate loop for Distribution Strategies.

    Validates the inputs, standardizes them into a dataset under the
    model's distribution strategy, and dispatches either to the TPU
    graph-mode test loop or to the generic array test loop.
    """
    dist_utils.validate_inputs(x, y)
    # Resolve batch size and step count for the strategy in TEST mode.
    batch_size, steps = dist_utils.process_batch_and_step_size(
        model._distribution_strategy, x, batch_size, steps, ModeKeys.TEST)
    batch_size = model._validate_or_infer_batch_size(batch_size, steps, x)
    dataset = model._distribution_standardize_user_data(
        x, y,
        sample_weight=sample_weight,
        batch_size=batch_size,
        allow_partial_batch=True)
    if dist_utils.is_tpu_strategy(model._distribution_strategy):
      # TPU requires an explicit number of steps; try to infer it from
      # the dataset before giving up.
      steps = training_utils.infer_steps_for_dataset(
          model, dataset, steps, steps_name='steps')
      if steps is None:
        raise ValueError('Number of steps could not be inferred from the data, '
                         'please pass the steps argument.')
      if not context.executing_eagerly():
        # Run TPU evaluation in a custom loop in graph mode.
        return experimental_tpu_test_loop(
            model, dataset, verbose=verbose, steps=steps, callbacks=callbacks)
    return training_arrays.test_loop(
        model,
        inputs=dataset,
        batch_size=batch_size,
        verbose=verbose,
        steps=steps,
        callbacks=callbacks)
  def predict(self,
              model,
              x,
              batch_size=None,
              verbose=0,
              steps=None,
              callbacks=None,
              **kwargs):
    """Predict loop for Distribution Strategies.

    Mirrors `evaluate` but without targets or sample weights: inputs are
    standardized into a dataset, then dispatched to the TPU graph-mode
    predict loop or the generic array predict loop.
    """
    # Prediction has no targets, so validate x alone.
    dist_utils.validate_inputs(x=x, y=None)
    batch_size, steps = dist_utils.process_batch_and_step_size(
        model._distribution_strategy, x, batch_size, steps, ModeKeys.PREDICT)
    batch_size = model._validate_or_infer_batch_size(batch_size, steps, x)
    dataset = model._distribution_standardize_user_data(
        x,
        batch_size=batch_size,
        allow_partial_batch=True)
    if dist_utils.is_tpu_strategy(model._distribution_strategy):
      # TPU requires a concrete step count; attempt inference first.
      steps = training_utils.infer_steps_for_dataset(
          model, dataset, steps, steps_name='steps')
      if steps is None:
        raise ValueError('Number of steps could not be inferred from the data, '
                         'please pass the steps argument.')
      if not context.executing_eagerly():
        # Graph-mode TPU prediction uses a dedicated loop.
        return experimental_tpu_predict_loop(
            model, dataset, verbose=verbose, steps=steps, callbacks=callbacks)
    return training_arrays.predict_loop(
        model,
        dataset,
        batch_size=batch_size,
        verbose=verbose,
        steps=steps,
        callbacks=callbacks)
def _train_with_multi_worker(method):
  """Decorator that handles multi worker training with distribution strategy.

  Wraps a single-worker training-loop method so that it is executed by the
  distribute coordinator in independent-worker mode, with the callback list
  filtered down to what is valid on a distributed worker.
  """
  def wrapper(model, **kwargs):
    def _worker_fn(_):
      original_callbacks = kwargs.pop('callbacks', None)
      kwargs['callbacks'] = dist_utils.filter_distributed_callbacks(
          original_callbacks, model)
      return method(model, **kwargs)
    return dc.run_distribute_coordinator(
        _worker_fn,
        model._distribution_strategy,
        mode=dc.CoordinatorMode.INDEPENDENT_WORKER)
  return wrapper
class DistributionMultiWorkerTrainingLoop(training_utils.TrainingLoop):
  """Training loop for distribution strategy with multiple workers.

  Delegates to a wrapped single-worker loop; fit/evaluate are routed
  through the multi-worker coordinator, predict is passed straight
  through.
  """

  def __init__(self, single_worker_loop):
    self._single_worker_loop = single_worker_loop

  def fit(self, *args, **kwargs):
    distributed_fit = _train_with_multi_worker(self._single_worker_loop.fit)
    return distributed_fit(*args, **kwargs)

  def evaluate(self, *args, **kwargs):
    distributed_eval = _train_with_multi_worker(
        self._single_worker_loop.evaluate)
    return distributed_eval(*args, **kwargs)

  def predict(self, *args, **kwargs):
    # Currently predict is still using the single worker implementation.
    return self._single_worker_loop.predict(*args, **kwargs)
| {
"content_hash": "300165c2bbf21be9ce7299483707babd",
"timestamp": "",
"source": "github",
"line_count": 784,
"max_line_length": 88,
"avg_line_length": 37.25765306122449,
"alnum_prop": 0.6483738445737761,
"repo_name": "aldian/tensorflow",
"id": "b33a90bd5330d336bdf2d500d43f3c8a6e4faaee",
"size": "29899",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tensorflow/python/keras/engine/training_distributed.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "8458"
},
{
"name": "C",
"bytes": "201402"
},
{
"name": "C++",
"bytes": "29667924"
},
{
"name": "CMake",
"bytes": "647100"
},
{
"name": "Go",
"bytes": "976514"
},
{
"name": "Java",
"bytes": "412117"
},
{
"name": "Jupyter Notebook",
"bytes": "1833675"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "38128"
},
{
"name": "Objective-C",
"bytes": "7056"
},
{
"name": "Objective-C++",
"bytes": "63210"
},
{
"name": "Perl",
"bytes": "6715"
},
{
"name": "Protocol Buffer",
"bytes": "275733"
},
{
"name": "PureBasic",
"bytes": "24932"
},
{
"name": "Python",
"bytes": "26424665"
},
{
"name": "Ruby",
"bytes": "327"
},
{
"name": "Shell",
"bytes": "373109"
}
],
"symlink_target": ""
} |
import pprint, socket
from pysandesh.sandesh_base import *
from pysandesh.connection_info import ConnectionState
from gen_py.broadview.ttypes import \
PRouterBroadViewInfo, \
Device, \
IngressPortPriorityGroup, \
IngressPortServicePool, \
IngressServicePool, \
EgressPortServicePool, \
EgressServicePool, \
EgressUcQueue, \
EgressUcQueueGroup, \
EgressMcQueue, \
EgressCpuQueue, \
EgressRqeQueue
from sandesh_common.vns.ttypes import Module, NodeType
from sandesh_common.vns.constants import ModuleNames, CategoryNames,\
ModuleCategoryMap, Module2NodeType, NodeTypeNames, ModuleIds,\
INSTANCE_ID_DEFAULT
class BroadViewOL(object):
    """Sandesh-based UVE sender for BroadView telemetry data.

    Initializes the sandesh generator for the CONTRAIL_BROADVIEW module,
    maps BroadView "realm" names (hyphenated) to the camelCase field names
    of PRouterBroadViewInfo, and converts raw dicts into the generated
    thrift types before sending.

    NOTE: this is Python 2 code (`map` is relied on to return a list).
    """

    # BroadView realm names as they appear on the wire (hyphenated).
    _raw_params = (
        "device",
        "ingress-service-pool",
        "ingress-port-service-pool",
        "ingress-port-priority-group",
        "egress-port-service-pool",
        "egress-service-pool",
        "egress-uc-queue",
        "egress-uc-queue-group",
        "egress-mc-queue",
        "egress-cpu-queue",
        "egress-rqe-queue",
    )

    def __init__(self, conf):
        """Initialize sandesh generator and logging from the given config.

        :param conf: configuration object providing collectors, ports,
            logging options and the sandesh send rate limit.
        """
        self._conf = conf
        module = Module.CONTRAIL_BROADVIEW
        self._moduleid = ModuleNames[module]
        node_type = Module2NodeType[module]
        self._node_type_name = NodeTypeNames[node_type]
        self._hostname = socket.gethostname()
        self._instance_id = '0'
        # Rate limit must be applied before the generator is initialized.
        if self._conf.sandesh_send_rate_limit() is not None:
            SandeshSystem.set_sandesh_send_rate_limit( \
                self._conf.sandesh_send_rate_limit())
        sandesh_global.init_generator(self._moduleid, self._hostname,
                                      self._node_type_name,
                                      self._instance_id,
                                      self._conf.collectors(),
                                      self._node_type_name,
                                      self._conf.http_port(),
                                      ['contrail_broadview.gen_py'])
        sandesh_global.set_logging_params(
            enable_local_log=self._conf.log_local(),
            category=self._conf.log_category(),
            level=self._conf.log_level(),
            file=self._conf.log_file(),
            enable_syslog=self._conf.use_syslog(),
            syslog_facility=self._conf.syslog_facility())
        self.mk_maps()

    def mk_maps(self):
        """Build the raw<->camelCase name maps for the realm names.

        e.g. 'ingress-service-pool' <-> 'ingressServicePool'.
        """
        self._r2s, self._s2r = {}, {}
        for rp in self._raw_params:
            x = rp.split('-')
            # First component stays lower-case, the rest are capitalized.
            n = ''.join([x[0]] + map(lambda p: p.capitalize(), x[1:]))
            self._r2s[rp] = n
            self._s2r[n] = rp

    def map_realm_name(self, realm):
        """Return the camelCase name for a raw realm, or None if unknown."""
        return self._r2s.get(realm, None)

    def get_raw_params(self):
        """Return the list of known raw (hyphenated) realm names."""
        return self._r2s.keys()

    def send(self, data):
        """Convert the raw dict into typed UVE objects and send it.

        Each camelCase key in `data` (except 'device') is assumed to hold a
        list of dicts, which are converted to the generated class of the
        same (capitalized) name.
        """
        # NOTE(review): debug print left in; consider removing for production.
        pprint.pprint(data)
        if 'device' in data:
            data['device'] = Device(data['device'])
        for prms in self._s2r.keys():
            if prms != 'device' and prms in data:
                cl = prms[0].upper() + prms[1:]
                # Look up the generated class by name. NOTE(review):
                # locals() here will not contain the imported classes, so
                # this effectively always falls through to globals().
                fn = locals().get(cl, globals().get(cl))
                # print cl, fn, data[prms]
                data[prms] = map(lambda x: fn(**x), data[prms])
        objlog = PRouterBroadViewInfo(**data)
        objlog.send()

    def delete(self, name):
        """Send a deletion marker UVE for the named pRouter."""
        PRouterBroadViewInfo(name=name, deleted=True).send()
| {
"content_hash": "56cf0789c1db55fcc2422f09b3b20880",
"timestamp": "",
"source": "github",
"line_count": 92,
"max_line_length": 70,
"avg_line_length": 37.65217391304348,
"alnum_prop": 0.5363741339491916,
"repo_name": "codilime/contrail-controller",
"id": "29ab99ed7ea7fc890c4886a5ef1275ffbe251db8",
"size": "3533",
"binary": false,
"copies": "3",
"ref": "refs/heads/windows3.1",
"path": "src/analytics/contrail-broadview/contrail_broadview/bv_uve.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "96717"
},
{
"name": "C++",
"bytes": "20662554"
},
{
"name": "CSS",
"bytes": "531"
},
{
"name": "GDB",
"bytes": "44610"
},
{
"name": "HTML",
"bytes": "519766"
},
{
"name": "Java",
"bytes": "171966"
},
{
"name": "LLVM",
"bytes": "2937"
},
{
"name": "Lua",
"bytes": "19459"
},
{
"name": "Makefile",
"bytes": "12449"
},
{
"name": "PowerShell",
"bytes": "1784"
},
{
"name": "Python",
"bytes": "5590763"
},
{
"name": "Roff",
"bytes": "40925"
},
{
"name": "Shell",
"bytes": "52721"
},
{
"name": "Thrift",
"bytes": "8382"
},
{
"name": "Yacc",
"bytes": "35530"
}
],
"symlink_target": ""
} |
"""Testing decorators module."""
from numpy.testing import assert_raises, assert_equal
from dipy.testing import assert_true
from dipy.testing.decorators import doctest_skip_parser
def test_skipper():
    """Exercise doctest_skip_parser's handling of '# skip if' markers.

    The parser reads module-level HAVE_* globals by name, so the test
    defines/deletes them in globals() around each call.
    """
    def f():
        pass
    # Docstring with conditional-skip markers keyed on HAVE_* globals.
    docstring = \
        """ Header
        >>> something # skip if not HAVE_AMODULE
        >>> something + else
        >>> a = 1 # skip if not HAVE_BMODULE
        >>> something2 # skip if HAVE_AMODULE
        """
    f.__doc__ = docstring
    global HAVE_AMODULE, HAVE_BMODULE
    # Case 1: AMODULE missing, BMODULE present -> lines gated on AMODULE
    # get '+SKIP', lines gated on BMODULE keep running.
    HAVE_AMODULE = False
    HAVE_BMODULE = True
    f2 = doctest_skip_parser(f)
    # The parser rewrites the docstring in place on the same function.
    assert_true(f is f2)
    assert_equal(f2.__doc__,
                 """ Header
        >>> something # doctest: +SKIP
        >>> something + else
        >>> a = 1
        >>> something2
        """)
    # Case 2: flip the flags and verify the opposite lines are skipped.
    HAVE_AMODULE = True
    HAVE_BMODULE = False
    f.__doc__ = docstring
    f2 = doctest_skip_parser(f)
    assert_true(f is f2)
    assert_equal(f2.__doc__,
                 """ Header
        >>> something
        >>> something + else
        >>> a = 1 # doctest: +SKIP
        >>> something2 # doctest: +SKIP
        """)
    # Case 3: referencing an undefined HAVE_* global must raise NameError.
    del HAVE_AMODULE
    f.__doc__ = docstring
    assert_raises(NameError, doctest_skip_parser, f)
| {
"content_hash": "2525d3500e4cc2e51a34ddab87946744",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 55,
"avg_line_length": 25.270833333333332,
"alnum_prop": 0.5539983511953833,
"repo_name": "FrancoisRheaultUS/dipy",
"id": "3b5747b9c35a0a49b238139b490344ba6d5ac3b7",
"size": "1213",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "dipy/testing/tests/test_decorators.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "2932"
},
{
"name": "Makefile",
"bytes": "3686"
},
{
"name": "Python",
"bytes": "3246086"
}
],
"symlink_target": ""
} |
from RGT.XML.SVG.Filters.baseFilterNode import BaseFilterNode
from types import StringType
from RGT.XML.SVG.basicSvgNode import BasicSvgNode
class FeColorMatrixNode(BaseFilterNode):
    """DOM wrapper for the SVG <feColorMatrix> filter primitive.

    Provides typed accessors for the 'in', 'type' and 'values'
    attributes. Only animation children (animate/set) are allowed,
    matching the SVG content model for this element.

    NOTE: this module is Python 2 code (uses types.StringType).
    """

    svgNodeType = BasicSvgNode.SVG_FE_COLOR_MATRIX_NODE

    ATTRIBUTE_IN = 'in'
    ATTRIBUTE_TYPE = 'type'
    ATTRIBUTE_VALUES = 'values'

    def __init__(self, ownerDoc):
        BaseFilterNode.__init__(self, ownerDoc, 'feColorMatrix')
        # <feColorMatrix> may only contain animation child elements.
        self._allowedSvgChildNodes.update({self.SVG_ANIMATE_NODE, self.SVG_SET_NODE})

    def setIn(self, data):
        """Set the 'in' attribute (input of this filter primitive)."""
        if data is not None:
            if type(data) is not StringType:
                data = str(data)
            self._setNodeAttribute(self.ATTRIBUTE_IN, data)

    def setType(self, data):
        """Set the 'type' attribute.

        Raises ValueError if data is not one of the values the SVG spec
        allows: matrix, saturate, hueRotate, luminanceToAlpha.
        """
        allowedValues = ['matrix', 'saturate', 'hueRotate', 'luminanceToAlpha']
        if data is not None:
            if type(data) is not StringType:
                data = str(data)
            if data not in allowedValues:
                # Bug fix: the original hand-built message was missing the
                # space before 'are allowed' ("...luminanceToAlphaare allowed").
                values = ', '.join(allowedValues)
                raise ValueError('Value not allowed, only ' + values + ' are allowed')
            else:
                self._setNodeAttribute(self.ATTRIBUTE_TYPE, data)

    def setValues(self, data):
        """Set the 'values' attribute (matrix values / amount, per 'type')."""
        if data is not None:
            if type(data) is not StringType:
                data = str(data)
            self._setNodeAttribute(self.ATTRIBUTE_VALUES, data)

    def getIn(self):
        """Return the 'in' attribute value, or None if unset."""
        node = self._getNodeAttribute(self.ATTRIBUTE_IN)
        if node is not None:
            return node.nodeValue
        return None

    def getType(self):
        """Return the 'type' attribute value, or None if unset."""
        node = self._getNodeAttribute(self.ATTRIBUTE_TYPE)
        if node is not None:
            return node.nodeValue
        return None

    def getValues(self):
        """Return the 'values' attribute value, or None if unset."""
        node = self._getNodeAttribute(self.ATTRIBUTE_VALUES)
        if node is not None:
            return node.nodeValue
        return None
"content_hash": "bd9dba515e1cf8934bbff9d50133284a",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 85,
"avg_line_length": 33.83606557377049,
"alnum_prop": 0.5794573643410853,
"repo_name": "danrg/RGT-tool",
"id": "85f5501d63798e49ec948d79fa9d95591fd9f4dd",
"size": "2064",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/RGT/XML/SVG/Filters/feColorMatrixNode.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "83200"
},
{
"name": "HTML",
"bytes": "93970"
},
{
"name": "JavaScript",
"bytes": "111380"
},
{
"name": "Python",
"bytes": "788710"
},
{
"name": "SQLPL",
"bytes": "722"
}
],
"symlink_target": ""
} |
from os import path
from codecs import open
from setuptools import setup, find_packages
HERE = path.abspath(path.dirname(__file__))


def _read(filename):
    """Return the UTF-8 contents of a file that sits next to this script."""
    with open(path.join(HERE, filename), encoding='utf-8') as fh:
        return fh.read()


readme = _read('README.rst')
license = _read('LICENSE')

setup(
    name='specification',
    version='0.1.1',
    description='Specification pattern for Python.',
    long_description=readme,
    url='https://github.com/uetoyo/py-specification',
    author='David Landa',
    author_email='david.landa@seznam.cz',
    license=license,
    classifiers=[],
    packages=find_packages(exclude=('tests', 'docs'))
)
| {
"content_hash": "03fd5632e487c8be1f5ce891af1100d4",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 76,
"avg_line_length": 31.375,
"alnum_prop": 0.646746347941567,
"repo_name": "uetoyo/specific.py",
"id": "87252c8076137f81494f0547b2727cbb755de6fe",
"size": "800",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "380"
},
{
"name": "Python",
"bytes": "13378"
}
],
"symlink_target": ""
} |
# Package entry point: re-export the generated service client and its
# configuration class, and surface the package version.
from .auto_rest_swagger_bat_array_service import AutoRestSwaggerBATArrayService, AutoRestSwaggerBATArrayServiceConfiguration
from .version import VERSION

# Names exported by `from <package> import *`.
__all__ = [
    'AutoRestSwaggerBATArrayService',
    'AutoRestSwaggerBATArrayServiceConfiguration'
]

__version__ = VERSION
| {
"content_hash": "dbee5f28e33c386cb27fd04d3edd1b2a",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 124,
"avg_line_length": 28.1,
"alnum_prop": 0.8078291814946619,
"repo_name": "csmengwan/autorest",
"id": "aaf980709effe10b78ae5ecf6f835620d55a1a12",
"size": "755",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "AutoRest/Generators/Python/Python.Tests/Expected/AcceptanceTests/BodyArray/autorestswaggerbatarrayservice/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "13761"
},
{
"name": "C#",
"bytes": "10517556"
},
{
"name": "CSS",
"bytes": "110"
},
{
"name": "HTML",
"bytes": "274"
},
{
"name": "Java",
"bytes": "4684473"
},
{
"name": "JavaScript",
"bytes": "4658203"
},
{
"name": "PowerShell",
"bytes": "5703"
},
{
"name": "Python",
"bytes": "2237671"
},
{
"name": "Ruby",
"bytes": "232025"
},
{
"name": "Shell",
"bytes": "142"
},
{
"name": "TypeScript",
"bytes": "179577"
}
],
"symlink_target": ""
} |
"""
Copyright 2003-2010 Cort Stratton. All rights reserved.
Copyright 2015, 2016 Hanson Robotics
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FREEBSD PROJECT OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
"""This file contains assorted general utility functions used by other
modules in the PyAIML package.
"""
def sentences(s):
    """Split the string s into a list of sentences.

    A sentence ends at the first '.', '?' or '!' following it; the
    terminator is dropped and each sentence is stripped of surrounding
    whitespace. Text after the last terminator becomes a final (possibly
    empty) sentence. An empty input yields [''].

    Raises:
        TypeError: if s is not a string.
    """
    # Duck-type check: string concatenation only works on strings.
    try:
        s + ""
    except TypeError:
        # Fix: use the call form of raise (the original used Python-2-only
        # `raise TypeError, "..."` syntax) and catch only TypeError instead
        # of a bare except.
        raise TypeError("s must be a string")
    pos = 0
    sentenceList = []
    l = len(s)
    while pos < l:
        # Locate the nearest terminator of each kind; a missing one is
        # treated as lying just past the end of the string.
        try:
            p = s.index('.', pos)
        except ValueError:
            p = l + 1
        try:
            q = s.index('?', pos)
        except ValueError:
            q = l + 1
        try:
            e = s.index('!', pos)
        except ValueError:
            e = l + 1
        end = min(p, q, e)
        sentenceList.append(s[pos:end].strip())
        pos = end + 1
    # Only an empty input leaves the list empty (any non-empty input
    # appends at least one entry above); return the input itself then.
    if len(sentenceList) == 0:
        sentenceList.append(s)
    return sentenceList
# Self test
if __name__ == "__main__":
    # sentences: the sample text below contains exactly four sentences.
    sample = "First. Second, still? Third and Final! Well, not really"
    assert(len(sentences(sample)) == 4)
| {
"content_hash": "d9b097b2eda777ff165ebbe43bf66f10",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 71,
"avg_line_length": 34.88235294117647,
"alnum_prop": 0.6732715008431703,
"repo_name": "hansonrobotics/chatbot",
"id": "7942d715dc06e7e47e52d8966b8c73f0a458082f",
"size": "2372",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/chatbot/aiml/Utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CMake",
"bytes": "5634"
},
{
"name": "CSS",
"bytes": "1149"
},
{
"name": "HTML",
"bytes": "1657"
},
{
"name": "JavaScript",
"bytes": "45747"
},
{
"name": "Python",
"bytes": "342716"
},
{
"name": "Shell",
"bytes": "136"
}
],
"symlink_target": ""
} |
"""Tests for the Home Assistant Websocket API."""
import asyncio
from unittest.mock import patch
from aiohttp import WSMsgType
from async_timeout import timeout
import pytest
from homeassistant.core import callback
from homeassistant.components import websocket_api as wapi, frontend
from tests.common import mock_http_component_app
# Password configured on the test HTTP app by the authenticated fixtures.
API_PASSWORD = 'test1234'
@pytest.fixture
def websocket_client(loop, hass, test_client):
    """Websocket client fixture connected to websocket server."""
    # App without an API password: connection is authenticated immediately.
    websocket_app = mock_http_component_app(hass)
    wapi.WebsocketAPIView().register(websocket_app.router)
    client = loop.run_until_complete(test_client(websocket_app))
    ws = loop.run_until_complete(client.ws_connect(wapi.URL))
    # First server message must be the AUTH_OK greeting.
    auth_ok = loop.run_until_complete(ws.receive_json())
    assert auth_ok['type'] == wapi.TYPE_AUTH_OK
    yield ws
    # Teardown: close the socket if the test did not already do so.
    if not ws.closed:
        loop.run_until_complete(ws.close())
@pytest.fixture
def no_auth_websocket_client(hass, loop, test_client):
    """Websocket connection that requires authentication."""
    # App configured with an API password: server demands auth first.
    websocket_app = mock_http_component_app(hass, API_PASSWORD)
    wapi.WebsocketAPIView().register(websocket_app.router)
    client = loop.run_until_complete(test_client(websocket_app))
    ws = loop.run_until_complete(client.ws_connect(wapi.URL))
    # First server message must be the AUTH_REQUIRED challenge.
    auth_ok = loop.run_until_complete(ws.receive_json())
    assert auth_ok['type'] == wapi.TYPE_AUTH_REQUIRED
    yield ws
    # Teardown: close the socket if the test did not already do so.
    if not ws.closed:
        loop.run_until_complete(ws.close())
@asyncio.coroutine
def test_auth_via_msg(no_auth_websocket_client):
    """Test that authenticating with the correct password succeeds."""
    payload = {
        'type': wapi.TYPE_AUTH,
        'api_password': API_PASSWORD,
    }
    no_auth_websocket_client.send_json(payload)
    response = yield from no_auth_websocket_client.receive_json()
    assert response['type'] == wapi.TYPE_AUTH_OK
@asyncio.coroutine
def test_auth_via_msg_incorrect_pass(no_auth_websocket_client):
    """Test that authenticating with a wrong password is rejected."""
    payload = {
        'type': wapi.TYPE_AUTH,
        'api_password': API_PASSWORD + 'wrong',
    }
    no_auth_websocket_client.send_json(payload)
    response = yield from no_auth_websocket_client.receive_json()
    assert response['type'] == wapi.TYPE_AUTH_INVALID
    assert response['message'] == 'Invalid password'
@asyncio.coroutine
def test_pre_auth_only_auth_allowed(no_auth_websocket_client):
    """Verify that before authentication, only auth messages are allowed."""
    service_call_msg = {
        'type': wapi.TYPE_CALL_SERVICE,
        'domain': 'domain_test',
        'service': 'test_service',
        'service_data': {'hello': 'world'},
    }
    no_auth_websocket_client.send_json(service_call_msg)
    response = yield from no_auth_websocket_client.receive_json()
    assert response['type'] == wapi.TYPE_AUTH_INVALID
    assert response['message'].startswith('Message incorrectly formatted')
@asyncio.coroutine
def test_invalid_message_format(websocket_client):
    """Test that a schema-invalid message produces a format error result."""
    # 'type' must be a string; an int fails message validation.
    websocket_client.send_json({'type': 5})
    response = yield from websocket_client.receive_json()
    assert response['type'] == wapi.TYPE_RESULT
    error = response['error']
    assert error['code'] == wapi.ERR_INVALID_FORMAT
    assert error['message'].startswith('Message incorrectly formatted')
@asyncio.coroutine
def test_invalid_json(websocket_client):
    """Test that sending non-JSON data closes the connection."""
    websocket_client.send_str('this is not JSON')
    received = yield from websocket_client.receive()
    assert received.type == WSMsgType.close
@asyncio.coroutine
def test_quiting_hass(hass, websocket_client):
    """Test that the websocket is closed when Home Assistant stops."""
    # Stub out loop.stop so async_stop doesn't kill the test's event loop.
    with patch.object(hass.loop, 'stop'):
        yield from hass.async_stop()
    msg = yield from websocket_client.receive()
    assert msg.type == WSMsgType.CLOSE
@asyncio.coroutine
def test_call_service(hass, websocket_client):
    """Test call service command."""
    calls = []

    @callback
    def service_call(call):
        # Record every invocation so the test can inspect it afterwards.
        calls.append(call)

    hass.services.async_register('domain_test', 'test_service', service_call)
    websocket_client.send_json({
        'id': 5,
        'type': wapi.TYPE_CALL_SERVICE,
        'domain': 'domain_test',
        'service': 'test_service',
        'service_data': {
            'hello': 'world'
        }
    })
    # The command must be acknowledged with a matching-id success result.
    msg = yield from websocket_client.receive_json()
    assert msg['id'] == 5
    assert msg['type'] == wapi.TYPE_RESULT
    assert msg['success']
    # The registered handler must have run exactly once with our data.
    assert len(calls) == 1
    call = calls[0]
    assert call.domain == 'domain_test'
    assert call.service == 'test_service'
    assert call.data == {'hello': 'world'}
@asyncio.coroutine
def test_subscribe_unsubscribe_events(hass, websocket_client):
    """Test subscribe/unsubscribe events command."""
    # Baseline listener count so we can verify add/remove below.
    init_count = sum(hass.bus.async_listeners().values())
    websocket_client.send_json({
        'id': 5,
        'type': wapi.TYPE_SUBSCRIBE_EVENTS,
        'event_type': 'test_event'
    })
    msg = yield from websocket_client.receive_json()
    assert msg['id'] == 5
    assert msg['type'] == wapi.TYPE_RESULT
    assert msg['success']
    # Verify we have a new listener
    assert sum(hass.bus.async_listeners().values()) == init_count + 1
    # Only 'test_event' should be forwarded; the others are filtered out.
    hass.bus.async_fire('ignore_event')
    hass.bus.async_fire('test_event', {'hello': 'world'})
    hass.bus.async_fire('ignore_event')
    with timeout(3, loop=hass.loop):
        msg = yield from websocket_client.receive_json()
    # The forwarded event is tagged with the subscription id (5).
    assert msg['id'] == 5
    assert msg['type'] == wapi.TYPE_EVENT
    event = msg['event']
    assert event['event_type'] == 'test_event'
    assert event['data'] == {'hello': 'world'}
    assert event['origin'] == 'LOCAL'
    websocket_client.send_json({
        'id': 6,
        'type': wapi.TYPE_UNSUBSCRIBE_EVENTS,
        'subscription': 5
    })
    msg = yield from websocket_client.receive_json()
    assert msg['id'] == 6
    assert msg['type'] == wapi.TYPE_RESULT
    assert msg['success']
    # Check our listener got unsubscribed
    assert sum(hass.bus.async_listeners().values()) == init_count
@asyncio.coroutine
def test_get_states(hass, websocket_client):
    """Test get_states command."""
    hass.states.async_set('greeting.hello', 'world')
    hass.states.async_set('greeting.bye', 'universe')
    websocket_client.send_json({
        'id': 5,
        'type': wapi.TYPE_GET_STATES,
    })
    response = yield from websocket_client.receive_json()
    assert response['id'] == 5
    assert response['type'] == wapi.TYPE_RESULT
    assert response['success']

    def _serialize(state):
        """Mirror the JSON serialization of a state object."""
        as_dict = state.as_dict()
        as_dict['last_changed'] = as_dict['last_changed'].isoformat()
        as_dict['last_updated'] = as_dict['last_updated'].isoformat()
        return as_dict

    expected = [_serialize(state) for state in hass.states.async_all()]
    assert response['result'] == expected
@asyncio.coroutine
def test_get_services(hass, websocket_client):
    """Test get_services command."""
    websocket_client.send_json({
        'id': 5,
        'type': wapi.TYPE_GET_SERVICES,
    })
    response = yield from websocket_client.receive_json()
    assert response['id'] == 5
    assert response['type'] == wapi.TYPE_RESULT
    assert response['success']
    # Result must match the live service registry exactly.
    assert response['result'] == hass.services.async_services()
@asyncio.coroutine
def test_get_config(hass, websocket_client):
    """Test get_config command."""
    websocket_client.send_json({
        'id': 5,
        'type': wapi.TYPE_GET_CONFIG,
    })
    response = yield from websocket_client.receive_json()
    assert response['id'] == 5
    assert response['type'] == wapi.TYPE_RESULT
    assert response['success']
    result = response['result']
    if 'components' in result:
        # JSON serializes the component set as a list; normalize back to a
        # set so it compares equal to hass.config.as_dict().
        result['components'] = set(result['components'])
    assert result == hass.config.as_dict()
@asyncio.coroutine
def test_get_panels(hass, websocket_client):
    """Test get_panels command."""
    frontend.register_built_in_panel(
        hass, 'map', 'Map', 'mdi:account-location')
    websocket_client.send_json({
        'id': 5,
        'type': wapi.TYPE_GET_PANELS,
    })
    response = yield from websocket_client.receive_json()
    assert response['id'] == 5
    assert response['type'] == wapi.TYPE_RESULT
    assert response['success']
    assert response['result'] == hass.data[frontend.DATA_PANELS]
@asyncio.coroutine
def test_ping(websocket_client):
    """Test the ping command gets a pong reply."""
    websocket_client.send_json({
        'id': 5,
        'type': wapi.TYPE_PING,
    })
    msg = yield from websocket_client.receive_json()
    assert msg['id'] == 5
    assert msg['type'] == wapi.TYPE_PONG
| {
"content_hash": "68a0d3b54b9b6e3c6ceaa559fdc571a2",
"timestamp": "",
"source": "github",
"line_count": 303,
"max_line_length": 77,
"avg_line_length": 27.85148514851485,
"alnum_prop": 0.6429671762057115,
"repo_name": "kyvinh/home-assistant",
"id": "3cdc77414eeda5d04acb7a7edd70fdeb560c80db",
"size": "8439",
"binary": false,
"copies": "3",
"ref": "refs/heads/dev",
"path": "tests/components/test_websocket_api.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "1548645"
},
{
"name": "Python",
"bytes": "5298607"
},
{
"name": "Ruby",
"bytes": "379"
},
{
"name": "Shell",
"bytes": "14220"
}
],
"symlink_target": ""
} |
from unittest import TestCase, skip
from IPython import embed
from qlknn.plots.load_data import *
| {
"content_hash": "89c155450c5a40e83fab7679c358106e",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 35,
"avg_line_length": 24.75,
"alnum_prop": 0.8080808080808081,
"repo_name": "Karel-van-de-Plassche/QLKNN-develop",
"id": "c5f7383c263ce014a8e5d4770b99763005950aeb",
"size": "99",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/plots/test_load_data.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "15260"
},
{
"name": "Python",
"bytes": "817964"
},
{
"name": "Shell",
"bytes": "4776"
}
],
"symlink_target": ""
} |
def get_available_ip_by_domain(domain):
    """
    :param domain: The domain to choose from
    :type domain: class:`Domain`
    :param system: The system the interface belongs to
    :returns: ip_address

    This function looks at `domain.name` and strips off 'mozilla.com' (yes it
    needs to be `<something>.mozilla.com`). The function then tries to
    determine which site and vlan the domain is in. Once it knows the site and
    vlan it looks for network's in the vlan and eventually for ranges and a
    free ip in that range. If at any time this function can't do any of those
    things it raises a ValidationError.

    NOTE(review): the body currently consists of this docstring only -- the
    function is an unimplemented stub and implicitly returns None. Also, the
    documented `system` parameter does not exist in the signature.
    """
"content_hash": "37bb5053575c56832105dbd30b069d07",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 78,
"avg_line_length": 45.5,
"alnum_prop": 0.7048665620094191,
"repo_name": "rtucker-mozilla/mozilla_inventory",
"id": "148ad1435cfa9d5bc05327f63764b234e27daa96",
"size": "637",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "core/interface/utils.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CoffeeScript",
"bytes": "9538"
},
{
"name": "JavaScript",
"bytes": "1485560"
},
{
"name": "PHP",
"bytes": "27273"
},
{
"name": "Puppet",
"bytes": "6422"
},
{
"name": "Python",
"bytes": "1960271"
},
{
"name": "Ruby",
"bytes": "1459"
},
{
"name": "Shell",
"bytes": "8766"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import argparse
import hashlib
import os
import subprocess
import sys
from binstar_client.utils import get_binstar
import binstar_client.errors
import conda.config
from conda_build.metadata import MetaData
from conda_build.build import bldpkg_path
def built_distribution_already_exists(cli, meta, owner):
    """
    Checks to see whether the built recipe (aka distribution) already
    exists on the owner/user's binstar account.

    :param cli: an authenticated binstar client
    :param meta: the conda_build MetaData of the recipe
    :param owner: the anaconda.org owner/user name
    :returns: True if a distribution with this subdir/name is registered.
    """
    distro_name = '{}/{}.tar.bz2'.format(conda.config.subdir, meta.dist())
    fname = bldpkg_path(meta)
    # A NotFound from the API simply means the distribution is absent.
    try:
        dist_info = cli.distribution(owner, meta.name(), meta.version(),
                                     distro_name)
    except binstar_client.errors.NotFound:
        dist_info = {}
    exists = bool(dist_info)
    # Unfortunately, we cannot check the md5 quality of the built distribution, as
    # this will depend on fstat information such as modification date (because
    # distributions are tar files). Therefore we can only assume that the distribution
    # just built, and the one on anaconda.org are the same.
    # if exists:
    #     md5_on_binstar = dist_info.get('md5')
    #     with open(fname, 'rb') as fh:
    #         md5_of_build = hashlib.md5(fh.read()).hexdigest()
    #
    #     if md5_on_binstar != md5_of_build:
    #         raise ValueError('This build ({}), and the build already on binstar '
    #                          '({}) are different.'.format(md5_of_build, md5_on_binstar))
    return exists
def upload(cli, meta, owner, channels):
    """Upload the built distribution to anaconda.org via the anaconda CLI.

    Writes the client token to a temporary local file for the subprocess
    and always removes it afterwards.
    """
    token_fname = 'binstar.token'
    try:
        with open(token_fname, 'w') as fh:
            fh.write(cli.token)
        cmd = ['anaconda', '--quiet', '-t', token_fname,
               'upload', bldpkg_path(meta),
               '--user={}'.format(owner),
               '--channel={}'.format(channels)]
        subprocess.check_call(cmd, env=os.environ)
    finally:
        # Never leave the API token lying around on disk.
        os.remove(token_fname)
def distribution_exists_on_channel(binstar_cli, meta, owner, channel='main'):
    """
    Determine whether a distribution exists on a specific channel.

    Note from @pelson: As far as I can see, there is no easy way to do this on binstar.
    """
    fname = '{}/{}.tar.bz2'.format(conda.config.subdir, meta.dist())
    channel_files = binstar_cli.show_channel(owner=owner, channel=channel)['files']
    return any(dist['basename'] == fname for dist in channel_files)
def add_distribution_to_channel(binstar_cli, meta, owner, channel='main'):
    """
    Add a(n already existing) distribution on binstar to another channel.

    Note - the addition is done based on name and version - no build strings etc.
    so if you have a foo-0.1-np18 and foo-0.1-np19 *both* will be added to the channel.
    """
    # Fix: removed an unused local (`package_fname`) that the original
    # computed but never used -- the channel addition only needs the
    # package name and version.
    binstar_cli.add_channel(channel, owner, meta.name(), meta.version())
def main():
    """Entry point: upload a built conda distribution or report its status.

    When BINSTAR_TOKEN is set in the environment, the distribution is
    uploaded (or added to the requested channel if it already exists);
    without the token the script only reports whether the distribution
    is already available.
    """
    # The presence of the token decides whether an upload takes place at all.
    token = os.environ.get('BINSTAR_TOKEN')

    description = ('Upload or check consistency of a built version of a '
                   'conda recipe with binstar. Note: The existence of the '
                   'BINSTAR_TOKEN environment variable determines '
                   'whether the upload should actually take place.')
    parser = argparse.ArgumentParser(description=description)
    parser.add_argument('recipe_dir', help='the conda recipe directory')
    parser.add_argument('owner', help='the binstar owner/user')
    parser.add_argument('--channel', help='the binstar channel', default='main')
    args = parser.parse_args()
    recipe_dir, owner, channel = args.recipe_dir, args.owner, args.channel

    cli = get_binstar(argparse.Namespace(token=token, site=None))
    meta = MetaData(recipe_dir)
    # Recipes can opt out of building entirely via build/skip.
    if meta.skip():
        print("No upload to take place - this configuration was skipped in build/skip.")
        return
    exists = built_distribution_already_exists(cli, meta, owner)
    if token:
        on_channel = distribution_exists_on_channel(cli, meta, owner, channel)
        if not exists:
            # Not on anaconda.org at all yet: do the full upload.
            upload(cli, meta, owner, channel)
            print('Uploaded {}'.format(bldpkg_path(meta)))
        elif not on_channel:
            # Already uploaded, but not linked to the requested channel.
            print('Adding distribution {} to {}\'s {} channel'
                  ''.format(bldpkg_path(meta), owner, channel))
            add_distribution_to_channel(cli, meta, owner, channel)
        else:
            print('Distribution {} already \nexists on {}\'s {} channel.'
                  ''.format(bldpkg_path(meta), owner, channel))
    else:
        # Read-only mode: just report what we found.
        print("No BINSTAR_TOKEN present, so no upload is taking place. "
              "The distribution just built {} already available on {}'s "
              "{} channel.".format('is' if exists else 'is not',
                                   owner, channel))


if __name__ == '__main__':
    main()
| {
"content_hash": "3165c15ddfaec346f42a1ce7460bd290",
"timestamp": "",
"source": "github",
"line_count": 123,
"max_line_length": 96,
"avg_line_length": 40.26829268292683,
"alnum_prop": 0.6188168786593984,
"repo_name": "willyd/conda-recipes",
"id": "681f144d4434a30e22986b083d2485f8b37f820b",
"size": "4975",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "opencv/ci_support/upload_or_check_non_existence.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1311"
}
],
"symlink_target": ""
} |
"""Lexer has one of the main Curly's functions, :py:func:`tokenize`.
The main idea of lexing is to split raw text into structured list of
well known and defined parts, called *tokens*. Each token has a class
and some contents (let's say, from ``{% if something %}`` we can get
following contents for :py:class:`StartBlockToken`: ``if`` as a function
name and ``["something"]`` as a block expression).
Here is the example:
.. code-block:: python3
>>> from curly.lexer import tokenize
>>> text = '''\\
... Hello! My name is {{ name }}.\\
... {% if likes %}And I like these things: {% loop likes %}\\
... {{ item }},{% /loop %}{% /if %}'''
>>> for token in tokenize(text):
... print(repr(token))
...
<LiteralToken(raw=' Hello! My name is ', \
contents={'text': ' Hello! My name is '})>
<PrintToken(raw='{{ name }}', contents={'expression': ['name']})>
<LiteralToken(raw='.', contents={'text': '.'})>
<StartBlockToken(raw='{% if likes %}', \
contents={'function': 'if', 'expression': ['likes']})>
<LiteralToken(raw='And I like these things: ', \
contents={'text': 'And I like these things: '})>
<StartBlockToken(raw='{% loop likes %}', \
contents={'function': 'loop', 'expression': ['likes']})>
<PrintToken(raw='{{ item }}', contents={'expression': ['item']})>
<LiteralToken(raw=',', contents={'text': ','})>
<EndBlockToken(raw='{% /loop %}', contents={'function': 'loop'})>
<EndBlockToken(raw='{% /if %}', contents={'function': 'if'})>
>>>
Some terminology:
*function*
Function is the name of function to call within a block. For example,
in block tag ``{% if something %}`` function is ``if``.
*expression*
Expression is the something to print or to pass to function. For
example, in block tag ``{% if lala | blabla | valueof "qq pp" %}``,
expression is ``lala | blabla | valueof "qq pp"``. Usually, expression
is parsed according to POSIX shell lexing: ``["lala", "|", "blabla",
"|", "valueof", "qq pp"]``.
It is out of the scope of Curly how to implement evaluation
of the expression. By default, curly tries to find it in
context literally, but if you want, feel free to implement your
own Jinja2-style DSL. Or even call :py:func:`ast.parse` with
:py:func:`compile`.
For details on lexing please check :py:func:`tokenize` function.
"""
import collections
import functools
from curly import exceptions
from curly import utils
REGEXP_FUNCTION = r"[a-zA-Z0-9_-]+"
"""Regular expression for function definition."""
REGEXP_EXPRESSION = r"(?:\\.|[^\{\}%])+"
"""Regular expression for 'expression' definition."""
class Token(collections.UserString):
    """Base class for every token produced by :py:func:`tokenize`.

    A subclass is picked up by the tokenizer only when it defines a
    non-``None`` ``REGEXP`` class attribute.

    :param str raw_string: Text which was recognized as a token.
    :raises:
        :py:exc:`curly.exceptions.CurlyLexerStringDoesNotMatchError`: if
        string does not match regular expression.
    """

    REGEXP = None

    def __init__(self, raw_string):
        match = self.REGEXP.match(raw_string)
        if not match:
            raise exceptions.CurlyLexerStringDoesNotMatchError(
                raw_string, self.REGEXP)
        super().__init__(raw_string)
        self.contents = self.extract_contents(match)

    def extract_contents(self, matcher):
        """Pull detailed token information out of the regexp match.

        :param re.match matcher: Regular expression matcher.
        :return: A details on the token.
        :rtype: dict[str, str]
        """
        return {}

    def __repr__(self):
        template = ("<{0.__class__.__name__}(raw={0.data!r}, "
                    "contents={0.contents!r})>")
        return template.format(self)
class PrintToken(Token):
    """Token for the print tag, e.g. ``{{ var }}``.

    The contents carry the *expression* that should be printed
    (``["var"]`` for ``{{ var }}``); the expression part is matched with
    :py:data:`REGEXP_EXPRESSION`.
    """

    REGEXP = utils.make_regexp(
        r"""
        {{\s* # open {{
        (%s) # expression 'var' in {{ var }}
        \s*}} # closing }}
        """ % REGEXP_EXPRESSION)
    """Regular expression of the token."""

    def extract_contents(self, matcher):
        expression = matcher.group(1)
        return {"expression": utils.make_expression(expression)}
class StartBlockToken(Token):
    """Token for the opening block tag of a function call.

    Matches ``{% function expr1 expr2 expr3... %}``.  The contents carry
    both the *function* name (matched with :py:data:`REGEXP_FUNCTION`)
    and its *expression* (matched with :py:data:`REGEXP_EXPRESSION`).
    """

    REGEXP = utils.make_regexp(
        r"""
        {%%\s* # open block tag
        (%s) # function name
        (%s)? # expression for function
        \s*%%} # closing block tag
        """ % (REGEXP_FUNCTION, REGEXP_EXPRESSION))
    """Regular expression of the token."""

    def extract_contents(self, matcher):
        function = matcher.group(1).strip()
        expression = utils.make_expression(matcher.group(2))
        return {"function": function, "expression": expression}
class EndBlockToken(Token):
    """Token for the closing block tag of a function call.

    Matches ``{% /function %}``.  The contents carry only the *function*
    name (matched with :py:data:`REGEXP_FUNCTION`).
    """

    REGEXP = utils.make_regexp(
        r"""
        {%%\s* # open block tag
        /\s* # / character
        (%s) # function name
        \s*%%} # closing block tag
        """ % REGEXP_FUNCTION)
    """Regular expression of the token."""

    def extract_contents(self, matcher):
        function = matcher.group(1).strip()
        return {"function": function}
class LiteralToken(Token):
    """Token for literal text placed outside of any tag.

    Literal parts are context-independent and rendered as-is.  For the
    template ``{{ first_name }} - {{ last_name }}`` the literal token is
    " - " (yes, with spaces).
    """

    TEXT_UNESCAPE = utils.make_regexp(r"\\(.)")

    def __init__(self, text):
        # Deliberately bypasses Token.__init__: literal text is not matched
        # against a token regexp, backslash escapes are simply resolved.
        self.data = text
        self.contents = {"text": self.TEXT_UNESCAPE.sub(r"\1", text)}
def tokenize(text):
    r"""Lexical analysis of the given text.

    Splits *text* into a generator of :py:class:`Token` instances.
    A few facts you have to know about this function:

    #. It does not raise exceptions.  If something goes fishy, the
       tokenizer falls back to :py:class:`LiteralToken`.
    #. It uses one big regular expression, taken from
       :py:func:`make_tokenizer_regexp`.  This regular expression looks
       like this::

           (?P<SomeToken>{%\s*(\S+)\s*%})|(?P<AnotherToken>{{\s*(\w+)\s*}})

       (This docstring is a raw string on purpose: sequences such as
       ``\s`` above are not valid string escapes.)
    #. The function only searches for template tokens; emitting
       :py:class:`LiteralToken` for the gaps between matches is a side
       effect.  The end position of the previous match is remembered so
       that the skipped-over text (e.g. ``"Hello, "`` in
       ``"Hello, {{ var }}"``) can be emitted as a literal, and any
       leftover text after the final match is emitted the same way.

    :param text: Text to lex into tokens.
    :type text: str or bytes
    :return: Generator with :py:class:`Token` instances.
    :rtype: Generator[:py:class:`Token`]
    """
    previous_end = 0
    tokens = get_token_patterns()

    if isinstance(text, bytes):
        text = text.decode("utf-8")

    for matcher in make_tokenizer_regexp().finditer(text):
        # emit a literal token for the text skipped since the last match
        if matcher.start(0) != previous_end:
            yield LiteralToken(text[previous_end:matcher.start(0)])
        previous_end = matcher.end(0)

        # exactly one named group matched; its name selects the token class
        match_groups = matcher.groupdict()
        token_class = tokens[matcher.lastgroup]
        yield token_class(match_groups[matcher.lastgroup])

    # trailing literal text after the last template token
    leftover = text[previous_end:]
    if leftover:
        yield LiteralToken(leftover)
@functools.lru_cache(1)
def make_tokenizer_regexp():
    """Build the combined regular expression used by :py:func:`tokenize`.

    Every known token pattern is wrapped into a named group and all the
    groups are joined into one big alternation.

    :return: Regular expression for :py:func:`tokenize` function.
    :rtype: :py:class:`re.regex`
    """
    named_groups = (
        "(?P<{0}>{1})".format(name, klass.REGEXP.pattern)
        for name, klass in get_token_patterns().items())
    return utils.make_regexp("|".join(named_groups))
@functools.lru_cache(1)
def get_token_patterns():
    """Return the mapping of pattern name to token class.

    :return: Mapping of the known tokens with regular expressions.
    :rtype: dict[str, Token]
    """
    return get_token_patterns_rec(Token)
def get_token_patterns_rec(cls):
    """Recursively collect tokenizable subclasses of *cls*.

    Only classes defining a non-``None`` ``REGEXP`` attribute are
    included; the class passed in is itself never part of the result.
    """
    collected = {}
    for subclass in cls.__subclasses__():
        collected.update(get_token_patterns_rec(subclass))
        if subclass.REGEXP is not None:
            collected[subclass.__name__] = subclass
    return collected
| {
"content_hash": "9eaa5fdc7a45df5c218e016b4e892ecd",
"timestamp": "",
"source": "github",
"line_count": 302,
"max_line_length": 75,
"avg_line_length": 33.288079470198674,
"alnum_prop": 0.6272754401671143,
"repo_name": "9seconds/curly",
"id": "a66d6f843833526b4f329a7b845e5cf79145fce1",
"size": "10077",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "curly/lexer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "60988"
}
],
"symlink_target": ""
} |
"""
the default serializer for the plugin.
it uses json to do the actual work.
"""
import json
def json_serialize(value):
    """Encode *value* into its JSON string representation."""
    return json.dumps(value)
def json_deserialize(s):
    """Decode the JSON string *s* back into a Python value."""
    return json.loads(s)
| {
"content_hash": "7c40074a08e81972010b3751c19ad045",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 38,
"avg_line_length": 13.8,
"alnum_prop": 0.7004830917874396,
"repo_name": "amdorra/django-kvmodel",
"id": "4979a1105eb5a665819f9570c2ba84f0221ceb69",
"size": "207",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kvmodel/serializers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "13619"
}
],
"symlink_target": ""
} |
'''
'''
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
Test.Summary = '''
Test lua header table functionality
'''

# Skip the whole test when the tslua plugin was not built/installed.
Test.SkipUnless(
    Condition.PluginExists('tslua.so'),
)
Test.ContinueOnFail = True

# Define default ATS
ts = Test.MakeATSProcess("ts")
# Remap everything through the tslua plugin driven by header_table.lua.
ts.Disk.remap_config.AddLine(
    f"map / http://127.0.0.1 @plugin=tslua.so @pparam=header_table.lua"
)
# Configure the tslua's configuration file.
ts.Setup.Copy("header_table.lua", ts.Variables.CONFIGDIR)

# Test - Check for header table
tr = Test.AddTestRun("Lua Header Table")
ps = tr.Processes.Default # alias
ps.StartBefore(Test.Processes.ts)
# Send two X-Test headers; '-D /dev/stderr' routes response headers to
# stderr so only the response body remains on stdout.
ps.Command = f"curl -s -D /dev/stderr -H 'X-Test: test1' -H 'X-Test: test2' http://127.0.0.1:{ts.Variables.port}"
ps.Env = ts.Env
ps.ReturnCode = 0
# NOTE(review): assumes header_table.lua echoes the joined X-Test header
# values into the response body — confirm against the lua script.
ps.Streams.stdout.Content = Testers.ContainsExpression("test1test2", "expected header table results")
tr.StillRunningAfter = ts
"content_hash": "c8792982ded100f5b13bfc2054e4d309",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 113,
"avg_line_length": 36.24444444444445,
"alnum_prop": 0.7400367872470877,
"repo_name": "pbchou/trafficserver",
"id": "16d746a97a584cf21f8b448e76ec28f33e576d22",
"size": "1631",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "tests/gold_tests/pluginTest/lua/lua_header_table.test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "1478100"
},
{
"name": "C++",
"bytes": "16547456"
},
{
"name": "CMake",
"bytes": "13151"
},
{
"name": "Dockerfile",
"bytes": "6693"
},
{
"name": "Java",
"bytes": "9881"
},
{
"name": "Lua",
"bytes": "64412"
},
{
"name": "M4",
"bytes": "216500"
},
{
"name": "Makefile",
"bytes": "250518"
},
{
"name": "Objective-C",
"bytes": "12972"
},
{
"name": "Perl",
"bytes": "128436"
},
{
"name": "Python",
"bytes": "1509938"
},
{
"name": "SWIG",
"bytes": "25777"
},
{
"name": "Shell",
"bytes": "175893"
},
{
"name": "Starlark",
"bytes": "987"
},
{
"name": "Vim script",
"bytes": "192"
}
],
"symlink_target": ""
} |
"""
Component management module for lpm
A component definition consists of (at least):
- an ID (part number)
- a descriptive name
- a description
- a category
- a list of suppliers
- a list of manufacturers
- a list of revisions consisting of:
- a description
- a list of files
- a 'released' flag
- an 'obsolete' flag
- a history
Additional data may be specified as standard Python dictionary entries.
The latest revision is always the active one while earlier revisions are kept for archiving purposes.
Once a component is released it is immutable. Only admins can change the released flag or mark a component as obsolete.
The rules of access are as follows:
- anyone may view component definitions
- component_edit users may additionally:
- create new components
- modify components that are not currently released.
- create a new revision for components that are released.
- component_admin users may additionally:
- release / un-release components
- obsolete components
Valid categories can be defined with the LPM_COMPONENT_CATEGORIES configuration entry.
Note: There is no lock mechanism available, i.e. multiple users may edit the same component simultaneously.
:copyright: (c) 2016 Hannes Friederich.
:license: BSD, see LICENSE for more details.
"""
import re
import os
from datetime import datetime
from werkzeug import secure_filename
from flask import Blueprint, current_app, render_template, flash, abort, redirect, url_for, request, send_from_directory
from flask.ext.login import login_required, current_user
from pymongo import ReturnDocument
from pymongo.errors import DuplicateKeyError
from flask_wtf import Form
from wtforms import TextAreaField, StringField, SubmitField, FileField, SelectField
from wtforms.validators import InputRequired
from lpm.login import role_required
from lpm.utils import extract_errors
bp = Blueprint('components', __name__)
class ComponentForm(Form):
    """WTForms form used for both creating and editing a component.

    Only name and category are mandatory; up to two suppliers and two
    manufacturers (each with an optional part number) can be entered.
    """
    name = StringField(label='Name', validators=[InputRequired()])
    description = TextAreaField(label='Description')
    category = SelectField(label='Category', validators=[InputRequired()])
    comment = TextAreaField(label='Revision Comment')
    supplier1 = StringField(label='Supplier 1')
    supplier1part = StringField(label='Supplier 1 Part Number')
    supplier2 = StringField(label='Supplier 2')
    supplier2part = StringField(label='Supplier 2 Part Number')
    manufacturer1 = StringField(label='Manufacturer 1')
    manufacturer1part = StringField(label='Manufacturer 1 Part Number')
    manufacturer2 = StringField(label='Manufacturer 2')
    manufacturer2part = StringField(label='Manufacturer 2 Part Number')
class UploadForm(Form):
    """Single-field form for attaching a file to a component revision."""
    file = FileField(label='File')
class RevisionForm(Form):
    """Form asking for the comment of a newly created revision."""
    comment = TextAreaField(label='Revision Comment')
class ReleaseForm(Form):
    """Confirmation form whose submit button releases a component."""
    action = SubmitField(label='Release')
class UnReleaseForm(Form):
    """Confirmation form whose submit button un-releases a component."""
    action = SubmitField(label='Un-Release')
class ObsoleteForm(Form):
    """Confirmation form whose submit button marks a component obsolete."""
    action = SubmitField(label='Mark as Obsolete')
@bp.route('/')
@login_required
def overview():
    """
    Shows the overview page containing all components.

    Obsolete components are hidden unless the 'show_obsolete' query
    argument is present.
    """
    # local renamed from 'filter' to avoid shadowing the builtin
    query = {'obsolete': False}
    if request.args.get('show_obsolete'):
        query = None
    data = current_app.mongo.db.components.find(filter=query, projection=['name', 'category', 'obsolete', 'released'])
    return render_template('components/overview.html', data=data,
                           show_obsolete=request.args.get('show_obsolete'))
@bp.route('/<partno>')
@login_required
def details(partno):
    """
    Shows the details about the given component
    - Only component_edit users may look at specific revisions
    - All other only see the latest revision
    """
    # reject anything that is not a syntactically valid part number
    try:
        pn = PartNumber(partno)
    except ValueError:
        abort(404)
    # redirect to the revisionless URL if the user cannot view outdated revisions
    if pn.revision is not None and not current_user.has_role('component_edit'):
        return redirect(url_for('components.details', partno=pn.base_number))
    # ensure the object exists and the revision is valid
    obj = current_app.mongo.db.components.find_one_or_404(pn.base_number)
    # ensure the desired revision exists
    num_revisions = len(obj.get('revisions', list()))
    if pn.revision_number is not None and pn.revision_number >= num_revisions:
        abort(404)
    # without an explicit revision this selects the latest one
    pn.set_num_revisions(num_revisions)
    files = _get_files(pn.id)
    # the first file named 'preview.*' (if any) is shown as the preview image
    # (loop variable 'file' shadows the builtin — kept for byte-compatibility)
    preview_file = None
    for file in files:
        if file.startswith('preview.'):
            preview_file = file
            break
    return render_template('components/details.html', data=obj,
                           partno=pn, files=files, preview_file=preview_file)
@bp.route('/<partno>/<file>')
@login_required
def file(partno, file):
    """
    Sends the specified file, after performing some access checks.

    Only component_edit users may look at all revisions, all other may
    only see the latest revision.

    (The view and parameter names shadow the 'file' builtin but cannot be
    renamed: they are part of the route/endpoint interface.)
    """
    try:
        pn = PartNumber(partno)
    except ValueError:
        abort(404)
    # a revision must be specified
    if pn.revision is None:
        abort(404)
    # ensure the object exists and the revision is valid
    obj = current_app.mongo.db.components.find_one_or_404(pn.base_number)
    # ensure the desired revision exists
    num_revisions = len(obj.get('revisions', list()))
    assert pn.revision_number is not None
    if pn.revision_number >= num_revisions:
        abort(404)
    pn.set_num_revisions(num_revisions)
    if pn.is_outdated() and not current_user.has_role('component_edit'):
        abort(403)
    # instruct werkzeug to stream the file
    # (local renamed from 'dir' to avoid shadowing the builtin)
    directory = os.path.join(current_app.config['LPM_COMPONENT_FILES_DIR'], partno)
    return send_from_directory(directory, file)
@bp.route('/add', methods=['GET', 'POST'])
@role_required('component_edit')
def add():
    """
    Presents the form to add a new component, and adds it to the database if submitted
    """
    form = ComponentForm(request.form)
    form.category.choices = _get_categories()
    # form submittal handling
    if request.method == 'POST' and form.validate_on_submit():
        # local renamed from 'id' to avoid shadowing the builtin
        partno = _create_new_partno()
        suppliers = _extract_suppliers(form)
        manufacturers = _extract_manufacturers(form)
        now = datetime.now()
        obj = dict(_id=partno,
                   name=form.name.data,
                   description=form.description.data,
                   category=form.category.data,
                   suppliers=suppliers,
                   manufacturers=manufacturers,
                   revisions=[{'date': now, 'comment': form.comment.data}],
                   released=False,
                   obsolete=False,
                   history=[{'date': now, 'user': current_user.id, 'message': 'created'}])
        try:
            # insert_one: consistent with the pymongo 3 API (update_one,
            # find_one_and_update) used throughout this module
            current_app.mongo.db.components.insert_one(obj)
            flash('component successfully created', 'success')
            return redirect(url_for('components.details', partno=partno))
        except DuplicateKeyError as e:
            flash('data insertion failed (%s), please contact the administrator' % e, 'error')
    extract_errors(form)
    # NOTE(review): 'type=type' passes the builtin to the template — looks
    # like a leftover; confirm whether new_form.html actually uses it.
    return render_template('components/new_form.html', form=form, type=type)
@bp.route('/<partno>/edit', methods=['GET', 'POST'])
@role_required('component_edit')
def edit(partno):
    """
    Presents the form to edit an already existing component.

    Only unreleased components can be edited; released ones redirect back
    to the details page.  On submit, the top-level fields are replaced and
    the comment of the latest revision is updated in place.
    """
    obj = _load_if_unreleased(partno)
    # prepare the form data
    revisions = obj.get('revisions')
    suppliers = obj.get('suppliers', list())
    manufacturers = obj.get('manufacturers', list())
    # the latest revision is the one being edited
    revidx = len(revisions)-1
    num_suppliers = len(suppliers)
    num_manufacturers = len(manufacturers)
    data = dict(name=obj.get('name'),
                description=obj.get('description'),
                category=obj.get('category'),
                comment=revisions[revidx].get('comment'))
    # the form has two fixed supplier/manufacturer slots; fill what exists
    if num_suppliers > 0:
        data['supplier1'] = suppliers[0].get('name')
        data['supplier1part'] = suppliers[0].get('partno')
    if num_suppliers > 1:
        data['supplier2'] = suppliers[1].get('name')
        data['supplier2part'] = suppliers[1].get('partno')
    if num_manufacturers > 0:
        data['manufacturer1'] = manufacturers[0].get('name')
        data['manufacturer1part'] = manufacturers[0].get('partno')
    if num_manufacturers > 1:
        data['manufacturer2'] = manufacturers[1].get('name')
        data['manufacturer2part'] = manufacturers[1].get('partno')
    form = ComponentForm(request.form, data=data)
    form.category.choices = _get_categories()
    # form submittal handling
    # use $set for the updated fields, directly update the latest revision
    # add a comment in the history
    if request.method == 'POST' and form.validate_on_submit():
        suppliers = _extract_suppliers(form)
        manufacturers = _extract_manufacturers(form)
        set_data = dict(name=form.name.data,
                        description=form.description.data,
                        category=form.category.data,
                        suppliers=suppliers,
                        manufacturers=manufacturers)
        # dotted path updates only the comment of the latest revision
        set_data['revisions.'+str(revidx)+'.comment'] = form.comment.data
        result = current_app.mongo.db.components.update_one(
            filter={'_id': partno},
            update={
                '$set': set_data,
                '$push': {
                    'history': {
                        'date': datetime.now(),
                        'user': current_user.id,
                        'message': 'updated',
                    }
                }
            }
        )
        if result.modified_count == 1:
            flash('data successfully updated', 'success')
        else:
            # should not happen. If the ID is wrong, the initial lookup will fail
            flash('no data modified, please contact the administrator', 'error')
        return redirect(url_for('components.details', partno=partno))
    extract_errors(form)
    return render_template('components/edit_form.html', form=form, partno=partno)
@bp.route('/<partno>/fileupload', methods=['GET', 'POST'])
@role_required('component_edit')
def fileupload(partno):
    """
    Presents the form to upload a new file for the design item.
    Stores the uploaded file in the correct location upon POST submit.

    Uploads are only allowed for the latest revision of an unreleased
    component.
    """
    # the part number must be valid
    try:
        pn = PartNumber(partno)
    except ValueError:
        abort(404)
    # the revision must be specified
    if pn.revision is None:
        abort(404)
    # check the data
    obj = _load_if_unreleased(pn.base_number)
    # ensure the desired revision exists
    num_revisions = len(obj.get('revisions', list()))
    if pn.revision_number >= num_revisions:
        abort(404)
    pn.set_num_revisions(num_revisions)
    if pn.is_outdated():
        flash('cannot upload files to outdated revisions', 'error')
        return redirect(url_for('components.details', partno=partno))
    form = UploadForm(request.form)
    # WTF is NOT used for the file handling, since the file upload handling seems broken.
    file = request.files.get('file')
    if request.method == 'POST' and form.validate_on_submit() and file:
        try:
            filename = secure_filename(file.filename)
            # local renamed from 'dir' to avoid shadowing the builtin
            target_dir = os.path.join(current_app.config['LPM_COMPONENT_FILES_DIR'], partno)
            if not os.path.exists(target_dir):
                os.makedirs(target_dir)
            path = os.path.join(target_dir, filename)
            file.save(path)
            flash('file successfully uploaded', 'success')
            return redirect(url_for('components.details', partno=partno))
        except Exception as e:
            # bug fix: flash() expects a message string, not the exception object
            flash(str(e), 'error')
    extract_errors(form)
    return render_template('components/upload_form.html', form=form, partno=partno)
@bp.route('/<partno>/new-revision', methods=['GET', 'POST'])
@role_required('component_edit')
def new_revision(partno):
    """
    Presents the form to add a new revision, and creates it upon POST submit
    """
    _load_if_released(partno)  # ensures the component exists and is released
    form = RevisionForm(request.form)
    if request.method == 'POST' and form.validate_on_submit():
        now = datetime.now()
        update_doc = {
            '$set': {
                # a freshly created revision always starts out un-released
                'released': False
            },
            '$push': {
                'revisions': {'date': now, 'comment': form.comment.data},
                'history': {
                    'date': now,
                    'user': current_user.id,
                    'message': 'new revision created'
                }
            }
        }
        result = current_app.mongo.db.components.update_one(
            filter={'_id': partno}, update=update_doc)
        if result.modified_count == 1:
            flash('new revision created', 'success')
        else:
            # should not happen; the lookup above already verified the id
            flash('no data modified, please contact the administrator', 'error')
        return redirect(url_for('components.details', partno=partno))
    extract_errors(form)
    return render_template('components/revision_form.html', form=form, partno=partno)
@bp.route('/<partno>/release', methods=['GET', 'POST'])
@role_required('component_admin')
def release(partno):
    """
    Releases the component when a POST form is submitted
    """
    obj = _load_if_unreleased(partno)
    form = ReleaseForm(request.form)
    if request.method == 'POST' and form.validate_on_submit():
        update_doc = {
            '$set': {'released': True},
            '$push': {
                'history': {
                    'date': datetime.now(),
                    'user': current_user.id,
                    'message': 'released'
                }
            }
        }
        result = current_app.mongo.db.components.update_one(
            filter={'_id': partno}, update=update_doc)
        if result.modified_count == 1:
            flash('component released', 'success')
        else:
            # should not happen.
            flash('no data modified, please contact the administrator', 'error')
        return redirect(url_for('components.details', partno=partno))
    extract_errors(form)
    return render_template('components/release_form.html', data=obj, form=form)
@bp.route('/<partno>/unrelease', methods=['GET', 'POST'])
@role_required('component_admin')
def unrelease(partno):
    """
    Un-releases the component when a POST form is submitted
    """
    obj = _load_if_released(partno)
    form = UnReleaseForm(request.form)
    if request.method == 'POST' and form.validate_on_submit():
        update_doc = {
            '$set': {'released': False},
            '$push': {
                'history': {
                    'date': datetime.now(),
                    'user': current_user.id,
                    'message': 'un-released'
                }
            }
        }
        result = current_app.mongo.db.components.update_one(
            filter={'_id': partno}, update=update_doc)
        if result.modified_count == 1:
            flash('component un-released', 'success')
        else:
            # should not happen.
            flash('no data modified, please contact the administrator', 'error')
        return redirect(url_for('components.details', partno=partno))
    extract_errors(form)
    return render_template('components/unrelease_form.html', data=obj, form=form)
@bp.route('/<partno>/make-obsolete', methods=['GET', 'POST'])
@role_required('component_admin')
def make_obsolete(partno):
    """
    Marks the given component as obsolete.
    Precondition: The user must have the admin role and the item must not already be obsolete
    """
    obj = _load_if_active(partno)
    form = ObsoleteForm(request.form)
    if request.method == 'POST' and form.validate_on_submit():
        update_doc = {
            '$set': {'obsolete': True},
            '$push': {
                'history': {
                    'date': datetime.now(),
                    'user': current_user.id,
                    'message': 'component obsoleted'
                }
            }
        }
        result = current_app.mongo.db.components.update_one(
            filter={'_id': partno}, update=update_doc)
        if result.modified_count == 1:
            flash('component obsoleted', 'success')
        else:
            # should not happen.
            flash('no data modified, please contact the administrator', 'error')
        return redirect(url_for('components.details', partno=partno))
    extract_errors(form)
    return render_template('components/obsolete_form.html', data=obj, form=form)
def ensure_exists(partno):
    """
    Ensures that the given part number does exist in the database and raises
    ValueError if the item does not exist.
    """
    if not current_app.mongo.db.components.find_one(partno):
        raise ValueError('unknown part number %s' % partno)
def _create_new_partno():
    """
    Creates and returns a new part number (component ID).

    An atomic counter in the 'unique_numbers' collection provides the
    sequence number, which is prefixed with LPM_PARTNO_PREFIX.
    """
    counter = current_app.mongo.db.unique_numbers.find_one_and_update(
        {'_id': 'partno'},
        {'$inc': {'seq': 1}},
        upsert=True,  # creates the counter document if needed
        return_document=ReturnDocument.AFTER
    )
    prefix = current_app.config.get('LPM_PARTNO_PREFIX', '')
    return '%s%04d' % (prefix, counter['seq'])
def _get_files(partno):
    """
    Returns a sorted list of files belonging to the given part number.

    Returns an empty list when the component has no file directory (yet)
    or the configured base directory is missing.
    """
    try:
        directory = os.path.join(current_app.config['LPM_COMPONENT_FILES_DIR'], partno)
        return sorted(os.listdir(directory))
    except (KeyError, OSError):
        # bare 'except:' narrowed: only a missing config key or an absent /
        # unreadable directory are expected here; anything else should surface
        return list()
def _load_if_active(partno):
    """
    Loads the component with given ID from the database and returns it.
    Aborts with 404 if the component is not found.
    Flashes an error message and redirects to the details page if the
    component is obsolete (a missing 'obsolete' flag counts as obsolete).
    """
    obj = current_app.mongo.db.components.find_one_or_404(partno)
    if not obj.get('obsolete', True):
        return obj
    flash('Invalid operation for obsolete components', 'error')
    abort(redirect(url_for('components.details', partno=partno)))
def _load_if_released(partno):
    """
    Loads the component with given ID from the database and returns it.
    Aborts with 404 if the component is not found.
    Flashes an error message and redirects to the details page if the
    component is not released.
    """
    obj = _load_if_active(partno)
    if obj.get('released', False):
        return obj
    flash('Invalid operation for non-released components', 'error')
    abort(redirect(url_for('components.details', partno=partno)))
def _load_if_unreleased(partno):
    """
    Loads the component with given ID from the database and returns it.
    Aborts with 404 if the component is not found.
    Flashes an error message and redirects to the details page if the
    component is currently released (released components are immutable).
    """
    obj = _load_if_active(partno)
    if obj.get('released', True):
        flash('Invalid operation for released components', 'error')
        abort(redirect(url_for('components.details', partno=partno)))
    return obj
def _extract_suppliers(form):
"""
Extracts the list of suppliers from the form data
"""
suppliers = list()
if form.supplier1.data:
suppliers.append({'name': form.supplier1.data, 'partno': form.supplier1part.data})
if form.supplier2.data:
suppliers.append({'name': form.supplier2.data, 'partno': form.supplier2part.data})
return suppliers
def _extract_manufacturers(form):
"""
Extracts the list of manufacturers from the form data
"""
manufacturers = list()
if form.manufacturer1.data:
manufacturers.append({'name': form.manufacturer1.data, 'partno': form.manufacturer1part.data})
if form.manufacturer2.data:
manufacturers.append({'name': form.manufacturer2.data, 'partno': form.manufacturer2part.data})
return manufacturers
def _get_categories():
    """
    Return the configured component categories as (value, label) choice pairs.

    The categories are sorted so that the select-field ordering is
    deterministic — LPM_COMPONENT_CATEGORIES may be an unordered set.
    """
    categories = current_app.config.get('LPM_COMPONENT_CATEGORIES', set())
    return [(c, c) for c in sorted(categories)]
class PartNumber:
    """
    Class that encapsulates parsing and revision handling of part numbers.

    A part number is an uppercase prefix plus four digits (e.g. 'AB1234'),
    optionally followed by one lowercase revision letter ('AB1234c' for
    the third revision).
    """

    # raw string literal: '\d' would be an invalid escape in a plain string
    pattern = re.compile(r'^([A-Z]+\d{4})([a-z])?$')

    def __init__(self, partno):
        """Parse *partno*; raises ValueError for malformed part numbers."""
        match = PartNumber.pattern.match(partno)
        if not match:
            # bug fix: the original interpolated the builtin 'str' instead of
            # the offending value, yielding "<class 'str'> is not ..."
            raise ValueError("string '%s' is not a valid part number" % partno)
        self._baseno = match.group(1)
        self._rev = match.group(2)  # None when no revision letter was given
        self._num_revisions = None

    def set_num_revisions(self, num_revisions):
        """
        Sets the number of revisions and assigns the latest revision if the revision has not been already set.
        The number of revisions must be > 0
        """
        assert num_revisions > 0
        self._num_revisions = num_revisions
        if self._rev is None:
            self._rev = PartNumber.revision_repr(num_revisions-1)

    @property
    def id(self):
        """Full identifier including the revision letter, when known."""
        v = self._baseno
        if self._rev is not None:
            v += self._rev
        return v

    @property
    def base_number(self):
        """Part number without the revision letter."""
        return self._baseno

    @property
    def revision(self):
        """Revision letter ('a', 'b', ...) or None when not set."""
        return self._rev

    @property
    def revision_number(self):
        """Zero-based revision index, or None when no revision is set."""
        return None if self._rev is None else ord(self._rev) - ord('a')

    def is_outdated(self):
        """
        Returns whether the given revision is outdated. The number of revisions must have been set previously
        """
        assert self._num_revisions is not None
        assert self._rev is not None
        return self._num_revisions > self.revision_number+1

    @classmethod
    def revision_repr(cls, revision):
        """Map a zero-based revision index to its letter representation."""
        return chr(revision + ord('a'))

    def revision_id(self, revision):
        """Full identifier for an arbitrary zero-based revision index."""
        return self._baseno + PartNumber.revision_repr(revision)

    def __repr__(self):
        return self.id
| {
"content_hash": "43ee6882f4655ee0beae61d4ec200b11",
"timestamp": "",
"source": "github",
"line_count": 639,
"max_line_length": 120,
"avg_line_length": 35.38810641627543,
"alnum_prop": 0.6109759872639632,
"repo_name": "h-friederich/lpm",
"id": "3679b46fea57dfe53ba9afd81109c0271f1c4737",
"size": "22637",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "components.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "5901"
},
{
"name": "HTML",
"bytes": "35441"
},
{
"name": "JavaScript",
"bytes": "450499"
},
{
"name": "Python",
"bytes": "148885"
}
],
"symlink_target": ""
} |
import httplib # basic HTTP library for HTTPS connections
import logging
from quantum.plugins.nicira.nicira_nvp_plugin.api_client.client_eventlet \
import NvpApiClientEventlet
from quantum.plugins.nicira.nicira_nvp_plugin.api_client.request_eventlet \
import NvpGenericRequestEventlet
LOG = logging.getLogger("NVPApiHelper")
LOG.setLevel(logging.INFO)
class NVPApiHelper(NvpApiClientEventlet):
    '''
    Helper class to do basic login, cookie management, and provide base
    method to send HTTP requests.
    Implements new eventlet-based framework derived from the management
    console nvp_gevent_client module.
    '''
    def __init__(self, api_providers, user, password, request_timeout,
                 http_timeout, retries, redirects, failover_time,
                 concurrent_connections=3):
        '''Constructor.
        :param api_providers: a list of tuples in the form:
        (host, port, is_ssl=True). Passed on to NvpClientEventlet.
        :param user: the login username.
        :param password: the login password.
        :param concurrent_connections: the number of concurrent connections.
        :param request_timeout: all operations (including retries, redirects
        from unresponsive controllers, etc) should finish within this
        timeout.
        :param http_timeout: how long to wait before aborting an
        unresponsive controller
        :param retries: the maximum number of times a request is retried.
        :param redirects: the maximum number of redirects followed per
        request.
        :param failover_time: minimum time between controller failover and new
        connections allowed.
        '''
        NvpApiClientEventlet.__init__(
            self, api_providers, user, password, concurrent_connections,
            failover_time=failover_time)
        # Per-request tuning; handed to each NvpGenericRequestEventlet
        # created in request() below.
        self._request_timeout = request_timeout
        self._http_timeout = http_timeout
        self._retries = retries
        self._redirects = redirects
    def login(self, user=None, password=None):
        '''Login to NVP controller.
        Assumes same password is used for all controllers.
        :param user: NVP controller user (usually admin). Provided for
        backwards compatibility. In the normal mode of operation
        this should be None.
        :param password: NVP controller password. Provided for backwards
        compatibility. In the normal mode of operation this should
        be None.
        :returns: Does not return a value.
        '''
        # Only override the stored credentials when explicitly given;
        # the parent class performs the actual login request.
        if user:
            self._user = user
        if password:
            self._password = password
        return NvpApiClientEventlet.login(self)
    def request(self, method, url, body="", content_type="application/json"):
        '''Issues request to controller.

        Returns the response body on success (2xx), None for other
        unexpected status codes, and raises one of the NvpApiException
        subclasses for mapped error codes or timeouts.
        '''
        g = NvpGenericRequestEventlet(
            self, method, url, body, content_type, auto_login=True,
            request_timeout=self._request_timeout,
            http_timeout=self._http_timeout,
            retries=self._retries, redirects=self._redirects)
        g.start()
        # join() blocks until the eventlet request completes.
        response = g.join()
        LOG.debug('NVPApiHelper.request() returns "%s"' % response)
        # response is a modified HTTPResponse object or None.
        # response.read() will not work on response as the underlying library
        # request_eventlet.NvpApiRequestEventlet has already called this
        # method in order to extract the body and headers for processing.
        # NvpApiRequestEventlet derived classes call .read() and
        # .getheaders() on the HTTPResponse objects and store the results in
        # the response object's .body and .headers data members for future
        # access.
        if response is None:
            # Timeout.
            LOG.error('Request timed out: %s to %s' % (method, url))
            raise RequestTimeout()
        status = response.status
        if status == httplib.UNAUTHORIZED:
            raise UnAuthorizedRequest()
        # Fail-fast: Check for exception conditions and raise the
        # appropriate exceptions for known error codes.
        if status in self.error_codes:
            LOG.error("Received error code: %s" % status)
            LOG.error("Server Error Message: %s" % response.body)
            self.error_codes[status](self)
        # Continue processing for non-error condition.
        if (status != httplib.OK and status != httplib.CREATED
                and status != httplib.NO_CONTENT):
            LOG.error(
                "%s to %s, unexpected response code: %d (content = '%s')" %
                (method, url, response.status, response.body))
            return None
        return response.body
    # Handlers below raise the exception matching an HTTP status code.
    def fourZeroFour(self):
        raise ResourceNotFound()
    def fourZeroNine(self):
        raise Conflict()
    def fiveZeroThree(self):
        raise ServiceUnavailable()
    def fourZeroThree(self):
        raise Forbidden()
    def zero(self):
        raise NvpApiException()
    # Map of HTTP status codes to the handler raised for them.  The values
    # are plain functions looked up on the class, so request() invokes them
    # explicitly with the instance: self.error_codes[status](self).
    error_codes = {
        404: fourZeroFour,
        409: fourZeroNine,
        503: fiveZeroThree,
        403: fourZeroThree,
        301: zero,
        307: zero,
        400: zero,
        500: zero,
    }
class NvpApiException(Exception):
    '''
    Base NvpApiClient Exception.

    To correctly use this class, inherit from it and define a 'message'
    class attribute.  Keyword arguments passed to the constructor are
    interpolated into that message with printf-style ('%') formatting;
    if interpolation fails, the raw message is used unchanged.
    '''
    message = "An unknown exception occurred."
    def __init__(self, **kwargs):
        formatted = self.message
        try:
            formatted = self.message % kwargs
        except Exception:
            # keep the unformatted core message if the arguments don't match
            pass
        self._error_string = formatted
    def __str__(self):
        return self._error_string
class UnAuthorizedRequest(NvpApiException):
    # Raised for HTTP 401 responses (see NVPApiHelper.request).
    message = "Server denied session's authentication credentials."
class ResourceNotFound(NvpApiException):
    # Raised for HTTP 404 responses (see NVPApiHelper.error_codes).
    message = "An entity referenced in the request was not found."
class Conflict(NvpApiException):
    # Raised for HTTP 409 responses (see NVPApiHelper.error_codes).
    message = "Request conflicts with configuration on a different entity."
class ServiceUnavailable(NvpApiException):
    # Raised for HTTP 503 responses (see NVPApiHelper.error_codes).
    # Grammar fix in the user-facing text: "could not completed" ->
    # "could not be completed".
    message = ("Request could not be completed because the associated "
               "resource could not be reached.")
class Forbidden(NvpApiException):
    # Raised for HTTP 403 responses (see NVPApiHelper.error_codes).
    message = ("The request is forbidden from accessing the "
               "referenced resource.")
class RequestTimeout(NvpApiException):
    # Raised when the eventlet request returns no response at all.
    message = "The request has timed out."
| {
"content_hash": "d9f7bb4390d523e298d53f3d1598163a",
"timestamp": "",
"source": "github",
"line_count": 194,
"max_line_length": 78,
"avg_line_length": 33.71134020618557,
"alnum_prop": 0.6443425076452599,
"repo_name": "savi-dev/quantum",
"id": "dd1387e6cd0fbe0004fc85ce4b9ecfc16e6e568f",
"size": "7202",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "quantum/plugins/nicira/nicira_nvp_plugin/NvpApiClient.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "37307"
},
{
"name": "JavaScript",
"bytes": "67928"
},
{
"name": "Perl",
"bytes": "18263"
},
{
"name": "Python",
"bytes": "1519204"
},
{
"name": "Shell",
"bytes": "7766"
},
{
"name": "XSLT",
"bytes": "50907"
}
],
"symlink_target": ""
} |
try:
from builtins import object
except ImportError:
pass
import sys
from .utils import InheritedStuff
from .utils import Stuff, DummyModel
from functools import partial
from transitions import Machine, MachineError, State, EventData
from transitions.core import listify, _prep_ordered_arg
from unittest import TestCase, skipIf
import warnings
import weakref
try:
from unittest.mock import MagicMock
except ImportError:
from mock import MagicMock
def on_exit_A(event):
    # Module-level exit callback used by the state tests: flag the model
    # attached to the event when state A is left.
    setattr(event.model, 'exit_A_called', True)
def on_exit_B(event):
    # Module-level exit callback used by the state tests: flag the model
    # attached to the event when state B is left.
    setattr(event.model, 'exit_B_called', True)
class TestTransitions(TestCase):
    def setUp(self):
        # Fresh model and machine class for every test; TestCase subclasses
        # may substitute their own machine_cls.
        self.stuff = Stuff()
        self.machine_cls = Machine
    def tearDown(self):
        # Nothing to clean up; kept for symmetry and subclass overrides.
        pass
def test_init_machine_with_hella_arguments(self):
states = [
State('State1'),
'State2',
{
'name': 'State3',
'on_enter': 'hello_world'
}
]
transitions = [
{'trigger': 'advance',
'source': 'State2',
'dest': 'State3'
}
]
s = Stuff()
m = s.machine_cls(model=s, states=states, transitions=transitions, initial='State2')
s.advance()
self.assertEqual(s.message, 'Hello World!')
    def test_listify(self):
        """listify wraps scalars, maps None to [], and passes sequences through."""
        self.assertEqual(listify(4), [4])
        self.assertEqual(listify(None), [])
        # tuples and lists are returned unchanged, not copied/converted
        self.assertEqual(listify((4, 5)), (4, 5))
        self.assertEqual(listify([1, 3]), [1, 3])
        class Foo:
            pass
        obj = Foo()
        proxy = weakref.proxy(obj)
        del obj
        # a (dead) weakref proxy must be wrapped in a list, not iterated
        self.assertEqual(listify(proxy), [proxy])
    def test_weakproxy_model(self):
        """A weakref.proxy can serve as the machine's model."""
        d = DummyModel()
        pr = weakref.proxy(d)
        self.machine_cls(pr, states=['A', 'B'], transitions=[['go', 'A', 'B']], initial='A')
        pr.go()
        self.assertTrue(pr.is_B())
    def test_property_initial(self):
        """Machine.initial reflects the constructor argument, defaulting to 'initial'."""
        states = ['A', 'B', 'C', 'D']
        # Define with list of dictionaries
        transitions = [
            {'trigger': 'walk', 'source': 'A', 'dest': 'B'},
            {'trigger': 'run', 'source': 'B', 'dest': 'C'},
            {'trigger': 'sprint', 'source': 'C', 'dest': 'D'}
        ]
        m = self.stuff.machine_cls(states=states, transitions=transitions, initial='A')
        self.assertEqual(m.initial, 'A')
        m = self.stuff.machine_cls(states=states, transitions=transitions, initial='C')
        self.assertEqual(m.initial, 'C')
        # no initial given: the machine falls back to the default state name
        m = self.stuff.machine_cls(states=states, transitions=transitions)
        self.assertEqual(m.initial, 'initial')
def test_transition_definitions(self):
states = ['A', 'B', 'C', 'D']
# Define with list of dictionaries
transitions = [
{'trigger': 'walk', 'source': 'A', 'dest': 'B'},
{'trigger': 'run', 'source': 'B', 'dest': 'C'},
{'trigger': 'sprint', 'source': 'C', 'dest': 'D'}
]
m = Machine(states=states, transitions=transitions, initial='A')
m.walk()
self.assertEqual(m.state, 'B')
# Define with list of lists
transitions = [
['walk', 'A', 'B'],
['run', 'B', 'C'],
['sprint', 'C', 'D']
]
m = Machine(states=states, transitions=transitions, initial='A')
m.to_C()
m.sprint()
self.assertEqual(m.state, 'D')
    def test_add_states(self):
        """States added after construction receive auto ('to_<state>') transitions."""
        s = self.stuff
        s.machine.add_state('X')
        s.machine.add_state('Y')
        s.machine.add_state('Z')
        # the auto-transition event of the current state must now be able
        # to reach the newly added 'X' state
        event = s.machine.events['to_{0}'.format(s.state)]
        self.assertEqual(1, len(event.transitions['X']))
    def test_transitioning(self):
        """A shared trigger advances through a chain of states one hop at a time."""
        s = self.stuff
        s.machine.add_transition('advance', 'A', 'B')
        s.machine.add_transition('advance', 'B', 'C')
        s.machine.add_transition('advance', 'C', 'D')
        s.advance()
        self.assertEqual(s.state, 'B')
        # convenience is_<state>() checks reflect the current state
        self.assertFalse(s.is_A())
        self.assertTrue(s.is_B())
        s.advance()
        self.assertEqual(s.state, 'C')
def test_pass_state_instances_instead_of_names(self):
state_A = State('A')
state_B = State('B')
states = [state_A, state_B]
m = Machine(states=states, initial=state_A)
assert m.state == 'A'
m.add_transition('advance', state_A, state_B)
m.advance()
assert m.state == 'B'
state_B2 = State('B', on_enter='this_passes')
with self.assertRaises(ValueError):
m.add_transition('advance2', state_A, state_B2)
m2 = Machine(states=states, initial=state_A.name)
assert m.initial == m2.initial
with self.assertRaises(ValueError):
Machine(states=states, initial=State('A'))
    def test_conditions(self):
        """'conditions' must pass and 'unless' must fail for a transition to fire."""
        s = self.stuff
        s.machine.add_transition('advance', 'A', 'B', conditions='this_passes')
        s.machine.add_transition('advance', 'B', 'C', unless=['this_fails'])
        # 'unless' containing a passing check blocks the transition
        s.machine.add_transition('advance', 'C', 'D', unless=['this_fails',
                                                             'this_passes'])
        s.advance()
        self.assertEqual(s.state, 'B')
        s.advance()
        self.assertEqual(s.state, 'C')
        s.advance()
        # blocked: state stays at C
        self.assertEqual(s.state, 'C')
    def test_uncallable_callbacks(self):
        """String callbacks that resolve to non-callable attributes are handled."""
        s = self.stuff
        s.machine.add_transition('advance', 'A', 'B', conditions=['property_that_fails', 'is_false'])
        # make sure parameters passed by trigger events can be handled
        s.machine.add_transition('advance', 'A', 'C', before=['property_that_fails', 'is_false'])
        s.advance(level='MaximumSpeed')
        # first transition's conditions fail, so the second (to C) fires
        self.assertTrue(s.is_C())
def test_conditions_with_partial(self):
def check(result):
return result
s = self.stuff
s.machine.add_transition('advance', 'A', 'B',
conditions=partial(check, True))
s.machine.add_transition('advance', 'B', 'C',
unless=[partial(check, False)])
s.machine.add_transition('advance', 'C', 'D',
unless=[partial(check, False), partial(check, True)])
s.advance()
self.assertEqual(s.state, 'B')
s.advance()
self.assertEqual(s.state, 'C')
s.advance()
self.assertEqual(s.state, 'C')
    def test_multiple_add_transitions_from_state(self):
        """With two transitions sharing trigger and source, the first passing one wins."""
        s = self.stuff
        s.machine.add_transition(
            'advance', 'A', 'B', conditions=['this_fails'])
        s.machine.add_transition('advance', 'A', 'C')
        s.advance()
        self.assertEqual(s.state, 'C')
    def test_use_machine_as_model(self):
        """When no model is given, the Machine instance itself acts as the model."""
        states = ['A', 'B', 'C', 'D']
        m = Machine(states=states, initial='A')
        m.add_transition('move', 'A', 'B')
        m.add_transition('move_to_C', 'B', 'C')
        m.move()
        self.assertEqual(m.state, 'B')
def test_state_change_listeners(self):
s = self.stuff
s.machine.add_transition('advance', 'A', 'B')
s.machine.add_transition('reverse', 'B', 'A')
s.machine.on_enter_B('hello_world')
s.machine.on_exit_B('goodbye')
s.advance()
self.assertEqual(s.state, 'B')
self.assertEqual(s.message, 'Hello World!')
s.reverse()
self.assertEqual(s.state, 'A')
self.assertTrue(s.message.startswith('So long'))
    def test_before_after_callback_addition(self):
        """Callbacks can be attached to an existing Transition via add_callback."""
        m = Machine(Stuff(), states=['A', 'B', 'C'], initial='A')
        m.add_transition('move', 'A', 'B')
        # grab the transition object directly and append an 'after' callback
        trans = m.events['move'].transitions['A'][0]
        trans.add_callback('after', 'increase_level')
        m.model.move()
        self.assertEqual(m.model.level, 2)
def test_before_after_transition_listeners(self):
m = Machine(Stuff(), states=['A', 'B', 'C'], initial='A')
m.add_transition('move', 'A', 'B')
m.add_transition('move', 'B', 'C')
m.before_move('increase_level')
m.model.move()
self.assertEqual(m.model.level, 2)
m.model.move()
self.assertEqual(m.model.level, 3)
def test_prepare(self):
m = Machine(Stuff(), states=['A', 'B', 'C'], initial='A')
m.add_transition('move', 'A', 'B', prepare='increase_level')
m.add_transition('move', 'B', 'C', prepare='increase_level')
m.add_transition('move', 'C', 'A', prepare='increase_level', conditions='this_fails')
m.add_transition('dont_move', 'A', 'C', prepare='increase_level')
m.prepare_move('increase_level')
m.model.move()
self.assertEqual(m.model.state, 'B')
self.assertEqual(m.model.level, 3)
m.model.move()
self.assertEqual(m.model.state, 'C')
self.assertEqual(m.model.level, 5)
# State does not advance, but increase_level still runs
m.model.move()
self.assertEqual(m.model.state, 'C')
self.assertEqual(m.model.level, 7)
# An invalid transition shouldn't execute the callback
try:
m.model.dont_move()
except MachineError as e:
self.assertTrue("Can't trigger event" in str(e))
self.assertEqual(m.model.state, 'C')
self.assertEqual(m.model.level, 7)
def test_state_model_change_listeners(self):
s = self.stuff
s.machine.add_transition('go_e', 'A', 'E')
s.machine.add_transition('go_f', 'E', 'F')
s.machine.on_enter_F('hello_F')
s.go_e()
self.assertEqual(s.state, 'E')
self.assertEqual(s.message, 'I am E!')
s.go_f()
self.assertEqual(s.state, 'F')
self.assertEqual(s.exit_message, 'E go home...')
assert 'I am F!' in s.message
assert 'Hello F!' in s.message
def test_inheritance(self):
states = ['A', 'B', 'C', 'D', 'E']
s = InheritedStuff(states=states, initial='A')
s.add_transition('advance', 'A', 'B', conditions='this_passes')
s.add_transition('advance', 'B', 'C')
s.add_transition('advance', 'C', 'D')
s.advance()
self.assertEqual(s.state, 'B')
self.assertFalse(s.is_A())
self.assertTrue(s.is_B())
s.advance()
self.assertEqual(s.state, 'C')
class NewMachine(Machine):
def __init__(self, *args, **kwargs):
super(NewMachine, self).__init__(*args, **kwargs)
n = NewMachine(states=states, transitions=[['advance', 'A', 'B']], initial='A')
self.assertTrue(n.is_A())
n.advance()
self.assertTrue(n.is_B())
with self.assertRaises(ValueError):
NewMachine(state=['A', 'B'])
def test_send_event_data_callbacks(self):
states = ['A', 'B', 'C', 'D', 'E']
s = Stuff()
# First pass positional and keyword args directly to the callback
m = Machine(model=s, states=states, initial='A', send_event=False,
auto_transitions=True)
m.add_transition(
trigger='advance', source='A', dest='B', before='set_message')
s.advance(message='Hallo. My name is Inigo Montoya.')
self.assertTrue(s.message.startswith('Hallo.'))
s.to_A()
s.advance('Test as positional argument')
self.assertTrue(s.message.startswith('Test as'))
# Now wrap arguments in an EventData instance
m.send_event = True
m.add_transition(
trigger='advance', source='B', dest='C', before='extract_message')
s.advance(message='You killed my father. Prepare to die.')
self.assertTrue(s.message.startswith('You'))
def test_send_event_data_conditions(self):
states = ['A', 'B', 'C', 'D']
s = Stuff()
# First pass positional and keyword args directly to the condition
m = Machine(model=s, states=states, initial='A', send_event=False)
m.add_transition(
trigger='advance', source='A', dest='B',
conditions='this_fails_by_default')
s.advance(boolean=True)
self.assertEqual(s.state, 'B')
# Now wrap arguments in an EventData instance
m.send_event = True
m.add_transition(
trigger='advance', source='B', dest='C',
conditions='extract_boolean')
s.advance(boolean=False)
self.assertEqual(s.state, 'B')
def test_auto_transitions(self):
states = ['A', {'name': 'B'}, State(name='C')]
m = Machine(states=states, initial='A', auto_transitions=True)
m.to_B()
self.assertEqual(m.state, 'B')
m.to_C()
self.assertEqual(m.state, 'C')
m.to_A()
self.assertEqual(m.state, 'A')
# Should fail if auto transitions is off...
m = Machine(states=states, initial='A', auto_transitions=False)
with self.assertRaises(AttributeError):
m.to_C()
def test_ordered_transitions(self):
states = ['beginning', 'middle', 'end']
m = Machine(states=states)
m.add_ordered_transitions()
self.assertEqual(m.state, 'initial')
m.next_state()
self.assertEqual(m.state, 'beginning')
m.next_state()
m.next_state()
self.assertEqual(m.state, 'end')
m.next_state()
self.assertEqual(m.state, 'initial')
# Include initial state in loop
m = Machine(states=states)
m.add_ordered_transitions(loop_includes_initial=False)
m.to_end()
m.next_state()
self.assertEqual(m.state, 'beginning')
# Do not loop transitions
m = Machine(states=states)
m.add_ordered_transitions(loop=False)
m.to_end()
with self.assertRaises(MachineError):
m.next_state()
# Test user-determined sequence and trigger name
m = Machine(states=states, initial='beginning')
m.add_ordered_transitions(['end', 'beginning'], trigger='advance')
m.advance()
self.assertEqual(m.state, 'end')
m.advance()
self.assertEqual(m.state, 'beginning')
# Via init argument
m = Machine(states=states, initial='beginning', ordered_transitions=True)
m.next_state()
self.assertEqual(m.state, 'middle')
# Alter initial state
m = Machine(states=states, initial='middle', ordered_transitions=True)
m.next_state()
self.assertEqual(m.state, 'end')
m.next_state()
self.assertEqual(m.state, 'beginning')
# Partial state machine without the initial state
m = Machine(states=states, initial='beginning')
m.add_ordered_transitions(['middle', 'end'])
self.assertEqual(m.state, 'beginning')
with self.assertRaises(MachineError):
m.next_state()
m.to_middle()
for s in ('end', 'middle', 'end'):
m.next_state()
self.assertEqual(m.state, s)
    def test_ordered_transition_error(self):
        """add_ordered_transitions rejects machines/sequences with too few states."""
        m = Machine(states=['A'], initial='A')
        with self.assertRaises(ValueError):
            m.add_ordered_transitions()
        m.add_state('B')
        m.add_ordered_transitions()
        m.add_state('C')
        # an explicit one-element sequence is also too short
        with self.assertRaises(ValueError):
            m.add_ordered_transitions(['C'])
def test_ignore_invalid_triggers(self):
a_state = State('A')
transitions = [['a_to_b', 'A', 'B']]
# Exception is triggered by default
b_state = State('B')
m1 = Machine(states=[a_state, b_state], transitions=transitions,
initial='B')
with self.assertRaises(MachineError):
m1.a_to_b()
# Set default value on machine level
m2 = Machine(states=[a_state, b_state], transitions=transitions,
initial='B', ignore_invalid_triggers=True)
m2.a_to_b()
# Exception is suppressed, so this passes
b_state = State('B', ignore_invalid_triggers=True)
m3 = Machine(states=[a_state, b_state], transitions=transitions,
initial='B')
m3.a_to_b()
# Set for some states but not others
new_states = ['C', 'D']
m1.add_states(new_states, ignore_invalid_triggers=True)
m1.to_D()
m1.a_to_b() # passes because exception suppressed for D
m1.to_B()
with self.assertRaises(MachineError):
m1.a_to_b()
# State value overrides machine behaviour
m3 = Machine(states=[a_state, b_state], transitions=transitions,
initial='B', ignore_invalid_triggers=False)
m3.a_to_b()
def test_string_callbacks(self):
m = Machine(states=['A', 'B'],
before_state_change='before_state_change',
after_state_change='after_state_change', send_event=True,
initial='A', auto_transitions=True)
m.before_state_change = MagicMock()
m.after_state_change = MagicMock()
m.to_B()
self.assertTrue(m.before_state_change[0].called)
self.assertTrue(m.after_state_change[0].called)
# after_state_change should have been called with EventData
event_data = m.after_state_change[0].call_args[0][0]
self.assertIsInstance(event_data, EventData)
self.assertTrue(event_data.result)
def test_function_callbacks(self):
before_state_change = MagicMock()
after_state_change = MagicMock()
m = Machine(states=['A', 'B'],
before_state_change=before_state_change,
after_state_change=after_state_change, send_event=True,
initial='A', auto_transitions=True)
m.to_B()
self.assertTrue(m.before_state_change[0].called)
self.assertTrue(m.after_state_change[0].called)
def test_state_callbacks(self):
class Model:
def on_enter_A(self):
pass
def on_exit_A(self):
pass
def on_enter_B(self):
pass
def on_exit_B(self):
pass
states = [State(name='A', on_enter='on_enter_A', on_exit='on_exit_A'),
State(name='B', on_enter='on_enter_B', on_exit='on_exit_B')]
machine = Machine(Model(), states=states)
state_a = machine.get_state('A')
state_b = machine.get_state('B')
self.assertEqual(len(state_a.on_enter), 1)
self.assertEqual(len(state_a.on_exit), 1)
self.assertEqual(len(state_b.on_enter), 1)
self.assertEqual(len(state_b.on_exit), 1)
def test_state_callable_callbacks(self):
class Model:
def __init__(self):
self.exit_A_called = False
self.exit_B_called = False
def on_enter_A(self, event):
pass
def on_enter_B(self, event):
pass
states = [State(name='A', on_enter='on_enter_A', on_exit='tests.test_core.on_exit_A'),
State(name='B', on_enter='on_enter_B', on_exit=on_exit_B),
State(name='C', on_enter='tests.test_core.AAAA')]
model = Model()
machine = Machine(model, states=states, send_event=True, initial='A')
state_a = machine.get_state('A')
state_b = machine.get_state('B')
self.assertEqual(len(state_a.on_enter), 1)
self.assertEqual(len(state_a.on_exit), 1)
self.assertEqual(len(state_b.on_enter), 1)
self.assertEqual(len(state_b.on_exit), 1)
model.to_B()
self.assertTrue(model.exit_A_called)
model.to_A()
self.assertTrue(model.exit_B_called)
with self.assertRaises(AttributeError):
model.to_C()
def test_pickle(self):
import sys
if sys.version_info < (3, 4):
import dill as pickle
else:
import pickle
states = ['A', 'B', 'C', 'D']
# Define with list of dictionaries
transitions = [
{'trigger': 'walk', 'source': 'A', 'dest': 'B'},
{'trigger': 'run', 'source': 'B', 'dest': 'C'},
{'trigger': 'sprint', 'source': 'C', 'dest': 'D'}
]
m = Machine(states=states, transitions=transitions, initial='A')
m.walk()
dump = pickle.dumps(m)
self.assertIsNotNone(dump)
m2 = pickle.loads(dump)
self.assertEqual(m.state, m2.state)
m2.run()
def test_pickle_model(self):
import sys
if sys.version_info < (3, 4):
import dill as pickle
else:
import pickle
self.stuff.to_B()
dump = pickle.dumps(self.stuff)
self.assertIsNotNone(dump)
model2 = pickle.loads(dump)
self.assertEqual(self.stuff.state, model2.state)
model2.to_F()
def test_queued(self):
states = ['A', 'B', 'C', 'D']
# Define with list of dictionaries
def change_state(machine):
self.assertEqual(machine.state, 'A')
if machine.has_queue:
machine.run(machine=machine)
self.assertEqual(machine.state, 'A')
else:
with self.assertRaises(MachineError):
machine.run(machine=machine)
transitions = [
{'trigger': 'walk', 'source': 'A', 'dest': 'B', 'before': change_state},
{'trigger': 'run', 'source': 'B', 'dest': 'C'},
{'trigger': 'sprint', 'source': 'C', 'dest': 'D'}
]
m = Machine(states=states, transitions=transitions, initial='A')
m.walk(machine=m)
self.assertEqual(m.state, 'B')
m = Machine(states=states, transitions=transitions, initial='A', queued=True)
m.walk(machine=m)
self.assertEqual(m.state, 'C')
def test_queued_errors(self):
def before_change(machine):
if machine.has_queue:
machine.to_A(machine)
machine._queued = False
def after_change(machine):
machine.to_C(machine)
states = ['A', 'B', 'C']
transitions = [{'trigger': 'do', 'source': '*', 'dest': 'C',
'before': partial(self.stuff.this_raises, ValueError)}]
m = Machine(states=states, transitions=transitions, queued=True,
before_state_change=before_change, after_state_change=after_change)
with self.assertRaises(MachineError):
m.to_B(machine=m)
with self.assertRaises(ValueError):
m.do(machine=m)
def test_queued_remove(self):
m = self.machine_cls(model=None, states=['A', 'B', 'C'], initial='A', queued=True)
assert_equal = self.assertEqual
class BaseModel:
def on_enter_A(self):
pass
def on_enter_B(self):
pass
def on_enter_C(self):
pass
class SubModel(BaseModel):
def __init__(self):
self.inner = BaseModel()
def on_enter_A(self):
self.to_B()
self.inner.to_B()
def on_enter_B(self):
self.to_C()
self.inner.to_C()
# queue should contain to_B(), inner.to_B(), to_C(), inner.to_C()
assert_equal(4, len(m._transition_queue))
m.remove_model(self)
# since to_B() is currently executed it should still be in the list, to_C should be gone
assert_equal(3, len(m._transition_queue))
def on_enter_C(self):
raise RuntimeError("Event was not cancelled")
model = SubModel()
m.add_model([model, model.inner])
model.to_A()
# test whether models can be removed outside event queue
m.remove_model(model.inner)
self.assertTrue(model.inner.is_C())
def test___getattr___and_identify_callback(self):
m = self.machine_cls(Stuff(), states=['A', 'B', 'C'], initial='A')
m.add_transition('move', 'A', 'B')
m.add_transition('move', 'B', 'C')
callback = m.__getattr__('before_move')
self.assertTrue(callable(callback))
with self.assertRaises(AttributeError):
m.__getattr__('before_no_such_transition')
with self.assertRaises(AttributeError):
m.__getattr__('before_no_such_transition')
with self.assertRaises(AttributeError):
m.__getattr__('__no_such_method__')
with self.assertRaises(AttributeError):
m.__getattr__('')
type, target = m._identify_callback('on_exit_foobar')
self.assertEqual(type, 'on_exit')
self.assertEqual(target, 'foobar')
type, target = m._identify_callback('on_exitfoobar')
self.assertEqual(type, None)
self.assertEqual(target, None)
type, target = m._identify_callback('notacallback_foobar')
self.assertEqual(type, None)
self.assertEqual(target, None)
type, target = m._identify_callback('totallyinvalid')
self.assertEqual(type, None)
self.assertEqual(target, None)
type, target = m._identify_callback('before__foobar')
self.assertEqual(type, 'before')
self.assertEqual(target, '_foobar')
type, target = m._identify_callback('before__this__user__likes__underscores___')
self.assertEqual(type, 'before')
self.assertEqual(target, '_this__user__likes__underscores___')
type, target = m._identify_callback('before_stuff')
self.assertEqual(type, 'before')
self.assertEqual(target, 'stuff')
type, target = m._identify_callback('before_trailing_underscore_')
self.assertEqual(type, 'before')
self.assertEqual(target, 'trailing_underscore_')
type, target = m._identify_callback('before_')
self.assertIs(type, None)
self.assertIs(target, None)
type, target = m._identify_callback('__')
self.assertIs(type, None)
self.assertIs(target, None)
type, target = m._identify_callback('')
self.assertIs(type, None)
self.assertIs(target, None)
def test_state_and_transition_with_underscore(self):
m = Machine(Stuff(), states=['_A_', '_B_', '_C_'], initial='_A_')
m.add_transition('_move_', '_A_', '_B_', prepare='increase_level')
m.add_transition('_after_', '_B_', '_C_', prepare='increase_level')
m.add_transition('_on_exit_', '_C_', '_A_', prepare='increase_level', conditions='this_fails')
m.model._move_()
self.assertEqual(m.model.state, '_B_')
self.assertEqual(m.model.level, 2)
m.model._after_()
self.assertEqual(m.model.state, '_C_')
self.assertEqual(m.model.level, 3)
# State does not advance, but increase_level still runs
m.model._on_exit_()
self.assertEqual(m.model.state, '_C_')
self.assertEqual(m.model.level, 4)
def test_callback_identification(self):
m = Machine(Stuff(), states=['A', 'B', 'C', 'D', 'E', 'F'], initial='A')
m.add_transition('transition', 'A', 'B', before='increase_level')
m.add_transition('after', 'B', 'C', before='increase_level')
m.add_transition('on_exit_A', 'C', 'D', before='increase_level', conditions='this_fails')
m.add_transition('check', 'C', 'E', before='increase_level')
m.add_transition('prepare', 'E', 'F', before='increase_level')
m.add_transition('before', 'F', 'A', before='increase_level')
m.before_transition('increase_level')
m.before_after('increase_level')
m.before_on_exit_A('increase_level')
m.after_check('increase_level')
m.before_prepare('increase_level')
m.before_before('increase_level')
m.model.transition()
self.assertEqual(m.model.state, 'B')
self.assertEqual(m.model.level, 3)
m.model.after()
self.assertEqual(m.model.state, 'C')
self.assertEqual(m.model.level, 5)
m.model.on_exit_A()
self.assertEqual(m.model.state, 'C')
self.assertEqual(m.model.level, 5)
m.model.check()
self.assertEqual(m.model.state, 'E')
self.assertEqual(m.model.level, 7)
m.model.prepare()
self.assertEqual(m.model.state, 'F')
self.assertEqual(m.model.level, 9)
m.model.before()
self.assertEqual(m.model.state, 'A')
self.assertEqual(m.model.level, 11)
# An invalid transition shouldn't execute the callback
with self.assertRaises(MachineError):
m.model.on_exit_A()
def test_process_trigger(self):
m = Machine(states=['raw', 'processed'], initial='raw')
m.add_transition('process', 'raw', 'processed')
m.process()
self.assertEqual(m.state, 'processed')
def test_multiple_models(self):
s1, s2 = Stuff(), Stuff()
states = ['A', 'B', 'C']
m = Machine(model=[s1, s2], states=states,
initial=states[0])
self.assertEqual(len(m.models), 2)
self.assertEqual(len(m.model), 2)
m.add_transition('advance', 'A', 'B')
s1.advance()
self.assertEqual(s1.state, 'B')
self.assertEqual(s2.state, 'A')
m = Machine(model=s1, states=states,
initial=states[0])
# for backwards compatibility model should return a model instance
# rather than a list
self.assertNotIsInstance(m.model, list)
    def test_dispatch(self):
        """dispatch() fires the trigger on every model registered with the machine."""
        s1, s2 = Stuff(), Stuff()
        states = ['A', 'B', 'C']
        m = Machine(model=s1, states=states, ignore_invalid_triggers=True,
                    initial=states[0], transitions=[['go', 'A', 'B'], ['go', 'B', 'C']])
        # second model starts from a different state
        m.add_model(s2, initial='B')
        m.dispatch('go')
        self.assertEqual(s1.state, 'B')
        self.assertEqual(s2.state, 'C')
    def test_remove_model(self):
        """remove_model() detaches a model (here: the machine itself) from the machine."""
        m = self.machine_cls()
        self.assertIn(m, m.models)
        m.remove_model(m)
        self.assertNotIn(m, m.models)
    def test_string_trigger(self):
        """model.trigger(<name>) fires transitions by name; unknown names
        raise AttributeError unless ignore_invalid_triggers is set, and a
        user-defined `trigger` method is left untouched."""
        def return_value(value):
            return value
        class Model:
            def trigger(self, value):
                return value
        self.stuff.machine.add_transition('do', '*', 'C')
        self.stuff.trigger('do')
        self.assertTrue(self.stuff.is_C())
        self.stuff.machine.add_transition('maybe', 'C', 'A', conditions=return_value)
        # trigger() returns the transition's success flag
        self.assertFalse(self.stuff.trigger('maybe', value=False))
        self.assertTrue(self.stuff.trigger('maybe', value=True))
        self.assertTrue(self.stuff.is_A())
        with self.assertRaises(AttributeError):
            self.stuff.trigger('not_available')
        with self.assertRaises(MachineError):
            self.stuff.trigger('maybe')
        # a model that already defines `trigger` keeps its own implementation
        model = Model()
        m = Machine(model=model)
        self.assertEqual(model.trigger(5), 5)
        self.stuff.machine.add_transition('do_raise_keyerror', '*', 'C',
                                          before=partial(self.stuff.this_raises, KeyError))
        with self.assertRaises(KeyError):
            self.stuff.trigger('do_raise_keyerror')
        # invalid triggers can be ignored per model state or machine-wide
        self.stuff.machine.get_model_state(self.stuff).ignore_invalid_triggers = True
        self.stuff.trigger('should_not_raise_anything')
        self.stuff.trigger('to_A')
        self.assertTrue(self.stuff.is_A())
        self.stuff.machine.ignore_invalid_triggers = True
        self.stuff.trigger('should_not_raise_anything')
    def test_get_triggers(self):
        """get_triggers() lists event names leaving a state, accepting either
        a state name or a State instance."""
        states = ['A', 'B', 'C']
        transitions = [['a2b', 'A', 'B'],
                       ['a2c', 'A', 'C'],
                       ['c2b', 'C', 'B']]
        machine = Machine(states=states, transitions=transitions, initial='A', auto_transitions=False)
        self.assertEqual(len(machine.get_triggers('A')), 2)
        self.assertEqual(len(machine.get_triggers('B')), 0)
        self.assertEqual(len(machine.get_triggers('C')), 1)
        # self stuff machine should have to-transitions to every state
        m = self.stuff.machine
        self.assertEqual(len(m.get_triggers('B')), len(m.states))
        trigger_name = m.get_triggers('B')
        trigger_state = m.get_triggers(m.states['B'])
        self.assertEqual(trigger_name, trigger_state)
    def test_skip_override(self):
        """An existing model method with a trigger's name is not overridden;
        the machine-level event stays reachable via trigger()."""
        local_mock = MagicMock()
        class Model(object):
            def go(self):
                local_mock()
        model = Model()
        transitions = [['go', 'A', 'B'], ['advance', 'A', 'B']]
        m = self.stuff.machine_cls(model=model, states=['A', 'B'], transitions=transitions, initial='A')
        model.go()
        # Model.go shadows the trigger: state unchanged, mock invoked instead
        self.assertEqual(model.state, 'A')
        self.assertTrue(local_mock.called)
        model.advance()
        self.assertEqual(model.state, 'B')
        model.to_A()
        # trigger() bypasses the shadowing method and fires the event
        model.trigger('go')
        self.assertEqual(model.state, 'B')
    @skipIf(sys.version_info < (3, ),
            "String-checking disabled on PY-2 because is different")
    def test_repr(self):
        """reprs of EventData, Transition and Condition follow the
        '<Type(...)@id>' format checked by the regexes below."""
        def a_condition(event_data):
            self.assertRegex(
                str(event_data.transition.conditions),
                r"\[<Condition\(<function TestTransitions.test_repr.<locals>"
                r".a_condition at [^>]+>\)@\d+>\]")
            return True
        # No transition has been assigned to EventData yet
        def check_prepare_repr(event_data):
            self.assertRegex(
                str(event_data),
                r"<EventData\('<State\('A'\)@\d+>', "
                r"None\)@\d+>")
        def check_before_repr(event_data):
            self.assertRegex(
                str(event_data),
                r"<EventData\('<State\('A'\)@\d+>', "
                r"<Transition\('A', 'B'\)@\d+>\)@\d+>")
            # `m` resolves at call time via closure over the machine below
            m.checked = True
        m = Machine(states=['A', 'B'],
                    prepare_event=check_prepare_repr,
                    before_state_change=check_before_repr, send_event=True,
                    initial='A')
        m.add_transition('do_strcheck', 'A', 'B', conditions=a_condition)
        self.assertTrue(m.do_strcheck())
        self.assertIn('checked', vars(m))
    def test_machine_prepare(self):
        """Per-transition 'prepare' callbacks run for every candidate even
        when its conditions later fail; 'prepare_event' runs once per trigger."""
        global_mock = MagicMock()
        local_mock = MagicMock()
        def global_callback():
            global_mock()
        def local_callback():
            local_mock()
        def always_fails():
            return False
        transitions = [
            {'trigger': 'go', 'source': 'A', 'dest': 'B', 'conditions': always_fails, 'prepare': local_callback},
            {'trigger': 'go', 'source': 'A', 'dest': 'B', 'conditions': always_fails, 'prepare': local_callback},
            {'trigger': 'go', 'source': 'A', 'dest': 'B', 'conditions': always_fails, 'prepare': local_callback},
            {'trigger': 'go', 'source': 'A', 'dest': 'B', 'conditions': always_fails, 'prepare': local_callback},
            {'trigger': 'go', 'source': 'A', 'dest': 'B', 'prepare': local_callback},
        ]
        m = Machine(states=['A', 'B'], transitions=transitions,
                    prepare_event=global_callback, initial='A')
        m.go()
        self.assertEqual(global_mock.call_count, 1)
        # every transition's own 'prepare' ran, including the failing ones
        self.assertEqual(local_mock.call_count, len(transitions))
    def test_machine_finalize(self):
        """'finalize_event' callbacks run after every trigger -- successful,
        condition-blocked, or raising -- and errors raised inside finalize
        itself are suppressed."""
        finalize_mock = MagicMock()
        def always_fails(event_data):
            return False
        transitions = [
            {'trigger': 'go', 'source': 'A', 'dest': 'B'},
            {'trigger': 'planA', 'source': 'B', 'dest': 'A', 'conditions': always_fails},
            {'trigger': 'planB', 'source': 'B', 'dest': 'A',
             'conditions': partial(self.stuff.this_raises, RuntimeError)}
        ]
        m = self.stuff.machine_cls(states=['A', 'B'], transitions=transitions,
                                   finalize_event=finalize_mock, initial='A', send_event=True)
        m.go()
        self.assertEqual(finalize_mock.call_count, 1)
        m.planA()
        # finalize receives the EventData of the (here: failed) transition
        event_data = finalize_mock.call_args[0][0]
        self.assertIsInstance(event_data, EventData)
        self.assertEqual(finalize_mock.call_count, 2)
        self.assertFalse(event_data.result)
        with self.assertRaises(RuntimeError):
            m.planB()
        m.finalize_event.append(partial(self.stuff.this_raises, ValueError))
        # ValueError in finalize should be suppressed
        # but mock should have been called anyway
        with self.assertRaises(RuntimeError):
            m.planB()
        self.assertEqual(4, finalize_mock.call_count)
    def test_machine_finalize_exception(self):
        """The error raised during a transition is exposed as event.error
        inside the finalize callback."""
        def finalize_callback(event):
            self.assertIsInstance(event.error, ZeroDivisionError)
        m = self.stuff.machine_cls(states=['A', 'B'], send_event=True, initial='A',
                                   before_state_change=partial(self.stuff.this_raises, ZeroDivisionError),
                                   finalize_event=finalize_callback)
        with self.assertRaises(ZeroDivisionError):
            m.to_B()
def test_prep_ordered_arg(self):
self.assertTrue(len(_prep_ordered_arg(3, None)) == 3)
self.assertTrue(all(a is None for a in _prep_ordered_arg(3, None)))
with self.assertRaises(ValueError):
_prep_ordered_arg(3, [None, None])
    def test_ordered_transition_callback(self):
        """add_ordered_transitions accepts a per-step 'before' callback list."""
        class Model:
            def __init__(self):
                self.flag = False
            def make_true(self):
                self.flag = True
        model = Model()
        states = ['beginning', 'middle', 'end']
        transits = [None, None, 'make_true']
        m = Machine(model, states, initial='beginning')
        m.add_ordered_transitions(before=transits)
        model.next_state()
        # 'make_true' is attached to the final (wrap-around) step only
        self.assertFalse(model.flag)
        model.next_state()
        model.next_state()
        self.assertTrue(model.flag)
    def test_ordered_transition_condition(self):
        """add_ordered_transitions accepts per-step conditions; a failing
        condition blocks that step until it passes."""
        class Model:
            def __init__(self):
                self.blocker = False
            def check_blocker(self):
                return self.blocker
        model = Model()
        states = ['beginning', 'middle', 'end']
        m = Machine(model, states, initial='beginning')
        m.add_ordered_transitions(conditions=[None, None, 'check_blocker'])
        model.to_end()
        self.assertFalse(model.next_state())
        model.blocker = True
        self.assertTrue(model.next_state())
    def test_get_transitions(self):
        """get_transitions filters by trigger name, source and dest; State
        objects are accepted interchangeably with state names."""
        states = ['A', 'B', 'C', 'D']
        m = self.machine_cls(states=states, initial='A', auto_transitions=False)
        m.add_transition('go', ['A', 'B', 'C'], 'D')
        m.add_transition('run', 'A', 'D')
        self.assertEqual(
            {(t.source, t.dest) for t in m.get_transitions('go')},
            {('A', 'D'), ('B', 'D'), ('C', 'D')})
        # 'A' -> 'D' exists twice: once for 'go' and once for 'run'
        self.assertEqual(
            [(t.source, t.dest)
             for t in m.get_transitions(source='A', dest='D')],
            [('A', 'D'), ('A', 'D')])
        self.assertEqual(
            sorted([(t.source, t.dest)
                    for t in m.get_transitions(dest='D')]),
            [('A', 'D'), ('A', 'D'), ('B', 'D'), ('C', 'D')])
        self.assertEqual(
            [(t.source, t.dest)
             for t in m.get_transitions(source=m.states['A'], dest=m.states['D'])],
            [('A', 'D'), ('A', 'D')])
        self.assertEqual(
            sorted([(t.source, t.dest)
                    for t in m.get_transitions(dest=m.states['D'])]),
            [('A', 'D'), ('A', 'D'), ('B', 'D'), ('C', 'D')])
    def test_remove_transition(self):
        """remove_transition drops transitions selectively by source/dest;
        removing the last one also removes the trigger convenience method."""
        self.stuff.machine.add_transition('go', ['A', 'B', 'C'], 'D')
        self.stuff.machine.add_transition('walk', 'A', 'B')
        self.stuff.go()
        self.assertEqual(self.stuff.state, 'D')
        self.stuff.to_A()
        self.stuff.machine.remove_transition('go', source='A')
        with self.assertRaises(MachineError):
            self.stuff.go()
        self.stuff.machine.add_transition('go', 'A', 'D')
        self.stuff.walk()
        self.stuff.go()
        self.assertEqual(self.stuff.state, 'D')
        self.stuff.to_C()
        self.stuff.machine.remove_transition('go', dest='D')
        # all 'go' transitions gone -> the trigger method disappears as well
        self.assertFalse(hasattr(self.stuff, 'go'))
    def test_reflexive_transition(self):
        """Dest '=' keeps the model in its current state while still running
        the transition's callbacks."""
        self.stuff.machine.add_transition('reflex', ['A', 'B'], '=', after='increase_level')
        self.assertEqual(self.stuff.state, 'A')
        self.stuff.reflex()
        self.assertEqual(self.stuff.state, 'A')
        self.assertEqual(self.stuff.level, 2)
        self.stuff.to_B()
        self.assertEqual(self.stuff.state, 'B')
        self.stuff.reflex()
        self.assertEqual(self.stuff.state, 'B')
        self.assertEqual(self.stuff.level, 3)
        self.stuff.to_C()
        # 'reflex' was only defined for sources A and B
        with self.assertRaises(MachineError):
            self.stuff.reflex()
        self.assertEqual(self.stuff.level, 3)
    def test_internal_transition(self):
        """A None dest makes the transition internal: callbacks fire but the
        state is left unchanged."""
        m = Machine(Stuff(), states=['A', 'B'], initial='A')
        m.add_transition('move', 'A', None, prepare='increase_level')
        m.model.move()
        self.assertEqual(m.model.state, 'A')
        self.assertEqual(m.model.level, 2)
    def test_dynamic_model_state_attribute(self):
        """model_attribute redirects state bookkeeping to a custom attribute,
        leaving an unrelated 'state' attribute untouched."""
        class Model:
            def __init__(self):
                self.status = None
                self.state = 'some_value'
        m = self.machine_cls(Model(), states=['A', 'B'], initial='A', model_attribute='status')
        self.assertEqual(m.model.status, 'A')
        self.assertEqual(m.model.state, 'some_value')
        m.add_transition('move', 'A', 'B')
        m.model.move()
        self.assertEqual(m.model.status, 'B')
        self.assertEqual(m.model.state, 'some_value')
    def test_multiple_machines_per_model(self):
        """Two machines can share one model when each uses a distinct
        model_attribute; convenience methods are namespaced per attribute
        (is_<attr>_<state>, to_<attr>_<state>)."""
        class Model:
            def __init__(self):
                self.car_state = None
                self.driver_state = None
        instance = Model()
        machine_a = Machine(instance, states=['A', 'B'], initial='A', model_attribute='car_state')
        machine_a.add_transition('accelerate_car', 'A', 'B')
        machine_b = Machine(instance, states=['A', 'B'], initial='B', model_attribute='driver_state')
        machine_b.add_transition('driving', 'B', 'A')
        assert instance.car_state == 'A'
        assert instance.driver_state == 'B'
        assert instance.is_car_state_A()
        assert instance.is_driver_state_B()
        instance.accelerate_car()
        # only the car machine's attribute changed
        assert instance.car_state == 'B'
        assert instance.driver_state == 'B'
        assert not instance.is_car_state_A()
        assert instance.is_car_state_B()
        instance.driving()
        assert instance.driver_state == 'A'
        assert instance.car_state == 'B'
        assert instance.is_driver_state_A()
        assert not instance.is_driver_state_B()
        assert instance.to_driver_state_B()
        assert instance.driver_state == 'B'
def test_initial_not_registered(self):
m1 = self.machine_cls(states=['A', 'B'], initial=self.machine_cls.state_cls('C'))
self.assertTrue(m1.is_C())
self.assertTrue('C' in m1.states)
    def test_trigger_name_cannot_be_equal_to_model_attribute(self):
        """A trigger named like the model attribute would shadow the state
        attribute itself and must be rejected with ValueError."""
        m = self.machine_cls(states=['A', 'B'])
        with self.assertRaises(ValueError):
            m.add_transition(m.model_attribute, "A", "B")
    def test_new_state_in_enter_callback(self):
        """A state added from within an on_enter callback is usable
        immediately, including its auto transition."""
        machine = self.machine_cls(states=['A', 'B'], initial='A')
        def on_enter_B():
            state = self.machine_cls.state_cls(name='C')
            machine.add_state(state)
            machine.to_C()
        machine.on_enter_B(on_enter_B)
        machine.to_B()
    def test_on_exception_callback(self):
        """Registered 'on_exception' handlers absorb callback errors and
        MachineErrors instead of letting them propagate."""
        mock = MagicMock()
        def on_exception(event_data):
            self.assertIsInstance(event_data.error, (ValueError, MachineError))
            mock()
        m = self.machine_cls(states=['A', 'B'], initial='A', transitions=[['go', 'A', 'B']], send_event=True,
                             after_state_change=partial(self.stuff.this_raises, ValueError))
        # without a handler both error kinds propagate
        with self.assertRaises(ValueError):
            m.to_B()
        self.assertTrue(m.is_B())
        with self.assertRaises(MachineError):
            m.go()
        m.on_exception.append(on_exception)
        m.to_B()
        m.go()
        self.assertTrue(mock.called)
        self.assertEqual(2, mock.call_count)
    def test_may_transition(self):
        """may_<trigger> reports whether a trigger is currently valid without
        executing the transition."""
        states = ['A', 'B', 'C']
        d = DummyModel()
        m = Machine(model=d, states=states, initial='A', auto_transitions=False)
        m.add_transition('walk', 'A', 'B')
        m.add_transition('stop', 'B', 'C')
        assert d.may_walk()
        assert not d.may_stop()
        d.walk()
        assert not d.may_walk()
        assert d.may_stop()
def test_may_transition_for_autogenerated_triggers(self):
states = ['A', 'B', 'C']
m = Machine(states=states, initial='A')
assert m.may_to_A()
m.to_A()
assert m.to_B()
m.to_B()
assert m.may_to_C()
m.to_C()
    def test_may_transition_with_conditions(self):
        """may_<trigger> honours transition conditions as well as the
        current state."""
        states = ['A', 'B', 'C']
        d = DummyModel()
        m = Machine(model=d, states=states, initial='A', auto_transitions=False)
        m.add_transition('walk', 'A', 'B', conditions=[lambda: False])
        m.add_transition('stop', 'B', 'C')
        m.add_transition('run', 'A', 'C')
        # 'walk' is blocked by its condition even though the source matches
        assert not d.may_walk()
        assert not d.may_stop()
        assert d.may_run()
        d.run()
        assert not d.may_run()
def test_may_transition_with_auto_transitions(self):
states = ['A', 'B', 'C']
d = DummyModel()
self.machine_cls(model=d, states=states, initial='A')
assert d.may_to_A()
assert d.may_to_B()
assert d.may_to_C()
    def test_machine_may_transitions(self):
        """may_<trigger> checks are attached to the machine itself when it
        acts as its own model."""
        states = ['A', 'B', 'C']
        m = self.machine_cls(states=states, initial='A', auto_transitions=False)
        m.add_transition('walk', 'A', 'B', conditions=[lambda: False])
        m.add_transition('stop', 'B', 'C')
        m.add_transition('run', 'A', 'C')
        m.add_transition('reset', 'C', 'A')
        assert not m.may_walk()
        assert not m.may_stop()
        assert m.may_run()
        m.run()
        # from state C, none of walk/stop/run are defined
        assert not m.may_run()
        assert not m.may_stop()
        assert not m.may_walk()
    def test_may_transition_with_invalid_state(self):
        """A transition whose dest was never registered is never allowed."""
        states = ['A', 'B', 'C']
        d = DummyModel()
        m = self.machine_cls(model=d, states=states, initial='A', auto_transitions=False)
        # dest 'UNKNOWN' is not in the machine's state set
        m.add_transition('walk', 'A', 'UNKNOWN')
        assert not d.may_walk()
| {
"content_hash": "2d542b1ee7d8e650eaed591cf6f1ecd6",
"timestamp": "",
"source": "github",
"line_count": 1285,
"max_line_length": 113,
"avg_line_length": 36.4443579766537,
"alnum_prop": 0.5554013367214025,
"repo_name": "tyarkoni/transitions",
"id": "0048fdb7f14476ce0851851db6aaa2337ab498bb",
"size": "46831",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_core.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "167793"
}
],
"symlink_target": ""
} |
import os
import logging
import structlog
import json
LOG_DEFAULT_LEVEL = 'info'
def log_factory(handler, level, namespace):
    """ Opinionated logger factory. """
    # Configure the underlying stdlib logger; calling again with the same
    # namespace must not stack duplicate handlers.
    logger = logging.getLogger(namespace)
    logger.setLevel(level)
    if not logger.handlers:
        logger.addHandler(handler)
    # Wrap with structlog so every record carries its level and logger name,
    # an ISO-8601 UTC 'created_at' timestamp, and is rendered as JSON.
    return structlog.wrap_logger(
        logger,
        processors=[
            structlog.stdlib.filter_by_level,
            structlog.stdlib.add_log_level,
            structlog.stdlib.add_logger_name,
            structlog.processors.TimeStamper(fmt='iso', utc=True, key='created_at'),
            structlog.processors.JSONRenderer()
        ]
    )
def tracer(namespace=__name__, level=None):
    """ Configure and provide a structured logger. """
    # Fall back to the LOG_LEVEL environment variable, then the module default.
    effective = (level or os.environ.get('LOG_LEVEL', LOG_DEFAULT_LEVEL)).upper()
    return log_factory(logging.StreamHandler(), effective, namespace)
| {
"content_hash": "4a58dd4d77a83a01bb1b9a1714c03cf6",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 84,
"avg_line_length": 28.15625,
"alnum_prop": 0.658157602663707,
"repo_name": "axelbellec/BoTigo",
"id": "32cb195e312ca178a022e4f628a601b295312d74",
"size": "901",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "botigo/tracing.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7314"
}
],
"symlink_target": ""
} |
import argparse
from mock import patch
from ..orchestra import cluster
from .. import misc
from ..config import config
import pytest
class FakeRemote(object):
    # Bare stand-in for an orchestra remote; tests only compare identity.
    pass
def test_get_clients_simple():
    """get_clients yields (client id, remote) pairs for the requested roles
    and stops once the matching roles are exhausted."""
    ctx = argparse.Namespace()
    remote = FakeRemote()
    ctx.cluster = cluster.Cluster(
        remotes=[
            (remote, ['client.0', 'client.1'])
        ],
    )
    g = misc.get_clients(ctx=ctx, roles=['client.1'])
    got = next(g)
    assert len(got) == 2
    # the 'client.' role prefix is stripped, leaving just the id
    assert got[0] == ('1')
    assert got[1] is remote
    with pytest.raises(StopIteration):
        next(g)
def test_get_http_log_path():
    """get_http_log_path joins the configured archive server, the archive
    dir's basename and an optional job id into a trailing-slash URL."""
    # Fake configuration
    archive_server = "http://example.com/server_root"
    config.archive_server = archive_server
    archive_dir = "/var/www/archives"
    path = misc.get_http_log_path(archive_dir)
    assert path == "http://example.com/server_root/archives/"
    job_id = '12345'
    path = misc.get_http_log_path(archive_dir, job_id)
    assert path == "http://example.com/server_root/archives/12345/"
    # Inktank configuration
    archive_server = "http://qa-proxy.ceph.com/teuthology/"
    config.archive_server = archive_server
    archive_dir = "/var/lib/teuthworker/archive/teuthology-2013-09-12_11:49:50-ceph-deploy-master-testing-basic-vps"
    job_id = 31087
    path = misc.get_http_log_path(archive_dir, job_id)
    assert path == "http://qa-proxy.ceph.com/teuthology/teuthology-2013-09-12_11:49:50-ceph-deploy-master-testing-basic-vps/31087/"
    path = misc.get_http_log_path(archive_dir)
    assert path == "http://qa-proxy.ceph.com/teuthology/teuthology-2013-09-12_11:49:50-ceph-deploy-master-testing-basic-vps/"
class TestHostnames(object):
    """canonicalize/decanonicalize_hostname round-trips, with and without a
    user prefix and with a non-default lab domain."""
    def setup(self):
        # start each test from an empty config so defaults apply
        config._conf = dict()
    def teardown(self):
        # restore the real configuration for other test modules
        config.load()
    def test_canonicalize_hostname(self):
        host_base = 'box1'
        result = misc.canonicalize_hostname(host_base)
        assert result == 'ubuntu@box1.front.sepia.ceph.com'
    def test_decanonicalize_hostname(self):
        host = 'ubuntu@box1.front.sepia.ceph.com'
        result = misc.decanonicalize_hostname(host)
        assert result == 'box1'
    def test_canonicalize_hostname_nouser(self):
        host_base = 'box1'
        result = misc.canonicalize_hostname(host_base, user=None)
        assert result == 'box1.front.sepia.ceph.com'
    def test_decanonicalize_hostname_nouser(self):
        host = 'box1.front.sepia.ceph.com'
        result = misc.decanonicalize_hostname(host)
        assert result == 'box1'
    def test_canonicalize_hostname_otherlab(self):
        config.lab_domain = 'example.com'
        host_base = 'box1'
        result = misc.canonicalize_hostname(host_base)
        assert result == 'ubuntu@box1.example.com'
    def test_decanonicalize_hostname_otherlab(self):
        config.lab_domain = 'example.com'
        host = 'ubuntu@box1.example.com'
        result = misc.decanonicalize_hostname(host)
        assert result == 'box1'
class TestMergeConfigs(object):
    """ Tests merge_config and deep_merge in teuthology.misc """
    @patch("os.path.exists")
    @patch("yaml.safe_load")
    @patch("__builtin__.file")  # Python 2 builtin; py3 would patch 'builtins.open'
    def test_merge_configs(self, m_file, m_safe_load, m_exists):
        """ Only tests with one yaml file being passed, mainly just to test
        the loop logic. The actual merge will be tested in subsequent
        tests.
        """
        expected = {"a": "b", "b": "c"}
        m_exists.return_value = True
        m_safe_load.return_value = expected
        result = misc.merge_configs(["path/to/config1"])
        assert result == expected
        m_file.assert_called_once_with("path/to/config1")
    def test_merge_configs_empty(self):
        assert misc.merge_configs([]) == {}
    def test_deep_merge(self):
        a = {"a": "b"}
        b = {"b": "c"}
        result = misc.deep_merge(a, b)
        assert result == {"a": "b", "b": "c"}
    def test_overwrite_deep_merge(self):
        # the second dict wins on key collisions
        a = {"a": "b"}
        b = {"a": "overwritten", "b": "c"}
        result = misc.deep_merge(a, b)
        assert result == {"a": "overwritten", "b": "c"}
    def test_list_deep_merge(self):
        a = [1, 2]
        b = [3, 4]
        result = misc.deep_merge(a, b)
        assert result == [1, 2, 3, 4]
    def test_missing_list_deep_merge(self):
        # merging a list with a non-list must be rejected
        a = [1, 2]
        b = "not a list"
        with pytest.raises(AssertionError):
            misc.deep_merge(a, b)
    def test_missing_a_deep_merge(self):
        result = misc.deep_merge(None, [1, 2])
        assert result == [1, 2]
    def test_missing_b_deep_merge(self):
        result = misc.deep_merge([1, 2], None)
        assert result == [1, 2]
    def test_invalid_b_deep_merge(self):
        with pytest.raises(AssertionError):
            misc.deep_merge({"a": "b"}, "invalid")
class TestIsInDict(object):
    """is_in_dict(key, value, dict): a dict value matches only if all of its
    items are present under the same key (recursive containment)."""
    def test_simple_membership(self):
        assert misc.is_in_dict('a', 'foo', {'a':'foo', 'b':'bar'})
    def test_dict_membership(self):
        # subset of the nested dict is enough; extra keys are ignored
        assert misc.is_in_dict(
            'a', {'sub1':'key1', 'sub2':'key2'},
            {'a':{'sub1':'key1', 'sub2':'key2', 'sub3':'key3'}}
        )
    def test_simple_nonmembership(self):
        assert not misc.is_in_dict('a', 'foo', {'a':'bar', 'b':'foo'})
    def test_nonmembership_with_presence_at_lower_level(self):
        assert not misc.is_in_dict('a', 'foo', {'a':{'a': 'foo'}})
| {
"content_hash": "1be90c943a3a70e36deb64de398b2d0f",
"timestamp": "",
"source": "github",
"line_count": 169,
"max_line_length": 131,
"avg_line_length": 31.976331360946745,
"alnum_prop": 0.6034418948926721,
"repo_name": "michaelsevilla/teuthology",
"id": "c3da5dbc10163334b7f06f784c89427eed8908e1",
"size": "5404",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "teuthology/test/test_misc.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "776119"
},
{
"name": "Shell",
"bytes": "9732"
}
],
"symlink_target": ""
} |
from itertools import count
from math import sqrt
from collections import Counter
puz_input = 33100000
factor_count = Counter()
def get_factors(n):
    """Return the divisors of n, suppressing any divisor already handed out
    more than 50 times (AoC 2015 day 20 part 2: each elf stops after 50
    houses). Mutates the module-level factor_count counter."""
    global factor_count
    def check_count(factor, flist):
        # Only deliver this factor if the corresponding elf has visits left.
        if factor_count[factor] > 50:
            pass
        else:
            factor_count[factor] += 1
            flist.add(factor)
    root = sqrt(n)
    i = 2
    # NOTE(review): 1 and n bypass check_count, so elf 1 is never retired
    # after 50 houses -- harmless for a large answer but technically off-spec.
    factors = set([1, n])
    while i <= root:
        if n % i == 0:
            check_count(i, factors)
            div = (n//i)
            # NOTE(review): when n is a perfect square, i == n//i and the
            # counter is incremented twice for a single delivery -- confirm.
            check_count(div, factors)
        i += 1
    return factors
# Each visiting elf (divisor) delivers 11 presents; print the first house
# whose total reaches the puzzle input.
for house in count(1,1):
    x = sum([ i * 11 for i in get_factors(house)])
    if x >= puz_input:
        print(house)
        break
| {
"content_hash": "abfa598c44a99ef14a3619077a8cd084",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 50,
"avg_line_length": 22.34375,
"alnum_prop": 0.5412587412587413,
"repo_name": "pwicks86/adventofcode2015",
"id": "96b4eaeab2bf3cdfc3ba994c9611f77b51f45475",
"size": "715",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "day20/p2.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "42595"
}
],
"symlink_target": ""
} |
class ModuleDocFragment(object):
    """Reusable Ansible documentation fragment describing the common
    URL-fetching options (url, proxy, auth, TLS client cert/key)."""
    # Standard files documentation fragment
    DOCUMENTATION = r'''
options:
  url:
    description:
      - HTTP, HTTPS, or FTP URL in the form (http|https|ftp)://[user[:pass]]@host.domain[:port]/path
    type: str
  force:
    description:
      - If C(yes) do not get a cached copy.
      - Alias C(thirsty) has been deprecated and will be removed in 2.13.
    type: bool
    default: no
    aliases: [ thirsty ]
  http_agent:
    description:
      - Header to identify as, generally appears in web server logs.
    type: str
    default: ansible-httpget
  use_proxy:
    description:
      - If C(no), it will not use a proxy, even if one is defined in an environment variable on the target hosts.
    type: bool
    default: yes
  validate_certs:
    description:
      - If C(no), SSL certificates will not be validated.
      - This should only be used on personally controlled sites using self-signed certificates.
    type: bool
    default: yes
  url_username:
    description:
      - The username for use in HTTP basic authentication.
      - This parameter can be used without I(url_password) for sites that allow empty passwords
    type: str
  url_password:
    description:
      - The password for use in HTTP basic authentication.
      - If the I(url_username) parameter is not specified, the I(url_password) parameter will not be used.
    type: str
  force_basic_auth:
    description:
      - Credentials specified with I(url_username) and I(url_password) should be passed in HTTP Header.
    type: bool
    default: no
  client_cert:
    description:
      - PEM formatted certificate chain file to be used for SSL client authentication.
      - This file can also include the key as well, and if the key is included, C(client_key) is not required.
    type: path
  client_key:
    description:
      - PEM formatted file that contains your private key to be used for SSL client authentication.
      - If C(client_cert) contains both the certificate and key, this option is not required.
    type: path
'''
| {
"content_hash": "e2aed6df859f819aebc86f0b43876697",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 113,
"avg_line_length": 35.63793103448276,
"alnum_prop": 0.6840832123850992,
"repo_name": "thaim/ansible",
"id": "d6fdcc384d005c9fe7a7d451d37d544a389c4e4f",
"size": "2244",
"binary": false,
"copies": "28",
"ref": "refs/heads/fix-broken-link",
"path": "lib/ansible/plugins/doc_fragments/url.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7"
},
{
"name": "Shell",
"bytes": "246"
}
],
"symlink_target": ""
} |
class Solution(object):
    def lengthOfLongestSubstring(self, s):
        """
        :type s: str
        :rtype: int

        Length of the longest substring of s without repeating characters.
        Single pass: remember the most recent index of every character and
        jump the window start past the previous occurrence on a repeat.
        """
        longest = 0
        window_start = 0
        last_seen = {}
        for idx, ch in enumerate(s):
            prev = last_seen.get(ch)
            if prev is not None and prev >= window_start:
                window_start = prev + 1
            last_seen[ch] = idx
            longest = max(longest, idx - window_start + 1)
        return longest
if __name__ == '__main__':
    # Smoke-test against the LeetCode sample inputs (expected: 3, 1, 3).
    print(Solution().lengthOfLongestSubstring("abcabcbb"))
    print(Solution().lengthOfLongestSubstring("b"))
    print(Solution().lengthOfLongestSubstring("pwwkew"))
| {
"content_hash": "e31978c0d15ff1ae8b892815d4b90d48",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 64,
"avg_line_length": 31.642857142857142,
"alnum_prop": 0.47404063205417607,
"repo_name": "knuu/competitive-programming",
"id": "47eed66f6a516304156d62344473507ee03c971b",
"size": "886",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "leetcode/leet_003.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "156029"
},
{
"name": "C++",
"bytes": "609501"
},
{
"name": "Haskell",
"bytes": "208"
},
{
"name": "Java",
"bytes": "9111"
},
{
"name": "Nim",
"bytes": "208992"
},
{
"name": "OCaml",
"bytes": "221"
},
{
"name": "Python",
"bytes": "410086"
}
],
"symlink_target": ""
} |
"""
Run Regression Test Suite
This module calls down into individual test cases via subprocess. It will
forward all unrecognized arguments onto the individual test scripts, other
than:
- `-extended`: run the "extended" test suite in addition to the basic one.
- `-win`: signal that this is running in a Windows environment, and we
should run the tests.
- `--coverage`: this generates a basic coverage report for the RPC
interface.
For a description of arguments recognized by test scripts, see
`qa/pull-tester/test_framework/test_framework.py:BitcoinTestFramework.main`.
"""
import os
import time
import shutil
import sys
import subprocess
import tempfile
import re
from tests_config import *
#If imported values are not defined then set to zero (or disabled)
if not vars().has_key('ENABLE_WALLET'):
    ENABLE_WALLET=0
if not vars().has_key('ENABLE_BITCOIND'):
    ENABLE_BITCOIND=0
if not vars().has_key('ENABLE_UTILS'):
    ENABLE_UTILS=0
if not vars().has_key('ENABLE_ZMQ'):
    ENABLE_ZMQ=0
# Coverage is opt-in via the --coverage flag handled below
ENABLE_COVERAGE=0
#Create a set to store arguments and create the passOn string
opts = set()
passOn = ""
p = re.compile("^--")
# ANSI escape codes for bold output; only enabled on POSIX terminals
bold = ("","")
if (os.name == 'posix'):
    bold = ('\033[0m', '\033[1m')
# Double-dash options (and -h) are forwarded verbatim to the individual
# test scripts; any other argument selects which scripts to run.
for arg in sys.argv[1:]:
    if arg == '--coverage':
        ENABLE_COVERAGE = 1
    elif (p.match(arg) or arg == "-h"):
        passOn += " " + arg
    else:
        opts.add(arg)
#Set env vars
buildDir = BUILDDIR
os.environ["BITCOIND"] = buildDir + '/src/bitcoind' + EXEEXT
os.environ["BITCOINCLI"] = buildDir + '/src/bitcoin-cli' + EXEEXT
#Disable Windows tests by default
if EXEEXT == ".exe" and "-win" not in opts:
    print "Win tests currently disabled. Use -win option to enable"
    sys.exit(0)
#Tests
testScripts = [
    'wallet.py',
    'listtransactions.py',
    'receivedby.py',
    'mempool_resurrect_test.py',
    'txn_doublespend.py --mineblock',
    'txn_clone.py',
    'getchaintips.py',
    'rawtransactions.py',
    'rest.py',
    'mempool_spendcoinbase.py',
    'mempool_reorg.py',
    'httpbasics.py',
    'multi_rpc.py',
    'zapwallettxes.py',
    'proxy_test.py',
    'merkle_blocks.py',
    'fundrawtransaction.py',
    'signrawtransactions.py',
    'walletbackup.py',
    'nodehandling.py',
    'reindex.py',
    'decodescript.py',
    'p2p-fullblocktest.py',
    'blockchain.py',
    'disablewallet.py',
    'sendheaders.py',
    'keypool.py',
    'prioritise_transaction.py',
]
# Second-level suite; only run with -extended or when named explicitly
testScriptsExt = [
    'bip65-cltv.py',
    'bip65-cltv-p2p.py',
    'bipdersig-p2p.py',
    'bipdersig.py',
    'getblocktemplate_longpoll.py',
    'getblocktemplate_proposals.py',
    'txn_doublespend.py',
    'txn_clone.py --mineblock',
    'pruning.py',
    'forknotify.py',
    'invalidateblock.py',
#    'rpcbind_test.py', #temporary, bug in libevent, see #6655
    'smartfees.py',
    'maxblocksinflight.py',
    'invalidblockrequest.py',
    'p2p-acceptblock.py',
    'mempool_packages.py',
    'maxuploadtarget.py',
    'replace-by-fee.py',
]
#Enable ZMQ tests
if ENABLE_ZMQ == 1:
    testScripts.append('zmq_test.py')
def runtests():
    """Run the standard (and optionally extended) RPC test scripts.

    Honours the option set parsed at module level: naming specific scripts
    restricts the run, '-extended' adds the second-level suite, and
    '--coverage' wraps the run in RPC coverage accounting. Requires wallet,
    utils and bitcoind to all be enabled in the build.
    """
    coverage = None
    if ENABLE_COVERAGE:
        coverage = RPCCoverage()
        print("Initializing coverage directory at %s\n" % coverage.dir)
    if(ENABLE_WALLET == 1 and ENABLE_UTILS == 1 and ENABLE_BITCOIND == 1):
        rpcTestDir = buildDir + '/qa/rpc-tests/'
        run_extended = '-extended' in opts
        cov_flag = coverage.flag if coverage else ''
        flags = " --srcdir %s/src %s %s" % (buildDir, cov_flag, passOn)
        #Run Tests
        for i in range(len(testScripts)):
            if (len(opts) == 0
                    or (len(opts) == 1 and "-win" in opts )
                    or run_extended
                    or testScripts[i] in opts
                    or re.sub(".py$", "", testScripts[i]) in opts ):
                print("Running testscript %s%s%s ..." % (bold[1], testScripts[i], bold[0]))
                time0 = time.time()
                subprocess.check_call(
                    rpcTestDir + testScripts[i] + flags, shell=True)
                print("Duration: %s s\n" % (int(time.time() - time0)))
            # exit if help is called so we print just one set of
            # instructions
            p = re.compile(" -h| --help")
            if p.match(passOn):
                sys.exit(0)
        # Run Extended Tests
        for i in range(len(testScriptsExt)):
            if (run_extended or testScriptsExt[i] in opts
                    or re.sub(".py$", "", testScriptsExt[i]) in opts):
                print(
                    "Running 2nd level testscript "
                    + "%s%s%s ..." % (bold[1], testScriptsExt[i], bold[0]))
                time0 = time.time()
                subprocess.check_call(
                    rpcTestDir + testScriptsExt[i] + flags, shell=True)
                print("Duration: %s s\n" % (int(time.time() - time0)))
        if coverage:
            coverage.report_rpc_coverage()
            print("Cleaning up coverage data")
            coverage.cleanup()
    else:
        print "No rpc tests to run. Wallet, utils, and bitcoind must all be enabled"
class RPCCoverage(object):
    """
    Coverage reporting utilities for pull-tester.
    Coverage calculation works by having each test script subprocess write
    coverage files into a particular directory. These files contain the RPC
    commands invoked during testing, as well as a complete listing of RPC
    commands per `bitcoin-cli help` (`rpc_interface.txt`).
    After all tests complete, the commands run are combined and diff'd against
    the complete list to calculate uncovered RPC commands.
    See also: qa/rpc-tests/test_framework/coverage.py
    """
    def __init__(self):
        # each run gets its own scratch dir; handed to scripts via self.flag
        self.dir = tempfile.mkdtemp(prefix="coverage")
        self.flag = '--coveragedir %s' % self.dir
    def report_rpc_coverage(self):
        """
        Print out RPC commands that were unexercised by tests.
        """
        uncovered = self._get_uncovered_rpc_commands()
        if uncovered:
            print("Uncovered RPC commands:")
            print("".join((" - %s\n" % i) for i in sorted(uncovered)))
        else:
            print("All RPC commands covered.")
    def cleanup(self):
        # remove the scratch dir and everything the test scripts wrote there
        return shutil.rmtree(self.dir)
    def _get_uncovered_rpc_commands(self):
        """
        Return a set of currently untested RPC commands.
        """
        # This is shared from `qa/rpc-tests/test-framework/coverage.py`
        REFERENCE_FILENAME = 'rpc_interface.txt'
        COVERAGE_FILE_PREFIX = 'coverage.'
        coverage_ref_filename = os.path.join(self.dir, REFERENCE_FILENAME)
        coverage_filenames = set()
        all_cmds = set()
        covered_cmds = set()
        if not os.path.isfile(coverage_ref_filename):
            raise RuntimeError("No coverage reference found")
        with open(coverage_ref_filename, 'r') as f:
            all_cmds.update([i.strip() for i in f.readlines()])
        for root, dirs, files in os.walk(self.dir):
            for filename in files:
                if filename.startswith(COVERAGE_FILE_PREFIX):
                    coverage_filenames.add(os.path.join(root, filename))
        for filename in coverage_filenames:
            with open(filename, 'r') as f:
                covered_cmds.update([i.strip() for i in f.readlines()])
        return all_cmds - covered_cmds
# Script entry point.
if __name__ == '__main__':
    runtests()
| {
"content_hash": "ccf1138a93e26877262e62bfb873a712",
"timestamp": "",
"source": "github",
"line_count": 250,
"max_line_length": 91,
"avg_line_length": 29.908,
"alnum_prop": 0.5994382773839775,
"repo_name": "thetimpotter/bitcoin_1.0",
"id": "df71e44b6037d9d2df2d4b621c1ed82894a5cd21",
"size": "7687",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "qa/pull-tester/rpc-tests.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "647242"
},
{
"name": "C++",
"bytes": "4176241"
},
{
"name": "CSS",
"bytes": "1127"
},
{
"name": "Groff",
"bytes": "18445"
},
{
"name": "HTML",
"bytes": "50621"
},
{
"name": "Java",
"bytes": "2100"
},
{
"name": "M4",
"bytes": "140799"
},
{
"name": "Makefile",
"bytes": "95711"
},
{
"name": "Objective-C",
"bytes": "3552"
},
{
"name": "Objective-C++",
"bytes": "7240"
},
{
"name": "Protocol Buffer",
"bytes": "2308"
},
{
"name": "Python",
"bytes": "593482"
},
{
"name": "QMake",
"bytes": "2020"
},
{
"name": "Shell",
"bytes": "31469"
},
{
"name": "TypeScript",
"bytes": "3229995"
}
],
"symlink_target": ""
} |
from django.core.urlresolvers import reverse
from django.forms.models import modelformset_factory, modelform_factory
from django.http import HttpResponse
from django.shortcuts import redirect
from django.template import loader
from django_ajax.decorators import ajax
from corporate.auxiliary_scripts import update_resume_zips
from corporate.forms import AddContactForm, ContactFormSet
from corporate.models import CorporateTextField, CorporateResourceGuide
from corporate.models import CompanyContact, Company, JobField, CorporateEmail
from mig_main.utility import get_message_dict, Permissions
# Shared flash message shown whenever a submitted form fails validation.
# (Fixed typo: "submision" -> "submission".)
FORM_ERROR = 'Your submission contained errors, please correct and resubmit.'
def get_permissions(user):
    """Collect the corporate-section permission flags for ``user``."""
    perms = {}
    perms['can_edit_corporate'] = Permissions.can_edit_corporate_page(user)
    perms['can_add_contact'] = Permissions.can_add_corporate_contact(user)
    perms['can_edit_contacts'] = Permissions.can_edit_corporate_page(user)
    perms['can_add_company'] = Permissions.can_add_company(user)
    return perms
def get_common_context(request):
    """Build the context entries shared by every corporate view."""
    context = get_message_dict(request)
    context['request'] = request
    # Lazy queryset; evaluated by the template when rendered.
    context['contact_text'] = CorporateTextField.objects.filter(section='CT')
    context['main_nav'] = 'corporate'
    return context
def index(request):
    """Render the corporate landing page."""
    request.session['current_page'] = request.path
    ctx = {
        'involvement_text': CorporateTextField.objects.filter(section='OP'),
        'subnav': 'index',
    }
    ctx.update(get_common_context(request))
    ctx.update(get_permissions(request.user))
    page = loader.get_template('corporate/corporate.html')
    return HttpResponse(page.render(ctx, request))
def resumes(request):
    """Render the resume-book page linking to the resume zip archives."""
    request.session['current_page'] = request.path
    ctx = {
        'by_major_zip': 'TBP_resumes_by_major.zip',
        'by_year_zip': 'TBP_resumes_by_year.zip',
        'subnav': 'resumes',
    }
    ctx.update(get_common_context(request))
    ctx.update(get_permissions(request.user))
    page = loader.get_template('corporate/resume_book.html')
    return HttpResponse(page.render(ctx, request))
def update_corporate_page(request):
    """Edit the markdown text blocks shown on the corporate pages.

    Requires the corporate-page edit permission; redirects back to the
    index on success, permission failure, or after flashing a form error.
    """
    if not Permissions.can_edit_corporate_page(request.user):
        request.session['error_message'] = 'You are not authorized to edit the corporate page'
        return redirect('corporate:index')
    prefix = 'corporate_page'
    CorporateTextForm = modelformset_factory(CorporateTextField,
                                             extra=1, exclude=[])
    formset = CorporateTextForm(request.POST or None, prefix=prefix)
    if request.method == 'POST':
        if formset.is_valid():
            formset.save()
            request.session['success_message'] = 'Corporate page successfully updated.'
            return redirect('corporate:index')
        request.session['error_message'] = FORM_ERROR
    ctx = {
        'formset': formset,
        'subnav': 'index',
        'prefix': prefix,
        'has_files': False,
        'submit_name': 'Update Corporate Page',
        'back_button': {'link': reverse('corporate:index'),
                        'text': 'To Corporate Page'},
        'form_title': 'Edit Corporate Page Text',
        'help_text': ('The text shown on the corporate main page. This text '
                      'uses markdown syntax.'),
        'can_add_row': False,
        'base': 'corporate/base_corporate.html',
    }
    ctx.update(get_common_context(request))
    ctx.update(get_permissions(request.user))
    page = loader.get_template('generic_formset.html')
    return HttpResponse(page.render(ctx, request))
def update_resource_guide(request):
    """Upload a new corporate resource guide and make it the active one.

    Deactivates every previously-active guide, activates the uploaded one,
    and rebuilds the resume zip archives that embed the guide.
    (Fixed user-visible typo in help_text: "inluded" -> "included".)
    """
    if not Permissions.can_edit_corporate_page(request.user):
        request.session['error_message'] = 'You are not authorized to edit the corporate page'
        return redirect('corporate:index')
    ResourceGuideForm = modelform_factory(CorporateResourceGuide, exclude=('active',))
    if request.method == 'POST':
        form = ResourceGuideForm(request.POST, request.FILES)
        if form.is_valid():
            instance = form.save(commit=False)
            # Only one guide may be active at a time.
            previously_active_guides = CorporateResourceGuide.objects.filter(active=True)
            for guide in previously_active_guides:
                guide.active = False
                guide.save()
            instance.active = True
            instance.save()
            # Regenerate the resume archives so they include the new guide.
            update_resume_zips()
            request.session['success_message'] = 'Corporate resource guide successfully updated.'
            return redirect('corporate:index')
        else:
            request.session['error_message'] = FORM_ERROR
    else:
        form = ResourceGuideForm()
    context_dict = {
        'form': form,
        'subnav': 'index',
        'has_files': True,
        'submit_name': 'Update Corporate Resource Guide',
        'back_button': {'link': reverse('corporate:index'),
                        'text': 'To Corporate Page'},
        'form_title': 'Edit Corporate Resource Guide',
        'help_text': ('This guide is included in the resume zip files. Update '
                      'it when the information (or the officer) changes.'),
        'base': 'corporate/base_corporate.html',
    }
    context_dict.update(get_common_context(request))
    context_dict.update(get_permissions(request.user))
    template = loader.get_template('generic_form.html')
    return HttpResponse(template.render(context_dict, request))
def add_company_contact(request):
    """Add (or, for non-officers, merely suggest) a company contact."""
    if not Permissions.can_add_corporate_contact(request.user):
        request.session['error_message'] = 'You are not authorized to add company contacts'
        return redirect('corporate:index')
    prefix = 'corporate_page'
    can_edit = Permissions.can_edit_corporate_page(request.user)
    form = AddContactForm(request.POST or None, prefix=prefix, can_edit=can_edit)
    if request.method == 'POST':
        if form.is_valid():
            # Warn when redundant contact details accompany a profile.
            if form.is_overdetermined():
                request.session['warning_message'] = 'Name, email, phone, bio, and chapter are ignored when profile provided.'
            form.save()
            request.session['success_message'] = 'Corporate contact successfully added.'
            return redirect('corporate:index')
        request.session['error_message'] = FORM_ERROR
    help_text = 'Add a contact to the company contacts database.'
    if not can_edit:
        help_text += (' Note: you are adding a suggested contact; '
                      'they will not be emailed unless approved by '
                      'the Corporate Relations Officer.')
    ctx = {
        'form': form,
        'subnav': 'index',
        'prefix': prefix,
        'has_files': False,
        'submit_name': 'Add company contact',
        'back_button': {'link': reverse('corporate:index'),
                        'text': 'To Corporate Page'},
        'form_title': 'Add company contact',
        'help_text': help_text,
        'base': 'corporate/base_corporate.html',
    }
    ctx.update(get_common_context(request))
    ctx.update(get_permissions(request.user))
    page = loader.get_template('generic_form.html')
    return HttpResponse(page.render(ctx, request))
def edit_company_contacts(request):
    """Bulk-edit the existing company contacts via a formset."""
    if not Permissions.can_edit_corporate_page(request.user):
        request.session['error_message'] = 'You are not authorized to add company contacts'
        return redirect('corporate:index')
    prefix = 'corporate_page'
    formset = ContactFormSet(request.POST or None, prefix=prefix,
                             initial=CompanyContact.get_contacts())
    if request.method == 'POST':
        if formset.is_valid():
            # save() reports whether any row supplied both a profile and
            # redundant manual contact details.
            if formset.save():
                request.session['warning_message'] = 'Name, email, phone, bio, and chapter are ignored when profile provided.'
            request.session['success_message'] = 'Corporate contact successfully added.'
            return redirect('corporate:index')
        request.session['error_message'] = FORM_ERROR
    ctx = {
        'formset': formset,
        'subnav': 'index',
        'prefix': prefix,
        'has_files': False,
        'submit_name': 'Update company contacts',
        'back_button': {'link': reverse('corporate:index'),
                        'text': 'To Corporate Page'},
        'form_title': 'Edit company contacts',
        'help_text': ('Edit the list of company contacts. '
                      'Contact info is ignored if a profile is provided.'),
        'can_add_row': True,
        'base': 'corporate/base_corporate.html',
    }
    ctx.update(get_common_context(request))
    ctx.update(get_permissions(request.user))
    page = loader.get_template('generic_formset.html')
    return HttpResponse(page.render(ctx, request))
def add_company(request):
    """Add a new company record (requires the add-company permission)."""
    if not Permissions.can_add_company(request.user):
        request.session['error_message'] = 'You are not authorized to add companies'
        return redirect('corporate:index')
    prefix = 'corporate_page'
    AddCompanyForm = modelform_factory(Company, exclude=[])
    form = AddCompanyForm(request.POST or None, prefix=prefix)
    if request.method == 'POST':
        if form.is_valid():
            form.save()
            request.session['success_message'] = 'Company successfully added.'
            return redirect('corporate:index')
        request.session['error_message'] = FORM_ERROR
    ctx = {
        'form': form,
        'subnav': 'index',
        'prefix': prefix,
        'has_files': False,
        'submit_name': 'Add company',
        'back_button': {'link': reverse('corporate:index'),
                        'text': 'To Corporate Page'},
        'form_title': 'Add company',
        'help_text': ('Add company information. If the appropriate industry '
                      'is not present, you need to add that first'),
        'base': 'corporate/base_corporate.html',
    }
    ctx.update(get_common_context(request))
    ctx.update(get_permissions(request.user))
    page = loader.get_template('generic_form.html')
    return HttpResponse(page.render(ctx, request))
def add_jobfield(request):
    """Add a new industry (JobField) record."""
    if not Permissions.can_add_company(request.user):
        request.session['error_message'] = 'You are not authorized to add industries'
        return redirect('corporate:index')
    prefix = 'corporate_page'
    AddIndustryForm = modelform_factory(JobField, exclude=[])
    form = AddIndustryForm(request.POST or None, prefix=prefix)
    if request.method == 'POST':
        if form.is_valid():
            form.save()
            request.session['success_message'] = 'Industry successfully added.'
            return redirect('corporate:index')
        request.session['error_message'] = FORM_ERROR
    ctx = {
        'form': form,
        'subnav': 'index',
        'prefix': prefix,
        'has_files': False,
        'submit_name': 'Add industry',
        'back_button': {'link': reverse('corporate:index'),
                        'text': 'To Corporate Page'},
        'form_title': 'Add industry',
        'help_text': ('Add industry information. Select all relevant majors.'),
        'base': 'corporate/base_corporate.html',
    }
    ctx.update(get_common_context(request))
    ctx.update(get_permissions(request.user))
    page = loader.get_template('generic_form.html')
    return HttpResponse(page.render(ctx, request))
def view_company_contacts(request):
    """Show the full table of company contacts to authorized users."""
    if not Permissions.can_edit_corporate_page(request.user):
        request.session['error_message'] = 'You are not authorized to view company contacts'
        return redirect('corporate:index')
    ctx = {
        'contacts': CompanyContact.get_contacts(),
        'subnav': 'index',
        'base': 'corporate/base_corporate.html',
    }
    ctx.update(get_common_context(request))
    ctx.update(get_permissions(request.user))
    page = loader.get_template('corporate/contacts_table.html')
    return HttpResponse(page.render(ctx, request))
def view_and_send_email(request):
    """Preview every variant of the active corporate email before sending."""
    if not Permissions.can_edit_corporate_page(request.user):
        request.session['error_message'] = 'You are not authorized to email companies'
        return redirect('corporate:index')
    active_emails = CorporateEmail.objects.filter(active=True)
    if not active_emails.exists():
        request.session['error_message'] = 'No email specified'
        return redirect('corporate:index')
    email = active_emails[0]
    ctx = {
        'contacts': CompanyContact.get_contacts(gets_email=True),
        'email': email.preview_email(),
        'mig_alum_email': email.preview_email(mig_alum=True),
        'other_alum_email': email.preview_email(other_alum=True),
        'previous_contact_email': email.preview_email(previous_contact=True),
        'personal_contact_email': email.preview_email(personal_contact=True),
        'subnav': 'index',
        'base': 'corporate/base_corporate.html',
    }
    ctx.update(get_common_context(request))
    ctx.update(get_permissions(request.user))
    page = loader.get_template('corporate/view_and_send_email.html')
    return HttpResponse(page.render(ctx, request))
@ajax
def send_corporate_email(request):
    """Send the active corporate email to all contacts (AJAX endpoint).

    Returns django-ajax 'fragments' that replace the #ajax-message div with
    a bootstrap alert reporting success or failure.
    """
    # Permission gate: only corporate-page editors may trigger the send.
    if not Permissions.can_edit_corporate_page(request.user):
        request.session['error_message'] = 'You are not authorized to email companies'
        return {'fragments':{'#ajax-message':r'''<div id="ajax-message" class="alert alert-danger">
                    <button type="button" class="close" data-dismiss="alert">×</button>
                    <strong>Error:</strong>%s</div>'''%(request.session.pop('error_message'))}}
    existing_email = CorporateEmail.objects.filter(active=True)
    if existing_email.exists():
        # Dispatch using the single active email template.
        existing_email[0].send_corporate_email()
        request.session['success_message']='Companies successfully emailed'
        return {'fragments':{'#ajax-message':r'''<div id="ajax-message" class="alert alert-success">
                    <button type="button" class="close" data-dismiss="alert">×</button>
                    <strong>Success:</strong>%s</div>'''%(request.session.pop('success_message'))}}
    else:
        request.session['error_message'] = 'Company email text does not exist'
        return {'fragments':{'#ajax-message':r'''<div id="ajax-message" class="alert alert-danger">
                    <button type="button" class="close" data-dismiss="alert">×</button>
                    <strong>Error:</strong>%s</div>'''%(request.session.pop('error_message'))}}
def update_corporate_email(request):
    """Create a new active CorporateEmail, deactivating the previous one.

    A fresh row is inserted (rather than updating in place) so the history
    of corporate emails is preserved; exactly one row stays active.

    Bug fix: the ``active=True`` queryset is lazy, so re-evaluating it
    after saving the new active instance could match the row just created
    and deactivate *it* instead of the old one. The old row's pk is now
    captured before the save and used to deactivate that specific row.
    Also fixes missing spaces between concatenated help_text literals.
    """
    if not Permissions.can_edit_corporate_page(request.user):
        request.session['error_message'] = 'You are not authorized to email companies'
        return redirect('corporate:index')
    prefix = 'corporate_email'
    existing_email = CorporateEmail.objects.filter(active=True)
    UpdateEmailForm = modelform_factory(CorporateEmail, exclude=[])
    if existing_email.exists():
        old_instance = existing_email[0]
        # Remember the pk now; form.save(commit=False) mutates this same
        # object (its id is cleared below to force an INSERT).
        old_pk = old_instance.pk
        form = UpdateEmailForm(request.POST or None, prefix=prefix,
                               instance=old_instance)
    else:
        old_pk = None
        form = UpdateEmailForm(request.POST or None, prefix=prefix)
    if request.method == 'POST':
        if form.is_valid():
            instance = form.save(commit=False)
            # Clear the pk so save() inserts a new row instead of
            # overwriting the previous email.
            instance.id = None
            instance.pk = None
            instance.save()
            if old_pk is not None:
                # Deactivate the row that was active before the save,
                # addressed by pk so the new instance is never touched.
                old_email = CorporateEmail.objects.get(pk=old_pk)
                old_email.active = False
                old_email.save()
            request.session['success_message'] = 'Company email successfully updated.'
            return redirect('corporate:index')
        else:
            request.session['error_message'] = FORM_ERROR
    context_dict = {
        'form': form,
        'subnav': 'index',
        'prefix': prefix,
        'has_files': False,
        'submit_name': 'Update corporate email',
        'back_button': {'link': reverse('corporate:index'),
                        'text': 'To Corporate Page'},
        'form_title': 'Update corporate email',
        'help_text': ('Update the email sent to companies to encourage their '
                      'participation in TBP corporate events.\n\nUse '
                      '{{company_name}} in the subject line as a placeholder '
                      'and {{extra_text}} in the body as a placeholder for '
                      'the extra text to members or personal contacts.'),
        'base': 'corporate/base_corporate.html',
    }
    context_dict.update(get_common_context(request))
    context_dict.update(get_permissions(request.user))
    template = loader.get_template('generic_form.html')
    return HttpResponse(template.render(context_dict, request))
| {
"content_hash": "06f13ec45256c255a90f281bd0254333",
"timestamp": "",
"source": "github",
"line_count": 387,
"max_line_length": 128,
"avg_line_length": 44.622739018087856,
"alnum_prop": 0.6424228386125427,
"repo_name": "tbpmig/mig-website",
"id": "a869d158baeed4559d320fae1876fffb354d3225",
"size": "17269",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "corporate/views.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "8576"
},
{
"name": "HTML",
"bytes": "760931"
},
{
"name": "JavaScript",
"bytes": "64350"
},
{
"name": "Less",
"bytes": "2022"
},
{
"name": "Python",
"bytes": "1637977"
},
{
"name": "TeX",
"bytes": "5289"
}
],
"symlink_target": ""
} |
"""
Benchmark for SGD regression
Compares SGD regression against coordinate descent and Ridge
on synthetik data.
"""
print __doc__
# Author: Peter Prettenhofer <peter.prettenhofer@gmail.com>
# License: BSD Style.
import numpy as np
import pylab as pl
import gc
from time import time
from sklearn.linear_model import Ridge, SGDRegressor, ElasticNet
from sklearn.metrics import mean_squared_error
from sklearn.datasets.samples_generator import make_regression
if __name__ == "__main__":
list_n_samples = np.linspace(100, 10000, 5).astype(np.int)
list_n_features = [10, 100, 1000]
n_test = 1000
noise = 0.1
alpha = 0.01
sgd_results = np.zeros((len(list_n_samples), len(list_n_features), 2))
elnet_results = np.zeros((len(list_n_samples), len(list_n_features), 2))
ridge_results = np.zeros((len(list_n_samples), len(list_n_features), 2))
for i, n_train in enumerate(list_n_samples):
for j, n_features in enumerate(list_n_features):
X, y, coef = make_regression(
n_samples=n_train + n_test, n_features=n_features,
noise=noise, coef=True)
X_train = X[:n_train]
y_train = y[:n_train]
X_test = X[n_train:]
y_test = y[n_train:]
print "======================="
print "Round %d %d" % (i, j)
print "n_features:", n_features
print "n_samples:", n_train
# Shuffle data
idx = np.arange(n_train)
np.random.seed(13)
np.random.shuffle(idx)
X_train = X_train[idx]
y_train = y_train[idx]
std = X_train.std(axis=0)
mean = X_train.mean(axis=0)
X_train = (X_train - mean) / std
X_test = (X_test - mean) / std
std = y_train.std(axis=0)
mean = y_train.mean(axis=0)
y_train = (y_train - mean) / std
y_test = (y_test - mean) / std
gc.collect()
print "- benching ElasticNet"
clf = ElasticNet(alpha=alpha, rho=0.5, fit_intercept=False)
tstart = time()
clf.fit(X_train, y_train)
elnet_results[i, j, 0] = mean_squared_error(clf.predict(X_test),
y_test)
elnet_results[i, j, 1] = time() - tstart
gc.collect()
print "- benching SGD"
n_iter = np.ceil(10 ** 4.0 / n_train)
clf = SGDRegressor(alpha=alpha, fit_intercept=False,
n_iter=n_iter, learning_rate="invscaling",
eta0=.01, power_t=0.25)
tstart = time()
clf.fit(X_train, y_train)
sgd_results[i, j, 0] = mean_squared_error(clf.predict(X_test),
y_test)
sgd_results[i, j, 1] = time() - tstart
gc.collect()
print "- benching RidgeRegression"
clf = Ridge(alpha=alpha, fit_intercept=False)
tstart = time()
clf.fit(X_train, y_train)
ridge_results[i, j, 0] = mean_squared_error(clf.predict(X_test),
y_test)
ridge_results[i, j, 1] = time() - tstart
# Plot results
i = 0
m = len(list_n_features)
pl.figure(figsize=(5 * 2, 4 * m))
for j in range(m):
pl.subplot(m, 2, i + 1)
pl.plot(list_n_samples, np.sqrt(elnet_results[:, j, 0]),
label="ElasticNet")
pl.plot(list_n_samples, np.sqrt(sgd_results[:, j, 0]),
label="SGDRegressor")
pl.plot(list_n_samples, np.sqrt(ridge_results[:, j, 0]),
label="Ridge")
pl.legend(prop={"size": 10})
pl.xlabel("n_train")
pl.ylabel("RMSE")
pl.title("Test error - %d features" % list_n_features[j])
i += 1
pl.subplot(m, 2, i + 1)
pl.plot(list_n_samples, np.sqrt(elnet_results[:, j, 1]),
label="ElasticNet")
pl.plot(list_n_samples, np.sqrt(sgd_results[:, j, 1]),
label="SGDRegressor")
pl.plot(list_n_samples, np.sqrt(ridge_results[:, j, 1]),
label="Ridge")
pl.legend(prop={"size": 10})
pl.xlabel("n_train")
pl.ylabel("Time [sec]")
pl.title("Training time - %d features" % list_n_features[j])
i += 1
pl.subplots_adjust(hspace=.30)
pl.show()
| {
"content_hash": "fb09dcc5698a385837c090700f331784",
"timestamp": "",
"source": "github",
"line_count": 130,
"max_line_length": 76,
"avg_line_length": 34.646153846153844,
"alnum_prop": 0.511101243339254,
"repo_name": "sgenoud/scikit-learn",
"id": "e07bd0add590b7fac6b6e594640542eb505629d5",
"size": "4504",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "benchmarks/bench_sgd_regression.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "7396960"
},
{
"name": "C++",
"bytes": "408753"
},
{
"name": "JavaScript",
"bytes": "4736"
},
{
"name": "Objective-C",
"bytes": "4595"
},
{
"name": "Python",
"bytes": "3013862"
},
{
"name": "Shell",
"bytes": "687"
}
],
"symlink_target": ""
} |
from survey.error import ValidationError
from survey.models import SurveyResponse
from validate_email import validate_email
# Accepted values for the boolean "finished" field.
BOOLEAN = (True, False)
# Accepted values for the "gender" field (empty string is also allowed).
GENDERS = ("male", "female")
# Accepted values for the "favorite_colors" field.
COLORS = (
    "red",
    "blue",
    "green",
    "brown",
    "black",
    "white",
    "orange",
    "pink",
    "yellow",
    "purple")
def _check_max_length(field_name, value):
    """Raise ValidationError when ``value`` exceeds the model column's limit."""
    # The limit is read from the SurveyResponse model's field definition;
    # columns without a max_length are never length-checked.
    column = getattr(SurveyResponse, field_name)
    max_length = getattr(column, "max_length", None)
    if max_length is None:
        return
    if len(value) > max_length:
        message = "Value for field {0} exceeds max length of {1} characters"
        raise ValidationError(message.format(field_name, max_length))
def _check_in_range(field_name, value, lower, upper):
    """Raise ValidationError unless ``lower <= value <= upper``."""
    if lower <= value <= upper:
        return
    raise ValidationError(
        "Value for field {0} is outside the range {1}-{2}".format(
            field_name, lower, upper))
def _check_in_enum(field_name, value, enum):
    """Raise ValidationError when ``value`` is not a member of ``enum``."""
    if value in enum:
        return
    raise ValidationError(
        "Value for field {0} can only be one of these: {1}".format(
            field_name, str(enum)))
def _validate_name(name):
    """Length-check the name field against the model column."""
    _check_max_length("name", name)
def _validate_email(email):
    """Length-check the email and verify it parses as an email address."""
    _check_max_length("email", email)
    if not validate_email(email):
        error_msg = "Value for field email is not a valid email address"
        raise ValidationError(error_msg)
def _validate_age(age):
    """Range-check an integer age.

    NOTE(review): non-int values pass silently -- presumably upstream
    normalization coerces age to int; confirm against the caller.
    """
    if isinstance(age, int):
        _check_in_range("age", age, 3, 125)
def _validate_about_me(about_me):
    """Length-check the about_me field against the model column."""
    _check_max_length("about_me", about_me)
def _validate_address(address):
    """Length-check the address field against the model column."""
    _check_max_length("address", address)
def _validate_gender(gender):
    """Check gender against GENDERS; an empty string is accepted as unset."""
    if len(gender) > 0:
        _check_in_enum("gender", gender, GENDERS)
def _validate_favorite_book(favorite_book):
    """Length-check the favorite_book field against the model column."""
    _check_max_length("favorite_book", favorite_book)
def _validate_favorite_colors(favorite_colors):
    """Validate a single color or a collection of colors against COLORS.

    Accepts a list/tuple of colors, a comma-separated string, or a single
    color string; empty input is accepted as unset.
    """
    if len(favorite_colors) > 0:
        # A comma-separated string is split into individual colors.
        if "," in favorite_colors:
            favorite_colors = favorite_colors.split(",")
        if isinstance(favorite_colors, (list, tuple)):
            for color in favorite_colors:
                _check_in_enum("favorite_colors", color, COLORS)
        else:
            _check_in_enum("favorite_colors", favorite_colors, COLORS)
def _validate_finished(finished):
    """Ensure finished is a real boolean (True or False)."""
    _check_in_enum("finished", finished, BOOLEAN)
def _validate_field(field_name, field_value):
    """Dispatch to the module's ``_validate_<field_name>`` function."""
    validator_name = "_validate_{}".format(field_name)
    validator = globals().get(validator_name)
    if validator is None:
        raise ValueError(
            "Unknown field {0} can't be validated".format(field_name))
    # Invoke the validation function
    validator(field_value)
def _validate_mandatory_fields(data, required_fields):
    """Raise ValidationError naming the first required field absent from data."""
    missing = [field for field in required_fields if field not in data]
    if missing:
        raise ValidationError("Missing mandatory field {}".format(missing[0]))
def validate(data, required_fields=None):
    """Validate each key-value pair in ``data``.

    Args:
        data (dict): Incoming data from request
        required_fields (list): Mandatory fields
    """
    if required_fields is not None:
        _validate_mandatory_fields(data, required_fields)
    for field_name in data:
        _validate_field(field_name, data[field_name])
| {
"content_hash": "15b68cb3dc39ec928b3aa726839e131f",
"timestamp": "",
"source": "github",
"line_count": 135,
"max_line_length": 78,
"avg_line_length": 27.51851851851852,
"alnum_prop": 0.6506056527590848,
"repo_name": "jllivermont/hotjar-task",
"id": "1f93f7ae5f0cd3e41962af217b8e8b20815c24c2",
"size": "3715",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "survey/validator.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "1656"
},
{
"name": "HTML",
"bytes": "12479"
},
{
"name": "JavaScript",
"bytes": "12668"
},
{
"name": "Python",
"bytes": "29486"
}
],
"symlink_target": ""
} |
import fixtures
import mock
import os
from testtools.matchers import HasLength
from ironic.common import disk_partitioner
from ironic.common import exception
from ironic.common import utils
from ironic.tests import base
class DiskPartitionerTestCase(base.TestCase):
    """Tests for DiskPartitioner bookkeeping and commit() retry handling."""
    def setUp(self):
        super(DiskPartitionerTestCase, self).setUp()
        def noop(*args, **kwargs):
            pass
        # Replace eventlet's sleep with a no-op so commit()'s retry loop
        # runs instantly during the tests below.
        self.useFixture(fixtures.MonkeyPatch('eventlet.greenthread.sleep',
                                             noop))
    # Partitions get sequential numbers and retain their per-partition options.
    def test_add_partition(self):
        dp = disk_partitioner.DiskPartitioner('/dev/fake')
        dp.add_partition(1024)
        dp.add_partition(512, fs_type='linux-swap')
        dp.add_partition(2048, bootable=True)
        expected = [(1, {'bootable': False,
                         'fs_type': '',
                         'type': 'primary',
                         'size': 1024}),
                    (2, {'bootable': False,
                         'fs_type': 'linux-swap',
                         'type': 'primary',
                         'size': 512}),
                    (3, {'bootable': True,
                         'fs_type': '',
                         'type': 'primary',
                         'size': 2048})]
        partitions = [(n, p) for n, p in dp.get_partitions()]
        self.assertThat(partitions, HasLength(3))
        self.assertEqual(expected, partitions)
    # Happy path: parted command built from partitions; fuser reports idle.
    @mock.patch.object(disk_partitioner.DiskPartitioner, '_exec')
    @mock.patch.object(utils, 'execute')
    def test_commit(self, mock_utils_exc, mock_disk_partitioner_exec):
        dp = disk_partitioner.DiskPartitioner('/dev/fake')
        fake_parts = [(1, {'bootable': False,
                           'fs_type': 'fake-fs-type',
                           'type': 'fake-type',
                           'size': 1}),
                      (2, {'bootable': True,
                           'fs_type': 'fake-fs-type',
                           'type': 'fake-type',
                           'size': 1})]
        with mock.patch.object(dp, 'get_partitions') as mock_gp:
            mock_gp.return_value = fake_parts
            mock_utils_exc.return_value = (None, None)
            dp.commit()
        mock_disk_partitioner_exec.assert_called_once_with('mklabel', 'msdos',
            'mkpart', 'fake-type', 'fake-fs-type', '1', '2',
            'mkpart', 'fake-type', 'fake-fs-type', '2', '3',
            'set', '2', 'boot', 'on')
        mock_utils_exc.assert_called_once_with('fuser', '/dev/fake',
                            run_as_root=True, check_exit_code=[0, 1])
    # Device busy on the first fuser probe, free on the second: one retry.
    @mock.patch.object(disk_partitioner.DiskPartitioner, '_exec')
    @mock.patch.object(utils, 'execute')
    def test_commit_with_device_is_busy_once(self, mock_utils_exc,
                                             mock_disk_partitioner_exec):
        dp = disk_partitioner.DiskPartitioner('/dev/fake')
        fake_parts = [(1, {'bootable': False,
                           'fs_type': 'fake-fs-type',
                           'type': 'fake-type',
                           'size': 1}),
                      (2, {'bootable': True,
                           'fs_type': 'fake-fs-type',
                           'type': 'fake-type',
                           'size': 1})]
        fuser_outputs = [("/dev/fake: 10000 10001", None), (None, None)]
        with mock.patch.object(dp, 'get_partitions') as mock_gp:
            mock_gp.return_value = fake_parts
            mock_utils_exc.side_effect = fuser_outputs
            dp.commit()
        mock_disk_partitioner_exec.assert_called_once_with('mklabel', 'msdos',
            'mkpart', 'fake-type', 'fake-fs-type', '1', '2',
            'mkpart', 'fake-type', 'fake-fs-type', '2', '3',
            'set', '2', 'boot', 'on')
        mock_utils_exc.assert_called_with('fuser', '/dev/fake',
                            run_as_root=True, check_exit_code=[0, 1])
        self.assertEqual(2, mock_utils_exc.call_count)
    # Device busy on every probe: retries exhaust (20) and deploy fails.
    @mock.patch.object(disk_partitioner.DiskPartitioner, '_exec')
    @mock.patch.object(utils, 'execute')
    def test_commit_with_device_is_always_busy(self, mock_utils_exc,
                                               mock_disk_partitioner_exec):
        dp = disk_partitioner.DiskPartitioner('/dev/fake')
        fake_parts = [(1, {'bootable': False,
                           'fs_type': 'fake-fs-type',
                           'type': 'fake-type',
                           'size': 1}),
                      (2, {'bootable': True,
                           'fs_type': 'fake-fs-type',
                           'type': 'fake-type',
                           'size': 1})]
        with mock.patch.object(dp, 'get_partitions') as mock_gp:
            mock_gp.return_value = fake_parts
            mock_utils_exc.return_value = ("/dev/fake: 10000 10001", None)
            self.assertRaises(exception.InstanceDeployFailure, dp.commit)
        mock_disk_partitioner_exec.assert_called_once_with('mklabel', 'msdos',
            'mkpart', 'fake-type', 'fake-fs-type', '1', '2',
            'mkpart', 'fake-type', 'fake-fs-type', '2', '3',
            'set', '2', 'boot', 'on')
        mock_utils_exc.assert_called_with('fuser', '/dev/fake',
                            run_as_root=True, check_exit_code=[0, 1])
        self.assertEqual(20, mock_utils_exc.call_count)
    # fuser stderr reporting a missing device also exhausts retries and fails.
    @mock.patch.object(disk_partitioner.DiskPartitioner, '_exec')
    @mock.patch.object(utils, 'execute')
    def test_commit_with_device_disconnected(self, mock_utils_exc,
                                             mock_disk_partitioner_exec):
        dp = disk_partitioner.DiskPartitioner('/dev/fake')
        fake_parts = [(1, {'bootable': False,
                           'fs_type': 'fake-fs-type',
                           'type': 'fake-type',
                           'size': 1}),
                      (2, {'bootable': True,
                           'fs_type': 'fake-fs-type',
                           'type': 'fake-type',
                           'size': 1})]
        with mock.patch.object(dp, 'get_partitions') as mock_gp:
            mock_gp.return_value = fake_parts
            mock_utils_exc.return_value = (None, "Specified filename /dev/fake"
                                                 " does not exist.")
            self.assertRaises(exception.InstanceDeployFailure, dp.commit)
        mock_disk_partitioner_exec.assert_called_once_with('mklabel', 'msdos',
            'mkpart', 'fake-type', 'fake-fs-type', '1', '2',
            'mkpart', 'fake-type', 'fake-fs-type', '2', '3',
            'set', '2', 'boot', 'on')
        mock_utils_exc.assert_called_with('fuser', '/dev/fake',
                            run_as_root=True, check_exit_code=[0, 1])
        self.assertEqual(20, mock_utils_exc.call_count)
@mock.patch.object(utils, 'execute')
class ListPartitionsTestCase(base.TestCase):
    """Tests for list_partitions() parsing of `parted -m` machine output."""
    @mock.patch.object(os.environ, 'copy', return_value={})
    def test_correct(self, env_mock, execute_mock):
        # Well-formed output parses into one dict per partition line.
        output = """
BYT;
/dev/sda:500107862016B:scsi:512:4096:msdos:ATA HGST HTS725050A7:;
1:1.00MiB:501MiB:500MiB:ext4::boot;
2:501MiB:476940MiB:476439MiB:::;
"""
        expected = [
            {'start': 1, 'end': 501, 'size': 500,
             'filesystem': 'ext4', 'flags': 'boot'},
            {'start': 501, 'end': 476940, 'size': 476439,
             'filesystem': '', 'flags': ''},
        ]
        execute_mock.return_value = (output, '')
        # LC_ALL=C keeps parted's output locale-independent for parsing.
        env = {'LC_ALL': 'C'}
        result = disk_partitioner.list_partitions('/dev/fake')
        self.assertEqual(expected, result)
        execute_mock.assert_called_once_with(
            'parted', '-s', '-m', '/dev/fake', 'unit', 'MiB', 'print',
            env_variables=env)
    @mock.patch.object(disk_partitioner.LOG, 'warn')
    def test_incorrect(self, log_mock, execute_mock):
        # Malformed partition lines are skipped with a single warning.
        output = """
BYT;
/dev/sda:500107862016B:scsi:512:4096:msdos:ATA HGST HTS725050A7:;
1:XX1076MiB:---:524MiB:ext4::boot;
"""
        execute_mock.return_value = (output, '')
        self.assertEqual([], disk_partitioner.list_partitions('/dev/fake'))
        self.assertEqual(1, log_mock.call_count)
| {
"content_hash": "77821b993b80f361ce92d2ff76cc3122",
"timestamp": "",
"source": "github",
"line_count": 186,
"max_line_length": 79,
"avg_line_length": 43.47849462365591,
"alnum_prop": 0.5119327315444541,
"repo_name": "faizan-barmawer/openstack_ironic",
"id": "494510f0310cfc3bb8469bd8473b9a9af2894c41",
"size": "8716",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ironic/tests/test_disk_partitioner.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2168035"
}
],
"symlink_target": ""
} |
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Relax Photo.height and Photo.width to allow NULL values."""
        # Changing field 'Photo.height'
        db.alter_column('vkontakte_photos_photo', 'height', self.gf('django.db.models.fields.PositiveIntegerField')(null=True))
        # Changing field 'Photo.width'
        db.alter_column('vkontakte_photos_photo', 'width', self.gf('django.db.models.fields.PositiveIntegerField')(null=True))
    def backwards(self, orm):
        # Reverse migration: restore NOT NULL by backfilling a default of 0.

        # Changing field 'Photo.height'
        db.alter_column('vkontakte_photos_photo', 'height', self.gf('django.db.models.fields.PositiveIntegerField')(default=0))

        # Changing field 'Photo.width'
        db.alter_column('vkontakte_photos_photo', 'width', self.gf('django.db.models.fields.PositiveIntegerField')(default=0))
models = {
'vkontakte_groups.group': {
'Meta': {'ordering': "['name']", 'object_name': 'Group'},
'fetched': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_admin': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_closed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '800'}),
'photo': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'photo_big': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'photo_medium': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'remote_id': ('django.db.models.fields.BigIntegerField', [], {'unique': 'True'}),
'screen_name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'users': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['vkontakte_users.User']", 'symmetrical': 'False'})
},
'vkontakte_photos.album': {
'Meta': {'ordering': "['remote_id']", 'object_name': 'Album'},
'created': ('django.db.models.fields.DateTimeField', [], {}),
'description': ('django.db.models.fields.TextField', [], {}),
'fetched': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'photo_albums'", 'null': 'True', 'to': "orm['vkontakte_groups.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'photo_albums'", 'null': 'True', 'to': "orm['vkontakte_users.User']"}),
'privacy': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'remote_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': "'20'"}),
'size': ('django.db.models.fields.PositiveIntegerField', [], {}),
'thumb_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'thumb_src': ('django.db.models.fields.CharField', [], {'max_length': "'200'"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': "'200'"}),
'updated': ('django.db.models.fields.DateTimeField', [], {})
},
'vkontakte_photos.photo': {
'Meta': {'ordering': "['remote_id']", 'object_name': 'Photo'},
'album': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'photos'", 'to': "orm['vkontakte_photos.Album']"}),
'created': ('django.db.models.fields.DateTimeField', [], {}),
'fetched': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'photos'", 'null': 'True', 'to': "orm['vkontakte_groups.Group']"}),
'height': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'likes': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'photos'", 'null': 'True', 'to': "orm['vkontakte_users.User']"}),
'remote_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': "'20'"}),
'src': ('django.db.models.fields.CharField', [], {'max_length': "'200'"}),
'src_big': ('django.db.models.fields.CharField', [], {'max_length': "'200'"}),
'src_small': ('django.db.models.fields.CharField', [], {'max_length': "'200'"}),
'src_xbig': ('django.db.models.fields.CharField', [], {'max_length': "'200'"}),
'text': ('django.db.models.fields.TextField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'photos_author'", 'to': "orm['vkontakte_users.User']"}),
'width': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'})
},
'vkontakte_places.city': {
'Meta': {'ordering': "['name']", 'object_name': 'City'},
'area': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'country': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'cities'", 'null': 'True', 'to': "orm['vkontakte_places.Country']"}),
'fetched': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'region': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'remote_id': ('django.db.models.fields.BigIntegerField', [], {'unique': 'True'})
},
'vkontakte_places.country': {
'Meta': {'ordering': "['name']", 'object_name': 'Country'},
'fetched': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'remote_id': ('django.db.models.fields.BigIntegerField', [], {'unique': 'True'})
},
'vkontakte_users.user': {
'Meta': {'ordering': "['remote_id']", 'object_name': 'User'},
'activity': ('django.db.models.fields.TextField', [], {}),
'albums': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'audios': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'bdate': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'city': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['vkontakte_places.City']", 'null': 'True', 'on_delete': 'models.SET_NULL'}),
'counters_updated': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'country': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['vkontakte_places.Country']", 'null': 'True', 'on_delete': 'models.SET_NULL'}),
'faculty': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'faculty_name': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'fetched': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'followers': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'friends': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'graduation': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'has_mobile': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'home_phone': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'mobile_phone': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'mutual_friends': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'notes': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'photo': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'photo_big': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'photo_medium': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'photo_medium_rec': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'photo_rec': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'rate': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'relation': ('django.db.models.fields.SmallIntegerField', [], {'null': 'True'}),
'remote_id': ('django.db.models.fields.BigIntegerField', [], {'unique': 'True'}),
'screen_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'sex': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'subscriptions': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'sum_counters': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'timezone': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'university': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'university_name': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'user_photos': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'user_videos': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'videos': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'wall_comments': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
}
}
complete_apps = ['vkontakte_photos'] | {
"content_hash": "1f2c51fc42b20013a792f819e31dbbc5",
"timestamp": "",
"source": "github",
"line_count": 136,
"max_line_length": 165,
"avg_line_length": 78.79411764705883,
"alnum_prop": 0.5617767823814857,
"repo_name": "ramusus/django-vkontakte-photos",
"id": "19c1baa66532ac94ccd15a145025afe34da235a7",
"size": "10740",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "vkontakte_photos/migrations/0002_auto__chg_field_photo_height__chg_field_photo_width.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "296252"
}
],
"symlink_target": ""
} |
import wx
from cairis.core.armid import *
import WidgetFactory
from EnvironmentPropertiesPanel import EnvironmentPropertiesPanel
from ValueTensionsGrid import ValueTensionsGrid
__author__ = 'Shamal Faily'
class SummaryPage(wx.Panel):
  """Notebook page holding an environment's short code and description."""

  def __init__(self, parent):
    wx.Panel.__init__(self, parent, GOAL_PANELSUMMARY_ID)
    mainSizer = wx.BoxSizer(wx.VERTICAL)
    shortCodeSizer = WidgetFactory.buildTextSizer(self, 'Short Code', (87, 30), ENVIRONMENT_TEXTSHORTCODE_ID, 'Code which prefixes requirements which are specific to this environment')
    descriptionSizer = WidgetFactory.buildMLTextSizer(self, 'Description', (87, 30), ENVIRONMENT_TEXTDESCRIPTION_ID)
    mainSizer.Add(shortCodeSizer, 0, wx.EXPAND)
    mainSizer.Add(descriptionSizer, 1, wx.EXPAND)
    self.SetSizer(mainSizer)
class CompositePage(wx.Panel):
  """Notebook page embedding the environment-properties editor panel."""

  def __init__(self, parent, dp):
    wx.Panel.__init__(self, parent)
    outerSizer = wx.BoxSizer(wx.VERTICAL)
    boxSizer = wx.StaticBoxSizer(wx.StaticBox(self, -1), wx.HORIZONTAL)
    outerSizer.Add(boxSizer, 1, wx.EXPAND)
    self.compositeCtrl = EnvironmentPropertiesPanel(self, dp)
    boxSizer.Add(self.compositeCtrl, 1, wx.EXPAND)
    self.SetSizer(outerSizer)
class TensionsPage(wx.Panel):
  """Notebook page for editing value tensions and their rationale.

  Remembers the previously selected grid cell so that text typed into the
  rationale box is written back to that cell before the selection moves.
  """

  def __init__(self, parent):
    wx.Panel.__init__(self, parent)
    # (-1, -1) means "no cell has been selected yet".
    self.thePrevRow = -1
    self.thePrevCol = -1
    sizer = wx.BoxSizer(wx.VERTICAL)
    gridBoxSizer = wx.StaticBoxSizer(wx.StaticBox(self, -1), wx.HORIZONTAL)
    sizer.Add(gridBoxSizer, 1, wx.EXPAND)
    self.tensionsCtrl = ValueTensionsGrid(self)
    self.tensionsCtrl.Bind(wx.grid.EVT_GRID_SELECT_CELL, self.onSelectRationale)
    gridBoxSizer.Add(self.tensionsCtrl, 1, wx.EXPAND)
    rationaleBoxSizer = wx.StaticBoxSizer(wx.StaticBox(self, -1, 'Rationale'), wx.VERTICAL)
    sizer.Add(rationaleBoxSizer, 1, wx.EXPAND)
    self.rationaleCtrl = wx.TextCtrl(self, ENVIRONMENT_TEXTTENSIONRATIONALE_ID, "", size=(200, 100), style=wx.TE_MULTILINE)
    rationaleBoxSizer.Add(self.rationaleCtrl, 0, wx.EXPAND)
    self.tensionsCtrl.setRationaleCtrl(self.rationaleCtrl)
    self.SetSizer(sizer)

  def onSelectRationale(self, evt):
    """Persist the rationale for the cell being left, then load the one
    belonging to the newly selected cell."""
    if self.thePrevRow != -1 or self.thePrevCol != -1:
      # Save whatever the user typed for the previously selected cell.
      self.tensionsCtrl.setRationale(self.thePrevRow, self.thePrevCol, self.rationaleCtrl.GetValue())
    row = evt.GetRow()
    col = evt.GetCol()
    self.rationaleCtrl.SetValue(self.tensionsCtrl.rationale(row, col))
    self.thePrevRow = row
    self.thePrevCol = col
    evt.Skip()
class EnvironmentNotebook(wx.Notebook):
  """Three-tab notebook (Summary / Composite / Tensions) for an environment."""

  def __init__(self, parent, dp):
    wx.Notebook.__init__(self, parent, ENVIRONMENT_NOTEBOOKENVIRONMENT_ID)
    # Pages are created and added in the same order as their tabs appear.
    self.AddPage(SummaryPage(self), 'Summary')
    self.AddPage(CompositePage(self, dp), 'Composite')
    self.AddPage(TensionsPage(self), 'Tensions')
| {
"content_hash": "d45f02ad7bee1cd2bac055d10e8f4b9f",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 188,
"avg_line_length": 39.857142857142854,
"alnum_prop": 0.7351254480286739,
"repo_name": "nathanbjenx/cairis",
"id": "6b14704517b36f0560e56e73770d9478f8598aeb",
"size": "3589",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cairis/gui/EnvironmentNotebook.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "588306"
},
{
"name": "Dockerfile",
"bytes": "829"
},
{
"name": "Gherkin",
"bytes": "1615"
},
{
"name": "HTML",
"bytes": "1664076"
},
{
"name": "JavaScript",
"bytes": "416319"
},
{
"name": "Mako",
"bytes": "13226"
},
{
"name": "PLpgSQL",
"bytes": "1494775"
},
{
"name": "Python",
"bytes": "4006311"
},
{
"name": "Shell",
"bytes": "7035"
}
],
"symlink_target": ""
} |
"""
Check formatting for Swift.Array<T> that are bridged from ObjC
"""
import lldb
from lldbsuite.test.lldbtest import *
from lldbsuite.test.decorators import *
import lldbsuite.test.lldbutil as lldbutil
import os
import unittest2
class TestSwiftBridgedArray(TestBase):
    """lldb data-formatter test for Swift arrays bridged from Objective-C."""

    # Standard lldbsuite boilerplate: directory holding this test's artifacts.
    mydir = TestBase.compute_mydir(__file__)

    @skipUnlessDarwin
    @swiftTest
    @expectedFailureAll(bugnumber="<rdar://problem/32024572>")
    def test_swift_bridged_array(self):
        """Check formatting for Swift.Array<T> that are bridged from ObjC"""
        self.build()
        self.do_test()

    def setUp(self):
        TestBase.setUp(self)
        # Source file and file spec used to place the breakpoint in do_test().
        self.main_source = "main.swift"
        self.main_source_spec = lldb.SBFileSpec(self.main_source)

    def do_test(self):
        """Check formatting for Swift.Array<T> that are bridged from ObjC"""
        exe_name = "a.out"
        exe = self.getBuildArtifact(exe_name)

        # Create the target
        target = self.dbg.CreateTarget(exe)
        self.assertTrue(target, VALID_TARGET)

        # Set the breakpoints
        breakpoint = target.BreakpointCreateBySourceRegex(
            'break here', self.main_source_spec)
        self.assertTrue(breakpoint.GetNumLocations() > 0, VALID_BREAKPOINT)

        # Launch the process, and do not stop at the entry point.
        process = target.LaunchSimple(None, None, os.getcwd())
        self.assertTrue(process, PROCESS_IS_VALID)

        # Frame #0 should be at our breakpoint.
        threads = lldbutil.get_threads_stopped_at_breakpoint(
            process, breakpoint)

        self.assertTrue(len(threads) == 1)
        self.thread = threads[0]

        # Both the frame-variable and the expression-evaluation paths must
        # render the bridged element types and values identically.
        self.expect(
            "frame variable -d run -- swarr",
            substrs=['Int(123456)', 'Int32(234567)', 'UInt16(45678)', 'Double(1.250000)', 'Float(2.500000)'])
        self.expect(
            "expression -d run -- swarr",
            substrs=['Int(123456)', 'Int32(234567)', 'UInt16(45678)', 'Double(1.250000)', 'Float(2.500000)'])
if __name__ == '__main__':
    import atexit
    lldb.SBDebugger.Initialize()
    # Ensure the debugger is torn down even if unittest2 exits abruptly.
    atexit.register(lldb.SBDebugger.Terminate)
    unittest2.main()
| {
"content_hash": "462d1cd842d1abdf94d845c5882dfdd7",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 109,
"avg_line_length": 32.515151515151516,
"alnum_prop": 0.6397949673811743,
"repo_name": "apple/swift-lldb",
"id": "15d35c2a863b683ef962f4894574d2c584f9ab36",
"size": "2602",
"binary": false,
"copies": "1",
"ref": "refs/heads/stable",
"path": "packages/Python/lldbsuite/test/lang/swift/variables/bridged_array/TestSwiftBridgedArray.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "130449"
},
{
"name": "C",
"bytes": "198536"
},
{
"name": "C++",
"bytes": "27687071"
},
{
"name": "CMake",
"bytes": "172176"
},
{
"name": "DTrace",
"bytes": "334"
},
{
"name": "LLVM",
"bytes": "6106"
},
{
"name": "Makefile",
"bytes": "106804"
},
{
"name": "Objective-C",
"bytes": "106821"
},
{
"name": "Objective-C++",
"bytes": "25658"
},
{
"name": "Perl",
"bytes": "72175"
},
{
"name": "Python",
"bytes": "4680483"
},
{
"name": "Shell",
"bytes": "6573"
},
{
"name": "Swift",
"bytes": "260786"
},
{
"name": "Vim script",
"bytes": "8434"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Adds the Auth0 identifier column to UserProfile.

    Migration files are frozen history; do not edit retroactively.
    """

    dependencies = [
        ('users', '0038_auto_20180815_0108'),
    ]

    operations = [
        migrations.AddField(
            model_name='userprofile',
            name='auth0_user_id',
            # NOTE(review): default=b'' is a bytes default on a CharField
            # (a Python 2 artifact) -- presumably harmless here, but worth
            # confirming it does not trip Django's field checks.
            field=models.CharField(blank=True, default=b'', max_length=1024),
        ),
    ]
| {
"content_hash": "53cdb9552ab23ed4e221d072c1ea5a58",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 77,
"avg_line_length": 22.944444444444443,
"alnum_prop": 0.6004842615012107,
"repo_name": "akatsoulas/mozillians",
"id": "f446b302a04e2ed79e7cfb5882a57272e28815bf",
"size": "487",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "mozillians/users/migrations/0039_userprofile_auth0_user_id.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "181649"
},
{
"name": "HTML",
"bytes": "176608"
},
{
"name": "JavaScript",
"bytes": "141945"
},
{
"name": "Makefile",
"bytes": "478"
},
{
"name": "Python",
"bytes": "1240350"
},
{
"name": "Shell",
"bytes": "2336"
}
],
"symlink_target": ""
} |
from django.db.models import Count
from committees.models import Committee, CommitteeMeeting
from plenum import create_protocol_parts
import logging
def Parse(reparse, logger, meeting_pks=None):
    """Create protocol parts for plenum committee meetings.

    :param reparse: when True, delete and rebuild parts for meetings that
        already have them; when False, only process meetings with no parts.
    :param logger: logger used for progress/debug output.
    :param meeting_pks: optional iterable of CommitteeMeeting primary keys;
        when None, all plenum meetings with a protocol text are considered.
    """
    # Lazy %-style logging arguments: the message is only interpolated when
    # DEBUG logging is actually enabled (the old code formatted eagerly).
    logger.debug('Parse (reparse=%s, meeting_pks=%s)', reparse, meeting_pks)
    if meeting_pks is not None:
        meetings = CommitteeMeeting.objects.filter(pk__in=meeting_pks)
    else:
        # Assumes exactly one plenum committee exists; raises IndexError
        # otherwise.
        plenum = Committee.objects.filter(type='plenum')[0]
        meetings = CommitteeMeeting.objects.filter(committee=plenum).exclude(protocol_text='')
        if not reparse:
            # Skip meetings that already have protocol parts.
            meetings = meetings.annotate(Count('parts')).filter(parts__count=0)
    (mks, mk_names) = create_protocol_parts.get_all_mk_names()
    logger.debug('got mk names: %s, %s', mks, mk_names)
    for meeting in meetings:
        logger.debug('creating protocol parts for meeting %s', meeting)
        meeting.create_protocol_parts(delete_existing=reparse, mks=mks, mk_names=mk_names)
def parse_for_existing_meeting(meeting):
    """Re-parse protocol parts for a single, already-stored meeting."""
    Parse(True, logging.getLogger('open-knesset'), [meeting.pk])
| {
"content_hash": "3f13aa4cb1cb9380057c835982d3fef8",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 92,
"avg_line_length": 46.43478260869565,
"alnum_prop": 0.7172284644194756,
"repo_name": "navotsil/Open-Knesset",
"id": "5b950ea46a1edbc35c410fd69cd2290b546602d3",
"size": "1086",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "plenum/management/commands/parse_plenum_protocols_subcommands/parse.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "346228"
},
{
"name": "HTML",
"bytes": "690043"
},
{
"name": "JavaScript",
"bytes": "214741"
},
{
"name": "Python",
"bytes": "4051776"
},
{
"name": "Shell",
"bytes": "203"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import argparse
import json
import os
import sys
def merge_shard_results(summary_json, jsons_to_merge):
  """Reads JSON test output from all shards and combines them into one.

  Args:
    summary_json: Path to the Swarming summary JSON ("shards" key required).
    jsons_to_merge: Candidate per-shard output.json paths.

  Returns dict with merged test output on success. Raises if the summary
  cannot be loaded. Emits annotations to stderr for problem shards.
  """
  try:
    with open(summary_json) as f:
      summary = json.load(f)
  except (IOError, ValueError):
    raise Exception('Summary json cannot be loaded.')

  # Merge all JSON files together. Keep track of missing shards.
  merged = {
      'all_tests': set(),
      'disabled_tests': set(),
      'global_tags': set(),
      'missing_shards': [],
      'per_iteration_data': [],
      'swarming_summary': summary,
      'links': set()
  }
  for index, result in enumerate(summary['shards']):
    if result is None:
      merged['missing_shards'].append(index)
      continue

    # Author note: this code path doesn't trigger convert_to_old_format() in
    # client/swarming.py, which means the state enum is saved in its string
    # name form, not in the number form.
    state = result.get('state')
    if state == u'BOT_DIED':
      print(
          'Shard #%d had a Swarming internal failure' % index, file=sys.stderr)
    elif state == u'EXPIRED':
      print('There wasn\'t enough capacity to run your test', file=sys.stderr)
    elif state == u'TIMED_OUT':
      # BUG FIX: the adjacent string literals were missing a separator,
      # printing "...allocated timeEither it ran...".
      print('Test runtime exceeded allocated time. '
            'Either it ran for too long (hard timeout) or it didn\'t produce '
            'I/O for an extended period of time (I/O timeout)',
            file=sys.stderr)
    elif state != u'COMPLETED':
      print('Invalid Swarming task state: %s' % state, file=sys.stderr)

    json_data, err_msg = load_shard_json(index, result.get('task_id'),
                                         jsons_to_merge)
    if json_data:
      # Set-like fields.
      for key in ('all_tests', 'disabled_tests', 'global_tags', 'links'):
        # BUG FIX: the default belongs inside .get(); the old code wrote
        # json_data.get(key), [] which passed None to set.update() whenever
        # the key was absent, raising TypeError.
        merged[key].update(json_data.get(key, []))

      # 'per_iteration_data' is a list of dicts. Dicts should be merged
      # together, not the 'per_iteration_data' list itself.
      merged['per_iteration_data'] = merge_list_of_dicts(
          merged['per_iteration_data'], json_data.get('per_iteration_data', []))
    else:
      merged['missing_shards'].append(index)
      print('No result was found: %s' % err_msg, file=sys.stderr)

  # If some shards are missing, make it known. Continue parsing anyway. Step
  # should be red anyway, since swarming.py return non-zero exit code in that
  # case.
  if merged['missing_shards']:
    as_str = ', '.join([str(shard) for shard in merged['missing_shards']])
    print('some shards did not complete: %s' % as_str, file=sys.stderr)
    # Not all tests run, combined JSON summary can not be trusted.
    merged['global_tags'].add('UNRELIABLE_RESULTS')

  # Convert to jsonish dict.
  for key in ('all_tests', 'disabled_tests', 'global_tags', 'links'):
    merged[key] = sorted(merged[key])

  return merged
OUTPUT_JSON_SIZE_LIMIT = 100 * 1024 * 1024  # 100 MB


def load_shard_json(index, task_id, jsons_to_merge):
  """Reads JSON output of the specified shard.

  Args:
    index: The index of the shard to load data for (old api directory layout).
    task_id: The directory of the shard to load data for (new api layout).
    jsons_to_merge: Candidate output.json paths to search.

  Returns: A tuple containing:
    * The contents of the shard's output.json, deserialized.
    * An error string.
    (exactly one of the tuple elements will be non-None).
  """
  candidates = []
  for candidate in jsons_to_merge:
    if os.path.basename(candidate) != 'output.json':
      continue
    # The shard's parent directory is named either by shard index (old api)
    # or by task id (new api).
    parent = os.path.basename(os.path.dirname(candidate))
    if parent == str(index) or parent == task_id:
      candidates.append(candidate)

  if not candidates:
    print('shard %s test output missing' % index, file=sys.stderr)
    return (None, 'shard %s test output was missing' % index)
  if len(candidates) > 1:
    print('duplicate test output for shard %s' % index, file=sys.stderr)
    return (None, 'shard %s test output was duplicated' % index)

  path = candidates[0]
  try:
    filesize = os.stat(path).st_size
    if filesize > OUTPUT_JSON_SIZE_LIMIT:
      print(
          'output.json is %d bytes. Max size is %d' % (filesize,
                                                       OUTPUT_JSON_SIZE_LIMIT),
          file=sys.stderr)
      return (None, 'shard %s test output exceeded the size limit' % index)

    with open(path) as f:
      return (json.load(f), None)
  except (IOError, ValueError, OSError) as e:
    print('Missing or invalid gtest JSON file: %s' % path, file=sys.stderr)
    print('%s: %s' % (type(e).__name__, e), file=sys.stderr)
    return (None, 'shard %s test output was missing or invalid' % index)
def merge_list_of_dicts(left, right):
  """Merges dicts left[0] with right[0], left[1] with right[1], etc.

  The longer list's tail is carried through unmerged; on key collisions the
  value from `right` wins.
  """
  output = []
  # BUG FIX: xrange is Python 2 only; range behaves identically here and
  # keeps the script importable under Python 3.
  for i in range(max(len(left), len(right))):
    left_dict = left[i] if i < len(left) else {}
    right_dict = right[i] if i < len(right) else {}
    merged_dict = left_dict.copy()
    merged_dict.update(right_dict)
    output.append(merged_dict)
  return output
def standard_gtest_merge(
    output_json, summary_json, jsons_to_merge):
  """Merges the shard outputs listed in summary_json and writes the combined
  result to output_json. Returns 0 (shell-style success)."""
  output = merge_shard_results(summary_json, jsons_to_merge)
  # BUG FIX: json.dump() writes text; opening in binary mode ('wb') raises
  # TypeError under Python 3. Text mode behaves the same on Python 2 here
  # (json.dump emits no newlines without indent=).
  with open(output_json, 'w') as f:
    json.dump(output, f)

  return 0
def main(raw_args):
  """Command-line entry point: parse arguments and run the merge."""
  arg_parser = argparse.ArgumentParser()
  arg_parser.add_argument('--summary-json')
  arg_parser.add_argument('-o', '--output-json', required=True)
  arg_parser.add_argument('jsons_to_merge', nargs='*')
  opts = arg_parser.parse_args(raw_args)

  return standard_gtest_merge(
      opts.output_json, opts.summary_json, opts.jsons_to_merge)
if __name__ == '__main__':
  # Propagate the merge result as the process exit code.
  sys.exit(main(sys.argv[1:]))
| {
"content_hash": "4805a43ddd132b425acff15443d22d75",
"timestamp": "",
"source": "github",
"line_count": 167,
"max_line_length": 80,
"avg_line_length": 35.053892215568865,
"alnum_prop": 0.641270925862658,
"repo_name": "youtube/cobalt",
"id": "58a29366c4f2814791a5867716d4e424061610f4",
"size": "6042",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "build/android/pylib/results/presentation/standard_gtest_merge.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
import apihelper
import time
from mock import patch
import unittest
import os
import sys
sys.path.append("..")
if __name__ == "__main__":
    sys.path.append("/opt/xos")
    # Configure Django settings before any xos models get imported.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "xos.settings")
def side_effect_bad_password(*args, **kwargs):
    # Mock side effect standing in for a failed authentication attempt.
    raise Exception()
class MockObject:
    """Lightweight stand-in object: every keyword argument becomes an
    instance attribute of the same name."""

    def __init__(self, **kwargs):
        self.__dict__.update(kwargs)
class TestCachedAuthenticator(unittest.TestCase):
    """Tests for apihelper.CachedAuthenticator's credential caching."""

    @patch("apihelper.User.objects")
    @patch("apihelper.django_authenticate")
    def test_authenticate_notcached(self, mock_django_authenticate, mock_user_filter):
        # Empty cache: authenticate() must fall through to django_authenticate.
        the_user = MockObject(
            id=123,
            email="testuser@test.com",
            username="testuser@test.com",
            password="foobar",
        )
        mock_django_authenticate.return_value = the_user
        mock_user_filter.return_value = [the_user]

        ca = apihelper.CachedAuthenticator()
        result = ca.authenticate("testuser@test.com", "foobar")
        self.assertTrue(result)

        mock_django_authenticate.assert_called()

    @patch("apihelper.User.objects")
    @patch("apihelper.django_authenticate")
    def test_authenticate_notcached_badpassword(
        self, mock_django_authenticate, mock_user_filter
    ):
        # A wrong password makes django_authenticate raise; the exception
        # must propagate to the caller rather than being swallowed.
        the_user = MockObject(
            id=123,
            email="testuser@test.com",
            username="testuser@test.com",
            password="foobar",
        )
        mock_django_authenticate.side_effect = side_effect_bad_password
        mock_user_filter.return_value = [the_user]

        ca = apihelper.CachedAuthenticator()
        with self.assertRaises(Exception):
            ca.authenticate("testuser@test.com", "not_foobar")

        mock_django_authenticate.assert_called()

    @patch("apihelper.User.objects")
    @patch("apihelper.django_authenticate")
    def test_authenticate_cached(self, mock_django_authenticate, mock_user_filter):
        # Pre-populated, unexpired cache entry: django_authenticate must be
        # bypassed entirely.
        the_user = MockObject(
            id=123,
            email="testuser@test.com",
            username="testuser@test.com",
            password="foobar",
        )
        mock_django_authenticate.return_value = the_user
        mock_user_filter.return_value = [the_user]

        ca = apihelper.CachedAuthenticator()
        # Cache key format is "<username>:<password>".
        key = "%s:%s" % (the_user.username, the_user.password)
        ca.cached_creds[key] = {"timeout": time.time() + 10, "user_id": the_user.id}

        result = ca.authenticate("testuser@test.com", "foobar")
        self.assertTrue(result)

        mock_django_authenticate.assert_not_called()

    def test_trim(self):
        # trim() must evict only cache entries whose timeout is in the past.
        user_one = MockObject(
            id=123,
            email="testuser@test.com",
            username="testuser@test.com",
            password="foobar",
        )
        user_two = MockObject(
            id=124,
            email="testuser4@test.com",
            username="testuser@test.com",
            password="foobar4",
        )

        ca = apihelper.CachedAuthenticator()
        key_one = "%s:%s" % (user_one.username, user_one.password)
        ca.cached_creds[key_one] = {
            "timeout": time.time() - 11,
            "user_id": user_one.id,
        }  # this will get trimmed
        key_two = "%s:%s" % (user_two.username, user_two.password)
        ca.cached_creds[key_two] = {
            "timeout": time.time() + 10,
            "user_id": user_two.id,
        }  # this will not

        ca.trim()

        assert len(ca.cached_creds.keys()) == 1
        # NOTE(review): dict.values()[0] is Python-2-only indexing; under
        # Python 3 this would need list(...) -- confirm target runtime.
        assert ca.cached_creds.values()[0]["user_id"] == user_two.id
if __name__ == "__main__":
    # Allow running this test module directly.
    unittest.main()
| {
"content_hash": "9e3a619bc8c2b4d836caa111ad6aee8d",
"timestamp": "",
"source": "github",
"line_count": 120,
"max_line_length": 86,
"avg_line_length": 30.375,
"alnum_prop": 0.5887517146776406,
"repo_name": "open-cloud/xos",
"id": "89b343c0aa4c2ad6f20dac748c5324fc616e7dc0",
"size": "4434",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "xos/coreapi/tests/apihelper_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "5024"
},
{
"name": "Makefile",
"bytes": "13624"
},
{
"name": "Python",
"bytes": "1329912"
},
{
"name": "Shell",
"bytes": "57651"
},
{
"name": "Smarty",
"bytes": "3161"
}
],
"symlink_target": ""
} |
import pytest
import tpfd
import logging
# Module-level fixture state shared by the handler callbacks and the tests.
logging.basicConfig(level=logging.DEBUG)
logging.debug('test')

p = tpfd.Parser()
p.debug = False

# Side-effect accumulators appended to by the @p.on_parse / @p.on_find
# handlers below; the tests assert on their lengths.
PARSE_LIST_RESULTS = []
FIND_LIST_RESULTS = []
FILE_RESULTS = []
# Handlers registered on the shared parser. A handler's return value becomes
# p.parse()'s result when its pattern matches.

@p.on_parse('{Animal} are cool')
def main(animal):
    return animal


@p.on_parse('List test {number}')
def list_test(number):
    # Side effect only: records each match for the iterable-input tests.
    PARSE_LIST_RESULTS.append(True)


@p.on_parse('{Animal} beats Battlestar galactica')
def file_test(animal):
    # Side effect only: records matches found while parsing a file.
    FILE_RESULTS.append(True)


@p.on_parse('The awnser is {number:d}')
def int_test(number):
    # The ":d" spec converts the captured field to int before dispatch.
    return number


@p.on_parse('The {noun} who say {thing}!')
def two_word_test(noun, thing):
    return (noun, thing)


@p.on_find('>{}<')
def html_find(words):
    # Fires once per '>...<' span found.
    FIND_LIST_RESULTS.append(True)


@p.on_find('the')
def word_find(words):
    print (words)
def test_string_parse1():
    assert p.parse('Sloths are cool') == 'sloths'


def test_string_parse2():
    assert p.parse('Turtles are cool') == 'turtles'


def test_string_parse3():
    # No registered pattern matches this input.
    assert p.parse('Bears beats Battle Star Galactica') == None


def test_string_parse4():
    assert p.parse('The knights who say Ni!') == ('knights', 'ni')


def test_utf_parse1():
    # Non-ASCII fields (emoji) must round-trip through the parser.
    assert p.parse('The 🇬🇧 who say ⚡️!') == ('🇬🇧', '⚡️')


def test_string_find1():
    # NOTE(review): the comparison result is discarded, so this test can
    # never fail -- presumably an `assert` is missing; confirm what
    # p.find() actually returns before fixing.
    p.find('The man drove the car to the store.') == 'the the the'


def test_string_find3():
    # NOTE(review): missing `assert` here as well (see above).
    p.find('This string should return None') == None


def test_iter_parse1():
    # Two of the three list entries match the 'List test {number}' pattern.
    l = ['List test 1', 'List test 2', 'List antitest 1']
    p.parse(l)
    assert 2 == len(PARSE_LIST_RESULTS)


def test_iter_find1():
    # Five '>...<' spans exist across the first two strings.
    l = ['<p>the <b>bold</b> text</p>', '<p>the <i>italicized</i> text</p>', 'This statement has no html tags']
    p.find(l)
    assert 5 == len(FIND_LIST_RESULTS)


def test_iter_parse_file1():
    p.parse_file('Test1.txt')
    assert 1 == len(FILE_RESULTS)


def test_int_parse1():
    assert p.parse('The awnser is 42') == 42
if __name__ == '__main__':
    # Allow running this test module directly.
    pytest.main()
| {
"content_hash": "f3b7bed04c44190ca7597ad80c72b2e6",
"timestamp": "",
"source": "github",
"line_count": 103,
"max_line_length": 111,
"avg_line_length": 19.95145631067961,
"alnum_prop": 0.5970802919708029,
"repo_name": "erinxocon/tpfd",
"id": "3a92c38ea1bf8dfed8cd2b4e333c5a83cf008d01",
"size": "2090",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "test/test_parse.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "9279"
}
],
"symlink_target": ""
} |
from zope.interface import implements
from twisted.python import usage
from twisted.plugin import IPlugin
from twisted.application.service import IServiceMaker
from vumidash.graphite_client import GraphiteClient
from vumidash.dummy_client import DummyClient
from vumidash.gecko_server import GeckoServer
class Options(usage.Options):
    """Command-line options for the graphite2gecko twistd plugin."""

    optFlags = [
        ["dummy", None, "Use a dummy metrics source instead of reading"
         " from Graphite."],
    ]

    optParameters = [
        ["graphite-url", "g", None, "The URL of the Graphite web service."],
        ["port", "p", 1235, "The port number to serve JSON to Geckoboard on."],
    ]
class Graphite2GeckoServiceMaker(object):
    """twistd service maker that serves Graphite metrics to Geckoboard."""
    implements(IServiceMaker, IPlugin)

    tapname = "graphite2gecko"
    description = "Read data from Graphite and serve it to Geckoboard"
    options = Options

    def makeService(self, options):
        """Build the GeckoServer service from the parsed options."""
        if options["dummy"]:
            metrics_source = DummyClient()
        else:
            metrics_source = GraphiteClient(options["graphite-url"])
        return GeckoServer(metrics_source, int(options["port"]))
# Service maker instance discovered by twistd's plugin system; it must be
# exposed at module top level.
graphite2gecko = Graphite2GeckoServiceMaker()
| {
"content_hash": "1b929e7b6f688d26673d218da7429bdb",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 79,
"avg_line_length": 30.511627906976745,
"alnum_prop": 0.680640243902439,
"repo_name": "praekelt/vumi-dashboard",
"id": "d6b6ca1325b3da1fe3c66a5494d38b567b79d0a7",
"size": "1312",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "twisted/plugins/graphite2gecko.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "58912"
},
{
"name": "Shell",
"bytes": "510"
}
],
"symlink_target": ""
} |
"""
Utilities for pretty-printing data[types].
"""
__all__ = ('pretty_date', 'plural', 'trim', 'fix_link')
import re
import sys
from datetime import datetime
def pretty_date(time=False):
    """
    Get a datetime object or an int()/float() Epoch timestamp and return a
    pretty string like 'an hour ago', 'Yesterday', '3 months ago',
    'just now', etc. Times are compared against ``datetime.utcnow()``.
    Returns '' for times in the future.
    """
    # The docstring promises epoch-timestamp support; convert numbers to a
    # naive UTC datetime so the subtraction below works. bool is excluded so
    # the legacy `time=False` default keeps raising rather than meaning 1970.
    if isinstance(time, (int, float)) and not isinstance(time, bool):
        time = datetime.utcfromtimestamp(time)
    now = datetime.utcnow()
    diff = now - time
    second_diff = diff.seconds
    day_diff = diff.days

    # Future times normalize to a negative day delta; no sensible phrasing.
    if day_diff < 0:
        return ''

    if day_diff == 0:
        if second_diff < 10:
            return "just now"
        if second_diff < 60:
            return str(second_diff) + " seconds ago"
        if second_diff < 120:
            return "a minute ago"
        if second_diff < 3600:
            # Floor division (//) keeps the count an integer on Python 3 too;
            # plain / produced e.g. "2.5 minutes ago" under Python 3.
            return str(second_diff // 60) + " minutes ago"
        if second_diff < 7200:
            return "an hour ago"
        if second_diff < 86400:
            return str(second_diff // 3600) + " hours ago"
    if day_diff == 1:
        return "Yesterday"
    if day_diff < 7:
        return str(day_diff) + " days ago"
    if day_diff < 31:
        return str(day_diff // 7) + " weeks ago"
    if day_diff < 365:
        return str(day_diff // 30) + " months ago"
    return str(day_diff // 365) + " years ago"
def plural(v, singular, plural):
    """Format *plural* when v > 1, otherwise *singular* (``{v}`` expands to v)."""
    if v > 1:
        template = plural
    else:
        template = singular
    return template.format(v=v)
def trim(docstring):
    """
    Normalize a docstring's indentation following PEP 257's trim algorithm.

    Tabs are expanded, the common leading indentation of all lines after the
    first is removed, and leading/trailing blank lines are stripped.
    Returns '' for empty or None input.
    """
    if not docstring:
        return ''
    # Convert tabs to spaces (following the normal Python rules)
    # and split into a list of lines:
    lines = docstring.expandtabs().splitlines()
    # Determine minimum indentation (first line doesn't count).
    # sys.maxsize replaces the Python-2-only sys.maxint as the "no indent
    # seen yet" sentinel; it exists on both Python 2 and 3.
    indent = sys.maxsize
    for line in lines[1:]:
        stripped = line.lstrip()
        if stripped:
            indent = min(indent, len(line) - len(stripped))
    # Remove indentation (first line is special):
    trimmed = [lines[0].strip()]
    if indent < sys.maxsize:
        for line in lines[1:]:
            trimmed.append(line[indent:].rstrip())
    # Strip off trailing and leading blank lines:
    while trimmed and not trimmed[-1]:
        trimmed.pop()
    while trimmed and not trimmed[0]:
        trimmed.pop(0)
    # Return a single string:
    return '\n'.join(trimmed)
def fix_link(s):
    """
    Ensure the link `s` carries a scheme: return it unchanged when it
    already starts with http:// or https://, otherwise prepend http://.
    """
    has_scheme = re.match(r'^https?://', s)
    if has_scheme:
        return s
    return 'http://{0}'.format(s)
| {
"content_hash": "297a60e0cd8944ed130d13bd239094f5",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 67,
"avg_line_length": 29.094117647058823,
"alnum_prop": 0.5818843509906996,
"repo_name": "notifico/notifico",
"id": "47fd7bb9ff8b1e98f9b3059708682aca3790a5ed",
"size": "2497",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "notifico/util/pretty.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "151679"
},
{
"name": "HTML",
"bytes": "105821"
},
{
"name": "Python",
"bytes": "173541"
},
{
"name": "Shell",
"bytes": "90"
}
],
"symlink_target": ""
} |
from django.contrib.auth.backends import ModelBackend
class AuthenticationBackend(ModelBackend):
    """
    Object-aware permission checks for formly.

    Permissions that do not receive an object:
        * formly.view_survey_list
        * formly.create_survey
    Permissions that receive a survey object:
        * formly.view_survey_detail
        * formly.change_survey_name
        * formly.publish_survey
        * formly.duplicate_survey
        * formly.edit_survey
        * formly.view_results
    Permissions that receive different object types:
        * formly.delete_object
    """
    supports_object_permissions = True
    supports_anonymous_user = True

    def has_perm(self, user, perm, obj=None):
        """Check *perm* for *user*, optionally scoped to *obj*."""
        global_perms = (
            "formly.view_survey_list",
            "formly.create_survey",
        )
        survey_perms = (
            "formly.view_survey_detail",
            "formly.change_survey_name",
            "formly.publish_survey",
            "formly.duplicate_survey",
            "formly.edit_survey",
            "formly.view_results",
        )
        if perm in global_perms:
            return user.is_authenticated
        if perm in survey_perms:
            return obj and user == obj.creator
        if perm == "formly.delete_object":
            if obj is None:
                return False
            # Walk from the object toward its owning survey's creator;
            # the attribute order mirrors the object hierarchy.
            resolvers = (
                ("creator", lambda o: o.creator),
                ("survey", lambda o: o.survey.creator),
                ("page", lambda o: o.page.survey.creator),
                ("field", lambda o: o.field.page.survey.creator),
            )
            for attr, resolve in resolvers:
                if hasattr(obj, attr):
                    return user == resolve(obj)
            return False
        return super(AuthenticationBackend, self).has_perm(user, perm)
| {
"content_hash": "1089c91ec021890a31eee297e63ce659",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 70,
"avg_line_length": 31.357142857142858,
"alnum_prop": 0.5808656036446469,
"repo_name": "eldarion/formly",
"id": "934c7dcd6b0ff066bfabc5eaf62b3b586de3ef3b",
"size": "1756",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "formly/auth_backend.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "27863"
},
{
"name": "Makefile",
"bytes": "120"
},
{
"name": "Python",
"bytes": "118321"
}
],
"symlink_target": ""
} |
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Add ``created_by``/``modified_by`` audit fields to ScriptVersion."""
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('wooey', '0044_change_script_parameter_choices_to_text'),
    ]
    operations = [
        migrations.AddField(
            model_name='scriptversion',
            name='created_by',
            # SET_NULL keeps script versions alive if the user is deleted.
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='created_script_version_set', to=settings.AUTH_USER_MODEL),
        ),
        migrations.AddField(
            model_name='scriptversion',
            name='modified_by',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='modified_script_version_set', to=settings.AUTH_USER_MODEL),
        ),
    ]
| {
"content_hash": "29b0510c582add23f54e167dc5789133",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 178,
"avg_line_length": 38.166666666666664,
"alnum_prop": 0.665938864628821,
"repo_name": "wooey/Wooey",
"id": "7c0d6ecdb885136158e0b29b12bfd44af816bae0",
"size": "966",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "wooey/migrations/0045_add_created_modified_by.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "8898"
},
{
"name": "Dockerfile",
"bytes": "1004"
},
{
"name": "HTML",
"bytes": "75964"
},
{
"name": "JavaScript",
"bytes": "811"
},
{
"name": "Makefile",
"bytes": "1212"
},
{
"name": "Python",
"bytes": "298550"
},
{
"name": "Shell",
"bytes": "513"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import os
import abc
import shelve
from pprint import pprint
from cloudmesh_base.tables import dict_printer
from cloudmesh_base.Shell import Shell
from cloudmesh_base.util import banner
from cloudmesh_base.util import path_expand
from cloudmesh_pbs.OpenPBS import OpenPBS
class pbs_db_interface(object):
    """Abstract interface for a file-backed PBS job database.

    Concrete subclasses provide load/get/set/save/update; this base supplies
    dict-style access and filename bookkeeping.
    """
    __metaclass__ = abc.ABCMeta  # Python-2-style ABC, matching this codebase

    db = None  # mapping of job id -> job attributes; set by concrete classes

    def data(self):
        """Return a plain dict copy of the underlying database."""
        return dict(self.db)

    def __getitem__(self, index):
        return self.db[index]

    def __setitem__(self, index, value):
        self.db[index] = value

    @abc.abstractmethod
    def load(self, filename):
        """loads the saved database from the file"""

    @abc.abstractmethod
    def get(self, id):
        """get the object with the id"""

    @abc.abstractmethod
    def set(self, id, value):
        """set the object with the id to value"""

    def set_filename(self, filename):
        """Remember the backing filename for this database."""
        self.filename = filename

    def remove(self):
        """Delete the backing file; a missing file is not an error."""
        try:
            os.remove(self.filename)
        except OSError:
            # Narrowed from a bare except: only swallow filesystem errors
            # (e.g. file does not exist), not arbitrary bugs.
            pass

    @abc.abstractmethod
    def save(self):
        """save the cloudmesh_job"""

    @abc.abstractmethod
    def update(self):
        """load the cloudmesh_job"""
class DbPBS(pbs_db_interface):
    """Shelve-backed implementation of the PBS job database."""

    def __init__(self, filename=None):
        """Create the database, optionally at an explicit *filename*."""
        self.pbs = OpenPBS(deploy=True)
        # Bug fix: the filename argument was previously ignored
        # (self.open() was called without it).
        self.open(filename)

    def open(self, filename=None):
        """Open (creating if needed) the shelve file backing the db."""
        if filename is not None:
            self.filename = filename
        else:
            self.filename = path_expand(self.pbs.database_filename())
        path = os.path.dirname(self.filename)
        Shell.mkdir(path)
        self.load()

    def clear(self):
        """Remove all job records and persist the empty database."""
        for id in self.db:
            del self.db[id]
        self.save()

    def load(self):
        """Open the shelve; strips a trailing .db since shelve re-adds it."""
        print('loading', self.filename)
        # remove db ending so that shelve automatically adds it
        self.filename = self.filename.replace(".db", "")
        self.db = shelve.open(self.filename, writeback=True)

    def save(self):
        """Flush pending writes to disk."""
        self.db.sync()

    def get(self, id):
        """Return the job record stored under *id*."""
        return self.db[id]

    def status(self, id):
        """Return the PBS job_state field for *id*."""
        return self.get(id)["job_state"]

    def set(self, id, value):
        """Store *value* under *id* and persist immediately."""
        self.db[id] = value
        self.save()

    def keys(self):
        """Return the list of job ids.

        Bug fix: the original had no return statement and always
        returned None.
        """
        return list(self.data().keys())

    def delete(self, id):
        """Remove the record for *id* (not persisted until save())."""
        del self.db[id]

    def close(self):
        """Close the underlying shelve."""
        self.db.close()

    def update(self, host=None, user=True):
        """Refresh the database from qstat output on *host*."""
        if host is None:
            # Bug fix: was a bare `raise` with no active exception, which
            # itself raises a confusing RuntimeError.
            raise ValueError("host must be specified for update()")
        print("QSTAT")
        r = dict(self.pbs.qstat(host, user=user, format='dict'))
        pprint(r)
        # Bug fix: was `if r is not {}`, an identity test that is always
        # True; emptiness is what was meant.
        if r:
            for jobid in r:
                self.db[jobid] = r[jobid]
            self.save()
        else:
            print("no jobs found after query")
        print("update completed")

    def info(self):
        """Print where the database file lives."""
        print("Filename:", self.filename)

    def list(self, attributes=None, output="table"):
        """Render stored jobs via dict_printer; None when empty or unknown
        output format."""
        if self.db is None or len(self.db) == 0:
            print("No jobs found")
            return None
        columns = attributes
        if columns is None:
            columns = ["cm_jobid", "cm_host", "cm_user", "Job_Name",
                       "job_state", "exit_status"]
        # Build one row per job, substituting "None" for missing attributes.
        d = {}
        for jobid in self.db:
            content = {}
            for attribute in columns:
                try:
                    content[attribute] = self.db[jobid][attribute]
                except (KeyError, TypeError):
                    content[attribute] = "None"
            d[jobid] = content
        if output in ["csv", "table", "dict", "yaml"]:
            return dict_printer(d, order=columns, output=output)
        return None

    def qsub(self, name, host, script, template=None, kind="dict"):
        """Submit *script* via qsub and return the parsed result as a dict."""
        r = self.pbs.qsub(name, host, script, template=template, kind=kind)
        pprint(r)
        return dict(r)
if __name__ == "__main__":
qsub = False
db = DbPBS()
db.clear()
db.info()
db.update(host="india", user=False)
print(db.list(output="table"))
print(db.list(output="csv"))
print(db.list(output="dict"))
print(db.list(output="yaml"))
banner("user")
db.clear()
db.update(host="india")
print(db.list(output="table"))
if qsub:
banner('qsub')
pbs = OpenPBS()
jobname = "job-" + pbs.jobid + ".pbs"
host = "india"
script_template = pbs.read_script("etc/job.pbs")
print(script_template)
r = db.qsub(jobname, host, 'echo "Hello"', template=script_template)
pprint(r)
banner('variable list')
pprint(OpenPBS.variable_list(r)) | {
"content_hash": "6ce7eebfb5b2d6abbd603d24c8c8df85",
"timestamp": "",
"source": "github",
"line_count": 188,
"max_line_length": 96,
"avg_line_length": 25.154255319148938,
"alnum_prop": 0.5601607105096215,
"repo_name": "rajpushkar83/pbs",
"id": "eb0f2f6d25732d291bf475135b2e4ef9a847e4a7",
"size": "4729",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "cloudmesh_pbs/DbPBS.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "3936"
},
{
"name": "Makefile",
"bytes": "583"
},
{
"name": "Python",
"bytes": "154133"
},
{
"name": "Shell",
"bytes": "1841"
}
],
"symlink_target": ""
} |
'''
Create per SPW test images of a pipeline-calibrated MS.
This version is intended for line SPWs.
Run EVLA_pipe_restore.py before this script.
'''
logprint("Starting EVLA_pipe_testimage_lines.py",
         logfileout='logs/testimage_lines.log')
import os
import sys
from warnings import warn
from tasks import clean
from CASA_functions import (set_imagermode, set_imagesize, set_cellsize,
                            has_field)
from paths import image_script_path, path
# Remove any leftover imaging products from previous runs.
execfile(path("imaging_cleanup.py", image_script_path))
# This script should still be usable if the user didn't enable imaging at the
# beginning. In this case, sources will be empty. Prompt the user at this
# point.
# NOTE(review): `imaging_sources`, `ms_active`, `logprint` etc. are assumed
# to be globals set by the earlier pipeline/restore scripts — confirm.
if len(imaging_sources) == 0:
    print("No sources given. Input which field(s) should be imaged (mosaics"
          " can be created by giving a common name for the set; i.e., 'M33'"
          " for 'M33LP1', 'M33LP2', etc)")
    print("Multiple images can be created by separating the list w/ commas"
          " (i.e., '3C48, M33')")
    imaging_sources = raw_input("Input fields to be imaged: ")
    # Remove whitespaces then split by commas
    imaging_sources = imaging_sources.replace(" ", "").split(",")
# Make directory for images to go.
if not os.path.exists('test_images'):
    os.mkdir("test_images")
# Check list of given sources against the field list
valid_sources = []
for source in imaging_sources:
    if has_field(ms_active, source):
        valid_sources.append(source)
    else:
        warn('No field contains the given source: {}'.format(source))
        logprint('No field contains the given source: {}'.format(source),
                 logfileout='logs/testimage_lines.log')
# Abort (rather than image nothing) when no requested source matched a field.
if len(valid_sources) == 0:
    warn('No valid sources given. Exiting without imaging.')
    logprint('No valid sources given. Exiting without imaging.',
             logfileout='logs/testimage_lines.log')
    sys.exit()
sources = valid_sources
# Most common setups are going to have the same SPW coverage for each field
# So set spws to be the first one from field_spws
spws = field_spws[0]
# Bug fix: iterate over the *validated* `sources` list built above.
# Previously this looped over the raw `imaging_sources` input, so sources
# that failed validation were still imaged (and valid ones duplicated the
# work of validation for nothing).
for source in sources:
    for idx, spw_num in enumerate(spws):
        logprint("Imaging SPW {0} of {1}".format(idx, len(spws)),
                 logfileout='logs/testimage_lines.log')
        default("clean")
        weighting = 'natural'
        minpb = 0.1
        max_size = 15000
        # Determine imagermode, cell size, and image size
        imagermode = set_imagermode(ms_active, source)
        cellsize = set_cellsize(ms_active, spw_num, sample_factor=6.)
        imagesize = set_imagesize(ms_active, spw_num, source, sample_factor=6.,
                                  pblevel=minpb, max_size=max_size)
        if imagermode == "mosaic":
            # XXX Set this to centre of M33 for now.
            phasecenter = 'J2000 01h33m50.904 +30d39m35.79'
        else:
            phasecenter = ''
        # Instead of trying to image a cube, just use one large channel
        # at the center (where large = 10 channels).
        center_chan = channels[idx] // 2
        width = 10
        start_chan = int(center_chan - width // 2)
        imagename = \
            'test_images/{0}.{1}.spw_{2}'.format(ms_active[:-3],
                                                 source,
                                                 spw_num)
        # Remove old image products
        rmtables(imagename + "*")
        clean(vis=ms_active,
              imagename=imagename,
              field='*' + source + '*', spw=str(spw_num),
              mode='channel', niter=0,
              imagermode=imagermode, cell=cellsize,
              imsize=imagesize,
              start=start_chan, width=width, nchan=1,
              weighting=weighting, pbcor=False, minpb=0.1,
              phasecenter=phasecenter)
        remove_products(imagename)
logprint("Finished EVLA_pipe_testimage_lines.py",
         logfileout='logs/testimage_lines.log')
pipeline_save()
| {
"content_hash": "5e7df3dfc0b673ed86526059b5b38da6",
"timestamp": "",
"source": "github",
"line_count": 118,
"max_line_length": 79,
"avg_line_length": 33.720338983050844,
"alnum_prop": 0.614727318421714,
"repo_name": "e-koch/VLA_Lband",
"id": "f186e8f2269cdc8064c8be9f27a3e8b0c8672c3e",
"size": "3980",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "12A-403/pipeline4.6.0/imaging_pipeline/EVLA_pipe_testimage_lines.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "2740022"
},
{
"name": "Shell",
"bytes": "98570"
}
],
"symlink_target": ""
} |
"""Test that longer and mixed texts are tokenized correctly."""
from __future__ import unicode_literals
import pytest
def test_tokenizer_handles_long_text(en_tokenizer):
    # A realistic multi-sentence passage; the single length assertion pins
    # the combined handling of abbreviations, quotes and punctuation.
    text = """Tributes pour in for late British Labour Party leader
Tributes poured in from around the world Thursday
to the late Labour Party leader John Smith, who died earlier from a massive
heart attack aged 55.
In Washington, the US State Department issued a statement regretting "the
untimely death" of the rapier-tongued Scottish barrister and parliamentarian.
"Mr. Smith, throughout his distinguished"""
    tokens = en_tokenizer(text)
    assert len(tokens) == 76
@pytest.mark.parametrize('text,length', [
    ("The U.S. Army likes Shock and Awe.", 8),
    ("U.N. regulations are not a part of their concern.", 10),
    ("“Isn't it?”", 6),
    ("""Yes! "I'd rather have a walk", Ms. Comble sighed. """, 15),
    ("""'Me too!', Mr. P. Delaware cried. """, 11),
    ("They ran about 10km.", 6),
    # Legacy pytest idiom: marks this single parameter set as xfail.
    pytest.mark.xfail(("But then the 6,000-year ice age came...", 10))])
def test_tokenizer_handles_cnts(en_tokenizer, text, length):
    # Each case asserts the token count for one tricky construct
    # (abbreviations, curly quotes, measure suffixes, number commas).
    tokens = en_tokenizer(text)
    assert len(tokens) == length
| {
"content_hash": "5d0d1f4e8adda71f250bd9c9bbe661ea",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 77,
"avg_line_length": 34.73529411764706,
"alnum_prop": 0.6900931414055885,
"repo_name": "banglakit/spaCy",
"id": "a99cfa29ee0851e025f639a9ca5da29fcb1fe2e2",
"size": "1201",
"binary": false,
"copies": "4",
"ref": "refs/heads/bn",
"path": "spacy/tests/en/test_text.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "462587"
},
{
"name": "C++",
"bytes": "117174"
},
{
"name": "CSS",
"bytes": "25793"
},
{
"name": "HTML",
"bytes": "227881"
},
{
"name": "JavaScript",
"bytes": "880"
},
{
"name": "M4",
"bytes": "11398"
},
{
"name": "Makefile",
"bytes": "143233"
},
{
"name": "Objective-C",
"bytes": "567"
},
{
"name": "Python",
"bytes": "1273825"
},
{
"name": "Roff",
"bytes": "30884"
},
{
"name": "Shell",
"bytes": "96278"
}
],
"symlink_target": ""
} |
from __future__ import (nested_scopes, generators, division, absolute_import, with_statement,
print_function, unicode_literals)
from collections import defaultdict
import os
import re
from pants.base.build_environment import get_buildroot
from pants.backend.jvm.tasks.jvm_compile.analysis_parser import AnalysisParser, ParseError
from pants.backend.jvm.tasks.jvm_compile.java.jmake_analysis import JMakeAnalysis
class JMakeAnalysisParser(AnalysisParser):
  """Parse a file containing representation of an analysis for some JVM language."""

  def empty_prefix(self):
    # The canonical header of a jmake analysis file with no entries.
    return 'pcd entries:\n0 items\n'

  def parse(self, infile):
    """Parse the full analysis: pcd entries, then per-source dependencies."""
    self._expect_header(infile.readline(), 'pcd entries')
    num_pcd_entries = self._parse_num_items(infile.readline())
    pcd_entries = []
    for i in xrange(0, num_pcd_entries):
      line = infile.readline()
      # Each pcd entry is exactly 5 tab-separated fields.
      tpl = line.split('\t')
      if len(tpl) != 5:
        raise ParseError('Line must contain 5 tab-separated fields: %s' % line)
      pcd_entries.append(tpl)  # Note: we preserve the \n on the last entry.
    src_to_deps = self._parse_deps_at_position(infile)
    return JMakeAnalysis(pcd_entries, src_to_deps)

  def parse_products(self, infile):
    """Return a map from source file to the classfiles it produces."""
    self._expect_header(infile.readline(), 'pcd entries')
    num_pcd_entries = self._parse_num_items(infile.readline())
    ret = defaultdict(list)
    # Parse more efficiently than above, since we only care about
    # the first two elements in the line.
    for _ in xrange(0, num_pcd_entries):
      line = infile.readline()
      p1 = line.find('\t')
      # Field 0 is the class name relative to classes_dir.
      clsfile = os.path.join(self.classes_dir, line[0:p1] + '.class')
      p2 = line.find('\t', p1 + 1)
      # Field 1 is the source file that produced it.
      src = line[p1+1:p2]
      ret[src].append(clsfile)
    return ret

  def parse_deps(self, infile, classpath_indexer):
    """Return a map from source file to the classfiles/classpath elements
    it depends on; internal classes are preferred over classpath entries."""
    buildroot = get_buildroot()
    classpath_elements_by_class = classpath_indexer()
    self._expect_header(infile.readline(), 'pcd entries')
    num_pcd_entries = self._parse_num_items(infile.readline())
    for _ in xrange(0, num_pcd_entries):
      infile.readline()  # Skip these lines.
    src_to_deps = self._parse_deps_at_position(infile)
    ret = defaultdict(set)
    for src, deps in src_to_deps.items():
      for dep in deps:
        rel_classfile = dep + '.class'
        # Check if we have an internal class first.
        internal_classfile = os.path.join(buildroot, self.classes_dir, rel_classfile)
        if os.path.exists(internal_classfile):
          # Dep is on an internal class.
          ret[src].add(internal_classfile)
        elif rel_classfile in classpath_elements_by_class:
          # Dep is on an external jar/classes dir.
          ret[src].add(classpath_elements_by_class.get(rel_classfile))
    return ret

  def _parse_deps_at_position(self, infile):
    """Parse the 'dependencies' section at the current stream position."""
    self._expect_header(infile.readline(), 'dependencies')
    num_deps = self._parse_num_items(infile.readline())
    src_to_deps = {}
    for i in xrange(0, num_deps):
      # Field 0 is the source file; the remaining fields are its deps.
      tpl = infile.readline().split('\t')
      src = tpl[0]
      deps = tpl[1:]
      deps[-1] = deps[-1][0:-1]  # Trim off the \n.
      src_to_deps[src] = deps
    return src_to_deps

  # Matches section-size lines such as "12 items".
  num_items_re = re.compile(r'(\d+) items\n')

  def _parse_num_items(self, line):
    """Parse a line of the form '<num> items' and returns <num> as an int."""
    matchobj = JMakeAnalysisParser.num_items_re.match(line)
    if not matchobj:
      raise ParseError('Expected: "<num> items". Found: "%s"' % line)
    return int(matchobj.group(1))

  def _expect_header(self, line, header):
    """Raise ParseError unless *line* is exactly the header plus ':' and a newline."""
    expected = header + ':\n'
    if line != expected:
      raise ParseError('Expected: %s. Found: %s' % (expected, line))
| {
"content_hash": "e78ff53f3a58aebfc3af033240d4530c",
"timestamp": "",
"source": "github",
"line_count": 94,
"max_line_length": 93,
"avg_line_length": 39.08510638297872,
"alnum_prop": 0.655688622754491,
"repo_name": "square/pants",
"id": "a50db298e014be491d52332c1ed998b9b68679cd",
"size": "3821",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/python/pants/backend/jvm/tasks/jvm_compile/java/jmake_analysis_parser.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "AGS Script",
"bytes": "273"
},
{
"name": "CSS",
"bytes": "9347"
},
{
"name": "GAP",
"bytes": "4684"
},
{
"name": "Java",
"bytes": "46389"
},
{
"name": "JavaScript",
"bytes": "9523"
},
{
"name": "Python",
"bytes": "2250380"
},
{
"name": "Scala",
"bytes": "5517"
},
{
"name": "Shell",
"bytes": "29381"
},
{
"name": "Thrift",
"bytes": "1674"
}
],
"symlink_target": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.