"""An implementation of Matching Layer."""
import typing
import tensorflow as tf
from keras.engine import Layer
class MatchingLayer(Layer):
"""
Layer that computes a matching matrix between samples in two tensors.
:param normalize: Whether to L2-normalize samples along the
dot product axis before taking the dot product.
If set to True, then the output of the dot product
is the cosine proximity between the two samples.
:param matching_type: the similarity function for matching
:param kwargs: Standard layer keyword arguments.
Examples:
>>> import matchzoo as mz
>>> layer = mz.layers.MatchingLayer(matching_type='dot',
... normalize=True)
>>> num_batch, left_len, right_len, num_dim = 5, 3, 2, 10
>>> layer.build([[num_batch, left_len, num_dim],
... [num_batch, right_len, num_dim]])
"""
def __init__(self, normalize: bool = False,
matching_type: str = 'dot', **kwargs):
""":class:`MatchingLayer` constructor."""
super().__init__(**kwargs)
self._normalize = normalize
self._validate_matching_type(matching_type)
self._matching_type = matching_type
self._shape1 = None
self._shape2 = None
@classmethod
def _validate_matching_type(cls, matching_type: str = 'dot'):
valid_matching_type = ['dot', 'mul', 'plus', 'minus', 'concat']
if matching_type not in valid_matching_type:
raise ValueError(f"{matching_type} is not a valid matching type, "
f"{valid_matching_type} expected.")
def build(self, input_shape: list):
"""
Build the layer.
:param input_shape: the shapes of the input tensors,
            for MatchingLayer we need two input tensors.
"""
# Used purely for shape validation.
if not isinstance(input_shape, list) or len(input_shape) != 2:
raise ValueError('A `MatchingLayer` layer should be called '
'on a list of 2 inputs.')
self._shape1 = input_shape[0]
self._shape2 = input_shape[1]
for idx in 0, 2:
if self._shape1[idx] != self._shape2[idx]:
raise ValueError(
'Incompatible dimensions: '
                    f'{self._shape1[idx]} != {self._shape2[idx]}. '
                    f'Layer shapes: {self._shape1}, {self._shape2}.'
)
def call(self, inputs: list, **kwargs) -> typing.Any:
"""
The computation logic of MatchingLayer.
:param inputs: two input tensors.
"""
x1 = inputs[0]
x2 = inputs[1]
if self._matching_type == 'dot':
if self._normalize:
x1 = tf.math.l2_normalize(x1, axis=2)
x2 = tf.math.l2_normalize(x2, axis=2)
return tf.expand_dims(tf.einsum('abd,acd->abc', x1, x2), 3)
else:
if self._matching_type == 'mul':
def func(x, y):
return x * y
elif self._matching_type == 'plus':
def func(x, y):
return x + y
elif self._matching_type == 'minus':
def func(x, y):
return x - y
elif self._matching_type == 'concat':
def func(x, y):
return tf.concat([x, y], axis=3)
else:
                raise ValueError(f"Invalid matching type. "
                                 f"{self._matching_type} received. "
                                 f"Must be one of `dot`, `mul`, `plus`, "
                                 f"`minus` and `concat`.")
x1_exp = tf.stack([x1] * self._shape2[1], 2)
x2_exp = tf.stack([x2] * self._shape1[1], 1)
return func(x1_exp, x2_exp)
def compute_output_shape(self, input_shape: list) -> tuple:
"""
Calculate the layer output shape.
:param input_shape: the shapes of the input tensors,
            for MatchingLayer we need two input tensors.
"""
if not isinstance(input_shape, list) or len(input_shape) != 2:
raise ValueError('A `MatchingLayer` layer should be called '
'on a list of 2 inputs.')
shape1 = list(input_shape[0])
shape2 = list(input_shape[1])
if len(shape1) != 3 or len(shape2) != 3:
raise ValueError('A `MatchingLayer` layer should be called '
'on 2 inputs with 3 dimensions.')
if shape1[0] != shape2[0] or shape1[2] != shape2[2]:
raise ValueError('A `MatchingLayer` layer should be called '
'on 2 inputs with same 0,2 dimensions.')
if self._matching_type in ['mul', 'plus', 'minus']:
return shape1[0], shape1[1], shape2[1], shape1[2]
elif self._matching_type == 'dot':
return shape1[0], shape1[1], shape2[1], 1
elif self._matching_type == 'concat':
return shape1[0], shape1[1], shape2[1], shape1[2] + shape2[2]
else:
            raise ValueError(f"Invalid `matching_type`. "
                             f"{self._matching_type} received. "
                             f"Must be one of `mul`, `plus`, `minus`, "
                             f"`dot` and `concat`.")
def get_config(self) -> dict:
"""Get the config dict of MatchingLayer."""
config = {
'normalize': self._normalize,
'matching_type': self._matching_type,
}
base_config = super(MatchingLayer, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
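# --- Added usage sketch (not part of the original MatchZoo source) ---
# A minimal example of applying the layer above to two 3-D tensors, assuming
# TensorFlow 2.x (or any version providing tf.random.uniform) and that this
# module imports cleanly. The sizes are arbitrary illustration values; with
# matching_type='dot' the result has shape (batch, left_len, right_len, 1).
if __name__ == "__main__":
    layer = MatchingLayer(matching_type='dot', normalize=True)
    layer.build([[5, 3, 10], [5, 2, 10]])     # (batch, length, dim) shapes
    x_left = tf.random.uniform((5, 3, 10))
    x_right = tf.random.uniform((5, 2, 10))
    output = layer.call([x_left, x_right])
    print(output.shape)                       # expected: (5, 3, 2, 1)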
|
{
"content_hash": "fce33d4ac6969d6d820c99af4253ddd1",
"timestamp": "",
"source": "github",
"line_count": 140,
"max_line_length": 78,
"avg_line_length": 41.09285714285714,
"alnum_prop": 0.5235529289066574,
"repo_name": "faneshion/MatchZoo",
"id": "54dead2a7b4440161ac8352d249bc199d7c1d677",
"size": "5753",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "matchzoo/layers/matching_layer.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "201"
},
{
"name": "Python",
"bytes": "249119"
},
{
"name": "Shell",
"bytes": "2746"
}
],
"symlink_target": ""
}
|
import json
from flask import Flask, request, abort
from st2reactor.sensor.base import Sensor
TRIGGER_REF = 'circle_ci.build_event'
class CircleCIWebhookSensor(Sensor):
def setup(self):
self.host = self._config['host']
self.port = self._config['port']
self._endpoints = self._config['endpoints']
self.app = Flask(__name__)
self.trigger_ref = TRIGGER_REF
self.log = self._sensor_service.get_logger(__name__)
@self.app.route('/status')
def status():
return json.dumps({'response': 'OK'})
@self.app.route('/webhooks/<path:endpoint>', methods=['POST'])
def build_events(endpoint):
if endpoint not in self._endpoints:
self.log.error('Ignoring unknown endpoint : %s', endpoint)
abort(404)
webhook_body = request.get_json()
payload = {}
payload['headers'] = self._get_headers_as_dict(request.headers)
payload['body'] = webhook_body
response = self._sensor_service.dispatch(self.trigger_ref, payload)
self.log.debug(json.dumps(response))
return json.dumps({'response': 'triggerposted'})
def run(self):
self.app.run(host=self.host, port=self.port, threaded=True)
def cleanup(self):
        # This is called when the st2 system goes down. You can perform cleanup
        # operations here, such as closing connections to external systems.
pass
def _get_headers_as_dict(self, headers):
headers_dict = {}
for key, value in headers:
headers_dict[key] = value
return headers_dict
def add_trigger(self, trigger):
pass
def update_trigger(self, trigger):
pass
def remove_trigger(self, trigger):
pass
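# --- Added client sketch (not part of the original pack) ---
# Once the sensor above is running, a CircleCI-style build event can be
# simulated by POSTing JSON to one of the configured endpoints. The host,
# port and "builds" endpoint name below are hypothetical example values that
# would come from the pack configuration.
if __name__ == "__main__":
    import requests

    url = "http://127.0.0.1:6060/webhooks/builds"   # hypothetical host/port/endpoint
    event = {"payload": {"build_num": 42, "status": "success"}}
    resp = requests.post(url, json=event)
    # A configured endpoint should answer with {"response": "triggerposted"}.
    print(resp.status_code, resp.text)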
|
{
"content_hash": "6742148cb98437611e670d81efae12c1",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 95,
"avg_line_length": 30.366666666666667,
"alnum_prop": 0.5998902305159166,
"repo_name": "StackStorm/st2contrib",
"id": "761cd57538bbf89495487aa25d00e67b99921004",
"size": "2602",
"binary": false,
"copies": "6",
"ref": "refs/heads/st2contrib-deprecated-archive",
"path": "archive/packs/circle_ci/sensors/webhook_sensor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Groovy",
"bytes": "8532"
},
{
"name": "Makefile",
"bytes": "5581"
},
{
"name": "Python",
"bytes": "1362240"
},
{
"name": "Ruby",
"bytes": "3081"
},
{
"name": "Shell",
"bytes": "7781"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from setuptools import setup, find_packages
setup(
name='reQuiver',
version='0.0.1',
packages=find_packages(exclude=['*test*']),
install_requires=['requests', 'beautifulsoup4', 'CacheControl'],
setup_requires=["nose>=1.3"],
tests_require=["httmock"],
author='Kyle McChesney',
author_email='mbio.kyle@gmail.com',
    description='Unofficial Python client for querying Archer DX Quiver DB',
license='MIT',
keywords='Archer DX, Quiver, Gene Fusions, DB',
url='TBD',
classifiers=[],
test_suite='nose.collector'
)
|
{
"content_hash": "615819dc500c889c5e2b346a425e6536",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 75,
"avg_line_length": 31.63157894736842,
"alnum_prop": 0.6688851913477537,
"repo_name": "mbiokyle29/reQuiver",
"id": "0ec2c1d883a0ad6875da39a38d668749ef1c8f22",
"size": "601",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "473432"
},
{
"name": "Python",
"bytes": "7598"
}
],
"symlink_target": ""
}
|
# [START aiplatform_get_model_evaluation_video_object_tracking_sample]
from google.cloud import aiplatform
def get_model_evaluation_video_object_tracking_sample(
project: str,
model_id: str,
evaluation_id: str,
location: str = "us-central1",
api_endpoint: str = "us-central1-aiplatform.googleapis.com",
):
"""
To obtain evaluation_id run the following commands where LOCATION
is the region where the model is stored, PROJECT is the project ID,
and MODEL_ID is the ID of your model.
model_client = aiplatform.gapic.ModelServiceClient(
client_options={
'api_endpoint':'LOCATION-aiplatform.googleapis.com'
}
)
evaluations = model_client.list_model_evaluations(parent='projects/PROJECT/locations/LOCATION/models/MODEL_ID')
print("evaluations:", evaluations)
"""
# The AI Platform services require regional API endpoints.
client_options = {"api_endpoint": api_endpoint}
# Initialize client that will be used to create and send requests.
# This client only needs to be created once, and can be reused for multiple requests.
client = aiplatform.gapic.ModelServiceClient(client_options=client_options)
name = client.model_evaluation_path(
project=project, location=location, model=model_id, evaluation=evaluation_id
)
response = client.get_model_evaluation(name=name)
print("response:", response)
# [END aiplatform_get_model_evaluation_video_object_tracking_sample]
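# --- Added example invocation (not part of the Google sample) ---
# The IDs below are placeholders; replace them with values from your own
# project. Listing evaluations as described in the docstring above is how
# the evaluation_id is obtained.
if __name__ == "__main__":
    get_model_evaluation_video_object_tracking_sample(
        project="my-project",          # placeholder
        model_id="1234567890",         # placeholder
        evaluation_id="2345678901",    # placeholder
    )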
|
{
"content_hash": "ad8f5147c4eb4627269c27283a3079ec",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 115,
"avg_line_length": 39.666666666666664,
"alnum_prop": 0.7100840336134454,
"repo_name": "googleapis/python-aiplatform",
"id": "2726681178ea13ab856065103e8f196d02a2a53c",
"size": "2075",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "samples/snippets/model_service/get_model_evaluation_video_object_tracking_sample.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "23977004"
},
{
"name": "Shell",
"bytes": "30668"
}
],
"symlink_target": ""
}
|
__version__ = "trunk"
import copy
import csv
import random
import re
import sys
import textwrap
import itertools
import unicodedata
from evennia.utils.ansi import parse_ansi
py3k = sys.version_info[0] >= 3
if py3k:
unicode = str
basestring = str
itermap = map
iterzip = zip
uni_chr = chr
from html.parser import HTMLParser
else:
itermap = itertools.imap
iterzip = itertools.izip
uni_chr = unichr
from HTMLParser import HTMLParser
if py3k and sys.version_info[1] >= 2:
from html import escape
else:
from cgi import escape
# hrule styles
FRAME = 0
ALL = 1
NONE = 2
HEADER = 3
# Table styles
DEFAULT = 10
MSWORD_FRIENDLY = 11
PLAIN_COLUMNS = 12
RANDOM = 20
_re = re.compile("\033\\[[0-9;]*m")
def _ansi(method):
"decorator for converting ansi in input"
def wrapper(self, *args, **kwargs):
def convert(inp):
if isinstance(inp, basestring):
return parse_ansi("{n%s{n" % inp)
elif hasattr(inp, '__iter__'):
li = []
for element in inp:
if isinstance(element, basestring):
li.append(convert(element))
elif hasattr(element, '__iter__'):
li.append(convert(element))
else:
li.append(element)
return li
return inp
args = [convert(arg) for arg in args]
#kwargs = dict((key, convert(val)) for key, val in kwargs.items())
return method(self, *args, **kwargs)
return wrapper
def _get_size(text):
lines = text.split("\n")
height = len(lines)
width = max([_str_block_width(line) for line in lines])
return (width, height)
class PrettyTable(object):
@_ansi
def __init__(self, field_names=None, **kwargs):
"""Return a new PrettyTable instance
Arguments:
encoding - Unicode encoding scheme used to decode any encoded input
field_names - list or tuple of field names
fields - list or tuple of field names to include in displays
start - index of first data row to include in output
end - index of last data row to include in output PLUS ONE (list slice style)
header - print a header showing field names (True or False)
header_style - stylisation to apply to field names in header ("cap", "title", "upper", "lower" or None)
border - print a border around the table (True or False)
hrules - controls printing of horizontal rules after rows. Allowed values: FRAME, HEADER, ALL, NONE
vrules - controls printing of vertical rules between columns. Allowed values: FRAME, ALL, NONE
int_format - controls formatting of integer data
float_format - controls formatting of floating point data
padding_width - number of spaces on either side of column data (only used if left and right paddings are None)
left_padding_width - number of spaces on left hand side of column data
right_padding_width - number of spaces on right hand side of column data
vertical_char - single character string used to draw vertical lines
horizontal_char - single character string used to draw horizontal lines
junction_char - single character string used to draw line junctions
sortby - name of field to sort rows by
sort_key - sorting key function, applied to data points before sorting
valign - default valign for each row (None, "t", "m" or "b")
reversesort - True or False to sort in descending or ascending order"""
self.encoding = kwargs.get("encoding", "UTF-8")
# Data
self._field_names = []
self._align = {}
self._valign = {}
self._max_width = {}
self._rows = []
if field_names:
self.field_names = field_names
else:
self._widths = []
# Options
self._options = "start end fields header border sortby reversesort sort_key attributes format hrules vrules".split()
self._options.extend("int_format float_format padding_width left_padding_width right_padding_width".split())
self._options.extend("vertical_char horizontal_char junction_char header_style valign xhtml print_empty".split())
for option in self._options:
if option in kwargs:
self._validate_option(option, kwargs[option])
else:
kwargs[option] = None
self._start = kwargs["start"] or 0
self._end = kwargs["end"] or None
self._fields = kwargs["fields"] or None
if kwargs["header"] in (True, False):
self._header = kwargs["header"]
else:
self._header = True
self._header_style = kwargs["header_style"] or None
if kwargs["border"] in (True, False):
self._border = kwargs["border"]
else:
self._border = True
self._hrules = kwargs["hrules"] or FRAME
self._vrules = kwargs["vrules"] or ALL
self._sortby = kwargs["sortby"] or None
if kwargs["reversesort"] in (True, False):
self._reversesort = kwargs["reversesort"]
else:
self._reversesort = False
self._sort_key = kwargs["sort_key"] or (lambda x: x)
self._int_format = kwargs["int_format"] or {}
self._float_format = kwargs["float_format"] or {}
self._padding_width = kwargs["padding_width"] or 1
self._left_padding_width = kwargs["left_padding_width"] or None
self._right_padding_width = kwargs["right_padding_width"] or None
self._vertical_char = kwargs["vertical_char"] or self._unicode("|")
self._horizontal_char = kwargs["horizontal_char"] or self._unicode("-")
self._junction_char = kwargs["junction_char"] or self._unicode("+")
if kwargs["print_empty"] in (True, False):
self._print_empty = kwargs["print_empty"]
else:
self._print_empty = True
self._format = kwargs["format"] or False
self._xhtml = kwargs["xhtml"] or False
self._attributes = kwargs["attributes"] or {}
def _unicode(self, value):
if not isinstance(value, basestring):
value = str(value)
if not isinstance(value, unicode):
value = unicode(value, self.encoding, "strict")
return value
def _justify(self, text, width, align):
excess = width - _str_block_width(text)
if align == "l":
return text + excess * " "
elif align == "r":
return excess * " " + text
else:
if excess % 2:
# Uneven padding
# Put more space on right if text is of odd length...
if _str_block_width(text) % 2:
return (excess//2)*" " + text + (excess//2 + 1)*" "
# and more space on left if text is of even length
else:
return (excess//2 + 1)*" " + text + (excess//2)*" "
# Why distribute extra space this way? To match the behaviour of
# the inbuilt str.center() method.
else:
# Equal padding on either side
return (excess//2)*" " + text + (excess//2)*" "
def __getattr__(self, name):
if name == "rowcount":
return len(self._rows)
elif name == "colcount":
if self._field_names:
return len(self._field_names)
elif self._rows:
return len(self._rows[0])
else:
return 0
else:
raise AttributeError(name)
def __getitem__(self, index):
new = PrettyTable()
new.field_names = self.field_names
for attr in self._options:
setattr(new, "_"+attr, getattr(self, "_"+attr))
setattr(new, "_align", getattr(self, "_align"))
if isinstance(index, slice):
for row in self._rows[index]:
new.add_row(row)
elif isinstance(index, int):
new.add_row(self._rows[index])
else:
raise Exception("Index %s is invalid, must be an integer or slice" % str(index))
return new
if py3k:
def __str__(self):
return self.__unicode__()
else:
def __str__(self):
return self.__unicode__().encode(self.encoding)
def __unicode__(self):
return self.get_string()
##############################
# ATTRIBUTE VALIDATORS #
##############################
# The method _validate_option is all that should be used elsewhere in the code base to validate options.
# It will call the appropriate validation method for that option. The individual validation methods should
# never need to be called directly (although nothing bad will happen if they *are*).
# Validation happens in TWO places.
    # Firstly, in the property setters defined in the ATTRIBUTE MANAGEMENT section.
# Secondly, in the _get_options method, where keyword arguments are mixed with persistent settings
def _validate_option(self, option, val):
        if option == "field_names":
            self._validate_field_names(val)
        elif option in ("start", "end", "max_width", "padding_width", "left_padding_width", "right_padding_width", "format"):
            self._validate_nonnegative_int(option, val)
        elif option == "sortby":
            self._validate_field_name(option, val)
        elif option == "sort_key":
            self._validate_function(option, val)
        elif option == "hrules":
            self._validate_hrules(option, val)
        elif option == "vrules":
            self._validate_vrules(option, val)
        elif option == "fields":
            self._validate_all_field_names(option, val)
        elif option in ("header", "border", "reversesort", "xhtml", "print_empty"):
            self._validate_true_or_false(option, val)
        elif option == "header_style":
            self._validate_header_style(val)
        elif option == "int_format":
            self._validate_int_format(option, val)
        elif option == "float_format":
            self._validate_float_format(option, val)
        elif option in ("vertical_char", "horizontal_char", "junction_char"):
            self._validate_single_char(option, val)
        elif option == "attributes":
            self._validate_attributes(option, val)
else:
raise Exception("Unrecognised option: %s!" % option)
def _validate_field_names(self, val):
# Check for appropriate length
if self._field_names:
try:
assert len(val) == len(self._field_names)
except AssertionError:
raise Exception("Field name list has incorrect number of values, (actual) %d!=%d (expected)" % (len(val), len(self._field_names)))
if self._rows:
try:
assert len(val) == len(self._rows[0])
except AssertionError:
raise Exception("Field name list has incorrect number of values, (actual) %d!=%d (expected)" % (len(val), len(self._rows[0])))
# Check for uniqueness
try:
assert len(val) == len(set(val))
except AssertionError:
raise Exception("Field names must be unique!")
def _validate_header_style(self, val):
try:
assert val in ("cap", "title", "upper", "lower", None)
except AssertionError:
raise Exception("Invalid header style, use cap, title, upper, lower or None!")
def _validate_align(self, val):
try:
assert val in ["l","c","r"]
except AssertionError:
raise Exception("Alignment %s is invalid, use l, c or r!" % val)
def _validate_valign(self, val):
try:
assert val in ["t","m","b",None]
except AssertionError:
raise Exception("Alignment %s is invalid, use t, m, b or None!" % val)
def _validate_nonnegative_int(self, name, val):
try:
assert int(val) >= 0
except AssertionError:
raise Exception("Invalid value for %s: %s!" % (name, self._unicode(val)))
def _validate_true_or_false(self, name, val):
try:
assert val in (True, False)
except AssertionError:
raise Exception("Invalid value for %s! Must be True or False." % name)
def _validate_int_format(self, name, val):
if val == "":
return
try:
assert type(val) in (str, unicode)
assert val.isdigit()
except AssertionError:
raise Exception("Invalid value for %s! Must be an integer format string." % name)
def _validate_float_format(self, name, val):
if val == "":
return
try:
assert type(val) in (str, unicode)
assert "." in val
bits = val.split(".")
assert len(bits) <= 2
assert bits[0] == "" or bits[0].isdigit()
assert bits[1] == "" or bits[1].isdigit()
except AssertionError:
raise Exception("Invalid value for %s! Must be a float format string." % name)
def _validate_function(self, name, val):
try:
assert hasattr(val, "__call__")
except AssertionError:
raise Exception("Invalid value for %s! Must be a function." % name)
def _validate_hrules(self, name, val):
try:
assert val in (ALL, FRAME, HEADER, NONE)
except AssertionError:
raise Exception("Invalid value for %s! Must be ALL, FRAME, HEADER or NONE." % name)
def _validate_vrules(self, name, val):
try:
assert val in (ALL, FRAME, NONE)
except AssertionError:
raise Exception("Invalid value for %s! Must be ALL, FRAME, or NONE." % name)
def _validate_field_name(self, name, val):
try:
assert (val in self._field_names) or (val is None)
except AssertionError:
raise Exception("Invalid field name: %s!" % val)
def _validate_all_field_names(self, name, val):
try:
for x in val:
self._validate_field_name(name, x)
except AssertionError:
raise Exception("fields must be a sequence of field names!")
def _validate_single_char(self, name, val):
try:
assert _str_block_width(val) == 1
except AssertionError:
raise Exception("Invalid value for %s! Must be a string of length 1." % name)
def _validate_attributes(self, name, val):
try:
assert isinstance(val, dict)
except AssertionError:
raise Exception("attributes must be a dictionary of name/value pairs!")
##############################
# ATTRIBUTE MANAGEMENT #
##############################
    def _get_field_names(self):
        """The names of the fields
        Arguments:
        fields - list or tuple of field names"""
        return self._field_names
def _set_field_names(self, val):
val = [self._unicode(x) for x in val]
self._validate_option("field_names", val)
if self._field_names:
old_names = self._field_names[:]
self._field_names = val
if self._align and old_names:
for old_name, new_name in zip(old_names, val):
self._align[new_name] = self._align[old_name]
for old_name in old_names:
                if old_name not in self._field_names:
self._align.pop(old_name)
else:
for field in self._field_names:
self._align[field] = "l"
if self._valign and old_names:
for old_name, new_name in zip(old_names, val):
self._valign[new_name] = self._valign[old_name]
for old_name in old_names:
                if old_name not in self._field_names:
self._valign.pop(old_name)
else:
for field in self._field_names:
self._valign[field] = "t"
field_names = property(_get_field_names, _set_field_names)
def _get_align(self):
return self._align
def _set_align(self, val):
self._validate_align(val)
for field in self._field_names:
self._align[field] = val
align = property(_get_align, _set_align)
def _get_valign(self):
return self._valign
def _set_valign(self, val):
self._validate_valign(val)
for field in self._field_names:
self._valign[field] = val
valign = property(_get_valign, _set_valign)
def _get_max_width(self):
return self._max_width
def _set_max_width(self, val):
self._validate_option("max_width", val)
for field in self._field_names:
self._max_width[field] = val
max_width = property(_get_max_width, _set_max_width)
def _get_fields(self):
"""List or tuple of field names to include in displays
Arguments:
fields - list or tuple of field names to include in displays"""
return self._fields
def _set_fields(self, val):
self._validate_option("fields", val)
self._fields = val
fields = property(_get_fields, _set_fields)
def _get_start(self):
"""Start index of the range of rows to print
Arguments:
start - index of first data row to include in output"""
return self._start
def _set_start(self, val):
self._validate_option("start", val)
self._start = val
start = property(_get_start, _set_start)
def _get_end(self):
"""End index of the range of rows to print
Arguments:
end - index of last data row to include in output PLUS ONE (list slice style)"""
return self._end
def _set_end(self, val):
self._validate_option("end", val)
self._end = val
end = property(_get_end, _set_end)
def _get_sortby(self):
"""Name of field by which to sort rows
Arguments:
sortby - field name to sort by"""
return self._sortby
def _set_sortby(self, val):
self._validate_option("sortby", val)
self._sortby = val
sortby = property(_get_sortby, _set_sortby)
def _get_reversesort(self):
"""Controls direction of sorting (ascending vs descending)
Arguments:
        reversesort - set to True to sort by descending order, or False to sort by ascending order"""
return self._reversesort
def _set_reversesort(self, val):
self._validate_option("reversesort", val)
self._reversesort = val
reversesort = property(_get_reversesort, _set_reversesort)
def _get_sort_key(self):
"""Sorting key function, applied to data points before sorting
Arguments:
sort_key - a function which takes one argument and returns something to be sorted"""
return self._sort_key
def _set_sort_key(self, val):
self._validate_option("sort_key", val)
self._sort_key = val
sort_key = property(_get_sort_key, _set_sort_key)
def _get_header(self):
"""Controls printing of table header with field names
Arguments:
header - print a header showing field names (True or False)"""
return self._header
def _set_header(self, val):
self._validate_option("header", val)
self._header = val
header = property(_get_header, _set_header)
def _get_header_style(self):
"""Controls stylisation applied to field names in header
Arguments:
header_style - stylisation to apply to field names in header ("cap", "title", "upper", "lower" or None)"""
return self._header_style
def _set_header_style(self, val):
self._validate_header_style(val)
self._header_style = val
header_style = property(_get_header_style, _set_header_style)
def _get_border(self):
"""Controls printing of border around table
Arguments:
border - print a border around the table (True or False)"""
return self._border
def _set_border(self, val):
self._validate_option("border", val)
self._border = val
border = property(_get_border, _set_border)
def _get_hrules(self):
"""Controls printing of horizontal rules after rows
Arguments:
hrules - horizontal rules style. Allowed values: FRAME, ALL, HEADER, NONE"""
return self._hrules
def _set_hrules(self, val):
self._validate_option("hrules", val)
self._hrules = val
hrules = property(_get_hrules, _set_hrules)
def _get_vrules(self):
"""Controls printing of vertical rules between columns
Arguments:
vrules - vertical rules style. Allowed values: FRAME, ALL, NONE"""
return self._vrules
def _set_vrules(self, val):
self._validate_option("vrules", val)
self._vrules = val
vrules = property(_get_vrules, _set_vrules)
def _get_int_format(self):
"""Controls formatting of integer data
Arguments:
int_format - integer format string"""
return self._int_format
def _set_int_format(self, val):
# self._validate_option("int_format", val)
for field in self._field_names:
self._int_format[field] = val
int_format = property(_get_int_format, _set_int_format)
def _get_float_format(self):
"""Controls formatting of floating point data
Arguments:
float_format - floating point format string"""
return self._float_format
def _set_float_format(self, val):
# self._validate_option("float_format", val)
for field in self._field_names:
self._float_format[field] = val
float_format = property(_get_float_format, _set_float_format)
def _get_padding_width(self):
"""The number of empty spaces between a column's edge and its content
Arguments:
padding_width - number of spaces, must be a positive integer"""
return self._padding_width
def _set_padding_width(self, val):
self._validate_option("padding_width", val)
self._padding_width = val
padding_width = property(_get_padding_width, _set_padding_width)
def _get_left_padding_width(self):
"""The number of empty spaces between a column's left edge and its content
Arguments:
left_padding - number of spaces, must be a positive integer"""
return self._left_padding_width
def _set_left_padding_width(self, val):
self._validate_option("left_padding_width", val)
self._left_padding_width = val
left_padding_width = property(_get_left_padding_width, _set_left_padding_width)
def _get_right_padding_width(self):
"""The number of empty spaces between a column's right edge and its content
Arguments:
right_padding - number of spaces, must be a positive integer"""
return self._right_padding_width
def _set_right_padding_width(self, val):
self._validate_option("right_padding_width", val)
self._right_padding_width = val
right_padding_width = property(_get_right_padding_width, _set_right_padding_width)
    def _get_vertical_char(self):
        """The character used when printing table borders to draw vertical lines
Arguments:
vertical_char - single character string used to draw vertical lines"""
return self._vertical_char
def _set_vertical_char(self, val):
val = self._unicode(val)
self._validate_option("vertical_char", val)
self._vertical_char = val
vertical_char = property(_get_vertical_char, _set_vertical_char)
    def _get_horizontal_char(self):
        """The character used when printing table borders to draw horizontal lines
Arguments:
horizontal_char - single character string used to draw horizontal lines"""
return self._horizontal_char
def _set_horizontal_char(self, val):
val = self._unicode(val)
self._validate_option("horizontal_char", val)
self._horizontal_char = val
horizontal_char = property(_get_horizontal_char, _set_horizontal_char)
    def _get_junction_char(self):
        """The character used when printing table borders to draw line junctions
Arguments:
junction_char - single character string used to draw line junctions"""
return self._junction_char
def _set_junction_char(self, val):
val = self._unicode(val)
        self._validate_option("junction_char", val)
self._junction_char = val
junction_char = property(_get_junction_char, _set_junction_char)
def _get_format(self):
"""Controls whether or not HTML tables are formatted to match styling options
Arguments:
format - True or False"""
return self._format
def _set_format(self, val):
self._validate_option("format", val)
self._format = val
format = property(_get_format, _set_format)
def _get_print_empty(self):
"""Controls whether or not empty tables produce a header and frame or just an empty string
Arguments:
print_empty - True or False"""
return self._print_empty
def _set_print_empty(self, val):
self._validate_option("print_empty", val)
self._print_empty = val
print_empty = property(_get_print_empty, _set_print_empty)
def _get_attributes(self):
"""A dictionary of HTML attribute name/value pairs to be included in the <table> tag when printing HTML
Arguments:
attributes - dictionary of attributes"""
return self._attributes
def _set_attributes(self, val):
self._validate_option("attributes", val)
self._attributes = val
attributes = property(_get_attributes, _set_attributes)
##############################
# OPTION MIXER #
##############################
def _get_options(self, kwargs):
options = {}
for option in self._options:
if option in kwargs:
self._validate_option(option, kwargs[option])
options[option] = kwargs[option]
else:
options[option] = getattr(self, "_"+option)
return options
##############################
# PRESET STYLE LOGIC #
##############################
def set_style(self, style):
if style == DEFAULT:
self._set_default_style()
elif style == MSWORD_FRIENDLY:
self._set_msword_style()
elif style == PLAIN_COLUMNS:
self._set_columns_style()
elif style == RANDOM:
self._set_random_style()
else:
raise Exception("Invalid pre-set style!")
def _set_default_style(self):
self.header = True
self.border = True
self._hrules = FRAME
self._vrules = ALL
self.padding_width = 1
self.left_padding_width = 1
self.right_padding_width = 1
self.vertical_char = "|"
self.horizontal_char = "-"
self.junction_char = "+"
def _set_msword_style(self):
self.header = True
self.border = True
self._hrules = NONE
self.padding_width = 1
self.left_padding_width = 1
self.right_padding_width = 1
self.vertical_char = "|"
def _set_columns_style(self):
self.header = True
self.border = False
self.padding_width = 1
self.left_padding_width = 0
self.right_padding_width = 8
def _set_random_style(self):
# Just for fun!
self.header = random.choice((True, False))
self.border = random.choice((True, False))
self._hrules = random.choice((ALL, FRAME, HEADER, NONE))
self._vrules = random.choice((ALL, FRAME, NONE))
self.left_padding_width = random.randint(0,5)
self.right_padding_width = random.randint(0,5)
        self.vertical_char = random.choice("~!@#$%^&*()_+|-={}[];':\",./;<>?")
        self.horizontal_char = random.choice("~!@#$%^&*()_+|-={}[];':\",./;<>?")
        self.junction_char = random.choice("~!@#$%^&*()_+|-={}[];':\",./;<>?")
##############################
# DATA INPUT METHODS #
##############################
@_ansi
def add_row(self, row):
"""Add a row to the table
Arguments:
row - row of data, should be a list with as many elements as the table
has fields"""
if self._field_names and len(row) != len(self._field_names):
raise Exception("Row has incorrect number of values, (actual) %d!=%d (expected)" %(len(row),len(self._field_names)))
if not self._field_names:
self.field_names = [("Field %d" % (n+1)) for n in range(0,len(row))]
self._rows.append(list(row))
    def del_row(self, row_index):
        """Delete a row from the table
Arguments:
row_index - The index of the row you want to delete. Indexing starts at 0."""
if row_index > len(self._rows)-1:
            raise Exception("Can't delete row at index %d, table only has %d rows!" % (row_index, len(self._rows)))
del self._rows[row_index]
@_ansi
def add_column(self, fieldname, column, align="l", valign="t"):
"""Add a column to the table.
Arguments:
fieldname - name of the field to contain the new column of data
column - column of data, should be a list with as many elements as the
table has rows
align - desired alignment for this column - "l" for left, "c" for centre and "r" for right
valign - desired vertical alignment for new columns - "t" for top, "m" for middle and "b" for bottom"""
if len(self._rows) in (0, len(column)):
self._validate_align(align)
self._validate_valign(valign)
self._field_names.append(fieldname)
self._align[fieldname] = align
self._valign[fieldname] = valign
for i in range(0, len(column)):
if len(self._rows) < i+1:
self._rows.append([])
self._rows[i].append(column[i])
else:
raise Exception("Column length %d does not match number of rows %d!" % (len(column), len(self._rows)))
def clear_rows(self):
"""Delete all rows from the table but keep the current field names"""
self._rows = []
def clear(self):
"""Delete all rows and field names from the table, maintaining nothing but styling options"""
self._rows = []
self._field_names = []
self._widths = []
##############################
# MISC PUBLIC METHODS #
##############################
def copy(self):
return copy.deepcopy(self)
##############################
# MISC PRIVATE METHODS #
##############################
def _format_value(self, field, value):
if isinstance(value, int) and field in self._int_format:
value = self._unicode(("%%%sd" % self._int_format[field]) % value)
elif isinstance(value, float) and field in self._float_format:
value = self._unicode(("%%%sf" % self._float_format[field]) % value)
return self._unicode(value)
def _compute_widths(self, rows, options):
if options["header"]:
widths = [_get_size(field)[0] for field in self._field_names]
else:
widths = len(self.field_names) * [0]
for row in rows:
for index, value in enumerate(row):
fieldname = self.field_names[index]
if fieldname in self.max_width:
widths[index] = max(widths[index], min(_get_size(value)[0], self.max_width[fieldname]))
else:
widths[index] = max(widths[index], _get_size(value)[0])
self._widths = widths
def _get_padding_widths(self, options):
if options["left_padding_width"] is not None:
lpad = options["left_padding_width"]
else:
lpad = options["padding_width"]
if options["right_padding_width"] is not None:
rpad = options["right_padding_width"]
else:
rpad = options["padding_width"]
return lpad, rpad
def _get_rows(self, options):
"""Return only those data rows that should be printed, based on slicing and sorting.
Arguments:
options - dictionary of option settings."""
# Make a copy of only those rows in the slice range
rows = copy.deepcopy(self._rows[options["start"]:options["end"]])
# Sort if necessary
if options["sortby"]:
sortindex = self._field_names.index(options["sortby"])
# Decorate
rows = [[row[sortindex]]+row for row in rows]
# Sort
rows.sort(reverse=options["reversesort"], key=options["sort_key"])
# Undecorate
rows = [row[1:] for row in rows]
return rows
def _format_row(self, row, options):
return [self._format_value(field, value) for (field, value) in zip(self._field_names, row)]
def _format_rows(self, rows, options):
return [self._format_row(row, options) for row in rows]
##############################
# PLAIN TEXT STRING METHODS #
##############################
def get_string(self, **kwargs):
"""Return string representation of table in current state.
Arguments:
start - index of first data row to include in output
end - index of last data row to include in output PLUS ONE (list slice style)
fields - names of fields (columns) to include
header - print a header showing field names (True or False)
border - print a border around the table (True or False)
hrules - controls printing of horizontal rules after rows. Allowed values: ALL, FRAME, HEADER, NONE
vrules - controls printing of vertical rules between columns. Allowed values: FRAME, ALL, NONE
int_format - controls formatting of integer data
float_format - controls formatting of floating point data
padding_width - number of spaces on either side of column data (only used if left and right paddings are None)
left_padding_width - number of spaces on left hand side of column data
right_padding_width - number of spaces on right hand side of column data
vertical_char - single character string used to draw vertical lines
horizontal_char - single character string used to draw horizontal lines
junction_char - single character string used to draw line junctions
sortby - name of field to sort rows by
sort_key - sorting key function, applied to data points before sorting
reversesort - True or False to sort in descending or ascending order
        print_empty - if True, stringify just the header for an empty table, if False return an empty string"""
options = self._get_options(kwargs)
lines = []
# Don't think too hard about an empty table
# Is this the desired behaviour? Maybe we should still print the header?
if self.rowcount == 0 and (not options["print_empty"] or not options["border"]):
return ""
#print "prettytable:", self._rows
# Get the rows we need to print, taking into account slicing, sorting, etc.
rows = self._get_rows(options)
# Turn all data in all rows into Unicode, formatted as desired
formatted_rows = self._format_rows(rows, options)
# Compute column widths
self._compute_widths(formatted_rows, options)
# Add header or top of border
self._hrule = self._stringify_hrule(options)
if options["header"]:
lines.append(self._stringify_header(options))
elif options["border"] and options["hrules"] in (ALL, FRAME):
lines.append(self._hrule)
# Add rows
for row in formatted_rows:
lines.append(self._stringify_row(row, options))
# Add bottom of border
if options["border"] and options["hrules"] == FRAME:
lines.append(self._hrule)
return self._unicode("\n").join(lines)
def _stringify_hrule(self, options):
if not options["border"]:
return ""
lpad, rpad = self._get_padding_widths(options)
if options['vrules'] in (ALL, FRAME):
bits = [options["junction_char"]]
else:
bits = [options["horizontal_char"]]
# For tables with no data or fieldnames
if not self._field_names:
bits.append(options["junction_char"])
return "".join(bits)
for field, width in zip(self._field_names, self._widths):
if options["fields"] and field not in options["fields"]:
continue
bits.append((width+lpad+rpad)*options["horizontal_char"])
if options['vrules'] == ALL:
bits.append(options["junction_char"])
else:
bits.append(options["horizontal_char"])
if options["vrules"] == FRAME:
bits.pop()
bits.append(options["junction_char"])
return "".join(bits)
def _stringify_header(self, options):
bits = []
lpad, rpad = self._get_padding_widths(options)
if options["border"]:
if options["hrules"] in (ALL, FRAME):
bits.append(self._hrule)
bits.append("\n")
if options["vrules"] in (ALL, FRAME):
bits.append(options["vertical_char"])
else:
bits.append(" ")
# For tables with no data or field names
if not self._field_names:
if options["vrules"] in (ALL, FRAME):
bits.append(options["vertical_char"])
else:
bits.append(" ")
for field, width, in zip(self._field_names, self._widths):
if options["fields"] and field not in options["fields"]:
continue
if self._header_style == "cap":
fieldname = field.capitalize()
elif self._header_style == "title":
fieldname = field.title()
elif self._header_style == "upper":
fieldname = field.upper()
elif self._header_style == "lower":
fieldname = field.lower()
else:
fieldname = field
bits.append(" " * lpad + self._justify(fieldname, width, self._align[field]) + " " * rpad)
if options["border"]:
if options["vrules"] == ALL:
bits.append(options["vertical_char"])
else:
bits.append(" ")
# If vrules is FRAME, then we just appended a space at the end
# of the last field, when we really want a vertical character
if options["border"] and options["vrules"] == FRAME:
bits.pop()
bits.append(options["vertical_char"])
if options["border"] and options["hrules"] != NONE:
bits.append("\n")
bits.append(self._hrule)
return "".join(bits)
def _stringify_row(self, row, options):
for index, field, value, width, in zip(range(0,len(row)), self._field_names, row, self._widths):
# Enforce max widths
lines = value.split("\n")
new_lines = []
for line in lines:
if _str_block_width(line) > width:
line = textwrap.fill(line, width)
new_lines.append(line)
lines = new_lines
value = "\n".join(lines)
row[index] = value
row_height = 0
for c in row:
h = _get_size(c)[1]
if h > row_height:
row_height = h
bits = []
lpad, rpad = self._get_padding_widths(options)
for y in range(0, row_height):
bits.append([])
if options["border"]:
if options["vrules"] in (ALL, FRAME):
bits[y].append(self.vertical_char)
else:
bits[y].append(" ")
for field, value, width, in zip(self._field_names, row, self._widths):
valign = self._valign[field]
lines = value.split("\n")
dHeight = row_height - len(lines)
if dHeight:
if valign == "m":
lines = [""] * int(dHeight / 2) + lines + [""] * (dHeight - int(dHeight / 2))
elif valign == "b":
lines = [""] * dHeight + lines
else:
lines = lines + [""] * dHeight
y = 0
for l in lines:
if options["fields"] and field not in options["fields"]:
continue
bits[y].append(" " * lpad + self._justify(l, width, self._align[field]) + " " * rpad)
if options["border"]:
if options["vrules"] == ALL:
bits[y].append(self.vertical_char)
else:
bits[y].append(" ")
y += 1
# If vrules is FRAME, then we just appended a space at the end
# of the last field, when we really want a vertical character
for y in range(0, row_height):
if options["border"] and options["vrules"] == FRAME:
bits[y].pop()
bits[y].append(options["vertical_char"])
if options["border"] and options["hrules"]== ALL:
bits[row_height-1].append("\n")
bits[row_height-1].append(self._hrule)
for y in range(0, row_height):
bits[y] = "".join(bits[y])
return "\n".join(bits)
##############################
# HTML STRING METHODS #
##############################
def get_html_string(self, **kwargs):
"""Return string representation of HTML formatted version of table in current state.
Arguments:
start - index of first data row to include in output
end - index of last data row to include in output PLUS ONE (list slice style)
fields - names of fields (columns) to include
header - print a header showing field names (True or False)
border - print a border around the table (True or False)
hrules - controls printing of horizontal rules after rows. Allowed values: ALL, FRAME, HEADER, NONE
vrules - controls printing of vertical rules between columns. Allowed values: FRAME, ALL, NONE
int_format - controls formatting of integer data
float_format - controls formatting of floating point data
padding_width - number of spaces on either side of column data (only used if left and right paddings are None)
left_padding_width - number of spaces on left hand side of column data
right_padding_width - number of spaces on right hand side of column data
sortby - name of field to sort rows by
sort_key - sorting key function, applied to data points before sorting
attributes - dictionary of name/value pairs to include as HTML attributes in the <table> tag
xhtml - print <br/> tags if True, <br> tags if false"""
options = self._get_options(kwargs)
if options["format"]:
string = self._get_formatted_html_string(options)
else:
string = self._get_simple_html_string(options)
return string
def _get_simple_html_string(self, options):
lines = []
if options["xhtml"]:
linebreak = "<br/>"
else:
linebreak = "<br>"
open_tag = []
open_tag.append("<table")
if options["attributes"]:
for attr_name in options["attributes"]:
open_tag.append(" %s=\"%s\"" % (attr_name, options["attributes"][attr_name]))
open_tag.append(">")
lines.append("".join(open_tag))
# Headers
if options["header"]:
lines.append(" <tr>")
for field in self._field_names:
if options["fields"] and field not in options["fields"]:
continue
lines.append(" <th>%s</th>" % escape(field).replace("\n", linebreak))
lines.append(" </tr>")
# Data
rows = self._get_rows(options)
formatted_rows = self._format_rows(rows, options)
for row in formatted_rows:
lines.append(" <tr>")
for field, datum in zip(self._field_names, row):
if options["fields"] and field not in options["fields"]:
continue
lines.append(" <td>%s</td>" % escape(datum).replace("\n", linebreak))
lines.append(" </tr>")
lines.append("</table>")
return self._unicode("\n").join(lines)
def _get_formatted_html_string(self, options):
lines = []
lpad, rpad = self._get_padding_widths(options)
if options["xhtml"]:
linebreak = "<br/>"
else:
linebreak = "<br>"
open_tag = []
open_tag.append("<table")
if options["border"]:
if options["hrules"] == ALL and options["vrules"] == ALL:
open_tag.append(" frame=\"box\" rules=\"all\"")
elif options["hrules"] == FRAME and options["vrules"] == FRAME:
open_tag.append(" frame=\"box\"")
elif options["hrules"] == FRAME and options["vrules"] == ALL:
open_tag.append(" frame=\"box\" rules=\"cols\"")
elif options["hrules"] == FRAME:
open_tag.append(" frame=\"hsides\"")
elif options["hrules"] == ALL:
open_tag.append(" frame=\"hsides\" rules=\"rows\"")
elif options["vrules"] == FRAME:
open_tag.append(" frame=\"vsides\"")
elif options["vrules"] == ALL:
open_tag.append(" frame=\"vsides\" rules=\"cols\"")
if options["attributes"]:
for attr_name in options["attributes"]:
open_tag.append(" %s=\"%s\"" % (attr_name, options["attributes"][attr_name]))
open_tag.append(">")
lines.append("".join(open_tag))
# Headers
if options["header"]:
lines.append(" <tr>")
for field in self._field_names:
if options["fields"] and field not in options["fields"]:
continue
lines.append(" <th style=\"padding-left: %dem; padding-right: %dem; text-align: center\">%s</th>" % (lpad, rpad, escape(field).replace("\n", linebreak)))
lines.append(" </tr>")
# Data
rows = self._get_rows(options)
formatted_rows = self._format_rows(rows, options)
aligns = []
valigns = []
for field in self._field_names:
aligns.append({ "l" : "left", "r" : "right", "c" : "center" }[self._align[field]])
valigns.append({"t" : "top", "m" : "middle", "b" : "bottom"}[self._valign[field]])
for row in formatted_rows:
lines.append(" <tr>")
for field, datum, align, valign in zip(self._field_names, row, aligns, valigns):
if options["fields"] and field not in options["fields"]:
continue
lines.append(" <td style=\"padding-left: %dem; padding-right: %dem; text-align: %s; vertical-align: %s\">%s</td>" % (lpad, rpad, align, valign, escape(datum).replace("\n", linebreak)))
lines.append(" </tr>")
lines.append("</table>")
return self._unicode("\n").join(lines)
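# --- Added sketch (not part of the original module) ---
# Shows how the preset styles and hrule constants defined near the top of this
# file are meant to be combined with a PrettyTable instance. The data is
# invented; evennia's ansi module must be importable for this file to load.
def _example_styles():
    table = PrettyTable(["Animal", "Legs"])
    table.add_row(["Spider", 8])
    table.add_row(["Dog", 4])
    table.set_style(MSWORD_FRIENDLY)   # preset: no horizontal rules
    msword = table.get_string()
    table.set_style(DEFAULT)
    table.hrules = ALL                 # draw a rule after every data row
    ruled = table.get_string()
    return msword, ruled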
##############################
# UNICODE WIDTH FUNCTIONS #
##############################
def _char_block_width(char):
# Basic Latin, which is probably the most common case
#if char in xrange(0x0021, 0x007e):
#if char >= 0x0021 and char <= 0x007e:
if 0x0021 <= char <= 0x007e:
return 1
# Chinese, Japanese, Korean (common)
if 0x4e00 <= char <= 0x9fff:
return 2
# Hangul
if 0xac00 <= char <= 0xd7af:
return 2
# Combining?
if unicodedata.combining(uni_chr(char)):
return 0
# Hiragana and Katakana
if 0x3040 <= char <= 0x309f or 0x30a0 <= char <= 0x30ff:
return 2
# Full-width Latin characters
if 0xff01 <= char <= 0xff60:
return 2
# CJK punctuation
if 0x3000 <= char <= 0x303e:
return 2
# Backspace and delete
if char in (0x0008, 0x007f):
return -1
# Other control characters
    elif 0x0000 <= char <= 0x001f:
return 0
# Take a guess
return 1
def _str_block_width(val):
return sum(itermap(_char_block_width, itermap(ord, _re.sub("", val))))
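# --- Added sketch (not part of the original module) ---
# Illustrates what the width helpers above compute: ASCII characters count as
# one display column, CJK characters as two, and ANSI escape codes are
# stripped before counting.
def _example_block_width():
    ascii_width = _str_block_width("abc")                  # 3
    cjk_width = _str_block_width(u"\u65e5\u672c\u8a9e")    # 6 (three kanji)
    return ascii_width, cjk_width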
##############################
# TABLE FACTORIES #
##############################
def from_csv(fp, field_names = None, **kwargs):
dialect = csv.Sniffer().sniff(fp.read(1024))
fp.seek(0)
reader = csv.reader(fp, dialect)
table = PrettyTable(**kwargs)
if field_names:
table.field_names = field_names
else:
if py3k:
table.field_names = [x.strip() for x in next(reader)]
else:
table.field_names = [x.strip() for x in reader.next()]
for row in reader:
table.add_row([x.strip() for x in row])
return table
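# --- Added sketch (not part of the original module) ---
# Minimal illustration of from_csv with an in-memory file object; the column
# names and values are invented. Assumes Python 3, where io.StringIO accepts
# the text literal directly.
def _example_from_csv():
    import io
    data = io.StringIO("City,Population\nAdelaide,1158259\nDarwin,120900\n")
    return from_csv(data).get_string()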
def from_db_cursor(cursor, **kwargs):
if cursor.description:
table = PrettyTable(**kwargs)
table.field_names = [col[0] for col in cursor.description]
for row in cursor.fetchall():
table.add_row(row)
return table
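# --- Added sketch (not part of the original module) ---
# from_db_cursor works with any DB-API cursor exposing .description and
# .fetchall(); sqlite3's in-memory database keeps the example self-contained.
def _example_from_db_cursor():
    import sqlite3
    connection = sqlite3.connect(":memory:")
    cursor = connection.cursor()
    cursor.execute("CREATE TABLE city (name TEXT, population INTEGER)")
    cursor.execute("INSERT INTO city VALUES ('Adelaide', 1158259)")
    cursor.execute("SELECT name, population FROM city")
    return from_db_cursor(cursor).get_string()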
class TableHandler(HTMLParser):
def __init__(self, **kwargs):
HTMLParser.__init__(self)
self.kwargs = kwargs
self.tables = []
self.last_row = []
self.rows = []
self.max_row_width = 0
self.active = None
self.last_content = ""
self.is_last_row_header = False
def handle_starttag(self,tag, attrs):
self.active = tag
if tag == "th":
self.is_last_row_header = True
def handle_endtag(self,tag):
if tag in ["th", "td"]:
stripped_content = self.last_content.strip()
self.last_row.append(stripped_content)
if tag == "tr":
self.rows.append(
(self.last_row, self.is_last_row_header))
self.max_row_width = max(self.max_row_width, len(self.last_row))
self.last_row = []
self.is_last_row_header = False
if tag == "table":
table = self.generate_table(self.rows)
self.tables.append(table)
self.rows = []
self.last_content = " "
self.active = None
def handle_data(self, data):
self.last_content += data
def generate_table(self, rows):
"""
        Generates a PrettyTable object from a list of rows.
"""
table = PrettyTable(**self.kwargs)
for row in self.rows:
if len(row[0]) < self.max_row_width:
appends = self.max_row_width - len(row[0])
                for i in range(appends):
row[0].append("-")
if row[1] == True:
self.make_fields_unique(row[0])
table.field_names = row[0]
else:
table.add_row(row[0])
return table
def make_fields_unique(self, fields):
"""
        Iterates over the fields and makes each field name unique.
"""
for i in range(0, len(fields)):
for j in range(i+1, len(fields)):
if fields[i] == fields[j]:
fields[j] += "'"
def from_html(html_code, **kwargs):
"""
Generates a list of PrettyTables from a string of HTML code. Each <table> in
the HTML becomes one PrettyTable object.
"""
parser = TableHandler(**kwargs)
parser.feed(html_code)
return parser.tables
def from_html_one(html_code, **kwargs):
"""
    Generates a PrettyTable from a string of HTML code which contains only a
single <table>
"""
tables = from_html(html_code, **kwargs)
try:
assert len(tables) == 1
except AssertionError:
raise Exception("More than one <table> in provided HTML code! Use from_html instead.")
return tables[0]
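# --- Added sketch (not part of the original module) ---
# from_html_one turns a single <table> element back into a PrettyTable; the
# markup below is an invented two-row table.
def _example_from_html_one():
    html_code = (
        "<table>"
        "<tr><th>City</th><th>Population</th></tr>"
        "<tr><td>Adelaide</td><td>1158259</td></tr>"
        "</table>"
    )
    return from_html_one(html_code).get_string()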
##############################
# MAIN (TEST FUNCTION) #
##############################
def main():
x = PrettyTable(["City name", "Area", "Population", "Annual Rainfall"])
x.sortby = "Population"
x.reversesort = True
x.int_format["Area"] = "04d"
x.float_format = "6.1f"
x.align["City name"] = "l" # Left align city names
x.add_row(["Adelaide", 1295, 1158259, 600.5])
x.add_row(["Brisbane", 5905, 1857594, 1146.4])
x.add_row(["Darwin", 112, 120900, 1714.7])
x.add_row(["Hobart", 1357, 205556, 619.5])
x.add_row(["Sydney", 2058, 4336374, 1214.8])
x.add_row(["Melbourne", 1566, 3806092, 646.9])
x.add_row(["Perth", 5386, 1554769, 869.4])
print(x)
if __name__ == "__main__":
main()
|
{
"content_hash": "ad72d9db1584fe142d16d02c4a95ff1a",
"timestamp": "",
"source": "github",
"line_count": 1474,
"max_line_length": 207,
"avg_line_length": 36.29579375848033,
"alnum_prop": 0.563196261682243,
"repo_name": "TheTypoMaster/evennia",
"id": "800afb3d7c7a993a6ba0bd99536a4720d06e3bff",
"size": "55099",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "evennia/utils/prettytable.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "19127"
},
{
"name": "Emacs Lisp",
"bytes": "2734"
},
{
"name": "HTML",
"bytes": "13351"
},
{
"name": "JavaScript",
"bytes": "24398"
},
{
"name": "Python",
"bytes": "2053446"
}
],
"symlink_target": ""
}
|
import unittest
from gluon import *
# =============================================================================
class DefaultApproverOverrideTests(unittest.TestCase):
""" Test ability to override default approver in imports """
def setUp(self):
xmlstr = """
<s3xml>
<resource name="org_organisation" uuid="DAOOrganisation1">
<data field="name">DAOOrganisation1</data>
</resource>
<resource name="org_organisation" uuid="DAOOrganisation2" approved="false">
<data field="name">DAOOrganisation2</data>
</resource>
</s3xml>"""
from lxml import etree
self.tree = etree.ElementTree(etree.fromstring(xmlstr))
def testDefaultApproverOverride(self):
""" Test import with approve-attribute """
db = current.db
s3db = current.s3db
current.auth.override = True
resource = s3db.resource("org_organisation")
# Check default approver
self.assertEqual(resource.table.approved_by.default, 0)
# Import the elements
resource.import_xml(self.tree)
table = resource.table
# Without approved-flag should be set to default approver
query = (table.uuid == "DAOOrganisation1")
row = db(query).select(table.approved_by, limitby=(0, 1)).first()
self.assertEqual(row.approved_by, 0)
# With approved-flag false should be set to None
query = (table.uuid == "DAOOrganisation2")
row = db(query).select(table.approved_by, limitby=(0, 1)).first()
self.assertEqual(row.approved_by, None)
current.auth.override = False
def tearDown(self):
current.db.rollback()
# =============================================================================
class ComponentDisambiguationTests(unittest.TestCase):
""" Test component disambiguation using the alias-attribute """
def setUp(self):
xmlstr1 = """
<s3xml>
<resource name="org_organisation">
<data field="name">MasterOrg1</data>
<resource name="org_organisation_branch" alias="branch">
<reference field="branch_id" tuid="TUID_OF_THE_BRANCH_ORG"/>
</resource>
</resource>
<resource name="org_organisation" tuid="TUID_OF_THE_BRANCH_ORG">
<data field="name">BranchOrg1</data>
</resource>
</s3xml>"""
xmlstr2 = """
<s3xml>
<resource name="org_organisation">
<data field="name">BranchOrg2</data>
<resource name="org_organisation_branch" alias="parent">
<reference field="organisation_id" tuid="TUID_OF_THE_MASTER_ORG"/>
</resource>
</resource>
<resource name="org_organisation" tuid="TUID_OF_THE_MASTER_ORG">
<data field="name">MasterOrg2</data>
</resource>
</s3xml>"""
from lxml import etree
self.branch_tree = etree.ElementTree(etree.fromstring(xmlstr1))
self.parent_tree = etree.ElementTree(etree.fromstring(xmlstr2))
def testOrganisationBranchImport(self):
""" Test import of organisation branches using alias-attribute """
db = current.db
s3db = current.s3db
current.auth.override = True
resource = s3db.resource("org_organisation")
msg = resource.import_xml(self.branch_tree)
table = resource.table
query = (table.name == "MasterOrg1")
master = db(query).select(table._id, limitby=(0, 1)).first()
self.assertNotEqual(master, None)
query = (table.name == "BranchOrg1")
branch = db(query).select(table._id, limitby=(0, 1)).first()
self.assertNotEqual(branch, None)
table = s3db.org_organisation_branch
query = (table.organisation_id == master.id) & \
(table.branch_id == branch.id)
link = db(query).select(limitby=(0, 1)).first()
self.assertNotEqual(link, None)
def testParentImport(self):
""" Test import of organisation parents using alias-attribute """
db = current.db
s3db = current.s3db
current.auth.override = True
resource = s3db.resource("org_organisation")
msg = resource.import_xml(self.parent_tree)
table = resource.table
query = (table.name == "MasterOrg2")
master = db(query).select(table._id, limitby=(0, 1)).first()
self.assertNotEqual(master, None)
query = (table.name == "BranchOrg2")
branch = db(query).select(table._id, limitby=(0, 1)).first()
self.assertNotEqual(branch, None)
table = s3db.org_organisation_branch
query = (table.organisation_id == master.id) & \
(table.branch_id == branch.id)
link = db(query).select(limitby=(0, 1)).first()
self.assertNotEqual(link, None)
def tearDown(self):
current.db.rollback()
# =============================================================================
def run_suite(*test_classes):
""" Run the test suite """
loader = unittest.TestLoader()
suite = unittest.TestSuite()
for test_class in test_classes:
tests = loader.loadTestsFromTestCase(test_class)
suite.addTests(tests)
if suite is not None:
unittest.TextTestRunner(verbosity=2).run(suite)
return
if __name__ == "__main__":
run_suite(
DefaultApproverOverrideTests,
ComponentDisambiguationTests,
)
# END ========================================================================
|
{
"content_hash": "49442a1fb8399d784098a6d5786878c0",
"timestamp": "",
"source": "github",
"line_count": 170,
"max_line_length": 82,
"avg_line_length": 32.07058823529412,
"alnum_prop": 0.5876742479823918,
"repo_name": "snpabilonia/rgims",
"id": "47678bf6adb754d7108e419a9ac1ab28f88bc681",
"size": "5628",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "modules/unit_tests/s3/s3import.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1082340"
},
{
"name": "JavaScript",
"bytes": "14415870"
},
{
"name": "PHP",
"bytes": "15220"
},
{
"name": "Python",
"bytes": "23003354"
},
{
"name": "Shell",
"bytes": "697"
},
{
"name": "XSLT",
"bytes": "1303725"
}
],
"symlink_target": ""
}
|
"""Test the Network UPS Tools (NUT) config flow."""
from unittest.mock import patch
from pynut2.nut2 import PyNUTError
from homeassistant import config_entries, data_entry_flow, setup
from homeassistant.components import zeroconf
from homeassistant.components.nut.const import DOMAIN
from homeassistant.const import (
CONF_ALIAS,
CONF_HOST,
CONF_NAME,
CONF_PASSWORD,
CONF_PORT,
CONF_RESOURCES,
CONF_SCAN_INTERVAL,
CONF_USERNAME,
)
from .util import _get_mock_pynutclient
from tests.common import MockConfigEntry
VALID_CONFIG = {
CONF_HOST: "localhost",
CONF_PORT: 123,
CONF_NAME: "name",
CONF_RESOURCES: ["battery.charge"],
}
async def test_form_zeroconf(hass):
"""Test we can setup from zeroconf."""
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_ZEROCONF},
data=zeroconf.ZeroconfServiceInfo(
host="192.168.1.5",
addresses=["192.168.1.5"],
hostname="mock_hostname",
name="mock_name",
port=1234,
properties={},
type="mock_type",
),
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "user"
assert result["errors"] == {}
mock_pynut = _get_mock_pynutclient(
list_vars={"battery.voltage": "voltage", "ups.status": "OL"}, list_ups=["ups1"]
)
with patch(
"homeassistant.components.nut.PyNUTClient",
return_value=mock_pynut,
), patch(
"homeassistant.components.nut.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{CONF_USERNAME: "test-username", CONF_PASSWORD: "test-password"},
)
await hass.async_block_till_done()
assert result2["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result2["title"] == "192.168.1.5:1234"
assert result2["data"] == {
CONF_HOST: "192.168.1.5",
CONF_PASSWORD: "test-password",
CONF_PORT: 1234,
CONF_USERNAME: "test-username",
}
assert result2["result"].unique_id is None
assert len(mock_setup_entry.mock_calls) == 1
async def test_form_user_one_ups(hass):
"""Test we get the form."""
await setup.async_setup_component(hass, "persistent_notification", {})
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["errors"] == {}
mock_pynut = _get_mock_pynutclient(
list_vars={"battery.voltage": "voltage", "ups.status": "OL"}, list_ups=["ups1"]
)
with patch(
"homeassistant.components.nut.PyNUTClient",
return_value=mock_pynut,
), patch(
"homeassistant.components.nut.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{
CONF_HOST: "1.1.1.1",
CONF_USERNAME: "test-username",
CONF_PASSWORD: "test-password",
CONF_PORT: 2222,
},
)
await hass.async_block_till_done()
assert result2["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result2["title"] == "1.1.1.1:2222"
assert result2["data"] == {
CONF_HOST: "1.1.1.1",
CONF_PASSWORD: "test-password",
CONF_PORT: 2222,
CONF_USERNAME: "test-username",
}
assert len(mock_setup_entry.mock_calls) == 1
async def test_form_user_multiple_ups(hass):
"""Test we get the form."""
await setup.async_setup_component(hass, "persistent_notification", {})
config_entry = MockConfigEntry(
domain=DOMAIN,
data={CONF_HOST: "2.2.2.2", CONF_PORT: 123, CONF_RESOURCES: ["battery.charge"]},
)
config_entry.add_to_hass(hass)
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["errors"] == {}
mock_pynut = _get_mock_pynutclient(
list_vars={"battery.voltage": "voltage"},
list_ups={"ups1": "UPS 1", "ups2": "UPS2"},
)
with patch(
"homeassistant.components.nut.PyNUTClient",
return_value=mock_pynut,
):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{
CONF_HOST: "1.1.1.1",
CONF_USERNAME: "test-username",
CONF_PASSWORD: "test-password",
CONF_PORT: 2222,
},
)
assert result2["step_id"] == "ups"
assert result2["type"] == data_entry_flow.RESULT_TYPE_FORM
with patch(
"homeassistant.components.nut.PyNUTClient",
return_value=mock_pynut,
), patch(
"homeassistant.components.nut.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result3 = await hass.config_entries.flow.async_configure(
result2["flow_id"],
{CONF_ALIAS: "ups2"},
)
await hass.async_block_till_done()
assert result3["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result3["title"] == "ups2@1.1.1.1:2222"
assert result3["data"] == {
CONF_HOST: "1.1.1.1",
CONF_PASSWORD: "test-password",
CONF_ALIAS: "ups2",
CONF_PORT: 2222,
CONF_USERNAME: "test-username",
}
assert len(mock_setup_entry.mock_calls) == 2
async def test_form_user_one_ups_with_ignored_entry(hass):
"""Test we can setup a new one when there is an ignored one."""
ignored_entry = MockConfigEntry(
domain=DOMAIN, data={}, source=config_entries.SOURCE_IGNORE
)
ignored_entry.add_to_hass(hass)
await setup.async_setup_component(hass, "persistent_notification", {})
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["errors"] == {}
mock_pynut = _get_mock_pynutclient(
list_vars={"battery.voltage": "voltage", "ups.status": "OL"}, list_ups=["ups1"]
)
with patch(
"homeassistant.components.nut.PyNUTClient",
return_value=mock_pynut,
), patch(
"homeassistant.components.nut.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{
CONF_HOST: "1.1.1.1",
CONF_USERNAME: "test-username",
CONF_PASSWORD: "test-password",
CONF_PORT: 2222,
},
)
await hass.async_block_till_done()
assert result2["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result2["title"] == "1.1.1.1:2222"
assert result2["data"] == {
CONF_HOST: "1.1.1.1",
CONF_PASSWORD: "test-password",
CONF_PORT: 2222,
CONF_USERNAME: "test-username",
}
assert len(mock_setup_entry.mock_calls) == 1
async def test_form_cannot_connect(hass):
"""Test we handle cannot connect error."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
mock_pynut = _get_mock_pynutclient()
with patch(
"homeassistant.components.nut.PyNUTClient",
return_value=mock_pynut,
):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{
CONF_HOST: "1.1.1.1",
CONF_USERNAME: "test-username",
CONF_PASSWORD: "test-password",
CONF_PORT: 2222,
},
)
assert result2["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result2["errors"] == {"base": "cannot_connect"}
with patch(
"homeassistant.components.nut.PyNUTClient.list_ups",
side_effect=PyNUTError,
), patch(
"homeassistant.components.nut.PyNUTClient.list_vars",
side_effect=PyNUTError,
):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{
CONF_HOST: "1.1.1.1",
CONF_USERNAME: "test-username",
CONF_PASSWORD: "test-password",
CONF_PORT: 2222,
},
)
assert result2["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result2["errors"] == {"base": "cannot_connect"}
with patch(
"homeassistant.components.nut.PyNUTClient.list_ups",
return_value=["ups1"],
), patch(
"homeassistant.components.nut.PyNUTClient.list_vars",
side_effect=TypeError,
):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{
CONF_HOST: "1.1.1.1",
CONF_USERNAME: "test-username",
CONF_PASSWORD: "test-password",
CONF_PORT: 2222,
},
)
assert result2["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result2["errors"] == {"base": "unknown"}
async def test_abort_if_already_setup(hass):
"""Test we abort if component is already setup."""
config_entry = MockConfigEntry(
domain=DOMAIN,
data={
CONF_HOST: "1.1.1.1",
CONF_PORT: 123,
CONF_RESOURCES: ["battery.voltage"],
},
)
config_entry.add_to_hass(hass)
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
mock_pynut = _get_mock_pynutclient(
list_vars={"battery.voltage": "voltage"},
list_ups={"ups1": "UPS 1"},
)
with patch(
"homeassistant.components.nut.PyNUTClient",
return_value=mock_pynut,
):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{
CONF_HOST: "1.1.1.1",
CONF_PORT: 123,
},
)
assert result2["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result2["reason"] == "already_configured"
async def test_abort_if_already_setup_alias(hass):
"""Test we abort if component is already setup with same alias."""
config_entry = MockConfigEntry(
domain=DOMAIN,
data={
CONF_HOST: "1.1.1.1",
CONF_PORT: 123,
CONF_RESOURCES: ["battery.voltage"],
CONF_ALIAS: "ups1",
},
)
config_entry.add_to_hass(hass)
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
mock_pynut = _get_mock_pynutclient(
list_vars={"battery.voltage": "voltage"},
list_ups={"ups1": "UPS 1", "ups2": "UPS 2"},
)
with patch(
"homeassistant.components.nut.PyNUTClient",
return_value=mock_pynut,
):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{
CONF_HOST: "1.1.1.1",
CONF_PORT: 123,
},
)
assert result2["step_id"] == "ups"
assert result2["type"] == data_entry_flow.RESULT_TYPE_FORM
with patch(
"homeassistant.components.nut.PyNUTClient",
return_value=mock_pynut,
):
result3 = await hass.config_entries.flow.async_configure(
result2["flow_id"],
{CONF_ALIAS: "ups1"},
)
assert result3["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result3["reason"] == "already_configured"
async def test_options_flow(hass):
"""Test config flow options."""
config_entry = MockConfigEntry(
domain=DOMAIN,
unique_id="abcde12345",
data=VALID_CONFIG,
)
config_entry.add_to_hass(hass)
with patch("homeassistant.components.nut.async_setup_entry", return_value=True):
result = await hass.config_entries.options.async_init(config_entry.entry_id)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "init"
result = await hass.config_entries.options.async_configure(
result["flow_id"], user_input={}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert config_entry.options == {
CONF_SCAN_INTERVAL: 60,
}
with patch("homeassistant.components.nut.async_setup_entry", return_value=True):
result2 = await hass.config_entries.options.async_init(config_entry.entry_id)
assert result2["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result2["step_id"] == "init"
result2 = await hass.config_entries.options.async_configure(
result2["flow_id"],
user_input={CONF_SCAN_INTERVAL: 12},
)
assert result2["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert config_entry.options == {
CONF_SCAN_INTERVAL: 12,
}
|
{
"content_hash": "ff0e6fdba1d306e09b3239d2e690d876",
"timestamp": "",
"source": "github",
"line_count": 425,
"max_line_length": 88,
"avg_line_length": 31.404705882352943,
"alnum_prop": 0.5839514497639919,
"repo_name": "rohitranjan1991/home-assistant",
"id": "2261fec3a86147a1a2bc9bbd681f3cf093ea9e59",
"size": "13347",
"binary": false,
"copies": "3",
"ref": "refs/heads/dev",
"path": "tests/components/nut/test_config_flow.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1017265"
},
{
"name": "Python",
"bytes": "1051086"
},
{
"name": "Shell",
"bytes": "3946"
}
],
"symlink_target": ""
}
|
import logging
import os
import subprocess
LOG = logging.getLogger('archvyrt')
class Base:
"""
Base provisioner for domain
"""
def __init__(self, domain):
"""
Initialize provisioner
"""
self._domain = domain
@property
def domain(self):
"""
Libvirt domain this provisioner is attached to
"""
return self._domain
@staticmethod
def _runcmd(cmds, output=False, failhard=True, **kwargs):
"""
Run a unix command
"""
# output shall be captured
if output:
LOG.debug('Run command: %s', ' '.join(cmds))
rval = subprocess.check_output(
cmds,
stderr=subprocess.STDOUT,
**kwargs
).decode()
# output does not matter, send it to /dev/null
else:
with open(os.devnull, 'w') as devnull:
LOG.debug('Run command: %s', ' '.join(cmds))
rval = subprocess.call(
cmds,
stdout=devnull,
stderr=devnull,
**kwargs
)
if not rval == 0:
if failhard:
raise RuntimeError('Command %s failed' % " ".join(cmds))
return rval
@staticmethod
def writefile(filename, lines, mode='w'):
"""
Write to a file
"""
LOG.debug('Write file %s', filename)
with open(filename, mode) as fobj:
fobj.write('%s\n' % '\n'.join(lines))
def cleanup(self):
"""
cleanup actions
"""
raise NotImplementedError
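# --- Illustrative sketch (not part of archvyrt) ------------------------------
# Base is meant to be subclassed by concrete provisioners that implement
# cleanup() and reuse the _runcmd()/writefile() helpers. The subclass below is
# a hypothetical minimal example; the echo command and the /tmp path are
# placeholders chosen purely for illustration.
class EchoProvisioner(Base):
    """Toy provisioner demonstrating the Base helpers."""

    def provision(self):
        # run a harmless command and capture its output as a string
        motd = self._runcmd(['echo', 'hello from provisioner'], output=True)
        # persist the output using the writefile() helper
        self.writefile('/tmp/archvyrt-demo-motd', [motd.strip()])

    def cleanup(self):
        # nothing to tear down in this toy example
        LOG.debug('EchoProvisioner cleanup: nothing to do')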
|
{
"content_hash": "d55d447d32d092c11b3f250df99a8742",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 76,
"avg_line_length": 25.119402985074625,
"alnum_prop": 0.48128342245989303,
"repo_name": "Bigluu/arch-provision",
"id": "08a94073d2883d3b11488261f4f1373a0817eddb",
"size": "1683",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "archvyrt/provisioner/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "43560"
}
],
"symlink_target": ""
}
|
from . import apc
from . import coco
from . import voc
from .utils import view_dataset
from .utils import visualize_heatmap
from .utils import visualize_segmentation
from .utils import visualize_label
from .utils import view_class_seg_dataset
from .utils import visualize_instance_segmentation
from .utils import view_instance_seg_dataset
|
{
"content_hash": "af8e7e58a117146b4987f04b894e3584",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 50,
"avg_line_length": 21.5625,
"alnum_prop": 0.8028985507246377,
"repo_name": "pazeshun/jsk_apc",
"id": "e97828c85ee5e9a554279bd7229e9d4a51cdb3b3",
"size": "361",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "demos/instance_occlsegm/instance_occlsegm_lib/datasets/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C++",
"bytes": "101871"
},
{
"name": "CMake",
"bytes": "42995"
},
{
"name": "Common Lisp",
"bytes": "695864"
},
{
"name": "Dockerfile",
"bytes": "1503"
},
{
"name": "HTML",
"bytes": "6364"
},
{
"name": "Python",
"bytes": "406153"
},
{
"name": "Shell",
"bytes": "4475"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
import itertools
from time import time
from . import Errors
from . import DebugFlags
from . import Options
from .Visitor import CythonTransform
from .Errors import CompileError, InternalError, AbortError
from . import Naming
#
# Really small pipeline stages
#
def dumptree(t):
# For quick debugging in pipelines
print t.dump()
return t
def abort_on_errors(node):
# Stop the pipeline if there are any errors.
if Errors.num_errors != 0:
raise AbortError("pipeline break")
return node
def parse_stage_factory(context):
def parse(compsrc):
source_desc = compsrc.source_desc
full_module_name = compsrc.full_module_name
initial_pos = (source_desc, 1, 0)
saved_cimport_from_pyx, Options.cimport_from_pyx = Options.cimport_from_pyx, False
scope = context.find_module(full_module_name, pos = initial_pos, need_pxd = 0)
Options.cimport_from_pyx = saved_cimport_from_pyx
tree = context.parse(source_desc, scope, pxd = 0, full_module_name = full_module_name)
tree.compilation_source = compsrc
tree.scope = scope
tree.is_pxd = False
return tree
return parse
def parse_pxd_stage_factory(context, scope, module_name):
def parse(source_desc):
tree = context.parse(source_desc, scope, pxd=True,
full_module_name=module_name)
tree.scope = scope
tree.is_pxd = True
return tree
return parse
def generate_pyx_code_stage_factory(options, result):
def generate_pyx_code_stage(module_node):
module_node.process_implementation(options, result)
result.compilation_source = module_node.compilation_source
return result
return generate_pyx_code_stage
def inject_pxd_code_stage_factory(context):
def inject_pxd_code_stage(module_node):
for name, (statlistnode, scope) in context.pxds.iteritems():
module_node.merge_in(statlistnode, scope)
return module_node
return inject_pxd_code_stage
def use_utility_code_definitions(scope, target, seen=None):
if seen is None:
seen = set()
for entry in scope.entries.itervalues():
if entry in seen:
continue
seen.add(entry)
if entry.used and entry.utility_code_definition:
target.use_utility_code(entry.utility_code_definition)
for required_utility in entry.utility_code_definition.requires:
target.use_utility_code(required_utility)
elif entry.as_module:
use_utility_code_definitions(entry.as_module, target, seen)
def inject_utility_code_stage_factory(context):
def inject_utility_code_stage(module_node):
use_utility_code_definitions(context.cython_scope, module_node.scope)
added = []
# Note: the list might be extended inside the loop (if some utility code
# pulls in other utility code, explicitly or implicitly)
for utilcode in module_node.scope.utility_code_list:
if utilcode in added: continue
added.append(utilcode)
if utilcode.requires:
for dep in utilcode.requires:
if not dep in added and not dep in module_node.scope.utility_code_list:
module_node.scope.utility_code_list.append(dep)
tree = utilcode.get_tree()
if tree:
module_node.merge_in(tree.body, tree.scope, merge_scope=True)
return module_node
return inject_utility_code_stage
class UseUtilityCodeDefinitions(CythonTransform):
# Temporary hack to use any utility code in nodes' "utility_code_definitions".
# This should be moved to the code generation phase of the relevant nodes once
# it is safe to generate CythonUtilityCode at code generation time.
def __call__(self, node):
self.scope = node.scope
return super(UseUtilityCodeDefinitions, self).__call__(node)
def process_entry(self, entry):
if entry:
for utility_code in (entry.utility_code, entry.utility_code_definition):
if utility_code:
self.scope.use_utility_code(utility_code)
def visit_AttributeNode(self, node):
self.process_entry(node.entry)
return node
def visit_NameNode(self, node):
self.process_entry(node.entry)
self.process_entry(node.type_entry)
return node
#
# Pipeline factories
#
def create_pipeline(context, mode, exclude_classes=()):
assert mode in ('pyx', 'py', 'pxd')
from .Visitor import PrintTree
from .ParseTreeTransforms import WithTransform, NormalizeTree, PostParse, PxdPostParse
from .ParseTreeTransforms import ForwardDeclareTypes, AnalyseDeclarationsTransform
from .ParseTreeTransforms import AnalyseExpressionsTransform, FindInvalidUseOfFusedTypes
from .ParseTreeTransforms import CreateClosureClasses, MarkClosureVisitor, DecoratorTransform
from .ParseTreeTransforms import InterpretCompilerDirectives, TransformBuiltinMethods
from .ParseTreeTransforms import ExpandInplaceOperators, ParallelRangeTransform
from .ParseTreeTransforms import CalculateQualifiedNamesTransform
from .TypeInference import MarkParallelAssignments, MarkOverflowingArithmetic
from .ParseTreeTransforms import AdjustDefByDirectives, AlignFunctionDefinitions
from .ParseTreeTransforms import RemoveUnreachableCode, GilCheck
from .FlowControl import ControlFlowAnalysis
from .AnalysedTreeTransforms import AutoTestDictTransform
from .AutoDocTransforms import EmbedSignature
from .Optimize import FlattenInListTransform, SwitchTransform, IterationTransform
from .Optimize import EarlyReplaceBuiltinCalls, OptimizeBuiltinCalls
from .Optimize import InlineDefNodeCalls
from .Optimize import ConstantFolding, FinalOptimizePhase
from .Optimize import DropRefcountingTransform
from .Optimize import ConsolidateOverflowCheck
from .Buffer import IntroduceBufferAuxiliaryVars
from .ModuleNode import check_c_declarations, check_c_declarations_pxd
if mode == 'pxd':
_check_c_declarations = check_c_declarations_pxd
_specific_post_parse = PxdPostParse(context)
else:
_check_c_declarations = check_c_declarations
_specific_post_parse = None
if mode == 'py':
_align_function_definitions = AlignFunctionDefinitions(context)
else:
_align_function_definitions = None
# NOTE: These are the "common" parts of the pipeline, which are also
# applied to code in pxd files. So they will be run multiple times in a
# compilation stage.
stages = [
NormalizeTree(context),
PostParse(context),
_specific_post_parse,
InterpretCompilerDirectives(context, context.compiler_directives),
ParallelRangeTransform(context),
AdjustDefByDirectives(context),
WithTransform(context),
MarkClosureVisitor(context),
_align_function_definitions,
RemoveUnreachableCode(context),
ConstantFolding(),
FlattenInListTransform(),
DecoratorTransform(context),
ForwardDeclareTypes(context),
AnalyseDeclarationsTransform(context),
AutoTestDictTransform(context),
EmbedSignature(context),
EarlyReplaceBuiltinCalls(context), ## Necessary?
TransformBuiltinMethods(context),
MarkParallelAssignments(context),
ControlFlowAnalysis(context),
RemoveUnreachableCode(context),
# MarkParallelAssignments(context),
MarkOverflowingArithmetic(context),
IntroduceBufferAuxiliaryVars(context),
_check_c_declarations,
InlineDefNodeCalls(context),
AnalyseExpressionsTransform(context),
FindInvalidUseOfFusedTypes(context),
ExpandInplaceOperators(context),
IterationTransform(context),
SwitchTransform(context),
OptimizeBuiltinCalls(context), ## Necessary?
CreateClosureClasses(context), ## After all lookups and type inference
CalculateQualifiedNamesTransform(context),
ConsolidateOverflowCheck(context),
DropRefcountingTransform(),
FinalOptimizePhase(context),
GilCheck(),
UseUtilityCodeDefinitions(context),
]
filtered_stages = []
for s in stages:
if s.__class__ not in exclude_classes:
filtered_stages.append(s)
return filtered_stages
def create_pyx_pipeline(context, options, result, py=False, exclude_classes=()):
if py:
mode = 'py'
else:
mode = 'pyx'
test_support = []
if options.evaluate_tree_assertions:
from ..TestUtils import TreeAssertVisitor
test_support.append(TreeAssertVisitor())
if options.gdb_debug:
from ..Debugger import DebugWriter # requires Py2.5+
from .ParseTreeTransforms import DebugTransform
context.gdb_debug_outputwriter = DebugWriter.CythonDebugWriter(
options.output_dir)
debug_transform = [DebugTransform(context, options, result)]
else:
debug_transform = []
return list(itertools.chain(
[parse_stage_factory(context)],
create_pipeline(context, mode, exclude_classes=exclude_classes),
test_support,
[inject_pxd_code_stage_factory(context),
inject_utility_code_stage_factory(context),
abort_on_errors],
debug_transform,
[generate_pyx_code_stage_factory(options, result)]))
def create_pxd_pipeline(context, scope, module_name):
from .CodeGeneration import ExtractPxdCode
# The pxd pipeline ends up with a CCodeWriter containing the
# code of the pxd, as well as a pxd scope.
return [
parse_pxd_stage_factory(context, scope, module_name)
] + create_pipeline(context, 'pxd') + [
ExtractPxdCode()
]
def create_py_pipeline(context, options, result):
return create_pyx_pipeline(context, options, result, py=True)
def create_pyx_as_pxd_pipeline(context, result):
from .ParseTreeTransforms import AlignFunctionDefinitions, \
MarkClosureVisitor, WithTransform, AnalyseDeclarationsTransform
from .Optimize import ConstantFolding, FlattenInListTransform
from .Nodes import StatListNode
pipeline = []
pyx_pipeline = create_pyx_pipeline(context, context.options, result,
exclude_classes=[
AlignFunctionDefinitions,
MarkClosureVisitor,
ConstantFolding,
FlattenInListTransform,
WithTransform
])
for stage in pyx_pipeline:
pipeline.append(stage)
if isinstance(stage, AnalyseDeclarationsTransform):
# This is the last stage we need.
break
def fake_pxd(root):
for entry in root.scope.entries.values():
if not entry.in_cinclude:
entry.defined_in_pxd = 1
if entry.name == entry.cname and entry.visibility != 'extern':
# Always mangle non-extern cimported entries.
entry.cname = entry.scope.mangle(Naming.func_prefix, entry.name)
return StatListNode(root.pos, stats=[]), root.scope
pipeline.append(fake_pxd)
return pipeline
def insert_into_pipeline(pipeline, transform, before=None, after=None):
"""
Insert a new transform into the pipeline after or before an instance of
the given class. e.g.
pipeline = insert_into_pipeline(pipeline, transform,
after=AnalyseDeclarationsTransform)
"""
assert before or after
cls = before or after
for i, t in enumerate(pipeline):
if isinstance(t, cls):
break
if after:
i += 1
return pipeline[:i] + [transform] + pipeline[i:]
#
# Running a pipeline
#
def run_pipeline(pipeline, source, printtree=True):
from .Visitor import PrintTree
error = None
data = source
try:
try:
for phase in pipeline:
if phase is not None:
if DebugFlags.debug_verbose_pipeline:
t = time()
print "Entering pipeline phase %r" % phase
if not printtree and isinstance(phase, PrintTree):
continue
data = phase(data)
if DebugFlags.debug_verbose_pipeline:
print " %.3f seconds" % (time() - t)
except CompileError, err:
# err is set
Errors.report_error(err)
error = err
except InternalError, err:
# Only raise if there was not an earlier error
if Errors.num_errors == 0:
raise
error = err
except AbortError, err:
error = err
return (error, data)
|
{
"content_hash": "6fffecb87f2f0c403457083437f6d7b8",
"timestamp": "",
"source": "github",
"line_count": 341,
"max_line_length": 97,
"avg_line_length": 38.37243401759531,
"alnum_prop": 0.6543370271303018,
"repo_name": "madjar/cython",
"id": "dd90fac2892900ac126b9debbac3a19630ebf520",
"size": "13085",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "Cython/Compiler/Pipeline.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "2098"
},
{
"name": "C",
"bytes": "445986"
},
{
"name": "C++",
"bytes": "16585"
},
{
"name": "CSS",
"bytes": "11567"
},
{
"name": "Emacs Lisp",
"bytes": "11931"
},
{
"name": "HTML",
"bytes": "112723"
},
{
"name": "JavaScript",
"bytes": "15703"
},
{
"name": "Makefile",
"bytes": "4740"
},
{
"name": "PowerShell",
"bytes": "3243"
},
{
"name": "Python",
"bytes": "5185695"
},
{
"name": "Smalltalk",
"bytes": "618"
}
],
"symlink_target": ""
}
|
from datetime import datetime, date, timedelta
from pyxmli import (Invoice, Group, Line, Tax, Discount, Address,
DeliveryMethod, Payment, INVOICE_PAID, RATE_TYPE_FIXED,
RATE_TYPE_PERCENTAGE, DELIVERY_METHOD_EMAIL)
invoice = Invoice(name="Online shopping on Guitar Heroes Store",
description="Perfect gears for a quick jam.",
currency="USD",
status=INVOICE_PAID,
date=datetime.now(),
due_date=date.today() + timedelta(days=60),
mentions='Guitar Heroes Store LLC.',
terms='All order changes or cancellations should be ' \
'reported prior to shipping and by phone on ' \
'1-555-555-5555. Email is not accepted.', #optional
domain="greendizer.com")
#Billing contact and address
invoice.buyer.identifier = '12345' # or stevie@ray-vaughan.com
invoice.buyer.name = "Stevie Ray Vaughan"
invoice.buyer.address = Address(street_address="E Riverside Dr",
city="Austin",
state='TX',
zipcode="78704",
country="US")
#Shipping recipient and address (optional)
invoice.shipping.recipient = invoice.buyer
#Groups allow you to structure your invoice and organize your items
#in categories. An invoice must at least have one Group instance.
group = Group()
invoice.groups.append(group)
#Define a Discount instance that we'll attach to one or many invoice lines
promo_code = Discount(name='Promo Code',
description="$30 discount",
rate=30,
rate_type=RATE_TYPE_FIXED)
#Define a Tax instance that we'll attach to one or many invoice lines
vat = Tax(name='VAT',
description="Sales Tax",
rate=8.25,
rate_type=RATE_TYPE_PERCENTAGE)
#Instantiate a line to describe an invoice item, and add it to a group
group.lines.append(Line(name="SRV Fender Stratocaster",
description="SRV's collaboration with Fender",
quantity=1, unit_price=2399.99))
group.lines.append(Line(name="Marshall AS100D Amplifier",
description='50 Watt + 50 Watt, 2x8" combo with ' \
'two high fidelity, polymer dome tweeters.',
quantity=1, unit_price=699.99))
group.lines.append(Line(name="Dunlop JH1B wah-wah pedal",
description='Reproduce the famous tones of Hendrix ' \
'wah solos from the late 60s.',
quantity=1, unit_price=129.99))
#Attach taxes and discounts to lines
for line in group.lines:
line.taxes.append(vat)
line.discounts.append(promo_code)
invoice.payments.append(Payment(amount=invoice.total,))
invoice.deliveries.append(DeliveryMethod(method=DELIVERY_METHOD_EMAIL))
from greendizer.clients import SellerClient
seller = SellerClient(email="jimi.hendrix@greendizer.net",
password="password").seller
sent_invoice = seller.emails['jimi.hendrix@greendizer.net'].invoices.send(invoice)
print sent_invoice.total
|
{
"content_hash": "83dd560622b5e6970ccc25ab93a13cba",
"timestamp": "",
"source": "github",
"line_count": 80,
"max_line_length": 82,
"avg_line_length": 40.675,
"alnum_prop": 0.610940381069453,
"repo_name": "mohamedattahri/Greendizer-Python-Library",
"id": "dab285b9c6376e057bc38c179405d4d94f52462d",
"size": "3278",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "example.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "106196"
}
],
"symlink_target": ""
}
|
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
import shutil
from pants.util.contextutil import temporary_file
def atomic_copy(src, dst):
"""Copy the file src to dst, overwriting dst atomically."""
with temporary_file(root_dir=os.path.dirname(dst)) as tmp_dst:
shutil.copyfile(src, tmp_dst.name)
os.rename(tmp_dst.name, dst)
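# --- Illustrative usage sketch (not part of pants) ---------------------------
# atomic_copy() stages the copy in a temporary file inside dst's directory and
# then renames it over dst, so readers never observe a half-written file.
# The file names below are placeholders chosen for illustration only.
def _demo_atomic_copy():
  src, dst = '/tmp/atomic-demo-src.txt', '/tmp/atomic-demo-dst.txt'
  with open(src, 'w') as f:
    f.write('new contents\n')
  atomic_copy(src, dst)
  with open(dst) as f:
    assert f.read() == 'new contents\n'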
|
{
"content_hash": "da89845ae5ad250c231e369818d1c6a5",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 93,
"avg_line_length": 32.57142857142857,
"alnum_prop": 0.706140350877193,
"repo_name": "TansyArron/pants",
"id": "b12716374ec4f04c906166f663172d4ad12034cd",
"size": "603",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "src/python/pants/util/fileutil.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "767"
},
{
"name": "CSS",
"bytes": "11139"
},
{
"name": "GAP",
"bytes": "4818"
},
{
"name": "Go",
"bytes": "1596"
},
{
"name": "HTML",
"bytes": "68162"
},
{
"name": "Java",
"bytes": "316044"
},
{
"name": "JavaScript",
"bytes": "10157"
},
{
"name": "Protocol Buffer",
"bytes": "7038"
},
{
"name": "Python",
"bytes": "3362372"
},
{
"name": "Scala",
"bytes": "77693"
},
{
"name": "Shell",
"bytes": "48118"
},
{
"name": "Thrift",
"bytes": "3485"
}
],
"symlink_target": ""
}
|
"""
~~~~~
:copyright: (c) 2015 by Halfmoon Labs, Inc.
:license: MIT, see LICENSE for more details.
"""
import json
import traceback
from jwt import DecodeError
from pybitcoin import BitcoinPublicKey
from .tokenizer import Tokenizer
from .resolver import Resolver
class AuthMessage():
tokenizer = Tokenizer()
def __init__(self):
raise NotImplementedError('')
def _payload(self):
raise NotImplementedError('')
@classmethod
def has_valid_issuer(cls):
raise NotImplementedError('')
def token(self):
return self.tokenizer.encode(self._payload(), self.signing_key)
def json(self):
return json.loads(self.decode(self.token()))
@classmethod
def decode(cls, token):
if not isinstance(token, (str, unicode)):
raise ValueError('Token must be a string')
# decode the token without any verification
return cls.tokenizer.decode(token)
@classmethod
def is_valid_jwt(cls, token):
# decode the token
try:
decoded_token = cls.decode(token)
except DecodeError:
traceback.print_exc()
return False
# extract the public key from the token
try:
payload = decoded_token['payload']
public_key_str = payload['issuer']['publicKey']
public_key = BitcoinPublicKey(str(public_key_str))
except KeyError:
traceback.print_exc()
return False
# return whether the token is verified/valid
return cls.tokenizer.verify(token, public_key.to_pem())
@classmethod
def verify(cls, token, resolver=None):
is_valid_jwt = cls.is_valid_jwt(token)
if not resolver:
return is_valid_jwt
if not isinstance(resolver, Resolver):
raise ValueError('"resolver" must be a valid Resolver object')
has_valid_issuer = cls.has_valid_issuer(token, resolver)
is_valid_auth_token = is_valid_jwt and has_valid_issuer
return is_valid_auth_token
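# --- Illustrative sketch (assumption, not part of this module) ---------------
# A concrete message is expected to supply a signing key, implement _payload()
# and implement has_valid_issuer(); the subclass below is a hypothetical
# minimal example with placeholder payload fields.
class DemoMessage(AuthMessage):
    def __init__(self, signing_key, public_key_str):
        self.signing_key = signing_key
        self.public_key_str = public_key_str

    def _payload(self):
        return {'issuer': {'publicKey': self.public_key_str}}

    @classmethod
    def has_valid_issuer(cls, token, resolver):
        # trivially accept the issuer in this sketch
        return True
# Usage (sketch): DemoMessage(pem_private_key, hex_public_key).token() yields a
# JWT that is_valid_jwt() can then check, assuming the Tokenizer nests the
# payload under 'payload' as the lookups above expect.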
|
{
"content_hash": "6deee98d09167158f705d220ddf50283",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 74,
"avg_line_length": 29.056338028169016,
"alnum_prop": 0.624818225884634,
"repo_name": "mine-code/blockchain-auth-python",
"id": "6a0fad55fe0958e267037951a5ce87ac0437ee3a",
"size": "2109",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "blockchainauth/auth_message.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "25785"
}
],
"symlink_target": ""
}
|
from suds import *
from suds.client import Client
from sys import exit
from optparse import OptionParser
from aviary.util import *
# change these for other default locations and ports
wsdl = 'file:/var/lib/condor/aviary/services/job/aviary-job.wsdl'
cmds = ['holdJob', 'releaseJob', 'removeJob', 'suspendJob', 'continueJob']
parser = build_basic_parser('Control job state remotely via SOAP.','http://localhost:9090/services/job/')
parser.add_option('--cmd', action="store", choices=(cmds), dest='cmd', help=str(cmds))
parser.add_option('--cproc', action="store", dest='cproc', help="a cluster.proc id like '1.0' or '5.3'")
(opts,args) = parser.parse_args()
# the joy that is optparse...
if opts.cmd is None:
print 'One of these commands must be supplied', cmds
parser.print_help()
exit(1)
if opts.cproc is None:
print 'You must provide a cluster.proc job id'
parser.print_help()
exit(1)
client = create_suds_client(opts,wsdl,None)
opts.url += opts.cmd
client.set_options(location=opts.url)
# set up our JobID
jobId = client.factory.create('ns0:JobID')
jobId.job = opts.cproc
try:
func = getattr(client.service, opts.cmd, None)
if callable(func):
result = func(jobId,"test")
except Exception, e:
print "unable to access scheduler at: ", opts.url
print e
exit(1)
if result.code != "OK":
print result.code,":", result.text
else:
print opts.cmd, 'succeeded'
|
{
"content_hash": "642d369fdb517f074ddeeefc065878ac",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 105,
"avg_line_length": 29.319148936170212,
"alnum_prop": 0.7177068214804064,
"repo_name": "djw8605/condor",
"id": "7178d64adcb769ea825b022a40beaacb9472cdcf",
"size": "2056",
"binary": false,
"copies": "3",
"ref": "refs/heads/add-force",
"path": "src/condor_contrib/aviary/test/jobcontrol.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "3213123"
},
{
"name": "C++",
"bytes": "21167042"
},
{
"name": "FORTRAN",
"bytes": "110251"
},
{
"name": "Java",
"bytes": "44326"
},
{
"name": "JavaScript",
"bytes": "2095"
},
{
"name": "Objective-C",
"bytes": "352224"
},
{
"name": "Perl",
"bytes": "2104665"
},
{
"name": "Python",
"bytes": "931001"
},
{
"name": "Ruby",
"bytes": "24647"
},
{
"name": "Shell",
"bytes": "780948"
},
{
"name": "Visual Basic",
"bytes": "4985"
}
],
"symlink_target": ""
}
|
from paddle.trainer_config_helpers import *
"""
paper: https://arxiv.org/abs/1512.03385
"""
is_test = get_config_arg("is_test", bool, False)
is_predict = get_config_arg("is_predict", bool, False)
data_provider = get_config_arg("data_provider", bool, True)
layer_num = get_config_arg("layer_num", int, 50)
if not is_predict and data_provider:
train_list = 'train.list' if not is_test else None
# mean.meta is mean file of ImageNet dataset.
# mean.meta size : 3 x 224 x 224.
# If you use three mean value, set like:
# "mean_value:103.939,116.779,123.68;"
args = {
'mean_meta': "model/mean_meta_224/mean.meta",
'image_size': 224,
'crop_size': 224,
'color': True,
'swap_channel': [2, 1, 0]
}
define_py_data_sources2(
train_list,
'example/test.list',
module="example.image_list_provider",
obj="processData",
args=args)
batch_size = 1
learning_rate = 0.1 / batch_size
momentum = 0.9
weight_decay = 0.0001 * batch_size
default_momentum(momentum)
default_decay_rate(weight_decay)
Settings(
algorithm='sgd',
batch_size=batch_size,
learning_rate=learning_rate,
# set the appropriate parameters according to your schedule
learning_method='momentum',
learning_rate_decay_a=0.5,
learning_rate_decay_b=1200000 * 10,
learning_rate_schedule="discexp", )
def conv_bn_layer(name,
input,
filter_size,
num_filters,
stride,
padding,
channels=None,
active_type=ReluActivation()):
"""
A wrapper for conv layer with batch normalization layers.
Note:
conv layer has no activation.
"""
tmp = img_conv_layer(
name=name + "_conv",
input=input,
filter_size=filter_size,
num_channels=channels,
num_filters=num_filters,
stride=stride,
padding=padding,
act=LinearActivation(),
bias_attr=False)
return batch_norm_layer(
name=name + "_bn", input=tmp, act=active_type, use_global_stats=is_test)
def bottleneck_block(name, input, num_filters1, num_filters2):
"""
A wrapper for a bottleneck building block in ResNet.
The last conv_bn_layer has no activation.
The addto layer has a relu activation.
"""
last_name = conv_bn_layer(
name=name + '_branch2a',
input=input,
filter_size=1,
num_filters=num_filters1,
stride=1,
padding=0)
last_name = conv_bn_layer(
name=name + '_branch2b',
input=last_name,
filter_size=3,
num_filters=num_filters1,
stride=1,
padding=1)
last_name = conv_bn_layer(
name=name + '_branch2c',
input=last_name,
filter_size=1,
num_filters=num_filters2,
stride=1,
padding=0,
active_type=LinearActivation())
return addto_layer(
name=name + "_addto", input=[input, last_name], act=ReluActivation())
def mid_projection(name, input, num_filters1, num_filters2, stride=2):
"""
A wrapper for the middle projection in ResNet.
Projection shortcuts are used for increasing dimensions,
and other shortcuts are identity mappings.
branch1: projection shortcut used for increasing
dimensions, has no activation.
branch2x: bottleneck building block, shortcuts are identity.
"""
# stride = 2
branch1 = conv_bn_layer(
name=name + '_branch1',
input=input,
filter_size=1,
num_filters=num_filters2,
stride=stride,
padding=0,
active_type=LinearActivation())
last_name = conv_bn_layer(
name=name + '_branch2a',
input=input,
filter_size=1,
num_filters=num_filters1,
stride=stride,
padding=0)
last_name = conv_bn_layer(
name=name + '_branch2b',
input=last_name,
filter_size=3,
num_filters=num_filters1,
stride=1,
padding=1)
last_name = conv_bn_layer(
name=name + '_branch2c',
input=last_name,
filter_size=1,
num_filters=num_filters2,
stride=1,
padding=0,
active_type=LinearActivation())
return addto_layer(
name=name + "_addto", input=[branch1, last_name], act=ReluActivation())
def deep_res_net(res2_num=3, res3_num=4, res4_num=6, res5_num=3):
"""
A wrapper for 50,101,152 layers of ResNet.
res2_num: number of blocks stacked in conv2_x
res3_num: number of blocks stacked in conv3_x
res4_num: number of blocks stacked in conv4_x
res5_num: number of blocks stacked in conv5_x
"""
# For ImageNet
# conv1: 112x112
img = data_layer(name='input', size=224 * 224 * 3)
tmp = conv_bn_layer(
"conv1",
img,
filter_size=7,
channels=3,
num_filters=64,
stride=2,
padding=3)
tmp = img_pool_layer(name="pool1", input=tmp, pool_size=3, stride=2)
# conv2_x: 56x56
tmp = mid_projection(
name="res2_1", input=tmp, num_filters1=64, num_filters2=256, stride=1)
for i in xrange(2, res2_num + 1, 1):
tmp = bottleneck_block(
name="res2_" + str(i), input=tmp, num_filters1=64, num_filters2=256)
# conv3_x: 28x28
tmp = mid_projection(
name="res3_1", input=tmp, num_filters1=128, num_filters2=512)
for i in xrange(2, res3_num + 1, 1):
tmp = bottleneck_block(
name="res3_" + str(i),
input=tmp,
num_filters1=128,
num_filters2=512)
# conv4_x: 14x14
tmp = mid_projection(
name="res4_1", input=tmp, num_filters1=256, num_filters2=1024)
for i in xrange(2, res4_num + 1, 1):
tmp = bottleneck_block(
name="res4_" + str(i),
input=tmp,
num_filters1=256,
num_filters2=1024)
# conv5_x: 7x7
tmp = mid_projection(
name="res5_1", input=tmp, num_filters1=512, num_filters2=2048)
for i in xrange(2, res5_num + 1, 1):
tmp = bottleneck_block(
name="res5_" + str(i),
input=tmp,
num_filters1=512,
num_filters2=2048)
tmp = img_pool_layer(
name='avgpool',
input=tmp,
pool_size=7,
stride=1,
pool_type=AvgPooling())
output = fc_layer(
name='output', input=tmp, size=1000, act=SoftmaxActivation())
if not is_predict:
classification_cost(
input=output, label=data_layer(
name='label', size=1))
def res_net_50():
deep_res_net(3, 4, 6, 3)
def res_net_101():
deep_res_net(3, 4, 23, 3)
def res_net_152():
deep_res_net(3, 8, 36, 3)
if not is_predict:
Inputs("input", "label")
else:
Inputs("input")
# Outputs("cost-softmax" if not is_predict else "output")
Outputs("res5_3_branch2c_conv", "res5_3_branch2c_bn")
if layer_num == 50:
res_net_50()
elif layer_num == 101:
res_net_101()
elif layer_num == 152:
res_net_152()
else:
print("Wrong layer number.")
|
{
"content_hash": "24f275c5c041681ab919640f942a91c0",
"timestamp": "",
"source": "github",
"line_count": 257,
"max_line_length": 80,
"avg_line_length": 27.68093385214008,
"alnum_prop": 0.5822322181613719,
"repo_name": "qingqing01/Paddle",
"id": "015b74cd484596039b9fcf010576ca340d044db7",
"size": "7714",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "demo/model_zoo/resnet/resnet.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "217842"
},
{
"name": "C++",
"bytes": "2768145"
},
{
"name": "CMake",
"bytes": "113668"
},
{
"name": "Cuda",
"bytes": "424141"
},
{
"name": "M4",
"bytes": "40911"
},
{
"name": "Perl",
"bytes": "11412"
},
{
"name": "Python",
"bytes": "889886"
},
{
"name": "Shell",
"bytes": "63769"
}
],
"symlink_target": ""
}
|
import os
import sys
from setuptools import setup
INSTALL_REQUIRES = ['requests >=1.0.3', 'boto >=2.1.1', 'six >=1.2.0', 'urllib3 >= 1.0.2', 'inflection >= 0.3.1']
if sys.version_info < (2, 7, 0):
INSTALL_REQUIRES.append('argparse>=1.1')
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name="qds_sdk",
version="1.9.2",
author="Qubole",
author_email="dev@qubole.com",
description=("Python SDK for coding to the Qubole Data Service API"),
keywords="qubole sdk api",
url="https://github.com/qubole/qds-sdk-py",
packages=['qds_sdk'],
scripts=['bin/qds.py'],
install_requires=INSTALL_REQUIRES,
long_description=read('README.rst'),
classifiers=[
"Environment :: Console",
"Intended Audience :: Developers",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4"
]
)
|
{
"content_hash": "32566604edb6dcc280614048ed70963c",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 113,
"avg_line_length": 33.108108108108105,
"alnum_prop": 0.6065306122448979,
"repo_name": "msumit/qds-sdk-py",
"id": "c4a29efeb73e3d7eda9bdd222dff20bff4f30d8d",
"size": "1225",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "451888"
}
],
"symlink_target": ""
}
|
"""
Simple script to generate stub files for the clingo python module.
"""
import re
import types
import inspect
import os.path
from textwrap import indent
from mako.template import Template
import clingo
TYPING_TYPES = [
'AbstractSet', 'Any', 'AnyStr', 'AsyncContextManager', 'AsyncGenerator', 'AsyncIterable', 'AsyncIterator',
'Awaitable', 'AwaitableGenerator', 'BinaryIO', 'ByteString', 'Callable', 'ChainMap', 'Collection',
'Container', 'ContextManager', 'Coroutine', 'Counter', 'DefaultDict', 'Deque', 'Dict',
'FrozenSet', 'Generator', 'Generic', 'GenericMeta', 'Hashable', 'IO', 'ItemsView',
'Iterable', 'Iterator', 'KeysView', 'List', 'Mapping', 'MappingView', 'Match',
'MutableMapping', 'MutableSequence', 'MutableSet', 'NamedTuple', 'NoReturn', 'Optional', 'Pattern',
'Protocol', 'Reversible', 'Sequence', 'Set', 'Sized', 'SupportsAbs', 'SupportsBytes',
'SupportsComplex', 'SupportsFloat', 'SupportsIndex', 'SupportsInt', 'SupportsRound', 'Text', 'TextIO',
'Tuple', 'TypeAlias', 'TypeVar', 'Union', 'ValuesView']
class TypeTemplate(Template):
'''
Patched template class to only add used types from typing module.
'''
def render(self, *args, **kwargs):
'''
This heuristically adds types from a curated list used in the template to
the import list.
'''
s = Template.render(self, *args, **kwargs)
type_list = []
for x in TYPING_TYPES:
if re.search(r'\b{}\b'.format(x), s):
type_list.append(x)
return s.replace("{TYPES}", ", ".join(type_list))
BASE = os.path.dirname(__file__)
TYPES_TEMPLATE = TypeTemplate("""\
# DO NOT EDIT THIS FILE! It was generated by scratch/mypy.py.
from typing import {TYPES}
C = TypeVar("C", bound="Comparable")
class Comparable(Protocol):
def __lt__(self, other: C) -> bool: ...
def __gt__(self, other: C) -> bool: ...
def __le__(self, other: C) -> bool: ...
def __ge__(self, other: C) -> bool: ...
Key = TypeVar('Key')
Value = TypeVar('Value')
class Lookup(Generic[Key], Collection[Value]):
def __getitem__(self, key: Key) -> Optional[Value]: ...
""")
CLASS_TEMPLATE = Template("""\
class ${sig}:
% for x in functions:
${indent(x.stub(), " ")}
% endfor
% for x in variables:
${x.stub()}
% endfor
% if not functions and not variables:
pass
% endif
""")
MODULE_TEMPLATE = TypeTemplate("""\
# DO NOT EDIT THIS FILE! It was generated by scratch/mypy.py.
from typing import {TYPES}
from abc import ABCMeta
from .types import Comparable, Lookup
from . import ast
% for x in variables:
${x.stub()}
% endfor
% for x in functions:
${x.stub()}
% endfor
% for x in classes:
${x.stub()}
% endfor
""")
def get_sig(value):
"""
Extract the signature of a docstring.
"""
return value.__doc__.strip().splitlines()[0]
class Function:
"""
Something that resembles a function.
"""
def __init__(self, name, value):
self.name = name
self.value = value
def stub(self):
"""
Generate stub for function.
"""
if self.value.__doc__ is None:
return "def {}(*args: Any, **kwargs: Any) -> Any: ...".format(self.name)
return "def {}: ...".format(get_sig(self.value))
class Other:
"""
Something arbitrary.
"""
def __init__(self, name, value):
self.name = name
self.value = value
def stub(self):
"""
Generate stub.
"""
return self.value
class Property:
"""
Something that resembles a property (with a docstring).
"""
def __init__(self, name, value):
self.name = name
self.value = value
def stub(self):
"""
Generate stub for property.
"""
return "{}".format(get_sig(self.value))
class Variable:
"""
Something that resembles a member variable.
"""
def __init__(self, name, value):
self.name = name
self.value = value
def stub(self):
"""
Generate stub for member variable.
"""
return "{}: {}".format(self.name, self.value.__class__.__name__)
class Class:
"""
Something that resembles a class.
"""
def __init__(self, name, value):
self.name = name
self.value = value
def declare(self):
"""
Generate a forward declaration for the class.
"""
return '{} = ForwardRef("{}")'.format(self.name, self.name)
def stub(self):
"""
Generate stub for class.
"""
doc = self.value.__doc__.strip()
abstract = not doc.startswith("{}(".format(self.name))
anc = []
match = re.search(r"Implements: `[^`]*`(, `[^`]*`)*", doc)
if match is not None:
for match in re.finditer(r"`([^`]*)`", match.group(0)):
anc.append(match.group(1))
if abstract:
anc.append("metaclass=ABCMeta")
sig = "{}({})".format(self.name, ", ".join(anc)) if anc else self.name
functions = []
variables = []
if not abstract:
init = doc.splitlines()[0].strip()
init = init.replace(self.name + "(", "__init__(self, ", 1)
init = init.replace(" -> {}".format(self.name), "")
functions.append(Other("__init__", "def {}: ...".format(init)))
for name, value in sorted(self.value.__dict__.items(), key=lambda x: x[0]):
if name.startswith("_"):
continue
if inspect.ismethoddescriptor(value):
functions.append(Function(name, value))
elif inspect.isgetsetdescriptor(value):
variables.append(Property(name, value))
else:
variables.append(Variable(name, value))
return CLASS_TEMPLATE.render(
indent=indent,
sig=sig,
variables=variables,
functions=functions)
class Module:
"""
Something that resembles a module.
"""
def __init__(self, name, value, classes=()):
self.name = name
self.value = value
self.classes = classes
def stub(self):
"""
Generate stub for module.
"""
functions = []
classes = list(self.classes)
modules = []
variables = []
for name, value in sorted(self.value.__dict__.items(), key=lambda x: x[0]):
if name.startswith("_") and name != "__version__":
continue
if isinstance(value, type):
if value.__doc__ is None:
continue
classes.append(Class(name, value))
elif isinstance(value, types.BuiltinFunctionType):
functions.append(Function(name, value))
elif isinstance(value, types.ModuleType):
modules.append((name, value))
else:
variables.append(Variable(name, value))
return MODULE_TEMPLATE.render(
classes=classes,
functions=functions,
variables=variables).rstrip() + "\n"
def parse_class(name, doc):
"""
Extract a class declaration from a docstring.
"""
match = re.search(r"class ({}(\([^)]*\))?):".format(name), doc)
csig = match.group(1)
start = match.start()
end = doc.find("```", start)
doc = doc[start:end]
variables = []
start = doc.find("Attributes")
end = doc.find('"""', start)
attributes = doc[start:end]
for match in re.finditer(r"^ ([^ :]*): (.*)$", attributes, flags=re.MULTILINE):
variables.append(Other(match.group(1), "{}: {}".format(match.group(1), match.group(2))))
functions = []
for match in re.finditer(r"(@abstractmethod.*?)?(def .*? -> .*?:)", doc, flags=re.MULTILINE | re.DOTALL):
fsig = match.group(2)
fun = re.match(r"def ([^(]*)", fsig).group(1)
fsig = re.sub("\n *", " ", fsig, flags=re.MULTILINE)
if match.group(1) is not None:
fsig = "@abstractmethod\n{}".format(fsig)
functions.append(Other(fun, "{} ...".format(fsig)))
return CLASS_TEMPLATE.render(
indent=indent,
sig=csig,
functions=functions,
variables=variables)
def main():
"""
Write the types.pyi, __init__.pyi, ast.pyi files for the clingo module.
The files are completely generated from the docstrings.
"""
classes = [
Other("Application", parse_class("Application", clingo.clingo_main.__doc__)),
Other("Propagator", parse_class("Propagator", clingo.Control.register_propagator.__doc__)),
Other("Observer", parse_class("Observer", clingo.Control.register_observer.__doc__))]
with open(os.path.join(BASE, "..", "libpyclingo", "clingo", "types.pyi"), "w") as handle:
handle.write(TYPES_TEMPLATE.render())
with open(os.path.join(BASE, "..", "libpyclingo", "clingo", "__init__.pyi"), "w") as handle:
handle.write(Module("clingo", clingo, classes).stub())
with open(os.path.join(BASE, "..", "libpyclingo", "clingo", "ast.pyi"), "w") as handle:
handle.write(Module("clingo.ast", clingo.ast).stub())
if __name__ == "__main__":
main()
|
{
"content_hash": "12e6efb54c280d43b5f30fbf0a740c50",
"timestamp": "",
"source": "github",
"line_count": 328,
"max_line_length": 110,
"avg_line_length": 28.11890243902439,
"alnum_prop": 0.562181502764827,
"repo_name": "potassco/clingo",
"id": "63cdd48211e8aec3ba5ec8aa58e451c18a1453a7",
"size": "9245",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scratch/mypy.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "145"
},
{
"name": "C",
"bytes": "1632693"
},
{
"name": "C++",
"bytes": "2932571"
},
{
"name": "CMake",
"bytes": "47271"
},
{
"name": "Haskell",
"bytes": "1685"
},
{
"name": "Makefile",
"bytes": "4461"
},
{
"name": "Python",
"bytes": "418485"
},
{
"name": "SWIG",
"bytes": "821"
},
{
"name": "Shell",
"bytes": "21236"
},
{
"name": "Yacc",
"bytes": "48232"
}
],
"symlink_target": ""
}
|
import pytest
from rlp.sedes import big_endian_int
from rlp.sedes.lists import CountableList
from rlp import SerializationError
def test_countable_list():
l1 = CountableList(big_endian_int)
serializable = ([], [1, 2], list(range(500)))
for s in serializable:
r = l1.serialize(s)
assert l1.deserialize(r) == s
not_serializable = ([1, 'asdf'], ['asdf'], [1, [2]], [[]])
for n in not_serializable:
with pytest.raises(SerializationError):
l1.serialize(n)
l2 = CountableList(CountableList(big_endian_int))
serializable = ([], [[]], [[1, 2, 3], [4]], [[5], [6, 7, 8]], [[], [],
[9, 0]])
for s in serializable:
r = l2.serialize(s)
assert l2.deserialize(r) == s
not_serializable = ([[[]]], [1, 2], [1, ['asdf'], ['fdsa']])
for n in not_serializable:
with pytest.raises(SerializationError):
l2.serialize(n)
|
{
"content_hash": "000dcb6b8274de5c80e2ea2095df39da",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 74,
"avg_line_length": 34.55555555555556,
"alnum_prop": 0.572347266881029,
"repo_name": "holiman/pyrlp",
"id": "f4f00fd578205759f8adb7b28eb8ef985c1e90aa",
"size": "933",
"binary": false,
"copies": "3",
"ref": "refs/heads/develop",
"path": "tests/test_countablelist.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "1680"
},
{
"name": "Python",
"bytes": "48999"
}
],
"symlink_target": ""
}
|
import requests
import json
import redis
import random
# max user numbers for sampling
MAX_SYSTEM_USERS = 4000000
MAX_USER=50000
# coefficient for all clean users, not in loan collection system
ALL_CLEAN_USER_COE= 0.315222
ALL_CLEAN_USER_SAMPLE_METRIC = int(MAX_USER*ALL_CLEAN_USER_COE)
# coefficient for clean user, without loan postpone, both in and not in loan collection system
CLEAN_USER_COE= 0.77922
CLEAN_USER_SAMPLE_METRIC = int(MAX_USER*CLEAN_USER_COE)
# coefficient for clean users, in loan collection system
COLLECTION_CLEAN_COE= 0.463997
COLLECTION_CLEAN_USER_SAMPLE_METRIC = int(MAX_USER*COLLECTION_CLEAN_COE)
# coefficient for loan postpone user in loan collection system
COLLECTION_USER_COE= 0.22078
COLLECTION_USER_SAMPLE_METRIC = int(MAX_USER*COLLECTION_USER_COE)
# global redis client initialization
g_rediscli = redis.Redis("119.39.46.78",6379)
# redis set name for clean user
REDIS_SET_NAME_CLEAN_USER = "clean_user_set"
# redis loan collection user set
REDIS_SET_NAME_COLLECTION_USER = "collection_user_set"
def httpRequest4Id(id):
headers0 = {
"Content-type": "application/json"
}
data0 = {
"user_id": [id]
}
resp = requests.post("http://10.10.3.42:17015/risk/getUserPostLoanVo", data=json.dumps(data0), headers=headers0)
return resp.content
def SamplingUsers(max_user, wanted_users, redis_set_name, checkfield):
'''
max_user: the system user capacity
wanted_users: the number of users to sample for this type of user
redis_set_name: the redis set that holds the sampled user ids, e.g. REDIS_SET_NAME_CLEAN_USER or REDIS_SET_NAME_COLLECTION_USER
checkfield: field to check in the returned json object; field names are isAllClean, isDebt
'''
range_num = max_user
remaining = range_num
origin_wanted = wanted_users
#range_num = 400000
#remaining = range_num
#origin_wanted = 100
setlength = 0
try:
while True:
for round in xrange(0,range_num):
id = random.randrange(0,range_num) % remaining
setlength = g_rediscli.scard(redis_set_name)
if setlength < origin_wanted:
if not g_rediscli.sismember(redis_set_name,id):
resp = httpRequest4Id(id)
jsonobj = json.loads(resp)
if jsonobj[checkfield]:
g_rediscli.sadd(redis_set_name,id)
print "user id added for" + checkfield
print resp
else:
continue
else:
continue
setlength = g_rediscli.scard(redis_set_name)
if setlength >=origin_wanted:
break
remaining = remaining - 1
if not remaining:
remaining = range_num
setlength = g_rediscli.scard(redis_set_name)
if setlength >= origin_wanted:
break
else:
remaining = range_num
except Exception as e:
print("error encountered when sampling users!")
print(e)
finally:
pass
return setlength
def getknuthRandom(range_num, wanted):
id_set = set()
remaining = range_num
origin_wanted = wanted
while True:
for i in range(0, range_num):
id = random.randrange(0, range_num) % remaining
if len(id_set) < origin_wanted:
if id not in id_set:
id_set.add(id)
else:
continue
# job done!
if len(id_set) >= origin_wanted:
break
remaining = remaining - 1
# if remaining is zero,restore it
if not remaining:
remaining = range_num
#job done
if len(id_set) >= origin_wanted:
break
else:
remaining = range_num
return id_set
httpRequest4Id(1080)
SamplingUsers(400000,1000,REDIS_SET_NAME_CLEAN_USER,"isAllClean")
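# A minimal standard-library sketch of the core idea behind getknuthRandom /
# SamplingUsers above: draw a fixed number of distinct IDs from range(max_user).
# Illustration only; it omits the HTTP filtering and Redis bookkeeping, and it
# reuses the `random` module already imported at the top of this file.
def sample_user_ids(max_user, wanted):
    """Return `wanted` distinct IDs drawn uniformly from [0, max_user)."""
    return set(random.sample(range(max_user), wanted))
# e.g. sample_user_ids(400000, 1000) -> a set of 1000 distinct candidate user IDs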
|
{
"content_hash": "691ad80d925b87d8cb3ef813158e66cc",
"timestamp": "",
"source": "github",
"line_count": 143,
"max_line_length": 118,
"avg_line_length": 29.657342657342657,
"alnum_prop": 0.5703843433152558,
"repo_name": "snakedragon/scrapy-hive",
"id": "decc7b036f4d8cbfb2b003a4d3f2d7cfba506b88",
"size": "4267",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "starlord/test/http_channel.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "109420"
}
],
"symlink_target": ""
}
|
import sqlite3
con = sqlite3.connect('todo.db') # Warning: This file is created in the current directory
con.execute("CREATE TABLE todo (id INTEGER PRIMARY KEY, task char(100) NOT NULL, status bool NOT NULL)")
con.execute("INSERT INTO todo (task,status) VALUES ('test',0)")
con.execute("INSERT INTO todo (task,status) VALUES ('Read A-byte-of-python to get a good introduction into Python',0)")
con.execute("INSERT INTO todo (task,status) VALUES ('Visit the Python website',1)")
con.execute("INSERT INTO todo (task,status) VALUES ('Test various editors for and check the syntax highlighting',1)")
con.execute("INSERT INTO todo (task,status) VALUES ('Choose your favorite WSGI-Framework',0)")
con.commit()
|
{
"content_hash": "94dbcd32a4f664e38a49bb511b1d7925",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 119,
"avg_line_length": 64.18181818181819,
"alnum_prop": 0.7492917847025495,
"repo_name": "rarewin/my-studies",
"id": "000619768e9bae332c4fe3779362f3185b3e52ae",
"size": "754",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/bottle-test/tutorial_todo-list/create_table.py",
"mode": "33261",
"license": "bsd-2-clause",
"language": [
{
"name": "Go",
"bytes": "7047"
},
{
"name": "HTML",
"bytes": "4558"
},
{
"name": "JavaScript",
"bytes": "61041"
},
{
"name": "Python",
"bytes": "18048"
},
{
"name": "Ruby",
"bytes": "182"
},
{
"name": "Rust",
"bytes": "24815"
},
{
"name": "Shell",
"bytes": "762"
},
{
"name": "Vue",
"bytes": "4565"
}
],
"symlink_target": ""
}
|
"""
test_formatting
~~~~~~~~~~~~~~~
Tests for the :mod:`~ydf.formatting` module.
"""
import pytest
from ydf import formatting
@pytest.fixture(scope='module', params=[
['foo'],
['bar'],
['baz']
])
def one_item_list(request):
"""
Fixture that yields one item lists.
"""
return request.param
@pytest.fixture(scope='module', params=[
['foo', 'bar'],
['foo', 'bar', 'baz'],
['foo', 'bar', 'baz', 'jaz'],
['foo', 'bar', 'baz', 'jaz', 'faz'],
['foo', 'bar', 'baz', 'jaz', 'faz', 'zaz']
])
def multi_item_list(request):
"""
Fixture that yields multi item lists.
"""
return request.param
@pytest.fixture(scope='module', params=[
dict(name='foo'),
dict(foo='bar'),
dict(bar='baz')
])
def one_item_dict(request):
"""
Fixture that yields one item dicts.
"""
return request.param
@pytest.fixture(scope='module', params=[
dict(foo='foo'),
dict(foo='foo', bar='bar'),
dict(foo='foo', bar='bar', baz='baz'),
dict(foo='foo', bar='bar', baz='baz', jaz='jaz'),
dict(foo='foo', bar='bar', baz='baz', jaz='jaz', faz='faz'),
dict(foo='foo', bar='bar', baz='baz', jaz='jaz', faz='faz', zaz='zaz')
])
def multi_item_dict(request):
"""
Fixture that yields multi item dicts.
"""
return request.param
def test_list_doesnt_line_break_on_single_item(one_item_list):
"""
Assert that :func:`~ydf.formatting.list_with_conditional_line_breaks` doesn't insert a line break
for lists with only one item.
"""
assert formatting.list_with_conditional_line_breaks(one_item_list) == one_item_list[0]
def test_list_uses_given_line_break(multi_item_list):
"""
Assert that :func:`~ydf.formatting.list_with_conditional_line_breaks` uses the given line break
string as expected.
"""
line_break = '$'
expected = '{}{}'.format(line_break, ' ' * formatting.DEFAULT_INDENT).join(multi_item_list)
assert formatting.list_with_conditional_line_breaks(multi_item_list, line_break) == expected
def test_list_uses_given_indentation(multi_item_list):
"""
Assert that :func:`~ydf.formatting.list_with_conditional_line_breaks` uses the given indentation size
as expected.
"""
indent = 2
expected = '{}{}'.format(formatting.DEFAULT_LINE_BREAK, ' ' * indent).join(multi_item_list)
assert formatting.list_with_conditional_line_breaks(multi_item_list, indent=indent) == expected
def test_dict_doesnt_line_break_on_single_item(one_item_dict):
"""
Assert that :func:`~ydf.formatting.dict_with_conditional_line_breaks` doesn't insert a line break
for dicts with only one item.
"""
kvp = list(one_item_dict.items())[0]
assert formatting.dict_with_conditional_line_breaks(one_item_dict) == '{}={}'.format(*kvp)
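# The assertions above pin down the behaviour of the two helpers without showing
# their bodies. A plausible reference implementation consistent with these tests
# is sketched below; it is a reconstruction for illustration, not ydf's actual
# code, and the two DEFAULT_* values are assumptions.
DEFAULT_LINE_BREAK = '\\\n'   # assumed default line-break token
DEFAULT_INDENT = 4            # assumed default indentation width
def list_with_conditional_line_breaks(items, line_break=DEFAULT_LINE_BREAK,
                                      indent=DEFAULT_INDENT):
    """Join items with `line_break` plus `indent` spaces; a single-item list
    comes back unchanged because join() inserts no separator."""
    return '{}{}'.format(line_break, ' ' * indent).join(items)
def dict_with_conditional_line_breaks(mapping, line_break=DEFAULT_LINE_BREAK,
                                      indent=DEFAULT_INDENT):
    """Render key=value pairs, breaking lines only when there is more than one."""
    pairs = ['{}={}'.format(k, v) for k, v in mapping.items()]
    return list_with_conditional_line_breaks(pairs, line_break, indent)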
|
{
"content_hash": "c497d019bf0bcebfd2afb56eab07ac45",
"timestamp": "",
"source": "github",
"line_count": 100,
"max_line_length": 105,
"avg_line_length": 28.07,
"alnum_prop": 0.6255789098681867,
"repo_name": "ahawker/ydf",
"id": "35288a1f264f062e327173d950189053ebafa344",
"size": "2807",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_formatting.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "2912"
},
{
"name": "Python",
"bytes": "45037"
},
{
"name": "Smarty",
"bytes": "475"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from proj.celery import celery
@celery.task
def add(x, y):
return x + y
@celery.task
def mul(x, y):
return x * y
@celery.task
def xsum(numbers):
return sum(numbers)
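# Illustrative usage only: requires the broker configured in proj/celery.py and a
# running worker (e.g. started with `celery -A proj worker`). delay() enqueues a
# task and AsyncResult.get() blocks for its result; both are standard Celery APIs.
if __name__ == '__main__':
    result = add.delay(4, 4)                      # enqueue asynchronously
    print(result.get(timeout=10))                 # -> 8 once a worker has run it
    print(xsum.delay([1, 2, 3]).get(timeout=10))  # -> 6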
|
{
"content_hash": "50993da7e81c65a6baf2ca1fc7f996d9",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 38,
"avg_line_length": 11.789473684210526,
"alnum_prop": 0.6607142857142857,
"repo_name": "telamonian/saga-python",
"id": "129db73b1429c2d457b7f82a92267c960cfd3ce2",
"size": "224",
"binary": false,
"copies": "5",
"ref": "refs/heads/devel",
"path": "tests/issues/issue_211/proj/tasks.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Gnuplot",
"bytes": "2790"
},
{
"name": "Makefile",
"bytes": "142"
},
{
"name": "Python",
"bytes": "1551101"
},
{
"name": "Shell",
"bytes": "55277"
}
],
"symlink_target": ""
}
|
'''
$Id: tzfile.py,v 1.8 2004/06/03 00:15:24 zenzen Exp $
'''
from datetime import datetime
from struct import unpack, calcsize
from pytz.tzinfo import StaticTzInfo, DstTzInfo, memorized_ttinfo
from pytz.tzinfo import memorized_datetime, memorized_timedelta
def _byte_string(s):
"""Cast a string or byte string to an ASCII byte string."""
return s.encode('ASCII')
_NULL = _byte_string('\0')
def _std_string(s):
"""Cast a string or byte string to an ASCII string."""
return str(s.decode('ASCII'))
def build_tzinfo(zone, fp):
head_fmt = '>4s c 15x 6l'
head_size = calcsize(head_fmt)
(magic, format, ttisgmtcnt, ttisstdcnt, leapcnt, timecnt,
typecnt, charcnt) = unpack(head_fmt, fp.read(head_size))
# Make sure it is a tzfile(5) file
assert magic == _byte_string('TZif'), 'Got magic %s' % repr(magic)
# Read out the transition times, localtime indices and ttinfo structures.
data_fmt = '>%(timecnt)dl %(timecnt)dB %(ttinfo)s %(charcnt)ds' % dict(
timecnt=timecnt, ttinfo='lBB' * typecnt, charcnt=charcnt)
data_size = calcsize(data_fmt)
data = unpack(data_fmt, fp.read(data_size))
# make sure we unpacked the right number of values
assert len(data) == 2 * timecnt + 3 * typecnt + 1
transitions = [memorized_datetime(trans)
for trans in data[:timecnt]]
lindexes = list(data[timecnt:2 * timecnt])
ttinfo_raw = data[2 * timecnt:-1]
tznames_raw = data[-1]
del data
# Process ttinfo into separate structs
ttinfo = []
tznames = {}
i = 0
while i < len(ttinfo_raw):
# have we looked up this timezone name yet?
tzname_offset = ttinfo_raw[i + 2]
if tzname_offset not in tznames:
nul = tznames_raw.find(_NULL, tzname_offset)
if nul < 0:
nul = len(tznames_raw)
tznames[tzname_offset] = _std_string(
tznames_raw[tzname_offset:nul])
ttinfo.append((ttinfo_raw[i],
bool(ttinfo_raw[i + 1]),
tznames[tzname_offset]))
i += 3
# Now build the timezone object
if len(ttinfo) == 1 or len(transitions) == 0:
ttinfo[0][0], ttinfo[0][2]
cls = type(zone, (StaticTzInfo,), dict(
zone=zone,
_utcoffset=memorized_timedelta(ttinfo[0][0]),
_tzname=ttinfo[0][2]))
else:
# Early dates use the first standard time ttinfo
i = 0
while ttinfo[i][1]:
i += 1
if ttinfo[i] == ttinfo[lindexes[0]]:
transitions[0] = datetime.min
else:
transitions.insert(0, datetime.min)
lindexes.insert(0, i)
# calculate transition info
transition_info = []
for i in range(len(transitions)):
inf = ttinfo[lindexes[i]]
utcoffset = inf[0]
if not inf[1]:
dst = 0
else:
for j in range(i - 1, -1, -1):
prev_inf = ttinfo[lindexes[j]]
if not prev_inf[1]:
break
dst = inf[0] - prev_inf[0] # dst offset
# Bad dst? Look further. DST > 24 hours happens when
                # a timezone has moved across the international dateline.
if dst <= 0 or dst > 3600 * 3:
for j in range(i + 1, len(transitions)):
stdinf = ttinfo[lindexes[j]]
if not stdinf[1]:
dst = inf[0] - stdinf[0]
if dst > 0:
break # Found a useful std time.
tzname = inf[2]
# Round utcoffset and dst to the nearest minute or the
# datetime library will complain. Conversions to these timezones
# might be up to plus or minus 30 seconds out, but it is
# the best we can do.
utcoffset = int((utcoffset + 30) // 60) * 60
dst = int((dst + 30) // 60) * 60
transition_info.append(memorized_ttinfo(utcoffset, dst, tzname))
cls = type(zone, (DstTzInfo,), dict(
zone=zone,
_utc_transition_times=transitions,
_transition_info=transition_info))
return cls()
if __name__ == '__main__':
import os.path
from pprint import pprint
base = os.path.join(os.path.dirname(__file__), 'zoneinfo')
tz = build_tzinfo('Australia/Melbourne',
open(os.path.join(base, 'Australia', 'Melbourne'), 'rb'))
tz = build_tzinfo('US/Eastern',
open(os.path.join(base, 'US', 'Eastern'), 'rb'))
pprint(tz._utc_transition_times)
|
{
"content_hash": "49756e22d3fa2cbc72b8bccc30d3940f",
"timestamp": "",
"source": "github",
"line_count": 133,
"max_line_length": 79,
"avg_line_length": 35.51127819548872,
"alnum_prop": 0.5456277789540547,
"repo_name": "kimjinyong/i2nsf-framework",
"id": "99e74489b859e21fcaa68e93089035c3d81a73c8",
"size": "4723",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "Hackathon-112/analyzer/.local/lib/python3.5/site-packages/pytz/tzfile.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "4396520"
},
{
"name": "C++",
"bytes": "9389"
},
{
"name": "CSS",
"bytes": "51736"
},
{
"name": "Dockerfile",
"bytes": "3839"
},
{
"name": "Emacs Lisp",
"bytes": "24812"
},
{
"name": "Erlang",
"bytes": "1364078"
},
{
"name": "HTML",
"bytes": "42486541"
},
{
"name": "Hack",
"bytes": "6349"
},
{
"name": "Java",
"bytes": "7976"
},
{
"name": "JavaScript",
"bytes": "533000"
},
{
"name": "Makefile",
"bytes": "401170"
},
{
"name": "PHP",
"bytes": "164007"
},
{
"name": "Perl",
"bytes": "2188"
},
{
"name": "Python",
"bytes": "3004949"
},
{
"name": "QMake",
"bytes": "360"
},
{
"name": "Roff",
"bytes": "3906372"
},
{
"name": "Shell",
"bytes": "83872"
},
{
"name": "XSLT",
"bytes": "167018"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('customer_subsystem', '0001_initial'),
]
operations = [
migrations.RenameField(
model_name='reservation',
old_name='checkout_tiem',
new_name='checkout_time',
),
migrations.AddField(
model_name='images',
name='image',
field=models.ImageField(null=True, upload_to=b''),
preserve_default=True,
),
]
|
{
"content_hash": "7c6d529c70320207879b4c84bd8d01c8",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 62,
"avg_line_length": 24.125,
"alnum_prop": 0.5630397236614854,
"repo_name": "erfannoury/Simorgh",
"id": "b102d622812a3e1dc9e29de25654fd35aa26e06f",
"size": "603",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "SimorghDjango/customer_subsystem/migrations/0002_auto_20150201_1042.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ASP",
"bytes": "98"
},
{
"name": "C#",
"bytes": "143823"
},
{
"name": "CSS",
"bytes": "17064"
},
{
"name": "HTML",
"bytes": "171172"
},
{
"name": "JavaScript",
"bytes": "15310"
},
{
"name": "Python",
"bytes": "23140"
}
],
"symlink_target": ""
}
|
from graphics import *
SCALE = 70.0
MATERIALS_COLOR = {"stone": "darkgrey",
"sheep": color_rgb(149,200,89),
"brick": color_rgb(183,117,73),
"wood": "darkgreen",
"wheat": "gold",
"desert": "beige"}
class Display:
def __init__(self, title = "Catan", size = (800, 800)):
# Window Height / Width
self.width = size[0]
self.height = size[1]
self.w_offset = size[0]/2
self.h_offset = size[1]/2
# Create the Window
self.window = GraphWin(title, size[0], size[1], autoflush = False)
self.window.setBackground(color_rgb(96,172,226))
"""
Drawing Functions
"""
def drawBoard(self, board):
for hexagon in board.hexagons.values():
self.drawHexagon(hexagon)
for vertex in board.vertices.values():
self.drawWeight(vertex)
def drawHexagon(self, hexagon):
self._drawPolygon(MATERIALS_COLOR[hexagon.resource], *[v.pos for v in hexagon.vertices])
self.drawResource(hexagon)
def drawResource(self, hexagon):
self._drawCircle(hexagon.pos, 20, "white")
self._drawText(hexagon.pos, str(hexagon.value))
def drawWeight(self, vertex):
self._drawRectangle((vertex.pos[0] - .2, vertex.pos[1] - .2), (vertex.pos[0] + .2, vertex.pos[1] + .2), color = "white")
self._drawText(vertex.pos, str(vertex.weight))
def drawDistance(self, vertex):
self._drawText(vertex.pos, str(vertex.distance))
def drawButton(self, text, pos1, pos2, color = "white", scale = False):
if scale:
pos1 = self._convertToNormal(pos1)
pos2 = self._convertToNormal(pos2)
return \
self._drawRectangle(pos1, pos2, color), \
self._drawText(((pos1[0] + pos2[0])/2.0, (pos1[1] + pos2[1])/2.0), text)
def drawPath(self, points, color = "black"):
return [self._drawLine(p, color = color) for p in zip(points, points[1:])]
"""
Lifecycle Functions
"""
def update(self):
self.window.update()
def input(self):
return self.window.checkMouse()
def wait(self):
return self.window.getMouse()
def close(self):
self.window.close()
"""
Helper Functions
"""
def _drawPolygon(self, color, *points):
        _shape = Polygon(list(map(self._makePoint, points)))
_shape.setFill(color)
_shape.draw(self.window)
return _shape
def _drawRectangle(self, pos1, pos2, color):
_rect = Rectangle(self._makePoint(pos1), self._makePoint(pos2))
_rect.setFill(color)
_rect.draw(self.window)
return _rect
def _drawCircle(self, pos, rad, color):
_circle = Circle(self._makePoint(pos), rad)
_circle.setFill(color)
_circle.draw(self.window)
return _circle
def _drawLine(self, A, size = 5, color = "black"):
_line = Line(self._makePoint(A[0]), self._makePoint(A[1]))
_line.setArrow("last")
_line.setFill(color)
_line.setWidth(size)
_line.draw(self.window)
return _line
def _drawText(self, pos, value, size = 14):
_text = Text(self._makePoint(pos),value)
_text.setSize(size)
_text.draw(self.window)
return _text
def _makePoint(self, pos):
return Point(self.w_offset + SCALE * pos[0], self.h_offset - SCALE * pos[1])
def _convertToNormal(self, pos):
return (pos[0] - self.w_offset) / SCALE, (- pos[1] + self.h_offset) / SCALE
if __name__ == "__main__":
display = Display()
display._drawPolygon("grey", (0,0), (0,100), (100,100), (100,0))
display.wait()
display.close()
|
{
"content_hash": "d545946f118b9c0a08a13f63de43f43b",
"timestamp": "",
"source": "github",
"line_count": 123,
"max_line_length": 128,
"avg_line_length": 30.479674796747968,
"alnum_prop": 0.5710856228327554,
"repo_name": "alexadkins/Catan-Pathfinder",
"id": "172c9006fd63873a086bb39c7618d69248e5c7d8",
"size": "3749",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/views/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "70096"
}
],
"symlink_target": ""
}
|
from setuptools import setup, find_packages
def parse_requirements(filename):
""" load requirements from a pip requirements file """
lineiter = (line.strip() for line in open(filename))
return [line for line in lineiter if line and not line.startswith("#")]
setup(
name="nagios-mesos",
description="A selection of Nagios plugins to monitor Apache Mesos.",
long_description=open('README.rst').read(),
version="0.2.3",
packages=find_packages(),
author='Steven Schlansker',
author_email='sschlansker@opentable.com',
url="https://github.com/opentable/nagios-mesos",
scripts=["check_mesos.py"],
license="Apache 2",
install_requires=parse_requirements("requirements.txt"),
include_package_data=True,
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Console',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: Apache Software License',
'Topic :: System :: Monitoring',
'Topic :: System :: Networking :: Monitoring'
]
)
|
{
"content_hash": "cf34ecfd4206e710a6031774dfa56515",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 75,
"avg_line_length": 36.44827586206897,
"alnum_prop": 0.6688741721854304,
"repo_name": "kamaradclimber/nagios-mesos",
"id": "ce9e3ec3543e615037b0cd419ff28a948dc67811",
"size": "1057",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "5016"
}
],
"symlink_target": ""
}
|
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class vpnvserver_vpnsessionpolicy_binding(base_resource) :
""" Binding class showing the vpnsessionpolicy that can be bound to vpnvserver.
"""
def __init__(self) :
self._policy = ""
self._priority = 0
self._acttype = 0
self._name = ""
self._secondary = False
self._groupextraction = False
self._gotopriorityexpression = ""
self._bindpoint = ""
self.___count = 0
@property
def priority(self) :
"""The priority, if any, of the VPN virtual server policy.
"""
try :
return self._priority
except Exception as e:
raise e
@priority.setter
def priority(self, priority) :
"""The priority, if any, of the VPN virtual server policy.
"""
try :
self._priority = priority
except Exception as e:
raise e
@property
def gotopriorityexpression(self) :
"""Expression or other value specifying the next policy to evaluate if the current policy evaluates to TRUE. Specify one of the following values:
* NEXT - Evaluate the policy with the next higher priority number.
* END - End policy evaluation.
* USE_INVOCATION_RESULT - Applicable if this policy invokes another policy label. If the final goto in the invoked policy label has a value of END, the evaluation stops. If the final goto is anything other than END, the current policy label performs a NEXT.
* A default syntax or classic expression that evaluates to a number.
If you specify an expression, the number to which it evaluates determines the next policy to evaluate, as follows:
* If the expression evaluates to a higher numbered priority, the policy with that priority is evaluated next.
* If the expression evaluates to the priority of the current policy, the policy with the next higher numbered priority is evaluated next.
* If the expression evaluates to a number that is larger than the largest numbered priority, policy evaluation ends.
An UNDEF event is triggered if:
* The expression is invalid.
* The expression evaluates to a priority number that is numerically lower than the current policy's priority.
* The expression evaluates to a priority number that is between the current policy's priority number (say, 30) and the highest priority number (say, 100), but does not match any configured priority number (for example, the expression evaluates to the number 85). This example assumes that the priority number increments by 10 for every successive policy, and therefore a priority number of 85 does not exist in the policy label.
"""
try :
return self._gotopriorityexpression
except Exception as e:
raise e
@gotopriorityexpression.setter
def gotopriorityexpression(self, gotopriorityexpression) :
"""Expression or other value specifying the next policy to evaluate if the current policy evaluates to TRUE. Specify one of the following values:
* NEXT - Evaluate the policy with the next higher priority number.
* END - End policy evaluation.
* USE_INVOCATION_RESULT - Applicable if this policy invokes another policy label. If the final goto in the invoked policy label has a value of END, the evaluation stops. If the final goto is anything other than END, the current policy label performs a NEXT.
* A default syntax or classic expression that evaluates to a number.
If you specify an expression, the number to which it evaluates determines the next policy to evaluate, as follows:
* If the expression evaluates to a higher numbered priority, the policy with that priority is evaluated next.
* If the expression evaluates to the priority of the current policy, the policy with the next higher numbered priority is evaluated next.
* If the expression evaluates to a number that is larger than the largest numbered priority, policy evaluation ends.
An UNDEF event is triggered if:
* The expression is invalid.
* The expression evaluates to a priority number that is numerically lower than the current policy's priority.
* The expression evaluates to a priority number that is between the current policy's priority number (say, 30) and the highest priority number (say, 100), but does not match any configured priority number (for example, the expression evaluates to the number 85). This example assumes that the priority number increments by 10 for every successive policy, and therefore a priority number of 85 does not exist in the policy label.
"""
try :
self._gotopriorityexpression = gotopriorityexpression
except Exception as e:
raise e
@property
def policy(self) :
"""The name of the policy, if any, bound to the VPN virtual server.
"""
try :
return self._policy
except Exception as e:
raise e
@policy.setter
def policy(self, policy) :
"""The name of the policy, if any, bound to the VPN virtual server.
"""
try :
self._policy = policy
except Exception as e:
raise e
@property
def groupextraction(self) :
"""Binds the authentication policy to a tertiary chain which will be used only for group extraction. The user will not authenticate against this server, and this will only be called if primary and/or secondary authentication has succeeded.
"""
try :
return self._groupextraction
except Exception as e:
raise e
@groupextraction.setter
def groupextraction(self, groupextraction) :
"""Binds the authentication policy to a tertiary chain which will be used only for group extraction. The user will not authenticate against this server, and this will only be called if primary and/or secondary authentication has succeeded.
"""
try :
self._groupextraction = groupextraction
except Exception as e:
raise e
@property
def name(self) :
"""Name of the virtual server.<br/>Minimum length = 1.
"""
try :
return self._name
except Exception as e:
raise e
@name.setter
def name(self, name) :
"""Name of the virtual server.<br/>Minimum length = 1
"""
try :
self._name = name
except Exception as e:
raise e
@property
def secondary(self) :
"""Binds the authentication policy as the secondary policy to use in a two-factor configuration. A user must then authenticate not only via a primary authentication method but also via a secondary authentication method. User groups are aggregated across both. The user name must be exactly the same for both authentication methods, but they can require different passwords.
"""
try :
return self._secondary
except Exception as e:
raise e
@secondary.setter
def secondary(self, secondary) :
"""Binds the authentication policy as the secondary policy to use in a two-factor configuration. A user must then authenticate not only via a primary authentication method but also via a secondary authentication method. User groups are aggregated across both. The user name must be exactly the same for both authentication methods, but they can require different passwords.
"""
try :
self._secondary = secondary
except Exception as e:
raise e
@property
def bindpoint(self) :
"""Bind point to which to bind the policy. Applies only to rewrite and cache policies. If you do not set this parameter, the policy is bound to REQ_DEFAULT or RES_DEFAULT, depending on whether the policy rule is a response-time or a request-time expression.
"""
try :
return self._bindpoint
except Exception as e:
raise e
@bindpoint.setter
def bindpoint(self, bindpoint) :
"""Bind point to which to bind the policy. Applies only to rewrite and cache policies. If you do not set this parameter, the policy is bound to REQ_DEFAULT or RES_DEFAULT, depending on whether the policy rule is a response-time or a request-time expression.
"""
try :
self._bindpoint = bindpoint
except Exception as e:
raise e
@property
def acttype(self) :
try :
return self._acttype
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(vpnvserver_vpnsessionpolicy_binding_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.vpnvserver_vpnsessionpolicy_binding
except Exception as e :
raise e
def _get_object_name(self) :
""" Returns the value of object identifier argument
"""
try :
if (self.name) :
return str(self.name)
return None
except Exception as e :
raise e
@classmethod
def add(cls, client, resource) :
try :
if resource and type(resource) is not list :
updateresource = vpnvserver_vpnsessionpolicy_binding()
updateresource.name = resource.name
updateresource.policy = resource.policy
updateresource.secondary = resource.secondary
updateresource.groupextraction = resource.groupextraction
updateresource.gotopriorityexpression = resource.gotopriorityexpression
updateresource.bindpoint = resource.bindpoint
return updateresource.update_resource(client)
else :
if resource and len(resource) > 0 :
updateresources = [vpnvserver_vpnsessionpolicy_binding() for _ in range(len(resource))]
for i in range(len(resource)) :
updateresources[i].name = resource[i].name
updateresources[i].policy = resource[i].policy
updateresources[i].secondary = resource[i].secondary
updateresources[i].groupextraction = resource[i].groupextraction
updateresources[i].gotopriorityexpression = resource[i].gotopriorityexpression
updateresources[i].bindpoint = resource[i].bindpoint
return cls.update_bulk_request(client, updateresources)
except Exception as e :
raise e
@classmethod
def delete(cls, client, resource) :
try :
if resource and type(resource) is not list :
deleteresource = vpnvserver_vpnsessionpolicy_binding()
deleteresource.name = resource.name
deleteresource.policy = resource.policy
deleteresource.secondary = resource.secondary
deleteresource.groupextraction = resource.groupextraction
deleteresource.bindpoint = resource.bindpoint
return deleteresource.delete_resource(client)
else :
if resource and len(resource) > 0 :
deleteresources = [vpnvserver_vpnsessionpolicy_binding() for _ in range(len(resource))]
for i in range(len(resource)) :
deleteresources[i].name = resource[i].name
deleteresources[i].policy = resource[i].policy
deleteresources[i].secondary = resource[i].secondary
deleteresources[i].groupextraction = resource[i].groupextraction
deleteresources[i].bindpoint = resource[i].bindpoint
return cls.delete_bulk_request(client, deleteresources)
except Exception as e :
raise e
@classmethod
def get(cls, service, name) :
""" Use this API to fetch vpnvserver_vpnsessionpolicy_binding resources.
"""
try :
obj = vpnvserver_vpnsessionpolicy_binding()
obj.name = name
response = obj.get_resources(service)
return response
except Exception as e:
raise e
@classmethod
def get_filtered(cls, service, name, filter_) :
""" Use this API to fetch filtered set of vpnvserver_vpnsessionpolicy_binding resources.
        Filter string should be in JSON format, e.g.: "port:80,servicetype:HTTP".
"""
try :
obj = vpnvserver_vpnsessionpolicy_binding()
obj.name = name
option_ = options()
option_.filter = filter_
response = obj.getfiltered(service, option_)
return response
except Exception as e:
raise e
@classmethod
def count(cls, service, name) :
""" Use this API to count vpnvserver_vpnsessionpolicy_binding resources configued on NetScaler.
"""
try :
obj = vpnvserver_vpnsessionpolicy_binding()
obj.name = name
option_ = options()
option_.count = True
response = obj.get_resources(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
@classmethod
def count_filtered(cls, service, name, filter_) :
""" Use this API to count the filtered set of vpnvserver_vpnsessionpolicy_binding resources.
        Filter string should be in JSON format, e.g.: "port:80,servicetype:HTTP".
"""
try :
obj = vpnvserver_vpnsessionpolicy_binding()
obj.name = name
option_ = options()
option_.count = True
option_.filter = filter_
response = obj.getfiltered(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
class Staaddresstype:
IPV4 = "IPV4"
IPV6 = "IPV6"
class Bindpoint:
REQUEST = "REQUEST"
RESPONSE = "RESPONSE"
ICA_REQUEST = "ICA_REQUEST"
OTHERTCP_REQUEST = "OTHERTCP_REQUEST"
class vpnvserver_vpnsessionpolicy_binding_response(base_response) :
def __init__(self, length=1) :
self.vpnvserver_vpnsessionpolicy_binding = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.vpnvserver_vpnsessionpolicy_binding = [vpnvserver_vpnsessionpolicy_binding() for _ in range(length)]
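# Sketch of typical usage of the binding class above. `client` stands for an
# already-authenticated nitro_service session obtained elsewhere, and the
# virtual-server / policy names are placeholders.
def _example_bind_session_policy(client):
    binding = vpnvserver_vpnsessionpolicy_binding()
    binding.name = "vpn_vs1"          # placeholder VPN virtual server name
    binding.policy = "session_pol1"   # placeholder session policy name
    vpnvserver_vpnsessionpolicy_binding.add(client, binding)
    # Inspect what is now bound to the same virtual server.
    bound = vpnvserver_vpnsessionpolicy_binding.get(client, "vpn_vs1")
    total = vpnvserver_vpnsessionpolicy_binding.count(client, "vpn_vs1")
    return bound, total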
|
{
"content_hash": "7709c99dd760a8ec0e93365196081215",
"timestamp": "",
"source": "github",
"line_count": 337,
"max_line_length": 430,
"avg_line_length": 40.36498516320475,
"alnum_prop": 0.7384400499889731,
"repo_name": "mahabs/nitro",
"id": "84e62f052283380f81976f5e4f3dc0c10b4e475c",
"size": "14217",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nssrc/com/citrix/netscaler/nitro/resource/config/vpn/vpnvserver_vpnsessionpolicy_binding.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "498"
},
{
"name": "Python",
"bytes": "10647176"
}
],
"symlink_target": ""
}
|
from pony.orm import *
from datetime import datetime
from model.group import Group
from model.contact import Contact
from pymysql.converters import encoders, decoders, convert_mysql_timestamp
class ORMFixture:
db = Database()
class ORMGroup(db.Entity):
_table_ = 'group_list'
id = PrimaryKey(int, column='group_id')
name = Optional(str, column='group_name')
header = Optional(str, column='group_header')
footer = Optional(str, column='group_footer')
contacts = Set(lambda: ORMFixture.ORMContact, table="address_in_groups", column="id", reverse="groups", lazy=True)
class ORMContact(db.Entity):
_table_ = 'addressbook'
id = PrimaryKey(int, column='id')
firstname = Optional(str, column='firstname')
lastname = Optional(str, column='lastname')
deprecated = Optional(datetime, column='deprecated')
groups = Set(lambda: ORMFixture.ORMGroup, table="address_in_groups", column="group_id", reverse="contacts", lazy=True)
def __init__(self, host, name, user, password):
conv = encoders
conv.update(decoders)
conv[datetime] = convert_mysql_timestamp
self.db.bind('mysql', host=host, database=name, user=user, password=password, conv=conv)
self.db.generate_mapping()
sql_debug(True)
def convert_groups_to_model(self, groups):
def convert(group):
return Group(id=str(group.id), name=group.name, header=group.header, footer=group.footer)
return list(map(convert, groups))
@db_session
def get_group_list(self):
        return self.convert_groups_to_model(list(select(g for g in ORMFixture.ORMGroup)))
@db_session
def get_contact_list(self):
return self.convert_contacts_to_model(list(select(c for c in ORMFixture.ORMContact if c.deprecated is None)))
def convert_contacts_to_model(self, contacts):
def convert(contact):
return Contact(id=str(contact.id), firstname=contact.firstname, lastname=contact.lastname)
return list(map(convert, contacts))
@db_session
def get_contacts_in_group(self, group):
orm_group = list(select(g for g in ORMFixture.ORMGroup if g.id == group.id))[0]
return self.convert_contacts_to_model(orm_group.contacts)
@db_session
def get_contacts_not_in_group(self, group):
orm_group = list(select(g for g in ORMFixture.ORMGroup if g.id == group.id))[0]
return self.convert_contacts_to_model(
select(c for c in ORMFixture.ORMContact if c.deprecated is None and orm_group not in c.groups))
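# Illustrative usage of the fixture above; the connection parameters are
# placeholders and would normally come from the test configuration.
#   db = ORMFixture(host="127.0.0.1", name="addressbook", user="root", password="")
#   groups = db.get_group_list()
#   members = db.get_contacts_in_group(groups[0])
#   non_members = db.get_contacts_not_in_group(groups[0])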
|
{
"content_hash": "05367214896a1083fb2c9dcde12da507",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 126,
"avg_line_length": 40.23076923076923,
"alnum_prop": 0.6684512428298279,
"repo_name": "ble669/python_training",
"id": "53663bcd5e9c47472b4ae7385a6b1bab5fc83c5f",
"size": "2616",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fixture/orm.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "33562"
}
],
"symlink_target": ""
}
|
import os
import argparse
import sys
import platform
from collections import namedtuple
from pysmt.cmd.installers import MSatInstaller, Z3Installer, PicoSATInstaller
from pysmt.cmd.installers import CVC4Installer, YicesInstaller, BtorInstaller
from pysmt.cmd.installers import CuddInstaller
from pysmt.cmd.installers.base import solver_install_site
from pysmt.environment import get_env
from pysmt.exceptions import PysmtException
from pysmt import __version__ as pysmt_version
# Build a list of installers, one for each solver
Installer = namedtuple("Installer",
["InstallerClass", "version", "extra_params"])
INSTALLERS = [
Installer(MSatInstaller, "5.6.7", {}),
Installer(CVC4Installer, "1.7-prerelease",
{"git_version" : "391ab9df6c3fd9a3771864900c1718534c1e4666"}),
Installer(Z3Installer, "4.8.17", {"osx": "10.16"}),
Installer(YicesInstaller, "2.6.2",
{"yicespy_version": "f0768ffeec15ea310f830d10878971c9998454ac"}),
Installer(BtorInstaller, "3.2.1", {}),
Installer(PicoSATInstaller, "965",
{"pypicosat_minor_version" : "1708010052"}),
Installer(CuddInstaller, "2.0.3",
{"git_version" : "ecb03d6d231273343178f566cc4d7258dcce52b4"}),
]
def get_requested_solvers():
"""Parses the PYSMT_SOLVER env variable to extract requirements to fulfill"""
requested_solvers_str = os.environ.get("PYSMT_SOLVER")
requested_solvers = []
if requested_solvers_str is not None:
keys = requested_solvers_str.split(",")
requested_solvers = [x.lower().strip() for x in keys]
if "all" in requested_solvers:
requested_solvers = [x.InstallerClass.SOLVER for x in INSTALLERS]
return requested_solvers
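# For illustration, the PYSMT_SOLVER convention parsed above is a comma-separated,
# case-insensitive list of solver names, with "all" expanding to every known
# installer, e.g.:
#     >>> os.environ["PYSMT_SOLVER"] = "Z3, msat"
#     >>> get_requested_solvers()
#     ['z3', 'msat']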
def check_installed(required_solvers, install_dir, bindings_dir, mirror_link):
"""Checks which solvers are visible to pySMT."""
# Check which solvers are accessible from the Factory
pypath_solvers = get_env().factory.all_solvers()
global_solvers_status = []
print("Installed Solvers:")
for i in INSTALLERS:
installer_ = i.InstallerClass(install_dir=install_dir,
bindings_dir=bindings_dir,
solver_version=i.version,
mirror_link=mirror_link,
**i.extra_params)
solver = installer_.SOLVER
version = installer_.get_installed_version()
is_installed = (version is not None)
global_solvers_status.append((solver, is_installed, version))
del installer_
for solver in required_solvers:
if solver not in pypath_solvers:
raise PysmtException("Was expecting to find %s installed" % solver)
#
# Output information
#
for (solver, is_installed, version) in global_solvers_status:
msg = " %s%s " % (solver.ljust(10), is_installed)
msg += ("(%s)" % version).ljust(20)
if solver not in pypath_solvers:
msg += "Not in Python's path!"
print(msg)
print("")
print("Solvers: %s" % ", ".join(name for name in pypath_solvers))
qes = get_env().factory.all_quantifier_eliminators()
print("Quantifier Eliminators: %s" % ", ".join(name for name in qes))
ucs = get_env().factory.all_unsat_core_solvers()
print("UNSAT-Cores: %s" % ", ".join(name for name in ucs))
interps = get_env().factory.all_interpolators()
print("Interpolators: %s" % ", ".join(name for name in interps))
def parse_options():
parser = argparse.ArgumentParser(description='Install SMT Solvers.\n\n'
'This script installs the solvers specified'
' on the command line or in the environment'
' variable PYSMT_SOLVER if not already '
                                     'installed on the system.')
parser.add_argument('--version', action='version',
version='%(prog)s {version}'.format(version=pysmt_version))
for i in INSTALLERS:
name = i.InstallerClass.SOLVER
parser.add_argument('--%s' % name, dest=name, action='store_true',
default=False, help='Install %s' % name)
parser.add_argument('--all', dest='all_solvers', action='store_true',
default=False,
help='Install all the solvers')
parser.add_argument('--force', dest='force_redo', action='store_true',
default=False,
help='Forcedly rebuild the solvers even if already found')
parser.add_argument('--check', dest='check', action='store_true',
default=False,
help='Checks the installation of the solvers')
parser.add_argument('--env', dest='env', action='store_true',
default=False,
help='Prints a bash export command to extend the PYTHONPATH')
parser.add_argument('--powershell', dest='powershell', action='store_true',
default=False,
help='In combination with --env under windows, prints the commands in powershell format')
parser.add_argument('--confirm-agreement', dest='skip_intro',
action='store_true', default=False,
help='Confirm that you agree with the licenses of the'
' solvers and skip the interactive question')
install_path_default = os.path.join("~", ".smt_solvers")
parser.add_argument('--install-path', dest='install_path',
type=str, default=install_path_default,
help='The folder to use for the installation'
' (defaults to: {!r})'.format(install_path_default))
py_bindings = solver_install_site(plat_specific=True)
parser.add_argument('--bindings-path', dest='bindings_path',
type=str, default=py_bindings,
help='The folder to use for the bindings (defaults to the'
' relevant site-packages directory: {!r})'.format(py_bindings))
options = parser.parse_args()
return options
################################################################################
# Main functions
def print_welcome():
msg = """\
This script allows you to install the solvers supported by pySMT.
By executing this script, you confirm that you have read and agreed
with the licenses of each solver.
Notice: the installation process might require building tools
(e.g., make and gcc).
"""
print(msg)
res = input("Continue? [Y]es/[N]o: ").lower()
if res != "y":
exit(-1)
def main():
options = parse_options()
# Address of a mirror website containing packages to avoid continuous
# downloads from original websites in CI
mirror_url = os.environ.get('PYSMT_INSTALL_MIRROR')
if mirror_url is not None:
mirror_url += "/{archive_name}"
# This should work on any platform
install_dir = os.path.expanduser(options.install_path)
if not os.path.exists(install_dir):
os.makedirs(install_dir)
# This should work on any platform
bindings_dir = os.path.expanduser(options.bindings_path)
if not os.path.exists(bindings_dir):
os.makedirs(bindings_dir)
solvers_to_install = []
all_solvers = options.all_solvers
for i in INSTALLERS:
name = i.InstallerClass.SOLVER
if all_solvers or getattr(options, name):
solvers_to_install.append(i)
# Env variable controlling the solvers to be installed or checked
requested_solvers = get_requested_solvers()
if len(solvers_to_install) != 0 and len(requested_solvers) != 0:
print("Warning: Solvers specified on the command line, "
"ignoring env variable 'PYSMT_SOLVER'")
if len(solvers_to_install) == 0:
# No solver requested from cmd-line, checking ENV
for i in INSTALLERS:
name = i.InstallerClass.SOLVER
if name in requested_solvers:
solvers_to_install.append(i)
if options.check:
check_installed([x.InstallerClass.SOLVER for x in solvers_to_install],
install_dir=install_dir,
bindings_dir=bindings_dir,
mirror_link=mirror_url)
exit(0)
elif options.env:
bindings_dir= os.path.expanduser(options.bindings_path)
if platform.system().lower() == "windows":
if options.powershell:
print('$env:PythonPath += ";%s"' % bindings_dir)
else:
print("set PYTHONPATH=" + bindings_dir + ";%PYTHONPATH%")
else:
print("export PYTHONPATH=\"" + bindings_dir + ":${PYTHONPATH}\"")
else:
if len(solvers_to_install) == 0:
print("Nothing to do.\nTry with '%s --help'" % sys.argv[0])
exit(0)
# Do the actual install
if not options.skip_intro:
print_welcome()
for i in solvers_to_install:
installer = i.InstallerClass(install_dir=install_dir,
bindings_dir=bindings_dir,
solver_version=i.version,
mirror_link=mirror_url,
**i.extra_params)
installer.install(force_redo=options.force_redo)
|
{
"content_hash": "39716b76c3150432796596fc05433db6",
"timestamp": "",
"source": "github",
"line_count": 243,
"max_line_length": 113,
"avg_line_length": 39.534979423868315,
"alnum_prop": 0.5879046528572915,
"repo_name": "pysmt/pysmt",
"id": "66bd5bc7acf7f253559a4c1fa1c61a4f5a9238cb",
"size": "10202",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pysmt/cmd/install.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1227123"
},
{
"name": "Shell",
"bytes": "8632"
}
],
"symlink_target": ""
}
|
import copy
from unittest import mock
from neutron_lib import constants
from neutron_lib import context
from neutron_lib.services.qos import constants as qos_consts
from oslo_utils import uuidutils
from neutron.objects.qos import policy
from neutron.objects.qos import rule
from neutron.plugins.ml2.drivers.openvswitch.agent import (
ovs_agent_extension_api as ovs_ext_api)
from neutron.plugins.ml2.drivers.openvswitch.agent.extension_drivers import (
qos_driver)
from neutron.plugins.ml2.drivers.openvswitch.agent.openflow.native import (
ovs_bridge)
from neutron.tests.unit.plugins.ml2.drivers.openvswitch.agent import (
ovs_test_base)
class QosOVSAgentDriverTestCase(ovs_test_base.OVSAgentConfigTestBase):
def setUp(self):
super(QosOVSAgentDriverTestCase, self).setUp()
conn_patcher = mock.patch(
'neutron.agent.ovsdb.impl_idl._connection')
conn_patcher.start()
self.addCleanup(conn_patcher.stop)
self.context = context.get_admin_context()
self.qos_driver = qos_driver.QosOVSAgentDriver()
self.mock_clear_minimum_bandwidth_qos = mock.patch.object(
self.qos_driver, '_qos_bandwidth_initialize').start()
os_ken_app = mock.Mock()
self.agent_api = ovs_ext_api.OVSAgentExtensionAPI(
ovs_bridge.OVSAgentBridge(
'br-int', os_ken_app=os_ken_app),
ovs_bridge.OVSAgentBridge(
'br-tun', os_ken_app=os_ken_app),
{'phys1': ovs_bridge.OVSAgentBridge(
'br-phys1', os_ken_app=os_ken_app)})
self.qos_driver.consume_api(self.agent_api)
mock.patch.object(
qos_driver.MeterRuleManager, '_init_max_meter_id').start()
self.qos_driver.initialize()
self.qos_driver.br_int = mock.Mock()
self.qos_driver.br_int.get_dp = mock.Mock(return_value=(mock.Mock(),
mock.Mock(),
mock.Mock()))
self.qos_driver.meter_cache.br_int = self.qos_driver.br_int
self.qos_driver.meter_cache.max_meter = 65535
self.qos_driver.br_int.list_meter_features = mock.Mock(
return_value=[{"max_meter": 65535,
"band_types": 2,
"capabilities": 15,
"max_bands": 8}])
self.qos_driver.br_int.get_egress_bw_limit_for_port = mock.Mock(
return_value=(1000, 10))
self.get_egress = self.qos_driver.br_int.get_egress_bw_limit_for_port
self.qos_driver.br_int.del_egress_bw_limit_for_port = mock.Mock()
self.delete_egress = (
self.qos_driver.br_int.delete_egress_bw_limit_for_port)
self.delete_ingress = (
self.qos_driver.br_int.delete_ingress_bw_limit_for_port)
self.create_egress = (
self.qos_driver.br_int.create_egress_bw_limit_for_port)
self.update_ingress = (
self.qos_driver.br_int.update_ingress_bw_limit_for_port)
self.apply_meter_to_port = (
self.qos_driver.br_int.apply_meter_to_port)
self.remove_meter_from_port = (
self.qos_driver.br_int.remove_meter_from_port)
self.delete_meter = (
self.qos_driver.br_int.delete_meter)
self.create_meter = (
self.qos_driver.br_int.create_meter)
self.update_meter = (
self.qos_driver.br_int.update_meter)
self.rules = [
self._create_bw_limit_rule_obj(constants.EGRESS_DIRECTION),
self._create_bw_limit_rule_obj(constants.INGRESS_DIRECTION),
self._create_pps_limit_rule_obj(constants.EGRESS_DIRECTION),
self._create_pps_limit_rule_obj(constants.INGRESS_DIRECTION),
self._create_dscp_marking_rule_obj()]
self.qos_policy = self._create_qos_policy_obj(self.rules)
self.port = self._create_fake_port(self.qos_policy.id)
self.qos_driver.br_int.get_port_tag_by_name = mock.Mock(
return_value=1)
def _create_bw_limit_rule_obj(self, direction):
rule_obj = rule.QosBandwidthLimitRule()
rule_obj.id = uuidutils.generate_uuid()
rule_obj.max_kbps = 2
rule_obj.max_burst_kbps = 200
rule_obj.direction = direction
rule_obj.obj_reset_changes()
return rule_obj
def _create_dscp_marking_rule_obj(self):
rule_obj = rule.QosDscpMarkingRule()
rule_obj.id = uuidutils.generate_uuid()
rule_obj.dscp_mark = 32
rule_obj.obj_reset_changes()
return rule_obj
def _create_pps_limit_rule_obj(self, direction):
rule_obj = rule.QosPacketRateLimitRule()
rule_obj.id = uuidutils.generate_uuid()
rule_obj.max_kpps = 2000
rule_obj.max_burst_kpps = 200
rule_obj.direction = direction
rule_obj.obj_reset_changes()
return rule_obj
def _create_qos_policy_obj(self, rules):
policy_dict = {'id': uuidutils.generate_uuid(),
'project_id': uuidutils.generate_uuid(),
'name': 'test',
'description': 'test',
'shared': False,
'rules': rules}
policy_obj = policy.QosPolicy(self.context, **policy_dict)
policy_obj.obj_reset_changes()
for policy_rule in policy_obj.rules:
policy_rule.qos_policy_id = policy_obj.id
policy_rule.obj_reset_changes()
return policy_obj
def _create_fake_port(self, policy_id):
self.port_name = 'fakeport'
port_id = uuidutils.generate_uuid()
class FakeVifPort(object):
port_name = self.port_name
ofport = 111
vif_id = port_id
vif_mac = "aa:bb:cc:dd:ee:ff"
return {'vif_port': FakeVifPort(),
'qos_policy_id': policy_id,
'qos_network_policy_id': None,
'port_id': port_id,
'device_owner': uuidutils.generate_uuid()}
def test_create_new_rules(self):
self.qos_driver.br_int.get_value_from_other_config = mock.Mock()
self.qos_driver.br_int.set_value_to_other_config = mock.Mock()
self.qos_driver.br_int.get_egress_bw_limit_for_port = mock.Mock(
return_value=(None, None))
self.qos_driver.create(self.port, self.qos_policy)
self.assertEqual(0, self.delete_egress.call_count)
self.assertEqual(0, self.delete_ingress.call_count)
self.create_egress.assert_called_once_with(
self.port_name, self.rules[0].max_kbps,
self.rules[0].max_burst_kbps)
self.update_ingress.assert_called_once_with(
self.port_name, self.rules[1].max_kbps,
self.rules[1].max_burst_kbps)
self._assert_dscp_rule_create_updated()
self.create_meter.assert_has_calls(
[mock.call(mock.ANY, self.rules[2].max_kpps * 1000,
burst=self.rules[2].max_burst_kpps * 1000),
mock.call(mock.ANY, self.rules[3].max_kpps * 1000,
burst=self.rules[3].max_burst_kpps * 1000)])
self.apply_meter_to_port.assert_has_calls(
[mock.call(mock.ANY, constants.EGRESS_DIRECTION,
"aa:bb:cc:dd:ee:ff",
in_port=111),
mock.call(mock.ANY, constants.INGRESS_DIRECTION,
"aa:bb:cc:dd:ee:ff",
local_vlan=1)])
def test_create_existing_rules(self):
self.qos_driver.create(self.port, self.qos_policy)
self._assert_rules_create_updated()
self._assert_dscp_rule_create_updated()
def test_update_rules(self):
self.qos_driver.update(self.port, self.qos_policy)
self._assert_rules_create_updated()
self._assert_dscp_rule_create_updated()
def test_update_rules_no_vif_port(self):
port = copy.copy(self.port)
port.pop("vif_port")
self.qos_driver.update(port, self.qos_policy)
self.create_egress.assert_not_called()
self.update_ingress.assert_not_called()
self.create_meter.assert_not_called()
self.apply_meter_to_port.assert_not_called()
def _test_delete_rules(self, qos_policy):
self.qos_driver.create(self.port, qos_policy)
self.qos_driver.delete(self.port, qos_policy)
self.delete_egress.assert_called_once_with(self.port_name)
self.delete_ingress.assert_called_once_with(self.port_name)
self.assertEqual(2, self.delete_meter.call_count)
self.remove_meter_from_port.assert_has_calls(
[mock.call(constants.EGRESS_DIRECTION,
"aa:bb:cc:dd:ee:ff",
in_port=111),
mock.call(constants.INGRESS_DIRECTION,
"aa:bb:cc:dd:ee:ff",
local_vlan=1)])
def _test_delete_rules_no_policy(self):
self.qos_driver.delete(self.port)
self.delete_egress.assert_called_once_with(self.port_name)
self.delete_ingress.assert_called_once_with(self.port_name)
def test_delete_rules(self):
self._test_delete_rules(self.qos_policy)
def test_delete_rules_no_policy(self):
self._test_delete_rules_no_policy()
def test_delete_rules_no_vif_port(self):
port = copy.copy(self.port)
port.pop("vif_port")
self.qos_driver.delete(port, self.qos_policy)
self.delete_egress.assert_not_called()
self.delete_ingress.assert_not_called()
self.delete_meter.assert_not_called()
self.delete_meter.assert_not_called()
self.remove_meter_from_port.assert_not_called()
def _assert_rules_create_updated(self):
self.create_egress.assert_called_once_with(
self.port_name, self.rules[0].max_kbps,
self.rules[0].max_burst_kbps)
self.update_ingress.assert_called_once_with(
self.port_name, self.rules[1].max_kbps,
self.rules[1].max_burst_kbps)
self.create_meter.assert_has_calls(
[mock.call(mock.ANY, self.rules[2].max_kpps * 1000,
burst=self.rules[2].max_burst_kpps * 1000),
mock.call(mock.ANY, self.rules[3].max_kpps * 1000,
burst=self.rules[3].max_burst_kpps * 1000)])
self.apply_meter_to_port.assert_has_calls(
[mock.call(mock.ANY, constants.EGRESS_DIRECTION,
"aa:bb:cc:dd:ee:ff",
in_port=111),
mock.call(mock.ANY, constants.INGRESS_DIRECTION,
"aa:bb:cc:dd:ee:ff",
local_vlan=1)])
def _assert_dscp_rule_create_updated(self):
# Assert install_instructions is the last call
self.assertEqual(
'install_dscp_marking_rule',
self.qos_driver.br_int.method_calls[-1][0])
self.qos_driver.br_int.install_dscp_marking_rule.\
assert_called_once_with(dscp_mark=mock.ANY, port=mock.ANY)
def test_create_minimum_bandwidth(self):
with mock.patch.object(self.qos_driver, 'update_minimum_bandwidth') \
as mock_update_minimum_bandwidth:
self.qos_driver.create_minimum_bandwidth('port_name', 'rule')
mock_update_minimum_bandwidth.assert_called_once_with('port_name',
'rule')
def test_delete_minimum_bandwidth(self):
with mock.patch.object(self.qos_driver.br_int,
'delete_minimum_bandwidth_queue') \
as mock_delete_minimum_bandwidth_queue:
self.qos_driver.ports['p_id'] = {}
self.qos_driver.delete_minimum_bandwidth({'port_id': 'p_id'})
mock_delete_minimum_bandwidth_queue.assert_not_called()
mock_delete_minimum_bandwidth_queue.reset_mock()
self.qos_driver.ports['p_id'] = {
(qos_consts.RULE_TYPE_MINIMUM_BANDWIDTH,
constants.EGRESS_DIRECTION): 'rule_port'}
self.qos_driver.delete_minimum_bandwidth({'port_id': 'p_id'})
mock_delete_minimum_bandwidth_queue.assert_called_once_with('p_id')
def test_update_minimum_bandwidth_no_vif_port(self):
with mock.patch.object(self.qos_driver.br_int,
'update_minimum_bandwidth_queue') \
as mock_delete_minimum_bandwidth_queue:
self.qos_driver.update_minimum_bandwidth({}, mock.ANY)
mock_delete_minimum_bandwidth_queue.assert_not_called()
def test_update_minimum_bandwidth_no_phy_brs(self):
vif_port = mock.Mock()
vif_port.ofport = 'ofport'
rule = mock.Mock()
rule.min_kbps = 1500
port = {'port_id': 'port_id', 'vif_port': vif_port}
with mock.patch.object(self.qos_driver.br_int,
'update_minimum_bandwidth_queue') \
as mock_delete_minimum_bandwidth_queue, \
mock.patch.object(self.qos_driver.agent_api,
'request_phy_brs'):
self.qos_driver.update_minimum_bandwidth(port, rule)
mock_delete_minimum_bandwidth_queue.assert_called_once_with(
'port_id', [], 'ofport', 1500)
def test_update_minimum_bandwidth(self):
vif_port = mock.Mock()
vif_port.ofport = 'ofport'
rule = mock.Mock()
rule.min_kbps = 1500
port = {'port_id': 'port_id', 'vif_port': vif_port}
with mock.patch.object(self.qos_driver.br_int,
'update_minimum_bandwidth_queue') \
as mock_delete_minimum_bandwidth_queue, \
mock.patch.object(self.qos_driver.agent_api,
'request_phy_brs') as mock_request_phy_brs:
phy_br = mock.Mock()
phy_br.get_bridge_ports.return_value = ['port1', 'port2']
mock_request_phy_brs.return_value = [phy_br]
self.qos_driver.update_minimum_bandwidth(port, rule)
mock_delete_minimum_bandwidth_queue.assert_called_once_with(
'port_id', ['port1', 'port2'], 'ofport', 1500)
# TODO(przszc): Update tests when dataplane enforcement is implemented for
# minimum packet rate rule
def test_create_minimum_packet_rate(self):
try:
port = {'port_id': 'p_id'}
rule = mock.MagicMock(id='rule_id')
self.qos_driver.create_minimum_packet_rate(port, rule)
except Exception:
self.fail('create_minimum_packet_rate failed')
def test_update_minimum_packet_rate(self):
try:
port = {'port_id': 'p_id'}
rule = mock.MagicMock(id='rule_id')
self.qos_driver.update_minimum_packet_rate(port, rule)
except Exception:
self.fail('update_minimum_packet_rate failed')
def test_delete_minimum_packet_rate(self):
try:
port = {'port_id': 'p_id'}
self.qos_driver.delete_minimum_packet_rate(port)
except Exception:
self.fail('delete_minimum_packet_rate failed')
def test_delete_minimum_packet_rate_ingress(self):
try:
port = {'port_id': 'p_id'}
self.qos_driver.delete_minimum_packet_rate_ingress(port)
except Exception:
self.fail('delete_minimum_packet_rate_ingress failed')
|
{
"content_hash": "1d2db1a3e6d89f278c26083c12825d75",
"timestamp": "",
"source": "github",
"line_count": 356,
"max_line_length": 79,
"avg_line_length": 43.63483146067416,
"alnum_prop": 0.5878717651602936,
"repo_name": "openstack/neutron",
"id": "e2bef51f1fc5e386462b45644e7ba7c27438205e",
"size": "16107",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/extension_drivers/test_qos_driver.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jinja",
"bytes": "2773"
},
{
"name": "Mako",
"bytes": "1047"
},
{
"name": "Python",
"bytes": "15932611"
},
{
"name": "Ruby",
"bytes": "1257"
},
{
"name": "Shell",
"bytes": "83270"
}
],
"symlink_target": ""
}
|
"""Generate Blink V8 bindings (.h and .cpp files).
If run by itself, caches Jinja templates (and creates a dummy file for the build,
since cache filenames are unpredictable and opaque).
This module is *not* concurrency-safe without care: bytecode caching creates
a race condition on cache *write* (crashes if one process tries to read a
partially-written cache). However, if you pre-cache the templates (by running
the module itself), then you can parallelize compiling individual files, since
cache *reading* is safe.
Input: An object of class IdlDefinitions, containing an IDL interface X
Output: V8X.h and V8X.cpp
Design doc: http://www.chromium.org/developers/design-documents/idl-compiler
"""
import os
import posixpath
import re
import sys
# Path handling for libraries and templates
# Paths have to be normalized because Jinja uses the exact template path to
# determine the hash used in the cache filename, and we need a pre-caching step
# to be concurrency-safe. Use absolute path because __file__ is absolute if
# module is imported, and relative if executed directly.
# If paths differ between pre-caching and individual file compilation, the cache
# is regenerated, which causes a race condition and breaks concurrent build,
# since some compile processes will try to read the partially written cache.
module_path, module_filename = os.path.split(os.path.realpath(__file__))
third_party_dir = os.path.normpath(os.path.join(
module_path, os.pardir, os.pardir, os.pardir, os.pardir))
templates_dir = os.path.normpath(os.path.join(
module_path, os.pardir, 'templates'))
# Make sure extension is .py, not .pyc or .pyo, so doesn't depend on caching
module_pyname = os.path.splitext(module_filename)[0] + '.py'
# jinja2 is in chromium's third_party directory.
# Insert at 1 so at front to override system libraries, and
# after path[0] == invoking script dir
sys.path.insert(1, third_party_dir)
import jinja2
from idl_definitions import Visitor
import idl_types
from idl_types import IdlType
import v8_callback_interface
import v8_dictionary
from v8_globals import includes, interfaces
import v8_interface
import v8_types
import v8_union
from v8_utilities import capitalize, cpp_name, conditional_string, v8_class_name
from utilities import KNOWN_COMPONENTS, idl_filename_to_component, is_valid_component_dependency, is_testing_target
def render_template(include_paths, header_template, cpp_template,
template_context, component=None):
template_context['code_generator'] = module_pyname
# Add includes for any dependencies
template_context['header_includes'] = sorted(
template_context['header_includes'])
for include_path in include_paths:
if component:
dependency = idl_filename_to_component(include_path)
assert is_valid_component_dependency(component, dependency)
includes.add(include_path)
template_context['cpp_includes'] = sorted(includes)
header_text = header_template.render(template_context)
cpp_text = cpp_template.render(template_context)
return header_text, cpp_text
def set_global_type_info(info_provider):
interfaces_info = info_provider.interfaces_info
idl_types.set_ancestors(interfaces_info['ancestors'])
IdlType.set_callback_interfaces(interfaces_info['callback_interfaces'])
IdlType.set_dictionaries(interfaces_info['dictionaries'])
IdlType.set_enums(info_provider.enumerations)
IdlType.set_implemented_as_interfaces(interfaces_info['implemented_as_interfaces'])
IdlType.set_garbage_collected_types(interfaces_info['garbage_collected_interfaces'])
IdlType.set_will_be_garbage_collected_types(interfaces_info['will_be_garbage_collected_interfaces'])
v8_types.set_component_dirs(interfaces_info['component_dirs'])
def should_generate_code(definitions):
return definitions.interfaces or definitions.dictionaries
def depends_on_union_types(idl_type):
"""Returns true when a given idl_type depends on union containers
directly.
"""
if idl_type.is_union_type:
return True
if idl_type.is_array_or_sequence_type:
return idl_type.element_type.is_union_type
return False
class TypedefResolver(Visitor):
def __init__(self, info_provider):
self.info_provider = info_provider
def resolve(self, definitions, definition_name):
"""Traverse definitions and resolves typedefs with the actual types."""
self.typedefs = {}
for name, typedef in self.info_provider.typedefs.iteritems():
self.typedefs[name] = typedef.idl_type
self.additional_includes = set()
definitions.accept(self)
self._update_dependencies_include_paths(definition_name)
def _update_dependencies_include_paths(self, definition_name):
interface_info = self.info_provider.interfaces_info[definition_name]
dependencies_include_paths = interface_info['dependencies_include_paths']
for include_path in self.additional_includes:
if include_path not in dependencies_include_paths:
dependencies_include_paths.append(include_path)
def _resolve_typedefs(self, typed_object):
"""Resolve typedefs to actual types in the object."""
for attribute_name in typed_object.idl_type_attributes:
try:
idl_type = getattr(typed_object, attribute_name)
except AttributeError:
continue
if not idl_type:
continue
resolved_idl_type = idl_type.resolve_typedefs(self.typedefs)
if depends_on_union_types(resolved_idl_type):
self.additional_includes.add(
self.info_provider.include_path_for_union_types)
# Need to re-assign the attribute, not just mutate idl_type, since
# type(idl_type) may change.
setattr(typed_object, attribute_name, resolved_idl_type)
def visit_typed_object(self, typed_object):
self._resolve_typedefs(typed_object)
class CodeGeneratorBase(object):
"""Base class for v8 bindings generator and IDL dictionary impl generator"""
def __init__(self, info_provider, cache_dir, output_dir):
self.info_provider = info_provider
self.jinja_env = initialize_jinja_env(cache_dir)
self.output_dir = output_dir
self.typedef_resolver = TypedefResolver(info_provider)
set_global_type_info(info_provider)
def generate_code(self, definitions, definition_name):
"""Returns .h/.cpp code as ((path, content)...)."""
# Set local type info
if not should_generate_code(definitions):
return set()
IdlType.set_callback_functions(definitions.callback_functions.keys())
# Resolve typedefs
self.typedef_resolver.resolve(definitions, definition_name)
return self.generate_code_internal(definitions, definition_name)
def generate_code_internal(self, definitions, definition_name):
# This should be implemented in subclasses.
raise NotImplementedError()
class CodeGeneratorV8(CodeGeneratorBase):
def __init__(self, info_provider, cache_dir, output_dir):
CodeGeneratorBase.__init__(self, info_provider, cache_dir, output_dir)
def output_paths(self, definition_name):
header_path = posixpath.join(self.output_dir,
'V8%s.h' % definition_name)
cpp_path = posixpath.join(self.output_dir, 'V8%s.cpp' % definition_name)
return header_path, cpp_path
def generate_code_internal(self, definitions, definition_name):
if definition_name in definitions.interfaces:
return self.generate_interface_code(
definitions, definition_name,
definitions.interfaces[definition_name])
if definition_name in definitions.dictionaries:
return self.generate_dictionary_code(
definitions, definition_name,
definitions.dictionaries[definition_name])
raise ValueError('%s is not in IDL definitions' % definition_name)
def generate_interface_code(self, definitions, interface_name, interface):
# Store other interfaces for introspection
interfaces.update(definitions.interfaces)
interface_info = self.info_provider.interfaces_info[interface_name]
full_path = interface_info.get('full_path')
component = idl_filename_to_component(full_path)
include_paths = interface_info.get('dependencies_include_paths')
# Select appropriate Jinja template and contents function
if interface.is_callback:
header_template_filename = 'callback_interface.h'
cpp_template_filename = 'callback_interface.cpp'
interface_context = v8_callback_interface.callback_interface_context
elif interface.is_partial:
interface_context = v8_interface.interface_context
header_template_filename = 'partial_interface.h'
cpp_template_filename = 'partial_interface.cpp'
interface_name += 'Partial'
assert component == 'core'
component = 'modules'
include_paths = interface_info.get('dependencies_other_component_include_paths')
else:
header_template_filename = 'interface.h'
cpp_template_filename = 'interface.cpp'
interface_context = v8_interface.interface_context
header_template = self.jinja_env.get_template(header_template_filename)
cpp_template = self.jinja_env.get_template(cpp_template_filename)
template_context = interface_context(interface)
if not interface.is_partial and not is_testing_target(full_path):
template_context['header_includes'].add(self.info_provider.include_path_for_export)
template_context['exported'] = self.info_provider.specifier_for_export
# Add the include for interface itself
if IdlType(interface_name).is_typed_array:
template_context['header_includes'].add('core/dom/DOMTypedArray.h')
elif interface_info['include_path']:
template_context['header_includes'].add(interface_info['include_path'])
header_text, cpp_text = render_template(
include_paths, header_template, cpp_template, template_context,
component)
header_path, cpp_path = self.output_paths(interface_name)
return (
(header_path, header_text),
(cpp_path, cpp_text),
)
def generate_dictionary_code(self, definitions, dictionary_name,
dictionary):
interfaces_info = self.info_provider.interfaces_info
header_template = self.jinja_env.get_template('dictionary_v8.h')
cpp_template = self.jinja_env.get_template('dictionary_v8.cpp')
interface_info = interfaces_info[dictionary_name]
template_context = v8_dictionary.dictionary_context(
dictionary, interfaces_info)
include_paths = interface_info.get('dependencies_include_paths')
# Add the include for interface itself
if interface_info['include_path']:
template_context['header_includes'].add(interface_info['include_path'])
if not is_testing_target(interface_info.get('full_path')):
template_context['header_includes'].add(self.info_provider.include_path_for_export)
template_context['exported'] = self.info_provider.specifier_for_export
header_text, cpp_text = render_template(
include_paths, header_template, cpp_template, template_context)
header_path, cpp_path = self.output_paths(dictionary_name)
return (
(header_path, header_text),
(cpp_path, cpp_text),
)
class CodeGeneratorDictionaryImpl(CodeGeneratorBase):
def __init__(self, info_provider, cache_dir, output_dir):
CodeGeneratorBase.__init__(self, info_provider, cache_dir, output_dir)
def output_paths(self, definition_name, interface_info):
output_dir = posixpath.join(self.output_dir,
interface_info['relative_dir'])
header_path = posixpath.join(output_dir, '%s.h' % definition_name)
cpp_path = posixpath.join(output_dir, '%s.cpp' % definition_name)
return header_path, cpp_path
def generate_code_internal(self, definitions, definition_name):
        if definition_name not in definitions.dictionaries:
            raise ValueError('%s is not an IDL dictionary' % definition_name)
interfaces_info = self.info_provider.interfaces_info
dictionary = definitions.dictionaries[definition_name]
interface_info = interfaces_info[definition_name]
header_template = self.jinja_env.get_template('dictionary_impl.h')
cpp_template = self.jinja_env.get_template('dictionary_impl.cpp')
template_context = v8_dictionary.dictionary_impl_context(
dictionary, interfaces_info)
include_paths = interface_info.get('dependencies_include_paths')
# Add union containers header file to header_includes rather than
# cpp file so that union containers can be used in dictionary headers.
union_container_headers = [header for header in include_paths
if header.find('UnionTypes') > 0]
include_paths = [header for header in include_paths
if header not in union_container_headers]
template_context['header_includes'].update(union_container_headers)
if not is_testing_target(interface_info.get('full_path')):
template_context['exported'] = self.info_provider.specifier_for_export
template_context['header_includes'].add(self.info_provider.include_path_for_export)
header_text, cpp_text = render_template(
include_paths, header_template, cpp_template, template_context)
header_path, cpp_path = self.output_paths(
cpp_name(dictionary), interface_info)
return (
(header_path, header_text),
(cpp_path, cpp_text),
)
class CodeGeneratorUnionType(object):
"""Generates union type container classes.
This generator is different from CodeGeneratorV8 and
CodeGeneratorDictionaryImpl. It assumes that all union types are already
collected. It doesn't process idl files directly.
"""
def __init__(self, info_provider, cache_dir, output_dir, target_component):
self.info_provider = info_provider
self.jinja_env = initialize_jinja_env(cache_dir)
self.output_dir = output_dir
self.target_component = target_component
set_global_type_info(info_provider)
def generate_code(self):
union_types = self.info_provider.union_types
if not union_types:
return ()
header_template = self.jinja_env.get_template('union.h')
cpp_template = self.jinja_env.get_template('union.cpp')
template_context = v8_union.union_context(
union_types, self.info_provider.interfaces_info)
template_context['code_generator'] = module_pyname
capitalized_component = self.target_component.capitalize()
template_context['exported'] = self.info_provider.specifier_for_export
template_context['header_filename'] = 'bindings/%s/v8/UnionTypes%s.h' % (
self.target_component, capitalized_component)
template_context['macro_guard'] = 'UnionType%s_h' % capitalized_component
additional_header_includes = [self.info_provider.include_path_for_export]
# Add UnionTypesCore.h as a dependency when we generate modules union types
# because we only generate union type containers which are used by both
# core and modules in UnionTypesCore.h.
# FIXME: This is an ad hoc workaround and we need a general way to
# handle core <-> modules dependency.
if self.target_component == 'modules':
additional_header_includes.append(
'bindings/core/v8/UnionTypesCore.h')
template_context['header_includes'] = sorted(
template_context['header_includes'] + additional_header_includes)
header_text = header_template.render(template_context)
cpp_text = cpp_template.render(template_context)
header_path = posixpath.join(self.output_dir,
'UnionTypes%s.h' % capitalized_component)
cpp_path = posixpath.join(self.output_dir,
'UnionTypes%s.cpp' % capitalized_component)
return (
(header_path, header_text),
(cpp_path, cpp_text),
)
def initialize_jinja_env(cache_dir):
jinja_env = jinja2.Environment(
loader=jinja2.FileSystemLoader(templates_dir),
# Bytecode cache is not concurrency-safe unless pre-cached:
# if pre-cached this is read-only, but writing creates a race condition.
bytecode_cache=jinja2.FileSystemBytecodeCache(cache_dir),
keep_trailing_newline=True, # newline-terminate generated files
lstrip_blocks=True, # so can indent control flow tags
trim_blocks=True)
jinja_env.filters.update({
'blink_capitalize': capitalize,
'conditional': conditional_if_endif,
'exposed': exposed_if,
'runtime_enabled': runtime_enabled_if,
})
return jinja_env
def generate_indented_conditional(code, conditional):
# Indent if statement to level of original code
indent = re.match(' *', code).group(0)
return ('%sif (%s) {\n' % (indent, conditional) +
' %s\n' % '\n '.join(code.splitlines()) +
'%s}\n' % indent)
# [Conditional]
def conditional_if_endif(code, conditional_string):
# Jinja2 filter to generate if/endif directive blocks
if not conditional_string:
return code
return ('#if %s\n' % conditional_string +
code +
'#endif // %s\n' % conditional_string)
# [Exposed]
def exposed_if(code, exposed_test):
if not exposed_test:
return code
return generate_indented_conditional(code, 'context && (%s)' % exposed_test)
# [RuntimeEnabled]
def runtime_enabled_if(code, runtime_enabled_function_name):
if not runtime_enabled_function_name:
return code
return generate_indented_conditional(code, '%s()' % runtime_enabled_function_name)
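# Illustrative examples (not part of the original module) of what the filters
# above produce. Assuming a hypothetical snippet "foo();\n" and conditional
# "ENABLE(FOO)", conditional_if_endif yields:
#
#   #if ENABLE(FOO)
#   foo();
#   #endif // ENABLE(FOO)
#
# and runtime_enabled_if('    foo();', 'fooEnabled') yields an if-guard
# indented to the level of the original code:
#
#       if (fooEnabled()) {
#           foo();
#       }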
################################################################################
def main(argv):
# If file itself executed, cache templates
try:
cache_dir = argv[1]
dummy_filename = argv[2]
except IndexError as err:
print 'Usage: %s CACHE_DIR DUMMY_FILENAME' % argv[0]
return 1
# Cache templates
jinja_env = initialize_jinja_env(cache_dir)
template_filenames = [filename for filename in os.listdir(templates_dir)
# Skip .svn, directories, etc.
if filename.endswith(('.cpp', '.h'))]
for template_filename in template_filenames:
jinja_env.get_template(template_filename)
# Create a dummy file as output for the build system,
# since filenames of individual cache files are unpredictable and opaque
# (they are hashes of the template path, which varies based on environment)
with open(dummy_filename, 'w') as dummy_file:
pass # |open| creates or touches the file
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
{
"content_hash": "6885c52e528cd4aae97a43b197375d5c",
"timestamp": "",
"source": "github",
"line_count": 437,
"max_line_length": 115,
"avg_line_length": 44.67505720823799,
"alnum_prop": 0.6702351073093274,
"repo_name": "zero-rp/miniblink49",
"id": "4169c23a2edca55f96e650d5aa4846aeb8229ea6",
"size": "21053",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "third_party/WebKit/Source/bindings/scripts/code_generator_v8.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "11324414"
},
{
"name": "Batchfile",
"bytes": "52488"
},
{
"name": "C",
"bytes": "31014938"
},
{
"name": "C++",
"bytes": "281193388"
},
{
"name": "CMake",
"bytes": "88548"
},
{
"name": "CSS",
"bytes": "20839"
},
{
"name": "DIGITAL Command Language",
"bytes": "226954"
},
{
"name": "HTML",
"bytes": "202637"
},
{
"name": "JavaScript",
"bytes": "32544926"
},
{
"name": "Lua",
"bytes": "32432"
},
{
"name": "M4",
"bytes": "125191"
},
{
"name": "Makefile",
"bytes": "1517330"
},
{
"name": "Objective-C",
"bytes": "87691"
},
{
"name": "Objective-C++",
"bytes": "35037"
},
{
"name": "PHP",
"bytes": "307541"
},
{
"name": "Perl",
"bytes": "3283676"
},
{
"name": "Prolog",
"bytes": "29177"
},
{
"name": "Python",
"bytes": "4308928"
},
{
"name": "R",
"bytes": "10248"
},
{
"name": "Scheme",
"bytes": "25457"
},
{
"name": "Shell",
"bytes": "264021"
},
{
"name": "TypeScript",
"bytes": "162421"
},
{
"name": "Vim script",
"bytes": "11362"
},
{
"name": "XS",
"bytes": "4319"
},
{
"name": "eC",
"bytes": "4383"
}
],
"symlink_target": ""
}
|
'''
Copyright (C) 2016 Turi
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
'''
from .show import show, show_dispatch
|
{
"content_hash": "b2e8d1824317fc68c475b0384833073f",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 61,
"avg_line_length": 23.22222222222222,
"alnum_prop": 0.7607655502392344,
"repo_name": "dato-code/SFrame",
"id": "b41785ce16c110faffe4f4e18f034c6283baa219",
"size": "209",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "oss_src/unity/python/sframe/visualization/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "145346"
},
{
"name": "C++",
"bytes": "11631253"
},
{
"name": "CMake",
"bytes": "105184"
},
{
"name": "CSS",
"bytes": "127000"
},
{
"name": "HTML",
"bytes": "24575"
},
{
"name": "Hack",
"bytes": "277"
},
{
"name": "JavaScript",
"bytes": "20909"
},
{
"name": "Makefile",
"bytes": "9614"
},
{
"name": "Perl",
"bytes": "9663"
},
{
"name": "Python",
"bytes": "2222176"
},
{
"name": "R",
"bytes": "537"
},
{
"name": "Scala",
"bytes": "5232"
},
{
"name": "Shell",
"bytes": "52850"
},
{
"name": "Smarty",
"bytes": "966"
},
{
"name": "XSLT",
"bytes": "74068"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import tensorflow as tf
from tensorforce import util, TensorForceError
from tensorforce.core.memories import Memory
from tensorforce.core.optimizers import Optimizer
from tensorforce.models import Model
class MemoryModel(Model):
"""
    A memory model is a generic model that accumulates and samples data.
"""
def __init__(
self,
states,
actions,
scope,
device,
saver,
summarizer,
execution,
batching_capacity,
variable_noise,
states_preprocessing,
actions_exploration,
reward_preprocessing,
update_mode,
memory,
optimizer,
discount
):
"""
Memory model.
Args:
states (spec): The state-space description dictionary.
actions (spec): The action-space description dictionary.
scope (str): The root scope str to use for tf variable scoping.
device (str): The name of the device to run the graph of this model on.
saver (spec): Dict specifying whether and how to save the model's parameters.
summarizer (spec): Dict specifying which tensorboard summaries should be created and added to the graph.
execution (spec): Dict specifying whether and how to do distributed training on the model's graph.
batching_capacity (int): Batching capacity.
variable_noise (float): The stddev value of a Normal distribution used for adding random
noise to the model's output (for each batch, noise can be toggled and - if active - will be resampled).
Use None for not adding any noise.
states_preprocessing (spec / dict of specs): Dict specifying whether and how to preprocess state signals
(e.g. normalization, greyscale, etc..).
actions_exploration (spec / dict of specs): Dict specifying whether and how to add exploration to the model's
"action outputs" (e.g. epsilon-greedy).
reward_preprocessing (spec): Dict specifying whether and how to preprocess rewards coming
from the Environment (e.g. reward normalization).
update_mode (spec): Update mode.
memory (spec): Memory.
optimizer (spec): Dict specifying the tf optimizer to use for tuning the model's trainable parameters.
discount (float): The RL reward discount factor (gamma).
"""
self.update_mode = update_mode
self.memory_spec = memory
self.optimizer_spec = optimizer
# Discount
assert discount is None or discount >= 0.0
self.discount = discount
self.memory = None
self.optimizer = None
self.fn_discounted_cumulative_reward = None
self.fn_loss_per_instance = None
self.fn_regularization_losses = None
self.fn_loss = None
self.fn_optimization = None
super(MemoryModel, self).__init__(
states=states,
actions=actions,
scope=scope,
device=device,
saver=saver,
summarizer=summarizer,
execution=execution,
batching_capacity=batching_capacity,
variable_noise=variable_noise,
states_preprocessing=states_preprocessing,
actions_exploration=actions_exploration,
reward_preprocessing=reward_preprocessing
)
def as_local_model(self):
super(MemoryModel, self).as_local_model()
self.optimizer_spec = dict(
type='global_optimizer',
optimizer=self.optimizer_spec
)
def initialize(self, custom_getter):
super(MemoryModel, self).initialize(custom_getter)
# Memory
self.memory = Memory.from_spec(
spec=self.memory_spec,
kwargs=dict(
states=self.states_spec,
internals=self.internals_spec,
actions=self.actions_spec,
summary_labels=self.summary_labels
)
)
# Optimizer
self.optimizer = Optimizer.from_spec(
spec=self.optimizer_spec,
kwargs=dict(summary_labels=self.summary_labels)
)
# TensorFlow functions
self.fn_discounted_cumulative_reward = tf.make_template(
name_='discounted-cumulative-reward',
func_=self.tf_discounted_cumulative_reward,
custom_getter_=custom_getter
)
self.fn_reference = tf.make_template(
name_='reference',
func_=self.tf_reference,
custom_getter_=custom_getter
)
self.fn_loss_per_instance = tf.make_template(
name_='loss-per-instance',
func_=self.tf_loss_per_instance,
custom_getter_=custom_getter
)
self.fn_regularization_losses = tf.make_template(
name_='regularization-losses',
func_=self.tf_regularization_losses,
custom_getter_=custom_getter
)
self.fn_loss = tf.make_template(
name_='loss',
func_=self.tf_loss,
custom_getter_=custom_getter
)
self.fn_optimization = tf.make_template(
name_='optimization',
func_=self.tf_optimization,
custom_getter_=custom_getter
)
self.fn_import_experience = tf.make_template(
name_='import-experience',
func_=self.tf_import_experience,
custom_getter_=custom_getter
)
def tf_initialize(self):
super(MemoryModel, self).tf_initialize()
self.memory.initialize()
def tf_discounted_cumulative_reward(self, terminal, reward, discount, final_reward=0.0):
"""
Creates the TensorFlow operations for calculating the discounted cumulative rewards
for a given sequence of rewards.
Args:
terminal: Terminal boolean tensor.
reward: Reward tensor.
discount: Discount factor.
final_reward: Last reward value in the sequence.
Returns:
Discounted cumulative reward tensor.
"""
# TODO: n-step cumulative reward (particularly for envs without terminal)
def cumulate(cumulative, reward_and_terminal):
rew, term = reward_and_terminal
return tf.where(condition=term, x=rew, y=(rew + cumulative * discount))
# Reverse since reward cumulation is calculated right-to-left, but tf.scan only works left-to-right
reward = tf.reverse(tensor=reward, axis=(0,))
terminal = tf.reverse(tensor=terminal, axis=(0,))
reward = tf.scan(fn=cumulate, elems=(reward, terminal), initializer=tf.stop_gradient(input=final_reward))
return tf.reverse(tensor=reward, axis=(0,))
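    # Worked example (illustrative, not part of the original module): with
    # reward = [1.0, 0.0, 2.0], terminal = [False, False, True] and
    # discount = 0.9, the scan above runs over the reversed sequences:
    #   r2 -> 2.0                      (terminal: cumulation restarts)
    #   r1 -> 0.0 + 0.9 * 2.0 = 1.8
    #   r0 -> 1.0 + 0.9 * 1.8 = 2.62
    # so the re-reversed result is [2.62, 1.8, 2.0].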
# # TODO: this could be a utility helper function if we remove self.discount and only allow external discount-value input
# def tf_discounted_cumulative_reward(self, terminal, reward, discount=None, final_reward=0.0, horizon=0):
# """
# Creates and returns the TensorFlow operations for calculating the sequence of discounted cumulative rewards
# for a given sequence of single rewards.
# Example:
# single rewards = 2.0 1.0 0.0 0.5 1.0 -1.0
# terminal = False, False, False, False True False
# gamma = 0.95
# final_reward = 100.0 (only matters for last episode (r=-1.0) as this episode has no terminal signal)
# horizon=3
# output = 2.95 1.45 1.38 1.45 1.0 94.0
# Args:
# terminal: Tensor (bool) holding the is-terminal sequence. This sequence may contain more than one
# True value. If its very last element is False (not terminating), the given `final_reward` value
# is assumed to follow the last value in the single rewards sequence (see below).
# reward: Tensor (float) holding the sequence of single rewards. If the last element of `terminal` is False,
# an assumed last reward of the value of `final_reward` will be used.
# discount (float): The discount factor (gamma). By default, take the Model's discount factor.
# final_reward (float): Reward value to use if last episode in sequence does not terminate (terminal sequence
# ends with False). This value will be ignored if horizon == 1 or discount == 0.0.
# horizon (int): The length of the horizon (e.g. for n-step cumulative rewards in continuous tasks
# without terminal signals). Use 0 (default) for an infinite horizon. Note that horizon=1 leads to the
# exact same results as a discount factor of 0.0.
# Returns:
# Discounted cumulative reward tensor with the same shape as `reward`.
# """
# # By default -> take Model's gamma value
# if discount is None:
# discount = self.discount
# # Accumulates discounted (n-step) reward (start new if terminal)
# def cumulate(cumulative, reward_terminal_horizon_subtract):
# rew, is_terminal, is_over_horizon, sub = reward_terminal_horizon_subtract
# return tf.where(
# # If terminal, start new cumulation.
# condition=is_terminal,
# x=rew,
# y=tf.where(
# # If we are above the horizon length (H) -> subtract discounted value from H steps back.
# condition=is_over_horizon,
# x=(rew + cumulative * discount - sub),
# y=(rew + cumulative * discount)
# )
# )
# # Accumulates length of episodes (starts new if terminal)
# def len_(cumulative, term):
# return tf.where(
# condition=term,
# # Start counting from 1 after is-terminal signal
# x=tf.ones(shape=(), dtype=tf.int32),
# # Otherwise, increase length by 1
# y=cumulative + 1
# )
# # Reverse, since reward cumulation is calculated right-to-left, but tf.scan only works left-to-right.
# reward = tf.reverse(tensor=reward, axis=(0,))
# # e.g. -1.0 1.0 0.5 0.0 1.0 2.0
# terminal = tf.reverse(tensor=terminal, axis=(0,))
# # e.g. F T F F F F
# # Store the steps until end of the episode(s) determined by the input terminal signals (True starts new count).
# lengths = tf.scan(fn=len_, elems=terminal, initializer=0)
# # e.g. 1 1 2 3 4 5
# off_horizon = tf.greater(lengths, tf.fill(dims=tf.shape(lengths), value=horizon))
# # e.g. F F F F T T
# # Calculate the horizon-subtraction value for each step.
# if horizon > 0:
# horizon_subtractions = tf.map_fn(lambda x: (discount ** horizon) * x, reward, dtype=tf.float32)
# # Shift right by size of horizon (fill rest with 0.0).
# horizon_subtractions = tf.concat([np.zeros(shape=(horizon,)), horizon_subtractions], axis=0)
# horizon_subtractions = tf.slice(horizon_subtractions, begin=(0,), size=tf.shape(reward))
# # e.g. 0.0, 0.0, 0.0, -1.0*g^3, 1.0*g^3, 0.5*g^3
# # all 0.0 if infinite horizon (special case: horizon=0)
# else:
# horizon_subtractions = tf.zeros(shape=tf.shape(reward))
# # Now do the scan, each time summing up the previous step (discounted by gamma) and
# # subtracting the respective `horizon_subtraction`.
# reward = tf.scan(
# fn=cumulate,
# elems=(reward, terminal, off_horizon, horizon_subtractions),
# initializer=final_reward if horizon != 1 else 0.0
# )
# # Re-reverse again to match input sequences.
# return tf.reverse(tensor=reward, axis=(0,))
def tf_reference(self, states, internals, actions, terminal, reward, next_states, next_internals, update):
"""
Creates the TensorFlow operations for obtaining the reference tensor(s), in case of a
comparative loss.
Args:
states: Dict of state tensors.
internals: List of prior internal state tensors.
actions: Dict of action tensors.
terminal: Terminal boolean tensor.
reward: Reward tensor.
next_states: Dict of successor state tensors.
next_internals: List of posterior internal state tensors.
update: Boolean tensor indicating whether this call happens during an update.
Returns:
Reference tensor(s).
"""
return None
def tf_loss_per_instance(self, states, internals, actions, terminal, reward, next_states, next_internals, update, reference=None):
"""
Creates the TensorFlow operations for calculating the loss per batch instance.
Args:
states: Dict of state tensors.
internals: List of prior internal state tensors.
actions: Dict of action tensors.
terminal: Terminal boolean tensor.
reward: Reward tensor.
next_states: Dict of successor state tensors.
next_internals: List of posterior internal state tensors.
update: Boolean tensor indicating whether this call happens during an update.
reference: Optional reference tensor(s), in case of a comparative loss.
Returns:
Loss per instance tensor.
"""
raise NotImplementedError
def tf_regularization_losses(self, states, internals, update):
"""
Creates the TensorFlow operations for calculating the regularization losses for the given input states.
Args:
states: Dict of state tensors.
internals: List of prior internal state tensors.
update: Boolean tensor indicating whether this call happens during an update.
Returns:
Dict of regularization loss tensors.
"""
return dict()
def tf_loss(self, states, internals, actions, terminal, reward, next_states, next_internals, update, reference=None):
"""
Creates the TensorFlow operations for calculating the full loss of a batch.
Args:
states: Dict of state tensors.
internals: List of prior internal state tensors.
actions: Dict of action tensors.
terminal: Terminal boolean tensor.
reward: Reward tensor.
next_states: Dict of successor state tensors.
next_internals: List of posterior internal state tensors.
update: Boolean tensor indicating whether this call happens during an update.
reference: Optional reference tensor(s), in case of a comparative loss.
Returns:
Loss tensor.
"""
# Mean loss per instance
loss_per_instance = self.fn_loss_per_instance(
states=states,
internals=internals,
actions=actions,
terminal=terminal,
reward=reward,
next_states=next_states,
next_internals=next_internals,
update=update,
reference=reference
)
# Returns no-op.
updated = self.memory.update_batch(loss_per_instance=loss_per_instance)
with tf.control_dependencies(control_inputs=(updated,)):
loss = tf.reduce_mean(input_tensor=loss_per_instance, axis=0)
# Loss without regularization summary.
if 'losses' in self.summary_labels:
summary = tf.summary.scalar(name='loss-without-regularization', tensor=loss)
self.summaries.append(summary)
# Regularization losses.
losses = self.fn_regularization_losses(states=states, internals=internals, update=update)
if len(losses) > 0:
loss += tf.add_n(inputs=list(losses.values()))
if 'regularization' in self.summary_labels:
for name, loss_val in losses.items():
summary = tf.summary.scalar(name=('regularization/' + name), tensor=loss_val)
self.summaries.append(summary)
# Total loss summary.
if 'losses' in self.summary_labels or 'total-loss' in self.summary_labels:
summary = tf.summary.scalar(name='total-loss', tensor=loss)
self.summaries.append(summary)
return loss
def optimizer_arguments(self, states, internals, actions, terminal, reward, next_states, next_internals):
"""
Returns the optimizer arguments including the time, the list of variables to optimize,
and various functions which the optimizer might require to perform an update step.
Args:
states: Dict of state tensors.
internals: List of prior internal state tensors.
actions: Dict of action tensors.
terminal: Terminal boolean tensor.
reward: Reward tensor.
next_states: Dict of successor state tensors.
next_internals: List of posterior internal state tensors.
Returns:
Optimizer arguments as dict.
"""
arguments = dict(
time=self.global_timestep,
variables=self.get_variables(),
arguments=dict(
states=states,
internals=internals,
actions=actions,
terminal=terminal,
reward=reward,
next_states=next_states,
next_internals=next_internals,
update=tf.constant(value=True)
),
fn_reference=self.fn_reference,
fn_loss=self.fn_loss
)
if self.global_model is not None:
arguments['global_variables'] = self.global_model.get_variables()
return arguments
def tf_optimization(self, states, internals, actions, terminal, reward, next_states=None, next_internals=None):
"""
Creates the TensorFlow operations for performing an optimization update step based
on the given input states and actions batch.
Args:
states: Dict of state tensors.
internals: List of prior internal state tensors.
actions: Dict of action tensors.
terminal: Terminal boolean tensor.
reward: Reward tensor.
next_states: Dict of successor state tensors.
next_internals: List of posterior internal state tensors.
Returns:
The optimization operation.
"""
arguments = self.optimizer_arguments(
states=states,
internals=internals,
actions=actions,
terminal=terminal,
reward=reward,
next_states=next_states,
next_internals=next_internals
)
return self.optimizer.minimize(**arguments)
def tf_observe_timestep(self, states, internals, actions, terminal, reward):
# Store timestep in memory
stored = self.memory.store(
states=states,
internals=internals,
actions=actions,
terminal=terminal,
reward=reward
)
# Periodic optimization
with tf.control_dependencies(control_inputs=(stored,)):
unit = self.update_mode['unit']
batch_size = self.update_mode['batch_size']
frequency = self.update_mode.get('frequency', batch_size)
first_update = self.update_mode.get('first_update', 0)
if unit == 'timesteps':
# Timestep-based batch
optimize = tf.logical_and(
x=tf.equal(x=(self.timestep % frequency), y=0),
y=tf.logical_and(
x=tf.greater_equal(x=self.timestep, y=batch_size),
y=tf.greater_equal(x=self.timestep, y=first_update)
)
)
batch = self.memory.retrieve_timesteps(n=batch_size)
elif unit == 'episodes':
# Episode-based batch
optimize = tf.logical_and(
x=tf.equal(x=(self.episode % frequency), y=0),
y=tf.logical_and(
# Only update once per episode increment.
x=tf.greater(x=tf.count_nonzero(input_tensor=terminal), y=0),
y=tf.logical_and(
x=tf.greater_equal(x=self.episode, y=batch_size),
y=tf.greater_equal(x=self.episode, y=first_update)
)
)
)
batch = self.memory.retrieve_episodes(n=batch_size)
elif unit == 'sequences':
# Timestep-sequence-based batch
sequence_length = self.update_mode.get('length', 8)
optimize = tf.logical_and(
x=tf.equal(x=(self.timestep % frequency), y=0),
y=tf.logical_and(
x=tf.greater_equal(x=self.timestep, y=(batch_size + sequence_length - 1)),
y=tf.greater_equal(x=self.timestep, y=first_update)
)
)
batch = self.memory.retrieve_sequences(n=batch_size, sequence_length=sequence_length)
else:
raise TensorForceError("Invalid update unit: {}.".format(unit))
# Do not calculate gradients for memory-internal operations.
batch = util.map_tensors(
fn=(lambda tensor: tf.stop_gradient(input=tensor)),
tensors=batch
)
optimization = tf.cond(
pred=optimize,
true_fn=(lambda: self.fn_optimization(**batch)),
false_fn=tf.no_op
)
return optimization
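    # Illustrative sketch (assumed values, not from the original module): an
    # update_mode handled above could look like
    #   update_mode = dict(unit='timesteps', batch_size=64, frequency=4)
    # which samples a batch of 64 timesteps from memory and runs an
    # optimization step every 4 timesteps, once at least 64 timesteps
    # (and `first_update`, default 0) have been observed.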
def tf_import_experience(self, states, internals, actions, terminal, reward):
"""
Imports experiences into the TensorFlow memory structure. Can be used to import
off-policy data.
        :param states: Dict of state values to import, keyed by state name.
        :param internals: Internal values to set; can be fetched from the agent via
            agent.current_internals if no values are available.
        :param actions: Dict of action values to import, keyed by action name.
        :param terminal: Terminal value(s).
        :param reward: Reward value(s).
"""
return self.memory.store(
states=states,
internals=internals,
actions=actions,
terminal=terminal,
reward=reward
)
def create_operations(self, states, internals, actions, terminal, reward, deterministic, independent):
# Import experience operation.
self.import_experience_output = self.fn_import_experience(
states=states,
internals=internals,
actions=actions,
terminal=terminal,
reward=reward
)
super(MemoryModel, self).create_operations(
states=states,
internals=internals,
actions=actions,
terminal=terminal,
reward=reward,
deterministic=deterministic,
independent=independent
)
def get_variables(self, include_submodules=False, include_nontrainable=False):
model_variables = super(MemoryModel, self).get_variables(
include_submodules=include_submodules,
include_nontrainable=include_nontrainable
)
if include_nontrainable:
memory_variables = self.memory.get_variables()
model_variables += memory_variables
optimizer_variables = self.optimizer.get_variables()
# For some reason, some optimizer variables are only registered in the model.
for variable in optimizer_variables:
if variable in model_variables:
model_variables.remove(variable)
model_variables += optimizer_variables
return model_variables
def get_summaries(self):
model_summaries = super(MemoryModel, self).get_summaries()
memory_summaries = self.memory.get_summaries()
optimizer_summaries = self.optimizer.get_summaries()
return model_summaries + memory_summaries + optimizer_summaries
def import_experience(self, states, internals, actions, terminal, reward):
"""
Stores experiences.
"""
fetches = self.import_experience_output
feed_dict = self.get_feed_dict(
states=states,
internals=internals,
actions=actions,
terminal=terminal,
reward=reward
)
self.monitored_session.run(fetches=fetches, feed_dict=feed_dict)
|
{
"content_hash": "d2638f97426678d62220336159c26874",
"timestamp": "",
"source": "github",
"line_count": 611,
"max_line_length": 134,
"avg_line_length": 41.33387888707038,
"alnum_prop": 0.5912096614531775,
"repo_name": "lefnire/tensorforce",
"id": "92a44d7f537bfa47eebff6c0fba1aaf3e01ec952",
"size": "25935",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorforce/models/memory_model.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "753422"
}
],
"symlink_target": ""
}
|
'''
Multitarget planar tracking
===========================
Example of using features2d framework for interactive video homography matching.
ORB features and FLANN matcher are used. This sample provides PlaneTracker class
and an example of its usage.
video: http://www.youtube.com/watch?v=pzVbhxx6aog
Usage
-----
plane_tracker.py [<video source>]
Keys:
SPACE - pause video
c - clear targets
Select a textured planar object to track by drawing a box with a mouse.
'''
import numpy as np
import cv2
# built-in modules
from collections import namedtuple
# local modules
import video
import common
FLANN_INDEX_KDTREE = 1
FLANN_INDEX_LSH = 6
flann_params= dict(algorithm = FLANN_INDEX_LSH,
table_number = 6, # 12
key_size = 12, # 20
multi_probe_level = 1) #2
MIN_MATCH_COUNT = 10
'''
image - image to track
rect - tracked rectangle (x1, y1, x2, y2)
keypoints - keypoints detected inside rect
descrs - their descriptors
data - some user-provided data
'''
PlanarTarget = namedtuple('PlaneTarget', 'image, rect, keypoints, descrs, data')
'''
target - reference to PlanarTarget
p0 - matched points coords in target image
p1 - matched points coords in input frame
H - homography matrix from p0 to p1
  quad - target boundary quad in input frame
'''
TrackedTarget = namedtuple('TrackedTarget', 'target, p0, p1, H, quad')
class PlaneTracker:
def __init__(self):
self.detector = cv2.ORB_create( nfeatures = 1000 )
self.matcher = cv2.FlannBasedMatcher(flann_params, {}) # bug : need to pass empty dict (#1329)
self.targets = []
def add_target(self, image, rect, data=None):
'''Add a new tracking target.'''
x0, y0, x1, y1 = rect
raw_points, raw_descrs = self.detect_features(image)
points, descs = [], []
for kp, desc in zip(raw_points, raw_descrs):
x, y = kp.pt
if x0 <= x <= x1 and y0 <= y <= y1:
points.append(kp)
descs.append(desc)
descs = np.uint8(descs)
self.matcher.add([descs])
target = PlanarTarget(image = image, rect=rect, keypoints = points, descrs=descs, data=data)
self.targets.append(target)
def clear(self):
'''Remove all targets'''
self.targets = []
self.matcher.clear()
def track(self, frame):
'''Returns a list of detected TrackedTarget objects'''
frame_points, frame_descrs = self.detect_features(frame)
if len(frame_points) < MIN_MATCH_COUNT:
return []
matches = self.matcher.knnMatch(frame_descrs, k = 2)
matches = [m[0] for m in matches if len(m) == 2 and m[0].distance < m[1].distance * 0.75]
if len(matches) < MIN_MATCH_COUNT:
return []
matches_by_id = [[] for _ in xrange(len(self.targets))]
for m in matches:
matches_by_id[m.imgIdx].append(m)
tracked = []
for imgIdx, matches in enumerate(matches_by_id):
if len(matches) < MIN_MATCH_COUNT:
continue
target = self.targets[imgIdx]
p0 = [target.keypoints[m.trainIdx].pt for m in matches]
p1 = [frame_points[m.queryIdx].pt for m in matches]
p0, p1 = np.float32((p0, p1))
H, status = cv2.findHomography(p0, p1, cv2.RANSAC, 3.0)
status = status.ravel() != 0
if status.sum() < MIN_MATCH_COUNT:
continue
p0, p1 = p0[status], p1[status]
x0, y0, x1, y1 = target.rect
quad = np.float32([[x0, y0], [x1, y0], [x1, y1], [x0, y1]])
quad = cv2.perspectiveTransform(quad.reshape(1, -1, 2), H).reshape(-1, 2)
track = TrackedTarget(target=target, p0=p0, p1=p1, H=H, quad=quad)
tracked.append(track)
tracked.sort(key = lambda t: len(t.p0), reverse=True)
return tracked
def detect_features(self, frame):
'''detect_features(self, frame) -> keypoints, descrs'''
keypoints, descrs = self.detector.detectAndCompute(frame, None)
        if descrs is None:  # detectAndCompute returns descrs=None if no keypoints were found
descrs = []
return keypoints, descrs
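# Minimal usage sketch (illustrative, not part of the original sample); the
# image, rectangle and frame values below are hypothetical:
#   tracker = PlaneTracker()
#   tracker.add_target(reference_image, (x0, y0, x1, y1))
#   for tr in tracker.track(frame):
#       print tr.quad  # corners of the tracked plane in the input frame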
class App:
def __init__(self, src):
self.cap = video.create_capture(src)
self.frame = None
self.paused = False
self.tracker = PlaneTracker()
cv2.namedWindow('plane')
self.rect_sel = common.RectSelector('plane', self.on_rect)
def on_rect(self, rect):
self.tracker.add_target(self.frame, rect)
def run(self):
while True:
playing = not self.paused and not self.rect_sel.dragging
if playing or self.frame is None:
ret, frame = self.cap.read()
if not ret:
break
self.frame = frame.copy()
vis = self.frame.copy()
if playing:
tracked = self.tracker.track(self.frame)
for tr in tracked:
cv2.polylines(vis, [np.int32(tr.quad)], True, (255, 255, 255), 2)
for (x, y) in np.int32(tr.p1):
cv2.circle(vis, (x, y), 2, (255, 255, 255))
self.rect_sel.draw(vis)
cv2.imshow('plane', vis)
ch = cv2.waitKey(1) & 0xFF
if ch == ord(' '):
self.paused = not self.paused
if ch == ord('c'):
self.tracker.clear()
if ch == 27:
break
if __name__ == '__main__':
print __doc__
import sys
try:
video_src = sys.argv[1]
except:
video_src = 0
App(video_src).run()
|
{
"content_hash": "eb1b53553fc7b2050d39e591a06a00df",
"timestamp": "",
"source": "github",
"line_count": 177,
"max_line_length": 103,
"avg_line_length": 32.632768361581924,
"alnum_prop": 0.5647506925207756,
"repo_name": "apavlenko/opencv",
"id": "c32f65a442379109d7c79af9044f5e770bc837e3",
"size": "5799",
"binary": false,
"copies": "5",
"ref": "refs/heads/copyright_fixes",
"path": "samples/python2/plane_tracker.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Assembly",
"bytes": "7270"
},
{
"name": "C",
"bytes": "11698890"
},
{
"name": "C++",
"bytes": "24280164"
},
{
"name": "Java",
"bytes": "688479"
},
{
"name": "JavaScript",
"bytes": "352"
},
{
"name": "Objective-C",
"bytes": "323668"
},
{
"name": "Python",
"bytes": "719384"
},
{
"name": "Shell",
"bytes": "2521"
},
{
"name": "TeX",
"bytes": "48853"
}
],
"symlink_target": ""
}
|
import os
import sys
import json
import subprocess
import lighthouse_result_pb2 as lhr_pb2
from google.protobuf.json_format import Parse, MessageToJson
path = os.path.realpath(__file__)
path_dir = os.path.dirname(path)
path_sample_preprocessed = path_dir + '/sample_v2_processed.json'
path_sample = path_dir + '/../../lighthouse-core/test/results/sample_v2.json'
path_round_trip = path_dir + '/../sample_v2_round_trip.json'
def clean():
try:
os.remove(path_sample_preprocessed)
except OSError:
pass
# clean workspace
clean()
# preprocess the sample json
cmd = ["node", "./../../lighthouse-core/lib/proto-preprocessor.js", "--in=./../../lighthouse-core/test/results/sample_v2.json", "--out=./sample_v2_processed.json"]
process = subprocess.call(cmd)
# open json
with open(path_sample_preprocessed, 'r') as f:
data = json.load(f)
# make empty proto lhr
proto_lhr = lhr_pb2.LighthouseResult()
# fill proto lhr with data from JSON
Parse(json.dumps(data), proto_lhr)
# convert proto back into json
round_trip_lhr = json.loads(MessageToJson(proto_lhr, including_default_value_fields=False))
# write the json to disk
with open(path_round_trip, 'w') as f:
json.dump(round_trip_lhr, f, indent=4, sort_keys=True, separators=(',', ': '))
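# The core round-trip pattern used above (illustrative summary; `SomeMessage`
# stands for any generated protobuf message class):
#   msg = SomeMessage()
#   Parse(json_string, msg)             # JSON -> proto
#   json_string_2 = MessageToJson(msg)  # proto -> JSON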
|
{
"content_hash": "0bf3b6d1d5281b1e4372eb7f1da9cf0f",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 163,
"avg_line_length": 29.511627906976745,
"alnum_prop": 0.7060677698975572,
"repo_name": "deepanjanroy/lighthouse",
"id": "a70b8f485c4be80bb01210ccaaed0416b6c2d21f",
"size": "1269",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "proto/scripts/json_roundtrip_via_proto.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "52013"
},
{
"name": "HTML",
"bytes": "110790"
},
{
"name": "JavaScript",
"bytes": "2777688"
},
{
"name": "Python",
"bytes": "1269"
},
{
"name": "Ruby",
"bytes": "7024"
},
{
"name": "Shell",
"bytes": "19492"
}
],
"symlink_target": ""
}
|
from ea import adult_selection
from ea import parent_selection
from ea import reproduction
from ea import main
from ea import binary_gtype
from ea.ea_globals import *
def fitness_test(population):
'''Naive fitness test for onemax, just the number of ones'''
tested = []
for ind in population:
tested += [gpfa_t(gtype=ind.gtype, ptype=ind.ptype, fitness=sum(ind.ptype), age=ind.age)]
return tested
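# Example (illustrative): for an individual whose phenotype is [1, 0, 1, 1],
# the onemax fitness computed above is sum([1, 0, 1, 1]) == 3.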
def develop(population):
'''Development function for onemax (just copies the genotype)'''
developed = []
for ind in population:
developed += [gpa_t(gtype=ind.gtype, ptype=list(ind.gtype), age=ind.age)]
return developed
def visualize(generation_list):
'''Generate visualizations using matplotlib'''
return None
if __name__=='__main__':
size = int(raw_input("Input problem size:\n"))
popsize = int(raw_input("Input population size:\n"))
adult_selection, litter_size = adult_selection.gen_adult_selection(popsize)
parent_selection = parent_selection.gen_parent_selection(litter_size)
mutate = binary_gtype.gen_mutate()
crossover = binary_gtype.gen_crossover()
reproduction = reproduction.gen_reproduction(mutate, crossover)
generations = int(input("Input max number of generations:\n"))
fitness_goal = float(input("Input fitness goal, 0 for none:\n"))
initial = [ga_t(gtype=binary_gtype.generate(size), age=0) for i in xrange(popsize)]
generation_list = main.evolutionary_algorithm(initial, develop, fitness_test, adult_selection, parent_selection, reproduction, generations, fitness_goal)
print "Program ran for " + str(len(generation_list)) + " generations"
|
{
"content_hash": "67610fd67c22ae862bf08cf9cf3086e1",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 157,
"avg_line_length": 38.76744186046512,
"alnum_prop": 0.7096580683863227,
"repo_name": "imre-kerr/better-ea",
"id": "f4c94845c8154c68ab50617bfdc932bbf85450ad",
"size": "1667",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "onemax.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "62005"
},
{
"name": "TeX",
"bytes": "23920"
}
],
"symlink_target": ""
}
|
"""
:copyright: (c) 2013-2020 by Mike Taylor and Kartik Prabhu
:license: MIT, see LICENSE for more details.
"""
import json
import requests
path_testdata = './data/webmention_rocks_test'
test_data = {}
def storePageData(pageURL, pageFilename):
"""Store the returned HTML as a file
"""
r = requests.get(pageURL)
with open('%s.html' % pageFilename, 'w') as h:
h.write(r.text)
with open('%s.json' % pageFilename, 'w') as h:
h.write(json.dumps(dict(r.headers), indent=2))
for n in range(1, 23):
url = 'https://webmention.rocks/test/%d' % n
fname = '%s_%d' % (path_testdata, n)
storePageData(url, fname)
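# For example, the loop above fetches https://webmention.rocks/test/1 through
# /test/22 and writes the pairs
#   ./data/webmention_rocks_test_1.html and .json
#   ...
#   ./data/webmention_rocks_test_22.html and .json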
|
{
"content_hash": "86285b9651ac689be3f811c7689741a1",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 58,
"avg_line_length": 25.346153846153847,
"alnum_prop": 0.6282245827010622,
"repo_name": "bear/ronkyuu",
"id": "91695e6007b632614825b7c305f18d7d72126bd1",
"size": "683",
"binary": false,
"copies": "1",
"ref": "refs/heads/setup-codeql",
"path": "tests/rebuild_data.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "447978"
},
{
"name": "Makefile",
"bytes": "1045"
},
{
"name": "Python",
"bytes": "32915"
}
],
"symlink_target": ""
}
|
from random import randint
num = randint(1, 100)
print('Guess what I think?')
bingo = False
while bingo == False:
    answer = int(input())
if answer > num:
print('too big!')
if answer < num:
print('too small!')
if answer == num:
print('BINGO!')
bingo = True
|
{
"content_hash": "bd7bef7c791fa031ee936a92b5224bab",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 28,
"avg_line_length": 16.833333333333332,
"alnum_prop": 0.5577557755775577,
"repo_name": "QuinceySun/Python",
"id": "fb4bc7af4d94e4971e5afcb7c7d8627048570a8d",
"size": "303",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "crossincode.com/Python_Getting_Started/random-randint.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "786"
},
{
"name": "Python",
"bytes": "149759"
}
],
"symlink_target": ""
}
|
from collections import OrderedDict
from lxml import etree
import pyusps.urlutil
api_url = 'https://production.shippingapis.com/ShippingAPI.dll'
address_max = 5
def _find_error(root):
if root.tag == 'Error':
num = root.find('Number')
desc = root.find('Description')
return (num, desc)
def _get_error(error):
(num, desc) = error
return ValueError(
'{num}: {desc}'.format(
num=num.text,
desc=desc.text,
)
)
def _get_address_error(address):
error = address.find('Error')
if error is not None:
error = _find_error(error)
return _get_error(error)
def _parse_address(address):
result = OrderedDict()
for child in address.iterchildren():
# elements are yielded in order
name = child.tag.lower()
# More user-friendly names for street
# attributes
if name == 'address2':
name = 'address'
elif name == 'address1':
name = 'address_extended'
elif name == 'firmname':
name = 'firm_name'
result[name] = child.text
return result
def _process_one(address):
# Raise address error if there's only one item
error = _get_address_error(address)
if error is not None:
raise error
return _parse_address(address)
def _process_multiple(addresses):
results = []
count = 0
for address in addresses:
# Return error object if there are
# multiple items
error = _get_address_error(address)
if error is not None:
result = error
else:
result = _parse_address(address)
if str(count) != address.get('ID'):
msg = ('The addresses returned are not in the same '
'order they were requested'
)
raise IndexError(msg)
results.append(result)
count += 1
return results
def _parse_response(res):
# General error, e.g., authorization
error = _find_error(res.getroot())
if error is not None:
raise _get_error(error)
results = res.findall('Address')
length = len(results)
if length == 0:
raise TypeError(
'Could not find any address or error information'
)
if length == 1:
return _process_one(results.pop())
return _process_multiple(results)
def _get_response(xml):
params = OrderedDict([
('API', 'Verify'),
('XML', etree.tostring(xml)),
])
url = '{api_url}?{params}'.format(
api_url=api_url,
params=pyusps.urlutil.urlencode(params),
)
res = pyusps.urlutil.urlopen(url)
res = etree.parse(res)
return res
def _create_xml(
user_id,
*args
):
root = etree.Element('AddressValidateRequest', USERID=user_id)
if len(args) > address_max:
# Raise here. The Verify API will not return an error. It will
# just return the first 5 results
raise ValueError(
'Only {address_max} addresses are allowed per '
'request'.format(
address_max=address_max,
)
)
for i,arg in enumerate(args):
address = arg['address']
city = arg['city']
state = arg.get('state', None)
zip_code = arg.get('zip_code', None)
address_extended = arg.get('address_extended', None)
firm_name = arg.get('firm_name', None)
urbanization = arg.get('urbanization', None)
address_el = etree.Element('Address', ID=str(i))
root.append(address_el)
# Documentation says this tag is required but tests
# show it isn't
if firm_name is not None:
firm_name_el = etree.Element('FirmName')
firm_name_el.text = firm_name
address_el.append(firm_name_el)
address_1_el = etree.Element('Address1')
if address_extended is not None:
address_1_el.text = address_extended
address_el.append(address_1_el)
address_2_el = etree.Element('Address2')
address_2_el.text = address
address_el.append(address_2_el)
city_el = etree.Element('City')
city_el.text = city
address_el.append(city_el)
state_el = etree.Element('State')
if state is not None:
state_el.text = state
address_el.append(state_el)
if urbanization is not None:
urbanization_el = etree.Element('Urbanization')
urbanization_el.text = urbanization
address_el.append(urbanization_el)
zip5 = None
zip4 = None
if zip_code is not None:
zip5 = zip_code[:5]
zip4 = zip_code[5:]
if zip4.startswith('-'):
zip4 = zip4[1:]
zip5_el = etree.Element('Zip5')
if zip5 is not None:
zip5_el.text = zip5
address_el.append(zip5_el)
zip4_el = etree.Element('Zip4')
if zip4 is not None:
zip4_el.text = zip4
address_el.append(zip4_el)
return root
def verify(user_id, *args):
xml = _create_xml(user_id, *args)
res = _get_response(xml)
res = _parse_response(res)
return res
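# Minimal usage sketch (illustrative, not part of the original module). The
# user id and address are hypothetical placeholders; the dict keys match the
# ones read by _create_xml above.
#
#   import pyusps.address_information
#   addr = dict(
#       address='6406 Ivy Lane',
#       city='Greenbelt',
#       state='MD',
#   )
#   result = pyusps.address_information.verify('YOUR_USPS_USER_ID', addr)
#   # result is an OrderedDict with lower-cased tags, e.g. result['zip5']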
|
{
"content_hash": "a4b772c405431d4f9553469c1aa6a934",
"timestamp": "",
"source": "github",
"line_count": 192,
"max_line_length": 70,
"avg_line_length": 27.572916666666668,
"alnum_prop": 0.561956932376275,
"repo_name": "thelinuxkid/pyusps",
"id": "31964c01177663f85e07a39ba1264cbd9d5cac34",
"size": "5294",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyusps/address_information.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "29612"
}
],
"symlink_target": ""
}
|
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
# extensions = ['']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'edittag'
copyright = u'2011, Brant C. Faircloth'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.0'
# The full version, including alpha/beta/rc tags.
release = '1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
sys.path.append(os.path.abspath('_themes'))
html_theme_path = ['_themes']
html_theme = 'flask'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'edittagdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'edittag.tex', u'edittag Documentation',
u'Brant C. Faircloth', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'edittag', u'edittag Documentation',
[u'Brant C. Faircloth'], 1)
]
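# --------------------------------------------------------------------------------
# Hedged usage note (not part of the original conf.py): with this configuration
# the HTML docs are typically built from the docs/ directory with something like
#
#   sphinx-build -b html . _build/html
#
# The exclude_patterns entry above keeps the generated _build directory out of
# the source scan. The exact invocation (or a Makefile target such as `make html`)
# is an assumption about the surrounding project layout.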
|
{
"content_hash": "698cd02db4cd366c2d1bb87bc5c426ec",
"timestamp": "",
"source": "github",
"line_count": 205,
"max_line_length": 80,
"avg_line_length": 32.43414634146342,
"alnum_prop": 0.7043164385621898,
"repo_name": "carlosp420/edittag",
"id": "e8d46e612bc3cda336e929f1860b8ef00b15e124",
"size": "7067",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "docs/conf.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "976"
},
{
"name": "Python",
"bytes": "118386"
},
{
"name": "Shell",
"bytes": "4513"
}
],
"symlink_target": ""
}
|
"""
sentry.models.team
~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import, print_function
import warnings
from django.conf import settings
from django.db import connections, IntegrityError, models, router, transaction
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from sentry.app import env, locks
from sentry.db.models import (
BaseManager, BoundedPositiveIntegerField, FlexibleForeignKey, Model, sane_repr
)
from sentry.db.models.utils import slugify_instance
from sentry.utils.retries import TimedRetryPolicy
class TeamManager(BaseManager):
def get_for_user(self, organization, user, scope=None, with_projects=False):
"""
Returns a list of all teams a user has some level of access to.
"""
from sentry.auth.superuser import is_active_superuser
from sentry.models import (
OrganizationMemberTeam,
Project,
ProjectStatus,
OrganizationMember,
)
if not user.is_authenticated():
return []
base_team_qs = self.filter(organization=organization, status=TeamStatus.VISIBLE)
        if (env.request and is_active_superuser(env.request)) or settings.SENTRY_PUBLIC:
team_list = list(base_team_qs)
else:
try:
om = OrganizationMember.objects.get(
user=user,
organization=organization,
)
except OrganizationMember.DoesNotExist:
# User is not a member of the organization at all
return []
# If a scope is passed through, make sure this scope is
# available on the OrganizationMember object.
if scope is not None and scope not in om.get_scopes():
return []
team_list = list(
base_team_qs.filter(
id__in=OrganizationMemberTeam.objects.filter(
organizationmember=om,
is_active=True,
).values_list('team'),
)
)
results = sorted(team_list, key=lambda x: x.name.lower())
if with_projects:
project_list = sorted(
Project.objects.filter(
team__in=team_list,
status=ProjectStatus.VISIBLE,
),
key=lambda x: x.name.lower()
)
projects_by_team = {t.id: [] for t in team_list}
for project in project_list:
projects_by_team[project.team_id].append(project)
# these kinds of queries make people sad :(
for idx, team in enumerate(results):
team_projects = projects_by_team[team.id]
for project in team_projects:
project.team = team
results[idx] = (team, team_projects)
return results
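# Hedged usage sketch (not part of the original module; `organization` and
# `request.user` are illustrative objects):
#
#   teams = Team.objects.get_for_user(organization, request.user)
#   # -> [Team, ...] sorted by name
#
#   teams_with_projects = Team.objects.get_for_user(
#       organization, request.user, with_projects=True)
#   # -> [(Team, [Project, ...]), ...]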
# TODO(dcramer): pull in enum library
class TeamStatus(object):
VISIBLE = 0
PENDING_DELETION = 1
DELETION_IN_PROGRESS = 2
class Team(Model):
"""
A team represents a group of individuals which maintain ownership of projects.
"""
__core__ = True
organization = FlexibleForeignKey('sentry.Organization')
slug = models.SlugField()
name = models.CharField(max_length=64)
status = BoundedPositiveIntegerField(
choices=(
(TeamStatus.VISIBLE, _('Active')), (TeamStatus.PENDING_DELETION, _('Pending Deletion')),
(TeamStatus.DELETION_IN_PROGRESS, _('Deletion in Progress')),
),
default=TeamStatus.VISIBLE
)
date_added = models.DateTimeField(default=timezone.now, null=True)
objects = TeamManager(cache_fields=('pk', 'slug', ))
class Meta:
app_label = 'sentry'
db_table = 'sentry_team'
unique_together = (('organization', 'slug'), )
__repr__ = sane_repr('name', 'slug')
def __unicode__(self):
return u'%s (%s)' % (self.name, self.slug)
def save(self, *args, **kwargs):
if not self.slug:
lock = locks.get('slug:team', duration=5)
with TimedRetryPolicy(10)(lock.acquire):
slugify_instance(self, self.name, organization=self.organization)
super(Team, self).save(*args, **kwargs)
else:
super(Team, self).save(*args, **kwargs)
@property
def member_set(self):
return self.organization.member_set.filter(
organizationmemberteam__team=self,
organizationmemberteam__is_active=True,
user__is_active=True,
).distinct()
def has_access(self, user, access=None):
from sentry.models import AuthIdentity, OrganizationMember
warnings.warn('Team.has_access is deprecated.', DeprecationWarning)
queryset = self.member_set.filter(
user=user,
)
if access is not None:
queryset = queryset.filter(type__lte=access)
try:
member = queryset.get()
except OrganizationMember.DoesNotExist:
return False
try:
auth_identity = AuthIdentity.objects.get(
auth_provider__organization=self.organization_id,
user=member.user_id,
)
except AuthIdentity.DoesNotExist:
return True
return auth_identity.is_valid(member)
def transfer_to(self, organization):
"""
Transfers a team and all projects under it to the given organization.
"""
from sentry.models import (
OrganizationAccessRequest, OrganizationMember, OrganizationMemberTeam, Project,
ProjectTeam, ReleaseProject
)
try:
with transaction.atomic():
self.update(organization=organization)
except IntegrityError:
# likely this means a team already exists, let's try to coerce to
# it instead of a blind transfer
new_team = Team.objects.get(
organization=organization,
slug=self.slug,
)
else:
new_team = self
project_ids = list(
Project.objects.filter(
team=self,
).exclude(
organization=organization,
).values_list('id', flat=True)
)
# remove associations with releases from other org
ReleaseProject.objects.filter(
project_id__in=project_ids,
).delete()
Project.objects.filter(
id__in=project_ids,
).update(
team=new_team,
organization=organization,
)
ProjectTeam.objects.filter(
project_id__in=project_ids,
).update(
team=new_team,
)
# remove any pending access requests from the old organization
if self != new_team:
OrganizationAccessRequest.objects.filter(
team=self,
).delete()
# identify shared members and ensure they retain team access
# under the new organization
old_memberships = OrganizationMember.objects.filter(
teams=self,
).exclude(
organization=organization,
)
for member in old_memberships:
try:
new_member = OrganizationMember.objects.get(
user=member.user,
organization=organization,
)
except OrganizationMember.DoesNotExist:
continue
try:
with transaction.atomic():
OrganizationMemberTeam.objects.create(
team=new_team,
organizationmember=new_member,
)
except IntegrityError:
pass
OrganizationMemberTeam.objects.filter(
team=self,
).exclude(
organizationmember__organization=organization,
).delete()
if new_team != self:
cursor = connections[router.db_for_write(Team)].cursor()
# we use a cursor here to avoid automatic cascading of relations
# in Django
try:
cursor.execute('DELETE FROM sentry_team WHERE id = %s', [self.id])
finally:
cursor.close()
def get_audit_log_data(self):
return {
'id': self.id,
'slug': self.slug,
'name': self.name,
'status': self.status,
}
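# Hedged usage sketch (not part of the original module; object names are
# illustrative). Note that transfer_to reuses an existing team with the same
# slug in the target organization if one already exists:
#
#   team = Team.objects.get(organization=old_org, slug='ops')
#   team.transfer_to(new_org)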
|
{
"content_hash": "152a82b9ef9ad716827447a40404c09b",
"timestamp": "",
"source": "github",
"line_count": 273,
"max_line_length": 100,
"avg_line_length": 32.010989010989015,
"alnum_prop": 0.5642522027691955,
"repo_name": "gencer/sentry",
"id": "ed0c99d04d360d134c4c7fdf0c4a94e0f0f1459e",
"size": "8739",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/sentry/models/team.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "318167"
},
{
"name": "HTML",
"bytes": "281885"
},
{
"name": "JavaScript",
"bytes": "2342569"
},
{
"name": "Lua",
"bytes": "65795"
},
{
"name": "Makefile",
"bytes": "8393"
},
{
"name": "Python",
"bytes": "28161647"
},
{
"name": "Ruby",
"bytes": "4233"
},
{
"name": "Shell",
"bytes": "2149"
}
],
"symlink_target": ""
}
|
import numpy as np
# Scipy
from scipy.stats import linregress
# Matplotlib
import matplotlib.pyplot as plt
def plot_spectrum(spectrum, freqs=None, drop_zero_frequency=True, ax=None,
xlog=False, ylog=False, loglog=False,
variance_preserving=False, xlim=None,
ylim=None, title=None, **kwargs):
"""Define a nice spectrum with twin x-axis, one with frequencies, the
other one with periods.
Parameters
----------
spectrum : 1d xarray.DataArray or 1darray
The array where the spectrum is stored
    freqs : 1d vector, optional
The frequency vector. If None, the frequency vector is inferred
from the DataArray
drop_zero_frequency : bool, optional
If True, do not plot the zero frequency
ax : matplotlib axes, optional
If None, uses the current axis.
xlog : bool, optional
If True, use log scaling for the x axis
ylog : bool, optional
If True, use log scaling for the y axis
loglog : bool, optional
If True, use log scaling for both axis
variance_preserving : bool, optional
If True, scale the spectrum by the log of frequencies to use the
variance preserving form
xlim : tuple, optional
Set x-axis limits
ylim : tuple, optional
Set y-axis limits
title : string, optional
Set the title
**kwargs : optional
Additional arguments to matplotlib.pyplot.plot
"""
if ax is None:
ax = plt.gca()
if freqs is None:
freqs = spectrum[spectrum.dims[0]]
if drop_zero_frequency:
spectrum = spectrum.where(freqs != 0.)
freqs = freqs.where(freqs != 0.)
if variance_preserving:
spectrum = freqs * spectrum
xlog = True
ax.plot(freqs, spectrum, **kwargs)
if xlog or loglog:
ax.set_xscale('log', nonposx='clip')
try:
xmin = np.ceil(np.log10(np.abs(xlim[0]))) - 1
xmax = np.ceil(np.log10(np.abs(xlim[1])))
ax.set_xlim((10 ** xmin, 10 ** xmax))
except TypeError:
try:
xmin = np.ceil(np.log10(abs(freqs[1]))) - 1
xmax = np.ceil(np.log10(abs(freqs[-1])))
ax.set_xlim((10 ** xmin, 10 ** xmax))
except TypeError:
pass
else:
ax.set_xlim(xlim)
if ylog or loglog:
ax.set_yscale('log', nonposy='clip')
try:
ymin = np.ceil(np.log10(np.abs(ylim[0]))) - 1
ymax = np.ceil(np.log10(np.abs(ylim[1])))
ax.set_ylim((10 ** ymin, 10 ** ymax))
except TypeError:
try:
ymin = np.ceil(np.log10(spectrum.min())) - 1
ymax = np.ceil(np.log10(spectrum.max()))
ax.set_ylim((10 ** ymin, 10 ** ymax))
except TypeError:
pass
else:
ax.set_ylim(ylim)
twiny = ax.twiny()
if xlog or loglog:
twiny.set_xscale('log', nonposx='clip')
twiny.set_xlim((10 ** xmin, 10 ** xmax))
new_major_ticks = 10 ** np.arange(xmin + 1, xmax, 1)
new_major_ticklabels = 1. / new_major_ticks
new_major_ticklabels = ["%.3g" % i for i in new_major_ticklabels]
twiny.set_xticks(new_major_ticks)
twiny.set_xticklabels(new_major_ticklabels, rotation=60, fontsize=12)
A = np.arange(2, 10, 2)[np.newaxis]
B = 10 ** (np.arange(-xmax, -xmin, 1)[np.newaxis])
C = np.dot(B.transpose(), A)
new_minor_ticklabels = C.flatten()
new_minor_ticks = 1. / new_minor_ticklabels
new_minor_ticklabels = ["%.3g" % i for i in new_minor_ticklabels]
twiny.set_xticks(new_minor_ticks, minor=True)
twiny.set_xticklabels(new_minor_ticklabels, minor=True, rotation=60,
fontsize=12)
ax.grid(True, which='both')
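# Hedged usage sketch (not part of the original module): plotting a synthetic
# one-dimensional spectrum with an explicit frequency vector. Variable names
# and values below are illustrative only.
#
#   import numpy as np
#   import matplotlib.pyplot as plt
#
#   freqs = np.linspace(1e-3, 0.5, 256)      # hypothetical frequency axis
#   spectrum = freqs ** (-5. / 3.)           # synthetic power-law spectrum
#   fig, ax = plt.subplots()
#   plot_spectrum(spectrum, freqs=freqs, drop_zero_frequency=False,
#                 loglog=True, ax=ax)
#   plt.show()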
def plot_power_law(power, scale_factor=1., ax=None, **kwargs):
"""Plot a logarithmic power law
Parameters
----------
power : float
The exponent of the power law
scale_factor : float, optional
The factor to scale the power law with
ax : matplotlib axes, optional
If None, uses the current axis.
**kwargs : optional
Additional arguments to matplotlib.pyplot.plot
Returns
-------
lines : Line2D
Return a Line2D object created by the matplotlib.axes.Axes.plot method
"""
if ax is None:
ax = plt.gca()
xlim = np.array(ax.get_xlim())
power_law = scale_factor * xlim ** power
return ax.plot(xlim, power_law, **kwargs)
def fit_power_law(freq, spectrum):
"""Fit a logarithmic spectral law based on the input one
dimensional spectrum
Parameters
----------
freq : 1darray
The frequency coordinates
spectrum : 1darray
The one-dimensional spectrum
Returns
-------
    power : float
        The exponent characterizing the power-law spectrum
    scale_factor : float
        The scale factor needed to fit the power law to the input spectrum
"""
power, intercept, _, _, _ = linregress(np.log(freq), np.log(spectrum))
scale_factor = np.exp(intercept)
return power, scale_factor
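# Hedged example (not part of the original module): fitting a power law to a
# synthetic spectrum and overlaying it on an existing plot. Values are
# illustrative only.
#
#   freq = np.linspace(1e-3, 0.5, 128)
#   spectrum = 2.0 * freq ** (-5. / 3.)
#   power, scale_factor = fit_power_law(freq, spectrum)
#   # power ~= -5/3 and scale_factor ~= 2.0 for this synthetic input
#   fig, ax = plt.subplots()
#   ax.loglog(freq, spectrum)
#   plot_power_law(power, scale_factor=scale_factor, ax=ax, linestyle='--')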
def _plot_spectrum2d(ax, x, y, z, xlog=False, ylog=False, zlog=False, **kwargs):
"""
    Plot a two-dimensional spectrum on a predefined axis object, with twin
    x- and y-axes: one pair labelled with frequencies, the other with the
    corresponding periods.
Parameters
----------
x,y : array_like
1D array defining the coordinates
z : array_like
2D array
xlog, ylog, zlog : bool, optional
Define if the x-axis, y-axis and z-axis are plotted with a
log scale
    **kwargs : optional keyword arguments
See matplotlib.axes.Axes.contourf method in matplotlib
documentation
"""
    xlim = kwargs.pop('xlim', None)
    ylim = kwargs.pop('ylim', None)
    zlim = kwargs.pop('zlim', None)
if zlog:
plot = ax.pcolormesh(np.log10(z), **kwargs)
else:
plot = ax.pcolormesh(z, **kwargs)
# X limits
if xlog:
ax.set_xscale('symlog', nonposx='clip')
xmin = np.ceil(np.log10(x[1,])) - 1
xmax = np.ceil(np.log10(x[-1,]))
ax.set_xlim((10 ** xmin, 10 ** xmax))
else:
try:
ax.set_xlim(xlim)
except:
ax.set_xlim(np.min(x), np.max(x))
# Y limits
if ylog:
        ax.set_yscale('symlog', nonposy='clip')
        # use the y coordinate (not x) to derive the y-axis limits
        ymin = np.ceil(np.log10(y[1,])) - 1
        ymax = np.ceil(np.log10(y[-1,]))
ax.set_ylim((-10 ** ymin, 10 ** ymax))
else:
try:
ax.set_ylim(ylim)
except:
ax.set_ylim(np.min(y), np.max(y))
axtwiny = ax.twiny()
if xlog:
axtwiny.set_xscale('symlog', nonposx='clip')
axtwiny.set_xlim((-10 ** xmin, 10 ** xmax))
A = np.arange(2, 10, 2)[np.newaxis]
B = 10 ** (np.arange(-xmax, -xmin, 1)[np.newaxis])
C = np.dot(B.transpose(), A)
new_major_ticks = 10 ** np.arange(xmin + 1, xmax, 1)
new_minor_ticklabels = C.flatten()
new_minor_ticklabels = new_minor_ticklabels.astype(int)
new_minor_ticks = 1. / new_minor_ticklabels
axtwiny.set_xticks(new_minor_ticks, minor=True)
axtwiny.set_xticklabels(new_minor_ticklabels, minor=True,
rotation=30)
new_major_ticklabels = 1. / new_major_ticks
new_major_ticklabels = new_major_ticklabels.astype(int)
axtwiny.set_xticks(new_major_ticks)
axtwiny.set_xticklabels(new_major_ticklabels, rotation=30)
axtwinx = ax.twinx()
if ylog:
        axtwinx.set_yscale('symlog', nonposy='clip')
axtwinx.set_ylim(y[1], y[-1])
axtwinx.set_ylim((10 ** ymin, 10 ** ymax))
new_major_ticks = 10 ** np.arange(ymin + 1, ymax, 1)
new_major_ticklabels = 1. / new_major_ticks
new_major_ticklabels = new_major_ticklabels.astype(int)
axtwinx.set_yticks(new_major_ticks)
axtwinx.set_yticklabels(new_major_ticklabels)
A = np.arange(2, 10, 2)[np.newaxis]
B = 10 ** (np.arange(-ymax, -ymin, 1)[np.newaxis])
C = np.dot(B.transpose(), A)
new_minor_ticklabels = C.flatten()
new_minor_ticklabels = new_minor_ticklabels.astype(int)
new_minor_ticks = 1. / new_minor_ticklabels
axtwinx.set_yticks(new_minor_ticks, minor=True)
axtwinx.set_yticklabels(new_minor_ticklabels, minor=True)
ax.grid(True, which='both')
|
{
"content_hash": "aa62d53ab32237af2d9b1c7c66f1f575",
"timestamp": "",
"source": "github",
"line_count": 265,
"max_line_length": 80,
"avg_line_length": 29.71698113207547,
"alnum_prop": 0.6627301587301587,
"repo_name": "serazing/xscale",
"id": "27b08d0a1b8c0ea7e7fcd1de9325a88fb6b4e326",
"size": "7883",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "xscale/spectral/tools.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "164158"
}
],
"symlink_target": ""
}
|
from datadog_checks.base.utils.models.fields import get_default_field_value
def shared_proxy(field, value):
return get_default_field_value(field, value)
def shared_service(field, value):
return get_default_field_value(field, value)
def shared_skip_proxy(field, value):
return False
def shared_timeout(field, value):
return 10
def instance_allow_redirects(field, value):
return True
def instance_auth_token(field, value):
return get_default_field_value(field, value)
def instance_auth_type(field, value):
return 'basic'
def instance_aws_host(field, value):
return get_default_field_value(field, value)
def instance_aws_region(field, value):
return get_default_field_value(field, value)
def instance_aws_service(field, value):
return get_default_field_value(field, value)
def instance_bearer_token_auth(field, value):
return get_default_field_value(field, value)
def instance_bearer_token_path(field, value):
return get_default_field_value(field, value)
def instance_bearer_token_refresh_interval(field, value):
return 60
def instance_collect_nginx_histograms(field, value):
return False
def instance_connect_timeout(field, value):
return get_default_field_value(field, value)
def instance_disable_generic_tags(field, value):
return False
def instance_empty_default_hostname(field, value):
return False
def instance_exclude_labels(field, value):
return get_default_field_value(field, value)
def instance_extra_headers(field, value):
return get_default_field_value(field, value)
def instance_headers(field, value):
return get_default_field_value(field, value)
def instance_health_service_check(field, value):
return True
def instance_ignore_metrics(field, value):
return get_default_field_value(field, value)
def instance_ignore_metrics_by_labels(field, value):
return get_default_field_value(field, value)
def instance_ignore_tags(field, value):
return get_default_field_value(field, value)
def instance_include_labels(field, value):
return get_default_field_value(field, value)
def instance_kerberos_auth(field, value):
return 'disabled'
def instance_kerberos_cache(field, value):
return get_default_field_value(field, value)
def instance_kerberos_delegate(field, value):
return False
def instance_kerberos_force_initiate(field, value):
return False
def instance_kerberos_hostname(field, value):
return get_default_field_value(field, value)
def instance_kerberos_keytab(field, value):
return get_default_field_value(field, value)
def instance_kerberos_principal(field, value):
return get_default_field_value(field, value)
def instance_label_joins(field, value):
return get_default_field_value(field, value)
def instance_label_to_hostname(field, value):
return get_default_field_value(field, value)
def instance_labels_mapper(field, value):
return get_default_field_value(field, value)
def instance_log_requests(field, value):
return False
def instance_metric_patterns(field, value):
return get_default_field_value(field, value)
def instance_metrics(field, value):
return get_default_field_value(field, value)
def instance_min_collection_interval(field, value):
return 15
def instance_namespace(field, value):
return 'service'
def instance_ntlm_domain(field, value):
return get_default_field_value(field, value)
def instance_password(field, value):
return get_default_field_value(field, value)
def instance_persist_connections(field, value):
return False
def instance_prometheus_metrics_prefix(field, value):
return get_default_field_value(field, value)
def instance_proxy(field, value):
return get_default_field_value(field, value)
def instance_read_timeout(field, value):
return get_default_field_value(field, value)
def instance_request_size(field, value):
return 10
def instance_send_distribution_buckets(field, value):
return False
def instance_send_distribution_counts_as_monotonic(field, value):
return False
def instance_send_distribution_sums_as_monotonic(field, value):
return False
def instance_send_histograms_buckets(field, value):
return True
def instance_send_monotonic_counter(field, value):
return True
def instance_send_monotonic_with_gauge(field, value):
return False
def instance_service(field, value):
return get_default_field_value(field, value)
def instance_skip_proxy(field, value):
return False
def instance_tags(field, value):
return get_default_field_value(field, value)
def instance_timeout(field, value):
return 10
def instance_tls_ca_cert(field, value):
return get_default_field_value(field, value)
def instance_tls_cert(field, value):
return get_default_field_value(field, value)
def instance_tls_ignore_warning(field, value):
return False
def instance_tls_private_key(field, value):
return get_default_field_value(field, value)
def instance_tls_protocols_allowed(field, value):
return get_default_field_value(field, value)
def instance_tls_use_host_header(field, value):
return False
def instance_tls_verify(field, value):
return True
def instance_type_overrides(field, value):
return get_default_field_value(field, value)
def instance_use_legacy_auth_encoding(field, value):
return True
def instance_use_process_start_time(field, value):
return False
def instance_username(field, value):
return get_default_field_value(field, value)
|
{
"content_hash": "2ef29dfbadd26c814bea5b083a662906",
"timestamp": "",
"source": "github",
"line_count": 273,
"max_line_length": 75,
"avg_line_length": 20.41025641025641,
"alnum_prop": 0.7374371859296482,
"repo_name": "DataDog/integrations-core",
"id": "ac37081d21df7126a7260484b1586e98d5ec1255",
"size": "5929",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nginx_ingress_controller/datadog_checks/nginx_ingress_controller/config_models/defaults.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "578"
},
{
"name": "COBOL",
"bytes": "12312"
},
{
"name": "Dockerfile",
"bytes": "22998"
},
{
"name": "Erlang",
"bytes": "15518"
},
{
"name": "Go",
"bytes": "6988"
},
{
"name": "HCL",
"bytes": "4080"
},
{
"name": "HTML",
"bytes": "1318"
},
{
"name": "JavaScript",
"bytes": "1817"
},
{
"name": "Kotlin",
"bytes": "430"
},
{
"name": "Lua",
"bytes": "3489"
},
{
"name": "PHP",
"bytes": "20"
},
{
"name": "PowerShell",
"bytes": "2398"
},
{
"name": "Python",
"bytes": "13020828"
},
{
"name": "Roff",
"bytes": "359"
},
{
"name": "Ruby",
"bytes": "241"
},
{
"name": "Scala",
"bytes": "7000"
},
{
"name": "Shell",
"bytes": "83227"
},
{
"name": "Swift",
"bytes": "203"
},
{
"name": "TSQL",
"bytes": "29972"
},
{
"name": "TypeScript",
"bytes": "1019"
}
],
"symlink_target": ""
}
|
import dominoes
import unittest
class TestResult(unittest.TestCase):
def test_result(self):
p = 0
w = True
pts = 100
r = dominoes.Result(p, w, pts)
self.assertEqual(r.player, p)
        self.assertEqual(r.won, w)
self.assertEqual(r.points, pts)
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "bf1bbb662c74de7d25c4b11169548273",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 39,
"avg_line_length": 21.875,
"alnum_prop": 0.5771428571428572,
"repo_name": "abw333/dominoes",
"id": "4b6b9fa53795c0defe8848bd15f2d10257eeab90",
"size": "350",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_result.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "131060"
}
],
"symlink_target": ""
}
|
from PIL import Image
import logging
from pixelsort.argparams import parse_args
from pixelsort.main import pixelsort
from pixelsort.util import id_generator
args = parse_args()
image_input_path = args.pop("image_input_path")
image_output_path = args.pop("image_output_path")
interval_file_path = args.pop("interval_file_path")
mask_path = args.pop("mask_path")
if image_output_path is None:
image_output_path = id_generator() + ".png"
logging.warning("No output path provided, using " + image_output_path)
logging.debug("Opening image...")
args["image"] = Image.open(image_input_path)
if mask_path:
logging.debug("Opening mask...")
args["mask_image"] = Image.open(mask_path)
if interval_file_path:
logging.debug("Opening interval file...")
args["interval_image"] = Image.open(interval_file_path)
logging.debug("Saving image...")
pixelsort(**args).save(image_output_path)
|
{
"content_hash": "c560e3a0d8b11530ee9b5512deb9f9a3",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 74,
"avg_line_length": 33.2962962962963,
"alnum_prop": 0.7252502780867631,
"repo_name": "satyarth/pixelsort",
"id": "532712db3405eea4c6a3473c7a579bc1362ca797",
"size": "899",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pixelsort/__main__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "16997"
}
],
"symlink_target": ""
}
|
from distutils.core import setup
setup(name='django-country-utils', version='0.1',
description='Add a CountryField to Django',
author='Gustaf Sjöberg', author_email='gs@distrop.com',
packages=['country_utils'],
zip_safe=False)
|
{
"content_hash": "03fb4f67374a92ca690860a325114a70",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 61,
"avg_line_length": 36,
"alnum_prop": 0.6865079365079365,
"repo_name": "strange/django-country-utils",
"id": "bac17b74655cc6dadab051ededdac6df88088cab",
"size": "294",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "12183"
}
],
"symlink_target": ""
}
|
from paddle.trainer.config_parser import parse_config
from paddle.proto import TrainerConfig_pb2
import sys
__all__ = []
if __name__ == '__main__':
whole_conf = False
if len(sys.argv) == 2:
conf = parse_config(sys.argv[1], '')
elif len(sys.argv) == 3:
conf = parse_config(sys.argv[1], sys.argv[2])
elif len(sys.argv) == 4:
conf = parse_config(sys.argv[1], sys.argv[2])
if sys.argv[3] == '--whole':
whole_conf = True
else:
raise RuntimeError()
assert isinstance(conf, TrainerConfig_pb2.TrainerConfig)
if whole_conf:
print conf
else:
print conf.model_config
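# Hedged usage sketch (inferred from the sys.argv handling above; the script is
# Python 2 code, matching the print statements). '<config_args>' is a
# placeholder for whatever extra configuration string the trainer config expects:
#
#   python dump_config.py trainer_config.py                        # model_config only
#   python dump_config.py trainer_config.py '<config_args>'        # with extra args
#   python dump_config.py trainer_config.py '<config_args>' --whole  # whole TrainerConfig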
|
{
"content_hash": "135e58d82dff58aaf4d7898e62a52f17",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 60,
"avg_line_length": 26.4,
"alnum_prop": 0.5893939393939394,
"repo_name": "alvations/Paddle",
"id": "c5ce5c8d9a084d68b250d091808f528459f46921",
"size": "1260",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "python/paddle/utils/dump_config.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "217842"
},
{
"name": "C++",
"bytes": "2768145"
},
{
"name": "CMake",
"bytes": "113668"
},
{
"name": "Cuda",
"bytes": "424141"
},
{
"name": "M4",
"bytes": "40911"
},
{
"name": "Perl",
"bytes": "11412"
},
{
"name": "Python",
"bytes": "889886"
},
{
"name": "Shell",
"bytes": "63769"
}
],
"symlink_target": ""
}
|
"""Support for OwnTracks."""
from collections import defaultdict
import json
import logging
import re
from aiohttp.web import json_response
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.components import mqtt
from homeassistant.const import (
ATTR_GPS_ACCURACY,
ATTR_LATITUDE,
ATTR_LONGITUDE,
CONF_WEBHOOK_ID,
Platform,
)
from homeassistant.core import callback
import homeassistant.helpers.config_validation as cv
from homeassistant.setup import async_when_setup
from .config_flow import CONF_SECRET
from .const import DOMAIN
from .messages import async_handle_message, encrypt_message
_LOGGER = logging.getLogger(__name__)
CONF_MAX_GPS_ACCURACY = "max_gps_accuracy"
CONF_WAYPOINT_IMPORT = "waypoints"
CONF_WAYPOINT_WHITELIST = "waypoint_whitelist"
CONF_MQTT_TOPIC = "mqtt_topic"
CONF_REGION_MAPPING = "region_mapping"
CONF_EVENTS_ONLY = "events_only"
BEACON_DEV_ID = "beacon"
PLATFORMS = [Platform.DEVICE_TRACKER]
DEFAULT_OWNTRACKS_TOPIC = "owntracks/#"
CONFIG_SCHEMA = vol.Schema(
{
vol.Optional(DOMAIN, default={}): {
vol.Optional(CONF_MAX_GPS_ACCURACY): vol.Coerce(float),
vol.Optional(CONF_WAYPOINT_IMPORT, default=True): cv.boolean,
vol.Optional(CONF_EVENTS_ONLY, default=False): cv.boolean,
vol.Optional(
CONF_MQTT_TOPIC, default=DEFAULT_OWNTRACKS_TOPIC
): mqtt.valid_subscribe_topic,
vol.Optional(CONF_WAYPOINT_WHITELIST): vol.All(cv.ensure_list, [cv.string]),
vol.Optional(CONF_SECRET): vol.Any(
vol.Schema({vol.Optional(cv.string): cv.string}), cv.string
),
vol.Optional(CONF_REGION_MAPPING, default={}): dict,
vol.Optional(CONF_WEBHOOK_ID): cv.string,
}
},
extra=vol.ALLOW_EXTRA,
)
async def async_setup(hass, config):
"""Initialize OwnTracks component."""
hass.data[DOMAIN] = {"config": config[DOMAIN], "devices": {}, "unsub": None}
if not hass.config_entries.async_entries(DOMAIN):
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_IMPORT}, data={}
)
)
return True
async def async_setup_entry(hass, entry):
"""Set up OwnTracks entry."""
config = hass.data[DOMAIN]["config"]
max_gps_accuracy = config.get(CONF_MAX_GPS_ACCURACY)
waypoint_import = config.get(CONF_WAYPOINT_IMPORT)
waypoint_whitelist = config.get(CONF_WAYPOINT_WHITELIST)
secret = config.get(CONF_SECRET) or entry.data[CONF_SECRET]
region_mapping = config.get(CONF_REGION_MAPPING)
events_only = config.get(CONF_EVENTS_ONLY)
mqtt_topic = config.get(CONF_MQTT_TOPIC)
context = OwnTracksContext(
hass,
secret,
max_gps_accuracy,
waypoint_import,
waypoint_whitelist,
region_mapping,
events_only,
mqtt_topic,
)
webhook_id = config.get(CONF_WEBHOOK_ID) or entry.data[CONF_WEBHOOK_ID]
hass.data[DOMAIN]["context"] = context
async_when_setup(hass, "mqtt", async_connect_mqtt)
hass.components.webhook.async_register(
DOMAIN, "OwnTracks", webhook_id, handle_webhook
)
hass.config_entries.async_setup_platforms(entry, PLATFORMS)
hass.data[DOMAIN]["unsub"] = hass.helpers.dispatcher.async_dispatcher_connect(
DOMAIN, async_handle_message
)
return True
async def async_unload_entry(hass, entry):
"""Unload an OwnTracks config entry."""
hass.components.webhook.async_unregister(entry.data[CONF_WEBHOOK_ID])
unload_ok = await hass.config_entries.async_unload_platforms(entry, PLATFORMS)
hass.data[DOMAIN]["unsub"]()
return unload_ok
async def async_remove_entry(hass, entry):
"""Remove an OwnTracks config entry."""
if not entry.data.get("cloudhook"):
return
await hass.components.cloud.async_delete_cloudhook(entry.data[CONF_WEBHOOK_ID])
async def async_connect_mqtt(hass, component):
"""Subscribe to MQTT topic."""
context = hass.data[DOMAIN]["context"]
async def async_handle_mqtt_message(msg):
"""Handle incoming OwnTracks message."""
try:
message = json.loads(msg.payload)
except ValueError:
# If invalid JSON
_LOGGER.error("Unable to parse payload as JSON: %s", msg.payload)
return
message["topic"] = msg.topic
hass.helpers.dispatcher.async_dispatcher_send(DOMAIN, hass, context, message)
await hass.components.mqtt.async_subscribe(
context.mqtt_topic, async_handle_mqtt_message, 1
)
return True
async def handle_webhook(hass, webhook_id, request):
"""Handle webhook callback.
iOS sets the "topic" as part of the payload.
Android does not set a topic but adds headers to the request.
"""
context = hass.data[DOMAIN]["context"]
topic_base = re.sub("/#$", "", context.mqtt_topic)
try:
message = await request.json()
except ValueError:
_LOGGER.warning("Received invalid JSON from OwnTracks")
return json_response([])
# Android doesn't populate topic
if "topic" not in message:
headers = request.headers
user = headers.get("X-Limit-U")
device = headers.get("X-Limit-D", user)
if user:
message["topic"] = f"{topic_base}/{user}/{device}"
elif message["_type"] != "encrypted":
_LOGGER.warning(
"No topic or user found in message. If on Android,"
" set a username in Connection -> Identification"
)
# Keep it as a 200 response so the incorrect packet is discarded
return json_response([])
hass.helpers.dispatcher.async_dispatcher_send(DOMAIN, hass, context, message)
response = []
for person in hass.states.async_all("person"):
if "latitude" in person.attributes and "longitude" in person.attributes:
response.append(
{
"_type": "location",
"lat": person.attributes["latitude"],
"lon": person.attributes["longitude"],
"tid": "".join(p[0] for p in person.name.split(" ")[:2]),
"tst": int(person.last_updated.timestamp()),
}
)
if message["_type"] == "encrypted" and context.secret:
return json_response(
{
"_type": "encrypted",
"data": encrypt_message(
context.secret, message["topic"], json.dumps(response)
),
}
)
return json_response(response)
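# Hedged illustration (not part of the original component): the shape of a
# minimal OwnTracks location payload as handled above. Field values are
# examples only.
#
#   {
#       "_type": "location",
#       "lat": 52.5,
#       "lon": 13.4,
#       "acc": 10,
#       "tid": "ab",
#       "tst": 1600000000
#   }
#
# Android clients that omit "topic" are expected to send the X-Limit-U (user)
# and X-Limit-D (device) headers so the handler can rebuild
# "<topic_base>/<user>/<device>".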
class OwnTracksContext:
"""Hold the current OwnTracks context."""
def __init__(
self,
hass,
secret,
max_gps_accuracy,
import_waypoints,
waypoint_whitelist,
region_mapping,
events_only,
mqtt_topic,
):
"""Initialize an OwnTracks context."""
self.hass = hass
self.secret = secret
self.max_gps_accuracy = max_gps_accuracy
self.mobile_beacons_active = defaultdict(set)
self.regions_entered = defaultdict(list)
self.import_waypoints = import_waypoints
self.waypoint_whitelist = waypoint_whitelist
self.region_mapping = region_mapping
self.events_only = events_only
self.mqtt_topic = mqtt_topic
self._pending_msg = []
@callback
def async_valid_accuracy(self, message):
"""Check if we should ignore this message."""
if (acc := message.get("acc")) is None:
return False
try:
acc = float(acc)
except ValueError:
return False
if acc == 0:
_LOGGER.warning(
"Ignoring %s update because GPS accuracy is zero: %s",
message["_type"],
message,
)
return False
if self.max_gps_accuracy is not None and acc > self.max_gps_accuracy:
_LOGGER.info(
"Ignoring %s update because expected GPS accuracy %s is not met: %s",
message["_type"],
self.max_gps_accuracy,
message,
)
return False
return True
@callback
def set_async_see(self, func):
"""Set a new async_see function."""
self.async_see = func
for msg in self._pending_msg:
func(**msg)
self._pending_msg.clear()
# pylint: disable=method-hidden
@callback
def async_see(self, **data):
"""Send a see message to the device tracker."""
self._pending_msg.append(data)
@callback
def async_see_beacons(self, hass, dev_id, kwargs_param):
"""Set active beacons to the current location."""
kwargs = kwargs_param.copy()
# Mobile beacons should always be set to the location of the
        # tracking device. We get the device state and make the necessary
# changes to kwargs.
device_tracker_state = hass.states.get(f"device_tracker.{dev_id}")
if device_tracker_state is not None:
acc = device_tracker_state.attributes.get(ATTR_GPS_ACCURACY)
lat = device_tracker_state.attributes.get(ATTR_LATITUDE)
lon = device_tracker_state.attributes.get(ATTR_LONGITUDE)
if lat is not None and lon is not None:
kwargs["gps"] = (lat, lon)
kwargs["gps_accuracy"] = acc
else:
kwargs["gps"] = None
kwargs["gps_accuracy"] = None
# the battery state applies to the tracking device, not the beacon
# kwargs location is the beacon's configured lat/lon
kwargs.pop("battery", None)
for beacon in self.mobile_beacons_active[dev_id]:
kwargs["dev_id"] = f"{BEACON_DEV_ID}_{beacon}"
kwargs["host_name"] = beacon
self.async_see(**kwargs)
|
{
"content_hash": "8e628fef6748e105b23c0850450aff0a",
"timestamp": "",
"source": "github",
"line_count": 315,
"max_line_length": 88,
"avg_line_length": 31.923809523809524,
"alnum_prop": 0.6071002386634845,
"repo_name": "home-assistant/home-assistant",
"id": "3cae9505ee8b41f7f9ac365a4562b1aff42e9b72",
"size": "10056",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "homeassistant/components/owntracks/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "20557383"
},
{
"name": "Shell",
"bytes": "6671"
}
],
"symlink_target": ""
}
|
"""Config flow for flume integration."""
from functools import partial
import logging
from pyflume import FlumeAuth, FlumeDeviceList
from requests.exceptions import RequestException
import voluptuous as vol
from homeassistant import config_entries, core, exceptions
from homeassistant.const import (
CONF_CLIENT_ID,
CONF_CLIENT_SECRET,
CONF_PASSWORD,
CONF_USERNAME,
)
from .const import BASE_TOKEN_FILENAME
from .const import DOMAIN # pylint:disable=unused-import
_LOGGER = logging.getLogger(__name__)
# If flume ever implements a login page for oauth
# we can use the oauth2 support built into Home Assistant.
#
# Currently they only implement the token endpoint
#
DATA_SCHEMA = vol.Schema(
{
vol.Required(CONF_USERNAME): str,
vol.Required(CONF_PASSWORD): str,
vol.Required(CONF_CLIENT_ID): str,
vol.Required(CONF_CLIENT_SECRET): str,
}
)
async def validate_input(hass: core.HomeAssistant, data):
"""Validate the user input allows us to connect.
Data has the keys from DATA_SCHEMA with values provided by the user.
"""
username = data[CONF_USERNAME]
password = data[CONF_PASSWORD]
client_id = data[CONF_CLIENT_ID]
client_secret = data[CONF_CLIENT_SECRET]
flume_token_full_path = hass.config.path(f"{BASE_TOKEN_FILENAME}-{username}")
try:
flume_auth = await hass.async_add_executor_job(
partial(
FlumeAuth,
username,
password,
client_id,
client_secret,
flume_token_file=flume_token_full_path,
)
)
flume_devices = await hass.async_add_executor_job(FlumeDeviceList, flume_auth)
except RequestException:
raise CannotConnect
except Exception: # pylint: disable=broad-except
raise InvalidAuth
if not flume_devices or not flume_devices.device_list:
raise CannotConnect
# Return info that you want to store in the config entry.
return {"title": username}
class ConfigFlow(config_entries.ConfigFlow, domain=DOMAIN):
"""Handle a config flow for flume."""
VERSION = 1
CONNECTION_CLASS = config_entries.CONN_CLASS_CLOUD_POLL
async def async_step_user(self, user_input=None):
"""Handle the initial step."""
errors = {}
if user_input is not None:
await self.async_set_unique_id(user_input[CONF_USERNAME])
self._abort_if_unique_id_configured()
try:
info = await validate_input(self.hass, user_input)
return self.async_create_entry(title=info["title"], data=user_input)
except CannotConnect:
errors["base"] = "cannot_connect"
except InvalidAuth:
errors["base"] = "invalid_auth"
except Exception: # pylint: disable=broad-except
_LOGGER.exception("Unexpected exception")
errors["base"] = "unknown"
return self.async_show_form(
step_id="user", data_schema=DATA_SCHEMA, errors=errors
)
async def async_step_import(self, user_input):
"""Handle import."""
return await self.async_step_user(user_input)
class CannotConnect(exceptions.HomeAssistantError):
"""Error to indicate we cannot connect."""
class InvalidAuth(exceptions.HomeAssistantError):
"""Error to indicate there is invalid auth."""
|
{
"content_hash": "5e0e1f696655a9506872dafa8956ac9c",
"timestamp": "",
"source": "github",
"line_count": 109,
"max_line_length": 86,
"avg_line_length": 31.568807339449542,
"alnum_prop": 0.6469049694856146,
"repo_name": "pschmitt/home-assistant",
"id": "f26be5f1e1de802f910a66cb8b4dd255f39d6cce",
"size": "3441",
"binary": false,
"copies": "5",
"ref": "refs/heads/dev",
"path": "homeassistant/components/flume/config_flow.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1522"
},
{
"name": "Python",
"bytes": "24807200"
},
{
"name": "Shell",
"bytes": "4342"
}
],
"symlink_target": ""
}
|
"""
The :mod:`sklearn.cross_validation` module includes utilities for cross-
validation and performance evaluation.
"""
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>,
# Gael Varoquaux <gael.varoquaux@normalesup.org>,
# Olivier Grisel <olivier.grisel@ensta.org>
# License: BSD 3 clause
import warnings
from itertools import chain, combinations
from math import ceil, floor, factorial
import numbers
import time
from abc import ABCMeta, abstractmethod
import numpy as np
import scipy.sparse as sp
from .base import is_classifier, clone
from .utils import indexable, check_random_state, safe_indexing
from .utils.validation import _num_samples, check_array
from .utils.multiclass import type_of_target
from .externals.joblib import Parallel, delayed, logger
from .externals.six import with_metaclass
from .externals.six.moves import zip
from .metrics.scorer import check_scoring
import collections
__all__ = ['Bootstrap',
'KFold',
'LeaveOneLabelOut',
'LeaveOneOut',
'LeavePLabelOut',
'LeavePOut',
'ShuffleSplit',
'StratifiedKFold',
'StratifiedShuffleSplit',
'check_cv',
'cross_val_score',
'permutation_test_score',
'train_test_split']
class _PartitionIterator(with_metaclass(ABCMeta)):
"""Base class for CV iterators where train_mask = ~test_mask
Implementations must define `_iter_test_masks` or `_iter_test_indices`.
Parameters
----------
n : int
Total number of elements in dataset.
"""
def __init__(self, n, indices=None):
if indices is None:
indices = True
else:
warnings.warn("The indices parameter is deprecated and will be "
"removed (assumed True) in 0.17", DeprecationWarning,
stacklevel=1)
if abs(n - int(n)) >= np.finfo('f').eps:
raise ValueError("n must be an integer")
self.n = int(n)
self._indices = indices
@property
def indices(self):
warnings.warn("The indices attribute is deprecated and will be "
"removed (assumed True) in 0.17", DeprecationWarning,
stacklevel=1)
return self._indices
def __iter__(self):
indices = self._indices
if indices:
ind = np.arange(self.n)
for test_index in self._iter_test_masks():
train_index = np.logical_not(test_index)
if indices:
train_index = ind[train_index]
test_index = ind[test_index]
yield train_index, test_index
# Since subclasses must implement either _iter_test_masks or
# _iter_test_indices, neither can be abstract.
def _iter_test_masks(self):
"""Generates boolean masks corresponding to test sets.
By default, delegates to _iter_test_indices()
"""
for test_index in self._iter_test_indices():
test_mask = self._empty_mask()
test_mask[test_index] = True
yield test_mask
def _iter_test_indices(self):
"""Generates integer indices corresponding to test sets."""
raise NotImplementedError
def _empty_mask(self):
return np.zeros(self.n, dtype=np.bool)
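# Hedged illustration (not part of the original module): a minimal custom CV
# iterator built on _PartitionIterator only needs to yield test indices; the
# base class derives the complementary train indices. The class below is a toy
# example, not part of scikit-learn.
#
#   class _FirstKOut(_PartitionIterator):
#       """Use the first k samples as the single test set (toy example)."""
#       def __init__(self, n, k, indices=None):
#           super(_FirstKOut, self).__init__(n, indices)
#           self.k = k
#       def _iter_test_indices(self):
#           yield np.arange(self.k)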
class LeaveOneOut(_PartitionIterator):
"""Leave-One-Out cross validation iterator.
Provides train/test indices to split data in train test sets. Each
sample is used once as a test set (singleton) while the remaining
samples form the training set.
Note: ``LeaveOneOut(n)`` is equivalent to ``KFold(n, n_folds=n)`` and
``LeavePOut(n, p=1)``.
Due to the high number of test sets (which is the same as the
number of samples) this cross validation method can be very costly.
For large datasets one should favor KFold, StratifiedKFold or
ShuffleSplit.
Parameters
----------
n : int
Total number of elements in dataset.
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4]])
>>> y = np.array([1, 2])
>>> loo = cross_validation.LeaveOneOut(2)
>>> len(loo)
2
>>> print(loo)
sklearn.cross_validation.LeaveOneOut(n=2)
>>> for train_index, test_index in loo:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
TRAIN: [1] TEST: [0]
[[3 4]] [[1 2]] [2] [1]
TRAIN: [0] TEST: [1]
[[1 2]] [[3 4]] [1] [2]
See also
--------
LeaveOneLabelOut for splitting the data according to explicit,
domain-specific stratification of the dataset.
"""
def _iter_test_indices(self):
return list(range(self.n))
def __repr__(self):
return '%s.%s(n=%i)' % (
self.__class__.__module__,
self.__class__.__name__,
self.n,
)
def __len__(self):
return self.n
class LeavePOut(_PartitionIterator):
"""Leave-P-Out cross validation iterator
Provides train/test indices to split data in train test sets. This results
in testing on all distinct samples of size p, while the remaining n - p
samples form the training set in each iteration.
Note: ``LeavePOut(n, p)`` is NOT equivalent to ``KFold(n, n_folds=n // p)``
which creates non-overlapping test sets.
    Due to the high number of iterations, which grows combinatorially with the
    number of samples, this cross validation method can be very costly. For
large datasets one should favor KFold, StratifiedKFold or ShuffleSplit.
Parameters
----------
n : int
Total number of elements in dataset.
p : int
Size of the test sets.
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
>>> y = np.array([1, 2, 3, 4])
>>> lpo = cross_validation.LeavePOut(4, 2)
>>> len(lpo)
6
>>> print(lpo)
sklearn.cross_validation.LeavePOut(n=4, p=2)
>>> for train_index, test_index in lpo:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [2 3] TEST: [0 1]
TRAIN: [1 3] TEST: [0 2]
TRAIN: [1 2] TEST: [0 3]
TRAIN: [0 3] TEST: [1 2]
TRAIN: [0 2] TEST: [1 3]
TRAIN: [0 1] TEST: [2 3]
"""
def __init__(self, n, p, indices=None):
super(LeavePOut, self).__init__(n, indices)
self.p = p
def _iter_test_indices(self):
for comb in combinations(list(range(self.n)), self.p):
yield np.array(comb)
def __repr__(self):
return '%s.%s(n=%i, p=%i)' % (
self.__class__.__module__,
self.__class__.__name__,
self.n,
self.p,
)
def __len__(self):
return int(factorial(self.n) / factorial(self.n - self.p)
/ factorial(self.p))
class _BaseKFold(with_metaclass(ABCMeta, _PartitionIterator)):
"""Base class to validate KFold approaches"""
@abstractmethod
def __init__(self, n, n_folds, indices, shuffle, random_state):
super(_BaseKFold, self).__init__(n, indices)
if abs(n_folds - int(n_folds)) >= np.finfo('f').eps:
raise ValueError("n_folds must be an integer")
self.n_folds = n_folds = int(n_folds)
if n_folds <= 1:
raise ValueError(
"k-fold cross validation requires at least one"
" train / test split by setting n_folds=2 or more,"
" got n_folds={0}.".format(n_folds))
if n_folds > self.n:
raise ValueError(
("Cannot have number of folds n_folds={0} greater"
" than the number of samples: {1}.").format(n_folds, n))
if not isinstance(shuffle, bool):
raise TypeError("shuffle must be True or False;"
" got {0}".format(shuffle))
self.shuffle = shuffle
self.random_state = random_state
class KFold(_BaseKFold):
"""K-Folds cross validation iterator.
Provides train/test indices to split data in train test sets. Split
dataset into k consecutive folds (without shuffling).
    Each fold is then used as a validation set once while the k - 1 remaining
    folds form the training set.
Parameters
----------
n : int
Total number of elements.
n_folds : int, default=3
Number of folds. Must be at least 2.
shuffle : boolean, optional
Whether to shuffle the data before splitting into batches.
random_state : None, int or RandomState
Pseudo-random number generator state used for random
sampling. If None, use default numpy RNG for shuffling
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([1, 2, 3, 4])
>>> kf = cross_validation.KFold(4, n_folds=2)
>>> len(kf)
2
>>> print(kf) # doctest: +NORMALIZE_WHITESPACE
sklearn.cross_validation.KFold(n=4, n_folds=2, shuffle=False,
random_state=None)
>>> for train_index, test_index in kf:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [2 3] TEST: [0 1]
TRAIN: [0 1] TEST: [2 3]
Notes
-----
The first n % n_folds folds have size n // n_folds + 1, other folds have
size n // n_folds.
See also
--------
StratifiedKFold: take label information into account to avoid building
folds with imbalanced class distributions (for binary or multiclass
classification tasks).
"""
def __init__(self, n, n_folds=3, indices=None, shuffle=False,
random_state=None):
super(KFold, self).__init__(n, n_folds, indices, shuffle, random_state)
self.idxs = np.arange(n)
if shuffle:
rng = check_random_state(self.random_state)
rng.shuffle(self.idxs)
def _iter_test_indices(self):
n = self.n
n_folds = self.n_folds
fold_sizes = (n // n_folds) * np.ones(n_folds, dtype=np.int)
fold_sizes[:n % n_folds] += 1
current = 0
for fold_size in fold_sizes:
start, stop = current, current + fold_size
yield self.idxs[start:stop]
current = stop
def __repr__(self):
return '%s.%s(n=%i, n_folds=%i, shuffle=%s, random_state=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.n,
self.n_folds,
self.shuffle,
self.random_state,
)
def __len__(self):
return self.n_folds
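# Hedged worked example (not part of the original module) of the fold sizes
# described in the KFold Notes above: with n=10 and n_folds=3,
# n % n_folds = 1 fold gets size n // n_folds + 1 = 4, and the remaining two
# folds have size n // n_folds = 3, i.e. fold sizes (4, 3, 3).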
class StratifiedKFold(_BaseKFold):
"""Stratified K-Folds cross validation iterator
Provides train/test indices to split data in train test sets.
This cross-validation object is a variation of KFold that
returns stratified folds. The folds are made by preserving
the percentage of samples for each class.
Parameters
----------
y : array-like, [n_samples]
Samples to split in K folds.
n_folds : int, default=3
Number of folds. Must be at least 2.
shuffle : boolean, optional
Whether to shuffle each stratification of the data before splitting
into batches.
random_state : None, int or RandomState
Pseudo-random number generator state used for random
sampling. If None, use default numpy RNG for shuffling
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> skf = cross_validation.StratifiedKFold(y, n_folds=2)
>>> len(skf)
2
>>> print(skf) # doctest: +NORMALIZE_WHITESPACE
sklearn.cross_validation.StratifiedKFold(labels=[0 0 1 1], n_folds=2,
shuffle=False, random_state=None)
>>> for train_index, test_index in skf:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [1 3] TEST: [0 2]
TRAIN: [0 2] TEST: [1 3]
Notes
-----
    All the folds have size trunc(n_samples / n_folds); the last one has the
    complementary size.
"""
def __init__(self, y, n_folds=3, indices=None, shuffle=False,
random_state=None):
super(StratifiedKFold, self).__init__(
len(y), n_folds, indices, shuffle, random_state)
y = np.asarray(y)
n_samples = y.shape[0]
unique_labels, y_inversed = np.unique(y, return_inverse=True)
label_counts = np.bincount(y_inversed)
min_labels = np.min(label_counts)
if self.n_folds > min_labels:
warnings.warn(("The least populated class in y has only %d"
" members, which is too few. The minimum"
" number of labels for any class cannot"
" be less than n_folds=%d."
% (min_labels, self.n_folds)), Warning)
# don't want to use the same seed in each label's shuffle
if self.shuffle:
rng = check_random_state(self.random_state)
else:
rng = self.random_state
# pre-assign each sample to a test fold index using individual KFold
# splitting strategies for each label so as to respect the
# balance of labels
per_label_cvs = [
KFold(max(c, self.n_folds), self.n_folds, shuffle=self.shuffle,
random_state=rng) for c in label_counts]
test_folds = np.zeros(n_samples, dtype=np.int)
for test_fold_idx, per_label_splits in enumerate(list(zip(*per_label_cvs))):
for label, (_, test_split) in zip(unique_labels, per_label_splits):
label_test_folds = test_folds[y == label]
# the test split can be too big because we used
# KFold(max(c, self.n_folds), self.n_folds) instead of
# KFold(c, self.n_folds) to make it possible to not crash even
# if the data is not 100% stratifiable for all the labels
# (we use a warning instead of raising an exception)
# If this is the case, let's trim it:
test_split = test_split[test_split < len(label_test_folds)]
label_test_folds[test_split] = test_fold_idx
test_folds[y == label] = label_test_folds
self.test_folds = test_folds
self.y = y
def _iter_test_masks(self):
for i in range(self.n_folds):
yield self.test_folds == i
def __repr__(self):
return '%s.%s(labels=%s, n_folds=%i, shuffle=%s, random_state=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.y,
self.n_folds,
self.shuffle,
self.random_state,
)
def __len__(self):
return self.n_folds
class LeaveOneLabelOut(_PartitionIterator):
"""Leave-One-Label_Out cross-validation iterator
Provides train/test indices to split data according to a third-party
provided label. This label information can be used to encode arbitrary
domain specific stratifications of the samples as integers.
For instance the labels could be the year of collection of the samples
and thus allow for cross-validation against time-based splits.
Parameters
----------
labels : array-like of int with shape (n_samples,)
Arbitrary domain-specific stratification of the data to be used
to draw the splits.
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
>>> y = np.array([1, 2, 1, 2])
>>> labels = np.array([1, 1, 2, 2])
>>> lol = cross_validation.LeaveOneLabelOut(labels)
>>> len(lol)
2
>>> print(lol)
sklearn.cross_validation.LeaveOneLabelOut(labels=[1 1 2 2])
>>> for train_index, test_index in lol:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
TRAIN: [2 3] TEST: [0 1]
[[5 6]
[7 8]] [[1 2]
[3 4]] [1 2] [1 2]
TRAIN: [0 1] TEST: [2 3]
[[1 2]
[3 4]] [[5 6]
[7 8]] [1 2] [1 2]
"""
def __init__(self, labels, indices=None):
super(LeaveOneLabelOut, self).__init__(len(labels), indices)
# We make a copy of labels to avoid side-effects during iteration
self.labels = np.array(labels, copy=True)
self.unique_labels = np.unique(labels)
self.n_unique_labels = len(self.unique_labels)
def _iter_test_masks(self):
for i in self.unique_labels:
yield self.labels == i
def __repr__(self):
return '%s.%s(labels=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.labels,
)
def __len__(self):
return self.n_unique_labels
class LeavePLabelOut(_PartitionIterator):
"""Leave-P-Label_Out cross-validation iterator
Provides train/test indices to split data according to a third-party
provided label. This label information can be used to encode arbitrary
domain specific stratifications of the samples as integers.
For instance the labels could be the year of collection of the samples
and thus allow for cross-validation against time-based splits.
The difference between LeavePLabelOut and LeaveOneLabelOut is that
the former builds the test sets from all the samples assigned to
``p`` different values of the labels, while the latter builds each
test set from the samples assigned to a single label.
Parameters
----------
labels : array-like of int with shape (n_samples,)
Arbitrary domain-specific stratification of the data to be used
to draw the splits.
p : int
Number of labels to leave out in the test split.
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4], [5, 6]])
>>> y = np.array([1, 2, 1])
>>> labels = np.array([1, 2, 3])
>>> lpl = cross_validation.LeavePLabelOut(labels, p=2)
>>> len(lpl)
3
>>> print(lpl)
sklearn.cross_validation.LeavePLabelOut(labels=[1 2 3], p=2)
>>> for train_index, test_index in lpl:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
TRAIN: [2] TEST: [0 1]
[[5 6]] [[1 2]
[3 4]] [1] [1 2]
TRAIN: [1] TEST: [0 2]
[[3 4]] [[1 2]
[5 6]] [2] [1 1]
TRAIN: [0] TEST: [1 2]
[[1 2]] [[3 4]
[5 6]] [1] [2 1]
"""
def __init__(self, labels, p, indices=None):
# We make a copy of labels to avoid side-effects during iteration
super(LeavePLabelOut, self).__init__(len(labels), indices)
self.labels = np.array(labels, copy=True)
self.unique_labels = np.unique(labels)
self.n_unique_labels = len(self.unique_labels)
self.p = p
def _iter_test_masks(self):
comb = combinations(list(range(self.n_unique_labels)), self.p)
for idx in comb:
test_index = self._empty_mask()
idx = np.array(idx)
for l in self.unique_labels[idx]:
test_index[self.labels == l] = True
yield test_index
def __repr__(self):
return '%s.%s(labels=%s, p=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.labels,
self.p,
)
def __len__(self):
return int(factorial(self.n_unique_labels) /
factorial(self.n_unique_labels - self.p) /
factorial(self.p))
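# In other words, __len__ is the binomial coefficient C(n_unique_labels, p):
# with 3 unique labels and p=2 this gives 3! / (1! * 2!) = 3 train/test splits,
# which matches the doctest above.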
class Bootstrap(object):
"""Random sampling with replacement cross-validation iterator
Provides train/test indices to split data in train test sets
while resampling the input n_iter times: each time a new
random split of the data is performed and then samples are drawn
(with replacement) on each side of the split to build the training
and test sets.
Note: contrary to other cross-validation strategies, bootstrapping
will allow some samples to occur several times in each split. However
a sample that occurs in the train split will never occur in the test
split and vice-versa.
If you want each sample to occur at most once you should probably
use ShuffleSplit cross validation instead.
Parameters
----------
n : int
Total number of elements in the dataset.
n_iter : int (default is 3)
Number of bootstrapping iterations
train_size : int or float (default is 0.5)
If int, number of samples to include in the training split
(should be smaller than the total number of samples passed
in the dataset).
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split.
test_size : int or float or None (default is None)
If int, number of samples to include in the test split
(should be smaller than the total number of samples passed
in the dataset).
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the test split.
If None, n_test is set as the complement of n_train.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
Examples
--------
>>> from sklearn import cross_validation
>>> bs = cross_validation.Bootstrap(9, random_state=0)
>>> len(bs)
3
>>> print(bs)
Bootstrap(9, n_iter=3, train_size=5, test_size=4, random_state=0)
>>> for train_index, test_index in bs:
... print("TRAIN:", train_index, "TEST:", test_index)
...
TRAIN: [1 8 7 7 8] TEST: [0 3 0 5]
TRAIN: [5 4 2 4 2] TEST: [6 7 1 0]
TRAIN: [4 7 0 1 1] TEST: [5 3 6 5]
See also
--------
ShuffleSplit: cross validation using random permutations.
"""
# Static marker to be able to introspect the CV type
indices = True
def __init__(self, n, n_iter=3, train_size=.5, test_size=None,
random_state=None, n_bootstraps=None):
# See, e.g., http://youtu.be/BzHz0J9a6k0?t=9m38s for a motivation
# behind this deprecation
warnings.warn("Bootstrap will no longer be supported as a " +
"cross-validation method as of version 0.15 and " +
"will be removed in 0.17", DeprecationWarning)
self.n = n
if n_bootstraps is not None: # pragma: no cover
warnings.warn("n_bootstraps was renamed to n_iter and will "
"be removed in 0.16.", DeprecationWarning)
n_iter = n_bootstraps
self.n_iter = n_iter
if (isinstance(train_size, numbers.Real) and train_size >= 0.0
and train_size <= 1.0):
self.train_size = int(ceil(train_size * n))
elif isinstance(train_size, numbers.Integral):
self.train_size = train_size
else:
raise ValueError("Invalid value for train_size: %r" %
train_size)
if self.train_size > n:
raise ValueError("train_size=%d should not be larger than n=%d" %
(self.train_size, n))
if isinstance(test_size, numbers.Real) and 0.0 <= test_size <= 1.0:
self.test_size = int(ceil(test_size * n))
elif isinstance(test_size, numbers.Integral):
self.test_size = test_size
elif test_size is None:
self.test_size = self.n - self.train_size
else:
raise ValueError("Invalid value for test_size: %r" % test_size)
if self.test_size > n - self.train_size:
raise ValueError(("test_size + train_size=%d, should not be " +
"larger than n=%d") %
(self.test_size + self.train_size, n))
self.random_state = random_state
def __iter__(self):
rng = check_random_state(self.random_state)
for i in range(self.n_iter):
# random partition
permutation = rng.permutation(self.n)
ind_train = permutation[:self.train_size]
ind_test = permutation[self.train_size:self.train_size
+ self.test_size]
# bootstrap in each split individually
train = rng.randint(0, self.train_size,
size=(self.train_size,))
test = rng.randint(0, self.test_size,
size=(self.test_size,))
yield ind_train[train], ind_test[test]
def __repr__(self):
return ('%s(%d, n_iter=%d, train_size=%d, test_size=%d, '
'random_state=%s)' % (
self.__class__.__name__,
self.n,
self.n_iter,
self.train_size,
self.test_size,
self.random_state,
))
def __len__(self):
return self.n_iter
class BaseShuffleSplit(with_metaclass(ABCMeta)):
"""Base class for ShuffleSplit and StratifiedShuffleSplit"""
def __init__(self, n, n_iter=10, test_size=0.1, train_size=None,
indices=None, random_state=None, n_iterations=None):
if indices is None:
indices = True
else:
warnings.warn("The indices parameter is deprecated and will be "
"removed (assumed True) in 0.17", DeprecationWarning)
self.n = n
self.n_iter = n_iter
if n_iterations is not None: # pragma: no cover
warnings.warn("n_iterations was renamed to n_iter for consistency "
" and will be removed in 0.16.")
self.n_iter = n_iterations
self.test_size = test_size
self.train_size = train_size
self.random_state = random_state
self._indices = indices
self.n_train, self.n_test = _validate_shuffle_split(n,
test_size,
train_size)
@property
def indices(self):
warnings.warn("The indices attribute is deprecated and will be "
"removed (assumed True) in 0.17", DeprecationWarning,
stacklevel=1)
return self._indices
def __iter__(self):
if self._indices:
for train, test in self._iter_indices():
yield train, test
return
for train, test in self._iter_indices():
train_m = np.zeros(self.n, dtype=bool)
test_m = np.zeros(self.n, dtype=bool)
train_m[train] = True
test_m[test] = True
yield train_m, test_m
@abstractmethod
def _iter_indices(self):
"""Generate (train, test) indices"""
class ShuffleSplit(BaseShuffleSplit):
"""Random permutation cross-validation iterator.
Yields indices to split data into training and test sets.
Note: contrary to other cross-validation strategies, random splits
do not guarantee that all folds will be different, although this is
still very likely for sizeable datasets.
Parameters
----------
n : int
Total number of elements in the dataset.
n_iter : int (default 10)
Number of re-shuffling & splitting iterations.
test_size : float (default 0.1), int, or None
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the test split. If
int, represents the absolute number of test samples. If None,
the value is automatically set to the complement of the train size.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
Examples
--------
>>> from sklearn import cross_validation
>>> rs = cross_validation.ShuffleSplit(4, n_iter=3,
... test_size=.25, random_state=0)
>>> len(rs)
3
>>> print(rs)
... # doctest: +ELLIPSIS
ShuffleSplit(4, n_iter=3, test_size=0.25, ...)
>>> for train_index, test_index in rs:
... print("TRAIN:", train_index, "TEST:", test_index)
...
TRAIN: [3 1 0] TEST: [2]
TRAIN: [2 1 3] TEST: [0]
TRAIN: [0 2 1] TEST: [3]
>>> rs = cross_validation.ShuffleSplit(4, n_iter=3,
... train_size=0.5, test_size=.25, random_state=0)
>>> for train_index, test_index in rs:
... print("TRAIN:", train_index, "TEST:", test_index)
...
TRAIN: [3 1] TEST: [2]
TRAIN: [2 1] TEST: [0]
TRAIN: [0 2] TEST: [3]
See also
--------
Bootstrap: cross-validation using re-sampling with replacement.
"""
def _iter_indices(self):
rng = check_random_state(self.random_state)
for i in range(self.n_iter):
# random partition
permutation = rng.permutation(self.n)
ind_test = permutation[:self.n_test]
ind_train = permutation[self.n_test:self.n_test + self.n_train]
yield ind_train, ind_test
def __repr__(self):
return ('%s(%d, n_iter=%d, test_size=%s, '
'random_state=%s)' % (
self.__class__.__name__,
self.n,
self.n_iter,
str(self.test_size),
self.random_state,
))
def __len__(self):
return self.n_iter
def _validate_shuffle_split(n, test_size, train_size):
if test_size is None and train_size is None:
raise ValueError(
'test_size and train_size can not both be None')
if test_size is not None:
if np.asarray(test_size).dtype.kind == 'f':
if test_size >= 1.:
raise ValueError(
'test_size=%f should be smaller '
'than 1.0 or be an integer' % test_size)
elif np.asarray(test_size).dtype.kind == 'i':
if test_size >= n:
raise ValueError(
'test_size=%d should be smaller '
'than the number of samples %d' % (test_size, n))
else:
raise ValueError("Invalid value for test_size: %r" % test_size)
if train_size is not None:
if np.asarray(train_size).dtype.kind == 'f':
if train_size >= 1.:
raise ValueError("train_size=%f should be smaller "
"than 1.0 or be an integer" % train_size)
elif np.asarray(test_size).dtype.kind == 'f' and \
train_size + test_size > 1.:
raise ValueError('The sum of test_size and train_size = %f, '
'should be smaller than 1.0. Reduce '
'test_size and/or train_size.' %
(train_size + test_size))
elif np.asarray(train_size).dtype.kind == 'i':
if train_size >= n:
raise ValueError("train_size=%d should be smaller "
"than the number of samples %d" %
(train_size, n))
else:
raise ValueError("Invalid value for train_size: %r" % train_size)
if np.asarray(test_size).dtype.kind == 'f':
n_test = ceil(test_size * n)
elif np.asarray(test_size).dtype.kind == 'i':
n_test = float(test_size)
if train_size is None:
n_train = n - n_test
else:
if np.asarray(train_size).dtype.kind == 'f':
n_train = floor(train_size * n)
else:
n_train = float(train_size)
if test_size is None:
n_test = n - n_train
if n_train + n_test > n:
raise ValueError('The sum of train_size and test_size = %d, '
'should be smaller than the number of '
'samples %d. Reduce test_size and/or '
'train_size.' % (n_train + n_test, n))
return int(n_train), int(n_test)
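# A worked example of the resolution above (values chosen for illustration only):
# _validate_shuffle_split(n=100, test_size=0.25, train_size=None)
#   -> n_test = ceil(0.25 * 100) = 25, n_train = 100 - 25 = 75, returns (75, 25)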
class StratifiedShuffleSplit(BaseShuffleSplit):
"""Stratified ShuffleSplit cross validation iterator
Provides train/test indices to split data into train/test sets.
This cross-validation object is a merge of StratifiedKFold and
ShuffleSplit, which returns stratified randomized folds. The folds
are made by preserving the percentage of samples for each class.
Note: like the ShuffleSplit strategy, stratified random splits
do not guarantee that all folds will be different, although this is
still very likely for sizeable datasets.
Parameters
----------
y : array, [n_samples]
Labels of samples.
n_iter : int (default 10)
Number of re-shuffling & splitting iterations.
test_size : float (default 0.1), int, or None
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the test split. If
int, represents the absolute number of test samples. If None,
the value is automatically set to the complement of the train size.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
Examples
--------
>>> from sklearn.cross_validation import StratifiedShuffleSplit
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> sss = StratifiedShuffleSplit(y, 3, test_size=0.5, random_state=0)
>>> len(sss)
3
>>> print(sss) # doctest: +ELLIPSIS
StratifiedShuffleSplit(labels=[0 0 1 1], n_iter=3, ...)
>>> for train_index, test_index in sss:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [1 2] TEST: [3 0]
TRAIN: [0 2] TEST: [1 3]
TRAIN: [0 2] TEST: [3 1]
"""
def __init__(self, y, n_iter=10, test_size=0.1, train_size=None,
indices=None, random_state=None, n_iterations=None):
super(StratifiedShuffleSplit, self).__init__(
len(y), n_iter, test_size, train_size, indices, random_state,
n_iterations)
self.y = np.array(y)
self.classes, self.y_indices = np.unique(y, return_inverse=True)
n_cls = self.classes.shape[0]
if np.min(np.bincount(self.y_indices)) < 2:
raise ValueError("The least populated class in y has only 1"
" member, which is too few. The minimum"
" number of labels for any class cannot"
" be less than 2.")
if self.n_train < n_cls:
raise ValueError('The train_size = %d should be greater or '
'equal to the number of classes = %d' %
(self.n_train, n_cls))
if self.n_test < n_cls:
raise ValueError('The test_size = %d should be greater or '
'equal to the number of classes = %d' %
(self.n_test, n_cls))
def _iter_indices(self):
rng = check_random_state(self.random_state)
cls_count = np.bincount(self.y_indices)
p_i = cls_count / float(self.n)
n_i = np.round(self.n_train * p_i).astype(int)
t_i = np.minimum(cls_count - n_i,
np.round(self.n_test * p_i).astype(int))
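# Rough illustration of the allocation above (numbers are illustrative only):
# with 10 samples split 5/5 between two classes and n_train = n_test = 5,
# p_i = [0.5, 0.5], so n_i = np.round([2.5, 2.5]) = [2, 2] and t_i = [2, 2];
# the two samples lost to rounding are reassigned by the completion block below.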
for n in range(self.n_iter):
train = []
test = []
for i, cls in enumerate(self.classes):
permutation = rng.permutation(cls_count[i])
cls_i = np.where((self.y == cls))[0][permutation]
train.extend(cls_i[:n_i[i]])
test.extend(cls_i[n_i[i]:n_i[i] + t_i[i]])
# Because of rounding issues (as n_train and n_test are not
# dividers of the number of elements per class), we may end
up here with fewer samples in train and test than asked for.
if len(train) < self.n_train or len(test) < self.n_test:
# We complete by randomly assigning the missing indexes
missing_idx = np.where(np.bincount(train + test,
minlength=len(self.y)) == 0,
)[0]
missing_idx = rng.permutation(missing_idx)
train.extend(missing_idx[:(self.n_train - len(train))])
test.extend(missing_idx[-(self.n_test - len(test)):])
train = rng.permutation(train)
test = rng.permutation(test)
yield train, test
def __repr__(self):
return ('%s(labels=%s, n_iter=%d, test_size=%s, '
'random_state=%s)' % (
self.__class__.__name__,
self.y,
self.n_iter,
str(self.test_size),
self.random_state,
))
def __len__(self):
return self.n_iter
##############################################################################
def cross_val_score(estimator, X, y=None, scoring=None, cv=None, n_jobs=1,
verbose=0, fit_params=None, pre_dispatch='2*n_jobs'):
"""Evaluate a score by cross-validation
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like
The data to fit. Can be, for example a list, or an array at least 2d.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : cross-validation generator or int, optional, default: None
A cross-validation generator to use. If int, determines
the number of folds in StratifiedKFold if y is binary
or multiclass and estimator is a classifier, or the number
of folds in KFold otherwise. If None, it is equivalent to cv=3.
n_jobs : integer, optional
The number of CPUs to use to do the computation. -1 means
'all CPUs'.
verbose : integer, optional
The verbosity level.
fit_params : dict, optional
Parameters to pass to the fit method of the estimator.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
Returns
-------
scores : array of float, shape=(len(list(cv)),)
Array of scores of the estimator for each run of the cross validation.
"""
X, y = indexable(X, y)
cv = _check_cv(cv, X, y, classifier=is_classifier(estimator))
scorer = check_scoring(estimator, scoring=scoring)
# We clone the estimator to make sure that all the folds are
# independent, and that it is pickle-able.
parallel = Parallel(n_jobs=n_jobs, verbose=verbose,
pre_dispatch=pre_dispatch)
scores = parallel(delayed(_fit_and_score)(clone(estimator), X, y, scorer,
train, test, verbose, None,
fit_params)
for train, test in cv)
return np.array(scores)[:, 0]
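# A minimal usage sketch (the estimator and dataset below are illustrative and
# not part of this module; any scikit-learn style classifier would do):
# >>> from sklearn.datasets import load_iris
# >>> from sklearn.svm import LinearSVC
# >>> iris = load_iris()
# >>> scores = cross_val_score(LinearSVC(), iris.data, iris.target, cv=5)
# >>> scores.shape
# (5,)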
def _fit_and_score(estimator, X, y, scorer, train, test, verbose, parameters,
fit_params, return_train_score=False,
return_parameters=False):
"""Fit estimator and compute scores for a given dataset split.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like of shape at least 2D
The data to fit.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
scorer : callable
A scorer callable object / function with signature
``scorer(estimator, X, y)``.
train : array-like, shape = (n_train_samples,)
Indices of training samples.
test : array-like, shape = (n_test_samples,)
Indices of test samples.
verbose : integer
The verbosity level.
parameters : dict or None
Parameters to be set on the estimator.
fit_params : dict or None
Parameters that will be passed to ``estimator.fit``.
return_train_score : boolean, optional, default: False
Compute and return score on training set.
return_parameters : boolean, optional, default: False
Return the parameters that have been used for the estimator.
Returns
-------
train_score : float, optional
Score on training set, returned only if `return_train_score` is `True`.
test_score : float
Score on test set.
n_test_samples : int
Number of test samples.
scoring_time : float
Time spent for fitting and scoring in seconds.
parameters : dict or None, optional
The parameters that have been evaluated.
"""
if verbose > 1:
if parameters is None:
msg = "no parameters to be set"
else:
msg = '%s' % (', '.join('%s=%s' % (k, v)
for k, v in list(parameters.items())))
print("[CV] %s %s" % (msg, (64 - len(msg)) * '.'))
# Adjust length of sample weights
n_samples = _num_samples(X)
fit_params = fit_params if fit_params is not None else {}
fit_params = dict([(k, np.asarray(v)[train]
if hasattr(v, '__len__') and len(v) == n_samples else v)
for k, v in list(fit_params.items())])
if parameters is not None:
estimator.set_params(**parameters)
start_time = time.time()
X_train, y_train = _safe_split(estimator, X, y, train)
X_test, y_test = _safe_split(estimator, X, y, test, train)
if y_train is None:
estimator.fit(X_train, **fit_params)
else:
estimator.fit(X_train, y_train, **fit_params)
test_score = _score(estimator, X_test, y_test, scorer)
if return_train_score:
train_score = _score(estimator, X_train, y_train, scorer)
scoring_time = time.time() - start_time
if verbose > 2:
msg += ", score=%f" % test_score
if verbose > 1:
end_msg = "%s -%s" % (msg, logger.short_format_time(scoring_time))
print("[CV] %s %s" % ((64 - len(end_msg)) * '.', end_msg))
ret = [train_score] if return_train_score else []
ret.extend([test_score, _num_samples(X_test), scoring_time])
if return_parameters:
ret.append(parameters)
return ret
def _safe_split(estimator, X, y, indices, train_indices=None):
"""Create subset of dataset and properly handle kernels."""
if hasattr(estimator, 'kernel') and isinstance(estimator.kernel, collections.Callable):
# cannot compute the kernel values with custom function
raise ValueError("Cannot use a custom kernel function. "
"Precompute the kernel matrix instead.")
if not hasattr(X, "shape"):
if getattr(estimator, "_pairwise", False):
raise ValueError("Precomputed kernels or affinity matrices have "
"to be passed as arrays or sparse matrices.")
X_subset = [X[idx] for idx in indices]
else:
if getattr(estimator, "_pairwise", False):
# X is a precomputed square kernel matrix
if X.shape[0] != X.shape[1]:
raise ValueError("X should be a square kernel matrix")
if train_indices is None:
X_subset = X[np.ix_(indices, indices)]
else:
X_subset = X[np.ix_(indices, train_indices)]
else:
X_subset = safe_indexing(X, indices)
if y is not None:
y_subset = safe_indexing(y, indices)
else:
y_subset = None
return X_subset, y_subset
def _score(estimator, X_test, y_test, scorer):
"""Compute the score of an estimator on a given test set."""
if y_test is None:
score = scorer(estimator, X_test)
else:
score = scorer(estimator, X_test, y_test)
if not isinstance(score, numbers.Number):
raise ValueError("scoring must return a number, got %s (%s) instead."
% (str(score), type(score)))
return score
def _permutation_test_score(estimator, X, y, cv, scorer):
"""Auxiliary function for permutation_test_score"""
avg_score = []
for train, test in cv:
estimator.fit(X[train], y[train])
avg_score.append(scorer(estimator, X[test], y[test]))
return np.mean(avg_score)
def _shuffle(y, labels, random_state):
"""Return a shuffled copy of y eventually shuffle among same labels."""
if labels is None:
ind = random_state.permutation(len(y))
else:
ind = np.arange(len(labels))
for label in np.unique(labels):
this_mask = (labels == label)
ind[this_mask] = random_state.permutation(ind[this_mask])
return y[ind]
def check_cv(cv, X=None, y=None, classifier=False):
"""Input checker utility for building a CV in a user friendly way.
Parameters
----------
cv : int, a cv generator instance, or None
The input specifying which cv generator to use. It can be an
integer, in which case it is the number of folds in a KFold,
None, in which case 3-fold is used, or another object, that
will then be used as a cv generator.
X : array-like
The data the cross-val object will be applied on.
y : array-like
The target variable for a supervised learning problem.
classifier : boolean optional
Whether the task is a classification task, in which case
stratified KFold will be used.
Returns
-------
checked_cv: a cross-validation generator instance.
The return value is guaranteed to be a cv generator instance, whatever
the input type.
"""
return _check_cv(cv, X=X, y=y, classifier=classifier, warn_mask=True)
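# A short sketch of how an integer cv is resolved (mirrors _check_cv below;
# X and y stand for any dataset accepted by the estimators above):
# >>> check_cv(3, X, y, classifier=True)    # binary/multiclass y -> StratifiedKFold(y, 3)
# >>> check_cv(3, X, y, classifier=False)   # otherwise           -> KFold(len(X), 3)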
def _check_cv(cv, X=None, y=None, classifier=False, warn_mask=False):
# This exists for internal use while indices is being deprecated.
is_sparse = sp.issparse(X)
needs_indices = is_sparse or not hasattr(X, "shape")
if cv is None:
cv = 3
if isinstance(cv, numbers.Integral):
if warn_mask and not needs_indices:
warnings.warn('check_cv will return indices instead of boolean '
'masks from 0.17', DeprecationWarning)
else:
needs_indices = None
if classifier:
if type_of_target(y) in ['binary', 'multiclass']:
cv = StratifiedKFold(y, cv, indices=needs_indices)
else:
cv = KFold(_num_samples(y), cv, indices=needs_indices)
else:
if not is_sparse:
n_samples = len(X)
else:
n_samples = X.shape[0]
cv = KFold(n_samples, cv, indices=needs_indices)
if needs_indices and not getattr(cv, "_indices", True):
raise ValueError("Sparse data and lists require indices-based cross"
" validation generator, got: %r", cv)
return cv
def permutation_test_score(estimator, X, y, cv=None,
n_permutations=100, n_jobs=1, labels=None,
random_state=0, verbose=0, scoring=None):
"""Evaluate the significance of a cross-validated score with permutations
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like of shape at least 2D
The data to fit.
y : array-like
The target variable to try to predict in the case of
supervised learning.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : integer or cross-validation generator, optional
If an integer is passed, it is the number of folds (default 3).
Specific cross-validation objects can be passed, see
sklearn.cross_validation module for the list of possible objects.
n_permutations : integer, optional
Number of times to permute ``y``.
n_jobs : integer, optional
The number of CPUs to use to do the computation. -1 means
'all CPUs'.
labels : array-like of shape [n_samples] (optional)
Labels constrain the permutation among groups of samples with
a same label.
random_state : RandomState or an int seed (0 by default)
A random number generator instance to define the state of the
random permutations generator.
verbose : integer, optional
The verbosity level.
Returns
-------
score : float
The true score without permuting targets.
permutation_scores : array, shape = [n_permutations]
The scores obtained for each permutation.
pvalue : float
The returned value equals p-value if `scoring` returns bigger
numbers for better scores (e.g., accuracy_score). If `scoring` is
rather a loss function (i.e. when lower is better such as with
`mean_squared_error`) then this is actually the complement of the
p-value: 1 - p-value.
Notes
-----
This function implements Test 1 in:
Ojala and Garriga. Permutation Tests for Studying Classifier
Performance. The Journal of Machine Learning Research (2010)
vol. 11
"""
X, y = indexable(X, y)
cv = _check_cv(cv, X, y, classifier=is_classifier(estimator))
scorer = check_scoring(estimator, scoring=scoring)
random_state = check_random_state(random_state)
# We clone the estimator to make sure that all the folds are
# independent, and that it is pickle-able.
score = _permutation_test_score(clone(estimator), X, y, cv, scorer)
permutation_scores = Parallel(n_jobs=n_jobs, verbose=verbose)(
delayed(_permutation_test_score)(
clone(estimator), X, _shuffle(y, labels, random_state), cv,
scorer)
for _ in range(n_permutations))
permutation_scores = np.array(permutation_scores)
pvalue = (np.sum(permutation_scores >= score) + 1.0) / (n_permutations + 1)
return score, permutation_scores, pvalue
permutation_test_score.__test__ = False # to avoid a pb with nosetests
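# A minimal usage sketch (clf, X and y are placeholders for any classifier and
# dataset; shown for illustration only):
# >>> score, perm_scores, pvalue = permutation_test_score(
# ...     clf, X, y, cv=5, n_permutations=100, scoring='accuracy')
# Note that pvalue = (sum(perm_scores >= score) + 1) / (n_permutations + 1), so
# with 100 permutations the smallest attainable p-value is 1 / 101, about 0.0099.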
def train_test_split(*arrays, **options):
"""Split arrays or matrices into random train and test subsets
Quick utility that wraps input validation and
``next(iter(ShuffleSplit(n_samples)))`` and application to input
data into a single call for splitting (and optionally subsampling)
data in a one-liner.
Parameters
----------
*arrays : sequence of arrays or scipy.sparse matrices with same shape[0]
Python lists or tuples occurring in arrays are converted to 1D numpy
arrays.
test_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the test split. If
int, represents the absolute number of test samples. If None,
the value is automatically set to the complement of the train size.
If train size is also None, test size is set to 0.25.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
Returns
-------
splitting : list of arrays, length=2 * len(arrays)
List containing train-test split of input array.
Examples
--------
>>> import numpy as np
>>> from sklearn.cross_validation import train_test_split
>>> a, b = np.arange(10).reshape((5, 2)), range(5)
>>> a
array([[0, 1],
[2, 3],
[4, 5],
[6, 7],
[8, 9]])
>>> list(b)
[0, 1, 2, 3, 4]
>>> a_train, a_test, b_train, b_test = train_test_split(
... a, b, test_size=0.33, random_state=42)
...
>>> a_train
array([[4, 5],
[0, 1],
[6, 7]])
>>> b_train
[2, 0, 3]
>>> a_test
array([[2, 3],
[8, 9]])
>>> b_test
[1, 4]
"""
n_arrays = len(arrays)
if n_arrays == 0:
raise ValueError("At least one array required as input")
test_size = options.pop('test_size', None)
train_size = options.pop('train_size', None)
random_state = options.pop('random_state', None)
dtype = options.pop('dtype', None)
if dtype is not None:
warnings.warn("dtype option is ignored and will be removed in 0.18.")
force_arrays = options.pop('force_arrays', False)
if options:
raise TypeError("Invalid parameters passed: %s" % str(options))
if force_arrays:
warnings.warn("The force_arrays option is deprecated and will be "
"removed in 0.18.", DeprecationWarning)
arrays = [check_array(x, 'csr', ensure_2d=False, force_all_finite=False)
if x is not None else x for x in arrays]
if test_size is None and train_size is None:
test_size = 0.25
arrays = indexable(*arrays)
n_samples = _num_samples(arrays[0])
cv = ShuffleSplit(n_samples, test_size=test_size,
train_size=train_size,
random_state=random_state)
train, test = next(iter(cv))
return list(chain.from_iterable((safe_indexing(a, train),
safe_indexing(a, test)) for a in arrays))
train_test_split.__test__ = False # to avoid a pb with nosetests
|
{
"content_hash": "a8ce5686c15537ea41704e758f9398f6",
"timestamp": "",
"source": "github",
"line_count": 1571,
"max_line_length": 91,
"avg_line_length": 35.91725015913431,
"alnum_prop": 0.58001630454046,
"repo_name": "albahnsen/CostSensitiveClassification",
"id": "45a554301e5fb6a4504ba7b2e4820a38c901816b",
"size": "56426",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "costcla/utils/cross_validation.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "197165"
}
],
"symlink_target": ""
}
|
"""show the configuration as discovered from the current directory."""
# =============================================================================
# CONTENTS
# -----------------------------------------------------------------------------
# aoncmd_showconfig
#
# Public Functions:
# getFromfilePrefixChars
# setupParser
# process
#
# -----------------------------------------------------------------------------
# (this contents block is generated, edits will be lost)
# =============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import phlsys_makeconduit
def getFromfilePrefixChars():
return ""
def setupParser(parser):
phlsys_makeconduit.add_argparse_arguments(parser)
def process(args):
getExplanation = phlsys_makeconduit.get_uri_user_cert_explanation
uri, user, cert, explanation = getExplanation(
args.uri, args.user, args.cert)
print(explanation)
print()
print("uri : ", uri)
print("user: ", user)
print("cert: ", phlsys_makeconduit.obscured_cert(cert))
# -----------------------------------------------------------------------------
# Copyright (C) 2013-2014 Bloomberg Finance L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------ END-OF-FILE ----------------------------------
|
{
"content_hash": "8ef4df33936cfc56efd2f559ec9257c9",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 79,
"avg_line_length": 34.05357142857143,
"alnum_prop": 0.5527005768222338,
"repo_name": "bloomberg/phabricator-tools",
"id": "a2c916e174887a9c50d227225eb98bc767405bd8",
"size": "1907",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "py/aon/aoncmd_showconfig.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "342"
},
{
"name": "Dockerfile",
"bytes": "3828"
},
{
"name": "HTML",
"bytes": "471"
},
{
"name": "Puppet",
"bytes": "4016"
},
{
"name": "Python",
"bytes": "1069073"
},
{
"name": "Ruby",
"bytes": "1945"
},
{
"name": "Shell",
"bytes": "135331"
}
],
"symlink_target": ""
}
|
"""Chart where free things are.
The StuffCharter class is a wrapper around the folium
openstreetmap python object, which in turn generates a
leaflet map.
Example usage:
>>> from stuff_scraper import StuffScraper
>>> from stuff_charter import StuffCharter
>>> stuffs = StuffScraper('montreal', 5, precise=True).stuffs
>>> treasure_map = StuffCharter(stuffs)
call save_map(path) to generate html map
>>> treasure_map.save_test_map() # saves map in current dir
BEWARNED, this map is likely inaccurate:
Craigslist denizens care not for computer-precision
"""
import os, re
from geopy.geocoders import Nominatim
from bs4 import BeautifulSoup
import requests, folium, webbrowser
from folium.element import IFrame
class StuffCharter:
"""Post folium map of freestuffs.
After constructing Mappify map object, call
create_map and pass in map_path in order to create
the HTML map.
Attributes:
- treasure_map -- an OSM folium map object
- stuffs -- list of free stuff
- user_location -- the user's location
- start_coordinates -- the origin coordinates for city
- zoom -- default map zoom
Keyword arguments:
- stuffs -- a list of stuff objects
- address -- for an optional map marker of the user address.
- do_create_map -- set to False in order to modify attributes
before create_map is called.
- is_testing -- use to test module from commandline
- is_flask -- automatically create map for treasure-map
- zoom -- the map default zoom level
"""
def __init__(self, stuffs, address=None, zoom=13,
do_create_map=True,
is_testing=False, is_flask=False):
self.user_location = stuffs[0].user_location
self.start_coordinates = self.find_city_center(self.user_location)
self.zoom = zoom
self.stuffs = stuffs
self.radius = 500
self.address = address
if do_create_map:
self.create_map(is_testing, is_flask)
def create_map(self, is_testing=False, is_flask=False):
"""Create a folium Map object, treasure_map.
treasure_map can be used to save an html leaflet map.
This method is called automatically on __init__ unless
do_create_map is set to False.
Keyword arguments:
- is_testing -- creates a map in webmap directory
- is_flask -- creates a flask map
"""
map_osm = folium.Map([self.start_coordinates[0], self.start_coordinates[1]], zoom_start=self.zoom)
for stuff in self.stuffs:
place = stuff.location
thing = stuff.thing # thing title
url = stuff.url
image = stuff.image
color = self.sort_stuff(stuff.thing) # Map marker's color
name = """
<link rel='stylesheet' type='text/css'
href='https://maxcdn.bootstrapcdn.com/bootstrap/3.3.6/css/bootstrap.min.css'>
<img src='%s' height='auto' width='160px' />
<h3>%s</h3>
<h4>%s</h4>
<a href='%s' target='_blank'>View Posting in New Tab</a>
""" % (image, thing, place, url)
# TODO: Contingency plan for 0, 0?
lat = stuff.coordinates[0] # Latitude
lon = stuff.coordinates[1] # Longitude
popup = folium.Popup(IFrame(name, width=200, height=300),
max_width=3000)
folium.CircleMarker([lat, lon], radius=self.radius, popup=popup,
fill_color=color, fill_opacity=0.2).add_to(map_osm)
self.radius -= 10 # Diminishing order
self.treasure_map = map_osm
self.add_address(self.address)
if is_testing:
self.save_test_map()
elif is_flask:
self.save_flask_map()
else:
print("call save_map(path) to generate html map")
def save_test_map(self):
"""Create html map in current directory.
Should have python -m http.server running in directory
"""
path = os.getcwd()
self.treasure_map.save(os.path.join(path, 'treasure_map.html'))
print("BEWARNED, this map is likely inaccurate:\nCraigslist denizens care not for computer-precision")
# webbrowser.open_new_tab("localhost:8000/webmap/findit.html") # Open the map in a tab
def save_flask_map(self):
"""Create html map in flask server."""
folium_figure = self.treasure_map.get_root()
folium_figure.header._children['bootstrap'] = folium.element.CssLink('/static/css/style.css')
folium_figure.header._children['Woops'] = folium.element.CssLink('/static/css/map.css') # Why woops?
self.treasure_map.save(os.path.join('treasuremap', 'templates', 'raw_map.html'))
def save_map(self, map_path, css_path=None): # make **argv
"""Create html map in map_path.
Keyword arguments:
- map_path -- the path to create_map in
- css_path -- the path to override css
(defaults to bootstrap via folium)
"""
path = os.getcwd()
if not os.path.exists(os.path.join(path, map_path)):
os.makedirs(os.path.join(path, map_path))
if css_path is not None:
folium_figure = self.treasure_map.get_root() # Leaflet Style conflicts with custom Bootstrap
folium_figure.header._children['Woops'] = folium.element.CssLink(css_path)
self.treasure_map.save(map_path)
def find_city_center(self, location):
"""Return city center longitude latitude."""
geolocator = Nominatim()
if re.match("montreal", location, re.I):
coord = [45.5088, -73.5878]
elif re.match("newyork", location, re.I):
coord = [40.7127, -74.0058]
elif re.match("toronto", location, re.I):
coord = [43.7, -79.4000]
elif re.match("washingtondc", location, re.I):
coord = [38.9047, -77.0164]
elif re.match("vancouver", location, re.I):
coord = [49.2827, -123.1207]
elif re.match("sanfrancisco", location, re.I):
coord = [37.773972, -122.431297]
else:
try:
findit = geolocator.geocode(location) # Last resort
lat = findit.latitude
lon = findit.longitude
coord = [lat, lon]
except Exception:
coord = [0, 0] # This is a bit silly: Null Island
return coord
def add_address(self, _address):
"""Add address to folium map"""
self.address = _address
if _address is not None:
geolocator = Nominatim()
try:
add_lat = geolocator.geocode(self.address).latitude
add_lon = geolocator.geocode(self.address).longitude
except Exception:
add_lat = 0
add_lon = 0
pop_up = self.address + str(add_lat) + str(add_lon)
folium.Marker(location=[add_lat, add_lon],popup=self.address,
icon=folium.Icon(color='red',icon='home')
).add_to(self.treasure_map)
def sort_stuff(self, stuff):
"""Return a color according to regex search.
1. Furniture pattern, red
2. Electronics pattern, blue
3. Miscellaneous pattern, black
4. no match, white
sort_stuff will return with the first pattern found in
that order.
TODO:
- Set and patterns as modifiable attributes.
"""
PATTERN_1 = "(wood|shelf|shelves|table|chair|scrap|desk|oak|pine|armoire|dresser)"
PATTERN_2 = "(tv|screen|écran|speakers|wire|electronic|saw|headphones|arduino|print|television)" #search NOT match
PATTERN_3 = "(book|games|cool|guide|box)"
COLOR_1 = "#FF0000" #red
COLOR_2 = "#3186cc" #blue
COLOR_3 = "#000000" #black
COLOR_DEFAULT = "white"
if re.search(PATTERN_1, stuff, re.I):
color = COLOR_1 #red # TODO: set as Variable
elif re.search(PATTERN_2, stuff, re.I): #the end all
color = COLOR_2 #blue at once
elif re.search(PATTERN_3, stuff, re.I):
color = COLOR_3 #black
else:
color = COLOR_DEFAULT #white
# color = "#ffff00"
return color
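# A small sketch of the colour mapping performed by sort_stuff, given a
# StuffCharter instance `charter` (the titles below are made up for illustration):
# >>> charter.sort_stuff("Free oak desk")        # furniture pattern
# '#FF0000'
# >>> charter.sort_stuff("Old TV and speakers")  # electronics pattern
# '#3186cc'
# >>> charter.sort_stuff("Mystery box of games") # miscellaneous pattern
# '#000000'
# >>> charter.sort_stuff("Assorted free items")  # no pattern matched
# 'white'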
|
{
"content_hash": "82400afab708484aefb59d930ec487c5",
"timestamp": "",
"source": "github",
"line_count": 215,
"max_line_length": 131,
"avg_line_length": 40.562790697674416,
"alnum_prop": 0.5681687879830295,
"repo_name": "polypmer/freestuffs",
"id": "d50389315fcfe93f70a13d89196fe8aeefcfa629",
"size": "8744",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "freestuffs/stuff_charter.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "7740"
},
{
"name": "Makefile",
"bytes": "7622"
},
{
"name": "Python",
"bytes": "48925"
}
],
"symlink_target": ""
}
|
from south.db import db
from south.v2 import SchemaMigration
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'TrainingTempShare.user'
db.add_column('website_trainingtempshare', 'user',
self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'], null=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'TrainingTempShare.user'
db.delete_column('website_trainingtempshare', 'user_id')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'website.country': {
'Meta': {'object_name': 'Country'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '20'})
},
'website.field': {
'Meta': {'object_name': 'Field'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'website.organization': {
'Meta': {'object_name': 'Organization'},
'country': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Country']", 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'members_role': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'website.publicprofilepermissions': {
'Meta': {'object_name': 'PublicProfilePermissions'},
'allowed_user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'allowed_user'", 'unique': 'True', 'to': "orm['auth.User']"}),
'public_user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'public_user'", 'primary_key': 'True', 'to': "orm['auth.User']"})
},
'website.training': {
'Meta': {'object_name': 'Training'},
'cowriters': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'cowriters'", 'symmetrical': 'False', 'to': "orm['auth.User']"}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'creator'", 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_displayed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_featured': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_live': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_updated': ('django.db.models.fields.IntegerField', [], {'default': '1327217400'}),
'participants': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'participants'", 'symmetrical': 'False', 'to': "orm['auth.User']"}),
'resource_id': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '30'})
},
'website.trainingparticipation': {
'Meta': {'object_name': 'TrainingParticipation'},
'count': ('django.db.models.fields.IntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'training': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Training']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'website.trainingschedule': {
'Meta': {'object_name': 'TrainingSchedule'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_scheduled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'start_time': ('django.db.models.fields.DateTimeField', [], {}),
'training': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Training']"})
},
'website.trainingtempshare': {
'Meta': {'object_name': 'TrainingTempShare'},
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True'}),
'facebook_id': ('django.db.models.fields.BigIntegerField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 1, 22, 0, 0)'}),
'role': ('django.db.models.fields.CharField', [], {'max_length': '12'}),
'training': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Training']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'})
},
'website.userprofile': {
'Meta': {'object_name': 'UserProfile'},
'about_me': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'access_token': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'date_of_birth': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'enable_notifications': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'facebook_id': ('django.db.models.fields.BigIntegerField', [], {'unique': 'True', 'null': 'True', 'blank': 'True'}),
'facebook_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'facebook_profile_url': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'field': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Field']", 'null': 'True'}),
'gender': ('django.db.models.fields.CharField', [], {'max_length': '1', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
'isUniStar': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_email_verified': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_organization_verified': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_student': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'linkedin_profile_url': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Organization']", 'null': 'True'}),
'raw_data': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'twitter_profile_url': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'unique': 'True'}),
'website_url': ('django.db.models.fields.TextField', [], {'blank': 'True'})
}
}
complete_apps = ['website']
|
{
"content_hash": "6ca25835812a2c0bc9c1bf07ff691212",
"timestamp": "",
"source": "github",
"line_count": 143,
"max_line_length": 182,
"avg_line_length": 74.02097902097903,
"alnum_prop": 0.5516296646197449,
"repo_name": "UniShared/unishared",
"id": "641944c48e53b9d386f888637a526e31c9808cc7",
"size": "10609",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "UniShared_python/website/migrations/0008_auto__add_field_trainingtempshare_user.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4964"
},
{
"name": "JavaScript",
"bytes": "647537"
},
{
"name": "Python",
"bytes": "440660"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('badgificator', '0004_auto_20150101_1106'),
]
operations = [
migrations.RemoveField(
model_name='badge',
name='condition_function_args',
),
migrations.AddField(
model_name='badge',
name='condition_parameters',
field=models.CharField(max_length=350, null=True, verbose_name="Parameters for Condition's Function", blank=True),
preserve_default=True,
),
migrations.AlterField(
model_name='badge',
name='condition_type',
field=models.CharField(blank=True, max_length=10, verbose_name='Type of condition for the badge', choices=[('NRM', 'Number of models related to a user'), ('NCD', 'Number of consecutive days of presence'), ('ND', 'Number of days of presence'), ('NHFV', 'Number of hits for a view'), ('FWRM', 'Use a function who tests things about model')]),
preserve_default=True,
),
]
|
{
"content_hash": "45f990f328e90640faaffafbf922466d",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 352,
"avg_line_length": 38.6551724137931,
"alnum_prop": 0.6119536128456735,
"repo_name": "mrjmad/django_badgificator",
"id": "dbfe7c3a05cf51be9380dd39a71944fbc871e67c",
"size": "1145",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "badgificator/migrations/0005_auto_20150103_0041.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "243"
},
{
"name": "Python",
"bytes": "45559"
}
],
"symlink_target": ""
}
|
"""SCons.Tool.gcc
Tool-specific initialization for MinGW (http://www.mingw.org/)
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
import os
import os.path
import SCons.Action
import SCons.Builder
import SCons.Defaults
import SCons.Tool
import SCons.Util
# This is what we search for to find mingw:
key_program = 'mingw32-gcc'
def find(env):
# First search in the SCons path
path=env.WhereIs(key_program)
if (path):
return path
# then the OS path:
path=SCons.Util.WhereIs(key_program)
if (path):
return path
# If that doesn't work try default location for mingw
save_path=env['ENV']['PATH']
env.AppendENVPath('PATH',r'c:\MinGW\bin')
path =env.WhereIs(key_program)
if not path:
env['ENV']['PATH']=save_path
return path
def shlib_generator(target, source, env, for_signature):
cmd = SCons.Util.CLVar(['$SHLINK', '$SHLINKFLAGS'])
dll = env.FindIxes(target, 'SHLIBPREFIX', 'SHLIBSUFFIX')
if dll: cmd.extend(['-o', dll])
cmd.extend(['$SOURCES', '$_LIBDIRFLAGS', '$_LIBFLAGS'])
implib = env.FindIxes(target, 'LIBPREFIX', 'LIBSUFFIX')
if implib: cmd.append('-Wl,--out-implib,'+implib.get_string(for_signature))
def_target = env.FindIxes(target, 'WINDOWSDEFPREFIX', 'WINDOWSDEFSUFFIX')
insert_def = env.subst("$WINDOWS_INSERT_DEF")
if insert_def not in ['', '0', 0] and def_target:
cmd.append('-Wl,--output-def,' + def_target.get_string(for_signature))
return [cmd]
def shlib_emitter(target, source, env):
dll = env.FindIxes(target, 'SHLIBPREFIX', 'SHLIBSUFFIX')
no_import_lib = env.get('no_import_lib', 0)
if not dll:
raise SCons.Errors.UserError("A shared library should have exactly one target with the suffix: %s" % env.subst("$SHLIBSUFFIX"))
if not no_import_lib and \
not env.FindIxes(target, 'LIBPREFIX', 'LIBSUFFIX'):
# Create list of target libraries as strings
targetStrings=env.ReplaceIxes(dll,
'SHLIBPREFIX', 'SHLIBSUFFIX',
'LIBPREFIX', 'LIBSUFFIX')
# Now add file nodes to target list
target.append(env.fs.File(targetStrings))
# Append a def file target if there isn't already a def file target
# or a def file source or the user has explicitly asked for the target
# to be emitted.
def_source = env.FindIxes(source, 'WINDOWSDEFPREFIX', 'WINDOWSDEFSUFFIX')
def_target = env.FindIxes(target, 'WINDOWSDEFPREFIX', 'WINDOWSDEFSUFFIX')
skip_def_insert = env.subst("$WINDOWS_INSERT_DEF") in ['', '0', 0]
if not def_source and not def_target and not skip_def_insert:
# Create list of target libraries and def files as strings
targetStrings=env.ReplaceIxes(dll,
'SHLIBPREFIX', 'SHLIBSUFFIX',
'WINDOWSDEFPREFIX', 'WINDOWSDEFSUFFIX')
# Now add file nodes to target list
target.append(env.fs.File(targetStrings))
return (target, source)
shlib_action = SCons.Action.Action(shlib_generator, generator=1)
res_action = SCons.Action.Action('$RCCOM', '$RCCOMSTR')
res_builder = SCons.Builder.Builder(action=res_action, suffix='.o',
source_scanner=SCons.Tool.SourceFileScanner)
SCons.Tool.SourceFileScanner.add_scanner('.rc', SCons.Defaults.CScan)
def generate(env):
mingw = find(env)
if mingw:
dir = os.path.dirname(mingw)
        env.PrependENVPath('PATH', dir)
# Most of mingw is the same as gcc and friends...
gnu_tools = ['gcc', 'g++', 'gnulink', 'ar', 'gas', 'gfortran', 'm4']
for tool in gnu_tools:
SCons.Tool.Tool(tool)(env)
#... but a few things differ:
env['CC'] = 'gcc'
env['SHCCFLAGS'] = SCons.Util.CLVar('$CCFLAGS')
env['CXX'] = 'g++'
env['SHCXXFLAGS'] = SCons.Util.CLVar('$CXXFLAGS')
env['SHLINKFLAGS'] = SCons.Util.CLVar('$LINKFLAGS -shared')
env['SHLINKCOM'] = shlib_action
env['LDMODULECOM'] = shlib_action
env.Append(SHLIBEMITTER = [shlib_emitter])
env['AS'] = 'as'
env['WIN32DEFPREFIX'] = ''
env['WIN32DEFSUFFIX'] = '.def'
env['WINDOWSDEFPREFIX'] = '${WIN32DEFPREFIX}'
env['WINDOWSDEFSUFFIX'] = '${WIN32DEFSUFFIX}'
env['SHOBJSUFFIX'] = '.o'
env['STATIC_AND_SHARED_OBJECTS_ARE_THE_SAME'] = 1
env['RC'] = 'windres'
env['RCFLAGS'] = SCons.Util.CLVar('')
env['RCINCFLAGS'] = '$( ${_concat(RCINCPREFIX, CPPPATH, RCINCSUFFIX, __env__, RDirs, TARGET, SOURCE)} $)'
env['RCINCPREFIX'] = '--include-dir '
env['RCINCSUFFIX'] = ''
env['RCCOM'] = '$RC $_CPPDEFFLAGS $RCINCFLAGS ${RCINCPREFIX} ${SOURCE.dir} $RCFLAGS -i $SOURCE -o $TARGET'
env['BUILDERS']['RES'] = res_builder
# Some setting from the platform also have to be overridden:
env['OBJSUFFIX'] = '.o'
env['LIBPREFIX'] = 'lib'
env['LIBSUFFIX'] = '.a'
env['PROGSUFFIX'] = '.exe'
def exists(env):
return find(env)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
{
"content_hash": "9063ee26148d4864b4b81762d1b32088",
"timestamp": "",
"source": "github",
"line_count": 180,
"max_line_length": 135,
"avg_line_length": 35.74444444444445,
"alnum_prop": 0.6465651227852036,
"repo_name": "Distrotech/scons",
"id": "601ec3b81552ebfffa0241296812af33bcd10e71",
"size": "6434",
"binary": false,
"copies": "3",
"ref": "refs/heads/distrotech-scons",
"path": "src/engine/SCons/Tool/mingw.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "259"
},
{
"name": "JavaScript",
"bytes": "17316"
},
{
"name": "Perl",
"bytes": "45214"
},
{
"name": "Python",
"bytes": "12517068"
},
{
"name": "Shell",
"bytes": "20589"
}
],
"symlink_target": ""
}
|
from .compat import StringIO, str
import seria
import click
CONTEXT_SETTINGS = dict(help_option_names=['-h', '--help'])
input = None
output = None
out_fmt = None
@click.command(context_settings=CONTEXT_SETTINGS)
@click.option('--xml', '-x', 'out_fmt', flag_value='xml')
@click.option('--yaml', '-y', 'out_fmt', flag_value='yaml')
@click.option('--yml', 'out_fmt', flag_value='yaml')
@click.option('--json', '-j', 'out_fmt', flag_value='json')
@click.argument('input', type=click.File('r'), default='-')
@click.argument('output', type=click.File('w'), default='-')
def cli(out_fmt, input, output):
"""Converts text."""
_input = StringIO()
for l in input:
try:
_input.write(str(l))
except TypeError:
_input.write(bytes(l, 'utf-8'))
_input = seria.load(_input)
_out = (_input.dump(out_fmt))
output.write(_out)
if __name__ == '__main__':
    cli()
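# Hedged usage sketch (assumes the command is installed as a 'seria' console
# script, which this file does not itself confirm): convert YAML on stdin to
# JSON on stdout, using '-' for both positional arguments.
#
#   $ cat config.yml | seria -j - -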
|
{
"content_hash": "3381914a8b8a4f60a89f8caac36fedbe",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 68,
"avg_line_length": 27.02857142857143,
"alnum_prop": 0.6014799154334038,
"repo_name": "rtluckie/seria",
"id": "9ad691b01ee82525a0a4360b877ea4512ed05064",
"size": "971",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "seria/cli.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "23298"
}
],
"symlink_target": ""
}
|
"""Utility functions to retrieve OAuth2 credentials using a service account.
A service account is used to allow multiple 'Admin' users access to resources
owned by multiple users in an apps domain.
"""
import os
import http_utils
import log_utils
from oauth2client import client
import recall_errors
import service_account
from google.appengine.api import memcache
_ACCESS_TOKEN_CACHE_S = 60 * 59 # Access tokens last about 1 hour.
_CACHE_NAMESPACE = 'messagerecall_accesstoken#ns'
_LOG = log_utils.GetLogger('messagerecall.credentials_utils')
# Load the key in PKCS 12 format that you downloaded from the Google API
# Console when you created your Service account.
_SERVICE_ACCOUNT_PEM_FILE_NAME = os.path.join(
os.path.dirname(__file__), 'messagerecall_privatekey.pem')
with open(_SERVICE_ACCOUNT_PEM_FILE_NAME, 'rb') as f:
_SERVICE_ACCOUNT_KEY = f.read()
def _GetSignedJwtAssertionCredentials(user_email):
"""Retrieve an OAuth2 credentials object impersonating user_email.
This object is then used to authorize an http connection that will be
used to connect with Google services such as the Admin SDK.
Also includes an access_token that is used to connect to IMAP.
  The first parameter, service_account_name, is the email address
  created for the Service account from the API Console. The sub parameter
  is the authenticated user to impersonate.
Args:
user_email: String of the user email account to impersonate.
Returns:
oauth2client credentials object.
"""
return client.SignedJwtAssertionCredentials(
service_account_name=service_account.SERVICE_ACCOUNT_NAME,
private_key=_SERVICE_ACCOUNT_KEY,
scope=service_account.SERVICE_SCOPES,
sub=user_email)
def GetAuthorizedHttp(user_email):
"""Establish authorized http connection needed for API access.
All authorizations via service account rely on an initial authorized
connection using a domain admin. Authorized http connections are needed
for Google Apiary API access and an authorized access_token is needed
for IMAP access.
Credentials objects come with an empty access_token by default. To avoid
quota issues with the oauth server, we manage access_tokens in the memcache.
If we didn't update the access_token, every credentials.authorize() would
force a round-trip with the oauth server.
Args:
user_email: String of the authorizing user email account.
Returns:
Authorized http connection able to access Google API services.
"""
credentials = _GetSignedJwtAssertionCredentials(user_email)
credentials.access_token = GetUserAccessToken(user_email)
return credentials.authorize(http_utils.GetHttpObject())
def GetUserAccessToken(user_email, force_refresh=False):
"""Helper to get a refreshed access_token for a user via service account.
Args:
user_email: User email for which access_token will be retrieved.
force_refresh: Boolean, if True force a token refresh.
Returns:
Cached access_token or a new one.
"""
access_token = memcache.get(user_email, namespace=_CACHE_NAMESPACE)
if access_token and not force_refresh:
return access_token
credentials = _GetSignedJwtAssertionCredentials(user_email)
# Have observed the following error from refresh():
# 'Unable to fetch URL: https://accounts.google.com/o/oauth2/token'
_LOG.debug('Refreshing access token for %s.', user_email)
credentials.refresh(http_utils.GetHttpObject())
access_token = credentials.access_token
if memcache.set(user_email, access_token, time=_ACCESS_TOKEN_CACHE_S,
namespace=_CACHE_NAMESPACE):
return access_token
raise recall_errors.MessageRecallCounterError(
'Exceeded retry limit in GetUserAccessToken: %s.' % user_email)
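# Hedged usage sketch; not part of the original module. The helper below shows
# one way the authorized http object from GetAuthorizedHttp() might be used to
# reach an Admin SDK service; the apiclient import and the 'admin'/
# 'directory_v1' service names are assumptions for illustration only.
def _ExampleBuildDirectoryService(user_email):  # pragma: no cover
  """Builds an Admin SDK Directory service bound to an authorized http."""
  from apiclient import discovery  # Assumed available in this environment.
  return discovery.build('admin', 'directory_v1',
                         http=GetAuthorizedHttp(user_email))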
|
{
"content_hash": "0d3da77a07ef011223486a7c867c603e",
"timestamp": "",
"source": "github",
"line_count": 103,
"max_line_length": 78,
"avg_line_length": 36.33009708737864,
"alnum_prop": 0.7600213789417424,
"repo_name": "google/googleapps-message-recall",
"id": "b62d714b11856f3cd9cb81a90277416ed5d32a9e",
"size": "4340",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "message_recall/credentials_utils.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "27490"
},
{
"name": "Python",
"bytes": "834068"
}
],
"symlink_target": ""
}
|
from collections import OrderedDict
import os
import re
from typing import Dict, Mapping, Optional, Sequence, Tuple, Type, Union
import pkg_resources
from google.api_core import client_options as client_options_lib
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport import mtls # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth.exceptions import MutualTLSChannelError # type: ignore
from google.oauth2 import service_account # type: ignore
try:
OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
except AttributeError: # pragma: NO COVER
OptionalRetry = Union[retries.Retry, object] # type: ignore
from google.api_core import operation # type: ignore
from google.api_core import operation_async # type: ignore
from google.cloud.dialogflowcx_v3beta1.services.test_cases import pagers
from google.cloud.dialogflowcx_v3beta1.types import test_case
from google.cloud.dialogflowcx_v3beta1.types import test_case as gcdc_test_case
from google.cloud.location import locations_pb2 # type: ignore
from google.longrunning import operations_pb2
from google.protobuf import field_mask_pb2 # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
from .transports.base import TestCasesTransport, DEFAULT_CLIENT_INFO
from .transports.grpc import TestCasesGrpcTransport
from .transports.grpc_asyncio import TestCasesGrpcAsyncIOTransport
class TestCasesClientMeta(type):
"""Metaclass for the TestCases client.
This provides class-level methods for building and retrieving
support objects (e.g. transport) without polluting the client instance
objects.
"""
_transport_registry = OrderedDict() # type: Dict[str, Type[TestCasesTransport]]
_transport_registry["grpc"] = TestCasesGrpcTransport
_transport_registry["grpc_asyncio"] = TestCasesGrpcAsyncIOTransport
def get_transport_class(
cls,
label: str = None,
) -> Type[TestCasesTransport]:
"""Returns an appropriate transport class.
Args:
label: The name of the desired transport. If none is
provided, then the first transport in the registry is used.
Returns:
The transport class to use.
"""
# If a specific transport is requested, return that one.
if label:
return cls._transport_registry[label]
# No transport is requested; return the default (that is, the first one
# in the dictionary).
return next(iter(cls._transport_registry.values()))
class TestCasesClient(metaclass=TestCasesClientMeta):
"""Service for managing [Test
Cases][google.cloud.dialogflow.cx.v3beta1.TestCase] and [Test Case
Results][google.cloud.dialogflow.cx.v3beta1.TestCaseResult].
"""
@staticmethod
def _get_default_mtls_endpoint(api_endpoint):
"""Converts api endpoint to mTLS endpoint.
Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
"*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
Args:
api_endpoint (Optional[str]): the api endpoint to convert.
Returns:
str: converted mTLS api endpoint.
"""
if not api_endpoint:
return api_endpoint
mtls_endpoint_re = re.compile(
r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
)
m = mtls_endpoint_re.match(api_endpoint)
name, mtls, sandbox, googledomain = m.groups()
if mtls or not googledomain:
return api_endpoint
if sandbox:
return api_endpoint.replace(
"sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
)
return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
DEFAULT_ENDPOINT = "dialogflow.googleapis.com"
DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
DEFAULT_ENDPOINT
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
TestCasesClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_info(info)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
TestCasesClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(filename)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
@property
def transport(self) -> TestCasesTransport:
"""Returns the transport used by the client instance.
Returns:
TestCasesTransport: The transport used by the client
instance.
"""
return self._transport
@staticmethod
def agent_path(
project: str,
location: str,
agent: str,
) -> str:
"""Returns a fully-qualified agent string."""
return "projects/{project}/locations/{location}/agents/{agent}".format(
project=project,
location=location,
agent=agent,
)
@staticmethod
def parse_agent_path(path: str) -> Dict[str, str]:
"""Parses a agent path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/agents/(?P<agent>.+?)$",
path,
)
return m.groupdict() if m else {}
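    # Hedged usage sketch (not generated code): the *_path helpers above and
    # below build and dissect resource names symmetrically; the values here
    # are illustrative only.
    #
    #   name = TestCasesClient.agent_path("my-project", "global", "my-agent")
    #   # -> "projects/my-project/locations/global/agents/my-agent"
    #   TestCasesClient.parse_agent_path(name)
    #   # -> {"project": "my-project", "location": "global", "agent": "my-agent"}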
@staticmethod
def entity_type_path(
project: str,
location: str,
agent: str,
entity_type: str,
) -> str:
"""Returns a fully-qualified entity_type string."""
return "projects/{project}/locations/{location}/agents/{agent}/entityTypes/{entity_type}".format(
project=project,
location=location,
agent=agent,
entity_type=entity_type,
)
@staticmethod
def parse_entity_type_path(path: str) -> Dict[str, str]:
"""Parses a entity_type path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/agents/(?P<agent>.+?)/entityTypes/(?P<entity_type>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def environment_path(
project: str,
location: str,
agent: str,
environment: str,
) -> str:
"""Returns a fully-qualified environment string."""
return "projects/{project}/locations/{location}/agents/{agent}/environments/{environment}".format(
project=project,
location=location,
agent=agent,
environment=environment,
)
@staticmethod
def parse_environment_path(path: str) -> Dict[str, str]:
"""Parses a environment path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/agents/(?P<agent>.+?)/environments/(?P<environment>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def flow_path(
project: str,
location: str,
agent: str,
flow: str,
) -> str:
"""Returns a fully-qualified flow string."""
return "projects/{project}/locations/{location}/agents/{agent}/flows/{flow}".format(
project=project,
location=location,
agent=agent,
flow=flow,
)
@staticmethod
def parse_flow_path(path: str) -> Dict[str, str]:
"""Parses a flow path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/agents/(?P<agent>.+?)/flows/(?P<flow>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def intent_path(
project: str,
location: str,
agent: str,
intent: str,
) -> str:
"""Returns a fully-qualified intent string."""
return "projects/{project}/locations/{location}/agents/{agent}/intents/{intent}".format(
project=project,
location=location,
agent=agent,
intent=intent,
)
@staticmethod
def parse_intent_path(path: str) -> Dict[str, str]:
"""Parses a intent path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/agents/(?P<agent>.+?)/intents/(?P<intent>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def page_path(
project: str,
location: str,
agent: str,
flow: str,
page: str,
) -> str:
"""Returns a fully-qualified page string."""
return "projects/{project}/locations/{location}/agents/{agent}/flows/{flow}/pages/{page}".format(
project=project,
location=location,
agent=agent,
flow=flow,
page=page,
)
@staticmethod
def parse_page_path(path: str) -> Dict[str, str]:
"""Parses a page path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/agents/(?P<agent>.+?)/flows/(?P<flow>.+?)/pages/(?P<page>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def test_case_path(
project: str,
location: str,
agent: str,
test_case: str,
) -> str:
"""Returns a fully-qualified test_case string."""
return "projects/{project}/locations/{location}/agents/{agent}/testCases/{test_case}".format(
project=project,
location=location,
agent=agent,
test_case=test_case,
)
@staticmethod
def parse_test_case_path(path: str) -> Dict[str, str]:
"""Parses a test_case path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/agents/(?P<agent>.+?)/testCases/(?P<test_case>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def test_case_result_path(
project: str,
location: str,
agent: str,
test_case: str,
result: str,
) -> str:
"""Returns a fully-qualified test_case_result string."""
return "projects/{project}/locations/{location}/agents/{agent}/testCases/{test_case}/results/{result}".format(
project=project,
location=location,
agent=agent,
test_case=test_case,
result=result,
)
@staticmethod
def parse_test_case_result_path(path: str) -> Dict[str, str]:
"""Parses a test_case_result path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/agents/(?P<agent>.+?)/testCases/(?P<test_case>.+?)/results/(?P<result>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def transition_route_group_path(
project: str,
location: str,
agent: str,
flow: str,
transition_route_group: str,
) -> str:
"""Returns a fully-qualified transition_route_group string."""
return "projects/{project}/locations/{location}/agents/{agent}/flows/{flow}/transitionRouteGroups/{transition_route_group}".format(
project=project,
location=location,
agent=agent,
flow=flow,
transition_route_group=transition_route_group,
)
@staticmethod
def parse_transition_route_group_path(path: str) -> Dict[str, str]:
"""Parses a transition_route_group path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/agents/(?P<agent>.+?)/flows/(?P<flow>.+?)/transitionRouteGroups/(?P<transition_route_group>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def webhook_path(
project: str,
location: str,
agent: str,
webhook: str,
) -> str:
"""Returns a fully-qualified webhook string."""
return "projects/{project}/locations/{location}/agents/{agent}/webhooks/{webhook}".format(
project=project,
location=location,
agent=agent,
webhook=webhook,
)
@staticmethod
def parse_webhook_path(path: str) -> Dict[str, str]:
"""Parses a webhook path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/agents/(?P<agent>.+?)/webhooks/(?P<webhook>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def common_billing_account_path(
billing_account: str,
) -> str:
"""Returns a fully-qualified billing_account string."""
return "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
@staticmethod
def parse_common_billing_account_path(path: str) -> Dict[str, str]:
"""Parse a billing_account path into its component segments."""
m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_folder_path(
folder: str,
) -> str:
"""Returns a fully-qualified folder string."""
return "folders/{folder}".format(
folder=folder,
)
@staticmethod
def parse_common_folder_path(path: str) -> Dict[str, str]:
"""Parse a folder path into its component segments."""
m = re.match(r"^folders/(?P<folder>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_organization_path(
organization: str,
) -> str:
"""Returns a fully-qualified organization string."""
return "organizations/{organization}".format(
organization=organization,
)
@staticmethod
def parse_common_organization_path(path: str) -> Dict[str, str]:
"""Parse a organization path into its component segments."""
m = re.match(r"^organizations/(?P<organization>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_project_path(
project: str,
) -> str:
"""Returns a fully-qualified project string."""
return "projects/{project}".format(
project=project,
)
@staticmethod
def parse_common_project_path(path: str) -> Dict[str, str]:
"""Parse a project path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_location_path(
project: str,
location: str,
) -> str:
"""Returns a fully-qualified location string."""
return "projects/{project}/locations/{location}".format(
project=project,
location=location,
)
@staticmethod
def parse_common_location_path(path: str) -> Dict[str, str]:
"""Parse a location path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
return m.groupdict() if m else {}
@classmethod
def get_mtls_endpoint_and_cert_source(
cls, client_options: Optional[client_options_lib.ClientOptions] = None
):
"""Return the API endpoint and client cert source for mutual TLS.
The client cert source is determined in the following order:
(1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the
client cert source is None.
(2) if `client_options.client_cert_source` is provided, use the provided one; if the
default client cert source exists, use the default one; otherwise the client cert
source is None.
The API endpoint is determined in the following order:
        (1) if `client_options.api_endpoint` is provided, use the provided one.
        (2) if the `GOOGLE_API_USE_MTLS_ENDPOINT` environment variable is "always", use the
        default mTLS endpoint; if the environment variable is "never", use the default API
        endpoint; otherwise, if a client cert source exists, use the default mTLS endpoint,
        otherwise use the default API endpoint.
More details can be found at https://google.aip.dev/auth/4114.
Args:
client_options (google.api_core.client_options.ClientOptions): Custom options for the
client. Only the `api_endpoint` and `client_cert_source` properties may be used
in this method.
Returns:
Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the
client cert source to use.
Raises:
google.auth.exceptions.MutualTLSChannelError: If any errors happen.
"""
if client_options is None:
client_options = client_options_lib.ClientOptions()
use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")
use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
if use_client_cert not in ("true", "false"):
raise ValueError(
"Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`"
)
if use_mtls_endpoint not in ("auto", "never", "always"):
raise MutualTLSChannelError(
"Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`"
)
# Figure out the client cert source to use.
client_cert_source = None
if use_client_cert == "true":
if client_options.client_cert_source:
client_cert_source = client_options.client_cert_source
elif mtls.has_default_client_cert_source():
client_cert_source = mtls.default_client_cert_source()
# Figure out which api endpoint to use.
if client_options.api_endpoint is not None:
api_endpoint = client_options.api_endpoint
elif use_mtls_endpoint == "always" or (
use_mtls_endpoint == "auto" and client_cert_source
):
api_endpoint = cls.DEFAULT_MTLS_ENDPOINT
else:
api_endpoint = cls.DEFAULT_ENDPOINT
return api_endpoint, client_cert_source
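    # Hedged usage sketch (not generated code): the endpoint decision above can
    # be previewed without constructing a client. With neither environment
    # variable set, the regular endpoint and no cert source are returned:
    #
    #   endpoint, cert_src = TestCasesClient.get_mtls_endpoint_and_cert_source()
    #   # endpoint == "dialogflow.googleapis.com", cert_src is None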
def __init__(
self,
*,
credentials: Optional[ga_credentials.Credentials] = None,
transport: Union[str, TestCasesTransport, None] = None,
client_options: Optional[client_options_lib.ClientOptions] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiates the test cases client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, TestCasesTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (google.api_core.client_options.ClientOptions): Custom options for the
client. It won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
use the default regular endpoint) and "auto" (auto switch to the
default mTLS endpoint if client certificate is present, this is
the default value). However, the ``api_endpoint`` property takes
precedence if provided.
(2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
to provide client certificate for mutual TLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
if isinstance(client_options, dict):
client_options = client_options_lib.from_dict(client_options)
if client_options is None:
client_options = client_options_lib.ClientOptions()
api_endpoint, client_cert_source_func = self.get_mtls_endpoint_and_cert_source(
client_options
)
api_key_value = getattr(client_options, "api_key", None)
if api_key_value and credentials:
raise ValueError(
"client_options.api_key and credentials are mutually exclusive"
)
# Save or instantiate the transport.
# Ordinarily, we provide the transport, but allowing a custom transport
# instance provides an extensibility point for unusual situations.
if isinstance(transport, TestCasesTransport):
# transport is a TestCasesTransport instance.
if credentials or client_options.credentials_file or api_key_value:
raise ValueError(
"When providing a transport instance, "
"provide its credentials directly."
)
if client_options.scopes:
raise ValueError(
"When providing a transport instance, provide its scopes "
"directly."
)
self._transport = transport
else:
import google.auth._default # type: ignore
if api_key_value and hasattr(
google.auth._default, "get_api_key_credentials"
):
credentials = google.auth._default.get_api_key_credentials(
api_key_value
)
Transport = type(self).get_transport_class(transport)
self._transport = Transport(
credentials=credentials,
credentials_file=client_options.credentials_file,
host=api_endpoint,
scopes=client_options.scopes,
client_cert_source_for_mtls=client_cert_source_func,
quota_project_id=client_options.quota_project_id,
client_info=client_info,
always_use_jwt_access=True,
api_audience=client_options.api_audience,
)
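    # Hedged usage sketch (not generated code): a client for a regional agent is
    # typically constructed by overriding the endpoint through client_options;
    # the region below is illustrative only.
    #
    #   from google.api_core.client_options import ClientOptions
    #   client = TestCasesClient(
    #       client_options=ClientOptions(
    #           api_endpoint="us-central1-dialogflow.googleapis.com")
    #   )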
def list_test_cases(
self,
request: Union[test_case.ListTestCasesRequest, dict] = None,
*,
parent: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListTestCasesPager:
r"""Fetches a list of test cases for a given agent.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import dialogflowcx_v3beta1
def sample_list_test_cases():
# Create a client
client = dialogflowcx_v3beta1.TestCasesClient()
# Initialize request argument(s)
request = dialogflowcx_v3beta1.ListTestCasesRequest(
parent="parent_value",
)
# Make the request
page_result = client.list_test_cases(request=request)
# Handle the response
for response in page_result:
print(response)
Args:
request (Union[google.cloud.dialogflowcx_v3beta1.types.ListTestCasesRequest, dict]):
The request object. The request message for
[TestCases.ListTestCases][google.cloud.dialogflow.cx.v3beta1.TestCases.ListTestCases].
parent (str):
                Required. The agent to list all test cases for. Format:
``projects/<Project ID>/locations/<Location ID>/agents/<Agent ID>``.
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.dialogflowcx_v3beta1.services.test_cases.pagers.ListTestCasesPager:
The response message for
[TestCases.ListTestCases][google.cloud.dialogflow.cx.v3beta1.TestCases.ListTestCases].
Iterating over this object will yield results and
resolve additional pages automatically.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a test_case.ListTestCasesRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, test_case.ListTestCasesRequest):
request = test_case.ListTestCasesRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.list_test_cases]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# This method is paged; wrap the response in a pager, which provides
# an `__iter__` convenience method.
response = pagers.ListTestCasesPager(
method=rpc,
request=request,
response=response,
metadata=metadata,
)
# Done; return the response.
return response
def batch_delete_test_cases(
self,
request: Union[test_case.BatchDeleteTestCasesRequest, dict] = None,
*,
parent: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> None:
r"""Batch deletes test cases.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import dialogflowcx_v3beta1
def sample_batch_delete_test_cases():
# Create a client
client = dialogflowcx_v3beta1.TestCasesClient()
# Initialize request argument(s)
request = dialogflowcx_v3beta1.BatchDeleteTestCasesRequest(
parent="parent_value",
names=['names_value1', 'names_value2'],
)
# Make the request
client.batch_delete_test_cases(request=request)
Args:
request (Union[google.cloud.dialogflowcx_v3beta1.types.BatchDeleteTestCasesRequest, dict]):
The request object. The request message for
[TestCases.BatchDeleteTestCases][google.cloud.dialogflow.cx.v3beta1.TestCases.BatchDeleteTestCases].
parent (str):
Required. The agent to delete test cases from. Format:
``projects/<Project ID>/locations/<Location ID>/agents/<Agent ID>``.
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a test_case.BatchDeleteTestCasesRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, test_case.BatchDeleteTestCasesRequest):
request = test_case.BatchDeleteTestCasesRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.batch_delete_test_cases]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
def get_test_case(
self,
request: Union[test_case.GetTestCaseRequest, dict] = None,
*,
name: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> test_case.TestCase:
r"""Gets a test case.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import dialogflowcx_v3beta1
def sample_get_test_case():
# Create a client
client = dialogflowcx_v3beta1.TestCasesClient()
# Initialize request argument(s)
request = dialogflowcx_v3beta1.GetTestCaseRequest(
name="name_value",
)
# Make the request
response = client.get_test_case(request=request)
# Handle the response
print(response)
Args:
request (Union[google.cloud.dialogflowcx_v3beta1.types.GetTestCaseRequest, dict]):
The request object. The request message for
[TestCases.GetTestCase][google.cloud.dialogflow.cx.v3beta1.TestCases.GetTestCase].
name (str):
                Required. The name of the test case. Format:
``projects/<Project ID>/locations/<Location ID>/agents/<Agent ID>/testCases/<TestCase ID>``.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.dialogflowcx_v3beta1.types.TestCase:
Represents a test case.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a test_case.GetTestCaseRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, test_case.GetTestCaseRequest):
request = test_case.GetTestCaseRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.get_test_case]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
def create_test_case(
self,
request: Union[gcdc_test_case.CreateTestCaseRequest, dict] = None,
*,
parent: str = None,
test_case: gcdc_test_case.TestCase = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> gcdc_test_case.TestCase:
r"""Creates a test case for the given agent.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import dialogflowcx_v3beta1
def sample_create_test_case():
# Create a client
client = dialogflowcx_v3beta1.TestCasesClient()
# Initialize request argument(s)
test_case = dialogflowcx_v3beta1.TestCase()
test_case.display_name = "display_name_value"
request = dialogflowcx_v3beta1.CreateTestCaseRequest(
parent="parent_value",
test_case=test_case,
)
# Make the request
response = client.create_test_case(request=request)
# Handle the response
print(response)
Args:
request (Union[google.cloud.dialogflowcx_v3beta1.types.CreateTestCaseRequest, dict]):
The request object. The request message for
[TestCases.CreateTestCase][google.cloud.dialogflow.cx.v3beta1.TestCases.CreateTestCase].
parent (str):
Required. The agent to create the test case for. Format:
``projects/<Project ID>/locations/<Location ID>/agents/<Agent ID>``.
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
test_case (google.cloud.dialogflowcx_v3beta1.types.TestCase):
Required. The test case to create.
This corresponds to the ``test_case`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.dialogflowcx_v3beta1.types.TestCase:
Represents a test case.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent, test_case])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a gcdc_test_case.CreateTestCaseRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, gcdc_test_case.CreateTestCaseRequest):
request = gcdc_test_case.CreateTestCaseRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
if test_case is not None:
request.test_case = test_case
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.create_test_case]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
def update_test_case(
self,
request: Union[gcdc_test_case.UpdateTestCaseRequest, dict] = None,
*,
test_case: gcdc_test_case.TestCase = None,
update_mask: field_mask_pb2.FieldMask = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> gcdc_test_case.TestCase:
r"""Updates the specified test case.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import dialogflowcx_v3beta1
def sample_update_test_case():
# Create a client
client = dialogflowcx_v3beta1.TestCasesClient()
# Initialize request argument(s)
test_case = dialogflowcx_v3beta1.TestCase()
test_case.display_name = "display_name_value"
request = dialogflowcx_v3beta1.UpdateTestCaseRequest(
test_case=test_case,
)
# Make the request
response = client.update_test_case(request=request)
# Handle the response
print(response)
Args:
request (Union[google.cloud.dialogflowcx_v3beta1.types.UpdateTestCaseRequest, dict]):
The request object. The request message for
[TestCases.UpdateTestCase][google.cloud.dialogflow.cx.v3beta1.TestCases.UpdateTestCase].
test_case (google.cloud.dialogflowcx_v3beta1.types.TestCase):
Required. The test case to update.
This corresponds to the ``test_case`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
update_mask (google.protobuf.field_mask_pb2.FieldMask):
Required. The mask to specify which fields should be
updated. The
[``creationTime``][google.cloud.dialogflow.cx.v3beta1.TestCase.creation_time]
and
[``lastTestResult``][google.cloud.dialogflow.cx.v3beta1.TestCase.last_test_result]
cannot be updated.
This corresponds to the ``update_mask`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.dialogflowcx_v3beta1.types.TestCase:
Represents a test case.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([test_case, update_mask])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a gcdc_test_case.UpdateTestCaseRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, gcdc_test_case.UpdateTestCaseRequest):
request = gcdc_test_case.UpdateTestCaseRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if test_case is not None:
request.test_case = test_case
if update_mask is not None:
request.update_mask = update_mask
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.update_test_case]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("test_case.name", request.test_case.name),)
),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
def run_test_case(
self,
request: Union[test_case.RunTestCaseRequest, dict] = None,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation.Operation:
r"""Kicks off a test case run.
This method is a `long-running
operation <https://cloud.google.com/dialogflow/cx/docs/how/long-running-operation>`__.
The returned ``Operation`` type has the following
method-specific fields:
- ``metadata``:
[RunTestCaseMetadata][google.cloud.dialogflow.cx.v3beta1.RunTestCaseMetadata]
- ``response``:
[RunTestCaseResponse][google.cloud.dialogflow.cx.v3beta1.RunTestCaseResponse]
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import dialogflowcx_v3beta1
def sample_run_test_case():
# Create a client
client = dialogflowcx_v3beta1.TestCasesClient()
# Initialize request argument(s)
request = dialogflowcx_v3beta1.RunTestCaseRequest(
name="name_value",
)
# Make the request
operation = client.run_test_case(request=request)
print("Waiting for operation to complete...")
response = operation.result()
# Handle the response
print(response)
Args:
request (Union[google.cloud.dialogflowcx_v3beta1.types.RunTestCaseRequest, dict]):
The request object. The request message for
[TestCases.RunTestCase][google.cloud.dialogflow.cx.v3beta1.TestCases.RunTestCase].
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation.Operation:
An object representing a long-running operation.
The result type for the operation will be
:class:`google.cloud.dialogflowcx_v3beta1.types.RunTestCaseResponse`
The response message for
[TestCases.RunTestCase][google.cloud.dialogflow.cx.v3beta1.TestCases.RunTestCase].
"""
# Create or coerce a protobuf request object.
# Minor optimization to avoid making a copy if the user passes
# in a test_case.RunTestCaseRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, test_case.RunTestCaseRequest):
request = test_case.RunTestCaseRequest(request)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.run_test_case]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Wrap the response in an operation future.
response = operation.from_gapic(
response,
self._transport.operations_client,
test_case.RunTestCaseResponse,
metadata_type=test_case.RunTestCaseMetadata,
)
# Done; return the response.
return response
def batch_run_test_cases(
self,
request: Union[test_case.BatchRunTestCasesRequest, dict] = None,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation.Operation:
r"""Kicks off a batch run of test cases.
This method is a `long-running
operation <https://cloud.google.com/dialogflow/cx/docs/how/long-running-operation>`__.
The returned ``Operation`` type has the following
method-specific fields:
- ``metadata``:
[BatchRunTestCasesMetadata][google.cloud.dialogflow.cx.v3beta1.BatchRunTestCasesMetadata]
- ``response``:
[BatchRunTestCasesResponse][google.cloud.dialogflow.cx.v3beta1.BatchRunTestCasesResponse]
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import dialogflowcx_v3beta1
def sample_batch_run_test_cases():
# Create a client
client = dialogflowcx_v3beta1.TestCasesClient()
# Initialize request argument(s)
request = dialogflowcx_v3beta1.BatchRunTestCasesRequest(
parent="parent_value",
test_cases=['test_cases_value1', 'test_cases_value2'],
)
# Make the request
operation = client.batch_run_test_cases(request=request)
print("Waiting for operation to complete...")
response = operation.result()
# Handle the response
print(response)
Args:
request (Union[google.cloud.dialogflowcx_v3beta1.types.BatchRunTestCasesRequest, dict]):
The request object. The request message for
[TestCases.BatchRunTestCases][google.cloud.dialogflow.cx.v3beta1.TestCases.BatchRunTestCases].
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation.Operation:
An object representing a long-running operation.
The result type for the operation will be
:class:`google.cloud.dialogflowcx_v3beta1.types.BatchRunTestCasesResponse`
The response message for
[TestCases.BatchRunTestCases][google.cloud.dialogflow.cx.v3beta1.TestCases.BatchRunTestCases].
"""
# Create or coerce a protobuf request object.
# Minor optimization to avoid making a copy if the user passes
# in a test_case.BatchRunTestCasesRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, test_case.BatchRunTestCasesRequest):
request = test_case.BatchRunTestCasesRequest(request)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.batch_run_test_cases]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Wrap the response in an operation future.
response = operation.from_gapic(
response,
self._transport.operations_client,
test_case.BatchRunTestCasesResponse,
metadata_type=test_case.BatchRunTestCasesMetadata,
)
# Done; return the response.
return response
def calculate_coverage(
self,
request: Union[test_case.CalculateCoverageRequest, dict] = None,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> test_case.CalculateCoverageResponse:
r"""Calculates the test coverage for an agent.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import dialogflowcx_v3beta1
def sample_calculate_coverage():
# Create a client
client = dialogflowcx_v3beta1.TestCasesClient()
# Initialize request argument(s)
request = dialogflowcx_v3beta1.CalculateCoverageRequest(
agent="agent_value",
type_="TRANSITION_ROUTE_GROUP",
)
# Make the request
response = client.calculate_coverage(request=request)
# Handle the response
print(response)
Args:
request (Union[google.cloud.dialogflowcx_v3beta1.types.CalculateCoverageRequest, dict]):
The request object. The request message for
[TestCases.CalculateCoverage][google.cloud.dialogflow.cx.v3beta1.TestCases.CalculateCoverage].
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.dialogflowcx_v3beta1.types.CalculateCoverageResponse:
The response message for
[TestCases.CalculateCoverage][google.cloud.dialogflow.cx.v3beta1.TestCases.CalculateCoverage].
"""
# Create or coerce a protobuf request object.
# Minor optimization to avoid making a copy if the user passes
# in a test_case.CalculateCoverageRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, test_case.CalculateCoverageRequest):
request = test_case.CalculateCoverageRequest(request)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.calculate_coverage]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("agent", request.agent),)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
def import_test_cases(
self,
request: Union[test_case.ImportTestCasesRequest, dict] = None,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation.Operation:
r"""Imports the test cases from a Cloud Storage bucket or a local
file. It always creates new test cases and won't overwrite any
existing ones. The provided ID in the imported test case is
        ignored.
This method is a `long-running
operation <https://cloud.google.com/dialogflow/cx/docs/how/long-running-operation>`__.
The returned ``Operation`` type has the following
method-specific fields:
- ``metadata``:
[ImportTestCasesMetadata][google.cloud.dialogflow.cx.v3beta1.ImportTestCasesMetadata]
- ``response``:
[ImportTestCasesResponse][google.cloud.dialogflow.cx.v3beta1.ImportTestCasesResponse]
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import dialogflowcx_v3beta1
def sample_import_test_cases():
# Create a client
client = dialogflowcx_v3beta1.TestCasesClient()
# Initialize request argument(s)
request = dialogflowcx_v3beta1.ImportTestCasesRequest(
gcs_uri="gcs_uri_value",
parent="parent_value",
)
# Make the request
operation = client.import_test_cases(request=request)
print("Waiting for operation to complete...")
response = operation.result()
# Handle the response
print(response)
Args:
request (Union[google.cloud.dialogflowcx_v3beta1.types.ImportTestCasesRequest, dict]):
The request object. The request message for
[TestCases.ImportTestCases][google.cloud.dialogflow.cx.v3beta1.TestCases.ImportTestCases].
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation.Operation:
An object representing a long-running operation.
The result type for the operation will be
:class:`google.cloud.dialogflowcx_v3beta1.types.ImportTestCasesResponse`
The response message for
[TestCases.ImportTestCases][google.cloud.dialogflow.cx.v3beta1.TestCases.ImportTestCases].
"""
# Create or coerce a protobuf request object.
# Minor optimization to avoid making a copy if the user passes
# in a test_case.ImportTestCasesRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, test_case.ImportTestCasesRequest):
request = test_case.ImportTestCasesRequest(request)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.import_test_cases]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Wrap the response in an operation future.
response = operation.from_gapic(
response,
self._transport.operations_client,
test_case.ImportTestCasesResponse,
metadata_type=test_case.ImportTestCasesMetadata,
)
# Done; return the response.
return response
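    # Illustrative, standalone snippet (not a generated client method): working
    # with the google.api_core.operation.Operation future returned by
    # import_test_cases above. The agent path and GCS URI are placeholders.
    def _example_wait_for_import():
        from google.cloud import dialogflowcx_v3beta1

        client = dialogflowcx_v3beta1.TestCasesClient()
        operation = client.import_test_cases(
            request=dialogflowcx_v3beta1.ImportTestCasesRequest(
                parent="projects/my-project/locations/global/agents/my-agent",
                gcs_uri="gs://my-bucket/test-cases.blob",
            )
        )
        # While the LRO runs, operation.metadata deserializes
        # ImportTestCasesMetadata; result() blocks until completion and
        # returns ImportTestCasesResponse.
        if not operation.done():
            print("import in progress:", operation.metadata)
        response = operation.result(timeout=300)
        print(response)
        return response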
def export_test_cases(
self,
request: Union[test_case.ExportTestCasesRequest, dict] = None,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation.Operation:
r"""Exports the test cases under the agent to a Cloud Storage bucket
or a local file. Filter can be applied to export a subset of
test cases.
This method is a `long-running
operation <https://cloud.google.com/dialogflow/cx/docs/how/long-running-operation>`__.
The returned ``Operation`` type has the following
method-specific fields:
- ``metadata``:
[ExportTestCasesMetadata][google.cloud.dialogflow.cx.v3beta1.ExportTestCasesMetadata]
- ``response``:
[ExportTestCasesResponse][google.cloud.dialogflow.cx.v3beta1.ExportTestCasesResponse]
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import dialogflowcx_v3beta1
def sample_export_test_cases():
# Create a client
client = dialogflowcx_v3beta1.TestCasesClient()
# Initialize request argument(s)
request = dialogflowcx_v3beta1.ExportTestCasesRequest(
gcs_uri="gcs_uri_value",
parent="parent_value",
)
# Make the request
operation = client.export_test_cases(request=request)
print("Waiting for operation to complete...")
response = operation.result()
# Handle the response
print(response)
Args:
request (Union[google.cloud.dialogflowcx_v3beta1.types.ExportTestCasesRequest, dict]):
The request object. The request message for
[TestCases.ExportTestCases][google.cloud.dialogflow.cx.v3beta1.TestCases.ExportTestCases].
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation.Operation:
An object representing a long-running operation.
The result type for the operation will be
:class:`google.cloud.dialogflowcx_v3beta1.types.ExportTestCasesResponse`
The response message for
[TestCases.ExportTestCases][google.cloud.dialogflow.cx.v3beta1.TestCases.ExportTestCases].
"""
# Create or coerce a protobuf request object.
# Minor optimization to avoid making a copy if the user passes
# in a test_case.ExportTestCasesRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, test_case.ExportTestCasesRequest):
request = test_case.ExportTestCasesRequest(request)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.export_test_cases]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Wrap the response in an operation future.
response = operation.from_gapic(
response,
self._transport.operations_client,
test_case.ExportTestCasesResponse,
metadata_type=test_case.ExportTestCasesMetadata,
)
# Done; return the response.
return response
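    # Illustrative, standalone snippet (not a generated client method): exporting
    # a filtered subset of test cases, as the docstring above notes. The `filter`
    # expression syntax shown here is an assumption, and the resource names and
    # GCS URI are placeholders.
    def _example_export_subset():
        from google.cloud import dialogflowcx_v3beta1

        client = dialogflowcx_v3beta1.TestCasesClient()
        operation = client.export_test_cases(
            request=dialogflowcx_v3beta1.ExportTestCasesRequest(
                parent="projects/my-project/locations/global/agents/my-agent",
                gcs_uri="gs://my-bucket/exported-test-cases.blob",
                filter='name = "projects/my-project/locations/global/agents/'
                       'my-agent/testCases/my-test-case"',
            )
        )
        # The LRO resolves to ExportTestCasesResponse.
        return operation.result(timeout=300)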
def list_test_case_results(
self,
request: Union[test_case.ListTestCaseResultsRequest, dict] = None,
*,
parent: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListTestCaseResultsPager:
r"""Fetches a list of results for a given test case.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import dialogflowcx_v3beta1
def sample_list_test_case_results():
# Create a client
client = dialogflowcx_v3beta1.TestCasesClient()
# Initialize request argument(s)
request = dialogflowcx_v3beta1.ListTestCaseResultsRequest(
parent="parent_value",
)
# Make the request
page_result = client.list_test_case_results(request=request)
# Handle the response
for response in page_result:
print(response)
Args:
request (Union[google.cloud.dialogflowcx_v3beta1.types.ListTestCaseResultsRequest, dict]):
The request object. The request message for
[TestCases.ListTestCaseResults][google.cloud.dialogflow.cx.v3beta1.TestCases.ListTestCaseResults].
parent (str):
Required. The test case to list results for. Format:
                ``projects/<Project ID>/locations/<Location ID>/agents/<Agent ID>/testCases/<TestCase ID>``.
Specify a ``-`` as a wildcard for TestCase ID to list
results across multiple test cases.
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.dialogflowcx_v3beta1.services.test_cases.pagers.ListTestCaseResultsPager:
The response message for
[TestCases.ListTestCaseResults][google.cloud.dialogflow.cx.v3beta1.TestCases.ListTestCaseResults].
Iterating over this object will yield results and
resolve additional pages automatically.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a test_case.ListTestCaseResultsRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, test_case.ListTestCaseResultsRequest):
request = test_case.ListTestCaseResultsRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.list_test_case_results]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# This method is paged; wrap the response in a pager, which provides
# an `__iter__` convenience method.
response = pagers.ListTestCaseResultsPager(
method=rpc,
request=request,
response=response,
metadata=metadata,
)
# Done; return the response.
return response
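    # Illustrative, standalone snippet (not a generated client method): consuming
    # the ListTestCaseResultsPager returned above. Using "-" as the TestCase ID
    # wildcard follows the docstring; the project/agent path is a placeholder.
    def _example_iterate_results():
        from google.cloud import dialogflowcx_v3beta1

        client = dialogflowcx_v3beta1.TestCasesClient()
        parent = "projects/my-project/locations/global/agents/my-agent/testCases/-"
        pager = client.list_test_case_results(parent=parent)
        # Iteration yields TestCaseResult items and fetches further pages
        # transparently; pager.pages walks the raw page responses instead.
        for result in pager:
            print(result.name)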
def get_test_case_result(
self,
request: Union[test_case.GetTestCaseResultRequest, dict] = None,
*,
name: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> test_case.TestCaseResult:
r"""Gets a test case result.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import dialogflowcx_v3beta1
def sample_get_test_case_result():
# Create a client
client = dialogflowcx_v3beta1.TestCasesClient()
# Initialize request argument(s)
request = dialogflowcx_v3beta1.GetTestCaseResultRequest(
name="name_value",
)
# Make the request
response = client.get_test_case_result(request=request)
# Handle the response
print(response)
Args:
request (Union[google.cloud.dialogflowcx_v3beta1.types.GetTestCaseResultRequest, dict]):
The request object. The request message for
[TestCases.GetTestCaseResult][google.cloud.dialogflow.cx.v3beta1.TestCases.GetTestCaseResult].
name (str):
                Required. The name of the test case result. Format:
``projects/<Project ID>/locations/<Location ID>/agents/<Agent ID>/testCases/<TestCase ID>/results/<TestCaseResult ID>``.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.dialogflowcx_v3beta1.types.TestCaseResult:
Represents a result from running a
test case in an agent environment.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a test_case.GetTestCaseResultRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, test_case.GetTestCaseResultRequest):
request = test_case.GetTestCaseResultRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.get_test_case_result]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
"""Releases underlying transport's resources.
.. warning::
ONLY use as a context manager if the transport is NOT shared
with other clients! Exiting the with block will CLOSE the transport
and may cause errors in other clients!
"""
self.transport.close()
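    # Illustrative, standalone snippet (not a generated client method): using the
    # client as a context manager so the transport is closed on exit, per the
    # warning above. The resource name is a placeholder; do not use this pattern
    # if the transport is shared with other clients.
    def _example_client_lifecycle():
        from google.cloud import dialogflowcx_v3beta1

        with dialogflowcx_v3beta1.TestCasesClient() as client:
            client.get_test_case_result(
                name="projects/my-project/locations/global/agents/my-agent/"
                     "testCases/my-case/results/my-result"
            )
        # The underlying transport is closed here; the client must not be reused.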
def list_operations(
self,
request: operations_pb2.ListOperationsRequest = None,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operations_pb2.ListOperationsResponse:
r"""Lists operations that match the specified filter in the request.
Args:
request (:class:`~.operations_pb2.ListOperationsRequest`):
The request object. Request message for
`ListOperations` method.
retry (google.api_core.retry.Retry): Designation of what errors,
if any, should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.operations_pb2.ListOperationsResponse:
Response message for ``ListOperations`` method.
"""
# Create or coerce a protobuf request object.
# The request isn't a proto-plus wrapped type,
# so it must be constructed via keyword expansion.
if isinstance(request, dict):
request = operations_pb2.ListOperationsRequest(**request)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method.wrap_method(
self._transport.list_operations,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
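    # Illustrative, standalone snippet (not a generated client method): because
    # ListOperationsRequest is not a proto-plus type, a plain dict is expanded
    # into keyword arguments, as the coercion above shows. The name value is a
    # placeholder.
    def _example_list_operations():
        from google.cloud import dialogflowcx_v3beta1

        client = dialogflowcx_v3beta1.TestCasesClient()
        response = client.list_operations(
            request={"name": "projects/my-project/locations/global"}
        )
        for op in response.operations:
            print(op.name, op.done)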
def get_operation(
self,
request: operations_pb2.GetOperationRequest = None,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operations_pb2.Operation:
r"""Gets the latest state of a long-running operation.
Args:
request (:class:`~.operations_pb2.GetOperationRequest`):
The request object. Request message for
`GetOperation` method.
retry (google.api_core.retry.Retry): Designation of what errors,
if any, should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.operations_pb2.Operation:
An ``Operation`` object.
"""
# Create or coerce a protobuf request object.
# The request isn't a proto-plus wrapped type,
# so it must be constructed via keyword expansion.
if isinstance(request, dict):
request = operations_pb2.GetOperationRequest(**request)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method.wrap_method(
self._transport.get_operation,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
def cancel_operation(
self,
request: operations_pb2.CancelOperationRequest = None,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> None:
r"""Starts asynchronous cancellation on a long-running operation.
The server makes a best effort to cancel the operation, but success
is not guaranteed. If the server doesn't support this method, it returns
`google.rpc.Code.UNIMPLEMENTED`.
Args:
request (:class:`~.operations_pb2.CancelOperationRequest`):
The request object. Request message for
`CancelOperation` method.
retry (google.api_core.retry.Retry): Designation of what errors,
if any, should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
None
"""
# Create or coerce a protobuf request object.
# The request isn't a proto-plus wrapped type,
# so it must be constructed via keyword expansion.
if isinstance(request, dict):
request = operations_pb2.CancelOperationRequest(**request)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method.wrap_method(
self._transport.cancel_operation,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
def get_location(
self,
request: locations_pb2.GetLocationRequest = None,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> locations_pb2.Location:
r"""Gets information about a location.
Args:
request (:class:`~.location_pb2.GetLocationRequest`):
The request object. Request message for
`GetLocation` method.
retry (google.api_core.retry.Retry): Designation of what errors,
if any, should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.location_pb2.Location:
Location object.
"""
# Create or coerce a protobuf request object.
# The request isn't a proto-plus wrapped type,
# so it must be constructed via keyword expansion.
if isinstance(request, dict):
request = locations_pb2.GetLocationRequest(**request)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method.wrap_method(
self._transport.get_location,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
def list_locations(
self,
request: locations_pb2.ListLocationsRequest = None,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> locations_pb2.ListLocationsResponse:
r"""Lists information about the supported locations for this service.
Args:
request (:class:`~.location_pb2.ListLocationsRequest`):
The request object. Request message for
`ListLocations` method.
retry (google.api_core.retry.Retry): Designation of what errors,
if any, should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.location_pb2.ListLocationsResponse:
Response message for ``ListLocations`` method.
"""
# Create or coerce a protobuf request object.
# The request isn't a proto-plus wrapped type,
# so it must be constructed via keyword expansion.
if isinstance(request, dict):
request = locations_pb2.ListLocationsRequest(**request)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method.wrap_method(
self._transport.list_locations,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution(
"google-cloud-dialogflowcx",
).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
__all__ = ("TestCasesClient",)
|
{
"content_hash": "eeaecc83da83922a0385489a643270b3",
"timestamp": "",
"source": "github",
"line_count": 2211,
"max_line_length": 167,
"avg_line_length": 40.207146087743105,
"alnum_prop": 0.598674885824203,
"repo_name": "googleapis/python-dialogflow-cx",
"id": "f463b19556199e51ffcbb69e93390ed5d6c3a650",
"size": "89498",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "google/cloud/dialogflowcx_v3beta1/services/test_cases/client.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "10904903"
},
{
"name": "Shell",
"bytes": "30681"
}
],
"symlink_target": ""
}
|
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ambari Agent
"""
from resource_management import *
from resource_management.libraries.functions import stack_select
from resource_management.libraries.functions import StackFeature
from resource_management.libraries.functions.stack_features import check_stack_feature
from resource_management.libraries.functions.security_commons import build_expectations, \
cached_kinit_executor, get_params_from_filesystem, validate_security_config_properties, \
FILE_TYPE_XML
from webhcat import webhcat
from webhcat_service import webhcat_service
from ambari_commons import OSConst
from ambari_commons.os_family_impl import OsFamilyImpl
class WebHCatServer(Script):
def install(self, env):
import params
self.install_packages(env)
def start(self, env, upgrade_type=None):
import params
env.set_params(params)
self.configure(env) # FOR SECURITY
webhcat_service(action='start', upgrade_type=upgrade_type)
def stop(self, env, upgrade_type=None):
import params
env.set_params(params)
webhcat_service(action='stop')
def configure(self, env):
import params
env.set_params(params)
webhcat()
@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
class WebHCatServerWindows(WebHCatServer):
def status(self, env):
import status_params
env.set_params(status_params)
check_windows_service_status(status_params.webhcat_server_win_service_name)
@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
class WebHCatServerDefault(WebHCatServer):
def status(self, env):
import status_params
env.set_params(status_params)
check_process_status(status_params.webhcat_pid_file)
def pre_upgrade_restart(self, env, upgrade_type=None):
Logger.info("Executing WebHCat Stack Upgrade pre-restart")
import params
env.set_params(params)
if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version):
stack_select.select_packages(params.version)
def get_log_folder(self):
import params
return params.hcat_log_dir
def get_user(self):
import params
return params.webhcat_user
if __name__ == "__main__":
WebHCatServer().execute()
|
{
"content_hash": "9bd53a41191c04774479c9556b1ce0ee",
"timestamp": "",
"source": "github",
"line_count": 88,
"max_line_length": 93,
"avg_line_length": 33.03409090909091,
"alnum_prop": 0.7667698658410733,
"repo_name": "arenadata/ambari",
"id": "c4e99fa34179bdc3d483cdc239a82cc8654c43e5",
"size": "2907",
"binary": false,
"copies": "1",
"ref": "refs/heads/branch-adh-1.6",
"path": "ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/webhcat_server.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "46700"
},
{
"name": "C",
"bytes": "331204"
},
{
"name": "C#",
"bytes": "215907"
},
{
"name": "C++",
"bytes": "257"
},
{
"name": "CSS",
"bytes": "343739"
},
{
"name": "CoffeeScript",
"bytes": "8465"
},
{
"name": "Dockerfile",
"bytes": "6387"
},
{
"name": "EJS",
"bytes": "777"
},
{
"name": "FreeMarker",
"bytes": "2654"
},
{
"name": "Gherkin",
"bytes": "990"
},
{
"name": "Groovy",
"bytes": "15882"
},
{
"name": "HTML",
"bytes": "717983"
},
{
"name": "Handlebars",
"bytes": "1819641"
},
{
"name": "Java",
"bytes": "29172298"
},
{
"name": "JavaScript",
"bytes": "18571926"
},
{
"name": "Jinja",
"bytes": "1490416"
},
{
"name": "Less",
"bytes": "412933"
},
{
"name": "Makefile",
"bytes": "11111"
},
{
"name": "PHP",
"bytes": "149648"
},
{
"name": "PLpgSQL",
"bytes": "287501"
},
{
"name": "PowerShell",
"bytes": "2090340"
},
{
"name": "Python",
"bytes": "18507704"
},
{
"name": "R",
"bytes": "3943"
},
{
"name": "Ruby",
"bytes": "38590"
},
{
"name": "SCSS",
"bytes": "40072"
},
{
"name": "Shell",
"bytes": "924115"
},
{
"name": "Stylus",
"bytes": "820"
},
{
"name": "TSQL",
"bytes": "42351"
},
{
"name": "Vim script",
"bytes": "5813"
},
{
"name": "sed",
"bytes": "2303"
}
],
"symlink_target": ""
}
|
import functools
from collections import OrderedDict
from utils import displayupdate, hw
from guicore.switcher import SwitchScreen
import config
import debug
import logsupport
from screens import screen
import issuecommands
from logsupport import ConsoleWarning
from screens.maintscreenbase import MaintScreenDesc, fixedoverrides
import consolestatus
import screens.supportscreens as supportscreens
from keys.keyutils import DispOpt, ChooseType
MaintScreen = None
def SetUpMaintScreens():
global MaintScreen
screenset = []
LogDisp = supportscreens.PagedDisplay('LocalLog', PickStartingSpot,
functools.partial(logsupport.LineRenderer, uselog=logsupport.Logs.log),
logsupport.Logs.PageTitle, config.sysStore.LogFontSize, 'white')
screenset.append(LogDisp)
Status = consolestatus.SetUpConsoleStatus()
if Status is not None:
screenset.append(Status)
ExitMenu = OrderedDict()
for cmd, action in issuecommands.cmdcalls.items():
if issuecommands.Where.LocalMenuExits in action.where:
ExitMenu[cmd] = (action.DisplayName, action.Proc, None, action.Verify)
Exits = MaintScreenDesc('System Exit/Restart', ExitMenu)
screenset.append(Exits)
VersMenu = OrderedDict()
VersMenuAdv = OrderedDict()
for cmd, action in issuecommands.cmdcalls.items():
if issuecommands.Where.LocalMenuVersions in action.where:
VersMenu[cmd] = (action.DisplayName, action.Proc)
VersMenuAdv[cmd] = (action.DisplayName, action.Proc)
elif issuecommands.Where.LocalMenuVersionsAdv in action.where:
VersMenuAdv[cmd] = (action.DisplayName, action.Proc)
Versions = MaintScreenDesc('Version Control', VersMenu)
VersionsAdv = MaintScreenDesc('Advanced Version Control', VersMenuAdv)
screenset.append(Versions)
screenset.append(VersionsAdv)
FlagsScreens = []
nflags = len(debug.DbgFlags) + 3
# will need key for each debug flag plus a return plus a loglevel up and loglevel down
# (label, tapproc, dbltapproc, verify, dispopts, defopts, var)
loglevdispup = DispOpt(choosertype=ChooseType.rangeval, chooser=(0, 9), color=('gold', 'blue', 'black'),
deflabel=('Log Less', 'Detail', '$',))
loglevdispdef = DispOpt(choosertype=ChooseType.rangeval, chooser=(0, 9), color=('gold', 'blue', 'black'),
deflabel=('Log Bad', 'Level', '$'))
logleveldispdn = []
for i in range(len(logsupport.LogLevels)):
logleveldispdn.append(DispOpt(choosertype=ChooseType.intval, chooser=i, color=('gold', 'blue', 'black'),
deflabel=('Log More', 'Detail', logsupport.LogLevels[i])))
nextdisp = (
DispOpt(choosertype=ChooseType.Noneval, chooser=None, color=('pink', 'blue', 'black'), deflabel=('Next',)),)
debFlagInput = [("LogLevelUp", None, None, False, '', loglevdispup, "System:LogLevel"),
("LogLevelDown", None, None, False, logleveldispdn, loglevdispdef, "System:LogLevel")]
for f in debug.DbgFlags:
debFlagInput.append((f, None, None, False,
(DispOpt(choosertype=ChooseType.stateval, chooser='state*on',
color=('gold', 'blue', 'white'), deflabel=(f,)),
DispOpt(choosertype=ChooseType.stateval, chooser='state*off',
color=('gold', 'blue', 'black'), deflabel=(f,))),
None, 'Debug:' + f))
flagspercol = hw.screenheight // 120 # todo switch to new screen sizing
flagsperrow = hw.screenwidth // 120
flagoverrides = fixedoverrides.copy()
flagoverrides.update(KeysPerColumn=flagspercol, KeysPerRow=flagsperrow)
flagscreencnt = 0
while nflags > 0: # this needs to have a richer input where the flags have source of their state and a DispOpt
thisscrn = min(nflags, flagspercol * flagsperrow)
nflags = nflags - flagspercol * flagsperrow + 1
tmp = OrderedDict()
for i in range(thisscrn - 1): # leave space for next or return
n = debFlagInput[0][0]
tmp[n] = debFlagInput.pop(0)
if nflags > 0: # will need another flag screen so build a "next"
tmp['next'] = ('Next', None, None, False, nextdisp, None)
# 'Next', functools.partial(goto, MaintScreen),False,'') # this gets fixed below to be a real next
FlagsScreens.append(MaintScreenDesc('Flags Setting ({})'.format(flagscreencnt), tmp,
overrides=flagoverrides)) # this needs key descriptors passed in
flagscreencnt += 1
FlagsScreens[-1].KeysPerColumn = flagspercol
FlagsScreens[-1].KeysPerRow = flagsperrow
for i in range(len(FlagsScreens) - 1):
FlagsScreens[i].Keys['next'].Proc = functools.partial(goto, FlagsScreens[i + 1])
for s in FlagsScreens:
screenset.append(s)
debug.DebugFlagKeys.update(s.Keys)
for kn, k in s.Keys.items():
if kn in debug.DbgFlags:
k.State = debug.dbgStore.GetVal(k.name)
debug.dbgStore.AddAlert(k.name, (syncKeytoStore, k))
k.Proc = functools.partial(setdbg, k)
debug.DebugFlagKeys["LogLevelUp"].Proc = functools.partial(adjloglevel, debug.DebugFlagKeys["LogLevelUp"])
debug.DebugFlagKeys["LogLevelDown"].Proc = functools.partial(adjloglevel, debug.DebugFlagKeys["LogLevelDown"])
TopLevel = OrderedDict([('log', ('Show Log', functools.partial(screen.PushToScreen, LogDisp, 'Maint'))),
('versions', ('Select Version', functools.partial(screen.PushToScreen, Versions, 'Maint'),
functools.partial(screen.PushToScreen, VersionsAdv, 'Maint'))),
('flags', ('Set Flags', functools.partial(screen.PushToScreen, FlagsScreens[0], 'Maint')))])
if Status is not None: TopLevel['status'] = (
'Network Consoles', functools.partial(screen.PushToScreen, Status, 'Maint'))
TopLevel['exit'] = ('Exit/Restart', functools.partial(screen.PushToScreen, Exits, 'Maint'))
MaintScreen = MaintScreenDesc('Console Maintenance', TopLevel)
for s in screenset:
s.userstore.ReParent(MaintScreen)
# noinspection PyUnusedLocal,PyUnusedLocal,PyUnusedLocal
def syncKeytoStore(storeitem, old, new, key, chgsource):
key.State = new
# noinspection PyUnusedLocal
def setdbg(K):
st = debug.dbgStore.GetVal(K.name)
K.State = not st
K.PaintKey()
displayupdate.updatedisplay()
debug.dbgStore.SetVal(K.name, not st)
K.State = debug.dbgStore.GetVal(K.name)
# this allows for case where flag gets reset by proc called servicing the set
K.PaintKey()
displayupdate.updatedisplay()
logsupport.Logs.Log("Debug flag ", K.name, ' = ', K.State)
# noinspection PyUnusedLocal
def adjloglevel(K):
if K.name == "LogLevelUp":
if config.sysStore.LogLevel < len(logsupport.LogLevels) - 1:
config.sysStore.LogLevel += 1
else:
if config.sysStore.LogLevel > 0:
config.sysStore.LogLevel -= 1
debug.DebugFlagKeys["LogLevelUp"].PaintKey()
debug.DebugFlagKeys["LogLevelDown"].PaintKey()
logsupport.Logs.Log("Log Level changed via ", K.name, " to ", config.sysStore.LogLevel, severity=ConsoleWarning)
displayupdate.updatedisplay()
# noinspection PyUnusedLocal
def goto(newscreen):
SwitchScreen(newscreen, 'Bright', 'Maint goto' + newscreen.name, newstate='Maint')
def PickStartingSpot():
if config.sysStore.ErrorNotice != -1:
startat = config.sysStore.ErrorNotice
config.sysStore.ErrorNotice = -1
consolestatus.ReportStatus('error ind cleared')
else:
startat = 0
return startat
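# Illustrative sketch (not part of the module): the flag-screen paging
# arithmetic used in SetUpMaintScreens above. Each screen holds
# KeysPerColumn * KeysPerRow keys, and every screen that needs a continuation
# gives one slot to a "next" key, so capacity - 1 flag keys fit on it. The
# 3x4 layout and the 25-key count below are placeholder values.
def _example_flag_screen_count(nflags=25, flagspercol=3, flagsperrow=4):
    capacity = flagspercol * flagsperrow
    screens = 0
    while nflags > 0:
        screens += 1
        if nflags <= capacity:
            nflags = 0  # last screen: no "next" key needed
        else:
            nflags -= capacity - 1  # one slot consumed by the "next" key
    return screens  # 25 flags, 3x4 layout -> 11 + 11 + 3 flag keys = 3 screens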
|
{
"content_hash": "e43c3ce1d2facac1f01cdd6360cc3a07",
"timestamp": "",
"source": "github",
"line_count": 170,
"max_line_length": 113,
"avg_line_length": 41.411764705882355,
"alnum_prop": 0.7329545454545454,
"repo_name": "kevinkahn/softconsole",
"id": "7e99c6b0f56f50d195de411472f628815355557c",
"size": "7040",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "screens/maintscreen.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Euphoria",
"bytes": "267"
},
{
"name": "Python",
"bytes": "839903"
},
{
"name": "Shell",
"bytes": "101927"
}
],
"symlink_target": ""
}
|
import logging
import json
import sys
from dlab.fab import *
from dlab.meta_lib import *
from dlab.actions_lib import *
import os
import traceback
if __name__ == "__main__":
instance_class = 'notebook'
local_log_filename = "{}_{}_{}.log".format(os.environ['conf_resource'], os.environ['edge_user_name'],
os.environ['request_id'])
local_log_filepath = "/logs/" + os.environ['conf_resource'] + "/" + local_log_filename
logging.basicConfig(format='%(levelname)-8s [%(asctime)s] %(message)s',
level=logging.DEBUG,
filename=local_log_filepath)
notebook_config = dict()
try:
notebook_config['exploratory_name'] = (os.environ['exploratory_name']).lower().replace('_', '-')
except:
notebook_config['exploratory_name'] = ''
notebook_config['service_base_name'] = (os.environ['conf_service_base_name']).lower().replace('_', '-')
notebook_config['instance_type'] = os.environ['gcp_notebook_instance_size']
notebook_config['key_name'] = os.environ['conf_key_name']
notebook_config['edge_user_name'] = (os.environ['edge_user_name']).lower().replace('_', '-')
notebook_config['instance_name'] = '{0}-{1}-nb-{2}'.format(notebook_config['service_base_name'],
notebook_config['edge_user_name'],
notebook_config['exploratory_name'])
# generating variables regarding EDGE proxy on Notebook instance
instance_hostname = GCPMeta().get_private_ip_address(notebook_config['instance_name'])
edge_instance_name = '{0}-{1}-edge'.format(notebook_config['service_base_name'], notebook_config['edge_user_name'])
notebook_config['ssh_key_path'] = '{0}{1}.pem'.format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
notebook_config['dlab_ssh_user'] = os.environ['conf_os_user']
notebook_config['zone'] = os.environ['gcp_zone']
try:
if os.environ['conf_os_family'] == 'debian':
initial_user = 'ubuntu'
sudo_group = 'sudo'
if os.environ['conf_os_family'] == 'redhat':
initial_user = 'ec2-user'
sudo_group = 'wheel'
logging.info('[CREATING DLAB SSH USER]')
print('[CREATING DLAB SSH USER]')
params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format\
(instance_hostname, notebook_config['ssh_key_path'], initial_user,
notebook_config['dlab_ssh_user'], sudo_group)
try:
local("~/scripts/{}.py {}".format('create_ssh_user', params))
except:
traceback.print_exc()
raise Exception
except Exception as err:
append_result("Failed creating ssh user 'dlab'.", str(err))
GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
sys.exit(1)
# configuring proxy on Notebook instance
try:
logging.info('[CONFIGURE PROXY ON ZEPPELIN INSTANCE]')
print('[CONFIGURE PROXY ON ZEPPELIN INSTANCE]')
additional_config = {"proxy_host": edge_instance_name, "proxy_port": "3128"}
params = "--hostname {} --instance_name {} --keyfile {} --additional_config '{}' --os_user {}" \
.format(instance_hostname, notebook_config['instance_name'], notebook_config['ssh_key_path'],
json.dumps(additional_config), notebook_config['dlab_ssh_user'])
try:
local("~/scripts/{}.py {}".format('common_configure_proxy', params))
except:
traceback.print_exc()
raise Exception
except Exception as err:
append_result("Failed to configure proxy.", str(err))
GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
sys.exit(1)
# updating repositories & installing python packages
try:
logging.info('[INSTALLING PREREQUISITES TO ZEPPELIN NOTEBOOK INSTANCE]')
print('[INSTALLING PREREQUISITES TO ZEPPELIN NOTEBOOK INSTANCE]')
params = "--hostname {} --keyfile {} --user {} --region {}". \
format(instance_hostname, notebook_config['ssh_key_path'], notebook_config['dlab_ssh_user'],
os.environ['gcp_region'])
try:
local("~/scripts/{}.py {}".format('install_prerequisites', params))
except:
traceback.print_exc()
raise Exception
except Exception as err:
append_result("Failed installing apps: apt & pip.", str(err))
GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
sys.exit(1)
# installing and configuring zeppelin and all dependencies
try:
logging.info('[CONFIGURE ZEPPELIN NOTEBOOK INSTANCE]')
print('[CONFIGURE ZEPPELIN NOTEBOOK INSTANCE]')
additional_config = {"frontend_hostname": edge_instance_name,
"backend_hostname": instance_hostname,
"backend_port": "8080",
"nginx_template_dir": "/root/templates/"}
params = "--hostname {} --instance_name {} " \
"--keyfile {} --region {} " \
"--additional_config '{}' --os_user {} " \
"--spark_version {} --hadoop_version {} " \
"--edge_hostname {} --proxy_port {} " \
"--zeppelin_version {} --scala_version {} " \
"--livy_version {} --multiple_clusters {} " \
"--r_mirror {} --endpoint_url {} " \
"--exploratory_name {}" \
.format(instance_hostname, notebook_config['instance_name'], notebook_config['ssh_key_path'], os.environ['gcp_region'],
json.dumps(additional_config), notebook_config['dlab_ssh_user'], os.environ['notebook_spark_version'],
os.environ['notebook_hadoop_version'], edge_instance_name, '3128',
os.environ['notebook_zeppelin_version'], os.environ['notebook_scala_version'],
os.environ['notebook_livy_version'], os.environ['notebook_multiple_clusters'],
os.environ['notebook_r_mirror'], 'null',
notebook_config['exploratory_name'])
try:
local("~/scripts/{}.py {}".format('configure_zeppelin_node', params))
except:
traceback.print_exc()
raise Exception
except Exception as err:
append_result("Failed to configure zeppelin.", str(err))
GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
sys.exit(1)
try:
print('[INSTALLING USERs KEY]')
logging.info('[INSTALLING USERs KEY]')
additional_config = {"user_keyname": os.environ['edge_user_name'],
"user_keydir": os.environ['conf_key_dir']}
params = "--hostname {} --keyfile {} --additional_config '{}' --user {}".format(
instance_hostname, notebook_config['ssh_key_path'], json.dumps(additional_config),
notebook_config['dlab_ssh_user'])
try:
local("~/scripts/{}.py {}".format('install_user_key', params))
except:
append_result("Failed installing users key")
raise Exception
except Exception as err:
append_result("Failed installing users key.", str(err))
GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
sys.exit(1)
try:
print('[SETUP USER GIT CREDENTIALS]')
logging.info('[SETUP USER GIT CREDENTIALS]')
params = '--os_user {} --notebook_ip {} --keyfile "{}"' \
.format(notebook_config['dlab_ssh_user'], instance_hostname, notebook_config['ssh_key_path'])
try:
local("~/scripts/{}.py {}".format('common_download_git_certfile', params))
local("~/scripts/{}.py {}".format('manage_git_creds', params))
except:
append_result("Failed setup git credentials")
raise Exception
except Exception as err:
append_result("Failed to setup git credentials.", str(err))
GCPActions().remove_instance(notebook_config['instance_name'], notebook_config['zone'])
sys.exit(1)
# generating output information
ip_address = GCPMeta().get_private_ip_address(notebook_config['instance_name'])
zeppelin_ip_url = "http://" + ip_address + ":8080/"
ungit_ip_url = "http://" + ip_address + ":8085/{}-ungit/".format(notebook_config['exploratory_name'])
print('[SUMMARY]')
logging.info('[SUMMARY]')
print("Instance name: {}".format(notebook_config['instance_name']))
print("Private IP: {}".format(ip_address))
print("Instance type: {}".format(notebook_config['instance_type']))
print("Key name: {}".format(notebook_config['key_name']))
print("User key name: {}".format(os.environ['edge_user_name']))
print("Zeppelin URL: {}".format(zeppelin_ip_url))
print("Ungit URL: {}".format(ungit_ip_url))
print('SSH access (from Edge node, via IP address): ssh -i {0}.pem {1}@{2}'.format(notebook_config['key_name'],
notebook_config['dlab_ssh_user'],
ip_address))
with open("/root/result.json", 'w') as result:
res = {"hostname": ip_address,
"ip": ip_address,
"instance_id": notebook_config['instance_name'],
"master_keyname": os.environ['conf_key_name'],
"notebook_name": notebook_config['instance_name'],
"Action": "Create new notebook server",
"exploratory_url": [
{"description": "Zeppelin",
"url": zeppelin_ip_url},
{"description": "Ungit",
"url": ungit_ip_url}]}
result.write(json.dumps(res))
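# Illustrative sketch (not part of the original script): the repeated
# "log the step, run a helper script, tear down the instance and exit on
# failure" pattern above could be factored into a helper like this.
# run_step is a hypothetical name; it reuses local(), append_result() and
# GCPActions() already imported at the top of the file.
def run_step(description, script, params, conf):
    logging.info(description)
    print(description)
    try:
        local("~/scripts/{}.py {}".format(script, params))
    except Exception as err:
        traceback.print_exc()
        append_result("Failed: {}".format(description), str(err))
        GCPActions().remove_instance(conf['instance_name'], conf['zone'])
        sys.exit(1)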
|
{
"content_hash": "e228f6f856cec00c18ae8c2620258a4c",
"timestamp": "",
"source": "github",
"line_count": 196,
"max_line_length": 131,
"avg_line_length": 51.566326530612244,
"alnum_prop": 0.5693084001187296,
"repo_name": "epam/DLab",
"id": "803998340d6704edc1bee913d5ade49cbdc65a6a",
"size": "10876",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "infrastructure-provisioning/src/general/scripts/gcp/zeppelin_configure.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "81633"
},
{
"name": "HTML",
"bytes": "110323"
},
{
"name": "Java",
"bytes": "2473499"
},
{
"name": "Jupyter Notebook",
"bytes": "80955"
},
{
"name": "Python",
"bytes": "1861086"
},
{
"name": "R",
"bytes": "4894"
},
{
"name": "Ruby",
"bytes": "62731"
},
{
"name": "Shell",
"bytes": "18826"
},
{
"name": "TypeScript",
"bytes": "363308"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from indico.modules.events.logs.controllers import RHEventLogs, RHEventLogsJSON
from indico.web.flask.wrappers import IndicoBlueprint
_bp = IndicoBlueprint('event_logs', __name__, template_folder='templates', virtual_template_folder='events/logs',
url_prefix='/event/<confId>/manage/logs')
_bp.add_url_rule('/', 'index', RHEventLogs)
_bp.add_url_rule('/api/logs', 'logs', RHEventLogsJSON)
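# Illustrative sketch (not part of the original blueprint): the two rules above
# register the endpoints 'event_logs.index' and 'event_logs.logs' beneath the
# blueprint's url_prefix, so URLs can be built with Flask's url_for inside an
# application/request context. confId=123 is a placeholder value.
def _example_urls():
    from flask import url_for
    # -> '/event/123/manage/logs/' and '/event/123/manage/logs/api/logs'
    return url_for('event_logs.index', confId=123), url_for('event_logs.logs', confId=123)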
|
{
"content_hash": "66fd667b68e94f2cdec275a540646704",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 113,
"avg_line_length": 41.36363636363637,
"alnum_prop": 0.7208791208791209,
"repo_name": "OmeGak/indico",
"id": "21528905dfda014451e3514e8f1a906454f7d383",
"size": "669",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "indico/modules/events/logs/blueprint.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "547418"
},
{
"name": "HTML",
"bytes": "1366687"
},
{
"name": "JavaScript",
"bytes": "1678182"
},
{
"name": "Mako",
"bytes": "1340"
},
{
"name": "Python",
"bytes": "4488419"
},
{
"name": "Shell",
"bytes": "2724"
},
{
"name": "TeX",
"bytes": "23051"
},
{
"name": "XSLT",
"bytes": "1504"
}
],
"symlink_target": ""
}
|
config = {
'login_url': 'https://entgaming.net/forum/ucp.php',
'games_url': 'https://entgaming.net/bans/games.php',
'game_url': 'https://entgaming.net/bans/game.php',
'rangeban_url': 'https://entgaming.net/bans/rangeban.php',
'verbose': 0,
    'ent-user' : '<Username>',  # placeholder: your entgaming.net forum username
    'ent-pass' : '<Password>'   # placeholder: the matching forum password
}
def getKey(key):
return config[key]
def getUrl(key,params):
return config[key]+'?'+params
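# Illustrative usage sketch (not part of the original file); 'id=123' is a
# placeholder query string.
def _example_usage():
    assert getKey('games_url') == 'https://entgaming.net/bans/games.php'
    assert getUrl('game_url', 'id=123') == 'https://entgaming.net/bans/game.php?id=123'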
|
{
"content_hash": "7ffc7b55d0be8cd43f0f84c6652ccbfb",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 59,
"avg_line_length": 27.266666666666666,
"alnum_prop": 0.6381418092909535,
"repo_name": "Carterj3/Ent_Hephaestus",
"id": "0dd3ac6766441851791289a51674ec1e3eafdeb0",
"size": "409",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "config.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "9882"
}
],
"symlink_target": ""
}
|
import _plotly_utils.basevalidators
class TickfontValidator(_plotly_utils.basevalidators.CompoundValidator):
def __init__(self, plotly_name="tickfont", parent_name="carpet.aaxis", **kwargs):
super(TickfontValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop("data_class_str", "Tickfont"),
data_docs=kwargs.pop(
"data_docs",
"""
color
family
HTML font family - the typeface that will be
applied by the web browser. The web browser
will only be able to apply a font if it is
available on the system which it operates.
Provide multiple font families, separated by
commas, to indicate the preference in which to
apply fonts if they aren't available on the
system. The Chart Studio Cloud (at
https://chart-studio.plotly.com or on-premise)
generates images on a server, where only a
select number of fonts are installed and
supported. These include "Arial", "Balto",
"Courier New", "Droid Sans",, "Droid Serif",
"Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
size
""",
),
**kwargs,
)
|
{
"content_hash": "e0e5e2cf37c076586dbfbbac708b8323",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 85,
"avg_line_length": 41.2972972972973,
"alnum_prop": 0.5445026178010471,
"repo_name": "plotly/plotly.py",
"id": "39f6745e710c75c72120fc90666661b3bb996faf",
"size": "1528",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/carpet/aaxis/_tickfont.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "545"
},
{
"name": "JavaScript",
"bytes": "2074"
},
{
"name": "PostScript",
"bytes": "565328"
},
{
"name": "Python",
"bytes": "31506317"
},
{
"name": "TypeScript",
"bytes": "71337"
}
],
"symlink_target": ""
}
|
from swgpy.object import *
def create(kernel):
result = Static()
result.template = "object/static/terrain/tatooine/shared_rock_spire_fin_tatooine.iff"
result.attribute_template_id = -1
result.stfName("obj_n","unknown_object")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
{
"content_hash": "3f3853b91099d8ad1ae360a86d32bfc7",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 86,
"avg_line_length": 24.23076923076923,
"alnum_prop": 0.6984126984126984,
"repo_name": "obi-two/Rebelion",
"id": "65c2d98510e3c1b52f2ba9bcf9e8926abd5b2b20",
"size": "460",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "data/scripts/templates/object/static/terrain/tatooine/shared_rock_spire_fin_tatooine.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11818"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2293610"
},
{
"name": "CMake",
"bytes": "39727"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7499185"
},
{
"name": "SQLPL",
"bytes": "41864"
}
],
"symlink_target": ""
}
|
import tensorflow as tf
from tensorforce.core import parameter_modules, SignatureDict, TensorSpec, tf_function, tf_util
from tensorforce.core.layers import Layer
class PreprocessingLayer(Layer):
"""
Base class for preprocessing layers which require to be reset.
Args:
name (string): Layer name
(<span style="color:#00C000"><b>default</b></span>: internally chosen).
input_spec (specification): <span style="color:#00C000"><b>internal use</b></span>.
"""
def __init__(self, *, name=None, input_spec=None):
super().__init__(name=name, input_spec=input_spec)
def input_signature(self, *, function):
if function == 'reset':
return SignatureDict()
else:
return super().input_signature(function=function)
def output_signature(self, *, function):
if function == 'reset':
return SignatureDict(
singleton=TensorSpec(type='bool', shape=()).signature(batched=False)
)
else:
return super().output_signature(function=function)
@tf_function(num_args=0)
def reset(self):
raise NotImplementedError
class Clipping(Layer):
"""
Clipping layer (specification key: `clipping`).
Args:
lower (parameter, float): Lower clipping value
(<span style="color:#00C000"><b>default</b></span>: no lower bound).
upper (parameter, float): Upper clipping value
(<span style="color:#00C000"><b>default</b></span>: no upper bound).
name (string): Layer name
(<span style="color:#00C000"><b>default</b></span>: internally chosen).
input_spec (specification): <span style="color:#00C000"><b>internal use</b></span>.
"""
def __init__(self, *, lower=None, upper=None, name=None, input_spec=None):
super().__init__(name=name, input_spec=input_spec)
if lower is None:
assert upper is not None
self.lower = None
else:
self.lower = self.submodule(
name='lower', module=lower, modules=parameter_modules, dtype='float'
)
if upper is None:
assert lower is not None
self.upper = None
else:
self.upper = self.submodule(
name='upper', module=upper, modules=parameter_modules, dtype='float'
)
if lower is not None:
self.architecture_kwargs['lower'] = str(lower)
if upper is not None:
self.architecture_kwargs['upper'] = str(upper)
def default_input_spec(self):
return TensorSpec(type='float', shape=None)
@tf_function(num_args=1)
def apply(self, *, x):
if self.lower is None:
upper = self.upper.value()
return tf.math.minimum(x=x, y=upper)
elif self.upper is None:
lower = self.lower.value()
return tf.math.maximum(x=x, y=lower)
else:
lower = self.lower.value()
upper = self.upper.value()
assertions = list()
if self.config.create_tf_assertions:
assertions.append(tf.debugging.assert_greater_equal(
x=upper, y=lower, message="Incompatible lower and upper clipping bound."
))
with tf.control_dependencies(control_inputs=assertions):
return tf.clip_by_value(t=x, clip_value_min=lower, clip_value_max=upper)
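# Illustrative sketch (not part of the module): the element-wise behaviour that
# Clipping.apply implements above (the layer itself is referenced by the
# specification key 'clipping'), shown with plain TensorFlow ops. The bounds
# and sample values are placeholders.
def _example_clipping():
    import tensorflow as tf
    x = tf.constant([-3.0, -0.5, 0.0, 2.5, 7.0])
    lower, upper = -1.0, 2.0
    # Equivalent to the two-sided branch of Clipping.apply above.
    clipped = tf.clip_by_value(x, clip_value_min=lower, clip_value_max=upper)
    return clipped  # -> [-1.0, -0.5, 0.0, 2.0, 2.0]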
class Deltafier(PreprocessingLayer):
"""
Deltafier layer computing the difference between the current and the previous input; can only
be used as preprocessing layer (specification key: `deltafier`).
Args:
concatenate (False | int >= 0): Whether to concatenate instead of replace deltas with
input, and if so, concatenation axis
(<span style="color:#00C000"><b>default</b></span>: false).
name (string): Layer name
(<span style="color:#00C000"><b>default</b></span>: internally chosen).
input_spec (specification): <span style="color:#00C000"><b>internal use</b></span>.
"""
def __init__(self, *, concatenate=False, name=None, input_spec=None):
self.concatenate = concatenate
super().__init__(name=name, input_spec=input_spec)
self.architecture_kwargs['concatenate'] = str(concatenate)
def default_input_spec(self):
return TensorSpec(type='float', shape=None)
def output_spec(self):
output_spec = super().output_spec()
if self.concatenate is not False:
output_spec.shape = tuple(
2 * dims if axis == self.concatenate else dims
for axis, dims in enumerate(output_spec.shape)
)
return output_spec
def initialize(self):
super().initialize()
self.has_previous = self.variable(
name='has-previous', spec=TensorSpec(type='bool', shape=()), initializer='zeros',
is_trainable=False, is_saved=False
)
self.previous = self.variable(
name='previous', spec=TensorSpec(type='float', shape=((1,) + self.input_spec.shape)),
initializer='zeros', is_trainable=False, is_saved=False
)
@tf_function(num_args=0)
def reset(self):
false = tf_util.constant(value=False, dtype='bool')
assignment = self.has_previous.assign(value=false, read_value=False)
with tf.control_dependencies(control_inputs=(assignment,)):
return tf_util.identity(input=false)
@tf_function(num_args=1)
def apply(self, *, x):
assertions = list()
if self.config.create_tf_assertions:
assertions.append(tf.debugging.assert_less_equal(
x=tf.shape(input=x)[0], y=1,
message="Deltafier preprocessor currently not compatible with batched Agent.act."
))
# TODO: hack for empty batch (for self.previous.assign below)
extended = tf.concat(values=(self.previous, x), axis=0)
def first_delta():
assignment = self.has_previous.assign(
value=tf_util.constant(value=True, dtype='bool'), read_value=False
)
with tf.control_dependencies(control_inputs=(assignment,)):
return tf.concat(values=(tf.zeros_like(input=x[:1]), x[1:] - x[:-1]), axis=0)
def later_delta():
return x - extended[:-1]
with tf.control_dependencies(control_inputs=assertions):
empty_batch = tf.math.equal(x=tf.shape(input=x)[0], y=0)
pred = tf.math.logical_or(x=self.has_previous, y=empty_batch)
delta = tf.cond(pred=pred, true_fn=later_delta, false_fn=first_delta)
assignment = self.previous.assign(value=extended[-1:], read_value=False)
with tf.control_dependencies(control_inputs=(assignment,)):
if self.concatenate is False:
return tf_util.identity(input=delta)
else:
return tf.concat(values=(x, delta), axis=(self.concatenate + 1))
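# Illustrative sketch (not part of the module): the stateful delta computation
# that Deltafier.apply implements above, mirrored in plain Python for a
# batch-size-1 stream. The sample values are placeholders.
def _example_deltafier_stream():
    previous, has_previous, deltas = None, False, []
    for x in ([1.0, 4.0], [3.0, 5.0], [2.0, 9.0]):
        if not has_previous:
            delta = [0.0] * len(x)  # first input: zero delta, like first_delta()
            has_previous = True
        else:
            delta = [a - b for a, b in zip(x, previous)]  # like later_delta()
        previous = x
        deltas.append(delta)
    return deltas  # [[0.0, 0.0], [2.0, 1.0], [-1.0, 4.0]]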
class Image(Layer):
"""
Image preprocessing layer (specification key: `image`).
Args:
height (int): Height of resized image
(<span style="color:#00C000"><b>default</b></span>: no resizing or relative to width).
width (int): Width of resized image
(<span style="color:#00C000"><b>default</b></span>: no resizing or relative to height).
grayscale (bool | iter[float]): Turn into grayscale image, optionally using given weights
(<span style="color:#00C000"><b>default</b></span>: false).
name (string): Layer name
(<span style="color:#00C000"><b>default</b></span>: internally chosen).
input_spec (specification): <span style="color:#00C000"><b>internal use</b></span>.
"""
def __init__(self, *, height=None, width=None, grayscale=False, name=None, input_spec=None):
self.height = height
self.width = width
self.grayscale = grayscale
super().__init__(name=name, input_spec=input_spec)
if height is not None:
self.architecture_kwargs['height'] = str(height)
if width is not None:
self.architecture_kwargs['width'] = str(width)
if grayscale:
self.architecture_kwargs['grayscale'] = str(grayscale)
def default_input_spec(self):
return TensorSpec(type='float', shape=(0, 0, 0))
def output_spec(self):
output_spec = super().output_spec()
if self.height is not None:
if self.width is None:
self.width = round(self.height * output_spec.shape[1] / output_spec.shape[0])
output_spec.shape = (self.height, self.width, output_spec.shape[2])
elif self.width is not None:
self.height = round(self.width * output_spec.shape[0] / output_spec.shape[1])
output_spec.shape = (self.height, self.width, output_spec.shape[2])
if not isinstance(self.grayscale, bool) or self.grayscale:
output_spec.shape = output_spec.shape[:2] + (1,)
return output_spec
@tf_function(num_args=1)
def apply(self, *, x):
if self.height is not None:
x = tf.image.resize(images=x, size=(self.height, self.width))
if not isinstance(self.grayscale, bool):
weights = tf_util.constant(
value=self.grayscale, dtype='float', shape=(1, 1, 1, len(self.grayscale))
)
x = tf.reduce_sum(input_tensor=(x * weights), axis=3, keepdims=True)
elif self.grayscale:
x = tf.image.rgb_to_grayscale(images=x)
return x
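# Illustrative sketch (not part of the module): the relative-size rule in
# Image.output_spec above. If only height is given, width is derived from the
# input aspect ratio (and vice versa), and grayscale collapses the colour
# channels. The 210x160x3 input and height=84 are placeholder values.
def _example_image_output_shape(height=84, input_shape=(210, 160, 3), grayscale=True):
    width = round(height * input_shape[1] / input_shape[0])  # 84 * 160 / 210 -> 64
    channels = 1 if grayscale else input_shape[2]
    return (height, width, channels)  # -> (84, 64, 1)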
class Sequence(PreprocessingLayer):
"""
Sequence layer stacking the current and previous inputs; can only be used as preprocessing
layer (specification key: `sequence`).
Args:
length (int > 0): Number of inputs to concatenate
(<span style="color:#C00000"><b>required</b></span>).
axis (int >= 0): Concatenation axis, excluding batch axis
(<span style="color:#00C000"><b>default</b></span>: last axis).
concatenate (bool): Whether to concatenate inputs at given axis, otherwise introduce new
sequence axis
(<span style="color:#00C000"><b>default</b></span>: true).
name (string): Layer name
(<span style="color:#00C000"><b>default</b></span>: internally chosen).
input_spec (specification): <span style="color:#00C000"><b>internal use</b></span>.
"""
def __init__(self, *, length, axis=-1, concatenate=True, name=None, input_spec=None):
assert length > 1
self.length = length
self.axis = axis
self.concatenate = concatenate
super().__init__(name=name, input_spec=input_spec)
self.architecture_kwargs['length'] = str(length)
self.architecture_kwargs['axis'] = str(axis)
self.architecture_kwargs['concatenate'] = str(concatenate)
def output_spec(self):
output_spec = super().output_spec()
if self.concatenate:
if self.axis == -1:
self.axis = len(output_spec.shape) - 1
output_spec.shape = tuple(
self.length * dims if axis == self.axis else dims
for axis, dims in enumerate(output_spec.shape)
)
else:
if self.axis == -1:
self.axis = len(output_spec.shape)
shape = output_spec.shape
output_spec.shape = shape[:self.axis] + (self.length,) + shape[self.axis:]
return output_spec
def initialize(self):
super().initialize()
self.has_previous = self.variable(
name='has-previous', spec=TensorSpec(type='bool', shape=()), initializer='zeros',
is_trainable=False, is_saved=False
)
shape = self.input_spec.shape
if self.concatenate:
shape = (1,) + shape[:self.axis] + (shape[self.axis] * (self.length - 1),) + \
shape[self.axis + 1:]
else:
shape = (1,) + shape[:self.axis] + (self.length - 1,) + shape[self.axis:]
self.previous = self.variable(
name='previous', spec=TensorSpec(type='float', shape=shape), initializer='zeros',
is_trainable=False, is_saved=False
)
@tf_function(num_args=0)
def reset(self):
false = tf_util.constant(value=False, dtype='bool')
assignment = self.has_previous.assign(value=false, read_value=False)
with tf.control_dependencies(control_inputs=(assignment,)):
return tf_util.identity(input=false)
@tf_function(num_args=1)
def apply(self, *, x):
assertions = list()
if self.config.create_tf_assertions:
assertions.append(tf.debugging.assert_less_equal(
x=tf.shape(input=x)[0], y=1,
message="Sequence preprocessor currently not compatible with batched Agent.act."
))
with tf.control_dependencies(control_inputs=assertions):
def empty_batch():
if self.concatenate:
current = x
else:
current = tf.expand_dims(input=x, axis=(self.axis + 1))
multiples = tuple(
self.length if dims == self.axis + 1 else 1
for dims in range(self.output_spec().rank + 1)
)
return tf.tile(input=current, multiples=multiples)
def not_empty_batch():
def first_timestep():
assignment = self.has_previous.assign(
value=tf_util.constant(value=True, dtype='bool'), read_value=False
)
with tf.control_dependencies(control_inputs=(assignment,)):
if self.concatenate:
current = x
else:
current = tf.expand_dims(input=x, axis=(self.axis + 1))
multiples = tuple(
self.length if dims == self.axis + 1 else 1
for dims in range(self.output_spec().rank + 1)
)
return tf.tile(input=current, multiples=multiples)
def other_timesteps():
if self.concatenate:
current = x
else:
current = tf.expand_dims(input=x, axis=(self.axis + 1))
return tf.concat(values=(self.previous, current), axis=(self.axis + 1))
xs = tf.cond(
pred=self.has_previous, true_fn=other_timesteps, false_fn=first_timestep
)
if self.concatenate:
begin = tuple(
self.input_spec.shape[dims - 1] if dims == self.axis + 1 else 0
for dims in range(self.output_spec().rank + 1)
)
else:
begin = tuple(
1 if dims == self.axis + 1 else 0
for dims in range(self.output_spec().rank + 1)
)
assignment = self.previous.assign(
value=tf.slice(input_=xs, begin=begin, size=self.previous.shape),
read_value=False
)
with tf.control_dependencies(control_inputs=(assignment,)):
return tf_util.identity(input=xs)
is_empty_batch = tf.math.equal(x=tf.shape(input=x)[0], y=0)
return tf.cond(pred=is_empty_batch, true_fn=empty_batch, false_fn=not_empty_batch)
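# Illustrative sketch (added comment, not part of the original module), mirroring the
# shape arithmetic of Sequence.output_spec with plain TensorFlow: for length=3 and a
# per-timestep input of shape (4,), concatenate=True multiplies the chosen axis by
# length, while concatenate=False introduces a new trailing sequence axis, e.g.
#     x = tf.ones(shape=(1, 4))                 # batch of one input
#     tf.concat(values=[x, x, x], axis=1)       # shape (1, 12)
#     tf.stack(values=[x, x, x], axis=2)        # shape (1, 4, 3)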
|
{
"content_hash": "d60b04ec81df04f04715699fc35b4b1e",
"timestamp": "",
"source": "github",
"line_count": 402,
"max_line_length": 99,
"avg_line_length": 39.25373134328358,
"alnum_prop": 0.5678073510773131,
"repo_name": "reinforceio/tensorforce",
"id": "55d74d849c1295281a374cfda0be60e6d6c62b58",
"size": "16464",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorforce/core/layers/preprocessing.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "869657"
}
],
"symlink_target": ""
}
|
import six
from heat.common import exception
from heat.common.i18n import _
from heat.engine import attributes
from heat.engine.clients import progress
from heat.engine import constraints
from heat.engine import properties
from heat.engine import resource
from heat.engine.resources.openstack.neutron import neutron
from heat.engine import support
class HealthMonitor(neutron.NeutronResource):
"""A resource for managing health monitors for loadbalancers in Neutron."""
PROPERTIES = (
DELAY, TYPE, MAX_RETRIES, TIMEOUT, ADMIN_STATE_UP,
HTTP_METHOD, EXPECTED_CODES, URL_PATH,
) = (
'delay', 'type', 'max_retries', 'timeout', 'admin_state_up',
'http_method', 'expected_codes', 'url_path',
)
ATTRIBUTES = (
ADMIN_STATE_UP_ATTR, DELAY_ATTR, EXPECTED_CODES_ATTR, HTTP_METHOD_ATTR,
MAX_RETRIES_ATTR, TIMEOUT_ATTR, TYPE_ATTR, URL_PATH_ATTR, TENANT_ID,
) = (
'admin_state_up', 'delay', 'expected_codes', 'http_method',
'max_retries', 'timeout', 'type', 'url_path', 'tenant_id',
)
properties_schema = {
DELAY: properties.Schema(
properties.Schema.INTEGER,
_('The minimum time in seconds between regular connections of '
'the member.'),
required=True,
update_allowed=True
),
TYPE: properties.Schema(
properties.Schema.STRING,
_('One of predefined health monitor types.'),
required=True,
constraints=[
constraints.AllowedValues(['PING', 'TCP', 'HTTP', 'HTTPS']),
]
),
MAX_RETRIES: properties.Schema(
properties.Schema.INTEGER,
_('Number of permissible connection failures before changing the '
'member status to INACTIVE.'),
required=True,
update_allowed=True
),
TIMEOUT: properties.Schema(
properties.Schema.INTEGER,
_('Maximum number of seconds for a monitor to wait for a '
'connection to be established before it times out.'),
required=True,
update_allowed=True
),
ADMIN_STATE_UP: properties.Schema(
properties.Schema.BOOLEAN,
_('The administrative state of the health monitor.'),
default=True,
update_allowed=True
),
HTTP_METHOD: properties.Schema(
properties.Schema.STRING,
_('The HTTP method used for requests by the monitor of type '
'HTTP.'),
update_allowed=True
),
EXPECTED_CODES: properties.Schema(
properties.Schema.STRING,
_('The list of HTTP status codes expected in response from the '
'member to declare it healthy.'),
update_allowed=True
),
URL_PATH: properties.Schema(
properties.Schema.STRING,
_('The HTTP path used in the HTTP request used by the monitor to '
'test a member health.'),
update_allowed=True
),
}
attributes_schema = {
ADMIN_STATE_UP_ATTR: attributes.Schema(
_('The administrative state of this health monitor.'),
type=attributes.Schema.STRING
),
DELAY_ATTR: attributes.Schema(
_('The minimum time in seconds between regular connections '
'of the member.'),
type=attributes.Schema.STRING
),
EXPECTED_CODES_ATTR: attributes.Schema(
_('The list of HTTP status codes expected in response '
'from the member to declare it healthy.'),
type=attributes.Schema.LIST
),
HTTP_METHOD_ATTR: attributes.Schema(
_('The HTTP method used for requests by the monitor of '
'type HTTP.'),
type=attributes.Schema.STRING
),
MAX_RETRIES_ATTR: attributes.Schema(
_('Number of permissible connection failures before changing '
'the member status to INACTIVE.'),
type=attributes.Schema.STRING
),
TIMEOUT_ATTR: attributes.Schema(
_('Maximum number of seconds for a monitor to wait for a '
'connection to be established before it times out.'),
type=attributes.Schema.STRING
),
TYPE_ATTR: attributes.Schema(
_('One of predefined health monitor types.'),
type=attributes.Schema.STRING
),
URL_PATH_ATTR: attributes.Schema(
_('The HTTP path used in the HTTP request used by the monitor '
'to test a member health.'),
type=attributes.Schema.STRING
),
TENANT_ID: attributes.Schema(
_('Tenant owning the health monitor.'),
type=attributes.Schema.STRING
),
}
def handle_create(self):
properties = self.prepare_properties(
self.properties,
self.physical_resource_name())
health_monitor = self.client().create_health_monitor(
{'health_monitor': properties})['health_monitor']
self.resource_id_set(health_monitor['id'])
def _show_resource(self):
return self.client().show_health_monitor(
self.resource_id)['health_monitor']
def handle_update(self, json_snippet, tmpl_diff, prop_diff):
if prop_diff:
self.client().update_health_monitor(
self.resource_id, {'health_monitor': prop_diff})
def handle_delete(self):
try:
self.client().delete_health_monitor(self.resource_id)
except Exception as ex:
self.client_plugin().ignore_not_found(ex)
else:
return True
class Pool(neutron.NeutronResource):
"""A resource for managing load balancer pools in Neutron."""
PROPERTIES = (
PROTOCOL, SUBNET_ID, SUBNET, LB_METHOD, NAME, DESCRIPTION,
ADMIN_STATE_UP, VIP, MONITORS, PROVIDER,
) = (
'protocol', 'subnet_id', 'subnet', 'lb_method', 'name', 'description',
'admin_state_up', 'vip', 'monitors', 'provider',
)
_VIP_KEYS = (
VIP_NAME, VIP_DESCRIPTION, VIP_SUBNET, VIP_ADDRESS,
VIP_CONNECTION_LIMIT, VIP_PROTOCOL_PORT,
VIP_SESSION_PERSISTENCE, VIP_ADMIN_STATE_UP,
) = (
'name', 'description', 'subnet', 'address',
'connection_limit', 'protocol_port',
'session_persistence', 'admin_state_up',
)
_VIP_SESSION_PERSISTENCE_KEYS = (
VIP_SESSION_PERSISTENCE_TYPE, VIP_SESSION_PERSISTENCE_COOKIE_NAME,
) = (
'type', 'cookie_name',
)
ATTRIBUTES = (
ADMIN_STATE_UP_ATTR, NAME_ATTR, PROTOCOL_ATTR, SUBNET_ID_ATTR,
LB_METHOD_ATTR, DESCRIPTION_ATTR, TENANT_ID, VIP_ATTR, PROVIDER_ATTR,
) = (
'admin_state_up', 'name', 'protocol', 'subnet_id',
'lb_method', 'description', 'tenant_id', 'vip', 'provider',
)
properties_schema = {
PROTOCOL: properties.Schema(
properties.Schema.STRING,
_('Protocol for balancing.'),
required=True,
constraints=[
constraints.AllowedValues(['TCP', 'HTTP', 'HTTPS']),
]
),
SUBNET_ID: properties.Schema(
properties.Schema.STRING,
support_status=support.SupportStatus(
status=support.HIDDEN,
version='5.0.0',
message=_('Use property %s.') % SUBNET,
previous_status=support.SupportStatus(
status=support.DEPRECATED,
version='2014.2'
)
),
constraints=[
constraints.CustomConstraint('neutron.subnet')
]
),
SUBNET: properties.Schema(
properties.Schema.STRING,
_('The subnet for the port on which the members '
'of the pool will be connected.'),
support_status=support.SupportStatus(version='2014.2'),
constraints=[
constraints.CustomConstraint('neutron.subnet')
]
),
LB_METHOD: properties.Schema(
properties.Schema.STRING,
_('The algorithm used to distribute load between the members of '
'the pool.'),
required=True,
constraints=[
constraints.AllowedValues(['ROUND_ROBIN',
'LEAST_CONNECTIONS', 'SOURCE_IP']),
],
update_allowed=True
),
NAME: properties.Schema(
properties.Schema.STRING,
_('Name of the pool.')
),
DESCRIPTION: properties.Schema(
properties.Schema.STRING,
_('Description of the pool.'),
update_allowed=True
),
ADMIN_STATE_UP: properties.Schema(
properties.Schema.BOOLEAN,
_('The administrative state of this pool.'),
default=True,
update_allowed=True
),
PROVIDER: properties.Schema(
properties.Schema.STRING,
_('LBaaS provider to implement this load balancer instance.'),
support_status=support.SupportStatus(version='5.0.0'),
),
VIP: properties.Schema(
properties.Schema.MAP,
_('IP address and port of the pool.'),
schema={
VIP_NAME: properties.Schema(
properties.Schema.STRING,
_('Name of the vip.')
),
VIP_DESCRIPTION: properties.Schema(
properties.Schema.STRING,
_('Description of the vip.')
),
VIP_SUBNET: properties.Schema(
properties.Schema.STRING,
_('Subnet of the vip.'),
constraints=[
constraints.CustomConstraint('neutron.subnet')
]
),
VIP_ADDRESS: properties.Schema(
properties.Schema.STRING,
_('IP address of the vip.'),
constraints=[
constraints.CustomConstraint('ip_addr')
]
),
VIP_CONNECTION_LIMIT: properties.Schema(
properties.Schema.INTEGER,
_('The maximum number of connections per second '
'allowed for the vip.')
),
VIP_PROTOCOL_PORT: properties.Schema(
properties.Schema.INTEGER,
_('TCP port on which to listen for client traffic '
'that is associated with the vip address.'),
required=True
),
VIP_SESSION_PERSISTENCE: properties.Schema(
properties.Schema.MAP,
_('Configuration of session persistence.'),
schema={
VIP_SESSION_PERSISTENCE_TYPE: properties.Schema(
properties.Schema.STRING,
_('Method of implementation of session '
'persistence feature.'),
required=True,
constraints=[constraints.AllowedValues(
['SOURCE_IP', 'HTTP_COOKIE', 'APP_COOKIE']
)]
),
VIP_SESSION_PERSISTENCE_COOKIE_NAME: properties.Schema(
properties.Schema.STRING,
_('Name of the cookie, '
'required if type is APP_COOKIE.')
)
}
),
VIP_ADMIN_STATE_UP: properties.Schema(
properties.Schema.BOOLEAN,
_('The administrative state of this vip.'),
default=True
),
},
required=True
),
MONITORS: properties.Schema(
properties.Schema.LIST,
_('List of health monitors associated with the pool.'),
default=[],
update_allowed=True
),
}
attributes_schema = {
ADMIN_STATE_UP_ATTR: attributes.Schema(
_('The administrative state of this pool.'),
type=attributes.Schema.STRING
),
NAME_ATTR: attributes.Schema(
_('Name of the pool.'),
type=attributes.Schema.STRING
),
PROTOCOL_ATTR: attributes.Schema(
_('Protocol to balance.'),
type=attributes.Schema.STRING
),
SUBNET_ID_ATTR: attributes.Schema(
_('The subnet for the port on which the members of the pool '
'will be connected.'),
type=attributes.Schema.STRING
),
LB_METHOD_ATTR: attributes.Schema(
_('The algorithm used to distribute load between the members '
'of the pool.'),
type=attributes.Schema.STRING
),
DESCRIPTION_ATTR: attributes.Schema(
_('Description of the pool.'),
type=attributes.Schema.STRING
),
TENANT_ID: attributes.Schema(
_('Tenant owning the pool.'),
type=attributes.Schema.STRING
),
VIP_ATTR: attributes.Schema(
_('Vip associated with the pool.'),
type=attributes.Schema.MAP
),
PROVIDER_ATTR: attributes.Schema(
_('Provider implementing this load balancer instance.'),
support_status=support.SupportStatus(version='5.0.0'),
type=attributes.Schema.STRING,
),
}
def translation_rules(self):
return [
properties.TranslationRule(
self.properties,
properties.TranslationRule.REPLACE,
[self.SUBNET],
value_path=[self.SUBNET_ID]
)
]
def validate(self):
res = super(Pool, self).validate()
if res:
return res
self._validate_depr_property_required(
self.properties, self.SUBNET, self.SUBNET_ID)
session_p = self.properties[self.VIP].get(self.VIP_SESSION_PERSISTENCE)
if session_p is None:
# session persistence is not configured, skip validation
return
persistence_type = session_p[self.VIP_SESSION_PERSISTENCE_TYPE]
if persistence_type == 'APP_COOKIE':
if session_p.get(self.VIP_SESSION_PERSISTENCE_COOKIE_NAME):
return
msg = _('Property cookie_name is required, when '
'session_persistence type is set to APP_COOKIE.')
raise exception.StackValidationFailed(message=msg)
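    # Illustrative examples (added comment, not upstream code; the cookie name is
    # made up): a vip session_persistence of {'type': 'SOURCE_IP'} or
    # {'type': 'APP_COOKIE', 'cookie_name': 'session_id'} passes the validation
    # above, while {'type': 'APP_COOKIE'} without a cookie_name raises
    # StackValidationFailed.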
def handle_create(self):
properties = self.prepare_properties(
self.properties,
self.physical_resource_name())
self.client_plugin().resolve_subnet(
properties, self.SUBNET, 'subnet_id')
vip_properties = properties.pop(self.VIP)
monitors = properties.pop(self.MONITORS)
pool = self.client().create_pool({'pool': properties})['pool']
self.resource_id_set(pool['id'])
for monitor in monitors:
self.client().associate_health_monitor(
pool['id'], {'health_monitor': {'id': monitor}})
vip_arguments = self.prepare_properties(
vip_properties,
'%s.vip' % (self.name,))
session_p = vip_arguments.get(self.VIP_SESSION_PERSISTENCE)
if session_p is not None:
prepared_props = self.prepare_properties(session_p, None)
vip_arguments['session_persistence'] = prepared_props
vip_arguments['protocol'] = self.properties[self.PROTOCOL]
if vip_arguments.get(self.VIP_SUBNET) is None:
vip_arguments['subnet_id'] = properties[self.SUBNET_ID]
else:
vip_arguments['subnet_id'] = self.client_plugin().resolve_subnet(
vip_arguments, self.VIP_SUBNET, 'subnet_id')
vip_arguments['pool_id'] = pool['id']
vip = self.client().create_vip({'vip': vip_arguments})['vip']
self.metadata_set({'vip': vip['id']})
def _show_resource(self):
return self.client().show_pool(self.resource_id)['pool']
def check_create_complete(self, data):
attributes = self._show_resource()
status = attributes['status']
if status == 'PENDING_CREATE':
return False
elif status == 'ACTIVE':
vip_attributes = self.client().show_vip(
self.metadata_get()['vip'])['vip']
vip_status = vip_attributes['status']
if vip_status == 'PENDING_CREATE':
return False
if vip_status == 'ACTIVE':
return True
if vip_status == 'ERROR':
raise exception.ResourceInError(
resource_status=vip_status,
status_reason=_('error in vip'))
raise exception.ResourceUnknownStatus(
resource_status=vip_status,
result=_('Pool creation failed due to vip'))
elif status == 'ERROR':
raise exception.ResourceInError(
resource_status=status,
status_reason=_('error in pool'))
else:
raise exception.ResourceUnknownStatus(
resource_status=status,
result=_('Pool creation failed'))
def handle_update(self, json_snippet, tmpl_diff, prop_diff):
if prop_diff:
if self.MONITORS in prop_diff:
monitors = set(prop_diff.pop(self.MONITORS))
old_monitors = set(self.properties[self.MONITORS])
for monitor in old_monitors - monitors:
self.client().disassociate_health_monitor(
self.resource_id, monitor)
for monitor in monitors - old_monitors:
self.client().associate_health_monitor(
self.resource_id, {'health_monitor': {'id': monitor}})
if prop_diff:
self.client().update_pool(self.resource_id,
{'pool': prop_diff})
def _resolve_attribute(self, name):
if name == self.VIP_ATTR:
return self.client().show_vip(self.metadata_get()['vip'])['vip']
return super(Pool, self)._resolve_attribute(name)
def handle_delete(self):
if not self.resource_id:
prg = progress.PoolDeleteProgress(True)
return prg
prg = progress.PoolDeleteProgress()
if not self.metadata_get():
prg.vip['delete_called'] = True
prg.vip['deleted'] = True
return prg
def _delete_vip(self):
return self._not_found_in_call(
self.client().delete_vip, self.metadata_get()['vip'])
def _check_vip_deleted(self):
return self._not_found_in_call(
self.client().show_vip, self.metadata_get()['vip'])
def _delete_pool(self):
return self._not_found_in_call(
self.client().delete_pool, self.resource_id)
def check_delete_complete(self, prg):
if not prg.vip['delete_called']:
prg.vip['deleted'] = self._delete_vip()
prg.vip['delete_called'] = True
return False
if not prg.vip['deleted']:
prg.vip['deleted'] = self._check_vip_deleted()
return False
if not prg.pool['delete_called']:
prg.pool['deleted'] = self._delete_pool()
prg.pool['delete_called'] = True
return prg.pool['deleted']
if not prg.pool['deleted']:
prg.pool['deleted'] = super(Pool, self).check_delete_complete(True)
return prg.pool['deleted']
return True
class PoolMember(neutron.NeutronResource):
"""A resource to handle loadbalancer members."""
support_status = support.SupportStatus(version='2014.1')
PROPERTIES = (
POOL_ID, ADDRESS, PROTOCOL_PORT, WEIGHT, ADMIN_STATE_UP,
) = (
'pool_id', 'address', 'protocol_port', 'weight', 'admin_state_up',
)
ATTRIBUTES = (
ADMIN_STATE_UP_ATTR, TENANT_ID, WEIGHT_ATTR, ADDRESS_ATTR,
POOL_ID_ATTR, PROTOCOL_PORT_ATTR,
) = (
'admin_state_up', 'tenant_id', 'weight', 'address',
'pool_id', 'protocol_port',
)
properties_schema = {
POOL_ID: properties.Schema(
properties.Schema.STRING,
_('The ID of the load balancing pool.'),
required=True,
update_allowed=True
),
ADDRESS: properties.Schema(
properties.Schema.STRING,
_('IP address of the pool member on the pool network.'),
required=True,
constraints=[
constraints.CustomConstraint('ip_addr')
]
),
PROTOCOL_PORT: properties.Schema(
properties.Schema.INTEGER,
_('TCP port on which the pool member listens for requests or '
'connections.'),
required=True,
constraints=[
constraints.Range(0, 65535),
]
),
WEIGHT: properties.Schema(
properties.Schema.INTEGER,
_('Weight of pool member in the pool (default to 1).'),
constraints=[
constraints.Range(0, 256),
],
update_allowed=True
),
ADMIN_STATE_UP: properties.Schema(
properties.Schema.BOOLEAN,
_('The administrative state of the pool member.'),
default=True
),
}
attributes_schema = {
ADMIN_STATE_UP_ATTR: attributes.Schema(
_('The administrative state of this pool member.'),
type=attributes.Schema.STRING
),
TENANT_ID: attributes.Schema(
_('Tenant owning the pool member.'),
type=attributes.Schema.STRING
),
WEIGHT_ATTR: attributes.Schema(
_('Weight of the pool member in the pool.'),
type=attributes.Schema.STRING
),
ADDRESS_ATTR: attributes.Schema(
_('IP address of the pool member.'),
type=attributes.Schema.STRING
),
POOL_ID_ATTR: attributes.Schema(
_('The ID of the load balancing pool.'),
type=attributes.Schema.STRING
),
PROTOCOL_PORT_ATTR: attributes.Schema(
_('TCP port on which the pool member listens for requests or '
'connections.'),
type=attributes.Schema.STRING
),
}
def handle_create(self):
pool = self.properties[self.POOL_ID]
protocol_port = self.properties[self.PROTOCOL_PORT]
address = self.properties[self.ADDRESS]
admin_state_up = self.properties[self.ADMIN_STATE_UP]
weight = self.properties[self.WEIGHT]
params = {
'pool_id': pool,
'address': address,
'protocol_port': protocol_port,
'admin_state_up': admin_state_up
}
if weight is not None:
params['weight'] = weight
member = self.client().create_member({'member': params})['member']
self.resource_id_set(member['id'])
def _show_resource(self):
return self.client().show_member(self.resource_id)['member']
def handle_update(self, json_snippet, tmpl_diff, prop_diff):
if prop_diff:
self.client().update_member(
self.resource_id, {'member': prop_diff})
def handle_delete(self):
try:
self.client().delete_member(self.resource_id)
except Exception as ex:
self.client_plugin().ignore_not_found(ex)
else:
return True
class LoadBalancer(resource.Resource):
"""A resource to link a neutron pool with servers."""
PROPERTIES = (
POOL_ID, PROTOCOL_PORT, MEMBERS,
) = (
'pool_id', 'protocol_port', 'members',
)
properties_schema = {
POOL_ID: properties.Schema(
properties.Schema.STRING,
_('The ID of the load balancing pool.'),
required=True,
update_allowed=True
),
PROTOCOL_PORT: properties.Schema(
properties.Schema.INTEGER,
_('Port number on which the servers are running on the members.'),
required=True,
constraints=[
constraints.Range(0, 65535),
]
),
MEMBERS: properties.Schema(
properties.Schema.LIST,
_('The list of Nova server IDs load balanced.'),
update_allowed=True
),
}
default_client_name = 'neutron'
def handle_create(self):
pool = self.properties[self.POOL_ID]
protocol_port = self.properties[self.PROTOCOL_PORT]
for member in self.properties[self.MEMBERS] or []:
address = self.client_plugin('nova').server_to_ipaddress(member)
lb_member = self.client().create_member({
'member': {
'pool_id': pool,
'address': address,
'protocol_port': protocol_port}})['member']
self.data_set(member, lb_member['id'])
def handle_update(self, json_snippet, tmpl_diff, prop_diff):
new_props = json_snippet.properties(self.properties_schema,
self.context)
# Valid use cases are:
# - Membership controlled by members property in template
# - Empty members property in template; membership controlled by
# "updates" triggered from autoscaling group.
# Mixing the two will lead to undefined behaviour.
if (self.MEMBERS in prop_diff and
(self.properties[self.MEMBERS] is not None or
new_props[self.MEMBERS] is not None)):
members = set(new_props[self.MEMBERS] or [])
rd_members = self.data()
old_members = set(six.iterkeys(rd_members))
for member in old_members - members:
member_id = rd_members[member]
try:
self.client().delete_member(member_id)
except Exception as ex:
self.client_plugin().ignore_not_found(ex)
self.data_delete(member)
pool = self.properties[self.POOL_ID]
protocol_port = self.properties[self.PROTOCOL_PORT]
for member in members - old_members:
address = self.client_plugin('nova').server_to_ipaddress(
member)
lb_member = self.client().create_member({
'member': {
'pool_id': pool,
'address': address,
'protocol_port': protocol_port}})['member']
self.data_set(member, lb_member['id'])
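    # Illustrative note (added comment; server names and member ids are made up):
    # given stored data {'srv-a': 'm1', 'srv-b': 'm2'} and a new members list
    # ['srv-b', 'srv-c'], the set arithmetic in handle_update above deletes member
    # 'm1', creates a new member for 'srv-c', and leaves 'srv-b' untouched.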
def handle_delete(self):
# FIXME(pshchelo): this deletes members in a tight loop,
# so is prone to OverLimit bug similar to LP 1265937
for member, member_id in self.data().items():
try:
self.client().delete_member(member_id)
except Exception as ex:
self.client_plugin().ignore_not_found(ex)
self.data_delete(member)
def resource_mapping():
return {
'OS::Neutron::HealthMonitor': HealthMonitor,
'OS::Neutron::Pool': Pool,
'OS::Neutron::PoolMember': PoolMember,
'OS::Neutron::LoadBalancer': LoadBalancer,
}
|
{
"content_hash": "ec6e0619f80fe500e64d643342240dac",
"timestamp": "",
"source": "github",
"line_count": 756,
"max_line_length": 79,
"avg_line_length": 36.92857142857143,
"alnum_prop": 0.5412278816534136,
"repo_name": "pratikmallya/heat",
"id": "27945d3bec706f2054a5f0459a75b6e7864778e0",
"size": "28493",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "heat/engine/resources/openstack/neutron/loadbalancer.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "6929579"
},
{
"name": "Shell",
"bytes": "33092"
}
],
"symlink_target": ""
}
|
# Copyright 2017 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import unicode_literals
import mock
import pytest
from google.api_core import path_template
@pytest.mark.parametrize('tmpl, args, kwargs, expected_result', [
# Basic positional params
['/v1/*', ['a'], {}, '/v1/a'],
['/v1/**', ['a/b'], {}, '/v1/a/b'],
['/v1/*/*', ['a', 'b'], {}, '/v1/a/b'],
['/v1/*/*/**', ['a', 'b', 'c/d'], {}, '/v1/a/b/c/d'],
# Basic named params
['/v1/{name}', [], {'name': 'parent'}, '/v1/parent'],
['/v1/{name=**}', [], {'name': 'parent/child'}, '/v1/parent/child'],
# Named params with a sub-template
['/v1/{name=parent/*}', [], {'name': 'parent/child'}, '/v1/parent/child'],
['/v1/{name=parent/**}', [], {'name': 'parent/child/object'},
'/v1/parent/child/object'],
# Combining positional and named params
['/v1/*/{name}', ['a'], {'name': 'parent'}, '/v1/a/parent'],
['/v1/{name}/*', ['a'], {'name': 'parent'}, '/v1/parent/a'],
['/v1/{parent}/*/{child}/*', ['a', 'b'],
{'parent': 'thor', 'child': 'thorson'}, '/v1/thor/a/thorson/b'],
['/v1/{name}/**', ['a/b'], {'name': 'parent'}, '/v1/parent/a/b'],
# Combining positional and named params with sub-templates.
['/v1/{name=parent/*}/*', ['a'], {'name': 'parent/child'},
'/v1/parent/child/a'],
['/v1/*/{name=parent/**}', ['a'], {'name': 'parent/child/object'},
'/v1/a/parent/child/object'],
])
def test_expand_success(tmpl, args, kwargs, expected_result):
result = path_template.expand(tmpl, *args, **kwargs)
assert result == expected_result
assert path_template.validate(tmpl, result)
@pytest.mark.parametrize('tmpl, args, kwargs, exc_match', [
# Missing positional arg.
['v1/*', [], {}, 'Positional'],
# Missing named arg.
['v1/{name}', [], {}, 'Named'],
])
def test_expanded_failure(tmpl, args, kwargs, exc_match):
with pytest.raises(ValueError, match=exc_match):
path_template.expand(tmpl, *args, **kwargs)
@pytest.mark.parametrize('tmpl, path', [
# Single segment template, but multi segment value
['v1/*', 'v1/a/b'],
['v1/*/*', 'v1/a/b/c'],
    # Single segment named template, but multi segment value
['v1/{name}', 'v1/a/b'],
['v1/{name}/{value}', 'v1/a/b/c'],
# Named value with a sub-template but invalid value
['v1/{name=parent/*}', 'v1/grandparent/child'],
])
def test_validate_failure(tmpl, path):
assert not path_template.validate(tmpl, path)
def test__expand_variable_match_unexpected():
match = mock.Mock(spec=['group'])
match.group.return_value = None
with pytest.raises(ValueError, match='Unknown'):
path_template._expand_variable_match([], {}, match)
def test__replace_variable_with_pattern():
match = mock.Mock(spec=['group'])
match.group.return_value = None
with pytest.raises(ValueError, match='Unknown'):
path_template._replace_variable_with_pattern(match)
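# Summary of the template syntax exercised above (added comment): '*' matches exactly
# one path segment, '**' matches multiple segments (the rest of the path), '{name}'
# captures a named single segment, and '{name=sub/template}' constrains the named
# variable to a sub-template such as 'parent/*' or 'parent/**'.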
|
{
"content_hash": "134fb919df471daed1a6d623ad9bca6b",
"timestamp": "",
"source": "github",
"line_count": 90,
"max_line_length": 78,
"avg_line_length": 38.46666666666667,
"alnum_prop": 0.6091854419410745,
"repo_name": "jonparrott/gcloud-python",
"id": "267a048af4cf8811a6724188af8040b633958f06",
"size": "3462",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "api_core/tests/unit/test_path_template.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "3366"
},
{
"name": "PowerShell",
"bytes": "7195"
},
{
"name": "Protocol Buffer",
"bytes": "62009"
},
{
"name": "Python",
"bytes": "3459300"
},
{
"name": "Shell",
"bytes": "7548"
}
],
"symlink_target": ""
}
|
import sqlalchemy.types as types
import json
from ggrc.utils import as_json
from .exceptions import ValidationError
class JsonType(types.TypeDecorator):
'''
Marshals Python structures to and from JSON stored
as Text in the db
'''
# FIXME: Change this to a larger column type and fix validation below
impl = types.Text
def process_result_value(self, value, dialect):
if value is not None:
value = json.loads(value)
return value
def process_bind_param(self, value, dialect):
if value is None or isinstance(value, basestring):
pass
else:
value = as_json(value)
# Detect if the byte-length of the encoded JSON is larger than the
# database "TEXT" column type can handle
if len(value.encode('utf-8')) > 65534:
raise ValidationError("Log record content too long")
return value
class CompressedType(types.TypeDecorator):
'''
Marshals Python structures to and from a compressed pickle format
as LargeBinary in the db
'''
impl = types.LargeBinary(length=16777215)
def process_result_value(self, value, dialect):
import pickle, zlib
if value is not None:
value = pickle.loads(zlib.decompress(value))
return value
def process_bind_param(self, value, dialect):
import pickle, zlib
value = zlib.compress(pickle.dumps(value))
# Detect if the byte-length of the compressed pickle is larger than the
# database "LargeBinary" column type can handle
if len(value) > 16777215:
raise ValidationError("Log record content too long")
return value
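# Illustrative sketch (added for clarity, not part of the original module): the round
# trip performed by CompressedType, written out by hand; the sample payload below is
# made up.
def _compressed_roundtrip_example():
  import pickle, zlib
  original = {'action': 'PUT', 'ids': [1, 2, 3]}
  stored = zlib.compress(pickle.dumps(original))    # what process_bind_param stores
  restored = pickle.loads(zlib.decompress(stored))  # what process_result_value returns
  assert restored == original
  return len(stored)  # must stay <= 16777215, otherwise ValidationError is raised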
|
{
"content_hash": "7b787531b3554afc5a6add1843223b96",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 75,
"avg_line_length": 31.44,
"alnum_prop": 0.7061068702290076,
"repo_name": "hyperNURb/ggrc-core",
"id": "3d055b0209ba1f0a54248382f2c780acb05e4402",
"size": "1812",
"binary": false,
"copies": "4",
"ref": "refs/heads/develop",
"path": "src/ggrc/models/types.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "235811"
},
{
"name": "Cucumber",
"bytes": "140478"
},
{
"name": "HTML",
"bytes": "943963"
},
{
"name": "JavaScript",
"bytes": "1205888"
},
{
"name": "Makefile",
"bytes": "5936"
},
{
"name": "Mako",
"bytes": "1720"
},
{
"name": "Python",
"bytes": "1875139"
},
{
"name": "Ruby",
"bytes": "1496"
},
{
"name": "Shell",
"bytes": "11719"
}
],
"symlink_target": ""
}
|
from django.core import mail
from nose.tools import eq_
from kitsune.kbadge.tests import AwardFactory, BadgeFactory
from kitsune.sumo.tests import TestCase
class AwardNotificationTests(TestCase):
def test_notification(self):
# Note: Need to do this import here so the
# notify_award_recipient function handles the
# badge_was_awarded signal. This works fine in production
# because badges gets loaded by django-badger in startup.
from kitsune.kbadge import badges # noqa
new_badge = BadgeFactory()
# Check the mail queue first.
eq_(0, len(mail.outbox))
# Create an award and save it. This triggers the notification.
AwardFactory(description=u'yay!', badge=new_badge)
eq_(1, len(mail.outbox))
# TODO: test contents--not doing that now because it's a
# mockup.
|
{
"content_hash": "c93a784c71a2c11b5c03c417d93d2c60",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 70,
"avg_line_length": 31.321428571428573,
"alnum_prop": 0.677309007981756,
"repo_name": "safwanrahman/kitsune",
"id": "b4cbb744de25f65fb281d4edb5d69382483dd255",
"size": "877",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "kitsune/kbadge/tests/test_awards.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "287433"
},
{
"name": "Groovy",
"bytes": "4221"
},
{
"name": "HTML",
"bytes": "626814"
},
{
"name": "JavaScript",
"bytes": "798581"
},
{
"name": "Python",
"bytes": "2978458"
},
{
"name": "Shell",
"bytes": "12564"
}
],
"symlink_target": ""
}
|
"""Tests the Metadata Server utility."""
import httplib
import json
import mock
import socket
import unittest
from tests.unittest_utils import ForsetiTestCase
from google.cloud.security.common.util import metadata_server
from google.cloud.security.common.util import errors
from StringIO import StringIO
class _MockHttpError(socket.error):
"""Mock Http Error"""
pass
class _MockMetadataServerHttpError(errors.MetadataServerHttpError):
"""Mock MetadataServerHttpError"""
pass
class MetadataServerTest(ForsetiTestCase):
"""Test the Metadata Server util."""
@mock.patch.object(httplib.HTTPConnection, 'request', autospec=True)
def test_issue_http_request_raises_metadatahttperror(self, mock_req):
"""Test _issue_http_request raises an exception with socket.error
in httplib.HTTPConnection.request().
Setup:
* Insist httplib.HTTPConnection.request raises socket.error.
Expected results:
* metadata_server.MetadataServerHttpError is raised and asserted.
"""
mock_req.side_effect = _MockHttpError('Unreachable')
with self.assertRaises(errors.MetadataServerHttpError):
metadata_server._issue_http_request('','',{})
def test_obtain_http_client_returns_httplib_httpconnection_object(self):
"""Test _obtain_http_client returns the proper object.
Expected results:
* Assert a httplib.HTTPConnection object is returned.
"""
returned_object = metadata_server._obtain_http_client()
self.assertIsInstance(returned_object, httplib.HTTPConnection)
@mock.patch.object(metadata_server, '_issue_http_request', autospec=True)
def test_can_reach_metadata_server_with_valid_response(self, mock_meta_req):
"""Test can_reach_metadata_server returns True with a valid response.
Setup:
            * Have httplib return a valid response and response.status.
Expected results:
* A True result.
"""
with mock.patch('httplib.HTTPResponse') as mock_http_resp:
mock_http_resp.return_value.status = httplib.OK
mock_meta_req.side_effect = mock_http_resp
actual_response = metadata_server.can_reach_metadata_server()
self.assertTrue(actual_response)
@mock.patch.object(metadata_server, '_issue_http_request', autospec=True)
def test_can_reach_metadata_server_with_error_response(self, mock_meta_req):
"""Test can_reach_metadata_server returns Falise with an
invalid response.
Setup:
* Have httplib raise socket.error.
Expected results:
* A False result.
"""
mock_meta_req.side_effect = _MockMetadataServerHttpError('Unreachable')
actual_response = metadata_server.can_reach_metadata_server()
self.assertFalse(actual_response)
@mock.patch.object(metadata_server, '_issue_http_request', autospec=True)
def test_get_value_for_attribute_with_exception(self, mock_meta_req):
"""Test get_value_for_attribute returns correctly.
Setup:
* Have _issue_http_request raise errors.MetadataServerHttpError
Expected results:
            * None is returned.
"""
mock_meta_req.side_effect = _MockMetadataServerHttpError('Unreachable')
actual_response = metadata_server.get_value_for_attribute('')
self.assertIsNone(actual_response)
@mock.patch.object(metadata_server, '_issue_http_request', autospec=True)
def test_get_value_for_attribute_with_a_present_attribute(self, mock_meta_req):
"""Test get_value_for_attribute returns correctly.
Setup:
            * Mock out an httplib.HTTPResponse.
* Return that from _issue_http_request.
Expected results:
* A matching string.
"""
mock_response = 'expected_response'
with mock.patch('httplib.HTTPResponse',
mock.mock_open(read_data=mock_response)) as mock_http_resp:
mock_http_resp.return_value.status = httplib.OK
mock_meta_req.side_effect = mock_http_resp
actual_response = metadata_server.get_value_for_attribute('')
self.assertEqual(actual_response, mock_response)
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "32594776dab3d17a257359f51092c507",
"timestamp": "",
"source": "github",
"line_count": 122,
"max_line_length": 83,
"avg_line_length": 35.442622950819676,
"alnum_prop": 0.6667437557816837,
"repo_name": "cschnei3/forseti-security",
"id": "5b29d03f6203a43399716f19dc05cd9a5c8bc393",
"size": "4899",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/common/util/metadata_server_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "5851"
},
{
"name": "Protocol Buffer",
"bytes": "10441"
},
{
"name": "Python",
"bytes": "2038262"
},
{
"name": "Shell",
"bytes": "2737"
}
],
"symlink_target": ""
}
|
from app import db
class VAT(db.Model):
"""Define VAT Model"""
id = db.Column(db.Integer, primary_key=True)
date_created = db.Column(db.DateTime, default=db.func.current_timestamp())
date_modified = db.Column(db.DateTime, default=db.func.current_timestamp(),
onupdate=db.func.current_timestamp())
name = db.Column(db.String(25))
amount = db.Column(db.Integer)
class Settings(db.Model):
"""Define Settings Model"""
id = db.Column(db.Integer, primary_key=True)
date_created = db.Column(db.DateTime, default=db.func.current_timestamp())
date_modified = db.Column(db.DateTime, default=db.func.current_timestamp(),
onupdate=db.func.current_timestamp())
currency = db.Column(db.String(10))
file_repo = db.Column(db.String(10))
nb_of_stores = db.Column(db.Integer)
|
{
"content_hash": "30635d0af72934d9ef54619b328230c5",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 76,
"avg_line_length": 37.57142857142857,
"alnum_prop": 0.7186311787072244,
"repo_name": "oeilgauche/vicuna",
"id": "e0242ed4eade305f451b9b0bed94b925cee2bed3",
"size": "789",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/settings/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "178300"
},
{
"name": "HTML",
"bytes": "25525"
},
{
"name": "JavaScript",
"bytes": "2276"
},
{
"name": "Python",
"bytes": "25642"
},
{
"name": "Ruby",
"bytes": "923"
}
],
"symlink_target": ""
}
|
from lather import exceptions
class Response(object):
"""
Simple class which represents the suds response
"""
def __init__(self, keylist, dict):
self.__keylist__ = keylist
for key in self.__keylist__:
setattr(self, key, dict[key])
class MaxLengthValidaiton(object):
"""
Simple validation class
"""
def __init__(self, limit_value):
self.limit_value = limit_value
def __call__(self, value):
if len(value) > self.limit_value:
raise exceptions.ValidationError('Max length reached')
|
{
"content_hash": "dec896932a59f46fd69f7eac7e872e2f",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 66,
"avg_line_length": 23.08,
"alnum_prop": 0.6031195840554593,
"repo_name": "ctxis/lather",
"id": "285a262ea8efb647f13d723906af91b98bc0ad0f",
"size": "601",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/utils.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "89552"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from frappe import _
import zxcvbn
def test_password_strength(password, user_inputs=None):
'''Wrapper around zxcvbn.password_strength'''
result = zxcvbn.password_strength(password, user_inputs)
result['feedback'] = get_feedback(result['score'], result['match_sequence'])
return result
# NOTE: code modified for frappe translations
# -------------------------------------------
# feedback functionality code from https://github.com/sans-serif/python-zxcvbn/blob/master/zxcvbn/feedback.py
# see license for feedback code at https://github.com/sans-serif/python-zxcvbn/blob/master/LICENSE.txt
# Used for regex matching capitalization
import re
# Used to get the regex patterns for capitalization
# (Used the same way in the original zxcvbn)
from zxcvbn import scoring
# Default feedback value
feedback = {
"warning": "",
"suggestions":[
_("Use a few words, avoid common phrases."),
_("No need for symbols, digits, or uppercase letters."),
],
}
def get_feedback (score, sequence):
"""
Returns the feedback dictionary consisting of ("warning","suggestions") for the given sequences.
"""
# Starting feedback
if len(sequence) == 0:
return feedback
# No feedback if score is good or great
if score > 2:
return dict({"warning": "","suggestions": []})
# Tie feedback to the longest match for longer sequences
longest_match = max(sequence, key=lambda x: len(x['token']))
# Get feedback for this match
feedback = get_match_feedback(longest_match, len(sequence) == 1)
# If no concrete feedback returned, give more general feedback
if not feedback:
feedback = {
"warning": "",
"suggestions":[
_("Better add a few more letters or another word")
],
}
return feedback
def get_match_feedback(match, is_sole_match):
"""
Returns feedback as a dictionary for a certain match
"""
# Define a number of functions that are used in a look up dictionary
def fun_bruteforce():
return None
def fun_dictionary():
# If the match is of type dictionary, call specific function
return get_dictionary_match_feedback(match, is_sole_match)
def fun_spatial():
if match["turns"] == 1:
feedback ={
"warning": _('Straight rows of keys are easy to guess'),
"suggestions":[
_("Try to use a longer keyboard pattern with more turns")
],
}
else:
feedback ={
"warning": _('Short keyboard patterns are easy to guess'),
"suggestions":[
_("Make use of longer keyboard patterns")
],
}
return feedback
def fun_repeat():
if len(match["repeated_char"]) == 1:
feedback ={
"warning": _('Repeats like "aaa" are easy to guess'),
"suggestions":[
_("Let's avoid repeated words and characters")
],
}
else:
feedback ={
"warning": _('Repeats like "abcabcabc" are only slightly harder to guess than "abc"'),
"suggestions":[
_("Try to avoid repeated words and characters")
],
}
return feedback
def fun_sequence():
return {
"suggestions":[
_("Avoid sequences like abc or 6543 as they are easy to guess")
],
}
def fun_regex():
if match["regex_name"] == "recent_year":
return {
"warning": _("Recent years are easy to guess."),
"suggestions":[
_("Avoid recent years."),
_("Avoid years that are associated with you.")
],
}
def fun_date():
return {
"warning": _("Dates are often easy to guess."),
"suggestions":[
_("Avoid dates and years that are associated with you.")
],
}
	# Dictionary that maps pattern names to functions that return feedback
patterns = {
"bruteforce": fun_bruteforce,
"dictionary": fun_dictionary,
"spatial": fun_spatial,
"repeat": fun_repeat,
"sequence": fun_sequence,
"regex": fun_regex,
"date": fun_date,
}
return(patterns[match['pattern']]())
def get_dictionary_match_feedback(match, is_sole_match):
"""
Returns feedback for a match that is found in a dictionary
"""
warning = ""
suggestions = []
# If the match is a common password
if match["dictionary_name"] == "passwords":
if is_sole_match and not match["l33t_entropy"]:
if match["rank"] <= 10:
warning = _("This is a top-10 common password.")
elif match["rank"] <= 100:
warning = _("This is a top-100 common password.")
else:
warning = _("This is a very common password.")
else:
warning = _("This is similar to a commonly used password.")
# If the match is a common english word
elif match["dictionary_name"] == "english":
if is_sole_match:
warning = _("A word by itself is easy to guess.")
# If the match is a common surname/name
elif match["dictionary_name"] in ["surnames", "male_names", "female_names"]:
if is_sole_match:
warning = _("Names and surnames by themselves are easy to guess.")
else:
warning = _("Common names and surnames are easy to guess.")
word = match["token"]
# Variations of the match like UPPERCASES
if re.match(scoring.START_UPPER, word):
suggestions.append(_("Capitalization doesn't help very much."))
elif re.match(scoring.ALL_UPPER, word):
suggestions.append(_("All-uppercase is almost as easy to guess as all-lowercase."))
# Match contains l33t speak substitutions
if match["l33t_entropy"]:
suggestions.append(_("Predictable substitutions like '@' instead of 'a' don't help very much."))
return {"warning": warning, "suggestions": suggestions}
|
{
"content_hash": "2582c6caadcf0c2010da5bc34aa3adf5",
"timestamp": "",
"source": "github",
"line_count": 168,
"max_line_length": 109,
"avg_line_length": 31.851190476190474,
"alnum_prop": 0.6714632778919828,
"repo_name": "anandpdoshi/frappe",
"id": "bc0b6ad8c370bfe5f892835d7446633e045bcf1e",
"size": "5452",
"binary": false,
"copies": "5",
"ref": "refs/heads/develop",
"path": "frappe/utils/password_strength.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "285216"
},
{
"name": "HTML",
"bytes": "1349168"
},
{
"name": "JavaScript",
"bytes": "1092822"
},
{
"name": "Python",
"bytes": "1259016"
},
{
"name": "Shell",
"bytes": "517"
}
],
"symlink_target": ""
}
|
import logging
from django.conf import settings
from django.http import (HttpResponseRedirect, HttpResponse,
HttpResponseForbidden, HttpResponseNotFound, Http404)
from django.shortcuts import get_object_or_404, render_to_response, redirect
from django.contrib.auth.decorators import login_required
from django.views.decorators.http import (require_GET, require_POST,
require_http_methods)
from constance import config as c_config
import badger.views
@require_http_methods(['GET', 'POST'])
@login_required
def create(request):
# Restrict badge creation to mozillians, if enabled.
if c_config.BADGER_ALLOW_ADD_ONLY_BY_MOZILLIANS:
profile = request.user.get_profile()
if not profile.is_vouched_mozillian():
return HttpResponseForbidden()
return badger.views.create(request)
|
{
"content_hash": "3cc755c92a4a15634a7c2c525513d9a7",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 76,
"avg_line_length": 32.18518518518518,
"alnum_prop": 0.7238204833141542,
"repo_name": "mozilla/badges.mozilla.org",
"id": "4773d5644924615b31a630ad6ddb59c4b8a8c520",
"size": "869",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "badgus/base/views.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "10452"
},
{
"name": "Dockerfile",
"bytes": "499"
},
{
"name": "HTML",
"bytes": "83183"
},
{
"name": "JavaScript",
"bytes": "6538"
},
{
"name": "Python",
"bytes": "407537"
},
{
"name": "Shell",
"bytes": "871"
},
{
"name": "Smarty",
"bytes": "215"
}
],
"symlink_target": ""
}
|
from .test_simple_types import *
from .test_string_types import *
|
{
"content_hash": "eeac7e93f72e23a57869b529984007d8",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 32,
"avg_line_length": 33,
"alnum_prop": 0.7575757575757576,
"repo_name": "ajmarks/gymnast",
"id": "8b2815daa910d8ba1a36f515584e24de9167f845",
"size": "66",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "937"
},
{
"name": "Python",
"bytes": "127195"
}
],
"symlink_target": ""
}
|
import serial, time, struct, sys
ser = serial.Serial()
#ser.port = "/dev/cu.wchusbserial1410" # Set this to your serial port
ser.port = "/dev/ttyUSB0" # Set this to your serial port
ser.baudrate = 9600
ser.open()
ser.flushInput()
byte, lastbyte = "\x00", "\x00"
while True:
lastbyte = byte
byte = ser.read(size=1)
# We got a valid packet header
if lastbyte == "\xAA" and byte == "\xC0":
sentence = ser.read(size=8) # Read 8 more bytes
        readings = struct.unpack('<HHxxcc',sentence) # Decode the packet - little endian, 2 shorts for pm2.5 and pm10, 2 reserved bytes, checksum, message tail
if (readings[3] != "\xAB"):
print("Tail failed")
continue
pm_25 = readings[0]/10.0
pm_10 = readings[1]/10.0
# ignoring the checksum and message tail
#print "PM 2.5:",pm_25,"μg/m^3 PM 10:",pm_10,"μg/m^3"
print("0:{0}".format(pm_25))
print("1:{0}".format(pm_10))
#sys.stdout.write("0:{0}\r\n".format(pm_25))
sys.stdout.flush()
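# Worked example (added comment, not part of the original script; the checksum byte
# below is an arbitrary illustrative value and is not verified here): for a frame
# header "\xAA\xC0" followed by the payload "\x7B\x00\xC8\x01\x00\x00\x43\xAB",
# struct.unpack('<HHxxcc', payload) yields the little-endian shorts 0x007B = 123 and
# 0x01C8 = 456 plus the checksum and tail chars, so pm_25 = 123/10.0 = 12.3 and
# pm_10 = 456/10.0 = 45.6 ug/m^3, and readings[3] == "\xAB" accepts the frame.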
|
{
"content_hash": "137d8c30e6f854fbe9fbd866cfa2d86b",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 156,
"avg_line_length": 31.21875,
"alnum_prop": 0.6206206206206206,
"repo_name": "kiu/feinstaubnode",
"id": "6f17d93d16bd00150d5f6bdeaa8b75684a615c57",
"size": "1044",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "realtime-gnuplot/pm.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "KiCad",
"bytes": "35562"
},
{
"name": "Perl",
"bytes": "3669"
},
{
"name": "Python",
"bytes": "1044"
},
{
"name": "Shell",
"bytes": "234"
}
],
"symlink_target": ""
}
|
import log as logger
import threading
import time
EXP_TIME = 50
logger.init("jacket-cache", output=False)
_cache_lock = threading.Lock()
class JacketStatusCache(object):
def __init__(self, synchronizer):
logger.info("init jacket status cache.")
self.synchronizer = synchronizer
self.status_map = self.synchronizer.synchronize_status()
self.last_sync_time = time.time()
def query_status(self, instance_id):
now = time.time()
_cache_lock.acquire()
try:
if now - self.last_sync_time > EXP_TIME:
logger.info("cache have expire. sync cache. now = %s, last sync time = %s" % (now, self.last_sync_time))
self.status_map = self.synchronizer.synchronize_status()
self.last_sync_time = time.time()
logger.info("sync cache success, cache size = %s." % len(self.status_map))
except Exception as e:
logger.error("sync cache failed.", e)
finally:
_cache_lock.release()
if instance_id in self.status_map:
status = self.status_map.get(instance_id)
return status
logger.debug("query status, can not find instance record, instance_id = %s." % instance_id)
return None
|
{
"content_hash": "520ab1c7b33b538ae81f358d2aa81fc4",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 120,
"avg_line_length": 30.023255813953487,
"alnum_prop": 0.6034082106893881,
"repo_name": "back1860/jacket-status-cache",
"id": "ff6f0dd5e2a52f6681cfbce20276d82bdfba4ee9",
"size": "1315",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "jacketstatuscache/jacketcache.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "52710"
}
],
"symlink_target": ""
}
|
import serial
import math
import time
import thread
import threading
# some module-level definitions for the robot commands
START = chr(128) # already converted to bytes...
BAUD = chr(129) # + 1 byte
CONTROL = chr(130) # deprecated for Create
SAFE = chr(131)
FULL = chr(132)
POWER = chr(133)
SPOT = chr(134) # Same for the Roomba and Create
CLEAN = chr(135) # Clean button - Roomba
COVER = chr(135) # Cover demo - Create
MAX = chr(136) # Roomba
DEMO = chr(136) # Create
DRIVE = chr(137) # + 4 bytes
MOTORS = chr(138) # + 1 byte
LEDS = chr(139) # + 3 bytes
SONG = chr(140) # + 2N+2 bytes, where N is the number of notes
PLAY = chr(141) # + 1 byte
SENSORS = chr(142) # + 1 byte
FORCESEEKINGDOCK = chr(143) # same on Roomba and Create
# the above command is called "Cover and Dock" on the Create
DRIVEDIRECT = chr(145) # Create only
STREAM = chr(148) # Create only
QUERYLIST = chr(149) # Create only
PAUSERESUME = chr(150) # Create only
#### Sean
SCRIPT = chr(152)
ENDSCRIPT = chr(153)
WAITDIST = chr(156)
WAITANGLE = chr(157)
# the four SCI modes
# the code will try to keep track of which mode the system is in,
# but this might not be 100% trivial...
OFF_MODE = 0
PASSIVE_MODE = 1
SAFE_MODE = 2
FULL_MODE = 3
# the sensors
BUMPS_AND_WHEEL_DROPS = 7
WALL_IR_SENSOR = 8
CLIFF_LEFT = 9
CLIFF_FRONT_LEFT = 10
CLIFF_FRONT_RIGHT = 11
CLIFF_RIGHT = 12
VIRTUAL_WALL = 13
LSD_AND_OVERCURRENTS = 14
INFRARED_BYTE = 17
BUTTONS = 18
DISTANCE = 19
ANGLE = 20
CHARGING_STATE = 21
VOLTAGE = 22
CURRENT = 23
BATTERY_TEMP = 24
BATTERY_CHARGE = 25
BATTERY_CAPACITY = 26
WALL_SIGNAL = 27
CLIFF_LEFT_SIGNAL = 28
CLIFF_FRONT_LEFT_SIGNAL = 29
CLIFF_FRONT_RIGHT_SIGNAL = 30
CLIFF_RIGHT_SIGNAL = 31
CARGO_BAY_DIGITAL_INPUTS = 32
CARGO_BAY_ANALOG_SIGNAL = 33
CHARGING_SOURCES_AVAILABLE = 34
OI_MODE = 35
SONG_NUMBER = 36
SONG_PLAYING = 37
NUM_STREAM_PACKETS = 38
REQUESTED_VELOCITY = 39
REQUESTED_RADIUS = 40
REQUESTED_RIGHT_VELOCITY = 41
REQUESTED_LEFT_VELOCITY = 42
# others just for easy access to particular parts of the data
POSE = 100
LEFT_BUMP = 101
RIGHT_BUMP = 102
LEFT_WHEEL_DROP = 103
RIGHT_WHEEL_DROP = 104
CENTER_WHEEL_DROP = 105
LEFT_WHEEL_OVERCURRENT = 106
RIGHT_WHEEL_OVERCURRENT = 107
ADVANCE_BUTTON = 108
PLAY_BUTTON = 109
# 0 1 2 3 4 5 6 7 8 9101112131415161718192021222324252627282930313233343536373839404142
SENSOR_DATA_WIDTH = [0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,2,2,1,2,2,1,2,2,2,2,2,2,2,1,2,1,1,1,1,1,2,2,2,2]
# for printing the SCI modes
def modeStr( mode ):
""" prints a string representing the input SCI mode """
if mode == OFF_MODE: return 'OFF_MODE'
if mode == PASSIVE_MODE: return 'PASSIVE_MODE'
if mode == SAFE_MODE: return 'SAFE_MODE'
if mode == FULL_MODE: return 'FULL_MODE'
print 'Warning: unknown mode', mode, 'seen in modeStr'
return 'UNKNOWN_MODE'
#
# some module-level functions for dealing with bits and bytes
#
def _bytesOfR( r ):
""" for looking at the raw bytes of a sensor reply, r """
print 'raw r is', r
for i in range(len(r)):
print 'byte', i, 'is', ord(r[i])
print 'finished with formatR'
def _bitOfByte( bit, byte ):
""" returns a 0 or 1: the value of the 'bit' of 'byte' """
if bit < 0 or bit > 7:
print 'Your bit of', bit, 'is out of range (0-7)'
print 'returning 0'
return 0
return ((byte >> bit) & 0x01)
def _toBinary( val, numbits ):
""" prints numbits digits of val in binary """
if numbits == 0: return
_toBinary( val>>1 , numbits-1 )
print (val & 0x01), # print least significant bit
def _fromBinary( s ):
""" s is a string of 0's and 1's """
if s == '': return 0
lowbit = ord(s[-1]) - ord('0')
return lowbit + 2*_fromBinary( s[:-1] )
def _twosComplementInt1byte( byte ):
""" returns an int of the same value of the input
int (a byte), but interpreted in two's
complement
the output range should be -128 to 127
"""
# take everything except the top bit
topbit = _bitOfByte( 7, byte )
lowerbits = byte & 127
if topbit == 1:
return lowerbits - (1 << 7)
else:
return lowerbits
def _twosComplementInt2bytes( highByte, lowByte ):
""" returns an int which has the same value
as the twosComplement value stored in
the two bytes passed in
the output range should be -32768 to 32767
chars or ints can be input, both will be
truncated to 8 bits
"""
# take everything except the top bit
topbit = _bitOfByte( 7, highByte )
lowerbits = highByte & 127
unsignedInt = lowerbits << 8 | (lowByte & 0xFF)
if topbit == 1:
# with sufficient thought, I've convinced
# myself of this... we'll see, I suppose.
return unsignedInt - (1 << 15)
else:
return unsignedInt
def _toTwosComplement2Bytes( value ):
""" returns two bytes (ints) in high, low order
whose bits form the input value when interpreted in
two's complement
"""
# if positive or zero, it's OK
if value >= 0:
eqBitVal = value
# if it's negative, I think it is this
else:
eqBitVal = (1<<16) + value
return ( (eqBitVal >> 8) & 0xFF, eqBitVal & 0xFF )
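# Worked example (added comment): to drive backward at 200 mm/s, the DRIVE command
# needs the velocity -200 encoded in two's complement; _toTwosComplement2Bytes(-200)
# returns (0xFF, 0x38) because (1 << 16) - 200 = 65336 = 0xFF38, and
# _twosComplementInt2bytes(0xFF, 0x38) recovers -200 (0x7F38 = 32568, minus
# 1 << 15 = 32768, gives -200).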
def _poseDeltaFromVelRadSec( vel_mm_sec, ROC, sec ):
""" returns the pose change (dx,dy,dthr) in (mm,mm,radians)
undergone by a differential-drive robot
with a wheelspan of 258mm that is traveling with
a "velocity" of vel_mm_sec, along a radius of
ROC_mm, for sec seconds
NOTE that the pose change is represented in the canonical
"robot-centric" coordinate system:
Hooray for ASCII art!
| +x aligned to robot's heading
|
^ | ^
| | |
+y <---WL--+--WR--- -y perp to robot's heading
|
| DELTA = 1/2 distance from WL to WR
| -x
vel_mm_sec is the average of the velocities of WL and WR
it is positive when the robot is moving forward
the center of the robot's circular arc is at (0,ROC)
positive ROC => turning to the left
negative ROC => turning to the right
Special cases: ROC == 1 => counterclockwise
ROC == -1 => clockwise
ROC == 32768 => straight
"""
# the robot moves along the arc of a circle
#
# the robot's position after the arc is
# (0,ROC) + (ROC*sin(thr),-ROC*cos(thr))
#
# so we first find thr
#
# handle special cases
#
DELTA = 258.0/2.0 # there are 258 mm between the roomba's wheels
if ROC == 32768:
# going straight, so the wheels have equal velocities
# and there is no angular change
thr_delta = 0.0
x_delta = vel_mm_sec * sec # D = RT in action!
y_delta = 0.0
elif ROC == 1 or ROC == 0:
# turning in place counterclockwise = positive thr_delta
x_delta = 0.0
y_delta = 0.0
# to do - check if the sign of vel_mm_sec matters!
thr_delta = (vel_mm_sec * sec)/float(DELTA)
elif ROC == -1:
        # turning in place clockwise = negative thr_delta
x_delta = 0.0
y_delta = 0.0
# to do - check if the sign of vel_mm_sec matters!
thr_delta = - ( (vel_mm_sec * sec)/float(DELTA) )
else:
# general case
# how much distance did the center travel
# the specification is not 100% clear whether vel_mm_sec
# is the average velocity (signed) or speed (unsigned)
# of the wheels... we need to test this!
# we assume average speed (unsigned) for now...
# we handle the case where vel_mm_sec and ROC are both > 0
# and then check for signs later...
pos_vel_mm_sec = math.fabs(vel_mm_sec)
pos_ROC = math.fabs(ROC)
# need to switch the sign of the left wheel when the ROC < DELTA
if DELTA <= pos_ROC:
# both wheels are going the same direction
# center is traveling at pos_vel_mm_sec
# center is traveling at a radians/second
a = pos_vel_mm_sec / pos_ROC
else:
# wheels going in opposite directions
# center is traveling at a radians/second
a = pos_vel_mm_sec / DELTA
# we find the total (positive) angle traveled, pos_thr
pos_thr = a * sec
# we handle four different cases
# case 1: ROC >= 0 and vel_mm_sec >= 0 (forward left)
if ROC >= 0 and vel_mm_sec >= 0:
thr_delta = pos_thr
# (0,ROC) + (ROC*sin(thr_delta),-ROC*cos(thr_delta))
x_delta = 0.0 + ROC*math.sin(thr_delta)
y_delta = ROC - ROC*math.cos(thr_delta)
# case 2: ROC < 0 and vel_mm_sec >= 0 (forward right)
if ROC < 0 and vel_mm_sec >= 0:
thr_delta = -pos_thr
# (0,ROC) + (ROC*sin(thr_delta),-ROC*cos(thr_delta))
x_delta = 0.0 + ROC*math.sin(thr_delta)
y_delta = ROC - ROC*math.cos(thr_delta)
# case 3: ROC >= 0 and vel_mm_sec < 0 (backward left)
if ROC >= 0 and vel_mm_sec < 0:
thr_delta = -pos_thr
# (0,ROC) + (ROC*sin(thr_delta),-ROC*cos(thr_delta))
x_delta = 0.0 + ROC*math.sin(thr_delta)
y_delta = ROC - ROC*math.cos(thr_delta)
# case 4: ROC < 0 and vel_mm_sec < 0 (backward right)
if ROC < 0 and vel_mm_sec < 0:
thr_delta = pos_thr
# (0,ROC) + (ROC*sin(thr_delta),-ROC*cos(thr_delta))
x_delta = 0.0 + ROC*math.sin(thr_delta)
y_delta = ROC - ROC*math.cos(thr_delta)
return (x_delta, y_delta, thr_delta)
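# Worked examples for _poseDeltaFromVelRadSec (illustrative only, not part of
# the original module):
#   straight:      _poseDeltaFromVelRadSec(100, 32768, 2.0) -> (200.0, 0.0, 0.0)
#   spin in place: _poseDeltaFromVelRadSec(129, 1, 1.0)     -> (0.0, 0.0, 1.0)
#                  (129 mm/s at the wheels, 129 mm half-wheelbase => 1 rad/s CCW)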
#
# this class represents a snapshot of the robot's data
#
class SensorFrame:
""" the sensorFrame class is really a struct whose
fields are filled in by sensorStatus
"""
def __init__(self):
""" constructor -- set all fields to 0
see _interpretSensorString for details
on all of these fields
"""
self.casterDrop = 0
self.leftWheelDrop = 0
self.rightWheelDrop = 0
self.leftBump = 0
self.rightBump = 0
self.wallSensor = 0
self.leftCliff = 0
self.frontLeftCliff = 0
self.frontRightCliff = 0
self.rightCliff = 0
self.virtualWall = 0
self.driveLeft = 0
self.driveRight = 0
self.mainBrush = 0
self.vacuum = 0
self.sideBrush = 0
self.leftDirt = 0
self.rightDirt = 0
self.remoteControlCommand = 0
self.powerButton = 0
self.spotButton = 0
self.cleanButton = 0
self.maxButton = 0
self.distance = 0
self.rawAngle = 0
self.angleInRadians = 0
self.chargingState = 0
self.voltage = 0
self.current = 0
self.temperature = 0
self.charge = 0
self.capacity = 0
def __str__(self):
""" returns a string with the information
from this SensorFrame
"""
# there's probably a more efficient way to do this...
# perhaps just making it all + instead of the separate
# += would be more efficient
#
# actually, we should make a list and call ''.join(list)
# not that we will...
#
s = ''
s += 'casterDrop: ' + str(self.casterDrop) + '\n'
s += 'leftWheelDrop: ' + str(self.leftWheelDrop) + '\n'
s += 'rightWheelDrop: ' + str(self.rightWheelDrop) + '\n'
s += 'leftBump: ' + str(self.leftBump) + '\n'
s += 'rightBump: ' + str(self.rightBump) + '\n'
s += 'wallSensor: ' + str(self.wallSensor) + '\n'
s += 'leftCliff: ' + str(self.leftCliff) + '\n'
s += 'frontLeftCliff: ' + str(self.frontLeftCliff) + '\n'
s += 'frontRightCliff: ' + str(self.frontRightCliff) + '\n'
s += 'rightCliff: ' + str(self.rightCliff) + '\n'
s += 'virtualWall: ' + str(self.virtualWall) + '\n'
s += 'driveLeft: ' + str(self.driveLeft) + '\n'
s += 'driveRight: ' + str(self.driveRight) + '\n'
s += 'mainBrush: ' + str(self.mainBrush) + '\n'
s += 'vacuum: ' + str(self.vacuum) + '\n'
s += 'sideBrush: ' + str(self.sideBrush) + '\n'
s += 'leftDirt: ' + str(self.leftDirt) + '\n'
s += 'rightDirt: ' + str(self.rightDirt) + '\n'
s += 'remoteControlCommand: ' + str(self.remoteControlCommand) + '\n'
s += 'powerButton: ' + str(self.powerButton) + '\n'
s += 'spotButton: ' + str(self.spotButton) + '\n'
s += 'cleanButton: ' + str(self.cleanButton) + '\n'
s += 'maxButton: ' + str(self.maxButton) + '\n'
s += 'distance: ' + str(self.distance) + '\n'
s += 'rawAngle: ' + str(self.rawAngle) + '\n'
s += 'angleInRadians: ' + str(self.angleInRadians) + '\n'
# no data member needed for this next line
s += 'angleInDegrees: ' + str(math.degrees(self.angleInRadians)) + '\n'
s += 'chargingState: ' + str(self.chargingState) + '\n'
s += 'voltage: ' + str(self.voltage) + '\n'
s += 'current: ' + str(self.current) + '\n'
s += 'temperature: ' + str(self.temperature) + '\n'
s += 'charge: ' + str(self.charge) + '\n'
s += 'capacity: ' + str(self.capacity) + '\n'
return s
def _toBinaryString(self):
""" this converts the calling SensorFrame into a 26-byte
string of the format the roomba sends back
"""
# todo: handle the different subsets (frames) of sensor data
# here are the 26 bytes in list form
slist = [0]*26
# First Frame
# byte 0: bumps and wheeldrops
slist[0] = self.casterDrop << 4 | \
self.leftWheelDrop << 3 | \
self.rightWheelDrop << 2 | \
self.leftBump << 1 | \
self.rightBump
# byte 1: wall data
slist[1] = self.wallSensor
# byte 2: cliff left
slist[2] = self.leftCliff
# byte 3: cliff front left
slist[3] = self.frontLeftCliff
# byte 4: cliff front right
slist[4] = self.frontRightCliff
# byte 5: cliff right
slist[5] = self.rightCliff
# byte 6: virtual wall
slist[6] = self.virtualWall
# byte 7: motor overcurrents
slist[7] = self.driveLeft << 4 | \
self.driveRight << 3 | \
self.mainBrush << 2 | \
self.vacuum << 1 | \
self.sideBrush
# byte 8: dirt detector left
slist[8] = self.leftDirt
        # byte 9: dirt detector right
slist[9] = self.rightDirt
# Second Frame
# byte 10: remote control command
slist[10] = self.remoteControlCommand
# byte 11: buttons
slist[11] = self.powerButton << 3 | \
self.spotButton << 2 | \
self.cleanButton << 1 | \
self.maxButton
# bytes 12, 13: distance
highVal, lowVal = _toTwosComplement2Bytes( self.distance )
slist[12] = highVal
slist[13] = lowVal
# bytes 14, 15: angle
highVal, lowVal = _toTwosComplement2Bytes( self.rawAngle )
slist[14] = highVal
slist[15] = lowVal
# Third Frame
# byte 16: charging state
slist[16] = self.chargingState
# bytes 17, 18: voltage
slist[17] = (self.voltage >> 8) & 0xFF
slist[18] = self.voltage & 0xFF
# bytes 19, 20: current
highVal, lowVal = _toTwosComplement2Bytes( self.current )
slist[19] = highVal
slist[20] = lowVal
# byte 21: temperature
slist[21] = self.temperature
# bytes 22, 23: charge
slist[22] = (self.charge >> 8) & 0xFF
slist[23] = self.charge & 0xFF
# bytes 24, 25: capacity
slist[24] = (self.capacity >> 8) & 0xFF
slist[25] = self.capacity & 0xFF
# convert to a string
s = ''.join([ chr(x) for x in slist ])
return s
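    # Round-trip sketch for _toBinaryString (illustrative only, not part of
    # the original class): the result is always a 26-character string that
    # Create._interpretSensorString below can parse, e.g.
    #   >>> f = SensorFrame(); f.distance = -120
    #   >>> len(f._toBinaryString())
    #   26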
#
# the robot class
#
class Create:
""" the Create class is an abstraction of the iRobot Create's
SCI interface, including communication and a bit
of processing of the strings passed back and forth
when you create an object of type Create, the code
will try to open a connection to it - so, it will fail
if it's not attached!
"""
# to do: check if we can start in other modes...
def __init__(self, PORT, startingMode=SAFE_MODE):
""" the constructor which tries to open the
connection to the robot at port PORT
"""
        self._debug = False   # referenced later by _write and _readSensorList
# to do: find the shortest safe serial timeout value...
# to do: use the timeout to do more error checking than
# is currently done...
#
# the -1 here is because windows starts counting from 1
# in the hardware control panel, but not in pyserial, it seems
        # if PORT is the string 'sim', we use a simulated connection;
        # any other string is treated as a serial device name (Mac/Linux)
print 'PORT is', PORT
if type(PORT) == type('string'):
if PORT == 'sim':
print 'In simulated mode...'
self.ser = 'sim'; # SRSerial('mapSquare.txt')
else:
# for Mac/Linux - use whole port name
# print 'In Mac/Linux mode...'
self.ser = serial.Serial(PORT, baudrate=57600, timeout=0.5)
# otherwise, we try to open the numeric serial port...
else:
# print 'In Windows mode...'
self.ser = serial.Serial(PORT-1, baudrate=57600, timeout=0.5)
# did the serial port actually open?
if self.ser != 'sim' and self.ser.isOpen():
print 'Serial port did open, presumably to a roomba...'
else:
print 'Serial port did NOT open, check the'
print ' - port number'
print ' - physical connection'
print ' - baud rate of the roomba (it\'s _possible_, if unlikely,'
print ' that it might be set to 19200 instead'
print ' of the default 57600 - removing and'
print ' reinstalling the battery should reset it.'
# our OI mode
self.sciMode = OFF_MODE
# our sensor dictionary, currently empty
self.sensord = {}
# here are the variables that constitute the robot's
# estimated odometry, thr is theta in radians...
# these are updated by integrateNextOdometricStep
# which is called in _interpretSensorString
self.xPose = 0.0
self.yPose = 0.0
self.thrPose = 0.0
time.sleep(0.3)
self._start() # go to passive mode - want to do this
# regardless of the final mode we'd like to be in...
time.sleep(0.3)
if (startingMode == SAFE_MODE):
print 'Putting the robot into safe mode...'
self.toSafeMode()
if (startingMode == FULL_MODE):
print 'Putting the robot into full mode...'
self.toSafeMode()
time.sleep(0.3)
self.toFullMode()
# We need to read the angle and distance sensors so that
# their values clear out!
time.sleep(0.25)
#self.sensors(6) # read all sensors to establish the sensord dictionary
self.setPose(0,0,0)
def _write(self, byte):
if self._debug==True:
print ord(byte)
self.ser.write(byte)
def getPose(self, dist='cm', angle='deg'):
""" getPose returns the current estimate of the
robot's global pose
dist may be 'cm' or 'mm'
angle may be 'deg' or 'rad'
"""
x = 0; y = 0; th = 0
if dist == 'cm':
x = self.xPose/10.0; y = self.yPose/10.0
else:
x = self.xPose; y = self.yPose
if angle == 'deg':
th = math.degrees(self.thrPose)
else:
th = self.thrPose
return (x,y,th)
def setPose(self, x, y, th, dist='cm', angle='deg'):
""" setPose sets the internal odometry to the input values
            x: global x (in cm by default; see dist)
            y: global y (in cm by default; see dist)
            th: global heading (in degrees by default; see angle)
dist: 'cm' or 'mm' for x and y
angle: 'deg' or 'rad' for th
"""
if dist == 'cm':
self.xPose = x*10.0; self.yPose = y*10.0
else:
self.xPose = x; self.yPose = y
if angle == 'deg':
self.thrPose = math.radians(th)
else:
self.thrPose = th
def resetPose(self):
""" resetPose simply sets the internal odometry to 0,0,0
"""
self.setPose(0.0,0.0,0.0)
def _integrateNextOdometricStepCreate(self, distance, rawAngle):
""" integrateNextOdometricStep adds the reported inputs
distance in mm
rawAngle in degrees
to the estimate of the robot's global pose
"""
# OK, so this _should_ be easy
# distance is, supposedly, the arc length that the center
# of the robot has traveled (the average of
# the two wheel's linear distances)
#
# rawAngle is, supposedly, the (RightWheel-LeftWheel)/2.0
#
# the distance (diameter) between the two wheels is 258mm
        # keep in mind that the robot's physical diameter is larger than this
#
# 0.5*258 == 129mm radius
#
# perhaps there's nothing to do...
if distance == 0 and rawAngle == 0:
return
        # then again, maybe there is something to do...
dthr = math.radians(rawAngle) # angle traveled
d = distance # distance traveled
# compute offsets in the local coordinate system,
# with the x-axis pointing in the direction the robot was
# initially facing (for this move) and the y-axis pointing
# perpendicularly to the left (when facing forward)
#
# first, the special case when the angle is zero...
if rawAngle == 0:
dx = float(d)
dy = 0.0
# or if the distance is zero...
elif distance == 0:
dx = 0.0
dy = 0.0
# or when neither is zero...
else:
# finite radius of curvature
ROC = float(d)/dthr # remember, this is signed!
dx = ROC*math.sin(dthr) # because ROC is signed,
dy = ROC-ROC*math.cos(dthr) # we don't need two cases
#
# we need to add dx, dy, and dthr to the global pose
# and so we need to do so in the global direction in
# which the robot was facing at the start of this movement
#
# here is the unit vector describing that direction
unitForwardX = math.cos( self.thrPose )
unitForwardY = math.sin( self.thrPose )
# here is the unit vector perpendicular to the left
unitPerpX = math.cos( self.thrPose + math.pi/2.0 )
unitPerpY = math.sin( self.thrPose + math.pi/2.0 )
# now we compute our global offsets
dx_global = dx*unitForwardX + dy*unitPerpX
dy_global = dx*unitForwardY + dy*unitPerpY
##print 'distance and rawAngle', distance, rawAngle
##print 'local offsets, x, y, thd', dx, dy, math.degrees(dthr)
##print 'global offsets, x, y, thd', dx_global, dy_global, math.degrees(dthr)
# and we add them all in...
self.xPose += dx_global
self.yPose += dy_global
self.thrPose += dthr
#print 'final pose', self.xPose, self.yPose, self.thrPose
return
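    # Worked example for _integrateNextOdometricStepCreate (illustrative only,
    # not part of the original class): starting from pose (0, 0, 0), a report
    # of distance=100 mm and rawAngle=90 deg gives dthr = pi/2 and
    # ROC = 100/(pi/2) ~= 63.7 mm, so dx ~= 63.7 mm and dy ~= 63.7 mm --
    # a quarter-circle ending to the robot's front-left, now facing +y.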
def setWheelVelocities( self, left_cm_sec, right_cm_sec ):
""" sends velocities of each wheel independently
left_cm_sec: left wheel velocity in cm/sec (capped at +- 50)
right_cm_sec: right wheel velocity in cm/sec (capped at +- 50)
"""
if left_cm_sec < -50: left_cm_sec = -50;
if left_cm_sec > 50: left_cm_sec = 50;
if right_cm_sec < -50: right_cm_sec = -50;
if right_cm_sec > 50: right_cm_sec = 50;
# convert to mm/sec, ensure we have integers
leftHighVal, leftLowVal = _toTwosComplement2Bytes( int(left_cm_sec*10) )
rightHighVal, rightLowVal = _toTwosComplement2Bytes( int(right_cm_sec*10) )
# send these bytes and set the stored velocities
self._write( DRIVEDIRECT )
self._write( chr(rightHighVal) )
self._write( chr(rightLowVal) )
self._write( chr(leftHighVal) )
self._write( chr(leftLowVal) )
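    # Byte-level example for setWheelVelocities (illustrative only, not part
    # of the original class): setWheelVelocities(20, -20) requests +200 mm/s
    # on the left wheel and -200 mm/s on the right, so after DRIVEDIRECT the
    # bytes sent are right = (0xFF, 0x38) and then left = (0x00, 0xC8).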
def stop(self):
""" stop calls go(0,0) """
self.go(0,0)
# we've gotta update pose information
foo = self.sensors([POSE])
def go( self, cm_per_sec=0, deg_per_sec=0 ):
""" go(cmpsec, degpsec) sets the robot's velocity to
cmpsec centimeters per second
degpsec degrees per second
go() is equivalent to go(0,0)
"""
# need to convert to the roomba's drive parameters
#
# for now, just one or the other...
if cm_per_sec == 0:
# just handle rotation
# convert to radians
rad_per_sec = math.radians(deg_per_sec)
# make sure the direction is correct
if rad_per_sec >= 0: dirstr = 'CCW'
else: dirstr = 'CW'
            # compute the wheel speed, given that the wheels are
            # 258mm/2.0 from the robot's center
vel_mm_sec = math.fabs(rad_per_sec) * (258.0/2.0)
# send it off to the robot
self._drive( vel_mm_sec, 0, dirstr )
elif deg_per_sec == 0:
# just handle forward/backward translation
vel_mm_sec = 10.0*cm_per_sec
big_radius = 32767
# send it off to the robot
self._drive( vel_mm_sec, big_radius )
else:
# move in the appropriate arc
rad_per_sec = math.radians(deg_per_sec)
vel_mm_sec = 10.0*cm_per_sec
radius_mm = vel_mm_sec / rad_per_sec
# check for extremes
if radius_mm > 32767: radius_mm = 32767
if radius_mm < -32767: radius_mm = -32767
self._drive( vel_mm_sec, radius_mm )
return
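    # Worked example for go (illustrative only, not part of the original
    # class): go(10, 30) converts to vel_mm_sec = 100.0 and
    # rad_per_sec ~= 0.5236, so radius_mm ~= 191 mm -- a gentle forward-left
    # arc handed off to _drive (which truncates the radius to an int).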
def _start(self):
""" changes from OFF_MODE to PASSIVE_MODE """
self._write( START )
# they recommend 20 ms between mode-changing commands
time.sleep(0.25)
# change the mode we think we're in...
return
def close(self):
""" tries to shutdown the robot as kindly as possible, by
clearing any remaining odometric data
going to passive mode
closing the serial port
"""
# is there other clean up to be done?
# let's get rid of any lingering odometric data
# we don't call getSensorList, because we don't want to integrate the odometry...
self._getRawSensorDataAsList( [19,20] )
time.sleep(0.1)
self._start() # send Create back to passive mode
time.sleep(0.1)
self.ser.close()
return
def _closeSer(self):
""" just disconnects the serial port """
self.ser.close()
return
def _openSer(self):
""" opens the port again """
self.ser.open()
return
def _drive(self, roomba_mm_sec, roomba_radius_mm, turn_dir='CCW'):
""" implements the drive command as specified
the turn_dir should be either 'CW' or 'CCW' for
clockwise or counterclockwise - this is only
used if roomba_radius_mm == 0 (or rounds down to 0)
other drive-related calls are available
"""
#self.sensors([POSE]) # updated by Sean
# first, they should be ints
# in case they're being generated mathematically
if type(roomba_mm_sec) != type(42):
roomba_mm_sec = int(roomba_mm_sec)
if type(roomba_radius_mm) != type(42):
roomba_radius_mm = int(roomba_radius_mm)
# we check that the inputs are within limits
# if not, we cap them there
if roomba_mm_sec < -500:
roomba_mm_sec = -500
if roomba_mm_sec > 500:
roomba_mm_sec = 500
# if the radius is beyond the limits, we go straight
# it doesn't really seem to go straight, however...
if roomba_radius_mm < -2000:
roomba_radius_mm = 32768
if roomba_radius_mm > 2000:
roomba_radius_mm = 32768
# get the two bytes from the velocity
# these come back as numbers, so we will chr them
velHighVal, velLowVal = _toTwosComplement2Bytes( roomba_mm_sec )
# get the two bytes from the radius in the same way
# note the special cases
if roomba_radius_mm == 0:
if turn_dir == 'CW':
roomba_radius_mm = -1
else: # default is 'CCW' (turning left)
roomba_radius_mm = 1
radiusHighVal, radiusLowVal = _toTwosComplement2Bytes( roomba_radius_mm )
#print 'bytes are', velHighVal, velLowVal, radiusHighVal, radiusLowVal
# send these bytes and set the stored velocities
self._write( DRIVE )
self._write( chr(velHighVal) )
self._write( chr(velLowVal) )
self._write( chr(radiusHighVal) )
self._write( chr(radiusLowVal) )
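    # Byte-level example for _drive (illustrative only, not part of the
    # original class): _drive(200, 500) sends the DRIVE opcode followed by
    # the velocity bytes (0x00, 0xC8) and the radius bytes (0x01, 0xF4).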
def setLEDs(self, power_color, power_intensity, play, advance ):
""" The setLEDs method sets each of the three LEDs, from left to right:
        the power LED, the play LED, and the advance LED.
The power LED at the left can display colors from green (0) to red (255)
and its intensity can be specified, as well. Hence, power_color and
power_intensity are values from 0 to 255. The other two LED inputs
should either be 0 (off) or 1 (on).
"""
# make sure we're within range...
if advance != 0: advance = 1
if play != 0: play = 1
try:
power = int(power_intensity)
powercolor = int(power_color)
except TypeError:
power = 128
powercolor = 128
            print 'Type exception caught in setLEDs in create.py'
print 'Your power_color or power_intensity was not of type int.'
if power < 0: power = 0
if power > 255: power = 255
if powercolor < 0: powercolor = 0
if powercolor > 255: powercolor = 255
# create the first byte
#firstByteVal = (status << 4) | (spot << 3) | (clean << 2) | (max << 1) | dirtdetect
firstByteVal = (advance << 3) | (play << 1)
# send these as bytes
# print 'bytes are', firstByteVal, powercolor, power
self._write( LEDS )
self._write( chr(firstByteVal) )
self._write( chr(powercolor) )
self._write( chr(power) )
return
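    # Usage sketch for setLEDs (illustrative only, not part of the original
    # class): setLEDs(0, 255, 1, 0) gives a bright green power LED with the
    # play LED on and the advance LED off; setLEDs(255, 255, 0, 1) gives a
    # bright red power LED with only the advance LED lit.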
#
# DO NOT CALL THIS FUNCTION!
    # call sensors() instead - it will integrate odometry
# for what that's worth, admittedly...
# if you call this without integrating odometry, the
# distance and rawAngle reported will be lost...
#
def _getRawSensorFrameAsList(self, packetnumber):
""" gets back a raw string of sensor data
which then can be used to create a SensorFrame
"""
if type(packetnumber) != type(1):
packetnumber = 6
if packetnumber < 0 or packetnumber > 6:
packetnumber = 6
self._write( SENSORS )
self._write( chr(packetnumber) )
if packetnumber == 0:
r = self.ser.read(size=26)
if packetnumber == 1:
r = self.ser.read(size=10)
if packetnumber == 2:
r = self.ser.read(size=6)
if packetnumber == 3:
r = self.ser.read(size=10)
if packetnumber == 4:
r = self.ser.read(size=14)
if packetnumber == 5:
r = self.ser.read(size=12)
if packetnumber == 6:
r = self.ser.read(size=52)
r = [ ord(c) for c in r ] # convert to ints
return r
def _getRawSensorDataAsList(self, listofsensors):
""" gets the chosen sensors
        and returns the raw bytes as a list of integers,
        one per byte received
"""
numberOfSensors = len(listofsensors)
self._write( QUERYLIST )
self._write( chr(numberOfSensors) )
resultLength = 0
for sensornum in listofsensors:
self._write( chr(sensornum) )
resultLength += SENSOR_DATA_WIDTH[sensornum]
r = self.ser.read(size=resultLength)
r = [ ord(c) for c in r ] # convert to ints
#print 'r is ', r
return r
def seekDock(self):
""" sends the force-seeking-dock signal
"""
self.demo(1)
def demo(self, demoNumber=-1):
""" runs one of the built-in demos for Create
if demoNumber is
<omitted> or
-1 stop current demo
0 wander the surrounding area
1 wander and dock, when the docking station is seen
2 wander a more local area
3 wander to a wall and then follow along it
4 figure 8
5 "wimp" demo: when pushed, move forward
when bumped, move back and away
6 home: will home in on a virtual wall, as
long as the back and sides of the IR receiver
are covered with tape
7 tag: homes in on sequential virtual walls
8 pachelbel: plays the first few notes of the canon in D
9 banjo: plays chord notes according to its cliff sensors
chord key is selected via the bumper
"""
if (demoNumber < -1 or demoNumber > 9):
demoNumber = -1 # stop current demo
self._write( DEMO )
if demoNumber < 0 or demoNumber > 9:
# invalid values are equivalent to stopping
self._write( chr(255) ) # -1
else:
self._write( chr(demoNumber) )
def setSong(self, songNumber, songDataList):
""" this stores a song to roomba's memory to play later
with the playSong command
songNumber must be between 0 and 15 (inclusive)
songDataList is a list of (note, duration) pairs (up to 16)
note is the midi note number, from 31 to 127
(outside this range, the note is a rest)
duration is from 0 to 255 in 1/64ths of a second
"""
# any notes to play?
if type(songDataList) != type([]) and type(songDataList) != type(()):
print 'songDataList was', songDataList
return
if len(songDataList) < 1:
print 'No data in the songDataList'
return
if songNumber < 0: songNumber = 0
if songNumber > 15: songNumber = 15
# indicate that a song is coming
self._write( SONG )
self._write( chr(songNumber) )
L = min(len(songDataList), 16)
self._write( chr(L) )
# loop through the notes, up to 16
for note in songDataList[:L]:
            # make sure it's a tuple, or else we rest for 1/4 second
if type(note) == type( () ):
#more error checking here!
self._write( chr(note[0]) ) # note number
self._write( chr(note[1]) ) # duration
else:
self._write( chr(30) ) # a rest note
self._write( chr(16) ) # 1/4 of a second
return
def playSong(self, list_of_notes):
""" The input to <tt>playSong</tt> should be specified as a list
of pairs of [ note_number, note_duration ] format. Thus,
r.playSong( [(60,8),(64,8),(67,8),(72,8)] ) plays a quick C chord.
"""
# implemented by setting song #1 to the notes and then playing it
self.setSong(1, list_of_notes)
self.playSongNumber(1)
def playSongNumber(self, songNumber):
""" plays song songNumber """
if songNumber < 0: songNumber = 0
if songNumber > 15: songNumber = 15
self._write( PLAY )
self._write( chr(songNumber) )
def playNote(self, noteNumber, duration, songNumber=0):
""" plays a single note as a song (at songNumber)
duration is in 64ths of a second (1-255)
the note number chart is on page 12 of the open interface manual
"""
# set the song
self.setSong(songNumber, [(noteNumber,duration)])
self.playSongNumber(songNumber)
def _getLower5Bits( self, r ):
""" r is one byte as an integer """
return [ _bitOfByte(4,r), _bitOfByte(3,r), _bitOfByte(2,r), _bitOfByte(1,r), _bitOfByte(0,r) ]
def _getOneBit( self, r ):
""" r is one byte as an integer """
if r == 1: return 1
else: return 0
def _getOneByteUnsigned( self, r ):
""" r is one byte as an integer """
return r
def _getOneByteSigned( self, r ):
""" r is one byte as a signed integer """
return _twosComplementInt1byte( r )
def _getTwoBytesSigned( self, r1, r2 ):
""" r1, r2 are two bytes as a signed integer """
return _twosComplementInt2bytes( r1, r2 )
def _getTwoBytesUnsigned( self, r1, r2 ):
""" r1, r2 are two bytes as an unsigned integer """
return r1 << 8 | r2
def _getButtonBits( self, r ):
""" r is one byte as an integer """
return [ _bitOfByte(2,r), _bitOfByte(0,r) ]
def _setNextDataFrame(self):
""" This function _asks_ the robot to collect ALL of
the sensor data into the next packet to send back.
"""
self._write( SENSORS )
self._write( chr(6) )
def _getNextDataFrame(self):
""" This function then gets back ALL of
the sensor data and organizes it into the sensor
dictionary, sensord.
"""
r = self.ser.read(size=52)
r = [ ord(c) for c in r ]
#return self._readSensorList(r)
def _rawSend( self, listofints ):
for x in listofints:
self._write( chr(x) )
def _rawRecv( self ):
nBytesWaiting = self.ser.inWaiting()
#print 'nBytesWaiting is', nBytesWaiting
r = self.ser.read(size=nBytesWaiting)
r = [ ord(x) for x in r ]
#print 'r is', r
return r
def _rawRecvStr( self ):
nBytesWaiting = self.ser.inWaiting()
#print 'nBytesWaiting is', nBytesWaiting
r = self.ser.read(size=nBytesWaiting)
return r
def sensors( self, list_of_sensors_to_poll=6 ):
""" this function updates the robot's currently maintained
state of its robot sensors for those sensors requested
If none are requested, then all of the sensors are updated
(which takes a bit more time...)
"""
if type(list_of_sensors_to_poll) == type([]):
# first, we change any pieces of sensor values to
# the single digit that is required here
distangle = 0
if POSE in list_of_sensors_to_poll:
list_of_sensors_to_poll.remove(POSE)
# should check if they're already there
list_of_sensors_to_poll.append(DISTANCE)
list_of_sensors_to_poll.append(ANGLE)
if LEFT_BUMP in list_of_sensors_to_poll:
list_of_sensors_to_poll.remove(LEFT_BUMP)
if BUMPS_AND_WHEEL_DROPS not in list_of_sensors_to_poll:
list_of_sensors_to_poll.append(BUMPS_AND_WHEEL_DROPS)
if RIGHT_BUMP in list_of_sensors_to_poll:
list_of_sensors_to_poll.remove(RIGHT_BUMP)
if BUMPS_AND_WHEEL_DROPS not in list_of_sensors_to_poll:
list_of_sensors_to_poll.append(BUMPS_AND_WHEEL_DROPS)
if RIGHT_WHEEL_DROP in list_of_sensors_to_poll:
list_of_sensors_to_poll.remove(RIGHT_WHEEL_DROP)
if BUMPS_AND_WHEEL_DROPS not in list_of_sensors_to_poll:
list_of_sensors_to_poll.append(BUMPS_AND_WHEEL_DROPS)
if LEFT_WHEEL_DROP in list_of_sensors_to_poll:
list_of_sensors_to_poll.remove(LEFT_WHEEL_DROP)
if BUMPS_AND_WHEEL_DROPS not in list_of_sensors_to_poll:
list_of_sensors_to_poll.append(BUMPS_AND_WHEEL_DROPS)
if CENTER_WHEEL_DROP in list_of_sensors_to_poll:
list_of_sensors_to_poll.remove(CENTER_WHEEL_DROP)
if BUMPS_AND_WHEEL_DROPS not in list_of_sensors_to_poll:
list_of_sensors_to_poll.append(BUMPS_AND_WHEEL_DROPS)
if LEFT_WHEEL_OVERCURRENT in list_of_sensors_to_poll:
list_of_sensors_to_poll.remove(LEFT_WHEEL_OVERCURRENT)
if LSD_AND_OVERCURRENTS not in list_of_sensors_to_poll:
list_of_sensors_to_poll.append(LSD_AND_OVERCURRENTS)
if RIGHT_WHEEL_OVERCURRENT in list_of_sensors_to_poll:
list_of_sensors_to_poll.remove(RIGHT_WHEEL_OVERCURRENT)
if LSD_AND_OVERCURRENTS not in list_of_sensors_to_poll:
list_of_sensors_to_poll.append(LSD_AND_OVERCURRENTS)
if ADVANCE_BUTTON in list_of_sensors_to_poll:
list_of_sensors_to_poll.remove(ADVANCE_BUTTON)
if BUTTONS not in list_of_sensors_to_poll:
list_of_sensors_to_poll.append(BUTTONS)
if PLAY_BUTTON in list_of_sensors_to_poll:
list_of_sensors_to_poll.remove(PLAY_BUTTON)
if BUTTONS not in list_of_sensors_to_poll:
list_of_sensors_to_poll.append(BUTTONS)
r = self._getRawSensorDataAsList(list_of_sensors_to_poll)
else:
            # if it's an integer, it's a frame number
r = self._getRawSensorFrameAsList( list_of_sensors_to_poll )
# now, we set list_of_sensors_to_poll
frameNumber = list_of_sensors_to_poll
if frameNumber == 0:
list_of_sensors_to_poll = range(7,27)
elif frameNumber == 1:
list_of_sensors_to_poll = range(7,17)
elif frameNumber == 2:
list_of_sensors_to_poll = range(17,21)
elif frameNumber == 3:
list_of_sensors_to_poll = range(21,27)
elif frameNumber == 4:
list_of_sensors_to_poll = range(27,35)
elif frameNumber == 5:
list_of_sensors_to_poll = range(35,43)
else:
list_of_sensors_to_poll = range(7,43)
# change our dictionary
self._readSensorList(list_of_sensors_to_poll, r)
return self.sensord
def printSensors(self):
""" convenience function to show sensed data in d
if d is None, the current self.sensord is used instead
"""
self.sensors([LEFT_BUMP,RIGHT_BUMP,LEFT_WHEEL_DROP,RIGHT_WHEEL_DROP,CENTER_WHEEL_DROP,WALL_IR_SENSOR,CLIFF_LEFT,CLIFF_FRONT_LEFT,CLIFF_FRONT_RIGHT,CLIFF_RIGHT,VIRTUAL_WALL,LEFT_WHEEL_OVERCURRENT,RIGHT_WHEEL_OVERCURRENT,INFRARED_BYTE,PLAY_BUTTON,ADVANCE_BUTTON,POSE,CHARGING_STATE,VOLTAGE,CURRENT,BATTERY_TEMP,BATTERY_CHARGE,BATTERY_CAPACITY,WALL_SIGNAL,CLIFF_LEFT_SIGNAL,CLIFF_FRONT_LEFT_SIGNAL,CLIFF_FRONT_RIGHT_SIGNAL,CLIFF_RIGHT_SIGNAL,OI_MODE,SONG_NUMBER,SONG_PLAYING,CHARGING_SOURCES_AVAILABLE])
d = self.sensord
pose = d[POSE]
print ' LEFT_BUMP:', d[LEFT_BUMP]
print ' RIGHT_BUMP:', d[RIGHT_BUMP]
print ' LEFT_WHEEL_DROP:', d[LEFT_WHEEL_DROP]
print ' RIGHT_WHEEL_DROP:', d[RIGHT_WHEEL_DROP]
print ' CENTER_WHEEL_DROP:', d[CENTER_WHEEL_DROP]
print ' WALL_IR_SENSOR:', d[WALL_IR_SENSOR]
print ' CLIFF_LEFT:', d[CLIFF_LEFT]
print ' CLIFF_FRONT_LEFT:', d[CLIFF_FRONT_LEFT]
print ' CLIFF_FRONT_RIGHT:', d[CLIFF_FRONT_RIGHT]
print ' CLIFF_RIGHT:', d[CLIFF_RIGHT]
print ' VIRTUAL_WALL:', d[VIRTUAL_WALL]
print ' LEFT_WHEEL_OVERCURRENT:', d[LEFT_WHEEL_OVERCURRENT]
print ' RIGHT_WHEEL_OVERCURRENT:', d[RIGHT_WHEEL_OVERCURRENT]
print ' INFRARED_BYTE:', d[INFRARED_BYTE]
print ' PLAY_BUTTON:', d[PLAY_BUTTON]
print ' ADVANCE_BUTTON:', d[ADVANCE_BUTTON]
print ' POSE X (cm):', pose[0]
print ' POSE Y (cm):', pose[1]
print ' POSE TH (deg):', pose[2]
print ' CHARGING_STATE:', d[CHARGING_STATE]
print ' VOLTAGE:', d[VOLTAGE]
print ' CURRENT:', d[CURRENT]
print ' BATTERY_TEMP:', d[BATTERY_TEMP]
print ' BATTERY_CHARGE:', d[BATTERY_CHARGE]
print ' BATTERY_CAPACITY:', d[BATTERY_CAPACITY]
print ' WALL_SIGNAL:', d[WALL_SIGNAL]
print ' CLIFF_LEFT_SIGNAL:', d[CLIFF_LEFT_SIGNAL]
print ' CLIFF_FRONT_LEFT_SIGNAL:', d[CLIFF_FRONT_LEFT_SIGNAL]
print ' CLIFF_FRONT_RIGHT_SIGNAL:', d[CLIFF_FRONT_RIGHT_SIGNAL]
print ' CLIFF_RIGHT_SIGNAL:', d[CLIFF_RIGHT_SIGNAL]
print ' OI_MODE:', d[OI_MODE]
print ' SONG_NUMBER:', d[SONG_NUMBER]
print ' SONG_PLAYING:', d[SONG_PLAYING]
print ' CHARGING_SOURCES_AVAILABLE:', d[CHARGING_SOURCES_AVAILABLE]
return d
def _readSensorList(self, sensor_data_list, r):
""" this returns the latest values from the particular
sensors requested in the listofvalues
"""
if len(sensor_data_list) == 0:
print 'No data was read in _readSensorList.'
return self.sensord
sensorDataInterpreter = [ None, # 0
None, # 1
None, # 2
None, # 3
None, # 4
None, # 5
None, # 6
self._getLower5Bits, # 7 BUMPS_AND_WHEEL_DROPS
self._getOneBit, # 8 WALL_IR_SENSOR
self._getOneBit, # 9 CLIFF_LEFT = 9
self._getOneBit, # 10 CLIFF_FRONT_LEFT = 10
self._getOneBit, # 11 CLIFF_FRONT_RIGHT = 11
self._getOneBit, # 12 CLIFF_RIGHT = 12
self._getOneBit, # 13 VIRTUAL_WALL
self._getLower5Bits, # 14 LSD_AND_OVERCURRENTS
self._getOneBit, # 15 unused
self._getOneBit, # 16 unused
self._getOneByteUnsigned, # 17 INFRARED_BYTE
self._getButtonBits, # 18 BUTTONS
self._getTwoBytesSigned, # 19 DISTANCE
self._getTwoBytesSigned, # 20 ANGLE
self._getOneByteUnsigned, # 21 CHARGING_STATE
self._getTwoBytesUnsigned, # 22 VOLTAGE
self._getTwoBytesSigned, # 23 CURRENT
self._getOneByteSigned, # 24 BATTERY_TEMP
self._getTwoBytesUnsigned, # 25 BATTERY_CHARGE
self._getTwoBytesUnsigned, # 26 BATTERY_CAPACITY
self._getTwoBytesUnsigned, # 27 WALL_SIGNAL
self._getTwoBytesUnsigned, # 28 CLIFF_LEFT_SIGNAL
self._getTwoBytesUnsigned, # 29 CLIFF_FRONT_LEFT_SIGNAL
self._getTwoBytesUnsigned, # 30 CLIFF_FRONT_RIGHT_SIGNAL
self._getTwoBytesUnsigned, # 31 CLIFF_RIGHT_SIGNAL
self._getLower5Bits, # 32 CARGO_BAY_DIGITAL_INPUTS
self._getTwoBytesUnsigned, # 33 CARGO_BAY_ANALOG_SIGNAL
self._getOneByteUnsigned, # 34 CHARGING_SOURCES_AVAILABLE
self._getOneByteUnsigned, # 35 OI_MODE
self._getOneByteUnsigned, # 36 SONG_NUMBER
self._getOneByteUnsigned, # 37 SONG_PLAYING
self._getOneByteUnsigned, # 38 NUM_STREAM_PACKETS
self._getTwoBytesSigned, # 39 REQUESTED_VELOCITY
self._getTwoBytesSigned, # 40 REQUESTED_RADIUS
self._getTwoBytesSigned, # 41 REQUESTED_RIGHT_VELOCITY
self._getTwoBytesSigned, # 42 REQUESTED_LEFT_VELOCITY
None # only 42 as of right now
]
startofdata = 0
distance = 0
angle = 0
for sensorNum in sensor_data_list:
width = SENSOR_DATA_WIDTH[sensorNum]
dataGetter = sensorDataInterpreter[sensorNum]
interpretedData = 0
if (width == 1):
if startofdata >= len(r):
print "Incomplete Sensor Packet"
break
else: interpretedData = dataGetter(r[startofdata])
if (width == 2):
if startofdata >= len(r) - 1:
print "Incomplete Sensor Packet"
break
else: interpretedData = dataGetter(r[startofdata], r[startofdata+1] )
# add to our dictionary
self.sensord[sensorNum] = interpretedData
# POSE = 100 - later
#LEFT_BUMP = 101
#RIGHT_BUMP = 102
#LEFT_WHEEL_DROP = 103
#RIGHT_WHEEL_DROP = 104
#CENTER_WHEEL_DROP = 105
if sensorNum == BUMPS_AND_WHEEL_DROPS:
self.sensord[CENTER_WHEEL_DROP] = interpretedData[0]
self.sensord[LEFT_WHEEL_DROP] = interpretedData[1]
self.sensord[RIGHT_WHEEL_DROP] = interpretedData[2]
self.sensord[LEFT_BUMP] = interpretedData[3]
self.sensord[RIGHT_BUMP] = interpretedData[4]
#LEFT_WHEEL_OVERCURRENT = 106
#RIGHT_WHEEL_OVERCURRENT = 107
if sensorNum == LSD_AND_OVERCURRENTS:
self.sensord[LEFT_WHEEL_OVERCURRENT] = interpretedData[0]
self.sensord[RIGHT_WHEEL_OVERCURRENT] = interpretedData[1]
#ADVANCE_BUTTON = 108
#PLAY_BUTTON = 109
if sensorNum == BUTTONS:
self.sensord[ADVANCE_BUTTON] = interpretedData[0]
self.sensord[PLAY_BUTTON] = interpretedData[1]
# handle special cases
if (sensorNum == DISTANCE):
distance = interpretedData
if self._debug == True: # james' change
print distance
if (sensorNum == ANGLE):
angle = interpretedData
if self._debug == True: # james' change
print angle
#resultingValues.append(interpretedData)
# update index for next sensor...
startofdata = startofdata + width
if (distance != 0 or angle != 0):
self._integrateNextOdometricStepCreate(distance,angle)
self.sensord[POSE] = self.getPose(dist='cm',angle='deg')
def toFullMode(self):
""" changes the state to FULL_MODE
"""
self._start()
time.sleep(0.03)
self.toSafeMode()
time.sleep(0.03)
self._write( FULL )
time.sleep(0.03)
self.sciMode = FULL_MODE
return
def toSafeMode(self):
""" changes the state (from PASSIVE_MODE or FULL_MODE)
to SAFE_MODE
"""
self._start()
time.sleep(0.03)
        # now we're in PASSIVE_MODE, so we can issue the SAFE command
self._write( SAFE )
# they recommend 20 ms between mode-changing commands
time.sleep(0.03)
# change the mode we think we're in...
self.sciMode = SAFE_MODE
# no response here, so we don't get any...
return
def getMode(self):
""" returns one of OFF_MODE, PASSIVE_MODE, SAFE_MODE, FULL_MODE """
# but how right is it?
return self.sciMode
def _setBaudRate(self, baudrate=10):
""" sets the communications rate to the desired value """
# check for OK value
#baudcode = 10 # 57600, the default
if baudrate == 300: baudcode = 0
elif baudrate == 600: baudcode = 1
elif baudrate == 1200: baudcode = 2
elif baudrate == 2400: baudcode = 3
elif baudrate == 4800: baudcode = 4
elif baudrate == 9600: baudcode = 5
elif baudrate == 14400: baudcode = 6
elif baudrate == 19200: baudcode = 7
elif baudrate == 28800: baudcode = 8
elif baudrate == 38400: baudcode = 9
elif baudrate == 57600: baudcode = 10
elif baudrate == 115200: baudcode = 11
else:
print 'The baudrate of', baudrate, 'in _setBaudRate'
print 'was not recognized. Not sending anything.'
return
# otherwise, send off the message
self._write( START )
self._write( chr(baudcode) )
# the recommended pause
time.sleep(0.1)
# change the mode we think we're in...
self.sciMode = PASSIVE_MODE
# no response here, so we don't get any...
return
def _interpretSensorString( self, r ):
""" This returns a sensorFrame object with its fields
filled in from the raw sensor return string, r, which
has to be the full 3-packet (26-byte) string.
r is obtained by writing [142][0] to the serial port.
"""
# check length
# we should save a bit of time by handling each sub-string
# appropriately, but we don't do this yet...
if len(r) != 26:
#print 'You have input an incorrectly formatted string to'
#print 'sensorStatus. It needs to have 26 bytes (full sensors).'
#print 'The input is', r
return
s = SensorFrame()
# convert r so that it is a list of 26 ints instead of 26 chrs
r = [ ord(c) for c in r ]
# packet number 1 (10 bytes)
# byte 0: bumps and wheeldrops
s.casterDrop = _bitOfByte( 4, r[0] )
s.leftWheelDrop = _bitOfByte( 3, r[0] )
s.rightWheelDrop = _bitOfByte( 2, r[0] )
s.leftBump = _bitOfByte( 1, r[0] )
s.rightBump = _bitOfByte( 0, r[0] )
# byte 1: wall sensor, the IR looking to the right
s.wallSensor = _bitOfByte( 0, r[1] )
# byte 2: left cliff sensor
s.leftCliff = _bitOfByte( 0, r[2] )
# byte 3: front left cliff sensor
s.frontLeftCliff = _bitOfByte( 0, r[3] )
# byte 4: front right cliff sensor
s.frontRightCliff = _bitOfByte( 0, r[4] )
# byte 5: right cliff sensor
s.rightCliff = _bitOfByte( 0, r[5] )
# byte 6: virtual wall detector (the separate unit)
s.virtualWall = _bitOfByte( 0, r[6] )
# byte 7: motor overcurrents byte
s.driveLeft = _bitOfByte( 4, r[7] )
s.driveRight = _bitOfByte( 3, r[7] )
s.mainBrush = _bitOfByte( 2, r[7] )
s.vacuum = _bitOfByte( 1, r[7] )
s.sideBrush = _bitOfByte( 0, r[7] )
# byte 8: dirt detector left
# the dirt-detecting sensors are acoustic impact sensors
# basically, they hear the dirt (or don't) going by toward the back
# this value ranges from 0 (no dirt) to 255 (lots of dirt)
s.leftDirt = r[8]
# byte 9: dirt detector right
# some roomba's don't have the right dirt detector
# the dirt detectors are metallic disks near the brushes
s.rightDirt = r[9]
# packet number 2 (6 bytes)
# byte 10: remote control command
# this is the value of the remote control command currently
# being seen by the roomba, it is 255 if there is no command
# not all roombas have a remote control...
s.remoteControlCommand = r[10]
# byte 11: button presses
s.powerButton = _bitOfByte( 3, r[11] )
s.spotButton = _bitOfByte( 2, r[11] )
s.cleanButton = _bitOfByte( 1, r[11] )
s.maxButton = _bitOfByte( 0, r[11] )
# bytes 12 and 13: distance
# the distance that roomba has traveled, in mm, since the
# last time this data was requested (not from a SensorFrame,
# but from the roomba)
# It will stay at the max or min (32767 or -32768) if
# not polled often enough, i.e., it then means "a long way"
# It is the sum of the two drive wheels' distances, divided by 2
s.distance = _twosComplementInt2bytes( r[12], r[13] )
# bytes 14 and 15: angle
s.rawAngle = _twosComplementInt2bytes( r[14], r[15] )
# the distance between the wheels is 258 mm
s.angleInRadians = 2.0 * s.rawAngle / 258.0
# packet number 3 (10 bytes)
# byte 16: charging state
# 0 == not charging
# 1 == charging recovery
# 2 == charging
# 3 == trickle charging
# 4 == waiting
# 5 == charging error
s.chargingState = r[16]
# bytes 17 and 18: voltage in millivolts
# this is unsigned, so we don't use two's complement
s.voltage = r[17] << 8 | r[18]
# check this for byte order!!
# bytes 19 and 20: current in milliamps
# this is signed, from -32768 to 32767
# negative currents are flowing out of the battery
# positive currents are flowing into the battery (charging)
s.current = _twosComplementInt2bytes( r[19], r[20] )
# byte 21: temperature of the battery
# this is in degrees celsius
s.temperature = _twosComplementInt1byte( r[21] )
# bytes 22 and 23: charge of the battery in milliamp-hours
# this is two unsigned bytes
s.charge = r[22] << 8 | r[23]
# bytes 24 and 25: estimated capacity of the roomba's battery
# in units of milliamp-hours
# when the charge reaches this value, the battery is
# considered fully charged
s.capacity = r[24] << 8 | r[25]
# OK, here we call a function to integrate the odometric
# step taken here (unless distance and rawAngle are 0)
self._integrateNextOdometricStepCreate(s.distance,s.rawAngle)
return s
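    # Worked example for the angle conversion above (illustrative only, not
    # part of the original class): a rawAngle of 129 means the wheels' travel
    # differed by 258 mm, so angleInRadians = 2.0*129/258.0 = 1.0 rad
    # (about 57.3 degrees of rotation).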
# Some new stuff added by Sean
def _startScript(self, number_of_bytes):
self._write( SCRIPT )
self._write( chr(number_of_bytes) )
return
def _endScript(self, timeout=-1.0):
# issue the ENDSCRIPT command to start the script
self._write( ENDSCRIPT )
interval = 1.0
total = 0.0
# strip out all existing crap
while(self.ser.read(8192) != ''):
continue
# poll
while(timeout<0.0 or total < timeout):
self._write(SENSORS)
self._write(chr(7)) # smallest packet value that I can tell
if self.ser.read(1) != '':
break
time.sleep(interval - 0.5)
total = total + interval
# strip out again, we buffered up lots of junk
while(self.ser.read(8192) != ''):
continue
def _waitForDistance(self, distance_mm):
self._write(WAITDIST)
leftHighVal, leftLowVal = _toTwosComplement2Bytes( distance_mm )
self._write( chr(leftHighVal) )
self._write( chr(leftLowVal) )
return
def _waitForAngle(self, angle_deg):
self._write(WAITANGLE)
leftHighVal, leftLowVal = _toTwosComplement2Bytes( angle_deg )
self._write( chr(leftHighVal) )
self._write( chr(leftLowVal) )
return
def turn(self, angle_deg, deg_per_sec=20):
if angle_deg==0:
return
if deg_per_sec==0:
deg_per_sec=20
if (angle_deg < 0 and deg_per_sec > 0) or (angle_deg > 0 and deg_per_sec < 0):
deg_per_sec = 0 - deg_per_sec
self._startScript(13)
self.go(0, deg_per_sec)
self._waitForAngle(angle_deg)
self.stop()
self._endScript()
#self.sensors([POSE]) # updated by Sean
def move(self, distance_cm, cm_per_sec=10):
if distance_cm==0:
return
if cm_per_sec==0:
cm_per_sec=10
if (distance_cm < 0 and cm_per_sec > 0) or (distance_cm > 0 and cm_per_sec < 0):
cm_per_sec = 0 - cm_per_sec
self._startScript(13)
self.go(cm_per_sec, 0)
self._waitForDistance(distance_cm*10)
self.stop()
self._endScript()
#self.sensors([POSE]) # updated by Sean
# James' syntactic sugar/kludgebox
def senseFunc(self, sensorName):
"""Returns a function which, when called, updates and returns
information for a specified sensor (sensorName).
e.g. cliffState = robot.senseFunc(create.CLIFF_FRONT_LEFT_SIGNAL)
info = cliffState()
No added functionality, just nicer to look at."""
f = lambda: self.sensors([sensorName])[sensorName]
return f
def sleepTill(self, sensorFunc, comparison, value):
"""Have the robot continue what it's doing until some halting
criterion is met, determined by a repeated polling of
        sensorFunc until its comparison (a function) to value is true.
e.g. greater = lambda a,b: a > b
             bumpSense = robot.senseFunc(create.LEFT_BUMP)
robot.go(10)
robot.sleepTill(bumpSense, greater, 0)
This will have the robot go until the left bump sensor is pushed.
"""
while (not comparison(sensorFunc(), value)):
time.sleep(0.05)
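# ---------------------------------------------------------------------------
# Usage sketch (illustrative only, not part of the original module; the port
# name below is an assumption and will differ from machine to machine):
#   import create
#   robot = create.Create('/dev/ttyUSB0')   # or a COM-port number on Windows
#   robot.go(10, 0)                         # drive forward at 10 cm/sec
#   robot.sleepTill(robot.senseFunc(create.LEFT_BUMP), lambda a, b: a > b, 0)
#   robot.stop()
#   print robot.getPose()                   # (x_cm, y_cm, theta_deg)
#   robot.close()
# ---------------------------------------------------------------------------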
|
{
"content_hash": "dbaa377027af65fab8870aaf44136802",
"timestamp": "",
"source": "github",
"line_count": 1700,
"max_line_length": 508,
"avg_line_length": 37.51294117647059,
"alnum_prop": 0.5477952706516966,
"repo_name": "huangy6/vroombot",
"id": "dabe308ef8849a52e0d208336ece79745362e1d9",
"size": "64251",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/create.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1244686"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import logging
from django.core.management.base import BaseCommand
import kolibri
from ...utils import dbbackup
from kolibri.utils import server
logger = logging.getLogger(__name__)
class Command(BaseCommand):
output_transaction = True
# @ReservedAssignment
help = (
"Create a database backup of Kolibri. This is not intended for "
"replication across different devices, but *only* for restoring a "
"single device from a local backup of the database."
)
def add_arguments(self, parser):
parser.add_argument(
"dest_folder",
nargs="?",
type=str,
help=(
"Specifies which folder to create the dump in, otherwise it "
"is created in the default location ~/.kolibri/backups"
),
)
def handle(self, *args, **options):
try:
server.get_status()
self.stderr.write(
self.style.ERROR(
"Cannot restore while Kolibri is running, please run:\n"
"\n"
" kolibri stop\n"
)
)
raise SystemExit()
except server.NotRunning:
# Great, it's not running!
pass
dest_folder = options.get("dest_folder", None)
backup = dbbackup(kolibri.__version__, dest_folder=dest_folder)
self.stdout.write(
self.style.SUCCESS("Backed up database to: {path}".format(path=backup))
)
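    # Illustrative invocation (an assumption -- the exact CLI wrapper may vary
    # by Kolibri version): `kolibri manage dbbackup /path/to/backups` writes
    # the dump to the given folder, or to ~/.kolibri/backups when dest_folder
    # is omitted.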
|
{
"content_hash": "106892dbe0fcbd7022a422046b983d6e",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 83,
"avg_line_length": 27.983050847457626,
"alnum_prop": 0.5723803755299818,
"repo_name": "lyw07/kolibri",
"id": "a9120bfaea3c96001e802f221d558143efc72e37",
"size": "1651",
"binary": false,
"copies": "4",
"ref": "refs/heads/develop",
"path": "kolibri/core/deviceadmin/management/commands/dbbackup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "601"
},
{
"name": "CSS",
"bytes": "2007902"
},
{
"name": "Dockerfile",
"bytes": "6930"
},
{
"name": "Gherkin",
"bytes": "199214"
},
{
"name": "HTML",
"bytes": "34393"
},
{
"name": "JavaScript",
"bytes": "1376767"
},
{
"name": "Makefile",
"bytes": "11718"
},
{
"name": "Python",
"bytes": "1896793"
},
{
"name": "Shell",
"bytes": "11350"
},
{
"name": "Vue",
"bytes": "1278479"
}
],
"symlink_target": ""
}
|
from django.conf.urls import patterns, include, url
import app.views as av
import django.contrib.auth.views
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns(
'',
url(r'^$', av.index),
url(r'^login/$', django.contrib.auth.views.login, {'template_name': 'login.html'}),
url(r'^logout/$', av.logout),
url(r'^mypage/$', av.mypage),
url(r'^users/$', av.user_list),
url(r'^users/(?P<user_id>\d+)/$', av.user_detail),
url(r'^users/(?P<user_id>\d+)/dump/$', av.dump_user_glyphset),
url(r'^glyphs/$', av.glyph_list),
url(r'^glyphs/(?P<glyph_id>\d+)/$', av.glyph_detail),
url(r'^characters/$', av.character_list),
url(r'^characters/(?P<code>[\da-f]{4,5})/$', av.character_detail),
url(r'^manatsum/$', av.manatsum),
url(r'^manatsum/post/$', av.post_glyph),
# Uncomment the next line to enable the admin:
url(r'^admin/', include(admin.site.urls)),
)
|
{
"content_hash": "97b9eddda10a8cc7f7a618d591af01a0",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 87,
"avg_line_length": 32.93333333333333,
"alnum_prop": 0.6275303643724697,
"repo_name": "mashabow/manatsum",
"id": "5c776b30da86672e76cd07ea1c069d9c1d905eb0",
"size": "1005",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2053"
},
{
"name": "HTML",
"bytes": "10626"
},
{
"name": "JavaScript",
"bytes": "12979"
},
{
"name": "Python",
"bytes": "16773"
}
],
"symlink_target": ""
}
|
import numpy as np
import pytest
import pandas as pd
from pandas.util.testing import assert_frame_equal, assert_series_equal
@pytest.fixture(params=[['inner'], ['inner', 'outer']])
def frame(request):
levels = request.param
df = pd.DataFrame({'outer': ['a', 'a', 'a', 'b', 'b', 'b'],
'inner': [1, 2, 3, 1, 2, 3],
'A': np.arange(6),
'B': ['one', 'one', 'two', 'two', 'one', 'one']})
if levels:
df = df.set_index(levels)
return df
@pytest.fixture()
def series():
df = pd.DataFrame({'outer': ['a', 'a', 'a', 'b', 'b', 'b'],
'inner': [1, 2, 3, 1, 2, 3],
'A': np.arange(6),
'B': ['one', 'one', 'two', 'two', 'one', 'one']})
s = df.set_index(['outer', 'inner', 'B'])['A']
return s
@pytest.mark.parametrize('key_strs,groupers', [
('inner', # Index name
pd.Grouper(level='inner')
),
(['inner'], # List of index name
[pd.Grouper(level='inner')]
),
(['B', 'inner'], # Column and index
['B', pd.Grouper(level='inner')]
),
(['inner', 'B'], # Index and column
[pd.Grouper(level='inner'), 'B'])])
def test_grouper_index_level_as_string(frame, key_strs, groupers):
result = frame.groupby(key_strs).mean()
expected = frame.groupby(groupers).mean()
assert_frame_equal(result, expected)
@pytest.mark.parametrize('levels', [
'inner', 'outer', 'B',
['inner'], ['outer'], ['B'],
['inner', 'outer'], ['outer', 'inner'],
['inner', 'outer', 'B'], ['B', 'outer', 'inner']
])
def test_grouper_index_level_as_string_series(series, levels):
# Compute expected result
if isinstance(levels, list):
groupers = [pd.Grouper(level=lv) for lv in levels]
else:
groupers = pd.Grouper(level=levels)
expected = series.groupby(groupers).mean()
# Compute and check result
result = series.groupby(levels).mean()
assert_series_equal(result, expected)
|
{
"content_hash": "ac85c826f5c41c519c3fa6ffb04092d0",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 72,
"avg_line_length": 29.75,
"alnum_prop": 0.5348492338111716,
"repo_name": "MJuddBooth/pandas",
"id": "141381f84300b540423478099d9a878859f19c55",
"size": "2023",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "pandas/tests/groupby/test_index_as_string.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "4879"
},
{
"name": "C",
"bytes": "406766"
},
{
"name": "C++",
"bytes": "17248"
},
{
"name": "HTML",
"bytes": "606963"
},
{
"name": "Makefile",
"bytes": "529"
},
{
"name": "Python",
"bytes": "14858932"
},
{
"name": "Shell",
"bytes": "29575"
},
{
"name": "Smarty",
"bytes": "2040"
}
],
"symlink_target": ""
}
|
"""
Interface for tagging each token in a sentence with supplementary
information, such as its part of speech.
"""
from nltk.internals import overridden
class TaggerI(object):
"""
A processing interface for assigning a tag to each token in a list.
Tags are case sensitive strings that identify some property of each
token, such as its part of speech or its sense.
Subclasses must define:
- either L{tag()} or L{batch_tag()} (or both)
"""
def tag(self, tokens):
"""
Determine the most appropriate tag sequence for the given
token sequence, and return a corresponding list of tagged
tokens. A tagged token is encoded as a tuple C{(token, tag)}.
@rtype: C{list} of C{(token, tag)}
"""
if overridden(self.batch_tag):
return self.batch_tag([tokens])[0]
else:
raise NotImplementedError()
def batch_tag(self, sentences):
"""
Apply L{self.tag()} to each element of C{sentences}. I.e.:
>>> return [self.tag(tokens) for tokens in sentences]
"""
return [self.tag(tokens) for tokens in sentences]
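# Illustrative sketch (not part of the original module): a minimal TaggerI
# subclass that satisfies the interface by defining tag() alone, assigning a
# single fixed tag to every token.
#
#   class ConstantTagger(TaggerI):
#       def __init__(self, tag):
#           self._tag = tag
#       def tag(self, tokens):
#           return [(token, self._tag) for token in tokens]
#
#   ConstantTagger('NN').tag(['the', 'cat'])  # [('the', 'NN'), ('cat', 'NN')]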
|
{
"content_hash": "7e54b4c6d4c743650c50fab9b54afd2e",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 71,
"avg_line_length": 32.30555555555556,
"alnum_prop": 0.6276870163370594,
"repo_name": "hectormartinez/rougexstem",
"id": "1ba2bffd96e24e3b18a56a87a1a8d3034d68a1dc",
"size": "1456",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "taln2016/icsisumm-primary-sys34_v1/nltk/nltk-0.9.2/nltk/tag/api.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "AMPL",
"bytes": "252646"
},
{
"name": "Batchfile",
"bytes": "2712"
},
{
"name": "C",
"bytes": "3446743"
},
{
"name": "C#",
"bytes": "3511"
},
{
"name": "CSS",
"bytes": "1240"
},
{
"name": "HTML",
"bytes": "315849"
},
{
"name": "M4",
"bytes": "4099"
},
{
"name": "Makefile",
"bytes": "199393"
},
{
"name": "Perl",
"bytes": "378641"
},
{
"name": "Perl6",
"bytes": "67212"
},
{
"name": "Python",
"bytes": "3712683"
},
{
"name": "Shell",
"bytes": "319340"
},
{
"name": "TeX",
"bytes": "536677"
},
{
"name": "XQuery",
"bytes": "5987"
},
{
"name": "XS",
"bytes": "45555"
}
],
"symlink_target": ""
}
|
"""
Unit tests for PySpark; additional tests are implemented as doctests in
individual modules.
"""
from array import array
from glob import glob
import os
import re
import shutil
import subprocess
import sys
import tempfile
import time
import zipfile
import random
import threading
import hashlib
from py4j.protocol import Py4JJavaError
try:
import xmlrunner
except ImportError:
xmlrunner = None
if sys.version_info[:2] <= (2, 6):
try:
import unittest2 as unittest
except ImportError:
sys.stderr.write('Please install unittest2 to test with Python 2.6 or earlier')
sys.exit(1)
else:
import unittest
if sys.version_info[0] >= 3:
xrange = range
basestring = str
if sys.version >= "3":
from io import StringIO
else:
from StringIO import StringIO
from pyspark import keyword_only
from pyspark.conf import SparkConf
from pyspark.context import SparkContext
from pyspark.rdd import RDD
from pyspark.files import SparkFiles
from pyspark.serializers import read_int, BatchedSerializer, MarshalSerializer, PickleSerializer, \
CloudPickleSerializer, CompressedSerializer, UTF8Deserializer, NoOpSerializer, \
PairDeserializer, CartesianDeserializer, AutoBatchedSerializer, AutoSerializer, \
FlattenedValuesSerializer
from pyspark.shuffle import Aggregator, ExternalMerger, ExternalSorter
from pyspark import shuffle
from pyspark.profiler import BasicProfiler
from pyspark.taskcontext import BarrierTaskContext, TaskContext
_have_scipy = False
_have_numpy = False
try:
import scipy.sparse
_have_scipy = True
except:
# No SciPy, but that's okay, we'll skip those tests
pass
try:
import numpy as np
_have_numpy = True
except:
# No NumPy, but that's okay, we'll skip those tests
pass
SPARK_HOME = os.environ["SPARK_HOME"]
class MergerTests(unittest.TestCase):
def setUp(self):
self.N = 1 << 12
self.l = [i for i in xrange(self.N)]
self.data = list(zip(self.l, self.l))
        self.agg = Aggregator(lambda x: [x],                   # createCombiner: wrap the value in a list
                              lambda x, y: x.append(y) or x,   # mergeValue: append in place and return the list
                              lambda x, y: x.extend(y) or x)   # mergeCombiners: extend in place and return the list
def test_small_dataset(self):
m = ExternalMerger(self.agg, 1000)
m.mergeValues(self.data)
self.assertEqual(m.spills, 0)
self.assertEqual(sum(sum(v) for k, v in m.items()),
sum(xrange(self.N)))
m = ExternalMerger(self.agg, 1000)
m.mergeCombiners(map(lambda x_y1: (x_y1[0], [x_y1[1]]), self.data))
self.assertEqual(m.spills, 0)
self.assertEqual(sum(sum(v) for k, v in m.items()),
sum(xrange(self.N)))
def test_medium_dataset(self):
m = ExternalMerger(self.agg, 20)
m.mergeValues(self.data)
self.assertTrue(m.spills >= 1)
self.assertEqual(sum(sum(v) for k, v in m.items()),
sum(xrange(self.N)))
m = ExternalMerger(self.agg, 10)
m.mergeCombiners(map(lambda x_y2: (x_y2[0], [x_y2[1]]), self.data * 3))
self.assertTrue(m.spills >= 1)
self.assertEqual(sum(sum(v) for k, v in m.items()),
sum(xrange(self.N)) * 3)
def test_huge_dataset(self):
m = ExternalMerger(self.agg, 5, partitions=3)
m.mergeCombiners(map(lambda k_v: (k_v[0], [str(k_v[1])]), self.data * 10))
self.assertTrue(m.spills >= 1)
self.assertEqual(sum(len(v) for k, v in m.items()),
self.N * 10)
m._cleanup()
def test_group_by_key(self):
def gen_data(N, step):
for i in range(1, N + 1, step):
for j in range(i):
yield (i, [j])
def gen_gs(N, step=1):
return shuffle.GroupByKey(gen_data(N, step))
self.assertEqual(1, len(list(gen_gs(1))))
self.assertEqual(2, len(list(gen_gs(2))))
self.assertEqual(100, len(list(gen_gs(100))))
self.assertEqual(list(range(1, 101)), [k for k, _ in gen_gs(100)])
self.assertTrue(all(list(range(k)) == list(vs) for k, vs in gen_gs(100)))
for k, vs in gen_gs(50002, 10000):
self.assertEqual(k, len(vs))
self.assertEqual(list(range(k)), list(vs))
ser = PickleSerializer()
l = ser.loads(ser.dumps(list(gen_gs(50002, 30000))))
for k, vs in l:
self.assertEqual(k, len(vs))
self.assertEqual(list(range(k)), list(vs))
def test_stopiteration_is_raised(self):
def stopit(*args, **kwargs):
raise StopIteration()
def legit_create_combiner(x):
return [x]
def legit_merge_value(x, y):
return x.append(y) or x
def legit_merge_combiners(x, y):
return x.extend(y) or x
data = [(x % 2, x) for x in range(100)]
# wrong create combiner
m = ExternalMerger(Aggregator(stopit, legit_merge_value, legit_merge_combiners), 20)
with self.assertRaises((Py4JJavaError, RuntimeError)) as cm:
m.mergeValues(data)
# wrong merge value
m = ExternalMerger(Aggregator(legit_create_combiner, stopit, legit_merge_combiners), 20)
with self.assertRaises((Py4JJavaError, RuntimeError)) as cm:
m.mergeValues(data)
# wrong merge combiners
m = ExternalMerger(Aggregator(legit_create_combiner, legit_merge_value, stopit), 20)
with self.assertRaises((Py4JJavaError, RuntimeError)) as cm:
m.mergeCombiners(map(lambda x_y1: (x_y1[0], [x_y1[1]]), data))
class SorterTests(unittest.TestCase):
def test_in_memory_sort(self):
l = list(range(1024))
random.shuffle(l)
sorter = ExternalSorter(1024)
self.assertEqual(sorted(l), list(sorter.sorted(l)))
self.assertEqual(sorted(l, reverse=True), list(sorter.sorted(l, reverse=True)))
self.assertEqual(sorted(l, key=lambda x: -x), list(sorter.sorted(l, key=lambda x: -x)))
self.assertEqual(sorted(l, key=lambda x: -x, reverse=True),
list(sorter.sorted(l, key=lambda x: -x, reverse=True)))
def test_external_sort(self):
class CustomizedSorter(ExternalSorter):
def _next_limit(self):
return self.memory_limit
l = list(range(1024))
random.shuffle(l)
sorter = CustomizedSorter(1)
self.assertEqual(sorted(l), list(sorter.sorted(l)))
self.assertGreater(shuffle.DiskBytesSpilled, 0)
last = shuffle.DiskBytesSpilled
self.assertEqual(sorted(l, reverse=True), list(sorter.sorted(l, reverse=True)))
self.assertGreater(shuffle.DiskBytesSpilled, last)
last = shuffle.DiskBytesSpilled
self.assertEqual(sorted(l, key=lambda x: -x), list(sorter.sorted(l, key=lambda x: -x)))
self.assertGreater(shuffle.DiskBytesSpilled, last)
last = shuffle.DiskBytesSpilled
self.assertEqual(sorted(l, key=lambda x: -x, reverse=True),
list(sorter.sorted(l, key=lambda x: -x, reverse=True)))
self.assertGreater(shuffle.DiskBytesSpilled, last)
def test_external_sort_in_rdd(self):
conf = SparkConf().set("spark.python.worker.memory", "1m")
sc = SparkContext(conf=conf)
l = list(range(10240))
random.shuffle(l)
rdd = sc.parallelize(l, 4)
self.assertEqual(sorted(l), rdd.sortBy(lambda x: x).collect())
sc.stop()
class SerializationTestCase(unittest.TestCase):
def test_namedtuple(self):
from collections import namedtuple
from pickle import dumps, loads
P = namedtuple("P", "x y")
p1 = P(1, 3)
p2 = loads(dumps(p1, 2))
self.assertEqual(p1, p2)
from pyspark.cloudpickle import dumps
P2 = loads(dumps(P))
p3 = P2(1, 3)
self.assertEqual(p1, p3)
def test_itemgetter(self):
from operator import itemgetter
ser = CloudPickleSerializer()
d = range(10)
getter = itemgetter(1)
getter2 = ser.loads(ser.dumps(getter))
self.assertEqual(getter(d), getter2(d))
getter = itemgetter(0, 3)
getter2 = ser.loads(ser.dumps(getter))
self.assertEqual(getter(d), getter2(d))
def test_function_module_name(self):
ser = CloudPickleSerializer()
func = lambda x: x
func2 = ser.loads(ser.dumps(func))
self.assertEqual(func.__module__, func2.__module__)
def test_attrgetter(self):
from operator import attrgetter
ser = CloudPickleSerializer()
class C(object):
def __getattr__(self, item):
return item
d = C()
getter = attrgetter("a")
getter2 = ser.loads(ser.dumps(getter))
self.assertEqual(getter(d), getter2(d))
getter = attrgetter("a", "b")
getter2 = ser.loads(ser.dumps(getter))
self.assertEqual(getter(d), getter2(d))
d.e = C()
getter = attrgetter("e.a")
getter2 = ser.loads(ser.dumps(getter))
self.assertEqual(getter(d), getter2(d))
getter = attrgetter("e.a", "e.b")
getter2 = ser.loads(ser.dumps(getter))
self.assertEqual(getter(d), getter2(d))
# Regression test for SPARK-3415
def test_pickling_file_handles(self):
# to be corrected with SPARK-11160
if not xmlrunner:
ser = CloudPickleSerializer()
out1 = sys.stderr
out2 = ser.loads(ser.dumps(out1))
self.assertEqual(out1, out2)
def test_func_globals(self):
class Unpicklable(object):
def __reduce__(self):
raise Exception("not picklable")
global exit
exit = Unpicklable()
ser = CloudPickleSerializer()
self.assertRaises(Exception, lambda: ser.dumps(exit))
def foo():
sys.exit(0)
self.assertTrue("exit" in foo.__code__.co_names)
ser.dumps(foo)
def test_compressed_serializer(self):
ser = CompressedSerializer(PickleSerializer())
try:
from StringIO import StringIO
except ImportError:
from io import BytesIO as StringIO
io = StringIO()
ser.dump_stream(["abc", u"123", range(5)], io)
io.seek(0)
self.assertEqual(["abc", u"123", range(5)], list(ser.load_stream(io)))
ser.dump_stream(range(1000), io)
io.seek(0)
self.assertEqual(["abc", u"123", range(5)] + list(range(1000)), list(ser.load_stream(io)))
io.close()
def test_hash_serializer(self):
hash(NoOpSerializer())
hash(UTF8Deserializer())
hash(PickleSerializer())
hash(MarshalSerializer())
hash(AutoSerializer())
hash(BatchedSerializer(PickleSerializer()))
hash(AutoBatchedSerializer(MarshalSerializer()))
hash(PairDeserializer(NoOpSerializer(), UTF8Deserializer()))
hash(CartesianDeserializer(NoOpSerializer(), UTF8Deserializer()))
hash(CompressedSerializer(PickleSerializer()))
hash(FlattenedValuesSerializer(PickleSerializer()))
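# QuietTest temporarily raises the JVM root log level to FATAL so tests that trigger
# exceptions on purpose do not flood the output, and restores the old level on exit.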
class QuietTest(object):
def __init__(self, sc):
self.log4j = sc._jvm.org.apache.log4j
def __enter__(self):
self.old_level = self.log4j.LogManager.getRootLogger().getLevel()
self.log4j.LogManager.getRootLogger().setLevel(self.log4j.Level.FATAL)
def __exit__(self, exc_type, exc_val, exc_tb):
self.log4j.LogManager.getRootLogger().setLevel(self.old_level)
class PySparkTestCase(unittest.TestCase):
def setUp(self):
self._old_sys_path = list(sys.path)
class_name = self.__class__.__name__
self.sc = SparkContext('local[4]', class_name)
def tearDown(self):
self.sc.stop()
sys.path = self._old_sys_path
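# Unlike PySparkTestCase, which creates a fresh SparkContext per test, the reused
# variant shares one SparkContext across every test method in the class.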
class ReusedPySparkTestCase(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.sc = SparkContext('local[4]', cls.__name__)
@classmethod
def tearDownClass(cls):
cls.sc.stop()
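# The checkpoint tests point the shared SparkContext at a fresh temporary directory
# (the NamedTemporaryFile is unlinked so Spark can create the path) and remove it in
# tearDown.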
class CheckpointTests(ReusedPySparkTestCase):
def setUp(self):
self.checkpointDir = tempfile.NamedTemporaryFile(delete=False)
os.unlink(self.checkpointDir.name)
self.sc.setCheckpointDir(self.checkpointDir.name)
def tearDown(self):
shutil.rmtree(self.checkpointDir.name)
def test_basic_checkpointing(self):
parCollection = self.sc.parallelize([1, 2, 3, 4])
flatMappedRDD = parCollection.flatMap(lambda x: range(1, x + 1))
self.assertFalse(flatMappedRDD.isCheckpointed())
self.assertTrue(flatMappedRDD.getCheckpointFile() is None)
flatMappedRDD.checkpoint()
result = flatMappedRDD.collect()
time.sleep(1) # 1 second
self.assertTrue(flatMappedRDD.isCheckpointed())
self.assertEqual(flatMappedRDD.collect(), result)
self.assertEqual("file:" + self.checkpointDir.name,
os.path.dirname(os.path.dirname(flatMappedRDD.getCheckpointFile())))
def test_checkpoint_and_restore(self):
parCollection = self.sc.parallelize([1, 2, 3, 4])
flatMappedRDD = parCollection.flatMap(lambda x: [x])
self.assertFalse(flatMappedRDD.isCheckpointed())
self.assertTrue(flatMappedRDD.getCheckpointFile() is None)
flatMappedRDD.checkpoint()
flatMappedRDD.count() # forces a checkpoint to be computed
time.sleep(1) # 1 second
self.assertTrue(flatMappedRDD.getCheckpointFile() is not None)
recovered = self.sc._checkpointFile(flatMappedRDD.getCheckpointFile(),
flatMappedRDD._jrdd_deserializer)
self.assertEqual([1, 2, 3, 4], recovered.collect())
class LocalCheckpointTests(ReusedPySparkTestCase):
def test_basic_localcheckpointing(self):
parCollection = self.sc.parallelize([1, 2, 3, 4])
flatMappedRDD = parCollection.flatMap(lambda x: range(1, x + 1))
self.assertFalse(flatMappedRDD.isCheckpointed())
self.assertFalse(flatMappedRDD.isLocallyCheckpointed())
flatMappedRDD.localCheckpoint()
result = flatMappedRDD.collect()
time.sleep(1) # 1 second
self.assertTrue(flatMappedRDD.isCheckpointed())
self.assertTrue(flatMappedRDD.isLocallyCheckpointed())
self.assertEqual(flatMappedRDD.collect(), result)
class AddFileTests(PySparkTestCase):
def test_add_py_file(self):
# To ensure that we're actually testing addPyFile's effects, check that
# this job fails due to `userlibrary` not being on the Python path:
# disable logging in log4j temporarily
def func(x):
from userlibrary import UserClass
return UserClass().hello()
with QuietTest(self.sc):
self.assertRaises(Exception, self.sc.parallelize(range(2)).map(func).first)
# Add the file, so the job should now succeed:
path = os.path.join(SPARK_HOME, "python/test_support/userlibrary.py")
self.sc.addPyFile(path)
res = self.sc.parallelize(range(2)).map(func).first()
self.assertEqual("Hello World!", res)
def test_add_file_locally(self):
path = os.path.join(SPARK_HOME, "python/test_support/hello/hello.txt")
self.sc.addFile(path)
download_path = SparkFiles.get("hello.txt")
self.assertNotEqual(path, download_path)
with open(download_path) as test_file:
self.assertEqual("Hello World!\n", test_file.readline())
def test_add_file_recursively_locally(self):
path = os.path.join(SPARK_HOME, "python/test_support/hello")
self.sc.addFile(path, True)
download_path = SparkFiles.get("hello")
self.assertNotEqual(path, download_path)
with open(download_path + "/hello.txt") as test_file:
self.assertEqual("Hello World!\n", test_file.readline())
with open(download_path + "/sub_hello/sub_hello.txt") as test_file:
self.assertEqual("Sub Hello World!\n", test_file.readline())
def test_add_py_file_locally(self):
# To ensure that we're actually testing addPyFile's effects, check that
# this fails due to `userlibrary` not being on the Python path:
def func():
from userlibrary import UserClass
self.assertRaises(ImportError, func)
path = os.path.join(SPARK_HOME, "python/test_support/userlibrary.py")
self.sc.addPyFile(path)
from userlibrary import UserClass
self.assertEqual("Hello World!", UserClass().hello())
def test_add_egg_file_locally(self):
# To ensure that we're actually testing addPyFile's effects, check that
# this fails due to `userlibrary` not being on the Python path:
def func():
from userlib import UserClass
self.assertRaises(ImportError, func)
path = os.path.join(SPARK_HOME, "python/test_support/userlib-0.1.zip")
self.sc.addPyFile(path)
from userlib import UserClass
self.assertEqual("Hello World from inside a package!", UserClass().hello())
def test_overwrite_system_module(self):
self.sc.addPyFile(os.path.join(SPARK_HOME, "python/test_support/SimpleHTTPServer.py"))
import SimpleHTTPServer
self.assertEqual("My Server", SimpleHTTPServer.__name__)
def func(x):
import SimpleHTTPServer
return SimpleHTTPServer.__name__
self.assertEqual(["My Server"], self.sc.parallelize(range(1)).map(func).collect())
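# The TaskContext tests run against a 'local[4, 2]' master: four worker threads with
# up to two attempts per task, so a task that deliberately fails once can still succeed.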
class TaskContextTests(PySparkTestCase):
def setUp(self):
self._old_sys_path = list(sys.path)
class_name = self.__class__.__name__
# Allow retries even though they are normally disabled in local mode
self.sc = SparkContext('local[4, 2]', class_name)
def test_stage_id(self):
"""Test the stage ids are available and incrementing as expected."""
rdd = self.sc.parallelize(range(10))
stage1 = rdd.map(lambda x: TaskContext.get().stageId()).take(1)[0]
stage2 = rdd.map(lambda x: TaskContext.get().stageId()).take(1)[0]
# Test using the constructor directly rather than the get()
stage3 = rdd.map(lambda x: TaskContext().stageId()).take(1)[0]
self.assertEqual(stage1 + 1, stage2)
self.assertEqual(stage1 + 2, stage3)
self.assertEqual(stage2 + 1, stage3)
def test_partition_id(self):
"""Test the partition id."""
rdd1 = self.sc.parallelize(range(10), 1)
rdd2 = self.sc.parallelize(range(10), 2)
pids1 = rdd1.map(lambda x: TaskContext.get().partitionId()).collect()
pids2 = rdd2.map(lambda x: TaskContext.get().partitionId()).collect()
self.assertEqual(0, pids1[0])
self.assertEqual(0, pids1[9])
self.assertEqual(0, pids2[0])
self.assertEqual(1, pids2[9])
def test_attempt_number(self):
"""Verify the attempt numbers are correctly reported."""
rdd = self.sc.parallelize(range(10))
# Verify a simple job with no failures
attempt_numbers = rdd.map(lambda x: TaskContext.get().attemptNumber()).collect()
        # map() is lazy on Python 3, so iterate explicitly to actually run the checks
        for attempt in attempt_numbers:
            self.assertEqual(0, attempt)
def fail_on_first(x):
"""Fail on the first attempt so we get a positive attempt number"""
tc = TaskContext.get()
attempt_number = tc.attemptNumber()
partition_id = tc.partitionId()
attempt_id = tc.taskAttemptId()
if attempt_number == 0 and partition_id == 0:
raise Exception("Failing on first attempt")
else:
return [x, partition_id, attempt_number, attempt_id]
result = rdd.map(fail_on_first).collect()
        # The first partition should be re-submitted (attempt 1), while the other
        # partitions should stay at attempt 0
self.assertEqual([0, 0, 1], result[0][0:3])
self.assertEqual([9, 3, 0], result[9][0:3])
        # filter()/map() are lazy on Python 3; materialize them so the assertions run
        first_partition = [x for x in result if x[1] == 0]
        for x in first_partition:
            self.assertEqual(1, x[2])
        other_partitions = [x for x in result if x[1] != 0]
        for x in other_partitions:
            self.assertEqual(0, x[2])
# The task attempt id should be different
self.assertTrue(result[0][3] != result[9][3])
def test_tc_on_driver(self):
"""Verify that getting the TaskContext on the driver returns None."""
tc = TaskContext.get()
self.assertTrue(tc is None)
def test_get_local_property(self):
"""Verify that local properties set on the driver are available in TaskContext."""
key = "testkey"
value = "testvalue"
self.sc.setLocalProperty(key, value)
try:
rdd = self.sc.parallelize(range(1), 1)
prop1 = rdd.map(lambda _: TaskContext.get().getLocalProperty(key)).collect()[0]
self.assertEqual(prop1, value)
prop2 = rdd.map(lambda _: TaskContext.get().getLocalProperty("otherkey")).collect()[0]
self.assertTrue(prop2 is None)
finally:
self.sc.setLocalProperty(key, None)
def test_barrier(self):
"""
Verify that BarrierTaskContext.barrier() performs global sync among all barrier tasks
within a stage.
"""
rdd = self.sc.parallelize(range(10), 4)
def f(iterator):
yield sum(iterator)
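        # Each barrier task sleeps a random 1-10 seconds and then calls barrier(); if
        # the global sync works, all recorded timestamps should be within one second.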
def context_barrier(x):
tc = BarrierTaskContext.get()
time.sleep(random.randint(1, 10))
tc.barrier()
return time.time()
times = rdd.barrier().mapPartitions(f).map(context_barrier).collect()
self.assertTrue(max(times) - min(times) < 1)
def test_barrier_infos(self):
"""
Verify that BarrierTaskContext.getTaskInfos() returns a list of all task infos in the
barrier stage.
"""
rdd = self.sc.parallelize(range(10), 4)
def f(iterator):
yield sum(iterator)
taskInfos = rdd.barrier().mapPartitions(f).map(lambda x: BarrierTaskContext.get()
.getTaskInfos()).collect()
self.assertTrue(len(taskInfos) == 4)
self.assertTrue(len(taskInfos[0]) == 4)
class RDDTests(ReusedPySparkTestCase):
def test_range(self):
self.assertEqual(self.sc.range(1, 1).count(), 0)
self.assertEqual(self.sc.range(1, 0, -1).count(), 1)
self.assertEqual(self.sc.range(0, 1 << 40, 1 << 39).count(), 2)
def test_id(self):
rdd = self.sc.parallelize(range(10))
id = rdd.id()
self.assertEqual(id, rdd.id())
rdd2 = rdd.map(str).filter(bool)
id2 = rdd2.id()
self.assertEqual(id + 1, id2)
self.assertEqual(id2, rdd2.id())
def test_empty_rdd(self):
rdd = self.sc.emptyRDD()
self.assertTrue(rdd.isEmpty())
def test_sum(self):
self.assertEqual(0, self.sc.emptyRDD().sum())
self.assertEqual(6, self.sc.parallelize([1, 2, 3]).sum())
def test_to_localiterator(self):
from time import sleep
rdd = self.sc.parallelize([1, 2, 3])
it = rdd.toLocalIterator()
sleep(5)
self.assertEqual([1, 2, 3], sorted(it))
rdd2 = rdd.repartition(1000)
it2 = rdd2.toLocalIterator()
sleep(5)
self.assertEqual([1, 2, 3], sorted(it2))
def test_save_as_textfile_with_unicode(self):
# Regression test for SPARK-970
x = u"\u00A1Hola, mundo!"
data = self.sc.parallelize([x])
tempFile = tempfile.NamedTemporaryFile(delete=True)
tempFile.close()
data.saveAsTextFile(tempFile.name)
raw_contents = b''.join(open(p, 'rb').read()
for p in glob(tempFile.name + "/part-0000*"))
self.assertEqual(x, raw_contents.strip().decode("utf-8"))
def test_save_as_textfile_with_utf8(self):
x = u"\u00A1Hola, mundo!"
data = self.sc.parallelize([x.encode("utf-8")])
tempFile = tempfile.NamedTemporaryFile(delete=True)
tempFile.close()
data.saveAsTextFile(tempFile.name)
raw_contents = b''.join(open(p, 'rb').read()
for p in glob(tempFile.name + "/part-0000*"))
self.assertEqual(x, raw_contents.strip().decode('utf8'))
def test_transforming_cartesian_result(self):
# Regression test for SPARK-1034
rdd1 = self.sc.parallelize([1, 2])
rdd2 = self.sc.parallelize([3, 4])
cart = rdd1.cartesian(rdd2)
result = cart.map(lambda x_y3: x_y3[0] + x_y3[1]).collect()
def test_transforming_pickle_file(self):
# Regression test for SPARK-2601
data = self.sc.parallelize([u"Hello", u"World!"])
tempFile = tempfile.NamedTemporaryFile(delete=True)
tempFile.close()
data.saveAsPickleFile(tempFile.name)
pickled_file = self.sc.pickleFile(tempFile.name)
pickled_file.map(lambda x: x).collect()
def test_cartesian_on_textfile(self):
# Regression test for
path = os.path.join(SPARK_HOME, "python/test_support/hello/hello.txt")
a = self.sc.textFile(path)
result = a.cartesian(a).collect()
(x, y) = result[0]
self.assertEqual(u"Hello World!", x.strip())
self.assertEqual(u"Hello World!", y.strip())
def test_cartesian_chaining(self):
# Tests for SPARK-16589
rdd = self.sc.parallelize(range(10), 2)
self.assertSetEqual(
set(rdd.cartesian(rdd).cartesian(rdd).collect()),
set([((x, y), z) for x in range(10) for y in range(10) for z in range(10)])
)
self.assertSetEqual(
set(rdd.cartesian(rdd.cartesian(rdd)).collect()),
set([(x, (y, z)) for x in range(10) for y in range(10) for z in range(10)])
)
self.assertSetEqual(
set(rdd.cartesian(rdd.zip(rdd)).collect()),
set([(x, (y, y)) for x in range(10) for y in range(10)])
)
def test_zip_chaining(self):
# Tests for SPARK-21985
rdd = self.sc.parallelize('abc', 2)
self.assertSetEqual(
set(rdd.zip(rdd).zip(rdd).collect()),
set([((x, x), x) for x in 'abc'])
)
self.assertSetEqual(
set(rdd.zip(rdd.zip(rdd)).collect()),
set([(x, (x, x)) for x in 'abc'])
)
def test_deleting_input_files(self):
# Regression test for SPARK-1025
tempFile = tempfile.NamedTemporaryFile(delete=False)
tempFile.write(b"Hello World!")
tempFile.close()
data = self.sc.textFile(tempFile.name)
filtered_data = data.filter(lambda x: True)
self.assertEqual(1, filtered_data.count())
os.unlink(tempFile.name)
with QuietTest(self.sc):
self.assertRaises(Exception, lambda: filtered_data.count())
def test_sampling_default_seed(self):
# Test for SPARK-3995 (default seed setting)
data = self.sc.parallelize(xrange(1000), 1)
subset = data.takeSample(False, 10)
self.assertEqual(len(subset), 10)
def test_aggregate_mutable_zero_value(self):
# Test for SPARK-9021; uses aggregate and treeAggregate to build dict
# representing a counter of ints
# NOTE: dict is used instead of collections.Counter for Python 2.6
# compatibility
from collections import defaultdict
# Show that single or multiple partitions work
data1 = self.sc.range(10, numSlices=1)
data2 = self.sc.range(10, numSlices=2)
def seqOp(x, y):
x[y] += 1
return x
def comboOp(x, y):
for key, val in y.items():
x[key] += val
return x
counts1 = data1.aggregate(defaultdict(int), seqOp, comboOp)
counts2 = data2.aggregate(defaultdict(int), seqOp, comboOp)
counts3 = data1.treeAggregate(defaultdict(int), seqOp, comboOp, 2)
counts4 = data2.treeAggregate(defaultdict(int), seqOp, comboOp, 2)
ground_truth = defaultdict(int, dict((i, 1) for i in range(10)))
self.assertEqual(counts1, ground_truth)
self.assertEqual(counts2, ground_truth)
self.assertEqual(counts3, ground_truth)
self.assertEqual(counts4, ground_truth)
def test_aggregate_by_key_mutable_zero_value(self):
# Test for SPARK-9021; uses aggregateByKey to make a pair RDD that
# contains lists of all values for each key in the original RDD
# list(range(...)) for Python 3.x compatibility (can't use * operator
# on a range object)
# list(zip(...)) for Python 3.x compatibility (want to parallelize a
# collection, not a zip object)
tuples = list(zip(list(range(10))*2, [1]*20))
# Show that single or multiple partitions work
data1 = self.sc.parallelize(tuples, 1)
data2 = self.sc.parallelize(tuples, 2)
def seqOp(x, y):
x.append(y)
return x
def comboOp(x, y):
x.extend(y)
return x
values1 = data1.aggregateByKey([], seqOp, comboOp).collect()
values2 = data2.aggregateByKey([], seqOp, comboOp).collect()
# Sort lists to ensure clean comparison with ground_truth
values1.sort()
values2.sort()
ground_truth = [(i, [1]*2) for i in range(10)]
self.assertEqual(values1, ground_truth)
self.assertEqual(values2, ground_truth)
def test_fold_mutable_zero_value(self):
# Test for SPARK-9021; uses fold to merge an RDD of dict counters into
# a single dict
# NOTE: dict is used instead of collections.Counter for Python 2.6
# compatibility
from collections import defaultdict
counts1 = defaultdict(int, dict((i, 1) for i in range(10)))
counts2 = defaultdict(int, dict((i, 1) for i in range(3, 8)))
counts3 = defaultdict(int, dict((i, 1) for i in range(4, 7)))
counts4 = defaultdict(int, dict((i, 1) for i in range(5, 6)))
all_counts = [counts1, counts2, counts3, counts4]
# Show that single or multiple partitions work
data1 = self.sc.parallelize(all_counts, 1)
data2 = self.sc.parallelize(all_counts, 2)
def comboOp(x, y):
for key, val in y.items():
x[key] += val
return x
fold1 = data1.fold(defaultdict(int), comboOp)
fold2 = data2.fold(defaultdict(int), comboOp)
ground_truth = defaultdict(int)
for counts in all_counts:
for key, val in counts.items():
ground_truth[key] += val
self.assertEqual(fold1, ground_truth)
self.assertEqual(fold2, ground_truth)
def test_fold_by_key_mutable_zero_value(self):
# Test for SPARK-9021; uses foldByKey to make a pair RDD that contains
# lists of all values for each key in the original RDD
tuples = [(i, range(i)) for i in range(10)]*2
# Show that single or multiple partitions work
data1 = self.sc.parallelize(tuples, 1)
data2 = self.sc.parallelize(tuples, 2)
def comboOp(x, y):
x.extend(y)
return x
values1 = data1.foldByKey([], comboOp).collect()
values2 = data2.foldByKey([], comboOp).collect()
# Sort lists to ensure clean comparison with ground_truth
values1.sort()
values2.sort()
# list(range(...)) for Python 3.x compatibility
ground_truth = [(i, list(range(i))*2) for i in range(10)]
self.assertEqual(values1, ground_truth)
self.assertEqual(values2, ground_truth)
def test_aggregate_by_key(self):
data = self.sc.parallelize([(1, 1), (1, 1), (3, 2), (5, 1), (5, 3)], 2)
def seqOp(x, y):
x.add(y)
return x
def combOp(x, y):
x |= y
return x
sets = dict(data.aggregateByKey(set(), seqOp, combOp).collect())
self.assertEqual(3, len(sets))
self.assertEqual(set([1]), sets[1])
self.assertEqual(set([2]), sets[3])
self.assertEqual(set([1, 3]), sets[5])
def test_itemgetter(self):
rdd = self.sc.parallelize([range(10)])
from operator import itemgetter
self.assertEqual([1], rdd.map(itemgetter(1)).collect())
self.assertEqual([(2, 3)], rdd.map(itemgetter(2, 3)).collect())
def test_namedtuple_in_rdd(self):
from collections import namedtuple
Person = namedtuple("Person", "id firstName lastName")
jon = Person(1, "Jon", "Doe")
jane = Person(2, "Jane", "Doe")
theDoes = self.sc.parallelize([jon, jane])
self.assertEqual([jon, jane], theDoes.collect())
def test_large_broadcast(self):
N = 10000
data = [[float(i) for i in range(300)] for i in range(N)]
bdata = self.sc.broadcast(data) # 27MB
m = self.sc.parallelize(range(1), 1).map(lambda x: len(bdata.value)).sum()
self.assertEqual(N, m)
def test_unpersist(self):
N = 1000
data = [[float(i) for i in range(300)] for i in range(N)]
bdata = self.sc.broadcast(data) # 3MB
bdata.unpersist()
m = self.sc.parallelize(range(1), 1).map(lambda x: len(bdata.value)).sum()
self.assertEqual(N, m)
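        # unpersist() only drops cached copies, so the job above can still read the
        # broadcast; destroy() below releases it for good, so the next job must fail.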
bdata.destroy()
try:
self.sc.parallelize(range(1), 1).map(lambda x: len(bdata.value)).sum()
except Exception as e:
pass
else:
            raise Exception("job should fail after destroying the broadcast")
def test_multiple_broadcasts(self):
N = 1 << 21
b1 = self.sc.broadcast(set(range(N))) # multiple blocks in JVM
r = list(range(1 << 15))
random.shuffle(r)
s = str(r).encode()
checksum = hashlib.md5(s).hexdigest()
b2 = self.sc.broadcast(s)
r = list(set(self.sc.parallelize(range(10), 10).map(
lambda x: (len(b1.value), hashlib.md5(b2.value).hexdigest())).collect()))
self.assertEqual(1, len(r))
size, csum = r[0]
self.assertEqual(N, size)
self.assertEqual(checksum, csum)
random.shuffle(r)
s = str(r).encode()
checksum = hashlib.md5(s).hexdigest()
b2 = self.sc.broadcast(s)
r = list(set(self.sc.parallelize(range(10), 10).map(
lambda x: (len(b1.value), hashlib.md5(b2.value).hexdigest())).collect()))
self.assertEqual(1, len(r))
size, csum = r[0]
self.assertEqual(N, size)
self.assertEqual(checksum, csum)
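    # Broadcast variables referenced while pickling a function are tracked in the
    # thread-local sc._pickled_broadcast_vars; pickling in the main thread and in a
    # worker thread must not see each other's entries.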
def test_multithread_broadcast_pickle(self):
import threading
b1 = self.sc.broadcast(list(range(3)))
b2 = self.sc.broadcast(list(range(3)))
def f1():
return b1.value
def f2():
return b2.value
funcs_num_pickled = {f1: None, f2: None}
def do_pickle(f, sc):
command = (f, None, sc.serializer, sc.serializer)
ser = CloudPickleSerializer()
ser.dumps(command)
def process_vars(sc):
broadcast_vars = list(sc._pickled_broadcast_vars)
num_pickled = len(broadcast_vars)
sc._pickled_broadcast_vars.clear()
return num_pickled
def run(f, sc):
do_pickle(f, sc)
funcs_num_pickled[f] = process_vars(sc)
# pickle f1, adds b1 to sc._pickled_broadcast_vars in main thread local storage
do_pickle(f1, self.sc)
# run all for f2, should only add/count/clear b2 from worker thread local storage
t = threading.Thread(target=run, args=(f2, self.sc))
t.start()
t.join()
# count number of vars pickled in main thread, only b1 should be counted and cleared
funcs_num_pickled[f1] = process_vars(self.sc)
self.assertEqual(funcs_num_pickled[f1], 1)
self.assertEqual(funcs_num_pickled[f2], 1)
self.assertEqual(len(list(self.sc._pickled_broadcast_vars)), 0)
def test_large_closure(self):
N = 200000
data = [float(i) for i in xrange(N)]
rdd = self.sc.parallelize(range(1), 1).map(lambda x: len(data))
self.assertEqual(N, rdd.first())
# regression test for SPARK-6886
self.assertEqual(1, rdd.map(lambda x: (x, 1)).groupByKey().count())
def test_zip_with_different_serializers(self):
a = self.sc.parallelize(range(5))
b = self.sc.parallelize(range(100, 105))
self.assertEqual(a.zip(b).collect(), [(0, 100), (1, 101), (2, 102), (3, 103), (4, 104)])
a = a._reserialize(BatchedSerializer(PickleSerializer(), 2))
b = b._reserialize(MarshalSerializer())
self.assertEqual(a.zip(b).collect(), [(0, 100), (1, 101), (2, 102), (3, 103), (4, 104)])
# regression test for SPARK-4841
path = os.path.join(SPARK_HOME, "python/test_support/hello/hello.txt")
t = self.sc.textFile(path)
cnt = t.count()
self.assertEqual(cnt, t.zip(t).count())
rdd = t.map(str)
self.assertEqual(cnt, t.zip(rdd).count())
        # regression test for a bug in _reserialize()
self.assertEqual(cnt, t.zip(rdd).count())
def test_zip_with_different_object_sizes(self):
        # regression test for SPARK-5973
a = self.sc.parallelize(xrange(10000)).map(lambda i: '*' * i)
b = self.sc.parallelize(xrange(10000, 20000)).map(lambda i: '*' * i)
self.assertEqual(10000, a.zip(b).count())
def test_zip_with_different_number_of_items(self):
a = self.sc.parallelize(range(5), 2)
# different number of partitions
b = self.sc.parallelize(range(100, 106), 3)
self.assertRaises(ValueError, lambda: a.zip(b))
with QuietTest(self.sc):
# different number of batched items in JVM
b = self.sc.parallelize(range(100, 104), 2)
self.assertRaises(Exception, lambda: a.zip(b).count())
# different number of items in one pair
b = self.sc.parallelize(range(100, 106), 2)
self.assertRaises(Exception, lambda: a.zip(b).count())
# same total number of items, but different distributions
a = self.sc.parallelize([2, 3], 2).flatMap(range)
b = self.sc.parallelize([3, 2], 2).flatMap(range)
self.assertEqual(a.count(), b.count())
self.assertRaises(Exception, lambda: a.zip(b).count())
def test_count_approx_distinct(self):
rdd = self.sc.parallelize(xrange(1000))
self.assertTrue(950 < rdd.countApproxDistinct(0.03) < 1050)
self.assertTrue(950 < rdd.map(float).countApproxDistinct(0.03) < 1050)
self.assertTrue(950 < rdd.map(str).countApproxDistinct(0.03) < 1050)
self.assertTrue(950 < rdd.map(lambda x: (x, -x)).countApproxDistinct(0.03) < 1050)
rdd = self.sc.parallelize([i % 20 for i in range(1000)], 7)
self.assertTrue(18 < rdd.countApproxDistinct() < 22)
self.assertTrue(18 < rdd.map(float).countApproxDistinct() < 22)
self.assertTrue(18 < rdd.map(str).countApproxDistinct() < 22)
self.assertTrue(18 < rdd.map(lambda x: (x, -x)).countApproxDistinct() < 22)
self.assertRaises(ValueError, lambda: rdd.countApproxDistinct(0.00000001))
def test_histogram(self):
# empty
rdd = self.sc.parallelize([])
self.assertEqual([0], rdd.histogram([0, 10])[1])
self.assertEqual([0, 0], rdd.histogram([0, 4, 10])[1])
self.assertRaises(ValueError, lambda: rdd.histogram(1))
# out of range
rdd = self.sc.parallelize([10.01, -0.01])
self.assertEqual([0], rdd.histogram([0, 10])[1])
self.assertEqual([0, 0], rdd.histogram((0, 4, 10))[1])
# in range with one bucket
rdd = self.sc.parallelize(range(1, 5))
self.assertEqual([4], rdd.histogram([0, 10])[1])
self.assertEqual([3, 1], rdd.histogram([0, 4, 10])[1])
# in range with one bucket exact match
self.assertEqual([4], rdd.histogram([1, 4])[1])
# out of range with two buckets
rdd = self.sc.parallelize([10.01, -0.01])
self.assertEqual([0, 0], rdd.histogram([0, 5, 10])[1])
# out of range with two uneven buckets
rdd = self.sc.parallelize([10.01, -0.01])
self.assertEqual([0, 0], rdd.histogram([0, 4, 10])[1])
# in range with two buckets
rdd = self.sc.parallelize([1, 2, 3, 5, 6])
self.assertEqual([3, 2], rdd.histogram([0, 5, 10])[1])
        # in range with two buckets and None
rdd = self.sc.parallelize([1, 2, 3, 5, 6, None, float('nan')])
self.assertEqual([3, 2], rdd.histogram([0, 5, 10])[1])
# in range with two uneven buckets
rdd = self.sc.parallelize([1, 2, 3, 5, 6])
self.assertEqual([3, 2], rdd.histogram([0, 5, 11])[1])
# mixed range with two uneven buckets
rdd = self.sc.parallelize([-0.01, 0.0, 1, 2, 3, 5, 6, 11.0, 11.01])
self.assertEqual([4, 3], rdd.histogram([0, 5, 11])[1])
# mixed range with four uneven buckets
rdd = self.sc.parallelize([-0.01, 0.0, 1, 2, 3, 5, 6, 11.01, 12.0, 199.0, 200.0, 200.1])
self.assertEqual([4, 2, 1, 3], rdd.histogram([0.0, 5.0, 11.0, 12.0, 200.0])[1])
# mixed range with uneven buckets and NaN
rdd = self.sc.parallelize([-0.01, 0.0, 1, 2, 3, 5, 6, 11.01, 12.0,
199.0, 200.0, 200.1, None, float('nan')])
self.assertEqual([4, 2, 1, 3], rdd.histogram([0.0, 5.0, 11.0, 12.0, 200.0])[1])
# out of range with infinite buckets
rdd = self.sc.parallelize([10.01, -0.01, float('nan'), float("inf")])
self.assertEqual([1, 2], rdd.histogram([float('-inf'), 0, float('inf')])[1])
# invalid buckets
self.assertRaises(ValueError, lambda: rdd.histogram([]))
self.assertRaises(ValueError, lambda: rdd.histogram([1]))
self.assertRaises(ValueError, lambda: rdd.histogram(0))
self.assertRaises(TypeError, lambda: rdd.histogram({}))
# without buckets
rdd = self.sc.parallelize(range(1, 5))
self.assertEqual(([1, 4], [4]), rdd.histogram(1))
# without buckets single element
rdd = self.sc.parallelize([1])
self.assertEqual(([1, 1], [1]), rdd.histogram(1))
        # without buckets, no range
rdd = self.sc.parallelize([1] * 4)
self.assertEqual(([1, 1], [4]), rdd.histogram(1))
# without buckets basic two
rdd = self.sc.parallelize(range(1, 5))
self.assertEqual(([1, 2.5, 4], [2, 2]), rdd.histogram(2))
# without buckets with more requested than elements
rdd = self.sc.parallelize([1, 2])
buckets = [1 + 0.2 * i for i in range(6)]
hist = [1, 0, 0, 0, 1]
self.assertEqual((buckets, hist), rdd.histogram(5))
# invalid RDDs
rdd = self.sc.parallelize([1, float('inf')])
self.assertRaises(ValueError, lambda: rdd.histogram(2))
rdd = self.sc.parallelize([float('nan')])
self.assertRaises(ValueError, lambda: rdd.histogram(2))
# string
rdd = self.sc.parallelize(["ab", "ac", "b", "bd", "ef"], 2)
self.assertEqual([2, 2], rdd.histogram(["a", "b", "c"])[1])
self.assertEqual((["ab", "ef"], [5]), rdd.histogram(1))
self.assertRaises(TypeError, lambda: rdd.histogram(2))
def test_repartitionAndSortWithinPartitions_asc(self):
rdd = self.sc.parallelize([(0, 5), (3, 8), (2, 6), (0, 8), (3, 8), (1, 3)], 2)
repartitioned = rdd.repartitionAndSortWithinPartitions(2, lambda key: key % 2, True)
partitions = repartitioned.glom().collect()
self.assertEqual(partitions[0], [(0, 5), (0, 8), (2, 6)])
self.assertEqual(partitions[1], [(1, 3), (3, 8), (3, 8)])
def test_repartitionAndSortWithinPartitions_desc(self):
rdd = self.sc.parallelize([(0, 5), (3, 8), (2, 6), (0, 8), (3, 8), (1, 3)], 2)
repartitioned = rdd.repartitionAndSortWithinPartitions(2, lambda key: key % 2, False)
partitions = repartitioned.glom().collect()
self.assertEqual(partitions[0], [(2, 6), (0, 5), (0, 8)])
self.assertEqual(partitions[1], [(3, 8), (3, 8), (1, 3)])
def test_repartition_no_skewed(self):
num_partitions = 20
a = self.sc.parallelize(range(int(1000)), 2)
l = a.repartition(num_partitions).glom().map(len).collect()
zeros = len([x for x in l if x == 0])
self.assertTrue(zeros == 0)
l = a.coalesce(num_partitions, True).glom().map(len).collect()
zeros = len([x for x in l if x == 0])
self.assertTrue(zeros == 0)
def test_repartition_on_textfile(self):
path = os.path.join(SPARK_HOME, "python/test_support/hello/hello.txt")
rdd = self.sc.textFile(path)
result = rdd.repartition(1).collect()
self.assertEqual(u"Hello World!", result[0])
def test_distinct(self):
rdd = self.sc.parallelize((1, 2, 3)*10, 10)
self.assertEqual(rdd.getNumPartitions(), 10)
self.assertEqual(rdd.distinct().count(), 3)
result = rdd.distinct(5)
self.assertEqual(result.getNumPartitions(), 5)
self.assertEqual(result.count(), 3)
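    # With spark.python.worker.memory capped at 1m, grouping ~200k values by key must
    # spill, and the grouped values come back wrapped in shuffle.ExternalListOfList.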
def test_external_group_by_key(self):
self.sc._conf.set("spark.python.worker.memory", "1m")
N = 200001
kv = self.sc.parallelize(xrange(N)).map(lambda x: (x % 3, x))
gkv = kv.groupByKey().cache()
self.assertEqual(3, gkv.count())
filtered = gkv.filter(lambda kv: kv[0] == 1)
self.assertEqual(1, filtered.count())
self.assertEqual([(1, N // 3)], filtered.mapValues(len).collect())
self.assertEqual([(N // 3, N // 3)],
filtered.values().map(lambda x: (len(x), len(list(x)))).collect())
result = filtered.collect()[0][1]
self.assertEqual(N // 3, len(result))
self.assertTrue(isinstance(result.data, shuffle.ExternalListOfList))
def test_sort_on_empty_rdd(self):
self.assertEqual([], self.sc.parallelize(zip([], [])).sortByKey().collect())
def test_sample(self):
rdd = self.sc.parallelize(range(0, 100), 4)
wo = rdd.sample(False, 0.1, 2).collect()
wo_dup = rdd.sample(False, 0.1, 2).collect()
self.assertSetEqual(set(wo), set(wo_dup))
wr = rdd.sample(True, 0.2, 5).collect()
wr_dup = rdd.sample(True, 0.2, 5).collect()
self.assertSetEqual(set(wr), set(wr_dup))
wo_s10 = rdd.sample(False, 0.3, 10).collect()
wo_s20 = rdd.sample(False, 0.3, 20).collect()
self.assertNotEqual(set(wo_s10), set(wo_s20))
wr_s11 = rdd.sample(True, 0.4, 11).collect()
wr_s21 = rdd.sample(True, 0.4, 21).collect()
self.assertNotEqual(set(wr_s11), set(wr_s21))
def test_null_in_rdd(self):
jrdd = self.sc._jvm.PythonUtils.generateRDDWithNull(self.sc._jsc)
rdd = RDD(jrdd, self.sc, UTF8Deserializer())
self.assertEqual([u"a", None, u"b"], rdd.collect())
rdd = RDD(jrdd, self.sc, NoOpSerializer())
self.assertEqual([b"a", None, b"b"], rdd.collect())
def test_multiple_python_java_RDD_conversions(self):
# Regression test for SPARK-5361
data = [
(u'1', {u'director': u'David Lean'}),
(u'2', {u'director': u'Andrew Dominik'})
]
data_rdd = self.sc.parallelize(data)
data_java_rdd = data_rdd._to_java_object_rdd()
data_python_rdd = self.sc._jvm.SerDeUtil.javaToPython(data_java_rdd)
converted_rdd = RDD(data_python_rdd, self.sc)
self.assertEqual(2, converted_rdd.count())
        # a second conversion between Python and Java RDDs used to throw exceptions
data_java_rdd = converted_rdd._to_java_object_rdd()
data_python_rdd = self.sc._jvm.SerDeUtil.javaToPython(data_java_rdd)
converted_rdd = RDD(data_python_rdd, self.sc)
self.assertEqual(2, converted_rdd.count())
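    # Joining two copies of an already-partitioned RDD reuses the partitioning (two
    # stages), while joining with an unpartitioned RDD needs an extra shuffle stage
    # (three stages); the status tracker exposes the stage counts per job group.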
def test_narrow_dependency_in_join(self):
rdd = self.sc.parallelize(range(10)).map(lambda x: (x, x))
parted = rdd.partitionBy(2)
self.assertEqual(2, parted.union(parted).getNumPartitions())
self.assertEqual(rdd.getNumPartitions() + 2, parted.union(rdd).getNumPartitions())
self.assertEqual(rdd.getNumPartitions() + 2, rdd.union(parted).getNumPartitions())
tracker = self.sc.statusTracker()
self.sc.setJobGroup("test1", "test", True)
d = sorted(parted.join(parted).collect())
self.assertEqual(10, len(d))
self.assertEqual((0, (0, 0)), d[0])
jobId = tracker.getJobIdsForGroup("test1")[0]
self.assertEqual(2, len(tracker.getJobInfo(jobId).stageIds))
self.sc.setJobGroup("test2", "test", True)
d = sorted(parted.join(rdd).collect())
self.assertEqual(10, len(d))
self.assertEqual((0, (0, 0)), d[0])
jobId = tracker.getJobIdsForGroup("test2")[0]
self.assertEqual(3, len(tracker.getJobInfo(jobId).stageIds))
self.sc.setJobGroup("test3", "test", True)
d = sorted(parted.cogroup(parted).collect())
self.assertEqual(10, len(d))
self.assertEqual([[0], [0]], list(map(list, d[0][1])))
jobId = tracker.getJobIdsForGroup("test3")[0]
self.assertEqual(2, len(tracker.getJobInfo(jobId).stageIds))
self.sc.setJobGroup("test4", "test", True)
d = sorted(parted.cogroup(rdd).collect())
self.assertEqual(10, len(d))
self.assertEqual([[0], [0]], list(map(list, d[0][1])))
jobId = tracker.getJobIdsForGroup("test4")[0]
self.assertEqual(3, len(tracker.getJobInfo(jobId).stageIds))
# Regression test for SPARK-6294
def test_take_on_jrdd(self):
rdd = self.sc.parallelize(xrange(1 << 20)).map(lambda x: str(x))
rdd._jrdd.first()
def test_sortByKey_uses_all_partitions_not_only_first_and_last(self):
# Regression test for SPARK-5969
seq = [(i * 59 % 101, i) for i in range(101)] # unsorted sequence
rdd = self.sc.parallelize(seq)
for ascending in [True, False]:
sort = rdd.sortByKey(ascending=ascending, numPartitions=5)
self.assertEqual(sort.collect(), sorted(seq, reverse=not ascending))
sizes = sort.glom().map(len).collect()
for size in sizes:
self.assertGreater(size, 0)
def test_pipe_functions(self):
data = ['1', '2', '3']
rdd = self.sc.parallelize(data)
with QuietTest(self.sc):
self.assertEqual([], rdd.pipe('cc').collect())
self.assertRaises(Py4JJavaError, rdd.pipe('cc', checkCode=True).collect)
result = rdd.pipe('cat').collect()
result.sort()
for x, y in zip(data, result):
self.assertEqual(x, y)
self.assertRaises(Py4JJavaError, rdd.pipe('grep 4', checkCode=True).collect)
self.assertEqual([], rdd.pipe('grep 4').collect())
def test_pipe_unicode(self):
# Regression test for SPARK-20947
data = [u'\u6d4b\u8bd5', '1']
rdd = self.sc.parallelize(data)
result = rdd.pipe('cat').collect()
self.assertEqual(data, result)
def test_stopiteration_in_user_code(self):
def stopit(*x):
raise StopIteration()
seq_rdd = self.sc.parallelize(range(10))
keyed_rdd = self.sc.parallelize((x % 2, x) for x in range(10))
msg = "Caught StopIteration thrown from user's code; failing the task"
self.assertRaisesRegexp(Py4JJavaError, msg, seq_rdd.map(stopit).collect)
self.assertRaisesRegexp(Py4JJavaError, msg, seq_rdd.filter(stopit).collect)
self.assertRaisesRegexp(Py4JJavaError, msg, seq_rdd.foreach, stopit)
self.assertRaisesRegexp(Py4JJavaError, msg, seq_rdd.reduce, stopit)
self.assertRaisesRegexp(Py4JJavaError, msg, seq_rdd.fold, 0, stopit)
self.assertRaisesRegexp(Py4JJavaError, msg, seq_rdd.foreach, stopit)
self.assertRaisesRegexp(Py4JJavaError, msg,
seq_rdd.cartesian(seq_rdd).flatMap(stopit).collect)
# these methods call the user function both in the driver and in the executor
# the exception raised is different according to where the StopIteration happens
# RuntimeError is raised if in the driver
# Py4JJavaError is raised if in the executor (wraps the RuntimeError raised in the worker)
self.assertRaisesRegexp((Py4JJavaError, RuntimeError), msg,
keyed_rdd.reduceByKeyLocally, stopit)
self.assertRaisesRegexp((Py4JJavaError, RuntimeError), msg,
seq_rdd.aggregate, 0, stopit, lambda *x: 1)
self.assertRaisesRegexp((Py4JJavaError, RuntimeError), msg,
seq_rdd.aggregate, 0, lambda *x: 1, stopit)
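# With spark.python.profile enabled, every RDD job records a profiler; its stats can
# be printed with show_profiles(), dumped to files with dump_profiles(), or collected
# by a custom profiler class set via profiler_collector.profiler_cls.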
class ProfilerTests(PySparkTestCase):
def setUp(self):
self._old_sys_path = list(sys.path)
class_name = self.__class__.__name__
conf = SparkConf().set("spark.python.profile", "true")
self.sc = SparkContext('local[4]', class_name, conf=conf)
def test_profiler(self):
self.do_computation()
profilers = self.sc.profiler_collector.profilers
self.assertEqual(1, len(profilers))
id, profiler, _ = profilers[0]
stats = profiler.stats()
self.assertTrue(stats is not None)
width, stat_list = stats.get_print_list([])
func_names = [func_name for fname, n, func_name in stat_list]
self.assertTrue("heavy_foo" in func_names)
old_stdout = sys.stdout
sys.stdout = io = StringIO()
self.sc.show_profiles()
self.assertTrue("heavy_foo" in io.getvalue())
sys.stdout = old_stdout
d = tempfile.gettempdir()
self.sc.dump_profiles(d)
self.assertTrue("rdd_%d.pstats" % id in os.listdir(d))
def test_custom_profiler(self):
class TestCustomProfiler(BasicProfiler):
def show(self, id):
self.result = "Custom formatting"
self.sc.profiler_collector.profiler_cls = TestCustomProfiler
self.do_computation()
profilers = self.sc.profiler_collector.profilers
self.assertEqual(1, len(profilers))
_, profiler, _ = profilers[0]
self.assertTrue(isinstance(profiler, TestCustomProfiler))
self.sc.show_profiles()
self.assertEqual("Custom formatting", profiler.result)
def do_computation(self):
def heavy_foo(x):
for i in range(1 << 18):
x = 1
rdd = self.sc.parallelize(range(100))
rdd.foreach(heavy_foo)
class ProfilerTests2(unittest.TestCase):
def test_profiler_disabled(self):
sc = SparkContext(conf=SparkConf().set("spark.python.profile", "false"))
try:
self.assertRaisesRegexp(
RuntimeError,
"'spark.python.profile' configuration must be set",
lambda: sc.show_profiles())
self.assertRaisesRegexp(
RuntimeError,
"'spark.python.profile' configuration must be set",
lambda: sc.dump_profiles("/tmp/abc"))
finally:
sc.stop()
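# The input-format tests read SequenceFiles generated on the JVM side by
# WriteInputFormatTestDataGenerator and cover the old mapred API, the new mapreduce
# API, custom key/value converters, and binary files/records.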
class InputFormatTests(ReusedPySparkTestCase):
@classmethod
def setUpClass(cls):
ReusedPySparkTestCase.setUpClass()
cls.tempdir = tempfile.NamedTemporaryFile(delete=False)
os.unlink(cls.tempdir.name)
cls.sc._jvm.WriteInputFormatTestDataGenerator.generateData(cls.tempdir.name, cls.sc._jsc)
@classmethod
def tearDownClass(cls):
ReusedPySparkTestCase.tearDownClass()
shutil.rmtree(cls.tempdir.name)
@unittest.skipIf(sys.version >= "3", "serialize array of byte")
def test_sequencefiles(self):
basepath = self.tempdir.name
ints = sorted(self.sc.sequenceFile(basepath + "/sftestdata/sfint/",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text").collect())
ei = [(1, u'aa'), (1, u'aa'), (2, u'aa'), (2, u'bb'), (2, u'bb'), (3, u'cc')]
self.assertEqual(ints, ei)
doubles = sorted(self.sc.sequenceFile(basepath + "/sftestdata/sfdouble/",
"org.apache.hadoop.io.DoubleWritable",
"org.apache.hadoop.io.Text").collect())
ed = [(1.0, u'aa'), (1.0, u'aa'), (2.0, u'aa'), (2.0, u'bb'), (2.0, u'bb'), (3.0, u'cc')]
self.assertEqual(doubles, ed)
bytes = sorted(self.sc.sequenceFile(basepath + "/sftestdata/sfbytes/",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.BytesWritable").collect())
ebs = [(1, bytearray('aa', 'utf-8')),
(1, bytearray('aa', 'utf-8')),
(2, bytearray('aa', 'utf-8')),
(2, bytearray('bb', 'utf-8')),
(2, bytearray('bb', 'utf-8')),
(3, bytearray('cc', 'utf-8'))]
self.assertEqual(bytes, ebs)
text = sorted(self.sc.sequenceFile(basepath + "/sftestdata/sftext/",
"org.apache.hadoop.io.Text",
"org.apache.hadoop.io.Text").collect())
et = [(u'1', u'aa'),
(u'1', u'aa'),
(u'2', u'aa'),
(u'2', u'bb'),
(u'2', u'bb'),
(u'3', u'cc')]
self.assertEqual(text, et)
bools = sorted(self.sc.sequenceFile(basepath + "/sftestdata/sfbool/",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.BooleanWritable").collect())
eb = [(1, False), (1, True), (2, False), (2, False), (2, True), (3, True)]
self.assertEqual(bools, eb)
nulls = sorted(self.sc.sequenceFile(basepath + "/sftestdata/sfnull/",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.BooleanWritable").collect())
en = [(1, None), (1, None), (2, None), (2, None), (2, None), (3, None)]
self.assertEqual(nulls, en)
maps = self.sc.sequenceFile(basepath + "/sftestdata/sfmap/",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.MapWritable").collect()
em = [(1, {}),
(1, {3.0: u'bb'}),
(2, {1.0: u'aa'}),
(2, {1.0: u'cc'}),
(3, {2.0: u'dd'})]
for v in maps:
self.assertTrue(v in em)
# arrays get pickled to tuples by default
tuples = sorted(self.sc.sequenceFile(
basepath + "/sftestdata/sfarray/",
"org.apache.hadoop.io.IntWritable",
"org.apache.spark.api.python.DoubleArrayWritable").collect())
et = [(1, ()),
(2, (3.0, 4.0, 5.0)),
(3, (4.0, 5.0, 6.0))]
self.assertEqual(tuples, et)
# with custom converters, primitive arrays can stay as arrays
arrays = sorted(self.sc.sequenceFile(
basepath + "/sftestdata/sfarray/",
"org.apache.hadoop.io.IntWritable",
"org.apache.spark.api.python.DoubleArrayWritable",
valueConverter="org.apache.spark.api.python.WritableToDoubleArrayConverter").collect())
ea = [(1, array('d')),
(2, array('d', [3.0, 4.0, 5.0])),
(3, array('d', [4.0, 5.0, 6.0]))]
self.assertEqual(arrays, ea)
clazz = sorted(self.sc.sequenceFile(basepath + "/sftestdata/sfclass/",
"org.apache.hadoop.io.Text",
"org.apache.spark.api.python.TestWritable").collect())
cname = u'org.apache.spark.api.python.TestWritable'
ec = [(u'1', {u'__class__': cname, u'double': 1.0, u'int': 1, u'str': u'test1'}),
(u'2', {u'__class__': cname, u'double': 2.3, u'int': 2, u'str': u'test2'}),
(u'3', {u'__class__': cname, u'double': 3.1, u'int': 3, u'str': u'test3'}),
(u'4', {u'__class__': cname, u'double': 4.2, u'int': 4, u'str': u'test4'}),
(u'5', {u'__class__': cname, u'double': 5.5, u'int': 5, u'str': u'test56'})]
self.assertEqual(clazz, ec)
unbatched_clazz = sorted(self.sc.sequenceFile(basepath + "/sftestdata/sfclass/",
"org.apache.hadoop.io.Text",
"org.apache.spark.api.python.TestWritable",
).collect())
self.assertEqual(unbatched_clazz, ec)
def test_oldhadoop(self):
basepath = self.tempdir.name
ints = sorted(self.sc.hadoopFile(basepath + "/sftestdata/sfint/",
"org.apache.hadoop.mapred.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text").collect())
ei = [(1, u'aa'), (1, u'aa'), (2, u'aa'), (2, u'bb'), (2, u'bb'), (3, u'cc')]
self.assertEqual(ints, ei)
hellopath = os.path.join(SPARK_HOME, "python/test_support/hello/hello.txt")
oldconf = {"mapreduce.input.fileinputformat.inputdir": hellopath}
hello = self.sc.hadoopRDD("org.apache.hadoop.mapred.TextInputFormat",
"org.apache.hadoop.io.LongWritable",
"org.apache.hadoop.io.Text",
conf=oldconf).collect()
result = [(0, u'Hello World!')]
self.assertEqual(hello, result)
def test_newhadoop(self):
basepath = self.tempdir.name
ints = sorted(self.sc.newAPIHadoopFile(
basepath + "/sftestdata/sfint/",
"org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text").collect())
ei = [(1, u'aa'), (1, u'aa'), (2, u'aa'), (2, u'bb'), (2, u'bb'), (3, u'cc')]
self.assertEqual(ints, ei)
hellopath = os.path.join(SPARK_HOME, "python/test_support/hello/hello.txt")
newconf = {"mapreduce.input.fileinputformat.inputdir": hellopath}
hello = self.sc.newAPIHadoopRDD("org.apache.hadoop.mapreduce.lib.input.TextInputFormat",
"org.apache.hadoop.io.LongWritable",
"org.apache.hadoop.io.Text",
conf=newconf).collect()
result = [(0, u'Hello World!')]
self.assertEqual(hello, result)
def test_newolderror(self):
basepath = self.tempdir.name
self.assertRaises(Exception, lambda: self.sc.hadoopFile(
basepath + "/sftestdata/sfint/",
"org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text"))
self.assertRaises(Exception, lambda: self.sc.newAPIHadoopFile(
basepath + "/sftestdata/sfint/",
"org.apache.hadoop.mapred.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text"))
def test_bad_inputs(self):
basepath = self.tempdir.name
self.assertRaises(Exception, lambda: self.sc.sequenceFile(
basepath + "/sftestdata/sfint/",
"org.apache.hadoop.io.NotValidWritable",
"org.apache.hadoop.io.Text"))
self.assertRaises(Exception, lambda: self.sc.hadoopFile(
basepath + "/sftestdata/sfint/",
"org.apache.hadoop.mapred.NotValidInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text"))
self.assertRaises(Exception, lambda: self.sc.newAPIHadoopFile(
basepath + "/sftestdata/sfint/",
"org.apache.hadoop.mapreduce.lib.input.NotValidInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text"))
def test_converters(self):
# use of custom converters
basepath = self.tempdir.name
maps = sorted(self.sc.sequenceFile(
basepath + "/sftestdata/sfmap/",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.MapWritable",
keyConverter="org.apache.spark.api.python.TestInputKeyConverter",
valueConverter="org.apache.spark.api.python.TestInputValueConverter").collect())
em = [(u'\x01', []),
(u'\x01', [3.0]),
(u'\x02', [1.0]),
(u'\x02', [1.0]),
(u'\x03', [2.0])]
self.assertEqual(maps, em)
def test_binary_files(self):
path = os.path.join(self.tempdir.name, "binaryfiles")
os.mkdir(path)
data = b"short binary data"
with open(os.path.join(path, "part-0000"), 'wb') as f:
f.write(data)
[(p, d)] = self.sc.binaryFiles(path).collect()
self.assertTrue(p.endswith("part-0000"))
self.assertEqual(d, data)
def test_binary_records(self):
path = os.path.join(self.tempdir.name, "binaryrecords")
os.mkdir(path)
with open(os.path.join(path, "part-0000"), 'w') as f:
for i in range(100):
f.write('%04d' % i)
result = self.sc.binaryRecords(path, 4).map(int).collect()
self.assertEqual(list(range(100)), result)
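# The output-format tests mirror InputFormatTests: RDDs are written out through
# saveAsSequenceFile, the old and new Hadoop APIs and custom converters, then read
# back to verify the round trip.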
class OutputFormatTests(ReusedPySparkTestCase):
def setUp(self):
self.tempdir = tempfile.NamedTemporaryFile(delete=False)
os.unlink(self.tempdir.name)
def tearDown(self):
shutil.rmtree(self.tempdir.name, ignore_errors=True)
@unittest.skipIf(sys.version >= "3", "serialize array of byte")
def test_sequencefiles(self):
basepath = self.tempdir.name
ei = [(1, u'aa'), (1, u'aa'), (2, u'aa'), (2, u'bb'), (2, u'bb'), (3, u'cc')]
self.sc.parallelize(ei).saveAsSequenceFile(basepath + "/sfint/")
ints = sorted(self.sc.sequenceFile(basepath + "/sfint/").collect())
self.assertEqual(ints, ei)
ed = [(1.0, u'aa'), (1.0, u'aa'), (2.0, u'aa'), (2.0, u'bb'), (2.0, u'bb'), (3.0, u'cc')]
self.sc.parallelize(ed).saveAsSequenceFile(basepath + "/sfdouble/")
doubles = sorted(self.sc.sequenceFile(basepath + "/sfdouble/").collect())
self.assertEqual(doubles, ed)
ebs = [(1, bytearray(b'\x00\x07spam\x08')), (2, bytearray(b'\x00\x07spam\x08'))]
self.sc.parallelize(ebs).saveAsSequenceFile(basepath + "/sfbytes/")
bytes = sorted(self.sc.sequenceFile(basepath + "/sfbytes/").collect())
self.assertEqual(bytes, ebs)
et = [(u'1', u'aa'),
(u'2', u'bb'),
(u'3', u'cc')]
self.sc.parallelize(et).saveAsSequenceFile(basepath + "/sftext/")
text = sorted(self.sc.sequenceFile(basepath + "/sftext/").collect())
self.assertEqual(text, et)
eb = [(1, False), (1, True), (2, False), (2, False), (2, True), (3, True)]
self.sc.parallelize(eb).saveAsSequenceFile(basepath + "/sfbool/")
bools = sorted(self.sc.sequenceFile(basepath + "/sfbool/").collect())
self.assertEqual(bools, eb)
en = [(1, None), (1, None), (2, None), (2, None), (2, None), (3, None)]
self.sc.parallelize(en).saveAsSequenceFile(basepath + "/sfnull/")
nulls = sorted(self.sc.sequenceFile(basepath + "/sfnull/").collect())
self.assertEqual(nulls, en)
em = [(1, {}),
(1, {3.0: u'bb'}),
(2, {1.0: u'aa'}),
(2, {1.0: u'cc'}),
(3, {2.0: u'dd'})]
self.sc.parallelize(em).saveAsSequenceFile(basepath + "/sfmap/")
maps = self.sc.sequenceFile(basepath + "/sfmap/").collect()
for v in maps:
            self.assertTrue(v in em)
def test_oldhadoop(self):
basepath = self.tempdir.name
dict_data = [(1, {}),
(1, {"row1": 1.0}),
(2, {"row2": 2.0})]
self.sc.parallelize(dict_data).saveAsHadoopFile(
basepath + "/oldhadoop/",
"org.apache.hadoop.mapred.SequenceFileOutputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.MapWritable")
result = self.sc.hadoopFile(
basepath + "/oldhadoop/",
"org.apache.hadoop.mapred.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.MapWritable").collect()
for v in result:
            self.assertTrue(v in dict_data)
conf = {
"mapred.output.format.class": "org.apache.hadoop.mapred.SequenceFileOutputFormat",
"mapreduce.job.output.key.class": "org.apache.hadoop.io.IntWritable",
"mapreduce.job.output.value.class": "org.apache.hadoop.io.MapWritable",
"mapreduce.output.fileoutputformat.outputdir": basepath + "/olddataset/"
}
self.sc.parallelize(dict_data).saveAsHadoopDataset(conf)
input_conf = {"mapreduce.input.fileinputformat.inputdir": basepath + "/olddataset/"}
result = self.sc.hadoopRDD(
"org.apache.hadoop.mapred.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.MapWritable",
conf=input_conf).collect()
for v in result:
            self.assertTrue(v in dict_data)
def test_newhadoop(self):
basepath = self.tempdir.name
data = [(1, ""),
(1, "a"),
(2, "bcdf")]
self.sc.parallelize(data).saveAsNewAPIHadoopFile(
basepath + "/newhadoop/",
"org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text")
result = sorted(self.sc.newAPIHadoopFile(
basepath + "/newhadoop/",
"org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text").collect())
self.assertEqual(result, data)
conf = {
"mapreduce.job.outputformat.class":
"org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat",
"mapreduce.job.output.key.class": "org.apache.hadoop.io.IntWritable",
"mapreduce.job.output.value.class": "org.apache.hadoop.io.Text",
"mapreduce.output.fileoutputformat.outputdir": basepath + "/newdataset/"
}
self.sc.parallelize(data).saveAsNewAPIHadoopDataset(conf)
input_conf = {"mapreduce.input.fileinputformat.inputdir": basepath + "/newdataset/"}
new_dataset = sorted(self.sc.newAPIHadoopRDD(
"org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text",
conf=input_conf).collect())
self.assertEqual(new_dataset, data)
@unittest.skipIf(sys.version >= "3", "serialize of array")
def test_newhadoop_with_array(self):
basepath = self.tempdir.name
# use custom ArrayWritable types and converters to handle arrays
array_data = [(1, array('d')),
(1, array('d', [1.0, 2.0, 3.0])),
(2, array('d', [3.0, 4.0, 5.0]))]
self.sc.parallelize(array_data).saveAsNewAPIHadoopFile(
basepath + "/newhadoop/",
"org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.spark.api.python.DoubleArrayWritable",
valueConverter="org.apache.spark.api.python.DoubleArrayToWritableConverter")
result = sorted(self.sc.newAPIHadoopFile(
basepath + "/newhadoop/",
"org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.spark.api.python.DoubleArrayWritable",
valueConverter="org.apache.spark.api.python.WritableToDoubleArrayConverter").collect())
self.assertEqual(result, array_data)
conf = {
"mapreduce.job.outputformat.class":
"org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat",
"mapreduce.job.output.key.class": "org.apache.hadoop.io.IntWritable",
"mapreduce.job.output.value.class": "org.apache.spark.api.python.DoubleArrayWritable",
"mapreduce.output.fileoutputformat.outputdir": basepath + "/newdataset/"
}
self.sc.parallelize(array_data).saveAsNewAPIHadoopDataset(
conf,
valueConverter="org.apache.spark.api.python.DoubleArrayToWritableConverter")
input_conf = {"mapreduce.input.fileinputformat.inputdir": basepath + "/newdataset/"}
new_dataset = sorted(self.sc.newAPIHadoopRDD(
"org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.spark.api.python.DoubleArrayWritable",
valueConverter="org.apache.spark.api.python.WritableToDoubleArrayConverter",
conf=input_conf).collect())
self.assertEqual(new_dataset, array_data)
def test_newolderror(self):
basepath = self.tempdir.name
rdd = self.sc.parallelize(range(1, 4)).map(lambda x: (x, "a" * x))
self.assertRaises(Exception, lambda: rdd.saveAsHadoopFile(
basepath + "/newolderror/saveAsHadoopFile/",
"org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat"))
self.assertRaises(Exception, lambda: rdd.saveAsNewAPIHadoopFile(
basepath + "/newolderror/saveAsNewAPIHadoopFile/",
"org.apache.hadoop.mapred.SequenceFileOutputFormat"))
def test_bad_inputs(self):
basepath = self.tempdir.name
rdd = self.sc.parallelize(range(1, 4)).map(lambda x: (x, "a" * x))
self.assertRaises(Exception, lambda: rdd.saveAsHadoopFile(
basepath + "/badinputs/saveAsHadoopFile/",
"org.apache.hadoop.mapred.NotValidOutputFormat"))
self.assertRaises(Exception, lambda: rdd.saveAsNewAPIHadoopFile(
basepath + "/badinputs/saveAsNewAPIHadoopFile/",
"org.apache.hadoop.mapreduce.lib.output.NotValidOutputFormat"))
def test_converters(self):
# use of custom converters
basepath = self.tempdir.name
data = [(1, {3.0: u'bb'}),
(2, {1.0: u'aa'}),
(3, {2.0: u'dd'})]
self.sc.parallelize(data).saveAsNewAPIHadoopFile(
basepath + "/converters/",
"org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat",
keyConverter="org.apache.spark.api.python.TestOutputKeyConverter",
valueConverter="org.apache.spark.api.python.TestOutputValueConverter")
converted = sorted(self.sc.sequenceFile(basepath + "/converters/").collect())
expected = [(u'1', 3.0),
(u'2', 1.0),
(u'3', 2.0)]
self.assertEqual(converted, expected)
def test_reserialization(self):
basepath = self.tempdir.name
x = range(1, 5)
y = range(1001, 1005)
data = list(zip(x, y))
rdd = self.sc.parallelize(x).zip(self.sc.parallelize(y))
rdd.saveAsSequenceFile(basepath + "/reserialize/sequence")
result1 = sorted(self.sc.sequenceFile(basepath + "/reserialize/sequence").collect())
self.assertEqual(result1, data)
rdd.saveAsHadoopFile(
basepath + "/reserialize/hadoop",
"org.apache.hadoop.mapred.SequenceFileOutputFormat")
result2 = sorted(self.sc.sequenceFile(basepath + "/reserialize/hadoop").collect())
self.assertEqual(result2, data)
rdd.saveAsNewAPIHadoopFile(
basepath + "/reserialize/newhadoop",
"org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat")
result3 = sorted(self.sc.sequenceFile(basepath + "/reserialize/newhadoop").collect())
self.assertEqual(result3, data)
conf4 = {
"mapred.output.format.class": "org.apache.hadoop.mapred.SequenceFileOutputFormat",
"mapreduce.job.output.key.class": "org.apache.hadoop.io.IntWritable",
"mapreduce.job.output.value.class": "org.apache.hadoop.io.IntWritable",
"mapreduce.output.fileoutputformat.outputdir": basepath + "/reserialize/dataset"}
rdd.saveAsHadoopDataset(conf4)
result4 = sorted(self.sc.sequenceFile(basepath + "/reserialize/dataset").collect())
self.assertEqual(result4, data)
conf5 = {"mapreduce.job.outputformat.class":
"org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat",
"mapreduce.job.output.key.class": "org.apache.hadoop.io.IntWritable",
"mapreduce.job.output.value.class": "org.apache.hadoop.io.IntWritable",
"mapreduce.output.fileoutputformat.outputdir": basepath + "/reserialize/newdataset"
}
rdd.saveAsNewAPIHadoopDataset(conf5)
result5 = sorted(self.sc.sequenceFile(basepath + "/reserialize/newdataset").collect())
self.assertEqual(result5, data)
def test_malformed_RDD(self):
basepath = self.tempdir.name
# non-batch-serialized RDD[[(K, V)]] should be rejected
data = [[(1, "a")], [(2, "aa")], [(3, "aaa")]]
rdd = self.sc.parallelize(data, len(data))
self.assertRaises(Exception, lambda: rdd.saveAsSequenceFile(
basepath + "/malformed/sequence"))
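# DaemonTests launch daemon.py as a subprocess, read the port it reports on stdout,
# and check that it stops accepting worker connections once stdin is closed or a
# SIGTERM arrives.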
class DaemonTests(unittest.TestCase):
def connect(self, port):
from socket import socket, AF_INET, SOCK_STREAM
sock = socket(AF_INET, SOCK_STREAM)
sock.connect(('127.0.0.1', port))
        # send a split index of -1 to shut down the worker
sock.send(b"\xFF\xFF\xFF\xFF")
sock.close()
return True
def do_termination_test(self, terminator):
from subprocess import Popen, PIPE
from errno import ECONNREFUSED
# start daemon
daemon_path = os.path.join(os.path.dirname(__file__), "daemon.py")
python_exec = sys.executable or os.environ.get("PYSPARK_PYTHON")
daemon = Popen([python_exec, daemon_path], stdin=PIPE, stdout=PIPE)
# read the port number
port = read_int(daemon.stdout)
# daemon should accept connections
self.assertTrue(self.connect(port))
# request shutdown
terminator(daemon)
time.sleep(1)
# daemon should no longer accept connections
try:
self.connect(port)
except EnvironmentError as exception:
self.assertEqual(exception.errno, ECONNREFUSED)
else:
self.fail("Expected EnvironmentError to be raised")
def test_termination_stdin(self):
"""Ensure that daemon and workers terminate when stdin is closed."""
self.do_termination_test(lambda daemon: daemon.stdin.close())
def test_termination_sigterm(self):
"""Ensure that daemon and workers terminate on SIGTERM."""
from signal import SIGTERM
self.do_termination_test(lambda daemon: os.kill(daemon.pid, SIGTERM))
class WorkerTests(ReusedPySparkTestCase):
def test_cancel_task(self):
temp = tempfile.NamedTemporaryFile(delete=True)
temp.close()
path = temp.name
def sleep(x):
import os
import time
with open(path, 'w') as f:
f.write("%d %d" % (os.getppid(), os.getpid()))
time.sleep(100)
# start job in background thread
def run():
try:
self.sc.parallelize(range(1), 1).foreach(sleep)
except Exception:
pass
import threading
t = threading.Thread(target=run)
t.daemon = True
t.start()
daemon_pid, worker_pid = 0, 0
while True:
if os.path.exists(path):
with open(path) as f:
data = f.read().split(' ')
daemon_pid, worker_pid = map(int, data)
break
time.sleep(0.1)
# cancel jobs
self.sc.cancelAllJobs()
t.join()
for i in range(50):
try:
os.kill(worker_pid, 0)
time.sleep(0.1)
except OSError:
break # worker was killed
else:
self.fail("worker has not been killed after 5 seconds")
try:
os.kill(daemon_pid, 0)
except OSError:
self.fail("daemon had been killed")
# run a normal job
rdd = self.sc.parallelize(xrange(100), 1)
self.assertEqual(100, rdd.map(str).count())
def test_after_exception(self):
def raise_exception(_):
raise Exception()
rdd = self.sc.parallelize(xrange(100), 1)
with QuietTest(self.sc):
self.assertRaises(Exception, lambda: rdd.foreach(raise_exception))
self.assertEqual(100, rdd.map(str).count())
def test_after_jvm_exception(self):
tempFile = tempfile.NamedTemporaryFile(delete=False)
tempFile.write(b"Hello World!")
tempFile.close()
data = self.sc.textFile(tempFile.name, 1)
filtered_data = data.filter(lambda x: True)
self.assertEqual(1, filtered_data.count())
os.unlink(tempFile.name)
with QuietTest(self.sc):
self.assertRaises(Exception, lambda: filtered_data.count())
rdd = self.sc.parallelize(xrange(100), 1)
self.assertEqual(100, rdd.map(str).count())
def test_accumulator_when_reuse_worker(self):
from pyspark.accumulators import INT_ACCUMULATOR_PARAM
acc1 = self.sc.accumulator(0, INT_ACCUMULATOR_PARAM)
self.sc.parallelize(xrange(100), 20).foreach(lambda x: acc1.add(x))
self.assertEqual(sum(range(100)), acc1.value)
acc2 = self.sc.accumulator(0, INT_ACCUMULATOR_PARAM)
self.sc.parallelize(xrange(100), 20).foreach(lambda x: acc2.add(x))
self.assertEqual(sum(range(100)), acc2.value)
self.assertEqual(sum(range(100)), acc1.value)
def test_reuse_worker_after_take(self):
rdd = self.sc.parallelize(xrange(100000), 1)
self.assertEqual(0, rdd.first())
def count():
try:
rdd.count()
except Exception:
pass
t = threading.Thread(target=count)
t.daemon = True
t.start()
t.join(5)
self.assertTrue(not t.isAlive())
self.assertEqual(100000, rdd.count())
def test_with_different_versions_of_python(self):
rdd = self.sc.parallelize(range(10))
rdd.count()
version = self.sc.pythonVer
self.sc.pythonVer = "2.0"
try:
with QuietTest(self.sc):
self.assertRaises(Py4JJavaError, lambda: rdd.count())
finally:
self.sc.pythonVer = version
class SparkSubmitTests(unittest.TestCase):
def setUp(self):
self.programDir = tempfile.mkdtemp()
tmp_dir = tempfile.gettempdir()
self.sparkSubmit = [
os.path.join(os.environ.get("SPARK_HOME"), "bin", "spark-submit"),
"--conf", "spark.driver.extraJavaOptions=-Djava.io.tmpdir={0}".format(tmp_dir),
"--conf", "spark.executor.extraJavaOptions=-Djava.io.tmpdir={0}".format(tmp_dir),
]
def tearDown(self):
shutil.rmtree(self.programDir)
def createTempFile(self, name, content, dir=None):
"""
Create a temp file with the given name and content and return its path.
Strips leading spaces from content up to the first '|' in each line.
"""
pattern = re.compile(r'^ *\|', re.MULTILINE)
content = re.sub(pattern, '', content.strip())
if dir is None:
path = os.path.join(self.programDir, name)
else:
os.makedirs(os.path.join(self.programDir, dir))
path = os.path.join(self.programDir, dir, name)
with open(path, "w") as f:
f.write(content)
return path
def createFileInZip(self, name, content, ext=".zip", dir=None, zip_name=None):
"""
Create a zip archive containing a file with the given content and return its path.
Strips leading spaces from content up to the first '|' in each line.
"""
pattern = re.compile(r'^ *\|', re.MULTILINE)
content = re.sub(pattern, '', content.strip())
if dir is None:
path = os.path.join(self.programDir, name + ext)
else:
path = os.path.join(self.programDir, dir, zip_name + ext)
zip = zipfile.ZipFile(path, 'w')
zip.writestr(name, content)
zip.close()
return path
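    # For reference, the '^ *\|' stripping used above turns an indented literal
    # such as (hypothetical content)
    #     |def myfunc(x):
    #     |    return x + 1
    # into source whose lines start at column 0, so the tests below can embed
    # readable, indented scripts and modules inline.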
def create_spark_package(self, artifact_name):
group_id, artifact_id, version = artifact_name.split(":")
self.createTempFile("%s-%s.pom" % (artifact_id, version), ("""
|<?xml version="1.0" encoding="UTF-8"?>
|<project xmlns="http://maven.apache.org/POM/4.0.0"
| xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
| xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
| http://maven.apache.org/xsd/maven-4.0.0.xsd">
| <modelVersion>4.0.0</modelVersion>
| <groupId>%s</groupId>
| <artifactId>%s</artifactId>
| <version>%s</version>
|</project>
""" % (group_id, artifact_id, version)).lstrip(),
os.path.join(group_id, artifact_id, version))
self.createFileInZip("%s.py" % artifact_id, """
|def myfunc(x):
| return x + 1
""", ".jar", os.path.join(group_id, artifact_id, version),
"%s-%s" % (artifact_id, version))
def test_single_script(self):
"""Submit and test a single script file"""
script = self.createTempFile("test.py", """
|from pyspark import SparkContext
|
|sc = SparkContext()
|print(sc.parallelize([1, 2, 3]).map(lambda x: x * 2).collect())
""")
proc = subprocess.Popen(self.sparkSubmit + [script], stdout=subprocess.PIPE)
out, err = proc.communicate()
self.assertEqual(0, proc.returncode)
self.assertIn("[2, 4, 6]", out.decode('utf-8'))
def test_script_with_local_functions(self):
"""Submit and test a single script file calling a global function"""
script = self.createTempFile("test.py", """
|from pyspark import SparkContext
|
|def foo(x):
| return x * 3
|
|sc = SparkContext()
|print(sc.parallelize([1, 2, 3]).map(foo).collect())
""")
proc = subprocess.Popen(self.sparkSubmit + [script], stdout=subprocess.PIPE)
out, err = proc.communicate()
self.assertEqual(0, proc.returncode)
self.assertIn("[3, 6, 9]", out.decode('utf-8'))
def test_module_dependency(self):
"""Submit and test a script with a dependency on another module"""
script = self.createTempFile("test.py", """
|from pyspark import SparkContext
|from mylib import myfunc
|
|sc = SparkContext()
|print(sc.parallelize([1, 2, 3]).map(myfunc).collect())
""")
zip = self.createFileInZip("mylib.py", """
|def myfunc(x):
| return x + 1
""")
proc = subprocess.Popen(self.sparkSubmit + ["--py-files", zip, script],
stdout=subprocess.PIPE)
out, err = proc.communicate()
self.assertEqual(0, proc.returncode)
self.assertIn("[2, 3, 4]", out.decode('utf-8'))
def test_module_dependency_on_cluster(self):
"""Submit and test a script with a dependency on another module on a cluster"""
script = self.createTempFile("test.py", """
|from pyspark import SparkContext
|from mylib import myfunc
|
|sc = SparkContext()
|print(sc.parallelize([1, 2, 3]).map(myfunc).collect())
""")
zip = self.createFileInZip("mylib.py", """
|def myfunc(x):
| return x + 1
""")
proc = subprocess.Popen(self.sparkSubmit + ["--py-files", zip, "--master",
"local-cluster[1,1,1024]", script],
stdout=subprocess.PIPE)
out, err = proc.communicate()
self.assertEqual(0, proc.returncode)
self.assertIn("[2, 3, 4]", out.decode('utf-8'))
def test_package_dependency(self):
"""Submit and test a script with a dependency on a Spark Package"""
script = self.createTempFile("test.py", """
|from pyspark import SparkContext
|from mylib import myfunc
|
|sc = SparkContext()
|print(sc.parallelize([1, 2, 3]).map(myfunc).collect())
""")
self.create_spark_package("a:mylib:0.1")
proc = subprocess.Popen(
self.sparkSubmit + ["--packages", "a:mylib:0.1", "--repositories",
"file:" + self.programDir, script],
stdout=subprocess.PIPE)
out, err = proc.communicate()
self.assertEqual(0, proc.returncode)
self.assertIn("[2, 3, 4]", out.decode('utf-8'))
def test_package_dependency_on_cluster(self):
"""Submit and test a script with a dependency on a Spark Package on a cluster"""
script = self.createTempFile("test.py", """
|from pyspark import SparkContext
|from mylib import myfunc
|
|sc = SparkContext()
|print(sc.parallelize([1, 2, 3]).map(myfunc).collect())
""")
self.create_spark_package("a:mylib:0.1")
proc = subprocess.Popen(
self.sparkSubmit + ["--packages", "a:mylib:0.1", "--repositories",
"file:" + self.programDir, "--master", "local-cluster[1,1,1024]",
script],
stdout=subprocess.PIPE)
out, err = proc.communicate()
self.assertEqual(0, proc.returncode)
self.assertIn("[2, 3, 4]", out.decode('utf-8'))
def test_single_script_on_cluster(self):
"""Submit and test a single script on a cluster"""
script = self.createTempFile("test.py", """
|from pyspark import SparkContext
|
|def foo(x):
| return x * 2
|
|sc = SparkContext()
|print(sc.parallelize([1, 2, 3]).map(foo).collect())
""")
# this will fail if you have different spark.executor.memory
# in conf/spark-defaults.conf
proc = subprocess.Popen(
self.sparkSubmit + ["--master", "local-cluster[1,1,1024]", script],
stdout=subprocess.PIPE)
out, err = proc.communicate()
self.assertEqual(0, proc.returncode)
self.assertIn("[2, 4, 6]", out.decode('utf-8'))
def test_user_configuration(self):
"""Make sure user configuration is respected (SPARK-19307)"""
script = self.createTempFile("test.py", """
|from pyspark import SparkConf, SparkContext
|
|conf = SparkConf().set("spark.test_config", "1")
|sc = SparkContext(conf = conf)
|try:
| if sc._conf.get("spark.test_config") != "1":
| raise Exception("Cannot find spark.test_config in SparkContext's conf.")
|finally:
| sc.stop()
""")
proc = subprocess.Popen(
self.sparkSubmit + ["--master", "local", script],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
out, err = proc.communicate()
self.assertEqual(0, proc.returncode, msg="Process failed with error:\n {0}".format(out))
class ContextTests(unittest.TestCase):
def test_failed_sparkcontext_creation(self):
# Regression test for SPARK-1550
self.assertRaises(Exception, lambda: SparkContext("an-invalid-master-name"))
def test_get_or_create(self):
with SparkContext.getOrCreate() as sc:
self.assertTrue(SparkContext.getOrCreate() is sc)
def test_parallelize_eager_cleanup(self):
with SparkContext() as sc:
temp_files = os.listdir(sc._temp_dir)
rdd = sc.parallelize([0, 1, 2])
post_parallalize_temp_files = os.listdir(sc._temp_dir)
self.assertEqual(temp_files, post_parallalize_temp_files)
def test_set_conf(self):
# This is for an internal use case. When there is an existing SparkContext,
# SparkSession's builder needs to set configs into SparkContext's conf.
sc = SparkContext()
sc._conf.set("spark.test.SPARK16224", "SPARK16224")
self.assertEqual(sc._jsc.sc().conf().get("spark.test.SPARK16224"), "SPARK16224")
sc.stop()
def test_stop(self):
sc = SparkContext()
self.assertNotEqual(SparkContext._active_spark_context, None)
sc.stop()
self.assertEqual(SparkContext._active_spark_context, None)
def test_with(self):
with SparkContext() as sc:
self.assertNotEqual(SparkContext._active_spark_context, None)
self.assertEqual(SparkContext._active_spark_context, None)
def test_with_exception(self):
try:
with SparkContext() as sc:
self.assertNotEqual(SparkContext._active_spark_context, None)
raise Exception()
except:
pass
self.assertEqual(SparkContext._active_spark_context, None)
def test_with_stop(self):
with SparkContext() as sc:
self.assertNotEqual(SparkContext._active_spark_context, None)
sc.stop()
self.assertEqual(SparkContext._active_spark_context, None)
def test_progress_api(self):
with SparkContext() as sc:
sc.setJobGroup('test_progress_api', '', True)
rdd = sc.parallelize(range(10)).map(lambda x: time.sleep(100))
def run():
try:
rdd.count()
except Exception:
pass
t = threading.Thread(target=run)
t.daemon = True
t.start()
# wait for scheduler to start
time.sleep(1)
tracker = sc.statusTracker()
jobIds = tracker.getJobIdsForGroup('test_progress_api')
self.assertEqual(1, len(jobIds))
job = tracker.getJobInfo(jobIds[0])
self.assertEqual(1, len(job.stageIds))
stage = tracker.getStageInfo(job.stageIds[0])
self.assertEqual(rdd.getNumPartitions(), stage.numTasks)
sc.cancelAllJobs()
t.join()
# wait for event listener to update the status
time.sleep(1)
job = tracker.getJobInfo(jobIds[0])
self.assertEqual('FAILED', job.status)
self.assertEqual([], tracker.getActiveJobsIds())
self.assertEqual([], tracker.getActiveStageIds())
sc.stop()
def test_startTime(self):
with SparkContext() as sc:
self.assertGreater(sc.startTime, 0)
class ConfTests(unittest.TestCase):
def test_memory_conf(self):
memoryList = ["1T", "1G", "1M", "1024K"]
for memory in memoryList:
sc = SparkContext(conf=SparkConf().set("spark.python.worker.memory", memory))
l = list(range(1024))
random.shuffle(l)
rdd = sc.parallelize(l, 4)
self.assertEqual(sorted(l), rdd.sortBy(lambda x: x).collect())
sc.stop()
class KeywordOnlyTests(unittest.TestCase):
class Wrapped(object):
@keyword_only
def set(self, x=None, y=None):
if "x" in self._input_kwargs:
self._x = self._input_kwargs["x"]
if "y" in self._input_kwargs:
self._y = self._input_kwargs["y"]
return x, y
def test_keywords(self):
w = self.Wrapped()
x, y = w.set(y=1)
self.assertEqual(y, 1)
self.assertEqual(y, w._y)
self.assertIsNone(x)
self.assertFalse(hasattr(w, "_x"))
def test_non_keywords(self):
w = self.Wrapped()
self.assertRaises(TypeError, lambda: w.set(0, y=1))
def test_kwarg_ownership(self):
# test _input_kwargs is owned by each class instance and not a shared static variable
class Setter(object):
@keyword_only
def set(self, x=None, other=None, other_x=None):
if "other" in self._input_kwargs:
self._input_kwargs["other"].set(x=self._input_kwargs["other_x"])
self._x = self._input_kwargs["x"]
a = Setter()
b = Setter()
a.set(x=1, other=b, other_x=2)
self.assertEqual(a._x, 1)
self.assertEqual(b._x, 2)
class UtilTests(PySparkTestCase):
def test_py4j_exception_message(self):
from pyspark.util import _exception_message
with self.assertRaises(Py4JJavaError) as context:
# This attempts java.lang.String(null) which throws an NPE.
self.sc._jvm.java.lang.String(None)
self.assertTrue('NullPointerException' in _exception_message(context.exception))
def test_parsing_version_string(self):
from pyspark.util import VersionUtils
self.assertRaises(ValueError, lambda: VersionUtils.majorMinorVersion("abced"))
@unittest.skipIf(not _have_scipy, "SciPy not installed")
class SciPyTests(PySparkTestCase):
"""General PySpark tests that depend on scipy """
def test_serialize(self):
from scipy.special import gammaln
x = range(1, 5)
expected = list(map(gammaln, x))
observed = self.sc.parallelize(x).map(gammaln).collect()
self.assertEqual(expected, observed)
@unittest.skipIf(not _have_numpy, "NumPy not installed")
class NumPyTests(PySparkTestCase):
"""General PySpark tests that depend on numpy """
def test_statcounter_array(self):
x = self.sc.parallelize([np.array([1.0, 1.0]), np.array([2.0, 2.0]), np.array([3.0, 3.0])])
s = x.stats()
self.assertSequenceEqual([2.0, 2.0], s.mean().tolist())
self.assertSequenceEqual([1.0, 1.0], s.min().tolist())
self.assertSequenceEqual([3.0, 3.0], s.max().tolist())
self.assertSequenceEqual([1.0, 1.0], s.sampleStdev().tolist())
stats_dict = s.asDict()
self.assertEqual(3, stats_dict['count'])
self.assertSequenceEqual([2.0, 2.0], stats_dict['mean'].tolist())
self.assertSequenceEqual([1.0, 1.0], stats_dict['min'].tolist())
self.assertSequenceEqual([3.0, 3.0], stats_dict['max'].tolist())
self.assertSequenceEqual([6.0, 6.0], stats_dict['sum'].tolist())
self.assertSequenceEqual([1.0, 1.0], stats_dict['stdev'].tolist())
self.assertSequenceEqual([1.0, 1.0], stats_dict['variance'].tolist())
stats_sample_dict = s.asDict(sample=True)
self.assertEqual(3, stats_dict['count'])
self.assertSequenceEqual([2.0, 2.0], stats_sample_dict['mean'].tolist())
self.assertSequenceEqual([1.0, 1.0], stats_sample_dict['min'].tolist())
self.assertSequenceEqual([3.0, 3.0], stats_sample_dict['max'].tolist())
self.assertSequenceEqual([6.0, 6.0], stats_sample_dict['sum'].tolist())
self.assertSequenceEqual(
[0.816496580927726, 0.816496580927726], stats_sample_dict['stdev'].tolist())
self.assertSequenceEqual(
[0.6666666666666666, 0.6666666666666666], stats_sample_dict['variance'].tolist())
if __name__ == "__main__":
from pyspark.tests import *
if xmlrunner:
unittest.main(testRunner=xmlrunner.XMLTestRunner(output='target/test-reports'), verbosity=2)
else:
unittest.main(verbosity=2)
|
{
"content_hash": "80f5fe2e15b21eb4eb2b436f969829d4",
"timestamp": "",
"source": "github",
"line_count": 2463,
"max_line_length": 100,
"avg_line_length": 40.92367032074706,
"alnum_prop": 0.5883625179820428,
"repo_name": "rekhajoshm/spark",
"id": "8ac1df52fc59777a06fdbeb36910c61102e3b043",
"size": "101580",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "python/pyspark/tests.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "35042"
},
{
"name": "Batchfile",
"bytes": "30285"
},
{
"name": "C",
"bytes": "1493"
},
{
"name": "CSS",
"bytes": "23956"
},
{
"name": "Dockerfile",
"bytes": "8266"
},
{
"name": "HTML",
"bytes": "65141"
},
{
"name": "HiveQL",
"bytes": "1823426"
},
{
"name": "Java",
"bytes": "3367711"
},
{
"name": "JavaScript",
"bytes": "144886"
},
{
"name": "Makefile",
"bytes": "9395"
},
{
"name": "PLpgSQL",
"bytes": "163419"
},
{
"name": "PowerShell",
"bytes": "3756"
},
{
"name": "Python",
"bytes": "2742712"
},
{
"name": "R",
"bytes": "1138726"
},
{
"name": "Roff",
"bytes": "20534"
},
{
"name": "SQLPL",
"bytes": "30039"
},
{
"name": "Scala",
"bytes": "27391370"
},
{
"name": "Shell",
"bytes": "191511"
},
{
"name": "Thrift",
"bytes": "33605"
},
{
"name": "q",
"bytes": "146878"
}
],
"symlink_target": ""
}
|
import pytest
from multidl.downloaders.local_file_downloader import LocalFileDownloader
@pytest.mark.parametrize('url, expected', [
('file:///dir/file1.txt', 'file1.txt'),
('file:///file2.txt', 'file2.txt'),
])
def test_get_file_name(tmpdir, url, expected):
downloader = LocalFileDownloader(url, str(tmpdir))
assert downloader.get_file_name() == expected
|
{
"content_hash": "367b61bd82d889609601ac95c64f6ea0",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 73,
"avg_line_length": 31.166666666666668,
"alnum_prop": 0.7058823529411765,
"repo_name": "julienc91/multidl",
"id": "1c456f4aeae0fd4829f9f0818a661eb6433b1c8e",
"size": "399",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/downloaders/test_local_file_downloader.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "31002"
}
],
"symlink_target": ""
}
|
import os.path
import sys, imp, getopt
from migrate.versioning import api
from config import SQLALCHEMY_DATABASE_URI
from config import SQLALCHEMY_MIGRATE_REPO
from model import *
def build():
db.create_all()
if not os.path.exists(SQLALCHEMY_MIGRATE_REPO):
api.create(SQLALCHEMY_MIGRATE_REPO, 'database repository')
api.version_control(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO)
else:
api.version_control(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO, api.version(SQLALCHEMY_MIGRATE_REPO))
group = Group(id=1, name="All Lights", parent_id=None)
first_user = User(username="Pi",password="pilight")
settings = [Setting(name='city', value='Seattle')]
db.session.add(first_user)
for s in settings:
db.session.add(s)
db.session.add(group)
db.session.commit()
def upgrade():
api.upgrade(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO)
v = api.db_version(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO)
print('Current database version: ' + str(v))
def migrate():
v = api.db_version(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO)
migration = SQLALCHEMY_MIGRATE_REPO + ('/versions/%03d_migration.py' % (v+1))
tmp_module = imp.new_module('old_model')
old_model = api.create_model(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO)
exec(old_model, tmp_module.__dict__)
script = api.make_update_script_for_model(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO, tmp_module.meta, db.metadata)
    with open(migration, "wt") as f:
        f.write(script)
api.upgrade(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO)
v = api.db_version(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO)
print('New migration saved as ' + migration)
print('Current database version: ' + str(v))
def usage():
print "Database management script"
print "Valid arguments:"
print "--build to build the database for the first time"
print "--upgrade to change the database to include new tables"
print "--migrate to move data from one database to the new empty one"
def main(argv):
try:
opts, args = getopt.getopt(argv,"hbum",["help","build","upgrade","migrate"])
except getopt.GetoptError as err:
        print(str(err))
usage()
exit(2)
for o,a in opts:
if o in ("-h", "--help"):
usage()
sys.exit(0)
elif o in ("-b", "--build"):
build()
elif o in ("-u", "--upgrade"):
upgrade()
elif o in ("-m", "--migrate"):
migrate()
else:
assert False, "Unhandled Option!"
if __name__ == "__main__":
main(sys.argv[1:])
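# Example invocations (options as parsed in main() above):
#
#   python db_management.py --build     # create tables, seed defaults, put the DB under version control
#   python db_management.py --upgrade   # apply pending migrations from SQLALCHEMY_MIGRATE_REPO
#   python db_management.py --migrate   # write a new migration script from model changes, then apply it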
|
{
"content_hash": "b299605d9c23bb12eec74beacbd9e0a3",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 125,
"avg_line_length": 37.041666666666664,
"alnum_prop": 0.6527934008248969,
"repo_name": "rettigs/cs-senior-capstone",
"id": "391d76988a12f30c0d7e91c52c2e74b25e7f7e52",
"size": "2689",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "web/db_management.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "2336"
},
{
"name": "BitBake",
"bytes": "21050"
},
{
"name": "C",
"bytes": "243551"
},
{
"name": "C++",
"bytes": "9235"
},
{
"name": "CSS",
"bytes": "2130"
},
{
"name": "HTML",
"bytes": "15677"
},
{
"name": "JavaScript",
"bytes": "7126"
},
{
"name": "PHP",
"bytes": "1121"
},
{
"name": "Python",
"bytes": "31918"
},
{
"name": "Shell",
"bytes": "988"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django import forms
from django.core.exceptions import ValidationError
from django.db import transaction
from django.utils.text import slugify
from django.utils.translation import (
ugettext,
ugettext_lazy as _,
)
from cms.admin.forms import AddPageForm
from cms.plugin_pool import plugin_pool
from cms.utils import get_current_site, permissions
from cms.utils.page import get_available_slug
from cms.utils.page_permissions import (
user_can_add_page,
user_can_add_subpage,
)
from cms.utils.conf import get_cms_setting
from cms.utils.urlutils import static_with_version
try:
# djangocms_text_ckeditor is not guaranteed to be available
from djangocms_text_ckeditor.widgets import TextEditorWidget
text_widget = TextEditorWidget
except ImportError:
text_widget = forms.Textarea
class SlugWidget(forms.widgets.TextInput):
"""
    Special widget for the slug field; it requires the Title field to be present.
    Adds the JS that slugifies the title.
"""
class Media:
js = (
'admin/js/urlify.js',
static_with_version('cms/js/dist/bundle.forms.slugwidget.min.js'),
)
class CreateCMSPageForm(AddPageForm):
page = None
sub_page_form = False
# Field overrides
menu_title = None
page_title = None
meta_description = None
content = forms.CharField(
label=_(u'Content'), widget=text_widget, required=False,
help_text=_(u"Optional. If supplied, will be automatically added "
u"within a new text plugin.")
)
class Media:
js = (
            # This simply adds some JS for hiding/showing the content field
            # based on the selected page type.
'cms/js/widgets/wizard.pagetypeselect.js',
)
def __init__(self, *args, **kwargs):
self._site = get_current_site()
self._user = self.user
self._language = self.language_code
super(CreateCMSPageForm, self).__init__(*args, **kwargs)
self.fields['title'].help_text = _(u"Provide a title for the new page.")
self.fields['slug'].required = False
self.fields['slug'].widget = SlugWidget()
self.fields['slug'].help_text = _(u"Leave empty for automatic slug, or override as required.")
@staticmethod
def get_placeholder(page, slot=None):
"""
Returns the named placeholder or, if no «slot» provided, the first
editable, non-static placeholder or None.
"""
placeholders = page.get_placeholders()
if slot:
placeholders = placeholders.filter(slot=slot)
for ph in placeholders:
if not ph.is_static and ph.is_editable:
return ph
return None
def clean(self):
"""
        Validates that either the slug is provided, or that slugification from
        `title` produces a valid slug.
"""
data = self.cleaned_data
if self._errors:
return data
slug = data.get('slug') or slugify(data['title'])
parent_node = data.get('parent_node')
if parent_node:
base = parent_node.item.get_path(self._language)
path = u'%s/%s' % (base, slug) if base else slug
else:
base = ''
path = slug
data['slug'] = get_available_slug(self._site, path, self._language, suffix=None)
data['path'] = '%s/%s' % (base, data['slug']) if base else data['slug']
if not data['slug']:
raise forms.ValidationError("Please provide a valid slug.")
return data
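    # Worked example of the slug/path handling above (hypothetical values): with
    # a parent page whose path is 'about' and a title of 'Our Team', slugify()
    # yields 'our-team', get_available_slug() deduplicates it for the site, and
    # the stored path becomes 'about/our-team'; without a parent, the path is
    # simply the slug itself.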
def clean_parent_node(self):
# Check to see if this user has permissions to make this page. We've
# already checked this when producing a list of wizard entries, but this
# is to prevent people from possible form-hacking.
if self.page and self.sub_page_form:
# User is adding a page which will be a direct
# child of the current page.
parent_page = self.page
elif self.page and self.page.parent_page:
# User is adding a page which will be a right
# sibling to the current page.
parent_page = self.page.parent_page
else:
parent_page = None
if parent_page:
has_perm = user_can_add_subpage(self.user, target=parent_page)
else:
has_perm = user_can_add_page(self.user)
if not has_perm:
message = ugettext('You don\'t have the permissions required to add a page.')
raise ValidationError(message)
return parent_page.node if parent_page else None
def clean_slug(self):
# Don't let the PageAddForm validate this
# on the wizard it is not a required field
return self.cleaned_data['slug']
def get_template(self):
return get_cms_setting('PAGE_WIZARD_DEFAULT_TEMPLATE')
@transaction.atomic
def save(self, **kwargs):
from cms.api import add_plugin
new_page = super(CreateCMSPageForm, self).save(**kwargs)
if self.cleaned_data.get("page_type"):
return new_page
parent_node = self.cleaned_data.get('parent_node')
if parent_node and new_page.parent_page.is_page_type:
# the new page was created under a page-type page
# set the new page as a page-type too
new_page.update(
draft_only=True,
is_page_type=True,
in_navigation=False,
)
# If the user provided content, then use that instead.
content = self.cleaned_data.get('content')
plugin_type = get_cms_setting('PAGE_WIZARD_CONTENT_PLUGIN')
plugin_body = get_cms_setting('PAGE_WIZARD_CONTENT_PLUGIN_BODY')
slot = get_cms_setting('PAGE_WIZARD_CONTENT_PLACEHOLDER')
if plugin_type in plugin_pool.plugins and plugin_body:
if content and permissions.has_plugin_permission(
self.user, plugin_type, "add"):
new_page.rescan_placeholders()
placeholder = self.get_placeholder(new_page, slot=slot)
if placeholder:
opts = {
'placeholder': placeholder,
'plugin_type': plugin_type,
'language': self.language_code,
plugin_body: content,
}
add_plugin(**opts)
return new_page
class CreateCMSSubPageForm(CreateCMSPageForm):
sub_page_form = True
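# For reference, save() above is driven entirely by CMS settings read through
# get_cms_setting(); in a project's settings.py these are spelled with the CMS_
# prefix (the values below are illustrative, not necessarily the defaults):
#
#   CMS_PAGE_WIZARD_CONTENT_PLUGIN = 'TextPlugin'
#   CMS_PAGE_WIZARD_CONTENT_PLUGIN_BODY = 'body'
#   CMS_PAGE_WIZARD_CONTENT_PLACEHOLDER = 'content'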
|
{
"content_hash": "c1da4a1f274486bc9cf8febc96d3f4d4",
"timestamp": "",
"source": "github",
"line_count": 198,
"max_line_length": 102,
"avg_line_length": 33.515151515151516,
"alnum_prop": 0.6050331525015069,
"repo_name": "benzkji/django-cms",
"id": "77c76ed74673dca9ae8fcc72b8eb69e07568ba32",
"size": "6663",
"binary": false,
"copies": "4",
"ref": "refs/heads/develop",
"path": "cms/forms/wizards.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "132972"
},
{
"name": "HTML",
"bytes": "201324"
},
{
"name": "JavaScript",
"bytes": "1238070"
},
{
"name": "Python",
"bytes": "2356866"
},
{
"name": "Shell",
"bytes": "447"
}
],
"symlink_target": ""
}
|
import pytest
asdf = pytest.importorskip('asdf')
import datetime
import numpy as np
from astropy import time
from asdf import AsdfFile, yamlutil, tagged
from asdf.tests import helpers
import asdf.schema as asdf_schema
def _flatten_combiners(schema):
newschema = dict()
def add_entry(path, schema, combiner):
# TODO: Simplify?
cursor = newschema
for i in range(len(path)):
part = path[i]
if isinstance(part, int):
cursor = cursor.setdefault('items', [])
while len(cursor) <= part:
cursor.append({})
cursor = cursor[part]
elif part == 'items':
cursor = cursor.setdefault('items', dict())
else:
cursor = cursor.setdefault('properties', dict())
if i < len(path) - 1 and isinstance(path[i+1], int):
cursor = cursor.setdefault(part, [])
else:
cursor = cursor.setdefault(part, dict())
cursor.update(schema)
def test_time(tmpdir):
time_array = time.Time(
np.arange(100), format="unix")
tree = {
'large_time_array': time_array
}
helpers.assert_roundtrip_tree(tree, tmpdir)
def test_time_with_location(tmpdir):
# See https://github.com/spacetelescope/asdf/issues/341
from astropy import units as u
from astropy.coordinates.earth import EarthLocation
location = EarthLocation(x=[1,2]*u.m, y=[3,4]*u.m, z=[5,6]*u.m)
t = time.Time([1,2], location=location, format='cxcsec')
tree = {'time': t}
helpers.assert_roundtrip_tree(tree, tmpdir)
def test_time_with_location_1_0_0(tmpdir):
from astropy import units as u
from astropy.coordinates.earth import EarthLocation
location = EarthLocation(x=6378100*u.m, y=0*u.m, z=0*u.m)
t = time.Time('J2000.000', location=location, format='jyear_str')
tree = {'time': t}
# The version refers to ASDF Standard 1.0.0, which includes time-1.0.0
helpers.assert_roundtrip_tree(tree, tmpdir, init_options={"version": "1.0.0"})
def test_isot(tmpdir):
isot = time.Time('2000-01-01T00:00:00.000')
tree = {
'time': isot
}
helpers.assert_roundtrip_tree(tree, tmpdir)
ff = asdf.AsdfFile(tree)
tree = yamlutil.custom_tree_to_tagged_tree(ff.tree, ff)
if isinstance(tree['time'], str):
assert str(tree['time']) == isot.value
elif isinstance(tree['time'], dict):
assert str(tree['time']['value']) == isot.value
assert str(tree['time']['base_format']) == "isot"
else:
assert False
def test_isot_array(tmpdir):
tree = {
'time': time.Time(['2001-01-02T12:34:56', '2001-02-03T00:01:02'])
}
helpers.assert_roundtrip_tree(tree, tmpdir)
def test_time_tag():
schema = asdf_schema.load_schema(
'http://stsci.edu/schemas/asdf/time/time-1.1.0',
resolve_references=True)
schema = _flatten_combiners(schema)
date = time.Time(datetime.datetime.now())
tree = {'date': date}
asdf = AsdfFile(tree=tree)
instance = yamlutil.custom_tree_to_tagged_tree(tree['date'], asdf)
asdf_schema.validate(instance, schema=schema)
tag = 'tag:stsci.edu:asdf/time/time-1.1.0'
date = tagged.tag_object(tag, date)
tree = {'date': date}
asdf = AsdfFile(tree=tree)
instance = yamlutil.custom_tree_to_tagged_tree(tree['date'], asdf)
asdf_schema.validate(instance, schema=schema)
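# Rough equivalent of the helpers.assert_roundtrip_tree() calls above (a
# simplified sketch, not the helper's actual implementation): write the tree to
# an ASDF file and read it back with plain asdf calls.
#
#   ff = AsdfFile({'time': time.Time('2000-01-01T00:00:00.000')})
#   ff.write_to('roundtrip.asdf')
#   with asdf.open('roundtrip.asdf') as rt:
#       assert rt.tree['time'] == ff.tree['time']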
|
{
"content_hash": "d19703f5e8bae5050d6926409d71f0c0",
"timestamp": "",
"source": "github",
"line_count": 128,
"max_line_length": 82,
"avg_line_length": 27.328125,
"alnum_prop": 0.6106346483704974,
"repo_name": "StuartLittlefair/astropy",
"id": "1709994b4acb0946c515907d48d97299300f10dc",
"size": "3563",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "astropy/io/misc/asdf/tags/time/tests/test_time.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "11034753"
},
{
"name": "C++",
"bytes": "47001"
},
{
"name": "Cython",
"bytes": "78631"
},
{
"name": "HTML",
"bytes": "1172"
},
{
"name": "Lex",
"bytes": "183333"
},
{
"name": "M4",
"bytes": "18757"
},
{
"name": "Makefile",
"bytes": "52457"
},
{
"name": "Python",
"bytes": "12224600"
},
{
"name": "Shell",
"bytes": "17024"
},
{
"name": "TeX",
"bytes": "853"
}
],
"symlink_target": ""
}
|
from RawInstreamFile import RawInstreamFile
from MidiFileParser import MidiFileParser
class MidiInFile:
"""
Parses a midi file, and triggers the midi events on the outStream
object.
Get example data from a minimal midi file, generated with cubase.
>>> test_file = 'C:/Documents and Settings/maxm/Desktop/temp/midi/src/midi/tests/midifiles/minimal-cubase-type0.mid'
Do parsing, and generate events with MidiToText,
so we can see what a minimal midi file contains
>>> from MidiToText import MidiToText
>>> midi_in = MidiInFile(MidiToText(), test_file)
>>> midi_in.read()
format: 0, nTracks: 1, division: 480
----------------------------------
<BLANKLINE>
Start - track #0
sequence_name: Type 0
tempo: 500000
time_signature: 4 2 24 8
note_on - ch:00, note:48, vel:64 time:0
note_off - ch:00, note:48, vel:40 time:480
End of track
<BLANKLINE>
End of file
"""
def __init__(self, outStream, infile):
# these could also have been mixins, would that be better? Nah!
self.raw_in = RawInstreamFile(infile)
self.parser = MidiFileParser(self.raw_in, outStream)
def read(self):
"Start parsing the file"
p = self.parser
p.parseMThdChunk()
p.parseMTrkChunks()
def setData(self, data=''):
"Sets the data from a plain string"
self.raw_in.setData(data)
|
{
"content_hash": "17e7681a1caab1905690e62faf9d4aab",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 120,
"avg_line_length": 27.547169811320753,
"alnum_prop": 0.6198630136986302,
"repo_name": "fretsonfire/fof-python",
"id": "88f5a7be94584e5df2246e45203c8d5208d5a250",
"size": "1490",
"binary": false,
"copies": "13",
"ref": "refs/heads/master",
"path": "src/midi/MidiInFile.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "687720"
},
{
"name": "Shell",
"bytes": "673"
}
],
"symlink_target": ""
}
|
from sys import version_info
if version_info >= (2,6,0):
def swig_import_helper():
from os.path import dirname
import imp
fp = None
try:
fp, pathname, description = imp.find_module('_csc', [dirname(__file__)])
except ImportError:
import _csc
return _csc
if fp is not None:
try:
_mod = imp.load_module('_csc', fp, pathname, description)
finally:
fp.close()
return _mod
_csc = swig_import_helper()
del swig_import_helper
else:
import _csc
del version_info
try:
_swig_property = property
except NameError:
pass # Python < 2.2 doesn't have 'property'.
def _swig_setattr_nondynamic(self,class_type,name,value,static=1):
if (name == "thisown"): return self.this.own(value)
if (name == "this"):
if type(value).__name__ == 'SwigPyObject':
self.__dict__[name] = value
return
method = class_type.__swig_setmethods__.get(name,None)
if method: return method(self,value)
if (not static):
self.__dict__[name] = value
else:
raise AttributeError("You cannot add attributes to %s" % self)
def _swig_setattr(self,class_type,name,value):
return _swig_setattr_nondynamic(self,class_type,name,value,0)
def _swig_getattr(self,class_type,name):
if (name == "thisown"): return self.this.own()
method = class_type.__swig_getmethods__.get(name,None)
if method: return method(self)
raise AttributeError(name)
def _swig_repr(self):
try: strthis = "proxy of " + self.this.__repr__()
except: strthis = ""
return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
try:
_object = object
_newclass = 1
except AttributeError:
class _object : pass
_newclass = 0
def csc_matmat_pass1(*args):
"""
csc_matmat_pass1(int n_row, int n_col, int Ap, int Ai, int Bp, int Bi,
int Cp)
"""
return _csc.csc_matmat_pass1(*args)
# This file is compatible with both classic and new-style classes.
def csc_diagonal(*args):
"""
csc_diagonal(int n_row, int n_col, int Ap, int Aj, signed char Ax,
signed char Yx)
csc_diagonal(int n_row, int n_col, int Ap, int Aj, unsigned char Ax,
unsigned char Yx)
csc_diagonal(int n_row, int n_col, int Ap, int Aj, short Ax, short Yx)
csc_diagonal(int n_row, int n_col, int Ap, int Aj, unsigned short Ax,
unsigned short Yx)
csc_diagonal(int n_row, int n_col, int Ap, int Aj, int Ax, int Yx)
csc_diagonal(int n_row, int n_col, int Ap, int Aj, unsigned int Ax,
unsigned int Yx)
csc_diagonal(int n_row, int n_col, int Ap, int Aj, long long Ax,
long long Yx)
csc_diagonal(int n_row, int n_col, int Ap, int Aj, unsigned long long Ax,
unsigned long long Yx)
csc_diagonal(int n_row, int n_col, int Ap, int Aj, float Ax, float Yx)
csc_diagonal(int n_row, int n_col, int Ap, int Aj, double Ax, double Yx)
csc_diagonal(int n_row, int n_col, int Ap, int Aj, long double Ax,
long double Yx)
csc_diagonal(int n_row, int n_col, int Ap, int Aj, npy_cfloat_wrapper Ax,
npy_cfloat_wrapper Yx)
csc_diagonal(int n_row, int n_col, int Ap, int Aj, npy_cdouble_wrapper Ax,
npy_cdouble_wrapper Yx)
csc_diagonal(int n_row, int n_col, int Ap, int Aj, npy_clongdouble_wrapper Ax,
npy_clongdouble_wrapper Yx)
"""
return _csc.csc_diagonal(*args)
def csc_tocsr(*args):
"""
csc_tocsr(int n_row, int n_col, int Ap, int Ai, signed char Ax,
int Bp, int Bj, signed char Bx)
csc_tocsr(int n_row, int n_col, int Ap, int Ai, unsigned char Ax,
int Bp, int Bj, unsigned char Bx)
csc_tocsr(int n_row, int n_col, int Ap, int Ai, short Ax, int Bp,
int Bj, short Bx)
csc_tocsr(int n_row, int n_col, int Ap, int Ai, unsigned short Ax,
int Bp, int Bj, unsigned short Bx)
csc_tocsr(int n_row, int n_col, int Ap, int Ai, int Ax, int Bp,
int Bj, int Bx)
csc_tocsr(int n_row, int n_col, int Ap, int Ai, unsigned int Ax,
int Bp, int Bj, unsigned int Bx)
csc_tocsr(int n_row, int n_col, int Ap, int Ai, long long Ax,
int Bp, int Bj, long long Bx)
csc_tocsr(int n_row, int n_col, int Ap, int Ai, unsigned long long Ax,
int Bp, int Bj, unsigned long long Bx)
csc_tocsr(int n_row, int n_col, int Ap, int Ai, float Ax, int Bp,
int Bj, float Bx)
csc_tocsr(int n_row, int n_col, int Ap, int Ai, double Ax, int Bp,
int Bj, double Bx)
csc_tocsr(int n_row, int n_col, int Ap, int Ai, long double Ax,
int Bp, int Bj, long double Bx)
csc_tocsr(int n_row, int n_col, int Ap, int Ai, npy_cfloat_wrapper Ax,
int Bp, int Bj, npy_cfloat_wrapper Bx)
csc_tocsr(int n_row, int n_col, int Ap, int Ai, npy_cdouble_wrapper Ax,
int Bp, int Bj, npy_cdouble_wrapper Bx)
csc_tocsr(int n_row, int n_col, int Ap, int Ai, npy_clongdouble_wrapper Ax,
int Bp, int Bj, npy_clongdouble_wrapper Bx)
"""
return _csc.csc_tocsr(*args)
def csc_matmat_pass2(*args):
"""
csc_matmat_pass2(int n_row, int n_col, int Ap, int Ai, signed char Ax,
int Bp, int Bi, signed char Bx, int Cp, int Ci,
signed char Cx)
csc_matmat_pass2(int n_row, int n_col, int Ap, int Ai, unsigned char Ax,
int Bp, int Bi, unsigned char Bx, int Cp,
int Ci, unsigned char Cx)
csc_matmat_pass2(int n_row, int n_col, int Ap, int Ai, short Ax, int Bp,
int Bi, short Bx, int Cp, int Ci, short Cx)
csc_matmat_pass2(int n_row, int n_col, int Ap, int Ai, unsigned short Ax,
int Bp, int Bi, unsigned short Bx, int Cp,
int Ci, unsigned short Cx)
csc_matmat_pass2(int n_row, int n_col, int Ap, int Ai, int Ax, int Bp,
int Bi, int Bx, int Cp, int Ci, int Cx)
csc_matmat_pass2(int n_row, int n_col, int Ap, int Ai, unsigned int Ax,
int Bp, int Bi, unsigned int Bx, int Cp,
int Ci, unsigned int Cx)
csc_matmat_pass2(int n_row, int n_col, int Ap, int Ai, long long Ax,
int Bp, int Bi, long long Bx, int Cp, int Ci,
long long Cx)
csc_matmat_pass2(int n_row, int n_col, int Ap, int Ai, unsigned long long Ax,
int Bp, int Bi, unsigned long long Bx,
int Cp, int Ci, unsigned long long Cx)
csc_matmat_pass2(int n_row, int n_col, int Ap, int Ai, float Ax, int Bp,
int Bi, float Bx, int Cp, int Ci, float Cx)
csc_matmat_pass2(int n_row, int n_col, int Ap, int Ai, double Ax, int Bp,
int Bi, double Bx, int Cp, int Ci, double Cx)
csc_matmat_pass2(int n_row, int n_col, int Ap, int Ai, long double Ax,
int Bp, int Bi, long double Bx, int Cp, int Ci,
long double Cx)
csc_matmat_pass2(int n_row, int n_col, int Ap, int Ai, npy_cfloat_wrapper Ax,
int Bp, int Bi, npy_cfloat_wrapper Bx,
int Cp, int Ci, npy_cfloat_wrapper Cx)
csc_matmat_pass2(int n_row, int n_col, int Ap, int Ai, npy_cdouble_wrapper Ax,
int Bp, int Bi, npy_cdouble_wrapper Bx,
int Cp, int Ci, npy_cdouble_wrapper Cx)
csc_matmat_pass2(int n_row, int n_col, int Ap, int Ai, npy_clongdouble_wrapper Ax,
int Bp, int Bi, npy_clongdouble_wrapper Bx,
int Cp, int Ci, npy_clongdouble_wrapper Cx)
"""
return _csc.csc_matmat_pass2(*args)
def csc_matvec(*args):
"""
csc_matvec(int n_row, int n_col, int Ap, int Ai, signed char Ax,
signed char Xx, signed char Yx)
csc_matvec(int n_row, int n_col, int Ap, int Ai, unsigned char Ax,
unsigned char Xx, unsigned char Yx)
csc_matvec(int n_row, int n_col, int Ap, int Ai, short Ax, short Xx,
short Yx)
csc_matvec(int n_row, int n_col, int Ap, int Ai, unsigned short Ax,
unsigned short Xx, unsigned short Yx)
csc_matvec(int n_row, int n_col, int Ap, int Ai, int Ax, int Xx,
int Yx)
csc_matvec(int n_row, int n_col, int Ap, int Ai, unsigned int Ax,
unsigned int Xx, unsigned int Yx)
csc_matvec(int n_row, int n_col, int Ap, int Ai, long long Ax,
long long Xx, long long Yx)
csc_matvec(int n_row, int n_col, int Ap, int Ai, unsigned long long Ax,
unsigned long long Xx, unsigned long long Yx)
csc_matvec(int n_row, int n_col, int Ap, int Ai, float Ax, float Xx,
float Yx)
csc_matvec(int n_row, int n_col, int Ap, int Ai, double Ax, double Xx,
double Yx)
csc_matvec(int n_row, int n_col, int Ap, int Ai, long double Ax,
long double Xx, long double Yx)
csc_matvec(int n_row, int n_col, int Ap, int Ai, npy_cfloat_wrapper Ax,
npy_cfloat_wrapper Xx, npy_cfloat_wrapper Yx)
csc_matvec(int n_row, int n_col, int Ap, int Ai, npy_cdouble_wrapper Ax,
npy_cdouble_wrapper Xx, npy_cdouble_wrapper Yx)
csc_matvec(int n_row, int n_col, int Ap, int Ai, npy_clongdouble_wrapper Ax,
npy_clongdouble_wrapper Xx, npy_clongdouble_wrapper Yx)
"""
return _csc.csc_matvec(*args)
def csc_matvecs(*args):
"""
csc_matvecs(int n_row, int n_col, int n_vecs, int Ap, int Ai, signed char Ax,
signed char Xx, signed char Yx)
csc_matvecs(int n_row, int n_col, int n_vecs, int Ap, int Ai, unsigned char Ax,
unsigned char Xx, unsigned char Yx)
csc_matvecs(int n_row, int n_col, int n_vecs, int Ap, int Ai, short Ax,
short Xx, short Yx)
csc_matvecs(int n_row, int n_col, int n_vecs, int Ap, int Ai, unsigned short Ax,
unsigned short Xx, unsigned short Yx)
csc_matvecs(int n_row, int n_col, int n_vecs, int Ap, int Ai, int Ax,
int Xx, int Yx)
csc_matvecs(int n_row, int n_col, int n_vecs, int Ap, int Ai, unsigned int Ax,
unsigned int Xx, unsigned int Yx)
csc_matvecs(int n_row, int n_col, int n_vecs, int Ap, int Ai, long long Ax,
long long Xx, long long Yx)
csc_matvecs(int n_row, int n_col, int n_vecs, int Ap, int Ai, unsigned long long Ax,
unsigned long long Xx,
unsigned long long Yx)
csc_matvecs(int n_row, int n_col, int n_vecs, int Ap, int Ai, float Ax,
float Xx, float Yx)
csc_matvecs(int n_row, int n_col, int n_vecs, int Ap, int Ai, double Ax,
double Xx, double Yx)
csc_matvecs(int n_row, int n_col, int n_vecs, int Ap, int Ai, long double Ax,
long double Xx, long double Yx)
csc_matvecs(int n_row, int n_col, int n_vecs, int Ap, int Ai, npy_cfloat_wrapper Ax,
npy_cfloat_wrapper Xx,
npy_cfloat_wrapper Yx)
csc_matvecs(int n_row, int n_col, int n_vecs, int Ap, int Ai, npy_cdouble_wrapper Ax,
npy_cdouble_wrapper Xx,
npy_cdouble_wrapper Yx)
csc_matvecs(int n_row, int n_col, int n_vecs, int Ap, int Ai, npy_clongdouble_wrapper Ax,
npy_clongdouble_wrapper Xx,
npy_clongdouble_wrapper Yx)
"""
return _csc.csc_matvecs(*args)
def csc_elmul_csc(*args):
"""
csc_elmul_csc(int n_row, int n_col, int Ap, int Ai, signed char Ax,
int Bp, int Bi, signed char Bx, int Cp, int Ci,
signed char Cx)
csc_elmul_csc(int n_row, int n_col, int Ap, int Ai, unsigned char Ax,
int Bp, int Bi, unsigned char Bx, int Cp,
int Ci, unsigned char Cx)
csc_elmul_csc(int n_row, int n_col, int Ap, int Ai, short Ax, int Bp,
int Bi, short Bx, int Cp, int Ci, short Cx)
csc_elmul_csc(int n_row, int n_col, int Ap, int Ai, unsigned short Ax,
int Bp, int Bi, unsigned short Bx, int Cp,
int Ci, unsigned short Cx)
csc_elmul_csc(int n_row, int n_col, int Ap, int Ai, int Ax, int Bp,
int Bi, int Bx, int Cp, int Ci, int Cx)
csc_elmul_csc(int n_row, int n_col, int Ap, int Ai, unsigned int Ax,
int Bp, int Bi, unsigned int Bx, int Cp,
int Ci, unsigned int Cx)
csc_elmul_csc(int n_row, int n_col, int Ap, int Ai, long long Ax,
int Bp, int Bi, long long Bx, int Cp, int Ci,
long long Cx)
csc_elmul_csc(int n_row, int n_col, int Ap, int Ai, unsigned long long Ax,
int Bp, int Bi, unsigned long long Bx,
int Cp, int Ci, unsigned long long Cx)
csc_elmul_csc(int n_row, int n_col, int Ap, int Ai, float Ax, int Bp,
int Bi, float Bx, int Cp, int Ci, float Cx)
csc_elmul_csc(int n_row, int n_col, int Ap, int Ai, double Ax, int Bp,
int Bi, double Bx, int Cp, int Ci, double Cx)
csc_elmul_csc(int n_row, int n_col, int Ap, int Ai, long double Ax,
int Bp, int Bi, long double Bx, int Cp, int Ci,
long double Cx)
csc_elmul_csc(int n_row, int n_col, int Ap, int Ai, npy_cfloat_wrapper Ax,
int Bp, int Bi, npy_cfloat_wrapper Bx,
int Cp, int Ci, npy_cfloat_wrapper Cx)
csc_elmul_csc(int n_row, int n_col, int Ap, int Ai, npy_cdouble_wrapper Ax,
int Bp, int Bi, npy_cdouble_wrapper Bx,
int Cp, int Ci, npy_cdouble_wrapper Cx)
csc_elmul_csc(int n_row, int n_col, int Ap, int Ai, npy_clongdouble_wrapper Ax,
int Bp, int Bi, npy_clongdouble_wrapper Bx,
int Cp, int Ci, npy_clongdouble_wrapper Cx)
"""
return _csc.csc_elmul_csc(*args)
def csc_eldiv_csc(*args):
"""
csc_eldiv_csc(int n_row, int n_col, int Ap, int Ai, signed char Ax,
int Bp, int Bi, signed char Bx, int Cp, int Ci,
signed char Cx)
csc_eldiv_csc(int n_row, int n_col, int Ap, int Ai, unsigned char Ax,
int Bp, int Bi, unsigned char Bx, int Cp,
int Ci, unsigned char Cx)
csc_eldiv_csc(int n_row, int n_col, int Ap, int Ai, short Ax, int Bp,
int Bi, short Bx, int Cp, int Ci, short Cx)
csc_eldiv_csc(int n_row, int n_col, int Ap, int Ai, unsigned short Ax,
int Bp, int Bi, unsigned short Bx, int Cp,
int Ci, unsigned short Cx)
csc_eldiv_csc(int n_row, int n_col, int Ap, int Ai, int Ax, int Bp,
int Bi, int Bx, int Cp, int Ci, int Cx)
csc_eldiv_csc(int n_row, int n_col, int Ap, int Ai, unsigned int Ax,
int Bp, int Bi, unsigned int Bx, int Cp,
int Ci, unsigned int Cx)
csc_eldiv_csc(int n_row, int n_col, int Ap, int Ai, long long Ax,
int Bp, int Bi, long long Bx, int Cp, int Ci,
long long Cx)
csc_eldiv_csc(int n_row, int n_col, int Ap, int Ai, unsigned long long Ax,
int Bp, int Bi, unsigned long long Bx,
int Cp, int Ci, unsigned long long Cx)
csc_eldiv_csc(int n_row, int n_col, int Ap, int Ai, float Ax, int Bp,
int Bi, float Bx, int Cp, int Ci, float Cx)
csc_eldiv_csc(int n_row, int n_col, int Ap, int Ai, double Ax, int Bp,
int Bi, double Bx, int Cp, int Ci, double Cx)
csc_eldiv_csc(int n_row, int n_col, int Ap, int Ai, long double Ax,
int Bp, int Bi, long double Bx, int Cp, int Ci,
long double Cx)
csc_eldiv_csc(int n_row, int n_col, int Ap, int Ai, npy_cfloat_wrapper Ax,
int Bp, int Bi, npy_cfloat_wrapper Bx,
int Cp, int Ci, npy_cfloat_wrapper Cx)
csc_eldiv_csc(int n_row, int n_col, int Ap, int Ai, npy_cdouble_wrapper Ax,
int Bp, int Bi, npy_cdouble_wrapper Bx,
int Cp, int Ci, npy_cdouble_wrapper Cx)
csc_eldiv_csc(int n_row, int n_col, int Ap, int Ai, npy_clongdouble_wrapper Ax,
int Bp, int Bi, npy_clongdouble_wrapper Bx,
int Cp, int Ci, npy_clongdouble_wrapper Cx)
"""
return _csc.csc_eldiv_csc(*args)
def csc_plus_csc(*args):
"""
csc_plus_csc(int n_row, int n_col, int Ap, int Ai, signed char Ax,
int Bp, int Bi, signed char Bx, int Cp, int Ci,
signed char Cx)
csc_plus_csc(int n_row, int n_col, int Ap, int Ai, unsigned char Ax,
int Bp, int Bi, unsigned char Bx, int Cp,
int Ci, unsigned char Cx)
csc_plus_csc(int n_row, int n_col, int Ap, int Ai, short Ax, int Bp,
int Bi, short Bx, int Cp, int Ci, short Cx)
csc_plus_csc(int n_row, int n_col, int Ap, int Ai, unsigned short Ax,
int Bp, int Bi, unsigned short Bx, int Cp,
int Ci, unsigned short Cx)
csc_plus_csc(int n_row, int n_col, int Ap, int Ai, int Ax, int Bp,
int Bi, int Bx, int Cp, int Ci, int Cx)
csc_plus_csc(int n_row, int n_col, int Ap, int Ai, unsigned int Ax,
int Bp, int Bi, unsigned int Bx, int Cp,
int Ci, unsigned int Cx)
csc_plus_csc(int n_row, int n_col, int Ap, int Ai, long long Ax,
int Bp, int Bi, long long Bx, int Cp, int Ci,
long long Cx)
csc_plus_csc(int n_row, int n_col, int Ap, int Ai, unsigned long long Ax,
int Bp, int Bi, unsigned long long Bx,
int Cp, int Ci, unsigned long long Cx)
csc_plus_csc(int n_row, int n_col, int Ap, int Ai, float Ax, int Bp,
int Bi, float Bx, int Cp, int Ci, float Cx)
csc_plus_csc(int n_row, int n_col, int Ap, int Ai, double Ax, int Bp,
int Bi, double Bx, int Cp, int Ci, double Cx)
csc_plus_csc(int n_row, int n_col, int Ap, int Ai, long double Ax,
int Bp, int Bi, long double Bx, int Cp, int Ci,
long double Cx)
csc_plus_csc(int n_row, int n_col, int Ap, int Ai, npy_cfloat_wrapper Ax,
int Bp, int Bi, npy_cfloat_wrapper Bx,
int Cp, int Ci, npy_cfloat_wrapper Cx)
csc_plus_csc(int n_row, int n_col, int Ap, int Ai, npy_cdouble_wrapper Ax,
int Bp, int Bi, npy_cdouble_wrapper Bx,
int Cp, int Ci, npy_cdouble_wrapper Cx)
csc_plus_csc(int n_row, int n_col, int Ap, int Ai, npy_clongdouble_wrapper Ax,
int Bp, int Bi, npy_clongdouble_wrapper Bx,
int Cp, int Ci, npy_clongdouble_wrapper Cx)
"""
return _csc.csc_plus_csc(*args)
def csc_minus_csc(*args):
"""
csc_minus_csc(int n_row, int n_col, int Ap, int Ai, signed char Ax,
int Bp, int Bi, signed char Bx, int Cp, int Ci,
signed char Cx)
csc_minus_csc(int n_row, int n_col, int Ap, int Ai, unsigned char Ax,
int Bp, int Bi, unsigned char Bx, int Cp,
int Ci, unsigned char Cx)
csc_minus_csc(int n_row, int n_col, int Ap, int Ai, short Ax, int Bp,
int Bi, short Bx, int Cp, int Ci, short Cx)
csc_minus_csc(int n_row, int n_col, int Ap, int Ai, unsigned short Ax,
int Bp, int Bi, unsigned short Bx, int Cp,
int Ci, unsigned short Cx)
csc_minus_csc(int n_row, int n_col, int Ap, int Ai, int Ax, int Bp,
int Bi, int Bx, int Cp, int Ci, int Cx)
csc_minus_csc(int n_row, int n_col, int Ap, int Ai, unsigned int Ax,
int Bp, int Bi, unsigned int Bx, int Cp,
int Ci, unsigned int Cx)
csc_minus_csc(int n_row, int n_col, int Ap, int Ai, long long Ax,
int Bp, int Bi, long long Bx, int Cp, int Ci,
long long Cx)
csc_minus_csc(int n_row, int n_col, int Ap, int Ai, unsigned long long Ax,
int Bp, int Bi, unsigned long long Bx,
int Cp, int Ci, unsigned long long Cx)
csc_minus_csc(int n_row, int n_col, int Ap, int Ai, float Ax, int Bp,
int Bi, float Bx, int Cp, int Ci, float Cx)
csc_minus_csc(int n_row, int n_col, int Ap, int Ai, double Ax, int Bp,
int Bi, double Bx, int Cp, int Ci, double Cx)
csc_minus_csc(int n_row, int n_col, int Ap, int Ai, long double Ax,
int Bp, int Bi, long double Bx, int Cp, int Ci,
long double Cx)
csc_minus_csc(int n_row, int n_col, int Ap, int Ai, npy_cfloat_wrapper Ax,
int Bp, int Bi, npy_cfloat_wrapper Bx,
int Cp, int Ci, npy_cfloat_wrapper Cx)
csc_minus_csc(int n_row, int n_col, int Ap, int Ai, npy_cdouble_wrapper Ax,
int Bp, int Bi, npy_cdouble_wrapper Bx,
int Cp, int Ci, npy_cdouble_wrapper Cx)
csc_minus_csc(int n_row, int n_col, int Ap, int Ai, npy_clongdouble_wrapper Ax,
int Bp, int Bi, npy_clongdouble_wrapper Bx,
int Cp, int Ci, npy_clongdouble_wrapper Cx)
"""
return _csc.csc_minus_csc(*args)
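# A minimal usage sketch (illustrative, not part of the generated wrapper):
# driving the double overload of csc_matvec() with the indptr/indices/data
# arrays of a scipy CSC matrix. Argument names follow the docstrings
# (Ap = indptr, Ai = indices, Ax = data); Yx must be pre-allocated and
# zero-filled because the kernel accumulates into it.
if __name__ == "__main__":
    import numpy as np
    from scipy.sparse import csc_matrix

    A = csc_matrix(np.array([[1.0, 0.0], [0.0, 2.0]]))
    x = np.array([3.0, 4.0])
    y = np.zeros(A.shape[0])
    csc_matvec(A.shape[0], A.shape[1], A.indptr, A.indices, A.data, x, y)
    print(y)  # expected: [3. 8.], i.e. A @ x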
|
{
"content_hash": "17cf20587ac338e1e9ca302aca1af1a8",
"timestamp": "",
"source": "github",
"line_count": 418,
"max_line_length": 94,
"avg_line_length": 47.49760765550239,
"alnum_prop": 0.6119673617407072,
"repo_name": "teoliphant/scipy",
"id": "a1ed0645828af5e22c7e785706d60f27d1b4fa62",
"size": "20059",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "scipy/sparse/sparsetools/csc.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "11530901"
},
{
"name": "C++",
"bytes": "7695320"
},
{
"name": "FORTRAN",
"bytes": "5898903"
},
{
"name": "Matlab",
"bytes": "1861"
},
{
"name": "Objective-C",
"bytes": "137083"
},
{
"name": "Python",
"bytes": "5863600"
},
{
"name": "Shell",
"bytes": "1793"
}
],
"symlink_target": ""
}
|
"""Support for LCN switches."""
import pypck
from homeassistant.components.switch import SwitchDevice
from homeassistant.const import CONF_ADDRESS
from . import LcnDevice
from .const import CONF_CONNECTIONS, CONF_OUTPUT, DATA_LCN, OUTPUT_PORTS
from .helpers import get_connection
async def async_setup_platform(
hass, hass_config, async_add_entities, discovery_info=None
):
"""Set up the LCN switch platform."""
if discovery_info is None:
return
devices = []
for config in discovery_info:
address, connection_id = config[CONF_ADDRESS]
addr = pypck.lcn_addr.LcnAddr(*address)
connections = hass.data[DATA_LCN][CONF_CONNECTIONS]
connection = get_connection(connections, connection_id)
address_connection = connection.get_address_conn(addr)
if config[CONF_OUTPUT] in OUTPUT_PORTS:
device = LcnOutputSwitch(config, address_connection)
else: # in RELAY_PORTS
device = LcnRelaySwitch(config, address_connection)
devices.append(device)
async_add_entities(devices)
class LcnOutputSwitch(LcnDevice, SwitchDevice):
"""Representation of a LCN switch for output ports."""
def __init__(self, config, address_connection):
"""Initialize the LCN switch."""
super().__init__(config, address_connection)
self.output = pypck.lcn_defs.OutputPort[config[CONF_OUTPUT]]
self._is_on = None
async def async_added_to_hass(self):
"""Run when entity about to be added to hass."""
await super().async_added_to_hass()
await self.address_connection.activate_status_request_handler(self.output)
@property
def is_on(self):
"""Return True if entity is on."""
return self._is_on
async def async_turn_on(self, **kwargs):
"""Turn the entity on."""
self._is_on = True
self.address_connection.dim_output(self.output.value, 100, 0)
await self.async_update_ha_state()
async def async_turn_off(self, **kwargs):
"""Turn the entity off."""
self._is_on = False
self.address_connection.dim_output(self.output.value, 0, 0)
await self.async_update_ha_state()
def input_received(self, input_obj):
"""Set switch state when LCN input object (command) is received."""
if (
not isinstance(input_obj, pypck.inputs.ModStatusOutput)
or input_obj.get_output_id() != self.output.value
):
return
self._is_on = input_obj.get_percent() > 0
self.async_schedule_update_ha_state()
class LcnRelaySwitch(LcnDevice, SwitchDevice):
"""Representation of a LCN switch for relay ports."""
def __init__(self, config, address_connection):
"""Initialize the LCN switch."""
super().__init__(config, address_connection)
self.output = pypck.lcn_defs.RelayPort[config[CONF_OUTPUT]]
self._is_on = None
async def async_added_to_hass(self):
"""Run when entity about to be added to hass."""
await super().async_added_to_hass()
await self.address_connection.activate_status_request_handler(self.output)
@property
def is_on(self):
"""Return True if entity is on."""
return self._is_on
async def async_turn_on(self, **kwargs):
"""Turn the entity on."""
self._is_on = True
states = [pypck.lcn_defs.RelayStateModifier.NOCHANGE] * 8
states[self.output.value] = pypck.lcn_defs.RelayStateModifier.ON
self.address_connection.control_relays(states)
await self.async_update_ha_state()
async def async_turn_off(self, **kwargs):
"""Turn the entity off."""
self._is_on = False
states = [pypck.lcn_defs.RelayStateModifier.NOCHANGE] * 8
states[self.output.value] = pypck.lcn_defs.RelayStateModifier.OFF
self.address_connection.control_relays(states)
await self.async_update_ha_state()
def input_received(self, input_obj):
"""Set switch state when LCN input object (command) is received."""
if not isinstance(input_obj, pypck.inputs.ModStatusRelays):
return
self._is_on = input_obj.get_state(self.output.value)
self.async_schedule_update_ha_state()
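# Shape of one discovery_info entry consumed by async_setup_platform() above
# (a hypothetical example; the real entries come from the LCN component's config
# handling): CONF_ADDRESS carries ((seg_id, addr_id, is_group), connection_id)
# and CONF_OUTPUT names either an output port or a relay port.
#
#   {CONF_ADDRESS: ((0, 7, False), 'pchk'), CONF_OUTPUT: 'OUTPUT1'}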
|
{
"content_hash": "9f56f6977905808c9cede55fee686f91",
"timestamp": "",
"source": "github",
"line_count": 127,
"max_line_length": 82,
"avg_line_length": 33.78740157480315,
"alnum_prop": 0.6439058494523421,
"repo_name": "Teagan42/home-assistant",
"id": "f19548c4aee235ab61bcfd9a615bb0beec9e9a31",
"size": "4291",
"binary": false,
"copies": "7",
"ref": "refs/heads/dev",
"path": "homeassistant/components/lcn/switch.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "19774313"
},
{
"name": "Shell",
"bytes": "6846"
}
],
"symlink_target": ""
}
|
"""distutils.msvc9compiler
Contains MSVCCompiler, an implementation of the abstract CCompiler class
for Microsoft Visual Studio 2008.
The module is compatible with VS 2005 and VS 2008. You can find legacy support
for older versions of VS in distutils.msvccompiler.
"""
# Written by Perry Stoll
# hacked by Robin Becker and Thomas Heller to do a better job of
# finding DevStudio (through the registry)
# ported to VS2005 and VS 2008 by Christian Heimes
import os
import subprocess
import sys
import re
import warnings
from .errors import (
DistutilsExecError,
DistutilsPlatformError,
CompileError,
LibError,
LinkError,
)
from .ccompiler import CCompiler, gen_lib_options
from ._log import log
from .util import get_platform
import winreg
warnings.warn(
"msvc9compiler is deprecated and slated to be removed "
"in the future. Please discontinue use or file an issue "
"with pypa/distutils describing your use case.",
DeprecationWarning,
)
RegOpenKeyEx = winreg.OpenKeyEx
RegEnumKey = winreg.EnumKey
RegEnumValue = winreg.EnumValue
RegError = winreg.error
HKEYS = (
winreg.HKEY_USERS,
winreg.HKEY_CURRENT_USER,
winreg.HKEY_LOCAL_MACHINE,
winreg.HKEY_CLASSES_ROOT,
)
NATIVE_WIN64 = sys.platform == 'win32' and sys.maxsize > 2**32
if NATIVE_WIN64:
# Visual C++ is a 32-bit application, so we need to look in
# the corresponding registry branch, if we're running a
# 64-bit Python on Win64
VS_BASE = r"Software\Wow6432Node\Microsoft\VisualStudio\%0.1f"
WINSDK_BASE = r"Software\Wow6432Node\Microsoft\Microsoft SDKs\Windows"
NET_BASE = r"Software\Wow6432Node\Microsoft\.NETFramework"
else:
VS_BASE = r"Software\Microsoft\VisualStudio\%0.1f"
WINSDK_BASE = r"Software\Microsoft\Microsoft SDKs\Windows"
NET_BASE = r"Software\Microsoft\.NETFramework"
# A map from get_platform() return values to values accepted by
# 'vcvarsall.bat'. Note a cross-compile may combine these (e.g., 'x86_amd64'
# is the param to cross-compile on x86 targeting amd64).
PLAT_TO_VCVARS = {
'win32': 'x86',
'win-amd64': 'amd64',
}
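# Illustrative example (not part of the original module): a native build on
# win-amd64 passes PLAT_TO_VCVARS['win-amd64'] == 'amd64' to vcvarsall.bat,
# while cross-compiling from a 32-bit Python to win-amd64 joins the two
# entries into 'x86' + '_' + 'amd64' == 'x86_amd64', exactly as done in
# MSVCCompiler.initialize() below.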
class Reg:
"""Helper class to read values from the registry"""
def get_value(cls, path, key):
for base in HKEYS:
d = cls.read_values(base, path)
if d and key in d:
return d[key]
raise KeyError(key)
get_value = classmethod(get_value)
def read_keys(cls, base, key):
"""Return list of registry keys."""
try:
handle = RegOpenKeyEx(base, key)
except RegError:
return None
L = []
i = 0
while True:
try:
k = RegEnumKey(handle, i)
except RegError:
break
L.append(k)
i += 1
return L
read_keys = classmethod(read_keys)
def read_values(cls, base, key):
"""Return dict of registry keys and values.
All names are converted to lowercase.
"""
try:
handle = RegOpenKeyEx(base, key)
except RegError:
return None
d = {}
i = 0
while True:
try:
name, value, type = RegEnumValue(handle, i)
except RegError:
break
name = name.lower()
d[cls.convert_mbcs(name)] = cls.convert_mbcs(value)
i += 1
return d
read_values = classmethod(read_values)
def convert_mbcs(s):
dec = getattr(s, "decode", None)
if dec is not None:
try:
s = dec("mbcs")
except UnicodeError:
pass
return s
convert_mbcs = staticmethod(convert_mbcs)
class MacroExpander:
def __init__(self, version):
self.macros = {}
self.vsbase = VS_BASE % version
self.load_macros(version)
def set_macro(self, macro, path, key):
self.macros["$(%s)" % macro] = Reg.get_value(path, key)
def load_macros(self, version):
self.set_macro("VCInstallDir", self.vsbase + r"\Setup\VC", "productdir")
self.set_macro("VSInstallDir", self.vsbase + r"\Setup\VS", "productdir")
self.set_macro("FrameworkDir", NET_BASE, "installroot")
try:
if version >= 8.0:
self.set_macro("FrameworkSDKDir", NET_BASE, "sdkinstallrootv2.0")
else:
raise KeyError("sdkinstallrootv2.0")
except KeyError:
raise DistutilsPlatformError(
"""Python was built with Visual Studio 2008;
extensions must be built with a compiler that can generate compatible binaries.
Visual Studio 2008 was not found on this system. If you have Cygwin installed,
you can try compiling with MingW32, by passing "-c mingw32" to setup.py."""
)
if version >= 9.0:
self.set_macro("FrameworkVersion", self.vsbase, "clr version")
self.set_macro("WindowsSdkDir", WINSDK_BASE, "currentinstallfolder")
else:
p = r"Software\Microsoft\NET Framework Setup\Product"
for base in HKEYS:
try:
h = RegOpenKeyEx(base, p)
except RegError:
continue
key = RegEnumKey(h, 0)
d = Reg.get_value(base, r"{}\{}".format(p, key))
self.macros["$(FrameworkVersion)"] = d["version"]
def sub(self, s):
for k, v in self.macros.items():
s = s.replace(k, v)
return s
def get_build_version():
"""Return the version of MSVC that was used to build Python.
For Python 2.3 and up, the version number is included in
sys.version. For earlier versions, assume the compiler is MSVC 6.
"""
prefix = "MSC v."
i = sys.version.find(prefix)
if i == -1:
return 6
i = i + len(prefix)
s, rest = sys.version[i:].split(" ", 1)
majorVersion = int(s[:-2]) - 6
if majorVersion >= 13:
# v13 was skipped and should be v14
majorVersion += 1
minorVersion = int(s[2:3]) / 10.0
# I don't think paths are affected by minor version in version 6
if majorVersion == 6:
minorVersion = 0
if majorVersion >= 6:
return majorVersion + minorVersion
# else we don't know what version of the compiler this is
return None
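# Illustrative example (not part of the original module): for a sys.version
# ending in '[MSC v.1500 64 bit (AMD64)]', the search yields s == '1500', so
# majorVersion == 15 - 6 == 9 and minorVersion == 0.0; get_build_version()
# therefore returns 9.0, which corresponds to Visual Studio 2008.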
def normalize_and_reduce_paths(paths):
"""Return a list of normalized paths with duplicates removed.
The current order of paths is maintained.
"""
# Paths are normalized so things like: /a and /a/ aren't both preserved.
reduced_paths = []
for p in paths:
np = os.path.normpath(p)
# XXX(nnorwitz): O(n**2), if reduced_paths gets long perhaps use a set.
if np not in reduced_paths:
reduced_paths.append(np)
return reduced_paths
def removeDuplicates(variable):
"""Remove duplicate values of an environment variable."""
oldList = variable.split(os.pathsep)
newList = []
for i in oldList:
if i not in newList:
newList.append(i)
newVariable = os.pathsep.join(newList)
return newVariable
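# Illustrative example (not part of the original module): on Windows, where
# os.pathsep == ';', removeDuplicates('C:\\VC\\bin;C:\\SDK;C:\\VC\\bin')
# returns 'C:\\VC\\bin;C:\\SDK'.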
def find_vcvarsall(version):
"""Find the vcvarsall.bat file
At first it tries to find the productdir of VS 2008 in the registry. If
that fails it falls back to the VS90COMNTOOLS env var.
"""
vsbase = VS_BASE % version
try:
productdir = Reg.get_value(r"%s\Setup\VC" % vsbase, "productdir")
except KeyError:
log.debug("Unable to find productdir in registry")
productdir = None
if not productdir or not os.path.isdir(productdir):
toolskey = "VS%0.f0COMNTOOLS" % version
toolsdir = os.environ.get(toolskey, None)
if toolsdir and os.path.isdir(toolsdir):
productdir = os.path.join(toolsdir, os.pardir, os.pardir, "VC")
productdir = os.path.abspath(productdir)
if not os.path.isdir(productdir):
log.debug("%s is not a valid directory" % productdir)
return None
else:
log.debug("Env var %s is not set or invalid" % toolskey)
if not productdir:
log.debug("No productdir found")
return None
vcvarsall = os.path.join(productdir, "vcvarsall.bat")
if os.path.isfile(vcvarsall):
return vcvarsall
log.debug("Unable to find vcvarsall.bat")
return None
def query_vcvarsall(version, arch="x86"):
"""Launch vcvarsall.bat and read the settings from its environment"""
vcvarsall = find_vcvarsall(version)
interesting = {"include", "lib", "libpath", "path"}
result = {}
if vcvarsall is None:
raise DistutilsPlatformError("Unable to find vcvarsall.bat")
log.debug("Calling 'vcvarsall.bat %s' (version=%s)", arch, version)
popen = subprocess.Popen(
'"{}" {} & set'.format(vcvarsall, arch),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
try:
stdout, stderr = popen.communicate()
if popen.wait() != 0:
raise DistutilsPlatformError(stderr.decode("mbcs"))
stdout = stdout.decode("mbcs")
for line in stdout.split("\n"):
line = Reg.convert_mbcs(line)
if '=' not in line:
continue
line = line.strip()
key, value = line.split('=', 1)
key = key.lower()
if key in interesting:
if value.endswith(os.pathsep):
value = value[:-1]
result[key] = removeDuplicates(value)
finally:
popen.stdout.close()
popen.stderr.close()
if len(result) != len(interesting):
raise ValueError(str(list(result.keys())))
return result
# More globals
VERSION = get_build_version()
# MACROS = MacroExpander(VERSION)
class MSVCCompiler(CCompiler):
"""Concrete class that implements an interface to Microsoft Visual C++,
as defined by the CCompiler abstract class."""
compiler_type = 'msvc'
# Just set this so CCompiler's constructor doesn't barf. We currently
# don't use the 'set_executables()' bureaucracy provided by CCompiler,
# as it really isn't necessary for this sort of single-compiler class.
# Would be nice to have a consistent interface with UnixCCompiler,
# though, so it's worth thinking about.
executables = {}
# Private class data (need to distinguish C from C++ source for compiler)
_c_extensions = ['.c']
_cpp_extensions = ['.cc', '.cpp', '.cxx']
_rc_extensions = ['.rc']
_mc_extensions = ['.mc']
# Needed for the filename generation methods provided by the
# base class, CCompiler.
src_extensions = _c_extensions + _cpp_extensions + _rc_extensions + _mc_extensions
res_extension = '.res'
obj_extension = '.obj'
static_lib_extension = '.lib'
shared_lib_extension = '.dll'
static_lib_format = shared_lib_format = '%s%s'
exe_extension = '.exe'
def __init__(self, verbose=0, dry_run=0, force=0):
super().__init__(verbose, dry_run, force)
self.__version = VERSION
self.__root = r"Software\Microsoft\VisualStudio"
# self.__macros = MACROS
self.__paths = []
# target platform (.plat_name is consistent with 'bdist')
self.plat_name = None
self.__arch = None # deprecated name
self.initialized = False
def initialize(self, plat_name=None): # noqa: C901
# multi-init means we would need to check platform same each time...
assert not self.initialized, "don't init multiple times"
if self.__version < 8.0:
raise DistutilsPlatformError(
"VC %0.1f is not supported by this module" % self.__version
)
if plat_name is None:
plat_name = get_platform()
# sanity check for platforms to prevent obscure errors later.
ok_plats = 'win32', 'win-amd64'
if plat_name not in ok_plats:
raise DistutilsPlatformError(
"--plat-name must be one of {}".format(ok_plats)
)
if (
"DISTUTILS_USE_SDK" in os.environ
and "MSSdk" in os.environ
and self.find_exe("cl.exe")
):
# Assume that the SDK set up everything alright; don't try to be
# smarter
self.cc = "cl.exe"
self.linker = "link.exe"
self.lib = "lib.exe"
self.rc = "rc.exe"
self.mc = "mc.exe"
else:
# On x86, 'vcvars32.bat amd64' creates an env that doesn't work;
# to cross compile, you use 'x86_amd64'.
# On AMD64, 'vcvars32.bat amd64' is a native build env; to cross
# compile use 'x86' (ie, it runs the x86 compiler directly)
if plat_name == get_platform() or plat_name == 'win32':
# native build or cross-compile to win32
plat_spec = PLAT_TO_VCVARS[plat_name]
else:
# cross compile from win32 -> some 64bit
plat_spec = (
PLAT_TO_VCVARS[get_platform()] + '_' + PLAT_TO_VCVARS[plat_name]
)
vc_env = query_vcvarsall(VERSION, plat_spec)
self.__paths = vc_env['path'].split(os.pathsep)
os.environ['lib'] = vc_env['lib']
os.environ['include'] = vc_env['include']
if len(self.__paths) == 0:
raise DistutilsPlatformError(
"Python was built with %s, "
"and extensions need to be built with the same "
"version of the compiler, but it isn't installed." % self.__product
)
self.cc = self.find_exe("cl.exe")
self.linker = self.find_exe("link.exe")
self.lib = self.find_exe("lib.exe")
self.rc = self.find_exe("rc.exe") # resource compiler
self.mc = self.find_exe("mc.exe") # message compiler
# self.set_path_env_var('lib')
# self.set_path_env_var('include')
# extend the MSVC path with the current path
try:
for p in os.environ['path'].split(';'):
self.__paths.append(p)
except KeyError:
pass
self.__paths = normalize_and_reduce_paths(self.__paths)
os.environ['path'] = ";".join(self.__paths)
self.preprocess_options = None
if self.__arch == "x86":
self.compile_options = ['/nologo', '/O2', '/MD', '/W3', '/DNDEBUG']
self.compile_options_debug = [
'/nologo',
'/Od',
'/MDd',
'/W3',
'/Z7',
'/D_DEBUG',
]
else:
# Win64
self.compile_options = ['/nologo', '/O2', '/MD', '/W3', '/GS-', '/DNDEBUG']
self.compile_options_debug = [
'/nologo',
'/Od',
'/MDd',
'/W3',
'/GS-',
'/Z7',
'/D_DEBUG',
]
self.ldflags_shared = ['/DLL', '/nologo', '/INCREMENTAL:NO']
if self.__version >= 7:
self.ldflags_shared_debug = ['/DLL', '/nologo', '/INCREMENTAL:no', '/DEBUG']
self.ldflags_static = ['/nologo']
self.initialized = True
# -- Worker methods ------------------------------------------------
def object_filenames(self, source_filenames, strip_dir=0, output_dir=''):
# Copied from ccompiler.py, extended to return .res as 'object'-file
# for .rc input file
if output_dir is None:
output_dir = ''
obj_names = []
for src_name in source_filenames:
(base, ext) = os.path.splitext(src_name)
base = os.path.splitdrive(base)[1] # Chop off the drive
base = base[os.path.isabs(base) :] # If abs, chop off leading /
if ext not in self.src_extensions:
# Better to raise an exception instead of silently continuing
# and later complain about sources and targets having
# different lengths
raise CompileError("Don't know how to compile %s" % src_name)
if strip_dir:
base = os.path.basename(base)
if ext in self._rc_extensions:
obj_names.append(os.path.join(output_dir, base + self.res_extension))
elif ext in self._mc_extensions:
obj_names.append(os.path.join(output_dir, base + self.res_extension))
else:
obj_names.append(os.path.join(output_dir, base + self.obj_extension))
return obj_names
def compile( # noqa: C901
self,
sources,
output_dir=None,
macros=None,
include_dirs=None,
debug=0,
extra_preargs=None,
extra_postargs=None,
depends=None,
):
if not self.initialized:
self.initialize()
compile_info = self._setup_compile(
output_dir, macros, include_dirs, sources, depends, extra_postargs
)
macros, objects, extra_postargs, pp_opts, build = compile_info
compile_opts = extra_preargs or []
compile_opts.append('/c')
if debug:
compile_opts.extend(self.compile_options_debug)
else:
compile_opts.extend(self.compile_options)
for obj in objects:
try:
src, ext = build[obj]
except KeyError:
continue
if debug:
# pass the full pathname to MSVC in debug mode,
# this allows the debugger to find the source file
# without asking the user to browse for it
src = os.path.abspath(src)
if ext in self._c_extensions:
input_opt = "/Tc" + src
elif ext in self._cpp_extensions:
input_opt = "/Tp" + src
elif ext in self._rc_extensions:
# compile .RC to .RES file
input_opt = src
output_opt = "/fo" + obj
try:
self.spawn([self.rc] + pp_opts + [output_opt] + [input_opt])
except DistutilsExecError as msg:
raise CompileError(msg)
continue
elif ext in self._mc_extensions:
# Compile .MC to .RC file to .RES file.
# * '-h dir' specifies the directory for the
# generated include file
# * '-r dir' specifies the target directory of the
# generated RC file and the binary message resource
# it includes
#
# For now (since there are no options to change this),
# we use the source-directory for the include file and
# the build directory for the RC file and message
# resources. This works at least for win32all.
h_dir = os.path.dirname(src)
rc_dir = os.path.dirname(obj)
try:
# first compile .MC to .RC and .H file
self.spawn([self.mc] + ['-h', h_dir, '-r', rc_dir] + [src])
base, _ = os.path.splitext(os.path.basename(src))
rc_file = os.path.join(rc_dir, base + '.rc')
# then compile .RC to .RES file
self.spawn([self.rc] + ["/fo" + obj] + [rc_file])
except DistutilsExecError as msg:
raise CompileError(msg)
continue
else:
# how to handle this file?
raise CompileError(
"Don't know how to compile {} to {}".format(src, obj)
)
output_opt = "/Fo" + obj
try:
self.spawn(
[self.cc]
+ compile_opts
+ pp_opts
+ [input_opt, output_opt]
+ extra_postargs
)
except DistutilsExecError as msg:
raise CompileError(msg)
return objects
def create_static_lib(
self, objects, output_libname, output_dir=None, debug=0, target_lang=None
):
if not self.initialized:
self.initialize()
(objects, output_dir) = self._fix_object_args(objects, output_dir)
output_filename = self.library_filename(output_libname, output_dir=output_dir)
if self._need_link(objects, output_filename):
lib_args = objects + ['/OUT:' + output_filename]
if debug:
pass # XXX what goes here?
try:
self.spawn([self.lib] + lib_args)
except DistutilsExecError as msg:
raise LibError(msg)
else:
log.debug("skipping %s (up-to-date)", output_filename)
def link( # noqa: C901
self,
target_desc,
objects,
output_filename,
output_dir=None,
libraries=None,
library_dirs=None,
runtime_library_dirs=None,
export_symbols=None,
debug=0,
extra_preargs=None,
extra_postargs=None,
build_temp=None,
target_lang=None,
):
if not self.initialized:
self.initialize()
(objects, output_dir) = self._fix_object_args(objects, output_dir)
fixed_args = self._fix_lib_args(libraries, library_dirs, runtime_library_dirs)
(libraries, library_dirs, runtime_library_dirs) = fixed_args
if runtime_library_dirs:
self.warn(
"I don't know what to do with 'runtime_library_dirs': "
+ str(runtime_library_dirs)
)
lib_opts = gen_lib_options(self, library_dirs, runtime_library_dirs, libraries)
if output_dir is not None:
output_filename = os.path.join(output_dir, output_filename)
if self._need_link(objects, output_filename):
if target_desc == CCompiler.EXECUTABLE:
if debug:
ldflags = self.ldflags_shared_debug[1:]
else:
ldflags = self.ldflags_shared[1:]
else:
if debug:
ldflags = self.ldflags_shared_debug
else:
ldflags = self.ldflags_shared
export_opts = []
for sym in export_symbols or []:
export_opts.append("/EXPORT:" + sym)
ld_args = (
ldflags + lib_opts + export_opts + objects + ['/OUT:' + output_filename]
)
# The MSVC linker generates .lib and .exp files, which cannot be
# suppressed by any linker switches. The .lib files may even be
# needed! Make sure they are generated in the temporary build
# directory. Since they have different names for debug and release
# builds, they can go into the same directory.
build_temp = os.path.dirname(objects[0])
if export_symbols is not None:
(dll_name, dll_ext) = os.path.splitext(
os.path.basename(output_filename)
)
implib_file = os.path.join(build_temp, self.library_filename(dll_name))
ld_args.append('/IMPLIB:' + implib_file)
self.manifest_setup_ldargs(output_filename, build_temp, ld_args)
if extra_preargs:
ld_args[:0] = extra_preargs
if extra_postargs:
ld_args.extend(extra_postargs)
self.mkpath(os.path.dirname(output_filename))
try:
self.spawn([self.linker] + ld_args)
except DistutilsExecError as msg:
raise LinkError(msg)
# embed the manifest
# XXX - this is somewhat fragile - if mt.exe fails, distutils
# will still consider the DLL up-to-date, but it will not have a
# manifest. Maybe we should link to a temp file? OTOH, that
# implies a build environment error that shouldn't go undetected.
mfinfo = self.manifest_get_embed_info(target_desc, ld_args)
if mfinfo is not None:
mffilename, mfid = mfinfo
out_arg = '-outputresource:{};{}'.format(output_filename, mfid)
try:
self.spawn(['mt.exe', '-nologo', '-manifest', mffilename, out_arg])
except DistutilsExecError as msg:
raise LinkError(msg)
else:
log.debug("skipping %s (up-to-date)", output_filename)
def manifest_setup_ldargs(self, output_filename, build_temp, ld_args):
# If we need a manifest at all, an embedded manifest is recommended.
# See MSDN article titled
# "How to: Embed a Manifest Inside a C/C++ Application"
# (currently at http://msdn2.microsoft.com/en-us/library/ms235591(VS.80).aspx)
# Ask the linker to generate the manifest in the temp dir, so
# we can check it, and possibly embed it, later.
temp_manifest = os.path.join(
build_temp, os.path.basename(output_filename) + ".manifest"
)
ld_args.append('/MANIFESTFILE:' + temp_manifest)
def manifest_get_embed_info(self, target_desc, ld_args):
# If a manifest should be embedded, return a tuple of
# (manifest_filename, resource_id). Returns None if no manifest
# should be embedded. See http://bugs.python.org/issue7833 for why
        # we want to avoid any manifest for extension modules if we can.
for arg in ld_args:
if arg.startswith("/MANIFESTFILE:"):
temp_manifest = arg.split(":", 1)[1]
break
else:
# no /MANIFESTFILE so nothing to do.
return None
if target_desc == CCompiler.EXECUTABLE:
# by default, executables always get the manifest with the
# CRT referenced.
mfid = 1
else:
# Extension modules try and avoid any manifest if possible.
mfid = 2
temp_manifest = self._remove_visual_c_ref(temp_manifest)
if temp_manifest is None:
return None
return temp_manifest, mfid
def _remove_visual_c_ref(self, manifest_file):
try:
# Remove references to the Visual C runtime, so they will
# fall through to the Visual C dependency of Python.exe.
# This way, when installed for a restricted user (e.g.
# runtimes are not in WinSxS folder, but in Python's own
# folder), the runtimes do not need to be in every folder
# with .pyd's.
# Returns either the filename of the modified manifest or
# None if no manifest should be embedded.
manifest_f = open(manifest_file)
try:
manifest_buf = manifest_f.read()
finally:
manifest_f.close()
pattern = re.compile(
r"""<assemblyIdentity.*?name=("|')Microsoft\."""
r"""VC\d{2}\.CRT("|').*?(/>|</assemblyIdentity>)""",
re.DOTALL,
)
manifest_buf = re.sub(pattern, "", manifest_buf)
pattern = r"<dependentAssembly>\s*</dependentAssembly>"
manifest_buf = re.sub(pattern, "", manifest_buf)
# Now see if any other assemblies are referenced - if not, we
# don't want a manifest embedded.
pattern = re.compile(
r"""<assemblyIdentity.*?name=(?:"|')(.+?)(?:"|')"""
r""".*?(?:/>|</assemblyIdentity>)""",
re.DOTALL,
)
if re.search(pattern, manifest_buf) is None:
return None
manifest_f = open(manifest_file, 'w')
try:
manifest_f.write(manifest_buf)
return manifest_file
finally:
manifest_f.close()
except OSError:
pass
# -- Miscellaneous methods -----------------------------------------
    # These are all used by the 'gen_lib_options()' function, in
# ccompiler.py.
def library_dir_option(self, dir):
return "/LIBPATH:" + dir
def runtime_library_dir_option(self, dir):
raise DistutilsPlatformError(
"don't know how to set runtime library search path for MSVC++"
)
def library_option(self, lib):
return self.library_filename(lib)
def find_library_file(self, dirs, lib, debug=0):
# Prefer a debugging library if found (and requested), but deal
# with it if we don't have one.
if debug:
try_names = [lib + "_d", lib]
else:
try_names = [lib]
for dir in dirs:
for name in try_names:
libfile = os.path.join(dir, self.library_filename(name))
if os.path.exists(libfile):
return libfile
else:
# Oops, didn't find it in *any* of 'dirs'
return None
# Helper methods for using the MSVC registry settings
def find_exe(self, exe):
"""Return path to an MSVC executable program.
Tries to find the program in several places: first, one of the
MSVC program search paths from the registry; next, the directories
in the PATH environment variable. If any of those work, return an
absolute path that is known to exist. If none of them work, just
return the original program name, 'exe'.
"""
for p in self.__paths:
fn = os.path.join(os.path.abspath(p), exe)
if os.path.isfile(fn):
return fn
# didn't find it; try existing path
for p in os.environ['Path'].split(';'):
fn = os.path.join(os.path.abspath(p), exe)
if os.path.isfile(fn):
return fn
return exe
|
{
"content_hash": "572ed70c0c71d1cd05246ec460f51a64",
"timestamp": "",
"source": "github",
"line_count": 832,
"max_line_length": 88,
"avg_line_length": 36.30288461538461,
"alnum_prop": 0.5547278506158124,
"repo_name": "pypa/setuptools",
"id": "a4714a559dcb695c996cadbac8be71ec9ccb40da",
"size": "30204",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "setuptools/_distutils/msvc9compiler.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "2173"
},
{
"name": "C",
"bytes": "36107"
},
{
"name": "HTML",
"bytes": "266"
},
{
"name": "Python",
"bytes": "4027592"
}
],
"symlink_target": ""
}
|
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.test import TestCase
from nose.tools import eq_
from lib.utils import static_url, update_csp, validate_settings
class TestValidate(TestCase):
def test_secret_key(self):
with self.settings(DEBUG=True,
IN_TEST_SUITE=False,
SECRET_KEY='please change this',
SITE_URL='http://testserver'):
validate_settings()
with self.settings(DEBUG=False,
IN_TEST_SUITE=False,
SECRET_KEY='please change this',
SITE_URL='http://testserver'):
update_csp()
with self.assertRaises(ImproperlyConfigured):
validate_settings()
with self.settings(DEBUG=False,
IN_TEST_SUITE=False,
SECRET_KEY='so changed',
SESSION_COOKIE_SECURE=True,
APP_PURCHASE_SECRET='so changed'):
update_csp()
validate_settings()
def test_http(self):
with self.settings(CSP_SCRIPT_SRC=('http://f.c'), DEBUG=True,
IN_TEST_SUITE=False):
validate_settings()
def test_http_not_debug(self):
with self.settings(CSP_SCRIPT_SRC=('http://f.c'), DEBUG=False,
IN_TEST_SUITE=False):
with self.assertRaises(ImproperlyConfigured):
validate_settings()
def test_update_csp(self):
with self.settings(CSP_SCRIPT_SRC=('https://f.c', 'self',
'http://f.c'),
DEBUG=False,
IN_TEST_SUITE=False):
update_csp()
self.assertSetEqual(set(settings.CSP_SCRIPT_SRC),
set(('https://f.c', 'self')))
with self.settings(CSP_SCRIPT_SRC=('https://f.c', 'self',
'http://f.c'),
DEBUG=True):
update_csp()
self.assertSetEqual(set(settings.CSP_SCRIPT_SRC),
set(('https://f.c', 'self', 'http://f.c')))
class TestURL(TestCase):
def test_url(self):
with self.settings(WEBAPPS_RECEIPT_URL='/v', SITE_URL='http://f.com'):
eq_(static_url('WEBAPPS_RECEIPT_URL'), 'http://f.com/v')
with self.settings(DEBUG=True, SERVE_TMP_PATH=True):
eq_(static_url('WEBAPPS_RECEIPT_URL'),
'http://testserver/receipt-verifier/')
with self.settings(WEBAPPS_RECEIPT_URL='http://f.com'):
eq_(static_url('WEBAPPS_RECEIPT_URL'), 'http://f.com')
def test_leading_slash(self):
with self.settings(WEBAPP_ICON_URL='v', DEBUG=True,
SERVE_TMP_PATH=True):
eq_(static_url('WEBAPP_ICON_URL'), 'http://testserver/tmp/v')
with self.settings(WEBAPP_ICON_URL='/v', DEBUG=True,
SERVE_TMP_PATH=True):
eq_(static_url('WEBAPP_ICON_URL'), 'http://testserver/tmp/v')
|
{
"content_hash": "8355519792771278bce29b74456ed6de",
"timestamp": "",
"source": "github",
"line_count": 83,
"max_line_length": 78,
"avg_line_length": 38.66265060240964,
"alnum_prop": 0.5091928949828607,
"repo_name": "shahbaz17/zamboni",
"id": "aab0ad04d12d42d567680aab63e8aad9063b4702",
"size": "3209",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/tests/test_utils_.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "357511"
},
{
"name": "HTML",
"bytes": "2331440"
},
{
"name": "JavaScript",
"bytes": "536153"
},
{
"name": "Makefile",
"bytes": "4281"
},
{
"name": "Python",
"bytes": "4400945"
},
{
"name": "Shell",
"bytes": "11200"
},
{
"name": "Smarty",
"bytes": "1159"
}
],
"symlink_target": ""
}
|
"""
Simplex method for solving linear programming problems
"""
import numpy as np
from warnings import warn
from .optimize import OptimizeResult, OptimizeWarning, _check_unknown_options
from ._linprog_util import _postsolve
def _pivot_col(T, tol=1.0E-12, bland=False):
"""
Given a linear programming simplex tableau, determine the column
of the variable to enter the basis.
Parameters
----------
T : 2D array
The simplex tableau.
tol : float
Elements in the objective row larger than -tol will not be considered
for pivoting. Nominally this value is zero, but numerical issues
cause a tolerance about zero to be necessary.
bland : bool
If True, use Bland's rule for selection of the column (select the
first column with a negative coefficient in the objective row,
regardless of magnitude).
Returns
-------
status: bool
True if a suitable pivot column was found, otherwise False.
A return of False indicates that the linear programming simplex
algorithm is complete.
col: int
The index of the column of the pivot element.
If status is False, col will be returned as nan.
"""
ma = np.ma.masked_where(T[-1, :-1] >= -tol, T[-1, :-1], copy=False)
if ma.count() == 0:
return False, np.nan
if bland:
return True, np.nonzero(ma.mask == False)[0][0]
return True, np.ma.nonzero(ma == ma.min())[0][0]
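# Illustrative example (not part of the original module): for the tableau
#     T = np.array([[ 1.,  0.,  2.,  4.],
#                   [ 0.,  1.,  1.,  3.],
#                   [-2., -3.,  0.,  0.]])
# the objective row is [-2., -3., 0.]; the default (Dantzig) rule selects the
# most negative coefficient, so _pivot_col(T) returns (True, 1), whereas
# _pivot_col(T, bland=True) returns the first eligible column, (True, 0).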
def _pivot_row(T, basis, pivcol, phase, tol=1.0E-12, bland=False):
"""
Given a linear programming simplex tableau, determine the row for the
pivot operation.
Parameters
----------
T : 2D array
The simplex tableau.
basis : array
A list of the current basic variables.
pivcol : int
The index of the pivot column.
phase : int
The phase of the simplex algorithm (1 or 2).
tol : float
Elements in the pivot column smaller than tol will not be considered
for pivoting. Nominally this value is zero, but numerical issues
cause a tolerance about zero to be necessary.
bland : bool
If True, use Bland's rule for selection of the row (if more than one
row can be used, choose the one with the lowest variable index).
Returns
-------
status: bool
True if a suitable pivot row was found, otherwise False. A return
of False indicates that the linear programming problem is unbounded.
row: int
The index of the row of the pivot element. If status is False, row
will be returned as nan.
"""
if phase == 1:
k = 2
else:
k = 1
ma = np.ma.masked_where(T[:-k, pivcol] <= tol, T[:-k, pivcol], copy=False)
if ma.count() == 0:
return False, np.nan
mb = np.ma.masked_where(T[:-k, pivcol] <= tol, T[:-k, -1], copy=False)
q = mb / ma
min_rows = np.ma.nonzero(q == q.min())[0]
if bland:
return True, min_rows[np.argmin(np.take(basis, min_rows))]
return True, min_rows[0]
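# Illustrative example (not part of the original module): with
#     T = np.array([[ 1.,  2.,  6.],
#                   [ 1.,  1.,  4.],
#                   [-3., -2.,  0.]])
# and pivcol = 0 in phase 2, the ratio test compares 6/1 and 4/1; the smaller
# ratio belongs to row 1, so _pivot_row(T, basis, 0, 2) returns (True, 1).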
def _apply_pivot(T, basis, pivrow, pivcol, tol=1e-12):
"""
    Pivot the simplex tableau in place on the element given by (pivrow, pivcol).
The entering variable corresponds to the column given by pivcol forcing
the variable basis[pivrow] to leave the basis.
Parameters
----------
T : 2D array
A 2D numpy array representing the simplex T to the corresponding
maximization problem.
basis : 1D array
An array of the indices of the basic variables, such that basis[i]
contains the column corresponding to the basic variable for row i.
Basis is modified in place by _apply_pivot.
pivrow : int
Row index of the pivot.
pivcol : int
Column index of the pivot.
"""
basis[pivrow] = pivcol
pivval = T[pivrow, pivcol]
T[pivrow] = T[pivrow] / pivval
for irow in range(T.shape[0]):
if irow != pivrow:
T[irow] = T[irow] - T[pivrow] * T[irow, pivcol]
# The selected pivot should never lead to a pivot value less than the tol.
if np.isclose(pivval, tol, atol=0, rtol=1e4):
message = (
"The pivot operation produces a pivot value of:{0: .1e}, "
"which is only slightly greater than the specified "
"tolerance{1: .1e}. This may lead to issues regarding the "
"numerical stability of the simplex method. "
"Removing redundant constraints, changing the pivot strategy "
"via Bland's rule or increasing the tolerance may "
"help reduce the issue.".format(pivval, tol))
warn(message, OptimizeWarning)
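# Illustrative example (not part of the original module): applying the pivot
# (pivrow=1, pivcol=0) to the 3x3 tableau from the _pivot_row example above
# leaves row 1 unchanged (the pivot value is already 1) and eliminates column
# 0 from the other rows, producing
#     [[ 0.,  1.,  2.],
#      [ 1.,  1.,  4.],
#      [ 0.,  1., 12.]]
# after which the objective row contains no negative entries and the simplex
# iteration terminates.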
def _solve_simplex(T, n, basis, maxiter=1000, phase=2, status=0, message='',
callback=None, tol=1.0E-12, nit0=0, bland=False, _T_o=None):
"""
Solve a linear programming problem in "standard maximization form" using
the Simplex Method.
Minimize :math:`f = c^T x`
subject to
.. math::
Ax = b
x_i >= 0
b_j >= 0
Parameters
----------
T : 2D array
A 2D array representing the simplex T corresponding to the
maximization problem. It should have the form:
[[A[0, 0], A[0, 1], ..., A[0, n_total], b[0]],
[A[1, 0], A[1, 1], ..., A[1, n_total], b[1]],
.
.
.
[A[m, 0], A[m, 1], ..., A[m, n_total], b[m]],
[c[0], c[1], ..., c[n_total], 0]]
for a Phase 2 problem, or the form:
[[A[0, 0], A[0, 1], ..., A[0, n_total], b[0]],
[A[1, 0], A[1, 1], ..., A[1, n_total], b[1]],
.
.
.
[A[m, 0], A[m, 1], ..., A[m, n_total], b[m]],
[c[0], c[1], ..., c[n_total], 0],
[c'[0], c'[1], ..., c'[n_total], 0]]
    for a Phase 1 problem (a problem in which a basic feasible solution is
    sought prior to maximizing the actual objective). T is modified in
place by _solve_simplex.
n : int
The number of true variables in the problem.
basis : 1D array
An array of the indices of the basic variables, such that basis[i]
contains the column corresponding to the basic variable for row i.
Basis is modified in place by _solve_simplex
maxiter : int
The maximum number of iterations to perform before aborting the
optimization.
phase : int
The phase of the optimization being executed. In phase 1 a basic
feasible solution is sought and the T has an additional row
representing an alternate objective function.
callback : callable, optional
If a callback function is provided, it will be called within each
iteration of the simplex algorithm. The callback must have the
signature `callback(xk, **kwargs)` where xk is the current solution
vector and kwargs is a dictionary containing the following::
"T" : The current Simplex algorithm T
"nit" : The current iteration.
"pivot" : The pivot (row, column) used for the next iteration.
"phase" : Whether the algorithm is in Phase 1 or Phase 2.
"basis" : The indices of the columns of the basic variables.
tol : float
The tolerance which determines when a solution is "close enough" to
zero in Phase 1 to be considered a basic feasible solution or close
enough to positive to serve as an optimal solution.
nit0 : int
The initial iteration number used to keep an accurate iteration total
in a two-phase problem.
bland : bool
If True, choose pivots using Bland's rule [3]. In problems which
fail to converge due to cycling, using Bland's rule can provide
convergence at the expense of a less optimal path about the simplex.
Returns
-------
nit : int
The number of iterations. Used to keep an accurate iteration total
in a two-phase problem.
status : int
An integer representing the exit status of the optimization::
0 : Optimization terminated successfully
1 : Iteration limit reached
2 : Problem appears to be infeasible
3 : Problem appears to be unbounded
        4 : Serious numerical difficulties encountered which could not be
            resolved using a more robust, albeit less efficient, solver
"""
nit = nit0
complete = False
if phase == 1:
m = T.shape[0]-2
elif phase == 2:
m = T.shape[0]-1
else:
raise ValueError("Argument 'phase' to _solve_simplex must be 1 or 2")
if phase == 2:
# Check if any artificial variables are still in the basis.
# If yes, check if any coefficients from this row and a column
# corresponding to one of the non-artificial variable is non-zero.
# If found, pivot at this term. If not, start phase 2.
# Do this for all artificial variables in the basis.
# Ref: "An Introduction to Linear Programming and Game Theory"
# by Paul R. Thie, Gerard E. Keough, 3rd Ed,
        # Chapter 3.7 Redundant Systems (page 102)
for pivrow in [row for row in range(basis.size)
if basis[row] > T.shape[1] - 2]:
non_zero_row = [col for col in range(T.shape[1] - 1)
if abs(T[pivrow, col]) > tol]
if len(non_zero_row) > 0:
pivcol = non_zero_row[0]
_apply_pivot(T, basis, pivrow, pivcol)
nit += 1
if len(basis[:m]) == 0:
solution = np.zeros(T.shape[1] - 1, dtype=np.float64)
else:
solution = np.zeros(max(T.shape[1] - 1, max(basis[:m]) + 1),
dtype=np.float64)
while not complete:
# Find the pivot column
pivcol_found, pivcol = _pivot_col(T, tol, bland)
if not pivcol_found:
pivcol = np.nan
pivrow = np.nan
status = 0
complete = True
else:
# Find the pivot row
pivrow_found, pivrow = _pivot_row(T, basis, pivcol, phase, tol, bland)
if not pivrow_found:
status = 3
complete = True
if callback is not None:
solution[basis[:n]] = T[:n, -1]
x = solution[:m]
c, A_ub, b_ub, A_eq, b_eq, bounds, undo = _T_o
x, fun, slack, con, _, _ = _postsolve(
x, c, A_ub, b_ub, A_eq, b_eq, bounds, undo=undo, tol=tol
)
res = OptimizeResult({
'x': x,
'fun': fun,
'slack': slack,
'con': con,
'status': status,
'message': message,
'nit': nit,
'success': status == 0 and complete,
'phase': phase,
'complete': complete,
})
callback(res)
if not complete:
if nit >= maxiter:
# Iteration limit exceeded
status = 1
complete = True
else:
_apply_pivot(T, basis, pivrow, pivcol)
nit += 1
return nit, status
def _linprog_simplex(c, c0, A, b, maxiter=1000, disp=False, callback=None,
tol=1.0E-12, bland=False, _T_o=None, **unknown_options):
"""
Solve the following linear programming problem via a two-phase
simplex algorithm.::
minimize: c^T * x
subject to: A * x == b
0 <= x < oo
Parameters
----------
c : 1D array
Coefficients of the linear objective function to be minimized.
c0 : float
Constant term in objective function due to fixed (and eliminated)
variables. (Purely for display.)
A : 2D array
2D array which, when matrix-multiplied by ``x``, gives the values of
the equality constraints at ``x``.
b : 1D array
1D array of values representing the RHS of each equality constraint
(row) in ``A_eq``.
callback : callable, optional (simplex only)
If a callback function is provided, it will be called within each
iteration of the simplex algorithm. The callback must require a
`scipy.optimize.OptimizeResult` consisting of the following fields:
x : 1D array
The independent variable vector which optimizes the linear
programming problem.
fun : float
Value of the objective function.
success : bool
True if the algorithm succeeded in finding an optimal solution.
slack : 1D array
The values of the slack variables. Each slack variable
corresponds to an inequality constraint. If the slack is zero,
the corresponding constraint is active.
phase : int
The phase of the optimization being executed. In phase 1 a basic
feasible solution is sought and the T has an additional row
representing an alternate objective function.
status : int
An integer representing the exit status of the optimization::
0 : Optimization terminated successfully
1 : Iteration limit reached
2 : Problem appears to be infeasible
3 : Problem appears to be unbounded
                    4 : Serious numerical difficulties encountered which
                        could not be resolved using a more robust, albeit
                        less efficient, solver
nit : int
The number of iterations performed.
message : str
A string descriptor of the exit status of the optimization.
Options
-------
maxiter : int
The maximum number of iterations to perform.
disp : bool
If True, print exit status message to sys.stdout
tol : float
The tolerance which determines when a solution is "close enough" to
zero in Phase 1 to be considered a basic feasible solution or close
enough to positive to serve as an optimal solution.
bland : bool
If True, use Bland's anti-cycling rule [3] to choose pivots to
prevent cycling. If False, choose pivots which should lead to a
converged solution more quickly. The latter method is subject to
cycling (non-convergence) in rare instances.
Returns
-------
x : 1D array
Solution vector.
status : int
An integer representing the exit status of the optimization::
0 : Optimization terminated successfully
1 : Iteration limit reached
2 : Problem appears to be infeasible
3 : Problem appears to be unbounded
        4 : Serious numerical difficulties encountered which could not be
            resolved using a more robust, albeit less efficient, solver
message : str
A string descriptor of the exit status of the optimization.
iteration : int
The number of iterations taken to solve the problem.
Examples
--------
Consider the following problem:
Minimize: f = -1*x[0] + 4*x[1]
Subject to: -3*x[0] + 1*x[1] <= 6
1*x[0] + 2*x[1] <= 4
x[1] >= -3
where: -inf <= x[0] <= inf
This problem deviates from the standard linear programming problem. In
standard form, linear programming problems assume the variables x are
non-negative. Since the variables don't have standard bounds where
0 <= x <= inf, the bounds of the variables must be explicitly set.
There are two upper-bound constraints, which can be expressed as
dot(A_ub, x) <= b_ub
The input for this problem is as follows:
>>> from scipy.optimize import linprog
>>> c = [-1, 4]
>>> A = [[-3, 1], [1, 2]]
>>> b = [6, 4]
>>> x0_bnds = (None, None)
>>> x1_bnds = (-3, None)
>>> res = linprog(c, A, b, bounds=(x0_bnds, x1_bnds))
>>> print(res)
fun: -22.0
message: 'Optimization terminated successfully.'
nit: 1
slack: array([ 39., 0.])
status: 0
success: True
x: array([ 10., -3.])
References
----------
.. [1] Dantzig, George B., Linear programming and extensions. Rand
Corporation Research Study Princeton Univ. Press, Princeton, NJ,
1963
.. [2] Hillier, S.H. and Lieberman, G.J. (1995), "Introduction to
Mathematical Programming", McGraw-Hill, Chapter 4.
.. [3] Bland, Robert G. New finite pivoting rules for the simplex method.
Mathematics of Operations Research (2), 1977: pp. 103-107.
"""
_check_unknown_options(unknown_options)
status = 0
messages = {0: "Optimization terminated successfully.",
1: "Iteration limit reached.",
2: "Optimization failed. Unable to find a feasible"
" starting point.",
3: "Optimization failed. The problem appears to be unbounded.",
4: "Optimization failed. Singular matrix encountered."}
n, m = A.shape
# All constraints must have b >= 0.
is_negative_constraint = np.less(b, 0)
A[is_negative_constraint] *= -1
b[is_negative_constraint] *= -1
    # As all constraints are equality constraints, the artificial variables
# will also be basic variables.
av = np.arange(n) + m
basis = av.copy()
# Format the phase one tableau by adding artificial variables and stacking
# the constraints, the objective row and pseudo-objective row.
row_constraints = np.hstack((A, np.eye(n), b[:, np.newaxis]))
row_objective = np.hstack((c, np.zeros(n), c0))
row_pseudo_objective = -row_constraints.sum(axis=0)
row_pseudo_objective[av] = 0
T = np.vstack((row_constraints, row_objective, row_pseudo_objective))
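    # Note (added for clarity): with A of shape (n, m), T has shape
    # (n + 2, m + n + 1): n constraint rows plus the objective and
    # pseudo-objective rows, and columns for the m original variables, the
    # n artificial variables and the right-hand side.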
nit1, status = _solve_simplex(T, n, basis, phase=1, callback=callback,
maxiter=maxiter, tol=tol, bland=bland, _T_o=_T_o)
# if pseudo objective is zero, remove the last row from the tableau and
# proceed to phase 2
if abs(T[-1, -1]) < tol:
# Remove the pseudo-objective row from the tableau
T = T[:-1, :]
# Remove the artificial variable columns from the tableau
T = np.delete(T, av, 1)
else:
# Failure to find a feasible starting point
status = 2
nit2 = nit1
messages[status] = (
"Phase 1 of the simplex method failed to find a feasible "
"solution. The pseudo-objective function evaluates to {0:.1e} "
"which exceeds the required tolerance of {1} for a solution to be "
"considered 'close enough' to zero to be a basic solution. "
"Consider increasing the tolerance to be greater than {0:.1e}. "
"If this tolerance is unacceptably large the problem may be "
"infeasible.".format(abs(T[-1, -1]), tol)
)
if status == 0:
# Phase 2
nit2, status = _solve_simplex(T, n, basis, maxiter=maxiter,
phase=2, callback=callback, tol=tol,
nit0=nit1, bland=bland, _T_o=_T_o)
solution = np.zeros(n + m)
solution[basis[:n]] = T[:n, -1]
x = solution[:m]
return x, status, messages[status], int(nit2)
|
{
"content_hash": "3bf1751aa1472386b26f8ee94c0582ae",
"timestamp": "",
"source": "github",
"line_count": 516,
"max_line_length": 83,
"avg_line_length": 37.781007751937985,
"alnum_prop": 0.5847140292382662,
"repo_name": "gfyoung/scipy",
"id": "1915f0f84a8223d359b41bd5dbd7a77acfa586dd",
"size": "19495",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scipy/optimize/_linprog_simplex.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "4142653"
},
{
"name": "C++",
"bytes": "498142"
},
{
"name": "Fortran",
"bytes": "5572451"
},
{
"name": "Makefile",
"bytes": "778"
},
{
"name": "Matlab",
"bytes": "4346"
},
{
"name": "Python",
"bytes": "11540629"
},
{
"name": "Shell",
"bytes": "2226"
},
{
"name": "TeX",
"bytes": "52106"
}
],
"symlink_target": ""
}
|
"""AsusWrt component constants."""
DOMAIN = "asuswrt"
CONF_DNSMASQ = "dnsmasq"
CONF_INTERFACE = "interface"
CONF_REQUIRE_IP = "require_ip"
CONF_SSH_KEY = "ssh_key"
CONF_TRACK_UNKNOWN = "track_unknown"
DATA_ASUSWRT = DOMAIN
DEFAULT_DNSMASQ = "/var/lib/misc"
DEFAULT_INTERFACE = "eth0"
DEFAULT_SSH_PORT = 22
DEFAULT_TRACK_UNKNOWN = False
MODE_AP = "ap"
MODE_ROUTER = "router"
PROTOCOL_SSH = "ssh"
PROTOCOL_TELNET = "telnet"
# Sensors
SENSORS_BYTES = ["sensor_rx_bytes", "sensor_tx_bytes"]
SENSORS_CONNECTED_DEVICE = ["sensor_connected_device"]
SENSORS_LOAD_AVG = ["sensor_load_avg1", "sensor_load_avg5", "sensor_load_avg15"]
SENSORS_RATES = ["sensor_rx_rates", "sensor_tx_rates"]
SENSORS_TEMPERATURES = ["2.4GHz", "5.0GHz", "CPU"]
|
{
"content_hash": "5f7a83dfe5d1b6e9523b7ecdae3e1f5f",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 80,
"avg_line_length": 26.25,
"alnum_prop": 0.7047619047619048,
"repo_name": "rohitranjan1991/home-assistant",
"id": "95e93e0ff2537edca94a876cf26e51e3bce73275",
"size": "735",
"binary": false,
"copies": "4",
"ref": "refs/heads/dev",
"path": "homeassistant/components/asuswrt/const.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1017265"
},
{
"name": "Python",
"bytes": "1051086"
},
{
"name": "Shell",
"bytes": "3946"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import, division, print_function
from nacl import exceptions as exc
from nacl._sodium import ffi, lib
from nacl.exceptions import ensure
crypto_secretbox_KEYBYTES = lib.crypto_secretbox_keybytes()
crypto_secretbox_NONCEBYTES = lib.crypto_secretbox_noncebytes()
crypto_secretbox_ZEROBYTES = lib.crypto_secretbox_zerobytes()
crypto_secretbox_BOXZEROBYTES = lib.crypto_secretbox_boxzerobytes()
crypto_secretbox_MACBYTES = lib.crypto_secretbox_macbytes()
crypto_secretbox_MESSAGEBYTES_MAX = lib.crypto_secretbox_messagebytes_max()
def crypto_secretbox(message, nonce, key):
"""
Encrypts and returns the message ``message`` with the secret ``key`` and
the nonce ``nonce``.
:param message: bytes
:param nonce: bytes
:param key: bytes
:rtype: bytes
"""
if len(key) != crypto_secretbox_KEYBYTES:
raise exc.ValueError("Invalid key")
if len(nonce) != crypto_secretbox_NONCEBYTES:
raise exc.ValueError("Invalid nonce")
padded = b"\x00" * crypto_secretbox_ZEROBYTES + message
ciphertext = ffi.new("unsigned char[]", len(padded))
res = lib.crypto_secretbox(ciphertext, padded, len(padded), nonce, key)
ensure(res == 0, "Encryption failed", raising=exc.CryptoError)
ciphertext = ffi.buffer(ciphertext, len(padded))
return ciphertext[crypto_secretbox_BOXZEROBYTES:]
def crypto_secretbox_open(ciphertext, nonce, key):
"""
    Decrypts and returns the encrypted message ``ciphertext`` with the secret
``key`` and the nonce ``nonce``.
:param ciphertext: bytes
:param nonce: bytes
:param key: bytes
:rtype: bytes
"""
if len(key) != crypto_secretbox_KEYBYTES:
raise exc.ValueError("Invalid key")
if len(nonce) != crypto_secretbox_NONCEBYTES:
raise exc.ValueError("Invalid nonce")
padded = b"\x00" * crypto_secretbox_BOXZEROBYTES + ciphertext
plaintext = ffi.new("unsigned char[]", len(padded))
res = lib.crypto_secretbox_open(
plaintext, padded, len(padded), nonce, key)
ensure(res == 0, "Decryption failed. Ciphertext failed verification",
raising=exc.CryptoError)
plaintext = ffi.buffer(plaintext, len(padded))
return plaintext[crypto_secretbox_ZEROBYTES:]
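# Illustrative usage (not part of the original module; application code would
# normally use the higher-level nacl.secret.SecretBox wrapper instead):
#
#     import os
#     key = os.urandom(crypto_secretbox_KEYBYTES)
#     nonce = os.urandom(crypto_secretbox_NONCEBYTES)
#     ct = crypto_secretbox(b"attack at dawn", nonce, key)
#     assert crypto_secretbox_open(ct, nonce, key) == b"attack at dawn"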
|
{
"content_hash": "56b6106ac305fc67f68d5ca3185517fd",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 76,
"avg_line_length": 33.76119402985075,
"alnum_prop": 0.7015915119363395,
"repo_name": "lmctv/pynacl",
"id": "5323b26c8e4abff174fa1f4db5eafefb0a6e9c8d",
"size": "2864",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "src/nacl/bindings/crypto_secretbox.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "102052"
},
{
"name": "Batchfile",
"bytes": "4482"
},
{
"name": "C",
"bytes": "4440037"
},
{
"name": "C++",
"bytes": "91159"
},
{
"name": "CMake",
"bytes": "9743"
},
{
"name": "M4",
"bytes": "75163"
},
{
"name": "Makefile",
"bytes": "614527"
},
{
"name": "Objective-C",
"bytes": "166255"
},
{
"name": "PHP",
"bytes": "563"
},
{
"name": "Python",
"bytes": "310893"
},
{
"name": "Shell",
"bytes": "766357"
},
{
"name": "Visual Basic",
"bytes": "294"
}
],
"symlink_target": ""
}
|
__doc__="""
Select glyphs with the same color as the currently selected one.
"""
import GlyphsApp
thisFont = Glyphs.font # frontmost font
selectedLayers = thisFont.selectedLayers
def indexSetWithIndex( index ):
indexSet = NSIndexSet.alloc().initWithIndex_( index )
return indexSet
def hasColor( thisGlyph, colorIndex ):
returnValue = False
if thisGlyph.color == colorIndex:
returnValue = True
return returnValue
if selectedLayers:
thisDoc = Glyphs.currentDocument
thisController = thisDoc.windowController().tabBarControl().viewControllers()[0].glyphsArrayController()
displayedGlyphs = thisController.arrangedObjects()
thisFont.disableUpdateInterface() # suppresses UI updates in Font View
for layer in selectedLayers:
selectedColor = layer.parent.color
for i in range(len( displayedGlyphs )):
thisGlyph = displayedGlyphs[i]
if hasColor( thisGlyph, selectedColor ):
thisController.addSelectionIndexes_( indexSetWithIndex(i) )
thisFont.enableUpdateInterface() # re-enables UI updates in Font View
|
{
"content_hash": "dbbc135f407eab87ffa3d3e5dc8738d6",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 105,
"avg_line_length": 32.1875,
"alnum_prop": 0.7796116504854369,
"repo_name": "weiweihuanghuang/Glyphs-Scripts",
"id": "b33db7d5bc7167018d7d1062181c713d86def054",
"size": "1084",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Select/Select Same Color.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "316614"
}
],
"symlink_target": ""
}
|
import io
import os
import tempfile
import unittest.mock
from contextlib import redirect_stdout
import pytest
from airflow import models
from airflow.cli import cli_parser
from airflow.cli.commands import variable_command
from airflow.models import Variable
from tests.test_utils.db import clear_db_variables
class TestCliVariables(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.dagbag = models.DagBag(include_examples=True)
cls.parser = cli_parser.get_parser()
def setUp(self):
clear_db_variables()
def tearDown(self):
clear_db_variables()
def test_variables_set(self):
"""Test variable_set command"""
variable_command.variables_set(self.parser.parse_args(['variables', 'set', 'foo', 'bar']))
assert Variable.get("foo") is not None
with pytest.raises(KeyError):
Variable.get("foo1")
def test_variables_get(self):
Variable.set('foo', {'foo': 'bar'}, serialize_json=True)
with redirect_stdout(io.StringIO()) as stdout:
variable_command.variables_get(self.parser.parse_args(['variables', 'get', 'foo']))
assert '{\n "foo": "bar"\n}\n' == stdout.getvalue()
def test_get_variable_default_value(self):
with redirect_stdout(io.StringIO()) as stdout:
variable_command.variables_get(
self.parser.parse_args(['variables', 'get', 'baz', '--default', 'bar'])
)
assert "bar\n" == stdout.getvalue()
def test_get_variable_missing_variable(self):
with pytest.raises(SystemExit):
variable_command.variables_get(self.parser.parse_args(['variables', 'get', 'no-existing-VAR']))
def test_variables_set_different_types(self):
"""Test storage of various data types"""
# Set a dict
variable_command.variables_set(
self.parser.parse_args(['variables', 'set', 'dict', '{"foo": "oops"}'])
)
# Set a list
variable_command.variables_set(self.parser.parse_args(['variables', 'set', 'list', '["oops"]']))
# Set str
variable_command.variables_set(self.parser.parse_args(['variables', 'set', 'str', 'hello string']))
# Set int
variable_command.variables_set(self.parser.parse_args(['variables', 'set', 'int', '42']))
# Set float
variable_command.variables_set(self.parser.parse_args(['variables', 'set', 'float', '42.0']))
# Set true
variable_command.variables_set(self.parser.parse_args(['variables', 'set', 'true', 'true']))
# Set false
variable_command.variables_set(self.parser.parse_args(['variables', 'set', 'false', 'false']))
# Set none
variable_command.variables_set(self.parser.parse_args(['variables', 'set', 'null', 'null']))
# Export and then import
variable_command.variables_export(
self.parser.parse_args(['variables', 'export', 'variables_types.json'])
)
variable_command.variables_import(
self.parser.parse_args(['variables', 'import', 'variables_types.json'])
)
# Assert value
assert {'foo': 'oops'} == Variable.get('dict', deserialize_json=True)
assert ['oops'] == Variable.get('list', deserialize_json=True)
assert 'hello string' == Variable.get('str') # cannot json.loads(str)
assert 42 == Variable.get('int', deserialize_json=True)
assert 42.0 == Variable.get('float', deserialize_json=True)
assert Variable.get('true', deserialize_json=True) is True
assert Variable.get('false', deserialize_json=True) is False
assert Variable.get('null', deserialize_json=True) is None
os.remove('variables_types.json')
def test_variables_list(self):
"""Test variable_list command"""
# Test command is received
variable_command.variables_list(self.parser.parse_args(['variables', 'list']))
def test_variables_delete(self):
"""Test variable_delete command"""
variable_command.variables_set(self.parser.parse_args(['variables', 'set', 'foo', 'bar']))
variable_command.variables_delete(self.parser.parse_args(['variables', 'delete', 'foo']))
with pytest.raises(KeyError):
Variable.get("foo")
def test_variables_import(self):
"""Test variables_import command"""
with pytest.raises(SystemExit, match=r"Invalid variables file"):
variable_command.variables_import(self.parser.parse_args(['variables', 'import', os.devnull]))
def test_variables_export(self):
"""Test variables_export command"""
variable_command.variables_export(self.parser.parse_args(['variables', 'export', os.devnull]))
def test_variables_isolation(self):
"""Test isolation of variables"""
with tempfile.NamedTemporaryFile(delete=True) as tmp1, tempfile.NamedTemporaryFile(
delete=True
) as tmp2:
# First export
variable_command.variables_set(
self.parser.parse_args(['variables', 'set', 'foo', '{"foo":"bar"}'])
)
variable_command.variables_set(self.parser.parse_args(['variables', 'set', 'bar', 'original']))
variable_command.variables_export(self.parser.parse_args(['variables', 'export', tmp1.name]))
with open(tmp1.name) as first_exp:
variable_command.variables_set(self.parser.parse_args(['variables', 'set', 'bar', 'updated']))
variable_command.variables_set(
self.parser.parse_args(['variables', 'set', 'foo', '{"foo":"oops"}'])
)
variable_command.variables_delete(self.parser.parse_args(['variables', 'delete', 'foo']))
variable_command.variables_import(self.parser.parse_args(['variables', 'import', tmp1.name]))
assert 'original' == Variable.get('bar')
assert '{\n "foo": "bar"\n}' == Variable.get('foo')
# Second export
variable_command.variables_export(self.parser.parse_args(['variables', 'export', tmp2.name]))
with open(tmp2.name) as second_exp:
assert first_exp.read() == second_exp.read()
|
{
"content_hash": "d04aa1853f81fc3b0bba86b0f0dbf4e9",
"timestamp": "",
"source": "github",
"line_count": 144,
"max_line_length": 110,
"avg_line_length": 43.56944444444444,
"alnum_prop": 0.6165125916480714,
"repo_name": "mistercrunch/airflow",
"id": "cd497f2f9c491addcbd38bf45b41144a0ff69f2c",
"size": "7063",
"binary": false,
"copies": "8",
"ref": "refs/heads/main",
"path": "tests/cli/commands/test_variable_command.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "36341"
},
{
"name": "HTML",
"bytes": "99243"
},
{
"name": "JavaScript",
"bytes": "891460"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "773270"
},
{
"name": "Shell",
"bytes": "5659"
}
],
"symlink_target": ""
}
|
"""
CellState Manager
"""
import copy
import datetime
import functools
import time
from oslo_config import cfg
from oslo_db import exception as db_exc
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_utils import timeutils
from oslo_utils import units
from nova.cells import rpc_driver
from nova import context
from nova.db import base
from nova import exception
from nova.i18n import _LE
from nova import objects
from nova.openstack.common import fileutils
from nova import rpc
from nova import utils
cell_state_manager_opts = [
cfg.IntOpt('db_check_interval',
default=60,
help='Interval, in seconds, for getting fresh cell '
'information from the database.'),
cfg.StrOpt('cells_config',
help='Configuration file from which to read cells '
'configuration. If given, overrides reading cells '
'from the database.'),
]
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
CONF.import_opt('name', 'nova.cells.opts', group='cells')
CONF.import_opt('reserve_percent', 'nova.cells.opts', group='cells')
CONF.import_opt('mute_child_interval', 'nova.cells.opts', group='cells')
CONF.register_opts(cell_state_manager_opts, group='cells')
class CellState(object):
"""Holds information for a particular cell."""
def __init__(self, cell_name, is_me=False):
self.name = cell_name
self.is_me = is_me
self.last_seen = datetime.datetime.min
self.capabilities = {}
self.capacities = {}
self.db_info = {}
# TODO(comstud): The DB will specify the driver to use to talk
# to this cell, but there's no column for this yet. The only
# available driver is the rpc driver.
self.driver = rpc_driver.CellsRPCDriver()
def update_db_info(self, cell_db_info):
"""Update cell credentials from db."""
self.db_info = {k: v for k, v in cell_db_info.iteritems()
if k != 'name'}
def update_capabilities(self, cell_metadata):
"""Update cell capabilities for a cell."""
self.last_seen = timeutils.utcnow()
self.capabilities = cell_metadata
def update_capacities(self, capacities):
"""Update capacity information for a cell."""
self.last_seen = timeutils.utcnow()
self.capacities = capacities
def get_cell_info(self):
"""Return subset of cell information for OS API use."""
db_fields_to_return = ['is_parent', 'weight_scale', 'weight_offset']
url_fields_to_return = {
'username': 'username',
'hostname': 'rpc_host',
'port': 'rpc_port',
}
cell_info = dict(name=self.name, capabilities=self.capabilities)
if self.db_info:
for field in db_fields_to_return:
cell_info[field] = self.db_info[field]
url = rpc.get_transport_url(self.db_info['transport_url'])
if url.hosts:
for field, canonical in url_fields_to_return.items():
cell_info[canonical] = getattr(url.hosts[0], field)
return cell_info
def send_message(self, message):
"""Send a message to a cell. Just forward this to the driver,
passing ourselves and the message as arguments.
"""
self.driver.send_message_to_cell(self, message)
def __repr__(self):
me = "me" if self.is_me else "not_me"
return "Cell '%s' (%s)" % (self.name, me)
def sync_before(f):
"""Use as a decorator to wrap methods that use cell information to
make sure they sync the latest information from the DB periodically.
"""
@functools.wraps(f)
def wrapper(self, *args, **kwargs):
self._cell_data_sync()
return f(self, *args, **kwargs)
return wrapper
def sync_after(f):
"""Use as a decorator to wrap methods that update cell information
in the database to make sure the data is synchronized immediately.
"""
@functools.wraps(f)
def wrapper(self, *args, **kwargs):
result = f(self, *args, **kwargs)
self._cell_data_sync(force=True)
return result
return wrapper
_unset = object()
class CellStateManager(base.Base):
def __new__(cls, cell_state_cls=None, cells_config=_unset):
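        # __new__ acts as a factory: constructing CellStateManager directly
        # returns a file-backed or DB-backed subclass depending on whether a
        # cells config file is configured; subclasses construct normally.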
if cls is not CellStateManager:
return super(CellStateManager, cls).__new__(cls)
if cells_config is _unset:
cells_config = CONF.cells.cells_config
if cells_config:
return CellStateManagerFile(cell_state_cls)
return CellStateManagerDB(cell_state_cls)
def __init__(self, cell_state_cls=None):
super(CellStateManager, self).__init__()
if not cell_state_cls:
cell_state_cls = CellState
self.cell_state_cls = cell_state_cls
self.my_cell_state = cell_state_cls(CONF.cells.name, is_me=True)
self.parent_cells = {}
self.child_cells = {}
self.last_cell_db_check = datetime.datetime.min
attempts = 0
while True:
try:
self._cell_data_sync(force=True)
break
except db_exc.DBError:
attempts += 1
if attempts > 120:
raise
LOG.exception(_LE('DB error'))
time.sleep(30)
my_cell_capabs = {}
for cap in CONF.cells.capabilities:
name, value = cap.split('=', 1)
if ';' in value:
values = set(value.split(';'))
else:
values = set([value])
my_cell_capabs[name] = values
self.my_cell_state.update_capabilities(my_cell_capabs)
def _refresh_cells_from_dict(self, db_cells_dict):
"""Make our cell info map match the db."""
        # Update current cells; iterate over a copy so that cells which have
        # disappeared can be deleted safely during iteration.
        for cells_dict in (self.parent_cells, self.child_cells):
            for cell_name, cell_info in list(cells_dict.items()):
is_parent = cell_info.db_info['is_parent']
db_dict = db_cells_dict.get(cell_name)
if db_dict and is_parent == db_dict['is_parent']:
cell_info.update_db_info(db_dict)
else:
del cells_dict[cell_name]
# Add new cells
for cell_name, db_info in db_cells_dict.items():
if db_info['is_parent']:
cells_dict = self.parent_cells
else:
cells_dict = self.child_cells
if cell_name not in cells_dict:
cells_dict[cell_name] = self.cell_state_cls(cell_name)
cells_dict[cell_name].update_db_info(db_info)
def _time_to_sync(self):
"""Is it time to sync the DB against our memory cache?"""
diff = timeutils.utcnow() - self.last_cell_db_check
        return diff.total_seconds() >= CONF.cells.db_check_interval
def _update_our_capacity(self, ctxt=None):
"""Update our capacity in the self.my_cell_state CellState.
This will add/update 2 entries in our CellState.capacities,
'ram_free' and 'disk_free'.
The values of these are both dictionaries with the following
format:
{'total_mb': <total_memory_free_in_the_cell>,
         'units_by_mb': <units_dictionary>}
<units_dictionary> contains the number of units that we can build for
every distinct memory or disk requirement that we have based on
instance types. This number is computed by looking at room available
on every compute_node.
Take the following instance_types as an example:
[{'memory_mb': 1024, 'root_gb': 10, 'ephemeral_gb': 100},
{'memory_mb': 2048, 'root_gb': 20, 'ephemeral_gb': 200}]
capacities['ram_free']['units_by_mb'] would contain the following:
{'1024': <number_of_instances_that_will_fit>,
'2048': <number_of_instances_that_will_fit>}
capacities['disk_free']['units_by_mb'] would contain the following:
{'122880': <number_of_instances_that_will_fit>,
'225280': <number_of_instances_that_will_fit>}
Units are in MB, so 122880 = (10 + 100) * 1024.
NOTE(comstud): Perhaps we should only report a single number
available per instance_type.
"""
if not ctxt:
ctxt = context.get_admin_context()
reserve_level = CONF.cells.reserve_percent / 100.0
compute_hosts = {}
def _get_compute_hosts():
service_refs = {service.host: service
for service in objects.ServiceList.get_by_binary(
ctxt, 'nova-compute')}
compute_nodes = objects.ComputeNodeList.get_all(ctxt)
for compute in compute_nodes:
host = compute.host
service = service_refs.get(host)
if not service or service['disabled']:
continue
compute_hosts[host] = {
'free_ram_mb': compute['free_ram_mb'],
'free_disk_mb': compute['free_disk_gb'] * 1024,
'total_ram_mb': compute['memory_mb'],
'total_disk_mb': compute['local_gb'] * 1024}
_get_compute_hosts()
if not compute_hosts:
self.my_cell_state.update_capacities({})
return
ram_mb_free_units = {}
disk_mb_free_units = {}
total_ram_mb_free = 0
total_disk_mb_free = 0
def _free_units(total, free, per_inst):
if per_inst:
min_free = total * reserve_level
free = max(0, free - min_free)
return int(free / per_inst)
else:
return 0
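        # Illustrative arithmetic with hypothetical numbers: for a node with
        # total=8192 MB, free=4096 MB and reserve_percent=10, min_free is
        # 819.2 MB, leaving 3276.8 MB usable, i.e. 3 units of a 1024 MB flavor.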
instance_types = self.db.flavor_get_all(ctxt)
memory_mb_slots = frozenset(
[inst_type['memory_mb'] for inst_type in instance_types])
disk_mb_slots = frozenset(
[(inst_type['root_gb'] + inst_type['ephemeral_gb']) * units.Ki
for inst_type in instance_types])
for compute_values in compute_hosts.values():
total_ram_mb_free += compute_values['free_ram_mb']
total_disk_mb_free += compute_values['free_disk_mb']
for memory_mb_slot in memory_mb_slots:
ram_mb_free_units.setdefault(str(memory_mb_slot), 0)
free_units = _free_units(compute_values['total_ram_mb'],
compute_values['free_ram_mb'], memory_mb_slot)
ram_mb_free_units[str(memory_mb_slot)] += free_units
for disk_mb_slot in disk_mb_slots:
disk_mb_free_units.setdefault(str(disk_mb_slot), 0)
free_units = _free_units(compute_values['total_disk_mb'],
compute_values['free_disk_mb'], disk_mb_slot)
disk_mb_free_units[str(disk_mb_slot)] += free_units
capacities = {'ram_free': {'total_mb': total_ram_mb_free,
'units_by_mb': ram_mb_free_units},
'disk_free': {'total_mb': total_disk_mb_free,
'units_by_mb': disk_mb_free_units}}
self.my_cell_state.update_capacities(capacities)
@sync_before
def get_cell_info_for_neighbors(self):
"""Return cell information for all neighbor cells."""
cell_list = [cell.get_cell_info()
                     for cell in self.child_cells.values()]
cell_list.extend([cell.get_cell_info()
                          for cell in self.parent_cells.values()])
return cell_list
@sync_before
def get_my_state(self):
"""Return information for my (this) cell."""
return self.my_cell_state
@sync_before
def get_child_cells(self):
"""Return list of child cell_infos."""
return self.child_cells.values()
@sync_before
def get_parent_cells(self):
"""Return list of parent cell_infos."""
return self.parent_cells.values()
@sync_before
def get_parent_cell(self, cell_name):
return self.parent_cells.get(cell_name)
@sync_before
def get_child_cell(self, cell_name):
return self.child_cells.get(cell_name)
@sync_before
def update_cell_capabilities(self, cell_name, capabilities):
"""Update capabilities for a cell."""
cell = (self.child_cells.get(cell_name) or
self.parent_cells.get(cell_name))
if not cell:
LOG.error(_LE("Unknown cell '%(cell_name)s' when trying to "
"update capabilities"),
{'cell_name': cell_name})
return
# Make sure capabilities are sets.
for capab_name, values in capabilities.items():
capabilities[capab_name] = set(values)
cell.update_capabilities(capabilities)
@sync_before
def update_cell_capacities(self, cell_name, capacities):
"""Update capacities for a cell."""
cell = (self.child_cells.get(cell_name) or
self.parent_cells.get(cell_name))
if not cell:
LOG.error(_LE("Unknown cell '%(cell_name)s' when trying to "
"update capacities"),
{'cell_name': cell_name})
return
cell.update_capacities(capacities)
@sync_before
def get_our_capabilities(self, include_children=True):
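        # Union our own capability sets with those reported by recently-seen
        # child cells; children silent for longer than mute_child_interval
        # are skipped.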
capabs = copy.deepcopy(self.my_cell_state.capabilities)
if include_children:
for cell in self.child_cells.values():
if timeutils.is_older_than(cell.last_seen,
CONF.cells.mute_child_interval):
continue
for capab_name, values in cell.capabilities.items():
if capab_name not in capabs:
capabs[capab_name] = set([])
capabs[capab_name] |= values
return capabs
def _add_to_dict(self, target, src):
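        # Recursively merge src into target, summing leaf values; used to
        # aggregate nested capacity dicts from child cells into our own.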
for key, value in src.items():
if isinstance(value, dict):
target.setdefault(key, {})
self._add_to_dict(target[key], value)
continue
target.setdefault(key, 0)
target[key] += value
@sync_before
def get_our_capacities(self, include_children=True):
capacities = copy.deepcopy(self.my_cell_state.capacities)
if include_children:
for cell in self.child_cells.values():
self._add_to_dict(capacities, cell.capacities)
return capacities
@sync_before
def get_capacities(self, cell_name=None):
if not cell_name or cell_name == self.my_cell_state.name:
return self.get_our_capacities()
if cell_name in self.child_cells:
return self.child_cells[cell_name].capacities
raise exception.CellNotFound(cell_name=cell_name)
@sync_before
def cell_get(self, ctxt, cell_name):
for cells_dict in (self.parent_cells, self.child_cells):
if cell_name in cells_dict:
return cells_dict[cell_name]
raise exception.CellNotFound(cell_name=cell_name)
class CellStateManagerDB(CellStateManager):
@utils.synchronized('cell-db-sync')
def _cell_data_sync(self, force=False):
"""Update cell status for all cells from the backing data store
when necessary.
:param force: If True, cell status will be updated regardless
of whether it's time to do so.
"""
if force or self._time_to_sync():
LOG.debug("Updating cell cache from db.")
self.last_cell_db_check = timeutils.utcnow()
ctxt = context.get_admin_context()
db_cells = self.db.cell_get_all(ctxt)
db_cells_dict = {cell['name']: cell for cell in db_cells}
self._refresh_cells_from_dict(db_cells_dict)
self._update_our_capacity(ctxt)
@sync_after
def cell_create(self, ctxt, values):
return self.db.cell_create(ctxt, values)
@sync_after
def cell_update(self, ctxt, cell_name, values):
return self.db.cell_update(ctxt, cell_name, values)
@sync_after
def cell_delete(self, ctxt, cell_name):
return self.db.cell_delete(ctxt, cell_name)
class CellStateManagerFile(CellStateManager):
def __init__(self, cell_state_cls=None):
cells_config = CONF.cells.cells_config
self.cells_config_path = CONF.find_file(cells_config)
if not self.cells_config_path:
raise cfg.ConfigFilesNotFoundError(config_files=[cells_config])
super(CellStateManagerFile, self).__init__(cell_state_cls)
def _cell_data_sync(self, force=False):
"""Update cell status for all cells from the backing data store
when necessary.
:param force: If True, cell status will be updated regardless
of whether it's time to do so.
"""
reloaded, data = fileutils.read_cached_file(self.cells_config_path,
force_reload=force)
if reloaded:
LOG.debug("Updating cell cache from config file.")
self.cells_config_data = jsonutils.loads(data)
self._refresh_cells_from_dict(self.cells_config_data)
if force or self._time_to_sync():
self.last_cell_db_check = timeutils.utcnow()
self._update_our_capacity()
def cell_create(self, ctxt, values):
raise exception.CellsUpdateUnsupported()
def cell_update(self, ctxt, cell_name, values):
raise exception.CellsUpdateUnsupported()
def cell_delete(self, ctxt, cell_name):
raise exception.CellsUpdateUnsupported()
|
{
"content_hash": "5365427c6fa9aa44a089b946da9bb61b",
"timestamp": "",
"source": "github",
"line_count": 484,
"max_line_length": 78,
"avg_line_length": 36.917355371900825,
"alnum_prop": 0.5826617416610701,
"repo_name": "petrutlucian94/nova",
"id": "a53ae50cafea1e35e46daee64b696ced49f13fdc",
"size": "18505",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "nova/cells/state.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "16100209"
},
{
"name": "Shell",
"bytes": "20716"
},
{
"name": "Smarty",
"bytes": "337153"
}
],
"symlink_target": ""
}
|
import picocm
import unittest
from picocm_unittest import *
class Jinja2Tests(unittest.TestCase):
def test_jinja2(self):
        template = 'Hello {{a}}{{b}}!'
        expected = 'Hello PicoCM!'
with TempFile() as templatefile:
with TempFile() as dst:
picocm.file_write(templatefile.name, template)
self.assertTrue(picocm.jinja2(dst.name, templatefile.name, args={'a':'Pico', 'b':'CM'}))
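                # Presumably picocm.jinja2 reports whether the destination
                # changed: rendering the same template a second time is
                # expected to be a no-op and return False.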
self.assertFalse(picocm.jinja2(dst.name, templatefile.name, args={'a':'Pico', 'b':'CM'}))
self.assertEqual(expected, picocm.file_read(dst.name))
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "e516f02b991ccbf322104d00f606464c",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 93,
"avg_line_length": 32.72222222222222,
"alnum_prop": 0.6859083191850595,
"repo_name": "mediafly/picocm",
"id": "87a0521ea0c0103c0f039545b600577346aaf255",
"size": "613",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/picocm_tests_jinja2.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "2270"
},
{
"name": "Python",
"bytes": "19710"
}
],
"symlink_target": ""
}
|