Search is not available for this dataset
text stringlengths 75 104k |
|---|
def split_model_kwargs(kw):
    """
    Split lookup-style kwargs into plain field values and subfield args.

    Keys without '__' go into the first dict as-is; keys such as
    'field__subfield' are grouped into the second dict as
    ``{'field': {'subfield': value}}``.

    Returns a ``(model_fields, fields_args)`` tuple; the second item is a
    defaultdict so missing fields yield an empty dict.
    """
    from collections import defaultdict
    model_fields = {}
    fields_args = defaultdict(dict)
    for key, value in kw.items():
        if '__' in key:
            field, _, subfield = key.partition('__')
            fields_args[field][subfield] = value
        else:
            model_fields[key] = value
    return model_fields, fields_args
def register(self, field_type, impl=None):
    """
    Register a data-generation function for *field_type*.

    Usable directly, ``register(field_type, func)``, or as a decorator,
    ``@register(field_type)``. Returns the registered function.
    """
    def decorator(func):
        self.registry[field_type] = func
        return func
    return decorator(impl) if impl else decorator
def _create_value(self, *args, **kwargs):
"""
Lowest value generator.
Separated from __call__, because it seems that python
cache __call__ reference on module import
"""
if not len(args):
raise TypeError('Object instance is not provided')
if self.by_instance:
field_type = args[0]
else:
field_type = args[0].__class__
function = self.registry.get(field_type, self.default)
if function is None:
raise TypeError("no match %s" % field_type)
return function(*args, **kwargs) |
def any_form_default(form_cls, **kwargs):
    """
    Return a ``(form_data, form_files)`` tuple with values for every
    base field of *form_cls*.

    Explicitly passed values win; for the rest the any_form_field
    generator is called, receiving any 'name__param' kwargs split out
    by split_model_kwargs. form_files is currently always empty.
    """
    form_data = {}
    form_files = {}
    form_fields, fields_args = split_model_kwargs(kwargs)
    # items() instead of the Python-2-only iteritems() keeps this
    # working on Python 3 as well.
    for name, field in form_cls.base_fields.items():
        if name in form_fields:
            form_data[name] = form_fields[name]
        else:
            form_data[name] = any_form_field(field, **fields_args[name])
    return form_data, form_files
def field_required_attribute(function):
    """
    Sometimes return None if field is not required.
    >>> result = any_form_field(forms.BooleanField(required=False))
    >>> result in ['', 'True', 'False']
    True
    """
    def _wrapper(field, **kwargs):
        # BUG FIX: random.random was compared without being called, so
        # the bound method itself was measured against 0.1 and the None
        # branch could never fire as intended (TypeError on Python 3).
        if not field.required and random.random() < 0.1:
            return None
        return function(field, **kwargs)
    return _wrapper
def field_choices_attibute(function):
    """
    Pick a random valid choice when the field's widget defines choices;
    otherwise defer to the wrapped generator.
    """
    def _wrapper(field, **kwargs):
        widget = field.widget
        if not hasattr(widget, 'choices'):
            return function(field, **kwargs)
        return random.choice(list(valid_choices(widget.choices)))
    return _wrapper
def char_field_data(field, **kwargs):
    """
    Generate a random string for a CharField, honouring the field's own
    min_length/max_length when set, else the keyword overrides/defaults.
    """
    lo = kwargs.get('min_length', 1)
    hi = kwargs.get('max_length', field.max_length or 255)
    return xunit.any_string(min_length=field.min_length or lo,
                           max_length=field.max_length or hi)
def decimal_field_data(field, **kwargs):
    """
    Return random value for DecimalField (as a string).
    >>> result = any_form_field(forms.DecimalField(max_value=100, min_value=11, max_digits=4, decimal_places = 2))
    >>> type(result)
    <type 'str'>
    >>> from decimal import Decimal
    >>> Decimal(result) >= 11, Decimal(result) <= Decimal('99.99')
    (True, True)
    """
    # Fallback bounds used when the field declares no validators.
    min_value = 0
    max_value = 10
    from django.core.validators import MinValueValidator, MaxValueValidator
    # Pull min/max bounds from the field's validators, if present.
    for elem in field.validators:
        if isinstance(elem, MinValueValidator):
            min_value = elem.limit_value
        if isinstance(elem, MaxValueValidator):
            max_value = elem.limit_value
    if (field.max_digits and field.decimal_places):
        from decimal import Decimal
        # Cap max_value at the largest number representable with the
        # declared digit budget, e.g. max_digits=4/places=2 -> 99.99.
        max_value = min(max_value,
                        Decimal('%s.%s' % ('9'*(field.max_digits-field.decimal_places),
                                           '9'*field.decimal_places)))
    # Explicit keyword overrides beat validator-derived bounds.
    min_value = kwargs.get('min_value') or min_value
    max_value = kwargs.get('max_value') or max_value
    return str(xunit.any_decimal(min_value=min_value,
                                 max_value=max_value,
                                 decimal_places = field.decimal_places or 2))
def email_field_data(field, **kwargs):
    """
    Return random value for EmailField.
    >>> result = any_form_field(forms.EmailField(min_length=10, max_length=30))
    >>> type(result)
    <type 'str'>
    >>> len(result) <= 30, len(result) >= 10
    (True, True)
    """
    max_length = 10
    if field.max_length:
        # Floor division keeps the result an int on Python 3 too; the
        # -5 budget accounts for '@', '.' and the 2-3 char TLD.
        max_length = (field.max_length - 5) // 2
    min_length = 10
    if field.min_length:
        min_length = (field.min_length - 4) // 2
    return "%s@%s.%s" % (
        xunit.any_string(min_length=min_length, max_length=max_length),
        xunit.any_string(min_length=min_length, max_length=max_length),
        xunit.any_string(min_length=2, max_length=3))
def date_field_data(field, **kwargs):
    """
    Produce a random date string for a DateField, rendered with one of
    the field's accepted input formats.
    """
    start = kwargs.get('from_date', date(1990, 1, 1))
    end = kwargs.get('to_date', date.today())
    fmt = random.choice(field.input_formats or formats.get_format('DATE_INPUT_FORMATS'))
    value = xunit.any_date(from_date=start, to_date=end)
    return value.strftime(fmt)
def datetime_field_data(field, **kwargs):
    """
    Produce a random datetime string for a DateTimeField, rendered with
    one of the field's accepted input formats.
    """
    start = kwargs.get('from_date', datetime(1990, 1, 1))
    end = kwargs.get('to_date', datetime.today())
    fmt = random.choice(field.input_formats or formats.get_format('DATETIME_INPUT_FORMATS'))
    value = xunit.any_datetime(from_date=start, to_date=end)
    return value.strftime(fmt)
def float_field_data(field, **kwargs):
    """
    Produce a random float (as string) for a FloatField, respecting any
    Min/MaxValueValidator on the field; kwargs may override the bounds
    and the decimal precision.
    """
    from django.core.validators import MinValueValidator, MaxValueValidator
    low, high = 0, 100
    for validator in field.validators:
        if isinstance(validator, MinValueValidator):
            low = validator.limit_value
        if isinstance(validator, MaxValueValidator):
            high = validator.limit_value
    low = kwargs.get('min_value', low)
    high = kwargs.get('max_value', high)
    digits = kwargs.get('precision', 3)
    return str(xunit.any_float(min_value=low, max_value=high, precision=digits))
def integer_field_data(field, **kwargs):
    """
    Produce a random integer (as string) for an IntegerField, respecting
    any Min/MaxValueValidator on the field; kwargs may override bounds.
    """
    from django.core.validators import MinValueValidator, MaxValueValidator
    low, high = 0, 100
    for validator in field.validators:
        if isinstance(validator, MinValueValidator):
            low = validator.limit_value
        if isinstance(validator, MaxValueValidator):
            high = validator.limit_value
    low = kwargs.get('min_value', low)
    high = kwargs.get('max_value', high)
    return str(xunit.any_int(min_value=low, max_value=high))
def ipaddress_field_data(field, **kwargs):
    """
    Return random value for IPAddressField.

    An explicit ``choices`` kwarg is honoured first; otherwise four
    random octets are joined into a dotted-quad string.
    """
    choices = kwargs.get('choices')
    if choices:
        return random.choice(choices)
    # range() instead of the Python-2-only xrange().
    nums = [str(xunit.any_int(min_value=0, max_value=255)) for _ in range(4)]
    return ".".join(nums)
def slug_field_data(field, **kwargs):
    """
    Generate a random slug (letters, digits, '_' and '-') for a
    SlugField, sized between min_length and max_length.
    """
    from string import ascii_letters, digits
    lo = kwargs.get('min_length', 1)
    hi = kwargs.get('max_length', field.max_length or 20)
    alphabet = ascii_letters + digits + '_-'
    return xunit.any_string(letters=alphabet, min_length=lo, max_length=hi)
def time_field_data(field, **kwargs):
    """
    Produce a random time string for a TimeField, rendered with one of
    the field's accepted input formats.
    """
    fmt = random.choice(field.input_formats or formats.get_format('TIME_INPUT_FORMATS'))
    value = time(xunit.any_int(min_value=0, max_value=23),
                 xunit.any_int(min_value=0, max_value=59),
                 xunit.any_int(min_value=0, max_value=59))
    return value.strftime(fmt)
def choice_field_data(field, **kwargs):
    """
    Pick a random valid choice for a ChoiceField; the literal string
    'None' is returned when the field defines no choices.
    """
    if not field.choices:
        return 'None'
    return str(random.choice(list(valid_choices(field.choices))))
def multiple_choice_field_data(field, **kwargs):
    """
    Return a space-separated random subset of the field's valid choices,
    or the literal string 'None' when the field has no choices.
    """
    if field.choices:
        from django_any.functions import valid_choices
        pool = list(valid_choices(field.choices))
        random.shuffle(pool)
        count = xunit.any_int(min_value=1, max_value=len(field.choices))
        # Slicing replaces the Python-2-only xrange() copy loop.
        return ' '.join(pool[:count])
    return 'None'
def model_choice_field_data(field, **kwargs):
    """
    Return a random entry among the first ten items of the field's
    queryset; raise TypeError when the queryset is empty.
    """
    candidates = list(field.queryset[:10])
    if not candidates:
        raise TypeError('No %s available in queryset' % field.queryset.model)
    return random.choice(candidates)
def encode_xml(obj, E=None):
    """ Encodes an OpenMath object as an XML node.
    :param obj: OpenMath object (or related item) to encode as XML.
    :type obj: OMAny
    :param E: Element factory to use, or a namespace prefix for
    "http://www.openmath.org/OpenMath", or None for the default factory.
    :type E: ElementMaker, str, None
    :return: The XML node representing the OpenMath data structure.
    :rtype: etree._Element
    """
    if E is None:
        E = default_E
    elif isinstance(E, str):
        # A string E is treated as a namespace prefix for the OpenMath ns.
        E = ElementMaker(namespace=xml.openmath_ns,
                         nsmap={ E: xml.openmath_ns })
    name = ""
    attr = {}
    children = []
    # Attributes shared by several node kinds.
    if isinstance(obj, om.CDBaseAttribute) and obj.cdbase is not None:
        attr["cdbase"] = obj.cdbase
    if isinstance(obj, om.CommonAttributes) and obj.id is not None:
        attr["id"] = obj.id
    # Wrapper object
    if isinstance(obj, om.OMObject):
        children.append(encode_xml(obj.omel, E))
        attr["version"] = obj.version
    # Derived Objects
    elif isinstance(obj, om.OMReference):
        attr["href"] = obj.href
    # Basic Objects
    elif isinstance(obj, om.OMInteger):
        children.append(str(obj.integer))
    elif isinstance(obj, om.OMFloat):
        # Floats are serialised via the decimal "dec" attribute.
        attr["dec"] = obj.double
    elif isinstance(obj, om.OMString):
        if obj.string is not None:
            children.append(str(obj.string))
    elif isinstance(obj, om.OMBytes):
        # Byte content is carried as base64 text.
        children.append(base64.b64encode(obj.bytes).decode('ascii'))
    elif isinstance(obj, om.OMSymbol):
        attr["name"] = obj.name
        attr["cd"] = obj.cd
    elif isinstance(obj, om.OMVariable):
        attr["name"] = obj.name
    # Derived Elements
    elif isinstance(obj, om.OMForeign):
        attr["encoding"] = obj.encoding
        children.append(str(obj.obj))
    # Compound Elements
    elif isinstance(obj, om.OMApplication):
        children = [encode_xml(obj.elem, E)]
        children.extend(encode_xml(x, E) for x in obj.arguments)
    elif isinstance(obj, om.OMAttribution):
        children = [encode_xml(obj.pairs, E), encode_xml(obj.obj, E)]
    elif isinstance(obj, om.OMAttributionPairs):
        # Pairs are flattened into alternating key/value children.
        for (k, v) in obj.pairs:
            children.append(encode_xml(k, E))
            children.append(encode_xml(v, E))
    elif isinstance(obj, om.OMBinding):
        children = [
            encode_xml(obj.binder, E),
            encode_xml(obj.vars, E),
            encode_xml(obj.obj, E)
        ]
    elif isinstance(obj, om.OMBindVariables):
        children = [encode_xml(x, E) for x in obj.vars]
    elif isinstance(obj, om.OMAttVar):
        children = [encode_xml(obj.pairs, E), encode_xml(obj.obj, E)]
    elif isinstance(obj, om.OMError):
        children = [encode_xml(obj.name, E)]
        children.extend(encode_xml(x, E) for x in obj.params)
    else:
        raise TypeError("Expected obj to be of type OMAny, found %s." % obj.__class__.__name__)
    # Attribute values must be strings; drop unset (None) entries.
    attr = dict((k,str(v)) for k, v in attr.items() if v is not None)
    return E(xml.object_to_tag(obj), *children, **attr)
def encode_bytes(obj, nsprefix=None):
    """ Serialise an OpenMath object into an XML byte string.
    :param obj: Object to encode as string.
    :type obj: OMAny
    :param nsprefix: Optional namespace prefix forwarded to encode_xml.
    :rtype: bytes
    """
    return etree.tostring(encode_xml(obj, nsprefix))
def decode_bytes(xml, validator=None, snippet=False):
    """ Decodes an XML byte string into an OpenMath object.
    :param xml: XML to decode.
    :type xml: bytes
    :param validator: Validator to use, if any.
    :param snippet: Is this an OpenMath snippet, or a full object?
    :type snippet: Bool
    :rtype: OMAny
    """
    buffer = io.BytesIO(xml)
    return decode_stream(buffer, validator, snippet)
def decode_stream(stream, validator=None, snippet=False):
    """ Decodes a stream into an OpenMath object.
    :param stream: File-like object to parse XML from.
    :param validator: Optional lxml validator applied to the parsed tree.
    :param snippet: When True, skip the OpenMath version check.
    :type snippet: Bool
    :rtype: OMAny
    """
    tree = etree.parse(stream)
    if validator is not None:
        validator.assertValid(tree)
    root = tree.getroot()
    if not snippet:
        # Full objects must carry version="2.0" on the root element.
        if root.get("version") != "2.0":
            raise ValueError("Only OpenMath 2.0 is supported")
    return decode_xml(root)
def decode_xml(elem, _in_bind = False):
    """ Decodes an XML element into an OpenMath object.
    :param elem: Element to decode.
    :type elem: etree._Element
    :param _in_bind: Internal flag used to indicate if we should decode within
    an OMBind.
    :type _in_bind: bool
    :rtype: OMAny
    """
    # Map the XML tag to the matching OpenMath class, then collect its
    # constructor keyword arguments in ``attrs``.
    obj = xml.tag_to_object(elem)
    attrs = {}
    def a2d(*props):
        # Copy the named XML attributes straight into ``attrs``.
        for p in props:
            attrs[p] = elem.get(p)
    if issubclass(obj, om.CommonAttributes):
        a2d("id")
    if issubclass(obj, om.CDBaseAttribute):
        a2d("cdbase")
    # Root Object
    if issubclass(obj, om.OMObject):
        a2d("version")
        attrs["omel"] = decode_xml(elem[0])
    # Reference Objects
    elif issubclass(obj, om.OMReference):
        a2d("href")
    # Basic Objects
    elif issubclass(obj, om.OMInteger):
        attrs["integer"] = int(elem.text)
    elif issubclass(obj, om.OMFloat):
        # TODO: Support Hex
        attrs["double"] = float(elem.get('dec'))
    elif issubclass(obj, om.OMString):
        attrs["string"] = elem.text
    elif issubclass(obj, om.OMBytes):
        # Some inputs need an explicit ascii-bytes conversion before
        # base64 decoding; retry on TypeError.
        try:
            attrs["bytes"] = base64.b64decode(elem.text)
        except TypeError:
            attrs["bytes"] = base64.b64decode(bytes(elem.text, "ascii"))
    elif issubclass(obj, om.OMSymbol):
        a2d("name", "cd")
    elif issubclass(obj, om.OMVariable):
        a2d("name")
    # Derived Elements
    elif issubclass(obj, om.OMForeign):
        attrs["obj"] = elem.text
        a2d("encoding")
    # Compound Elements
    elif issubclass(obj, om.OMApplication):
        attrs["elem"] = decode_xml(elem[0])
        attrs["arguments"] = list(map(decode_xml, elem[1:]))
    elif issubclass(obj, om.OMAttribution):
        attrs["pairs"] = decode_xml(elem[0])
        attrs["obj"] = decode_xml(elem[1])
    elif issubclass(obj, om.OMAttributionPairs):
        if not _in_bind:
            # Children alternate key/value; zip them back into pairs.
            attrs["pairs"] = [(decode_xml(k), decode_xml(v)) for k, v in zip(elem[::2], elem[1::2])]
        else:
            # Inside an OMBind the same tag denotes an attributed
            # variable instead of a pairs list.
            obj = om.OMAttVar
            attrs["pairs"] = decode_xml(elem[0], True)
            attrs["obj"] = decode_xml(elem[1], True)
    elif issubclass(obj, om.OMBinding):
        attrs["binder"] = decode_xml(elem[0])
        attrs["vars"] = decode_xml(elem[1])
        attrs["obj"] = decode_xml(elem[2])
    elif issubclass(obj, om.OMBindVariables):
        attrs["vars"] = list(map(lambda x:decode_xml(x, True), elem[:]))
    elif issubclass(obj, om.OMError):
        attrs["name"] = decode_xml(elem[0])
        attrs["params"] = list(map(decode_xml, elem[1:]))
    else:
        raise TypeError("Expected OMAny, found %s." % obj.__name__)
    return obj(**attrs)
def publish(msg="checkpoint: publish package"):
    """Deploy the app to PYPI.

    Runs checks, builds sdist/egg artifacts, uploads via twine and
    finally tags the release; each step runs only if the previous one
    succeeded.

    Args:
        msg (str, optional): checkpoint message (unused while the push
            step stays commented out).
    """
    test = check()
    if not test.succeeded:
        return
    # clean()
    # push(msg)
    sdist = local("python setup.py sdist")
    if not sdist.succeeded:
        return
    build = local(
        'python setup.py build && python setup.py bdist_egg')
    if not build.succeeded:
        return
    upload = local("twine upload dist/*")
    if upload.succeeded:
        tag()
def tag(version=__version__):
    """Create a git tag for *version*; push all tags if tagging succeeded."""
    result = local("git tag {0}".format(version))
    if result.succeeded:
        local("git push --tags")
def any_field_blank(function):
    """
    Sometimes return None if field could be blank; always None when the
    caller passes isnull=True.
    """
    def wrapper(field, **kwargs):
        if kwargs.get('isnull', False):
            return None
        # BUG FIX: random.random must be called; the bound method itself
        # was being compared to 0.1, so blank fields never yielded None
        # (and the comparison raises TypeError on Python 3).
        if field.blank and random.random() < 0.1:
            return None
        return function(field, **kwargs)
    return wrapper
def any_field_choices(function):
    """
    Pick a random valid entry from field.choices when defined; otherwise
    defer to the wrapped generator.
    >>> CHOICES = [('YNG', 'Child'), ('OLD', 'Parent')]
    >>> result = any_field(models.CharField(max_length=3, choices=CHOICES))
    >>> result in ['YNG', 'OLD']
    True
    """
    def wrapper(field, **kwargs):
        if not field.choices:
            return function(field, **kwargs)
        return random.choice(list(valid_choices(field.choices)))
    return wrapper
def any_biginteger_field(field, **kwargs):
    """
    Random value for a BigIntegerField, coerced to Python 2's ``long``
    (bounds overridable via min_value/max_value kwargs).
    """
    low = kwargs.get('min_value', 1)
    high = kwargs.get('max_value', 10**10)
    return long(xunit.any_int(min_value=low, max_value=high))
def any_positiveinteger_field(field, **kwargs):
    """
    Random positive int for a PositiveIntegerField (1..9999 by default).
    """
    low = kwargs.get('min_value', 1)
    high = kwargs.get('max_value', 9999)
    return xunit.any_int(min_value=low, max_value=high)
def any_char_field(field, **kwargs):
    """
    Random string for a CharField, capped at the field's max_length.
    """
    shortest = kwargs.get('min_length', 1)
    longest = kwargs.get('max_length', field.max_length)
    return xunit.any_string(min_length=shortest, max_length=longest)
def any_commaseparatedinteger_field(field, **kwargs):
    """
    Random comma-separated digits for a CommaSeparatedIntegerField; the
    digit count is half the field's max_length (each digit needs a
    trailing comma except the last).
    """
    # Floor division and range() keep this correct on Python 3 as well.
    nums_count = field.max_length // 2
    nums = [str(xunit.any_int(min_value=0, max_value=9)) for _ in range(nums_count)]
    return ",".join(nums)
def any_date_field(field, **kwargs):
    """
    Random datetime.date for a DateField; auto_now/auto_now_add fields
    yield None since the model populates them itself.
    """
    if field.auto_now or field.auto_now_add:
        return None
    start = kwargs.get('from_date', date(1990, 1, 1))
    end = kwargs.get('to_date', date.today())
    return xunit.any_date(from_date=start, to_date=end)
def any_datetime_field(field, **kwargs):
    """
    Return random value for DateTimeField,
    skips auto_now and auto_now_add fields.
    """
    # BUG FIX: the docstring (and the DateField twin) promise that
    # auto_now/auto_now_add fields are skipped, but the check was
    # missing here.
    if field.auto_now or field.auto_now_add:
        return None
    from_date = kwargs.get('from_date', datetime(1990, 1, 1))
    to_date = kwargs.get('to_date', datetime.today())
    return xunit.any_datetime(from_date=from_date, to_date=to_date)
def any_decimal_field(field, **kwargs):
    """
    Random Decimal for a DecimalField, bounded by the largest value that
    fits the field's max_digits/decimal_places budget.
    """
    biggest = Decimal('%s.%s' % ('9' * (field.max_digits - field.decimal_places),
                                 '9' * field.decimal_places))
    low = kwargs.get('min_value', 0)
    high = kwargs.get('max_value', biggest)
    places = kwargs.get('decimal_places', field.decimal_places)
    return xunit.any_decimal(min_value=low, max_value=high,
                             decimal_places=places)
def any_email_field(field, **kwargs):
    """
    Build a random, syntactically valid e-mail address in the shape
    local@domain.tld with a 2-3 letter TLD.
    """
    local_part = xunit.any_string(max_length=10)
    domain = xunit.any_string(max_length=10)
    tld = xunit.any_string(min_length=2, max_length=3)
    return "%s@%s.%s" % (local_part, domain, tld)
def any_float_field(field, **kwargs):
    """
    Random float for a FloatField (defaults: 1..100, 3 decimal digits).
    """
    low = kwargs.get('min_value', 1)
    high = kwargs.get('max_value', 100)
    digits = kwargs.get('precision', 3)
    return xunit.any_float(min_value=low, max_value=high, precision=digits)
def any_file_field(field, **kwargs):
    """
    Lookup for nearest existing file in the field's storage, starting
    at field.upload_to; returns a FieldFile wrapping it, or None when
    nothing is found and the field is nullable.
    """
    def get_some_file(path):
        # Depth-first search of the storage tree for any regular file.
        subdirs, files = field.storage.listdir(path)
        if files:
            result_file = random.choice(files)
            instance = field.storage.open("%s/%s" % (path, result_file)).file
            return FieldFile(instance, field, result_file)
        for subdir in subdirs:
            result = get_some_file("%s/%s" % (path, subdir))
            if result:
                return result
    result = get_some_file(field.upload_to)
    # A nullable FileField may legitimately stay empty; otherwise an
    # existing file is required and its absence is an error.
    if result is None and not field.null:
        raise TypeError("Can't found file in %s for non nullable FileField" % field.upload_to)
    return result
def any_filepath_field(field, **kwargs):
    """
    Pick a random existing file under field.path (descending into
    subdirectories when field.recursive is set), honouring field.match
    as a filename regex filter.
    """
    def pick_file(path):
        directories, matches = [], []
        for entry in os.listdir(path):
            full = os.path.join(path, entry)
            if os.path.isdir(full):
                directories.append(full)
            elif not field.match or re.match(field.match, entry):
                matches.append(full)
        if matches:
            return random.choice(matches)
        if field.recursive:
            for directory in directories:
                found = pick_file(directory)
                if found:
                    return found
    result = pick_file(field.path)
    if result is None and not field.null:
        raise TypeError("Can't found file in %s for non nullable FilePathField" % field.path)
    return result
def any_ipaddress_field(field, **kwargs):
    """
    Return a random dotted-quad IPv4 string for an IPAddressField.
    """
    # range() instead of the Python-2-only xrange().
    nums = [str(xunit.any_int(min_value=0, max_value=255)) for _ in range(4)]
    return ".".join(nums)
def any_positivesmallinteger_field(field, **kwargs):
    """
    Random int in 1..255 (by default) for a PositiveSmallIntegerField.
    """
    low = kwargs.get('min_value', 1)
    high = kwargs.get('max_value', 255)
    return xunit.any_int(min_value=low, max_value=high)
def any_slug_field(field, **kwargs):
    """
    Random slug (letters, digits, '_', '-') sized by field.max_length.
    """
    alphabet = ascii_letters + digits + '_-'
    return xunit.any_string(letters=alphabet, max_length=field.max_length)
def any_smallinteger_field(field, **kwargs):
    """
    Random int in -255..255 (by default) for a SmallIntegerField.
    """
    low = kwargs.get('min_value', -255)
    high = kwargs.get('max_value', 255)
    return xunit.any_int(min_value=low, max_value=high)
def any_integer_field(field, **kwargs):
    """
    Random int in -10000..10000 (by default) for an IntegerField.
    """
    low = kwargs.get('min_value', -10000)
    high = kwargs.get('max_value', 10000)
    return xunit.any_int(min_value=low, max_value=high)
def any_url_field(field, **kwargs):
    """
    Return a URL for a URLField.

    An explicit ``url`` kwarg wins. If the field carries a URLValidator
    with verify_exists enabled, a known-live URL is picked; otherwise a
    random syntactically valid URL is generated.
    """
    url = kwargs.get('url')
    if url:
        return url
    verified = [validator for validator in field.validators
                if isinstance(validator, validators.URLValidator) and
                validator.verify_exists == True]
    if verified:
        # Existence-checked fields need URLs that actually resolve.
        return choice(['http://news.yandex.ru/society.html',
                       'http://video.google.com/?hl=en&tab=wv',
                       'http://www.microsoft.com/en/us/default.aspx',
                       'http://habrahabr.ru/company/opera/',
                       'http://www.apple.com/support/hardware/',
                       'http://ya.ru',
                       'http://google.com',
                       'http://fr.wikipedia.org/wiki/France'])
    return "http://%s.%s/%s" % (
        xunit.any_string(max_length=10),
        xunit.any_string(min_length=2, max_length=3),
        xunit.any_string(max_length=20))
def any_time_field(field, **kwargs):
    """
    Random datetime.time for a TimeField.
    """
    hour = xunit.any_int(min_value=0, max_value=23)
    minute = xunit.any_int(min_value=0, max_value=59)
    second = xunit.any_int(min_value=0, max_value=59)
    return time(hour, minute, second)
def load_python_global(module, name):
    """
    Evaluate an OpenMath symbol describing a global Python object
    EXAMPLES::
    >>> from openmath.convert_pickle import load_python_global
    >>> load_python_global('math', 'sin')
    <built-in function sin>
    """
    # Python 3 renamed the __builtin__ module to builtins.
    if module == '__builtin__' and six.PY3:
        module = 'builtins'
    return getattr(importlib.import_module(module), name)
def cls_build(inst, state):
    """
    Apply the setstate protocol to initialize `inst` from `state`.
    INPUT:
    - ``inst`` -- a raw instance of a class
    - ``state`` -- the state to restore; typically a dictionary mapping attribute names to their values
    EXAMPLES::
    >>> from openmath.convert_pickle import cls_build
    >>> class A(object): pass
    >>> inst = A.__new__(A)
    >>> state = {"foo": 1, "bar": 4}
    >>> inst2 = cls_build(inst,state)
    >>> inst is inst2
    True
    >>> inst.foo
    1
    >>> inst.bar
    4
    """
    # Copied from Pickler.load_build
    # If the class defines __setstate__ it is solely responsible for
    # restoring the state; everything below is skipped.
    setstate = getattr(inst, "__setstate__", None)
    if setstate:
        setstate(state)
        return inst
    # Otherwise state may be a (dict_state, slot_state) pair.
    slotstate = None
    if isinstance(state, tuple) and len(state) == 2:
        state, slotstate = state
    if state:
        try:
            d = inst.__dict__
            try:
                # Intern keys so repeated attribute names share storage.
                for k, v in six.iteritems(state):
                    d[six.moves.intern(k)] = v
            # keys in state don't have to be strings
            # don't blow up, but don't go out of our way
            except TypeError:
                d.update(state)
        except RuntimeError:
            # XXX In restricted execution, the instance's __dict__
            # is not accessible. Use the old way of unpickling
            # the instance variables. This is a semantic
            # difference when unpickling in restricted
            # vs. unrestricted modes.
            # Note, however, that cPickle has never tried to do the
            # .update() business, and always uses
            # PyObject_SetItem(inst.__dict__, key, value) in a
            # loop over state.items().
            for k, v in state.items():
                setattr(inst, k, v)
    if slotstate:
        # __slots__ attributes must go through setattr, not __dict__.
        for k, v in slotstate.items():
            setattr(inst, k, v)
    return inst
def OMSymbol(self, module, name):
    r"""
    Build an om.OMSymbol whose content dictionary is *module* and whose
    base URI is this converter's cdbase.
    EXAMPLES::
    >>> from openmath.convert_pickle import PickleConverter
    >>> converter = PickleConverter()
    >>> converter.OMSymbol(module="foo.bar", name="baz")
    OMSymbol(name='baz', cd='foo.bar', id=None, cdbase='http://python.org/')
    """
    return om.OMSymbol(name=name, cd=module, cdbase=self._cdbase)
def OMList(self, l):
    """
    Wrap a list of OpenMath objects in an application of Python's
    ``list`` symbol, so ``to_python`` round-trips it back to a list.
    """
    # Mirrors py_openmath's default list conversion, except operands
    # are assumed to be OM objects already.
    head = om.OMSymbol(cdbase=self._cdbase, cd='Python', name='list', )
    return om.OMApplication(elem=head, arguments=l)
def OMTuple(self, l):
    """
    Wrap a sequence of OpenMath objects in an application of Python's
    ``tuple`` symbol, so ``to_python`` round-trips it back to a tuple.
    """
    head = self.OMSymbol(module='Python', name='tuple')
    return om.OMApplication(elem=head, arguments=l)
def OMDict(self, items):
    """
    Encode a list of (key, value) OpenMath pairs as an application of
    Python's ``dict`` symbol over ``tuple`` applications, so
    ``to_python`` round-trips it back to a dict.
    """
    head = self.OMSymbol(module='Python', name='dict')
    pairs = [self.OMTuple(item) for item in items]
    return om.OMApplication(elem=head, arguments=pairs)
def decode(data):
    """
    Decode PackBits-compressed *data* and return the raw bytes.

    Headers 0..127 copy the next header+1 bytes verbatim; headers
    -1..-127 repeat the next byte (1 - header) times; -128 is a no-op.
    """
    data = bytearray(data)  # accepts both str (py2) and bytes (py3)
    out = bytearray()
    i = 0
    total = len(data)
    while i < total:
        header = data[i]
        if header > 127:
            header -= 256  # reinterpret as a signed byte
        i += 1
        if header >= 0:
            # Literal run: copy header+1 bytes unchanged.
            out += data[i:i + header + 1]
            i += header + 1
        elif header == -128:
            pass  # reserved header, skipped per the format
        else:
            # Replicate run: repeat the next byte (1 - header) times.
            out.extend([data[i]] * (1 - header))
            i += 1
    return bytes(out)
def encode(data):
    """
    Encodes data using PackBits encoding.

    Literal (RAW) runs are emitted as (len-1, bytes...); repeat (RLE)
    runs as (257-count mod 256, byte). The -128 header is never produced.
    """
    if len(data) == 0:
        return data
    if len(data) == 1:
        # A single byte is a literal run of length 1 (header 0).
        return b'\x00' + data
    data = bytearray(data)
    result = bytearray()
    buf = bytearray()   # pending literal (RAW) bytes
    pos = 0
    repeat_count = 0    # length of the current RLE run
    MAX_LENGTH = 127    # longest run either header kind can express
    # we can safely start with RAW as empty RAW sequences
    # are handled by finish_raw()
    state = 'RAW'
    def finish_raw():
        # Flush the pending literal buffer: header is len-1 (0..127).
        if len(buf) == 0:
            return
        result.append(len(buf)-1)
        result.extend(buf)
        buf[:] = bytearray()
    def finish_rle():
        # Emit a repeat run: header is the two's complement of count-1.
        result.append(256-(repeat_count - 1))
        result.append(data[pos])
    while pos < len(data)-1:
        current_byte = data[pos]
        if data[pos] == data[pos+1]:
            # Current byte starts or extends a repeat run.
            if state == 'RAW':
                # end of RAW data
                finish_raw()
                state = 'RLE'
                repeat_count = 1
            elif state == 'RLE':
                if repeat_count == MAX_LENGTH:
                    # restart the encoding
                    finish_rle()
                    repeat_count = 0
                # move to next byte
                repeat_count += 1
        else:
            # Current byte differs from the next one.
            if state == 'RLE':
                repeat_count += 1
                finish_rle()
                state = 'RAW'
                repeat_count = 0
            elif state == 'RAW':
                if len(buf) == MAX_LENGTH:
                    # restart the encoding
                    finish_raw()
                buf.append(current_byte)
        pos += 1
    # Flush whatever run the final byte belongs to.
    if state == 'RAW':
        buf.append(data[pos])
        finish_raw()
    else:
        repeat_count += 1
        finish_rle()
    return bytes(result)
def _check_currency_format(self, format=None):
    """
    Normalise a currency format into {'pos', 'neg', 'zero'} variants.

    Accepts a callable (called for its value), a plain string containing
    '%v' (expanded into the three variants), or a dict already carrying
    them. Anything else falls back to the configured default format,
    which is installed back into settings.

    Args:
        format (str | dict | callable, optional): candidate format.

    Returns:
        dict: the usable format mapping, or the updated settings when
        the fallback default was installed.
    """
    defaults = self.settings['currency']['format']
    if hasattr(format, '__call__'):
        format = format()
    if is_str(format) and re.match('%v', format):
        # Create and return positive, negative and zero formats:
        return {
            'pos': format,
            'neg': format.replace("-", "").replace("%v", "-%v"),
            'zero': format
        }
    # BUG FIX: the key was misspelled 'por', which raised KeyError for
    # every dict-shaped format instead of validating its 'pos' entry;
    # .get() also tolerates dicts that lack the key entirely.
    elif not format or not format.get('pos') or not re.match('%v',
                                                             format['pos']):
        self.settings['currency']['format'] = {
            'pos': defaults,
            'neg': defaults.replace("%v", "-%v"),
            'zero': defaults
        }
        return self.settings
    return format
def _change_precision(self, val, base=0):
"""
Check and normalise the value of precision (must be positive integer).
Args:
val (INT): must be positive integer
base (INT): Description
Returns:
VAL (INT): Description
"""
if not isinstance(val, int):
raise TypeError('The first argument must be an integer.')
val = round(abs(val))
val = (lambda num: base if is_num(num) else num)(val)
return val |
def parse(self, value, decimal=None):
    """
    Strip formatting cruft from *value* and return the raw numeric value.

    Lists are unformatted recursively; numbers pass through untouched.
    Bracketed negatives (eg. "$ (1.99)" => -1.99) are handled, and a
    non-standard decimal separator can be supplied as *decimal*
    (defaults to settings.number.decimal). Never raises: unparseable
    input (including None) yields 0.

    Args:
        value: string, number, or list thereof.
        decimal (str, optional): decimal separator used in the input.

    Returns:
        The parsed number (or a map of parsed numbers for list input).
    """
    # Fails silently (need decent errors):
    value = value or 0
    # Recursively unformat arrays:
    if check_type(value, 'list'):
        # BUG FIX: map() was called without its iterable argument.
        return map(lambda val: self.parse(val, decimal), value)
    # Return the value as-is if it's already a number:
    if check_type(value, 'int') or check_type(value, 'float'):
        return value
    # Default decimal point comes from settings, but could be set to eg.","
    decimal = decimal or self.settings.number.decimal
    # Build regex to strip out everything except digits,
    # decimal point and minus sign
    regex = re.compile("[^0-9-" + decimal + "]")
    unformatted = str(value)
    # BUG FIX: the pattern and replacement used JavaScript literal
    # syntax ("/.../" delimiters and "$1"); translate bracketed
    # negatives such as "(1.99)" into "-1.99" with Python regex syntax.
    unformatted = re.sub(r'\((.*)\)', r'-\1', unformatted)
    unformatted = re.sub(regex, '', unformatted)
    # BUG FIX: the custom separator must be normalised *to* '.', not
    # the other way around, so float() can understand the result.
    unformatted = unformatted.replace(decimal, '.')
    # BUG FIX: return the numeric value promised by the docstring (and
    # required by format()/to_fixed()), not the cleaned-up string.
    return float(unformatted) if is_num(unformatted) else 0
def to_fixed(self, value, precision):
    """Round a value to a fixed number of decimal places, as a string.

    Implementation that treats floats more like decimals. Fixes binary
    rounding issues (eg. (0.615).toFixed(2) === "0.61") that present
    problems for accounting and finance-related software.

    Args:
        value: Number (or parseable string) to round.
        precision (int): Number of decimal places.

    Returns:
        str: *value* rendered with exactly *precision* decimals.
    """
    precision = self._change_precision(
        precision, self.settings['number']['precision'])
    power = pow(10, precision)
    # Multiply up by precision, round accurately, then divide.
    rounded = round(self.parse(value) * power) / power
    # The original returned the format template itself (e.g. "0.615 2.2f")
    # instead of applying it to the rounded value.
    return '{:.{}f}'.format(rounded, precision)
def format(self, number, **kwargs):
    """Format a number with separated thousands and custom precision.

    Localise by overriding the precision and thousand / decimal
    separators via keyword arguments matching `settings['number']`
    (``precision``, ``thousand``, ``decimal``).

    Args:
        number: Number (or list of numbers) to format.
        **kwargs: Overrides for `settings['number']`.

    Returns:
        str (or list of str): The formatted number(s).
    """
    # Recursively format lists (original map() call lacked an iterable).
    if check_type(number, 'list'):
        return [self.format(val, **kwargs) for val in number]
    # Clean up number
    number = self.parse(number)
    # Copy the defaults before applying overrides: dict.update() returns
    # None, so the original assignment destroyed the options.
    options = dict(self.settings['number'])
    options.update(kwargs)
    # Clean up precision
    precision = self._change_precision(options['precision'])
    negative = "-" if number < 0 else ""
    # Integer part of the fixed representation. str() takes no base
    # argument, so the original `str(int(...), 10)` raised TypeError.
    base = str(int(float(self.to_fixed(abs(number) or 0, precision))))
    # Number of leading digits before the first thousands separator.
    mod = len(base) % 3 if len(base) > 3 else 0
    # Format the number:
    num = negative + (base[:mod] if mod else '')
    # Insert the thousands separator every three digits (the original
    # used a JS regex literal with /g and $1, which never matches).
    num += re.sub(r'(\d{3})(?=\d)', r'\1' + options['thousand'], base[mod:])
    if precision:
        num += options['decimal'] + \
            self.to_fixed(abs(number), precision).split('.')[1]
    return num
def as_money(self, number, **options):
    """Format a number into currency.

    Localise by overriding the symbol, precision, thousand / decimal
    separators and format via keyword arguments matching
    `settings['currency']` (defaults: "$", 2, ",", ".", "%s%v").

    Args:
        number: Number (or list of numbers) to format.
        **options: Overrides for `settings['currency']`.

    Returns:
        str (or list of str): The formatted currency string(s).
    """
    # Recursively format lists (original map() call lacked an iterable).
    if isinstance(number, list):
        return [self.as_money(val, **options) for val in number]
    # Clean up number
    decimal = options.get('decimal')
    number = self.parse(number, decimal)
    # Copy defaults then overlay overrides; dict.update() returns None,
    # so the original assignment destroyed the options.
    opts = dict(self.settings['currency'])
    opts.update(options)
    # Check format (returns dict with pos, neg and zero)
    formats = self._check_currency_format(opts['format'])
    # Choose which format to use for this value
    if number > 0:
        use_format = formats['pos']
    elif number < 0:
        use_format = formats['neg']
    else:
        use_format = formats['zero']
    # Normalise the *precision* option — the original mistakenly passed
    # the number being formatted as the precision.
    precision = self._change_precision(opts['precision'])
    # format() only takes keyword options, so pass them by name.
    formatted = self.format(abs(number),
                            precision=precision,
                            thousand=opts['thousand'],
                            decimal=opts['decimal'])
    # Return with currency symbol added
    return use_format.replace('%s', opts['symbol']).replace('%v', formatted)
def to_array(data):
    """
    Import a blosc array into a numpy array.

    Arguments:
        data: A blosc packed numpy array

    Returns:
        A numpy array with data from a blosc compressed array

    Raises:
        ValueError: If the blosc payload cannot be unpacked.
    """
    try:
        return blosc.unpack_array(data)
    except Exception as err:
        raise ValueError("Could not load numpy data. {}".format(err))
def from_array(array):
    """
    Export a numpy array to a blosc array.

    Arguments:
        array: The numpy array to compress to blosc array

    Returns:
        Bytes/String. A blosc compressed array

    Raises:
        ValueError: If the array cannot be compressed.
    """
    try:
        return blosc.pack_array(array)
    except Exception as err:
        raise ValueError("Could not compress data from array. {}".format(err))
def add(self, name, path):
    """Add a workspace entry in the user config file.

    Args:
        name (str): Workspace name; must not already be registered.
        path (str): Filesystem path of the workspace; must exist.

    Raises:
        ValueError: If *path* does not exist or *name* is already taken.
    """
    if not os.path.exists(path):
        raise ValueError("Workspace path `%s` doesn't exists." % path)
    if self.exists(name):
        raise ValueError("Workspace `%s` already exists." % name)
    self.config["workspaces"][name] = {"path": path, "repositories": {}}
    self.config.write()
def remove(self, name):
    """Remove a workspace entry from the config file.

    Raises:
        ValueError: If no workspace named *name* exists.
    """
    if not self.exists(name):
        raise ValueError("Workspace `%s` doesn't exists." % name)
    self.config["workspaces"].pop(name, 0)
    self.config.write()
def list(self):
    """Return all available workspaces as a dict keyed by name.

    Each value is the workspace's config entry with a "name" key added.
    """
    return {
        ws_name: dict({"name": ws_name}, **ws_infos)
        for ws_name, ws_infos in self.config["workspaces"].items()
    }
def get(self, name):
    """
    Get workspace infos from name.

    Returns None if the workspace doesn't exist.
    """
    return self.list().get(name)
def repository_exists(self, workspace, repo):
    """Return True if *workspace* exists and contains repository *repo*."""
    if not self.exists(workspace):
        return False
    return repo in self.list()[workspace]["repositories"]
def sync(self, ws_name):
    """Synchronise a workspace's repositories with its directory contents.

    Scans the workspace directory, registers every valid repository
    found there, logs the resulting repository list, and persists the
    updated configuration.

    Args:
        ws_name (str): Name of the workspace to synchronise.
    """
    ws_path = self.config["workspaces"][ws_name]["path"]
    repositories = self.config["workspaces"][ws_name]["repositories"]
    logger = logging.getLogger(__name__)
    color = Color()
    for entry in os.listdir(ws_path):
        try:
            repo = Repository(os.path.join(ws_path, entry))
        except RepositoryError:
            # Not a (supported) repository -- skip it.
            continue
        else:
            repositories[entry] = repo.path
    for repo_name, repo_path in repositories.items():
        logger.info(color.colored(
            " - %s" % repo_name, "blue"))
    # Store the refreshed repository mapping (the original line was a
    # bare expression with no effect).
    self.config["workspaces"][ws_name]["repositories"] = repositories
    self.config.write()
def clone(url, path):
    """Clone a repository, picking the VCS adapter from the url scheme.

    Args:
        url (str): Repository url (git/svn/bzr/hg conventions).
        path (str): Local destination path.

    Raises:
        RepositoryAdapterNotFound: If no adapter matches the url.
    """
    adapter = None
    if url.startswith("git@") or url.endswith(".git"):
        adapter = Git(path)
    if url.startswith("svn://"):
        adapter = Svn(path)
    if url.startswith("bzr://"):
        adapter = Bzr(path)
    if url.startswith("ssh://hg@"):
        adapter = Hg(path)
    if adapter is None:
        raise RepositoryAdapterNotFound(
            "Can't find adapter for `%s` repository url" % url)
    return adapter.clone(url)
def check_version():
    """
    Tell you if you have an old version of ndio.

    Queries PyPI for the latest released version, prints an upgrade hint
    when it differs from the local one, and returns the PyPI version.
    """
    import requests
    pypi_info = requests.get('https://pypi.python.org/pypi/ndio/json').json()
    latest = pypi_info['info']['version']
    if latest != version:
        print("A newer version of ndio is available. "
              "'pip install -U ndio' to update.")
    return latest
def to_voxels(array):
    """
    Convert an array to the list of its populated voxel coordinates.

    Arguments:
        array (numpy.ndarray): A numpy nd array. This must be boolean!

    Returns:
        An array of n-tuples (one row per populated voxel)

    Raises:
        ValueError: If *array* is not a numpy.ndarray.
    """
    if not (type(array) is numpy.ndarray):
        raise ValueError("array argument must be of type numpy.ndarray")
    return numpy.argwhere(array)
def from_voxels(voxels):
    """
    Convert a voxel list to an ndarray.

    Arguments:
        voxels (tuple[]): A list of coordinates indicating coordinates of
            populated voxels in an ndarray.

    Returns:
        numpy.ndarray: A binary array with a 1 at every listed coordinate.
    """
    dimensions = len(voxels[0])
    # Shape must accommodate the largest index in each dimension (+1,
    # since indices are 0-based). The original iterated range(len(int))
    # and appended to an undefined `size`.
    size = []
    for d in range(dimensions):
        size.append(max(v[d] for v in voxels) + 1)
    result = numpy.zeros(size)
    for v in voxels:
        # Index with a tuple so each coordinate addresses one axis.
        result[tuple(v)] = 1
    return result
def execute(self, args):
    """Execute update subcommand.

    Updates a single named workspace when a name was given, otherwise
    updates everything when the --all flag is present.
    """
    if args.name is not None:
        self.print_workspace(args.name)
        return
    if args.all is not None:
        self.print_all()
def print_update(self, repo_name, repo_path):
    """Update one repository, logging a colored header and any errors."""
    color = Color()
    header = "=> [%s] %s" % (repo_name, repo_path)
    self.logger.info(color.colored(header, "green"))
    try:
        Repository(repo_path).update()
    except RepositoryError as err:
        # Best-effort: report the failure and carry on with other repos.
        self.logger.error(err)
    print("\n")
def set_file_handler(self, logfile):
    """Attach a FileHandler that writes every record to *logfile*."""
    file_handler = logging.FileHandler(logfile)
    # NOTSET lets all levels through to the file.
    file_handler.setLevel(logging.NOTSET)
    file_handler.setFormatter(Formatter(FORMAT))
    self.addHandler(file_handler)
def set_console_handler(self, debug=False):
    """Attach a console handler; limited to INFO unless *debug* is set."""
    stream_handler = logging.StreamHandler()
    stream_handler.setFormatter(Formatter(LFORMAT))
    if not debug:
        stream_handler.setLevel(logging.INFO)
    self.addHandler(stream_handler)
def execute(self, command, path=None):
    """Run *command* through the shell and return the Popen object.

    Stdout is logged at INFO; stderr is logged at ERROR when the command
    failed, otherwise at INFO (many tools print progress on stderr).

    Args:
        command (str): Shell command line to run.
        path (str): Working directory for the command (optional).

    Returns:
        subprocess.Popen: The finished process object.
    """
    logger = logging.getLogger(__name__)
    self.check_executable()
    logger.debug("Executing command `%s` (cwd: %s)" % (command, path))
    proc = subprocess.Popen(
        command,
        shell=True,
        cwd=path,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE
    )
    out, err = proc.communicate()
    exit_code = proc.wait()
    if out:
        logger.info(out.decode("utf-8"))
    if err:
        log = logger.error if exit_code != 0 else logger.info
        log(err.decode("utf-8"))
    return proc
def load(png_filename):
    """
    Import a png file into a numpy array.

    Arguments:
        png_filename (str): A string filename of a png datafile

    Returns:
        A numpy array with data from the png file

    Raises:
        ValueError: If the file cannot be opened as an image.
    """
    # Expand filename to be absolute
    png_filename = os.path.expanduser(png_filename)
    try:
        img = Image.open(png_filename)
    except Exception:
        # (The original had an unreachable bare `raise` after this one.)
        raise ValueError("Could not load file {0} for conversion."
                         .format(png_filename))
    return numpy.array(img)
def save(filename, numpy_data):
    """
    Export a numpy array to a png file.

    Arguments:
        filename (str): A filename to which to save the png data
        numpy_data (numpy.ndarray OR str): The numpy array to save to png.
            OR a string: If a string is provided, it should be a binary
            png string, written to disk verbatim.

    Returns:
        str. The expanded filename that now holds the png data

    Raises:
        ValueError: If the save fails; for instance if the binary string
            data cannot be coerced into a png, or perhaps your
            numpy.ndarray is ill-formed?
    """
    # Expand filename to be absolute
    png_filename = os.path.expanduser(filename)
    if type(numpy_data) is str:
        # Pre-rendered binary png: write it straight to disk. Use a
        # context manager so the handle is closed even on failure.
        with open(png_filename, "wb") as fp:
            fp.write(numpy_data)
        return png_filename
    try:
        if numpy_data.dtype.name != 'uint8':
            # Non-8-bit data is stored as 32-bit signed integer pixels.
            img = Image.fromarray(numpy_data, mode='I')
        else:
            img = Image.fromarray(numpy_data)
        img.save(png_filename)
    except Exception:
        raise ValueError("Could not save png file {0}.".format(png_filename))
    return png_filename
def save_collection(png_filename_base, numpy_data, start_layers_at=1):
    """
    Export a numpy array to a set of png files, with each Z-index 2D
    array as its own 2D file.

    Arguments:
        png_filename_base: A filename template, such as "my-image-*.png"
            which will lead to a collection of files named
            "my-image-000001.png", "my-image-000002.png", etc.
        numpy_data: The numpy array data to save to png.
        start_layers_at (int): Index used for the first layer's filename.

    Returns:
        Array. A list of expanded filenames that hold png data.
    """
    file_ext = png_filename_base.split('.')[-1]
    if file_ext in ['png']:
        # Filename is "name*.ext", set file_base to "name*".
        file_base = '.'.join(png_filename_base.split('.')[:-1])
        # Re-add the dot: the original appended a bare "png", producing
        # filenames like "my-image-000001png".
        file_ext = '.' + file_ext
    else:
        # Filename is "name*", set file_base to "name*".
        # That is, extension wasn't included.
        file_base = png_filename_base
        file_ext = ".png"
    file_base_array = file_base.split('*')
    # The array of filenames to return
    output_files = []
    # Each '*' in the template is replaced by the 0-padded layer index.
    for i, layer in enumerate(numpy_data, start=start_layers_at):
        layer_filename = (str(i).zfill(6)).join(file_base_array) + file_ext
        output_files.append(save(layer_filename, layer))
    return output_files
def load_collection(png_filename_base):
    """
    Import all files matching the filename base given with `png_filename_base`.

    Images are ordered by alphabetical order, which means that you *MUST*
    0-pad your numbers if they span a power of ten (e.g. 0999-1000 or
    09-10). This is handled automatically by its complementary function,
    `png.save_collection`.

    Arguments:
        png_filename_base (str): An asterisk-wildcard string that should
            refer to all PNGs in the stack. All *s are replaced according
            to regular cmd-line expansion rules (see `glob`).

    Returns:
        A numpy array holding a 3D dataset
    """
    # We expect images to be indexed by their alphabetical order.
    filenames = sorted(glob.glob(png_filename_base))
    layers = [load(f) for f in filenames]
    return numpy.concatenate(layers)
def print_workspace(self, name):
    """Print the status of every repository matching *name*.

    Returns False (after logging an error) when nothing matches.
    """
    path_list = find_path(name, self.config)
    if not path_list:
        self.logger.error("No matches for `%s`" % name)
        return False
    for repo_name, repo_path in path_list.items():
        self.print_status(repo_name, repo_path)
def print_status(self, repo_name, repo_path):
    """Print one repository's status, logging errors without aborting."""
    color = Color()
    header = "=> [%s] %s" % (repo_name, repo_path)
    self.logger.info(color.colored(header, "green"))
    try:
        Repository(repo_path).status()
    except RepositoryError as err:
        # Best-effort: report and keep going with the other repos.
        self.logger.error(err)
    print("\n")
def get_block_size(self, token, resolution=None):
    """
    Get the block-size for a given token at a given resolution.

    Arguments:
        token (str): The token to inspect
        resolution (int : None): The resolution at which to inspect data.
            If none is specified, uses the minimum available.

    Returns:
        int[3]: The xyz blocksize.
    """
    cdims = self.get_metadata(token)['dataset']['cube_dimension']
    if resolution is None:
        # NOTE(review): the keys appear to be strings, so min() picks the
        # lexicographically smallest (e.g. '10' < '2') -- confirm that
        # resolution indices never reach double digits here.
        resolution = min(cdims.keys())
    return cdims[str(resolution)]
def get_xy_slice(self, token, channel,
                 x_start, x_stop,
                 y_start, y_stop,
                 z_index,
                 resolution=0):
    """
    Download a single 2D image plane at depth *z_index*.

    Wraps `get_cutout` with a one-slice z range and squeezes the result
    to two dimensions. You should specify a 'token' and 'channel' pair;
    for image data, users should use the channel 'image'.

    Arguments:
        token (str): Token to identify data to download
        channel (str): Channel
        x_start, x_stop (int): Bounds of the x dimension
        y_start, y_stop (int): Bounds of the y dimension
        z_index (int): The z-slice to image
        resolution (int): Resolution level

    Returns:
        numpy.ndarray: 2D image data
    """
    cutout = self.get_cutout(token, channel, x_start, x_stop, y_start,
                             y_stop, z_index, z_index + 1, resolution)
    # Collapse the singleton z axis: 3D volume -> 2D slice.
    return numpy.squeeze(cutout)
def get_volume(self, token, channel,
               x_start, x_stop,
               y_start, y_stop,
               z_start, z_stop,
               resolution=1,
               block_size=DEFAULT_BLOCK_SIZE,
               neariso=False):
    """
    Get a RAMONVolume volumetric cutout from the neurodata server.

    Arguments:
        token (str): Token to identify data to download
        channel (str): Channel
        resolution (int): Resolution level
        Q_start (int): The lower bound of dimension 'Q'
        Q_stop (int): The upper bound of dimension 'Q'
        block_size (int[3]): Block size of this dataset
        neariso (bool : False): Passes the 'neariso' param to the cutout.
            If you don't know what this means, ignore it!

    Returns:
        ndio.ramon.RAMONVolume: Downloaded data.
    """
    # (Removed an unused `size` computation from the original.)
    volume = ramon.RAMONVolume()
    volume.xyz_offset = [x_start, y_start, z_start]
    volume.resolution = resolution
    volume.cutout = self.get_cutout(token, channel, x_start,
                                    x_stop, y_start, y_stop,
                                    z_start, z_stop,
                                    resolution=resolution,
                                    block_size=block_size,
                                    neariso=neariso)
    return volume
def get_cutout(self, token, channel,
               x_start, x_stop,
               y_start, y_stop,
               z_start, z_stop,
               t_start=0, t_stop=1,
               resolution=1,
               block_size=DEFAULT_BLOCK_SIZE,
               neariso=False):
    """
    Get volumetric cutout data from the neurodata server.

    Arguments:
        token (str): Token to identify data to download
        channel (str): Channel
        resolution (int): Resolution level
        Q_start (int): The lower bound of dimension 'Q'
        Q_stop (int): The upper bound of dimension 'Q'
        block_size (int[3]): Block size of this dataset. If not provided,
            ndio uses the metadata of this tokenchannel to set. If you find
            that your downloads are timing out or otherwise failing, it may
            be wise to start off by making this smaller.
        neariso (bool : False): Passes the 'neariso' param to the cutout.
            If you don't know what this means, ignore it!

    Returns:
        numpy.ndarray: Downloaded data.
    """
    if block_size is None:
        # look up block size from metadata
        block_size = self.get_block_size(token, resolution)
    origin = self.get_image_offset(token, resolution)
    # If z_stop - z_start is < 16, backend still pulls minimum 16 slices
    if (z_stop - z_start) < 16:
        z_slices = 16
    else:
        z_slices = z_stop - z_start
    # Calculate size of the data to be downloaded.
    # (x extent * y extent * z slices * 4 -- presumably 4 bytes per
    # voxel; TODO confirm this matches the channel's datatype.)
    size = (x_stop - x_start) * (y_stop - y_start) * z_slices * 4
    # Switch which download function to use based on which libraries are
    # available in this version of python.
    if six.PY2:
        dl_func = self._get_cutout_blosc_no_chunking
    elif six.PY3:
        dl_func = self._get_cutout_no_chunking
    else:
        raise ValueError("Invalid Python version.")
    if size < self._chunk_threshold:
        # Small request: download everything in one call.
        vol = dl_func(token, channel, resolution,
                      x_start, x_stop,
                      y_start, y_stop,
                      z_start, z_stop,
                      t_start, t_stop,
                      neariso=neariso)
        # Reorder axes for the caller (two rollaxis calls permute the
        # downloaded layout into the returned orientation).
        vol = numpy.rollaxis(vol, 1)
        vol = numpy.rollaxis(vol, 2)
        return vol
    else:
        # Large request: split the bounding box into blocks and download
        # each block separately, stitching into one volume.
        from ndio.utils.parallel import block_compute
        blocks = block_compute(x_start, x_stop,
                               y_start, y_stop,
                               z_start, z_stop,
                               origin, block_size)
        # Placeholder volume (float64); replaced on the first block once
        # the downloaded dtype is known.
        vol = numpy.zeros(((z_stop - z_start),
                           (y_stop - y_start),
                           (x_stop - x_start)))
        for b in blocks:
            data = dl_func(token, channel, resolution,
                           b[0][0], b[0][1],
                           b[1][0], b[1][1],
                           b[2][0], b[2][1],
                           0, 1,
                           neariso=neariso)
            if b == blocks[0]:  # first block
                # Re-allocate with the actual dtype of the downloaded data.
                vol = numpy.zeros(((z_stop - z_start),
                                   (y_stop - y_start),
                                   (x_stop - x_start)), dtype=data.dtype)
            # Copy this block into its (z, y, x) slot of the full volume.
            vol[b[2][0] - z_start: b[2][1] - z_start,
                b[1][0] - y_start: b[1][1] - y_start,
                b[0][0] - x_start: b[0][1] - x_start] = data
        vol = numpy.rollaxis(vol, 1)
        vol = numpy.rollaxis(vol, 2)
        return vol
def post_cutout(self, token, channel,
                x_start,
                y_start,
                z_start,
                data,
                resolution=0):
    """
    Post a cutout to the server.

    Arguments:
        token (str)
        channel (str)
        x_start (int)
        y_start (int)
        z_start (int)
        data (numpy.ndarray): A numpy array of data. Pass in (x, y, z)
        resolution (int : 0): Resolution at which to insert the data

    Returns:
        bool: True on success

    Raises:
        RemoteDataUploadError: if there's an issue during upload.
    """
    # Coerce the data to the channel's declared datatype if needed.
    datatype = self.get_proj_info(token)['channels'][channel]['datatype']
    if data.dtype.name != datatype:
        data = data.astype(datatype)
    # Permute the (x, y, z) input into the layout the uploader expects.
    data = numpy.rollaxis(data, 1)
    data = numpy.rollaxis(data, 2)
    # Pick the upload encoding: npz on py3 or for very large payloads,
    # blosc otherwise.
    use_npz = six.PY3 or data.nbytes > 1.5e9
    ul_func = (self._post_cutout_no_chunking_npz if use_npz
               else self._post_cutout_no_chunking_blosc)
    if data.size < self._chunk_threshold:
        return ul_func(token, channel, x_start,
                       y_start, z_start, data,
                       resolution)
    return self._post_cutout_with_chunking(token, channel,
                                           x_start, y_start, z_start, data,
                                           resolution, ul_func)
def _post_cutout_no_chunking_blosc(self, token, channel,
                                   x_start, y_start, z_start,
                                   data, resolution):
    """
    Upload a single blosc-compressed cutout in one request.

    Accepts data in zyx. !!!

    Returns:
        bool: True on success.

    Raises:
        RemoteDataUploadError: If the server responds with non-200.
    """
    # Add a leading time axis so the payload is (t, z, y, x).
    data = numpy.expand_dims(data, axis=0)
    blosc_data = blosc.pack_array(data)
    url = self.url("{}/{}/blosc/{}/{},{}/{},{}/{},{}/0,0/".format(
        token, channel,
        resolution,
        x_start, x_start + data.shape[3],
        y_start, y_start + data.shape[2],
        z_start, z_start + data.shape[1]
    ))
    req = self.remote_utils.post_url(url, data=blosc_data, headers={
        'Content-Type': 'application/octet-stream'
    })
    # The original used `is not 200`, an identity check that only worked
    # via CPython's small-int cache; compare by value.
    if req.status_code != 200:
        raise RemoteDataUploadError(req.text)
    return True
def load(tiff_filename):
    """
    Import a TIFF file into a numpy array.

    Arguments:
        tiff_filename: A string filename of a TIFF datafile

    Returns:
        A numpy array with data from the TIFF file

    Raises:
        ValueError: If the file cannot be read as a TIFF.
    """
    # Expand filename to be absolute
    tiff_filename = os.path.expanduser(tiff_filename)
    try:
        img = tiff.imread(tiff_filename)
    except Exception:
        # (The original had an unreachable bare `raise` after this one.)
        raise ValueError("Could not load file {0} for conversion."
                         .format(tiff_filename))
    return numpy.array(img)
def save(tiff_filename, numpy_data):
    """
    Export a numpy array to a TIFF file.

    Arguments:
        tiff_filename: A filename to which to save the TIFF data
        numpy_data: The numpy array to save to TIFF, OR a binary string
            that is written to disk verbatim.

    Returns:
        String. The expanded filename that now holds the TIFF data

    Raises:
        ValueError: If the array cannot be saved as a TIFF.
    """
    # Expand filename to be absolute
    tiff_filename = os.path.expanduser(tiff_filename)
    if type(numpy_data) is str:
        # Pre-rendered binary data: write it straight to disk. The
        # original referenced an undefined `png_filename` here, which
        # raised NameError for every string payload.
        with open(tiff_filename, "wb") as fp:
            fp.write(numpy_data)
        return tiff_filename
    try:
        tiff.imsave(tiff_filename, numpy_data)
    except Exception:
        raise ValueError("Could not save TIFF file {0}.".format(tiff_filename))
    return tiff_filename
def load_tiff_multipage(tiff_filename, dtype='float32'):
    """
    Load a multipage tiff into a single variable in x,y,z format.

    Arguments:
        tiff_filename: Filename of source data
        dtype: data type to use for the returned tensor

    Returns:
        Array containing contents from input tiff file in xyz order

    Raises:
        RuntimeError: If the file does not exist.
    """
    if not os.path.isfile(tiff_filename):
        raise RuntimeError('could not find file "%s"' % tiff_filename)
    # load the data from multi-layer TIF files
    data = tiff.imread(tiff_filename)
    im = []
    while True:
        Xi = numpy.array(data, dtype=dtype)
        if Xi.ndim == 2:
            Xi = Xi[numpy.newaxis, ...]  # add slice dimension
        im.append(Xi)
        try:
            # NOTE(review): seek()/tell() are PIL-style page navigation;
            # `tiff.imread` usually returns an ndarray, which has neither,
            # so this would raise AttributeError (not EOFError) after the
            # first page -- confirm what `tiff` actually is here.
            data.seek(data.tell() + 1)
        except EOFError:
            break  # this just means hit end of file (not really an error)
    im = numpy.concatenate(im, axis=0)  # list of 2d -> tensor
    # Permute the stacked (z, y, x) tensor into x,y,z order.
    im = numpy.rollaxis(im, 1)
    im = numpy.rollaxis(im, 2)
    return im
def write(self):
    """
    Write config in configuration file.

    Serialises this object (a dict subclass) to YAML. Uses a context
    manager so the file handle is closed even if dumping fails.
    """
    with open(self.config_file, "w+") as fp:
        fp.write(yaml.dump(dict(self), default_flow_style=False))
def clone(self, url):
    """Clone repository from url."""
    command = "%s branch %s %s" % (self.executable, url, self.path)
    return self.execute(command)
def get_version():
    """Get the installed version of the ``yoda`` package from package resources."""
    requirement = pkg_resources.Requirement.parse("yoda")
    return pkg_resources.get_provider(requirement).version
def mix_and_match(name, greeting='Hello', yell=False):
    '''Mixing and matching positional args and keyword options.

    Prints "<greeting>, <name>." -- upper-cased with a "!" when *yell*
    is true. Uses print() calls (rather than the py2-only print
    statement) so the example also runs on Python 3.
    '''
    say = '%s, %s' % (greeting, name)
    if yell:
        print('%s!' % say.upper())
    else:
        print('%s.' % say)
def option_decorator(name, greeting, yell):
    '''Same as mix_and_match, but using the @option decorator.

    Uses print() calls (rather than the py2-only print statement) so
    the example also runs on Python 3.
    '''
    # Use the @option decorator when you need more control over the
    # command line options.
    say = '%s, %s' % (greeting, name)
    if yell:
        print('%s!' % say.upper())
    else:
        print('%s.' % say)
def load(nifti_filename):
    """
    Import a nifti file into a numpy array. TODO: Currently only
    transfers raw data for compatibility with annotation and ND formats

    Arguments:
        nifti_filename (str): A string filename of a nifti datafile

    Returns:
        A numpy array with data from the nifti file

    Raises:
        ValueError: If the file cannot be read as nifti.
    """
    # Expand filename to be absolute
    nifti_filename = os.path.expanduser(nifti_filename)
    try:
        data = nib.load(nifti_filename)
        img = data.get_data()
    except Exception:
        # (The original had an unreachable bare `raise` after this one.)
        raise ValueError("Could not load file {0} for conversion."
                         .format(nifti_filename))
    return img
def save(nifti_filename, numpy_data):
    """
    Export a numpy array to a nifti file. TODO: currently using dummy
    headers and identity matrix affine transform. This can be expanded.

    Arguments:
        nifti_filename (str): A filename to which to save the nifti data
        numpy_data (numpy.ndarray): The numpy array to save to nifti

    Returns:
        String. The expanded filename that now holds the nifti data

    Raises:
        ValueError: If the array cannot be saved as nifti.
    """
    # Expand filename to be absolute
    nifti_filename = os.path.expanduser(nifti_filename)
    try:
        # Identity affine: no orientation information is recorded.
        nib.save(nib.Nifti1Image(numpy_data, numpy.eye(4)), nifti_filename)
    except Exception:
        raise ValueError("Could not save file {0}.".format(nifti_filename))
    return nifti_filename
def ping(self, suffix='public_tokens/'):
    """
    Return the status-code of the API (estimated using the public-tokens
    lookup page).

    Arguments:
        suffix (str : 'public_tokens/'): The url endpoint to check

    Returns:
        int: status code
    """
    base_url = super(neuroRemote, self).url()
    return self.remote_utils.ping(base_url, suffix)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.