| id (int32, 0-252k) | repo (string, 7-55 chars) | path (string, 4-127 chars) | func_name (string, 1-88 chars) | original_string (string, 75-19.8k chars) | language (string, 1 class) | code (string, 75-19.8k chars) | code_tokens (list) | docstring (string, 3-17.3k chars) | docstring_tokens (list) | sha (string, 40 chars) | url (string, 87-242 chars) |
|---|---|---|---|---|---|---|---|---|---|---|---|
11,900
|
toastdriven/restless
|
restless/tnd.py
|
TornadoResource.as_view
|
def as_view(cls, view_type, *init_args, **init_kwargs):
"""
Return a subclass of tornado.web.RequestHandler and
apply required setting.
"""
global _method
new_cls = type(
cls.__name__ + '_' + _BridgeMixin.__name__ + '_restless',
(_BridgeMixin, cls._request_handler_base_,),
dict(
__resource_cls__=cls,
__resource_args__=init_args,
__resource_kwargs__=init_kwargs,
__resource_view_type__=view_type)
)
"""
Add required http-methods to the newly created class
We need to scan through MRO to find what functions users declared,
and then add corresponding http-methods used by Tornado.
"""
bases = inspect.getmro(cls)
bases = bases[0:bases.index(Resource)-1]
for k, v in cls.http_methods[view_type].items():
if any(v in base_cls.__dict__ for base_cls in bases):
setattr(new_cls, k.lower(), _method)
return new_cls
|
python
|
def as_view(cls, view_type, *init_args, **init_kwargs):
"""
Return a subclass of tornado.web.RequestHandler and
apply required setting.
"""
global _method
new_cls = type(
cls.__name__ + '_' + _BridgeMixin.__name__ + '_restless',
(_BridgeMixin, cls._request_handler_base_,),
dict(
__resource_cls__=cls,
__resource_args__=init_args,
__resource_kwargs__=init_kwargs,
__resource_view_type__=view_type)
)
"""
Add required http-methods to the newly created class
We need to scan through MRO to find what functions users declared,
and then add corresponding http-methods used by Tornado.
"""
bases = inspect.getmro(cls)
bases = bases[0:bases.index(Resource)-1]
for k, v in cls.http_methods[view_type].items():
if any(v in base_cls.__dict__ for base_cls in bases):
setattr(new_cls, k.lower(), _method)
return new_cls
|
[
"def",
"as_view",
"(",
"cls",
",",
"view_type",
",",
"*",
"init_args",
",",
"*",
"*",
"init_kwargs",
")",
":",
"global",
"_method",
"new_cls",
"=",
"type",
"(",
"cls",
".",
"__name__",
"+",
"'_'",
"+",
"_BridgeMixin",
".",
"__name__",
"+",
"'_restless'",
",",
"(",
"_BridgeMixin",
",",
"cls",
".",
"_request_handler_base_",
",",
")",
",",
"dict",
"(",
"__resource_cls__",
"=",
"cls",
",",
"__resource_args__",
"=",
"init_args",
",",
"__resource_kwargs__",
"=",
"init_kwargs",
",",
"__resource_view_type__",
"=",
"view_type",
")",
")",
"\"\"\"\n Add required http-methods to the newly created class\n We need to scan through MRO to find what functions users declared,\n and then add corresponding http-methods used by Tornado.\n \"\"\"",
"bases",
"=",
"inspect",
".",
"getmro",
"(",
"cls",
")",
"bases",
"=",
"bases",
"[",
"0",
":",
"bases",
".",
"index",
"(",
"Resource",
")",
"-",
"1",
"]",
"for",
"k",
",",
"v",
"in",
"cls",
".",
"http_methods",
"[",
"view_type",
"]",
".",
"items",
"(",
")",
":",
"if",
"any",
"(",
"v",
"in",
"base_cls",
".",
"__dict__",
"for",
"base_cls",
"in",
"bases",
")",
":",
"setattr",
"(",
"new_cls",
",",
"k",
".",
"lower",
"(",
")",
",",
"_method",
")",
"return",
"new_cls"
] |
Return a subclass of tornado.web.RequestHandler and
apply required setting.
|
[
"Return",
"a",
"subclass",
"of",
"tornado",
".",
"web",
".",
"RequestHandler",
"and",
"apply",
"required",
"setting",
"."
] |
661593b7b43c42d1bc508dec795356297991255e
|
https://github.com/toastdriven/restless/blob/661593b7b43c42d1bc508dec795356297991255e/restless/tnd.py#L95-L123
|
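A minimal usage sketch for the `TornadoResource.as_view` row above: mounting the generated `RequestHandler` subclasses in a Tornado application through restless's `as_list()`/`as_detail()` wrappers. `PostResource` and its `list`/`detail` methods are hypothetical.

```python
# Sketch assuming the restless TornadoResource API shown above;
# PostResource, list() and detail() are illustrative placeholders.
import tornado.web
from restless.tnd import TornadoResource


class PostResource(TornadoResource):
    def list(self):
        return [{'title': 'Hello'}]

    def detail(self, pk):
        return {'title': 'Hello', 'pk': pk}


# as_list()/as_detail() call as_view('list', ...) / as_view('detail', ...)
# and return tornado.web.RequestHandler subclasses usable in URL specs.
app = tornado.web.Application([
    (r'/api/posts/', PostResource.as_list()),
    (r'/api/posts/([^/]+)/', PostResource.as_detail()),
])
```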
11,901
|
toastdriven/restless
|
restless/tnd.py
|
TornadoResource.handle
|
def handle(self, endpoint, *args, **kwargs):
"""
almost identical to Resource.handle, except
the way we handle the return value of view_method.
"""
method = self.request_method()
try:
if not method in self.http_methods.get(endpoint, {}):
raise MethodNotImplemented(
"Unsupported method '{}' for {} endpoint.".format(
method,
endpoint
)
)
if not self.is_authenticated():
raise Unauthorized()
self.data = self.deserialize(method, endpoint, self.request_body())
view_method = getattr(self, self.http_methods[endpoint][method])
data = view_method(*args, **kwargs)
if is_future(data):
# need to check if the view_method is a generator or not
data = yield data
serialized = self.serialize(method, endpoint, data)
except Exception as err:
raise gen.Return(self.handle_error(err))
status = self.status_map.get(self.http_methods[endpoint][method], OK)
raise gen.Return(self.build_response(serialized, status=status))
|
python
|
def handle(self, endpoint, *args, **kwargs):
"""
almost identical to Resource.handle, except
the way we handle the return value of view_method.
"""
method = self.request_method()
try:
if not method in self.http_methods.get(endpoint, {}):
raise MethodNotImplemented(
"Unsupported method '{}' for {} endpoint.".format(
method,
endpoint
)
)
if not self.is_authenticated():
raise Unauthorized()
self.data = self.deserialize(method, endpoint, self.request_body())
view_method = getattr(self, self.http_methods[endpoint][method])
data = view_method(*args, **kwargs)
if is_future(data):
# need to check if the view_method is a generator or not
data = yield data
serialized = self.serialize(method, endpoint, data)
except Exception as err:
raise gen.Return(self.handle_error(err))
status = self.status_map.get(self.http_methods[endpoint][method], OK)
raise gen.Return(self.build_response(serialized, status=status))
|
[
"def",
"handle",
"(",
"self",
",",
"endpoint",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"method",
"=",
"self",
".",
"request_method",
"(",
")",
"try",
":",
"if",
"not",
"method",
"in",
"self",
".",
"http_methods",
".",
"get",
"(",
"endpoint",
",",
"{",
"}",
")",
":",
"raise",
"MethodNotImplemented",
"(",
"\"Unsupported method '{}' for {} endpoint.\"",
".",
"format",
"(",
"method",
",",
"endpoint",
")",
")",
"if",
"not",
"self",
".",
"is_authenticated",
"(",
")",
":",
"raise",
"Unauthorized",
"(",
")",
"self",
".",
"data",
"=",
"self",
".",
"deserialize",
"(",
"method",
",",
"endpoint",
",",
"self",
".",
"request_body",
"(",
")",
")",
"view_method",
"=",
"getattr",
"(",
"self",
",",
"self",
".",
"http_methods",
"[",
"endpoint",
"]",
"[",
"method",
"]",
")",
"data",
"=",
"view_method",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"if",
"is_future",
"(",
"data",
")",
":",
"# need to check if the view_method is a generator or not",
"data",
"=",
"yield",
"data",
"serialized",
"=",
"self",
".",
"serialize",
"(",
"method",
",",
"endpoint",
",",
"data",
")",
"except",
"Exception",
"as",
"err",
":",
"raise",
"gen",
".",
"Return",
"(",
"self",
".",
"handle_error",
"(",
"err",
")",
")",
"status",
"=",
"self",
".",
"status_map",
".",
"get",
"(",
"self",
".",
"http_methods",
"[",
"endpoint",
"]",
"[",
"method",
"]",
",",
"OK",
")",
"raise",
"gen",
".",
"Return",
"(",
"self",
".",
"build_response",
"(",
"serialized",
",",
"status",
"=",
"status",
")",
")"
] |
almost identical to Resource.handle, except
the way we handle the return value of view_method.
|
[
"almost",
"identical",
"to",
"Resource",
".",
"handle",
"except",
"the",
"way",
"we",
"handle",
"the",
"return",
"value",
"of",
"view_method",
"."
] |
661593b7b43c42d1bc508dec795356297991255e
|
https://github.com/toastdriven/restless/blob/661593b7b43c42d1bc508dec795356297991255e/restless/tnd.py#L147-L177
|
11,902
|
toastdriven/restless
|
restless/preparers.py
|
FieldsPreparer.prepare
|
def prepare(self, data):
"""
Handles transforming the provided data into the fielded data that should
be exposed to the end user.
Uses the ``lookup_data`` method to traverse dotted paths.
Returns a dictionary of data as the response.
"""
result = {}
if not self.fields:
# No fields specified. Serialize everything.
return data
for fieldname, lookup in self.fields.items():
if isinstance(lookup, SubPreparer):
result[fieldname] = lookup.prepare(data)
else:
result[fieldname] = self.lookup_data(lookup, data)
return result
|
python
|
def prepare(self, data):
"""
Handles transforming the provided data into the fielded data that should
be exposed to the end user.
Uses the ``lookup_data`` method to traverse dotted paths.
Returns a dictionary of data as the response.
"""
result = {}
if not self.fields:
# No fields specified. Serialize everything.
return data
for fieldname, lookup in self.fields.items():
if isinstance(lookup, SubPreparer):
result[fieldname] = lookup.prepare(data)
else:
result[fieldname] = self.lookup_data(lookup, data)
return result
|
[
"def",
"prepare",
"(",
"self",
",",
"data",
")",
":",
"result",
"=",
"{",
"}",
"if",
"not",
"self",
".",
"fields",
":",
"# No fields specified. Serialize everything.",
"return",
"data",
"for",
"fieldname",
",",
"lookup",
"in",
"self",
".",
"fields",
".",
"items",
"(",
")",
":",
"if",
"isinstance",
"(",
"lookup",
",",
"SubPreparer",
")",
":",
"result",
"[",
"fieldname",
"]",
"=",
"lookup",
".",
"prepare",
"(",
"data",
")",
"else",
":",
"result",
"[",
"fieldname",
"]",
"=",
"self",
".",
"lookup_data",
"(",
"lookup",
",",
"data",
")",
"return",
"result"
] |
Handles transforming the provided data into the fielded data that should
be exposed to the end user.
Uses the ``lookup_data`` method to traverse dotted paths.
Returns a dictionary of data as the response.
|
[
"Handles",
"transforming",
"the",
"provided",
"data",
"into",
"the",
"fielded",
"data",
"that",
"should",
"be",
"exposed",
"to",
"the",
"end",
"user",
"."
] |
661593b7b43c42d1bc508dec795356297991255e
|
https://github.com/toastdriven/restless/blob/661593b7b43c42d1bc508dec795356297991255e/restless/preparers.py#L42-L63
|
11,903
|
toastdriven/restless
|
restless/preparers.py
|
FieldsPreparer.lookup_data
|
def lookup_data(self, lookup, data):
"""
Given a lookup string, attempts to descend through nested data looking for
the value.
Can work with either dictionary-alikes or objects (or any combination of
those).
Lookups should be a string. If it is a dotted path, it will be split on
``.`` & it will traverse through to find the final value. If not, it will
simply attempt to find either a key or attribute of that name & return it.
Example::
>>> data = {
... 'type': 'message',
... 'greeting': {
... 'en': 'hello',
... 'fr': 'bonjour',
... 'es': 'hola',
... },
... 'person': Person(
... name='daniel'
... )
... }
>>> lookup_data('type', data)
'message'
>>> lookup_data('greeting.en', data)
'hello'
>>> lookup_data('person.name', data)
'daniel'
"""
value = data
parts = lookup.split('.')
if not parts or not parts[0]:
return value
part = parts[0]
remaining_lookup = '.'.join(parts[1:])
if callable(getattr(data, 'keys', None)) and hasattr(data, '__getitem__'):
# Dictionary enough for us.
value = data[part]
elif data is not None:
# Assume it's an object.
value = getattr(data, part)
# Call if it's callable except if it's a Django DB manager instance
# We check if is a manager by checking the db_manager (duck typing)
if callable(value) and not hasattr(value, 'db_manager'):
value = value()
if not remaining_lookup:
return value
# There's more to lookup, so dive in recursively.
return self.lookup_data(remaining_lookup, value)
|
python
|
def lookup_data(self, lookup, data):
"""
Given a lookup string, attempts to descend through nested data looking for
the value.
Can work with either dictionary-alikes or objects (or any combination of
those).
Lookups should be a string. If it is a dotted path, it will be split on
``.`` & it will traverse through to find the final value. If not, it will
simply attempt to find either a key or attribute of that name & return it.
Example::
>>> data = {
... 'type': 'message',
... 'greeting': {
... 'en': 'hello',
... 'fr': 'bonjour',
... 'es': 'hola',
... },
... 'person': Person(
... name='daniel'
... )
... }
>>> lookup_data('type', data)
'message'
>>> lookup_data('greeting.en', data)
'hello'
>>> lookup_data('person.name', data)
'daniel'
"""
value = data
parts = lookup.split('.')
if not parts or not parts[0]:
return value
part = parts[0]
remaining_lookup = '.'.join(parts[1:])
if callable(getattr(data, 'keys', None)) and hasattr(data, '__getitem__'):
# Dictionary enough for us.
value = data[part]
elif data is not None:
# Assume it's an object.
value = getattr(data, part)
# Call if it's callable except if it's a Django DB manager instance
# We check if is a manager by checking the db_manager (duck typing)
if callable(value) and not hasattr(value, 'db_manager'):
value = value()
if not remaining_lookup:
return value
# There's more to lookup, so dive in recursively.
return self.lookup_data(remaining_lookup, value)
|
[
"def",
"lookup_data",
"(",
"self",
",",
"lookup",
",",
"data",
")",
":",
"value",
"=",
"data",
"parts",
"=",
"lookup",
".",
"split",
"(",
"'.'",
")",
"if",
"not",
"parts",
"or",
"not",
"parts",
"[",
"0",
"]",
":",
"return",
"value",
"part",
"=",
"parts",
"[",
"0",
"]",
"remaining_lookup",
"=",
"'.'",
".",
"join",
"(",
"parts",
"[",
"1",
":",
"]",
")",
"if",
"callable",
"(",
"getattr",
"(",
"data",
",",
"'keys'",
",",
"None",
")",
")",
"and",
"hasattr",
"(",
"data",
",",
"'__getitem__'",
")",
":",
"# Dictionary enough for us.",
"value",
"=",
"data",
"[",
"part",
"]",
"elif",
"data",
"is",
"not",
"None",
":",
"# Assume it's an object.",
"value",
"=",
"getattr",
"(",
"data",
",",
"part",
")",
"# Call if it's callable except if it's a Django DB manager instance",
"# We check if is a manager by checking the db_manager (duck typing)",
"if",
"callable",
"(",
"value",
")",
"and",
"not",
"hasattr",
"(",
"value",
",",
"'db_manager'",
")",
":",
"value",
"=",
"value",
"(",
")",
"if",
"not",
"remaining_lookup",
":",
"return",
"value",
"# There's more to lookup, so dive in recursively.",
"return",
"self",
".",
"lookup_data",
"(",
"remaining_lookup",
",",
"value",
")"
] |
Given a lookup string, attempts to descend through nested data looking for
the value.
Can work with either dictionary-alikes or objects (or any combination of
those).
Lookups should be a string. If it is a dotted path, it will be split on
``.`` & it will traverse through to find the final value. If not, it will
simply attempt to find either a key or attribute of that name & return it.
Example::
>>> data = {
... 'type': 'message',
... 'greeting': {
... 'en': 'hello',
... 'fr': 'bonjour',
... 'es': 'hola',
... },
... 'person': Person(
... name='daniel'
... )
... }
>>> lookup_data('type', data)
'message'
>>> lookup_data('greeting.en', data)
'hello'
>>> lookup_data('person.name', data)
'daniel'
|
[
"Given",
"a",
"lookup",
"string",
"attempts",
"to",
"descend",
"through",
"nested",
"data",
"looking",
"for",
"the",
"value",
"."
] |
661593b7b43c42d1bc508dec795356297991255e
|
https://github.com/toastdriven/restless/blob/661593b7b43c42d1bc508dec795356297991255e/restless/preparers.py#L65-L123
|
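A short sketch of how the `FieldsPreparer.prepare` and `lookup_data` rows above combine; the field names and sample data are made up for illustration.

```python
# Sketch assuming the restless.preparers API documented above;
# the fields and sample data are illustrative only.
from restless.preparers import FieldsPreparer

preparer = FieldsPreparer(fields={
    'id': 'id',
    'author': 'person.name',     # dotted path, resolved by lookup_data
    'greeting': 'greeting.en',
})

data = {
    'id': 42,
    'greeting': {'en': 'hello', 'fr': 'bonjour'},
    'person': {'name': 'daniel'},
}

preparer.prepare(data)
# -> {'id': 42, 'author': 'daniel', 'greeting': 'hello'}
```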
11,904
|
toastdriven/restless
|
restless/preparers.py
|
CollectionSubPreparer.prepare
|
def prepare(self, data):
"""
Handles passing each item in the collection data to the configured
subpreparer.
Uses a loop and the ``get_inner_data`` method to provide the correct
item of the data.
Returns a list of data as the response.
"""
result = []
for item in self.get_inner_data(data):
result.append(self.preparer.prepare(item))
return result
|
python
|
def prepare(self, data):
"""
Handles passing each item in the collection data to the configured
subpreparer.
Uses a loop and the ``get_inner_data`` method to provide the correct
item of the data.
Returns a list of data as the response.
"""
result = []
for item in self.get_inner_data(data):
result.append(self.preparer.prepare(item))
return result
|
[
"def",
"prepare",
"(",
"self",
",",
"data",
")",
":",
"result",
"=",
"[",
"]",
"for",
"item",
"in",
"self",
".",
"get_inner_data",
"(",
"data",
")",
":",
"result",
".",
"append",
"(",
"self",
".",
"preparer",
".",
"prepare",
"(",
"item",
")",
")",
"return",
"result"
] |
Handles passing each item in the collection data to the configured
subpreparer.
Uses a loop and the ``get_inner_data`` method to provide the correct
item of the data.
Returns a list of data as the response.
|
[
"Handles",
"passing",
"each",
"item",
"in",
"the",
"collection",
"data",
"to",
"the",
"configured",
"subpreparer",
"."
] |
661593b7b43c42d1bc508dec795356297991255e
|
https://github.com/toastdriven/restless/blob/661593b7b43c42d1bc508dec795356297991255e/restless/preparers.py#L201-L216
|
11,905
|
toastdriven/restless
|
restless/dj.py
|
DjangoResource.build_url_name
|
def build_url_name(cls, name, name_prefix=None):
"""
Given a ``name`` & an optional ``name_prefix``, this generates a name
for a URL.
:param name: The name for the URL (ex. 'detail')
:type name: string
:param name_prefix: (Optional) A prefix for the URL's name (for
resolving). The default is ``None``, which will autocreate a prefix
based on the class name. Ex: ``BlogPostResource`` ->
``api_blog_post_list``
:type name_prefix: string
:returns: The final name
:rtype: string
"""
if name_prefix is None:
name_prefix = 'api_{}'.format(
cls.__name__.replace('Resource', '').lower()
)
name_prefix = name_prefix.rstrip('_')
return '_'.join([name_prefix, name])
|
python
|
def build_url_name(cls, name, name_prefix=None):
"""
Given a ``name`` & an optional ``name_prefix``, this generates a name
for a URL.
:param name: The name for the URL (ex. 'detail')
:type name: string
:param name_prefix: (Optional) A prefix for the URL's name (for
resolving). The default is ``None``, which will autocreate a prefix
based on the class name. Ex: ``BlogPostResource`` ->
``api_blog_post_list``
:type name_prefix: string
:returns: The final name
:rtype: string
"""
if name_prefix is None:
name_prefix = 'api_{}'.format(
cls.__name__.replace('Resource', '').lower()
)
name_prefix = name_prefix.rstrip('_')
return '_'.join([name_prefix, name])
|
[
"def",
"build_url_name",
"(",
"cls",
",",
"name",
",",
"name_prefix",
"=",
"None",
")",
":",
"if",
"name_prefix",
"is",
"None",
":",
"name_prefix",
"=",
"'api_{}'",
".",
"format",
"(",
"cls",
".",
"__name__",
".",
"replace",
"(",
"'Resource'",
",",
"''",
")",
".",
"lower",
"(",
")",
")",
"name_prefix",
"=",
"name_prefix",
".",
"rstrip",
"(",
"'_'",
")",
"return",
"'_'",
".",
"join",
"(",
"[",
"name_prefix",
",",
"name",
"]",
")"
] |
Given a ``name`` & an optional ``name_prefix``, this generates a name
for a URL.
:param name: The name for the URL (ex. 'detail')
:type name: string
:param name_prefix: (Optional) A prefix for the URL's name (for
resolving). The default is ``None``, which will autocreate a prefix
based on the class name. Ex: ``BlogPostResource`` ->
``api_blog_post_list``
:type name_prefix: string
:returns: The final name
:rtype: string
|
[
"Given",
"a",
"name",
"&",
"an",
"optional",
"name_prefix",
"this",
"generates",
"a",
"name",
"for",
"a",
"URL",
"."
] |
661593b7b43c42d1bc508dec795356297991255e
|
https://github.com/toastdriven/restless/blob/661593b7b43c42d1bc508dec795356297991255e/restless/dj.py#L90-L113
|
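A sketch of the naming behaviour implemented in the `DjangoResource.build_url_name` row above; the same pattern repeats in the Flask (`build_endpoint_name`) and Pyramid (`build_routename`) rows that follow. `PostResource` is hypothetical.

```python
# Sketch assuming the classmethod shown above; PostResource is illustrative.
from restless.dj import DjangoResource


class PostResource(DjangoResource):
    pass


PostResource.build_url_name('list')                # -> 'api_post_list'
PostResource.build_url_name('detail', 'blog_api')  # -> 'blog_api_detail'
```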
11,906
|
toastdriven/restless
|
restless/fl.py
|
FlaskResource.build_endpoint_name
|
def build_endpoint_name(cls, name, endpoint_prefix=None):
"""
Given a ``name`` & an optional ``endpoint_prefix``, this generates a name
for a URL.
:param name: The name for the URL (ex. 'detail')
:type name: string
:param endpoint_prefix: (Optional) A prefix for the URL's name (for
resolving). The default is ``None``, which will autocreate a prefix
based on the class name. Ex: ``BlogPostResource`` ->
``api_blogpost_list``
:type endpoint_prefix: string
:returns: The final name
:rtype: string
"""
if endpoint_prefix is None:
endpoint_prefix = 'api_{}'.format(
cls.__name__.replace('Resource', '').lower()
)
endpoint_prefix = endpoint_prefix.rstrip('_')
return '_'.join([endpoint_prefix, name])
|
python
|
def build_endpoint_name(cls, name, endpoint_prefix=None):
"""
Given a ``name`` & an optional ``endpoint_prefix``, this generates a name
for a URL.
:param name: The name for the URL (ex. 'detail')
:type name: string
:param endpoint_prefix: (Optional) A prefix for the URL's name (for
resolving). The default is ``None``, which will autocreate a prefix
based on the class name. Ex: ``BlogPostResource`` ->
``api_blogpost_list``
:type endpoint_prefix: string
:returns: The final name
:rtype: string
"""
if endpoint_prefix is None:
endpoint_prefix = 'api_{}'.format(
cls.__name__.replace('Resource', '').lower()
)
endpoint_prefix = endpoint_prefix.rstrip('_')
return '_'.join([endpoint_prefix, name])
|
[
"def",
"build_endpoint_name",
"(",
"cls",
",",
"name",
",",
"endpoint_prefix",
"=",
"None",
")",
":",
"if",
"endpoint_prefix",
"is",
"None",
":",
"endpoint_prefix",
"=",
"'api_{}'",
".",
"format",
"(",
"cls",
".",
"__name__",
".",
"replace",
"(",
"'Resource'",
",",
"''",
")",
".",
"lower",
"(",
")",
")",
"endpoint_prefix",
"=",
"endpoint_prefix",
".",
"rstrip",
"(",
"'_'",
")",
"return",
"'_'",
".",
"join",
"(",
"[",
"endpoint_prefix",
",",
"name",
"]",
")"
] |
Given a ``name`` & an optional ``endpoint_prefix``, this generates a name
for a URL.
:param name: The name for the URL (ex. 'detail')
:type name: string
:param endpoint_prefix: (Optional) A prefix for the URL's name (for
resolving). The default is ``None``, which will autocreate a prefix
based on the class name. Ex: ``BlogPostResource`` ->
``api_blogpost_list``
:type endpoint_prefix: string
:returns: The final name
:rtype: string
|
[
"Given",
"a",
"name",
"&",
"an",
"optional",
"endpoint_prefix",
"this",
"generates",
"a",
"name",
"for",
"a",
"URL",
"."
] |
661593b7b43c42d1bc508dec795356297991255e
|
https://github.com/toastdriven/restless/blob/661593b7b43c42d1bc508dec795356297991255e/restless/fl.py#L59-L82
|
11,907
|
toastdriven/restless
|
restless/pyr.py
|
PyramidResource.build_routename
|
def build_routename(cls, name, routename_prefix=None):
"""
Given a ``name`` & an optional ``routename_prefix``, this generates a
name for a URL.
:param name: The name for the URL (ex. 'detail')
:type name: string
:param routename_prefix: (Optional) A prefix for the URL's name (for
resolving). The default is ``None``, which will autocreate a prefix
based on the class name. Ex: ``BlogPostResource`` ->
``api_blogpost_list``
:type routename_prefix: string
:returns: The final name
:rtype: string
"""
if routename_prefix is None:
routename_prefix = 'api_{}'.format(
cls.__name__.replace('Resource', '').lower()
)
routename_prefix = routename_prefix.rstrip('_')
return '_'.join([routename_prefix, name])
|
python
|
def build_routename(cls, name, routename_prefix=None):
"""
Given a ``name`` & an optional ``routename_prefix``, this generates a
name for a URL.
:param name: The name for the URL (ex. 'detail')
:type name: string
:param routename_prefix: (Optional) A prefix for the URL's name (for
resolving). The default is ``None``, which will autocreate a prefix
based on the class name. Ex: ``BlogPostResource`` ->
``api_blogpost_list``
:type routename_prefix: string
:returns: The final name
:rtype: string
"""
if routename_prefix is None:
routename_prefix = 'api_{}'.format(
cls.__name__.replace('Resource', '').lower()
)
routename_prefix = routename_prefix.rstrip('_')
return '_'.join([routename_prefix, name])
|
[
"def",
"build_routename",
"(",
"cls",
",",
"name",
",",
"routename_prefix",
"=",
"None",
")",
":",
"if",
"routename_prefix",
"is",
"None",
":",
"routename_prefix",
"=",
"'api_{}'",
".",
"format",
"(",
"cls",
".",
"__name__",
".",
"replace",
"(",
"'Resource'",
",",
"''",
")",
".",
"lower",
"(",
")",
")",
"routename_prefix",
"=",
"routename_prefix",
".",
"rstrip",
"(",
"'_'",
")",
"return",
"'_'",
".",
"join",
"(",
"[",
"routename_prefix",
",",
"name",
"]",
")"
] |
Given a ``name`` & an optional ``routename_prefix``, this generates a
name for a URL.
:param name: The name for the URL (ex. 'detail')
:type name: string
:param routename_prefix: (Optional) A prefix for the URL's name (for
resolving). The default is ``None``, which will autocreate a prefix
based on the class name. Ex: ``BlogPostResource`` ->
``api_blogpost_list``
:type routename_prefix: string
:returns: The final name
:rtype: string
|
[
"Given",
"a",
"name",
"&",
"an",
"optional",
"routename_prefix",
"this",
"generates",
"a",
"name",
"for",
"a",
"URL",
"."
] |
661593b7b43c42d1bc508dec795356297991255e
|
https://github.com/toastdriven/restless/blob/661593b7b43c42d1bc508dec795356297991255e/restless/pyr.py#L41-L64
|
11,908
|
toastdriven/restless
|
restless/pyr.py
|
PyramidResource.add_views
|
def add_views(cls, config, rule_prefix, routename_prefix=None):
"""
A convenience method for registering the routes and views in pyramid.
This automatically adds a list and detail endpoint to your routes.
:param config: The pyramid ``Configurator`` object for your app.
:type config: ``pyramid.config.Configurator``
:param rule_prefix: The start of the URL to handle.
:type rule_prefix: string
:param routename_prefix: (Optional) A prefix for the route's name.
The default is ``None``, which will autocreate a prefix based on the
class name. Ex: ``PostResource`` -> ``api_post_list``
:type routename_prefix: string
:returns: ``pyramid.config.Configurator``
"""
methods = ('GET', 'POST', 'PUT', 'DELETE')
config.add_route(
cls.build_routename('list', routename_prefix),
rule_prefix
)
config.add_view(
cls.as_list(),
route_name=cls.build_routename('list', routename_prefix),
request_method=methods
)
config.add_route(
cls.build_routename('detail', routename_prefix),
rule_prefix + '{name}/'
)
config.add_view(
cls.as_detail(),
route_name=cls.build_routename('detail', routename_prefix),
request_method=methods
)
return config
|
python
|
def add_views(cls, config, rule_prefix, routename_prefix=None):
"""
A convenience method for registering the routes and views in pyramid.
This automatically adds a list and detail endpoint to your routes.
:param config: The pyramid ``Configurator`` object for your app.
:type config: ``pyramid.config.Configurator``
:param rule_prefix: The start of the URL to handle.
:type rule_prefix: string
:param routename_prefix: (Optional) A prefix for the route's name.
The default is ``None``, which will autocreate a prefix based on the
class name. Ex: ``PostResource`` -> ``api_post_list``
:type routename_prefix: string
:returns: ``pyramid.config.Configurator``
"""
methods = ('GET', 'POST', 'PUT', 'DELETE')
config.add_route(
cls.build_routename('list', routename_prefix),
rule_prefix
)
config.add_view(
cls.as_list(),
route_name=cls.build_routename('list', routename_prefix),
request_method=methods
)
config.add_route(
cls.build_routename('detail', routename_prefix),
rule_prefix + '{name}/'
)
config.add_view(
cls.as_detail(),
route_name=cls.build_routename('detail', routename_prefix),
request_method=methods
)
return config
|
[
"def",
"add_views",
"(",
"cls",
",",
"config",
",",
"rule_prefix",
",",
"routename_prefix",
"=",
"None",
")",
":",
"methods",
"=",
"(",
"'GET'",
",",
"'POST'",
",",
"'PUT'",
",",
"'DELETE'",
")",
"config",
".",
"add_route",
"(",
"cls",
".",
"build_routename",
"(",
"'list'",
",",
"routename_prefix",
")",
",",
"rule_prefix",
")",
"config",
".",
"add_view",
"(",
"cls",
".",
"as_list",
"(",
")",
",",
"route_name",
"=",
"cls",
".",
"build_routename",
"(",
"'list'",
",",
"routename_prefix",
")",
",",
"request_method",
"=",
"methods",
")",
"config",
".",
"add_route",
"(",
"cls",
".",
"build_routename",
"(",
"'detail'",
",",
"routename_prefix",
")",
",",
"rule_prefix",
"+",
"'{name}/'",
")",
"config",
".",
"add_view",
"(",
"cls",
".",
"as_detail",
"(",
")",
",",
"route_name",
"=",
"cls",
".",
"build_routename",
"(",
"'detail'",
",",
"routename_prefix",
")",
",",
"request_method",
"=",
"methods",
")",
"return",
"config"
] |
A convenience method for registering the routes and views in pyramid.
This automatically adds a list and detail endpoint to your routes.
:param config: The pyramid ``Configurator`` object for your app.
:type config: ``pyramid.config.Configurator``
:param rule_prefix: The start of the URL to handle.
:type rule_prefix: string
:param routename_prefix: (Optional) A prefix for the route's name.
The default is ``None``, which will autocreate a prefix based on the
class name. Ex: ``PostResource`` -> ``api_post_list``
:type routename_prefix: string
:returns: ``pyramid.config.Configurator``
|
[
"A",
"convenience",
"method",
"for",
"registering",
"the",
"routes",
"and",
"views",
"in",
"pyramid",
"."
] |
661593b7b43c42d1bc508dec795356297991255e
|
https://github.com/toastdriven/restless/blob/661593b7b43c42d1bc508dec795356297991255e/restless/pyr.py#L67-L107
|
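A sketch of wiring `PyramidResource.add_views` into a Pyramid app, per the row above; `PostResource` is a hypothetical subclass defined elsewhere.

```python
# Sketch assuming the add_views API above; PostResource is an illustrative
# PyramidResource subclass.
from pyramid.config import Configurator

config = Configurator()
PostResource.add_views(config, '/api/posts/')
# Registers routes/views named 'api_post_list' (at /api/posts/) and
# 'api_post_detail' (at /api/posts/{name}/), each for GET/POST/PUT/DELETE.
app = config.make_wsgi_app()
```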
11,909
|
toastdriven/restless
|
restless/serializers.py
|
JSONSerializer.deserialize
|
def deserialize(self, body):
"""
The low-level deserialization.
Underpins ``deserialize``, ``deserialize_list`` &
``deserialize_detail``.
Has no built-in smarts, simply loads the JSON.
:param body: The body of the current request
:type body: string
:returns: The deserialized data
:rtype: ``list`` or ``dict``
"""
try:
if isinstance(body, bytes):
return json.loads(body.decode('utf-8'))
return json.loads(body)
except ValueError:
raise BadRequest('Request body is not valid JSON')
|
python
|
def deserialize(self, body):
"""
The low-level deserialization.
Underpins ``deserialize``, ``deserialize_list`` &
``deserialize_detail``.
Has no built-in smarts, simply loads the JSON.
:param body: The body of the current request
:type body: string
:returns: The deserialized data
:rtype: ``list`` or ``dict``
"""
try:
if isinstance(body, bytes):
return json.loads(body.decode('utf-8'))
return json.loads(body)
except ValueError:
raise BadRequest('Request body is not valid JSON')
|
[
"def",
"deserialize",
"(",
"self",
",",
"body",
")",
":",
"try",
":",
"if",
"isinstance",
"(",
"body",
",",
"bytes",
")",
":",
"return",
"json",
".",
"loads",
"(",
"body",
".",
"decode",
"(",
"'utf-8'",
")",
")",
"return",
"json",
".",
"loads",
"(",
"body",
")",
"except",
"ValueError",
":",
"raise",
"BadRequest",
"(",
"'Request body is not valid JSON'",
")"
] |
The low-level deserialization.
Underpins ``deserialize``, ``deserialize_list`` &
``deserialize_detail``.
Has no built-in smarts, simply loads the JSON.
:param body: The body of the current request
:type body: string
:returns: The deserialized data
:rtype: ``list`` or ``dict``
|
[
"The",
"low",
"-",
"level",
"deserialization",
"."
] |
661593b7b43c42d1bc508dec795356297991255e
|
https://github.com/toastdriven/restless/blob/661593b7b43c42d1bc508dec795356297991255e/restless/serializers.py#L47-L67
|
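A short sketch of the `JSONSerializer.deserialize` behaviour in the row above.

```python
# Sketch assuming the restless JSONSerializer shown above.
from restless.serializers import JSONSerializer
from restless.exceptions import BadRequest

serializer = JSONSerializer()
serializer.deserialize(b'{"greeting": "hello"}')  # -> {'greeting': 'hello'}
serializer.deserialize('[1, 2, 3]')               # -> [1, 2, 3]

try:
    serializer.deserialize('not json')
except BadRequest:
    pass  # invalid bodies surface as BadRequest, per the except ValueError branch
```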
11,910
|
mila-iqia/fuel
|
fuel/converters/mnist.py
|
convert_mnist
|
def convert_mnist(directory, output_directory, output_filename=None,
dtype=None):
"""Converts the MNIST dataset to HDF5.
Converts the MNIST dataset to an HDF5 dataset compatible with
:class:`fuel.datasets.MNIST`. The converted dataset is
saved as 'mnist.hdf5'.
This method assumes the existence of the following files:
`train-images-idx3-ubyte.gz`, `train-labels-idx1-ubyte.gz`
`t10k-images-idx3-ubyte.gz`, `t10k-labels-idx1-ubyte.gz`
It assumes the existence of the following files:
* `train-images-idx3-ubyte.gz`
* `train-labels-idx1-ubyte.gz`
* `t10k-images-idx3-ubyte.gz`
* `t10k-labels-idx1-ubyte.gz`
Parameters
----------
directory : str
Directory in which input files reside.
output_directory : str
Directory in which to save the converted dataset.
output_filename : str, optional
Name of the saved dataset. Defaults to `None`, in which case a name
based on `dtype` will be used.
dtype : str, optional
Either 'float32', 'float64', or 'bool'. Defaults to `None`,
in which case images will be returned in their original
unsigned byte format.
Returns
-------
output_paths : tuple of str
Single-element tuple containing the path to the converted dataset.
"""
if not output_filename:
if dtype:
output_filename = 'mnist_{}.hdf5'.format(dtype)
else:
output_filename = 'mnist.hdf5'
output_path = os.path.join(output_directory, output_filename)
h5file = h5py.File(output_path, mode='w')
train_feat_path = os.path.join(directory, TRAIN_IMAGES)
train_features = read_mnist_images(train_feat_path, dtype)
train_lab_path = os.path.join(directory, TRAIN_LABELS)
train_labels = read_mnist_labels(train_lab_path)
test_feat_path = os.path.join(directory, TEST_IMAGES)
test_features = read_mnist_images(test_feat_path, dtype)
test_lab_path = os.path.join(directory, TEST_LABELS)
test_labels = read_mnist_labels(test_lab_path)
data = (('train', 'features', train_features),
('train', 'targets', train_labels),
('test', 'features', test_features),
('test', 'targets', test_labels))
fill_hdf5_file(h5file, data)
h5file['features'].dims[0].label = 'batch'
h5file['features'].dims[1].label = 'channel'
h5file['features'].dims[2].label = 'height'
h5file['features'].dims[3].label = 'width'
h5file['targets'].dims[0].label = 'batch'
h5file['targets'].dims[1].label = 'index'
h5file.flush()
h5file.close()
return (output_path,)
|
python
|
def convert_mnist(directory, output_directory, output_filename=None,
dtype=None):
"""Converts the MNIST dataset to HDF5.
Converts the MNIST dataset to an HDF5 dataset compatible with
:class:`fuel.datasets.MNIST`. The converted dataset is
saved as 'mnist.hdf5'.
This method assumes the existence of the following files:
`train-images-idx3-ubyte.gz`, `train-labels-idx1-ubyte.gz`
`t10k-images-idx3-ubyte.gz`, `t10k-labels-idx1-ubyte.gz`
It assumes the existence of the following files:
* `train-images-idx3-ubyte.gz`
* `train-labels-idx1-ubyte.gz`
* `t10k-images-idx3-ubyte.gz`
* `t10k-labels-idx1-ubyte.gz`
Parameters
----------
directory : str
Directory in which input files reside.
output_directory : str
Directory in which to save the converted dataset.
output_filename : str, optional
Name of the saved dataset. Defaults to `None`, in which case a name
based on `dtype` will be used.
dtype : str, optional
Either 'float32', 'float64', or 'bool'. Defaults to `None`,
in which case images will be returned in their original
unsigned byte format.
Returns
-------
output_paths : tuple of str
Single-element tuple containing the path to the converted dataset.
"""
if not output_filename:
if dtype:
output_filename = 'mnist_{}.hdf5'.format(dtype)
else:
output_filename = 'mnist.hdf5'
output_path = os.path.join(output_directory, output_filename)
h5file = h5py.File(output_path, mode='w')
train_feat_path = os.path.join(directory, TRAIN_IMAGES)
train_features = read_mnist_images(train_feat_path, dtype)
train_lab_path = os.path.join(directory, TRAIN_LABELS)
train_labels = read_mnist_labels(train_lab_path)
test_feat_path = os.path.join(directory, TEST_IMAGES)
test_features = read_mnist_images(test_feat_path, dtype)
test_lab_path = os.path.join(directory, TEST_LABELS)
test_labels = read_mnist_labels(test_lab_path)
data = (('train', 'features', train_features),
('train', 'targets', train_labels),
('test', 'features', test_features),
('test', 'targets', test_labels))
fill_hdf5_file(h5file, data)
h5file['features'].dims[0].label = 'batch'
h5file['features'].dims[1].label = 'channel'
h5file['features'].dims[2].label = 'height'
h5file['features'].dims[3].label = 'width'
h5file['targets'].dims[0].label = 'batch'
h5file['targets'].dims[1].label = 'index'
h5file.flush()
h5file.close()
return (output_path,)
|
[
"def",
"convert_mnist",
"(",
"directory",
",",
"output_directory",
",",
"output_filename",
"=",
"None",
",",
"dtype",
"=",
"None",
")",
":",
"if",
"not",
"output_filename",
":",
"if",
"dtype",
":",
"output_filename",
"=",
"'mnist_{}.hdf5'",
".",
"format",
"(",
"dtype",
")",
"else",
":",
"output_filename",
"=",
"'mnist.hdf5'",
"output_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"output_directory",
",",
"output_filename",
")",
"h5file",
"=",
"h5py",
".",
"File",
"(",
"output_path",
",",
"mode",
"=",
"'w'",
")",
"train_feat_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"directory",
",",
"TRAIN_IMAGES",
")",
"train_features",
"=",
"read_mnist_images",
"(",
"train_feat_path",
",",
"dtype",
")",
"train_lab_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"directory",
",",
"TRAIN_LABELS",
")",
"train_labels",
"=",
"read_mnist_labels",
"(",
"train_lab_path",
")",
"test_feat_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"directory",
",",
"TEST_IMAGES",
")",
"test_features",
"=",
"read_mnist_images",
"(",
"test_feat_path",
",",
"dtype",
")",
"test_lab_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"directory",
",",
"TEST_LABELS",
")",
"test_labels",
"=",
"read_mnist_labels",
"(",
"test_lab_path",
")",
"data",
"=",
"(",
"(",
"'train'",
",",
"'features'",
",",
"train_features",
")",
",",
"(",
"'train'",
",",
"'targets'",
",",
"train_labels",
")",
",",
"(",
"'test'",
",",
"'features'",
",",
"test_features",
")",
",",
"(",
"'test'",
",",
"'targets'",
",",
"test_labels",
")",
")",
"fill_hdf5_file",
"(",
"h5file",
",",
"data",
")",
"h5file",
"[",
"'features'",
"]",
".",
"dims",
"[",
"0",
"]",
".",
"label",
"=",
"'batch'",
"h5file",
"[",
"'features'",
"]",
".",
"dims",
"[",
"1",
"]",
".",
"label",
"=",
"'channel'",
"h5file",
"[",
"'features'",
"]",
".",
"dims",
"[",
"2",
"]",
".",
"label",
"=",
"'height'",
"h5file",
"[",
"'features'",
"]",
".",
"dims",
"[",
"3",
"]",
".",
"label",
"=",
"'width'",
"h5file",
"[",
"'targets'",
"]",
".",
"dims",
"[",
"0",
"]",
".",
"label",
"=",
"'batch'",
"h5file",
"[",
"'targets'",
"]",
".",
"dims",
"[",
"1",
"]",
".",
"label",
"=",
"'index'",
"h5file",
".",
"flush",
"(",
")",
"h5file",
".",
"close",
"(",
")",
"return",
"(",
"output_path",
",",
")"
] |
Converts the MNIST dataset to HDF5.
Converts the MNIST dataset to an HDF5 dataset compatible with
:class:`fuel.datasets.MNIST`. The converted dataset is
saved as 'mnist.hdf5'.
This method assumes the existence of the following files:
`train-images-idx3-ubyte.gz`, `train-labels-idx1-ubyte.gz`
`t10k-images-idx3-ubyte.gz`, `t10k-labels-idx1-ubyte.gz`
It assumes the existence of the following files:
* `train-images-idx3-ubyte.gz`
* `train-labels-idx1-ubyte.gz`
* `t10k-images-idx3-ubyte.gz`
* `t10k-labels-idx1-ubyte.gz`
Parameters
----------
directory : str
Directory in which input files reside.
output_directory : str
Directory in which to save the converted dataset.
output_filename : str, optional
Name of the saved dataset. Defaults to `None`, in which case a name
based on `dtype` will be used.
dtype : str, optional
Either 'float32', 'float64', or 'bool'. Defaults to `None`,
in which case images will be returned in their original
unsigned byte format.
Returns
-------
output_paths : tuple of str
Single-element tuple containing the path to the converted dataset.
|
[
"Converts",
"the",
"MNIST",
"dataset",
"to",
"HDF5",
"."
] |
1d6292dc25e3a115544237e392e61bff6631d23c
|
https://github.com/mila-iqia/fuel/blob/1d6292dc25e3a115544237e392e61bff6631d23c/fuel/converters/mnist.py#L22-L92
|
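A hypothetical direct call of `convert_mnist` from the row above; the paths are illustrative (fuel normally drives these converters through its command-line tooling).

```python
# Hypothetical direct call of the converter above; paths are illustrative.
output_paths = convert_mnist(
    directory='/data/mnist-raw',     # holds the four *-ubyte.gz files listed above
    output_directory='/data/fuel',
    dtype='float32',                 # -> mnist_float32.hdf5; None keeps uint8 images
)
print(output_paths)                  # ('/data/fuel/mnist_float32.hdf5',)
```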
11,911
|
mila-iqia/fuel
|
fuel/converters/mnist.py
|
fill_subparser
|
def fill_subparser(subparser):
"""Sets up a subparser to convert the MNIST dataset files.
Parameters
----------
subparser : :class:`argparse.ArgumentParser`
Subparser handling the `mnist` command.
"""
subparser.add_argument(
"--dtype", help="dtype to save to; by default, images will be " +
"returned in their original unsigned byte format",
choices=('float32', 'float64', 'bool'), type=str, default=None)
return convert_mnist
|
python
|
def fill_subparser(subparser):
"""Sets up a subparser to convert the MNIST dataset files.
Parameters
----------
subparser : :class:`argparse.ArgumentParser`
Subparser handling the `mnist` command.
"""
subparser.add_argument(
"--dtype", help="dtype to save to; by default, images will be " +
"returned in their original unsigned byte format",
choices=('float32', 'float64', 'bool'), type=str, default=None)
return convert_mnist
|
[
"def",
"fill_subparser",
"(",
"subparser",
")",
":",
"subparser",
".",
"add_argument",
"(",
"\"--dtype\"",
",",
"help",
"=",
"\"dtype to save to; by default, images will be \"",
"+",
"\"returned in their original unsigned byte format\"",
",",
"choices",
"=",
"(",
"'float32'",
",",
"'float64'",
",",
"'bool'",
")",
",",
"type",
"=",
"str",
",",
"default",
"=",
"None",
")",
"return",
"convert_mnist"
] |
Sets up a subparser to convert the MNIST dataset files.
Parameters
----------
subparser : :class:`argparse.ArgumentParser`
Subparser handling the `mnist` command.
|
[
"Sets",
"up",
"a",
"subparser",
"to",
"convert",
"the",
"MNIST",
"dataset",
"files",
"."
] |
1d6292dc25e3a115544237e392e61bff6631d23c
|
https://github.com/mila-iqia/fuel/blob/1d6292dc25e3a115544237e392e61bff6631d23c/fuel/converters/mnist.py#L95-L108
|
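A sketch of how `fill_subparser` plugs into `argparse`; the wiring here is illustrative of how a converter command line could be assembled around it.

```python
# Illustrative wiring only; fuel's own CLI builds its subparsers similarly.
import argparse

parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers(dest='dataset')
mnist_parser = subparsers.add_parser('mnist')
converter = fill_subparser(mnist_parser)   # adds --dtype and returns convert_mnist

args = parser.parse_args(['mnist', '--dtype', 'float32'])
# converter('/data/mnist-raw', '/data/fuel', dtype=args.dtype) would run the conversion
```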
11,912
|
mila-iqia/fuel
|
fuel/converters/mnist.py
|
read_mnist_images
|
def read_mnist_images(filename, dtype=None):
"""Read MNIST images from the original ubyte file format.
Parameters
----------
filename : str
Filename/path from which to read images.
dtype : 'float32', 'float64', or 'bool'
If unspecified, images will be returned in their original
unsigned byte format.
Returns
-------
images : :class:`~numpy.ndarray`, shape (n_images, 1, n_rows, n_cols)
An image array, with individual examples indexed along the
first axis and the image dimensions along the second and
third axis.
Notes
-----
If the dtype provided was Boolean, the resulting array will
be Boolean with `True` if the corresponding pixel had a value
greater than or equal to 128, `False` otherwise.
If the dtype provided was a float dtype, the values will be mapped to
the unit interval [0, 1], with pixel values that were 255 in the
original unsigned byte representation equal to 1.0.
"""
with gzip.open(filename, 'rb') as f:
magic, number, rows, cols = struct.unpack('>iiii', f.read(16))
if magic != MNIST_IMAGE_MAGIC:
raise ValueError("Wrong magic number reading MNIST image file")
array = numpy.frombuffer(f.read(), dtype='uint8')
array = array.reshape((number, 1, rows, cols))
if dtype:
dtype = numpy.dtype(dtype)
if dtype.kind == 'b':
# If the user wants Booleans, threshold at half the range.
array = array >= 128
elif dtype.kind == 'f':
# Otherwise, just convert.
array = array.astype(dtype)
array /= 255.
else:
raise ValueError("Unknown dtype to convert MNIST to")
return array
|
python
|
def read_mnist_images(filename, dtype=None):
"""Read MNIST images from the original ubyte file format.
Parameters
----------
filename : str
Filename/path from which to read images.
dtype : 'float32', 'float64', or 'bool'
If unspecified, images will be returned in their original
unsigned byte format.
Returns
-------
images : :class:`~numpy.ndarray`, shape (n_images, 1, n_rows, n_cols)
An image array, with individual examples indexed along the
first axis and the image dimensions along the second and
third axis.
Notes
-----
If the dtype provided was Boolean, the resulting array will
be Boolean with `True` if the corresponding pixel had a value
greater than or equal to 128, `False` otherwise.
If the dtype provided was a float dtype, the values will be mapped to
the unit interval [0, 1], with pixel values that were 255 in the
original unsigned byte representation equal to 1.0.
"""
with gzip.open(filename, 'rb') as f:
magic, number, rows, cols = struct.unpack('>iiii', f.read(16))
if magic != MNIST_IMAGE_MAGIC:
raise ValueError("Wrong magic number reading MNIST image file")
array = numpy.frombuffer(f.read(), dtype='uint8')
array = array.reshape((number, 1, rows, cols))
if dtype:
dtype = numpy.dtype(dtype)
if dtype.kind == 'b':
# If the user wants Booleans, threshold at half the range.
array = array >= 128
elif dtype.kind == 'f':
# Otherwise, just convert.
array = array.astype(dtype)
array /= 255.
else:
raise ValueError("Unknown dtype to convert MNIST to")
return array
|
[
"def",
"read_mnist_images",
"(",
"filename",
",",
"dtype",
"=",
"None",
")",
":",
"with",
"gzip",
".",
"open",
"(",
"filename",
",",
"'rb'",
")",
"as",
"f",
":",
"magic",
",",
"number",
",",
"rows",
",",
"cols",
"=",
"struct",
".",
"unpack",
"(",
"'>iiii'",
",",
"f",
".",
"read",
"(",
"16",
")",
")",
"if",
"magic",
"!=",
"MNIST_IMAGE_MAGIC",
":",
"raise",
"ValueError",
"(",
"\"Wrong magic number reading MNIST image file\"",
")",
"array",
"=",
"numpy",
".",
"frombuffer",
"(",
"f",
".",
"read",
"(",
")",
",",
"dtype",
"=",
"'uint8'",
")",
"array",
"=",
"array",
".",
"reshape",
"(",
"(",
"number",
",",
"1",
",",
"rows",
",",
"cols",
")",
")",
"if",
"dtype",
":",
"dtype",
"=",
"numpy",
".",
"dtype",
"(",
"dtype",
")",
"if",
"dtype",
".",
"kind",
"==",
"'b'",
":",
"# If the user wants Booleans, threshold at half the range.",
"array",
"=",
"array",
">=",
"128",
"elif",
"dtype",
".",
"kind",
"==",
"'f'",
":",
"# Otherwise, just convert.",
"array",
"=",
"array",
".",
"astype",
"(",
"dtype",
")",
"array",
"/=",
"255.",
"else",
":",
"raise",
"ValueError",
"(",
"\"Unknown dtype to convert MNIST to\"",
")",
"return",
"array"
] |
Read MNIST images from the original ubyte file format.
Parameters
----------
filename : str
Filename/path from which to read images.
dtype : 'float32', 'float64', or 'bool'
If unspecified, images will be returned in their original
unsigned byte format.
Returns
-------
images : :class:`~numpy.ndarray`, shape (n_images, 1, n_rows, n_cols)
An image array, with individual examples indexed along the
first axis and the image dimensions along the second and
third axis.
Notes
-----
If the dtype provided was Boolean, the resulting array will
be Boolean with `True` if the corresponding pixel had a value
greater than or equal to 128, `False` otherwise.
If the dtype provided was a float dtype, the values will be mapped to
the unit interval [0, 1], with pixel values that were 255 in the
original unsigned byte representation equal to 1.0.
|
[
"Read",
"MNIST",
"images",
"from",
"the",
"original",
"ubyte",
"file",
"format",
"."
] |
1d6292dc25e3a115544237e392e61bff6631d23c
|
https://github.com/mila-iqia/fuel/blob/1d6292dc25e3a115544237e392e61bff6631d23c/fuel/converters/mnist.py#L111-L159
|
11,913
|
mila-iqia/fuel
|
fuel/converters/mnist.py
|
read_mnist_labels
|
def read_mnist_labels(filename):
"""Read MNIST labels from the original ubyte file format.
Parameters
----------
filename : str
Filename/path from which to read labels.
Returns
-------
labels : :class:`~numpy.ndarray`, shape (nlabels, 1)
A one-dimensional unsigned byte array containing the
labels as integers.
"""
with gzip.open(filename, 'rb') as f:
magic, _ = struct.unpack('>ii', f.read(8))
if magic != MNIST_LABEL_MAGIC:
raise ValueError("Wrong magic number reading MNIST label file")
array = numpy.frombuffer(f.read(), dtype='uint8')
array = array.reshape(array.size, 1)
return array
|
python
|
def read_mnist_labels(filename):
"""Read MNIST labels from the original ubyte file format.
Parameters
----------
filename : str
Filename/path from which to read labels.
Returns
-------
labels : :class:`~numpy.ndarray`, shape (nlabels, 1)
A one-dimensional unsigned byte array containing the
labels as integers.
"""
with gzip.open(filename, 'rb') as f:
magic, _ = struct.unpack('>ii', f.read(8))
if magic != MNIST_LABEL_MAGIC:
raise ValueError("Wrong magic number reading MNIST label file")
array = numpy.frombuffer(f.read(), dtype='uint8')
array = array.reshape(array.size, 1)
return array
|
[
"def",
"read_mnist_labels",
"(",
"filename",
")",
":",
"with",
"gzip",
".",
"open",
"(",
"filename",
",",
"'rb'",
")",
"as",
"f",
":",
"magic",
",",
"_",
"=",
"struct",
".",
"unpack",
"(",
"'>ii'",
",",
"f",
".",
"read",
"(",
"8",
")",
")",
"if",
"magic",
"!=",
"MNIST_LABEL_MAGIC",
":",
"raise",
"ValueError",
"(",
"\"Wrong magic number reading MNIST label file\"",
")",
"array",
"=",
"numpy",
".",
"frombuffer",
"(",
"f",
".",
"read",
"(",
")",
",",
"dtype",
"=",
"'uint8'",
")",
"array",
"=",
"array",
".",
"reshape",
"(",
"array",
".",
"size",
",",
"1",
")",
"return",
"array"
] |
Read MNIST labels from the original ubyte file format.
Parameters
----------
filename : str
Filename/path from which to read labels.
Returns
-------
labels : :class:`~numpy.ndarray`, shape (nlabels, 1)
A one-dimensional unsigned byte array containing the
labels as integers.
|
[
"Read",
"MNIST",
"labels",
"from",
"the",
"original",
"ubyte",
"file",
"format",
"."
] |
1d6292dc25e3a115544237e392e61bff6631d23c
|
https://github.com/mila-iqia/fuel/blob/1d6292dc25e3a115544237e392e61bff6631d23c/fuel/converters/mnist.py#L162-L183
|
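A sketch of reading the raw MNIST files with the two readers in the rows above; the filenames match the archives listed earlier, and the shapes are those of the standard training set.

```python
# Sketch assuming the readers above; the standard training set has 60,000 images.
images = read_mnist_images('train-images-idx3-ubyte.gz', dtype='float32')
labels = read_mnist_labels('train-labels-idx1-ubyte.gz')

print(images.shape, images.dtype)   # (60000, 1, 28, 28) float32, values in [0, 1]
print(labels.shape, labels.dtype)   # (60000, 1) uint8
```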
11,914
|
mila-iqia/fuel
|
fuel/converters/ilsvrc2010.py
|
prepare_hdf5_file
|
def prepare_hdf5_file(hdf5_file, n_train, n_valid, n_test):
"""Create datasets within a given HDF5 file.
Parameters
----------
hdf5_file : :class:`h5py.File` instance
HDF5 file handle to which to write.
n_train : int
The number of training set examples.
n_valid : int
The number of validation set examples.
n_test : int
The number of test set examples.
"""
n_total = n_train + n_valid + n_test
splits = create_splits(n_train, n_valid, n_test)
hdf5_file.attrs['split'] = H5PYDataset.create_split_array(splits)
vlen_dtype = h5py.special_dtype(vlen=numpy.dtype('uint8'))
hdf5_file.create_dataset('encoded_images', shape=(n_total,),
dtype=vlen_dtype)
hdf5_file.create_dataset('targets', shape=(n_total, 1), dtype=numpy.int16)
hdf5_file.create_dataset('filenames', shape=(n_total, 1), dtype='S32')
|
python
|
def prepare_hdf5_file(hdf5_file, n_train, n_valid, n_test):
"""Create datasets within a given HDF5 file.
Parameters
----------
hdf5_file : :class:`h5py.File` instance
HDF5 file handle to which to write.
n_train : int
The number of training set examples.
n_valid : int
The number of validation set examples.
n_test : int
The number of test set examples.
"""
n_total = n_train + n_valid + n_test
splits = create_splits(n_train, n_valid, n_test)
hdf5_file.attrs['split'] = H5PYDataset.create_split_array(splits)
vlen_dtype = h5py.special_dtype(vlen=numpy.dtype('uint8'))
hdf5_file.create_dataset('encoded_images', shape=(n_total,),
dtype=vlen_dtype)
hdf5_file.create_dataset('targets', shape=(n_total, 1), dtype=numpy.int16)
hdf5_file.create_dataset('filenames', shape=(n_total, 1), dtype='S32')
|
[
"def",
"prepare_hdf5_file",
"(",
"hdf5_file",
",",
"n_train",
",",
"n_valid",
",",
"n_test",
")",
":",
"n_total",
"=",
"n_train",
"+",
"n_valid",
"+",
"n_test",
"splits",
"=",
"create_splits",
"(",
"n_train",
",",
"n_valid",
",",
"n_test",
")",
"hdf5_file",
".",
"attrs",
"[",
"'split'",
"]",
"=",
"H5PYDataset",
".",
"create_split_array",
"(",
"splits",
")",
"vlen_dtype",
"=",
"h5py",
".",
"special_dtype",
"(",
"vlen",
"=",
"numpy",
".",
"dtype",
"(",
"'uint8'",
")",
")",
"hdf5_file",
".",
"create_dataset",
"(",
"'encoded_images'",
",",
"shape",
"=",
"(",
"n_total",
",",
")",
",",
"dtype",
"=",
"vlen_dtype",
")",
"hdf5_file",
".",
"create_dataset",
"(",
"'targets'",
",",
"shape",
"=",
"(",
"n_total",
",",
"1",
")",
",",
"dtype",
"=",
"numpy",
".",
"int16",
")",
"hdf5_file",
".",
"create_dataset",
"(",
"'filenames'",
",",
"shape",
"=",
"(",
"n_total",
",",
"1",
")",
",",
"dtype",
"=",
"'S32'",
")"
] |
Create datasets within a given HDF5 file.
Parameters
----------
hdf5_file : :class:`h5py.File` instance
HDF5 file handle to which to write.
n_train : int
The number of training set examples.
n_valid : int
The number of validation set examples.
n_test : int
The number of test set examples.
|
[
"Create",
"datasets",
"within",
"a",
"given",
"HDF5",
"file",
"."
] |
1d6292dc25e3a115544237e392e61bff6631d23c
|
https://github.com/mila-iqia/fuel/blob/1d6292dc25e3a115544237e392e61bff6631d23c/fuel/converters/ilsvrc2010.py#L179-L201
|
11,915
|
mila-iqia/fuel
|
fuel/converters/ilsvrc2010.py
|
process_train_set
|
def process_train_set(hdf5_file, train_archive, patch_archive, n_train,
wnid_map, shuffle_seed=None):
"""Process the ILSVRC2010 training set.
Parameters
----------
hdf5_file : :class:`h5py.File` instance
HDF5 file handle to which to write. Assumes `features`, `targets`
and `filenames` already exist and have first dimension larger than
`n_train`.
train_archive : str or file-like object
Filename or file handle for the TAR archive of training images.
patch_archive : str or file-like object
Filename or file handle for the TAR archive of patch images.
n_train : int
The number of items in the training set.
wnid_map : dict
A dictionary mapping WordNet IDs to class indices.
shuffle_seed : int or sequence, optional
Seed for a NumPy random number generator that permutes the
training set on disk. If `None`, no permutation is performed
(this is the default).
"""
producer = partial(train_set_producer, train_archive=train_archive,
patch_archive=patch_archive, wnid_map=wnid_map)
consumer = partial(image_consumer, hdf5_file=hdf5_file,
num_expected=n_train, shuffle_seed=shuffle_seed)
producer_consumer(producer, consumer)
|
python
|
def process_train_set(hdf5_file, train_archive, patch_archive, n_train,
wnid_map, shuffle_seed=None):
"""Process the ILSVRC2010 training set.
Parameters
----------
hdf5_file : :class:`h5py.File` instance
HDF5 file handle to which to write. Assumes `features`, `targets`
and `filenames` already exist and have first dimension larger than
`n_train`.
train_archive : str or file-like object
Filename or file handle for the TAR archive of training images.
patch_archive : str or file-like object
Filename or file handle for the TAR archive of patch images.
n_train : int
The number of items in the training set.
wnid_map : dict
A dictionary mapping WordNet IDs to class indices.
shuffle_seed : int or sequence, optional
Seed for a NumPy random number generator that permutes the
training set on disk. If `None`, no permutation is performed
(this is the default).
"""
producer = partial(train_set_producer, train_archive=train_archive,
patch_archive=patch_archive, wnid_map=wnid_map)
consumer = partial(image_consumer, hdf5_file=hdf5_file,
num_expected=n_train, shuffle_seed=shuffle_seed)
producer_consumer(producer, consumer)
|
[
"def",
"process_train_set",
"(",
"hdf5_file",
",",
"train_archive",
",",
"patch_archive",
",",
"n_train",
",",
"wnid_map",
",",
"shuffle_seed",
"=",
"None",
")",
":",
"producer",
"=",
"partial",
"(",
"train_set_producer",
",",
"train_archive",
"=",
"train_archive",
",",
"patch_archive",
"=",
"patch_archive",
",",
"wnid_map",
"=",
"wnid_map",
")",
"consumer",
"=",
"partial",
"(",
"image_consumer",
",",
"hdf5_file",
"=",
"hdf5_file",
",",
"num_expected",
"=",
"n_train",
",",
"shuffle_seed",
"=",
"shuffle_seed",
")",
"producer_consumer",
"(",
"producer",
",",
"consumer",
")"
] |
Process the ILSVRC2010 training set.
Parameters
----------
hdf5_file : :class:`h5py.File` instance
HDF5 file handle to which to write. Assumes `features`, `targets`
and `filenames` already exist and have first dimension larger than
`n_train`.
train_archive : str or file-like object
Filename or file handle for the TAR archive of training images.
patch_archive : str or file-like object
Filename or file handle for the TAR archive of patch images.
n_train : int
The number of items in the training set.
wnid_map : dict
A dictionary mapping WordNet IDs to class indices.
shuffle_seed : int or sequence, optional
Seed for a NumPy random number generator that permutes the
training set on disk. If `None`, no permutation is performed
(this is the default).
|
[
"Process",
"the",
"ILSVRC2010",
"training",
"set",
"."
] |
1d6292dc25e3a115544237e392e61bff6631d23c
|
https://github.com/mila-iqia/fuel/blob/1d6292dc25e3a115544237e392e61bff6631d23c/fuel/converters/ilsvrc2010.py#L204-L232
|
11,916
|
mila-iqia/fuel
|
fuel/converters/ilsvrc2010.py
|
image_consumer
|
def image_consumer(socket, hdf5_file, num_expected, shuffle_seed=None,
offset=0):
"""Fill an HDF5 file with incoming images from a socket.
Parameters
----------
socket : :class:`zmq.Socket`
PULL socket on which to receive images.
hdf5_file : :class:`h5py.File` instance
HDF5 file handle to which to write. Assumes `features`, `targets`
and `filenames` already exist and have first dimension larger than
`sum(images_per_class)`.
num_expected : int
The number of items we expect to be sent over the socket.
shuffle_seed : int or sequence, optional
Seed for a NumPy random number generator that permutes the
images on disk.
offset : int, optional
The offset in the HDF5 datasets at which to start writing
received examples. Defaults to 0.
"""
with progress_bar('images', maxval=num_expected) as pb:
if shuffle_seed is None:
index_gen = iter(xrange(num_expected))
else:
rng = numpy.random.RandomState(shuffle_seed)
index_gen = iter(rng.permutation(num_expected))
for i, num in enumerate(index_gen):
image_filename, class_index = socket.recv_pyobj(zmq.SNDMORE)
image_data = numpy.fromstring(socket.recv(), dtype='uint8')
_write_to_hdf5(hdf5_file, num + offset, image_filename,
image_data, class_index)
pb.update(i + 1)
|
python
|
def image_consumer(socket, hdf5_file, num_expected, shuffle_seed=None,
offset=0):
"""Fill an HDF5 file with incoming images from a socket.
Parameters
----------
socket : :class:`zmq.Socket`
PULL socket on which to receive images.
hdf5_file : :class:`h5py.File` instance
HDF5 file handle to which to write. Assumes `features`, `targets`
and `filenames` already exist and have first dimension larger than
`sum(images_per_class)`.
num_expected : int
The number of items we expect to be sent over the socket.
shuffle_seed : int or sequence, optional
Seed for a NumPy random number generator that permutes the
images on disk.
offset : int, optional
The offset in the HDF5 datasets at which to start writing
received examples. Defaults to 0.
"""
with progress_bar('images', maxval=num_expected) as pb:
if shuffle_seed is None:
index_gen = iter(xrange(num_expected))
else:
rng = numpy.random.RandomState(shuffle_seed)
index_gen = iter(rng.permutation(num_expected))
for i, num in enumerate(index_gen):
image_filename, class_index = socket.recv_pyobj(zmq.SNDMORE)
image_data = numpy.fromstring(socket.recv(), dtype='uint8')
_write_to_hdf5(hdf5_file, num + offset, image_filename,
image_data, class_index)
pb.update(i + 1)
|
[
"def",
"image_consumer",
"(",
"socket",
",",
"hdf5_file",
",",
"num_expected",
",",
"shuffle_seed",
"=",
"None",
",",
"offset",
"=",
"0",
")",
":",
"with",
"progress_bar",
"(",
"'images'",
",",
"maxval",
"=",
"num_expected",
")",
"as",
"pb",
":",
"if",
"shuffle_seed",
"is",
"None",
":",
"index_gen",
"=",
"iter",
"(",
"xrange",
"(",
"num_expected",
")",
")",
"else",
":",
"rng",
"=",
"numpy",
".",
"random",
".",
"RandomState",
"(",
"shuffle_seed",
")",
"index_gen",
"=",
"iter",
"(",
"rng",
".",
"permutation",
"(",
"num_expected",
")",
")",
"for",
"i",
",",
"num",
"in",
"enumerate",
"(",
"index_gen",
")",
":",
"image_filename",
",",
"class_index",
"=",
"socket",
".",
"recv_pyobj",
"(",
"zmq",
".",
"SNDMORE",
")",
"image_data",
"=",
"numpy",
".",
"fromstring",
"(",
"socket",
".",
"recv",
"(",
")",
",",
"dtype",
"=",
"'uint8'",
")",
"_write_to_hdf5",
"(",
"hdf5_file",
",",
"num",
"+",
"offset",
",",
"image_filename",
",",
"image_data",
",",
"class_index",
")",
"pb",
".",
"update",
"(",
"i",
"+",
"1",
")"
] |
Fill an HDF5 file with incoming images from a socket.
Parameters
----------
socket : :class:`zmq.Socket`
PULL socket on which to receive images.
hdf5_file : :class:`h5py.File` instance
HDF5 file handle to which to write. Assumes `features`, `targets`
and `filenames` already exist and have first dimension larger than
`sum(images_per_class)`.
num_expected : int
The number of items we expect to be sent over the socket.
shuffle_seed : int or sequence, optional
Seed for a NumPy random number generator that permutes the
images on disk.
offset : int, optional
The offset in the HDF5 datasets at which to start writing
received examples. Defaults to 0.
|
[
"Fill",
"an",
"HDF5",
"file",
"with",
"incoming",
"images",
"from",
"a",
"socket",
"."
] |
1d6292dc25e3a115544237e392e61bff6631d23c
|
https://github.com/mila-iqia/fuel/blob/1d6292dc25e3a115544237e392e61bff6631d23c/fuel/converters/ilsvrc2010.py#L283-L316
|
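A hedged sketch of calling `image_consumer` directly; in the converters above `producer_consumer` normally creates the socket pair, so the ZeroMQ endpoint below is an assumption made only to keep the example self-contained.

import h5py
import zmq
from fuel.converters.ilsvrc2010 import image_consumer

context = zmq.Context()
pull_socket = context.socket(zmq.PULL)
pull_socket.connect('tcp://localhost:5557')  # hypothetical producer endpoint

with h5py.File('ilsvrc2010.hdf5', mode='a') as h5file:
    # Writes 100 received images starting at row 0 of the existing datasets.
    image_consumer(pull_socket, h5file, num_expected=100, offset=0)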
11,917
|
mila-iqia/fuel
|
fuel/converters/ilsvrc2010.py
|
process_other_set
|
def process_other_set(hdf5_file, which_set, image_archive, patch_archive,
groundtruth, offset):
"""Process the validation or test set.
Parameters
----------
hdf5_file : :class:`h5py.File` instance
HDF5 file handle to which to write. Assumes `features`, `targets`
and `filenames` already exist and have first dimension larger than
`sum(images_per_class)`.
which_set : str
Which set of images is being processed. One of 'train', 'valid',
'test'. Used for extracting the appropriate images from the patch
archive.
image_archive : str or file-like object
The filename or file-handle for the TAR archive containing images.
patch_archive : str or file-like object
Filename or file handle for the TAR archive of patch images.
groundtruth : iterable
Iterable container containing scalar 0-based class index for each
image, sorted by filename.
offset : int
The offset in the HDF5 datasets at which to start writing.
"""
producer = partial(other_set_producer, image_archive=image_archive,
patch_archive=patch_archive,
groundtruth=groundtruth, which_set=which_set)
consumer = partial(image_consumer, hdf5_file=hdf5_file,
num_expected=len(groundtruth), offset=offset)
producer_consumer(producer, consumer)
|
python
|
def process_other_set(hdf5_file, which_set, image_archive, patch_archive,
groundtruth, offset):
"""Process the validation or test set.
Parameters
----------
hdf5_file : :class:`h5py.File` instance
HDF5 file handle to which to write. Assumes `features`, `targets`
and `filenames` already exist and have first dimension larger than
`sum(images_per_class)`.
which_set : str
Which set of images is being processed. One of 'train', 'valid',
'test'. Used for extracting the appropriate images from the patch
archive.
image_archive : str or file-like object
The filename or file-handle for the TAR archive containing images.
patch_archive : str or file-like object
Filename or file handle for the TAR archive of patch images.
groundtruth : iterable
Iterable container containing scalar 0-based class index for each
image, sorted by filename.
offset : int
The offset in the HDF5 datasets at which to start writing.
"""
producer = partial(other_set_producer, image_archive=image_archive,
patch_archive=patch_archive,
groundtruth=groundtruth, which_set=which_set)
consumer = partial(image_consumer, hdf5_file=hdf5_file,
num_expected=len(groundtruth), offset=offset)
producer_consumer(producer, consumer)
|
[
"def",
"process_other_set",
"(",
"hdf5_file",
",",
"which_set",
",",
"image_archive",
",",
"patch_archive",
",",
"groundtruth",
",",
"offset",
")",
":",
"producer",
"=",
"partial",
"(",
"other_set_producer",
",",
"image_archive",
"=",
"image_archive",
",",
"patch_archive",
"=",
"patch_archive",
",",
"groundtruth",
"=",
"groundtruth",
",",
"which_set",
"=",
"which_set",
")",
"consumer",
"=",
"partial",
"(",
"image_consumer",
",",
"hdf5_file",
"=",
"hdf5_file",
",",
"num_expected",
"=",
"len",
"(",
"groundtruth",
")",
",",
"offset",
"=",
"offset",
")",
"producer_consumer",
"(",
"producer",
",",
"consumer",
")"
] |
Process the validation or test set.
Parameters
----------
hdf5_file : :class:`h5py.File` instance
HDF5 file handle to which to write. Assumes `features`, `targets`
and `filenames` already exist and have first dimension larger than
`sum(images_per_class)`.
which_set : str
Which set of images is being processed. One of 'train', 'valid',
'test'. Used for extracting the appropriate images from the patch
archive.
image_archive : str or file-like object
The filename or file-handle for the TAR archive containing images.
patch_archive : str or file-like object
Filename or file handle for the TAR archive of patch images.
groundtruth : iterable
Iterable container containing scalar 0-based class index for each
image, sorted by filename.
offset : int
The offset in the HDF5 datasets at which to start writing.
|
[
"Process",
"the",
"validation",
"or",
"test",
"set",
"."
] |
1d6292dc25e3a115544237e392e61bff6631d23c
|
https://github.com/mila-iqia/fuel/blob/1d6292dc25e3a115544237e392e61bff6631d23c/fuel/converters/ilsvrc2010.py#L319-L349
|
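A short usage sketch for `process_other_set`; the archive names, ground-truth list and offset are placeholders chosen only for illustration.

import h5py
from fuel.converters.ilsvrc2010 import process_other_set

valid_groundtruth = [0, 3, 1]  # hypothetical 0-based labels, sorted by filename
with h5py.File('ilsvrc2010.hdf5', mode='a') as h5file:
    # Validation rows are written directly after the training rows.
    process_other_set(h5file, 'valid', 'ILSVRC2010_images_val.tar',
                      'patch_images.tar', valid_groundtruth, offset=2600)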
11,918
|
mila-iqia/fuel
|
fuel/converters/ilsvrc2010.py
|
load_from_tar_or_patch
|
def load_from_tar_or_patch(tar, image_filename, patch_images):
"""Do everything necessary to process an image inside a TAR.
Parameters
----------
tar : `TarFile` instance
The tar from which to read `image_filename`.
image_filename : str
Fully-qualified path inside of `tar` from which to read an
image file.
patch_images : dict
A dictionary containing filenames (without path) of replacements
to be substituted in place of the version of the same file found
in `tar`.
Returns
-------
image_data : bytes
The JPEG bytes representing either the image from the TAR archive
or its replacement from the patch dictionary.
patched : bool
True if the image was retrieved from the patch dictionary. False
if it was retrieved from the TAR file.
"""
patched = True
image_bytes = patch_images.get(os.path.basename(image_filename), None)
if image_bytes is None:
patched = False
try:
image_bytes = tar.extractfile(image_filename).read()
numpy.array(Image.open(io.BytesIO(image_bytes)))
except (IOError, OSError):
with gzip.GzipFile(fileobj=tar.extractfile(image_filename)) as gz:
image_bytes = gz.read()
numpy.array(Image.open(io.BytesIO(image_bytes)))
return image_bytes, patched
|
python
|
def load_from_tar_or_patch(tar, image_filename, patch_images):
"""Do everything necessary to process an image inside a TAR.
Parameters
----------
tar : `TarFile` instance
The tar from which to read `image_filename`.
image_filename : str
Fully-qualified path inside of `tar` from which to read an
image file.
patch_images : dict
A dictionary containing filenames (without path) of replacements
to be substituted in place of the version of the same file found
in `tar`.
Returns
-------
image_data : bytes
The JPEG bytes representing either the image from the TAR archive
or its replacement from the patch dictionary.
patched : bool
True if the image was retrieved from the patch dictionary. False
if it was retrieved from the TAR file.
"""
patched = True
image_bytes = patch_images.get(os.path.basename(image_filename), None)
if image_bytes is None:
patched = False
try:
image_bytes = tar.extractfile(image_filename).read()
numpy.array(Image.open(io.BytesIO(image_bytes)))
except (IOError, OSError):
with gzip.GzipFile(fileobj=tar.extractfile(image_filename)) as gz:
image_bytes = gz.read()
numpy.array(Image.open(io.BytesIO(image_bytes)))
return image_bytes, patched
|
[
"def",
"load_from_tar_or_patch",
"(",
"tar",
",",
"image_filename",
",",
"patch_images",
")",
":",
"patched",
"=",
"True",
"image_bytes",
"=",
"patch_images",
".",
"get",
"(",
"os",
".",
"path",
".",
"basename",
"(",
"image_filename",
")",
",",
"None",
")",
"if",
"image_bytes",
"is",
"None",
":",
"patched",
"=",
"False",
"try",
":",
"image_bytes",
"=",
"tar",
".",
"extractfile",
"(",
"image_filename",
")",
".",
"read",
"(",
")",
"numpy",
".",
"array",
"(",
"Image",
".",
"open",
"(",
"io",
".",
"BytesIO",
"(",
"image_bytes",
")",
")",
")",
"except",
"(",
"IOError",
",",
"OSError",
")",
":",
"with",
"gzip",
".",
"GzipFile",
"(",
"fileobj",
"=",
"tar",
".",
"extractfile",
"(",
"image_filename",
")",
")",
"as",
"gz",
":",
"image_bytes",
"=",
"gz",
".",
"read",
"(",
")",
"numpy",
".",
"array",
"(",
"Image",
".",
"open",
"(",
"io",
".",
"BytesIO",
"(",
"image_bytes",
")",
")",
")",
"return",
"image_bytes",
",",
"patched"
] |
Do everything necessary to process an image inside a TAR.
Parameters
----------
tar : `TarFile` instance
The tar from which to read `image_filename`.
image_filename : str
Fully-qualified path inside of `tar` from which to read an
image file.
patch_images : dict
A dictionary containing filenames (without path) of replacements
to be substituted in place of the version of the same file found
in `tar`.
Returns
-------
image_data : bytes
The JPEG bytes representing either the image from the TAR archive
or its replacement from the patch dictionary.
patched : bool
True if the image was retrieved from the patch dictionary. False
if it was retrieved from the TAR file.
|
[
"Do",
"everything",
"necessary",
"to",
"process",
"an",
"image",
"inside",
"a",
"TAR",
"."
] |
1d6292dc25e3a115544237e392e61bff6631d23c
|
https://github.com/mila-iqia/fuel/blob/1d6292dc25e3a115544237e392e61bff6631d23c/fuel/converters/ilsvrc2010.py#L390-L426
|
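A sketch of reading one image through `load_from_tar_or_patch`, reusing `extract_patch_images` from the same module; the archive name and member path are hypothetical.

import tarfile
from fuel.converters.ilsvrc2010 import extract_patch_images, load_from_tar_or_patch

patches = extract_patch_images('patch_images.tar', 'train')
with tarfile.open('n01440764.tar') as tar:  # hypothetical per-class TAR
    jpeg_bytes, was_patched = load_from_tar_or_patch(
        tar, 'n01440764_10026.JPEG', patches)
    print(len(jpeg_bytes), was_patched)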
11,919
|
mila-iqia/fuel
|
fuel/converters/ilsvrc2010.py
|
read_devkit
|
def read_devkit(f):
"""Read relevant information from the development kit archive.
Parameters
----------
f : str or file-like object
The filename or file-handle for the gzipped TAR archive
containing the ILSVRC2010 development kit.
Returns
-------
synsets : ndarray, 1-dimensional, compound dtype
See :func:`read_metadata_mat_file` for details.
cost_matrix : ndarray, 2-dimensional, uint8
See :func:`read_metadata_mat_file` for details.
raw_valid_groundtruth : ndarray, 1-dimensional, int16
The labels for the ILSVRC2010 validation set,
distributed with the development kit code.
"""
with tar_open(f) as tar:
# Metadata table containing class hierarchy, textual descriptions, etc.
meta_mat = tar.extractfile(DEVKIT_META_PATH)
synsets, cost_matrix = read_metadata_mat_file(meta_mat)
# Raw validation data groundtruth, ILSVRC2010 IDs. Confusingly
# distributed inside the development kit archive.
raw_valid_groundtruth = numpy.loadtxt(tar.extractfile(
DEVKIT_VALID_GROUNDTRUTH_PATH), dtype=numpy.int16)
return synsets, cost_matrix, raw_valid_groundtruth
|
python
|
def read_devkit(f):
"""Read relevant information from the development kit archive.
Parameters
----------
f : str or file-like object
The filename or file-handle for the gzipped TAR archive
containing the ILSVRC2010 development kit.
Returns
-------
synsets : ndarray, 1-dimensional, compound dtype
See :func:`read_metadata_mat_file` for details.
cost_matrix : ndarray, 2-dimensional, uint8
See :func:`read_metadata_mat_file` for details.
raw_valid_groundtruth : ndarray, 1-dimensional, int16
The labels for the ILSVRC2010 validation set,
distributed with the development kit code.
"""
with tar_open(f) as tar:
# Metadata table containing class hierarchy, textual descriptions, etc.
meta_mat = tar.extractfile(DEVKIT_META_PATH)
synsets, cost_matrix = read_metadata_mat_file(meta_mat)
# Raw validation data groundtruth, ILSVRC2010 IDs. Confusingly
# distributed inside the development kit archive.
raw_valid_groundtruth = numpy.loadtxt(tar.extractfile(
DEVKIT_VALID_GROUNDTRUTH_PATH), dtype=numpy.int16)
return synsets, cost_matrix, raw_valid_groundtruth
|
[
"def",
"read_devkit",
"(",
"f",
")",
":",
"with",
"tar_open",
"(",
"f",
")",
"as",
"tar",
":",
"# Metadata table containing class hierarchy, textual descriptions, etc.",
"meta_mat",
"=",
"tar",
".",
"extractfile",
"(",
"DEVKIT_META_PATH",
")",
"synsets",
",",
"cost_matrix",
"=",
"read_metadata_mat_file",
"(",
"meta_mat",
")",
"# Raw validation data groundtruth, ILSVRC2010 IDs. Confusingly",
"# distributed inside the development kit archive.",
"raw_valid_groundtruth",
"=",
"numpy",
".",
"loadtxt",
"(",
"tar",
".",
"extractfile",
"(",
"DEVKIT_VALID_GROUNDTRUTH_PATH",
")",
",",
"dtype",
"=",
"numpy",
".",
"int16",
")",
"return",
"synsets",
",",
"cost_matrix",
",",
"raw_valid_groundtruth"
] |
Read relevant information from the development kit archive.
Parameters
----------
f : str or file-like object
The filename or file-handle for the gzipped TAR archive
containing the ILSVRC2010 development kit.
Returns
-------
synsets : ndarray, 1-dimensional, compound dtype
See :func:`read_metadata_mat_file` for details.
cost_matrix : ndarray, 2-dimensional, uint8
See :func:`read_metadata_mat_file` for details.
raw_valid_groundtruth : ndarray, 1-dimensional, int16
The labels for the ILSVRC2010 validation set,
distributed with the development kit code.
|
[
"Read",
"relevant",
"information",
"from",
"the",
"development",
"kit",
"archive",
"."
] |
1d6292dc25e3a115544237e392e61bff6631d23c
|
https://github.com/mila-iqia/fuel/blob/1d6292dc25e3a115544237e392e61bff6631d23c/fuel/converters/ilsvrc2010.py#L429-L458
|
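A minimal sketch of `read_devkit`; the local path to the gzipped devkit TAR is an assumption.

from fuel.converters.ilsvrc2010 import read_devkit

synsets, cost_matrix, raw_valid_groundtruth = read_devkit(
    'ILSVRC2010_devkit-1.0.tar.gz')  # hypothetical local filename
print(synsets.shape, cost_matrix.shape, raw_valid_groundtruth[:5])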
11,920
|
mila-iqia/fuel
|
fuel/converters/ilsvrc2010.py
|
extract_patch_images
|
def extract_patch_images(f, which_set):
"""Extracts a dict of the "patch images" for ILSVRC2010.
Parameters
----------
f : str or file-like object
The filename or file-handle to the patch images TAR file.
which_set : str
Which set of images to extract. One of 'train', 'valid', 'test'.
Returns
-------
dict
        A dictionary mapping filenames (without path) to a bytes
        object containing the replacement image.
Notes
-----
Certain images in the distributed archives are blank, or display
an "image not available" banner. A separate TAR file of
"patch images" is distributed with the corrected versions of
these. It is this archive that this function is intended to read.
"""
if which_set not in ('train', 'valid', 'test'):
raise ValueError('which_set must be one of train, valid, or test')
which_set = 'val' if which_set == 'valid' else which_set
patch_images = {}
with tar_open(f) as tar:
for info_obj in tar:
if not info_obj.name.endswith('.JPEG'):
continue
# Pretty sure that '/' is used for tarfile regardless of
# os.path.sep, but I officially don't care about Windows.
tokens = info_obj.name.split('/')
file_which_set = tokens[-2]
if file_which_set != which_set:
continue
filename = tokens[-1]
patch_images[filename] = tar.extractfile(info_obj.name).read()
return patch_images
|
python
|
def extract_patch_images(f, which_set):
"""Extracts a dict of the "patch images" for ILSVRC2010.
Parameters
----------
f : str or file-like object
The filename or file-handle to the patch images TAR file.
which_set : str
Which set of images to extract. One of 'train', 'valid', 'test'.
Returns
-------
dict
        A dictionary mapping filenames (without path) to a bytes
        object containing the replacement image.
Notes
-----
Certain images in the distributed archives are blank, or display
an "image not available" banner. A separate TAR file of
"patch images" is distributed with the corrected versions of
these. It is this archive that this function is intended to read.
"""
if which_set not in ('train', 'valid', 'test'):
raise ValueError('which_set must be one of train, valid, or test')
which_set = 'val' if which_set == 'valid' else which_set
patch_images = {}
with tar_open(f) as tar:
for info_obj in tar:
if not info_obj.name.endswith('.JPEG'):
continue
# Pretty sure that '/' is used for tarfile regardless of
# os.path.sep, but I officially don't care about Windows.
tokens = info_obj.name.split('/')
file_which_set = tokens[-2]
if file_which_set != which_set:
continue
filename = tokens[-1]
patch_images[filename] = tar.extractfile(info_obj.name).read()
return patch_images
|
[
"def",
"extract_patch_images",
"(",
"f",
",",
"which_set",
")",
":",
"if",
"which_set",
"not",
"in",
"(",
"'train'",
",",
"'valid'",
",",
"'test'",
")",
":",
"raise",
"ValueError",
"(",
"'which_set must be one of train, valid, or test'",
")",
"which_set",
"=",
"'val'",
"if",
"which_set",
"==",
"'valid'",
"else",
"which_set",
"patch_images",
"=",
"{",
"}",
"with",
"tar_open",
"(",
"f",
")",
"as",
"tar",
":",
"for",
"info_obj",
"in",
"tar",
":",
"if",
"not",
"info_obj",
".",
"name",
".",
"endswith",
"(",
"'.JPEG'",
")",
":",
"continue",
"# Pretty sure that '/' is used for tarfile regardless of",
"# os.path.sep, but I officially don't care about Windows.",
"tokens",
"=",
"info_obj",
".",
"name",
".",
"split",
"(",
"'/'",
")",
"file_which_set",
"=",
"tokens",
"[",
"-",
"2",
"]",
"if",
"file_which_set",
"!=",
"which_set",
":",
"continue",
"filename",
"=",
"tokens",
"[",
"-",
"1",
"]",
"patch_images",
"[",
"filename",
"]",
"=",
"tar",
".",
"extractfile",
"(",
"info_obj",
".",
"name",
")",
".",
"read",
"(",
")",
"return",
"patch_images"
] |
Extracts a dict of the "patch images" for ILSVRC2010.
Parameters
----------
f : str or file-like object
The filename or file-handle to the patch images TAR file.
which_set : str
Which set of images to extract. One of 'train', 'valid', 'test'.
Returns
-------
dict
    A dictionary mapping filenames (without path) to a bytes
    object containing the replacement image.
Notes
-----
Certain images in the distributed archives are blank, or display
an "image not available" banner. A separate TAR file of
"patch images" is distributed with the corrected versions of
these. It is this archive that this function is intended to read.
|
[
"Extracts",
"a",
"dict",
"of",
"the",
"patch",
"images",
"for",
"ILSVRC2010",
"."
] |
1d6292dc25e3a115544237e392e61bff6631d23c
|
https://github.com/mila-iqia/fuel/blob/1d6292dc25e3a115544237e392e61bff6631d23c/fuel/converters/ilsvrc2010.py#L533-L573
|
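A short sketch of `extract_patch_images`; the patch archive filename is a placeholder.

from fuel.converters.ilsvrc2010 import extract_patch_images

train_patches = extract_patch_images('patch_images.tar.gz', 'train')
print(len(train_patches), 'replacement JPEGs for the training set')
# Any other value for which_set raises
# ValueError('which_set must be one of train, valid, or test').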
11,921
|
mila-iqia/fuel
|
fuel/converters/cifar10.py
|
convert_cifar10
|
def convert_cifar10(directory, output_directory,
output_filename='cifar10.hdf5'):
"""Converts the CIFAR-10 dataset to HDF5.
Converts the CIFAR-10 dataset to an HDF5 dataset compatible with
:class:`fuel.datasets.CIFAR10`. The converted dataset is saved as
'cifar10.hdf5'.
It assumes the existence of the following file:
* `cifar-10-python.tar.gz`
Parameters
----------
directory : str
Directory in which input files reside.
output_directory : str
Directory in which to save the converted dataset.
output_filename : str, optional
Name of the saved dataset. Defaults to 'cifar10.hdf5'.
Returns
-------
output_paths : tuple of str
Single-element tuple containing the path to the converted dataset.
"""
output_path = os.path.join(output_directory, output_filename)
h5file = h5py.File(output_path, mode='w')
input_file = os.path.join(directory, DISTRIBUTION_FILE)
tar_file = tarfile.open(input_file, 'r:gz')
train_batches = []
for batch in range(1, 6):
file = tar_file.extractfile(
'cifar-10-batches-py/data_batch_%d' % batch)
try:
if six.PY3:
array = cPickle.load(file, encoding='latin1')
else:
array = cPickle.load(file)
train_batches.append(array)
finally:
file.close()
train_features = numpy.concatenate(
[batch['data'].reshape(batch['data'].shape[0], 3, 32, 32)
for batch in train_batches])
train_labels = numpy.concatenate(
[numpy.array(batch['labels'], dtype=numpy.uint8)
for batch in train_batches])
train_labels = numpy.expand_dims(train_labels, 1)
file = tar_file.extractfile('cifar-10-batches-py/test_batch')
try:
if six.PY3:
test = cPickle.load(file, encoding='latin1')
else:
test = cPickle.load(file)
finally:
file.close()
test_features = test['data'].reshape(test['data'].shape[0],
3, 32, 32)
test_labels = numpy.array(test['labels'], dtype=numpy.uint8)
test_labels = numpy.expand_dims(test_labels, 1)
data = (('train', 'features', train_features),
('train', 'targets', train_labels),
('test', 'features', test_features),
('test', 'targets', test_labels))
fill_hdf5_file(h5file, data)
h5file['features'].dims[0].label = 'batch'
h5file['features'].dims[1].label = 'channel'
h5file['features'].dims[2].label = 'height'
h5file['features'].dims[3].label = 'width'
h5file['targets'].dims[0].label = 'batch'
h5file['targets'].dims[1].label = 'index'
h5file.flush()
h5file.close()
return (output_path,)
|
python
|
def convert_cifar10(directory, output_directory,
output_filename='cifar10.hdf5'):
"""Converts the CIFAR-10 dataset to HDF5.
Converts the CIFAR-10 dataset to an HDF5 dataset compatible with
:class:`fuel.datasets.CIFAR10`. The converted dataset is saved as
'cifar10.hdf5'.
It assumes the existence of the following file:
* `cifar-10-python.tar.gz`
Parameters
----------
directory : str
Directory in which input files reside.
output_directory : str
Directory in which to save the converted dataset.
output_filename : str, optional
Name of the saved dataset. Defaults to 'cifar10.hdf5'.
Returns
-------
output_paths : tuple of str
Single-element tuple containing the path to the converted dataset.
"""
output_path = os.path.join(output_directory, output_filename)
h5file = h5py.File(output_path, mode='w')
input_file = os.path.join(directory, DISTRIBUTION_FILE)
tar_file = tarfile.open(input_file, 'r:gz')
train_batches = []
for batch in range(1, 6):
file = tar_file.extractfile(
'cifar-10-batches-py/data_batch_%d' % batch)
try:
if six.PY3:
array = cPickle.load(file, encoding='latin1')
else:
array = cPickle.load(file)
train_batches.append(array)
finally:
file.close()
train_features = numpy.concatenate(
[batch['data'].reshape(batch['data'].shape[0], 3, 32, 32)
for batch in train_batches])
train_labels = numpy.concatenate(
[numpy.array(batch['labels'], dtype=numpy.uint8)
for batch in train_batches])
train_labels = numpy.expand_dims(train_labels, 1)
file = tar_file.extractfile('cifar-10-batches-py/test_batch')
try:
if six.PY3:
test = cPickle.load(file, encoding='latin1')
else:
test = cPickle.load(file)
finally:
file.close()
test_features = test['data'].reshape(test['data'].shape[0],
3, 32, 32)
test_labels = numpy.array(test['labels'], dtype=numpy.uint8)
test_labels = numpy.expand_dims(test_labels, 1)
data = (('train', 'features', train_features),
('train', 'targets', train_labels),
('test', 'features', test_features),
('test', 'targets', test_labels))
fill_hdf5_file(h5file, data)
h5file['features'].dims[0].label = 'batch'
h5file['features'].dims[1].label = 'channel'
h5file['features'].dims[2].label = 'height'
h5file['features'].dims[3].label = 'width'
h5file['targets'].dims[0].label = 'batch'
h5file['targets'].dims[1].label = 'index'
h5file.flush()
h5file.close()
return (output_path,)
|
[
"def",
"convert_cifar10",
"(",
"directory",
",",
"output_directory",
",",
"output_filename",
"=",
"'cifar10.hdf5'",
")",
":",
"output_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"output_directory",
",",
"output_filename",
")",
"h5file",
"=",
"h5py",
".",
"File",
"(",
"output_path",
",",
"mode",
"=",
"'w'",
")",
"input_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"directory",
",",
"DISTRIBUTION_FILE",
")",
"tar_file",
"=",
"tarfile",
".",
"open",
"(",
"input_file",
",",
"'r:gz'",
")",
"train_batches",
"=",
"[",
"]",
"for",
"batch",
"in",
"range",
"(",
"1",
",",
"6",
")",
":",
"file",
"=",
"tar_file",
".",
"extractfile",
"(",
"'cifar-10-batches-py/data_batch_%d'",
"%",
"batch",
")",
"try",
":",
"if",
"six",
".",
"PY3",
":",
"array",
"=",
"cPickle",
".",
"load",
"(",
"file",
",",
"encoding",
"=",
"'latin1'",
")",
"else",
":",
"array",
"=",
"cPickle",
".",
"load",
"(",
"file",
")",
"train_batches",
".",
"append",
"(",
"array",
")",
"finally",
":",
"file",
".",
"close",
"(",
")",
"train_features",
"=",
"numpy",
".",
"concatenate",
"(",
"[",
"batch",
"[",
"'data'",
"]",
".",
"reshape",
"(",
"batch",
"[",
"'data'",
"]",
".",
"shape",
"[",
"0",
"]",
",",
"3",
",",
"32",
",",
"32",
")",
"for",
"batch",
"in",
"train_batches",
"]",
")",
"train_labels",
"=",
"numpy",
".",
"concatenate",
"(",
"[",
"numpy",
".",
"array",
"(",
"batch",
"[",
"'labels'",
"]",
",",
"dtype",
"=",
"numpy",
".",
"uint8",
")",
"for",
"batch",
"in",
"train_batches",
"]",
")",
"train_labels",
"=",
"numpy",
".",
"expand_dims",
"(",
"train_labels",
",",
"1",
")",
"file",
"=",
"tar_file",
".",
"extractfile",
"(",
"'cifar-10-batches-py/test_batch'",
")",
"try",
":",
"if",
"six",
".",
"PY3",
":",
"test",
"=",
"cPickle",
".",
"load",
"(",
"file",
",",
"encoding",
"=",
"'latin1'",
")",
"else",
":",
"test",
"=",
"cPickle",
".",
"load",
"(",
"file",
")",
"finally",
":",
"file",
".",
"close",
"(",
")",
"test_features",
"=",
"test",
"[",
"'data'",
"]",
".",
"reshape",
"(",
"test",
"[",
"'data'",
"]",
".",
"shape",
"[",
"0",
"]",
",",
"3",
",",
"32",
",",
"32",
")",
"test_labels",
"=",
"numpy",
".",
"array",
"(",
"test",
"[",
"'labels'",
"]",
",",
"dtype",
"=",
"numpy",
".",
"uint8",
")",
"test_labels",
"=",
"numpy",
".",
"expand_dims",
"(",
"test_labels",
",",
"1",
")",
"data",
"=",
"(",
"(",
"'train'",
",",
"'features'",
",",
"train_features",
")",
",",
"(",
"'train'",
",",
"'targets'",
",",
"train_labels",
")",
",",
"(",
"'test'",
",",
"'features'",
",",
"test_features",
")",
",",
"(",
"'test'",
",",
"'targets'",
",",
"test_labels",
")",
")",
"fill_hdf5_file",
"(",
"h5file",
",",
"data",
")",
"h5file",
"[",
"'features'",
"]",
".",
"dims",
"[",
"0",
"]",
".",
"label",
"=",
"'batch'",
"h5file",
"[",
"'features'",
"]",
".",
"dims",
"[",
"1",
"]",
".",
"label",
"=",
"'channel'",
"h5file",
"[",
"'features'",
"]",
".",
"dims",
"[",
"2",
"]",
".",
"label",
"=",
"'height'",
"h5file",
"[",
"'features'",
"]",
".",
"dims",
"[",
"3",
"]",
".",
"label",
"=",
"'width'",
"h5file",
"[",
"'targets'",
"]",
".",
"dims",
"[",
"0",
"]",
".",
"label",
"=",
"'batch'",
"h5file",
"[",
"'targets'",
"]",
".",
"dims",
"[",
"1",
"]",
".",
"label",
"=",
"'index'",
"h5file",
".",
"flush",
"(",
")",
"h5file",
".",
"close",
"(",
")",
"return",
"(",
"output_path",
",",
")"
] |
Converts the CIFAR-10 dataset to HDF5.
Converts the CIFAR-10 dataset to an HDF5 dataset compatible with
:class:`fuel.datasets.CIFAR10`. The converted dataset is saved as
'cifar10.hdf5'.
It assumes the existence of the following file:
* `cifar-10-python.tar.gz`
Parameters
----------
directory : str
Directory in which input files reside.
output_directory : str
Directory in which to save the converted dataset.
output_filename : str, optional
Name of the saved dataset. Defaults to 'cifar10.hdf5'.
Returns
-------
output_paths : tuple of str
Single-element tuple containing the path to the converted dataset.
|
[
"Converts",
"the",
"CIFAR",
"-",
"10",
"dataset",
"to",
"HDF5",
"."
] |
1d6292dc25e3a115544237e392e61bff6631d23c
|
https://github.com/mila-iqia/fuel/blob/1d6292dc25e3a115544237e392e61bff6631d23c/fuel/converters/cifar10.py#L15-L97
|
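A usage sketch for `convert_cifar10`, assuming `cifar-10-python.tar.gz` sits in the current directory; the paths are example values.

from fuel.converters.cifar10 import convert_cifar10

output_paths = convert_cifar10(directory='.', output_directory='.')
print(output_paths)  # ('./cifar10.hdf5',)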
11,922
|
mila-iqia/fuel
|
fuel/converters/base.py
|
check_exists
|
def check_exists(required_files):
"""Decorator that checks if required files exist before running.
Parameters
----------
required_files : list of str
A list of strings indicating the filenames of regular files
(not directories) that should be found in the input directory
(which is the first argument to the wrapped function).
Returns
-------
wrapper : function
A function that takes a function and returns a wrapped function.
The function returned by `wrapper` will include input file
existence verification.
Notes
-----
Assumes that the directory in which to find the input files is
provided as the first argument, with the argument name `directory`.
"""
def function_wrapper(f):
@wraps(f)
def wrapped(directory, *args, **kwargs):
missing = []
for filename in required_files:
if not os.path.isfile(os.path.join(directory, filename)):
missing.append(filename)
if len(missing) > 0:
raise MissingInputFiles('Required files missing', missing)
return f(directory, *args, **kwargs)
return wrapped
return function_wrapper
|
python
|
def check_exists(required_files):
"""Decorator that checks if required files exist before running.
Parameters
----------
required_files : list of str
A list of strings indicating the filenames of regular files
(not directories) that should be found in the input directory
(which is the first argument to the wrapped function).
Returns
-------
wrapper : function
A function that takes a function and returns a wrapped function.
The function returned by `wrapper` will include input file
existence verification.
Notes
-----
Assumes that the directory in which to find the input files is
provided as the first argument, with the argument name `directory`.
"""
def function_wrapper(f):
@wraps(f)
def wrapped(directory, *args, **kwargs):
missing = []
for filename in required_files:
if not os.path.isfile(os.path.join(directory, filename)):
missing.append(filename)
if len(missing) > 0:
raise MissingInputFiles('Required files missing', missing)
return f(directory, *args, **kwargs)
return wrapped
return function_wrapper
|
[
"def",
"check_exists",
"(",
"required_files",
")",
":",
"def",
"function_wrapper",
"(",
"f",
")",
":",
"@",
"wraps",
"(",
"f",
")",
"def",
"wrapped",
"(",
"directory",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"missing",
"=",
"[",
"]",
"for",
"filename",
"in",
"required_files",
":",
"if",
"not",
"os",
".",
"path",
".",
"isfile",
"(",
"os",
".",
"path",
".",
"join",
"(",
"directory",
",",
"filename",
")",
")",
":",
"missing",
".",
"append",
"(",
"filename",
")",
"if",
"len",
"(",
"missing",
")",
">",
"0",
":",
"raise",
"MissingInputFiles",
"(",
"'Required files missing'",
",",
"missing",
")",
"return",
"f",
"(",
"directory",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"wrapped",
"return",
"function_wrapper"
] |
Decorator that checks if required files exist before running.
Parameters
----------
required_files : list of str
A list of strings indicating the filenames of regular files
(not directories) that should be found in the input directory
(which is the first argument to the wrapped function).
Returns
-------
wrapper : function
A function that takes a function and returns a wrapped function.
The function returned by `wrapper` will include input file
existence verification.
Notes
-----
Assumes that the directory in which to find the input files is
provided as the first argument, with the argument name `directory`.
|
[
"Decorator",
"that",
"checks",
"if",
"required",
"files",
"exist",
"before",
"running",
"."
] |
1d6292dc25e3a115544237e392e61bff6631d23c
|
https://github.com/mila-iqia/fuel/blob/1d6292dc25e3a115544237e392e61bff6631d23c/fuel/converters/base.py#L13-L47
|
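A sketch of decorating a converter with `check_exists`; the wrapped function and required filename are hypothetical.

from fuel.converters.base import check_exists

@check_exists(required_files=['raw_data.csv'])  # hypothetical input file
def convert_example(directory, output_directory, output_filename='example.hdf5'):
    # The body only runs if <directory>/raw_data.csv exists; otherwise the
    # wrapper raises MissingInputFiles before this point is reached.
    return (output_filename,)

print(convert_example('.', '.'))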
11,923
|
mila-iqia/fuel
|
fuel/converters/base.py
|
fill_hdf5_file
|
def fill_hdf5_file(h5file, data):
"""Fills an HDF5 file in a H5PYDataset-compatible manner.
Parameters
----------
h5file : :class:`h5py.File`
File handle for an HDF5 file.
data : tuple of tuple
One element per split/source pair. Each element consists of a
tuple of (split_name, source_name, data_array, comment), where
* 'split_name' is a string identifier for the split name
* 'source_name' is a string identifier for the source name
* 'data_array' is a :class:`numpy.ndarray` containing the data
for this split/source pair
* 'comment' is a comment string for the split/source pair
The 'comment' element can optionally be omitted.
"""
# Check that all sources for a split have the same length
split_names = set(split_tuple[0] for split_tuple in data)
for name in split_names:
lengths = [len(split_tuple[2]) for split_tuple in data
if split_tuple[0] == name]
if not all(le == lengths[0] for le in lengths):
raise ValueError("split '{}' has sources that ".format(name) +
"vary in length")
# Initialize split dictionary
split_dict = dict([(split_name, {}) for split_name in split_names])
# Compute total source lengths and check that splits have the same dtype
# across a source
source_names = set(split_tuple[1] for split_tuple in data)
for name in source_names:
splits = [s for s in data if s[1] == name]
indices = numpy.cumsum([0] + [len(s[2]) for s in splits])
if not all(s[2].dtype == splits[0][2].dtype for s in splits):
raise ValueError("source '{}' has splits that ".format(name) +
"vary in dtype")
if not all(s[2].shape[1:] == splits[0][2].shape[1:] for s in splits):
raise ValueError("source '{}' has splits that ".format(name) +
"vary in shapes")
dataset = h5file.create_dataset(
name, (sum(len(s[2]) for s in splits),) + splits[0][2].shape[1:],
dtype=splits[0][2].dtype)
dataset[...] = numpy.concatenate([s[2] for s in splits], axis=0)
for i, j, s in zip(indices[:-1], indices[1:], splits):
if len(s) == 4:
split_dict[s[0]][name] = (i, j, None, s[3])
else:
split_dict[s[0]][name] = (i, j)
h5file.attrs['split'] = H5PYDataset.create_split_array(split_dict)
|
python
|
def fill_hdf5_file(h5file, data):
"""Fills an HDF5 file in a H5PYDataset-compatible manner.
Parameters
----------
h5file : :class:`h5py.File`
File handle for an HDF5 file.
data : tuple of tuple
One element per split/source pair. Each element consists of a
tuple of (split_name, source_name, data_array, comment), where
* 'split_name' is a string identifier for the split name
* 'source_name' is a string identifier for the source name
* 'data_array' is a :class:`numpy.ndarray` containing the data
for this split/source pair
* 'comment' is a comment string for the split/source pair
The 'comment' element can optionally be omitted.
"""
# Check that all sources for a split have the same length
split_names = set(split_tuple[0] for split_tuple in data)
for name in split_names:
lengths = [len(split_tuple[2]) for split_tuple in data
if split_tuple[0] == name]
if not all(le == lengths[0] for le in lengths):
raise ValueError("split '{}' has sources that ".format(name) +
"vary in length")
# Initialize split dictionary
split_dict = dict([(split_name, {}) for split_name in split_names])
# Compute total source lengths and check that splits have the same dtype
# across a source
source_names = set(split_tuple[1] for split_tuple in data)
for name in source_names:
splits = [s for s in data if s[1] == name]
indices = numpy.cumsum([0] + [len(s[2]) for s in splits])
if not all(s[2].dtype == splits[0][2].dtype for s in splits):
raise ValueError("source '{}' has splits that ".format(name) +
"vary in dtype")
if not all(s[2].shape[1:] == splits[0][2].shape[1:] for s in splits):
raise ValueError("source '{}' has splits that ".format(name) +
"vary in shapes")
dataset = h5file.create_dataset(
name, (sum(len(s[2]) for s in splits),) + splits[0][2].shape[1:],
dtype=splits[0][2].dtype)
dataset[...] = numpy.concatenate([s[2] for s in splits], axis=0)
for i, j, s in zip(indices[:-1], indices[1:], splits):
if len(s) == 4:
split_dict[s[0]][name] = (i, j, None, s[3])
else:
split_dict[s[0]][name] = (i, j)
h5file.attrs['split'] = H5PYDataset.create_split_array(split_dict)
|
[
"def",
"fill_hdf5_file",
"(",
"h5file",
",",
"data",
")",
":",
"# Check that all sources for a split have the same length",
"split_names",
"=",
"set",
"(",
"split_tuple",
"[",
"0",
"]",
"for",
"split_tuple",
"in",
"data",
")",
"for",
"name",
"in",
"split_names",
":",
"lengths",
"=",
"[",
"len",
"(",
"split_tuple",
"[",
"2",
"]",
")",
"for",
"split_tuple",
"in",
"data",
"if",
"split_tuple",
"[",
"0",
"]",
"==",
"name",
"]",
"if",
"not",
"all",
"(",
"le",
"==",
"lengths",
"[",
"0",
"]",
"for",
"le",
"in",
"lengths",
")",
":",
"raise",
"ValueError",
"(",
"\"split '{}' has sources that \"",
".",
"format",
"(",
"name",
")",
"+",
"\"vary in length\"",
")",
"# Initialize split dictionary",
"split_dict",
"=",
"dict",
"(",
"[",
"(",
"split_name",
",",
"{",
"}",
")",
"for",
"split_name",
"in",
"split_names",
"]",
")",
"# Compute total source lengths and check that splits have the same dtype",
"# across a source",
"source_names",
"=",
"set",
"(",
"split_tuple",
"[",
"1",
"]",
"for",
"split_tuple",
"in",
"data",
")",
"for",
"name",
"in",
"source_names",
":",
"splits",
"=",
"[",
"s",
"for",
"s",
"in",
"data",
"if",
"s",
"[",
"1",
"]",
"==",
"name",
"]",
"indices",
"=",
"numpy",
".",
"cumsum",
"(",
"[",
"0",
"]",
"+",
"[",
"len",
"(",
"s",
"[",
"2",
"]",
")",
"for",
"s",
"in",
"splits",
"]",
")",
"if",
"not",
"all",
"(",
"s",
"[",
"2",
"]",
".",
"dtype",
"==",
"splits",
"[",
"0",
"]",
"[",
"2",
"]",
".",
"dtype",
"for",
"s",
"in",
"splits",
")",
":",
"raise",
"ValueError",
"(",
"\"source '{}' has splits that \"",
".",
"format",
"(",
"name",
")",
"+",
"\"vary in dtype\"",
")",
"if",
"not",
"all",
"(",
"s",
"[",
"2",
"]",
".",
"shape",
"[",
"1",
":",
"]",
"==",
"splits",
"[",
"0",
"]",
"[",
"2",
"]",
".",
"shape",
"[",
"1",
":",
"]",
"for",
"s",
"in",
"splits",
")",
":",
"raise",
"ValueError",
"(",
"\"source '{}' has splits that \"",
".",
"format",
"(",
"name",
")",
"+",
"\"vary in shapes\"",
")",
"dataset",
"=",
"h5file",
".",
"create_dataset",
"(",
"name",
",",
"(",
"sum",
"(",
"len",
"(",
"s",
"[",
"2",
"]",
")",
"for",
"s",
"in",
"splits",
")",
",",
")",
"+",
"splits",
"[",
"0",
"]",
"[",
"2",
"]",
".",
"shape",
"[",
"1",
":",
"]",
",",
"dtype",
"=",
"splits",
"[",
"0",
"]",
"[",
"2",
"]",
".",
"dtype",
")",
"dataset",
"[",
"...",
"]",
"=",
"numpy",
".",
"concatenate",
"(",
"[",
"s",
"[",
"2",
"]",
"for",
"s",
"in",
"splits",
"]",
",",
"axis",
"=",
"0",
")",
"for",
"i",
",",
"j",
",",
"s",
"in",
"zip",
"(",
"indices",
"[",
":",
"-",
"1",
"]",
",",
"indices",
"[",
"1",
":",
"]",
",",
"splits",
")",
":",
"if",
"len",
"(",
"s",
")",
"==",
"4",
":",
"split_dict",
"[",
"s",
"[",
"0",
"]",
"]",
"[",
"name",
"]",
"=",
"(",
"i",
",",
"j",
",",
"None",
",",
"s",
"[",
"3",
"]",
")",
"else",
":",
"split_dict",
"[",
"s",
"[",
"0",
"]",
"]",
"[",
"name",
"]",
"=",
"(",
"i",
",",
"j",
")",
"h5file",
".",
"attrs",
"[",
"'split'",
"]",
"=",
"H5PYDataset",
".",
"create_split_array",
"(",
"split_dict",
")"
] |
Fills an HDF5 file in a H5PYDataset-compatible manner.
Parameters
----------
h5file : :class:`h5py.File`
File handle for an HDF5 file.
data : tuple of tuple
One element per split/source pair. Each element consists of a
tuple of (split_name, source_name, data_array, comment), where
* 'split_name' is a string identifier for the split name
* 'source_name' is a string identifier for the source name
* 'data_array' is a :class:`numpy.ndarray` containing the data
for this split/source pair
* 'comment' is a comment string for the split/source pair
The 'comment' element can optionally be omitted.
|
[
"Fills",
"an",
"HDF5",
"file",
"in",
"a",
"H5PYDataset",
"-",
"compatible",
"manner",
"."
] |
1d6292dc25e3a115544237e392e61bff6631d23c
|
https://github.com/mila-iqia/fuel/blob/1d6292dc25e3a115544237e392e61bff6631d23c/fuel/converters/base.py#L50-L103
|
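A self-contained sketch of `fill_hdf5_file` with tiny arbitrary arrays; the shapes and output filename are example values.

import h5py
import numpy
from fuel.converters.base import fill_hdf5_file

train_x = numpy.zeros((8, 3), dtype='float32')
train_y = numpy.zeros((8, 1), dtype='uint8')
test_x = numpy.ones((2, 3), dtype='float32')
test_y = numpy.ones((2, 1), dtype='uint8')
data = (('train', 'features', train_x), ('train', 'targets', train_y),
        ('test', 'features', test_x), ('test', 'targets', test_y))
with h5py.File('toy.hdf5', mode='w') as h5file:
    # Creates the 'features' and 'targets' datasets and the 'split' attribute.
    fill_hdf5_file(h5file, data)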
11,924
|
mila-iqia/fuel
|
fuel/converters/base.py
|
progress_bar
|
def progress_bar(name, maxval, prefix='Converting'):
"""Manages a progress bar for a conversion.
Parameters
----------
name : str
Name of the file being converted.
maxval : int
Total number of steps for the conversion.
"""
widgets = ['{} {}: '.format(prefix, name), Percentage(), ' ',
Bar(marker='=', left='[', right=']'), ' ', ETA()]
bar = ProgressBar(widgets=widgets, max_value=maxval, fd=sys.stdout).start()
try:
yield bar
finally:
bar.update(maxval)
bar.finish()
|
python
|
def progress_bar(name, maxval, prefix='Converting'):
"""Manages a progress bar for a conversion.
Parameters
----------
name : str
Name of the file being converted.
maxval : int
Total number of steps for the conversion.
"""
widgets = ['{} {}: '.format(prefix, name), Percentage(), ' ',
Bar(marker='=', left='[', right=']'), ' ', ETA()]
bar = ProgressBar(widgets=widgets, max_value=maxval, fd=sys.stdout).start()
try:
yield bar
finally:
bar.update(maxval)
bar.finish()
|
[
"def",
"progress_bar",
"(",
"name",
",",
"maxval",
",",
"prefix",
"=",
"'Converting'",
")",
":",
"widgets",
"=",
"[",
"'{} {}: '",
".",
"format",
"(",
"prefix",
",",
"name",
")",
",",
"Percentage",
"(",
")",
",",
"' '",
",",
"Bar",
"(",
"marker",
"=",
"'='",
",",
"left",
"=",
"'['",
",",
"right",
"=",
"']'",
")",
",",
"' '",
",",
"ETA",
"(",
")",
"]",
"bar",
"=",
"ProgressBar",
"(",
"widgets",
"=",
"widgets",
",",
"max_value",
"=",
"maxval",
",",
"fd",
"=",
"sys",
".",
"stdout",
")",
".",
"start",
"(",
")",
"try",
":",
"yield",
"bar",
"finally",
":",
"bar",
".",
"update",
"(",
"maxval",
")",
"bar",
".",
"finish",
"(",
")"
] |
Manages a progress bar for a conversion.
Parameters
----------
name : str
Name of the file being converted.
maxval : int
Total number of steps for the conversion.
|
[
"Manages",
"a",
"progress",
"bar",
"for",
"a",
"conversion",
"."
] |
1d6292dc25e3a115544237e392e61bff6631d23c
|
https://github.com/mila-iqia/fuel/blob/1d6292dc25e3a115544237e392e61bff6631d23c/fuel/converters/base.py#L107-L125
|
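A small sketch of `progress_bar`, which the converters above use as a context manager; the name and step count are arbitrary.

from fuel.converters.base import progress_bar

with progress_bar('toy.hdf5', maxval=100) as bar:
    for i in range(100):
        bar.update(i + 1)  # renders "Converting toy.hdf5: ..% [===] ETA"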
11,925
|
mila-iqia/fuel
|
fuel/converters/iris.py
|
convert_iris
|
def convert_iris(directory, output_directory, output_filename='iris.hdf5'):
"""Convert the Iris dataset to HDF5.
Converts the Iris dataset to an HDF5 dataset compatible with
:class:`fuel.datasets.Iris`. The converted dataset is
saved as 'iris.hdf5'.
This method assumes the existence of the file `iris.data`.
Parameters
----------
directory : str
Directory in which input files reside.
output_directory : str
Directory in which to save the converted dataset.
output_filename : str, optional
        Name of the saved dataset. Defaults to 'iris.hdf5'.
Returns
-------
output_paths : tuple of str
Single-element tuple containing the path to the converted dataset.
"""
classes = {b'Iris-setosa': 0, b'Iris-versicolor': 1, b'Iris-virginica': 2}
data = numpy.loadtxt(
os.path.join(directory, 'iris.data'),
converters={4: lambda x: classes[x]},
delimiter=',')
features = data[:, :-1].astype('float32')
targets = data[:, -1].astype('uint8').reshape((-1, 1))
data = (('all', 'features', features),
('all', 'targets', targets))
output_path = os.path.join(output_directory, output_filename)
h5file = h5py.File(output_path, mode='w')
fill_hdf5_file(h5file, data)
h5file['features'].dims[0].label = 'batch'
h5file['features'].dims[1].label = 'feature'
h5file['targets'].dims[0].label = 'batch'
h5file['targets'].dims[1].label = 'index'
h5file.flush()
h5file.close()
return (output_path,)
|
python
|
def convert_iris(directory, output_directory, output_filename='iris.hdf5'):
"""Convert the Iris dataset to HDF5.
Converts the Iris dataset to an HDF5 dataset compatible with
:class:`fuel.datasets.Iris`. The converted dataset is
saved as 'iris.hdf5'.
This method assumes the existence of the file `iris.data`.
Parameters
----------
directory : str
Directory in which input files reside.
output_directory : str
Directory in which to save the converted dataset.
output_filename : str, optional
        Name of the saved dataset. Defaults to 'iris.hdf5'.
Returns
-------
output_paths : tuple of str
Single-element tuple containing the path to the converted dataset.
"""
classes = {b'Iris-setosa': 0, b'Iris-versicolor': 1, b'Iris-virginica': 2}
data = numpy.loadtxt(
os.path.join(directory, 'iris.data'),
converters={4: lambda x: classes[x]},
delimiter=',')
features = data[:, :-1].astype('float32')
targets = data[:, -1].astype('uint8').reshape((-1, 1))
data = (('all', 'features', features),
('all', 'targets', targets))
output_path = os.path.join(output_directory, output_filename)
h5file = h5py.File(output_path, mode='w')
fill_hdf5_file(h5file, data)
h5file['features'].dims[0].label = 'batch'
h5file['features'].dims[1].label = 'feature'
h5file['targets'].dims[0].label = 'batch'
h5file['targets'].dims[1].label = 'index'
h5file.flush()
h5file.close()
return (output_path,)
|
[
"def",
"convert_iris",
"(",
"directory",
",",
"output_directory",
",",
"output_filename",
"=",
"'iris.hdf5'",
")",
":",
"classes",
"=",
"{",
"b'Iris-setosa'",
":",
"0",
",",
"b'Iris-versicolor'",
":",
"1",
",",
"b'Iris-virginica'",
":",
"2",
"}",
"data",
"=",
"numpy",
".",
"loadtxt",
"(",
"os",
".",
"path",
".",
"join",
"(",
"directory",
",",
"'iris.data'",
")",
",",
"converters",
"=",
"{",
"4",
":",
"lambda",
"x",
":",
"classes",
"[",
"x",
"]",
"}",
",",
"delimiter",
"=",
"','",
")",
"features",
"=",
"data",
"[",
":",
",",
":",
"-",
"1",
"]",
".",
"astype",
"(",
"'float32'",
")",
"targets",
"=",
"data",
"[",
":",
",",
"-",
"1",
"]",
".",
"astype",
"(",
"'uint8'",
")",
".",
"reshape",
"(",
"(",
"-",
"1",
",",
"1",
")",
")",
"data",
"=",
"(",
"(",
"'all'",
",",
"'features'",
",",
"features",
")",
",",
"(",
"'all'",
",",
"'targets'",
",",
"targets",
")",
")",
"output_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"output_directory",
",",
"output_filename",
")",
"h5file",
"=",
"h5py",
".",
"File",
"(",
"output_path",
",",
"mode",
"=",
"'w'",
")",
"fill_hdf5_file",
"(",
"h5file",
",",
"data",
")",
"h5file",
"[",
"'features'",
"]",
".",
"dims",
"[",
"0",
"]",
".",
"label",
"=",
"'batch'",
"h5file",
"[",
"'features'",
"]",
".",
"dims",
"[",
"1",
"]",
".",
"label",
"=",
"'feature'",
"h5file",
"[",
"'targets'",
"]",
".",
"dims",
"[",
"0",
"]",
".",
"label",
"=",
"'batch'",
"h5file",
"[",
"'targets'",
"]",
".",
"dims",
"[",
"1",
"]",
".",
"label",
"=",
"'index'",
"h5file",
".",
"flush",
"(",
")",
"h5file",
".",
"close",
"(",
")",
"return",
"(",
"output_path",
",",
")"
] |
Convert the Iris dataset to HDF5.
Converts the Iris dataset to an HDF5 dataset compatible with
:class:`fuel.datasets.Iris`. The converted dataset is
saved as 'iris.hdf5'.
This method assumes the existence of the file `iris.data`.
Parameters
----------
directory : str
Directory in which input files reside.
output_directory : str
Directory in which to save the converted dataset.
output_filename : str, optional
    Name of the saved dataset. Defaults to 'iris.hdf5'.
Returns
-------
output_paths : tuple of str
Single-element tuple containing the path to the converted dataset.
|
[
"Convert",
"the",
"Iris",
"dataset",
"to",
"HDF5",
"."
] |
1d6292dc25e3a115544237e392e61bff6631d23c
|
https://github.com/mila-iqia/fuel/blob/1d6292dc25e3a115544237e392e61bff6631d23c/fuel/converters/iris.py#L9-L54
|
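A usage sketch for `convert_iris`, assuming the UCI `iris.data` file is in the current directory.

from fuel.converters.iris import convert_iris

output_paths = convert_iris(directory='.', output_directory='.')
print(output_paths)  # ('./iris.hdf5',)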
11,926
|
mila-iqia/fuel
|
fuel/downloaders/ilsvrc2012.py
|
fill_subparser
|
def fill_subparser(subparser):
"""Sets up a subparser to download the ILSVRC2012 dataset files.
Note that you will need to use `--url-prefix` to download the
non-public files (namely, the TARs of images). This is a single
prefix that is common to all distributed files, which you can
obtain by registering at the ImageNet website [DOWNLOAD].
Note that these files are quite large and you may be better off
simply downloading them separately and running ``fuel-convert``.
.. [DOWNLOAD] http://www.image-net.org/download-images
Parameters
----------
subparser : :class:`argparse.ArgumentParser`
Subparser handling the `ilsvrc2012` command.
"""
urls = ([None] * len(ALL_FILES))
filenames = list(ALL_FILES)
subparser.set_defaults(urls=urls, filenames=filenames)
subparser.add_argument('-P', '--url-prefix', type=str, default=None,
help="URL prefix to prepend to the filenames of "
"non-public files, in order to download them. "
"Be sure to include the trailing slash.")
return default_downloader
|
python
|
def fill_subparser(subparser):
"""Sets up a subparser to download the ILSVRC2012 dataset files.
Note that you will need to use `--url-prefix` to download the
non-public files (namely, the TARs of images). This is a single
prefix that is common to all distributed files, which you can
obtain by registering at the ImageNet website [DOWNLOAD].
Note that these files are quite large and you may be better off
simply downloading them separately and running ``fuel-convert``.
.. [DOWNLOAD] http://www.image-net.org/download-images
Parameters
----------
subparser : :class:`argparse.ArgumentParser`
Subparser handling the `ilsvrc2012` command.
"""
urls = ([None] * len(ALL_FILES))
filenames = list(ALL_FILES)
subparser.set_defaults(urls=urls, filenames=filenames)
subparser.add_argument('-P', '--url-prefix', type=str, default=None,
help="URL prefix to prepend to the filenames of "
"non-public files, in order to download them. "
"Be sure to include the trailing slash.")
return default_downloader
|
[
"def",
"fill_subparser",
"(",
"subparser",
")",
":",
"urls",
"=",
"(",
"[",
"None",
"]",
"*",
"len",
"(",
"ALL_FILES",
")",
")",
"filenames",
"=",
"list",
"(",
"ALL_FILES",
")",
"subparser",
".",
"set_defaults",
"(",
"urls",
"=",
"urls",
",",
"filenames",
"=",
"filenames",
")",
"subparser",
".",
"add_argument",
"(",
"'-P'",
",",
"'--url-prefix'",
",",
"type",
"=",
"str",
",",
"default",
"=",
"None",
",",
"help",
"=",
"\"URL prefix to prepend to the filenames of \"",
"\"non-public files, in order to download them. \"",
"\"Be sure to include the trailing slash.\"",
")",
"return",
"default_downloader"
] |
Sets up a subparser to download the ILSVRC2012 dataset files.
Note that you will need to use `--url-prefix` to download the
non-public files (namely, the TARs of images). This is a single
prefix that is common to all distributed files, which you can
obtain by registering at the ImageNet website [DOWNLOAD].
Note that these files are quite large and you may be better off
simply downloading them separately and running ``fuel-convert``.
.. [DOWNLOAD] http://www.image-net.org/download-images
Parameters
----------
subparser : :class:`argparse.ArgumentParser`
Subparser handling the `ilsvrc2012` command.
|
[
"Sets",
"up",
"a",
"subparser",
"to",
"download",
"the",
"ILSVRC2012",
"dataset",
"files",
"."
] |
1d6292dc25e3a115544237e392e61bff6631d23c
|
https://github.com/mila-iqia/fuel/blob/1d6292dc25e3a115544237e392e61bff6631d23c/fuel/downloaders/ilsvrc2012.py#L5-L32
|
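A sketch of wiring the downloader subparser up by hand (normally the `fuel-download` entry point does this); the prefix URL is a placeholder.

import argparse
from fuel.downloaders.ilsvrc2012 import fill_subparser

parser = argparse.ArgumentParser()
subparser = parser.add_subparsers().add_parser('ilsvrc2012')
downloader = fill_subparser(subparser)  # returns default_downloader
args = subparser.parse_args(['--url-prefix', 'http://example.com/ilsvrc2012/'])
print(args.url_prefix, len(args.filenames))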
11,927
|
mila-iqia/fuel
|
fuel/transformers/sequences.py
|
Window._get_target_index
|
def _get_target_index(self):
"""Return the index where the target window starts."""
return (self.index + self.source_window * (not self.overlapping) +
self.offset)
|
python
|
def _get_target_index(self):
"""Return the index where the target window starts."""
return (self.index + self.source_window * (not self.overlapping) +
self.offset)
|
[
"def",
"_get_target_index",
"(",
"self",
")",
":",
"return",
"(",
"self",
".",
"index",
"+",
"self",
".",
"source_window",
"*",
"(",
"not",
"self",
".",
"overlapping",
")",
"+",
"self",
".",
"offset",
")"
] |
Return the index where the target window starts.
|
[
"Return",
"the",
"index",
"where",
"the",
"target",
"window",
"starts",
"."
] |
1d6292dc25e3a115544237e392e61bff6631d23c
|
https://github.com/mila-iqia/fuel/blob/1d6292dc25e3a115544237e392e61bff6631d23c/fuel/transformers/sequences.py#L66-L69
|
11,928
|
mila-iqia/fuel
|
fuel/transformers/sequences.py
|
Window._get_end_index
|
def _get_end_index(self):
"""Return the end of both windows."""
return max(self.index + self.source_window,
self._get_target_index() + self.target_window)
|
python
|
def _get_end_index(self):
"""Return the end of both windows."""
return max(self.index + self.source_window,
self._get_target_index() + self.target_window)
|
[
"def",
"_get_end_index",
"(",
"self",
")",
":",
"return",
"max",
"(",
"self",
".",
"index",
"+",
"self",
".",
"source_window",
",",
"self",
".",
"_get_target_index",
"(",
")",
"+",
"self",
".",
"target_window",
")"
] |
Return the end of both windows.
|
[
"Return",
"the",
"end",
"of",
"both",
"windows",
"."
] |
1d6292dc25e3a115544237e392e61bff6631d23c
|
https://github.com/mila-iqia/fuel/blob/1d6292dc25e3a115544237e392e61bff6631d23c/fuel/transformers/sequences.py#L71-L74
|
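A plain-Python restatement (a sketch, not fuel code) of the index arithmetic that the two `Window` helpers above implement, using example attribute values.

index, source_window, target_window = 0, 5, 5
overlapping, offset = False, 0

# Non-overlapping windows: the target starts right after the source window.
target_index = index + source_window * (not overlapping) + offset     # -> 5
end_index = max(index + source_window, target_index + target_window)  # -> 10
print(target_index, end_index)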
11,929
|
mila-iqia/fuel
|
fuel/converters/svhn.py
|
convert_svhn
|
def convert_svhn(which_format, directory, output_directory,
output_filename=None):
"""Converts the SVHN dataset to HDF5.
Converts the SVHN dataset [SVHN] to an HDF5 dataset compatible
with :class:`fuel.datasets.SVHN`. The converted dataset is
saved as 'svhn_format_1.hdf5' or 'svhn_format_2.hdf5', depending
on the `which_format` argument.
.. [SVHN] Yuval Netzer, Tao Wang, Adam Coates, Alessandro Bissacco,
Bo Wu, Andrew Y. Ng. *Reading Digits in Natural Images with
Unsupervised Feature Learning*, NIPS Workshop on Deep Learning
and Unsupervised Feature Learning, 2011.
Parameters
----------
which_format : int
Either 1 or 2. Determines which format (format 1: full numbers
or format 2: cropped digits) to convert.
directory : str
Directory in which input files reside.
output_directory : str
Directory in which to save the converted dataset.
output_filename : str, optional
Name of the saved dataset. Defaults to 'svhn_format_1.hdf5' or
'svhn_format_2.hdf5', depending on `which_format`.
Returns
-------
output_paths : tuple of str
Single-element tuple containing the path to the converted dataset.
"""
if which_format not in (1, 2):
raise ValueError("SVHN format needs to be either 1 or 2.")
if not output_filename:
output_filename = 'svhn_format_{}.hdf5'.format(which_format)
if which_format == 1:
return convert_svhn_format_1(
directory, output_directory, output_filename)
else:
return convert_svhn_format_2(
directory, output_directory, output_filename)
|
python
|
def convert_svhn(which_format, directory, output_directory,
output_filename=None):
"""Converts the SVHN dataset to HDF5.
Converts the SVHN dataset [SVHN] to an HDF5 dataset compatible
with :class:`fuel.datasets.SVHN`. The converted dataset is
saved as 'svhn_format_1.hdf5' or 'svhn_format_2.hdf5', depending
on the `which_format` argument.
.. [SVHN] Yuval Netzer, Tao Wang, Adam Coates, Alessandro Bissacco,
Bo Wu, Andrew Y. Ng. *Reading Digits in Natural Images with
Unsupervised Feature Learning*, NIPS Workshop on Deep Learning
and Unsupervised Feature Learning, 2011.
Parameters
----------
which_format : int
Either 1 or 2. Determines which format (format 1: full numbers
or format 2: cropped digits) to convert.
directory : str
Directory in which input files reside.
output_directory : str
Directory in which to save the converted dataset.
output_filename : str, optional
Name of the saved dataset. Defaults to 'svhn_format_1.hdf5' or
'svhn_format_2.hdf5', depending on `which_format`.
Returns
-------
output_paths : tuple of str
Single-element tuple containing the path to the converted dataset.
"""
if which_format not in (1, 2):
raise ValueError("SVHN format needs to be either 1 or 2.")
if not output_filename:
output_filename = 'svhn_format_{}.hdf5'.format(which_format)
if which_format == 1:
return convert_svhn_format_1(
directory, output_directory, output_filename)
else:
return convert_svhn_format_2(
directory, output_directory, output_filename)
|
[
"def",
"convert_svhn",
"(",
"which_format",
",",
"directory",
",",
"output_directory",
",",
"output_filename",
"=",
"None",
")",
":",
"if",
"which_format",
"not",
"in",
"(",
"1",
",",
"2",
")",
":",
"raise",
"ValueError",
"(",
"\"SVHN format needs to be either 1 or 2.\"",
")",
"if",
"not",
"output_filename",
":",
"output_filename",
"=",
"'svhn_format_{}.hdf5'",
".",
"format",
"(",
"which_format",
")",
"if",
"which_format",
"==",
"1",
":",
"return",
"convert_svhn_format_1",
"(",
"directory",
",",
"output_directory",
",",
"output_filename",
")",
"else",
":",
"return",
"convert_svhn_format_2",
"(",
"directory",
",",
"output_directory",
",",
"output_filename",
")"
] |
Converts the SVHN dataset to HDF5.
Converts the SVHN dataset [SVHN] to an HDF5 dataset compatible
with :class:`fuel.datasets.SVHN`. The converted dataset is
saved as 'svhn_format_1.hdf5' or 'svhn_format_2.hdf5', depending
on the `which_format` argument.
.. [SVHN] Yuval Netzer, Tao Wang, Adam Coates, Alessandro Bissacco,
Bo Wu, Andrew Y. Ng. *Reading Digits in Natural Images with
Unsupervised Feature Learning*, NIPS Workshop on Deep Learning
and Unsupervised Feature Learning, 2011.
Parameters
----------
which_format : int
Either 1 or 2. Determines which format (format 1: full numbers
or format 2: cropped digits) to convert.
directory : str
Directory in which input files reside.
output_directory : str
Directory in which to save the converted dataset.
output_filename : str, optional
Name of the saved dataset. Defaults to 'svhn_format_1.hdf5' or
'svhn_format_2.hdf5', depending on `which_format`.
Returns
-------
output_paths : tuple of str
Single-element tuple containing the path to the converted dataset.
|
[
"Converts",
"the",
"SVHN",
"dataset",
"to",
"HDF5",
"."
] |
1d6292dc25e3a115544237e392e61bff6631d23c
|
https://github.com/mila-iqia/fuel/blob/1d6292dc25e3a115544237e392e61bff6631d23c/fuel/converters/svhn.py#L327-L369
|
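The converter above can also be driven directly from Python; a minimal sketch follows, assuming the raw SVHN files for the chosen format already sit in the (hypothetical) input directory.

from fuel.converters.svhn import convert_svhn

# Convert format 2 (cropped digits); both directories are placeholders.
output_paths = convert_svhn(
    which_format=2,
    directory='/data/raw/svhn',
    output_directory='/data/fuel')
print(output_paths)  # ('/data/fuel/svhn_format_2.hdf5',)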
11,930
|
mila-iqia/fuel
|
fuel/utils/formats.py
|
open_
|
def open_(filename, mode='r', encoding=None):
"""Open a text file with encoding and optional gzip compression.
Note that on legacy Python any encoding other than ``None`` or opening
GZipped files will return an unpicklable file-like object.
Parameters
----------
filename : str
The filename to read.
mode : str, optional
The mode with which to open the file. Defaults to `r`.
encoding : str, optional
The encoding to use (see the codecs documentation_ for supported
values). Defaults to ``None``.
.. _documentation:
https://docs.python.org/3/library/codecs.html#standard-encodings
"""
if filename.endswith('.gz'):
if six.PY2:
zf = io.BufferedReader(gzip.open(filename, mode))
if encoding:
return codecs.getreader(encoding)(zf)
else:
return zf
else:
return io.BufferedReader(gzip.open(filename, mode,
encoding=encoding))
if six.PY2:
if encoding:
return codecs.open(filename, mode, encoding=encoding)
else:
return open(filename, mode)
else:
return open(filename, mode, encoding=encoding)
|
python
|
def open_(filename, mode='r', encoding=None):
"""Open a text file with encoding and optional gzip compression.
Note that on legacy Python any encoding other than ``None`` or opening
GZipped files will return an unpicklable file-like object.
Parameters
----------
filename : str
The filename to read.
mode : str, optional
The mode with which to open the file. Defaults to `r`.
encoding : str, optional
The encoding to use (see the codecs documentation_ for supported
values). Defaults to ``None``.
.. _documentation:
https://docs.python.org/3/library/codecs.html#standard-encodings
"""
if filename.endswith('.gz'):
if six.PY2:
zf = io.BufferedReader(gzip.open(filename, mode))
if encoding:
return codecs.getreader(encoding)(zf)
else:
return zf
else:
return io.BufferedReader(gzip.open(filename, mode,
encoding=encoding))
if six.PY2:
if encoding:
return codecs.open(filename, mode, encoding=encoding)
else:
return open(filename, mode)
else:
return open(filename, mode, encoding=encoding)
|
[
"def",
"open_",
"(",
"filename",
",",
"mode",
"=",
"'r'",
",",
"encoding",
"=",
"None",
")",
":",
"if",
"filename",
".",
"endswith",
"(",
"'.gz'",
")",
":",
"if",
"six",
".",
"PY2",
":",
"zf",
"=",
"io",
".",
"BufferedReader",
"(",
"gzip",
".",
"open",
"(",
"filename",
",",
"mode",
")",
")",
"if",
"encoding",
":",
"return",
"codecs",
".",
"getreader",
"(",
"encoding",
")",
"(",
"zf",
")",
"else",
":",
"return",
"zf",
"else",
":",
"return",
"io",
".",
"BufferedReader",
"(",
"gzip",
".",
"open",
"(",
"filename",
",",
"mode",
",",
"encoding",
"=",
"encoding",
")",
")",
"if",
"six",
".",
"PY2",
":",
"if",
"encoding",
":",
"return",
"codecs",
".",
"open",
"(",
"filename",
",",
"mode",
",",
"encoding",
"=",
"encoding",
")",
"else",
":",
"return",
"open",
"(",
"filename",
",",
"mode",
")",
"else",
":",
"return",
"open",
"(",
"filename",
",",
"mode",
",",
"encoding",
"=",
"encoding",
")"
] |
Open a text file with encoding and optional gzip compression.
Note that on legacy Python any encoding other than ``None`` or opening
GZipped files will return an unpicklable file-like object.
Parameters
----------
filename : str
The filename to read.
mode : str, optional
The mode with which to open the file. Defaults to `r`.
encoding : str, optional
The encoding to use (see the codecs documentation_ for supported
values). Defaults to ``None``.
.. _documentation:
https://docs.python.org/3/library/codecs.html#standard-encodings
|
[
"Open",
"a",
"text",
"file",
"with",
"encoding",
"and",
"optional",
"gzip",
"compression",
"."
] |
1d6292dc25e3a115544237e392e61bff6631d23c
|
https://github.com/mila-iqia/fuel/blob/1d6292dc25e3a115544237e392e61bff6631d23c/fuel/utils/formats.py#L9-L45
|
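A short usage sketch for the helper above; the file paths are hypothetical.

from fuel.utils.formats import open_

# Read a UTF-8 text file through the helper.
with open_('/data/vocab.txt', mode='r', encoding='utf-8') as f:
    vocab = [line.strip() for line in f]

# Gzipped files are wrapped transparently; with no encoding the handle
# yields bytes lines on Python 3.
with open_('/data/corpus.txt.gz') as f:
    first_line = next(iter(f))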
11,931
|
mila-iqia/fuel
|
fuel/utils/formats.py
|
tar_open
|
def tar_open(f):
"""Open either a filename or a file-like object as a TarFile.
Parameters
----------
f : str or file-like object
The filename or file-like object from which to read.
Returns
-------
TarFile
A `TarFile` instance.
"""
if isinstance(f, six.string_types):
return tarfile.open(name=f)
else:
return tarfile.open(fileobj=f)
|
python
|
def tar_open(f):
"""Open either a filename or a file-like object as a TarFile.
Parameters
----------
f : str or file-like object
The filename or file-like object from which to read.
Returns
-------
TarFile
A `TarFile` instance.
"""
if isinstance(f, six.string_types):
return tarfile.open(name=f)
else:
return tarfile.open(fileobj=f)
|
[
"def",
"tar_open",
"(",
"f",
")",
":",
"if",
"isinstance",
"(",
"f",
",",
"six",
".",
"string_types",
")",
":",
"return",
"tarfile",
".",
"open",
"(",
"name",
"=",
"f",
")",
"else",
":",
"return",
"tarfile",
".",
"open",
"(",
"fileobj",
"=",
"f",
")"
] |
Open either a filename or a file-like object as a TarFile.
Parameters
----------
f : str or file-like object
The filename or file-like object from which to read.
Returns
-------
TarFile
A `TarFile` instance.
|
[
"Open",
"either",
"a",
"filename",
"or",
"a",
"file",
"-",
"like",
"object",
"as",
"a",
"TarFile",
"."
] |
1d6292dc25e3a115544237e392e61bff6631d23c
|
https://github.com/mila-iqia/fuel/blob/1d6292dc25e3a115544237e392e61bff6631d23c/fuel/utils/formats.py#L48-L65
|
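Both accepted argument types can be exercised with a short sketch; the archive path is hypothetical.

from io import BytesIO
from fuel.utils.formats import tar_open

# Open by path; tarfile auto-detects the compression.
archive = tar_open('/data/cifar-10-python.tar.gz')
print(archive.getnames()[:3])
archive.close()

# The same helper accepts an in-memory file-like object.
with open('/data/cifar-10-python.tar.gz', 'rb') as f:
    tar_open(BytesIO(f.read())).close()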
11,932
|
mila-iqia/fuel
|
fuel/utils/cache.py
|
copy_from_server_to_local
|
def copy_from_server_to_local(dataset_remote_dir, dataset_local_dir,
remote_fname, local_fname):
"""Copies a remote file locally.
Parameters
----------
remote_fname : str
Remote file to copy
local_fname : str
Path and name of the local copy to be made of the remote file.
"""
log.debug("Copying file `{}` to a local directory `{}`."
.format(remote_fname, dataset_local_dir))
head, tail = os.path.split(local_fname)
head += os.path.sep
if not os.path.exists(head):
os.makedirs(os.path.dirname(head))
shutil.copyfile(remote_fname, local_fname)
# Copy the original group id and file permission
st = os.stat(remote_fname)
os.chmod(local_fname, st.st_mode)
    # If the user has read access to the data but is not a member of
    # the group, they can't set the group, so we must catch the
    # exception. We still want to try this for directories where only
    # members of the group can read the data.
try:
os.chown(local_fname, -1, st.st_gid)
except OSError:
pass
# Need to give group write permission to the folders
# For the locking mechanism
# Try to set the original group as above
dirs = os.path.dirname(local_fname).replace(dataset_local_dir, '')
sep = dirs.split(os.path.sep)
if sep[0] == "":
sep = sep[1:]
for i in range(len(sep)):
orig_p = os.path.join(dataset_remote_dir, *sep[:i + 1])
new_p = os.path.join(dataset_local_dir, *sep[:i + 1])
orig_st = os.stat(orig_p)
new_st = os.stat(new_p)
if not new_st.st_mode & stat.S_IWGRP:
os.chmod(new_p, new_st.st_mode | stat.S_IWGRP)
if orig_st.st_gid != new_st.st_gid:
try:
os.chown(new_p, -1, orig_st.st_gid)
except OSError:
pass
|
python
|
def copy_from_server_to_local(dataset_remote_dir, dataset_local_dir,
remote_fname, local_fname):
"""Copies a remote file locally.
Parameters
----------
remote_fname : str
Remote file to copy
local_fname : str
Path and name of the local copy to be made of the remote file.
"""
log.debug("Copying file `{}` to a local directory `{}`."
.format(remote_fname, dataset_local_dir))
head, tail = os.path.split(local_fname)
head += os.path.sep
if not os.path.exists(head):
os.makedirs(os.path.dirname(head))
shutil.copyfile(remote_fname, local_fname)
# Copy the original group id and file permission
st = os.stat(remote_fname)
os.chmod(local_fname, st.st_mode)
    # If the user has read access to the data but is not a member of
    # the group, they can't set the group, so we must catch the
    # exception. We still want to try this for directories where only
    # members of the group can read the data.
try:
os.chown(local_fname, -1, st.st_gid)
except OSError:
pass
# Need to give group write permission to the folders
# For the locking mechanism
# Try to set the original group as above
dirs = os.path.dirname(local_fname).replace(dataset_local_dir, '')
sep = dirs.split(os.path.sep)
if sep[0] == "":
sep = sep[1:]
for i in range(len(sep)):
orig_p = os.path.join(dataset_remote_dir, *sep[:i + 1])
new_p = os.path.join(dataset_local_dir, *sep[:i + 1])
orig_st = os.stat(orig_p)
new_st = os.stat(new_p)
if not new_st.st_mode & stat.S_IWGRP:
os.chmod(new_p, new_st.st_mode | stat.S_IWGRP)
if orig_st.st_gid != new_st.st_gid:
try:
os.chown(new_p, -1, orig_st.st_gid)
except OSError:
pass
|
[
"def",
"copy_from_server_to_local",
"(",
"dataset_remote_dir",
",",
"dataset_local_dir",
",",
"remote_fname",
",",
"local_fname",
")",
":",
"log",
".",
"debug",
"(",
"\"Copying file `{}` to a local directory `{}`.\"",
".",
"format",
"(",
"remote_fname",
",",
"dataset_local_dir",
")",
")",
"head",
",",
"tail",
"=",
"os",
".",
"path",
".",
"split",
"(",
"local_fname",
")",
"head",
"+=",
"os",
".",
"path",
".",
"sep",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"head",
")",
":",
"os",
".",
"makedirs",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"head",
")",
")",
"shutil",
".",
"copyfile",
"(",
"remote_fname",
",",
"local_fname",
")",
"# Copy the original group id and file permission",
"st",
"=",
"os",
".",
"stat",
"(",
"remote_fname",
")",
"os",
".",
"chmod",
"(",
"local_fname",
",",
"st",
".",
"st_mode",
")",
"# If the user have read access to the data, but not a member",
"# of the group, he can't set the group. So we must catch the",
"# exception. But we still want to do this, for directory where",
"# only member of the group can read that data.",
"try",
":",
"os",
".",
"chown",
"(",
"local_fname",
",",
"-",
"1",
",",
"st",
".",
"st_gid",
")",
"except",
"OSError",
":",
"pass",
"# Need to give group write permission to the folders",
"# For the locking mechanism",
"# Try to set the original group as above",
"dirs",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"local_fname",
")",
".",
"replace",
"(",
"dataset_local_dir",
",",
"''",
")",
"sep",
"=",
"dirs",
".",
"split",
"(",
"os",
".",
"path",
".",
"sep",
")",
"if",
"sep",
"[",
"0",
"]",
"==",
"\"\"",
":",
"sep",
"=",
"sep",
"[",
"1",
":",
"]",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"sep",
")",
")",
":",
"orig_p",
"=",
"os",
".",
"path",
".",
"join",
"(",
"dataset_remote_dir",
",",
"*",
"sep",
"[",
":",
"i",
"+",
"1",
"]",
")",
"new_p",
"=",
"os",
".",
"path",
".",
"join",
"(",
"dataset_local_dir",
",",
"*",
"sep",
"[",
":",
"i",
"+",
"1",
"]",
")",
"orig_st",
"=",
"os",
".",
"stat",
"(",
"orig_p",
")",
"new_st",
"=",
"os",
".",
"stat",
"(",
"new_p",
")",
"if",
"not",
"new_st",
".",
"st_mode",
"&",
"stat",
".",
"S_IWGRP",
":",
"os",
".",
"chmod",
"(",
"new_p",
",",
"new_st",
".",
"st_mode",
"|",
"stat",
".",
"S_IWGRP",
")",
"if",
"orig_st",
".",
"st_gid",
"!=",
"new_st",
".",
"st_gid",
":",
"try",
":",
"os",
".",
"chown",
"(",
"new_p",
",",
"-",
"1",
",",
"orig_st",
".",
"st_gid",
")",
"except",
"OSError",
":",
"pass"
] |
Copies a remote file locally.
Parameters
----------
remote_fname : str
Remote file to copy
local_fname : str
Path and name of the local copy to be made of the remote file.
|
[
"Copies",
"a",
"remote",
"file",
"locally",
"."
] |
1d6292dc25e3a115544237e392e61bff6631d23c
|
https://github.com/mila-iqia/fuel/blob/1d6292dc25e3a115544237e392e61bff6631d23c/fuel/utils/cache.py#L217-L269
|
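A self-contained sketch of the call above; throw-away temporary directories stand in for the shared dataset root and the local cache used in practice.

import os
import tempfile

from fuel.utils.cache import copy_from_server_to_local

remote_dir = tempfile.mkdtemp()
local_dir = tempfile.mkdtemp()

# Fake a remote file under the remote root.
remote_file = os.path.join(remote_dir, 'mnist', 'mnist.hdf5')
os.makedirs(os.path.dirname(remote_file))
with open(remote_file, 'wb') as f:
    f.write(b'dummy payload')

local_file = os.path.join(local_dir, 'mnist', 'mnist.hdf5')
copy_from_server_to_local(remote_dir, local_dir, remote_file, local_file)
print(os.path.exists(local_file))  # True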
11,933
|
mila-iqia/fuel
|
fuel/converters/adult.py
|
convert_to_one_hot
|
def convert_to_one_hot(y):
"""
    converts y into one hot representation.
Parameters
----------
y : list
        A list containing continuous integer values.
Returns
-------
one_hot : numpy.ndarray
A numpy.ndarray object, which is one-hot representation of y.
"""
max_value = max(y)
min_value = min(y)
length = len(y)
one_hot = numpy.zeros((length, (max_value - min_value + 1)))
one_hot[numpy.arange(length), y] = 1
return one_hot
|
python
|
def convert_to_one_hot(y):
"""
    converts y into one hot representation.
Parameters
----------
y : list
        A list containing continuous integer values.
Returns
-------
one_hot : numpy.ndarray
A numpy.ndarray object, which is one-hot representation of y.
"""
max_value = max(y)
min_value = min(y)
length = len(y)
one_hot = numpy.zeros((length, (max_value - min_value + 1)))
one_hot[numpy.arange(length), y] = 1
return one_hot
|
[
"def",
"convert_to_one_hot",
"(",
"y",
")",
":",
"max_value",
"=",
"max",
"(",
"y",
")",
"min_value",
"=",
"min",
"(",
"y",
")",
"length",
"=",
"len",
"(",
"y",
")",
"one_hot",
"=",
"numpy",
".",
"zeros",
"(",
"(",
"length",
",",
"(",
"max_value",
"-",
"min_value",
"+",
"1",
")",
")",
")",
"one_hot",
"[",
"numpy",
".",
"arange",
"(",
"length",
")",
",",
"y",
"]",
"=",
"1",
"return",
"one_hot"
] |
converts y into one hot representation.
Parameters
----------
y : list
    A list containing continuous integer values.
Returns
-------
one_hot : numpy.ndarray
A numpy.ndarray object, which is one-hot representation of y.
|
[
"converts",
"y",
"into",
"one",
"hot",
"reprsentation",
"."
] |
1d6292dc25e3a115544237e392e61bff6631d23c
|
https://github.com/mila-iqia/fuel/blob/1d6292dc25e3a115544237e392e61bff6631d23c/fuel/converters/adult.py#L9-L29
|
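The function is easy to check on a toy input:

from fuel.converters.adult import convert_to_one_hot

print(convert_to_one_hot([0, 2, 1, 2]))
# [[ 1.  0.  0.]
#  [ 0.  0.  1.]
#  [ 0.  1.  0.]
#  [ 0.  0.  1.]]

Note that the column indexing uses the raw label values, so labels are expected to start at 0 even though `min_value` is computed.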
11,934
|
mila-iqia/fuel
|
fuel/converters/binarized_mnist.py
|
convert_binarized_mnist
|
def convert_binarized_mnist(directory, output_directory,
output_filename='binarized_mnist.hdf5'):
"""Converts the binarized MNIST dataset to HDF5.
Converts the binarized MNIST dataset used in R. Salakhutdinov's DBN
paper [DBN] to an HDF5 dataset compatible with
:class:`fuel.datasets.BinarizedMNIST`. The converted dataset is
saved as 'binarized_mnist.hdf5'.
This method assumes the existence of the files
`binarized_mnist_{train,valid,test}.amat`, which are accessible
through Hugo Larochelle's website [HUGO].
.. [DBN] Ruslan Salakhutdinov and Iain Murray, *On the Quantitative
Analysis of Deep Belief Networks*, Proceedings of the 25th
international conference on Machine learning, 2008, pp. 872-879.
Parameters
----------
directory : str
Directory in which input files reside.
output_directory : str
Directory in which to save the converted dataset.
output_filename : str, optional
Name of the saved dataset. Defaults to 'binarized_mnist.hdf5'.
Returns
-------
output_paths : tuple of str
Single-element tuple containing the path to the converted dataset.
"""
output_path = os.path.join(output_directory, output_filename)
h5file = h5py.File(output_path, mode='w')
train_set = numpy.loadtxt(
os.path.join(directory, TRAIN_FILE)).reshape(
(-1, 1, 28, 28)).astype('uint8')
valid_set = numpy.loadtxt(
os.path.join(directory, VALID_FILE)).reshape(
(-1, 1, 28, 28)).astype('uint8')
test_set = numpy.loadtxt(
os.path.join(directory, TEST_FILE)).reshape(
(-1, 1, 28, 28)).astype('uint8')
data = (('train', 'features', train_set),
('valid', 'features', valid_set),
('test', 'features', test_set))
fill_hdf5_file(h5file, data)
for i, label in enumerate(('batch', 'channel', 'height', 'width')):
h5file['features'].dims[i].label = label
h5file.flush()
h5file.close()
return (output_path,)
|
python
|
def convert_binarized_mnist(directory, output_directory,
output_filename='binarized_mnist.hdf5'):
"""Converts the binarized MNIST dataset to HDF5.
Converts the binarized MNIST dataset used in R. Salakhutdinov's DBN
paper [DBN] to an HDF5 dataset compatible with
:class:`fuel.datasets.BinarizedMNIST`. The converted dataset is
saved as 'binarized_mnist.hdf5'.
This method assumes the existence of the files
`binarized_mnist_{train,valid,test}.amat`, which are accessible
through Hugo Larochelle's website [HUGO].
.. [DBN] Ruslan Salakhutdinov and Iain Murray, *On the Quantitative
Analysis of Deep Belief Networks*, Proceedings of the 25th
international conference on Machine learning, 2008, pp. 872-879.
Parameters
----------
directory : str
Directory in which input files reside.
output_directory : str
Directory in which to save the converted dataset.
output_filename : str, optional
Name of the saved dataset. Defaults to 'binarized_mnist.hdf5'.
Returns
-------
output_paths : tuple of str
Single-element tuple containing the path to the converted dataset.
"""
output_path = os.path.join(output_directory, output_filename)
h5file = h5py.File(output_path, mode='w')
train_set = numpy.loadtxt(
os.path.join(directory, TRAIN_FILE)).reshape(
(-1, 1, 28, 28)).astype('uint8')
valid_set = numpy.loadtxt(
os.path.join(directory, VALID_FILE)).reshape(
(-1, 1, 28, 28)).astype('uint8')
test_set = numpy.loadtxt(
os.path.join(directory, TEST_FILE)).reshape(
(-1, 1, 28, 28)).astype('uint8')
data = (('train', 'features', train_set),
('valid', 'features', valid_set),
('test', 'features', test_set))
fill_hdf5_file(h5file, data)
for i, label in enumerate(('batch', 'channel', 'height', 'width')):
h5file['features'].dims[i].label = label
h5file.flush()
h5file.close()
return (output_path,)
|
[
"def",
"convert_binarized_mnist",
"(",
"directory",
",",
"output_directory",
",",
"output_filename",
"=",
"'binarized_mnist.hdf5'",
")",
":",
"output_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"output_directory",
",",
"output_filename",
")",
"h5file",
"=",
"h5py",
".",
"File",
"(",
"output_path",
",",
"mode",
"=",
"'w'",
")",
"train_set",
"=",
"numpy",
".",
"loadtxt",
"(",
"os",
".",
"path",
".",
"join",
"(",
"directory",
",",
"TRAIN_FILE",
")",
")",
".",
"reshape",
"(",
"(",
"-",
"1",
",",
"1",
",",
"28",
",",
"28",
")",
")",
".",
"astype",
"(",
"'uint8'",
")",
"valid_set",
"=",
"numpy",
".",
"loadtxt",
"(",
"os",
".",
"path",
".",
"join",
"(",
"directory",
",",
"VALID_FILE",
")",
")",
".",
"reshape",
"(",
"(",
"-",
"1",
",",
"1",
",",
"28",
",",
"28",
")",
")",
".",
"astype",
"(",
"'uint8'",
")",
"test_set",
"=",
"numpy",
".",
"loadtxt",
"(",
"os",
".",
"path",
".",
"join",
"(",
"directory",
",",
"TEST_FILE",
")",
")",
".",
"reshape",
"(",
"(",
"-",
"1",
",",
"1",
",",
"28",
",",
"28",
")",
")",
".",
"astype",
"(",
"'uint8'",
")",
"data",
"=",
"(",
"(",
"'train'",
",",
"'features'",
",",
"train_set",
")",
",",
"(",
"'valid'",
",",
"'features'",
",",
"valid_set",
")",
",",
"(",
"'test'",
",",
"'features'",
",",
"test_set",
")",
")",
"fill_hdf5_file",
"(",
"h5file",
",",
"data",
")",
"for",
"i",
",",
"label",
"in",
"enumerate",
"(",
"(",
"'batch'",
",",
"'channel'",
",",
"'height'",
",",
"'width'",
")",
")",
":",
"h5file",
"[",
"'features'",
"]",
".",
"dims",
"[",
"i",
"]",
".",
"label",
"=",
"label",
"h5file",
".",
"flush",
"(",
")",
"h5file",
".",
"close",
"(",
")",
"return",
"(",
"output_path",
",",
")"
] |
Converts the binarized MNIST dataset to HDF5.
Converts the binarized MNIST dataset used in R. Salakhutdinov's DBN
paper [DBN] to an HDF5 dataset compatible with
:class:`fuel.datasets.BinarizedMNIST`. The converted dataset is
saved as 'binarized_mnist.hdf5'.
This method assumes the existence of the files
`binarized_mnist_{train,valid,test}.amat`, which are accessible
through Hugo Larochelle's website [HUGO].
.. [DBN] Ruslan Salakhutdinov and Iain Murray, *On the Quantitative
Analysis of Deep Belief Networks*, Proceedings of the 25th
international conference on Machine learning, 2008, pp. 872-879.
Parameters
----------
directory : str
Directory in which input files reside.
output_directory : str
Directory in which to save the converted dataset.
output_filename : str, optional
Name of the saved dataset. Defaults to 'binarized_mnist.hdf5'.
Returns
-------
output_paths : tuple of str
Single-element tuple containing the path to the converted dataset.
|
[
"Converts",
"the",
"binarized",
"MNIST",
"dataset",
"to",
"HDF5",
"."
] |
1d6292dc25e3a115544237e392e61bff6631d23c
|
https://github.com/mila-iqia/fuel/blob/1d6292dc25e3a115544237e392e61bff6631d23c/fuel/converters/binarized_mnist.py#L17-L71
|
11,935
|
mila-iqia/fuel
|
fuel/downloaders/cifar10.py
|
fill_subparser
|
def fill_subparser(subparser):
"""Sets up a subparser to download the CIFAR-10 dataset file.
The CIFAR-10 dataset file is downloaded from Alex Krizhevsky's
website [ALEX].
Parameters
----------
subparser : :class:`argparse.ArgumentParser`
Subparser handling the `cifar10` command.
"""
url = 'http://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz'
filename = 'cifar-10-python.tar.gz'
subparser.set_defaults(urls=[url], filenames=[filename])
return default_downloader
|
python
|
def fill_subparser(subparser):
"""Sets up a subparser to download the CIFAR-10 dataset file.
The CIFAR-10 dataset file is downloaded from Alex Krizhevsky's
website [ALEX].
Parameters
----------
subparser : :class:`argparse.ArgumentParser`
Subparser handling the `cifar10` command.
"""
url = 'http://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz'
filename = 'cifar-10-python.tar.gz'
subparser.set_defaults(urls=[url], filenames=[filename])
return default_downloader
|
[
"def",
"fill_subparser",
"(",
"subparser",
")",
":",
"url",
"=",
"'http://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz'",
"filename",
"=",
"'cifar-10-python.tar.gz'",
"subparser",
".",
"set_defaults",
"(",
"urls",
"=",
"[",
"url",
"]",
",",
"filenames",
"=",
"[",
"filename",
"]",
")",
"return",
"default_downloader"
] |
Sets up a subparser to download the CIFAR-10 dataset file.
The CIFAR-10 dataset file is downloaded from Alex Krizhevsky's
website [ALEX].
Parameters
----------
subparser : :class:`argparse.ArgumentParser`
Subparser handling the `cifar10` command.
|
[
"Sets",
"up",
"a",
"subparser",
"to",
"download",
"the",
"CIFAR",
"-",
"10",
"dataset",
"file",
"."
] |
1d6292dc25e3a115544237e392e61bff6631d23c
|
https://github.com/mila-iqia/fuel/blob/1d6292dc25e3a115544237e392e61bff6631d23c/fuel/downloaders/cifar10.py#L4-L19
|
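A sketch of how the subparser hook is typically wired into an argparse parser; the parser here is a throw-away stand-in for the fuel-download entry point.

import argparse

from fuel.downloaders.cifar10 import fill_subparser

parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers(dest='command')
downloader = fill_subparser(subparsers.add_parser('cifar10'))

args = parser.parse_args(['cifar10'])
print(args.urls)       # ['http://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz']
print(args.filenames)  # ['cifar-10-python.tar.gz']
# `downloader` is the callable that would perform the actual download.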
11,936
|
mila-iqia/fuel
|
fuel/converters/celeba.py
|
convert_celeba_aligned_cropped
|
def convert_celeba_aligned_cropped(directory, output_directory,
output_filename=OUTPUT_FILENAME):
"""Converts the aligned and cropped CelebA dataset to HDF5.
Converts the CelebA dataset to an HDF5 dataset compatible with
:class:`fuel.datasets.CelebA`. The converted dataset is saved as
'celeba_aligned_cropped.hdf5'.
It assumes the existence of the following files:
* `img_align_celeba.zip`
* `list_attr_celeba.txt`
Parameters
----------
directory : str
Directory in which input files reside.
output_directory : str
Directory in which to save the converted dataset.
output_filename : str, optional
Name of the saved dataset. Defaults to
'celeba_aligned_cropped.hdf5'.
Returns
-------
output_paths : tuple of str
Single-element tuple containing the path to the converted
dataset.
"""
output_path = os.path.join(output_directory, output_filename)
h5file = _initialize_conversion(directory, output_path, (218, 178))
features_dataset = h5file['features']
image_file_path = os.path.join(directory, IMAGE_FILE)
with zipfile.ZipFile(image_file_path, 'r') as image_file:
with progress_bar('images', NUM_EXAMPLES) as bar:
for i in range(NUM_EXAMPLES):
image_name = 'img_align_celeba/{:06d}.jpg'.format(i + 1)
features_dataset[i] = numpy.asarray(
Image.open(
image_file.open(image_name, 'r'))).transpose(2, 0, 1)
bar.update(i + 1)
h5file.flush()
h5file.close()
return (output_path,)
|
python
|
def convert_celeba_aligned_cropped(directory, output_directory,
output_filename=OUTPUT_FILENAME):
"""Converts the aligned and cropped CelebA dataset to HDF5.
Converts the CelebA dataset to an HDF5 dataset compatible with
:class:`fuel.datasets.CelebA`. The converted dataset is saved as
'celeba_aligned_cropped.hdf5'.
It assumes the existence of the following files:
* `img_align_celeba.zip`
* `list_attr_celeba.txt`
Parameters
----------
directory : str
Directory in which input files reside.
output_directory : str
Directory in which to save the converted dataset.
output_filename : str, optional
Name of the saved dataset. Defaults to
'celeba_aligned_cropped.hdf5'.
Returns
-------
output_paths : tuple of str
Single-element tuple containing the path to the converted
dataset.
"""
output_path = os.path.join(output_directory, output_filename)
h5file = _initialize_conversion(directory, output_path, (218, 178))
features_dataset = h5file['features']
image_file_path = os.path.join(directory, IMAGE_FILE)
with zipfile.ZipFile(image_file_path, 'r') as image_file:
with progress_bar('images', NUM_EXAMPLES) as bar:
for i in range(NUM_EXAMPLES):
image_name = 'img_align_celeba/{:06d}.jpg'.format(i + 1)
features_dataset[i] = numpy.asarray(
Image.open(
image_file.open(image_name, 'r'))).transpose(2, 0, 1)
bar.update(i + 1)
h5file.flush()
h5file.close()
return (output_path,)
|
[
"def",
"convert_celeba_aligned_cropped",
"(",
"directory",
",",
"output_directory",
",",
"output_filename",
"=",
"OUTPUT_FILENAME",
")",
":",
"output_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"output_directory",
",",
"output_filename",
")",
"h5file",
"=",
"_initialize_conversion",
"(",
"directory",
",",
"output_path",
",",
"(",
"218",
",",
"178",
")",
")",
"features_dataset",
"=",
"h5file",
"[",
"'features'",
"]",
"image_file_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"directory",
",",
"IMAGE_FILE",
")",
"with",
"zipfile",
".",
"ZipFile",
"(",
"image_file_path",
",",
"'r'",
")",
"as",
"image_file",
":",
"with",
"progress_bar",
"(",
"'images'",
",",
"NUM_EXAMPLES",
")",
"as",
"bar",
":",
"for",
"i",
"in",
"range",
"(",
"NUM_EXAMPLES",
")",
":",
"image_name",
"=",
"'img_align_celeba/{:06d}.jpg'",
".",
"format",
"(",
"i",
"+",
"1",
")",
"features_dataset",
"[",
"i",
"]",
"=",
"numpy",
".",
"asarray",
"(",
"Image",
".",
"open",
"(",
"image_file",
".",
"open",
"(",
"image_name",
",",
"'r'",
")",
")",
")",
".",
"transpose",
"(",
"2",
",",
"0",
",",
"1",
")",
"bar",
".",
"update",
"(",
"i",
"+",
"1",
")",
"h5file",
".",
"flush",
"(",
")",
"h5file",
".",
"close",
"(",
")",
"return",
"(",
"output_path",
",",
")"
] |
Converts the aligned and cropped CelebA dataset to HDF5.
Converts the CelebA dataset to an HDF5 dataset compatible with
:class:`fuel.datasets.CelebA`. The converted dataset is saved as
'celeba_aligned_cropped.hdf5'.
It assumes the existence of the following files:
* `img_align_celeba.zip`
* `list_attr_celeba.txt`
Parameters
----------
directory : str
Directory in which input files reside.
output_directory : str
Directory in which to save the converted dataset.
output_filename : str, optional
Name of the saved dataset. Defaults to
'celeba_aligned_cropped.hdf5'.
Returns
-------
output_paths : tuple of str
Single-element tuple containing the path to the converted
dataset.
|
[
"Converts",
"the",
"aligned",
"and",
"cropped",
"CelebA",
"dataset",
"to",
"HDF5",
"."
] |
1d6292dc25e3a115544237e392e61bff6631d23c
|
https://github.com/mila-iqia/fuel/blob/1d6292dc25e3a115544237e392e61bff6631d23c/fuel/converters/celeba.py#L55-L102
|
11,937
|
mila-iqia/fuel
|
fuel/converters/celeba.py
|
convert_celeba
|
def convert_celeba(which_format, directory, output_directory,
output_filename=None):
"""Converts the CelebA dataset to HDF5.
Converts the CelebA dataset to an HDF5 dataset compatible with
:class:`fuel.datasets.CelebA`. The converted dataset is
saved as 'celeba_aligned_cropped.hdf5' or 'celeba_64.hdf5',
depending on the `which_format` argument.
Parameters
----------
which_format : str
Either 'aligned_cropped' or '64'. Determines which format
to convert to.
directory : str
Directory in which input files reside.
output_directory : str
Directory in which to save the converted dataset.
output_filename : str, optional
Name of the saved dataset. Defaults to
'celeba_aligned_cropped.hdf5' or 'celeba_64.hdf5',
depending on `which_format`.
Returns
-------
output_paths : tuple of str
Single-element tuple containing the path to the converted dataset.
"""
if which_format not in ('aligned_cropped', '64'):
raise ValueError("CelebA format needs to be either "
"'aligned_cropped' or '64'.")
if not output_filename:
output_filename = 'celeba_{}.hdf5'.format(which_format)
if which_format == 'aligned_cropped':
return convert_celeba_aligned_cropped(
directory, output_directory, output_filename)
else:
return convert_celeba_64(
directory, output_directory, output_filename)
|
python
|
def convert_celeba(which_format, directory, output_directory,
output_filename=None):
"""Converts the CelebA dataset to HDF5.
Converts the CelebA dataset to an HDF5 dataset compatible with
:class:`fuel.datasets.CelebA`. The converted dataset is
saved as 'celeba_aligned_cropped.hdf5' or 'celeba_64.hdf5',
depending on the `which_format` argument.
Parameters
----------
which_format : str
Either 'aligned_cropped' or '64'. Determines which format
to convert to.
directory : str
Directory in which input files reside.
output_directory : str
Directory in which to save the converted dataset.
output_filename : str, optional
Name of the saved dataset. Defaults to
'celeba_aligned_cropped.hdf5' or 'celeba_64.hdf5',
depending on `which_format`.
Returns
-------
output_paths : tuple of str
Single-element tuple containing the path to the converted dataset.
"""
if which_format not in ('aligned_cropped', '64'):
raise ValueError("CelebA format needs to be either "
"'aligned_cropped' or '64'.")
if not output_filename:
output_filename = 'celeba_{}.hdf5'.format(which_format)
if which_format == 'aligned_cropped':
return convert_celeba_aligned_cropped(
directory, output_directory, output_filename)
else:
return convert_celeba_64(
directory, output_directory, output_filename)
|
[
"def",
"convert_celeba",
"(",
"which_format",
",",
"directory",
",",
"output_directory",
",",
"output_filename",
"=",
"None",
")",
":",
"if",
"which_format",
"not",
"in",
"(",
"'aligned_cropped'",
",",
"'64'",
")",
":",
"raise",
"ValueError",
"(",
"\"CelebA format needs to be either \"",
"\"'aligned_cropped' or '64'.\"",
")",
"if",
"not",
"output_filename",
":",
"output_filename",
"=",
"'celeba_{}.hdf5'",
".",
"format",
"(",
"which_format",
")",
"if",
"which_format",
"==",
"'aligned_cropped'",
":",
"return",
"convert_celeba_aligned_cropped",
"(",
"directory",
",",
"output_directory",
",",
"output_filename",
")",
"else",
":",
"return",
"convert_celeba_64",
"(",
"directory",
",",
"output_directory",
",",
"output_filename",
")"
] |
Converts the CelebA dataset to HDF5.
Converts the CelebA dataset to an HDF5 dataset compatible with
:class:`fuel.datasets.CelebA`. The converted dataset is
saved as 'celeba_aligned_cropped.hdf5' or 'celeba_64.hdf5',
depending on the `which_format` argument.
Parameters
----------
which_format : str
Either 'aligned_cropped' or '64'. Determines which format
to convert to.
directory : str
Directory in which input files reside.
output_directory : str
Directory in which to save the converted dataset.
output_filename : str, optional
Name of the saved dataset. Defaults to
'celeba_aligned_cropped.hdf5' or 'celeba_64.hdf5',
depending on `which_format`.
Returns
-------
output_paths : tuple of str
Single-element tuple containing the path to the converted dataset.
|
[
"Converts",
"the",
"CelebA",
"dataset",
"to",
"HDF5",
"."
] |
1d6292dc25e3a115544237e392e61bff6631d23c
|
https://github.com/mila-iqia/fuel/blob/1d6292dc25e3a115544237e392e61bff6631d23c/fuel/converters/celeba.py#L159-L198
|
11,938
|
mila-iqia/fuel
|
fuel/utils/disk.py
|
disk_usage
|
def disk_usage(path):
    """Return disk usage for the given path, in bytes.
Parameters
----------
path : str
Folder for which to return disk usage
Returns
-------
output : tuple
Tuple containing total space in the folder and currently
used space in the folder
"""
st = os.statvfs(path)
total = st.f_blocks * st.f_frsize
used = (st.f_blocks - st.f_bfree) * st.f_frsize
return total, used
|
python
|
def disk_usage(path):
    """Return disk usage for the given path, in bytes.
Parameters
----------
path : str
Folder for which to return disk usage
Returns
-------
output : tuple
Tuple containing total space in the folder and currently
used space in the folder
"""
st = os.statvfs(path)
total = st.f_blocks * st.f_frsize
used = (st.f_blocks - st.f_bfree) * st.f_frsize
return total, used
|
[
"def",
"disk_usage",
"(",
"path",
")",
":",
"st",
"=",
"os",
".",
"statvfs",
"(",
"path",
")",
"total",
"=",
"st",
".",
"f_blocks",
"*",
"st",
".",
"f_frsize",
"used",
"=",
"(",
"st",
".",
"f_blocks",
"-",
"st",
".",
"f_bfree",
")",
"*",
"st",
".",
"f_frsize",
"return",
"total",
",",
"used"
] |
Return disk usage for the given path, in bytes.
Parameters
----------
path : str
Folder for which to return disk usage
Returns
-------
output : tuple
Tuple containing total space in the folder and currently
used space in the folder
|
[
"Return",
"free",
"usage",
"about",
"the",
"given",
"path",
"in",
"bytes",
"."
] |
1d6292dc25e3a115544237e392e61bff6631d23c
|
https://github.com/mila-iqia/fuel/blob/1d6292dc25e3a115544237e392e61bff6631d23c/fuel/utils/disk.py#L39-L57
|
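A quick sketch; `os.statvfs` is POSIX-only, so this assumes a Unix-like system and an existing path.

from fuel.utils.disk import disk_usage

total, used = disk_usage('/tmp')
print('{:.1f} GiB used of {:.1f} GiB'.format(used / 2.0 ** 30,
                                             total / 2.0 ** 30))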
11,939
|
mila-iqia/fuel
|
fuel/utils/disk.py
|
safe_mkdir
|
def safe_mkdir(folder_name, force_perm=None):
"""Create the specified folder.
If the parent folders do not exist, they are also created.
If the folder already exists, nothing is done.
Parameters
----------
folder_name : str
Name of the folder to create.
    force_perm : int, optional
Mode to use for folder creation.
"""
if os.path.exists(folder_name):
return
intermediary_folders = folder_name.split(os.path.sep)
# Remove invalid elements from intermediary_folders
if intermediary_folders[-1] == "":
intermediary_folders = intermediary_folders[:-1]
if force_perm:
force_perm_path = folder_name.split(os.path.sep)
if force_perm_path[-1] == "":
force_perm_path = force_perm_path[:-1]
for i in range(1, len(intermediary_folders)):
folder_to_create = os.path.sep.join(intermediary_folders[:i + 1])
if os.path.exists(folder_to_create):
continue
os.mkdir(folder_to_create)
if force_perm:
os.chmod(folder_to_create, force_perm)
|
python
|
def safe_mkdir(folder_name, force_perm=None):
"""Create the specified folder.
If the parent folders do not exist, they are also created.
If the folder already exists, nothing is done.
Parameters
----------
folder_name : str
Name of the folder to create.
    force_perm : int, optional
Mode to use for folder creation.
"""
if os.path.exists(folder_name):
return
intermediary_folders = folder_name.split(os.path.sep)
# Remove invalid elements from intermediary_folders
if intermediary_folders[-1] == "":
intermediary_folders = intermediary_folders[:-1]
if force_perm:
force_perm_path = folder_name.split(os.path.sep)
if force_perm_path[-1] == "":
force_perm_path = force_perm_path[:-1]
for i in range(1, len(intermediary_folders)):
folder_to_create = os.path.sep.join(intermediary_folders[:i + 1])
if os.path.exists(folder_to_create):
continue
os.mkdir(folder_to_create)
if force_perm:
os.chmod(folder_to_create, force_perm)
|
[
"def",
"safe_mkdir",
"(",
"folder_name",
",",
"force_perm",
"=",
"None",
")",
":",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"folder_name",
")",
":",
"return",
"intermediary_folders",
"=",
"folder_name",
".",
"split",
"(",
"os",
".",
"path",
".",
"sep",
")",
"# Remove invalid elements from intermediary_folders",
"if",
"intermediary_folders",
"[",
"-",
"1",
"]",
"==",
"\"\"",
":",
"intermediary_folders",
"=",
"intermediary_folders",
"[",
":",
"-",
"1",
"]",
"if",
"force_perm",
":",
"force_perm_path",
"=",
"folder_name",
".",
"split",
"(",
"os",
".",
"path",
".",
"sep",
")",
"if",
"force_perm_path",
"[",
"-",
"1",
"]",
"==",
"\"\"",
":",
"force_perm_path",
"=",
"force_perm_path",
"[",
":",
"-",
"1",
"]",
"for",
"i",
"in",
"range",
"(",
"1",
",",
"len",
"(",
"intermediary_folders",
")",
")",
":",
"folder_to_create",
"=",
"os",
".",
"path",
".",
"sep",
".",
"join",
"(",
"intermediary_folders",
"[",
":",
"i",
"+",
"1",
"]",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"folder_to_create",
")",
":",
"continue",
"os",
".",
"mkdir",
"(",
"folder_to_create",
")",
"if",
"force_perm",
":",
"os",
".",
"chmod",
"(",
"folder_to_create",
",",
"force_perm",
")"
] |
Create the specified folder.
If the parent folders do not exist, they are also created.
If the folder already exists, nothing is done.
Parameters
----------
folder_name : str
Name of the folder to create.
    force_perm : int, optional
Mode to use for folder creation.
|
[
"Create",
"the",
"specified",
"folder",
"."
] |
1d6292dc25e3a115544237e392e61bff6631d23c
|
https://github.com/mila-iqia/fuel/blob/1d6292dc25e3a115544237e392e61bff6631d23c/fuel/utils/disk.py#L60-L93
|
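A short sketch using a temporary root; the trailing separator and the permission mode are illustrative.

import os
import stat
import tempfile

from fuel.utils.disk import safe_mkdir

root = tempfile.mkdtemp()
target = os.path.join(root, 'datasets', 'mnist', '')
safe_mkdir(target, force_perm=stat.S_IRWXU | stat.S_IRWXG)
print(os.path.isdir(os.path.join(root, 'datasets', 'mnist')))  # True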
11,940
|
mila-iqia/fuel
|
fuel/utils/disk.py
|
check_enough_space
|
def check_enough_space(dataset_local_dir, remote_fname, local_fname,
max_disk_usage=0.9):
"""Check if the given local folder has enough space.
Check if the given local folder has enough space to store
the specified remote file.
Parameters
----------
remote_fname : str
Path to the remote file
    dataset_local_dir : str
Path to the local folder
max_disk_usage : float
Fraction indicating how much of the total space in the
local folder can be used before the local cache must stop
adding to it.
Returns
-------
output : boolean
True if there is enough space to store the remote file.
"""
storage_need = os.path.getsize(remote_fname)
storage_total, storage_used = disk_usage(dataset_local_dir)
# Instead of only looking if there's enough space, we ensure we do not
# go over max disk usage level to avoid filling the disk/partition
return ((storage_used + storage_need) <
(storage_total * max_disk_usage))
|
python
|
def check_enough_space(dataset_local_dir, remote_fname, local_fname,
max_disk_usage=0.9):
"""Check if the given local folder has enough space.
Check if the given local folder has enough space to store
the specified remote file.
Parameters
----------
remote_fname : str
Path to the remote file
    dataset_local_dir : str
Path to the local folder
max_disk_usage : float
Fraction indicating how much of the total space in the
local folder can be used before the local cache must stop
adding to it.
Returns
-------
output : boolean
True if there is enough space to store the remote file.
"""
storage_need = os.path.getsize(remote_fname)
storage_total, storage_used = disk_usage(dataset_local_dir)
# Instead of only looking if there's enough space, we ensure we do not
# go over max disk usage level to avoid filling the disk/partition
return ((storage_used + storage_need) <
(storage_total * max_disk_usage))
|
[
"def",
"check_enough_space",
"(",
"dataset_local_dir",
",",
"remote_fname",
",",
"local_fname",
",",
"max_disk_usage",
"=",
"0.9",
")",
":",
"storage_need",
"=",
"os",
".",
"path",
".",
"getsize",
"(",
"remote_fname",
")",
"storage_total",
",",
"storage_used",
"=",
"disk_usage",
"(",
"dataset_local_dir",
")",
"# Instead of only looking if there's enough space, we ensure we do not",
"# go over max disk usage level to avoid filling the disk/partition",
"return",
"(",
"(",
"storage_used",
"+",
"storage_need",
")",
"<",
"(",
"storage_total",
"*",
"max_disk_usage",
")",
")"
] |
Check if the given local folder has enough space.
Check if the given local folder has enough space to store
the specified remote file.
Parameters
----------
remote_fname : str
Path to the remote file
    dataset_local_dir : str
Path to the local folder
max_disk_usage : float
Fraction indicating how much of the total space in the
local folder can be used before the local cache must stop
adding to it.
Returns
-------
output : boolean
True if there is enough space to store the remote file.
|
[
"Check",
"if",
"the",
"given",
"local",
"folder",
"has",
"enough",
"space",
"."
] |
1d6292dc25e3a115544237e392e61bff6631d23c
|
https://github.com/mila-iqia/fuel/blob/1d6292dc25e3a115544237e392e61bff6631d23c/fuel/utils/disk.py#L96-L126
|
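A sketch with a throw-away file standing in for the remote dataset; note that `local_fname` is accepted for the signature but not used by the body above.

import tempfile

from fuel.utils.disk import check_enough_space

local_dir = tempfile.mkdtemp()
with tempfile.NamedTemporaryFile(delete=False) as f:
    f.write(b'x' * 1024)

# Allow the cache to grow to at most 90% of the partition.
print(check_enough_space(local_dir, f.name, None, max_disk_usage=0.9))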
11,941
|
mila-iqia/fuel
|
fuel/converters/cifar100.py
|
convert_cifar100
|
def convert_cifar100(directory, output_directory,
output_filename='cifar100.hdf5'):
"""Converts the CIFAR-100 dataset to HDF5.
Converts the CIFAR-100 dataset to an HDF5 dataset compatible with
:class:`fuel.datasets.CIFAR100`. The converted dataset is saved as
'cifar100.hdf5'.
This method assumes the existence of the following file:
`cifar-100-python.tar.gz`
Parameters
----------
directory : str
Directory in which the required input files reside.
output_directory : str
Directory in which to save the converted dataset.
output_filename : str, optional
Name of the saved dataset. Defaults to 'cifar100.hdf5'.
Returns
-------
output_paths : tuple of str
Single-element tuple containing the path to the converted dataset.
"""
output_path = os.path.join(output_directory, output_filename)
h5file = h5py.File(output_path, mode="w")
input_file = os.path.join(directory, 'cifar-100-python.tar.gz')
tar_file = tarfile.open(input_file, 'r:gz')
file = tar_file.extractfile('cifar-100-python/train')
try:
if six.PY3:
train = cPickle.load(file, encoding='latin1')
else:
train = cPickle.load(file)
finally:
file.close()
train_features = train['data'].reshape(train['data'].shape[0],
3, 32, 32)
train_coarse_labels = numpy.array(train['coarse_labels'],
dtype=numpy.uint8)
train_fine_labels = numpy.array(train['fine_labels'],
dtype=numpy.uint8)
file = tar_file.extractfile('cifar-100-python/test')
try:
if six.PY3:
test = cPickle.load(file, encoding='latin1')
else:
test = cPickle.load(file)
finally:
file.close()
test_features = test['data'].reshape(test['data'].shape[0],
3, 32, 32)
test_coarse_labels = numpy.array(test['coarse_labels'], dtype=numpy.uint8)
test_fine_labels = numpy.array(test['fine_labels'], dtype=numpy.uint8)
data = (('train', 'features', train_features),
('train', 'coarse_labels', train_coarse_labels.reshape((-1, 1))),
('train', 'fine_labels', train_fine_labels.reshape((-1, 1))),
('test', 'features', test_features),
('test', 'coarse_labels', test_coarse_labels.reshape((-1, 1))),
('test', 'fine_labels', test_fine_labels.reshape((-1, 1))))
fill_hdf5_file(h5file, data)
h5file['features'].dims[0].label = 'batch'
h5file['features'].dims[1].label = 'channel'
h5file['features'].dims[2].label = 'height'
h5file['features'].dims[3].label = 'width'
h5file['coarse_labels'].dims[0].label = 'batch'
h5file['coarse_labels'].dims[1].label = 'index'
h5file['fine_labels'].dims[0].label = 'batch'
h5file['fine_labels'].dims[1].label = 'index'
h5file.flush()
h5file.close()
return (output_path,)
|
python
|
def convert_cifar100(directory, output_directory,
output_filename='cifar100.hdf5'):
"""Converts the CIFAR-100 dataset to HDF5.
Converts the CIFAR-100 dataset to an HDF5 dataset compatible with
:class:`fuel.datasets.CIFAR100`. The converted dataset is saved as
'cifar100.hdf5'.
This method assumes the existence of the following file:
`cifar-100-python.tar.gz`
Parameters
----------
directory : str
Directory in which the required input files reside.
output_directory : str
Directory in which to save the converted dataset.
output_filename : str, optional
Name of the saved dataset. Defaults to 'cifar100.hdf5'.
Returns
-------
output_paths : tuple of str
Single-element tuple containing the path to the converted dataset.
"""
output_path = os.path.join(output_directory, output_filename)
h5file = h5py.File(output_path, mode="w")
input_file = os.path.join(directory, 'cifar-100-python.tar.gz')
tar_file = tarfile.open(input_file, 'r:gz')
file = tar_file.extractfile('cifar-100-python/train')
try:
if six.PY3:
train = cPickle.load(file, encoding='latin1')
else:
train = cPickle.load(file)
finally:
file.close()
train_features = train['data'].reshape(train['data'].shape[0],
3, 32, 32)
train_coarse_labels = numpy.array(train['coarse_labels'],
dtype=numpy.uint8)
train_fine_labels = numpy.array(train['fine_labels'],
dtype=numpy.uint8)
file = tar_file.extractfile('cifar-100-python/test')
try:
if six.PY3:
test = cPickle.load(file, encoding='latin1')
else:
test = cPickle.load(file)
finally:
file.close()
test_features = test['data'].reshape(test['data'].shape[0],
3, 32, 32)
test_coarse_labels = numpy.array(test['coarse_labels'], dtype=numpy.uint8)
test_fine_labels = numpy.array(test['fine_labels'], dtype=numpy.uint8)
data = (('train', 'features', train_features),
('train', 'coarse_labels', train_coarse_labels.reshape((-1, 1))),
('train', 'fine_labels', train_fine_labels.reshape((-1, 1))),
('test', 'features', test_features),
('test', 'coarse_labels', test_coarse_labels.reshape((-1, 1))),
('test', 'fine_labels', test_fine_labels.reshape((-1, 1))))
fill_hdf5_file(h5file, data)
h5file['features'].dims[0].label = 'batch'
h5file['features'].dims[1].label = 'channel'
h5file['features'].dims[2].label = 'height'
h5file['features'].dims[3].label = 'width'
h5file['coarse_labels'].dims[0].label = 'batch'
h5file['coarse_labels'].dims[1].label = 'index'
h5file['fine_labels'].dims[0].label = 'batch'
h5file['fine_labels'].dims[1].label = 'index'
h5file.flush()
h5file.close()
return (output_path,)
|
[
"def",
"convert_cifar100",
"(",
"directory",
",",
"output_directory",
",",
"output_filename",
"=",
"'cifar100.hdf5'",
")",
":",
"output_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"output_directory",
",",
"output_filename",
")",
"h5file",
"=",
"h5py",
".",
"File",
"(",
"output_path",
",",
"mode",
"=",
"\"w\"",
")",
"input_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"directory",
",",
"'cifar-100-python.tar.gz'",
")",
"tar_file",
"=",
"tarfile",
".",
"open",
"(",
"input_file",
",",
"'r:gz'",
")",
"file",
"=",
"tar_file",
".",
"extractfile",
"(",
"'cifar-100-python/train'",
")",
"try",
":",
"if",
"six",
".",
"PY3",
":",
"train",
"=",
"cPickle",
".",
"load",
"(",
"file",
",",
"encoding",
"=",
"'latin1'",
")",
"else",
":",
"train",
"=",
"cPickle",
".",
"load",
"(",
"file",
")",
"finally",
":",
"file",
".",
"close",
"(",
")",
"train_features",
"=",
"train",
"[",
"'data'",
"]",
".",
"reshape",
"(",
"train",
"[",
"'data'",
"]",
".",
"shape",
"[",
"0",
"]",
",",
"3",
",",
"32",
",",
"32",
")",
"train_coarse_labels",
"=",
"numpy",
".",
"array",
"(",
"train",
"[",
"'coarse_labels'",
"]",
",",
"dtype",
"=",
"numpy",
".",
"uint8",
")",
"train_fine_labels",
"=",
"numpy",
".",
"array",
"(",
"train",
"[",
"'fine_labels'",
"]",
",",
"dtype",
"=",
"numpy",
".",
"uint8",
")",
"file",
"=",
"tar_file",
".",
"extractfile",
"(",
"'cifar-100-python/test'",
")",
"try",
":",
"if",
"six",
".",
"PY3",
":",
"test",
"=",
"cPickle",
".",
"load",
"(",
"file",
",",
"encoding",
"=",
"'latin1'",
")",
"else",
":",
"test",
"=",
"cPickle",
".",
"load",
"(",
"file",
")",
"finally",
":",
"file",
".",
"close",
"(",
")",
"test_features",
"=",
"test",
"[",
"'data'",
"]",
".",
"reshape",
"(",
"test",
"[",
"'data'",
"]",
".",
"shape",
"[",
"0",
"]",
",",
"3",
",",
"32",
",",
"32",
")",
"test_coarse_labels",
"=",
"numpy",
".",
"array",
"(",
"test",
"[",
"'coarse_labels'",
"]",
",",
"dtype",
"=",
"numpy",
".",
"uint8",
")",
"test_fine_labels",
"=",
"numpy",
".",
"array",
"(",
"test",
"[",
"'fine_labels'",
"]",
",",
"dtype",
"=",
"numpy",
".",
"uint8",
")",
"data",
"=",
"(",
"(",
"'train'",
",",
"'features'",
",",
"train_features",
")",
",",
"(",
"'train'",
",",
"'coarse_labels'",
",",
"train_coarse_labels",
".",
"reshape",
"(",
"(",
"-",
"1",
",",
"1",
")",
")",
")",
",",
"(",
"'train'",
",",
"'fine_labels'",
",",
"train_fine_labels",
".",
"reshape",
"(",
"(",
"-",
"1",
",",
"1",
")",
")",
")",
",",
"(",
"'test'",
",",
"'features'",
",",
"test_features",
")",
",",
"(",
"'test'",
",",
"'coarse_labels'",
",",
"test_coarse_labels",
".",
"reshape",
"(",
"(",
"-",
"1",
",",
"1",
")",
")",
")",
",",
"(",
"'test'",
",",
"'fine_labels'",
",",
"test_fine_labels",
".",
"reshape",
"(",
"(",
"-",
"1",
",",
"1",
")",
")",
")",
")",
"fill_hdf5_file",
"(",
"h5file",
",",
"data",
")",
"h5file",
"[",
"'features'",
"]",
".",
"dims",
"[",
"0",
"]",
".",
"label",
"=",
"'batch'",
"h5file",
"[",
"'features'",
"]",
".",
"dims",
"[",
"1",
"]",
".",
"label",
"=",
"'channel'",
"h5file",
"[",
"'features'",
"]",
".",
"dims",
"[",
"2",
"]",
".",
"label",
"=",
"'height'",
"h5file",
"[",
"'features'",
"]",
".",
"dims",
"[",
"3",
"]",
".",
"label",
"=",
"'width'",
"h5file",
"[",
"'coarse_labels'",
"]",
".",
"dims",
"[",
"0",
"]",
".",
"label",
"=",
"'batch'",
"h5file",
"[",
"'coarse_labels'",
"]",
".",
"dims",
"[",
"1",
"]",
".",
"label",
"=",
"'index'",
"h5file",
"[",
"'fine_labels'",
"]",
".",
"dims",
"[",
"0",
"]",
".",
"label",
"=",
"'batch'",
"h5file",
"[",
"'fine_labels'",
"]",
".",
"dims",
"[",
"1",
"]",
".",
"label",
"=",
"'index'",
"h5file",
".",
"flush",
"(",
")",
"h5file",
".",
"close",
"(",
")",
"return",
"(",
"output_path",
",",
")"
] |
Converts the CIFAR-100 dataset to HDF5.
Converts the CIFAR-100 dataset to an HDF5 dataset compatible with
:class:`fuel.datasets.CIFAR100`. The converted dataset is saved as
'cifar100.hdf5'.
This method assumes the existence of the following file:
`cifar-100-python.tar.gz`
Parameters
----------
directory : str
Directory in which the required input files reside.
output_directory : str
Directory in which to save the converted dataset.
output_filename : str, optional
Name of the saved dataset. Defaults to 'cifar100.hdf5'.
Returns
-------
output_paths : tuple of str
Single-element tuple containing the path to the converted dataset.
|
[
"Converts",
"the",
"CIFAR",
"-",
"100",
"dataset",
"to",
"HDF5",
"."
] |
1d6292dc25e3a115544237e392e61bff6631d23c
|
https://github.com/mila-iqia/fuel/blob/1d6292dc25e3a115544237e392e61bff6631d23c/fuel/converters/cifar100.py#L15-L95
|
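Once converted, the layout described above can be inspected with plain h5py; the path is hypothetical and the shapes assume the standard train and test splits stored back to back.

import h5py

with h5py.File('/data/fuel/cifar100.hdf5', 'r') as h5file:
    print(h5file['features'].shape)      # e.g. (60000, 3, 32, 32)
    print([dim.label for dim in h5file['features'].dims])
    print(h5file['coarse_labels'].shape, h5file['fine_labels'].shape)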
11,942
|
mila-iqia/fuel
|
fuel/transformers/__init__.py
|
ExpectsAxisLabels.verify_axis_labels
|
def verify_axis_labels(self, expected, actual, source_name):
"""Verify that axis labels for a given source are as expected.
Parameters
----------
expected : tuple
A tuple of strings representing the expected axis labels.
actual : tuple or None
A tuple of strings representing the actual axis labels, or
`None` if they could not be determined.
source_name : str
The name of the source being checked. Used for caching the
results of checks so that the check is only performed once.
Notes
-----
Logs a warning in case of `actual=None`, raises an error on
other mismatches.
"""
if not getattr(self, '_checked_axis_labels', False):
self._checked_axis_labels = defaultdict(bool)
if not self._checked_axis_labels[source_name]:
if actual is None:
log.warning("%s instance could not verify (missing) axis "
"expected %s, got None",
self.__class__.__name__, expected)
else:
if expected != actual:
raise AxisLabelsMismatchError("{} expected axis labels "
"{}, got {} instead".format(
self.__class__.__name__,
expected, actual))
self._checked_axis_labels[source_name] = True
|
python
|
def verify_axis_labels(self, expected, actual, source_name):
"""Verify that axis labels for a given source are as expected.
Parameters
----------
expected : tuple
A tuple of strings representing the expected axis labels.
actual : tuple or None
A tuple of strings representing the actual axis labels, or
`None` if they could not be determined.
source_name : str
The name of the source being checked. Used for caching the
results of checks so that the check is only performed once.
Notes
-----
Logs a warning in case of `actual=None`, raises an error on
other mismatches.
"""
if not getattr(self, '_checked_axis_labels', False):
self._checked_axis_labels = defaultdict(bool)
if not self._checked_axis_labels[source_name]:
if actual is None:
log.warning("%s instance could not verify (missing) axis "
"expected %s, got None",
self.__class__.__name__, expected)
else:
if expected != actual:
raise AxisLabelsMismatchError("{} expected axis labels "
"{}, got {} instead".format(
self.__class__.__name__,
expected, actual))
self._checked_axis_labels[source_name] = True
|
[
"def",
"verify_axis_labels",
"(",
"self",
",",
"expected",
",",
"actual",
",",
"source_name",
")",
":",
"if",
"not",
"getattr",
"(",
"self",
",",
"'_checked_axis_labels'",
",",
"False",
")",
":",
"self",
".",
"_checked_axis_labels",
"=",
"defaultdict",
"(",
"bool",
")",
"if",
"not",
"self",
".",
"_checked_axis_labels",
"[",
"source_name",
"]",
":",
"if",
"actual",
"is",
"None",
":",
"log",
".",
"warning",
"(",
"\"%s instance could not verify (missing) axis \"",
"\"expected %s, got None\"",
",",
"self",
".",
"__class__",
".",
"__name__",
",",
"expected",
")",
"else",
":",
"if",
"expected",
"!=",
"actual",
":",
"raise",
"AxisLabelsMismatchError",
"(",
"\"{} expected axis labels \"",
"\"{}, got {} instead\"",
".",
"format",
"(",
"self",
".",
"__class__",
".",
"__name__",
",",
"expected",
",",
"actual",
")",
")",
"self",
".",
"_checked_axis_labels",
"[",
"source_name",
"]",
"=",
"True"
] |
Verify that axis labels for a given source are as expected.
Parameters
----------
expected : tuple
A tuple of strings representing the expected axis labels.
actual : tuple or None
A tuple of strings representing the actual axis labels, or
`None` if they could not be determined.
source_name : str
The name of the source being checked. Used for caching the
results of checks so that the check is only performed once.
Notes
-----
Logs a warning in case of `actual=None`, raises an error on
other mismatches.
|
[
"Verify",
"that",
"axis",
"labels",
"for",
"a",
"given",
"source",
"are",
"as",
"expected",
"."
] |
1d6292dc25e3a115544237e392e61bff6631d23c
|
https://github.com/mila-iqia/fuel/blob/1d6292dc25e3a115544237e392e61bff6631d23c/fuel/transformers/__init__.py#L34-L67
|
11,943
|
mila-iqia/fuel
|
fuel/transformers/__init__.py
|
Batch.get_data
|
def get_data(self, request=None):
"""Get data from the dataset."""
if request is None:
raise ValueError
data = [[] for _ in self.sources]
for i in range(request):
try:
for source_data, example in zip(
data, next(self.child_epoch_iterator)):
source_data.append(example)
except StopIteration:
# If some data has been extracted and `strict` is not set,
# we should spit out this data before stopping iteration.
if not self.strictness and data[0]:
break
elif self.strictness > 1 and data[0]:
raise ValueError
raise
return tuple(numpy.asarray(source_data) for source_data in data)
|
python
|
def get_data(self, request=None):
"""Get data from the dataset."""
if request is None:
raise ValueError
data = [[] for _ in self.sources]
for i in range(request):
try:
for source_data, example in zip(
data, next(self.child_epoch_iterator)):
source_data.append(example)
except StopIteration:
# If some data has been extracted and `strict` is not set,
# we should spit out this data before stopping iteration.
if not self.strictness and data[0]:
break
elif self.strictness > 1 and data[0]:
raise ValueError
raise
return tuple(numpy.asarray(source_data) for source_data in data)
|
[
"def",
"get_data",
"(",
"self",
",",
"request",
"=",
"None",
")",
":",
"if",
"request",
"is",
"None",
":",
"raise",
"ValueError",
"data",
"=",
"[",
"[",
"]",
"for",
"_",
"in",
"self",
".",
"sources",
"]",
"for",
"i",
"in",
"range",
"(",
"request",
")",
":",
"try",
":",
"for",
"source_data",
",",
"example",
"in",
"zip",
"(",
"data",
",",
"next",
"(",
"self",
".",
"child_epoch_iterator",
")",
")",
":",
"source_data",
".",
"append",
"(",
"example",
")",
"except",
"StopIteration",
":",
"# If some data has been extracted and `strict` is not set,",
"# we should spit out this data before stopping iteration.",
"if",
"not",
"self",
".",
"strictness",
"and",
"data",
"[",
"0",
"]",
":",
"break",
"elif",
"self",
".",
"strictness",
">",
"1",
"and",
"data",
"[",
"0",
"]",
":",
"raise",
"ValueError",
"raise",
"return",
"tuple",
"(",
"numpy",
".",
"asarray",
"(",
"source_data",
")",
"for",
"source_data",
"in",
"data",
")"
] |
Get data from the dataset.
|
[
"Get",
"data",
"from",
"the",
"dataset",
"."
] |
1d6292dc25e3a115544237e392e61bff6631d23c
|
https://github.com/mila-iqia/fuel/blob/1d6292dc25e3a115544237e392e61bff6631d23c/fuel/transformers/__init__.py#L608-L626
|
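A minimal sketch of the transformer in use, following the usual fuel pipeline of dataset, stream and iteration scheme; the toy data is an assumption.

from collections import OrderedDict

import numpy
from fuel.datasets import IterableDataset
from fuel.schemes import ConstantScheme
from fuel.streams import DataStream
from fuel.transformers import Batch

# Five examples of two features each, grouped into batches of two.
dataset = IterableDataset(
    OrderedDict([('features', numpy.arange(10).reshape(5, 2))]))
batched = Batch(DataStream(dataset), iteration_scheme=ConstantScheme(2))
for batch in batched.get_epoch_iterator():
    print(batch)  # the final batch holds the single leftover example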
11,944
|
mila-iqia/fuel
|
fuel/utils/parallel.py
|
_producer_wrapper
|
def _producer_wrapper(f, port, addr='tcp://127.0.0.1'):
"""A shim that sets up a socket and starts the producer callable.
Parameters
----------
f : callable
Callable that takes a single argument, a handle
for a ZeroMQ PUSH socket. Must be picklable.
port : int
The port on which the socket should connect.
addr : str, optional
Address to which the socket should connect. Defaults
to localhost ('tcp://127.0.0.1').
"""
try:
context = zmq.Context()
socket = context.socket(zmq.PUSH)
socket.connect(':'.join([addr, str(port)]))
f(socket)
finally:
# Works around a Python 3.x bug.
context.destroy()
|
python
|
def _producer_wrapper(f, port, addr='tcp://127.0.0.1'):
"""A shim that sets up a socket and starts the producer callable.
Parameters
----------
f : callable
Callable that takes a single argument, a handle
for a ZeroMQ PUSH socket. Must be picklable.
port : int
The port on which the socket should connect.
addr : str, optional
Address to which the socket should connect. Defaults
to localhost ('tcp://127.0.0.1').
"""
try:
context = zmq.Context()
socket = context.socket(zmq.PUSH)
socket.connect(':'.join([addr, str(port)]))
f(socket)
finally:
# Works around a Python 3.x bug.
context.destroy()
|
[
"def",
"_producer_wrapper",
"(",
"f",
",",
"port",
",",
"addr",
"=",
"'tcp://127.0.0.1'",
")",
":",
"try",
":",
"context",
"=",
"zmq",
".",
"Context",
"(",
")",
"socket",
"=",
"context",
".",
"socket",
"(",
"zmq",
".",
"PUSH",
")",
"socket",
".",
"connect",
"(",
"':'",
".",
"join",
"(",
"[",
"addr",
",",
"str",
"(",
"port",
")",
"]",
")",
")",
"f",
"(",
"socket",
")",
"finally",
":",
"# Works around a Python 3.x bug.",
"context",
".",
"destroy",
"(",
")"
] |
A shim that sets up a socket and starts the producer callable.
Parameters
----------
f : callable
Callable that takes a single argument, a handle
for a ZeroMQ PUSH socket. Must be picklable.
port : int
The port on which the socket should connect.
addr : str, optional
Address to which the socket should connect. Defaults
to localhost ('tcp://127.0.0.1').
|
[
"A",
"shim",
"that",
"sets",
"up",
"a",
"socket",
"and",
"starts",
"the",
"producer",
"callable",
"."
] |
1d6292dc25e3a115544237e392e61bff6631d23c
|
https://github.com/mila-iqia/fuel/blob/1d6292dc25e3a115544237e392e61bff6631d23c/fuel/utils/parallel.py#L14-L36
|
11,945
|
mila-iqia/fuel
|
fuel/utils/parallel.py
|
_spawn_producer
|
def _spawn_producer(f, port, addr='tcp://127.0.0.1'):
"""Start a process that sends results on a PUSH socket.
Parameters
----------
f : callable
Callable that takes a single argument, a handle
for a ZeroMQ PUSH socket. Must be picklable.
Returns
-------
process : multiprocessing.Process
The process handle of the created producer process.
"""
process = Process(target=_producer_wrapper, args=(f, port, addr))
process.start()
return process
|
python
|
def _spawn_producer(f, port, addr='tcp://127.0.0.1'):
"""Start a process that sends results on a PUSH socket.
Parameters
----------
f : callable
Callable that takes a single argument, a handle
for a ZeroMQ PUSH socket. Must be picklable.
Returns
-------
process : multiprocessing.Process
The process handle of the created producer process.
"""
process = Process(target=_producer_wrapper, args=(f, port, addr))
process.start()
return process
|
[
"def",
"_spawn_producer",
"(",
"f",
",",
"port",
",",
"addr",
"=",
"'tcp://127.0.0.1'",
")",
":",
"process",
"=",
"Process",
"(",
"target",
"=",
"_producer_wrapper",
",",
"args",
"=",
"(",
"f",
",",
"port",
",",
"addr",
")",
")",
"process",
".",
"start",
"(",
")",
"return",
"process"
] |
Start a process that sends results on a PUSH socket.
Parameters
----------
f : callable
Callable that takes a single argument, a handle
for a ZeroMQ PUSH socket. Must be picklable.
Returns
-------
process : multiprocessing.Process
The process handle of the created producer process.
|
[
"Start",
"a",
"process",
"that",
"sends",
"results",
"on",
"a",
"PUSH",
"socket",
"."
] |
1d6292dc25e3a115544237e392e61bff6631d23c
|
https://github.com/mila-iqia/fuel/blob/1d6292dc25e3a115544237e392e61bff6631d23c/fuel/utils/parallel.py#L39-L56
|
11,946
|
mila-iqia/fuel
|
fuel/utils/parallel.py
|
producer_consumer
|
def producer_consumer(producer, consumer, addr='tcp://127.0.0.1',
port=None, context=None):
"""A producer-consumer pattern.
Parameters
----------
producer : callable
Callable that takes a single argument, a handle
for a ZeroMQ PUSH socket. Must be picklable.
consumer : callable
Callable that takes a single argument, a handle
for a ZeroMQ PULL socket.
addr : str, optional
Address to which the socket should connect. Defaults
to localhost ('tcp://127.0.0.1').
port : int, optional
The port on which the consumer should listen.
context : zmq.Context, optional
The ZeroMQ Context to use. One will be created otherwise.
Returns
-------
result
Passes along whatever `consumer` returns.
Notes
-----
This sets up a PULL socket in the calling process and forks
a process that calls `producer` on a PUSH socket. When the
consumer returns, the producer process is terminated.
Wrap `consumer` or `producer` in a `functools.partial` object
in order to send additional arguments; the callables passed in
should expect only one required, positional argument, the socket
handle.
"""
context_created = False
if context is None:
context_created = True
context = zmq.Context()
try:
consumer_socket = context.socket(zmq.PULL)
if port is None:
port = consumer_socket.bind_to_random_port(addr)
try:
process = _spawn_producer(producer, port)
result = consumer(consumer_socket)
finally:
process.terminate()
return result
finally:
# Works around a Python 3.x bug.
if context_created:
context.destroy()
|
python
|
def producer_consumer(producer, consumer, addr='tcp://127.0.0.1',
port=None, context=None):
"""A producer-consumer pattern.
Parameters
----------
producer : callable
Callable that takes a single argument, a handle
for a ZeroMQ PUSH socket. Must be picklable.
consumer : callable
Callable that takes a single argument, a handle
for a ZeroMQ PULL socket.
addr : str, optional
Address to which the socket should connect. Defaults
to localhost ('tcp://127.0.0.1').
port : int, optional
The port on which the consumer should listen.
context : zmq.Context, optional
The ZeroMQ Context to use. One will be created otherwise.
Returns
-------
result
Passes along whatever `consumer` returns.
Notes
-----
This sets up a PULL socket in the calling process and forks
a process that calls `producer` on a PUSH socket. When the
consumer returns, the producer process is terminated.
Wrap `consumer` or `producer` in a `functools.partial` object
in order to send additional arguments; the callables passed in
should expect only one required, positional argument, the socket
handle.
"""
context_created = False
if context is None:
context_created = True
context = zmq.Context()
try:
consumer_socket = context.socket(zmq.PULL)
if port is None:
port = consumer_socket.bind_to_random_port(addr)
try:
process = _spawn_producer(producer, port)
result = consumer(consumer_socket)
finally:
process.terminate()
return result
finally:
# Works around a Python 3.x bug.
if context_created:
context.destroy()
|
[
"def",
"producer_consumer",
"(",
"producer",
",",
"consumer",
",",
"addr",
"=",
"'tcp://127.0.0.1'",
",",
"port",
"=",
"None",
",",
"context",
"=",
"None",
")",
":",
"context_created",
"=",
"False",
"if",
"context",
"is",
"None",
":",
"context_created",
"=",
"True",
"context",
"=",
"zmq",
".",
"Context",
"(",
")",
"try",
":",
"consumer_socket",
"=",
"context",
".",
"socket",
"(",
"zmq",
".",
"PULL",
")",
"if",
"port",
"is",
"None",
":",
"port",
"=",
"consumer_socket",
".",
"bind_to_random_port",
"(",
"addr",
")",
"try",
":",
"process",
"=",
"_spawn_producer",
"(",
"producer",
",",
"port",
")",
"result",
"=",
"consumer",
"(",
"consumer_socket",
")",
"finally",
":",
"process",
".",
"terminate",
"(",
")",
"return",
"result",
"finally",
":",
"# Works around a Python 3.x bug.",
"if",
"context_created",
":",
"context",
".",
"destroy",
"(",
")"
] |
A producer-consumer pattern.
Parameters
----------
producer : callable
Callable that takes a single argument, a handle
for a ZeroMQ PUSH socket. Must be picklable.
consumer : callable
Callable that takes a single argument, a handle
for a ZeroMQ PULL socket.
addr : str, optional
Address to which the socket should connect. Defaults
to localhost ('tcp://127.0.0.1').
port : int, optional
The port on which the consumer should listen.
context : zmq.Context, optional
The ZeroMQ Context to use. One will be created otherwise.
Returns
-------
result
Passes along whatever `consumer` returns.
Notes
-----
This sets up a PULL socket in the calling process and forks
a process that calls `producer` on a PUSH socket. When the
consumer returns, the producer process is terminated.
Wrap `consumer` or `producer` in a `functools.partial` object
in order to send additional arguments; the callables passed in
should expect only one required, positional argument, the socket
handle.
|
[
"A",
"producer",
"-",
"consumer",
"pattern",
"."
] |
1d6292dc25e3a115544237e392e61bff6631d23c
|
https://github.com/mila-iqia/fuel/blob/1d6292dc25e3a115544237e392e61bff6631d23c/fuel/utils/parallel.py#L59-L113
|
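A minimal usage sketch of the `producer_consumer` helper recorded above may help; it relies only on the signature and the `functools.partial` advice given in the record's notes, while the payload layout, item count, and printed result are illustrative assumptions.

# Hypothetical usage of fuel.utils.parallel.producer_consumer.
# Requires fuel and pyzmq; the message format below is made up.
from functools import partial

from fuel.utils.parallel import producer_consumer


def produce(socket, n_items):
    # The producer only ever sees a ZeroMQ PUSH socket handle.
    for i in range(n_items):
        socket.send_pyobj({'index': i, 'value': i * i})


def consume(socket, n_items):
    # The consumer sees the matching PULL socket handle.
    total = 0
    for _ in range(n_items):
        total += socket.recv_pyobj()['value']
    return total


if __name__ == '__main__':
    # Extra arguments are bound with functools.partial, as the recorded
    # docstring suggests; the producer must stay picklable.
    result = producer_consumer(partial(produce, n_items=10),
                               partial(consume, n_items=10))
    print(result)  # sum of squares 0..9, i.e. 285
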
11,947
|
mila-iqia/fuel
|
fuel/converters/dogs_vs_cats.py
|
convert_dogs_vs_cats
|
def convert_dogs_vs_cats(directory, output_directory,
output_filename='dogs_vs_cats.hdf5'):
"""Converts the Dogs vs. Cats dataset to HDF5.
Converts the Dogs vs. Cats dataset to an HDF5 dataset compatible with
:class:`fuel.datasets.dogs_vs_cats`. The converted dataset is saved as
'dogs_vs_cats.hdf5'.
It assumes the existence of the following files:
* `dogs_vs_cats.train.zip`
* `dogs_vs_cats.test1.zip`
Parameters
----------
directory : str
Directory in which input files reside.
output_directory : str
Directory in which to save the converted dataset.
output_filename : str, optional
Name of the saved dataset. Defaults to 'dogs_vs_cats.hdf5'.
Returns
-------
output_paths : tuple of str
Single-element tuple containing the path to the converted dataset.
"""
# Prepare output file
output_path = os.path.join(output_directory, output_filename)
h5file = h5py.File(output_path, mode='w')
dtype = h5py.special_dtype(vlen=numpy.dtype('uint8'))
hdf_features = h5file.create_dataset('image_features', (37500,),
dtype=dtype)
hdf_shapes = h5file.create_dataset('image_features_shapes', (37500, 3),
dtype='int32')
hdf_labels = h5file.create_dataset('targets', (25000, 1), dtype='uint8')
# Attach shape annotations and scales
hdf_features.dims.create_scale(hdf_shapes, 'shapes')
hdf_features.dims[0].attach_scale(hdf_shapes)
hdf_shapes_labels = h5file.create_dataset('image_features_shapes_labels',
(3,), dtype='S7')
hdf_shapes_labels[...] = ['channel'.encode('utf8'),
'height'.encode('utf8'),
'width'.encode('utf8')]
hdf_features.dims.create_scale(hdf_shapes_labels, 'shape_labels')
hdf_features.dims[0].attach_scale(hdf_shapes_labels)
# Add axis annotations
hdf_features.dims[0].label = 'batch'
hdf_labels.dims[0].label = 'batch'
hdf_labels.dims[1].label = 'index'
# Convert
i = 0
for split, split_size in zip([TRAIN, TEST], [25000, 12500]):
# Open the ZIP file
filename = os.path.join(directory, split)
zip_file = zipfile.ZipFile(filename, 'r')
image_names = zip_file.namelist()[1:] # Discard the directory name
# Shuffle the examples
if split == TRAIN:
rng = numpy.random.RandomState(123522)
rng.shuffle(image_names)
else:
image_names.sort(key=lambda fn: int(os.path.splitext(fn[6:])[0]))
# Convert from JPEG to NumPy arrays
with progress_bar(filename, split_size) as bar:
for image_name in image_names:
# Save image
image = numpy.array(Image.open(zip_file.open(image_name)))
image = image.transpose(2, 0, 1)
hdf_features[i] = image.flatten()
hdf_shapes[i] = image.shape
# Cats are 0, Dogs are 1
if split == TRAIN:
hdf_labels[i] = 0 if 'cat' in image_name else 1
# Update progress
i += 1
bar.update(i if split == TRAIN else i - 25000)
# Add the labels
split_dict = {}
sources = ['image_features', 'targets']
split_dict['train'] = dict(zip(sources, [(0, 25000)] * 2))
split_dict['test'] = {sources[0]: (25000, 37500)}
h5file.attrs['split'] = H5PYDataset.create_split_array(split_dict)
h5file.flush()
h5file.close()
return (output_path,)
|
python
|
def convert_dogs_vs_cats(directory, output_directory,
output_filename='dogs_vs_cats.hdf5'):
"""Converts the Dogs vs. Cats dataset to HDF5.
Converts the Dogs vs. Cats dataset to an HDF5 dataset compatible with
:class:`fuel.datasets.dogs_vs_cats`. The converted dataset is saved as
'dogs_vs_cats.hdf5'.
It assumes the existence of the following files:
* `dogs_vs_cats.train.zip`
* `dogs_vs_cats.test1.zip`
Parameters
----------
directory : str
Directory in which input files reside.
output_directory : str
Directory in which to save the converted dataset.
output_filename : str, optional
Name of the saved dataset. Defaults to 'dogs_vs_cats.hdf5'.
Returns
-------
output_paths : tuple of str
Single-element tuple containing the path to the converted dataset.
"""
# Prepare output file
output_path = os.path.join(output_directory, output_filename)
h5file = h5py.File(output_path, mode='w')
dtype = h5py.special_dtype(vlen=numpy.dtype('uint8'))
hdf_features = h5file.create_dataset('image_features', (37500,),
dtype=dtype)
hdf_shapes = h5file.create_dataset('image_features_shapes', (37500, 3),
dtype='int32')
hdf_labels = h5file.create_dataset('targets', (25000, 1), dtype='uint8')
# Attach shape annotations and scales
hdf_features.dims.create_scale(hdf_shapes, 'shapes')
hdf_features.dims[0].attach_scale(hdf_shapes)
hdf_shapes_labels = h5file.create_dataset('image_features_shapes_labels',
(3,), dtype='S7')
hdf_shapes_labels[...] = ['channel'.encode('utf8'),
'height'.encode('utf8'),
'width'.encode('utf8')]
hdf_features.dims.create_scale(hdf_shapes_labels, 'shape_labels')
hdf_features.dims[0].attach_scale(hdf_shapes_labels)
# Add axis annotations
hdf_features.dims[0].label = 'batch'
hdf_labels.dims[0].label = 'batch'
hdf_labels.dims[1].label = 'index'
# Convert
i = 0
for split, split_size in zip([TRAIN, TEST], [25000, 12500]):
# Open the ZIP file
filename = os.path.join(directory, split)
zip_file = zipfile.ZipFile(filename, 'r')
image_names = zip_file.namelist()[1:] # Discard the directory name
# Shuffle the examples
if split == TRAIN:
rng = numpy.random.RandomState(123522)
rng.shuffle(image_names)
else:
image_names.sort(key=lambda fn: int(os.path.splitext(fn[6:])[0]))
# Convert from JPEG to NumPy arrays
with progress_bar(filename, split_size) as bar:
for image_name in image_names:
# Save image
image = numpy.array(Image.open(zip_file.open(image_name)))
image = image.transpose(2, 0, 1)
hdf_features[i] = image.flatten()
hdf_shapes[i] = image.shape
# Cats are 0, Dogs are 1
if split == TRAIN:
hdf_labels[i] = 0 if 'cat' in image_name else 1
# Update progress
i += 1
bar.update(i if split == TRAIN else i - 25000)
# Add the labels
split_dict = {}
sources = ['image_features', 'targets']
split_dict['train'] = dict(zip(sources, [(0, 25000)] * 2))
split_dict['test'] = {sources[0]: (25000, 37500)}
h5file.attrs['split'] = H5PYDataset.create_split_array(split_dict)
h5file.flush()
h5file.close()
return (output_path,)
|
[
"def",
"convert_dogs_vs_cats",
"(",
"directory",
",",
"output_directory",
",",
"output_filename",
"=",
"'dogs_vs_cats.hdf5'",
")",
":",
"# Prepare output file",
"output_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"output_directory",
",",
"output_filename",
")",
"h5file",
"=",
"h5py",
".",
"File",
"(",
"output_path",
",",
"mode",
"=",
"'w'",
")",
"dtype",
"=",
"h5py",
".",
"special_dtype",
"(",
"vlen",
"=",
"numpy",
".",
"dtype",
"(",
"'uint8'",
")",
")",
"hdf_features",
"=",
"h5file",
".",
"create_dataset",
"(",
"'image_features'",
",",
"(",
"37500",
",",
")",
",",
"dtype",
"=",
"dtype",
")",
"hdf_shapes",
"=",
"h5file",
".",
"create_dataset",
"(",
"'image_features_shapes'",
",",
"(",
"37500",
",",
"3",
")",
",",
"dtype",
"=",
"'int32'",
")",
"hdf_labels",
"=",
"h5file",
".",
"create_dataset",
"(",
"'targets'",
",",
"(",
"25000",
",",
"1",
")",
",",
"dtype",
"=",
"'uint8'",
")",
"# Attach shape annotations and scales",
"hdf_features",
".",
"dims",
".",
"create_scale",
"(",
"hdf_shapes",
",",
"'shapes'",
")",
"hdf_features",
".",
"dims",
"[",
"0",
"]",
".",
"attach_scale",
"(",
"hdf_shapes",
")",
"hdf_shapes_labels",
"=",
"h5file",
".",
"create_dataset",
"(",
"'image_features_shapes_labels'",
",",
"(",
"3",
",",
")",
",",
"dtype",
"=",
"'S7'",
")",
"hdf_shapes_labels",
"[",
"...",
"]",
"=",
"[",
"'channel'",
".",
"encode",
"(",
"'utf8'",
")",
",",
"'height'",
".",
"encode",
"(",
"'utf8'",
")",
",",
"'width'",
".",
"encode",
"(",
"'utf8'",
")",
"]",
"hdf_features",
".",
"dims",
".",
"create_scale",
"(",
"hdf_shapes_labels",
",",
"'shape_labels'",
")",
"hdf_features",
".",
"dims",
"[",
"0",
"]",
".",
"attach_scale",
"(",
"hdf_shapes_labels",
")",
"# Add axis annotations",
"hdf_features",
".",
"dims",
"[",
"0",
"]",
".",
"label",
"=",
"'batch'",
"hdf_labels",
".",
"dims",
"[",
"0",
"]",
".",
"label",
"=",
"'batch'",
"hdf_labels",
".",
"dims",
"[",
"1",
"]",
".",
"label",
"=",
"'index'",
"# Convert",
"i",
"=",
"0",
"for",
"split",
",",
"split_size",
"in",
"zip",
"(",
"[",
"TRAIN",
",",
"TEST",
"]",
",",
"[",
"25000",
",",
"12500",
"]",
")",
":",
"# Open the ZIP file",
"filename",
"=",
"os",
".",
"path",
".",
"join",
"(",
"directory",
",",
"split",
")",
"zip_file",
"=",
"zipfile",
".",
"ZipFile",
"(",
"filename",
",",
"'r'",
")",
"image_names",
"=",
"zip_file",
".",
"namelist",
"(",
")",
"[",
"1",
":",
"]",
"# Discard the directory name",
"# Shuffle the examples",
"if",
"split",
"==",
"TRAIN",
":",
"rng",
"=",
"numpy",
".",
"random",
".",
"RandomState",
"(",
"123522",
")",
"rng",
".",
"shuffle",
"(",
"image_names",
")",
"else",
":",
"image_names",
".",
"sort",
"(",
"key",
"=",
"lambda",
"fn",
":",
"int",
"(",
"os",
".",
"path",
".",
"splitext",
"(",
"fn",
"[",
"6",
":",
"]",
")",
"[",
"0",
"]",
")",
")",
"# Convert from JPEG to NumPy arrays",
"with",
"progress_bar",
"(",
"filename",
",",
"split_size",
")",
"as",
"bar",
":",
"for",
"image_name",
"in",
"image_names",
":",
"# Save image",
"image",
"=",
"numpy",
".",
"array",
"(",
"Image",
".",
"open",
"(",
"zip_file",
".",
"open",
"(",
"image_name",
")",
")",
")",
"image",
"=",
"image",
".",
"transpose",
"(",
"2",
",",
"0",
",",
"1",
")",
"hdf_features",
"[",
"i",
"]",
"=",
"image",
".",
"flatten",
"(",
")",
"hdf_shapes",
"[",
"i",
"]",
"=",
"image",
".",
"shape",
"# Cats are 0, Dogs are 1",
"if",
"split",
"==",
"TRAIN",
":",
"hdf_labels",
"[",
"i",
"]",
"=",
"0",
"if",
"'cat'",
"in",
"image_name",
"else",
"1",
"# Update progress",
"i",
"+=",
"1",
"bar",
".",
"update",
"(",
"i",
"if",
"split",
"==",
"TRAIN",
"else",
"i",
"-",
"25000",
")",
"# Add the labels",
"split_dict",
"=",
"{",
"}",
"sources",
"=",
"[",
"'image_features'",
",",
"'targets'",
"]",
"split_dict",
"[",
"'train'",
"]",
"=",
"dict",
"(",
"zip",
"(",
"sources",
",",
"[",
"(",
"0",
",",
"25000",
")",
"]",
"*",
"2",
")",
")",
"split_dict",
"[",
"'test'",
"]",
"=",
"{",
"sources",
"[",
"0",
"]",
":",
"(",
"25000",
",",
"37500",
")",
"}",
"h5file",
".",
"attrs",
"[",
"'split'",
"]",
"=",
"H5PYDataset",
".",
"create_split_array",
"(",
"split_dict",
")",
"h5file",
".",
"flush",
"(",
")",
"h5file",
".",
"close",
"(",
")",
"return",
"(",
"output_path",
",",
")"
] |
Converts the Dogs vs. Cats dataset to HDF5.
Converts the Dogs vs. Cats dataset to an HDF5 dataset compatible with
:class:`fuel.datasets.dogs_vs_cats`. The converted dataset is saved as
'dogs_vs_cats.hdf5'.
It assumes the existence of the following files:
* `dogs_vs_cats.train.zip`
* `dogs_vs_cats.test1.zip`
Parameters
----------
directory : str
Directory in which input files reside.
output_directory : str
Directory in which to save the converted dataset.
output_filename : str, optional
Name of the saved dataset. Defaults to 'dogs_vs_cats.hdf5'.
Returns
-------
output_paths : tuple of str
Single-element tuple containing the path to the converted dataset.
|
[
"Converts",
"the",
"Dogs",
"vs",
".",
"Cats",
"dataset",
"to",
"HDF5",
"."
] |
1d6292dc25e3a115544237e392e61bff6631d23c
|
https://github.com/mila-iqia/fuel/blob/1d6292dc25e3a115544237e392e61bff6631d23c/fuel/converters/dogs_vs_cats.py#L16-L113
|
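Because the converter above writes an `H5PYDataset`-style split array, the resulting file can be read back through Fuel's dataset class. The sketch below is a hedged illustration: the `fuel.datasets.H5PYDataset` import path, the `which_sets`/`open`/`get_data` call pattern, and the alphabetical source ordering are assumptions about Fuel's usual interface rather than facts stated in this record.

# Hypothetical read-back of the file produced by convert_dogs_vs_cats.
from fuel.datasets import H5PYDataset

train_set = H5PYDataset('dogs_vs_cats.hdf5', which_sets=('train',))
print(train_set.num_examples)  # 25000 according to the split defined above

handle = train_set.open()
# Sources are assumed to come back in alphabetical order:
# 'image_features' first, then 'targets'.
images, targets = train_set.get_data(handle, slice(0, 4))
# Images are stored flattened (vlen uint8); the 'image_features_shapes'
# scale attached above records each image's (channel, height, width).
print([img.shape for img in images], targets.ravel())
train_set.close(handle)
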
11,948
|
mila-iqia/fuel
|
fuel/bin/fuel_download.py
|
main
|
def main(args=None):
"""Entry point for `fuel-download` script.
This function can also be imported and used from Python.
Parameters
----------
args : iterable, optional (default: None)
A list of arguments that will be passed to Fuel's downloading
utility. If this argument is not specified, `sys.argv[1:]` will
be used.
"""
built_in_datasets = dict(downloaders.all_downloaders)
if fuel.config.extra_downloaders:
for name in fuel.config.extra_downloaders:
extra_datasets = dict(
importlib.import_module(name).all_downloaders)
if any(key in built_in_datasets for key in extra_datasets.keys()):
raise ValueError('extra downloaders conflict in name with '
'built-in downloaders')
built_in_datasets.update(extra_datasets)
parser = argparse.ArgumentParser(
description='Download script for built-in datasets.')
parent_parser = argparse.ArgumentParser(add_help=False)
parent_parser.add_argument(
"-d", "--directory", help="where to save the downloaded files",
type=str, default=os.getcwd())
parent_parser.add_argument(
"--clear", help="clear the downloaded files", action='store_true')
subparsers = parser.add_subparsers()
download_functions = {}
for name, fill_subparser in built_in_datasets.items():
subparser = subparsers.add_parser(
name, parents=[parent_parser],
help='Download the {} dataset'.format(name))
# Allows the parser to know which subparser was called.
subparser.set_defaults(which_=name)
download_functions[name] = fill_subparser(subparser)
args = parser.parse_args()
args_dict = vars(args)
download_function = download_functions[args_dict.pop('which_')]
try:
download_function(**args_dict)
except NeedURLPrefix:
parser.error(url_prefix_message)
|
python
|
def main(args=None):
"""Entry point for `fuel-download` script.
This function can also be imported and used from Python.
Parameters
----------
args : iterable, optional (default: None)
A list of arguments that will be passed to Fuel's downloading
utility. If this argument is not specified, `sys.argv[1:]` will
be used.
"""
built_in_datasets = dict(downloaders.all_downloaders)
if fuel.config.extra_downloaders:
for name in fuel.config.extra_downloaders:
extra_datasets = dict(
importlib.import_module(name).all_downloaders)
if any(key in built_in_datasets for key in extra_datasets.keys()):
raise ValueError('extra downloaders conflict in name with '
'built-in downloaders')
built_in_datasets.update(extra_datasets)
parser = argparse.ArgumentParser(
description='Download script for built-in datasets.')
parent_parser = argparse.ArgumentParser(add_help=False)
parent_parser.add_argument(
"-d", "--directory", help="where to save the downloaded files",
type=str, default=os.getcwd())
parent_parser.add_argument(
"--clear", help="clear the downloaded files", action='store_true')
subparsers = parser.add_subparsers()
download_functions = {}
for name, fill_subparser in built_in_datasets.items():
subparser = subparsers.add_parser(
name, parents=[parent_parser],
help='Download the {} dataset'.format(name))
# Allows the parser to know which subparser was called.
subparser.set_defaults(which_=name)
download_functions[name] = fill_subparser(subparser)
args = parser.parse_args()
args_dict = vars(args)
download_function = download_functions[args_dict.pop('which_')]
try:
download_function(**args_dict)
except NeedURLPrefix:
parser.error(url_prefix_message)
|
[
"def",
"main",
"(",
"args",
"=",
"None",
")",
":",
"built_in_datasets",
"=",
"dict",
"(",
"downloaders",
".",
"all_downloaders",
")",
"if",
"fuel",
".",
"config",
".",
"extra_downloaders",
":",
"for",
"name",
"in",
"fuel",
".",
"config",
".",
"extra_downloaders",
":",
"extra_datasets",
"=",
"dict",
"(",
"importlib",
".",
"import_module",
"(",
"name",
")",
".",
"all_downloaders",
")",
"if",
"any",
"(",
"key",
"in",
"built_in_datasets",
"for",
"key",
"in",
"extra_datasets",
".",
"keys",
"(",
")",
")",
":",
"raise",
"ValueError",
"(",
"'extra downloaders conflict in name with '",
"'built-in downloaders'",
")",
"built_in_datasets",
".",
"update",
"(",
"extra_datasets",
")",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
"description",
"=",
"'Download script for built-in datasets.'",
")",
"parent_parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
"add_help",
"=",
"False",
")",
"parent_parser",
".",
"add_argument",
"(",
"\"-d\"",
",",
"\"--directory\"",
",",
"help",
"=",
"\"where to save the downloaded files\"",
",",
"type",
"=",
"str",
",",
"default",
"=",
"os",
".",
"getcwd",
"(",
")",
")",
"parent_parser",
".",
"add_argument",
"(",
"\"--clear\"",
",",
"help",
"=",
"\"clear the downloaded files\"",
",",
"action",
"=",
"'store_true'",
")",
"subparsers",
"=",
"parser",
".",
"add_subparsers",
"(",
")",
"download_functions",
"=",
"{",
"}",
"for",
"name",
",",
"fill_subparser",
"in",
"built_in_datasets",
".",
"items",
"(",
")",
":",
"subparser",
"=",
"subparsers",
".",
"add_parser",
"(",
"name",
",",
"parents",
"=",
"[",
"parent_parser",
"]",
",",
"help",
"=",
"'Download the {} dataset'",
".",
"format",
"(",
"name",
")",
")",
"# Allows the parser to know which subparser was called.",
"subparser",
".",
"set_defaults",
"(",
"which_",
"=",
"name",
")",
"download_functions",
"[",
"name",
"]",
"=",
"fill_subparser",
"(",
"subparser",
")",
"args",
"=",
"parser",
".",
"parse_args",
"(",
")",
"args_dict",
"=",
"vars",
"(",
"args",
")",
"download_function",
"=",
"download_functions",
"[",
"args_dict",
".",
"pop",
"(",
"'which_'",
")",
"]",
"try",
":",
"download_function",
"(",
"*",
"*",
"args_dict",
")",
"except",
"NeedURLPrefix",
":",
"parser",
".",
"error",
"(",
"url_prefix_message",
")"
] |
Entry point for `fuel-download` script.
This function can also be imported and used from Python.
Parameters
----------
args : iterable, optional (default: None)
A list of arguments that will be passed to Fuel's downloading
utility. If this argument is not specified, `sys.argv[1:]` will
be used.
|
[
"Entry",
"point",
"for",
"fuel",
"-",
"download",
"script",
"."
] |
1d6292dc25e3a115544237e392e61bff6631d23c
|
https://github.com/mila-iqia/fuel/blob/1d6292dc25e3a115544237e392e61bff6631d23c/fuel/bin/fuel_download.py#L19-L64
|
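The `which_` trick in the entry point above (each subparser records its own name through `set_defaults`, and `main` pops it to select the matching download function) is a plain argparse pattern; a stripped-down, self-contained sketch of just that dispatch follows, with invented command names. Note in passing that, as recorded, this entry point calls `parser.parse_args()` without forwarding its `args` parameter, unlike the `fuel-convert` entry point further down.

# Self-contained illustration of the set_defaults(which_=...) dispatch
# pattern used by fuel-download; command names and handlers are invented.
import argparse


def handle_mnist(directory):
    print('would download MNIST into', directory)


def handle_cifar10(directory):
    print('would download CIFAR-10 into', directory)


def main(args=None):
    parser = argparse.ArgumentParser(description='Toy download dispatcher.')
    subparsers = parser.add_subparsers()
    handlers = {'mnist': handle_mnist, 'cifar10': handle_cifar10}
    for name, handler in handlers.items():
        subparser = subparsers.add_parser(name)
        subparser.add_argument('-d', '--directory', default='.')
        # Lets the entry point know which subcommand was chosen.
        subparser.set_defaults(which_=name)
    parsed = vars(parser.parse_args(args))
    chosen = handlers[parsed.pop('which_')]
    chosen(**parsed)


if __name__ == '__main__':
    main(['mnist', '-d', '/tmp'])
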
11,949
|
mila-iqia/fuel
|
fuel/downloaders/mnist.py
|
fill_subparser
|
def fill_subparser(subparser):
"""Sets up a subparser to download the MNIST dataset files.
The following MNIST dataset files are downloaded from Yann LeCun's
website [LECUN]:
`train-images-idx3-ubyte.gz`, `train-labels-idx1-ubyte.gz`,
`t10k-images-idx3-ubyte.gz`, `t10k-labels-idx1-ubyte.gz`.
Parameters
----------
subparser : :class:`argparse.ArgumentParser`
Subparser handling the `mnist` command.
"""
filenames = ['train-images-idx3-ubyte.gz', 'train-labels-idx1-ubyte.gz',
't10k-images-idx3-ubyte.gz', 't10k-labels-idx1-ubyte.gz']
urls = ['http://yann.lecun.com/exdb/mnist/' + f for f in filenames]
subparser.set_defaults(urls=urls, filenames=filenames)
return default_downloader
|
python
|
def fill_subparser(subparser):
"""Sets up a subparser to download the MNIST dataset files.
The following MNIST dataset files are downloaded from Yann LeCun's
website [LECUN]:
`train-images-idx3-ubyte.gz`, `train-labels-idx1-ubyte.gz`,
`t10k-images-idx3-ubyte.gz`, `t10k-labels-idx1-ubyte.gz`.
Parameters
----------
subparser : :class:`argparse.ArgumentParser`
Subparser handling the `mnist` command.
"""
filenames = ['train-images-idx3-ubyte.gz', 'train-labels-idx1-ubyte.gz',
't10k-images-idx3-ubyte.gz', 't10k-labels-idx1-ubyte.gz']
urls = ['http://yann.lecun.com/exdb/mnist/' + f for f in filenames]
subparser.set_defaults(urls=urls, filenames=filenames)
return default_downloader
|
[
"def",
"fill_subparser",
"(",
"subparser",
")",
":",
"filenames",
"=",
"[",
"'train-images-idx3-ubyte.gz'",
",",
"'train-labels-idx1-ubyte.gz'",
",",
"'t10k-images-idx3-ubyte.gz'",
",",
"'t10k-labels-idx1-ubyte.gz'",
"]",
"urls",
"=",
"[",
"'http://yann.lecun.com/exdb/mnist/'",
"+",
"f",
"for",
"f",
"in",
"filenames",
"]",
"subparser",
".",
"set_defaults",
"(",
"urls",
"=",
"urls",
",",
"filenames",
"=",
"filenames",
")",
"return",
"default_downloader"
] |
Sets up a subparser to download the MNIST dataset files.
The following MNIST dataset files are downloaded from Yann LeCun's
website [LECUN]:
`train-images-idx3-ubyte.gz`, `train-labels-idx1-ubyte.gz`,
`t10k-images-idx3-ubyte.gz`, `t10k-labels-idx1-ubyte.gz`.
Parameters
----------
subparser : :class:`argparse.ArgumentParser`
Subparser handling the `mnist` command.
|
[
"Sets",
"up",
"a",
"subparser",
"to",
"download",
"the",
"MNIST",
"dataset",
"files",
"."
] |
1d6292dc25e3a115544237e392e61bff6631d23c
|
https://github.com/mila-iqia/fuel/blob/1d6292dc25e3a115544237e392e61bff6631d23c/fuel/downloaders/mnist.py#L4-L22
|
11,950
|
mila-iqia/fuel
|
fuel/bin/fuel_info.py
|
main
|
def main(args=None):
"""Entry point for `fuel-info` script.
This function can also be imported and used from Python.
Parameters
----------
args : iterable, optional (default: None)
A list of arguments that will be passed to Fuel's information
utility. If this argument is not specified, `sys.argv[1:]` will
be used.
"""
parser = argparse.ArgumentParser(
description='Extracts metadata from a Fuel-converted HDF5 file.')
parser.add_argument("filename", help="HDF5 file to analyze")
args = parser.parse_args()
with h5py.File(args.filename, 'r') as h5file:
interface_version = h5file.attrs.get('h5py_interface_version', 'N/A')
fuel_convert_version = h5file.attrs.get('fuel_convert_version', 'N/A')
fuel_convert_command = h5file.attrs.get('fuel_convert_command', 'N/A')
message_prefix = message_prefix_template.format(
os.path.basename(args.filename))
message_body = message_body_template.format(
fuel_convert_command, interface_version, fuel_convert_version)
message = ''.join(['\n', message_prefix, '\n', '=' * len(message_prefix),
message_body])
print(message)
|
python
|
def main(args=None):
"""Entry point for `fuel-info` script.
This function can also be imported and used from Python.
Parameters
----------
args : iterable, optional (default: None)
A list of arguments that will be passed to Fuel's information
utility. If this argument is not specified, `sys.argv[1:]` will
be used.
"""
parser = argparse.ArgumentParser(
description='Extracts metadata from a Fuel-converted HDF5 file.')
parser.add_argument("filename", help="HDF5 file to analyze")
args = parser.parse_args()
with h5py.File(args.filename, 'r') as h5file:
interface_version = h5file.attrs.get('h5py_interface_version', 'N/A')
fuel_convert_version = h5file.attrs.get('fuel_convert_version', 'N/A')
fuel_convert_command = h5file.attrs.get('fuel_convert_command', 'N/A')
message_prefix = message_prefix_template.format(
os.path.basename(args.filename))
message_body = message_body_template.format(
fuel_convert_command, interface_version, fuel_convert_version)
message = ''.join(['\n', message_prefix, '\n', '=' * len(message_prefix),
message_body])
print(message)
|
[
"def",
"main",
"(",
"args",
"=",
"None",
")",
":",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
"description",
"=",
"'Extracts metadata from a Fuel-converted HDF5 file.'",
")",
"parser",
".",
"add_argument",
"(",
"\"filename\"",
",",
"help",
"=",
"\"HDF5 file to analyze\"",
")",
"args",
"=",
"parser",
".",
"parse_args",
"(",
")",
"with",
"h5py",
".",
"File",
"(",
"args",
".",
"filename",
",",
"'r'",
")",
"as",
"h5file",
":",
"interface_version",
"=",
"h5file",
".",
"attrs",
".",
"get",
"(",
"'h5py_interface_version'",
",",
"'N/A'",
")",
"fuel_convert_version",
"=",
"h5file",
".",
"attrs",
".",
"get",
"(",
"'fuel_convert_version'",
",",
"'N/A'",
")",
"fuel_convert_command",
"=",
"h5file",
".",
"attrs",
".",
"get",
"(",
"'fuel_convert_command'",
",",
"'N/A'",
")",
"message_prefix",
"=",
"message_prefix_template",
".",
"format",
"(",
"os",
".",
"path",
".",
"basename",
"(",
"args",
".",
"filename",
")",
")",
"message_body",
"=",
"message_body_template",
".",
"format",
"(",
"fuel_convert_command",
",",
"interface_version",
",",
"fuel_convert_version",
")",
"message",
"=",
"''",
".",
"join",
"(",
"[",
"'\\n'",
",",
"message_prefix",
",",
"'\\n'",
",",
"'='",
"*",
"len",
"(",
"message_prefix",
")",
",",
"message_body",
"]",
")",
"print",
"(",
"message",
")"
] |
Entry point for `fuel-info` script.
This function can also be imported and used from Python.
Parameters
----------
args : iterable, optional (default: None)
A list of arguments that will be passed to Fuel's information
utility. If this argument is not specified, `sys.argv[1:]` will
be used.
|
[
"Entry",
"point",
"for",
"fuel",
"-",
"info",
"script",
"."
] |
1d6292dc25e3a115544237e392e61bff6631d23c
|
https://github.com/mila-iqia/fuel/blob/1d6292dc25e3a115544237e392e61bff6631d23c/fuel/bin/fuel_info.py#L22-L51
|
11,951
|
mila-iqia/fuel
|
fuel/converters/caltech101_silhouettes.py
|
convert_silhouettes
|
def convert_silhouettes(size, directory, output_directory,
output_filename=None):
""" Convert the CalTech 101 Silhouettes Datasets.
Parameters
----------
size : {16, 28}
Convert either the 16x16 or 28x28 sized version of the dataset.
directory : str
Directory in which the required input files reside.
output_filename : str
Where to save the converted dataset.
"""
if size not in (16, 28):
raise ValueError('size must be 16 or 28')
if output_filename is None:
output_filename = 'caltech101_silhouettes{}.hdf5'.format(size)
output_file = os.path.join(output_directory, output_filename)
input_file = 'caltech101_silhouettes_{}_split1.mat'.format(size)
input_file = os.path.join(directory, input_file)
if not os.path.isfile(input_file):
raise MissingInputFiles('Required files missing', [input_file])
with h5py.File(output_file, mode="w") as h5file:
mat = loadmat(input_file)
train_features = mat['train_data'].reshape([-1, 1, size, size])
train_targets = mat['train_labels']
valid_features = mat['val_data'].reshape([-1, 1, size, size])
valid_targets = mat['val_labels']
test_features = mat['test_data'].reshape([-1, 1, size, size])
test_targets = mat['test_labels']
data = (
('train', 'features', train_features),
('train', 'targets', train_targets),
('valid', 'features', valid_features),
('valid', 'targets', valid_targets),
('test', 'features', test_features),
('test', 'targets', test_targets),
)
fill_hdf5_file(h5file, data)
for i, label in enumerate(('batch', 'channel', 'height', 'width')):
h5file['features'].dims[i].label = label
for i, label in enumerate(('batch', 'index')):
h5file['targets'].dims[i].label = label
return (output_file,)
|
python
|
def convert_silhouettes(size, directory, output_directory,
output_filename=None):
""" Convert the CalTech 101 Silhouettes Datasets.
Parameters
----------
size : {16, 28}
Convert either the 16x16 or 28x28 sized version of the dataset.
directory : str
Directory in which the required input files reside.
output_filename : str
Where to save the converted dataset.
"""
if size not in (16, 28):
raise ValueError('size must be 16 or 28')
if output_filename is None:
output_filename = 'caltech101_silhouettes{}.hdf5'.format(size)
output_file = os.path.join(output_directory, output_filename)
input_file = 'caltech101_silhouettes_{}_split1.mat'.format(size)
input_file = os.path.join(directory, input_file)
if not os.path.isfile(input_file):
raise MissingInputFiles('Required files missing', [input_file])
with h5py.File(output_file, mode="w") as h5file:
mat = loadmat(input_file)
train_features = mat['train_data'].reshape([-1, 1, size, size])
train_targets = mat['train_labels']
valid_features = mat['val_data'].reshape([-1, 1, size, size])
valid_targets = mat['val_labels']
test_features = mat['test_data'].reshape([-1, 1, size, size])
test_targets = mat['test_labels']
data = (
('train', 'features', train_features),
('train', 'targets', train_targets),
('valid', 'features', valid_features),
('valid', 'targets', valid_targets),
('test', 'features', test_features),
('test', 'targets', test_targets),
)
fill_hdf5_file(h5file, data)
for i, label in enumerate(('batch', 'channel', 'height', 'width')):
h5file['features'].dims[i].label = label
for i, label in enumerate(('batch', 'index')):
h5file['targets'].dims[i].label = label
return (output_file,)
|
[
"def",
"convert_silhouettes",
"(",
"size",
",",
"directory",
",",
"output_directory",
",",
"output_filename",
"=",
"None",
")",
":",
"if",
"size",
"not",
"in",
"(",
"16",
",",
"28",
")",
":",
"raise",
"ValueError",
"(",
"'size must be 16 or 28'",
")",
"if",
"output_filename",
"is",
"None",
":",
"output_filename",
"=",
"'caltech101_silhouettes{}.hdf5'",
".",
"format",
"(",
"size",
")",
"output_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"output_directory",
",",
"output_filename",
")",
"input_file",
"=",
"'caltech101_silhouettes_{}_split1.mat'",
".",
"format",
"(",
"size",
")",
"input_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"directory",
",",
"input_file",
")",
"if",
"not",
"os",
".",
"path",
".",
"isfile",
"(",
"input_file",
")",
":",
"raise",
"MissingInputFiles",
"(",
"'Required files missing'",
",",
"[",
"input_file",
"]",
")",
"with",
"h5py",
".",
"File",
"(",
"output_file",
",",
"mode",
"=",
"\"w\"",
")",
"as",
"h5file",
":",
"mat",
"=",
"loadmat",
"(",
"input_file",
")",
"train_features",
"=",
"mat",
"[",
"'train_data'",
"]",
".",
"reshape",
"(",
"[",
"-",
"1",
",",
"1",
",",
"size",
",",
"size",
"]",
")",
"train_targets",
"=",
"mat",
"[",
"'train_labels'",
"]",
"valid_features",
"=",
"mat",
"[",
"'val_data'",
"]",
".",
"reshape",
"(",
"[",
"-",
"1",
",",
"1",
",",
"size",
",",
"size",
"]",
")",
"valid_targets",
"=",
"mat",
"[",
"'val_labels'",
"]",
"test_features",
"=",
"mat",
"[",
"'test_data'",
"]",
".",
"reshape",
"(",
"[",
"-",
"1",
",",
"1",
",",
"size",
",",
"size",
"]",
")",
"test_targets",
"=",
"mat",
"[",
"'test_labels'",
"]",
"data",
"=",
"(",
"(",
"'train'",
",",
"'features'",
",",
"train_features",
")",
",",
"(",
"'train'",
",",
"'targets'",
",",
"train_targets",
")",
",",
"(",
"'valid'",
",",
"'features'",
",",
"valid_features",
")",
",",
"(",
"'valid'",
",",
"'targets'",
",",
"valid_targets",
")",
",",
"(",
"'test'",
",",
"'features'",
",",
"test_features",
")",
",",
"(",
"'test'",
",",
"'targets'",
",",
"test_targets",
")",
",",
")",
"fill_hdf5_file",
"(",
"h5file",
",",
"data",
")",
"for",
"i",
",",
"label",
"in",
"enumerate",
"(",
"(",
"'batch'",
",",
"'channel'",
",",
"'height'",
",",
"'width'",
")",
")",
":",
"h5file",
"[",
"'features'",
"]",
".",
"dims",
"[",
"i",
"]",
".",
"label",
"=",
"label",
"for",
"i",
",",
"label",
"in",
"enumerate",
"(",
"(",
"'batch'",
",",
"'index'",
")",
")",
":",
"h5file",
"[",
"'targets'",
"]",
".",
"dims",
"[",
"i",
"]",
".",
"label",
"=",
"label",
"return",
"(",
"output_file",
",",
")"
] |
Convert the CalTech 101 Silhouettes Datasets.
Parameters
----------
size : {16, 28}
Convert either the 16x16 or 28x28 sized version of the dataset.
directory : str
Directory in which the required input files reside.
output_filename : str
Where to save the converted dataset.
|
[
"Convert",
"the",
"CalTech",
"101",
"Silhouettes",
"Datasets",
"."
] |
1d6292dc25e3a115544237e392e61bff6631d23c
|
https://github.com/mila-iqia/fuel/blob/1d6292dc25e3a115544237e392e61bff6631d23c/fuel/converters/caltech101_silhouettes.py#L9-L61
|
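The `(split, source, array)` tuples handed to `fill_hdf5_file` above are the whole contract: each tuple contributes one source of one split, and the helper lays out the HDF5 datasets and split metadata. A toy sketch with synthetic arrays is shown below; the `fuel.converters.base` import path and the tiny shapes are assumptions, not part of this record.

# Toy illustration of the fill_hdf5_file call pattern used above.
import h5py
import numpy

from fuel.converters.base import fill_hdf5_file  # assumed import path

train_x = numpy.zeros((6, 1, 16, 16), dtype='uint8')
train_y = numpy.arange(6, dtype='uint8').reshape(6, 1)
test_x = numpy.ones((2, 1, 16, 16), dtype='uint8')
test_y = numpy.zeros((2, 1), dtype='uint8')

with h5py.File('toy_silhouettes.hdf5', mode='w') as h5file:
    data = (
        ('train', 'features', train_x),
        ('train', 'targets', train_y),
        ('test', 'features', test_x),
        ('test', 'targets', test_y),
    )
    fill_hdf5_file(h5file, data)
    # Axis labels mirror the ones convert_silhouettes attaches.
    for i, label in enumerate(('batch', 'channel', 'height', 'width')):
        h5file['features'].dims[i].label = label
    for i, label in enumerate(('batch', 'index')):
        h5file['targets'].dims[i].label = label
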
11,952
|
mila-iqia/fuel
|
fuel/schemes.py
|
cross_validation
|
def cross_validation(scheme_class, num_examples, num_folds, strict=True,
**kwargs):
"""Return pairs of schemes to be used for cross-validation.
Parameters
----------
scheme_class : subclass of :class:`IndexScheme` or :class:`BatchScheme`
The type of the returned schemes. The constructor is called with an
iterator and `**kwargs` as arguments.
num_examples : int
The number of examples in the datastream.
num_folds : int
The number of folds to return.
strict : bool, optional
If `True`, enforce that `num_examples` is divisible by `num_folds`
and so, that all validation sets have the same size. If `False`,
the size of the validation set is returned along the iteration
schemes. Defaults to `True`.
Yields
------
fold : tuple
The generator returns `num_folds` tuples. The first two elements of
the tuple are the training and validation iteration schemes. If
`strict` is set to `False`, the tuple has a third element
corresponding to the size of the validation set.
"""
if strict and num_examples % num_folds != 0:
raise ValueError(("{} examples are not divisible in {} evenly-sized " +
"folds. To allow this, have a look at the " +
"`strict` argument.").format(num_examples,
num_folds))
for i in xrange(num_folds):
begin = num_examples * i // num_folds
end = num_examples * (i+1) // num_folds
train = scheme_class(list(chain(xrange(0, begin),
xrange(end, num_examples))),
**kwargs)
valid = scheme_class(xrange(begin, end), **kwargs)
if strict:
yield (train, valid)
else:
yield (train, valid, end - begin)
|
python
|
def cross_validation(scheme_class, num_examples, num_folds, strict=True,
**kwargs):
"""Return pairs of schemes to be used for cross-validation.
Parameters
----------
scheme_class : subclass of :class:`IndexScheme` or :class:`BatchScheme`
The type of the returned schemes. The constructor is called with an
iterator and `**kwargs` as arguments.
num_examples : int
The number of examples in the datastream.
num_folds : int
The number of folds to return.
strict : bool, optional
If `True`, enforce that `num_examples` is divisible by `num_folds`
and so, that all validation sets have the same size. If `False`,
the size of the validation set is returned along the iteration
schemes. Defaults to `True`.
Yields
------
fold : tuple
The generator returns `num_folds` tuples. The first two elements of
the tuple are the training and validation iteration schemes. If
`strict` is set to `False`, the tuple has a third element
corresponding to the size of the validation set.
"""
if strict and num_examples % num_folds != 0:
raise ValueError(("{} examples are not divisible in {} evenly-sized " +
"folds. To allow this, have a look at the " +
"`strict` argument.").format(num_examples,
num_folds))
for i in xrange(num_folds):
begin = num_examples * i // num_folds
end = num_examples * (i+1) // num_folds
train = scheme_class(list(chain(xrange(0, begin),
xrange(end, num_examples))),
**kwargs)
valid = scheme_class(xrange(begin, end), **kwargs)
if strict:
yield (train, valid)
else:
yield (train, valid, end - begin)
|
[
"def",
"cross_validation",
"(",
"scheme_class",
",",
"num_examples",
",",
"num_folds",
",",
"strict",
"=",
"True",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"strict",
"and",
"num_examples",
"%",
"num_folds",
"!=",
"0",
":",
"raise",
"ValueError",
"(",
"(",
"\"{} examples are not divisible in {} evenly-sized \"",
"+",
"\"folds. To allow this, have a look at the \"",
"+",
"\"`strict` argument.\"",
")",
".",
"format",
"(",
"num_examples",
",",
"num_folds",
")",
")",
"for",
"i",
"in",
"xrange",
"(",
"num_folds",
")",
":",
"begin",
"=",
"num_examples",
"*",
"i",
"//",
"num_folds",
"end",
"=",
"num_examples",
"*",
"(",
"i",
"+",
"1",
")",
"//",
"num_folds",
"train",
"=",
"scheme_class",
"(",
"list",
"(",
"chain",
"(",
"xrange",
"(",
"0",
",",
"begin",
")",
",",
"xrange",
"(",
"end",
",",
"num_examples",
")",
")",
")",
",",
"*",
"*",
"kwargs",
")",
"valid",
"=",
"scheme_class",
"(",
"xrange",
"(",
"begin",
",",
"end",
")",
",",
"*",
"*",
"kwargs",
")",
"if",
"strict",
":",
"yield",
"(",
"train",
",",
"valid",
")",
"else",
":",
"yield",
"(",
"train",
",",
"valid",
",",
"end",
"-",
"begin",
")"
] |
Return pairs of schemes to be used for cross-validation.
Parameters
----------
scheme_class : subclass of :class:`IndexScheme` or :class:`BatchScheme`
The type of the returned schemes. The constructor is called with an
iterator and `**kwargs` as arguments.
num_examples : int
The number of examples in the datastream.
num_folds : int
The number of folds to return.
strict : bool, optional
If `True`, enforce that `num_examples` is divisible by `num_folds`
and so, that all validation sets have the same size. If `False`,
the size of the validation set is returned along the iteration
schemes. Defaults to `True`.
Yields
------
fold : tuple
The generator returns `num_folds` tuples. The first two elements of
the tuple are the training and validation iteration schemes. If
`strict` is set to `False`, the tuple has a third element
corresponding to the size of the validation set.
|
[
"Return",
"pairs",
"of",
"schemes",
"to",
"be",
"used",
"for",
"cross",
"-",
"validation",
"."
] |
1d6292dc25e3a115544237e392e61bff6631d23c
|
https://github.com/mila-iqia/fuel/blob/1d6292dc25e3a115544237e392e61bff6631d23c/fuel/schemes.py#L260-L305
|
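Since `cross_validation` is a generator over scheme pairs, a short usage sketch may help. `ShuffledScheme` and its `batch_size` keyword (forwarded through `**kwargs`) are assumptions about a typical scheme class from the same module, and the example sizes are made up.

# Hypothetical use of cross_validation with a Fuel batch scheme.
from fuel.schemes import ShuffledScheme, cross_validation

folds = cross_validation(ShuffledScheme, num_examples=100, num_folds=5,
                         batch_size=10)
for fold_index, (train_scheme, valid_scheme) in enumerate(folds):
    # Each scheme yields batches of example indices; just count them here.
    n_train = sum(len(batch) for batch in train_scheme.get_request_iterator())
    n_valid = sum(len(batch) for batch in valid_scheme.get_request_iterator())
    print(fold_index, n_train, n_valid)  # expected: 80 train / 20 valid
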
11,953
|
mila-iqia/fuel
|
fuel/bin/fuel_convert.py
|
main
|
def main(args=None):
"""Entry point for `fuel-convert` script.
This function can also be imported and used from Python.
Parameters
----------
args : iterable, optional (default: None)
A list of arguments that will be passed to Fuel's conversion
utility. If this argument is not specified, `sys.argv[1:]` will
be used.
"""
built_in_datasets = dict(converters.all_converters)
if fuel.config.extra_converters:
for name in fuel.config.extra_converters:
extra_datasets = dict(
importlib.import_module(name).all_converters)
if any(key in built_in_datasets for key in extra_datasets.keys()):
raise ValueError('extra converters conflict in name with '
'built-in converters')
built_in_datasets.update(extra_datasets)
parser = argparse.ArgumentParser(
description='Conversion script for built-in datasets.')
subparsers = parser.add_subparsers()
parent_parser = argparse.ArgumentParser(add_help=False)
parent_parser.add_argument(
"-d", "--directory", help="directory in which input files reside",
type=str, default=os.getcwd())
convert_functions = {}
for name, fill_subparser in built_in_datasets.items():
subparser = subparsers.add_parser(
name, parents=[parent_parser],
help='Convert the {} dataset'.format(name))
subparser.add_argument(
"-o", "--output-directory", help="where to save the dataset",
type=str, default=os.getcwd(), action=CheckDirectoryAction)
subparser.add_argument(
"-r", "--output_filename", help="new name of the created dataset",
type=str, default=None)
# Allows the parser to know which subparser was called.
subparser.set_defaults(which_=name)
convert_functions[name] = fill_subparser(subparser)
args = parser.parse_args(args)
args_dict = vars(args)
if args_dict['output_filename'] is not None and\
os.path.splitext(args_dict['output_filename'])[1] not in\
('.hdf5', '.hdf', '.h5'):
args_dict['output_filename'] += '.hdf5'
if args_dict['output_filename'] is None:
args_dict.pop('output_filename')
convert_function = convert_functions[args_dict.pop('which_')]
try:
output_paths = convert_function(**args_dict)
except MissingInputFiles as e:
intro = "The following required files were not found:\n"
message = "\n".join([intro] + [" * " + f for f in e.filenames])
message += "\n\nDid you forget to run fuel-download?"
parser.error(message)
# Tag the newly-created file(s) with H5PYDataset version and command-line
# options
for output_path in output_paths:
h5file = h5py.File(output_path, 'a')
interface_version = H5PYDataset.interface_version.encode('utf-8')
h5file.attrs['h5py_interface_version'] = interface_version
fuel_convert_version = converters.__version__.encode('utf-8')
h5file.attrs['fuel_convert_version'] = fuel_convert_version
command = [os.path.basename(sys.argv[0])] + sys.argv[1:]
h5file.attrs['fuel_convert_command'] = (
' '.join(command).encode('utf-8'))
h5file.flush()
h5file.close()
|
python
|
def main(args=None):
"""Entry point for `fuel-convert` script.
This function can also be imported and used from Python.
Parameters
----------
args : iterable, optional (default: None)
A list of arguments that will be passed to Fuel's conversion
utility. If this argument is not specified, `sys.argv[1:]` will
be used.
"""
built_in_datasets = dict(converters.all_converters)
if fuel.config.extra_converters:
for name in fuel.config.extra_converters:
extra_datasets = dict(
importlib.import_module(name).all_converters)
if any(key in built_in_datasets for key in extra_datasets.keys()):
raise ValueError('extra converters conflict in name with '
'built-in converters')
built_in_datasets.update(extra_datasets)
parser = argparse.ArgumentParser(
description='Conversion script for built-in datasets.')
subparsers = parser.add_subparsers()
parent_parser = argparse.ArgumentParser(add_help=False)
parent_parser.add_argument(
"-d", "--directory", help="directory in which input files reside",
type=str, default=os.getcwd())
convert_functions = {}
for name, fill_subparser in built_in_datasets.items():
subparser = subparsers.add_parser(
name, parents=[parent_parser],
help='Convert the {} dataset'.format(name))
subparser.add_argument(
"-o", "--output-directory", help="where to save the dataset",
type=str, default=os.getcwd(), action=CheckDirectoryAction)
subparser.add_argument(
"-r", "--output_filename", help="new name of the created dataset",
type=str, default=None)
# Allows the parser to know which subparser was called.
subparser.set_defaults(which_=name)
convert_functions[name] = fill_subparser(subparser)
args = parser.parse_args(args)
args_dict = vars(args)
if args_dict['output_filename'] is not None and\
os.path.splitext(args_dict['output_filename'])[1] not in\
('.hdf5', '.hdf', '.h5'):
args_dict['output_filename'] += '.hdf5'
if args_dict['output_filename'] is None:
args_dict.pop('output_filename')
convert_function = convert_functions[args_dict.pop('which_')]
try:
output_paths = convert_function(**args_dict)
except MissingInputFiles as e:
intro = "The following required files were not found:\n"
message = "\n".join([intro] + [" * " + f for f in e.filenames])
message += "\n\nDid you forget to run fuel-download?"
parser.error(message)
# Tag the newly-created file(s) with H5PYDataset version and command-line
# options
for output_path in output_paths:
h5file = h5py.File(output_path, 'a')
interface_version = H5PYDataset.interface_version.encode('utf-8')
h5file.attrs['h5py_interface_version'] = interface_version
fuel_convert_version = converters.__version__.encode('utf-8')
h5file.attrs['fuel_convert_version'] = fuel_convert_version
command = [os.path.basename(sys.argv[0])] + sys.argv[1:]
h5file.attrs['fuel_convert_command'] = (
' '.join(command).encode('utf-8'))
h5file.flush()
h5file.close()
|
[
"def",
"main",
"(",
"args",
"=",
"None",
")",
":",
"built_in_datasets",
"=",
"dict",
"(",
"converters",
".",
"all_converters",
")",
"if",
"fuel",
".",
"config",
".",
"extra_converters",
":",
"for",
"name",
"in",
"fuel",
".",
"config",
".",
"extra_converters",
":",
"extra_datasets",
"=",
"dict",
"(",
"importlib",
".",
"import_module",
"(",
"name",
")",
".",
"all_converters",
")",
"if",
"any",
"(",
"key",
"in",
"built_in_datasets",
"for",
"key",
"in",
"extra_datasets",
".",
"keys",
"(",
")",
")",
":",
"raise",
"ValueError",
"(",
"'extra converters conflict in name with '",
"'built-in converters'",
")",
"built_in_datasets",
".",
"update",
"(",
"extra_datasets",
")",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
"description",
"=",
"'Conversion script for built-in datasets.'",
")",
"subparsers",
"=",
"parser",
".",
"add_subparsers",
"(",
")",
"parent_parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
"add_help",
"=",
"False",
")",
"parent_parser",
".",
"add_argument",
"(",
"\"-d\"",
",",
"\"--directory\"",
",",
"help",
"=",
"\"directory in which input files reside\"",
",",
"type",
"=",
"str",
",",
"default",
"=",
"os",
".",
"getcwd",
"(",
")",
")",
"convert_functions",
"=",
"{",
"}",
"for",
"name",
",",
"fill_subparser",
"in",
"built_in_datasets",
".",
"items",
"(",
")",
":",
"subparser",
"=",
"subparsers",
".",
"add_parser",
"(",
"name",
",",
"parents",
"=",
"[",
"parent_parser",
"]",
",",
"help",
"=",
"'Convert the {} dataset'",
".",
"format",
"(",
"name",
")",
")",
"subparser",
".",
"add_argument",
"(",
"\"-o\"",
",",
"\"--output-directory\"",
",",
"help",
"=",
"\"where to save the dataset\"",
",",
"type",
"=",
"str",
",",
"default",
"=",
"os",
".",
"getcwd",
"(",
")",
",",
"action",
"=",
"CheckDirectoryAction",
")",
"subparser",
".",
"add_argument",
"(",
"\"-r\"",
",",
"\"--output_filename\"",
",",
"help",
"=",
"\"new name of the created dataset\"",
",",
"type",
"=",
"str",
",",
"default",
"=",
"None",
")",
"# Allows the parser to know which subparser was called.",
"subparser",
".",
"set_defaults",
"(",
"which_",
"=",
"name",
")",
"convert_functions",
"[",
"name",
"]",
"=",
"fill_subparser",
"(",
"subparser",
")",
"args",
"=",
"parser",
".",
"parse_args",
"(",
"args",
")",
"args_dict",
"=",
"vars",
"(",
"args",
")",
"if",
"args_dict",
"[",
"'output_filename'",
"]",
"is",
"not",
"None",
"and",
"os",
".",
"path",
".",
"splitext",
"(",
"args_dict",
"[",
"'output_filename'",
"]",
")",
"[",
"1",
"]",
"not",
"in",
"(",
"'.hdf5'",
",",
"'.hdf'",
",",
"'.h5'",
")",
":",
"args_dict",
"[",
"'output_filename'",
"]",
"+=",
"'.hdf5'",
"if",
"args_dict",
"[",
"'output_filename'",
"]",
"is",
"None",
":",
"args_dict",
".",
"pop",
"(",
"'output_filename'",
")",
"convert_function",
"=",
"convert_functions",
"[",
"args_dict",
".",
"pop",
"(",
"'which_'",
")",
"]",
"try",
":",
"output_paths",
"=",
"convert_function",
"(",
"*",
"*",
"args_dict",
")",
"except",
"MissingInputFiles",
"as",
"e",
":",
"intro",
"=",
"\"The following required files were not found:\\n\"",
"message",
"=",
"\"\\n\"",
".",
"join",
"(",
"[",
"intro",
"]",
"+",
"[",
"\" * \"",
"+",
"f",
"for",
"f",
"in",
"e",
".",
"filenames",
"]",
")",
"message",
"+=",
"\"\\n\\nDid you forget to run fuel-download?\"",
"parser",
".",
"error",
"(",
"message",
")",
"# Tag the newly-created file(s) with H5PYDataset version and command-line",
"# options",
"for",
"output_path",
"in",
"output_paths",
":",
"h5file",
"=",
"h5py",
".",
"File",
"(",
"output_path",
",",
"'a'",
")",
"interface_version",
"=",
"H5PYDataset",
".",
"interface_version",
".",
"encode",
"(",
"'utf-8'",
")",
"h5file",
".",
"attrs",
"[",
"'h5py_interface_version'",
"]",
"=",
"interface_version",
"fuel_convert_version",
"=",
"converters",
".",
"__version__",
".",
"encode",
"(",
"'utf-8'",
")",
"h5file",
".",
"attrs",
"[",
"'fuel_convert_version'",
"]",
"=",
"fuel_convert_version",
"command",
"=",
"[",
"os",
".",
"path",
".",
"basename",
"(",
"sys",
".",
"argv",
"[",
"0",
"]",
")",
"]",
"+",
"sys",
".",
"argv",
"[",
"1",
":",
"]",
"h5file",
".",
"attrs",
"[",
"'fuel_convert_command'",
"]",
"=",
"(",
"' '",
".",
"join",
"(",
"command",
")",
".",
"encode",
"(",
"'utf-8'",
")",
")",
"h5file",
".",
"flush",
"(",
")",
"h5file",
".",
"close",
"(",
")"
] |
Entry point for `fuel-convert` script.
This function can also be imported and used from Python.
Parameters
----------
args : iterable, optional (default: None)
A list of arguments that will be passed to Fuel's conversion
utility. If this argument is not specified, `sys.argv[1:]` will
be used.
|
[
"Entry",
"point",
"for",
"fuel",
"-",
"convert",
"script",
"."
] |
1d6292dc25e3a115544237e392e61bff6631d23c
|
https://github.com/mila-iqia/fuel/blob/1d6292dc25e3a115544237e392e61bff6631d23c/fuel/bin/fuel_convert.py#L24-L98
|
11,954
|
mila-iqia/fuel
|
fuel/utils/lock.py
|
refresh_lock
|
def refresh_lock(lock_file):
"""'Refresh' an existing lock.
'Refresh' an existing lock by re-writing the file containing the
owner's unique id, using a new (randomly generated) id, which is also
returned.
"""
unique_id = '%s_%s_%s' % (
os.getpid(),
''.join([str(random.randint(0, 9)) for i in range(10)]), hostname)
try:
lock_write = open(lock_file, 'w')
lock_write.write(unique_id + '\n')
lock_write.close()
except Exception:
        # In some strange cases, this happens. To prevent all tests
# from failing, we release the lock, but as there is a
# problem, we still keep the original exception.
# This way, only 1 test would fail.
while get_lock.n_lock > 0:
release_lock()
raise
return unique_id
|
python
|
def refresh_lock(lock_file):
"""'Refresh' an existing lock.
'Refresh' an existing lock by re-writing the file containing the
owner's unique id, using a new (randomly generated) id, which is also
returned.
"""
unique_id = '%s_%s_%s' % (
os.getpid(),
''.join([str(random.randint(0, 9)) for i in range(10)]), hostname)
try:
lock_write = open(lock_file, 'w')
lock_write.write(unique_id + '\n')
lock_write.close()
except Exception:
        # In some strange cases, this happens. To prevent all tests
# from failing, we release the lock, but as there is a
# problem, we still keep the original exception.
# This way, only 1 test would fail.
while get_lock.n_lock > 0:
release_lock()
raise
return unique_id
|
[
"def",
"refresh_lock",
"(",
"lock_file",
")",
":",
"unique_id",
"=",
"'%s_%s_%s'",
"%",
"(",
"os",
".",
"getpid",
"(",
")",
",",
"''",
".",
"join",
"(",
"[",
"str",
"(",
"random",
".",
"randint",
"(",
"0",
",",
"9",
")",
")",
"for",
"i",
"in",
"range",
"(",
"10",
")",
"]",
")",
",",
"hostname",
")",
"try",
":",
"lock_write",
"=",
"open",
"(",
"lock_file",
",",
"'w'",
")",
"lock_write",
".",
"write",
"(",
"unique_id",
"+",
"'\\n'",
")",
"lock_write",
".",
"close",
"(",
")",
"except",
"Exception",
":",
"# In some strange case, this happen. To prevent all tests",
"# from failing, we release the lock, but as there is a",
"# problem, we still keep the original exception.",
"# This way, only 1 test would fail.",
"while",
"get_lock",
".",
"n_lock",
">",
"0",
":",
"release_lock",
"(",
")",
"raise",
"return",
"unique_id"
] |
'Refresh' an existing lock.
'Refresh' an existing lock by re-writing the file containing the
owner's unique id, using a new (randomly generated) id, which is also
returned.
|
[
"Refresh",
"an",
"existing",
"lock",
"."
] |
1d6292dc25e3a115544237e392e61bff6631d23c
|
https://github.com/mila-iqia/fuel/blob/1d6292dc25e3a115544237e392e61bff6631d23c/fuel/utils/lock.py#L95-L118
|
11,955
|
mila-iqia/fuel
|
fuel/utils/lock.py
|
get_lock
|
def get_lock(lock_dir, **kw):
"""Obtain lock on compilation directory.
Parameters
----------
lock_dir : str
Lock directory.
kw : dict
Additional arguments to be forwarded to the `lock` function when
acquiring the lock.
Notes
-----
We can lock only on 1 directory at a time.
"""
if not hasattr(get_lock, 'n_lock'):
# Initialization.
get_lock.n_lock = 0
if not hasattr(get_lock, 'lock_is_enabled'):
# Enable lock by default.
get_lock.lock_is_enabled = True
get_lock.lock_dir = lock_dir
get_lock.unlocker = Unlocker(get_lock.lock_dir)
else:
if lock_dir != get_lock.lock_dir:
# Compilation directory has changed.
# First ensure all old locks were released.
assert get_lock.n_lock == 0
# Update members for new compilation directory.
get_lock.lock_dir = lock_dir
get_lock.unlocker = Unlocker(get_lock.lock_dir)
if get_lock.lock_is_enabled:
# Only really try to acquire the lock if we do not have it already.
if get_lock.n_lock == 0:
lock(get_lock.lock_dir, **kw)
atexit.register(Unlocker.unlock, get_lock.unlocker)
# Store time at which the lock was set.
get_lock.start_time = time.time()
else:
# Check whether we need to 'refresh' the lock. We do this
# every 'config.compile.timeout / 2' seconds to ensure
# no one else tries to override our lock after their
# 'config.compile.timeout' timeout period.
if get_lock.start_time is None:
                # This should not happen. So if this happens, clean up
# the lock state and raise an error.
while get_lock.n_lock > 0:
release_lock()
                raise Exception(
                    "For some unknown reason, the lock was already taken,"
" but no start time was registered.")
now = time.time()
if now - get_lock.start_time > TIMEOUT:
lockpath = os.path.join(get_lock.lock_dir, 'lock')
logger.info('Refreshing lock %s', str(lockpath))
refresh_lock(lockpath)
get_lock.start_time = now
get_lock.n_lock += 1
|
python
|
def get_lock(lock_dir, **kw):
"""Obtain lock on compilation directory.
Parameters
----------
lock_dir : str
Lock directory.
kw : dict
Additional arguments to be forwarded to the `lock` function when
acquiring the lock.
Notes
-----
We can lock only on 1 directory at a time.
"""
if not hasattr(get_lock, 'n_lock'):
# Initialization.
get_lock.n_lock = 0
if not hasattr(get_lock, 'lock_is_enabled'):
# Enable lock by default.
get_lock.lock_is_enabled = True
get_lock.lock_dir = lock_dir
get_lock.unlocker = Unlocker(get_lock.lock_dir)
else:
if lock_dir != get_lock.lock_dir:
# Compilation directory has changed.
# First ensure all old locks were released.
assert get_lock.n_lock == 0
# Update members for new compilation directory.
get_lock.lock_dir = lock_dir
get_lock.unlocker = Unlocker(get_lock.lock_dir)
if get_lock.lock_is_enabled:
# Only really try to acquire the lock if we do not have it already.
if get_lock.n_lock == 0:
lock(get_lock.lock_dir, **kw)
atexit.register(Unlocker.unlock, get_lock.unlocker)
# Store time at which the lock was set.
get_lock.start_time = time.time()
else:
# Check whether we need to 'refresh' the lock. We do this
# every 'config.compile.timeout / 2' seconds to ensure
# no one else tries to override our lock after their
# 'config.compile.timeout' timeout period.
if get_lock.start_time is None:
                # This should not happen. So if this happens, clean up
# the lock state and raise an error.
while get_lock.n_lock > 0:
release_lock()
                raise Exception(
                    "For some unknown reason, the lock was already taken,"
" but no start time was registered.")
now = time.time()
if now - get_lock.start_time > TIMEOUT:
lockpath = os.path.join(get_lock.lock_dir, 'lock')
logger.info('Refreshing lock %s', str(lockpath))
refresh_lock(lockpath)
get_lock.start_time = now
get_lock.n_lock += 1
|
[
"def",
"get_lock",
"(",
"lock_dir",
",",
"*",
"*",
"kw",
")",
":",
"if",
"not",
"hasattr",
"(",
"get_lock",
",",
"'n_lock'",
")",
":",
"# Initialization.",
"get_lock",
".",
"n_lock",
"=",
"0",
"if",
"not",
"hasattr",
"(",
"get_lock",
",",
"'lock_is_enabled'",
")",
":",
"# Enable lock by default.",
"get_lock",
".",
"lock_is_enabled",
"=",
"True",
"get_lock",
".",
"lock_dir",
"=",
"lock_dir",
"get_lock",
".",
"unlocker",
"=",
"Unlocker",
"(",
"get_lock",
".",
"lock_dir",
")",
"else",
":",
"if",
"lock_dir",
"!=",
"get_lock",
".",
"lock_dir",
":",
"# Compilation directory has changed.",
"# First ensure all old locks were released.",
"assert",
"get_lock",
".",
"n_lock",
"==",
"0",
"# Update members for new compilation directory.",
"get_lock",
".",
"lock_dir",
"=",
"lock_dir",
"get_lock",
".",
"unlocker",
"=",
"Unlocker",
"(",
"get_lock",
".",
"lock_dir",
")",
"if",
"get_lock",
".",
"lock_is_enabled",
":",
"# Only really try to acquire the lock if we do not have it already.",
"if",
"get_lock",
".",
"n_lock",
"==",
"0",
":",
"lock",
"(",
"get_lock",
".",
"lock_dir",
",",
"*",
"*",
"kw",
")",
"atexit",
".",
"register",
"(",
"Unlocker",
".",
"unlock",
",",
"get_lock",
".",
"unlocker",
")",
"# Store time at which the lock was set.",
"get_lock",
".",
"start_time",
"=",
"time",
".",
"time",
"(",
")",
"else",
":",
"# Check whether we need to 'refresh' the lock. We do this",
"# every 'config.compile.timeout / 2' seconds to ensure",
"# no one else tries to override our lock after their",
"# 'config.compile.timeout' timeout period.",
"if",
"get_lock",
".",
"start_time",
"is",
"None",
":",
"# This should not happen. So if this happen, clean up",
"# the lock state and raise an error.",
"while",
"get_lock",
".",
"n_lock",
">",
"0",
":",
"release_lock",
"(",
")",
"raise",
"Exception",
"(",
"\"For some unknow reason, the lock was already taken,\"",
"\" but no start time was registered.\"",
")",
"now",
"=",
"time",
".",
"time",
"(",
")",
"if",
"now",
"-",
"get_lock",
".",
"start_time",
">",
"TIMEOUT",
":",
"lockpath",
"=",
"os",
".",
"path",
".",
"join",
"(",
"get_lock",
".",
"lock_dir",
",",
"'lock'",
")",
"logger",
".",
"info",
"(",
"'Refreshing lock %s'",
",",
"str",
"(",
"lockpath",
")",
")",
"refresh_lock",
"(",
"lockpath",
")",
"get_lock",
".",
"start_time",
"=",
"now",
"get_lock",
".",
"n_lock",
"+=",
"1"
] |
Obtain lock on compilation directory.
Parameters
----------
lock_dir : str
Lock directory.
kw : dict
Additional arguments to be forwarded to the `lock` function when
acquiring the lock.
Notes
-----
We can lock only on 1 directory at a time.
|
[
"Obtain",
"lock",
"on",
"compilation",
"directory",
"."
] |
1d6292dc25e3a115544237e392e61bff6631d23c
|
https://github.com/mila-iqia/fuel/blob/1d6292dc25e3a115544237e392e61bff6631d23c/fuel/utils/lock.py#L297-L356
|
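A usage sketch pairing get_lock with release_lock (the next record), assuming Fuel is installed and that the underlying lock helper, which is not shown in this record, creates the lock directory as part of acquisition; the path is therefore chosen so that it does not exist yet:

import os
import tempfile

from fuel.utils.lock import get_lock, release_lock

lock_dir = os.path.join(tempfile.mkdtemp(), 'example_lock')  # must not pre-exist
get_lock(lock_dir)   # acquires the lock and registers an atexit unlocker
get_lock(lock_dir)   # re-entrant: only increments get_lock.n_lock
release_lock()       # decrements the counter
release_lock()       # last release removes the lock file and its directory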
11,956
|
mila-iqia/fuel
|
fuel/utils/lock.py
|
release_lock
|
def release_lock():
"""Release lock on compilation directory."""
get_lock.n_lock -= 1
assert get_lock.n_lock >= 0
# Only really release lock once all lock requests have ended.
if get_lock.lock_is_enabled and get_lock.n_lock == 0:
get_lock.start_time = None
get_lock.unlocker.unlock()
|
python
|
def release_lock():
"""Release lock on compilation directory."""
get_lock.n_lock -= 1
assert get_lock.n_lock >= 0
# Only really release lock once all lock requests have ended.
if get_lock.lock_is_enabled and get_lock.n_lock == 0:
get_lock.start_time = None
get_lock.unlocker.unlock()
|
[
"def",
"release_lock",
"(",
")",
":",
"get_lock",
".",
"n_lock",
"-=",
"1",
"assert",
"get_lock",
".",
"n_lock",
">=",
"0",
"# Only really release lock once all lock requests have ended.",
"if",
"get_lock",
".",
"lock_is_enabled",
"and",
"get_lock",
".",
"n_lock",
"==",
"0",
":",
"get_lock",
".",
"start_time",
"=",
"None",
"get_lock",
".",
"unlocker",
".",
"unlock",
"(",
")"
] |
Release lock on compilation directory.
|
[
"Release",
"lock",
"on",
"compilation",
"directory",
"."
] |
1d6292dc25e3a115544237e392e61bff6631d23c
|
https://github.com/mila-iqia/fuel/blob/1d6292dc25e3a115544237e392e61bff6631d23c/fuel/utils/lock.py#L359-L366
|
11,957
|
mila-iqia/fuel
|
fuel/utils/lock.py
|
release_readlock
|
def release_readlock(lockdir_name):
"""Release a previously obtained readlock.
Parameters
----------
lockdir_name : str
Name of the previously obtained readlock
"""
# Make sure the lock still exists before deleting it
if os.path.exists(lockdir_name) and os.path.isdir(lockdir_name):
os.rmdir(lockdir_name)
|
python
|
def release_readlock(lockdir_name):
"""Release a previously obtained readlock.
Parameters
----------
lockdir_name : str
Name of the previously obtained readlock
"""
# Make sure the lock still exists before deleting it
if os.path.exists(lockdir_name) and os.path.isdir(lockdir_name):
os.rmdir(lockdir_name)
|
[
"def",
"release_readlock",
"(",
"lockdir_name",
")",
":",
"# Make sure the lock still exists before deleting it",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"lockdir_name",
")",
"and",
"os",
".",
"path",
".",
"isdir",
"(",
"lockdir_name",
")",
":",
"os",
".",
"rmdir",
"(",
"lockdir_name",
")"
] |
Release a previously obtained readlock.
Parameters
----------
lockdir_name : str
Name of the previously obtained readlock
|
[
"Release",
"a",
"previously",
"obtained",
"readlock",
"."
] |
1d6292dc25e3a115544237e392e61bff6631d23c
|
https://github.com/mila-iqia/fuel/blob/1d6292dc25e3a115544237e392e61bff6631d23c/fuel/utils/lock.py#L392-L403
|
11,958
|
mila-iqia/fuel
|
fuel/utils/lock.py
|
get_readlock
|
def get_readlock(pid, path):
"""Obtain a readlock on a file.
Parameters
----------
path : str
Name of the file on which to obtain a readlock
"""
timestamp = int(time.time() * 1e6)
lockdir_name = "%s.readlock.%i.%i" % (path, pid, timestamp)
os.mkdir(lockdir_name)
# Register function to release the readlock at the end of the script
atexit.register(release_readlock, lockdir_name=lockdir_name)
|
python
|
def get_readlock(pid, path):
"""Obtain a readlock on a file.
Parameters
----------
path : str
Name of the file on which to obtain a readlock
"""
timestamp = int(time.time() * 1e6)
lockdir_name = "%s.readlock.%i.%i" % (path, pid, timestamp)
os.mkdir(lockdir_name)
# Register function to release the readlock at the end of the script
atexit.register(release_readlock, lockdir_name=lockdir_name)
|
[
"def",
"get_readlock",
"(",
"pid",
",",
"path",
")",
":",
"timestamp",
"=",
"int",
"(",
"time",
".",
"time",
"(",
")",
"*",
"1e6",
")",
"lockdir_name",
"=",
"\"%s.readlock.%i.%i\"",
"%",
"(",
"path",
",",
"pid",
",",
"timestamp",
")",
"os",
".",
"mkdir",
"(",
"lockdir_name",
")",
"# Register function to release the readlock at the end of the script",
"atexit",
".",
"register",
"(",
"release_readlock",
",",
"lockdir_name",
"=",
"lockdir_name",
")"
] |
Obtain a readlock on a file.
Parameters
----------
path : str
Name of the file on which to obtain a readlock
|
[
"Obtain",
"a",
"readlock",
"on",
"a",
"file",
"."
] |
1d6292dc25e3a115544237e392e61bff6631d23c
|
https://github.com/mila-iqia/fuel/blob/1d6292dc25e3a115544237e392e61bff6631d23c/fuel/utils/lock.py#L406-L420
|
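A sketch combining get_readlock with release_readlock from the previous record, assuming Fuel is installed; the data file path is hypothetical, and the readlock is simply an empty directory named after the file, the pid and a timestamp:

import glob
import os
import tempfile

from fuel.utils.lock import get_readlock, release_readlock

data_file = os.path.join(tempfile.mkdtemp(), 'dataset.hdf5')
get_readlock(os.getpid(), data_file)  # creates <data_file>.readlock.<pid>.<timestamp>
# ... read from the file ...
# The readlock is removed automatically at interpreter exit, but it can also be
# released explicitly by locating the lock directory:
for lockdir in glob.glob(data_file + '.readlock.*'):
    release_readlock(lockdir)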
11,959
|
mila-iqia/fuel
|
fuel/utils/lock.py
|
Unlocker.unlock
|
def unlock(self):
"""Remove current lock.
This function does not crash if it is unable to properly
delete the lock file and directory. The reason is that it
should be allowed for multiple jobs running in parallel to
unlock the same directory at the same time (e.g. when reaching
their timeout limit).
"""
# If any error occurs, we assume this is because someone else tried to
# unlock this directory at the same time.
# Note that it is important not to have both remove statements within
# the same try/except block. The reason is that while the attempt to
# remove the file may fail (e.g. because for some reason this file does
# not exist), we still want to try and remove the directory.
try:
self.os.remove(self.os.path.join(self.tmp_dir, 'lock'))
except Exception:
pass
try:
self.os.rmdir(self.tmp_dir)
except Exception:
pass
|
python
|
def unlock(self):
"""Remove current lock.
This function does not crash if it is unable to properly
delete the lock file and directory. The reason is that it
should be allowed for multiple jobs running in parallel to
unlock the same directory at the same time (e.g. when reaching
their timeout limit).
"""
# If any error occurs, we assume this is because someone else tried to
# unlock this directory at the same time.
# Note that it is important not to have both remove statements within
# the same try/except block. The reason is that while the attempt to
# remove the file may fail (e.g. because for some reason this file does
# not exist), we still want to try and remove the directory.
try:
self.os.remove(self.os.path.join(self.tmp_dir, 'lock'))
except Exception:
pass
try:
self.os.rmdir(self.tmp_dir)
except Exception:
pass
|
[
"def",
"unlock",
"(",
"self",
")",
":",
"# If any error occurs, we assume this is because someone else tried to",
"# unlock this directory at the same time.",
"# Note that it is important not to have both remove statements within",
"# the same try/except block. The reason is that while the attempt to",
"# remove the file may fail (e.g. because for some reason this file does",
"# not exist), we still want to try and remove the directory.",
"try",
":",
"self",
".",
"os",
".",
"remove",
"(",
"self",
".",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"tmp_dir",
",",
"'lock'",
")",
")",
"except",
"Exception",
":",
"pass",
"try",
":",
"self",
".",
"os",
".",
"rmdir",
"(",
"self",
".",
"tmp_dir",
")",
"except",
"Exception",
":",
"pass"
] |
Remove current lock.
This function does not crash if it is unable to properly
delete the lock file and directory. The reason is that it
should be allowed for multiple jobs running in parallel to
unlock the same directory at the same time (e.g. when reaching
their timeout limit).
|
[
"Remove",
"current",
"lock",
"."
] |
1d6292dc25e3a115544237e392e61bff6631d23c
|
https://github.com/mila-iqia/fuel/blob/1d6292dc25e3a115544237e392e61bff6631d23c/fuel/utils/lock.py#L69-L92
|
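A small sketch of Unlocker.unlock, assuming the constructor only stores the lock directory (as its use inside get_lock above suggests); the path is hypothetical, and because every deletion error is swallowed, calling unlock on a missing directory is a harmless no-op:

from fuel.utils.lock import Unlocker

unlocker = Unlocker('/tmp/hypothetical_lock_dir')  # hypothetical path
unlocker.unlock()  # removes <dir>/lock and the directory if they exist
unlocker.unlock()  # safe to call again: failures are silently ignored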
11,960
|
mila-iqia/fuel
|
fuel/downloaders/base.py
|
filename_from_url
|
def filename_from_url(url, path=None):
"""Parses a URL to determine a file name.
Parameters
----------
url : str
URL to parse.
"""
r = requests.get(url, stream=True)
if 'Content-Disposition' in r.headers:
filename = re.findall(r'filename=([^;]+)',
r.headers['Content-Disposition'])[0].strip('"\"')
else:
filename = os.path.basename(urllib.parse.urlparse(url).path)
return filename
|
python
|
def filename_from_url(url, path=None):
"""Parses a URL to determine a file name.
Parameters
----------
url : str
URL to parse.
"""
r = requests.get(url, stream=True)
if 'Content-Disposition' in r.headers:
filename = re.findall(r'filename=([^;]+)',
r.headers['Content-Disposition'])[0].strip('"\"')
else:
filename = os.path.basename(urllib.parse.urlparse(url).path)
return filename
|
[
"def",
"filename_from_url",
"(",
"url",
",",
"path",
"=",
"None",
")",
":",
"r",
"=",
"requests",
".",
"get",
"(",
"url",
",",
"stream",
"=",
"True",
")",
"if",
"'Content-Disposition'",
"in",
"r",
".",
"headers",
":",
"filename",
"=",
"re",
".",
"findall",
"(",
"r'filename=([^;]+)'",
",",
"r",
".",
"headers",
"[",
"'Content-Disposition'",
"]",
")",
"[",
"0",
"]",
".",
"strip",
"(",
"'\"\\\"'",
")",
"else",
":",
"filename",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"urllib",
".",
"parse",
".",
"urlparse",
"(",
"url",
")",
".",
"path",
")",
"return",
"filename"
] |
Parses a URL to determine a file name.
Parameters
----------
url : str
URL to parse.
|
[
"Parses",
"a",
"URL",
"to",
"determine",
"a",
"file",
"name",
"."
] |
1d6292dc25e3a115544237e392e61bff6631d23c
|
https://github.com/mila-iqia/fuel/blob/1d6292dc25e3a115544237e392e61bff6631d23c/fuel/downloaders/base.py#L39-L54
|
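The fallback branch of filename_from_url is easy to reproduce without any network access; the snippet below mirrors the os.path.basename(urlparse(url).path) logic on one of the binarized MNIST URLs that appears later in this file, which is what the function returns when the response carries no Content-Disposition header:

import os.path
import urllib.parse

url = ('http://www.cs.toronto.edu/~larocheh/public/datasets/'
       'binarized_mnist/binarized_mnist_train.amat')
print(os.path.basename(urllib.parse.urlparse(url).path))
# -> binarized_mnist_train.amat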
11,961
|
mila-iqia/fuel
|
fuel/downloaders/base.py
|
download
|
def download(url, file_handle, chunk_size=1024):
"""Downloads a given URL to a specific file.
Parameters
----------
url : str
URL to download.
file_handle : file
Where to save the downloaded URL.
"""
r = requests.get(url, stream=True)
total_length = r.headers.get('content-length')
if total_length is None:
maxval = UnknownLength
else:
maxval = int(total_length)
name = file_handle.name
with progress_bar(name=name, maxval=maxval) as bar:
for i, chunk in enumerate(r.iter_content(chunk_size)):
if total_length:
bar.update(i * chunk_size)
file_handle.write(chunk)
|
python
|
def download(url, file_handle, chunk_size=1024):
"""Downloads a given URL to a specific file.
Parameters
----------
url : str
URL to download.
file_handle : file
Where to save the downloaded URL.
"""
r = requests.get(url, stream=True)
total_length = r.headers.get('content-length')
if total_length is None:
maxval = UnknownLength
else:
maxval = int(total_length)
name = file_handle.name
with progress_bar(name=name, maxval=maxval) as bar:
for i, chunk in enumerate(r.iter_content(chunk_size)):
if total_length:
bar.update(i * chunk_size)
file_handle.write(chunk)
|
[
"def",
"download",
"(",
"url",
",",
"file_handle",
",",
"chunk_size",
"=",
"1024",
")",
":",
"r",
"=",
"requests",
".",
"get",
"(",
"url",
",",
"stream",
"=",
"True",
")",
"total_length",
"=",
"r",
".",
"headers",
".",
"get",
"(",
"'content-length'",
")",
"if",
"total_length",
"is",
"None",
":",
"maxval",
"=",
"UnknownLength",
"else",
":",
"maxval",
"=",
"int",
"(",
"total_length",
")",
"name",
"=",
"file_handle",
".",
"name",
"with",
"progress_bar",
"(",
"name",
"=",
"name",
",",
"maxval",
"=",
"maxval",
")",
"as",
"bar",
":",
"for",
"i",
",",
"chunk",
"in",
"enumerate",
"(",
"r",
".",
"iter_content",
"(",
"chunk_size",
")",
")",
":",
"if",
"total_length",
":",
"bar",
".",
"update",
"(",
"i",
"*",
"chunk_size",
")",
"file_handle",
".",
"write",
"(",
"chunk",
")"
] |
Downloads a given URL to a specific file.
Parameters
----------
url : str
URL to download.
file_handle : file
Where to save the downloaded URL.
|
[
"Downloads",
"a",
"given",
"URL",
"to",
"a",
"specific",
"file",
"."
] |
1d6292dc25e3a115544237e392e61bff6631d23c
|
https://github.com/mila-iqia/fuel/blob/1d6292dc25e3a115544237e392e61bff6631d23c/fuel/downloaders/base.py#L57-L79
|
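A usage sketch for download, assuming network access and that the binarized MNIST URL reused from the downloader further down is still reachable; the file handle has to be opened in binary mode because the streamed chunks are bytes:

from fuel.downloaders.base import download

url = ('http://www.cs.toronto.edu/~larocheh/public/datasets/'
       'binarized_mnist/binarized_mnist_valid.amat')
with open('binarized_mnist_valid.amat', 'wb') as file_handle:
    download(url, file_handle)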
11,962
|
mila-iqia/fuel
|
fuel/downloaders/base.py
|
default_downloader
|
def default_downloader(directory, urls, filenames, url_prefix=None,
clear=False):
"""Downloads or clears files from URLs and filenames.
Parameters
----------
directory : str
The directory in which downloaded files are saved.
urls : list
A list of URLs to download.
filenames : list
A list of file names for the corresponding URLs.
url_prefix : str, optional
If provided, this is prepended to filenames that
lack a corresponding URL.
clear : bool, optional
If `True`, delete the given filenames from the given
directory rather than download them.
"""
# Parse file names from URL if not provided
for i, url in enumerate(urls):
filename = filenames[i]
if not filename:
filename = filename_from_url(url)
if not filename:
raise ValueError("no filename available for URL '{}'".format(url))
filenames[i] = filename
files = [os.path.join(directory, f) for f in filenames]
if clear:
for f in files:
if os.path.isfile(f):
os.remove(f)
else:
print('Downloading ' + ', '.join(filenames) + '\n')
ensure_directory_exists(directory)
for url, f, n in zip(urls, files, filenames):
if not url:
if url_prefix is None:
raise NeedURLPrefix
url = url_prefix + n
with open(f, 'wb') as file_handle:
download(url, file_handle)
|
python
|
def default_downloader(directory, urls, filenames, url_prefix=None,
clear=False):
"""Downloads or clears files from URLs and filenames.
Parameters
----------
directory : str
The directory in which downloaded files are saved.
urls : list
A list of URLs to download.
filenames : list
A list of file names for the corresponding URLs.
url_prefix : str, optional
If provided, this is prepended to filenames that
lack a corresponding URL.
clear : bool, optional
If `True`, delete the given filenames from the given
directory rather than download them.
"""
# Parse file names from URL if not provided
for i, url in enumerate(urls):
filename = filenames[i]
if not filename:
filename = filename_from_url(url)
if not filename:
raise ValueError("no filename available for URL '{}'".format(url))
filenames[i] = filename
files = [os.path.join(directory, f) for f in filenames]
if clear:
for f in files:
if os.path.isfile(f):
os.remove(f)
else:
print('Downloading ' + ', '.join(filenames) + '\n')
ensure_directory_exists(directory)
for url, f, n in zip(urls, files, filenames):
if not url:
if url_prefix is None:
raise NeedURLPrefix
url = url_prefix + n
with open(f, 'wb') as file_handle:
download(url, file_handle)
|
[
"def",
"default_downloader",
"(",
"directory",
",",
"urls",
",",
"filenames",
",",
"url_prefix",
"=",
"None",
",",
"clear",
"=",
"False",
")",
":",
"# Parse file names from URL if not provided",
"for",
"i",
",",
"url",
"in",
"enumerate",
"(",
"urls",
")",
":",
"filename",
"=",
"filenames",
"[",
"i",
"]",
"if",
"not",
"filename",
":",
"filename",
"=",
"filename_from_url",
"(",
"url",
")",
"if",
"not",
"filename",
":",
"raise",
"ValueError",
"(",
"\"no filename available for URL '{}'\"",
".",
"format",
"(",
"url",
")",
")",
"filenames",
"[",
"i",
"]",
"=",
"filename",
"files",
"=",
"[",
"os",
".",
"path",
".",
"join",
"(",
"directory",
",",
"f",
")",
"for",
"f",
"in",
"filenames",
"]",
"if",
"clear",
":",
"for",
"f",
"in",
"files",
":",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"f",
")",
":",
"os",
".",
"remove",
"(",
"f",
")",
"else",
":",
"print",
"(",
"'Downloading '",
"+",
"', '",
".",
"join",
"(",
"filenames",
")",
"+",
"'\\n'",
")",
"ensure_directory_exists",
"(",
"directory",
")",
"for",
"url",
",",
"f",
",",
"n",
"in",
"zip",
"(",
"urls",
",",
"files",
",",
"filenames",
")",
":",
"if",
"not",
"url",
":",
"if",
"url_prefix",
"is",
"None",
":",
"raise",
"NeedURLPrefix",
"url",
"=",
"url_prefix",
"+",
"n",
"with",
"open",
"(",
"f",
",",
"'wb'",
")",
"as",
"file_handle",
":",
"download",
"(",
"url",
",",
"file_handle",
")"
] |
Downloads or clears files from URLs and filenames.
Parameters
----------
directory : str
The directory in which downloaded files are saved.
urls : list
A list of URLs to download.
filenames : list
A list of file names for the corresponding URLs.
url_prefix : str, optional
If provided, this is prepended to filenames that
lack a corresponding URL.
clear : bool, optional
If `True`, delete the given filenames from the given
directory rather than download them.
|
[
"Downloads",
"or",
"clears",
"files",
"from",
"URLs",
"and",
"filenames",
"."
] |
1d6292dc25e3a115544237e392e61bff6631d23c
|
https://github.com/mila-iqia/fuel/blob/1d6292dc25e3a115544237e392e61bff6631d23c/fuel/downloaders/base.py#L96-L140
|
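A sketch of calling default_downloader directly rather than through the fuel-download command line; it assumes network access, and the URLs and filenames are the binarized MNIST ones that the subparser further down sets as defaults:

import tempfile

from fuel.downloaders.base import default_downloader

sets = ['train', 'valid', 'test']
urls = ['http://www.cs.toronto.edu/~larocheh/public/datasets/'
        'binarized_mnist/binarized_mnist_{}.amat'.format(s) for s in sets]
filenames = ['binarized_mnist_{}.amat'.format(s) for s in sets]
directory = tempfile.mkdtemp()
default_downloader(directory, urls=urls, filenames=filenames)
# Passing clear=True instead would delete those same files from `directory`.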
11,963
|
mila-iqia/fuel
|
fuel/utils/__init__.py
|
find_in_data_path
|
def find_in_data_path(filename):
"""Searches for a file within Fuel's data path.
This function loops over all paths defined in Fuel's data path and
returns the first path in which the file is found.
Parameters
----------
filename : str
Name of the file to find.
Returns
-------
file_path : str
Path to the first file matching `filename` found in Fuel's
data path.
Raises
------
IOError
If the file doesn't appear in Fuel's data path.
"""
for path in config.data_path:
path = os.path.expanduser(os.path.expandvars(path))
file_path = os.path.join(path, filename)
if os.path.isfile(file_path):
return file_path
raise IOError("{} not found in Fuel's data path".format(filename))
|
python
|
def find_in_data_path(filename):
"""Searches for a file within Fuel's data path.
This function loops over all paths defined in Fuel's data path and
returns the first path in which the file is found.
Parameters
----------
filename : str
Name of the file to find.
Returns
-------
file_path : str
Path to the first file matching `filename` found in Fuel's
data path.
Raises
------
IOError
If the file doesn't appear in Fuel's data path.
"""
for path in config.data_path:
path = os.path.expanduser(os.path.expandvars(path))
file_path = os.path.join(path, filename)
if os.path.isfile(file_path):
return file_path
raise IOError("{} not found in Fuel's data path".format(filename))
|
[
"def",
"find_in_data_path",
"(",
"filename",
")",
":",
"for",
"path",
"in",
"config",
".",
"data_path",
":",
"path",
"=",
"os",
".",
"path",
".",
"expanduser",
"(",
"os",
".",
"path",
".",
"expandvars",
"(",
"path",
")",
")",
"file_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"path",
",",
"filename",
")",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"file_path",
")",
":",
"return",
"file_path",
"raise",
"IOError",
"(",
"\"{} not found in Fuel's data path\"",
".",
"format",
"(",
"filename",
")",
")"
] |
Searches for a file within Fuel's data path.
This function loops over all paths defined in Fuel's data path and
returns the first path in which the file is found.
Parameters
----------
filename : str
Name of the file to find.
Returns
-------
file_path : str
Path to the first file matching `filename` found in Fuel's
data path.
Raises
------
IOError
If the file doesn't appear in Fuel's data path.
|
[
"Searches",
"for",
"a",
"file",
"within",
"Fuel",
"s",
"data",
"path",
"."
] |
1d6292dc25e3a115544237e392e61bff6631d23c
|
https://github.com/mila-iqia/fuel/blob/1d6292dc25e3a115544237e392e61bff6631d23c/fuel/utils/__init__.py#L406-L434
|
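A sketch for find_in_data_path, under the assumption that Fuel reads the FUEL_DATA_PATH environment variable when it is first imported (so the variable is set before the import); the file name is hypothetical:

import os
import tempfile

data_dir = tempfile.mkdtemp()
open(os.path.join(data_dir, 'example.hdf5'), 'w').close()
os.environ['FUEL_DATA_PATH'] = data_dir  # set before importing fuel

from fuel.utils import find_in_data_path

print(find_in_data_path('example.hdf5'))  # absolute path inside data_dir
# find_in_data_path('missing.hdf5') would raise IOError instead.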
11,964
|
mila-iqia/fuel
|
fuel/utils/__init__.py
|
lazy_property_factory
|
def lazy_property_factory(lazy_property):
"""Create properties that perform lazy loading of attributes."""
def lazy_property_getter(self):
if not hasattr(self, '_' + lazy_property):
self.load()
if not hasattr(self, '_' + lazy_property):
raise ValueError("{} wasn't loaded".format(lazy_property))
return getattr(self, '_' + lazy_property)
def lazy_property_setter(self, value):
setattr(self, '_' + lazy_property, value)
return lazy_property_getter, lazy_property_setter
|
python
|
def lazy_property_factory(lazy_property):
"""Create properties that perform lazy loading of attributes."""
def lazy_property_getter(self):
if not hasattr(self, '_' + lazy_property):
self.load()
if not hasattr(self, '_' + lazy_property):
raise ValueError("{} wasn't loaded".format(lazy_property))
return getattr(self, '_' + lazy_property)
def lazy_property_setter(self, value):
setattr(self, '_' + lazy_property, value)
return lazy_property_getter, lazy_property_setter
|
[
"def",
"lazy_property_factory",
"(",
"lazy_property",
")",
":",
"def",
"lazy_property_getter",
"(",
"self",
")",
":",
"if",
"not",
"hasattr",
"(",
"self",
",",
"'_'",
"+",
"lazy_property",
")",
":",
"self",
".",
"load",
"(",
")",
"if",
"not",
"hasattr",
"(",
"self",
",",
"'_'",
"+",
"lazy_property",
")",
":",
"raise",
"ValueError",
"(",
"\"{} wasn't loaded\"",
".",
"format",
"(",
"lazy_property",
")",
")",
"return",
"getattr",
"(",
"self",
",",
"'_'",
"+",
"lazy_property",
")",
"def",
"lazy_property_setter",
"(",
"self",
",",
"value",
")",
":",
"setattr",
"(",
"self",
",",
"'_'",
"+",
"lazy_property",
",",
"value",
")",
"return",
"lazy_property_getter",
",",
"lazy_property_setter"
] |
Create properties that perform lazy loading of attributes.
|
[
"Create",
"properties",
"that",
"perform",
"lazy",
"loading",
"of",
"attributes",
"."
] |
1d6292dc25e3a115544237e392e61bff6631d23c
|
https://github.com/mila-iqia/fuel/blob/1d6292dc25e3a115544237e392e61bff6631d23c/fuel/utils/__init__.py#L437-L449
|
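A self-contained sketch of lazy_property_factory: the getter/setter pair it returns can be wrapped in a property on any class that defines load; the Holder class below is purely illustrative:

from fuel.utils import lazy_property_factory

class Holder(object):
    payload = property(*lazy_property_factory('payload'))

    def load(self):
        # Assigning the property stores the value under the '_payload' name.
        self.payload = [1, 2, 3]

h = Holder()
print(h.payload)  # first access triggers load(), then returns [1, 2, 3]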
11,965
|
mila-iqia/fuel
|
fuel/utils/__init__.py
|
do_not_pickle_attributes
|
def do_not_pickle_attributes(*lazy_properties):
    r"""Decorator to assign non-picklable properties.
Used to assign properties which will not be pickled on some class.
This decorator creates a series of properties whose values won't be
serialized; instead, their values will be reloaded (e.g. from disk) by
the :meth:`load` function after deserializing the object.
The decorator can be used to avoid the serialization of bulky
attributes. Another possible use is for attributes which cannot be
pickled at all. In this case the user should construct the attribute
himself in :meth:`load`.
Parameters
----------
\*lazy_properties : strings
The names of the attributes that are lazy.
Notes
-----
The pickling behavior of the dataset is only overridden if the
dataset does not have a ``__getstate__`` method implemented.
Examples
--------
In order to make sure that attributes are not serialized with the
dataset, and are lazily reloaded after deserialization by the
    :meth:`load` method in the wrapped class, use the decorator with the names of
the attributes as an argument.
>>> from fuel.datasets import Dataset
>>> @do_not_pickle_attributes('features', 'targets')
... class TestDataset(Dataset):
... def load(self):
... self.features = range(10 ** 6)
... self.targets = range(10 ** 6)[::-1]
"""
def wrap_class(cls):
if not hasattr(cls, 'load'):
raise ValueError("no load method implemented")
# Attach the lazy loading properties to the class
for lazy_property in lazy_properties:
setattr(cls, lazy_property,
property(*lazy_property_factory(lazy_property)))
# Delete the values of lazy properties when serializing
if not hasattr(cls, '__getstate__'):
def __getstate__(self):
serializable_state = self.__dict__.copy()
for lazy_property in lazy_properties:
attr = serializable_state.get('_' + lazy_property)
# Iterators would lose their state
if isinstance(attr, collections.Iterator):
raise ValueError("Iterators can't be lazy loaded")
serializable_state.pop('_' + lazy_property, None)
return serializable_state
setattr(cls, '__getstate__', __getstate__)
return cls
return wrap_class
|
python
|
def do_not_pickle_attributes(*lazy_properties):
    r"""Decorator to assign non-picklable properties.
Used to assign properties which will not be pickled on some class.
This decorator creates a series of properties whose values won't be
serialized; instead, their values will be reloaded (e.g. from disk) by
the :meth:`load` function after deserializing the object.
The decorator can be used to avoid the serialization of bulky
attributes. Another possible use is for attributes which cannot be
pickled at all. In this case the user should construct the attribute
himself in :meth:`load`.
Parameters
----------
\*lazy_properties : strings
The names of the attributes that are lazy.
Notes
-----
The pickling behavior of the dataset is only overridden if the
dataset does not have a ``__getstate__`` method implemented.
Examples
--------
In order to make sure that attributes are not serialized with the
dataset, and are lazily reloaded after deserialization by the
    :meth:`load` method in the wrapped class, use the decorator with the names of
the attributes as an argument.
>>> from fuel.datasets import Dataset
>>> @do_not_pickle_attributes('features', 'targets')
... class TestDataset(Dataset):
... def load(self):
... self.features = range(10 ** 6)
... self.targets = range(10 ** 6)[::-1]
"""
def wrap_class(cls):
if not hasattr(cls, 'load'):
raise ValueError("no load method implemented")
# Attach the lazy loading properties to the class
for lazy_property in lazy_properties:
setattr(cls, lazy_property,
property(*lazy_property_factory(lazy_property)))
# Delete the values of lazy properties when serializing
if not hasattr(cls, '__getstate__'):
def __getstate__(self):
serializable_state = self.__dict__.copy()
for lazy_property in lazy_properties:
attr = serializable_state.get('_' + lazy_property)
# Iterators would lose their state
if isinstance(attr, collections.Iterator):
raise ValueError("Iterators can't be lazy loaded")
serializable_state.pop('_' + lazy_property, None)
return serializable_state
setattr(cls, '__getstate__', __getstate__)
return cls
return wrap_class
|
[
"def",
"do_not_pickle_attributes",
"(",
"*",
"lazy_properties",
")",
":",
"def",
"wrap_class",
"(",
"cls",
")",
":",
"if",
"not",
"hasattr",
"(",
"cls",
",",
"'load'",
")",
":",
"raise",
"ValueError",
"(",
"\"no load method implemented\"",
")",
"# Attach the lazy loading properties to the class",
"for",
"lazy_property",
"in",
"lazy_properties",
":",
"setattr",
"(",
"cls",
",",
"lazy_property",
",",
"property",
"(",
"*",
"lazy_property_factory",
"(",
"lazy_property",
")",
")",
")",
"# Delete the values of lazy properties when serializing",
"if",
"not",
"hasattr",
"(",
"cls",
",",
"'__getstate__'",
")",
":",
"def",
"__getstate__",
"(",
"self",
")",
":",
"serializable_state",
"=",
"self",
".",
"__dict__",
".",
"copy",
"(",
")",
"for",
"lazy_property",
"in",
"lazy_properties",
":",
"attr",
"=",
"serializable_state",
".",
"get",
"(",
"'_'",
"+",
"lazy_property",
")",
"# Iterators would lose their state",
"if",
"isinstance",
"(",
"attr",
",",
"collections",
".",
"Iterator",
")",
":",
"raise",
"ValueError",
"(",
"\"Iterators can't be lazy loaded\"",
")",
"serializable_state",
".",
"pop",
"(",
"'_'",
"+",
"lazy_property",
",",
"None",
")",
"return",
"serializable_state",
"setattr",
"(",
"cls",
",",
"'__getstate__'",
",",
"__getstate__",
")",
"return",
"cls",
"return",
"wrap_class"
] |
r"""Decorator to assign non-picklable properties.
Used to assign properties which will not be pickled on some class.
This decorator creates a series of properties whose values won't be
serialized; instead, their values will be reloaded (e.g. from disk) by
the :meth:`load` function after deserializing the object.
The decorator can be used to avoid the serialization of bulky
attributes. Another possible use is for attributes which cannot be
pickled at all. In this case the user should construct the attribute
himself in :meth:`load`.
Parameters
----------
\*lazy_properties : strings
The names of the attributes that are lazy.
Notes
-----
The pickling behavior of the dataset is only overridden if the
dataset does not have a ``__getstate__`` method implemented.
Examples
--------
In order to make sure that attributes are not serialized with the
dataset, and are lazily reloaded after deserialization by the
:meth:`load` method in the wrapped class, use the decorator with the names of
the attributes as an argument.
>>> from fuel.datasets import Dataset
>>> @do_not_pickle_attributes('features', 'targets')
... class TestDataset(Dataset):
... def load(self):
... self.features = range(10 ** 6)
... self.targets = range(10 ** 6)[::-1]
|
[
"r",
"Decorator",
"to",
"assign",
"non",
"-",
"pickable",
"properties",
"."
] |
1d6292dc25e3a115544237e392e61bff6631d23c
|
https://github.com/mila-iqia/fuel/blob/1d6292dc25e3a115544237e392e61bff6631d23c/fuel/utils/__init__.py#L452-L513
|
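A pickling round trip makes the behaviour of do_not_pickle_attributes concrete; the sketch uses a plain illustrative class instead of a Fuel dataset, which the decorator accepts as long as a load method is defined:

import pickle

from fuel.utils import do_not_pickle_attributes

@do_not_pickle_attributes('features')
class Holder(object):
    def load(self):
        self.features = list(range(10))

h = Holder()
h.load()
restored = pickle.loads(pickle.dumps(h))  # '_features' is dropped by __getstate__
print(restored.features)                  # lazily rebuilt through load()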
11,966
|
mila-iqia/fuel
|
fuel/utils/__init__.py
|
Subset.sorted_fancy_indexing
|
def sorted_fancy_indexing(indexable, request):
"""Safe fancy indexing.
Some objects, such as h5py datasets, only support list indexing
if the list is sorted.
This static method adds support for unsorted list indexing by
sorting the requested indices, accessing the corresponding
elements and re-shuffling the result.
Parameters
----------
request : list of int
Unsorted list of example indices.
indexable : any fancy-indexable object
Indexable we'd like to do unsorted fancy indexing on.
"""
if len(request) > 1:
indices = numpy.argsort(request)
data = numpy.empty(shape=(len(request),) + indexable.shape[1:],
dtype=indexable.dtype)
data[indices] = indexable[numpy.array(request)[indices], ...]
else:
data = indexable[request]
return data
|
python
|
def sorted_fancy_indexing(indexable, request):
"""Safe fancy indexing.
Some objects, such as h5py datasets, only support list indexing
if the list is sorted.
This static method adds support for unsorted list indexing by
sorting the requested indices, accessing the corresponding
elements and re-shuffling the result.
Parameters
----------
request : list of int
Unsorted list of example indices.
indexable : any fancy-indexable object
Indexable we'd like to do unsorted fancy indexing on.
"""
if len(request) > 1:
indices = numpy.argsort(request)
data = numpy.empty(shape=(len(request),) + indexable.shape[1:],
dtype=indexable.dtype)
data[indices] = indexable[numpy.array(request)[indices], ...]
else:
data = indexable[request]
return data
|
[
"def",
"sorted_fancy_indexing",
"(",
"indexable",
",",
"request",
")",
":",
"if",
"len",
"(",
"request",
")",
">",
"1",
":",
"indices",
"=",
"numpy",
".",
"argsort",
"(",
"request",
")",
"data",
"=",
"numpy",
".",
"empty",
"(",
"shape",
"=",
"(",
"len",
"(",
"request",
")",
",",
")",
"+",
"indexable",
".",
"shape",
"[",
"1",
":",
"]",
",",
"dtype",
"=",
"indexable",
".",
"dtype",
")",
"data",
"[",
"indices",
"]",
"=",
"indexable",
"[",
"numpy",
".",
"array",
"(",
"request",
")",
"[",
"indices",
"]",
",",
"...",
"]",
"else",
":",
"data",
"=",
"indexable",
"[",
"request",
"]",
"return",
"data"
] |
Safe fancy indexing.
Some objects, such as h5py datasets, only support list indexing
if the list is sorted.
This static method adds support for unsorted list indexing by
sorting the requested indices, accessing the corresponding
elements and re-shuffling the result.
Parameters
----------
request : list of int
Unsorted list of example indices.
indexable : any fancy-indexable object
Indexable we'd like to do unsorted fancy indexing on.
|
[
"Safe",
"fancy",
"indexing",
"."
] |
1d6292dc25e3a115544237e392e61bff6631d23c
|
https://github.com/mila-iqia/fuel/blob/1d6292dc25e3a115544237e392e61bff6631d23c/fuel/utils/__init__.py#L175-L200
|
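Since the record above describes sorted_fancy_indexing as a static method, it can be exercised directly; a NumPy array stands in for an h5py dataset here (NumPy accepts unsorted fancy indexing natively, so the example only checks that the request order is preserved):

import numpy

from fuel.utils import Subset

data = numpy.arange(10) * 10
print(Subset.sorted_fancy_indexing(data, [5, 2, 7]))  # [50 20 70]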
11,967
|
mila-iqia/fuel
|
fuel/utils/__init__.py
|
Subset.slice_to_numerical_args
|
def slice_to_numerical_args(slice_, num_examples):
"""Translate a slice's attributes into numerical attributes.
Parameters
----------
slice_ : :class:`slice`
Slice for which numerical attributes are wanted.
num_examples : int
Number of examples in the indexable that is to be sliced
through. This determines the numerical value for the `stop`
attribute in case it's `None`.
"""
start = slice_.start if slice_.start is not None else 0
stop = slice_.stop if slice_.stop is not None else num_examples
step = slice_.step if slice_.step is not None else 1
return start, stop, step
|
python
|
def slice_to_numerical_args(slice_, num_examples):
"""Translate a slice's attributes into numerical attributes.
Parameters
----------
slice_ : :class:`slice`
Slice for which numerical attributes are wanted.
num_examples : int
Number of examples in the indexable that is to be sliced
through. This determines the numerical value for the `stop`
attribute in case it's `None`.
"""
start = slice_.start if slice_.start is not None else 0
stop = slice_.stop if slice_.stop is not None else num_examples
step = slice_.step if slice_.step is not None else 1
return start, stop, step
|
[
"def",
"slice_to_numerical_args",
"(",
"slice_",
",",
"num_examples",
")",
":",
"start",
"=",
"slice_",
".",
"start",
"if",
"slice_",
".",
"start",
"is",
"not",
"None",
"else",
"0",
"stop",
"=",
"slice_",
".",
"stop",
"if",
"slice_",
".",
"stop",
"is",
"not",
"None",
"else",
"num_examples",
"step",
"=",
"slice_",
".",
"step",
"if",
"slice_",
".",
"step",
"is",
"not",
"None",
"else",
"1",
"return",
"start",
",",
"stop",
",",
"step"
] |
Translate a slice's attributes into numerical attributes.
Parameters
----------
slice_ : :class:`slice`
Slice for which numerical attributes are wanted.
num_examples : int
Number of examples in the indexable that is to be sliced
through. This determines the numerical value for the `stop`
attribute in case it's `None`.
|
[
"Translate",
"a",
"slice",
"s",
"attributes",
"into",
"numerical",
"attributes",
"."
] |
1d6292dc25e3a115544237e392e61bff6631d23c
|
https://github.com/mila-iqia/fuel/blob/1d6292dc25e3a115544237e392e61bff6631d23c/fuel/utils/__init__.py#L203-L219
|
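slice_to_numerical_args takes no self, so it is presumably exposed as another static helper on Subset (an assumption based on the record's name); a quick sketch of the None handling:

from fuel.utils import Subset

# None endpoints become 0 and num_examples, a None step becomes 1.
print(Subset.slice_to_numerical_args(slice(None, None, None), 100))  # (0, 100, 1)
print(Subset.slice_to_numerical_args(slice(10, None, 2), 100))       # (10, 100, 2)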
11,968
|
mila-iqia/fuel
|
fuel/utils/__init__.py
|
Subset.get_list_representation
|
def get_list_representation(self):
"""Returns this subset's representation as a list of indices."""
if self.is_list:
return self.list_or_slice
else:
return self[list(range(self.num_examples))]
|
python
|
def get_list_representation(self):
"""Returns this subset's representation as a list of indices."""
if self.is_list:
return self.list_or_slice
else:
return self[list(range(self.num_examples))]
|
[
"def",
"get_list_representation",
"(",
"self",
")",
":",
"if",
"self",
".",
"is_list",
":",
"return",
"self",
".",
"list_or_slice",
"else",
":",
"return",
"self",
"[",
"list",
"(",
"range",
"(",
"self",
".",
"num_examples",
")",
")",
"]"
] |
Returns this subset's representation as a list of indices.
|
[
"Returns",
"this",
"subset",
"s",
"representation",
"as",
"a",
"list",
"of",
"indices",
"."
] |
1d6292dc25e3a115544237e392e61bff6631d23c
|
https://github.com/mila-iqia/fuel/blob/1d6292dc25e3a115544237e392e61bff6631d23c/fuel/utils/__init__.py#L221-L226
|
11,969
|
mila-iqia/fuel
|
fuel/utils/__init__.py
|
Subset.index_within_subset
|
def index_within_subset(self, indexable, subset_request,
sort_indices=False):
"""Index an indexable object within the context of this subset.
Parameters
----------
indexable : indexable object
The object to index through.
subset_request : :class:`list` or :class:`slice`
List of positive integer indices or slice that constitutes
the request *within the context of this subset*. This
request will be translated to a request on the indexable
object.
sort_indices : bool, optional
If the request is a list of indices, indexes in sorted order
and reshuffles the result in the original order. Defaults to
`False`.
"""
# Translate the request within the context of this subset to a
# request to the indexable object
if isinstance(subset_request, numbers.Integral):
request, = self[[subset_request]]
else:
request = self[subset_request]
# Integer or slice requests can be processed directly.
if isinstance(request, numbers.Integral) or hasattr(request, 'step'):
return indexable[request]
# If requested, we do fancy indexing in sorted order and reshuffle the
# result back in the original order.
if sort_indices:
return self.sorted_fancy_indexing(indexable, request)
# If the indexable supports fancy indexing (numpy array, HDF5 dataset),
# the request can be processed directly.
if isinstance(indexable, (numpy.ndarray, h5py.Dataset)):
return indexable[request]
# Anything else (e.g. lists) isn't considered to support fancy
# indexing, so Subset does it manually.
return iterable_fancy_indexing(indexable, request)
|
python
|
def index_within_subset(self, indexable, subset_request,
sort_indices=False):
"""Index an indexable object within the context of this subset.
Parameters
----------
indexable : indexable object
The object to index through.
subset_request : :class:`list` or :class:`slice`
List of positive integer indices or slice that constitutes
the request *within the context of this subset*. This
request will be translated to a request on the indexable
object.
sort_indices : bool, optional
If the request is a list of indices, indexes in sorted order
and reshuffles the result in the original order. Defaults to
`False`.
"""
# Translate the request within the context of this subset to a
# request to the indexable object
if isinstance(subset_request, numbers.Integral):
request, = self[[subset_request]]
else:
request = self[subset_request]
# Integer or slice requests can be processed directly.
if isinstance(request, numbers.Integral) or hasattr(request, 'step'):
return indexable[request]
# If requested, we do fancy indexing in sorted order and reshuffle the
# result back in the original order.
if sort_indices:
return self.sorted_fancy_indexing(indexable, request)
# If the indexable supports fancy indexing (numpy array, HDF5 dataset),
# the request can be processed directly.
if isinstance(indexable, (numpy.ndarray, h5py.Dataset)):
return indexable[request]
# Anything else (e.g. lists) isn't considered to support fancy
# indexing, so Subset does it manually.
return iterable_fancy_indexing(indexable, request)
|
[
"def",
"index_within_subset",
"(",
"self",
",",
"indexable",
",",
"subset_request",
",",
"sort_indices",
"=",
"False",
")",
":",
"# Translate the request within the context of this subset to a",
"# request to the indexable object",
"if",
"isinstance",
"(",
"subset_request",
",",
"numbers",
".",
"Integral",
")",
":",
"request",
",",
"=",
"self",
"[",
"[",
"subset_request",
"]",
"]",
"else",
":",
"request",
"=",
"self",
"[",
"subset_request",
"]",
"# Integer or slice requests can be processed directly.",
"if",
"isinstance",
"(",
"request",
",",
"numbers",
".",
"Integral",
")",
"or",
"hasattr",
"(",
"request",
",",
"'step'",
")",
":",
"return",
"indexable",
"[",
"request",
"]",
"# If requested, we do fancy indexing in sorted order and reshuffle the",
"# result back in the original order.",
"if",
"sort_indices",
":",
"return",
"self",
".",
"sorted_fancy_indexing",
"(",
"indexable",
",",
"request",
")",
"# If the indexable supports fancy indexing (numpy array, HDF5 dataset),",
"# the request can be processed directly.",
"if",
"isinstance",
"(",
"indexable",
",",
"(",
"numpy",
".",
"ndarray",
",",
"h5py",
".",
"Dataset",
")",
")",
":",
"return",
"indexable",
"[",
"request",
"]",
"# Anything else (e.g. lists) isn't considered to support fancy",
"# indexing, so Subset does it manually.",
"return",
"iterable_fancy_indexing",
"(",
"indexable",
",",
"request",
")"
] |
Index an indexable object within the context of this subset.
Parameters
----------
indexable : indexable object
The object to index through.
subset_request : :class:`list` or :class:`slice`
List of positive integer indices or slice that constitutes
the request *within the context of this subset*. This
request will be translated to a request on the indexable
object.
sort_indices : bool, optional
If the request is a list of indices, indexes in sorted order
and reshuffles the result in the original order. Defaults to
`False`.
|
[
"Index",
"an",
"indexable",
"object",
"within",
"the",
"context",
"of",
"this",
"subset",
"."
] |
1d6292dc25e3a115544237e392e61bff6631d23c
|
https://github.com/mila-iqia/fuel/blob/1d6292dc25e3a115544237e392e61bff6631d23c/fuel/utils/__init__.py#L228-L266
|
11,970
|
mila-iqia/fuel
|
fuel/utils/__init__.py
|
Subset.num_examples
|
def num_examples(self):
"""The number of examples this subset spans."""
if self.is_list:
return len(self.list_or_slice)
else:
start, stop, step = self.slice_to_numerical_args(
self.list_or_slice, self.original_num_examples)
return stop - start
|
python
|
def num_examples(self):
"""The number of examples this subset spans."""
if self.is_list:
return len(self.list_or_slice)
else:
start, stop, step = self.slice_to_numerical_args(
self.list_or_slice, self.original_num_examples)
return stop - start
|
[
"def",
"num_examples",
"(",
"self",
")",
":",
"if",
"self",
".",
"is_list",
":",
"return",
"len",
"(",
"self",
".",
"list_or_slice",
")",
"else",
":",
"start",
",",
"stop",
",",
"step",
"=",
"self",
".",
"slice_to_numerical_args",
"(",
"self",
".",
"list_or_slice",
",",
"self",
".",
"original_num_examples",
")",
"return",
"stop",
"-",
"start"
] |
The number of examples this subset spans.
|
[
"The",
"number",
"of",
"examples",
"this",
"subset",
"spans",
"."
] |
1d6292dc25e3a115544237e392e61bff6631d23c
|
https://github.com/mila-iqia/fuel/blob/1d6292dc25e3a115544237e392e61bff6631d23c/fuel/utils/__init__.py#L290-L297
|
11,971
|
mila-iqia/fuel
|
fuel/streams.py
|
DataStream.get_epoch_iterator
|
def get_epoch_iterator(self, **kwargs):
"""Get an epoch iterator for the data stream."""
if not self._fresh_state:
self.next_epoch()
else:
self._fresh_state = False
return super(DataStream, self).get_epoch_iterator(**kwargs)
|
python
|
def get_epoch_iterator(self, **kwargs):
"""Get an epoch iterator for the data stream."""
if not self._fresh_state:
self.next_epoch()
else:
self._fresh_state = False
return super(DataStream, self).get_epoch_iterator(**kwargs)
|
[
"def",
"get_epoch_iterator",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"not",
"self",
".",
"_fresh_state",
":",
"self",
".",
"next_epoch",
"(",
")",
"else",
":",
"self",
".",
"_fresh_state",
"=",
"False",
"return",
"super",
"(",
"DataStream",
",",
"self",
")",
".",
"get_epoch_iterator",
"(",
"*",
"*",
"kwargs",
")"
] |
Get an epoch iterator for the data stream.
|
[
"Get",
"an",
"epoch",
"iterator",
"for",
"the",
"data",
"stream",
"."
] |
1d6292dc25e3a115544237e392e61bff6631d23c
|
https://github.com/mila-iqia/fuel/blob/1d6292dc25e3a115544237e392e61bff6631d23c/fuel/streams.py#L172-L178
|
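A usage sketch for DataStream.get_epoch_iterator; IndexableDataset and SequentialExampleScheme are assumed to live in fuel.datasets and fuel.schemes (they are not part of this file), so treat the exact constructor arguments as illustrative rather than authoritative:

from fuel.datasets import IndexableDataset
from fuel.schemes import SequentialExampleScheme
from fuel.streams import DataStream

dataset = IndexableDataset({'features': [[0], [1], [2], [3]]})
stream = DataStream(dataset,
                    iteration_scheme=SequentialExampleScheme(dataset.num_examples))
for features, in stream.get_epoch_iterator():
    print(features)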
11,972
|
mila-iqia/fuel
|
fuel/downloaders/binarized_mnist.py
|
fill_subparser
|
def fill_subparser(subparser):
"""Sets up a subparser to download the binarized MNIST dataset files.
The binarized MNIST dataset files
(`binarized_mnist_{train,valid,test}.amat`) are downloaded from
Hugo Larochelle's website [HUGO].
.. [HUGO] http://www.cs.toronto.edu/~larocheh/public/datasets/
binarized_mnist/binarized_mnist_{train,valid,test}.amat
Parameters
----------
subparser : :class:`argparse.ArgumentParser`
Subparser handling the `binarized_mnist` command.
"""
sets = ['train', 'valid', 'test']
urls = ['http://www.cs.toronto.edu/~larocheh/public/datasets/' +
'binarized_mnist/binarized_mnist_{}.amat'.format(s) for s in sets]
filenames = ['binarized_mnist_{}.amat'.format(s) for s in sets]
subparser.set_defaults(urls=urls, filenames=filenames)
return default_downloader
|
python
|
def fill_subparser(subparser):
"""Sets up a subparser to download the binarized MNIST dataset files.
The binarized MNIST dataset files
(`binarized_mnist_{train,valid,test}.amat`) are downloaded from
Hugo Larochelle's website [HUGO].
.. [HUGO] http://www.cs.toronto.edu/~larocheh/public/datasets/
binarized_mnist/binarized_mnist_{train,valid,test}.amat
Parameters
----------
subparser : :class:`argparse.ArgumentParser`
Subparser handling the `binarized_mnist` command.
"""
sets = ['train', 'valid', 'test']
urls = ['http://www.cs.toronto.edu/~larocheh/public/datasets/' +
'binarized_mnist/binarized_mnist_{}.amat'.format(s) for s in sets]
filenames = ['binarized_mnist_{}.amat'.format(s) for s in sets]
subparser.set_defaults(urls=urls, filenames=filenames)
return default_downloader
|
[
"def",
"fill_subparser",
"(",
"subparser",
")",
":",
"sets",
"=",
"[",
"'train'",
",",
"'valid'",
",",
"'test'",
"]",
"urls",
"=",
"[",
"'http://www.cs.toronto.edu/~larocheh/public/datasets/'",
"+",
"'binarized_mnist/binarized_mnist_{}.amat'",
".",
"format",
"(",
"s",
")",
"for",
"s",
"in",
"sets",
"]",
"filenames",
"=",
"[",
"'binarized_mnist_{}.amat'",
".",
"format",
"(",
"s",
")",
"for",
"s",
"in",
"sets",
"]",
"subparser",
".",
"set_defaults",
"(",
"urls",
"=",
"urls",
",",
"filenames",
"=",
"filenames",
")",
"return",
"default_downloader"
] |
Sets up a subparser to download the binarized MNIST dataset files.
The binarized MNIST dataset files
(`binarized_mnist_{train,valid,test}.amat`) are downloaded from
Hugo Larochelle's website [HUGO].
.. [HUGO] http://www.cs.toronto.edu/~larocheh/public/datasets/
binarized_mnist/binarized_mnist_{train,valid,test}.amat
Parameters
----------
subparser : :class:`argparse.ArgumentParser`
Subparser handling the `binarized_mnist` command.
|
[
"Sets",
"up",
"a",
"subparser",
"to",
"download",
"the",
"binarized",
"MNIST",
"dataset",
"files",
"."
] |
1d6292dc25e3a115544237e392e61bff6631d23c
|
https://github.com/mila-iqia/fuel/blob/1d6292dc25e3a115544237e392e61bff6631d23c/fuel/downloaders/binarized_mnist.py#L4-L25
|
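A sketch showing how fill_subparser plugs into a plain argparse parser outside the fuel-download script; no network access is needed just to inspect the defaults it registers:

import argparse

from fuel.downloaders.binarized_mnist import fill_subparser

parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers()
downloader = fill_subparser(subparsers.add_parser('binarized_mnist'))
args = parser.parse_args(['binarized_mnist'])
print(args.filenames)  # ['binarized_mnist_train.amat', ...]
print(downloader)      # the default_downloader function from the earlier record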
11,973
|
mila-iqia/fuel
|
fuel/downloaders/youtube_audio.py
|
download
|
def download(directory, youtube_id, clear=False):
"""Download the audio of a YouTube video.
The audio is downloaded in the highest available quality. Progress is
printed to `stdout`. The file is named `youtube_id.m4a`, where
    `youtube_id` is the 11-character code identifying the YouTube video
(can be determined from the URL).
Parameters
----------
directory : str
The directory in which to save the downloaded audio file.
youtube_id : str
11-character video ID (taken from YouTube URL)
clear : bool
If `True`, it deletes the downloaded video. Otherwise it downloads
it. Defaults to `False`.
"""
filepath = os.path.join(directory, '{}.m4a'.format(youtube_id))
if clear:
os.remove(filepath)
return
if not PAFY_AVAILABLE:
raise ImportError("pafy is required to download YouTube videos")
url = 'https://www.youtube.com/watch?v={}'.format(youtube_id)
video = pafy.new(url)
audio = video.getbestaudio()
audio.download(quiet=False, filepath=filepath)
|
python
|
def download(directory, youtube_id, clear=False):
"""Download the audio of a YouTube video.
The audio is downloaded in the highest available quality. Progress is
printed to `stdout`. The file is named `youtube_id.m4a`, where
    `youtube_id` is the 11-character code identifying the YouTube video
(can be determined from the URL).
Parameters
----------
directory : str
The directory in which to save the downloaded audio file.
youtube_id : str
11-character video ID (taken from YouTube URL)
clear : bool
If `True`, it deletes the downloaded video. Otherwise it downloads
it. Defaults to `False`.
"""
filepath = os.path.join(directory, '{}.m4a'.format(youtube_id))
if clear:
os.remove(filepath)
return
if not PAFY_AVAILABLE:
raise ImportError("pafy is required to download YouTube videos")
url = 'https://www.youtube.com/watch?v={}'.format(youtube_id)
video = pafy.new(url)
audio = video.getbestaudio()
audio.download(quiet=False, filepath=filepath)
|
[
"def",
"download",
"(",
"directory",
",",
"youtube_id",
",",
"clear",
"=",
"False",
")",
":",
"filepath",
"=",
"os",
".",
"path",
".",
"join",
"(",
"directory",
",",
"'{}.m4a'",
".",
"format",
"(",
"youtube_id",
")",
")",
"if",
"clear",
":",
"os",
".",
"remove",
"(",
"filepath",
")",
"return",
"if",
"not",
"PAFY_AVAILABLE",
":",
"raise",
"ImportError",
"(",
"\"pafy is required to download YouTube videos\"",
")",
"url",
"=",
"'https://www.youtube.com/watch?v={}'",
".",
"format",
"(",
"youtube_id",
")",
"video",
"=",
"pafy",
".",
"new",
"(",
"url",
")",
"audio",
"=",
"video",
".",
"getbestaudio",
"(",
")",
"audio",
".",
"download",
"(",
"quiet",
"=",
"False",
",",
"filepath",
"=",
"filepath",
")"
] |
Download the audio of a YouTube video.
The audio is downloaded in the highest available quality. Progress is
printed to `stdout`. The file is named `youtube_id.m4a`, where
`youtube_id` is the 11-character code identifying the YouTube video
(can be determined from the URL).
Parameters
----------
directory : str
The directory in which to save the downloaded audio file.
youtube_id : str
11-character video ID (taken from YouTube URL)
clear : bool
If `True`, it deletes the downloaded video. Otherwise it downloads
it. Defaults to `False`.
|
[
"Download",
"the",
"audio",
"of",
"a",
"YouTube",
"video",
"."
] |
1d6292dc25e3a115544237e392e61bff6631d23c
|
https://github.com/mila-iqia/fuel/blob/1d6292dc25e3a115544237e392e61bff6631d23c/fuel/downloaders/youtube_audio.py#L10-L38
|
11,974
|
mila-iqia/fuel
|
fuel/downloaders/youtube_audio.py
|
fill_subparser
|
def fill_subparser(subparser):
"""Sets up a subparser to download audio of YouTube videos.
Adds the compulsory `--youtube-id` flag.
Parameters
----------
subparser : :class:`argparse.ArgumentParser`
Subparser handling the `youtube_audio` command.
"""
subparser.add_argument(
'--youtube-id', type=str, required=True,
help=("The YouTube ID of the video from which to extract audio, "
"usually an 11-character string.")
)
return download
|
python
|
def fill_subparser(subparser):
"""Sets up a subparser to download audio of YouTube videos.
Adds the compulsory `--youtube-id` flag.
Parameters
----------
subparser : :class:`argparse.ArgumentParser`
Subparser handling the `youtube_audio` command.
"""
subparser.add_argument(
'--youtube-id', type=str, required=True,
help=("The YouTube ID of the video from which to extract audio, "
"usually an 11-character string.")
)
return download
|
[
"def",
"fill_subparser",
"(",
"subparser",
")",
":",
"subparser",
".",
"add_argument",
"(",
"'--youtube-id'",
",",
"type",
"=",
"str",
",",
"required",
"=",
"True",
",",
"help",
"=",
"(",
"\"The YouTube ID of the video from which to extract audio, \"",
"\"usually an 11-character string.\"",
")",
")",
"return",
"download"
] |
Sets up a subparser to download audio of YouTube videos.
Adds the compulsory `--youtube-id` flag.
Parameters
----------
subparser : :class:`argparse.ArgumentParser`
Subparser handling the `youtube_audio` command.
|
[
"Sets",
"up",
"a",
"subparser",
"to",
"download",
"audio",
"of",
"YouTube",
"videos",
"."
] |
1d6292dc25e3a115544237e392e61bff6631d23c
|
https://github.com/mila-iqia/fuel/blob/1d6292dc25e3a115544237e392e61bff6631d23c/fuel/downloaders/youtube_audio.py#L41-L57
|
11,975
|
mila-iqia/fuel
|
fuel/converters/youtube_audio.py
|
convert_youtube_audio
|
def convert_youtube_audio(directory, output_directory, youtube_id, channels,
sample, output_filename=None):
"""Converts downloaded YouTube audio to HDF5 format.
Requires `ffmpeg` to be installed and available on the command line
(i.e. available on your `PATH`).
Parameters
----------
directory : str
Directory in which input files reside.
output_directory : str
Directory in which to save the converted dataset.
youtube_id : str
11-character video ID (taken from YouTube URL)
channels : int
The number of audio channels to use in the PCM Wave file.
sample : int
The sampling rate to use in Hz, e.g. 44100 or 16000.
output_filename : str, optional
Name of the saved dataset. If `None` (the default),
`youtube_id.hdf5` is used.
"""
input_file = os.path.join(directory, '{}.m4a'.format(youtube_id))
wav_filename = '{}.wav'.format(youtube_id)
wav_file = os.path.join(directory, wav_filename)
ffmpeg_not_available = subprocess.call(['ffmpeg', '-version'])
if ffmpeg_not_available:
raise RuntimeError('conversion requires ffmpeg')
subprocess.check_call(['ffmpeg', '-y', '-i', input_file, '-ac',
str(channels), '-ar', str(sample), wav_file],
stdout=sys.stdout)
# Load WAV into array
_, data = scipy.io.wavfile.read(wav_file)
if data.ndim == 1:
data = data[:, None]
data = data[None, :]
# Store in HDF5
if output_filename is None:
output_filename = '{}.hdf5'.format(youtube_id)
output_file = os.path.join(output_directory, output_filename)
with h5py.File(output_file, 'w') as h5file:
fill_hdf5_file(h5file, (('train', 'features', data),))
h5file['features'].dims[0].label = 'batch'
h5file['features'].dims[1].label = 'time'
h5file['features'].dims[2].label = 'feature'
return (output_file,)
|
python
|
def convert_youtube_audio(directory, output_directory, youtube_id, channels,
sample, output_filename=None):
"""Converts downloaded YouTube audio to HDF5 format.
Requires `ffmpeg` to be installed and available on the command line
(i.e. available on your `PATH`).
Parameters
----------
directory : str
Directory in which input files reside.
output_directory : str
Directory in which to save the converted dataset.
youtube_id : str
11-character video ID (taken from YouTube URL)
channels : int
The number of audio channels to use in the PCM Wave file.
sample : int
The sampling rate to use in Hz, e.g. 44100 or 16000.
output_filename : str, optional
Name of the saved dataset. If `None` (the default),
`youtube_id.hdf5` is used.
"""
input_file = os.path.join(directory, '{}.m4a'.format(youtube_id))
wav_filename = '{}.wav'.format(youtube_id)
wav_file = os.path.join(directory, wav_filename)
ffmpeg_not_available = subprocess.call(['ffmpeg', '-version'])
if ffmpeg_not_available:
raise RuntimeError('conversion requires ffmpeg')
subprocess.check_call(['ffmpeg', '-y', '-i', input_file, '-ac',
str(channels), '-ar', str(sample), wav_file],
stdout=sys.stdout)
# Load WAV into array
_, data = scipy.io.wavfile.read(wav_file)
if data.ndim == 1:
data = data[:, None]
data = data[None, :]
# Store in HDF5
if output_filename is None:
output_filename = '{}.hdf5'.format(youtube_id)
output_file = os.path.join(output_directory, output_filename)
with h5py.File(output_file, 'w') as h5file:
fill_hdf5_file(h5file, (('train', 'features', data),))
h5file['features'].dims[0].label = 'batch'
h5file['features'].dims[1].label = 'time'
h5file['features'].dims[2].label = 'feature'
return (output_file,)
|
[
"def",
"convert_youtube_audio",
"(",
"directory",
",",
"output_directory",
",",
"youtube_id",
",",
"channels",
",",
"sample",
",",
"output_filename",
"=",
"None",
")",
":",
"input_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"directory",
",",
"'{}.m4a'",
".",
"format",
"(",
"youtube_id",
")",
")",
"wav_filename",
"=",
"'{}.wav'",
".",
"format",
"(",
"youtube_id",
")",
"wav_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"directory",
",",
"wav_filename",
")",
"ffmpeg_not_available",
"=",
"subprocess",
".",
"call",
"(",
"[",
"'ffmpeg'",
",",
"'-version'",
"]",
")",
"if",
"ffmpeg_not_available",
":",
"raise",
"RuntimeError",
"(",
"'conversion requires ffmpeg'",
")",
"subprocess",
".",
"check_call",
"(",
"[",
"'ffmpeg'",
",",
"'-y'",
",",
"'-i'",
",",
"input_file",
",",
"'-ac'",
",",
"str",
"(",
"channels",
")",
",",
"'-ar'",
",",
"str",
"(",
"sample",
")",
",",
"wav_file",
"]",
",",
"stdout",
"=",
"sys",
".",
"stdout",
")",
"# Load WAV into array",
"_",
",",
"data",
"=",
"scipy",
".",
"io",
".",
"wavfile",
".",
"read",
"(",
"wav_file",
")",
"if",
"data",
".",
"ndim",
"==",
"1",
":",
"data",
"=",
"data",
"[",
":",
",",
"None",
"]",
"data",
"=",
"data",
"[",
"None",
",",
":",
"]",
"# Store in HDF5",
"if",
"output_filename",
"is",
"None",
":",
"output_filename",
"=",
"'{}.hdf5'",
".",
"format",
"(",
"youtube_id",
")",
"output_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"output_directory",
",",
"output_filename",
")",
"with",
"h5py",
".",
"File",
"(",
"output_file",
",",
"'w'",
")",
"as",
"h5file",
":",
"fill_hdf5_file",
"(",
"h5file",
",",
"(",
"(",
"'train'",
",",
"'features'",
",",
"data",
")",
",",
")",
")",
"h5file",
"[",
"'features'",
"]",
".",
"dims",
"[",
"0",
"]",
".",
"label",
"=",
"'batch'",
"h5file",
"[",
"'features'",
"]",
".",
"dims",
"[",
"1",
"]",
".",
"label",
"=",
"'time'",
"h5file",
"[",
"'features'",
"]",
".",
"dims",
"[",
"2",
"]",
".",
"label",
"=",
"'feature'",
"return",
"(",
"output_file",
",",
")"
] |
Converts downloaded YouTube audio to HDF5 format.
Requires `ffmpeg` to be installed and available on the command line
(i.e. available on your `PATH`).
Parameters
----------
directory : str
Directory in which input files reside.
output_directory : str
Directory in which to save the converted dataset.
youtube_id : str
11-character video ID (taken from YouTube URL)
channels : int
The number of audio channels to use in the PCM Wave file.
sample : int
The sampling rate to use in Hz, e.g. 44100 or 16000.
output_filename : str, optional
Name of the saved dataset. If `None` (the default),
`youtube_id.hdf5` is used.
|
[
"Converts",
"downloaded",
"YouTube",
"audio",
"to",
"HDF5",
"format",
"."
] |
1d6292dc25e3a115544237e392e61bff6631d23c
|
https://github.com/mila-iqia/fuel/blob/1d6292dc25e3a115544237e392e61bff6631d23c/fuel/converters/youtube_audio.py#L11-L62
|
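A hedged invocation sketch for convert_youtube_audio above (not part of the dataset). The paths and the video ID are placeholders; the call assumes ffmpeg is on PATH and that `<youtube_id>.m4a` has already been downloaded into the input directory, as the docstring requires.

# Placeholder locations; 'abcdefghijk.m4a' must already exist inside raw_dir.
raw_dir = "/tmp/fuel_raw"
out_dir = "/tmp/fuel_converted"
output_paths = convert_youtube_audio(raw_dir, out_dir, "abcdefghijk",
                                     channels=1, sample=16000)
print(output_paths)  # ('/tmp/fuel_converted/abcdefghijk.hdf5',)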
11,976
|
mila-iqia/fuel
|
fuel/converters/youtube_audio.py
|
fill_subparser
|
def fill_subparser(subparser):
"""Sets up a subparser to convert YouTube audio files.
Adds the compulsory `--youtube-id` flag as well as the optional
`sample` and `channels` flags.
Parameters
----------
subparser : :class:`argparse.ArgumentParser`
Subparser handling the `youtube_audio` command.
"""
subparser.add_argument(
'--youtube-id', type=str, required=True,
help=("The YouTube ID of the video from which to extract audio, "
"usually an 11-character string.")
)
subparser.add_argument(
'--channels', type=int, default=1,
help=("The number of audio channels to convert to. The default of 1"
"means audio is converted to mono.")
)
subparser.add_argument(
'--sample', type=int, default=16000,
help=("The sampling rate in Hz. The default of 16000 is "
"significantly downsampled compared to normal WAVE files; "
"pass 44100 for the usual sampling rate.")
)
return convert_youtube_audio
|
python
|
def fill_subparser(subparser):
"""Sets up a subparser to convert YouTube audio files.
Adds the compulsory `--youtube-id` flag as well as the optional
`sample` and `channels` flags.
Parameters
----------
subparser : :class:`argparse.ArgumentParser`
Subparser handling the `youtube_audio` command.
"""
subparser.add_argument(
'--youtube-id', type=str, required=True,
help=("The YouTube ID of the video from which to extract audio, "
"usually an 11-character string.")
)
subparser.add_argument(
'--channels', type=int, default=1,
help=("The number of audio channels to convert to. The default of 1"
"means audio is converted to mono.")
)
subparser.add_argument(
'--sample', type=int, default=16000,
help=("The sampling rate in Hz. The default of 16000 is "
"significantly downsampled compared to normal WAVE files; "
"pass 44100 for the usual sampling rate.")
)
return convert_youtube_audio
|
[
"def",
"fill_subparser",
"(",
"subparser",
")",
":",
"subparser",
".",
"add_argument",
"(",
"'--youtube-id'",
",",
"type",
"=",
"str",
",",
"required",
"=",
"True",
",",
"help",
"=",
"(",
"\"The YouTube ID of the video from which to extract audio, \"",
"\"usually an 11-character string.\"",
")",
")",
"subparser",
".",
"add_argument",
"(",
"'--channels'",
",",
"type",
"=",
"int",
",",
"default",
"=",
"1",
",",
"help",
"=",
"(",
"\"The number of audio channels to convert to. The default of 1\"",
"\"means audio is converted to mono.\"",
")",
")",
"subparser",
".",
"add_argument",
"(",
"'--sample'",
",",
"type",
"=",
"int",
",",
"default",
"=",
"16000",
",",
"help",
"=",
"(",
"\"The sampling rate in Hz. The default of 16000 is \"",
"\"significantly downsampled compared to normal WAVE files; \"",
"\"pass 44100 for the usual sampling rate.\"",
")",
")",
"return",
"convert_youtube_audio"
] |
Sets up a subparser to convert YouTube audio files.
Adds the compulsory `--youtube-id` flag as well as the optional
`sample` and `channels` flags.
Parameters
----------
subparser : :class:`argparse.ArgumentParser`
Subparser handling the `youtube_audio` command.
|
[
"Sets",
"up",
"a",
"subparser",
"to",
"convert",
"YouTube",
"audio",
"files",
"."
] |
1d6292dc25e3a115544237e392e61bff6631d23c
|
https://github.com/mila-iqia/fuel/blob/1d6292dc25e3a115544237e392e61bff6631d23c/fuel/converters/youtube_audio.py#L65-L93
|
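A short sketch (not part of the dataset) of exercising the converter's fill_subparser above with argparse alone; the subcommand name and the video ID are placeholders.

import argparse

parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers()
converter = fill_subparser(subparsers.add_parser("youtube_audio"))
args = parser.parse_args(["youtube_audio", "--youtube-id", "abcdefghijk"])
print(args.channels, args.sample)  # 1 16000, the documented defaults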
11,977
|
mila-iqia/fuel
|
fuel/converters/ilsvrc2012.py
|
convert_ilsvrc2012
|
def convert_ilsvrc2012(directory, output_directory,
output_filename='ilsvrc2012.hdf5',
shuffle_seed=config.default_seed):
"""Converter for data from the ILSVRC 2012 competition.
Source files for this dataset can be obtained by registering at
[ILSVRC2012WEB].
Parameters
----------
    directory : str
Path from which to read raw data files.
output_directory : str
Path to which to save the HDF5 file.
output_filename : str, optional
The output filename for the HDF5 file. Default: 'ilsvrc2012.hdf5'.
shuffle_seed : int or sequence, optional
Seed for a random number generator used to shuffle the order
of the training set on disk, so that sequential reads will not
be ordered by class.
.. [ILSVRC2012WEB] http://image-net.org/challenges/LSVRC/2012/index
"""
devkit_path = os.path.join(directory, DEVKIT_ARCHIVE)
train, valid, test = [os.path.join(directory, fn) for fn in IMAGE_TARS]
n_train, valid_groundtruth, n_test, wnid_map = prepare_metadata(
devkit_path)
n_valid = len(valid_groundtruth)
output_path = os.path.join(output_directory, output_filename)
with h5py.File(output_path, 'w') as f, create_temp_tar() as patch:
log.info('Creating HDF5 datasets...')
prepare_hdf5_file(f, n_train, n_valid, n_test)
log.info('Processing training set...')
process_train_set(f, train, patch, n_train, wnid_map, shuffle_seed)
log.info('Processing validation set...')
process_other_set(f, 'valid', valid, patch, valid_groundtruth, n_train)
log.info('Processing test set...')
process_other_set(f, 'test', test, patch, (None,) * n_test,
n_train + n_valid)
log.info('Done.')
return (output_path,)
|
python
|
def convert_ilsvrc2012(directory, output_directory,
output_filename='ilsvrc2012.hdf5',
shuffle_seed=config.default_seed):
"""Converter for data from the ILSVRC 2012 competition.
Source files for this dataset can be obtained by registering at
[ILSVRC2012WEB].
Parameters
----------
    directory : str
Path from which to read raw data files.
output_directory : str
Path to which to save the HDF5 file.
output_filename : str, optional
The output filename for the HDF5 file. Default: 'ilsvrc2012.hdf5'.
shuffle_seed : int or sequence, optional
Seed for a random number generator used to shuffle the order
of the training set on disk, so that sequential reads will not
be ordered by class.
.. [ILSVRC2012WEB] http://image-net.org/challenges/LSVRC/2012/index
"""
devkit_path = os.path.join(directory, DEVKIT_ARCHIVE)
train, valid, test = [os.path.join(directory, fn) for fn in IMAGE_TARS]
n_train, valid_groundtruth, n_test, wnid_map = prepare_metadata(
devkit_path)
n_valid = len(valid_groundtruth)
output_path = os.path.join(output_directory, output_filename)
with h5py.File(output_path, 'w') as f, create_temp_tar() as patch:
log.info('Creating HDF5 datasets...')
prepare_hdf5_file(f, n_train, n_valid, n_test)
log.info('Processing training set...')
process_train_set(f, train, patch, n_train, wnid_map, shuffle_seed)
log.info('Processing validation set...')
process_other_set(f, 'valid', valid, patch, valid_groundtruth, n_train)
log.info('Processing test set...')
process_other_set(f, 'test', test, patch, (None,) * n_test,
n_train + n_valid)
log.info('Done.')
return (output_path,)
|
[
"def",
"convert_ilsvrc2012",
"(",
"directory",
",",
"output_directory",
",",
"output_filename",
"=",
"'ilsvrc2012.hdf5'",
",",
"shuffle_seed",
"=",
"config",
".",
"default_seed",
")",
":",
"devkit_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"directory",
",",
"DEVKIT_ARCHIVE",
")",
"train",
",",
"valid",
",",
"test",
"=",
"[",
"os",
".",
"path",
".",
"join",
"(",
"directory",
",",
"fn",
")",
"for",
"fn",
"in",
"IMAGE_TARS",
"]",
"n_train",
",",
"valid_groundtruth",
",",
"n_test",
",",
"wnid_map",
"=",
"prepare_metadata",
"(",
"devkit_path",
")",
"n_valid",
"=",
"len",
"(",
"valid_groundtruth",
")",
"output_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"output_directory",
",",
"output_filename",
")",
"with",
"h5py",
".",
"File",
"(",
"output_path",
",",
"'w'",
")",
"as",
"f",
",",
"create_temp_tar",
"(",
")",
"as",
"patch",
":",
"log",
".",
"info",
"(",
"'Creating HDF5 datasets...'",
")",
"prepare_hdf5_file",
"(",
"f",
",",
"n_train",
",",
"n_valid",
",",
"n_test",
")",
"log",
".",
"info",
"(",
"'Processing training set...'",
")",
"process_train_set",
"(",
"f",
",",
"train",
",",
"patch",
",",
"n_train",
",",
"wnid_map",
",",
"shuffle_seed",
")",
"log",
".",
"info",
"(",
"'Processing validation set...'",
")",
"process_other_set",
"(",
"f",
",",
"'valid'",
",",
"valid",
",",
"patch",
",",
"valid_groundtruth",
",",
"n_train",
")",
"log",
".",
"info",
"(",
"'Processing test set...'",
")",
"process_other_set",
"(",
"f",
",",
"'test'",
",",
"test",
",",
"patch",
",",
"(",
"None",
",",
")",
"*",
"n_test",
",",
"n_train",
"+",
"n_valid",
")",
"log",
".",
"info",
"(",
"'Done.'",
")",
"return",
"(",
"output_path",
",",
")"
] |
Converter for data from the ILSVRC 2012 competition.
Source files for this dataset can be obtained by registering at
[ILSVRC2012WEB].
Parameters
----------
    directory : str
Path from which to read raw data files.
output_directory : str
Path to which to save the HDF5 file.
output_filename : str, optional
The output filename for the HDF5 file. Default: 'ilsvrc2012.hdf5'.
shuffle_seed : int or sequence, optional
Seed for a random number generator used to shuffle the order
of the training set on disk, so that sequential reads will not
be ordered by class.
.. [ILSVRC2012WEB] http://image-net.org/challenges/LSVRC/2012/index
|
[
"Converter",
"for",
"data",
"from",
"the",
"ILSVRC",
"2012",
"competition",
"."
] |
1d6292dc25e3a115544237e392e61bff6631d23c
|
https://github.com/mila-iqia/fuel/blob/1d6292dc25e3a115544237e392e61bff6631d23c/fuel/converters/ilsvrc2012.py#L35-L78
|
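A hedged invocation sketch for convert_ilsvrc2012 above (not part of the dataset). The directories are placeholders and must already contain the devkit archive and the image tars referenced by DEVKIT_ARCHIVE and IMAGE_TARS; the seed value is arbitrary.

# Placeholder paths; the conversion is long-running and needs many GB of disk space.
raw_dir = "/data/ilsvrc2012_raw"
out_dir = "/data/fuel"
(hdf5_path,) = convert_ilsvrc2012(raw_dir, out_dir,
                                  output_filename="ilsvrc2012.hdf5",
                                  shuffle_seed=1234)
print(hdf5_path)  # /data/fuel/ilsvrc2012.hdf5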
11,978
|
mila-iqia/fuel
|
fuel/converters/ilsvrc2012.py
|
fill_subparser
|
def fill_subparser(subparser):
"""Sets up a subparser to convert the ILSVRC2012 dataset files.
Parameters
----------
subparser : :class:`argparse.ArgumentParser`
Subparser handling the `ilsvrc2012` command.
"""
subparser.add_argument(
"--shuffle-seed", help="Seed to use for randomizing order of the "
"training set on disk.",
default=config.default_seed, type=int, required=False)
return convert_ilsvrc2012
|
python
|
def fill_subparser(subparser):
"""Sets up a subparser to convert the ILSVRC2012 dataset files.
Parameters
----------
subparser : :class:`argparse.ArgumentParser`
Subparser handling the `ilsvrc2012` command.
"""
subparser.add_argument(
"--shuffle-seed", help="Seed to use for randomizing order of the "
"training set on disk.",
default=config.default_seed, type=int, required=False)
return convert_ilsvrc2012
|
[
"def",
"fill_subparser",
"(",
"subparser",
")",
":",
"subparser",
".",
"add_argument",
"(",
"\"--shuffle-seed\"",
",",
"help",
"=",
"\"Seed to use for randomizing order of the \"",
"\"training set on disk.\"",
",",
"default",
"=",
"config",
".",
"default_seed",
",",
"type",
"=",
"int",
",",
"required",
"=",
"False",
")",
"return",
"convert_ilsvrc2012"
] |
Sets up a subparser to convert the ILSVRC2012 dataset files.
Parameters
----------
subparser : :class:`argparse.ArgumentParser`
Subparser handling the `ilsvrc2012` command.
|
[
"Sets",
"up",
"a",
"subparser",
"to",
"convert",
"the",
"ILSVRC2012",
"dataset",
"files",
"."
] |
1d6292dc25e3a115544237e392e61bff6631d23c
|
https://github.com/mila-iqia/fuel/blob/1d6292dc25e3a115544237e392e61bff6631d23c/fuel/converters/ilsvrc2012.py#L81-L94
|
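A minimal sketch (not part of the dataset) of how the ilsvrc2012 fill_subparser above plugs into argparse; the seed value is arbitrary.

import argparse

parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers()
converter = fill_subparser(subparsers.add_parser("ilsvrc2012"))
args = parser.parse_args(["ilsvrc2012", "--shuffle-seed", "1234"])
print(args.shuffle_seed)  # 1234, parsed as an int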
11,979
|
mila-iqia/fuel
|
fuel/converters/ilsvrc2012.py
|
read_metadata_mat_file
|
def read_metadata_mat_file(meta_mat):
"""Read ILSVRC2012 metadata from the distributed MAT file.
Parameters
----------
meta_mat : str or file-like object
The filename or file-handle for `meta.mat` from the
ILSVRC2012 development kit.
Returns
-------
synsets : ndarray, 1-dimensional, compound dtype
A table containing ILSVRC2012 metadata for the "synonym sets"
or "synsets" that comprise the classes and superclasses,
including the following fields:
* `ILSVRC2012_ID`: the integer ID used in the original
competition data.
* `WNID`: A string identifier that uniquely identifies
a synset in ImageNet and WordNet.
* `wordnet_height`: The length of the longest path to
a leaf node in the FULL ImageNet/WordNet hierarchy
(leaf nodes in the FULL ImageNet/WordNet hierarchy
have `wordnet_height` 0).
* `gloss`: A string representation of an English
textual description of the concept represented by
this synset.
* `num_children`: The number of children in the hierarchy
for this synset.
* `words`: A string representation, comma separated,
          of different synonym words or phrases for the concept
represented by this synset.
* `children`: A vector of `ILSVRC2012_ID`s of children
of this synset, padded with -1. Note that these refer
to `ILSVRC2012_ID`s from the original data and *not*
the zero-based index in the table.
* `num_train_images`: The number of training images for
this synset.
"""
mat = loadmat(meta_mat, squeeze_me=True)
synsets = mat['synsets']
new_dtype = numpy.dtype([
('ILSVRC2012_ID', numpy.int16),
('WNID', ('S', max(map(len, synsets['WNID'])))),
('wordnet_height', numpy.int8),
('gloss', ('S', max(map(len, synsets['gloss'])))),
('num_children', numpy.int8),
('words', ('S', max(map(len, synsets['words'])))),
('children', (numpy.int8, max(synsets['num_children']))),
('num_train_images', numpy.uint16)
])
new_synsets = numpy.empty(synsets.shape, dtype=new_dtype)
for attr in ['ILSVRC2012_ID', 'WNID', 'wordnet_height', 'gloss',
'num_children', 'words', 'num_train_images']:
new_synsets[attr] = synsets[attr]
children = [numpy.atleast_1d(ch) for ch in synsets['children']]
padded_children = [
numpy.concatenate((c,
-numpy.ones(new_dtype['children'].shape[0] - len(c),
dtype=numpy.int16)))
for c in children
]
new_synsets['children'] = padded_children
return new_synsets
|
python
|
def read_metadata_mat_file(meta_mat):
"""Read ILSVRC2012 metadata from the distributed MAT file.
Parameters
----------
meta_mat : str or file-like object
The filename or file-handle for `meta.mat` from the
ILSVRC2012 development kit.
Returns
-------
synsets : ndarray, 1-dimensional, compound dtype
A table containing ILSVRC2012 metadata for the "synonym sets"
or "synsets" that comprise the classes and superclasses,
including the following fields:
* `ILSVRC2012_ID`: the integer ID used in the original
competition data.
* `WNID`: A string identifier that uniquely identifies
a synset in ImageNet and WordNet.
* `wordnet_height`: The length of the longest path to
a leaf node in the FULL ImageNet/WordNet hierarchy
(leaf nodes in the FULL ImageNet/WordNet hierarchy
have `wordnet_height` 0).
* `gloss`: A string representation of an English
textual description of the concept represented by
this synset.
* `num_children`: The number of children in the hierarchy
for this synset.
* `words`: A string representation, comma separated,
          of different synonym words or phrases for the concept
represented by this synset.
* `children`: A vector of `ILSVRC2012_ID`s of children
of this synset, padded with -1. Note that these refer
to `ILSVRC2012_ID`s from the original data and *not*
the zero-based index in the table.
* `num_train_images`: The number of training images for
this synset.
"""
mat = loadmat(meta_mat, squeeze_me=True)
synsets = mat['synsets']
new_dtype = numpy.dtype([
('ILSVRC2012_ID', numpy.int16),
('WNID', ('S', max(map(len, synsets['WNID'])))),
('wordnet_height', numpy.int8),
('gloss', ('S', max(map(len, synsets['gloss'])))),
('num_children', numpy.int8),
('words', ('S', max(map(len, synsets['words'])))),
('children', (numpy.int8, max(synsets['num_children']))),
('num_train_images', numpy.uint16)
])
new_synsets = numpy.empty(synsets.shape, dtype=new_dtype)
for attr in ['ILSVRC2012_ID', 'WNID', 'wordnet_height', 'gloss',
'num_children', 'words', 'num_train_images']:
new_synsets[attr] = synsets[attr]
children = [numpy.atleast_1d(ch) for ch in synsets['children']]
padded_children = [
numpy.concatenate((c,
-numpy.ones(new_dtype['children'].shape[0] - len(c),
dtype=numpy.int16)))
for c in children
]
new_synsets['children'] = padded_children
return new_synsets
|
[
"def",
"read_metadata_mat_file",
"(",
"meta_mat",
")",
":",
"mat",
"=",
"loadmat",
"(",
"meta_mat",
",",
"squeeze_me",
"=",
"True",
")",
"synsets",
"=",
"mat",
"[",
"'synsets'",
"]",
"new_dtype",
"=",
"numpy",
".",
"dtype",
"(",
"[",
"(",
"'ILSVRC2012_ID'",
",",
"numpy",
".",
"int16",
")",
",",
"(",
"'WNID'",
",",
"(",
"'S'",
",",
"max",
"(",
"map",
"(",
"len",
",",
"synsets",
"[",
"'WNID'",
"]",
")",
")",
")",
")",
",",
"(",
"'wordnet_height'",
",",
"numpy",
".",
"int8",
")",
",",
"(",
"'gloss'",
",",
"(",
"'S'",
",",
"max",
"(",
"map",
"(",
"len",
",",
"synsets",
"[",
"'gloss'",
"]",
")",
")",
")",
")",
",",
"(",
"'num_children'",
",",
"numpy",
".",
"int8",
")",
",",
"(",
"'words'",
",",
"(",
"'S'",
",",
"max",
"(",
"map",
"(",
"len",
",",
"synsets",
"[",
"'words'",
"]",
")",
")",
")",
")",
",",
"(",
"'children'",
",",
"(",
"numpy",
".",
"int8",
",",
"max",
"(",
"synsets",
"[",
"'num_children'",
"]",
")",
")",
")",
",",
"(",
"'num_train_images'",
",",
"numpy",
".",
"uint16",
")",
"]",
")",
"new_synsets",
"=",
"numpy",
".",
"empty",
"(",
"synsets",
".",
"shape",
",",
"dtype",
"=",
"new_dtype",
")",
"for",
"attr",
"in",
"[",
"'ILSVRC2012_ID'",
",",
"'WNID'",
",",
"'wordnet_height'",
",",
"'gloss'",
",",
"'num_children'",
",",
"'words'",
",",
"'num_train_images'",
"]",
":",
"new_synsets",
"[",
"attr",
"]",
"=",
"synsets",
"[",
"attr",
"]",
"children",
"=",
"[",
"numpy",
".",
"atleast_1d",
"(",
"ch",
")",
"for",
"ch",
"in",
"synsets",
"[",
"'children'",
"]",
"]",
"padded_children",
"=",
"[",
"numpy",
".",
"concatenate",
"(",
"(",
"c",
",",
"-",
"numpy",
".",
"ones",
"(",
"new_dtype",
"[",
"'children'",
"]",
".",
"shape",
"[",
"0",
"]",
"-",
"len",
"(",
"c",
")",
",",
"dtype",
"=",
"numpy",
".",
"int16",
")",
")",
")",
"for",
"c",
"in",
"children",
"]",
"new_synsets",
"[",
"'children'",
"]",
"=",
"padded_children",
"return",
"new_synsets"
] |
Read ILSVRC2012 metadata from the distributed MAT file.
Parameters
----------
meta_mat : str or file-like object
The filename or file-handle for `meta.mat` from the
ILSVRC2012 development kit.
Returns
-------
synsets : ndarray, 1-dimensional, compound dtype
A table containing ILSVRC2012 metadata for the "synonym sets"
or "synsets" that comprise the classes and superclasses,
including the following fields:
* `ILSVRC2012_ID`: the integer ID used in the original
competition data.
* `WNID`: A string identifier that uniquely identifies
a synset in ImageNet and WordNet.
* `wordnet_height`: The length of the longest path to
a leaf node in the FULL ImageNet/WordNet hierarchy
(leaf nodes in the FULL ImageNet/WordNet hierarchy
have `wordnet_height` 0).
* `gloss`: A string representation of an English
textual description of the concept represented by
this synset.
* `num_children`: The number of children in the hierarchy
for this synset.
* `words`: A string representation, comma separated,
          of different synonym words or phrases for the concept
represented by this synset.
* `children`: A vector of `ILSVRC2012_ID`s of children
of this synset, padded with -1. Note that these refer
to `ILSVRC2012_ID`s from the original data and *not*
the zero-based index in the table.
* `num_train_images`: The number of training images for
this synset.
|
[
"Read",
"ILSVRC2012",
"metadata",
"from",
"the",
"distributed",
"MAT",
"file",
"."
] |
1d6292dc25e3a115544237e392e61bff6631d23c
|
https://github.com/mila-iqia/fuel/blob/1d6292dc25e3a115544237e392e61bff6631d23c/fuel/converters/ilsvrc2012.py#L231-L294
|
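A hedged sketch (not part of the dataset) of calling read_metadata_mat_file above; the path to meta.mat is a placeholder, and scipy's loadmat plus numpy are assumed to be importable, as in the record.

# Placeholder path to the devkit's metadata file.
synsets = read_metadata_mat_file("/data/ILSVRC2012_devkit_t12/data/meta.mat")
print(synsets.dtype.names)                      # ('ILSVRC2012_ID', 'WNID', ...)
leaves = synsets[synsets['num_children'] == 0]  # synsets with no children
print(len(leaves))                              # number of leaf synsets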
11,980
|
mila-iqia/fuel
|
fuel/config_parser.py
|
multiple_paths_parser
|
def multiple_paths_parser(value):
"""Parses data_path argument.
Parameters
----------
value : str
a string of data paths separated by ":".
Returns
-------
value : list
        a list of strings indicating each data path.
"""
if isinstance(value, six.string_types):
value = value.split(os.path.pathsep)
return value
|
python
|
def multiple_paths_parser(value):
"""Parses data_path argument.
Parameters
----------
value : str
a string of data paths separated by ":".
Returns
-------
value : list
        a list of strings indicating each data path.
"""
if isinstance(value, six.string_types):
value = value.split(os.path.pathsep)
return value
|
[
"def",
"multiple_paths_parser",
"(",
"value",
")",
":",
"if",
"isinstance",
"(",
"value",
",",
"six",
".",
"string_types",
")",
":",
"value",
"=",
"value",
".",
"split",
"(",
"os",
".",
"path",
".",
"pathsep",
")",
"return",
"value"
] |
Parses data_path argument.
Parameters
----------
value : str
a string of data paths separated by ":".
Returns
-------
value : list
        a list of strings indicating each data path.
|
[
"Parses",
"data_path",
"argument",
"."
] |
1d6292dc25e3a115544237e392e61bff6631d23c
|
https://github.com/mila-iqia/fuel/blob/1d6292dc25e3a115544237e392e61bff6631d23c/fuel/config_parser.py#L108-L124
|
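A behaviour sketch for multiple_paths_parser above (not part of the dataset), assuming a POSIX system where os.path.pathsep is ":".

print(multiple_paths_parser("/data/fuel:/scratch/fuel"))
# ['/data/fuel', '/scratch/fuel']
print(multiple_paths_parser(["/data/fuel"]))  # non-string input is returned unchanged
# ['/data/fuel']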
11,981
|
mila-iqia/fuel
|
fuel/config_parser.py
|
Configuration.add_config
|
def add_config(self, key, type_, default=NOT_SET, env_var=None):
"""Add a configuration setting.
Parameters
----------
key : str
The name of the configuration setting. This must be a valid
Python attribute name i.e. alphanumeric with underscores.
        type_ : function
A function such as ``float``, ``int`` or ``str`` which takes
the configuration value and returns an object of the correct
type. Note that the values retrieved from environment
variables are always strings, while those retrieved from the
YAML file might already be parsed. Hence, the function provided
here must accept both types of input.
default : object, optional
The default configuration to return if not set. By default none
is set and an error is raised instead.
env_var : str, optional
The environment variable name that holds this configuration
value. If not given, this configuration can only be set in the
YAML configuration file.
"""
self.config[key] = {'type': type_}
if env_var is not None:
self.config[key]['env_var'] = env_var
if default is not NOT_SET:
self.config[key]['default'] = default
|
python
|
def add_config(self, key, type_, default=NOT_SET, env_var=None):
"""Add a configuration setting.
Parameters
----------
key : str
The name of the configuration setting. This must be a valid
Python attribute name i.e. alphanumeric with underscores.
        type_ : function
A function such as ``float``, ``int`` or ``str`` which takes
the configuration value and returns an object of the correct
type. Note that the values retrieved from environment
variables are always strings, while those retrieved from the
YAML file might already be parsed. Hence, the function provided
here must accept both types of input.
default : object, optional
The default configuration to return if not set. By default none
is set and an error is raised instead.
env_var : str, optional
The environment variable name that holds this configuration
value. If not given, this configuration can only be set in the
YAML configuration file.
"""
self.config[key] = {'type': type_}
if env_var is not None:
self.config[key]['env_var'] = env_var
if default is not NOT_SET:
self.config[key]['default'] = default
|
[
"def",
"add_config",
"(",
"self",
",",
"key",
",",
"type_",
",",
"default",
"=",
"NOT_SET",
",",
"env_var",
"=",
"None",
")",
":",
"self",
".",
"config",
"[",
"key",
"]",
"=",
"{",
"'type'",
":",
"type_",
"}",
"if",
"env_var",
"is",
"not",
"None",
":",
"self",
".",
"config",
"[",
"key",
"]",
"[",
"'env_var'",
"]",
"=",
"env_var",
"if",
"default",
"is",
"not",
"NOT_SET",
":",
"self",
".",
"config",
"[",
"key",
"]",
"[",
"'default'",
"]",
"=",
"default"
] |
Add a configuration setting.
Parameters
----------
key : str
The name of the configuration setting. This must be a valid
Python attribute name i.e. alphanumeric with underscores.
        type_ : function
A function such as ``float``, ``int`` or ``str`` which takes
the configuration value and returns an object of the correct
type. Note that the values retrieved from environment
variables are always strings, while those retrieved from the
YAML file might already be parsed. Hence, the function provided
here must accept both types of input.
default : object, optional
The default configuration to return if not set. By default none
is set and an error is raised instead.
env_var : str, optional
The environment variable name that holds this configuration
value. If not given, this configuration can only be set in the
YAML configuration file.
|
[
"Add",
"a",
"configuration",
"setting",
"."
] |
1d6292dc25e3a115544237e392e61bff6631d23c
|
https://github.com/mila-iqia/fuel/blob/1d6292dc25e3a115544237e392e61bff6631d23c/fuel/config_parser.py#L168-L196
|
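A hedged sketch (not part of the dataset) of registering a setting with add_config above; the key, default and environment variable name are invented for illustration, and `config` stands in for a Configuration instance.

# Hypothetical setting: an integer that may also come from FUEL_EXAMPLE_LIMIT.
config.add_config('example_limit', type_=int, default=100,
                  env_var='FUEL_EXAMPLE_LIMIT')
# Once the configuration has been loaded, the value can be read as config.example_limit.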
11,982
|
mila-iqia/fuel
|
fuel/server.py
|
send_arrays
|
def send_arrays(socket, arrays, stop=False):
"""Send NumPy arrays using the buffer interface and some metadata.
Parameters
----------
socket : :class:`zmq.Socket`
The socket to send data over.
arrays : list
A list of :class:`numpy.ndarray` to transfer.
stop : bool, optional
Instead of sending a series of NumPy arrays, send a JSON object
with a single `stop` key. The :func:`recv_arrays` will raise
``StopIteration`` when it receives this.
Notes
-----
The protocol is very simple: A single JSON object describing the array
format (using the same specification as ``.npy`` files) is sent first.
Subsequently the arrays are sent as bytestreams (through NumPy's
support of the buffering protocol).
"""
if arrays:
# The buffer protocol only works on contiguous arrays
arrays = [numpy.ascontiguousarray(array) for array in arrays]
if stop:
headers = {'stop': True}
socket.send_json(headers)
else:
headers = [header_data_from_array_1_0(array) for array in arrays]
socket.send_json(headers, zmq.SNDMORE)
for array in arrays[:-1]:
socket.send(array, zmq.SNDMORE)
socket.send(arrays[-1])
|
python
|
def send_arrays(socket, arrays, stop=False):
"""Send NumPy arrays using the buffer interface and some metadata.
Parameters
----------
socket : :class:`zmq.Socket`
The socket to send data over.
arrays : list
A list of :class:`numpy.ndarray` to transfer.
stop : bool, optional
Instead of sending a series of NumPy arrays, send a JSON object
with a single `stop` key. The :func:`recv_arrays` will raise
``StopIteration`` when it receives this.
Notes
-----
The protocol is very simple: A single JSON object describing the array
format (using the same specification as ``.npy`` files) is sent first.
Subsequently the arrays are sent as bytestreams (through NumPy's
support of the buffering protocol).
"""
if arrays:
# The buffer protocol only works on contiguous arrays
arrays = [numpy.ascontiguousarray(array) for array in arrays]
if stop:
headers = {'stop': True}
socket.send_json(headers)
else:
headers = [header_data_from_array_1_0(array) for array in arrays]
socket.send_json(headers, zmq.SNDMORE)
for array in arrays[:-1]:
socket.send(array, zmq.SNDMORE)
socket.send(arrays[-1])
|
[
"def",
"send_arrays",
"(",
"socket",
",",
"arrays",
",",
"stop",
"=",
"False",
")",
":",
"if",
"arrays",
":",
"# The buffer protocol only works on contiguous arrays",
"arrays",
"=",
"[",
"numpy",
".",
"ascontiguousarray",
"(",
"array",
")",
"for",
"array",
"in",
"arrays",
"]",
"if",
"stop",
":",
"headers",
"=",
"{",
"'stop'",
":",
"True",
"}",
"socket",
".",
"send_json",
"(",
"headers",
")",
"else",
":",
"headers",
"=",
"[",
"header_data_from_array_1_0",
"(",
"array",
")",
"for",
"array",
"in",
"arrays",
"]",
"socket",
".",
"send_json",
"(",
"headers",
",",
"zmq",
".",
"SNDMORE",
")",
"for",
"array",
"in",
"arrays",
"[",
":",
"-",
"1",
"]",
":",
"socket",
".",
"send",
"(",
"array",
",",
"zmq",
".",
"SNDMORE",
")",
"socket",
".",
"send",
"(",
"arrays",
"[",
"-",
"1",
"]",
")"
] |
Send NumPy arrays using the buffer interface and some metadata.
Parameters
----------
socket : :class:`zmq.Socket`
The socket to send data over.
arrays : list
A list of :class:`numpy.ndarray` to transfer.
stop : bool, optional
Instead of sending a series of NumPy arrays, send a JSON object
with a single `stop` key. The :func:`recv_arrays` will raise
``StopIteration`` when it receives this.
Notes
-----
The protocol is very simple: A single JSON object describing the array
format (using the same specification as ``.npy`` files) is sent first.
Subsequently the arrays are sent as bytestreams (through NumPy's
support of the buffering protocol).
|
[
"Send",
"NumPy",
"arrays",
"using",
"the",
"buffer",
"interface",
"and",
"some",
"metadata",
"."
] |
1d6292dc25e3a115544237e392e61bff6631d23c
|
https://github.com/mila-iqia/fuel/blob/1d6292dc25e3a115544237e392e61bff6631d23c/fuel/server.py#L12-L45
|
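A producer-side sketch for send_arrays above (not part of the dataset); the port is arbitrary, and pyzmq plus numpy are assumed to be importable, as in the record.

import numpy
import zmq

context = zmq.Context()
socket = context.socket(zmq.PUSH)
socket.bind('tcp://*:5557')
batch = [numpy.arange(6).reshape(2, 3), numpy.ones(4, dtype='float32')]
send_arrays(socket, batch)            # JSON headers followed by the raw buffers
send_arrays(socket, None, stop=True)  # tells recv_arrays that the epoch is over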
11,983
|
mila-iqia/fuel
|
fuel/server.py
|
recv_arrays
|
def recv_arrays(socket):
"""Receive a list of NumPy arrays.
Parameters
----------
socket : :class:`zmq.Socket`
The socket to receive the arrays on.
Returns
-------
list
A list of :class:`numpy.ndarray` objects.
Raises
------
StopIteration
If the first JSON object received contains the key `stop`,
signifying that the server has finished a single epoch.
"""
headers = socket.recv_json()
if 'stop' in headers:
raise StopIteration
arrays = []
for header in headers:
data = socket.recv(copy=False)
buf = buffer_(data)
array = numpy.frombuffer(buf, dtype=numpy.dtype(header['descr']))
array.shape = header['shape']
if header['fortran_order']:
array.shape = header['shape'][::-1]
array = array.transpose()
arrays.append(array)
return arrays
|
python
|
def recv_arrays(socket):
"""Receive a list of NumPy arrays.
Parameters
----------
socket : :class:`zmq.Socket`
The socket to receive the arrays on.
Returns
-------
list
A list of :class:`numpy.ndarray` objects.
Raises
------
StopIteration
If the first JSON object received contains the key `stop`,
signifying that the server has finished a single epoch.
"""
headers = socket.recv_json()
if 'stop' in headers:
raise StopIteration
arrays = []
for header in headers:
data = socket.recv(copy=False)
buf = buffer_(data)
array = numpy.frombuffer(buf, dtype=numpy.dtype(header['descr']))
array.shape = header['shape']
if header['fortran_order']:
array.shape = header['shape'][::-1]
array = array.transpose()
arrays.append(array)
return arrays
|
[
"def",
"recv_arrays",
"(",
"socket",
")",
":",
"headers",
"=",
"socket",
".",
"recv_json",
"(",
")",
"if",
"'stop'",
"in",
"headers",
":",
"raise",
"StopIteration",
"arrays",
"=",
"[",
"]",
"for",
"header",
"in",
"headers",
":",
"data",
"=",
"socket",
".",
"recv",
"(",
"copy",
"=",
"False",
")",
"buf",
"=",
"buffer_",
"(",
"data",
")",
"array",
"=",
"numpy",
".",
"frombuffer",
"(",
"buf",
",",
"dtype",
"=",
"numpy",
".",
"dtype",
"(",
"header",
"[",
"'descr'",
"]",
")",
")",
"array",
".",
"shape",
"=",
"header",
"[",
"'shape'",
"]",
"if",
"header",
"[",
"'fortran_order'",
"]",
":",
"array",
".",
"shape",
"=",
"header",
"[",
"'shape'",
"]",
"[",
":",
":",
"-",
"1",
"]",
"array",
"=",
"array",
".",
"transpose",
"(",
")",
"arrays",
".",
"append",
"(",
"array",
")",
"return",
"arrays"
] |
Receive a list of NumPy arrays.
Parameters
----------
socket : :class:`zmq.Socket`
The socket to receive the arrays on.
Returns
-------
list
A list of :class:`numpy.ndarray` objects.
Raises
------
StopIteration
If the first JSON object received contains the key `stop`,
signifying that the server has finished a single epoch.
|
[
"Receive",
"a",
"list",
"of",
"NumPy",
"arrays",
"."
] |
1d6292dc25e3a115544237e392e61bff6631d23c
|
https://github.com/mila-iqia/fuel/blob/1d6292dc25e3a115544237e392e61bff6631d23c/fuel/server.py#L48-L81
|
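A matching consumer-side sketch for recv_arrays above (not part of the dataset); it assumes a PULL socket connected to a sender such as the one in the previous record.

import zmq

context = zmq.Context()
socket = context.socket(zmq.PULL)
socket.connect('tcp://localhost:5557')
try:
    while True:
        arrays = recv_arrays(socket)              # one numpy.ndarray per header
        print([array.shape for array in arrays])
except StopIteration:
    print('epoch finished')                       # raised when the stop header arrives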
11,984
|
mila-iqia/fuel
|
fuel/server.py
|
start_server
|
def start_server(data_stream, port=5557, hwm=10):
"""Start a data processing server.
This command starts a server in the current process that performs the
actual data processing (by retrieving data from the given data stream).
It also starts a second process, the broker, which mediates between the
server and the client. The broker also keeps a buffer of batches in
memory.
Parameters
----------
data_stream : :class:`.DataStream`
The data stream to return examples from.
port : int, optional
The port the server and the client (training loop) will use to
communicate. Defaults to 5557.
hwm : int, optional
The `ZeroMQ high-water mark (HWM)
<http://zguide.zeromq.org/page:all#High-Water-Marks>`_ on the
sending socket. Increasing this increases the buffer, which can be
useful if your data preprocessing times are very random. However,
it will increase memory usage. There is no easy way to tell how
many batches will actually be queued with a particular HWM.
Defaults to 10. Be sure to set the corresponding HWM on the
receiving end as well.
"""
logging.basicConfig(level='INFO')
context = zmq.Context()
socket = context.socket(zmq.PUSH)
socket.set_hwm(hwm)
socket.bind('tcp://*:{}'.format(port))
it = data_stream.get_epoch_iterator()
logger.info('server started')
while True:
try:
data = next(it)
stop = False
logger.debug("sending {} arrays".format(len(data)))
except StopIteration:
it = data_stream.get_epoch_iterator()
data = None
stop = True
logger.debug("sending StopIteration")
send_arrays(socket, data, stop=stop)
|
python
|
def start_server(data_stream, port=5557, hwm=10):
"""Start a data processing server.
This command starts a server in the current process that performs the
actual data processing (by retrieving data from the given data stream).
It also starts a second process, the broker, which mediates between the
server and the client. The broker also keeps a buffer of batches in
memory.
Parameters
----------
data_stream : :class:`.DataStream`
The data stream to return examples from.
port : int, optional
The port the server and the client (training loop) will use to
communicate. Defaults to 5557.
hwm : int, optional
The `ZeroMQ high-water mark (HWM)
<http://zguide.zeromq.org/page:all#High-Water-Marks>`_ on the
sending socket. Increasing this increases the buffer, which can be
useful if your data preprocessing times are very random. However,
it will increase memory usage. There is no easy way to tell how
many batches will actually be queued with a particular HWM.
Defaults to 10. Be sure to set the corresponding HWM on the
receiving end as well.
"""
logging.basicConfig(level='INFO')
context = zmq.Context()
socket = context.socket(zmq.PUSH)
socket.set_hwm(hwm)
socket.bind('tcp://*:{}'.format(port))
it = data_stream.get_epoch_iterator()
logger.info('server started')
while True:
try:
data = next(it)
stop = False
logger.debug("sending {} arrays".format(len(data)))
except StopIteration:
it = data_stream.get_epoch_iterator()
data = None
stop = True
logger.debug("sending StopIteration")
send_arrays(socket, data, stop=stop)
|
[
"def",
"start_server",
"(",
"data_stream",
",",
"port",
"=",
"5557",
",",
"hwm",
"=",
"10",
")",
":",
"logging",
".",
"basicConfig",
"(",
"level",
"=",
"'INFO'",
")",
"context",
"=",
"zmq",
".",
"Context",
"(",
")",
"socket",
"=",
"context",
".",
"socket",
"(",
"zmq",
".",
"PUSH",
")",
"socket",
".",
"set_hwm",
"(",
"hwm",
")",
"socket",
".",
"bind",
"(",
"'tcp://*:{}'",
".",
"format",
"(",
"port",
")",
")",
"it",
"=",
"data_stream",
".",
"get_epoch_iterator",
"(",
")",
"logger",
".",
"info",
"(",
"'server started'",
")",
"while",
"True",
":",
"try",
":",
"data",
"=",
"next",
"(",
"it",
")",
"stop",
"=",
"False",
"logger",
".",
"debug",
"(",
"\"sending {} arrays\"",
".",
"format",
"(",
"len",
"(",
"data",
")",
")",
")",
"except",
"StopIteration",
":",
"it",
"=",
"data_stream",
".",
"get_epoch_iterator",
"(",
")",
"data",
"=",
"None",
"stop",
"=",
"True",
"logger",
".",
"debug",
"(",
"\"sending StopIteration\"",
")",
"send_arrays",
"(",
"socket",
",",
"data",
",",
"stop",
"=",
"stop",
")"
] |
Start a data processing server.
This command starts a server in the current process that performs the
actual data processing (by retrieving data from the given data stream).
It also starts a second process, the broker, which mediates between the
server and the client. The broker also keeps a buffer of batches in
memory.
Parameters
----------
data_stream : :class:`.DataStream`
The data stream to return examples from.
port : int, optional
The port the server and the client (training loop) will use to
communicate. Defaults to 5557.
hwm : int, optional
The `ZeroMQ high-water mark (HWM)
<http://zguide.zeromq.org/page:all#High-Water-Marks>`_ on the
sending socket. Increasing this increases the buffer, which can be
useful if your data preprocessing times are very random. However,
it will increase memory usage. There is no easy way to tell how
many batches will actually be queued with a particular HWM.
Defaults to 10. Be sure to set the corresponding HWM on the
receiving end as well.
|
[
"Start",
"a",
"data",
"processing",
"server",
"."
] |
1d6292dc25e3a115544237e392e61bff6631d23c
|
https://github.com/mila-iqia/fuel/blob/1d6292dc25e3a115544237e392e61bff6631d23c/fuel/server.py#L84-L131
|
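A hedged sketch of launching start_server above (not part of the dataset). `stream` is a stand-in for any already-constructed Fuel data stream that yields tuples of numpy arrays; building one is outside this record, and a client would read from the same port with a matching HWM.

# `stream` is assumed to be an existing fuel.streams.DataStream instance.
# start_server never returns, so it is usually run in its own process.
start_server(stream, port=5557, hwm=10)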
11,985
|
apacha/OMR-Datasets
|
omrdatasettools/image_generators/HomusImageGenerator.py
|
HomusImageGenerator.create_images
|
def create_images(raw_data_directory: str,
destination_directory: str,
stroke_thicknesses: List[int],
canvas_width: int = None,
canvas_height: int = None,
staff_line_spacing: int = 14,
staff_line_vertical_offsets: List[int] = None,
random_position_on_canvas: bool = False) -> dict:
"""
Creates a visual representation of the Homus Dataset by parsing all text-files and the symbols as specified
by the parameters by drawing lines that connect the points from each stroke of each symbol.
Each symbol will be drawn in the center of a fixed canvas, specified by width and height.
        :param raw_data_directory: The directory that contains the text-files with the textual representation
of the music symbols
        :param destination_directory: The directory into which the symbols should be generated. One sub-folder per
symbol category will be generated automatically
:param stroke_thicknesses: The thickness of the pen, used for drawing the lines in pixels. If multiple are
specified, multiple images will be generated that have a different suffix, e.g.
1-16-3.png for the 3-px version and 1-16-2.png for the 2-px version of the image 1-16
        :param canvas_width: The width of the canvas that each image will be drawn upon, regardless of the original size of
the symbol. Larger symbols will be cropped. If the original size of the symbol should be used,
                             provide None here.
        :param canvas_height: The height of the canvas that each image will be drawn upon, regardless of the original size of
the symbol. Larger symbols will be cropped. If the original size of the symbol should be used,
                              provide None here
:param staff_line_spacing: Number of pixels spacing between each of the five staff-lines
:param staff_line_vertical_offsets: List of vertical offsets, where the staff-lines will be superimposed over
the drawn images. If None is provided, no staff-lines will be superimposed.
If multiple values are provided, multiple versions of each symbol will be
generated with the appropriate staff-lines, e.g. 1-5_3_offset_70.png and
1-5_3_offset_77.png for two versions of the symbol 1-5 with stroke thickness
3 and staff-line offsets 70 and 77 pixels from the top.
:param random_position_on_canvas: True, if the symbols should be randomly placed on the fixed canvas.
False, if the symbols should be centered in the fixed canvas.
Note that this flag only has an effect, if fixed canvas sizes are used.
:return: A dictionary that contains the file-names of all generated symbols and the respective bounding-boxes
of each symbol.
"""
all_symbol_files = [y for x in os.walk(raw_data_directory) for y in glob(os.path.join(x[0], '*.txt'))]
staff_line_multiplier = 1
if staff_line_vertical_offsets is not None and staff_line_vertical_offsets:
staff_line_multiplier = len(staff_line_vertical_offsets)
total_number_of_symbols = len(all_symbol_files) * len(stroke_thicknesses) * staff_line_multiplier
output = "Generating {0} images with {1} symbols in {2} different stroke thicknesses ({3})".format(
total_number_of_symbols, len(all_symbol_files), len(stroke_thicknesses), stroke_thicknesses)
if staff_line_vertical_offsets is not None:
output += " and with staff-lines with {0} different offsets from the top ({1})".format(
staff_line_multiplier, staff_line_vertical_offsets)
if canvas_width is not None and canvas_height is not None:
if random_position_on_canvas is False:
output += "\nRandomly drawn on a fixed canvas of size {0}x{1} (Width x Height)".format(canvas_width,
canvas_height)
else:
output += "\nCentrally drawn on a fixed canvas of size {0}x{1} (Width x Height)".format(canvas_width,
canvas_height)
print(output)
print("In directory {0}".format(os.path.abspath(destination_directory)), flush=True)
bounding_boxes = dict()
progress_bar = tqdm(total=total_number_of_symbols, mininterval=0.25)
for symbol_file in all_symbol_files:
with open(symbol_file) as file:
content = file.read()
symbol = HomusSymbol.initialize_from_string(content)
target_directory = os.path.join(destination_directory, symbol.symbol_class)
os.makedirs(target_directory, exist_ok=True)
raw_file_name_without_extension = os.path.splitext(os.path.basename(symbol_file))[0]
for stroke_thickness in stroke_thicknesses:
export_path = ExportPath(destination_directory, symbol.symbol_class, raw_file_name_without_extension,
'png', stroke_thickness)
if canvas_width is None and canvas_height is None:
symbol.draw_into_bitmap(export_path, stroke_thickness, margin=2)
else:
symbol.draw_onto_canvas(export_path, stroke_thickness, 0, canvas_width,
canvas_height, staff_line_spacing, staff_line_vertical_offsets,
bounding_boxes, random_position_on_canvas)
progress_bar.update(1 * staff_line_multiplier)
progress_bar.close()
return bounding_boxes
|
python
|
def create_images(raw_data_directory: str,
destination_directory: str,
stroke_thicknesses: List[int],
canvas_width: int = None,
canvas_height: int = None,
staff_line_spacing: int = 14,
staff_line_vertical_offsets: List[int] = None,
random_position_on_canvas: bool = False) -> dict:
"""
Creates a visual representation of the Homus Dataset by parsing all text-files and the symbols as specified
by the parameters by drawing lines that connect the points from each stroke of each symbol.
Each symbol will be drawn in the center of a fixed canvas, specified by width and height.
        :param raw_data_directory: The directory that contains the text-files with the textual representation
of the music symbols
        :param destination_directory: The directory into which the symbols should be generated. One sub-folder per
symbol category will be generated automatically
:param stroke_thicknesses: The thickness of the pen, used for drawing the lines in pixels. If multiple are
specified, multiple images will be generated that have a different suffix, e.g.
1-16-3.png for the 3-px version and 1-16-2.png for the 2-px version of the image 1-16
        :param canvas_width: The width of the canvas that each image will be drawn upon, regardless of the original size of
the symbol. Larger symbols will be cropped. If the original size of the symbol should be used,
                             provide None here.
        :param canvas_height: The height of the canvas that each image will be drawn upon, regardless of the original size of
the symbol. Larger symbols will be cropped. If the original size of the symbol should be used,
                              provide None here
:param staff_line_spacing: Number of pixels spacing between each of the five staff-lines
:param staff_line_vertical_offsets: List of vertical offsets, where the staff-lines will be superimposed over
the drawn images. If None is provided, no staff-lines will be superimposed.
If multiple values are provided, multiple versions of each symbol will be
generated with the appropriate staff-lines, e.g. 1-5_3_offset_70.png and
1-5_3_offset_77.png for two versions of the symbol 1-5 with stroke thickness
3 and staff-line offsets 70 and 77 pixels from the top.
:param random_position_on_canvas: True, if the symbols should be randomly placed on the fixed canvas.
False, if the symbols should be centered in the fixed canvas.
Note that this flag only has an effect, if fixed canvas sizes are used.
:return: A dictionary that contains the file-names of all generated symbols and the respective bounding-boxes
of each symbol.
"""
all_symbol_files = [y for x in os.walk(raw_data_directory) for y in glob(os.path.join(x[0], '*.txt'))]
staff_line_multiplier = 1
if staff_line_vertical_offsets is not None and staff_line_vertical_offsets:
staff_line_multiplier = len(staff_line_vertical_offsets)
total_number_of_symbols = len(all_symbol_files) * len(stroke_thicknesses) * staff_line_multiplier
output = "Generating {0} images with {1} symbols in {2} different stroke thicknesses ({3})".format(
total_number_of_symbols, len(all_symbol_files), len(stroke_thicknesses), stroke_thicknesses)
if staff_line_vertical_offsets is not None:
output += " and with staff-lines with {0} different offsets from the top ({1})".format(
staff_line_multiplier, staff_line_vertical_offsets)
if canvas_width is not None and canvas_height is not None:
if random_position_on_canvas is False:
output += "\nRandomly drawn on a fixed canvas of size {0}x{1} (Width x Height)".format(canvas_width,
canvas_height)
else:
output += "\nCentrally drawn on a fixed canvas of size {0}x{1} (Width x Height)".format(canvas_width,
canvas_height)
print(output)
print("In directory {0}".format(os.path.abspath(destination_directory)), flush=True)
bounding_boxes = dict()
progress_bar = tqdm(total=total_number_of_symbols, mininterval=0.25)
for symbol_file in all_symbol_files:
with open(symbol_file) as file:
content = file.read()
symbol = HomusSymbol.initialize_from_string(content)
target_directory = os.path.join(destination_directory, symbol.symbol_class)
os.makedirs(target_directory, exist_ok=True)
raw_file_name_without_extension = os.path.splitext(os.path.basename(symbol_file))[0]
for stroke_thickness in stroke_thicknesses:
export_path = ExportPath(destination_directory, symbol.symbol_class, raw_file_name_without_extension,
'png', stroke_thickness)
if canvas_width is None and canvas_height is None:
symbol.draw_into_bitmap(export_path, stroke_thickness, margin=2)
else:
symbol.draw_onto_canvas(export_path, stroke_thickness, 0, canvas_width,
canvas_height, staff_line_spacing, staff_line_vertical_offsets,
bounding_boxes, random_position_on_canvas)
progress_bar.update(1 * staff_line_multiplier)
progress_bar.close()
return bounding_boxes
|
[
"def",
"create_images",
"(",
"raw_data_directory",
":",
"str",
",",
"destination_directory",
":",
"str",
",",
"stroke_thicknesses",
":",
"List",
"[",
"int",
"]",
",",
"canvas_width",
":",
"int",
"=",
"None",
",",
"canvas_height",
":",
"int",
"=",
"None",
",",
"staff_line_spacing",
":",
"int",
"=",
"14",
",",
"staff_line_vertical_offsets",
":",
"List",
"[",
"int",
"]",
"=",
"None",
",",
"random_position_on_canvas",
":",
"bool",
"=",
"False",
")",
"->",
"dict",
":",
"all_symbol_files",
"=",
"[",
"y",
"for",
"x",
"in",
"os",
".",
"walk",
"(",
"raw_data_directory",
")",
"for",
"y",
"in",
"glob",
"(",
"os",
".",
"path",
".",
"join",
"(",
"x",
"[",
"0",
"]",
",",
"'*.txt'",
")",
")",
"]",
"staff_line_multiplier",
"=",
"1",
"if",
"staff_line_vertical_offsets",
"is",
"not",
"None",
"and",
"staff_line_vertical_offsets",
":",
"staff_line_multiplier",
"=",
"len",
"(",
"staff_line_vertical_offsets",
")",
"total_number_of_symbols",
"=",
"len",
"(",
"all_symbol_files",
")",
"*",
"len",
"(",
"stroke_thicknesses",
")",
"*",
"staff_line_multiplier",
"output",
"=",
"\"Generating {0} images with {1} symbols in {2} different stroke thicknesses ({3})\"",
".",
"format",
"(",
"total_number_of_symbols",
",",
"len",
"(",
"all_symbol_files",
")",
",",
"len",
"(",
"stroke_thicknesses",
")",
",",
"stroke_thicknesses",
")",
"if",
"staff_line_vertical_offsets",
"is",
"not",
"None",
":",
"output",
"+=",
"\" and with staff-lines with {0} different offsets from the top ({1})\"",
".",
"format",
"(",
"staff_line_multiplier",
",",
"staff_line_vertical_offsets",
")",
"if",
"canvas_width",
"is",
"not",
"None",
"and",
"canvas_height",
"is",
"not",
"None",
":",
"if",
"random_position_on_canvas",
"is",
"False",
":",
"output",
"+=",
"\"\\nRandomly drawn on a fixed canvas of size {0}x{1} (Width x Height)\"",
".",
"format",
"(",
"canvas_width",
",",
"canvas_height",
")",
"else",
":",
"output",
"+=",
"\"\\nCentrally drawn on a fixed canvas of size {0}x{1} (Width x Height)\"",
".",
"format",
"(",
"canvas_width",
",",
"canvas_height",
")",
"print",
"(",
"output",
")",
"print",
"(",
"\"In directory {0}\"",
".",
"format",
"(",
"os",
".",
"path",
".",
"abspath",
"(",
"destination_directory",
")",
")",
",",
"flush",
"=",
"True",
")",
"bounding_boxes",
"=",
"dict",
"(",
")",
"progress_bar",
"=",
"tqdm",
"(",
"total",
"=",
"total_number_of_symbols",
",",
"mininterval",
"=",
"0.25",
")",
"for",
"symbol_file",
"in",
"all_symbol_files",
":",
"with",
"open",
"(",
"symbol_file",
")",
"as",
"file",
":",
"content",
"=",
"file",
".",
"read",
"(",
")",
"symbol",
"=",
"HomusSymbol",
".",
"initialize_from_string",
"(",
"content",
")",
"target_directory",
"=",
"os",
".",
"path",
".",
"join",
"(",
"destination_directory",
",",
"symbol",
".",
"symbol_class",
")",
"os",
".",
"makedirs",
"(",
"target_directory",
",",
"exist_ok",
"=",
"True",
")",
"raw_file_name_without_extension",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"os",
".",
"path",
".",
"basename",
"(",
"symbol_file",
")",
")",
"[",
"0",
"]",
"for",
"stroke_thickness",
"in",
"stroke_thicknesses",
":",
"export_path",
"=",
"ExportPath",
"(",
"destination_directory",
",",
"symbol",
".",
"symbol_class",
",",
"raw_file_name_without_extension",
",",
"'png'",
",",
"stroke_thickness",
")",
"if",
"canvas_width",
"is",
"None",
"and",
"canvas_height",
"is",
"None",
":",
"symbol",
".",
"draw_into_bitmap",
"(",
"export_path",
",",
"stroke_thickness",
",",
"margin",
"=",
"2",
")",
"else",
":",
"symbol",
".",
"draw_onto_canvas",
"(",
"export_path",
",",
"stroke_thickness",
",",
"0",
",",
"canvas_width",
",",
"canvas_height",
",",
"staff_line_spacing",
",",
"staff_line_vertical_offsets",
",",
"bounding_boxes",
",",
"random_position_on_canvas",
")",
"progress_bar",
".",
"update",
"(",
"1",
"*",
"staff_line_multiplier",
")",
"progress_bar",
".",
"close",
"(",
")",
"return",
"bounding_boxes"
] |
Creates a visual representation of the Homus Dataset by parsing all text-files and the symbols as specified
by the parameters by drawing lines that connect the points from each stroke of each symbol.
Each symbol will be drawn in the center of a fixed canvas, specified by width and height.
        :param raw_data_directory: The directory that contains the text-files with the textual representation
of the music symbols
        :param destination_directory: The directory into which the symbols should be generated. One sub-folder per
symbol category will be generated automatically
:param stroke_thicknesses: The thickness of the pen, used for drawing the lines in pixels. If multiple are
specified, multiple images will be generated that have a different suffix, e.g.
1-16-3.png for the 3-px version and 1-16-2.png for the 2-px version of the image 1-16
        :param canvas_width: The width of the canvas that each image will be drawn upon, regardless of the original size of
the symbol. Larger symbols will be cropped. If the original size of the symbol should be used,
                             provide None here.
        :param canvas_height: The height of the canvas that each image will be drawn upon, regardless of the original size of
the symbol. Larger symbols will be cropped. If the original size of the symbol should be used,
                              provide None here
:param staff_line_spacing: Number of pixels spacing between each of the five staff-lines
:param staff_line_vertical_offsets: List of vertical offsets, where the staff-lines will be superimposed over
the drawn images. If None is provided, no staff-lines will be superimposed.
If multiple values are provided, multiple versions of each symbol will be
generated with the appropriate staff-lines, e.g. 1-5_3_offset_70.png and
1-5_3_offset_77.png for two versions of the symbol 1-5 with stroke thickness
3 and staff-line offsets 70 and 77 pixels from the top.
:param random_position_on_canvas: True, if the symbols should be randomly placed on the fixed canvas.
False, if the symbols should be centered in the fixed canvas.
Note that this flag only has an effect, if fixed canvas sizes are used.
:return: A dictionary that contains the file-names of all generated symbols and the respective bounding-boxes
of each symbol.
|
[
"Creates",
"a",
"visual",
"representation",
"of",
"the",
"Homus",
"Dataset",
"by",
"parsing",
"all",
"text",
"-",
"files",
"and",
"the",
"symbols",
"as",
"specified",
"by",
"the",
"parameters",
"by",
"drawing",
"lines",
"that",
"connect",
"the",
"points",
"from",
"each",
"stroke",
"of",
"each",
"symbol",
"."
] |
d0a22a03ae35caeef211729efa340e1ec0e01ea5
|
https://github.com/apacha/OMR-Datasets/blob/d0a22a03ae35caeef211729efa340e1ec0e01ea5/omrdatasettools/image_generators/HomusImageGenerator.py#L13-L105
|
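A minimal usage sketch for the HOMUS image-generation routine documented in the record above. The import path and the method name create_images are assumptions based on the record's file path; the directory paths and parameter values are placeholders.

# Hypothetical usage; import path and method name assumed from the record's file path.
from omrdatasettools.image_generators.HomusImageGenerator import HomusImageGenerator

bounding_boxes = HomusImageGenerator().create_images(
    raw_data_directory="data/homus_raw",           # placeholder: directory with the HOMUS text-files
    destination_directory="data/homus_images",     # one sub-folder per symbol class is created
    stroke_thicknesses=[3],                        # render one 3-px version of each symbol
    canvas_width=96, canvas_height=192,            # fixed canvas; pass None/None to keep original sizes
    staff_line_spacing=14,
    staff_line_vertical_offsets=None,              # no staff-lines superimposed
    random_position_on_canvas=False)               # center each symbol on the canvas
print(len(bounding_boxes), "bounding boxes returned")
 |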
11,986
|
apacha/OMR-Datasets
|
omrdatasettools/image_generators/MuscimaPlusPlusImageGenerator.py
|
MuscimaPlusPlusImageGenerator.extract_and_render_all_symbol_masks
|
def extract_and_render_all_symbol_masks(self, raw_data_directory: str, destination_directory: str):
"""
Extracts all symbols from the raw XML documents and generates individual symbols from the masks
:param raw_data_directory: The directory, that contains the xml-files and matching images
:param destination_directory: The directory, in which the symbols should be generated into. One sub-folder per
symbol category will be generated automatically
"""
print("Extracting Symbols from Muscima++ Dataset...")
xml_files = self.get_all_xml_file_paths(raw_data_directory)
crop_objects = self.load_crop_objects_from_xml_files(xml_files)
self.render_masks_of_crop_objects_into_image(crop_objects, destination_directory)
|
python
|
def extract_and_render_all_symbol_masks(self, raw_data_directory: str, destination_directory: str):
"""
Extracts all symbols from the raw XML documents and generates individual symbols from the masks
:param raw_data_directory: The directory, that contains the xml-files and matching images
:param destination_directory: The directory, in which the symbols should be generated into. One sub-folder per
symbol category will be generated automatically
"""
print("Extracting Symbols from Muscima++ Dataset...")
xml_files = self.get_all_xml_file_paths(raw_data_directory)
crop_objects = self.load_crop_objects_from_xml_files(xml_files)
self.render_masks_of_crop_objects_into_image(crop_objects, destination_directory)
|
[
"def",
"extract_and_render_all_symbol_masks",
"(",
"self",
",",
"raw_data_directory",
":",
"str",
",",
"destination_directory",
":",
"str",
")",
":",
"print",
"(",
"\"Extracting Symbols from Muscima++ Dataset...\"",
")",
"xml_files",
"=",
"self",
".",
"get_all_xml_file_paths",
"(",
"raw_data_directory",
")",
"crop_objects",
"=",
"self",
".",
"load_crop_objects_from_xml_files",
"(",
"xml_files",
")",
"self",
".",
"render_masks_of_crop_objects_into_image",
"(",
"crop_objects",
",",
"destination_directory",
")"
] |
Extracts all symbols from the raw XML documents and generates individual symbols from the masks
:param raw_data_directory: The directory, that contains the xml-files and matching images
:param destination_directory: The directory, in which the symbols should be generated into. One sub-folder per
symbol category will be generated automatically
|
[
"Extracts",
"all",
"symbols",
"from",
"the",
"raw",
"XML",
"documents",
"and",
"generates",
"individual",
"symbols",
"from",
"the",
"masks"
] |
d0a22a03ae35caeef211729efa340e1ec0e01ea5
|
https://github.com/apacha/OMR-Datasets/blob/d0a22a03ae35caeef211729efa340e1ec0e01ea5/omrdatasettools/image_generators/MuscimaPlusPlusImageGenerator.py#L23-L35
|
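A usage sketch for extract_and_render_all_symbol_masks; the import path is an assumption derived from the module path shown in this record, and the directories are placeholders.

# Hypothetical usage; the import path mirrors the record's file path.
from omrdatasettools.image_generators.MuscimaPlusPlusImageGenerator import MuscimaPlusPlusImageGenerator

generator = MuscimaPlusPlusImageGenerator()
generator.extract_and_render_all_symbol_masks(
    raw_data_directory="data/muscima_pp_raw",         # contains the XML files and matching images
    destination_directory="data/muscima_pp_symbols")  # one sub-folder per symbol class is created
 |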
11,987
|
apacha/OMR-Datasets
|
omrdatasettools/converters/ImageColorInverter.py
|
ImageColorInverter.invert_images
|
def invert_images(self, image_directory: str, image_file_ending: str = "*.bmp"):
"""
In-situ converts the white on black images of a directory to black on white images
:param image_directory: The directory, that contains the images
:param image_file_ending: The pattern for finding files in the image_directory
"""
image_paths = [y for x in os.walk(image_directory) for y in glob(os.path.join(x[0], image_file_ending))]
for image_path in tqdm(image_paths, desc="Inverting all images in directory {0}".format(image_directory)):
white_on_black_image = Image.open(image_path).convert("L")
black_on_white_image = ImageOps.invert(white_on_black_image)
black_on_white_image.save(os.path.splitext(image_path)[0] + ".png")
|
python
|
def invert_images(self, image_directory: str, image_file_ending: str = "*.bmp"):
"""
In-situ converts the white on black images of a directory to black on white images
:param image_directory: The directory, that contains the images
:param image_file_ending: The pattern for finding files in the image_directory
"""
image_paths = [y for x in os.walk(image_directory) for y in glob(os.path.join(x[0], image_file_ending))]
for image_path in tqdm(image_paths, desc="Inverting all images in directory {0}".format(image_directory)):
white_on_black_image = Image.open(image_path).convert("L")
black_on_white_image = ImageOps.invert(white_on_black_image)
black_on_white_image.save(os.path.splitext(image_path)[0] + ".png")
|
[
"def",
"invert_images",
"(",
"self",
",",
"image_directory",
":",
"str",
",",
"image_file_ending",
":",
"str",
"=",
"\"*.bmp\"",
")",
":",
"image_paths",
"=",
"[",
"y",
"for",
"x",
"in",
"os",
".",
"walk",
"(",
"image_directory",
")",
"for",
"y",
"in",
"glob",
"(",
"os",
".",
"path",
".",
"join",
"(",
"x",
"[",
"0",
"]",
",",
"image_file_ending",
")",
")",
"]",
"for",
"image_path",
"in",
"tqdm",
"(",
"image_paths",
",",
"desc",
"=",
"\"Inverting all images in directory {0}\"",
".",
"format",
"(",
"image_directory",
")",
")",
":",
"white_on_black_image",
"=",
"Image",
".",
"open",
"(",
"image_path",
")",
".",
"convert",
"(",
"\"L\"",
")",
"black_on_white_image",
"=",
"ImageOps",
".",
"invert",
"(",
"white_on_black_image",
")",
"black_on_white_image",
".",
"save",
"(",
"os",
".",
"path",
".",
"splitext",
"(",
"image_path",
")",
"[",
"0",
"]",
"+",
"\".png\"",
")"
] |
In-situ converts the white on black images of a directory to black on white images
:param image_directory: The directory, that contains the images
:param image_file_ending: The pattern for finding files in the image_directory
|
[
"In",
"-",
"situ",
"converts",
"the",
"white",
"on",
"black",
"images",
"of",
"a",
"directory",
"to",
"black",
"on",
"white",
"images"
] |
d0a22a03ae35caeef211729efa340e1ec0e01ea5
|
https://github.com/apacha/OMR-Datasets/blob/d0a22a03ae35caeef211729efa340e1ec0e01ea5/omrdatasettools/converters/ImageColorInverter.py#L15-L26
|
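A usage sketch for invert_images, with an assumed import path and a placeholder directory. Each matching *.bmp is inverted in place and saved alongside the original with a .png extension, as the implementation above shows.

# Hypothetical usage; the import path mirrors the record's file path.
from omrdatasettools.converters.ImageColorInverter import ImageColorInverter

inverter = ImageColorInverter()
inverter.invert_images("data/white_on_black_images", image_file_ending="*.bmp")
 |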
11,988
|
apacha/OMR-Datasets
|
omrdatasettools/image_generators/CapitanImageGenerator.py
|
CapitanImageGenerator.create_capitan_images
|
def create_capitan_images(self, raw_data_directory: str,
destination_directory: str,
stroke_thicknesses: List[int]) -> None:
"""
Creates a visual representation of the Capitan strokes by parsing all text-files and the symbols as specified
by the parameters by drawing lines that connect the points from each stroke of each symbol.
:param raw_data_directory: The directory, that contains the raw capitan dataset
:param destination_directory: The directory, in which the symbols should be generated into. One sub-folder per
symbol category will be generated automatically
:param stroke_thicknesses: The thickness of the pen, used for drawing the lines in pixels. If multiple are
specified, multiple images will be generated that have a different suffix, e.g.
1-16-3.png for the 3-px version and 1-16-2.png for the 2-px version of the image 1-16
"""
symbols = self.load_capitan_symbols(raw_data_directory)
self.draw_capitan_stroke_images(symbols, destination_directory, stroke_thicknesses)
self.draw_capitan_score_images(symbols, destination_directory)
|
python
|
def create_capitan_images(self, raw_data_directory: str,
destination_directory: str,
stroke_thicknesses: List[int]) -> None:
"""
Creates a visual representation of the Capitan strokes by parsing all text-files and the symbols as specified
by the parameters by drawing lines that connect the points from each stroke of each symbol.
:param raw_data_directory: The directory, that contains the raw capitan dataset
:param destination_directory: The directory, in which the symbols should be generated into. One sub-folder per
symbol category will be generated automatically
:param stroke_thicknesses: The thickness of the pen, used for drawing the lines in pixels. If multiple are
specified, multiple images will be generated that have a different suffix, e.g.
1-16-3.png for the 3-px version and 1-16-2.png for the 2-px version of the image 1-16
"""
symbols = self.load_capitan_symbols(raw_data_directory)
self.draw_capitan_stroke_images(symbols, destination_directory, stroke_thicknesses)
self.draw_capitan_score_images(symbols, destination_directory)
|
[
"def",
"create_capitan_images",
"(",
"self",
",",
"raw_data_directory",
":",
"str",
",",
"destination_directory",
":",
"str",
",",
"stroke_thicknesses",
":",
"List",
"[",
"int",
"]",
")",
"->",
"None",
":",
"symbols",
"=",
"self",
".",
"load_capitan_symbols",
"(",
"raw_data_directory",
")",
"self",
".",
"draw_capitan_stroke_images",
"(",
"symbols",
",",
"destination_directory",
",",
"stroke_thicknesses",
")",
"self",
".",
"draw_capitan_score_images",
"(",
"symbols",
",",
"destination_directory",
")"
] |
Creates a visual representation of the Capitan strokes by parsing all text-files and the symbols as specified
by the parameters by drawing lines that connect the points from each stroke of each symbol.
:param raw_data_directory: The directory, that contains the raw capitan dataset
:param destination_directory: The directory, in which the symbols should be generated into. One sub-folder per
symbol category will be generated automatically
:param stroke_thicknesses: The thickness of the pen, used for drawing the lines in pixels. If multiple are
specified, multiple images will be generated that have a different suffix, e.g.
1-16-3.png for the 3-px version and 1-16-2.png for the 2-px version of the image 1-16
|
[
"Creates",
"a",
"visual",
"representation",
"of",
"the",
"Capitan",
"strokes",
"by",
"parsing",
"all",
"text",
"-",
"files",
"and",
"the",
"symbols",
"as",
"specified",
"by",
"the",
"parameters",
"by",
"drawing",
"lines",
"that",
"connect",
"the",
"points",
"from",
"each",
"stroke",
"of",
"each",
"symbol",
"."
] |
d0a22a03ae35caeef211729efa340e1ec0e01ea5
|
https://github.com/apacha/OMR-Datasets/blob/d0a22a03ae35caeef211729efa340e1ec0e01ea5/omrdatasettools/image_generators/CapitanImageGenerator.py#L13-L29
|
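A usage sketch for create_capitan_images; the import path is assumed from the record's file path and the directories are placeholders.

# Hypothetical usage; the import path mirrors the record's file path.
from omrdatasettools.image_generators.CapitanImageGenerator import CapitanImageGenerator

generator = CapitanImageGenerator()
generator.create_capitan_images(
    raw_data_directory="data/capitan_raw",         # placeholder: raw Capitan dataset
    destination_directory="data/capitan_images",   # one sub-folder per symbol class is created
    stroke_thicknesses=[2, 3])                     # renders a 2-px and a 3-px version of each symbol
 |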
11,989
|
apacha/OMR-Datasets
|
omrdatasettools/image_generators/CapitanImageGenerator.py
|
CapitanImageGenerator.draw_capitan_stroke_images
|
def draw_capitan_stroke_images(self, symbols: List[CapitanSymbol],
destination_directory: str,
stroke_thicknesses: List[int]) -> None:
"""
Creates a visual representation of the Capitan strokes by drawing lines that connect the points
from each stroke of each symbol.
:param symbols: The list of parsed Capitan-symbols
:param destination_directory: The directory, in which the symbols should be generated into. One sub-folder per
symbol category will be generated automatically
:param stroke_thicknesses: The thickness of the pen, used for drawing the lines in pixels. If multiple are
specified, multiple images will be generated that have a different suffix, e.g.
1-16-3.png for the 3-px version and 1-16-2.png for the 2-px version of the image 1-16
"""
total_number_of_symbols = len(symbols) * len(stroke_thicknesses)
output = "Generating {0} images with {1} symbols in {2} different stroke thicknesses ({3})".format(
total_number_of_symbols, len(symbols), len(stroke_thicknesses), stroke_thicknesses)
print(output)
print("In directory {0}".format(os.path.abspath(destination_directory)), flush=True)
progress_bar = tqdm(total=total_number_of_symbols, mininterval=0.25, desc="Rendering strokes")
capitan_file_name_counter = 0
for symbol in symbols:
capitan_file_name_counter += 1
target_directory = os.path.join(destination_directory, symbol.symbol_class)
os.makedirs(target_directory, exist_ok=True)
raw_file_name_without_extension = "capitan-{0}-{1}-stroke".format(symbol.symbol_class,
capitan_file_name_counter)
for stroke_thickness in stroke_thicknesses:
export_path = ExportPath(destination_directory, symbol.symbol_class, raw_file_name_without_extension,
'png', stroke_thickness)
symbol.draw_capitan_stroke_onto_canvas(export_path, stroke_thickness, 0)
progress_bar.update(1)
progress_bar.close()
|
python
|
def draw_capitan_stroke_images(self, symbols: List[CapitanSymbol],
destination_directory: str,
stroke_thicknesses: List[int]) -> None:
"""
Creates a visual representation of the Capitan strokes by drawing lines that connect the points
from each stroke of each symbol.
:param symbols: The list of parsed Capitan-symbols
:param destination_directory: The directory, in which the symbols should be generated into. One sub-folder per
symbol category will be generated automatically
:param stroke_thicknesses: The thickness of the pen, used for drawing the lines in pixels. If multiple are
specified, multiple images will be generated that have a different suffix, e.g.
1-16-3.png for the 3-px version and 1-16-2.png for the 2-px version of the image 1-16
"""
total_number_of_symbols = len(symbols) * len(stroke_thicknesses)
output = "Generating {0} images with {1} symbols in {2} different stroke thicknesses ({3})".format(
total_number_of_symbols, len(symbols), len(stroke_thicknesses), stroke_thicknesses)
print(output)
print("In directory {0}".format(os.path.abspath(destination_directory)), flush=True)
progress_bar = tqdm(total=total_number_of_symbols, mininterval=0.25, desc="Rendering strokes")
capitan_file_name_counter = 0
for symbol in symbols:
capitan_file_name_counter += 1
target_directory = os.path.join(destination_directory, symbol.symbol_class)
os.makedirs(target_directory, exist_ok=True)
raw_file_name_without_extension = "capitan-{0}-{1}-stroke".format(symbol.symbol_class,
capitan_file_name_counter)
for stroke_thickness in stroke_thicknesses:
export_path = ExportPath(destination_directory, symbol.symbol_class, raw_file_name_without_extension,
'png', stroke_thickness)
symbol.draw_capitan_stroke_onto_canvas(export_path, stroke_thickness, 0)
progress_bar.update(1)
progress_bar.close()
|
[
"def",
"draw_capitan_stroke_images",
"(",
"self",
",",
"symbols",
":",
"List",
"[",
"CapitanSymbol",
"]",
",",
"destination_directory",
":",
"str",
",",
"stroke_thicknesses",
":",
"List",
"[",
"int",
"]",
")",
"->",
"None",
":",
"total_number_of_symbols",
"=",
"len",
"(",
"symbols",
")",
"*",
"len",
"(",
"stroke_thicknesses",
")",
"output",
"=",
"\"Generating {0} images with {1} symbols in {2} different stroke thicknesses ({3})\"",
".",
"format",
"(",
"total_number_of_symbols",
",",
"len",
"(",
"symbols",
")",
",",
"len",
"(",
"stroke_thicknesses",
")",
",",
"stroke_thicknesses",
")",
"print",
"(",
"output",
")",
"print",
"(",
"\"In directory {0}\"",
".",
"format",
"(",
"os",
".",
"path",
".",
"abspath",
"(",
"destination_directory",
")",
")",
",",
"flush",
"=",
"True",
")",
"progress_bar",
"=",
"tqdm",
"(",
"total",
"=",
"total_number_of_symbols",
",",
"mininterval",
"=",
"0.25",
",",
"desc",
"=",
"\"Rendering strokes\"",
")",
"capitan_file_name_counter",
"=",
"0",
"for",
"symbol",
"in",
"symbols",
":",
"capitan_file_name_counter",
"+=",
"1",
"target_directory",
"=",
"os",
".",
"path",
".",
"join",
"(",
"destination_directory",
",",
"symbol",
".",
"symbol_class",
")",
"os",
".",
"makedirs",
"(",
"target_directory",
",",
"exist_ok",
"=",
"True",
")",
"raw_file_name_without_extension",
"=",
"\"capitan-{0}-{1}-stroke\"",
".",
"format",
"(",
"symbol",
".",
"symbol_class",
",",
"capitan_file_name_counter",
")",
"for",
"stroke_thickness",
"in",
"stroke_thicknesses",
":",
"export_path",
"=",
"ExportPath",
"(",
"destination_directory",
",",
"symbol",
".",
"symbol_class",
",",
"raw_file_name_without_extension",
",",
"'png'",
",",
"stroke_thickness",
")",
"symbol",
".",
"draw_capitan_stroke_onto_canvas",
"(",
"export_path",
",",
"stroke_thickness",
",",
"0",
")",
"progress_bar",
".",
"update",
"(",
"1",
")",
"progress_bar",
".",
"close",
"(",
")"
] |
Creates a visual representation of the Capitan strokes by drawing lines that connect the points
from each stroke of each symbol.
:param symbols: The list of parsed Capitan-symbols
:param destination_directory: The directory, in which the symbols should be generated into. One sub-folder per
symbol category will be generated automatically
:param stroke_thicknesses: The thickness of the pen, used for drawing the lines in pixels. If multiple are
specified, multiple images will be generated that have a different suffix, e.g.
1-16-3.png for the 3-px version and 1-16-2.png for the 2-px version of the image 1-16
|
[
"Creates",
"a",
"visual",
"representation",
"of",
"the",
"Capitan",
"strokes",
"by",
"drawing",
"lines",
"that",
"connect",
"the",
"points",
"from",
"each",
"stroke",
"of",
"each",
"symbol",
"."
] |
d0a22a03ae35caeef211729efa340e1ec0e01ea5
|
https://github.com/apacha/OMR-Datasets/blob/d0a22a03ae35caeef211729efa340e1ec0e01ea5/omrdatasettools/image_generators/CapitanImageGenerator.py#L44-L82
|
11,990
|
apacha/OMR-Datasets
|
omrdatasettools/image_generators/Rectangle.py
|
Rectangle.overlap
|
def overlap(r1: 'Rectangle', r2: 'Rectangle'):
"""
Overlapping rectangles overlap both horizontally & vertically
"""
h_overlaps = (r1.left <= r2.right) and (r1.right >= r2.left)
v_overlaps = (r1.bottom >= r2.top) and (r1.top <= r2.bottom)
return h_overlaps and v_overlaps
|
python
|
def overlap(r1: 'Rectangle', r2: 'Rectangle'):
"""
Overlapping rectangles overlap both horizontally & vertically
"""
h_overlaps = (r1.left <= r2.right) and (r1.right >= r2.left)
v_overlaps = (r1.bottom >= r2.top) and (r1.top <= r2.bottom)
return h_overlaps and v_overlaps
|
[
"def",
"overlap",
"(",
"r1",
":",
"'Rectangle'",
",",
"r2",
":",
"'Rectangle'",
")",
":",
"h_overlaps",
"=",
"(",
"r1",
".",
"left",
"<=",
"r2",
".",
"right",
")",
"and",
"(",
"r1",
".",
"right",
">=",
"r2",
".",
"left",
")",
"v_overlaps",
"=",
"(",
"r1",
".",
"bottom",
">=",
"r2",
".",
"top",
")",
"and",
"(",
"r1",
".",
"top",
"<=",
"r2",
".",
"bottom",
")",
"return",
"h_overlaps",
"and",
"v_overlaps"
] |
Overlapping rectangles overlap both horizontally & vertically
|
[
"Overlapping",
"rectangles",
"overlap",
"both",
"horizontally",
"&",
"vertically"
] |
d0a22a03ae35caeef211729efa340e1ec0e01ea5
|
https://github.com/apacha/OMR-Datasets/blob/d0a22a03ae35caeef211729efa340e1ec0e01ea5/omrdatasettools/image_generators/Rectangle.py#L18-L24
|
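The overlap predicate above only needs the four edges of each rectangle. A self-contained sketch with a stand-in type (the real Rectangle in this repository derives left/right/top/bottom from an origin plus width/height); image coordinates are assumed, so top <= bottom.

from collections import namedtuple

# Stand-in with explicit edges instead of origin/width/height.
Box = namedtuple("Box", ["left", "right", "top", "bottom"])

def overlap(r1: Box, r2: Box) -> bool:
    h_overlaps = (r1.left <= r2.right) and (r1.right >= r2.left)
    v_overlaps = (r1.bottom >= r2.top) and (r1.top <= r2.bottom)
    return h_overlaps and v_overlaps

assert overlap(Box(0, 10, 0, 10), Box(5, 15, 5, 15))        # overlapping boxes
assert not overlap(Box(0, 10, 0, 10), Box(20, 30, 0, 10))   # horizontally disjoint boxes
 |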
11,991
|
apacha/OMR-Datasets
|
omrdatasettools/image_generators/AudiverisOmrImageGenerator.py
|
AudiverisOmrImageGenerator.extract_symbols
|
def extract_symbols(self, raw_data_directory: str, destination_directory: str):
"""
Extracts the symbols from the raw XML documents and matching images of the Audiveris OMR dataset into
individual symbols
:param raw_data_directory: The directory, that contains the xml-files and matching images
:param destination_directory: The directory, in which the symbols should be generated into. One sub-folder per
symbol category will be generated automatically
"""
print("Extracting Symbols from Audiveris OMR Dataset...")
all_xml_files = [y for x in os.walk(raw_data_directory) for y in glob(os.path.join(x[0], '*.xml'))]
all_image_files = [y for x in os.walk(raw_data_directory) for y in glob(os.path.join(x[0], '*.png'))]
data_pairs = []
for i in range(len(all_xml_files)):
data_pairs.append((all_xml_files[i], all_image_files[i]))
for data_pair in data_pairs:
self.__extract_symbols(data_pair[0], data_pair[1], destination_directory)
|
python
|
def extract_symbols(self, raw_data_directory: str, destination_directory: str):
"""
Extracts the symbols from the raw XML documents and matching images of the Audiveris OMR dataset into
individual symbols
:param raw_data_directory: The directory, that contains the xml-files and matching images
:param destination_directory: The directory, in which the symbols should be generated into. One sub-folder per
symbol category will be generated automatically
"""
print("Extracting Symbols from Audiveris OMR Dataset...")
all_xml_files = [y for x in os.walk(raw_data_directory) for y in glob(os.path.join(x[0], '*.xml'))]
all_image_files = [y for x in os.walk(raw_data_directory) for y in glob(os.path.join(x[0], '*.png'))]
data_pairs = []
for i in range(len(all_xml_files)):
data_pairs.append((all_xml_files[i], all_image_files[i]))
for data_pair in data_pairs:
self.__extract_symbols(data_pair[0], data_pair[1], destination_directory)
|
[
"def",
"extract_symbols",
"(",
"self",
",",
"raw_data_directory",
":",
"str",
",",
"destination_directory",
":",
"str",
")",
":",
"print",
"(",
"\"Extracting Symbols from Audiveris OMR Dataset...\"",
")",
"all_xml_files",
"=",
"[",
"y",
"for",
"x",
"in",
"os",
".",
"walk",
"(",
"raw_data_directory",
")",
"for",
"y",
"in",
"glob",
"(",
"os",
".",
"path",
".",
"join",
"(",
"x",
"[",
"0",
"]",
",",
"'*.xml'",
")",
")",
"]",
"all_image_files",
"=",
"[",
"y",
"for",
"x",
"in",
"os",
".",
"walk",
"(",
"raw_data_directory",
")",
"for",
"y",
"in",
"glob",
"(",
"os",
".",
"path",
".",
"join",
"(",
"x",
"[",
"0",
"]",
",",
"'*.png'",
")",
")",
"]",
"data_pairs",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"all_xml_files",
")",
")",
":",
"data_pairs",
".",
"append",
"(",
"(",
"all_xml_files",
"[",
"i",
"]",
",",
"all_image_files",
"[",
"i",
"]",
")",
")",
"for",
"data_pair",
"in",
"data_pairs",
":",
"self",
".",
"__extract_symbols",
"(",
"data_pair",
"[",
"0",
"]",
",",
"data_pair",
"[",
"1",
"]",
",",
"destination_directory",
")"
] |
Extracts the symbols from the raw XML documents and matching images of the Audiveris OMR dataset into
individual symbols
:param raw_data_directory: The directory, that contains the xml-files and matching images
:param destination_directory: The directory, in which the symbols should be generated into. One sub-folder per
symbol category will be generated automatically
|
[
"Extracts",
"the",
"symbols",
"from",
"the",
"raw",
"XML",
"documents",
"and",
"matching",
"images",
"of",
"the",
"Audiveris",
"OMR",
"dataset",
"into",
"individual",
"symbols"
] |
d0a22a03ae35caeef211729efa340e1ec0e01ea5
|
https://github.com/apacha/OMR-Datasets/blob/d0a22a03ae35caeef211729efa340e1ec0e01ea5/omrdatasettools/image_generators/AudiverisOmrImageGenerator.py#L16-L35
|
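A usage sketch for extract_symbols, with an assumed import path and placeholder directories. Note that the implementation above pairs the i-th XML file with the i-th PNG file, so it relies on the two directory walks yielding annotations and images in matching order.

# Hypothetical usage; the import path mirrors the record's file path.
from omrdatasettools.image_generators.AudiverisOmrImageGenerator import AudiverisOmrImageGenerator

generator = AudiverisOmrImageGenerator()
generator.extract_symbols(
    raw_data_directory="data/audiveris_omr_raw",      # contains *.xml annotations and matching *.png images
    destination_directory="data/audiveris_symbols")   # one sub-folder per symbol class is created
 |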
11,992
|
apacha/OMR-Datasets
|
omrdatasettools/image_generators/HomusSymbol.py
|
HomusSymbol.initialize_from_string
|
def initialize_from_string(content: str) -> 'HomusSymbol':
"""
Create and initializes a new symbol from a string
:param content: The content of a symbol as read from the text-file
:return: The initialized symbol
:rtype: HomusSymbol
"""
if content is None or content is "":
return None
lines = content.splitlines()
min_x = sys.maxsize
max_x = 0
min_y = sys.maxsize
max_y = 0
symbol_name = lines[0]
strokes = []
for stroke_string in lines[1:]:
stroke = []
for point_string in stroke_string.split(";"):
if point_string is "":
continue # Skip the last element, that is due to a trailing ; in each line
point_x, point_y = point_string.split(",")
x = int(point_x)
y = int(point_y)
stroke.append(Point2D(x, y))
max_x = max(max_x, x)
min_x = min(min_x, x)
max_y = max(max_y, y)
min_y = min(min_y, y)
strokes.append(stroke)
dimensions = Rectangle(Point2D(min_x, min_y), max_x - min_x + 1, max_y - min_y + 1)
return HomusSymbol(content, strokes, symbol_name, dimensions)
|
python
|
def initialize_from_string(content: str) -> 'HomusSymbol':
"""
Create and initializes a new symbol from a string
:param content: The content of a symbol as read from the text-file
:return: The initialized symbol
:rtype: HomusSymbol
"""
if content is None or content is "":
return None
lines = content.splitlines()
min_x = sys.maxsize
max_x = 0
min_y = sys.maxsize
max_y = 0
symbol_name = lines[0]
strokes = []
for stroke_string in lines[1:]:
stroke = []
for point_string in stroke_string.split(";"):
if point_string is "":
continue # Skip the last element, that is due to a trailing ; in each line
point_x, point_y = point_string.split(",")
x = int(point_x)
y = int(point_y)
stroke.append(Point2D(x, y))
max_x = max(max_x, x)
min_x = min(min_x, x)
max_y = max(max_y, y)
min_y = min(min_y, y)
strokes.append(stroke)
dimensions = Rectangle(Point2D(min_x, min_y), max_x - min_x + 1, max_y - min_y + 1)
return HomusSymbol(content, strokes, symbol_name, dimensions)
|
[
"def",
"initialize_from_string",
"(",
"content",
":",
"str",
")",
"->",
"'HomusSymbol'",
":",
"if",
"content",
"is",
"None",
"or",
"content",
"is",
"\"\"",
":",
"return",
"None",
"lines",
"=",
"content",
".",
"splitlines",
"(",
")",
"min_x",
"=",
"sys",
".",
"maxsize",
"max_x",
"=",
"0",
"min_y",
"=",
"sys",
".",
"maxsize",
"max_y",
"=",
"0",
"symbol_name",
"=",
"lines",
"[",
"0",
"]",
"strokes",
"=",
"[",
"]",
"for",
"stroke_string",
"in",
"lines",
"[",
"1",
":",
"]",
":",
"stroke",
"=",
"[",
"]",
"for",
"point_string",
"in",
"stroke_string",
".",
"split",
"(",
"\";\"",
")",
":",
"if",
"point_string",
"is",
"\"\"",
":",
"continue",
"# Skip the last element, that is due to a trailing ; in each line",
"point_x",
",",
"point_y",
"=",
"point_string",
".",
"split",
"(",
"\",\"",
")",
"x",
"=",
"int",
"(",
"point_x",
")",
"y",
"=",
"int",
"(",
"point_y",
")",
"stroke",
".",
"append",
"(",
"Point2D",
"(",
"x",
",",
"y",
")",
")",
"max_x",
"=",
"max",
"(",
"max_x",
",",
"x",
")",
"min_x",
"=",
"min",
"(",
"min_x",
",",
"x",
")",
"max_y",
"=",
"max",
"(",
"max_y",
",",
"y",
")",
"min_y",
"=",
"min",
"(",
"min_y",
",",
"y",
")",
"strokes",
".",
"append",
"(",
"stroke",
")",
"dimensions",
"=",
"Rectangle",
"(",
"Point2D",
"(",
"min_x",
",",
"min_y",
")",
",",
"max_x",
"-",
"min_x",
"+",
"1",
",",
"max_y",
"-",
"min_y",
"+",
"1",
")",
"return",
"HomusSymbol",
"(",
"content",
",",
"strokes",
",",
"symbol_name",
",",
"dimensions",
")"
] |
Create and initializes a new symbol from a string
:param content: The content of a symbol as read from the text-file
:return: The initialized symbol
:rtype: HomusSymbol
|
[
"Create",
"and",
"initializes",
"a",
"new",
"symbol",
"from",
"a",
"string"
] |
d0a22a03ae35caeef211729efa340e1ec0e01ea5
|
https://github.com/apacha/OMR-Datasets/blob/d0a22a03ae35caeef211729efa340e1ec0e01ea5/omrdatasettools/image_generators/HomusSymbol.py#L21-L62
|
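The HOMUS text format parsed above is a symbol name on the first line followed by one stroke per line, each stroke a ';'-separated list of 'x,y' points with a trailing ';'. A self-contained sketch of the same parsing and bounding-box computation, with Point2D/Rectangle replaced by plain tuples; it compares with == "" where the original uses the identity check is "", which depends on CPython string interning and warns on recent Python versions.

import sys

def parse_homus_symbol(content: str):
    """Returns (symbol_name, strokes, bounding_box), where bounding_box is
    (min_x, min_y, width, height), mirroring the parsing logic above."""
    lines = content.splitlines()
    symbol_name = lines[0]
    min_x = min_y = sys.maxsize
    max_x = max_y = 0
    strokes = []
    for stroke_string in lines[1:]:
        stroke = []
        for point_string in stroke_string.split(";"):
            if point_string == "":
                continue  # the trailing ';' yields one empty element per line
            x, y = (int(v) for v in point_string.split(","))
            stroke.append((x, y))
            min_x, max_x = min(min_x, x), max(max_x, x)
            min_y, max_y = min(min_y, y), max(max_y, y)
        strokes.append(stroke)
    return symbol_name, strokes, (min_x, min_y, max_x - min_x + 1, max_y - min_y + 1)

name, strokes, box = parse_homus_symbol("Quarter-Note\n10,20;11,25;12,30;\n30,20;30,40;")
print(name, box)  # Quarter-Note (10, 20, 21, 21)
 |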
11,993
|
apacha/OMR-Datasets
|
omrdatasettools/image_generators/HomusSymbol.py
|
HomusSymbol.draw_into_bitmap
|
def draw_into_bitmap(self, export_path: ExportPath, stroke_thickness: int, margin: int = 0) -> None:
"""
Draws the symbol in the original size that it has plus an optional margin
:param export_path: The path, where the symbols should be created on disk
:param stroke_thickness: Pen-thickness for drawing the symbol in pixels
:param margin: An optional margin for each symbol
"""
self.draw_onto_canvas(export_path,
stroke_thickness,
margin,
self.dimensions.width + 2 * margin,
self.dimensions.height + 2 * margin)
|
python
|
def draw_into_bitmap(self, export_path: ExportPath, stroke_thickness: int, margin: int = 0) -> None:
"""
Draws the symbol in the original size that it has plus an optional margin
:param export_path: The path, where the symbols should be created on disk
:param stroke_thickness: Pen-thickness for drawing the symbol in pixels
:param margin: An optional margin for each symbol
"""
self.draw_onto_canvas(export_path,
stroke_thickness,
margin,
self.dimensions.width + 2 * margin,
self.dimensions.height + 2 * margin)
|
[
"def",
"draw_into_bitmap",
"(",
"self",
",",
"export_path",
":",
"ExportPath",
",",
"stroke_thickness",
":",
"int",
",",
"margin",
":",
"int",
"=",
"0",
")",
"->",
"None",
":",
"self",
".",
"draw_onto_canvas",
"(",
"export_path",
",",
"stroke_thickness",
",",
"margin",
",",
"self",
".",
"dimensions",
".",
"width",
"+",
"2",
"*",
"margin",
",",
"self",
".",
"dimensions",
".",
"height",
"+",
"2",
"*",
"margin",
")"
] |
Draws the symbol in the original size that it has plus an optional margin
:param export_path: The path, where the symbols should be created on disk
:param stroke_thickness: Pen-thickness for drawing the symbol in pixels
:param margin: An optional margin for each symbol
|
[
"Draws",
"the",
"symbol",
"in",
"the",
"original",
"size",
"that",
"it",
"has",
"plus",
"an",
"optional",
"margin"
] |
d0a22a03ae35caeef211729efa340e1ec0e01ea5
|
https://github.com/apacha/OMR-Datasets/blob/d0a22a03ae35caeef211729efa340e1ec0e01ea5/omrdatasettools/image_generators/HomusSymbol.py#L64-L76
|
11,994
|
apacha/OMR-Datasets
|
omrdatasettools/image_generators/HomusSymbol.py
|
HomusSymbol.draw_onto_canvas
|
def draw_onto_canvas(self, export_path: ExportPath, stroke_thickness: int, margin: int, destination_width: int,
destination_height: int, staff_line_spacing: int = 14,
staff_line_vertical_offsets: List[int] = None,
bounding_boxes: dict = None, random_position_on_canvas: bool = False) -> None:
"""
Draws the symbol onto a canvas with a fixed size
:param bounding_boxes: The dictionary into which the bounding-boxes will be added of each generated image
:param export_path: The path, where the symbols should be created on disk
:param stroke_thickness:
:param margin:
:param destination_width:
:param destination_height:
:param staff_line_spacing:
:param staff_line_vertical_offsets: Offsets used for drawing staff-lines. If None provided, no staff-lines will be drawn if multiple integers are provided, multiple images will be generated
"""
width = self.dimensions.width + 2 * margin
height = self.dimensions.height + 2 * margin
if random_position_on_canvas:
# max is required for elements that are larger than the canvas,
# where the possible range for the random value would be negative
random_horizontal_offset = random.randint(0, max(0, destination_width - width))
random_vertical_offset = random.randint(0, max(0, destination_height - height))
offset = Point2D(self.dimensions.origin.x - margin - random_horizontal_offset,
self.dimensions.origin.y - margin - random_vertical_offset)
else:
width_offset_for_centering = (destination_width - width) / 2
height_offset_for_centering = (destination_height - height) / 2
offset = Point2D(self.dimensions.origin.x - margin - width_offset_for_centering,
self.dimensions.origin.y - margin - height_offset_for_centering)
image_without_staff_lines = Image.new('RGB', (destination_width, destination_height),
"white") # create a new white image
draw = ImageDraw.Draw(image_without_staff_lines)
black = (0, 0, 0)
for stroke in self.strokes:
for i in range(0, len(stroke) - 1):
start_point = self.__subtract_offset(stroke[i], offset)
end_point = self.__subtract_offset(stroke[i + 1], offset)
draw.line((start_point.x, start_point.y, end_point.x, end_point.y), black, stroke_thickness)
location = self.__subtract_offset(self.dimensions.origin, offset)
bounding_box_in_image = Rectangle(location, self.dimensions.width, self.dimensions.height)
# self.draw_bounding_box(draw, location)
del draw
if staff_line_vertical_offsets is not None and staff_line_vertical_offsets:
for staff_line_vertical_offset in staff_line_vertical_offsets:
image_with_staff_lines = image_without_staff_lines.copy()
self.__draw_staff_lines_into_image(image_with_staff_lines, stroke_thickness,
staff_line_spacing, staff_line_vertical_offset)
file_name_with_offset = export_path.get_full_path(staff_line_vertical_offset)
image_with_staff_lines.save(file_name_with_offset)
image_with_staff_lines.close()
if bounding_boxes is not None:
# Note that the ImageDatasetGenerator does not yield the full path, but only the class_name and
# the file_name, e.g. '3-4-Time\\1-13_3_offset_74.png', so we store only that part in the dictionary
class_and_file_name = export_path.get_class_name_and_file_path(staff_line_vertical_offset)
bounding_boxes[class_and_file_name] = bounding_box_in_image
else:
image_without_staff_lines.save(export_path.get_full_path())
if bounding_boxes is not None:
# Note that the ImageDatasetGenerator does not yield the full path, but only the class_name and
# the file_name, e.g. '3-4-Time\\1-13_3_offset_74.png', so we store only that part in the dictionary
class_and_file_name = export_path.get_class_name_and_file_path()
bounding_boxes[class_and_file_name] = bounding_box_in_image
image_without_staff_lines.close()
|
python
|
def draw_onto_canvas(self, export_path: ExportPath, stroke_thickness: int, margin: int, destination_width: int,
destination_height: int, staff_line_spacing: int = 14,
staff_line_vertical_offsets: List[int] = None,
bounding_boxes: dict = None, random_position_on_canvas: bool = False) -> None:
"""
Draws the symbol onto a canvas with a fixed size
:param bounding_boxes: The dictionary into which the bounding-boxes will be added of each generated image
:param export_path: The path, where the symbols should be created on disk
:param stroke_thickness:
:param margin:
:param destination_width:
:param destination_height:
:param staff_line_spacing:
:param staff_line_vertical_offsets: Offsets used for drawing staff-lines. If None provided, no staff-lines will be drawn if multiple integers are provided, multiple images will be generated
"""
width = self.dimensions.width + 2 * margin
height = self.dimensions.height + 2 * margin
if random_position_on_canvas:
# max is required for elements that are larger than the canvas,
# where the possible range for the random value would be negative
random_horizontal_offset = random.randint(0, max(0, destination_width - width))
random_vertical_offset = random.randint(0, max(0, destination_height - height))
offset = Point2D(self.dimensions.origin.x - margin - random_horizontal_offset,
self.dimensions.origin.y - margin - random_vertical_offset)
else:
width_offset_for_centering = (destination_width - width) / 2
height_offset_for_centering = (destination_height - height) / 2
offset = Point2D(self.dimensions.origin.x - margin - width_offset_for_centering,
self.dimensions.origin.y - margin - height_offset_for_centering)
image_without_staff_lines = Image.new('RGB', (destination_width, destination_height),
"white") # create a new white image
draw = ImageDraw.Draw(image_without_staff_lines)
black = (0, 0, 0)
for stroke in self.strokes:
for i in range(0, len(stroke) - 1):
start_point = self.__subtract_offset(stroke[i], offset)
end_point = self.__subtract_offset(stroke[i + 1], offset)
draw.line((start_point.x, start_point.y, end_point.x, end_point.y), black, stroke_thickness)
location = self.__subtract_offset(self.dimensions.origin, offset)
bounding_box_in_image = Rectangle(location, self.dimensions.width, self.dimensions.height)
# self.draw_bounding_box(draw, location)
del draw
if staff_line_vertical_offsets is not None and staff_line_vertical_offsets:
for staff_line_vertical_offset in staff_line_vertical_offsets:
image_with_staff_lines = image_without_staff_lines.copy()
self.__draw_staff_lines_into_image(image_with_staff_lines, stroke_thickness,
staff_line_spacing, staff_line_vertical_offset)
file_name_with_offset = export_path.get_full_path(staff_line_vertical_offset)
image_with_staff_lines.save(file_name_with_offset)
image_with_staff_lines.close()
if bounding_boxes is not None:
# Note that the ImageDatasetGenerator does not yield the full path, but only the class_name and
# the file_name, e.g. '3-4-Time\\1-13_3_offset_74.png', so we store only that part in the dictionary
class_and_file_name = export_path.get_class_name_and_file_path(staff_line_vertical_offset)
bounding_boxes[class_and_file_name] = bounding_box_in_image
else:
image_without_staff_lines.save(export_path.get_full_path())
if bounding_boxes is not None:
# Note that the ImageDatasetGenerator does not yield the full path, but only the class_name and
# the file_name, e.g. '3-4-Time\\1-13_3_offset_74.png', so we store only that part in the dictionary
class_and_file_name = export_path.get_class_name_and_file_path()
bounding_boxes[class_and_file_name] = bounding_box_in_image
image_without_staff_lines.close()
|
[
"def",
"draw_onto_canvas",
"(",
"self",
",",
"export_path",
":",
"ExportPath",
",",
"stroke_thickness",
":",
"int",
",",
"margin",
":",
"int",
",",
"destination_width",
":",
"int",
",",
"destination_height",
":",
"int",
",",
"staff_line_spacing",
":",
"int",
"=",
"14",
",",
"staff_line_vertical_offsets",
":",
"List",
"[",
"int",
"]",
"=",
"None",
",",
"bounding_boxes",
":",
"dict",
"=",
"None",
",",
"random_position_on_canvas",
":",
"bool",
"=",
"False",
")",
"->",
"None",
":",
"width",
"=",
"self",
".",
"dimensions",
".",
"width",
"+",
"2",
"*",
"margin",
"height",
"=",
"self",
".",
"dimensions",
".",
"height",
"+",
"2",
"*",
"margin",
"if",
"random_position_on_canvas",
":",
"# max is required for elements that are larger than the canvas,",
"# where the possible range for the random value would be negative",
"random_horizontal_offset",
"=",
"random",
".",
"randint",
"(",
"0",
",",
"max",
"(",
"0",
",",
"destination_width",
"-",
"width",
")",
")",
"random_vertical_offset",
"=",
"random",
".",
"randint",
"(",
"0",
",",
"max",
"(",
"0",
",",
"destination_height",
"-",
"height",
")",
")",
"offset",
"=",
"Point2D",
"(",
"self",
".",
"dimensions",
".",
"origin",
".",
"x",
"-",
"margin",
"-",
"random_horizontal_offset",
",",
"self",
".",
"dimensions",
".",
"origin",
".",
"y",
"-",
"margin",
"-",
"random_vertical_offset",
")",
"else",
":",
"width_offset_for_centering",
"=",
"(",
"destination_width",
"-",
"width",
")",
"/",
"2",
"height_offset_for_centering",
"=",
"(",
"destination_height",
"-",
"height",
")",
"/",
"2",
"offset",
"=",
"Point2D",
"(",
"self",
".",
"dimensions",
".",
"origin",
".",
"x",
"-",
"margin",
"-",
"width_offset_for_centering",
",",
"self",
".",
"dimensions",
".",
"origin",
".",
"y",
"-",
"margin",
"-",
"height_offset_for_centering",
")",
"image_without_staff_lines",
"=",
"Image",
".",
"new",
"(",
"'RGB'",
",",
"(",
"destination_width",
",",
"destination_height",
")",
",",
"\"white\"",
")",
"# create a new white image",
"draw",
"=",
"ImageDraw",
".",
"Draw",
"(",
"image_without_staff_lines",
")",
"black",
"=",
"(",
"0",
",",
"0",
",",
"0",
")",
"for",
"stroke",
"in",
"self",
".",
"strokes",
":",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"stroke",
")",
"-",
"1",
")",
":",
"start_point",
"=",
"self",
".",
"__subtract_offset",
"(",
"stroke",
"[",
"i",
"]",
",",
"offset",
")",
"end_point",
"=",
"self",
".",
"__subtract_offset",
"(",
"stroke",
"[",
"i",
"+",
"1",
"]",
",",
"offset",
")",
"draw",
".",
"line",
"(",
"(",
"start_point",
".",
"x",
",",
"start_point",
".",
"y",
",",
"end_point",
".",
"x",
",",
"end_point",
".",
"y",
")",
",",
"black",
",",
"stroke_thickness",
")",
"location",
"=",
"self",
".",
"__subtract_offset",
"(",
"self",
".",
"dimensions",
".",
"origin",
",",
"offset",
")",
"bounding_box_in_image",
"=",
"Rectangle",
"(",
"location",
",",
"self",
".",
"dimensions",
".",
"width",
",",
"self",
".",
"dimensions",
".",
"height",
")",
"# self.draw_bounding_box(draw, location)",
"del",
"draw",
"if",
"staff_line_vertical_offsets",
"is",
"not",
"None",
"and",
"staff_line_vertical_offsets",
":",
"for",
"staff_line_vertical_offset",
"in",
"staff_line_vertical_offsets",
":",
"image_with_staff_lines",
"=",
"image_without_staff_lines",
".",
"copy",
"(",
")",
"self",
".",
"__draw_staff_lines_into_image",
"(",
"image_with_staff_lines",
",",
"stroke_thickness",
",",
"staff_line_spacing",
",",
"staff_line_vertical_offset",
")",
"file_name_with_offset",
"=",
"export_path",
".",
"get_full_path",
"(",
"staff_line_vertical_offset",
")",
"image_with_staff_lines",
".",
"save",
"(",
"file_name_with_offset",
")",
"image_with_staff_lines",
".",
"close",
"(",
")",
"if",
"bounding_boxes",
"is",
"not",
"None",
":",
"# Note that the ImageDatasetGenerator does not yield the full path, but only the class_name and",
"# the file_name, e.g. '3-4-Time\\\\1-13_3_offset_74.png', so we store only that part in the dictionary",
"class_and_file_name",
"=",
"export_path",
".",
"get_class_name_and_file_path",
"(",
"staff_line_vertical_offset",
")",
"bounding_boxes",
"[",
"class_and_file_name",
"]",
"=",
"bounding_box_in_image",
"else",
":",
"image_without_staff_lines",
".",
"save",
"(",
"export_path",
".",
"get_full_path",
"(",
")",
")",
"if",
"bounding_boxes",
"is",
"not",
"None",
":",
"# Note that the ImageDatasetGenerator does not yield the full path, but only the class_name and",
"# the file_name, e.g. '3-4-Time\\\\1-13_3_offset_74.png', so we store only that part in the dictionary",
"class_and_file_name",
"=",
"export_path",
".",
"get_class_name_and_file_path",
"(",
")",
"bounding_boxes",
"[",
"class_and_file_name",
"]",
"=",
"bounding_box_in_image",
"image_without_staff_lines",
".",
"close",
"(",
")"
] |
Draws the symbol onto a canvas with a fixed size
:param bounding_boxes: The dictionary into which the bounding-boxes will be added of each generated image
:param export_path: The path, where the symbols should be created on disk
:param stroke_thickness:
:param margin:
:param destination_width:
:param destination_height:
:param staff_line_spacing:
:param staff_line_vertical_offsets: Offsets used for drawing staff-lines. If None provided, no staff-lines will be drawn if multiple integers are provided, multiple images will be generated
|
[
"Draws",
"the",
"symbol",
"onto",
"a",
"canvas",
"with",
"a",
"fixed",
"size"
] |
d0a22a03ae35caeef211729efa340e1ec0e01ea5
|
https://github.com/apacha/OMR-Datasets/blob/d0a22a03ae35caeef211729efa340e1ec0e01ea5/omrdatasettools/image_generators/HomusSymbol.py#L78-L148
|
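The core of the drawing step above is plain Pillow: create a white RGB canvas, then connect consecutive points of each stroke with draw.line. A minimal, self-contained sketch of that inner loop with made-up stroke data; staff-lines, bounding-box bookkeeping and export paths are omitted.

from PIL import Image, ImageDraw

destination_width, destination_height, stroke_thickness = 96, 96, 3
strokes = [[(10, 10), (40, 60), (80, 20)], [(20, 80), (70, 80)]]  # made-up example strokes

image = Image.new("RGB", (destination_width, destination_height), "white")  # white canvas
draw = ImageDraw.Draw(image)
for stroke in strokes:
    for start_point, end_point in zip(stroke, stroke[1:]):  # consecutive point pairs
        draw.line((*start_point, *end_point), fill=(0, 0, 0), width=stroke_thickness)
image.save("example_symbol.png")
 |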
11,995
|
datascopeanalytics/scrubadub
|
scrubadub/import_magic.py
|
update_locals
|
def update_locals(locals_instance, instance_iterator, *args, **kwargs):
"""import all of the detector classes into the local namespace to make it
easy to do things like `import scrubadub.detectors.NameDetector` without
having to add each new ``Detector`` or ``Filth``
"""
# http://stackoverflow.com/a/4526709/564709
# http://stackoverflow.com/a/511059/564709
for instance in instance_iterator():
locals_instance.update({type(instance).__name__: instance.__class__})
|
python
|
def update_locals(locals_instance, instance_iterator, *args, **kwargs):
"""import all of the detector classes into the local namespace to make it
easy to do things like `import scrubadub.detectors.NameDetector` without
having to add each new ``Detector`` or ``Filth``
"""
# http://stackoverflow.com/a/4526709/564709
# http://stackoverflow.com/a/511059/564709
for instance in instance_iterator():
locals_instance.update({type(instance).__name__: instance.__class__})
|
[
"def",
"update_locals",
"(",
"locals_instance",
",",
"instance_iterator",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"# http://stackoverflow.com/a/4526709/564709",
"# http://stackoverflow.com/a/511059/564709",
"for",
"instance",
"in",
"instance_iterator",
"(",
")",
":",
"locals_instance",
".",
"update",
"(",
"{",
"type",
"(",
"instance",
")",
".",
"__name__",
":",
"instance",
".",
"__class__",
"}",
")"
] |
import all of the detector classes into the local namespace to make it
easy to do things like `import scrubadub.detectors.NameDetector` without
having to add each new ``Detector`` or ``Filth``
|
[
"import",
"all",
"of",
"the",
"detector",
"classes",
"into",
"the",
"local",
"namespace",
"to",
"make",
"it",
"easy",
"to",
"do",
"things",
"like",
"import",
"scrubadub",
".",
"detectors",
".",
"NameDetector",
"without",
"having",
"to",
"add",
"each",
"new",
"Detector",
"or",
"Filth"
] |
914bda49a16130b44af43df6a2f84755477c407c
|
https://github.com/datascopeanalytics/scrubadub/blob/914bda49a16130b44af43df6a2f84755477c407c/scrubadub/import_magic.py#L34-L42
|
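The namespace trick above boils down to mapping each instance's class name to its class and merging that mapping into a module's locals(). A tiny self-contained illustration with stand-in classes and a plain dict in place of the module namespace:

class NameDetector: pass
class EmailDetector: pass

def iter_detector_instances():
    yield NameDetector()
    yield EmailDetector()

namespace = {}  # stands in for a module's locals()
for instance in iter_detector_instances():
    namespace[type(instance).__name__] = instance.__class__

assert namespace["NameDetector"] is NameDetector
assert namespace["EmailDetector"] is EmailDetector
 |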
11,996
|
datascopeanalytics/scrubadub
|
scrubadub/filth/__init__.py
|
iter_filth_clss
|
def iter_filth_clss():
"""Iterate over all of the filths that are included in this sub-package.
This is a convenience method for capturing all new Filth that are added
over time.
"""
return iter_subclasses(
os.path.dirname(os.path.abspath(__file__)),
Filth,
_is_abstract_filth,
)
|
python
|
def iter_filth_clss():
"""Iterate over all of the filths that are included in this sub-package.
This is a convenience method for capturing all new Filth that are added
over time.
"""
return iter_subclasses(
os.path.dirname(os.path.abspath(__file__)),
Filth,
_is_abstract_filth,
)
|
[
"def",
"iter_filth_clss",
"(",
")",
":",
"return",
"iter_subclasses",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"os",
".",
"path",
".",
"abspath",
"(",
"__file__",
")",
")",
",",
"Filth",
",",
"_is_abstract_filth",
",",
")"
] |
Iterate over all of the filths that are included in this sub-package.
This is a convenience method for capturing all new Filth that are added
over time.
|
[
"Iterate",
"over",
"all",
"of",
"the",
"filths",
"that",
"are",
"included",
"in",
"this",
"sub",
"-",
"package",
".",
"This",
"is",
"a",
"convenience",
"method",
"for",
"capturing",
"all",
"new",
"Filth",
"that",
"are",
"added",
"over",
"time",
"."
] |
914bda49a16130b44af43df6a2f84755477c407c
|
https://github.com/datascopeanalytics/scrubadub/blob/914bda49a16130b44af43df6a2f84755477c407c/scrubadub/filth/__init__.py#L13-L22
|
11,997
|
datascopeanalytics/scrubadub
|
scrubadub/filth/__init__.py
|
iter_filths
|
def iter_filths():
"""Iterate over all instances of filth"""
for filth_cls in iter_filth_clss():
if issubclass(filth_cls, RegexFilth):
m = next(re.finditer(r"\s+", "fake pattern string"))
yield filth_cls(m)
else:
yield filth_cls()
|
python
|
def iter_filths():
"""Iterate over all instances of filth"""
for filth_cls in iter_filth_clss():
if issubclass(filth_cls, RegexFilth):
m = next(re.finditer(r"\s+", "fake pattern string"))
yield filth_cls(m)
else:
yield filth_cls()
|
[
"def",
"iter_filths",
"(",
")",
":",
"for",
"filth_cls",
"in",
"iter_filth_clss",
"(",
")",
":",
"if",
"issubclass",
"(",
"filth_cls",
",",
"RegexFilth",
")",
":",
"m",
"=",
"next",
"(",
"re",
".",
"finditer",
"(",
"r\"\\s+\"",
",",
"\"fake pattern string\"",
")",
")",
"yield",
"filth_cls",
"(",
"m",
")",
"else",
":",
"yield",
"filth_cls",
"(",
")"
] |
Iterate over all instances of filth
|
[
"Iterate",
"over",
"all",
"instances",
"of",
"filth"
] |
914bda49a16130b44af43df6a2f84755477c407c
|
https://github.com/datascopeanalytics/scrubadub/blob/914bda49a16130b44af43df6a2f84755477c407c/scrubadub/filth/__init__.py#L25-L32
|
11,998
|
datascopeanalytics/scrubadub
|
scrubadub/filth/base.py
|
MergedFilth._update_content
|
def _update_content(self, other_filth):
"""this updates the bounds, text and placeholder for the merged
filth
"""
if self.end < other_filth.beg or other_filth.end < self.beg:
raise exceptions.FilthMergeError(
"a_filth goes from [%s, %s) and b_filth goes from [%s, %s)" % (
self.beg, self.end, other_filth.beg, other_filth.end
))
# get the text over lap correct
if self.beg < other_filth.beg:
first = self
second = other_filth
else:
second = self
first = other_filth
end_offset = second.end - first.end
if end_offset > 0:
self.text = first.text + second.text[-end_offset:]
# update the beg/end strings
self.beg = min(self.beg, other_filth.beg)
self.end = max(self.end, other_filth.end)
if self.end - self.beg != len(self.text):
raise exceptions.FilthMergeError("text length isn't consistent")
# update the placeholder
self.filths.append(other_filth)
self._placeholder = '+'.join([filth.type for filth in self.filths])
|
python
|
def _update_content(self, other_filth):
"""this updates the bounds, text and placeholder for the merged
filth
"""
if self.end < other_filth.beg or other_filth.end < self.beg:
raise exceptions.FilthMergeError(
"a_filth goes from [%s, %s) and b_filth goes from [%s, %s)" % (
self.beg, self.end, other_filth.beg, other_filth.end
))
# get the text over lap correct
if self.beg < other_filth.beg:
first = self
second = other_filth
else:
second = self
first = other_filth
end_offset = second.end - first.end
if end_offset > 0:
self.text = first.text + second.text[-end_offset:]
# update the beg/end strings
self.beg = min(self.beg, other_filth.beg)
self.end = max(self.end, other_filth.end)
if self.end - self.beg != len(self.text):
raise exceptions.FilthMergeError("text length isn't consistent")
# update the placeholder
self.filths.append(other_filth)
self._placeholder = '+'.join([filth.type for filth in self.filths])
|
[
"def",
"_update_content",
"(",
"self",
",",
"other_filth",
")",
":",
"if",
"self",
".",
"end",
"<",
"other_filth",
".",
"beg",
"or",
"other_filth",
".",
"end",
"<",
"self",
".",
"beg",
":",
"raise",
"exceptions",
".",
"FilthMergeError",
"(",
"\"a_filth goes from [%s, %s) and b_filth goes from [%s, %s)\"",
"%",
"(",
"self",
".",
"beg",
",",
"self",
".",
"end",
",",
"other_filth",
".",
"beg",
",",
"other_filth",
".",
"end",
")",
")",
"# get the text over lap correct",
"if",
"self",
".",
"beg",
"<",
"other_filth",
".",
"beg",
":",
"first",
"=",
"self",
"second",
"=",
"other_filth",
"else",
":",
"second",
"=",
"self",
"first",
"=",
"other_filth",
"end_offset",
"=",
"second",
".",
"end",
"-",
"first",
".",
"end",
"if",
"end_offset",
">",
"0",
":",
"self",
".",
"text",
"=",
"first",
".",
"text",
"+",
"second",
".",
"text",
"[",
"-",
"end_offset",
":",
"]",
"# update the beg/end strings",
"self",
".",
"beg",
"=",
"min",
"(",
"self",
".",
"beg",
",",
"other_filth",
".",
"beg",
")",
"self",
".",
"end",
"=",
"max",
"(",
"self",
".",
"end",
",",
"other_filth",
".",
"end",
")",
"if",
"self",
".",
"end",
"-",
"self",
".",
"beg",
"!=",
"len",
"(",
"self",
".",
"text",
")",
":",
"raise",
"exceptions",
".",
"FilthMergeError",
"(",
"\"text length isn't consistent\"",
")",
"# update the placeholder",
"self",
".",
"filths",
".",
"append",
"(",
"other_filth",
")",
"self",
".",
"_placeholder",
"=",
"'+'",
".",
"join",
"(",
"[",
"filth",
".",
"type",
"for",
"filth",
"in",
"self",
".",
"filths",
"]",
")"
] |
this updates the bounds, text and placeholder for the merged
filth
|
[
"this",
"updates",
"the",
"bounds",
"text",
"and",
"placeholder",
"for",
"the",
"merged",
"filth"
] |
914bda49a16130b44af43df6a2f84755477c407c
|
https://github.com/datascopeanalytics/scrubadub/blob/914bda49a16130b44af43df6a2f84755477c407c/scrubadub/filth/base.py#L65-L94
|
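The merge logic above works on half-open [beg, end) character spans cut from the same source text: it rejects disjoint spans, appends the non-overlapping tail of the later span, then widens the bounds and checks that the merged text length matches. A self-contained sketch of just that span arithmetic:

def merge_spans(a_beg, a_end, a_text, b_beg, b_end, b_text):
    """Merges two overlapping [beg, end) spans taken from the same document."""
    if a_end < b_beg or b_end < a_beg:
        raise ValueError("spans do not overlap")
    (first_beg, first_end, first_text), (second_beg, second_end, second_text) = sorted(
        [(a_beg, a_end, a_text), (b_beg, b_end, b_text)])
    text = first_text
    end_offset = second_end - first_end
    if end_offset > 0:                      # second span extends past the first
        text = first_text + second_text[-end_offset:]
    beg, end = first_beg, max(first_end, second_end)
    assert end - beg == len(text), "text length isn't consistent"
    return beg, end, text

# "hello world"[0:5] == "hello", [3:11] == "lo world" -> merged span covers "hello world"
print(merge_spans(0, 5, "hello", 3, 11, "lo world"))  # (0, 11, 'hello world')
 |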
11,999
|
datascopeanalytics/scrubadub
|
scrubadub/scrubbers.py
|
Scrubber.add_detector
|
def add_detector(self, detector_cls):
"""Add a ``Detector`` to scrubadub"""
if not issubclass(detector_cls, detectors.base.Detector):
raise TypeError((
'"%(detector_cls)s" is not a subclass of Detector'
) % locals())
# TODO: should add tests to make sure filth_cls is actually a proper
# filth_cls
name = detector_cls.filth_cls.type
if name in self._detectors:
raise KeyError((
'can not add Detector "%(name)s"---it already exists. '
'Try removing it first.'
) % locals())
self._detectors[name] = detector_cls()
|
python
|
def add_detector(self, detector_cls):
"""Add a ``Detector`` to scrubadub"""
if not issubclass(detector_cls, detectors.base.Detector):
raise TypeError((
'"%(detector_cls)s" is not a subclass of Detector'
) % locals())
# TODO: should add tests to make sure filth_cls is actually a proper
# filth_cls
name = detector_cls.filth_cls.type
if name in self._detectors:
raise KeyError((
'can not add Detector "%(name)s"---it already exists. '
'Try removing it first.'
) % locals())
self._detectors[name] = detector_cls()
|
[
"def",
"add_detector",
"(",
"self",
",",
"detector_cls",
")",
":",
"if",
"not",
"issubclass",
"(",
"detector_cls",
",",
"detectors",
".",
"base",
".",
"Detector",
")",
":",
"raise",
"TypeError",
"(",
"(",
"'\"%(detector_cls)s\" is not a subclass of Detector'",
")",
"%",
"locals",
"(",
")",
")",
"# TODO: should add tests to make sure filth_cls is actually a proper",
"# filth_cls",
"name",
"=",
"detector_cls",
".",
"filth_cls",
".",
"type",
"if",
"name",
"in",
"self",
".",
"_detectors",
":",
"raise",
"KeyError",
"(",
"(",
"'can not add Detector \"%(name)s\"---it already exists. '",
"'Try removing it first.'",
")",
"%",
"locals",
"(",
")",
")",
"self",
".",
"_detectors",
"[",
"name",
"]",
"=",
"detector_cls",
"(",
")"
] |
Add a ``Detector`` to scrubadub
|
[
"Add",
"a",
"Detector",
"to",
"scrubadub"
] |
914bda49a16130b44af43df6a2f84755477c407c
|
https://github.com/datascopeanalytics/scrubadub/blob/914bda49a16130b44af43df6a2f84755477c407c/scrubadub/scrubbers.py#L24-L38
|
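The registration pattern above is a small class registry keyed by the detector's filth type: validate the subclass, refuse duplicates, then store an instance. A self-contained sketch with stand-in classes (not the real scrubadub types):

class Detector:
    filth_cls = None

class NameFilth:
    type = "name"

class NameDetector(Detector):
    filth_cls = NameFilth

class Registry:
    def __init__(self):
        self._detectors = {}

    def add_detector(self, detector_cls):
        if not issubclass(detector_cls, Detector):
            raise TypeError('"%s" is not a subclass of Detector' % detector_cls)
        name = detector_cls.filth_cls.type          # registry key is the filth type
        if name in self._detectors:
            raise KeyError('can not add Detector "%s"---it already exists.' % name)
        self._detectors[name] = detector_cls()      # store an instance, not the class

registry = Registry()
registry.add_detector(NameDetector)
print(sorted(registry._detectors))  # ['name']
 |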