| partition (stringclasses, 3 values) | func_name (stringlengths, 1–134) | docstring (stringlengths, 1–46.9k) | path (stringlengths, 4–223) | original_string (stringlengths, 75–104k) | code (stringlengths, 75–104k) | docstring_tokens (listlengths, 1–1.97k) | repo (stringlengths, 7–55) | language (stringclasses, 1 value) | url (stringlengths, 87–315) | code_tokens (listlengths, 19–28.4k) | sha (stringlengths, 40) |
|---|---|---|---|---|---|---|---|---|---|---|---|
test
|
SMBus.write_byte_data
|
Write a byte of data to the specified cmd register of the device.
|
Adafruit_PureIO/smbus.py
|
def write_byte_data(self, addr, cmd, val):
"""Write a byte of data to the specified cmd register of the device.
"""
assert self._device is not None, 'Bus must be opened before operations are made against it!'
# Construct a string of data to send with the command register and byte value.
data = bytearray(2)
data[0] = cmd & 0xFF
data[1] = val & 0xFF
# Send the data to the device.
self._select_device(addr)
self._device.write(data)
|
def write_byte_data(self, addr, cmd, val):
"""Write a byte of data to the specified cmd register of the device.
"""
assert self._device is not None, 'Bus must be opened before operations are made against it!'
# Construct a string of data to send with the command register and byte value.
data = bytearray(2)
data[0] = cmd & 0xFF
data[1] = val & 0xFF
# Send the data to the device.
self._select_device(addr)
self._device.write(data)
|
[
"Write",
"a",
"byte",
"of",
"data",
"to",
"the",
"specified",
"cmd",
"register",
"of",
"the",
"device",
"."
] |
adafruit/Adafruit_Python_PureIO
|
python
|
https://github.com/adafruit/Adafruit_Python_PureIO/blob/6f4976d91c52d70b67b28bba75a429b5328a52c1/Adafruit_PureIO/smbus.py#L246-L256
|
[
"def",
"write_byte_data",
"(",
"self",
",",
"addr",
",",
"cmd",
",",
"val",
")",
":",
"assert",
"self",
".",
"_device",
"is",
"not",
"None",
",",
"'Bus must be opened before operations are made against it!'",
"# Construct a string of data to send with the command register and byte value.",
"data",
"=",
"bytearray",
"(",
"2",
")",
"data",
"[",
"0",
"]",
"=",
"cmd",
"&",
"0xFF",
"data",
"[",
"1",
"]",
"=",
"val",
"&",
"0xFF",
"# Send the data to the device.",
"self",
".",
"_select_device",
"(",
"addr",
")",
"self",
".",
"_device",
".",
"write",
"(",
"data",
")"
] |
6f4976d91c52d70b67b28bba75a429b5328a52c1
|
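The write_byte_data row above builds a two-byte buffer (register, then value) and sends it in a single write. A minimal usage sketch, assuming the package is importable and an I2C device answers at the hypothetical address 0x40 on bus 1:

```python
from Adafruit_PureIO.smbus import SMBus

bus = SMBus(1)                         # opens /dev/i2c-1; bus number is an assumption
bus.write_byte_data(0x40, 0x01, 0xFF)  # 0xFF into register 0x01 of device 0x40 (hypothetical)
```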
test
|
SMBus.write_word_data
|
Write a word (2 bytes) of data to the specified cmd register of the
device. Note that this will write the data in the endianness of the
processor running Python (typically little endian)!
|
Adafruit_PureIO/smbus.py
|
def write_word_data(self, addr, cmd, val):
"""Write a word (2 bytes) of data to the specified cmd register of the
device. Note that this will write the data in the endianness of the
processor running Python (typically little endian)!
"""
assert self._device is not None, 'Bus must be opened before operations are made against it!'
# Construct a string of data to send with the command register and word value.
data = struct.pack('=BH', cmd & 0xFF, val & 0xFFFF)
# Send the data to the device.
self._select_device(addr)
self._device.write(data)
|
def write_word_data(self, addr, cmd, val):
"""Write a word (2 bytes) of data to the specified cmd register of the
device. Note that this will write the data in the endianness of the
processor running Python (typically little endian)!
"""
assert self._device is not None, 'Bus must be opened before operations are made against it!'
# Construct a string of data to send with the command register and word value.
data = struct.pack('=BH', cmd & 0xFF, val & 0xFFFF)
# Send the data to the device.
self._select_device(addr)
self._device.write(data)
|
[
"Write",
"a",
"word",
"(",
"2",
"bytes",
")",
"of",
"data",
"to",
"the",
"specified",
"cmd",
"register",
"of",
"the",
"device",
".",
"Note",
"that",
"this",
"will",
"write",
"the",
"data",
"in",
"the",
"endianness",
"of",
"the",
"processor",
"running",
"Python",
"(",
"typically",
"little",
"endian",
")",
"!"
] |
adafruit/Adafruit_Python_PureIO
|
python
|
https://github.com/adafruit/Adafruit_Python_PureIO/blob/6f4976d91c52d70b67b28bba75a429b5328a52c1/Adafruit_PureIO/smbus.py#L258-L268
|
[
"def",
"write_word_data",
"(",
"self",
",",
"addr",
",",
"cmd",
",",
"val",
")",
":",
"assert",
"self",
".",
"_device",
"is",
"not",
"None",
",",
"'Bus must be opened before operations are made against it!'",
"# Construct a string of data to send with the command register and word value.",
"data",
"=",
"struct",
".",
"pack",
"(",
"'=BH'",
",",
"cmd",
"&",
"0xFF",
",",
"val",
"&",
"0xFFFF",
")",
"# Send the data to the device.",
"self",
".",
"_select_device",
"(",
"addr",
")",
"self",
".",
"_device",
".",
"write",
"(",
"data",
")"
] |
6f4976d91c52d70b67b28bba75a429b5328a52c1
|
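The `'=BH'` format string in write_word_data is what produces the endianness behaviour its docstring warns about: `=` selects native byte order with standard sizes and no padding, `B` packs the cmd register, and `H` packs the 16-bit word. A stand-alone demonstration (register and word values are arbitrary):

```python
import struct

# Three bytes total on the wire: cmd byte, then the word in the host's byte order.
data = struct.pack('=BH', 0x05, 0x1234)  # arbitrary example values
print(data.hex())  # '053412' on a little-endian host, '051234' on big-endian
```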
test
|
SMBus.write_block_data
|
Write a block of data to the specified cmd register of the device.
The amount of data to write should be the first byte inside the vals
string/bytearray and that count of bytes of data to write should follow
it.
|
Adafruit_PureIO/smbus.py
|
def write_block_data(self, addr, cmd, vals):
"""Write a block of data to the specified cmd register of the device.
The amount of data to write should be the first byte inside the vals
string/bytearray and that count of bytes of data to write should follow
it.
"""
# Just use the I2C block data write to write the provided values and
# their length as the first byte.
data = bytearray(len(vals)+1)
data[0] = len(vals) & 0xFF
data[1:] = vals[0:]
self.write_i2c_block_data(addr, cmd, data)
|
def write_block_data(self, addr, cmd, vals):
"""Write a block of data to the specified cmd register of the device.
The amount of data to write should be the first byte inside the vals
string/bytearray and that count of bytes of data to write should follow
it.
"""
# Just use the I2C block data write to write the provided values and
# their length as the first byte.
data = bytearray(len(vals)+1)
data[0] = len(vals) & 0xFF
data[1:] = vals[0:]
self.write_i2c_block_data(addr, cmd, data)
|
[
"Write",
"a",
"block",
"of",
"data",
"to",
"the",
"specified",
"cmd",
"register",
"of",
"the",
"device",
".",
"The",
"amount",
"of",
"data",
"to",
"write",
"should",
"be",
"the",
"first",
"byte",
"inside",
"the",
"vals",
"string",
"/",
"bytearray",
"and",
"that",
"count",
"of",
"bytes",
"of",
"data",
"to",
"write",
"should",
"follow",
"it",
"."
] |
adafruit/Adafruit_Python_PureIO
|
python
|
https://github.com/adafruit/Adafruit_Python_PureIO/blob/6f4976d91c52d70b67b28bba75a429b5328a52c1/Adafruit_PureIO/smbus.py#L270-L281
|
[
"def",
"write_block_data",
"(",
"self",
",",
"addr",
",",
"cmd",
",",
"vals",
")",
":",
"# Just use the I2C block data write to write the provided values and",
"# their length as the first byte.",
"data",
"=",
"bytearray",
"(",
"len",
"(",
"vals",
")",
"+",
"1",
")",
"data",
"[",
"0",
"]",
"=",
"len",
"(",
"vals",
")",
"&",
"0xFF",
"data",
"[",
"1",
":",
"]",
"=",
"vals",
"[",
"0",
":",
"]",
"self",
".",
"write_i2c_block_data",
"(",
"addr",
",",
"cmd",
",",
"data",
")"
] |
6f4976d91c52d70b67b28bba75a429b5328a52c1
|
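write_block_data is pure framing: an SMBus block write leads with a count byte, so the method prepends `len(vals)` and delegates to write_i2c_block_data (which uses the same copy-into-one-buffer pattern with the cmd byte at the front). The framing can be seen without touching a bus:

```python
vals = bytes([0x10, 0x20, 0x30])  # example payload
data = bytearray(len(vals) + 1)
data[0] = len(vals) & 0xFF        # SMBus count byte comes first
data[1:] = vals
print(data.hex())                 # '03102030'
```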
test
|
SMBus.write_i2c_block_data
|
Write a buffer of data to the specified cmd register of the device.
|
Adafruit_PureIO/smbus.py
|
def write_i2c_block_data(self, addr, cmd, vals):
"""Write a buffer of data to the specified cmd register of the device.
"""
assert self._device is not None, 'Bus must be opened before operations are made against it!'
# Construct a string of data to send, including room for the command register.
data = bytearray(len(vals)+1)
data[0] = cmd & 0xFF # Command register at the start.
data[1:] = vals[0:] # Copy in the block data (ugly but necessary to ensure
# the entire write happens in one transaction).
# Send the data to the device.
self._select_device(addr)
self._device.write(data)
|
def write_i2c_block_data(self, addr, cmd, vals):
"""Write a buffer of data to the specified cmd register of the device.
"""
assert self._device is not None, 'Bus must be opened before operations are made against it!'
# Construct a string of data to send, including room for the command register.
data = bytearray(len(vals)+1)
data[0] = cmd & 0xFF # Command register at the start.
data[1:] = vals[0:] # Copy in the block data (ugly but necessary to ensure
# the entire write happens in one transaction).
# Send the data to the device.
self._select_device(addr)
self._device.write(data)
|
[
"Write",
"a",
"buffer",
"of",
"data",
"to",
"the",
"specified",
"cmd",
"register",
"of",
"the",
"device",
"."
] |
adafruit/Adafruit_Python_PureIO
|
python
|
https://github.com/adafruit/Adafruit_Python_PureIO/blob/6f4976d91c52d70b67b28bba75a429b5328a52c1/Adafruit_PureIO/smbus.py#L283-L294
|
[
"def",
"write_i2c_block_data",
"(",
"self",
",",
"addr",
",",
"cmd",
",",
"vals",
")",
":",
"assert",
"self",
".",
"_device",
"is",
"not",
"None",
",",
"'Bus must be opened before operations are made against it!'",
"# Construct a string of data to send, including room for the command register.",
"data",
"=",
"bytearray",
"(",
"len",
"(",
"vals",
")",
"+",
"1",
")",
"data",
"[",
"0",
"]",
"=",
"cmd",
"&",
"0xFF",
"# Command register at the start.",
"data",
"[",
"1",
":",
"]",
"=",
"vals",
"[",
"0",
":",
"]",
"# Copy in the block data (ugly but necessary to ensure",
"# the entire write happens in one transaction).",
"# Send the data to the device.",
"self",
".",
"_select_device",
"(",
"addr",
")",
"self",
".",
"_device",
".",
"write",
"(",
"data",
")"
] |
6f4976d91c52d70b67b28bba75a429b5328a52c1
|
test
|
SMBus.process_call
|
Perform a smbus process call by writing a word (2 byte) value to
the specified register of the device, and then reading a word of response
data (which is returned).
|
Adafruit_PureIO/smbus.py
|
def process_call(self, addr, cmd, val):
"""Perform a smbus process call by writing a word (2 byte) value to
the specified register of the device, and then reading a word of response
data (which is returned).
"""
assert self._device is not None, 'Bus must be opened before operations are made against it!'
# Build ctypes values to marshall between ioctl and Python.
data = create_string_buffer(struct.pack('=BH', cmd, val))
result = c_uint16()
# Build ioctl request.
request = make_i2c_rdwr_data([
(addr, 0, 3, cast(pointer(data), POINTER(c_uint8))), # Write data.
(addr, I2C_M_RD, 2, cast(pointer(result), POINTER(c_uint8))) # Read word (2 bytes).
])
# Make ioctl call and return result data.
ioctl(self._device.fileno(), I2C_RDWR, request)
# Note the python-smbus code appears to have a rather serious bug and
# does not return the result value! This is fixed below by returning it.
return result.value
|
def process_call(self, addr, cmd, val):
"""Perform a smbus process call by writing a word (2 byte) value to
the specified register of the device, and then reading a word of response
data (which is returned).
"""
assert self._device is not None, 'Bus must be opened before operations are made against it!'
# Build ctypes values to marshall between ioctl and Python.
data = create_string_buffer(struct.pack('=BH', cmd, val))
result = c_uint16()
# Build ioctl request.
request = make_i2c_rdwr_data([
(addr, 0, 3, cast(pointer(data), POINTER(c_uint8))), # Write data.
(addr, I2C_M_RD, 2, cast(pointer(result), POINTER(c_uint8))) # Read word (2 bytes).
])
# Make ioctl call and return result data.
ioctl(self._device.fileno(), I2C_RDWR, request)
# Note the python-smbus code appears to have a rather serious bug and
# does not return the result value! This is fixed below by returning it.
return result.value
|
[
"Perform",
"a",
"smbus",
"process",
"call",
"by",
"writing",
"a",
"word",
"(",
"2",
"byte",
")",
"value",
"to",
"the",
"specified",
"register",
"of",
"the",
"device",
"and",
"then",
"reading",
"a",
"word",
"of",
"response",
"data",
"(",
"which",
"is",
"returned",
")",
"."
] |
adafruit/Adafruit_Python_PureIO
|
python
|
https://github.com/adafruit/Adafruit_Python_PureIO/blob/6f4976d91c52d70b67b28bba75a429b5328a52c1/Adafruit_PureIO/smbus.py#L296-L314
|
[
"def",
"process_call",
"(",
"self",
",",
"addr",
",",
"cmd",
",",
"val",
")",
":",
"assert",
"self",
".",
"_device",
"is",
"not",
"None",
",",
"'Bus must be opened before operations are made against it!'",
"# Build ctypes values to marshall between ioctl and Python.",
"data",
"=",
"create_string_buffer",
"(",
"struct",
".",
"pack",
"(",
"'=BH'",
",",
"cmd",
",",
"val",
")",
")",
"result",
"=",
"c_uint16",
"(",
")",
"# Build ioctl request.",
"request",
"=",
"make_i2c_rdwr_data",
"(",
"[",
"(",
"addr",
",",
"0",
",",
"3",
",",
"cast",
"(",
"pointer",
"(",
"data",
")",
",",
"POINTER",
"(",
"c_uint8",
")",
")",
")",
",",
"# Write data.",
"(",
"addr",
",",
"I2C_M_RD",
",",
"2",
",",
"cast",
"(",
"pointer",
"(",
"result",
")",
",",
"POINTER",
"(",
"c_uint8",
")",
")",
")",
"# Read word (2 bytes).",
"]",
")",
"# Make ioctl call and return result data.",
"ioctl",
"(",
"self",
".",
"_device",
".",
"fileno",
"(",
")",
",",
"I2C_RDWR",
",",
"request",
")",
"# Note the python-smbus code appears to have a rather serious bug and",
"# does not return the result value! This is fixed below by returning it.",
"return",
"result",
".",
"value"
] |
6f4976d91c52d70b67b28bba75a429b5328a52c1
|
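process_call reads the device's two response bytes straight into a `c_uint16` through a cast `c_uint8` pointer. That aliasing trick works on its own, without any ioctl; the decoded value below assumes a little-endian host:

```python
from ctypes import POINTER, c_uint8, c_uint16, cast, pointer

result = c_uint16()
raw = cast(pointer(result), POINTER(c_uint8))  # view the word as two raw bytes
raw[0], raw[1] = 0x34, 0x12                    # bytes as a device might return them
print(hex(result.value))                       # 0x1234 on a little-endian host
```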
test
|
File.cdn_url
|
Returns file's CDN url.
Usage example::
>>> file_ = File('a771f854-c2cb-408a-8c36-71af77811f3b')
>>> file_.cdn_url
https://ucarecdn.com/a771f854-c2cb-408a-8c36-71af77811f3b/
You can set default effects::
>>> file_.default_effects = 'effect/flip/-/effect/mirror/'
>>> file_.cdn_url
https://ucarecdn.com/a771f854-c2cb-408a-8c36-71af77811f3b/-/effect/flip/-/effect/mirror/
|
pyuploadcare/api_resources.py
|
def cdn_url(self):
"""Returns file's CDN url.
Usage example::
>>> file_ = File('a771f854-c2cb-408a-8c36-71af77811f3b')
>>> file_.cdn_url
https://ucarecdn.com/a771f854-c2cb-408a-8c36-71af77811f3b/
You can set default effects::
>>> file_.default_effects = 'effect/flip/-/effect/mirror/'
>>> file_.cdn_url
https://ucarecdn.com/a771f854-c2cb-408a-8c36-71af77811f3b/-/effect/flip/-/effect/mirror/
"""
return '{cdn_base}{path}'.format(cdn_base=conf.cdn_base,
path=self.cdn_path(self.default_effects))
|
def cdn_url(self):
"""Returns file's CDN url.
Usage example::
>>> file_ = File('a771f854-c2cb-408a-8c36-71af77811f3b')
>>> file_.cdn_url
https://ucarecdn.com/a771f854-c2cb-408a-8c36-71af77811f3b/
You can set default effects::
>>> file_.default_effects = 'effect/flip/-/effect/mirror/'
>>> file_.cdn_url
https://ucarecdn.com/a771f854-c2cb-408a-8c36-71af77811f3b/-/effect/flip/-/effect/mirror/
"""
return '{cdn_base}{path}'.format(cdn_base=conf.cdn_base,
path=self.cdn_path(self.default_effects))
|
[
"Returns",
"file",
"s",
"CDN",
"url",
"."
] |
uploadcare/pyuploadcare
|
python
|
https://github.com/uploadcare/pyuploadcare/blob/cefddc0306133a71e37b18e8700df5948ef49b37/pyuploadcare/api_resources.py#L106-L123
|
[
"def",
"cdn_url",
"(",
"self",
")",
":",
"return",
"'{cdn_base}{path}'",
".",
"format",
"(",
"cdn_base",
"=",
"conf",
".",
"cdn_base",
",",
"path",
"=",
"self",
".",
"cdn_path",
"(",
"self",
".",
"default_effects",
")",
")"
] |
cefddc0306133a71e37b18e8700df5948ef49b37
|
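cdn_url itself is nothing more than string composition over `conf.cdn_base` and the CDN path; with the values from its docstring example it reduces to:

```python
cdn_base = 'https://ucarecdn.com/'  # stand-in for conf.cdn_base
path = 'a771f854-c2cb-408a-8c36-71af77811f3b/-/effect/flip/-/effect/mirror/'
print('{cdn_base}{path}'.format(cdn_base=cdn_base, path=path))
```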
test
|
File.datetime_stored
|
Returns file's store aware *datetime* in UTC format.
It might do API request once because it depends on ``info()``.
|
pyuploadcare/api_resources.py
|
def datetime_stored(self):
"""Returns file's store aware *datetime* in UTC format.
It might do API request once because it depends on ``info()``.
"""
if self.info().get('datetime_stored'):
return dateutil.parser.parse(self.info()['datetime_stored'])
|
def datetime_stored(self):
"""Returns file's store aware *datetime* in UTC format.
It might do API request once because it depends on ``info()``.
"""
if self.info().get('datetime_stored'):
return dateutil.parser.parse(self.info()['datetime_stored'])
|
[
"Returns",
"file",
"s",
"store",
"aware",
"*",
"datetime",
"*",
"in",
"UTC",
"format",
"."
] |
uploadcare/pyuploadcare
|
python
|
https://github.com/uploadcare/pyuploadcare/blob/cefddc0306133a71e37b18e8700df5948ef49b37/pyuploadcare/api_resources.py#L150-L157
|
[
"def",
"datetime_stored",
"(",
"self",
")",
":",
"if",
"self",
".",
"info",
"(",
")",
".",
"get",
"(",
"'datetime_stored'",
")",
":",
"return",
"dateutil",
".",
"parser",
".",
"parse",
"(",
"self",
".",
"info",
"(",
")",
"[",
"'datetime_stored'",
"]",
")"
] |
cefddc0306133a71e37b18e8700df5948ef49b37
|
test
|
File.datetime_removed
|
Returns file's remove aware *datetime* in UTC format.
It might do API request once because it depends on ``info()``.
|
pyuploadcare/api_resources.py
|
def datetime_removed(self):
"""Returns file's remove aware *datetime* in UTC format.
It might do API request once because it depends on ``info()``.
"""
if self.info().get('datetime_removed'):
return dateutil.parser.parse(self.info()['datetime_removed'])
|
def datetime_removed(self):
"""Returns file's remove aware *datetime* in UTC format.
It might do API request once because it depends on ``info()``.
"""
if self.info().get('datetime_removed'):
return dateutil.parser.parse(self.info()['datetime_removed'])
|
[
"Returns",
"file",
"s",
"remove",
"aware",
"*",
"datetime",
"*",
"in",
"UTC",
"format",
"."
] |
uploadcare/pyuploadcare
|
python
|
https://github.com/uploadcare/pyuploadcare/blob/cefddc0306133a71e37b18e8700df5948ef49b37/pyuploadcare/api_resources.py#L159-L166
|
[
"def",
"datetime_removed",
"(",
"self",
")",
":",
"if",
"self",
".",
"info",
"(",
")",
".",
"get",
"(",
"'datetime_removed'",
")",
":",
"return",
"dateutil",
".",
"parser",
".",
"parse",
"(",
"self",
".",
"info",
"(",
")",
"[",
"'datetime_removed'",
"]",
")"
] |
cefddc0306133a71e37b18e8700df5948ef49b37
|
test
|
File.datetime_uploaded
|
Returns file's upload aware *datetime* in UTC format.
It might do API request once because it depends on ``info()``.
|
pyuploadcare/api_resources.py
|
def datetime_uploaded(self):
"""Returns file's upload aware *datetime* in UTC format.
It might do API request once because it depends on ``info()``.
"""
if self.info().get('datetime_uploaded'):
return dateutil.parser.parse(self.info()['datetime_uploaded'])
|
def datetime_uploaded(self):
"""Returns file's upload aware *datetime* in UTC format.
It might do API request once because it depends on ``info()``.
"""
if self.info().get('datetime_uploaded'):
return dateutil.parser.parse(self.info()['datetime_uploaded'])
|
[
"Returns",
"file",
"s",
"upload",
"aware",
"*",
"datetime",
"*",
"in",
"UTC",
"format",
"."
] |
uploadcare/pyuploadcare
|
python
|
https://github.com/uploadcare/pyuploadcare/blob/cefddc0306133a71e37b18e8700df5948ef49b37/pyuploadcare/api_resources.py#L168-L175
|
[
"def",
"datetime_uploaded",
"(",
"self",
")",
":",
"if",
"self",
".",
"info",
"(",
")",
".",
"get",
"(",
"'datetime_uploaded'",
")",
":",
"return",
"dateutil",
".",
"parser",
".",
"parse",
"(",
"self",
".",
"info",
"(",
")",
"[",
"'datetime_uploaded'",
"]",
")"
] |
cefddc0306133a71e37b18e8700df5948ef49b37
|
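All three datetime_* properties defer to `dateutil.parser.parse`, and the "aware" wording in their docstrings means the result carries a timezone. With a hypothetical ISO-8601 timestamp of the kind the REST API returns:

```python
import dateutil.parser

dt = dateutil.parser.parse('2018-11-26T12:49:10.477888Z')  # hypothetical timestamp
print(dt.isoformat())         # 2018-11-26T12:49:10.477888+00:00
print(dt.tzinfo is not None)  # True: a timezone-aware datetime
```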
test
|
File.copy
|
Creates a File Copy on Uploadcare or Custom Storage.
File.copy method is deprecated and will be removed in 4.0.0.
Please use `create_local_copy` and `create_remote_copy` instead.
Args:
- effects:
Adds CDN image effects. If ``self.default_effects`` property
is set effects will be combined with default effects.
- target:
Name of a custom storage connected to your project.
Uploadcare storage is used if target is absent.
|
pyuploadcare/api_resources.py
|
def copy(self, effects=None, target=None):
"""Creates a File Copy on Uploadcare or Custom Storage.
File.copy method is deprecated and will be removed in 4.0.0.
Please use `create_local_copy` and `create_remote_copy` instead.
Args:
- effects:
Adds CDN image effects. If ``self.default_effects`` property
is set effects will be combined with default effects.
- target:
Name of a custom storage connected to your project.
Uploadcare storage is used if target is absent.
"""
warning = """File.copy method is deprecated and will be
removed in 4.0.0.
Please use `create_local_copy`
and `create_remote_copy` instead.
"""
logger.warn('API Warning: {0}'.format(warning))
if target is not None:
return self.create_remote_copy(target, effects)
else:
return self.create_local_copy(effects)
|
def copy(self, effects=None, target=None):
"""Creates a File Copy on Uploadcare or Custom Storage.
File.copy method is deprecated and will be removed in 4.0.0.
Please use `create_local_copy` and `create_remote_copy` instead.
Args:
- effects:
Adds CDN image effects. If ``self.default_effects`` property
is set effects will be combined with default effects.
- target:
Name of a custom storage connected to your project.
Uploadcare storage is used if target is absent.
"""
warning = """File.copy method is deprecated and will be
removed in 4.0.0.
Please use `create_local_copy`
and `create_remote_copy` instead.
"""
logger.warn('API Warning: {0}'.format(warning))
if target is not None:
return self.create_remote_copy(target, effects)
else:
return self.create_local_copy(effects)
|
[
"Creates",
"a",
"File",
"Copy",
"on",
"Uploadcare",
"or",
"Custom",
"Storage",
".",
"File",
".",
"copy",
"method",
"is",
"deprecated",
"and",
"will",
"be",
"removed",
"in",
"4",
".",
"0",
".",
"0",
".",
"Please",
"use",
"create_local_copy",
"and",
"create_remote_copy",
"instead",
"."
] |
uploadcare/pyuploadcare
|
python
|
https://github.com/uploadcare/pyuploadcare/blob/cefddc0306133a71e37b18e8700df5948ef49b37/pyuploadcare/api_resources.py#L240-L265
|
[
"def",
"copy",
"(",
"self",
",",
"effects",
"=",
"None",
",",
"target",
"=",
"None",
")",
":",
"warning",
"=",
"\"\"\"File.copy method is deprecated and will be\n removed in 4.0.0.\n Please use `create_local_copy`\n and `create_remote_copy` instead.\n \"\"\"",
"logger",
".",
"warn",
"(",
"'API Warning: {0}'",
".",
"format",
"(",
"warning",
")",
")",
"if",
"target",
"is",
"not",
"None",
":",
"return",
"self",
".",
"create_remote_copy",
"(",
"target",
",",
"effects",
")",
"else",
":",
"return",
"self",
".",
"create_local_copy",
"(",
"effects",
")"
] |
cefddc0306133a71e37b18e8700df5948ef49b37
|
test
|
File.create_local_copy
|
Creates a Local File Copy on Uploadcare Storage.
Args:
- effects:
Adds CDN image effects. If ``self.default_effects`` property
is set effects will be combined with default effects.
- store:
If ``store`` option is set to False the copy of your file will
be deleted in 24 hour period after the upload.
Works only if `autostore` is enabled in the project.
|
pyuploadcare/api_resources.py
|
def create_local_copy(self, effects=None, store=None):
"""Creates a Local File Copy on Uploadcare Storage.
Args:
- effects:
Adds CDN image effects. If ``self.default_effects`` property
is set effects will be combined with default effects.
- store:
If ``store`` option is set to False the copy of your file will
be deleted in 24 hour period after the upload.
Works only if `autostore` is enabled in the project.
"""
effects = self._build_effects(effects)
store = store or ''
data = {
'source': self.cdn_path(effects)
}
if store:
data['store'] = store
return rest_request('POST', 'files/', data=data)
|
def create_local_copy(self, effects=None, store=None):
"""Creates a Local File Copy on Uploadcare Storage.
Args:
- effects:
Adds CDN image effects. If ``self.default_effects`` property
is set effects will be combined with default effects.
- store:
If ``store`` option is set to False the copy of your file will
be deleted in 24 hour period after the upload.
Works only if `autostore` is enabled in the project.
"""
effects = self._build_effects(effects)
store = store or ''
data = {
'source': self.cdn_path(effects)
}
if store:
data['store'] = store
return rest_request('POST', 'files/', data=data)
|
[
"Creates",
"a",
"Local",
"File",
"Copy",
"on",
"Uploadcare",
"Storage",
"."
] |
uploadcare/pyuploadcare
|
python
|
https://github.com/uploadcare/pyuploadcare/blob/cefddc0306133a71e37b18e8700df5948ef49b37/pyuploadcare/api_resources.py#L267-L287
|
[
"def",
"create_local_copy",
"(",
"self",
",",
"effects",
"=",
"None",
",",
"store",
"=",
"None",
")",
":",
"effects",
"=",
"self",
".",
"_build_effects",
"(",
"effects",
")",
"store",
"=",
"store",
"or",
"''",
"data",
"=",
"{",
"'source'",
":",
"self",
".",
"cdn_path",
"(",
"effects",
")",
"}",
"if",
"store",
":",
"data",
"[",
"'store'",
"]",
"=",
"store",
"return",
"rest_request",
"(",
"'POST'",
",",
"'files/'",
",",
"data",
"=",
"data",
")"
] |
cefddc0306133a71e37b18e8700df5948ef49b37
|
test
|
File.create_remote_copy
|
Creates file copy in remote storage.
Args:
- target:
Name of a custom storage connected to the project.
- effects:
Adds CDN image effects to ``self.default_effects`` if any.
- make_public:
To forbid public from accessing your files on the storage set
``make_public`` option to be False.
Default value is None. Files have public access by default.
- pattern:
Specify ``pattern`` option to set S3 object key name.
Takes precedence over pattern set in project settings.
If neither is specified defaults to
`${uuid}/${filename}${effects}${ext}`.
For more information on each of the options above please refer to
REST API docs https://uploadcare.com/docs/api_reference/rest/accessing_files/.
Following example copies a file to custom storage named ``samplefs``:
>>> file = File('e8ebfe20-8c11-4a94-9b40-52ecad7d8d1a')
>>> file.create_remote_copy(target='samplefs',
>>> make_public=True,
>>> pattern='${uuid}/${filename}${ext}')
Now custom storage ``samplefs`` contains publicly available file
with original filename billmurray.jpg in
in the directory named ``e8ebfe20-8c11-4a94-9b40-52ecad7d8d1a``.
|
pyuploadcare/api_resources.py
|
def create_remote_copy(self, target, effects=None, make_public=None,
pattern=None):
"""Creates file copy in remote storage.
Args:
- target:
Name of a custom storage connected to the project.
- effects:
Adds CDN image effects to ``self.default_effects`` if any.
- make_public:
To forbid public from accessing your files on the storage set
``make_public`` option to be False.
Default value is None. Files have public access by default.
- pattern:
Specify ``pattern`` option to set S3 object key name.
Takes precedence over pattern set in project settings.
If neither is specified defaults to
`${uuid}/${filename}${effects}${ext}`.
For more information on each of the options above please refer to
REST API docs https://uploadcare.com/docs/api_reference/rest/accessing_files/.
Following example copies a file to custom storage named ``samplefs``:
>>> file = File('e8ebfe20-8c11-4a94-9b40-52ecad7d8d1a')
>>> file.create_remote_copy(target='samplefs',
>>> make_public=True,
>>> pattern='${uuid}/${filename}${ext}')
Now custom storage ``samplefs`` contains publicly available file
with original filename billmurray.jpg in
in the directory named ``e8ebfe20-8c11-4a94-9b40-52ecad7d8d1a``.
"""
effects = self._build_effects(effects)
data = {
'source': self.cdn_path(effects),
'target': target
}
if make_public is not None:
data['make_public'] = make_public
if pattern is not None:
data['pattern'] = pattern
return rest_request('POST', 'files/', data=data)
|
def create_remote_copy(self, target, effects=None, make_public=None,
pattern=None):
"""Creates file copy in remote storage.
Args:
- target:
Name of a custom storage connected to the project.
- effects:
Adds CDN image effects to ``self.default_effects`` if any.
- make_public:
To forbid public from accessing your files on the storage set
``make_public`` option to be False.
Default value is None. Files have public access by default.
- pattern:
Specify ``pattern`` option to set S3 object key name.
Takes precedence over pattern set in project settings.
If neither is specified defaults to
`${uuid}/${filename}${effects}${ext}`.
For more information on each of the options above please refer to
REST API docs https://uploadcare.com/docs/api_reference/rest/accessing_files/.
Following example copies a file to custom storage named ``samplefs``:
>>> file = File('e8ebfe20-8c11-4a94-9b40-52ecad7d8d1a')
>>> file.create_remote_copy(target='samplefs',
>>> make_public=True,
>>> pattern='${uuid}/${filename}${ext}')
Now custom storage ``samplefs`` contains publicly available file
with original filename billmurray.jpg in
in the directory named ``e8ebfe20-8c11-4a94-9b40-52ecad7d8d1a``.
"""
effects = self._build_effects(effects)
data = {
'source': self.cdn_path(effects),
'target': target
}
if make_public is not None:
data['make_public'] = make_public
if pattern is not None:
data['pattern'] = pattern
return rest_request('POST', 'files/', data=data)
|
[
"Creates",
"file",
"copy",
"in",
"remote",
"storage",
"."
] |
uploadcare/pyuploadcare
|
python
|
https://github.com/uploadcare/pyuploadcare/blob/cefddc0306133a71e37b18e8700df5948ef49b37/pyuploadcare/api_resources.py#L289-L333
|
[
"def",
"create_remote_copy",
"(",
"self",
",",
"target",
",",
"effects",
"=",
"None",
",",
"make_public",
"=",
"None",
",",
"pattern",
"=",
"None",
")",
":",
"effects",
"=",
"self",
".",
"_build_effects",
"(",
"effects",
")",
"data",
"=",
"{",
"'source'",
":",
"self",
".",
"cdn_path",
"(",
"effects",
")",
",",
"'target'",
":",
"target",
"}",
"if",
"make_public",
"is",
"not",
"None",
":",
"data",
"[",
"'make_public'",
"]",
"=",
"make_public",
"if",
"pattern",
"is",
"not",
"None",
":",
"data",
"[",
"'pattern'",
"]",
"=",
"pattern",
"return",
"rest_request",
"(",
"'POST'",
",",
"'files/'",
",",
"data",
"=",
"data",
")"
] |
cefddc0306133a71e37b18e8700df5948ef49b37
|
test
|
File.construct_from
|
Constructs ``File`` instance from file information.
For example you have result of
``/files/1921953c-5d94-4e47-ba36-c2e1dd165e1a/`` API request::
>>> file_info = {
# ...
'uuid': '1921953c-5d94-4e47-ba36-c2e1dd165e1a',
# ...
}
>>> File.construct_from(file_info)
<uploadcare.File 1921953c-5d94-4e47-ba36-c2e1dd165e1a>
|
pyuploadcare/api_resources.py
|
def construct_from(cls, file_info):
"""Constructs ``File`` instance from file information.
For example you have result of
``/files/1921953c-5d94-4e47-ba36-c2e1dd165e1a/`` API request::
>>> file_info = {
# ...
'uuid': '1921953c-5d94-4e47-ba36-c2e1dd165e1a',
# ...
}
>>> File.construct_from(file_info)
<uploadcare.File 1921953c-5d94-4e47-ba36-c2e1dd165e1a>
"""
file_ = cls(file_info['uuid'])
file_.default_effects = file_info.get('default_effects')
file_._info_cache = file_info
return file_
|
def construct_from(cls, file_info):
"""Constructs ``File`` instance from file information.
For example you have result of
``/files/1921953c-5d94-4e47-ba36-c2e1dd165e1a/`` API request::
>>> file_info = {
# ...
'uuid': '1921953c-5d94-4e47-ba36-c2e1dd165e1a',
# ...
}
>>> File.construct_from(file_info)
<uploadcare.File 1921953c-5d94-4e47-ba36-c2e1dd165e1a>
"""
file_ = cls(file_info['uuid'])
file_.default_effects = file_info.get('default_effects')
file_._info_cache = file_info
return file_
|
[
"Constructs",
"File",
"instance",
"from",
"file",
"information",
"."
] |
uploadcare/pyuploadcare
|
python
|
https://github.com/uploadcare/pyuploadcare/blob/cefddc0306133a71e37b18e8700df5948ef49b37/pyuploadcare/api_resources.py#L340-L358
|
[
"def",
"construct_from",
"(",
"cls",
",",
"file_info",
")",
":",
"file_",
"=",
"cls",
"(",
"file_info",
"[",
"'uuid'",
"]",
")",
"file_",
".",
"default_effects",
"=",
"file_info",
".",
"get",
"(",
"'default_effects'",
")",
"file_",
".",
"_info_cache",
"=",
"file_info",
"return",
"file_"
] |
cefddc0306133a71e37b18e8700df5948ef49b37
|
test
|
File.upload
|
Uploads a file and returns ``File`` instance.
Args:
- file_obj: file object to upload to
- store (Optional[bool]): Should the file be automatically stored
upon upload. Defaults to None.
- False - do not store file
- True - store file (can result in error if autostore
is disabled for project)
- None - use project settings
Returns:
``File`` instance
|
pyuploadcare/api_resources.py
|
def upload(cls, file_obj, store=None):
"""Uploads a file and returns ``File`` instance.
Args:
- file_obj: file object to upload to
- store (Optional[bool]): Should the file be automatically stored
upon upload. Defaults to None.
- False - do not store file
- True - store file (can result in error if autostore
is disabled for project)
- None - use project settings
Returns:
``File`` instance
"""
if store is None:
store = 'auto'
elif store:
store = '1'
else:
store = '0'
data = {
'UPLOADCARE_STORE': store,
}
files = uploading_request('POST', 'base/', data=data,
files={'file': file_obj})
file_ = cls(files['file'])
return file_
|
def upload(cls, file_obj, store=None):
"""Uploads a file and returns ``File`` instance.
Args:
- file_obj: file object to upload to
- store (Optional[bool]): Should the file be automatically stored
upon upload. Defaults to None.
- False - do not store file
- True - store file (can result in error if autostore
is disabled for project)
- None - use project settings
Returns:
``File`` instance
"""
if store is None:
store = 'auto'
elif store:
store = '1'
else:
store = '0'
data = {
'UPLOADCARE_STORE': store,
}
files = uploading_request('POST', 'base/', data=data,
files={'file': file_obj})
file_ = cls(files['file'])
return file_
|
[
"Uploads",
"a",
"file",
"and",
"returns",
"File",
"instance",
"."
] |
uploadcare/pyuploadcare
|
python
|
https://github.com/uploadcare/pyuploadcare/blob/cefddc0306133a71e37b18e8700df5948ef49b37/pyuploadcare/api_resources.py#L361-L391
|
[
"def",
"upload",
"(",
"cls",
",",
"file_obj",
",",
"store",
"=",
"None",
")",
":",
"if",
"store",
"is",
"None",
":",
"store",
"=",
"'auto'",
"elif",
"store",
":",
"store",
"=",
"'1'",
"else",
":",
"store",
"=",
"'0'",
"data",
"=",
"{",
"'UPLOADCARE_STORE'",
":",
"store",
",",
"}",
"files",
"=",
"uploading_request",
"(",
"'POST'",
",",
"'base/'",
",",
"data",
"=",
"data",
",",
"files",
"=",
"{",
"'file'",
":",
"file_obj",
"}",
")",
"file_",
"=",
"cls",
"(",
"files",
"[",
"'file'",
"]",
")",
"return",
"file_"
] |
cefddc0306133a71e37b18e8700df5948ef49b37
|
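upload() collapses its three-valued `store` argument into the string form the Upload API expects. The same mapping as an illustrative stand-alone helper (`store_flag` is not part of pyuploadcare):

```python
def store_flag(store=None):
    """Mirror upload()'s coercion: None -> 'auto', True -> '1', False -> '0'."""
    if store is None:
        return 'auto'
    return '1' if store else '0'

assert store_flag() == 'auto'
assert store_flag(True) == '1' and store_flag(False) == '0'
```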
test
|
File.upload_from_url
|
Uploads file from given url and returns ``FileFromUrl`` instance.
Args:
- url (str): URL of file to upload to
- store (Optional[bool]): Should the file be automatically stored
upon upload. Defaults to None.
- False - do not store file
- True - store file (can result in error if autostore
is disabled for project)
- None - use project settings
- filename (Optional[str]): Name of the uploaded file. If this not
specified the filename will be obtained from response headers
or source URL. Defaults to None.
Returns:
``FileFromUrl`` instance
|
pyuploadcare/api_resources.py
|
def upload_from_url(cls, url, store=None, filename=None):
"""Uploads file from given url and returns ``FileFromUrl`` instance.
Args:
- url (str): URL of file to upload to
- store (Optional[bool]): Should the file be automatically stored
upon upload. Defaults to None.
- False - do not store file
- True - store file (can result in error if autostore
is disabled for project)
- None - use project settings
- filename (Optional[str]): Name of the uploaded file. If this not
specified the filename will be obtained from response headers
or source URL. Defaults to None.
Returns:
``FileFromUrl`` instance
"""
if store is None:
store = 'auto'
elif store:
store = '1'
else:
store = '0'
data = {
'source_url': url,
'store': store,
}
if filename:
data['filename'] = filename
result = uploading_request('POST', 'from_url/',
data=data)
if 'token' not in result:
raise APIError(
'could not find token in result: {0}'.format(result)
)
file_from_url = cls.FileFromUrl(result['token'])
return file_from_url
|
def upload_from_url(cls, url, store=None, filename=None):
"""Uploads file from given url and returns ``FileFromUrl`` instance.
Args:
- url (str): URL of file to upload to
- store (Optional[bool]): Should the file be automatically stored
upon upload. Defaults to None.
- False - do not store file
- True - store file (can result in error if autostore
is disabled for project)
- None - use project settings
- filename (Optional[str]): Name of the uploaded file. If this not
specified the filename will be obtained from response headers
or source URL. Defaults to None.
Returns:
``FileFromUrl`` instance
"""
if store is None:
store = 'auto'
elif store:
store = '1'
else:
store = '0'
data = {
'source_url': url,
'store': store,
}
if filename:
data['filename'] = filename
result = uploading_request('POST', 'from_url/',
data=data)
if 'token' not in result:
raise APIError(
'could not find token in result: {0}'.format(result)
)
file_from_url = cls.FileFromUrl(result['token'])
return file_from_url
|
[
"Uploads",
"file",
"from",
"given",
"url",
"and",
"returns",
"FileFromUrl",
"instance",
"."
] |
uploadcare/pyuploadcare
|
python
|
https://github.com/uploadcare/pyuploadcare/blob/cefddc0306133a71e37b18e8700df5948ef49b37/pyuploadcare/api_resources.py#L394-L434
|
[
"def",
"upload_from_url",
"(",
"cls",
",",
"url",
",",
"store",
"=",
"None",
",",
"filename",
"=",
"None",
")",
":",
"if",
"store",
"is",
"None",
":",
"store",
"=",
"'auto'",
"elif",
"store",
":",
"store",
"=",
"'1'",
"else",
":",
"store",
"=",
"'0'",
"data",
"=",
"{",
"'source_url'",
":",
"url",
",",
"'store'",
":",
"store",
",",
"}",
"if",
"filename",
":",
"data",
"[",
"'filename'",
"]",
"=",
"filename",
"result",
"=",
"uploading_request",
"(",
"'POST'",
",",
"'from_url/'",
",",
"data",
"=",
"data",
")",
"if",
"'token'",
"not",
"in",
"result",
":",
"raise",
"APIError",
"(",
"'could not find token in result: {0}'",
".",
"format",
"(",
"result",
")",
")",
"file_from_url",
"=",
"cls",
".",
"FileFromUrl",
"(",
"result",
"[",
"'token'",
"]",
")",
"return",
"file_from_url"
] |
cefddc0306133a71e37b18e8700df5948ef49b37
|
test
|
File.upload_from_url_sync
|
Uploads file from given url and returns ``File`` instance.
Args:
- url (str): URL of file to upload to
- store (Optional[bool]): Should the file be automatically stored
upon upload. Defaults to None.
- False - do not store file
- True - store file (can result in error if autostore
is disabled for project)
- None - use project settings
- filename (Optional[str]): Name of the uploaded file. If this not
specified the filename will be obtained from response headers
or source URL. Defaults to None.
- timeout (Optional[int]): seconds to wait for successful upload.
Defaults to 30.
- interval (Optional[float]): interval between upload status checks.
Defaults to 0.3.
- until_ready (Optional[bool]): should we wait until file is
available via CDN. Defaults to False.
Returns:
``File`` instance
Raises:
``TimeoutError`` if file wasn't uploaded in time
|
pyuploadcare/api_resources.py
|
def upload_from_url_sync(cls, url, timeout=30, interval=0.3,
until_ready=False, store=None, filename=None):
"""Uploads file from given url and returns ``File`` instance.
Args:
- url (str): URL of file to upload to
- store (Optional[bool]): Should the file be automatically stored
upon upload. Defaults to None.
- False - do not store file
- True - store file (can result in error if autostore
is disabled for project)
- None - use project settings
- filename (Optional[str]): Name of the uploaded file. If this not
specified the filename will be obtained from response headers
or source URL. Defaults to None.
- timeout (Optional[int]): seconds to wait for successful upload.
Defaults to 30.
- interval (Optional[float]): interval between upload status checks.
Defaults to 0.3.
- until_ready (Optional[bool]): should we wait until file is
available via CDN. Defaults to False.
Returns:
``File`` instance
Raises:
``TimeoutError`` if file wasn't uploaded in time
"""
ffu = cls.upload_from_url(url, store, filename)
return ffu.wait(timeout=timeout, interval=interval,
until_ready=until_ready)
|
def upload_from_url_sync(cls, url, timeout=30, interval=0.3,
until_ready=False, store=None, filename=None):
"""Uploads file from given url and returns ``File`` instance.
Args:
- url (str): URL of file to upload to
- store (Optional[bool]): Should the file be automatically stored
upon upload. Defaults to None.
- False - do not store file
- True - store file (can result in error if autostore
is disabled for project)
- None - use project settings
- filename (Optional[str]): Name of the uploaded file. If this not
specified the filename will be obtained from response headers
or source URL. Defaults to None.
- timeout (Optional[int]): seconds to wait for successful upload.
Defaults to 30.
- interval (Optional[float]): interval between upload status checks.
Defaults to 0.3.
- until_ready (Optional[bool]): should we wait until file is
available via CDN. Defaults to False.
Returns:
``File`` instance
Raises:
``TimeoutError`` if file wasn't uploaded in time
"""
ffu = cls.upload_from_url(url, store, filename)
return ffu.wait(timeout=timeout, interval=interval,
until_ready=until_ready)
|
[
"Uploads",
"file",
"from",
"given",
"url",
"and",
"returns",
"File",
"instance",
"."
] |
uploadcare/pyuploadcare
|
python
|
https://github.com/uploadcare/pyuploadcare/blob/cefddc0306133a71e37b18e8700df5948ef49b37/pyuploadcare/api_resources.py#L437-L468
|
[
"def",
"upload_from_url_sync",
"(",
"cls",
",",
"url",
",",
"timeout",
"=",
"30",
",",
"interval",
"=",
"0.3",
",",
"until_ready",
"=",
"False",
",",
"store",
"=",
"None",
",",
"filename",
"=",
"None",
")",
":",
"ffu",
"=",
"cls",
".",
"upload_from_url",
"(",
"url",
",",
"store",
",",
"filename",
")",
"return",
"ffu",
".",
"wait",
"(",
"timeout",
"=",
"timeout",
",",
"interval",
"=",
"interval",
",",
"until_ready",
"=",
"until_ready",
")"
] |
cefddc0306133a71e37b18e8700df5948ef49b37
|
test
|
FileGroup.file_cdn_urls
|
Returns CDN urls of all files from group without API requesting.
Usage example::
>>> file_group = FileGroup('0513dda0-582f-447d-846f-096e5df9e2bb~2')
>>> file_group.file_cdn_urls[0]
'https://ucarecdn.com/0513dda0-582f-447d-846f-096e5df9e2bb~2/nth/0/'
|
pyuploadcare/api_resources.py
|
def file_cdn_urls(self):
"""Returns CDN urls of all files from group without API requesting.
Usage example::
>>> file_group = FileGroup('0513dda0-582f-447d-846f-096e5df9e2bb~2')
>>> file_group.file_cdn_urls[0]
'https://ucarecdn.com/0513dda0-582f-447d-846f-096e5df9e2bb~2/nth/0/'
"""
file_cdn_urls = []
for file_index in six.moves.xrange(len(self)):
file_cdn_url = '{group_cdn_url}nth/{file_index}/'.format(
group_cdn_url=self.cdn_url,
file_index=file_index
)
file_cdn_urls.append(file_cdn_url)
return file_cdn_urls
|
def file_cdn_urls(self):
"""Returns CDN urls of all files from group without API requesting.
Usage example::
>>> file_group = FileGroup('0513dda0-582f-447d-846f-096e5df9e2bb~2')
>>> file_group.file_cdn_urls[0]
'https://ucarecdn.com/0513dda0-582f-447d-846f-096e5df9e2bb~2/nth/0/'
"""
file_cdn_urls = []
for file_index in six.moves.xrange(len(self)):
file_cdn_url = '{group_cdn_url}nth/{file_index}/'.format(
group_cdn_url=self.cdn_url,
file_index=file_index
)
file_cdn_urls.append(file_cdn_url)
return file_cdn_urls
|
[
"Returns",
"CDN",
"urls",
"of",
"all",
"files",
"from",
"group",
"without",
"API",
"requesting",
"."
] |
uploadcare/pyuploadcare
|
python
|
https://github.com/uploadcare/pyuploadcare/blob/cefddc0306133a71e37b18e8700df5948ef49b37/pyuploadcare/api_resources.py#L659-L676
|
[
"def",
"file_cdn_urls",
"(",
"self",
")",
":",
"file_cdn_urls",
"=",
"[",
"]",
"for",
"file_index",
"in",
"six",
".",
"moves",
".",
"xrange",
"(",
"len",
"(",
"self",
")",
")",
":",
"file_cdn_url",
"=",
"'{group_cdn_url}nth/{file_index}/'",
".",
"format",
"(",
"group_cdn_url",
"=",
"self",
".",
"cdn_url",
",",
"file_index",
"=",
"file_index",
")",
"file_cdn_urls",
".",
"append",
"(",
"file_cdn_url",
")",
"return",
"file_cdn_urls"
] |
cefddc0306133a71e37b18e8700df5948ef49b37
|
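file_cdn_urls needs no API round trip because group member URLs are derived by appending `nth/<index>/` to the group's CDN URL. Rebuilt in isolation with the group from the docstring, assuming it holds two files:

```python
# Group URL taken from the docstring example above; the ~2 suffix is the file count.
group_cdn_url = 'https://ucarecdn.com/0513dda0-582f-447d-846f-096e5df9e2bb~2/'
urls = ['{0}nth/{1}/'.format(group_cdn_url, i) for i in range(2)]
print(urls[0])  # https://ucarecdn.com/0513dda0-582f-447d-846f-096e5df9e2bb~2/nth/0/
```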
test
|
FileGroup.datetime_created
|
Returns file group's create aware *datetime* in UTC format.
|
pyuploadcare/api_resources.py
|
def datetime_created(self):
"""Returns file group's create aware *datetime* in UTC format."""
if self.info().get('datetime_created'):
return dateutil.parser.parse(self.info()['datetime_created'])
|
def datetime_created(self):
"""Returns file group's create aware *datetime* in UTC format."""
if self.info().get('datetime_created'):
return dateutil.parser.parse(self.info()['datetime_created'])
|
[
"Returns",
"file",
"group",
"s",
"create",
"aware",
"*",
"datetime",
"*",
"in",
"UTC",
"format",
"."
] |
uploadcare/pyuploadcare
|
python
|
https://github.com/uploadcare/pyuploadcare/blob/cefddc0306133a71e37b18e8700df5948ef49b37/pyuploadcare/api_resources.py#L700-L703
|
[
"def",
"datetime_created",
"(",
"self",
")",
":",
"if",
"self",
".",
"info",
"(",
")",
".",
"get",
"(",
"'datetime_created'",
")",
":",
"return",
"dateutil",
".",
"parser",
".",
"parse",
"(",
"self",
".",
"info",
"(",
")",
"[",
"'datetime_created'",
"]",
")"
] |
cefddc0306133a71e37b18e8700df5948ef49b37
|
test
|
FileGroup.construct_from
|
Constructs ``FileGroup`` instance from group information.
|
pyuploadcare/api_resources.py
|
def construct_from(cls, group_info):
"""Constructs ``FileGroup`` instance from group information."""
group = cls(group_info['id'])
group._info_cache = group_info
return group
|
def construct_from(cls, group_info):
"""Constructs ``FileGroup`` instance from group information."""
group = cls(group_info['id'])
group._info_cache = group_info
return group
|
[
"Constructs",
"FileGroup",
"instance",
"from",
"group",
"information",
"."
] |
uploadcare/pyuploadcare
|
python
|
https://github.com/uploadcare/pyuploadcare/blob/cefddc0306133a71e37b18e8700df5948ef49b37/pyuploadcare/api_resources.py#L725-L729
|
[
"def",
"construct_from",
"(",
"cls",
",",
"group_info",
")",
":",
"group",
"=",
"cls",
"(",
"group_info",
"[",
"'id'",
"]",
")",
"group",
".",
"_info_cache",
"=",
"group_info",
"return",
"group"
] |
cefddc0306133a71e37b18e8700df5948ef49b37
|
test
|
FileGroup.create
|
Creates file group and returns ``FileGroup`` instance.
It expects iterable object that contains ``File`` instances, e.g.::
>>> file_1 = File('6c5e9526-b0fe-4739-8975-72e8d5ee6342')
>>> file_2 = File('a771f854-c2cb-408a-8c36-71af77811f3b')
>>> FileGroup.create((file_1, file_2))
<uploadcare.FileGroup 0513dda0-6666-447d-846f-096e5df9e2bb~2>
|
pyuploadcare/api_resources.py
|
def create(cls, files):
"""Creates file group and returns ``FileGroup`` instance.
It expects iterable object that contains ``File`` instances, e.g.::
>>> file_1 = File('6c5e9526-b0fe-4739-8975-72e8d5ee6342')
>>> file_2 = File('a771f854-c2cb-408a-8c36-71af77811f3b')
>>> FileGroup.create((file_1, file_2))
<uploadcare.FileGroup 0513dda0-6666-447d-846f-096e5df9e2bb~2>
"""
data = {}
for index, file_ in enumerate(files):
if isinstance(file_, File):
file_index = 'files[{index}]'.format(index=index)
data[file_index] = six.text_type(file_)
else:
raise InvalidParamError(
'all items have to be ``File`` instance'
)
if not data:
raise InvalidParamError('set of files is empty')
group_info = uploading_request('POST', 'group/', data=data)
group = cls.construct_from(group_info)
return group
|
def create(cls, files):
"""Creates file group and returns ``FileGroup`` instance.
It expects iterable object that contains ``File`` instances, e.g.::
>>> file_1 = File('6c5e9526-b0fe-4739-8975-72e8d5ee6342')
>>> file_2 = File('a771f854-c2cb-408a-8c36-71af77811f3b')
>>> FileGroup.create((file_1, file_2))
<uploadcare.FileGroup 0513dda0-6666-447d-846f-096e5df9e2bb~2>
"""
data = {}
for index, file_ in enumerate(files):
if isinstance(file_, File):
file_index = 'files[{index}]'.format(index=index)
data[file_index] = six.text_type(file_)
else:
raise InvalidParamError(
'all items have to be ``File`` instance'
)
if not data:
raise InvalidParamError('set of files is empty')
group_info = uploading_request('POST', 'group/', data=data)
group = cls.construct_from(group_info)
return group
|
[
"Creates",
"file",
"group",
"and",
"returns",
"FileGroup",
"instance",
"."
] |
uploadcare/pyuploadcare
|
python
|
https://github.com/uploadcare/pyuploadcare/blob/cefddc0306133a71e37b18e8700df5948ef49b37/pyuploadcare/api_resources.py#L732-L758
|
[
"def",
"create",
"(",
"cls",
",",
"files",
")",
":",
"data",
"=",
"{",
"}",
"for",
"index",
",",
"file_",
"in",
"enumerate",
"(",
"files",
")",
":",
"if",
"isinstance",
"(",
"file_",
",",
"File",
")",
":",
"file_index",
"=",
"'files[{index}]'",
".",
"format",
"(",
"index",
"=",
"index",
")",
"data",
"[",
"file_index",
"]",
"=",
"six",
".",
"text_type",
"(",
"file_",
")",
"else",
":",
"raise",
"InvalidParamError",
"(",
"'all items have to be ``File`` instance'",
")",
"if",
"not",
"data",
":",
"raise",
"InvalidParamError",
"(",
"'set of files is empty'",
")",
"group_info",
"=",
"uploading_request",
"(",
"'POST'",
",",
"'group/'",
",",
"data",
"=",
"data",
")",
"group",
"=",
"cls",
".",
"construct_from",
"(",
"group_info",
")",
"return",
"group"
] |
cefddc0306133a71e37b18e8700df5948ef49b37
|
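FileGroup.create posts its members as indexed form fields: `files[0]`, `files[1]`, and so on. The dict it builds for the two UUIDs from its docstring example:

```python
uuids = ['6c5e9526-b0fe-4739-8975-72e8d5ee6342',   # UUIDs from the docstring above
         'a771f854-c2cb-408a-8c36-71af77811f3b']
data = {'files[{0}]'.format(i): uuid for i, uuid in enumerate(uuids)}
print(sorted(data))  # ['files[0]', 'files[1]']
```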
test
|
FilesStorage._base_opration
|
Base method for storage operations.
|
pyuploadcare/api_resources.py
|
def _base_opration(self, method):
""" Base method for storage operations.
"""
uuids = self.uuids()
while True:
chunk = list(islice(uuids, 0, self.chunk_size))
if not chunk:
return
rest_request(method, self.storage_url, chunk)
|
def _base_opration(self, method):
""" Base method for storage operations.
"""
uuids = self.uuids()
while True:
chunk = list(islice(uuids, 0, self.chunk_size))
if not chunk:
return
rest_request(method, self.storage_url, chunk)
|
[
"Base",
"method",
"for",
"storage",
"operations",
"."
] |
uploadcare/pyuploadcare
|
python
|
https://github.com/uploadcare/pyuploadcare/blob/cefddc0306133a71e37b18e8700df5948ef49b37/pyuploadcare/api_resources.py#L916-L927
|
[
"def",
"_base_opration",
"(",
"self",
",",
"method",
")",
":",
"uuids",
"=",
"self",
".",
"uuids",
"(",
")",
"while",
"True",
":",
"chunk",
"=",
"list",
"(",
"islice",
"(",
"uuids",
",",
"0",
",",
"self",
".",
"chunk_size",
")",
")",
"if",
"not",
"chunk",
":",
"return",
"rest_request",
"(",
"method",
",",
"self",
".",
"storage_url",
",",
"chunk",
")"
] |
cefddc0306133a71e37b18e8700df5948ef49b37
|
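_base_opration drains the uuids() generator in fixed-size chunks via `islice`, issuing one REST request per chunk. The chunking pattern on its own (the chunk size of 2 is arbitrary here):

```python
from itertools import islice

def chunks(iterable, chunk_size):
    # Repeatedly slice the same iterator; islice consumes it in place.
    iterator = iter(iterable)
    while True:
        chunk = list(islice(iterator, chunk_size))
        if not chunk:
            return
        yield chunk

print(list(chunks(range(5), 2)))  # [[0, 1], [2, 3], [4]]
```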
test
|
FilesStorage.uuids
|
Extract uuid from each item of specified ``seq``.
|
pyuploadcare/api_resources.py
|
def uuids(self):
""" Extract uuid from each item of specified ``seq``.
"""
for f in self._seq:
if isinstance(f, File):
yield f.uuid
elif isinstance(f, six.string_types):
yield f
else:
raise ValueError(
'Invalid type for sequence item: {0}'.format(type(f)))
|
def uuids(self):
""" Extract uuid from each item of specified ``seq``.
"""
for f in self._seq:
if isinstance(f, File):
yield f.uuid
elif isinstance(f, six.string_types):
yield f
else:
raise ValueError(
'Invalid type for sequence item: {0}'.format(type(f)))
|
[
"Extract",
"uuid",
"from",
"each",
"item",
"of",
"specified",
"seq",
"."
] |
uploadcare/pyuploadcare
|
python
|
https://github.com/uploadcare/pyuploadcare/blob/cefddc0306133a71e37b18e8700df5948ef49b37/pyuploadcare/api_resources.py#L929-L939
|
[
"def",
"uuids",
"(",
"self",
")",
":",
"for",
"f",
"in",
"self",
".",
"_seq",
":",
"if",
"isinstance",
"(",
"f",
",",
"File",
")",
":",
"yield",
"f",
".",
"uuid",
"elif",
"isinstance",
"(",
"f",
",",
"six",
".",
"string_types",
")",
":",
"yield",
"f",
"else",
":",
"raise",
"ValueError",
"(",
"'Invalid type for sequence item: {0}'",
".",
"format",
"(",
"type",
"(",
"f",
")",
")",
")"
] |
cefddc0306133a71e37b18e8700df5948ef49b37
|
test
|
_list
|
A common function for building methods of the "list showing".
|
pyuploadcare/ucare_cli/__init__.py
|
def _list(api_list_class, arg_namespace, **extra):
""" A common function for building methods of the "list showing".
"""
if arg_namespace.starting_point:
ordering_field = (arg_namespace.ordering or '').lstrip('-')
if ordering_field in ('', 'datetime_uploaded', 'datetime_created'):
arg_namespace.starting_point = parser.parse(
arg_namespace.starting_point)
items = api_list_class(
starting_point=arg_namespace.starting_point,
ordering=arg_namespace.ordering,
limit=arg_namespace.limit,
request_limit=arg_namespace.request_limit,
**extra
)
items.constructor = lambda x: x
try:
pprint(list(items))
except ValueError as e:
print(e)
|
def _list(api_list_class, arg_namespace, **extra):
""" A common function for building methods of the "list showing".
"""
if arg_namespace.starting_point:
ordering_field = (arg_namespace.ordering or '').lstrip('-')
if ordering_field in ('', 'datetime_uploaded', 'datetime_created'):
arg_namespace.starting_point = parser.parse(
arg_namespace.starting_point)
items = api_list_class(
starting_point=arg_namespace.starting_point,
ordering=arg_namespace.ordering,
limit=arg_namespace.limit,
request_limit=arg_namespace.request_limit,
**extra
)
items.constructor = lambda x: x
try:
pprint(list(items))
except ValueError as e:
print(e)
|
[
"A",
"common",
"function",
"for",
"building",
"methods",
"of",
"the",
"list",
"showing",
"."
] |
uploadcare/pyuploadcare
|
python
|
https://github.com/uploadcare/pyuploadcare/blob/cefddc0306133a71e37b18e8700df5948ef49b37/pyuploadcare/ucare_cli/__init__.py#L38-L59
|
[
"def",
"_list",
"(",
"api_list_class",
",",
"arg_namespace",
",",
"*",
"*",
"extra",
")",
":",
"if",
"arg_namespace",
".",
"starting_point",
":",
"ordering_field",
"=",
"(",
"arg_namespace",
".",
"ordering",
"or",
"''",
")",
".",
"lstrip",
"(",
"'-'",
")",
"if",
"ordering_field",
"in",
"(",
"''",
",",
"'datetime_uploaded'",
",",
"'datetime_created'",
")",
":",
"arg_namespace",
".",
"starting_point",
"=",
"parser",
".",
"parse",
"(",
"arg_namespace",
".",
"starting_point",
")",
"items",
"=",
"api_list_class",
"(",
"starting_point",
"=",
"arg_namespace",
".",
"starting_point",
",",
"ordering",
"=",
"arg_namespace",
".",
"ordering",
",",
"limit",
"=",
"arg_namespace",
".",
"limit",
",",
"request_limit",
"=",
"arg_namespace",
".",
"request_limit",
",",
"*",
"*",
"extra",
")",
"items",
".",
"constructor",
"=",
"lambda",
"x",
":",
"x",
"try",
":",
"pprint",
"(",
"list",
"(",
"items",
")",
")",
"except",
"ValueError",
"as",
"e",
":",
"print",
"(",
"e",
")"
] |
cefddc0306133a71e37b18e8700df5948ef49b37
|
test
|
bar
|
Iterates over the "iter_content" and draws a progress bar to stdout.
|
pyuploadcare/ucare_cli/utils.py
|
def bar(iter_content, parts, title=''):
""" Iterates over the "iter_content" and draws a progress bar to stdout.
"""
parts = max(float(parts), 1.0)
cells = 10
progress = 0
step = cells / parts
draw = lambda progress: sys.stdout.write(
'\r[{0:10}] {1:.2f}% {2}'.format(
'#'*int(progress), progress * cells, title))
for chunk in iter_content:
yield chunk
progress += step
draw(progress)
sys.stdout.flush()
draw(cells)
print('')
|
def bar(iter_content, parts, title=''):
""" Iterates over the "iter_content" and draws a progress bar to stdout.
"""
parts = max(float(parts), 1.0)
cells = 10
progress = 0
step = cells / parts
draw = lambda progress: sys.stdout.write(
'\r[{0:10}] {1:.2f}% {2}'.format(
'#'*int(progress), progress * cells, title))
for chunk in iter_content:
yield chunk
progress += step
draw(progress)
sys.stdout.flush()
draw(cells)
print('')
|
[
"Iterates",
"over",
"the",
"iter_content",
"and",
"draws",
"a",
"progress",
"bar",
"to",
"stdout",
"."
] |
uploadcare/pyuploadcare
|
python
|
https://github.com/uploadcare/pyuploadcare/blob/cefddc0306133a71e37b18e8700df5948ef49b37/pyuploadcare/ucare_cli/utils.py#L26-L46
|
[
"def",
"bar",
"(",
"iter_content",
",",
"parts",
",",
"title",
"=",
"''",
")",
":",
"parts",
"=",
"max",
"(",
"float",
"(",
"parts",
")",
",",
"1.0",
")",
"cells",
"=",
"10",
"progress",
"=",
"0",
"step",
"=",
"cells",
"/",
"parts",
"draw",
"=",
"lambda",
"progress",
":",
"sys",
".",
"stdout",
".",
"write",
"(",
"'\\r[{0:10}] {1:.2f}% {2}'",
".",
"format",
"(",
"'#'",
"*",
"int",
"(",
"progress",
")",
",",
"progress",
"*",
"cells",
",",
"title",
")",
")",
"for",
"chunk",
"in",
"iter_content",
":",
"yield",
"chunk",
"progress",
"+=",
"step",
"draw",
"(",
"progress",
")",
"sys",
".",
"stdout",
".",
"flush",
"(",
")",
"draw",
"(",
"cells",
")",
"print",
"(",
"''",
")"
] |
cefddc0306133a71e37b18e8700df5948ef49b37
|
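bar() maps `parts` chunks onto a 10-cell bar, redrawing in place with a carriage return after each chunk. A condensed rendering of the same arithmetic, with the download loop simulated by sleep (the chunk count is arbitrary):

```python
import sys
import time

parts, cells = 4, 10                  # arbitrary chunk count; 10 cells as in bar()
step = cells / float(parts)
progress = 0.0
for _ in range(parts):
    time.sleep(0.05)                  # stand-in for consuming one chunk
    progress += step
    sys.stdout.write('\r[{0:10}] {1:.2f}%'.format('#' * int(progress), progress * cells))
    sys.stdout.flush()
print('')
```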
test
|
rest_request
|
Makes REST API request and returns response as ``dict``.
It provides auth headers as well and takes settings from ``conf`` module.
Make sure that given ``path`` does not contain leading slash.
Usage example::
>>> rest_request('GET', 'files/?limit=10')
{
'next': 'https://api.uploadcare.com/files/?limit=10&page=2',
'total': 1241,
'page': 1,
'pages': 125,
'per_page': 10,
'previous': None,
'results': [
# ...
{
# ...
'uuid': 1921953c-5d94-4e47-ba36-c2e1dd165e1a,
# ...
},
# ...
]
}
|
pyuploadcare/api.py
|
def rest_request(verb, path, data=None, timeout=conf.DEFAULT,
retry_throttled=conf.DEFAULT):
"""Makes REST API request and returns response as ``dict``.
It provides auth headers as well and takes settings from ``conf`` module.
Make sure that given ``path`` does not contain leading slash.
Usage example::
>>> rest_request('GET', 'files/?limit=10')
{
'next': 'https://api.uploadcare.com/files/?limit=10&page=2',
'total': 1241,
'page': 1,
'pages': 125,
'per_page': 10,
'previous': None,
'results': [
# ...
{
# ...
'uuid': 1921953c-5d94-4e47-ba36-c2e1dd165e1a,
# ...
},
# ...
]
}
"""
if retry_throttled is conf.DEFAULT:
retry_throttled = conf.retry_throttled
path = path.lstrip('/')
url = urljoin(conf.api_base, path)
url_parts = urlsplit(url)
if url_parts.query:
path = url_parts.path + '?' + url_parts.query
else:
path = url_parts.path
content = ''
if data is not None:
content = json.dumps(data)
content_type = 'application/json'
content_md5 = hashlib.md5(content.encode('utf-8')).hexdigest()
def _request():
date = email.utils.formatdate(usegmt=True)
sign_string = '\n'.join([
verb,
content_md5,
content_type,
date,
path,
])
sign_string_as_bytes = sign_string.encode('utf-8')
try:
secret_as_bytes = conf.secret.encode('utf-8')
except AttributeError:
secret_as_bytes = bytes()
sign = hmac.new(secret_as_bytes, sign_string_as_bytes, hashlib.sha1) \
.hexdigest()
headers = {
'Authorization': 'Uploadcare {0}:{1}'.format(conf.pub_key, sign),
'Date': date,
'Content-Type': content_type,
'Accept': 'application/vnd.uploadcare-v{0}+json'.format(
conf.api_version),
'User-Agent': _build_user_agent(),
}
logger.debug('''sent:
verb: {0}
path: {1}
headers: {2}
data: {3}'''.format(verb, path, headers, content))
try:
response = session.request(verb, url, allow_redirects=True,
verify=conf.verify_api_ssl,
headers=headers, data=content,
timeout=_get_timeout(timeout))
except requests.RequestException as exc:
raise APIConnectionError(exc.args[0])
logger.debug(
'got: {0} {1}'.format(response.status_code, response.text)
)
if 'warning' in response.headers:
match = re.search('"(.+)"', response.headers['warning'])
if match:
for warning in match.group(1).split('; '):
logger.warn('API Warning: {0}'.format(warning))
# No content.
if response.status_code == 204:
return {}
if verb.lower() == 'options':
return ''
if 200 <= response.status_code < 300:
if _content_type_from_response(response).endswith(('/json', '+json')):
if verb.lower() == 'head':
return ''
try:
return response.json()
except ValueError as exc:
raise APIError(exc.args[0])
if response.status_code in (401, 403):
raise AuthenticationError(response.content)
if response.status_code in (400, 404):
raise InvalidRequestError(response.content)
if response.status_code == 429:
raise ThrottledRequestError(response)
# Not json or unknown status code.
raise APIError(response.content)
while True:
try:
return _request()
except ThrottledRequestError as e:
if retry_throttled:
logger.debug('Throttled, retry in {0} seconds'.format(e.wait))
time.sleep(e.wait)
retry_throttled -= 1
continue
else:
raise
|
def rest_request(verb, path, data=None, timeout=conf.DEFAULT,
retry_throttled=conf.DEFAULT):
"""Makes REST API request and returns response as ``dict``.
It provides auth headers as well and takes settings from ``conf`` module.
Make sure that given ``path`` does not contain leading slash.
Usage example::
>>> rest_request('GET', 'files/?limit=10')
{
'next': 'https://api.uploadcare.com/files/?limit=10&page=2',
'total': 1241,
'page': 1,
'pages': 125,
'per_page': 10,
'previous': None,
'results': [
# ...
{
# ...
'uuid': 1921953c-5d94-4e47-ba36-c2e1dd165e1a,
# ...
},
# ...
]
}
"""
if retry_throttled is conf.DEFAULT:
retry_throttled = conf.retry_throttled
path = path.lstrip('/')
url = urljoin(conf.api_base, path)
url_parts = urlsplit(url)
if url_parts.query:
path = url_parts.path + '?' + url_parts.query
else:
path = url_parts.path
content = ''
if data is not None:
content = json.dumps(data)
content_type = 'application/json'
content_md5 = hashlib.md5(content.encode('utf-8')).hexdigest()
def _request():
date = email.utils.formatdate(usegmt=True)
sign_string = '\n'.join([
verb,
content_md5,
content_type,
date,
path,
])
sign_string_as_bytes = sign_string.encode('utf-8')
try:
secret_as_bytes = conf.secret.encode('utf-8')
except AttributeError:
secret_as_bytes = bytes()
sign = hmac.new(secret_as_bytes, sign_string_as_bytes, hashlib.sha1) \
.hexdigest()
headers = {
'Authorization': 'Uploadcare {0}:{1}'.format(conf.pub_key, sign),
'Date': date,
'Content-Type': content_type,
'Accept': 'application/vnd.uploadcare-v{0}+json'.format(
conf.api_version),
'User-Agent': _build_user_agent(),
}
logger.debug('''sent:
verb: {0}
path: {1}
headers: {2}
data: {3}'''.format(verb, path, headers, content))
try:
response = session.request(verb, url, allow_redirects=True,
verify=conf.verify_api_ssl,
headers=headers, data=content,
timeout=_get_timeout(timeout))
except requests.RequestException as exc:
raise APIConnectionError(exc.args[0])
logger.debug(
'got: {0} {1}'.format(response.status_code, response.text)
)
if 'warning' in response.headers:
match = re.search('"(.+)"', response.headers['warning'])
if match:
for warning in match.group(1).split('; '):
logger.warn('API Warning: {0}'.format(warning))
# No content.
if response.status_code == 204:
return {}
if verb.lower() == 'options':
return ''
if 200 <= response.status_code < 300:
if _content_type_from_response(response).endswith(('/json', '+json')):
if verb.lower() == 'head':
return ''
try:
return response.json()
except ValueError as exc:
raise APIError(exc.args[0])
if response.status_code in (401, 403):
raise AuthenticationError(response.content)
if response.status_code in (400, 404):
raise InvalidRequestError(response.content)
if response.status_code == 429:
raise ThrottledRequestError(response)
# Not json or unknown status code.
raise APIError(response.content)
while True:
try:
return _request()
except ThrottledRequestError as e:
if retry_throttled:
logger.debug('Throttled, retry in {0} seconds'.format(e.wait))
time.sleep(e.wait)
retry_throttled -= 1
continue
else:
raise
|
[
"Makes",
"REST",
"API",
"request",
"and",
"returns",
"response",
"as",
"dict",
"."
] |
uploadcare/pyuploadcare
|
python
|
https://github.com/uploadcare/pyuploadcare/blob/cefddc0306133a71e37b18e8700df5948ef49b37/pyuploadcare/api.py#L68-L204
|
[
"def",
"rest_request",
"(",
"verb",
",",
"path",
",",
"data",
"=",
"None",
",",
"timeout",
"=",
"conf",
".",
"DEFAULT",
",",
"retry_throttled",
"=",
"conf",
".",
"DEFAULT",
")",
":",
"if",
"retry_throttled",
"is",
"conf",
".",
"DEFAULT",
":",
"retry_throttled",
"=",
"conf",
".",
"retry_throttled",
"path",
"=",
"path",
".",
"lstrip",
"(",
"'/'",
")",
"url",
"=",
"urljoin",
"(",
"conf",
".",
"api_base",
",",
"path",
")",
"url_parts",
"=",
"urlsplit",
"(",
"url",
")",
"if",
"url_parts",
".",
"query",
":",
"path",
"=",
"url_parts",
".",
"path",
"+",
"'?'",
"+",
"url_parts",
".",
"query",
"else",
":",
"path",
"=",
"url_parts",
".",
"path",
"content",
"=",
"''",
"if",
"data",
"is",
"not",
"None",
":",
"content",
"=",
"json",
".",
"dumps",
"(",
"data",
")",
"content_type",
"=",
"'application/json'",
"content_md5",
"=",
"hashlib",
".",
"md5",
"(",
"content",
".",
"encode",
"(",
"'utf-8'",
")",
")",
".",
"hexdigest",
"(",
")",
"def",
"_request",
"(",
")",
":",
"date",
"=",
"email",
".",
"utils",
".",
"formatdate",
"(",
"usegmt",
"=",
"True",
")",
"sign_string",
"=",
"'\\n'",
".",
"join",
"(",
"[",
"verb",
",",
"content_md5",
",",
"content_type",
",",
"date",
",",
"path",
",",
"]",
")",
"sign_string_as_bytes",
"=",
"sign_string",
".",
"encode",
"(",
"'utf-8'",
")",
"try",
":",
"secret_as_bytes",
"=",
"conf",
".",
"secret",
".",
"encode",
"(",
"'utf-8'",
")",
"except",
"AttributeError",
":",
"secret_as_bytes",
"=",
"bytes",
"(",
")",
"sign",
"=",
"hmac",
".",
"new",
"(",
"secret_as_bytes",
",",
"sign_string_as_bytes",
",",
"hashlib",
".",
"sha1",
")",
".",
"hexdigest",
"(",
")",
"headers",
"=",
"{",
"'Authorization'",
":",
"'Uploadcare {0}:{1}'",
".",
"format",
"(",
"conf",
".",
"pub_key",
",",
"sign",
")",
",",
"'Date'",
":",
"date",
",",
"'Content-Type'",
":",
"content_type",
",",
"'Accept'",
":",
"'application/vnd.uploadcare-v{0}+json'",
".",
"format",
"(",
"conf",
".",
"api_version",
")",
",",
"'User-Agent'",
":",
"_build_user_agent",
"(",
")",
",",
"}",
"logger",
".",
"debug",
"(",
"'''sent:\n verb: {0}\n path: {1}\n headers: {2}\n data: {3}'''",
".",
"format",
"(",
"verb",
",",
"path",
",",
"headers",
",",
"content",
")",
")",
"try",
":",
"response",
"=",
"session",
".",
"request",
"(",
"verb",
",",
"url",
",",
"allow_redirects",
"=",
"True",
",",
"verify",
"=",
"conf",
".",
"verify_api_ssl",
",",
"headers",
"=",
"headers",
",",
"data",
"=",
"content",
",",
"timeout",
"=",
"_get_timeout",
"(",
"timeout",
")",
")",
"except",
"requests",
".",
"RequestException",
"as",
"exc",
":",
"raise",
"APIConnectionError",
"(",
"exc",
".",
"args",
"[",
"0",
"]",
")",
"logger",
".",
"debug",
"(",
"'got: {0} {1}'",
".",
"format",
"(",
"response",
".",
"status_code",
",",
"response",
".",
"text",
")",
")",
"if",
"'warning'",
"in",
"response",
".",
"headers",
":",
"match",
"=",
"re",
".",
"search",
"(",
"'\"(.+)\"'",
",",
"response",
".",
"headers",
"[",
"'warning'",
"]",
")",
"if",
"match",
":",
"for",
"warning",
"in",
"match",
".",
"group",
"(",
"1",
")",
".",
"split",
"(",
"'; '",
")",
":",
"logger",
".",
"warn",
"(",
"'API Warning: {0}'",
".",
"format",
"(",
"warning",
")",
")",
"# No content.",
"if",
"response",
".",
"status_code",
"==",
"204",
":",
"return",
"{",
"}",
"if",
"verb",
".",
"lower",
"(",
")",
"==",
"'options'",
":",
"return",
"''",
"if",
"200",
"<=",
"response",
".",
"status_code",
"<",
"300",
":",
"if",
"_content_type_from_response",
"(",
"response",
")",
".",
"endswith",
"(",
"(",
"'/json'",
",",
"'+json'",
")",
")",
":",
"if",
"verb",
".",
"lower",
"(",
")",
"==",
"'head'",
":",
"return",
"''",
"try",
":",
"return",
"response",
".",
"json",
"(",
")",
"except",
"ValueError",
"as",
"exc",
":",
"raise",
"APIError",
"(",
"exc",
".",
"args",
"[",
"0",
"]",
")",
"if",
"response",
".",
"status_code",
"in",
"(",
"401",
",",
"403",
")",
":",
"raise",
"AuthenticationError",
"(",
"response",
".",
"content",
")",
"if",
"response",
".",
"status_code",
"in",
"(",
"400",
",",
"404",
")",
":",
"raise",
"InvalidRequestError",
"(",
"response",
".",
"content",
")",
"if",
"response",
".",
"status_code",
"==",
"429",
":",
"raise",
"ThrottledRequestError",
"(",
"response",
")",
"# Not json or unknown status code.",
"raise",
"APIError",
"(",
"response",
".",
"content",
")",
"while",
"True",
":",
"try",
":",
"return",
"_request",
"(",
")",
"except",
"ThrottledRequestError",
"as",
"e",
":",
"if",
"retry_throttled",
":",
"logger",
".",
"debug",
"(",
"'Throttled, retry in {0} seconds'",
".",
"format",
"(",
"e",
".",
"wait",
")",
")",
"time",
".",
"sleep",
"(",
"e",
".",
"wait",
")",
"retry_throttled",
"-=",
"1",
"continue",
"else",
":",
"raise"
] |
cefddc0306133a71e37b18e8700df5948ef49b37
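The signing step inside ``_request`` above is easy to sanity-check in isolation, since it only uses stdlib modules. A minimal sketch; the secret, public key, and request line are made-up values, and the path deliberately keeps its leading slash and query string, matching how the function rebuilds it from ``urlsplit``.

import email.utils
import hashlib
import hmac

secret = b'demosecretkey'                                  # hypothetical secret key
content_md5 = hashlib.md5(''.encode('utf-8')).hexdigest()  # empty body, as for GET
date = email.utils.formatdate(usegmt=True)
sign_string = '\n'.join(
    ['GET', content_md5, 'application/json', date, '/files/?limit=10'])
sign = hmac.new(secret, sign_string.encode('utf-8'), hashlib.sha1).hexdigest()
print('Authorization: Uploadcare demopublickey:{0}'.format(sign))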
|
test
|
uploading_request
|
Makes Uploading API request and returns response as ``dict``.
It takes settings from ``conf`` module.
Make sure that given ``path`` does not contain leading slash.
Usage example::
>>> file_obj = open('photo.jpg', 'rb')
>>> uploading_request('POST', 'base/', files={'file': file_obj})
{
'file': '9b9f4483-77b8-40ae-a198-272ba6280004'
}
>>> File('9b9f4483-77b8-40ae-a198-272ba6280004')
|
pyuploadcare/api.py
|
def uploading_request(verb, path, data=None, files=None, timeout=conf.DEFAULT):
"""Makes Uploading API request and returns response as ``dict``.
It takes settings from ``conf`` module.
Make sure that given ``path`` does not contain leading slash.
Usage example::
>>> file_obj = open('photo.jpg', 'rb')
>>> uploading_request('POST', 'base/', files={'file': file_obj})
{
'file': '9b9f4483-77b8-40ae-a198-272ba6280004'
}
>>> File('9b9f4483-77b8-40ae-a198-272ba6280004')
"""
path = path.lstrip('/')
url = urljoin(conf.upload_base, path)
if data is None:
data = {}
data['pub_key'] = conf.pub_key
data['UPLOADCARE_PUB_KEY'] = conf.pub_key
headers = {
'User-Agent': _build_user_agent(),
}
try:
response = session.request(
str(verb), url, allow_redirects=True,
verify=conf.verify_upload_ssl, data=data, files=files,
headers=headers, timeout=_get_timeout(timeout),
)
except requests.RequestException as exc:
raise APIConnectionError(exc.args[0])
# No content.
if response.status_code == 204:
return {}
if 200 <= response.status_code < 300:
if _content_type_from_response(response).endswith(('/json', '+json')):
try:
return response.json()
except ValueError as exc:
raise APIError(exc.args[0])
if response.status_code in (400, 404):
raise InvalidRequestError(response.content)
# Not json or unknown status code.
raise APIError(response.content)
|
def uploading_request(verb, path, data=None, files=None, timeout=conf.DEFAULT):
"""Makes Uploading API request and returns response as ``dict``.
It takes settings from ``conf`` module.
Make sure that given ``path`` does not contain leading slash.
Usage example::
>>> file_obj = open('photo.jpg', 'rb')
>>> uploading_request('POST', 'base/', files={'file': file_obj})
{
'file': '9b9f4483-77b8-40ae-a198-272ba6280004'
}
>>> File('9b9f4483-77b8-40ae-a198-272ba6280004')
"""
path = path.lstrip('/')
url = urljoin(conf.upload_base, path)
if data is None:
data = {}
data['pub_key'] = conf.pub_key
data['UPLOADCARE_PUB_KEY'] = conf.pub_key
headers = {
'User-Agent': _build_user_agent(),
}
try:
response = session.request(
str(verb), url, allow_redirects=True,
verify=conf.verify_upload_ssl, data=data, files=files,
headers=headers, timeout=_get_timeout(timeout),
)
except requests.RequestException as exc:
raise APIConnectionError(exc.args[0])
# No content.
if response.status_code == 204:
return {}
if 200 <= response.status_code < 300:
if _content_type_from_response(response).endswith(('/json', '+json')):
try:
return response.json()
except ValueError as exc:
raise APIError(exc.args[0])
if response.status_code in (400, 404):
raise InvalidRequestError(response.content)
# Not json or unknown status code.
raise APIError(response.content)
|
[
"Makes",
"Uploading",
"API",
"request",
"and",
"returns",
"response",
"as",
"dict",
"."
] |
uploadcare/pyuploadcare
|
python
|
https://github.com/uploadcare/pyuploadcare/blob/cefddc0306133a71e37b18e8700df5948ef49b37/pyuploadcare/api.py#L207-L260
|
[
"def",
"uploading_request",
"(",
"verb",
",",
"path",
",",
"data",
"=",
"None",
",",
"files",
"=",
"None",
",",
"timeout",
"=",
"conf",
".",
"DEFAULT",
")",
":",
"path",
"=",
"path",
".",
"lstrip",
"(",
"'/'",
")",
"url",
"=",
"urljoin",
"(",
"conf",
".",
"upload_base",
",",
"path",
")",
"if",
"data",
"is",
"None",
":",
"data",
"=",
"{",
"}",
"data",
"[",
"'pub_key'",
"]",
"=",
"conf",
".",
"pub_key",
"data",
"[",
"'UPLOADCARE_PUB_KEY'",
"]",
"=",
"conf",
".",
"pub_key",
"headers",
"=",
"{",
"'User-Agent'",
":",
"_build_user_agent",
"(",
")",
",",
"}",
"try",
":",
"response",
"=",
"session",
".",
"request",
"(",
"str",
"(",
"verb",
")",
",",
"url",
",",
"allow_redirects",
"=",
"True",
",",
"verify",
"=",
"conf",
".",
"verify_upload_ssl",
",",
"data",
"=",
"data",
",",
"files",
"=",
"files",
",",
"headers",
"=",
"headers",
",",
"timeout",
"=",
"_get_timeout",
"(",
"timeout",
")",
",",
")",
"except",
"requests",
".",
"RequestException",
"as",
"exc",
":",
"raise",
"APIConnectionError",
"(",
"exc",
".",
"args",
"[",
"0",
"]",
")",
"# No content.",
"if",
"response",
".",
"status_code",
"==",
"204",
":",
"return",
"{",
"}",
"if",
"200",
"<=",
"response",
".",
"status_code",
"<",
"300",
":",
"if",
"_content_type_from_response",
"(",
"response",
")",
".",
"endswith",
"(",
"(",
"'/json'",
",",
"'+json'",
")",
")",
":",
"try",
":",
"return",
"response",
".",
"json",
"(",
")",
"except",
"ValueError",
"as",
"exc",
":",
"raise",
"APIError",
"(",
"exc",
".",
"args",
"[",
"0",
"]",
")",
"if",
"response",
".",
"status_code",
"in",
"(",
"400",
",",
"404",
")",
":",
"raise",
"InvalidRequestError",
"(",
"response",
".",
"content",
")",
"# Not json or unknown status code.",
"raise",
"APIError",
"(",
"response",
".",
"content",
")"
] |
cefddc0306133a71e37b18e8700df5948ef49b37
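A hedged usage sketch for the record above, catching the exception types the function itself raises; the exceptions import path and the file name are assumptions.

from pyuploadcare.api import uploading_request
from pyuploadcare.exceptions import (              # assumed module path
    APIConnectionError, APIError, InvalidRequestError)

try:
    with open('photo.jpg', 'rb') as file_obj:      # hypothetical local file
        result = uploading_request('POST', 'base/', files={'file': file_obj})
    print(result['file'])                          # the new file UUID
except (APIConnectionError, InvalidRequestError, APIError) as exc:
    print('upload failed: {0}'.format(exc))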
|
test
|
Api.home_mode_set_state
|
Set the state of Home Mode
|
synology/api.py
|
def home_mode_set_state(self, state, **kwargs):
"""Set the state of Home Mode"""
# It appears that surveillance station needs lowercase text
# true/false for the on switch
if state not in (HOME_MODE_ON, HOME_MODE_OFF):
raise ValueError('Invalid home mode state')
api = self._api_info['home_mode']
payload = dict({
'api': api['name'],
'method': 'Switch',
'version': api['version'],
'on': state,
'_sid': self._sid,
}, **kwargs)
response = self._get_json_with_retry(api['url'], payload)
if response['success']:
return True
return False
|
def home_mode_set_state(self, state, **kwargs):
"""Set the state of Home Mode"""
# It appears that surveillance station needs lowercase text
# true/false for the on switch
if state not in (HOME_MODE_ON, HOME_MODE_OFF):
raise ValueError('Invalid home mode state')
api = self._api_info['home_mode']
payload = dict({
'api': api['name'],
'method': 'Switch',
'version': api['version'],
'on': state,
'_sid': self._sid,
}, **kwargs)
response = self._get_json_with_retry(api['url'], payload)
if response['success']:
return True
return False
|
[
"Set",
"the",
"state",
"of",
"Home",
"Mode"
] |
snjoetw/py-synology
|
python
|
https://github.com/snjoetw/py-synology/blob/4f7eb0a3a9f86c24ad65993802e6fb11fbaa1f7f/synology/api.py#L99-L120
|
[
"def",
"home_mode_set_state",
"(",
"self",
",",
"state",
",",
"*",
"*",
"kwargs",
")",
":",
"# It appears that surveillance station needs lowercase text",
"# true/false for the on switch",
"if",
"state",
"not",
"in",
"(",
"HOME_MODE_ON",
",",
"HOME_MODE_OFF",
")",
":",
"raise",
"ValueError",
"(",
"'Invalid home mode state'",
")",
"api",
"=",
"self",
".",
"_api_info",
"[",
"'home_mode'",
"]",
"payload",
"=",
"dict",
"(",
"{",
"'api'",
":",
"api",
"[",
"'name'",
"]",
",",
"'method'",
":",
"'Switch'",
",",
"'version'",
":",
"api",
"[",
"'version'",
"]",
",",
"'on'",
":",
"state",
",",
"'_sid'",
":",
"self",
".",
"_sid",
",",
"}",
",",
"*",
"*",
"kwargs",
")",
"response",
"=",
"self",
".",
"_get_json_with_retry",
"(",
"api",
"[",
"'url'",
"]",
",",
"payload",
")",
"if",
"response",
"[",
"'success'",
"]",
":",
"return",
"True",
"return",
"False"
] |
4f7eb0a3a9f86c24ad65993802e6fb11fbaa1f7f
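A sketch pairing this record with ``home_mode_status`` below. The constant values follow the lowercase true/false hint in the code comment, and the ``Api`` constructor arguments are hypothetical.

from synology.api import Api                  # module path taken from the record

HOME_MODE_ON = 'true'                         # assumed values, per the comment above
HOME_MODE_OFF = 'false'

api = Api('https://synology.local:5001', 'account', 'password')   # hypothetical setup
if api.home_mode_set_state(HOME_MODE_ON):
    print('home mode on?', api.home_mode_status())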
|
test
|
Api.home_mode_status
|
Returns the status of Home Mode
|
synology/api.py
|
def home_mode_status(self, **kwargs):
"""Returns the status of Home Mode"""
api = self._api_info['home_mode']
payload = dict({
'api': api['name'],
'method': 'GetInfo',
'version': api['version'],
'_sid': self._sid
}, **kwargs)
response = self._get_json_with_retry(api['url'], payload)
return response['data']['on']
|
def home_mode_status(self, **kwargs):
"""Returns the status of Home Mode"""
api = self._api_info['home_mode']
payload = dict({
'api': api['name'],
'method': 'GetInfo',
'version': api['version'],
'_sid': self._sid
}, **kwargs)
response = self._get_json_with_retry(api['url'], payload)
return response['data']['on']
|
[
"Returns",
"the",
"status",
"of",
"Home",
"Mode"
] |
snjoetw/py-synology
|
python
|
https://github.com/snjoetw/py-synology/blob/4f7eb0a3a9f86c24ad65993802e6fb11fbaa1f7f/synology/api.py#L122-L133
|
[
"def",
"home_mode_status",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"api",
"=",
"self",
".",
"_api_info",
"[",
"'home_mode'",
"]",
"payload",
"=",
"dict",
"(",
"{",
"'api'",
":",
"api",
"[",
"'name'",
"]",
",",
"'method'",
":",
"'GetInfo'",
",",
"'version'",
":",
"api",
"[",
"'version'",
"]",
",",
"'_sid'",
":",
"self",
".",
"_sid",
"}",
",",
"*",
"*",
"kwargs",
")",
"response",
"=",
"self",
".",
"_get_json_with_retry",
"(",
"api",
"[",
"'url'",
"]",
",",
"payload",
")",
"return",
"response",
"[",
"'data'",
"]",
"[",
"'on'",
"]"
] |
4f7eb0a3a9f86c24ad65993802e6fb11fbaa1f7f
|
test
|
Api.camera_list
|
Return a list of cameras.
|
synology/api.py
|
def camera_list(self, **kwargs):
"""Return a list of cameras."""
api = self._api_info['camera']
payload = dict({
'_sid': self._sid,
'api': api['name'],
'method': 'List',
'version': api['version'],
}, **kwargs)
response = self._get_json_with_retry(api['url'], payload)
cameras = []
for data in response['data']['cameras']:
cameras.append(Camera(data, self._video_stream_url))
return cameras
|
def camera_list(self, **kwargs):
"""Return a list of cameras."""
api = self._api_info['camera']
payload = dict({
'_sid': self._sid,
'api': api['name'],
'method': 'List',
'version': api['version'],
}, **kwargs)
response = self._get_json_with_retry(api['url'], payload)
cameras = []
for data in response['data']['cameras']:
cameras.append(Camera(data, self._video_stream_url))
return cameras
|
[
"Return",
"a",
"list",
"of",
"cameras",
"."
] |
snjoetw/py-synology
|
python
|
https://github.com/snjoetw/py-synology/blob/4f7eb0a3a9f86c24ad65993802e6fb11fbaa1f7f/synology/api.py#L135-L151
|
[
"def",
"camera_list",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"api",
"=",
"self",
".",
"_api_info",
"[",
"'camera'",
"]",
"payload",
"=",
"dict",
"(",
"{",
"'_sid'",
":",
"self",
".",
"_sid",
",",
"'api'",
":",
"api",
"[",
"'name'",
"]",
",",
"'method'",
":",
"'List'",
",",
"'version'",
":",
"api",
"[",
"'version'",
"]",
",",
"}",
",",
"*",
"*",
"kwargs",
")",
"response",
"=",
"self",
".",
"_get_json_with_retry",
"(",
"api",
"[",
"'url'",
"]",
",",
"payload",
")",
"cameras",
"=",
"[",
"]",
"for",
"data",
"in",
"response",
"[",
"'data'",
"]",
"[",
"'cameras'",
"]",
":",
"cameras",
".",
"append",
"(",
"Camera",
"(",
"data",
",",
"self",
".",
"_video_stream_url",
")",
")",
"return",
"cameras"
] |
4f7eb0a3a9f86c24ad65993802e6fb11fbaa1f7f
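A brief usage sketch; ``api`` is assumed to be an authenticated ``Api`` instance as in the Home Mode example above, and ``Camera`` exposing ``camera_id`` is consistent with how later records use it.

for camera in api.camera_list():
    print(camera.camera_id)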
|
test
|
Api.camera_info
|
Return a list of cameras matching camera_ids.
|
synology/api.py
|
def camera_info(self, camera_ids, **kwargs):
"""Return a list of cameras matching camera_ids."""
api = self._api_info['camera']
payload = dict({
'_sid': self._sid,
'api': api['name'],
'method': 'GetInfo',
'version': api['version'],
'cameraIds': ', '.join(str(id) for id in camera_ids),
}, **kwargs)
response = self._get_json_with_retry(api['url'], payload)
cameras = []
for data in response['data']['cameras']:
cameras.append(Camera(data, self._video_stream_url))
return cameras
|
def camera_info(self, camera_ids, **kwargs):
"""Return a list of cameras matching camera_ids."""
api = self._api_info['camera']
payload = dict({
'_sid': self._sid,
'api': api['name'],
'method': 'GetInfo',
'version': api['version'],
'cameraIds': ', '.join(str(id) for id in camera_ids),
}, **kwargs)
response = self._get_json_with_retry(api['url'], payload)
cameras = []
for data in response['data']['cameras']:
cameras.append(Camera(data, self._video_stream_url))
return cameras
|
[
"Return",
"a",
"list",
"of",
"cameras",
"matching",
"camera_ids",
"."
] |
snjoetw/py-synology
|
python
|
https://github.com/snjoetw/py-synology/blob/4f7eb0a3a9f86c24ad65993802e6fb11fbaa1f7f/synology/api.py#L153-L170
|
[
"def",
"camera_info",
"(",
"self",
",",
"camera_ids",
",",
"*",
"*",
"kwargs",
")",
":",
"api",
"=",
"self",
".",
"_api_info",
"[",
"'camera'",
"]",
"payload",
"=",
"dict",
"(",
"{",
"'_sid'",
":",
"self",
".",
"_sid",
",",
"'api'",
":",
"api",
"[",
"'name'",
"]",
",",
"'method'",
":",
"'GetInfo'",
",",
"'version'",
":",
"api",
"[",
"'version'",
"]",
",",
"'cameraIds'",
":",
"', '",
".",
"join",
"(",
"str",
"(",
"id",
")",
"for",
"id",
"in",
"camera_ids",
")",
",",
"}",
",",
"*",
"*",
"kwargs",
")",
"response",
"=",
"self",
".",
"_get_json_with_retry",
"(",
"api",
"[",
"'url'",
"]",
",",
"payload",
")",
"cameras",
"=",
"[",
"]",
"for",
"data",
"in",
"response",
"[",
"'data'",
"]",
"[",
"'cameras'",
"]",
":",
"cameras",
".",
"append",
"(",
"Camera",
"(",
"data",
",",
"self",
".",
"_video_stream_url",
")",
")",
"return",
"cameras"
] |
4f7eb0a3a9f86c24ad65993802e6fb11fbaa1f7f
|
test
|
Api.camera_snapshot
|
Return bytes of camera image.
|
synology/api.py
|
def camera_snapshot(self, camera_id, **kwargs):
"""Return bytes of camera image."""
api = self._api_info['camera']
payload = dict({
'_sid': self._sid,
'api': api['name'],
'method': 'GetSnapshot',
'version': api['version'],
'cameraId': camera_id,
}, **kwargs)
response = self._get(api['url'], payload)
return response.content
|
def camera_snapshot(self, camera_id, **kwargs):
"""Return bytes of camera image."""
api = self._api_info['camera']
payload = dict({
'_sid': self._sid,
'api': api['name'],
'method': 'GetSnapshot',
'version': api['version'],
'cameraId': camera_id,
}, **kwargs)
response = self._get(api['url'], payload)
return response.content
|
[
"Return",
"bytes",
"of",
"camera",
"image",
"."
] |
snjoetw/py-synology
|
python
|
https://github.com/snjoetw/py-synology/blob/4f7eb0a3a9f86c24ad65993802e6fb11fbaa1f7f/synology/api.py#L172-L184
|
[
"def",
"camera_snapshot",
"(",
"self",
",",
"camera_id",
",",
"*",
"*",
"kwargs",
")",
":",
"api",
"=",
"self",
".",
"_api_info",
"[",
"'camera'",
"]",
"payload",
"=",
"dict",
"(",
"{",
"'_sid'",
":",
"self",
".",
"_sid",
",",
"'api'",
":",
"api",
"[",
"'name'",
"]",
",",
"'method'",
":",
"'GetSnapshot'",
",",
"'version'",
":",
"api",
"[",
"'version'",
"]",
",",
"'cameraId'",
":",
"camera_id",
",",
"}",
",",
"*",
"*",
"kwargs",
")",
"response",
"=",
"self",
".",
"_get",
"(",
"api",
"[",
"'url'",
"]",
",",
"payload",
")",
"return",
"response",
".",
"content"
] |
4f7eb0a3a9f86c24ad65993802e6fb11fbaa1f7f
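Because ``camera_snapshot`` returns raw bytes, persisting a frame needs only one line of glue. A sketch; ``api`` and the camera id are assumptions.

jpeg_bytes = api.camera_snapshot(camera_id=1)
with open('snapshot.jpg', 'wb') as fh:        # the payload is typically a JPEG frame
    fh.write(jpeg_bytes)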
|
test
|
Api.camera_disable
|
Disable camera.
|
synology/api.py
|
def camera_disable(self, camera_id, **kwargs):
"""Disable camera."""
api = self._api_info['camera']
payload = dict({
'_sid': self._sid,
'api': api['name'],
'method': 'Disable',
'version': 9,
'idList': camera_id,
}, **kwargs)
print(api['url'])
print(payload)
    response = self._get_json_with_retry(api['url'], payload)
return response['success']
|
def camera_disable(self, camera_id, **kwargs):
"""Disable camera."""
api = self._api_info['camera']
payload = dict({
'_sid': self._sid,
'api': api['name'],
'method': 'Disable',
'version': 9,
'idList': camera_id,
}, **kwargs)
print(api['url'])
print(payload)
    response = self._get_json_with_retry(api['url'], payload)
return response['success']
|
[
"Disable",
"camera",
"."
] |
snjoetw/py-synology
|
python
|
https://github.com/snjoetw/py-synology/blob/4f7eb0a3a9f86c24ad65993802e6fb11fbaa1f7f/synology/api.py#L186-L200
|
[
"def",
"camera_disable",
"(",
"self",
",",
"camera_id",
",",
"*",
"*",
"kwargs",
")",
":",
"api",
"=",
"self",
".",
"_api_info",
"[",
"'camera'",
"]",
"payload",
"=",
"dict",
"(",
"{",
"'_sid'",
":",
"self",
".",
"_sid",
",",
"'api'",
":",
"api",
"[",
"'name'",
"]",
",",
"'method'",
":",
"'Disable'",
",",
"'version'",
":",
"9",
",",
"'idList'",
":",
"camera_id",
",",
"}",
",",
"*",
"*",
"kwargs",
")",
"print",
"(",
"api",
"[",
"'url'",
"]",
")",
"print",
"(",
"payload",
")",
"response",
"=",
"self",
".",
"_get",
"(",
"api",
"[",
"'url'",
"]",
",",
"payload",
")",
"return",
"response",
"[",
"'success'",
"]"
] |
4f7eb0a3a9f86c24ad65993802e6fb11fbaa1f7f
|
test
|
Api.camera_event_motion_enum
|
Return motion settings matching camera_id.
|
synology/api.py
|
def camera_event_motion_enum(self, camera_id, **kwargs):
"""Return motion settings matching camera_id."""
api = self._api_info['camera_event']
payload = dict({
'_sid': self._sid,
'api': api['name'],
'method': 'MotionEnum',
'version': api['version'],
'camId': camera_id,
}, **kwargs)
response = self._get_json_with_retry(api['url'], payload)
return MotionSetting(camera_id, response['data']['MDParam'])
|
def camera_event_motion_enum(self, camera_id, **kwargs):
"""Return motion settings matching camera_id."""
api = self._api_info['camera_event']
payload = dict({
'_sid': self._sid,
'api': api['name'],
'method': 'MotionEnum',
'version': api['version'],
'camId': camera_id,
}, **kwargs)
response = self._get_json_with_retry(api['url'], payload)
return MotionSetting(camera_id, response['data']['MDParam'])
|
[
"Return",
"motion",
"settings",
"matching",
"camera_id",
"."
] |
snjoetw/py-synology
|
python
|
https://github.com/snjoetw/py-synology/blob/4f7eb0a3a9f86c24ad65993802e6fb11fbaa1f7f/synology/api.py#L216-L228
|
[
"def",
"camera_event_motion_enum",
"(",
"self",
",",
"camera_id",
",",
"*",
"*",
"kwargs",
")",
":",
"api",
"=",
"self",
".",
"_api_info",
"[",
"'camera_event'",
"]",
"payload",
"=",
"dict",
"(",
"{",
"'_sid'",
":",
"self",
".",
"_sid",
",",
"'api'",
":",
"api",
"[",
"'name'",
"]",
",",
"'method'",
":",
"'MotionEnum'",
",",
"'version'",
":",
"api",
"[",
"'version'",
"]",
",",
"'camId'",
":",
"camera_id",
",",
"}",
",",
"*",
"*",
"kwargs",
")",
"response",
"=",
"self",
".",
"_get_json_with_retry",
"(",
"api",
"[",
"'url'",
"]",
",",
"payload",
")",
"return",
"MotionSetting",
"(",
"camera_id",
",",
"response",
"[",
"'data'",
"]",
"[",
"'MDParam'",
"]",
")"
] |
4f7eb0a3a9f86c24ad65993802e6fb11fbaa1f7f
|
test
|
Api.camera_event_md_param_save
|
Update motion settings matching camera_id with keyword args.
|
synology/api.py
|
def camera_event_md_param_save(self, camera_id, **kwargs):
"""Update motion settings matching camera_id with keyword args."""
api = self._api_info['camera_event']
payload = dict({
'_sid': self._sid,
'api': api['name'],
'method': 'MDParamSave',
'version': api['version'],
'camId': camera_id,
}, **kwargs)
response = self._get_json_with_retry(api['url'], payload)
return response['data']['camId']
|
def camera_event_md_param_save(self, camera_id, **kwargs):
"""Update motion settings matching camera_id with keyword args."""
api = self._api_info['camera_event']
payload = dict({
'_sid': self._sid,
'api': api['name'],
'method': 'MDParamSave',
'version': api['version'],
'camId': camera_id,
}, **kwargs)
response = self._get_json_with_retry(api['url'], payload)
return response['data']['camId']
|
[
"Update",
"motion",
"settings",
"matching",
"camera_id",
"with",
"keyword",
"args",
"."
] |
snjoetw/py-synology
|
python
|
https://github.com/snjoetw/py-synology/blob/4f7eb0a3a9f86c24ad65993802e6fb11fbaa1f7f/synology/api.py#L230-L242
|
[
"def",
"camera_event_md_param_save",
"(",
"self",
",",
"camera_id",
",",
"*",
"*",
"kwargs",
")",
":",
"api",
"=",
"self",
".",
"_api_info",
"[",
"'camera_event'",
"]",
"payload",
"=",
"dict",
"(",
"{",
"'_sid'",
":",
"self",
".",
"_sid",
",",
"'api'",
":",
"api",
"[",
"'name'",
"]",
",",
"'method'",
":",
"'MDParamSave'",
",",
"'version'",
":",
"api",
"[",
"'version'",
"]",
",",
"'camId'",
":",
"camera_id",
",",
"}",
",",
"*",
"*",
"kwargs",
")",
"response",
"=",
"self",
".",
"_get_json_with_retry",
"(",
"api",
"[",
"'url'",
"]",
",",
"payload",
")",
"return",
"response",
"[",
"'data'",
"]",
"[",
"'camId'",
"]"
] |
4f7eb0a3a9f86c24ad65993802e6fb11fbaa1f7f
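The two ``camera_event`` records above are the read and write halves of the same settings. A round-trip sketch; the extra keyword passed to ``MDParamSave`` is hypothetical, since the real parameter names come from the ``MDParam`` payload.

setting = api.camera_event_motion_enum(camera_id=1)          # -> MotionSetting
saved_cam_id = api.camera_event_md_param_save(camera_id=1,
                                              source=1)      # hypothetical kwarg
print(setting, saved_cam_id)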
|
test
|
SurveillanceStation.update
|
Update cameras and motion settings with latest from API.
|
synology/surveillance_station.py
|
def update(self):
"""Update cameras and motion settings with latest from API."""
cameras = self._api.camera_list()
self._cameras_by_id = {v.camera_id: v for i, v in enumerate(cameras)}
motion_settings = []
for camera_id in self._cameras_by_id.keys():
motion_setting = self._api.camera_event_motion_enum(camera_id)
motion_settings.append(motion_setting)
self._motion_settings_by_id = {
v.camera_id: v for i, v in enumerate(motion_settings)}
|
def update(self):
"""Update cameras and motion settings with latest from API."""
cameras = self._api.camera_list()
self._cameras_by_id = {v.camera_id: v for i, v in enumerate(cameras)}
motion_settings = []
for camera_id in self._cameras_by_id.keys():
motion_setting = self._api.camera_event_motion_enum(camera_id)
motion_settings.append(motion_setting)
self._motion_settings_by_id = {
v.camera_id: v for i, v in enumerate(motion_settings)}
|
[
"Update",
"cameras",
"and",
"motion",
"settings",
"with",
"latest",
"from",
"API",
"."
] |
snjoetw/py-synology
|
python
|
https://github.com/snjoetw/py-synology/blob/4f7eb0a3a9f86c24ad65993802e6fb11fbaa1f7f/synology/surveillance_station.py#L19-L30
|
[
"def",
"update",
"(",
"self",
")",
":",
"cameras",
"=",
"self",
".",
"_api",
".",
"camera_list",
"(",
")",
"self",
".",
"_cameras_by_id",
"=",
"{",
"v",
".",
"camera_id",
":",
"v",
"for",
"i",
",",
"v",
"in",
"enumerate",
"(",
"cameras",
")",
"}",
"motion_settings",
"=",
"[",
"]",
"for",
"camera_id",
"in",
"self",
".",
"_cameras_by_id",
".",
"keys",
"(",
")",
":",
"motion_setting",
"=",
"self",
".",
"_api",
".",
"camera_event_motion_enum",
"(",
"camera_id",
")",
"motion_settings",
".",
"append",
"(",
"motion_setting",
")",
"self",
".",
"_motion_settings_by_id",
"=",
"{",
"v",
".",
"camera_id",
":",
"v",
"for",
"i",
",",
"v",
"in",
"enumerate",
"(",
"motion_settings",
")",
"}"
] |
4f7eb0a3a9f86c24ad65993802e6fb11fbaa1f7f
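One small stylistic note on ``update`` above: both dict comprehensions iterate with ``enumerate`` but never use the index ``i``. A behavior-identical sketch with a stand-in type:

from collections import namedtuple

Camera = namedtuple('Camera', 'camera_id')          # stand-in for synology's Camera class
cameras = [Camera(1), Camera(2)]
cameras_by_id = {c.camera_id: c for c in cameras}   # same mapping, no unused index
print(cameras_by_id)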
|
test
|
SurveillanceStation.set_home_mode
|
Set the state of Home Mode
|
synology/surveillance_station.py
|
def set_home_mode(self, state):
"""Set the state of Home Mode"""
state_parameter = HOME_MODE_OFF
if state:
state_parameter = HOME_MODE_ON
return self._api.home_mode_set_state(state_parameter)
|
def set_home_mode(self, state):
"""Set the state of Home Mode"""
state_parameter = HOME_MODE_OFF
if state:
state_parameter = HOME_MODE_ON
return self._api.home_mode_set_state(state_parameter)
|
[
"Set",
"the",
"state",
"of",
"Home",
"Mode"
] |
snjoetw/py-synology
|
python
|
https://github.com/snjoetw/py-synology/blob/4f7eb0a3a9f86c24ad65993802e6fb11fbaa1f7f/synology/surveillance_station.py#L68-L73
|
[
"def",
"set_home_mode",
"(",
"self",
",",
"state",
")",
":",
"state_parameter",
"=",
"HOME_MODE_OFF",
"if",
"state",
":",
"state_parameter",
"=",
"HOME_MODE_ON",
"return",
"self",
".",
"_api",
".",
"home_mode_set_state",
"(",
"state_parameter",
")"
] |
4f7eb0a3a9f86c24ad65993802e6fb11fbaa1f7f
|
test
|
replace_ext
|
>>> replace_ext('one/two/three.four.doc', '.html')
'one/two/three.four.html'
>>> replace_ext('one/two/three.four.DOC', '.html')
'one/two/three.four.html'
>>> replace_ext('one/two/three.four.DOC', 'html')
'one/two/three.four.html'
|
docx2html/core.py
|
def replace_ext(file_path, new_ext):
"""
>>> replace_ext('one/two/three.four.doc', '.html')
'one/two/three.four.html'
>>> replace_ext('one/two/three.four.DOC', '.html')
'one/two/three.four.html'
>>> replace_ext('one/two/three.four.DOC', 'html')
'one/two/three.four.html'
"""
if not new_ext.startswith(os.extsep):
new_ext = os.extsep + new_ext
index = file_path.rfind(os.extsep)
return file_path[:index] + new_ext
|
def replace_ext(file_path, new_ext):
"""
>>> replace_ext('one/two/three.four.doc', '.html')
'one/two/three.four.html'
>>> replace_ext('one/two/three.four.DOC', '.html')
'one/two/three.four.html'
>>> replace_ext('one/two/three.four.DOC', 'html')
'one/two/three.four.html'
"""
if not new_ext.startswith(os.extsep):
new_ext = os.extsep + new_ext
index = file_path.rfind(os.extsep)
return file_path[:index] + new_ext
|
[
">>>",
"replace_ext",
"(",
"one",
"/",
"two",
"/",
"three",
".",
"four",
".",
"doc",
".",
"html",
")",
"one",
"/",
"two",
"/",
"three",
".",
"four",
".",
"html",
">>>",
"replace_ext",
"(",
"one",
"/",
"two",
"/",
"three",
".",
"four",
".",
"DOC",
".",
"html",
")",
"one",
"/",
"two",
"/",
"three",
".",
"four",
".",
"html",
">>>",
"replace_ext",
"(",
"one",
"/",
"two",
"/",
"three",
".",
"four",
".",
"DOC",
"html",
")",
"one",
"/",
"two",
"/",
"three",
".",
"four",
".",
"html"
] |
PolicyStat/docx2html
|
python
|
https://github.com/PolicyStat/docx2html/blob/2dc4afd1e3a3f2f0b357d0bff903eb58bcc94429/docx2html/core.py#L34-L46
|
[
"def",
"replace_ext",
"(",
"file_path",
",",
"new_ext",
")",
":",
"if",
"not",
"new_ext",
".",
"startswith",
"(",
"os",
".",
"extsep",
")",
":",
"new_ext",
"=",
"os",
".",
"extsep",
"+",
"new_ext",
"index",
"=",
"file_path",
".",
"rfind",
"(",
"os",
".",
"extsep",
")",
"return",
"file_path",
"[",
":",
"index",
"]",
"+",
"new_ext"
] |
2dc4afd1e3a3f2f0b357d0bff903eb58bcc94429
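One edge case worth noting in the record above: when ``file_path`` contains no extension separator at all, ``rfind`` returns -1 and the slice silently drops the last character ('one/two/three' becomes 'one/two/thre.html'). A guarded variant sketch:

import os

def replace_ext_safe(file_path, new_ext):
    # like the record above, but appends when no extsep is present
    if not new_ext.startswith(os.extsep):
        new_ext = os.extsep + new_ext
    index = file_path.rfind(os.extsep)
    if index == -1:
        return file_path + new_ext
    return file_path[:index] + new_ext

print(replace_ext_safe('one/two/three', 'html'))   # 'one/two/three.html'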
|
test
|
is_last_li
|
Determine if ``li`` is the last list item for a given list
|
docx2html/core.py
|
def is_last_li(li, meta_data, current_numId):
"""
Determine if ``li`` is the last list item for a given list
"""
if not is_li(li, meta_data):
return False
w_namespace = get_namespace(li, 'w')
next_el = li
while True:
# If we run out of element this must be the last list item
if next_el is None:
return True
next_el = next_el.getnext()
# Ignore elements that are not a list item
if not is_li(next_el, meta_data):
continue
new_numId = get_numId(next_el, w_namespace)
if current_numId != new_numId:
return True
# If we have gotten here then we have found another list item in the
# current list, so ``li`` is not the last li in the list.
return False
|
def is_last_li(li, meta_data, current_numId):
"""
Determine if ``li`` is the last list item for a given list
"""
if not is_li(li, meta_data):
return False
w_namespace = get_namespace(li, 'w')
next_el = li
while True:
# If we run out of element this must be the last list item
if next_el is None:
return True
next_el = next_el.getnext()
# Ignore elements that are not a list item
if not is_li(next_el, meta_data):
continue
new_numId = get_numId(next_el, w_namespace)
if current_numId != new_numId:
return True
# If we have gotten here then we have found another list item in the
# current list, so ``li`` is not the last li in the list.
return False
|
[
"Determine",
"if",
"li",
"is",
"the",
"last",
"list",
"item",
"for",
"a",
"given",
"list"
] |
PolicyStat/docx2html
|
python
|
https://github.com/PolicyStat/docx2html/blob/2dc4afd1e3a3f2f0b357d0bff903eb58bcc94429/docx2html/core.py#L266-L289
|
[
"def",
"is_last_li",
"(",
"li",
",",
"meta_data",
",",
"current_numId",
")",
":",
"if",
"not",
"is_li",
"(",
"li",
",",
"meta_data",
")",
":",
"return",
"False",
"w_namespace",
"=",
"get_namespace",
"(",
"li",
",",
"'w'",
")",
"next_el",
"=",
"li",
"while",
"True",
":",
"# If we run out of element this must be the last list item",
"if",
"next_el",
"is",
"None",
":",
"return",
"True",
"next_el",
"=",
"next_el",
".",
"getnext",
"(",
")",
"# Ignore elements that are not a list item",
"if",
"not",
"is_li",
"(",
"next_el",
",",
"meta_data",
")",
":",
"continue",
"new_numId",
"=",
"get_numId",
"(",
"next_el",
",",
"w_namespace",
")",
"if",
"current_numId",
"!=",
"new_numId",
":",
"return",
"True",
"# If we have gotten here then we have found another list item in the",
"# current list, so ``li`` is not the last li in the list.",
"return",
"False"
] |
2dc4afd1e3a3f2f0b357d0bff903eb58bcc94429
|
test
|
get_single_list_nodes_data
|
Find consecutive li tags that have content and share the same list id.
|
docx2html/core.py
|
def get_single_list_nodes_data(li, meta_data):
"""
    Find consecutive li tags that have content and share the same list id.
"""
yield li
w_namespace = get_namespace(li, 'w')
current_numId = get_numId(li, w_namespace)
starting_ilvl = get_ilvl(li, w_namespace)
el = li
while True:
el = el.getnext()
if el is None:
break
# If the tag has no content ignore it.
if not has_text(el):
continue
# Stop the lists if you come across a list item that should be a
# heading.
if _is_top_level_upper_roman(el, meta_data):
break
if (
is_li(el, meta_data) and
(starting_ilvl > get_ilvl(el, w_namespace))):
break
new_numId = get_numId(el, w_namespace)
if new_numId is None or new_numId == -1:
# Not a p tag or a list item
yield el
continue
        # If the list id of the next tag is different than the previous, that
        # means a new list is being made (not nested)
if current_numId != new_numId:
# Not a subsequent list.
break
if is_last_li(el, meta_data, current_numId):
yield el
break
yield el
|
def get_single_list_nodes_data(li, meta_data):
"""
    Find consecutive li tags that have content and share the same list id.
"""
yield li
w_namespace = get_namespace(li, 'w')
current_numId = get_numId(li, w_namespace)
starting_ilvl = get_ilvl(li, w_namespace)
el = li
while True:
el = el.getnext()
if el is None:
break
# If the tag has no content ignore it.
if not has_text(el):
continue
# Stop the lists if you come across a list item that should be a
# heading.
if _is_top_level_upper_roman(el, meta_data):
break
if (
is_li(el, meta_data) and
(starting_ilvl > get_ilvl(el, w_namespace))):
break
new_numId = get_numId(el, w_namespace)
if new_numId is None or new_numId == -1:
# Not a p tag or a list item
yield el
continue
        # If the list id of the next tag is different than the previous, that
        # means a new list is being made (not nested)
if current_numId != new_numId:
# Not a subsequent list.
break
if is_last_li(el, meta_data, current_numId):
yield el
break
yield el
|
[
"Find",
"consecutive",
"li",
"tags",
"that",
"have",
"content",
"that",
"have",
"the",
"same",
"list",
"id",
"."
] |
PolicyStat/docx2html
|
python
|
https://github.com/PolicyStat/docx2html/blob/2dc4afd1e3a3f2f0b357d0bff903eb58bcc94429/docx2html/core.py#L293-L333
|
[
"def",
"get_single_list_nodes_data",
"(",
"li",
",",
"meta_data",
")",
":",
"yield",
"li",
"w_namespace",
"=",
"get_namespace",
"(",
"li",
",",
"'w'",
")",
"current_numId",
"=",
"get_numId",
"(",
"li",
",",
"w_namespace",
")",
"starting_ilvl",
"=",
"get_ilvl",
"(",
"li",
",",
"w_namespace",
")",
"el",
"=",
"li",
"while",
"True",
":",
"el",
"=",
"el",
".",
"getnext",
"(",
")",
"if",
"el",
"is",
"None",
":",
"break",
"# If the tag has no content ignore it.",
"if",
"not",
"has_text",
"(",
"el",
")",
":",
"continue",
"# Stop the lists if you come across a list item that should be a",
"# heading.",
"if",
"_is_top_level_upper_roman",
"(",
"el",
",",
"meta_data",
")",
":",
"break",
"if",
"(",
"is_li",
"(",
"el",
",",
"meta_data",
")",
"and",
"(",
"starting_ilvl",
">",
"get_ilvl",
"(",
"el",
",",
"w_namespace",
")",
")",
")",
":",
"break",
"new_numId",
"=",
"get_numId",
"(",
"el",
",",
"w_namespace",
")",
"if",
"new_numId",
"is",
"None",
"or",
"new_numId",
"==",
"-",
"1",
":",
"# Not a p tag or a list item",
"yield",
"el",
"continue",
"# If the list id of the next tag is different that the previous that",
"# means a new list being made (not nested)",
"if",
"current_numId",
"!=",
"new_numId",
":",
"# Not a subsequent list.",
"break",
"if",
"is_last_li",
"(",
"el",
",",
"meta_data",
",",
"current_numId",
")",
":",
"yield",
"el",
"break",
"yield",
"el"
] |
2dc4afd1e3a3f2f0b357d0bff903eb58bcc94429
|
test
|
get_ilvl
|
The ilvl on an li tag tells the li tag at what level of indentation this
tag is at. This is used to determine if the li tag needs to be nested or
not.
|
docx2html/core.py
|
def get_ilvl(li, w_namespace):
"""
The ilvl on an li tag tells the li tag at what level of indentation this
tag is at. This is used to determine if the li tag needs to be nested or
not.
"""
ilvls = li.xpath('.//w:ilvl', namespaces=li.nsmap)
if len(ilvls) == 0:
return -1
return int(ilvls[0].get('%sval' % w_namespace))
|
def get_ilvl(li, w_namespace):
"""
The ilvl on an li tag tells the li tag at what level of indentation this
tag is at. This is used to determine if the li tag needs to be nested or
not.
"""
ilvls = li.xpath('.//w:ilvl', namespaces=li.nsmap)
if len(ilvls) == 0:
return -1
return int(ilvls[0].get('%sval' % w_namespace))
|
[
"The",
"ilvl",
"on",
"an",
"li",
"tag",
"tells",
"the",
"li",
"tag",
"at",
"what",
"level",
"of",
"indentation",
"this",
"tag",
"is",
"at",
".",
"This",
"is",
"used",
"to",
"determine",
"if",
"the",
"li",
"tag",
"needs",
"to",
"be",
"nested",
"or",
"not",
"."
] |
PolicyStat/docx2html
|
python
|
https://github.com/PolicyStat/docx2html/blob/2dc4afd1e3a3f2f0b357d0bff903eb58bcc94429/docx2html/core.py#L337-L346
|
[
"def",
"get_ilvl",
"(",
"li",
",",
"w_namespace",
")",
":",
"ilvls",
"=",
"li",
".",
"xpath",
"(",
"'.//w:ilvl'",
",",
"namespaces",
"=",
"li",
".",
"nsmap",
")",
"if",
"len",
"(",
"ilvls",
")",
"==",
"0",
":",
"return",
"-",
"1",
"return",
"int",
"(",
"ilvls",
"[",
"0",
"]",
".",
"get",
"(",
"'%sval'",
"%",
"w_namespace",
")",
")"
] |
2dc4afd1e3a3f2f0b357d0bff903eb58bcc94429
|
test
|
get_numId
|
The numId on an li tag maps to the numbering dictionary alongside the ilvl
to determine what the list should look like (unordered, digits, lower
alpha, etc)
|
docx2html/core.py
|
def get_numId(li, w_namespace):
"""
    The numId on an li tag maps to the numbering dictionary alongside the ilvl
to determine what the list should look like (unordered, digits, lower
alpha, etc)
"""
numIds = li.xpath('.//w:numId', namespaces=li.nsmap)
if len(numIds) == 0:
return -1
return numIds[0].get('%sval' % w_namespace)
|
def get_numId(li, w_namespace):
"""
    The numId on an li tag maps to the numbering dictionary alongside the ilvl
to determine what the list should look like (unordered, digits, lower
alpha, etc)
"""
numIds = li.xpath('.//w:numId', namespaces=li.nsmap)
if len(numIds) == 0:
return -1
return numIds[0].get('%sval' % w_namespace)
|
[
"The",
"numId",
"on",
"an",
"li",
"tag",
"maps",
"to",
"the",
"numbering",
"dictionary",
"along",
"side",
"the",
"ilvl",
"to",
"determine",
"what",
"the",
"list",
"should",
"look",
"like",
"(",
"unordered",
"digits",
"lower",
"alpha",
"etc",
")"
] |
PolicyStat/docx2html
|
python
|
https://github.com/PolicyStat/docx2html/blob/2dc4afd1e3a3f2f0b357d0bff903eb58bcc94429/docx2html/core.py#L350-L359
|
[
"def",
"get_numId",
"(",
"li",
",",
"w_namespace",
")",
":",
"numIds",
"=",
"li",
".",
"xpath",
"(",
"'.//w:numId'",
",",
"namespaces",
"=",
"li",
".",
"nsmap",
")",
"if",
"len",
"(",
"numIds",
")",
"==",
"0",
":",
"return",
"-",
"1",
"return",
"numIds",
"[",
"0",
"]",
".",
"get",
"(",
"'%sval'",
"%",
"w_namespace",
")"
] |
2dc4afd1e3a3f2f0b357d0bff903eb58bcc94429
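The two records above (``get_ilvl`` and ``get_numId``) both read a namespaced ``w:val`` attribute via Clark notation ('%sval' % w_namespace). A self-contained lxml sketch of the WordprocessingML fragment they expect; the namespace URI is the standard one for docx main documents.

from lxml import etree

XML = (b'<w:p xmlns:w="http://schemas.openxmlformats.org/wordprocessingml/2006/main">'
       b'<w:pPr><w:numPr><w:ilvl w:val="1"/><w:numId w:val="3"/></w:numPr></w:pPr></w:p>')
W = '{http://schemas.openxmlformats.org/wordprocessingml/2006/main}'   # Clark prefix

li = etree.fromstring(XML)
ilvl = li.xpath('.//w:ilvl', namespaces=li.nsmap)[0]
numId = li.xpath('.//w:numId', namespaces=li.nsmap)[0]
print(int(ilvl.get(W + 'val')), numId.get(W + 'val'))   # 1 3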
|
test
|
create_list
|
Based on the passed in list_type create a list object (ol/ul). In the
future this function will also deal with what the numbering of an ordered
list should look like.
|
docx2html/core.py
|
def create_list(list_type):
"""
    Based on the passed in list_type create a list object (ol/ul). In the
future this function will also deal with what the numbering of an ordered
list should look like.
"""
list_types = {
'bullet': 'ul',
}
el = etree.Element(list_types.get(list_type, 'ol'))
# These are the supported list style types and their conversion to css.
list_type_conversions = {
'decimal': DEFAULT_LIST_NUMBERING_STYLE,
'decimalZero': 'decimal-leading-zero',
'upperRoman': 'upper-roman',
'lowerRoman': 'lower-roman',
'upperLetter': 'upper-alpha',
'lowerLetter': 'lower-alpha',
'ordinal': DEFAULT_LIST_NUMBERING_STYLE,
'cardinalText': DEFAULT_LIST_NUMBERING_STYLE,
'ordinalText': DEFAULT_LIST_NUMBERING_STYLE,
}
if list_type != 'bullet':
el.set(
'data-list-type',
list_type_conversions.get(list_type, DEFAULT_LIST_NUMBERING_STYLE),
)
return el
|
def create_list(list_type):
"""
    Based on the passed in list_type create a list object (ol/ul). In the
future this function will also deal with what the numbering of an ordered
list should look like.
"""
list_types = {
'bullet': 'ul',
}
el = etree.Element(list_types.get(list_type, 'ol'))
# These are the supported list style types and their conversion to css.
list_type_conversions = {
'decimal': DEFAULT_LIST_NUMBERING_STYLE,
'decimalZero': 'decimal-leading-zero',
'upperRoman': 'upper-roman',
'lowerRoman': 'lower-roman',
'upperLetter': 'upper-alpha',
'lowerLetter': 'lower-alpha',
'ordinal': DEFAULT_LIST_NUMBERING_STYLE,
'cardinalText': DEFAULT_LIST_NUMBERING_STYLE,
'ordinalText': DEFAULT_LIST_NUMBERING_STYLE,
}
if list_type != 'bullet':
el.set(
'data-list-type',
list_type_conversions.get(list_type, DEFAULT_LIST_NUMBERING_STYLE),
)
return el
|
[
"Based",
"on",
"the",
"passed",
"in",
"list_type",
"create",
"a",
"list",
"objects",
"(",
"ol",
"/",
"ul",
")",
".",
"In",
"the",
"future",
"this",
"function",
"will",
"also",
"deal",
"with",
"what",
"the",
"numbering",
"of",
"an",
"ordered",
"list",
"should",
"look",
"like",
"."
] |
PolicyStat/docx2html
|
python
|
https://github.com/PolicyStat/docx2html/blob/2dc4afd1e3a3f2f0b357d0bff903eb58bcc94429/docx2html/core.py#L362-L389
|
[
"def",
"create_list",
"(",
"list_type",
")",
":",
"list_types",
"=",
"{",
"'bullet'",
":",
"'ul'",
",",
"}",
"el",
"=",
"etree",
".",
"Element",
"(",
"list_types",
".",
"get",
"(",
"list_type",
",",
"'ol'",
")",
")",
"# These are the supported list style types and their conversion to css.",
"list_type_conversions",
"=",
"{",
"'decimal'",
":",
"DEFAULT_LIST_NUMBERING_STYLE",
",",
"'decimalZero'",
":",
"'decimal-leading-zero'",
",",
"'upperRoman'",
":",
"'upper-roman'",
",",
"'lowerRoman'",
":",
"'lower-roman'",
",",
"'upperLetter'",
":",
"'upper-alpha'",
",",
"'lowerLetter'",
":",
"'lower-alpha'",
",",
"'ordinal'",
":",
"DEFAULT_LIST_NUMBERING_STYLE",
",",
"'cardinalText'",
":",
"DEFAULT_LIST_NUMBERING_STYLE",
",",
"'ordinalText'",
":",
"DEFAULT_LIST_NUMBERING_STYLE",
",",
"}",
"if",
"list_type",
"!=",
"'bullet'",
":",
"el",
".",
"set",
"(",
"'data-list-type'",
",",
"list_type_conversions",
".",
"get",
"(",
"list_type",
",",
"DEFAULT_LIST_NUMBERING_STYLE",
")",
",",
")",
"return",
"el"
] |
2dc4afd1e3a3f2f0b357d0bff903eb58bcc94429
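A short usage sketch, assuming ``create_list`` is importable from ``docx2html.core`` as the record's path suggests; 'upperRoman' maps to the css-style 'upper-roman' per the conversion table above.

from lxml import etree
from docx2html.core import create_list

ol = create_list('upperRoman')
print(etree.tostring(ol))    # b'<ol data-list-type="upper-roman"/>'
ul = create_list('bullet')
print(etree.tostring(ul))    # b'<ul/>'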
|
test
|
get_v_merge
|
vMerge is what docx uses to denote that a table cell is part of a rowspan.
The first cell to have a vMerge is the start of the rowspan, and the vMerge
will be denoted with 'restart'. If it is anything other than restart then
it is a continuation of another rowspan.
|
docx2html/core.py
|
def get_v_merge(tc):
"""
vMerge is what docx uses to denote that a table cell is part of a rowspan.
The first cell to have a vMerge is the start of the rowspan, and the vMerge
will be denoted with 'restart'. If it is anything other than restart then
it is a continuation of another rowspan.
"""
if tc is None:
return None
v_merges = tc.xpath('.//w:vMerge', namespaces=tc.nsmap)
if len(v_merges) != 1:
return None
v_merge = v_merges[0]
return v_merge
|
def get_v_merge(tc):
"""
vMerge is what docx uses to denote that a table cell is part of a rowspan.
The first cell to have a vMerge is the start of the rowspan, and the vMerge
will be denoted with 'restart'. If it is anything other than restart then
it is a continuation of another rowspan.
"""
if tc is None:
return None
v_merges = tc.xpath('.//w:vMerge', namespaces=tc.nsmap)
if len(v_merges) != 1:
return None
v_merge = v_merges[0]
return v_merge
|
[
"vMerge",
"is",
"what",
"docx",
"uses",
"to",
"denote",
"that",
"a",
"table",
"cell",
"is",
"part",
"of",
"a",
"rowspan",
".",
"The",
"first",
"cell",
"to",
"have",
"a",
"vMerge",
"is",
"the",
"start",
"of",
"the",
"rowspan",
"and",
"the",
"vMerge",
"will",
"be",
"denoted",
"with",
"restart",
".",
"If",
"it",
"is",
"anything",
"other",
"than",
"restart",
"then",
"it",
"is",
"a",
"continuation",
"of",
"another",
"rowspan",
"."
] |
PolicyStat/docx2html
|
python
|
https://github.com/PolicyStat/docx2html/blob/2dc4afd1e3a3f2f0b357d0bff903eb58bcc94429/docx2html/core.py#L393-L406
|
[
"def",
"get_v_merge",
"(",
"tc",
")",
":",
"if",
"tc",
"is",
"None",
":",
"return",
"None",
"v_merges",
"=",
"tc",
".",
"xpath",
"(",
"'.//w:vMerge'",
",",
"namespaces",
"=",
"tc",
".",
"nsmap",
")",
"if",
"len",
"(",
"v_merges",
")",
"!=",
"1",
":",
"return",
"None",
"v_merge",
"=",
"v_merges",
"[",
"0",
"]",
"return",
"v_merge"
] |
2dc4afd1e3a3f2f0b357d0bff903eb58bcc94429
|
test
|
get_grid_span
|
gridSpan is what docx uses to denote that a table cell has a colspan. This
is much simpler than rowspans in that there is a one-to-one mapping
from gridSpan to colspan.
|
docx2html/core.py
|
def get_grid_span(tc):
"""
gridSpan is what docx uses to denote that a table cell has a colspan. This
    is much simpler than rowspans in that there is a one-to-one mapping
from gridSpan to colspan.
"""
w_namespace = get_namespace(tc, 'w')
grid_spans = tc.xpath('.//w:gridSpan', namespaces=tc.nsmap)
if len(grid_spans) != 1:
return 1
grid_span = grid_spans[0]
return int(grid_span.get('%sval' % w_namespace))
|
def get_grid_span(tc):
"""
gridSpan is what docx uses to denote that a table cell has a colspan. This
    is much simpler than rowspans in that there is a one-to-one mapping
from gridSpan to colspan.
"""
w_namespace = get_namespace(tc, 'w')
grid_spans = tc.xpath('.//w:gridSpan', namespaces=tc.nsmap)
if len(grid_spans) != 1:
return 1
grid_span = grid_spans[0]
return int(grid_span.get('%sval' % w_namespace))
|
[
"gridSpan",
"is",
"what",
"docx",
"uses",
"to",
"denote",
"that",
"a",
"table",
"cell",
"has",
"a",
"colspan",
".",
"This",
"is",
"much",
"more",
"simple",
"than",
"rowspans",
"in",
"that",
"there",
"is",
"a",
"one",
"-",
"to",
"-",
"one",
"mapping",
"from",
"gridSpan",
"to",
"colspan",
"."
] |
PolicyStat/docx2html
|
python
|
https://github.com/PolicyStat/docx2html/blob/2dc4afd1e3a3f2f0b357d0bff903eb58bcc94429/docx2html/core.py#L410-L421
|
[
"def",
"get_grid_span",
"(",
"tc",
")",
":",
"w_namespace",
"=",
"get_namespace",
"(",
"tc",
",",
"'w'",
")",
"grid_spans",
"=",
"tc",
".",
"xpath",
"(",
"'.//w:gridSpan'",
",",
"namespaces",
"=",
"tc",
".",
"nsmap",
")",
"if",
"len",
"(",
"grid_spans",
")",
"!=",
"1",
":",
"return",
"1",
"grid_span",
"=",
"grid_spans",
"[",
"0",
"]",
"return",
"int",
"(",
"grid_span",
".",
"get",
"(",
"'%sval'",
"%",
"w_namespace",
")",
")"
] |
2dc4afd1e3a3f2f0b357d0bff903eb58bcc94429
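The colspan counterpart to ``vMerge`` above; a self-contained sketch of the markup ``get_grid_span`` reads, mirroring its fallback to 1 when no single ``gridSpan`` is found.

from lxml import etree

XML = (b'<w:tc xmlns:w="http://schemas.openxmlformats.org/wordprocessingml/2006/main">'
       b'<w:tcPr><w:gridSpan w:val="2"/></w:tcPr></w:tc>')
W = '{http://schemas.openxmlformats.org/wordprocessingml/2006/main}'

tc = etree.fromstring(XML)
grid_spans = tc.xpath('.//w:gridSpan', namespaces=tc.nsmap)
colspan = int(grid_spans[0].get(W + 'val')) if len(grid_spans) == 1 else 1
print(colspan)   # 2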
|
test
|
get_td_at_index
|
When calculating the rowspan for a given cell it is required to find all
table cells 'below' the initial cell with a v_merge. This function will
return the td element at the passed in index, taking into account colspans.
|
docx2html/core.py
|
def get_td_at_index(tr, index):
"""
When calculating the rowspan for a given cell it is required to find all
table cells 'below' the initial cell with a v_merge. This function will
return the td element at the passed in index, taking into account colspans.
"""
current = 0
for td in tr.xpath('.//w:tc', namespaces=tr.nsmap):
if index == current:
return td
current += get_grid_span(td)
|
def get_td_at_index(tr, index):
"""
When calculating the rowspan for a given cell it is required to find all
table cells 'below' the initial cell with a v_merge. This function will
return the td element at the passed in index, taking into account colspans.
"""
current = 0
for td in tr.xpath('.//w:tc', namespaces=tr.nsmap):
if index == current:
return td
current += get_grid_span(td)
|
[
"When",
"calculating",
"the",
"rowspan",
"for",
"a",
"given",
"cell",
"it",
"is",
"required",
"to",
"find",
"all",
"table",
"cells",
"below",
"the",
"initial",
"cell",
"with",
"a",
"v_merge",
".",
"This",
"function",
"will",
"return",
"the",
"td",
"element",
"at",
"the",
"passed",
"in",
"index",
"taking",
"into",
"account",
"colspans",
"."
] |
PolicyStat/docx2html
|
python
|
https://github.com/PolicyStat/docx2html/blob/2dc4afd1e3a3f2f0b357d0bff903eb58bcc94429/docx2html/core.py#L425-L435
|
[
"def",
"get_td_at_index",
"(",
"tr",
",",
"index",
")",
":",
"current",
"=",
"0",
"for",
"td",
"in",
"tr",
".",
"xpath",
"(",
"'.//w:tc'",
",",
"namespaces",
"=",
"tr",
".",
"nsmap",
")",
":",
"if",
"index",
"==",
"current",
":",
"return",
"td",
"current",
"+=",
"get_grid_span",
"(",
"td",
")"
] |
2dc4afd1e3a3f2f0b357d0bff903eb58bcc94429
|
test
|
style_is_false
|
For bold, italics and underline. Simply checking to see if the various tags
are present will not suffice. If the tag is present and set to False then
the style should not be present.
|
docx2html/core.py
|
def style_is_false(style):
"""
For bold, italics and underline. Simply checking to see if the various tags
are present will not suffice. If the tag is present and set to False then
the style should not be present.
"""
if style is None:
return False
w_namespace = get_namespace(style, 'w')
return style.get('%sval' % w_namespace) != 'false'
|
def style_is_false(style):
"""
For bold, italics and underline. Simply checking to see if the various tags
are present will not suffice. If the tag is present and set to False then
the style should not be present.
"""
if style is None:
return False
w_namespace = get_namespace(style, 'w')
return style.get('%sval' % w_namespace) != 'false'
|
[
"For",
"bold",
"italics",
"and",
"underline",
".",
"Simply",
"checking",
"to",
"see",
"if",
"the",
"various",
"tags",
"are",
"present",
"will",
"not",
"suffice",
".",
"If",
"the",
"tag",
"is",
"present",
"and",
"set",
"to",
"False",
"then",
"the",
"style",
"should",
"not",
"be",
"present",
"."
] |
PolicyStat/docx2html
|
python
|
https://github.com/PolicyStat/docx2html/blob/2dc4afd1e3a3f2f0b357d0bff903eb58bcc94429/docx2html/core.py#L494-L503
|
[
"def",
"style_is_false",
"(",
"style",
")",
":",
"if",
"style",
"is",
"None",
":",
"return",
"False",
"w_namespace",
"=",
"get_namespace",
"(",
"style",
",",
"'w'",
")",
"return",
"style",
".",
"get",
"(",
"'%sval'",
"%",
"w_namespace",
")",
"!=",
"'false'"
] |
2dc4afd1e3a3f2f0b357d0bff903eb58bcc94429
|
test
|
is_bold
|
The function will return True if the r tag passed in is considered bold.
|
docx2html/core.py
|
def is_bold(r):
"""
The function will return True if the r tag passed in is considered bold.
"""
w_namespace = get_namespace(r, 'w')
rpr = r.find('%srPr' % w_namespace)
if rpr is None:
return False
bold = rpr.find('%sb' % w_namespace)
return style_is_false(bold)
|
def is_bold(r):
"""
The function will return True if the r tag passed in is considered bold.
"""
w_namespace = get_namespace(r, 'w')
rpr = r.find('%srPr' % w_namespace)
if rpr is None:
return False
bold = rpr.find('%sb' % w_namespace)
return style_is_false(bold)
|
[
"The",
"function",
"will",
"return",
"True",
"if",
"the",
"r",
"tag",
"passed",
"in",
"is",
"considered",
"bold",
"."
] |
PolicyStat/docx2html
|
python
|
https://github.com/PolicyStat/docx2html/blob/2dc4afd1e3a3f2f0b357d0bff903eb58bcc94429/docx2html/core.py#L507-L516
|
[
"def",
"is_bold",
"(",
"r",
")",
":",
"w_namespace",
"=",
"get_namespace",
"(",
"r",
",",
"'w'",
")",
"rpr",
"=",
"r",
".",
"find",
"(",
"'%srPr'",
"%",
"w_namespace",
")",
"if",
"rpr",
"is",
"None",
":",
"return",
"False",
"bold",
"=",
"rpr",
".",
"find",
"(",
"'%sb'",
"%",
"w_namespace",
")",
"return",
"style_is_false",
"(",
"bold",
")"
] |
2dc4afd1e3a3f2f0b357d0bff903eb58bcc94429
|
test
|
is_italics
|
The function will return True if the r tag passed in is considered
italicized.
|
docx2html/core.py
|
def is_italics(r):
"""
The function will return True if the r tag passed in is considered
italicized.
"""
w_namespace = get_namespace(r, 'w')
rpr = r.find('%srPr' % w_namespace)
if rpr is None:
return False
italics = rpr.find('%si' % w_namespace)
return style_is_false(italics)
|
def is_italics(r):
"""
The function will return True if the r tag passed in is considered
italicized.
"""
w_namespace = get_namespace(r, 'w')
rpr = r.find('%srPr' % w_namespace)
if rpr is None:
return False
italics = rpr.find('%si' % w_namespace)
return style_is_false(italics)
|
[
"The",
"function",
"will",
"return",
"True",
"if",
"the",
"r",
"tag",
"passed",
"in",
"is",
"considered",
"italicized",
"."
] |
PolicyStat/docx2html
|
python
|
https://github.com/PolicyStat/docx2html/blob/2dc4afd1e3a3f2f0b357d0bff903eb58bcc94429/docx2html/core.py#L520-L530
|
[
"def",
"is_italics",
"(",
"r",
")",
":",
"w_namespace",
"=",
"get_namespace",
"(",
"r",
",",
"'w'",
")",
"rpr",
"=",
"r",
".",
"find",
"(",
"'%srPr'",
"%",
"w_namespace",
")",
"if",
"rpr",
"is",
"None",
":",
"return",
"False",
"italics",
"=",
"rpr",
".",
"find",
"(",
"'%si'",
"%",
"w_namespace",
")",
"return",
"style_is_false",
"(",
"italics",
")"
] |
2dc4afd1e3a3f2f0b357d0bff903eb58bcc94429
|
test
|
is_underlined
|
The function will return True if the r tag passed in is considered
underlined.
|
docx2html/core.py
|
def is_underlined(r):
"""
The function will return True if the r tag passed in is considered
underlined.
"""
w_namespace = get_namespace(r, 'w')
rpr = r.find('%srPr' % w_namespace)
if rpr is None:
return False
underline = rpr.find('%su' % w_namespace)
return style_is_false(underline)
|
def is_underlined(r):
"""
The function will return True if the r tag passed in is considered
underlined.
"""
w_namespace = get_namespace(r, 'w')
rpr = r.find('%srPr' % w_namespace)
if rpr is None:
return False
underline = rpr.find('%su' % w_namespace)
return style_is_false(underline)
|
[
"The",
"function",
"will",
"return",
"True",
"if",
"the",
"r",
"tag",
"passed",
"in",
"is",
"considered",
"underlined",
"."
] |
PolicyStat/docx2html
|
python
|
https://github.com/PolicyStat/docx2html/blob/2dc4afd1e3a3f2f0b357d0bff903eb58bcc94429/docx2html/core.py#L534-L544
|
[
"def",
"is_underlined",
"(",
"r",
")",
":",
"w_namespace",
"=",
"get_namespace",
"(",
"r",
",",
"'w'",
")",
"rpr",
"=",
"r",
".",
"find",
"(",
"'%srPr'",
"%",
"w_namespace",
")",
"if",
"rpr",
"is",
"None",
":",
"return",
"False",
"underline",
"=",
"rpr",
".",
"find",
"(",
"'%su'",
"%",
"w_namespace",
")",
"return",
"style_is_false",
"(",
"underline",
")"
] |
2dc4afd1e3a3f2f0b357d0bff903eb58bcc94429
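is_bold, is_italics and is_underlined differ only in the run-property tag they inspect (w:b, w:i, w:u), so one sketch covers all three (import path assumed):

from lxml import etree
from docx2html.core import is_bold, is_italics, is_underlined  # assumed path

W = 'http://schemas.openxmlformats.org/wordprocessingml/2006/main'
r = etree.fromstring(
    '<w:r xmlns:w="%s"><w:rPr><w:b/><w:u w:val="false"/></w:rPr>'
    '<w:t>hi</w:t></w:r>' % W
)
assert is_bold(r)            # w:b present and not "false"
assert not is_italics(r)     # w:i absent entirely
assert not is_underlined(r)  # w:u present but switched off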
|
test
|
is_title
|
Certain p tags are denoted as ``Title`` tags. This function will return
True if the passed in p tag is considered a title.
|
docx2html/core.py
|
def is_title(p):
"""
Certain p tags are denoted as ``Title`` tags. This function will return
True if the passed in p tag is considered a title.
"""
w_namespace = get_namespace(p, 'w')
styles = p.xpath('.//w:pStyle', namespaces=p.nsmap)
if len(styles) == 0:
return False
style = styles[0]
return style.get('%sval' % w_namespace) == 'Title'
|
def is_title(p):
"""
Certain p tags are denoted as ``Title`` tags. This function will return
True if the passed in p tag is considered a title.
"""
w_namespace = get_namespace(p, 'w')
styles = p.xpath('.//w:pStyle', namespaces=p.nsmap)
if len(styles) == 0:
return False
style = styles[0]
return style.get('%sval' % w_namespace) == 'Title'
|
[
"Certain",
"p",
"tags",
"are",
"denoted",
"as",
"Title",
"tags",
".",
"This",
"function",
"will",
"return",
"True",
"if",
"the",
"passed",
"in",
"p",
"tag",
"is",
"considered",
"a",
"title",
"."
] |
PolicyStat/docx2html
|
python
|
https://github.com/PolicyStat/docx2html/blob/2dc4afd1e3a3f2f0b357d0bff903eb58bcc94429/docx2html/core.py#L548-L558
|
[
"def",
"is_title",
"(",
"p",
")",
":",
"w_namespace",
"=",
"get_namespace",
"(",
"p",
",",
"'w'",
")",
"styles",
"=",
"p",
".",
"xpath",
"(",
"'.//w:pStyle'",
",",
"namespaces",
"=",
"p",
".",
"nsmap",
")",
"if",
"len",
"(",
"styles",
")",
"==",
"0",
":",
"return",
"False",
"style",
"=",
"styles",
"[",
"0",
"]",
"return",
"style",
".",
"get",
"(",
"'%sval'",
"%",
"w_namespace",
")",
"==",
"'Title'"
] |
2dc4afd1e3a3f2f0b357d0bff903eb58bcc94429
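A minimal sketch of the Title check; the comparison against 'Title' is case-sensitive (import path assumed):

from lxml import etree
from docx2html.core import is_title  # assumed path

W = 'http://schemas.openxmlformats.org/wordprocessingml/2006/main'
p = etree.fromstring(
    '<w:p xmlns:w="%s"><w:pPr><w:pStyle w:val="Title"/></w:pPr>'
    '<w:r><w:t>My Title</w:t></w:r></w:p>' % W
)
assert is_title(p)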
|
test
|
get_text_run_content_data
|
It turns out that r tags can contain both t tags and drawing tags. Since we
need both, this function will return them in the order in which they are
found.
|
docx2html/core.py
|
def get_text_run_content_data(r):
"""
It turns out that r tags can contain both t tags and drawing tags. Since we
need both, this function will return them in the order in which they are
found.
"""
w_namespace = get_namespace(r, 'w')
valid_elements = (
'%st' % w_namespace,
'%sdrawing' % w_namespace,
'%spict' % w_namespace,
'%sbr' % w_namespace,
)
for el in r:
if el.tag in valid_elements:
yield el
|
def get_text_run_content_data(r):
"""
It turns out that r tags can contain both t tags and drawing tags. Since we
need both, this function will return them in the order in which they are
found.
"""
w_namespace = get_namespace(r, 'w')
valid_elements = (
'%st' % w_namespace,
'%sdrawing' % w_namespace,
'%spict' % w_namespace,
'%sbr' % w_namespace,
)
for el in r:
if el.tag in valid_elements:
yield el
|
[
"It",
"turns",
"out",
"that",
"r",
"tags",
"can",
"contain",
"both",
"t",
"tags",
"and",
"drawing",
"tags",
".",
"Since",
"we",
"need",
"both",
"this",
"function",
"will",
"return",
"them",
"in",
"the",
"order",
"in",
"which",
"they",
"are",
"found",
"."
] |
PolicyStat/docx2html
|
python
|
https://github.com/PolicyStat/docx2html/blob/2dc4afd1e3a3f2f0b357d0bff903eb58bcc94429/docx2html/core.py#L562-L577
|
[
"def",
"get_text_run_content_data",
"(",
"r",
")",
":",
"w_namespace",
"=",
"get_namespace",
"(",
"r",
",",
"'w'",
")",
"valid_elements",
"=",
"(",
"'%st'",
"%",
"w_namespace",
",",
"'%sdrawing'",
"%",
"w_namespace",
",",
"'%spict'",
"%",
"w_namespace",
",",
"'%sbr'",
"%",
"w_namespace",
",",
")",
"for",
"el",
"in",
"r",
":",
"if",
"el",
".",
"tag",
"in",
"valid_elements",
":",
"yield",
"el"
] |
2dc4afd1e3a3f2f0b357d0bff903eb58bcc94429
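A sketch of the generator's filtering and ordering guarantee (import path assumed):

from lxml import etree
from docx2html.core import get_text_run_content_data  # assumed path

W = 'http://schemas.openxmlformats.org/wordprocessingml/2006/main'
r = etree.fromstring(
    '<w:r xmlns:w="%s"><w:rPr/><w:t>a</w:t><w:br/><w:t>b</w:t></w:r>' % W
)
tags = [el.tag.split('}')[1] for el in get_text_run_content_data(r)]
assert tags == ['t', 'br', 't']  # w:rPr is skipped; document order is kept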
|
test
|
whole_line_styled
|
Checks to see if the whole p tag will end up being bold or italics. Returns
a tuple (boolean, boolean). The first boolean will be True if the whole
line is bold, False otherwise. The second boolean will be True if the whole
line is italics, False otherwise.
|
docx2html/core.py
|
def whole_line_styled(p):
"""
Checks to see if the whole p tag will end up being bold or italics. Returns
a tuple (boolean, boolean). The first boolean will be True if the whole
line is bold, False otherwise. The second boolean will be True if the whole
line is italics, False otherwise.
"""
r_tags = p.xpath('.//w:r', namespaces=p.nsmap)
tags_are_bold = [
is_bold(r) or is_underlined(r) for r in r_tags
]
tags_are_italics = [
is_italics(r) for r in r_tags
]
return all(tags_are_bold), all(tags_are_italics)
|
def whole_line_styled(p):
"""
Checks to see if the whole p tag will end up being bold or italics. Returns
a tuple (boolean, boolean). The first boolean will be True if the whole
line is bold, False otherwise. The second boolean will be True if the whole
line is italics, False otherwise.
"""
r_tags = p.xpath('.//w:r', namespaces=p.nsmap)
tags_are_bold = [
is_bold(r) or is_underlined(r) for r in r_tags
]
tags_are_italics = [
is_italics(r) for r in r_tags
]
return all(tags_are_bold), all(tags_are_italics)
|
[
"Checks",
"to",
"see",
"if",
"the",
"whole",
"p",
"tag",
"will",
"end",
"up",
"being",
"bold",
"or",
"italics",
".",
"Returns",
"a",
"tuple",
"(",
"boolean",
"boolean",
")",
".",
"The",
"first",
"boolean",
"will",
"be",
"True",
"if",
"the",
"whole",
"line",
"is",
"bold",
"False",
"otherwise",
".",
"The",
"second",
"boolean",
"will",
"be",
"True",
"if",
"the",
"whole",
"line",
"is",
"italics",
"False",
"otherwise",
"."
] |
PolicyStat/docx2html
|
python
|
https://github.com/PolicyStat/docx2html/blob/2dc4afd1e3a3f2f0b357d0bff903eb58bcc94429/docx2html/core.py#L599-L613
|
[
"def",
"whole_line_styled",
"(",
"p",
")",
":",
"r_tags",
"=",
"p",
".",
"xpath",
"(",
"'.//w:r'",
",",
"namespaces",
"=",
"p",
".",
"nsmap",
")",
"tags_are_bold",
"=",
"[",
"is_bold",
"(",
"r",
")",
"or",
"is_underlined",
"(",
"r",
")",
"for",
"r",
"in",
"r_tags",
"]",
"tags_are_italics",
"=",
"[",
"is_italics",
"(",
"r",
")",
"for",
"r",
"in",
"r_tags",
"]",
"return",
"all",
"(",
"tags_are_bold",
")",
",",
"all",
"(",
"tags_are_italics",
")"
] |
2dc4afd1e3a3f2f0b357d0bff903eb58bcc94429
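A sketch of the tuple it returns, plus one caveat not stated in the original docstring: all() over an empty sequence is True, so a w:p with no runs reports as both fully bold and fully italic (import path assumed):

from lxml import etree
from docx2html.core import whole_line_styled  # assumed path

W = 'http://schemas.openxmlformats.org/wordprocessingml/2006/main'
p = etree.fromstring(
    '<w:p xmlns:w="%s">'
    '<w:r><w:rPr><w:b/></w:rPr><w:t>A</w:t></w:r>'
    '<w:r><w:rPr><w:u/></w:rPr><w:t>B</w:t></w:r>'
    '</w:p>' % W
)
# Underline counts toward the "bold" half of the tuple.
assert whole_line_styled(p) == (True, False)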
|
test
|
get_numbering_info
|
There is a separate file called numbering.xml that stores how lists should
look (unordered, digits, lower case letters, etc.). Parse that file and
return a dictionary of what each combination should be based on list Id and
level of indentation.
|
docx2html/core.py
|
def get_numbering_info(tree):
"""
There is a separate file called numbering.xml that stores how lists should
look (unordered, digits, lower case letters, etc.). Parse that file and
return a dictionary of what each combination should be based on list Id and
level of indentation.
"""
if tree is None:
return {}
w_namespace = get_namespace(tree, 'w')
num_ids = {}
result = defaultdict(dict)
# First find all the list types
for list_type in tree.findall('%snum' % w_namespace):
list_id = list_type.get('%snumId' % w_namespace)
# Each list type is assigned an abstractNumber that defines how lists
# should look.
abstract_number = list_type.find('%sabstractNumId' % w_namespace)
num_ids[abstract_number.get('%sval' % w_namespace)] = list_id
# Loop through all the abstractNumbers
for abstract_number in tree.findall('%sabstractNum' % w_namespace):
abstract_num_id = abstract_number.get('%sabstractNumId' % w_namespace)
# If we find an abstractNumber that is not being used in the document
# then ignore it.
if abstract_num_id not in num_ids:
continue
# Get the level of the abstract number.
for lvl in abstract_number.findall('%slvl' % w_namespace):
ilvl = int(lvl.get('%silvl' % w_namespace))
lvl_format = lvl.find('%snumFmt' % w_namespace)
list_style = lvl_format.get('%sval' % w_namespace)
# Based on the list type and the ilvl (indentation level) store the
# needed style.
result[num_ids[abstract_num_id]][ilvl] = list_style
return result
|
def get_numbering_info(tree):
"""
There is a separate file called numbering.xml that stores how lists should
look (unordered, digits, lower case letters, etc.). Parse that file and
return a dictionary of what each combination should be based on list Id and
level of indentation.
"""
if tree is None:
return {}
w_namespace = get_namespace(tree, 'w')
num_ids = {}
result = defaultdict(dict)
# First find all the list types
for list_type in tree.findall('%snum' % w_namespace):
list_id = list_type.get('%snumId' % w_namespace)
# Each list type is assigned an abstractNumber that defines how lists
# should look.
abstract_number = list_type.find('%sabstractNumId' % w_namespace)
num_ids[abstract_number.get('%sval' % w_namespace)] = list_id
# Loop through all the abstractNumbers
for abstract_number in tree.findall('%sabstractNum' % w_namespace):
abstract_num_id = abstract_number.get('%sabstractNumId' % w_namespace)
# If we find an abstractNumber that is not being used in the document
# then ignore it.
if abstract_num_id not in num_ids:
continue
# Get the level of the abstract number.
for lvl in abstract_number.findall('%slvl' % w_namespace):
ilvl = int(lvl.get('%silvl' % w_namespace))
lvl_format = lvl.find('%snumFmt' % w_namespace)
list_style = lvl_format.get('%sval' % w_namespace)
# Based on the list type and the ilvl (indentation level) store the
# needed style.
result[num_ids[abstract_num_id]][ilvl] = list_style
return result
|
[
"There",
"is",
"a",
"separate",
"file",
"called",
"numbering",
".",
"xml",
"that",
"stores",
"how",
"lists",
"should",
"look",
"(",
"unordered",
"digits",
"lower",
"case",
"letters",
"etc",
".",
")",
".",
"Parse",
"that",
"file",
"and",
"return",
"a",
"dictionary",
"of",
"what",
"each",
"combination",
"should",
"be",
"based",
"on",
"list",
"Id",
"and",
"level",
"of",
"indentation",
"."
] |
PolicyStat/docx2html
|
python
|
https://github.com/PolicyStat/docx2html/blob/2dc4afd1e3a3f2f0b357d0bff903eb58bcc94429/docx2html/core.py#L634-L671
|
[
"def",
"get_numbering_info",
"(",
"tree",
")",
":",
"if",
"tree",
"is",
"None",
":",
"return",
"{",
"}",
"w_namespace",
"=",
"get_namespace",
"(",
"tree",
",",
"'w'",
")",
"num_ids",
"=",
"{",
"}",
"result",
"=",
"defaultdict",
"(",
"dict",
")",
"# First find all the list types",
"for",
"list_type",
"in",
"tree",
".",
"findall",
"(",
"'%snum'",
"%",
"w_namespace",
")",
":",
"list_id",
"=",
"list_type",
".",
"get",
"(",
"'%snumId'",
"%",
"w_namespace",
")",
"# Each list type is assigned an abstractNumber that defines how lists",
"# should look.",
"abstract_number",
"=",
"list_type",
".",
"find",
"(",
"'%sabstractNumId'",
"%",
"w_namespace",
")",
"num_ids",
"[",
"abstract_number",
".",
"get",
"(",
"'%sval'",
"%",
"w_namespace",
")",
"]",
"=",
"list_id",
"# Loop through all the abstractNumbers",
"for",
"abstract_number",
"in",
"tree",
".",
"findall",
"(",
"'%sabstractNum'",
"%",
"w_namespace",
")",
":",
"abstract_num_id",
"=",
"abstract_number",
".",
"get",
"(",
"'%sabstractNumId'",
"%",
"w_namespace",
")",
"# If we find an abstractNumber that is not being used in the document",
"# then ignore it.",
"if",
"abstract_num_id",
"not",
"in",
"num_ids",
":",
"continue",
"# Get the level of the abstract number.",
"for",
"lvl",
"in",
"abstract_number",
".",
"findall",
"(",
"'%slvl'",
"%",
"w_namespace",
")",
":",
"ilvl",
"=",
"int",
"(",
"lvl",
".",
"get",
"(",
"'%silvl'",
"%",
"w_namespace",
")",
")",
"lvl_format",
"=",
"lvl",
".",
"find",
"(",
"'%snumFmt'",
"%",
"w_namespace",
")",
"list_style",
"=",
"lvl_format",
".",
"get",
"(",
"'%sval'",
"%",
"w_namespace",
")",
"# Based on the list type and the ilvl (indentation level) store the",
"# needed style.",
"result",
"[",
"num_ids",
"[",
"abstract_num_id",
"]",
"]",
"[",
"ilvl",
"]",
"=",
"list_style",
"return",
"result"
] |
2dc4afd1e3a3f2f0b357d0bff903eb58bcc94429
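A sketch of the returned shape against a tiny stand-in numbering.xml (import path assumed):

from lxml import etree
from docx2html.core import get_numbering_info  # assumed path

W = 'http://schemas.openxmlformats.org/wordprocessingml/2006/main'
numbering = etree.fromstring(
    '<w:numbering xmlns:w="%s">'
    '<w:abstractNum w:abstractNumId="0">'
    '<w:lvl w:ilvl="0"><w:numFmt w:val="decimal"/></w:lvl>'
    '<w:lvl w:ilvl="1"><w:numFmt w:val="lowerLetter"/></w:lvl>'
    '</w:abstractNum>'
    '<w:num w:numId="1"><w:abstractNumId w:val="0"/></w:num>'
    '</w:numbering>' % W
)
# Keyed by numId first, then by indentation level (ilvl, as an int).
assert get_numbering_info(numbering) == {'1': {0: 'decimal', 1: 'lowerLetter'}}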
|
test
|
get_style_dict
|
Some things that are considered lists are actually supposed to be H tags
(h1, h2, etc.). These can be denoted by their styleId.
|
docx2html/core.py
|
def get_style_dict(tree):
"""
Some things that are considered lists are actually supposed to be H tags
(h1, h2, etc.). These can be denoted by their styleId.
"""
# This is a partial document and the actual h1 is the document title, which
# will be displayed elsewhere.
headers = {
'heading 1': 'h2',
'heading 2': 'h3',
'heading 3': 'h4',
'heading 4': 'h5',
'heading 5': 'h6',
'heading 6': 'h6',
'heading 7': 'h6',
'heading 8': 'h6',
'heading 9': 'h6',
'heading 10': 'h6',
}
if tree is None:
return {}
w_namespace = get_namespace(tree, 'w')
result = {}
for el in tree:
style_id = el.get('%sstyleId' % w_namespace)
el_result = {
'header': False,
'font_size': None,
'based_on': None,
}
# Get the header info
name = el.find('%sname' % w_namespace)
if name is None:
continue
value = name.get('%sval' % w_namespace).lower()
if value in headers:
el_result['header'] = headers[value]
# Get the size info.
rpr = el.find('%srPr' % w_namespace)
if rpr is None:
continue
size = rpr.find('%ssz' % w_namespace)
if size is None:
el_result['font_size'] = None
else:
el_result['font_size'] = size.get('%sval' % w_namespace)
# Get based on info.
based_on = el.find('%sbasedOn' % w_namespace)
if based_on is None:
el_result['based_on'] = None
else:
el_result['based_on'] = based_on.get('%sval' % w_namespace)
result[style_id] = el_result
return result
|
def get_style_dict(tree):
"""
Some things that are considered lists are actually supposed to be H tags
(h1, h2, etc.). These can be denoted by their styleId.
"""
# This is a partial document and the actual h1 is the document title, which
# will be displayed elsewhere.
headers = {
'heading 1': 'h2',
'heading 2': 'h3',
'heading 3': 'h4',
'heading 4': 'h5',
'heading 5': 'h6',
'heading 6': 'h6',
'heading 7': 'h6',
'heading 8': 'h6',
'heading 9': 'h6',
'heading 10': 'h6',
}
if tree is None:
return {}
w_namespace = get_namespace(tree, 'w')
result = {}
for el in tree:
style_id = el.get('%sstyleId' % w_namespace)
el_result = {
'header': False,
'font_size': None,
'based_on': None,
}
# Get the header info
name = el.find('%sname' % w_namespace)
if name is None:
continue
value = name.get('%sval' % w_namespace).lower()
if value in headers:
el_result['header'] = headers[value]
# Get the size info.
rpr = el.find('%srPr' % w_namespace)
if rpr is None:
continue
size = rpr.find('%ssz' % w_namespace)
if size is None:
el_result['font_size'] = None
else:
el_result['font_size'] = size.get('%sval' % w_namespace)
# Get based on info.
based_on = el.find('%sbasedOn' % w_namespace)
if based_on is None:
el_result['based_on'] = None
else:
el_result['based_on'] = based_on.get('%sval' % w_namespace)
result[style_id] = el_result
return result
|
[
"Some",
"things",
"that",
"are",
"considered",
"lists",
"are",
"actually",
"supposed",
"to",
"be",
"H",
"tags",
"(",
"h1",
"h2",
"etc",
".",
")",
"These",
"can",
"be",
"denoted",
"by",
"their",
"styleId"
] |
PolicyStat/docx2html
|
python
|
https://github.com/PolicyStat/docx2html/blob/2dc4afd1e3a3f2f0b357d0bff903eb58bcc94429/docx2html/core.py#L674-L729
|
[
"def",
"get_style_dict",
"(",
"tree",
")",
":",
"# This is a partial document and actual h1 is the document title, which",
"# will be displayed elsewhere.",
"headers",
"=",
"{",
"'heading 1'",
":",
"'h2'",
",",
"'heading 2'",
":",
"'h3'",
",",
"'heading 3'",
":",
"'h4'",
",",
"'heading 4'",
":",
"'h5'",
",",
"'heading 5'",
":",
"'h6'",
",",
"'heading 6'",
":",
"'h6'",
",",
"'heading 7'",
":",
"'h6'",
",",
"'heading 8'",
":",
"'h6'",
",",
"'heading 9'",
":",
"'h6'",
",",
"'heading 10'",
":",
"'h6'",
",",
"}",
"if",
"tree",
"is",
"None",
":",
"return",
"{",
"}",
"w_namespace",
"=",
"get_namespace",
"(",
"tree",
",",
"'w'",
")",
"result",
"=",
"{",
"}",
"for",
"el",
"in",
"tree",
":",
"style_id",
"=",
"el",
".",
"get",
"(",
"'%sstyleId'",
"%",
"w_namespace",
")",
"el_result",
"=",
"{",
"'header'",
":",
"False",
",",
"'font_size'",
":",
"None",
",",
"'based_on'",
":",
"None",
",",
"}",
"# Get the header info",
"name",
"=",
"el",
".",
"find",
"(",
"'%sname'",
"%",
"w_namespace",
")",
"if",
"name",
"is",
"None",
":",
"continue",
"value",
"=",
"name",
".",
"get",
"(",
"'%sval'",
"%",
"w_namespace",
")",
".",
"lower",
"(",
")",
"if",
"value",
"in",
"headers",
":",
"el_result",
"[",
"'header'",
"]",
"=",
"headers",
"[",
"value",
"]",
"# Get the size info.",
"rpr",
"=",
"el",
".",
"find",
"(",
"'%srPr'",
"%",
"w_namespace",
")",
"if",
"rpr",
"is",
"None",
":",
"continue",
"size",
"=",
"rpr",
".",
"find",
"(",
"'%ssz'",
"%",
"w_namespace",
")",
"if",
"size",
"is",
"None",
":",
"el_result",
"[",
"'font_size'",
"]",
"=",
"None",
"else",
":",
"el_result",
"[",
"'font_size'",
"]",
"=",
"size",
".",
"get",
"(",
"'%sval'",
"%",
"w_namespace",
")",
"# Get based on info.",
"based_on",
"=",
"el",
".",
"find",
"(",
"'%sbasedOn'",
"%",
"w_namespace",
")",
"if",
"based_on",
"is",
"None",
":",
"el_result",
"[",
"'based_on'",
"]",
"=",
"None",
"else",
":",
"el_result",
"[",
"'based_on'",
"]",
"=",
"based_on",
".",
"get",
"(",
"'%sval'",
"%",
"w_namespace",
")",
"result",
"[",
"style_id",
"]",
"=",
"el_result",
"return",
"result"
] |
2dc4afd1e3a3f2f0b357d0bff903eb58bcc94429
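A sketch of the mapping produced for a single style (import path assumed):

from lxml import etree
from docx2html.core import get_style_dict  # assumed path

W = 'http://schemas.openxmlformats.org/wordprocessingml/2006/main'
styles = etree.fromstring(
    '<w:styles xmlns:w="%s">'
    '<w:style w:styleId="Heading1">'
    '<w:name w:val="heading 1"/>'
    '<w:rPr><w:sz w:val="32"/></w:rPr>'
    '</w:style>'
    '</w:styles>' % W
)
# 'heading 1' maps to h2 because the real h1 is reserved for the title.
assert get_style_dict(styles) == {
    'Heading1': {'header': 'h2', 'font_size': '32', 'based_on': None},
}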
|
test
|
get_relationship_info
|
There is a separate file that holds the targets for links as well as the targets
for images. Return a dictionary based on the relationship id and the
target.
|
docx2html/core.py
|
def get_relationship_info(tree, media, image_sizes):
"""
There is a separate file that holds the targets for links as well as the targets
for images. Return a dictionary based on the relationship id and the
target.
"""
if tree is None:
return {}
result = {}
# Loop through each relationship.
for el in tree.iter():
el_id = el.get('Id')
if el_id is None:
continue
# Store the target in the result dict.
target = el.get('Target')
if any(
target.lower().endswith(ext) for
ext in IMAGE_EXTENSIONS_TO_SKIP):
continue
if target in media:
image_size = image_sizes.get(el_id)
target = convert_image(media[target], image_size)
# cgi will replace things like & < > with & < >
result[el_id] = cgi.escape(target)
return result
|
def get_relationship_info(tree, media, image_sizes):
"""
There is a separate file that holds the targets for links as well as the targets
for images. Return a dictionary based on the relationship id and the
target.
"""
if tree is None:
return {}
result = {}
# Loop through each relationship.
for el in tree.iter():
el_id = el.get('Id')
if el_id is None:
continue
# Store the target in the result dict.
target = el.get('Target')
if any(
target.lower().endswith(ext) for
ext in IMAGE_EXTENSIONS_TO_SKIP):
continue
if target in media:
image_size = image_sizes.get(el_id)
target = convert_image(media[target], image_size)
# cgi will replace things like & < > with & < >
result[el_id] = cgi.escape(target)
return result
|
[
"There",
"is",
"a",
"separate",
"file",
"holds",
"the",
"targets",
"to",
"links",
"as",
"well",
"as",
"the",
"targets",
"for",
"images",
".",
"Return",
"a",
"dictionary",
"based",
"on",
"the",
"relationship",
"id",
"and",
"the",
"target",
"."
] |
PolicyStat/docx2html
|
python
|
https://github.com/PolicyStat/docx2html/blob/2dc4afd1e3a3f2f0b357d0bff903eb58bcc94429/docx2html/core.py#L752-L778
|
[
"def",
"get_relationship_info",
"(",
"tree",
",",
"media",
",",
"image_sizes",
")",
":",
"if",
"tree",
"is",
"None",
":",
"return",
"{",
"}",
"result",
"=",
"{",
"}",
"# Loop through each relationship.",
"for",
"el",
"in",
"tree",
".",
"iter",
"(",
")",
":",
"el_id",
"=",
"el",
".",
"get",
"(",
"'Id'",
")",
"if",
"el_id",
"is",
"None",
":",
"continue",
"# Store the target in the result dict.",
"target",
"=",
"el",
".",
"get",
"(",
"'Target'",
")",
"if",
"any",
"(",
"target",
".",
"lower",
"(",
")",
".",
"endswith",
"(",
"ext",
")",
"for",
"ext",
"in",
"IMAGE_EXTENSIONS_TO_SKIP",
")",
":",
"continue",
"if",
"target",
"in",
"media",
":",
"image_size",
"=",
"image_sizes",
".",
"get",
"(",
"el_id",
")",
"target",
"=",
"convert_image",
"(",
"media",
"[",
"target",
"]",
",",
"image_size",
")",
"# cgi will replace things like & < > with & < >",
"result",
"[",
"el_id",
"]",
"=",
"cgi",
".",
"escape",
"(",
"target",
")",
"return",
"result"
] |
2dc4afd1e3a3f2f0b357d0bff903eb58bcc94429
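A sketch with a hyperlink-only rels tree and no media, assuming the target does not end in one of the skipped image extensions; the use of cgi.escape dates this code to the Python 2 era:

from lxml import etree
from docx2html.core import get_relationship_info  # assumed path

rels = etree.fromstring(
    '<Relationships xmlns="http://schemas.openxmlformats.org/'
    'package/2006/relationships">'
    '<Relationship Id="rId4" Target="http://example.com/?a=1&amp;b=2"/>'
    '</Relationships>'
)
result = get_relationship_info(rels, media={}, image_sizes={})
# The ampersand comes back re-escaped by cgi.escape.
assert result == {'rId4': 'http://example.com/?a=1&amp;b=2'}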
|
test
|
_get_document_data
|
``f`` is a ``ZipFile`` that is open
Extract out the document data, numbering data and the relationship data.
|
docx2html/core.py
|
def _get_document_data(f, image_handler=None):
'''
``f`` is a ``ZipFile`` that is open
Extract out the document data, numbering data and the relationship data.
'''
if image_handler is None:
def image_handler(image_id, relationship_dict):
return relationship_dict.get(image_id)
document_xml = None
numbering_xml = None
relationship_xml = None
styles_xml = None
parser = etree.XMLParser(strip_cdata=False)
path, _ = os.path.split(f.filename)
media = {}
image_sizes = {}
# Loop through the files in the zip file.
for item in f.infolist():
# This file holds all the content of the document.
if item.filename == 'word/document.xml':
xml = f.read(item.filename)
document_xml = etree.fromstring(xml, parser)
# This file tells document.xml how lists should look.
elif item.filename == 'word/numbering.xml':
xml = f.read(item.filename)
numbering_xml = etree.fromstring(xml, parser)
elif item.filename == 'word/styles.xml':
xml = f.read(item.filename)
styles_xml = etree.fromstring(xml, parser)
# This file holds the targets for hyperlinks and images.
elif item.filename == 'word/_rels/document.xml.rels':
xml = f.read(item.filename)
try:
relationship_xml = etree.fromstring(xml, parser)
except XMLSyntaxError:
relationship_xml = etree.fromstring('<xml></xml>', parser)
if item.filename.startswith('word/media/'):
# Strip off the leading word/
media[item.filename[len('word/'):]] = f.extract(
item.filename,
path,
)
# Close the file pointer.
f.close()
# Get dictionaries for the numbering and the relationships.
numbering_dict = get_numbering_info(numbering_xml)
image_sizes = get_image_sizes(document_xml)
relationship_dict = get_relationship_info(
relationship_xml,
media,
image_sizes
)
styles_dict = get_style_dict(styles_xml)
font_sizes_dict = defaultdict(int)
if DETECT_FONT_SIZE:
font_sizes_dict = get_font_sizes_dict(document_xml, styles_dict)
meta_data = MetaData(
numbering_dict=numbering_dict,
relationship_dict=relationship_dict,
styles_dict=styles_dict,
font_sizes_dict=font_sizes_dict,
image_handler=image_handler,
image_sizes=image_sizes,
)
return document_xml, meta_data
|
def _get_document_data(f, image_handler=None):
'''
``f`` is a ``ZipFile`` that is open
Extract out the document data, numbering data and the relationship data.
'''
if image_handler is None:
def image_handler(image_id, relationship_dict):
return relationship_dict.get(image_id)
document_xml = None
numbering_xml = None
relationship_xml = None
styles_xml = None
parser = etree.XMLParser(strip_cdata=False)
path, _ = os.path.split(f.filename)
media = {}
image_sizes = {}
# Loop through the files in the zip file.
for item in f.infolist():
# This file holds all the content of the document.
if item.filename == 'word/document.xml':
xml = f.read(item.filename)
document_xml = etree.fromstring(xml, parser)
# This file tells document.xml how lists should look.
elif item.filename == 'word/numbering.xml':
xml = f.read(item.filename)
numbering_xml = etree.fromstring(xml, parser)
elif item.filename == 'word/styles.xml':
xml = f.read(item.filename)
styles_xml = etree.fromstring(xml, parser)
# This file holds the targets for hyperlinks and images.
elif item.filename == 'word/_rels/document.xml.rels':
xml = f.read(item.filename)
try:
relationship_xml = etree.fromstring(xml, parser)
except XMLSyntaxError:
relationship_xml = etree.fromstring('<xml></xml>', parser)
if item.filename.startswith('word/media/'):
# Strip off the leading word/
media[item.filename[len('word/'):]] = f.extract(
item.filename,
path,
)
# Close the file pointer.
f.close()
# Get dictionaries for the numbering and the relationships.
numbering_dict = get_numbering_info(numbering_xml)
image_sizes = get_image_sizes(document_xml)
relationship_dict = get_relationship_info(
relationship_xml,
media,
image_sizes
)
styles_dict = get_style_dict(styles_xml)
font_sizes_dict = defaultdict(int)
if DETECT_FONT_SIZE:
font_sizes_dict = get_font_sizes_dict(document_xml, styles_dict)
meta_data = MetaData(
numbering_dict=numbering_dict,
relationship_dict=relationship_dict,
styles_dict=styles_dict,
font_sizes_dict=font_sizes_dict,
image_handler=image_handler,
image_sizes=image_sizes,
)
return document_xml, meta_data
|
[
"f",
"is",
"a",
"ZipFile",
"that",
"is",
"open",
"Extract",
"out",
"the",
"document",
"data",
"numbering",
"data",
"and",
"the",
"relationship",
"data",
"."
] |
PolicyStat/docx2html
|
python
|
https://github.com/PolicyStat/docx2html/blob/2dc4afd1e3a3f2f0b357d0bff903eb58bcc94429/docx2html/core.py#L816-L882
|
[
"def",
"_get_document_data",
"(",
"f",
",",
"image_handler",
"=",
"None",
")",
":",
"if",
"image_handler",
"is",
"None",
":",
"def",
"image_handler",
"(",
"image_id",
",",
"relationship_dict",
")",
":",
"return",
"relationship_dict",
".",
"get",
"(",
"image_id",
")",
"document_xml",
"=",
"None",
"numbering_xml",
"=",
"None",
"relationship_xml",
"=",
"None",
"styles_xml",
"=",
"None",
"parser",
"=",
"etree",
".",
"XMLParser",
"(",
"strip_cdata",
"=",
"False",
")",
"path",
",",
"_",
"=",
"os",
".",
"path",
".",
"split",
"(",
"f",
".",
"filename",
")",
"media",
"=",
"{",
"}",
"image_sizes",
"=",
"{",
"}",
"# Loop through the files in the zip file.",
"for",
"item",
"in",
"f",
".",
"infolist",
"(",
")",
":",
"# This file holds all the content of the document.",
"if",
"item",
".",
"filename",
"==",
"'word/document.xml'",
":",
"xml",
"=",
"f",
".",
"read",
"(",
"item",
".",
"filename",
")",
"document_xml",
"=",
"etree",
".",
"fromstring",
"(",
"xml",
",",
"parser",
")",
"# This file tells document.xml how lists should look.",
"elif",
"item",
".",
"filename",
"==",
"'word/numbering.xml'",
":",
"xml",
"=",
"f",
".",
"read",
"(",
"item",
".",
"filename",
")",
"numbering_xml",
"=",
"etree",
".",
"fromstring",
"(",
"xml",
",",
"parser",
")",
"elif",
"item",
".",
"filename",
"==",
"'word/styles.xml'",
":",
"xml",
"=",
"f",
".",
"read",
"(",
"item",
".",
"filename",
")",
"styles_xml",
"=",
"etree",
".",
"fromstring",
"(",
"xml",
",",
"parser",
")",
"# This file holds the targets for hyperlinks and images.",
"elif",
"item",
".",
"filename",
"==",
"'word/_rels/document.xml.rels'",
":",
"xml",
"=",
"f",
".",
"read",
"(",
"item",
".",
"filename",
")",
"try",
":",
"relationship_xml",
"=",
"etree",
".",
"fromstring",
"(",
"xml",
",",
"parser",
")",
"except",
"XMLSyntaxError",
":",
"relationship_xml",
"=",
"etree",
".",
"fromstring",
"(",
"'<xml></xml>'",
",",
"parser",
")",
"if",
"item",
".",
"filename",
".",
"startswith",
"(",
"'word/media/'",
")",
":",
"# Strip off the leading word/",
"media",
"[",
"item",
".",
"filename",
"[",
"len",
"(",
"'word/'",
")",
":",
"]",
"]",
"=",
"f",
".",
"extract",
"(",
"item",
".",
"filename",
",",
"path",
",",
")",
"# Close the file pointer.",
"f",
".",
"close",
"(",
")",
"# Get dictionaries for the numbering and the relationships.",
"numbering_dict",
"=",
"get_numbering_info",
"(",
"numbering_xml",
")",
"image_sizes",
"=",
"get_image_sizes",
"(",
"document_xml",
")",
"relationship_dict",
"=",
"get_relationship_info",
"(",
"relationship_xml",
",",
"media",
",",
"image_sizes",
")",
"styles_dict",
"=",
"get_style_dict",
"(",
"styles_xml",
")",
"font_sizes_dict",
"=",
"defaultdict",
"(",
"int",
")",
"if",
"DETECT_FONT_SIZE",
":",
"font_sizes_dict",
"=",
"get_font_sizes_dict",
"(",
"document_xml",
",",
"styles_dict",
")",
"meta_data",
"=",
"MetaData",
"(",
"numbering_dict",
"=",
"numbering_dict",
",",
"relationship_dict",
"=",
"relationship_dict",
",",
"styles_dict",
"=",
"styles_dict",
",",
"font_sizes_dict",
"=",
"font_sizes_dict",
",",
"image_handler",
"=",
"image_handler",
",",
"image_sizes",
"=",
"image_sizes",
",",
")",
"return",
"document_xml",
",",
"meta_data"
] |
2dc4afd1e3a3f2f0b357d0bff903eb58bcc94429
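Typical call shape, hedged (import path and input file are assumptions; note the function closes the ZipFile itself, so the caller should not reuse it):

import zipfile
from docx2html.core import _get_document_data  # assumed path

f = zipfile.ZipFile('example.docx')  # hypothetical input file
document_xml, meta_data = _get_document_data(f)  # also closes f
# meta_data bundles numbering_dict, relationship_dict, styles_dict,
# font_sizes_dict, the image_handler and image_sizes.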
|
test
|
get_ordered_list_type
|
Return the list type. If numId or ilvl is not in the numbering dict then
default to returning decimal.
This function only cares about ordered lists, unordered lists get dealt
with elsewhere.
|
docx2html/core.py
|
def get_ordered_list_type(meta_data, numId, ilvl):
"""
Return the list type. If numId or ilvl is not in the numbering dict then
default to returning decimal.
This function only cares about ordered lists, unordered lists get dealt
with elsewhere.
"""
# Early return if numId or ilvl are not valid
numbering_dict = meta_data.numbering_dict
if numId not in numbering_dict:
return DEFAULT_LIST_NUMBERING_STYLE
if ilvl not in numbering_dict[numId]:
return DEFAULT_LIST_NUMBERING_STYLE
return meta_data.numbering_dict[numId][ilvl]
|
def get_ordered_list_type(meta_data, numId, ilvl):
"""
Return the list type. If numId or ilvl is not in the numbering dict then
default to returning decimal.
This function only cares about ordered lists, unordered lists get dealt
with elsewhere.
"""
# Early return if numId or ilvl are not valid
numbering_dict = meta_data.numbering_dict
if numId not in numbering_dict:
return DEFAULT_LIST_NUMBERING_STYLE
if ilvl not in numbering_dict[numId]:
return DEFAULT_LIST_NUMBERING_STYLE
return meta_data.numbering_dict[numId][ilvl]
|
[
"Return",
"the",
"list",
"type",
".",
"If",
"numId",
"or",
"ilvl",
"not",
"in",
"the",
"numbering",
"dict",
"then",
"default",
"to",
"returning",
"decimal",
"."
] |
PolicyStat/docx2html
|
python
|
https://github.com/PolicyStat/docx2html/blob/2dc4afd1e3a3f2f0b357d0bff903eb58bcc94429/docx2html/core.py#L890-L905
|
[
"def",
"get_ordered_list_type",
"(",
"meta_data",
",",
"numId",
",",
"ilvl",
")",
":",
"# Early return if numId or ilvl are not valid",
"numbering_dict",
"=",
"meta_data",
".",
"numbering_dict",
"if",
"numId",
"not",
"in",
"numbering_dict",
":",
"return",
"DEFAULT_LIST_NUMBERING_STYLE",
"if",
"ilvl",
"not",
"in",
"numbering_dict",
"[",
"numId",
"]",
":",
"return",
"DEFAULT_LIST_NUMBERING_STYLE",
"return",
"meta_data",
".",
"numbering_dict",
"[",
"numId",
"]",
"[",
"ilvl",
"]"
] |
2dc4afd1e3a3f2f0b357d0bff903eb58bcc94429
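The fallback behaviour in a sketch; a namedtuple stands in for the MetaData container since only numbering_dict is consulted (import path assumed):

from collections import namedtuple
from docx2html.core import get_ordered_list_type  # assumed path

Meta = namedtuple('Meta', 'numbering_dict')  # minimal stand-in for MetaData
meta = Meta(numbering_dict={'1': {0: 'lowerLetter'}})
assert get_ordered_list_type(meta, '1', 0) == 'lowerLetter'
# Unknown numId and unknown ilvl both fall back to the same default
# (per the docstring, decimal).
assert (get_ordered_list_type(meta, '99', 0) ==
        get_ordered_list_type(meta, '1', 5))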
|
test
|
build_list
|
Build the list structure and return the root list
|
docx2html/core.py
|
def build_list(li_nodes, meta_data):
"""
Build the list structure and return the root list
"""
# Need to keep track of all incomplete nested lists.
ol_dict = {}
# Need to keep track of the current indentation level.
current_ilvl = -1
# Need to keep track of the current list id.
current_numId = -1
# Need to keep track of the list that new li tags should be added to.
current_ol = None
# Store the first list created (the root list) for the return value.
root_ol = None
visited_nodes = []
list_contents = []
def _build_li(list_contents):
data = '<br />'.join(t for t in list_contents if t is not None)
return etree.XML('<li>%s</li>' % data)
def _build_non_li_content(el, meta_data):
w_namespace = get_namespace(el, 'w')
if el.tag == '%stbl' % w_namespace:
new_el, visited_nodes = build_table(el, meta_data)
return etree.tostring(new_el), visited_nodes
elif el.tag == '%sp' % w_namespace:
return get_element_content(el, meta_data), [el]
if has_text(el):
raise UnintendedTag('Did not expect %s' % el.tag)
def _merge_lists(ilvl, current_ilvl, ol_dict, current_ol):
for i in reversed(range(ilvl, current_ilvl)):
# Any list that is more indented than ilvl needs to
# be merged to the list before it.
if i not in ol_dict:
continue
if ol_dict[i] is not current_ol:
if ol_dict[i] is current_ol:
continue
ol_dict[i][-1].append(current_ol)
current_ol = ol_dict[i]
# Clean up finished nested lists.
for key in list(ol_dict):
if key > ilvl:
del ol_dict[key]
return current_ol
for li_node in li_nodes:
w_namespace = get_namespace(li_node, 'w')
if not is_li(li_node, meta_data):
# Get the content and visited nodes
new_el, el_visited_nodes = _build_non_li_content(
li_node,
meta_data,
)
list_contents.append(new_el)
visited_nodes.extend(el_visited_nodes)
continue
if list_contents:
li_el = _build_li(list_contents)
list_contents = []
current_ol.append(li_el)
# Get the data needed to build the current list item
list_contents.append(get_element_content(
li_node,
meta_data,
))
ilvl = get_ilvl(li_node, w_namespace)
numId = get_numId(li_node, w_namespace)
list_type = get_ordered_list_type(meta_data, numId, ilvl)
# If the ilvl is greater than the current_ilvl or the list id is
# changing then we have the first li tag in a nested list. We need to
# create a new list object and update all of our variables for keeping
# track.
if (ilvl > current_ilvl) or (numId != current_numId):
# Only create a new list
ol_dict[ilvl] = create_list(list_type)
current_ol = ol_dict[ilvl]
current_ilvl = ilvl
current_numId = numId
# If neither case above is True then we need to close all lists greater
# than ilvl and then remove them from the ol_dict
else:
# Merge any nested lists that need to be merged.
current_ol = _merge_lists(
ilvl=ilvl,
current_ilvl=current_ilvl,
ol_dict=ol_dict,
current_ol=current_ol,
)
# Set the root list after the first list is created.
if root_ol is None:
root_ol = current_ol
# Set the current list.
if ilvl in ol_dict:
current_ol = ol_dict[ilvl]
else:
# In some instances the ilvl is not in the ol_dict, if that is the
# case, create it here (not sure how this happens but it has happened
# before.) Only do this if the current_ol is not the root_ol,
# otherwise etree will crash.
if current_ol is not root_ol:
# Merge the current_ol into the root_ol. _merge_lists is not
# equipped to handle this situation since the only way to get
# into this block of code is to have mangled ilvls.
root_ol[-1].append(current_ol)
# Reset the current_ol
current_ol = create_list(list_type)
# Create the li element.
visited_nodes.extend(list(li_node.iter()))
# If a list item is the last thing in a document, then you will need to add
# it here. Should probably figure out how to get the above logic to deal
# with it.
if list_contents:
li_el = _build_li(list_contents)
list_contents = []
current_ol.append(li_el)
# Merge up any nested lists that have not been merged.
current_ol = _merge_lists(
ilvl=0,
current_ilvl=current_ilvl,
ol_dict=ol_dict,
current_ol=current_ol,
)
return root_ol, visited_nodes
|
def build_list(li_nodes, meta_data):
"""
Build the list structure and return the root list
"""
# Need to keep track of all incomplete nested lists.
ol_dict = {}
# Need to keep track of the current indentation level.
current_ilvl = -1
# Need to keep track of the current list id.
current_numId = -1
# Need to keep track of the list that new li tags should be added to.
current_ol = None
# Store the first list created (the root list) for the return value.
root_ol = None
visited_nodes = []
list_contents = []
def _build_li(list_contents):
data = '<br />'.join(t for t in list_contents if t is not None)
return etree.XML('<li>%s</li>' % data)
def _build_non_li_content(el, meta_data):
w_namespace = get_namespace(el, 'w')
if el.tag == '%stbl' % w_namespace:
new_el, visited_nodes = build_table(el, meta_data)
return etree.tostring(new_el), visited_nodes
elif el.tag == '%sp' % w_namespace:
return get_element_content(el, meta_data), [el]
if has_text(el):
raise UnintendedTag('Did not expect %s' % el.tag)
def _merge_lists(ilvl, current_ilvl, ol_dict, current_ol):
for i in reversed(range(ilvl, current_ilvl)):
# Any list that is more indented than ilvl needs to
# be merged to the list before it.
if i not in ol_dict:
continue
if ol_dict[i] is not current_ol:
if ol_dict[i] is current_ol:
continue
ol_dict[i][-1].append(current_ol)
current_ol = ol_dict[i]
# Clean up finished nested lists.
for key in list(ol_dict):
if key > ilvl:
del ol_dict[key]
return current_ol
for li_node in li_nodes:
w_namespace = get_namespace(li_node, 'w')
if not is_li(li_node, meta_data):
# Get the content and visited nodes
new_el, el_visited_nodes = _build_non_li_content(
li_node,
meta_data,
)
list_contents.append(new_el)
visited_nodes.extend(el_visited_nodes)
continue
if list_contents:
li_el = _build_li(list_contents)
list_contents = []
current_ol.append(li_el)
# Get the data needed to build the current list item
list_contents.append(get_element_content(
li_node,
meta_data,
))
ilvl = get_ilvl(li_node, w_namespace)
numId = get_numId(li_node, w_namespace)
list_type = get_ordered_list_type(meta_data, numId, ilvl)
# If the ilvl is greater than the current_ilvl or the list id is
# changing then we have the first li tag in a nested list. We need to
# create a new list object and update all of our variables for keeping
# track.
if (ilvl > current_ilvl) or (numId != current_numId):
# Only create a new list
ol_dict[ilvl] = create_list(list_type)
current_ol = ol_dict[ilvl]
current_ilvl = ilvl
current_numId = numId
# If neither case above is True then we need to close all lists greater
# than ilvl and then remove them from the ol_dict
else:
# Merge any nested lists that need to be merged.
current_ol = _merge_lists(
ilvl=ilvl,
current_ilvl=current_ilvl,
ol_dict=ol_dict,
current_ol=current_ol,
)
# Set the root list after the first list is created.
if root_ol is None:
root_ol = current_ol
# Set the current list.
if ilvl in ol_dict:
current_ol = ol_dict[ilvl]
else:
# In some instances the ilvl is not in the ol_dict, if that is the
# case, create it here (not sure how this happens but it has happened
# before.) Only do this if the current_ol is not the root_ol,
# otherwise etree will crash.
if current_ol is not root_ol:
# Merge the current_ol into the root_ol. _merge_lists is not
# equipped to handle this situation since the only way to get
# into this block of code is to have mangled ilvls.
root_ol[-1].append(current_ol)
# Reset the current_ol
current_ol = create_list(list_type)
# Create the li element.
visited_nodes.extend(list(li_node.iter()))
# If a list item is the last thing in a document, then you will need to add
# it here. Should probably figure out how to get the above logic to deal
# with it.
if list_contents:
li_el = _build_li(list_contents)
list_contents = []
current_ol.append(li_el)
# Merge up any nested lists that have not been merged.
current_ol = _merge_lists(
ilvl=0,
current_ilvl=current_ilvl,
ol_dict=ol_dict,
current_ol=current_ol,
)
return root_ol, visited_nodes
|
[
"Build",
"the",
"list",
"structure",
"and",
"return",
"the",
"root",
"list"
] |
PolicyStat/docx2html
|
python
|
https://github.com/PolicyStat/docx2html/blob/2dc4afd1e3a3f2f0b357d0bff903eb58bcc94429/docx2html/core.py#L908-L1048
|
[
"def",
"build_list",
"(",
"li_nodes",
",",
"meta_data",
")",
":",
"# Need to keep track of all incomplete nested lists.",
"ol_dict",
"=",
"{",
"}",
"# Need to keep track of the current indentation level.",
"current_ilvl",
"=",
"-",
"1",
"# Need to keep track of the current list id.",
"current_numId",
"=",
"-",
"1",
"# Need to keep track of list that new li tags should be added too.",
"current_ol",
"=",
"None",
"# Store the first list created (the root list) for the return value.",
"root_ol",
"=",
"None",
"visited_nodes",
"=",
"[",
"]",
"list_contents",
"=",
"[",
"]",
"def",
"_build_li",
"(",
"list_contents",
")",
":",
"data",
"=",
"'<br />'",
".",
"join",
"(",
"t",
"for",
"t",
"in",
"list_contents",
"if",
"t",
"is",
"not",
"None",
")",
"return",
"etree",
".",
"XML",
"(",
"'<li>%s</li>'",
"%",
"data",
")",
"def",
"_build_non_li_content",
"(",
"el",
",",
"meta_data",
")",
":",
"w_namespace",
"=",
"get_namespace",
"(",
"el",
",",
"'w'",
")",
"if",
"el",
".",
"tag",
"==",
"'%stbl'",
"%",
"w_namespace",
":",
"new_el",
",",
"visited_nodes",
"=",
"build_table",
"(",
"el",
",",
"meta_data",
")",
"return",
"etree",
".",
"tostring",
"(",
"new_el",
")",
",",
"visited_nodes",
"elif",
"el",
".",
"tag",
"==",
"'%sp'",
"%",
"w_namespace",
":",
"return",
"get_element_content",
"(",
"el",
",",
"meta_data",
")",
",",
"[",
"el",
"]",
"if",
"has_text",
"(",
"el",
")",
":",
"raise",
"UnintendedTag",
"(",
"'Did not expect %s'",
"%",
"el",
".",
"tag",
")",
"def",
"_merge_lists",
"(",
"ilvl",
",",
"current_ilvl",
",",
"ol_dict",
",",
"current_ol",
")",
":",
"for",
"i",
"in",
"reversed",
"(",
"range",
"(",
"ilvl",
",",
"current_ilvl",
")",
")",
":",
"# Any list that is more indented that ilvl needs to",
"# be merged to the list before it.",
"if",
"i",
"not",
"in",
"ol_dict",
":",
"continue",
"if",
"ol_dict",
"[",
"i",
"]",
"is",
"not",
"current_ol",
":",
"if",
"ol_dict",
"[",
"i",
"]",
"is",
"current_ol",
":",
"continue",
"ol_dict",
"[",
"i",
"]",
"[",
"-",
"1",
"]",
".",
"append",
"(",
"current_ol",
")",
"current_ol",
"=",
"ol_dict",
"[",
"i",
"]",
"# Clean up finished nested lists.",
"for",
"key",
"in",
"list",
"(",
"ol_dict",
")",
":",
"if",
"key",
">",
"ilvl",
":",
"del",
"ol_dict",
"[",
"key",
"]",
"return",
"current_ol",
"for",
"li_node",
"in",
"li_nodes",
":",
"w_namespace",
"=",
"get_namespace",
"(",
"li_node",
",",
"'w'",
")",
"if",
"not",
"is_li",
"(",
"li_node",
",",
"meta_data",
")",
":",
"# Get the content and visited nodes",
"new_el",
",",
"el_visited_nodes",
"=",
"_build_non_li_content",
"(",
"li_node",
",",
"meta_data",
",",
")",
"list_contents",
".",
"append",
"(",
"new_el",
")",
"visited_nodes",
".",
"extend",
"(",
"el_visited_nodes",
")",
"continue",
"if",
"list_contents",
":",
"li_el",
"=",
"_build_li",
"(",
"list_contents",
")",
"list_contents",
"=",
"[",
"]",
"current_ol",
".",
"append",
"(",
"li_el",
")",
"# Get the data needed to build the current list item",
"list_contents",
".",
"append",
"(",
"get_element_content",
"(",
"li_node",
",",
"meta_data",
",",
")",
")",
"ilvl",
"=",
"get_ilvl",
"(",
"li_node",
",",
"w_namespace",
")",
"numId",
"=",
"get_numId",
"(",
"li_node",
",",
"w_namespace",
")",
"list_type",
"=",
"get_ordered_list_type",
"(",
"meta_data",
",",
"numId",
",",
"ilvl",
")",
"# If the ilvl is greater than the current_ilvl or the list id is",
"# changing then we have the first li tag in a nested list. We need to",
"# create a new list object and update all of our variables for keeping",
"# track.",
"if",
"(",
"ilvl",
">",
"current_ilvl",
")",
"or",
"(",
"numId",
"!=",
"current_numId",
")",
":",
"# Only create a new list",
"ol_dict",
"[",
"ilvl",
"]",
"=",
"create_list",
"(",
"list_type",
")",
"current_ol",
"=",
"ol_dict",
"[",
"ilvl",
"]",
"current_ilvl",
"=",
"ilvl",
"current_numId",
"=",
"numId",
"# Both cases above are not True then we need to close all lists greater",
"# than ilvl and then remove them from the ol_dict",
"else",
":",
"# Merge any nested lists that need to be merged.",
"current_ol",
"=",
"_merge_lists",
"(",
"ilvl",
"=",
"ilvl",
",",
"current_ilvl",
"=",
"current_ilvl",
",",
"ol_dict",
"=",
"ol_dict",
",",
"current_ol",
"=",
"current_ol",
",",
")",
"# Set the root list after the first list is created.",
"if",
"root_ol",
"is",
"None",
":",
"root_ol",
"=",
"current_ol",
"# Set the current list.",
"if",
"ilvl",
"in",
"ol_dict",
":",
"current_ol",
"=",
"ol_dict",
"[",
"ilvl",
"]",
"else",
":",
"# In some instances the ilvl is not in the ol_dict, if that is the",
"# case, create it here (not sure how this happens but it has",
"# before.) Only do this if the current_ol is not the root_ol,",
"# otherwise etree will crash.",
"if",
"current_ol",
"is",
"not",
"root_ol",
":",
"# Merge the current_ol into the root_ol. _merge_lists is not",
"# equipped to handle this situation since the only way to get",
"# into this block of code is to have mangled ilvls.",
"root_ol",
"[",
"-",
"1",
"]",
".",
"append",
"(",
"current_ol",
")",
"# Reset the current_ol",
"current_ol",
"=",
"create_list",
"(",
"list_type",
")",
"# Create the li element.",
"visited_nodes",
".",
"extend",
"(",
"list",
"(",
"li_node",
".",
"iter",
"(",
")",
")",
")",
"# If a list item is the last thing in a document, then you will need to add",
"# it here. Should probably figure out how to get the above logic to deal",
"# with it.",
"if",
"list_contents",
":",
"li_el",
"=",
"_build_li",
"(",
"list_contents",
")",
"list_contents",
"=",
"[",
"]",
"current_ol",
".",
"append",
"(",
"li_el",
")",
"# Merge up any nested lists that have not been merged.",
"current_ol",
"=",
"_merge_lists",
"(",
"ilvl",
"=",
"0",
",",
"current_ilvl",
"=",
"current_ilvl",
",",
"ol_dict",
"=",
"ol_dict",
",",
"current_ol",
"=",
"current_ol",
",",
")",
"return",
"root_ol",
",",
"visited_nodes"
] |
2dc4afd1e3a3f2f0b357d0bff903eb58bcc94429
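The heart of the nesting logic is the merge step: a deeper list is appended to the last <li> of the shallower list. A conceptual sketch in plain lxml, not a call into build_list itself:

from lxml import etree

outer = etree.XML('<ol><li>one</li></ol>')
inner = etree.XML('<ol><li>one.a</li></ol>')
# Mirrors ol_dict[i][-1].append(current_ol) above: nest under the last li.
outer[-1].append(inner)
etree.tostring(outer)
# -> '<ol><li>one<ol><li>one.a</li></ol></li></ol>'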
|
test
|
build_tr
|
This will return a single tr element, with all tds already populated.
|
docx2html/core.py
|
def build_tr(tr, meta_data, row_spans):
"""
This will return a single tr element, with all tds already populated.
"""
# Create a blank tr element.
tr_el = etree.Element('tr')
w_namespace = get_namespace(tr, 'w')
visited_nodes = []
for el in tr:
if el in visited_nodes:
continue
visited_nodes.append(el)
# Find the table cells.
if el.tag == '%stc' % w_namespace:
v_merge = get_v_merge(el)
# If there is a v_merge and it is not restart then this cell can be
# ignored.
if (
v_merge is not None and
v_merge.get('%sval' % w_namespace) != 'restart'):
continue
# Loop through each and build a list of all the content.
texts = []
for td_content in el:
# Since we are doing look-aheads in this loop we need to check
# again to see if we have already visited the node.
if td_content in visited_nodes:
continue
# Check to see if it is a list or a regular paragraph.
if is_li(td_content, meta_data):
# If it is a list, create the list and update
# visited_nodes.
li_nodes = get_single_list_nodes_data(
td_content,
meta_data,
)
list_el, list_visited_nodes = build_list(
li_nodes,
meta_data,
)
visited_nodes.extend(list_visited_nodes)
texts.append(etree.tostring(list_el))
elif td_content.tag == '%stbl' % w_namespace:
table_el, table_visited_nodes = build_table(
td_content,
meta_data,
)
visited_nodes.extend(table_visited_nodes)
texts.append(etree.tostring(table_el))
elif td_content.tag == '%stcPr' % w_namespace:
# Do nothing
visited_nodes.append(td_content)
continue
else:
text = get_element_content(
td_content,
meta_data,
is_td=True,
)
texts.append(text)
data = '<br />'.join(t for t in texts if t is not None)
td_el = etree.XML('<td>%s</td>' % data)
# if there is a colspan then set it here.
colspan = get_grid_span(el)
if colspan > 1:
td_el.set('colspan', '%d' % colspan)
v_merge = get_v_merge(el)
# If this td has a v_merge and it is restart then set the rowspan
# here.
if (
v_merge is not None and
v_merge.get('%sval' % w_namespace) == 'restart'):
rowspan = next(row_spans)
td_el.set('rowspan', '%d' % rowspan)
tr_el.append(td_el)
return tr_el
|
def build_tr(tr, meta_data, row_spans):
"""
This will return a single tr element, with all tds already populated.
"""
# Create a blank tr element.
tr_el = etree.Element('tr')
w_namespace = get_namespace(tr, 'w')
visited_nodes = []
for el in tr:
if el in visited_nodes:
continue
visited_nodes.append(el)
# Find the table cells.
if el.tag == '%stc' % w_namespace:
v_merge = get_v_merge(el)
# If there is a v_merge and it is not restart then this cell can be
# ignored.
if (
v_merge is not None and
v_merge.get('%sval' % w_namespace) != 'restart'):
continue
# Loop through each and build a list of all the content.
texts = []
for td_content in el:
# Since we are doing look-aheads in this loop we need to check
# again to see if we have already visited the node.
if td_content in visited_nodes:
continue
# Check to see if it is a list or a regular paragraph.
if is_li(td_content, meta_data):
# If it is a list, create the list and update
# visited_nodes.
li_nodes = get_single_list_nodes_data(
td_content,
meta_data,
)
list_el, list_visited_nodes = build_list(
li_nodes,
meta_data,
)
visited_nodes.extend(list_visited_nodes)
texts.append(etree.tostring(list_el))
elif td_content.tag == '%stbl' % w_namespace:
table_el, table_visited_nodes = build_table(
td_content,
meta_data,
)
visited_nodes.extend(table_visited_nodes)
texts.append(etree.tostring(table_el))
elif td_content.tag == '%stcPr' % w_namespace:
# Do nothing
visited_nodes.append(td_content)
continue
else:
text = get_element_content(
td_content,
meta_data,
is_td=True,
)
texts.append(text)
data = '<br />'.join(t for t in texts if t is not None)
td_el = etree.XML('<td>%s</td>' % data)
# if there is a colspan then set it here.
colspan = get_grid_span(el)
if colspan > 1:
td_el.set('colspan', '%d' % colspan)
v_merge = get_v_merge(el)
# If this td has a v_merge and it is restart then set the rowspan
# here.
if (
v_merge is not None and
v_merge.get('%sval' % w_namespace) == 'restart'):
rowspan = next(row_spans)
td_el.set('rowspan', '%d' % rowspan)
tr_el.append(td_el)
return tr_el
|
[
"This",
"will",
"return",
"a",
"single",
"tr",
"element",
"with",
"all",
"tds",
"already",
"populated",
"."
] |
PolicyStat/docx2html
|
python
|
https://github.com/PolicyStat/docx2html/blob/2dc4afd1e3a3f2f0b357d0bff903eb58bcc94429/docx2html/core.py#L1052-L1133
|
[
"def",
"build_tr",
"(",
"tr",
",",
"meta_data",
",",
"row_spans",
")",
":",
"# Create a blank tr element.",
"tr_el",
"=",
"etree",
".",
"Element",
"(",
"'tr'",
")",
"w_namespace",
"=",
"get_namespace",
"(",
"tr",
",",
"'w'",
")",
"visited_nodes",
"=",
"[",
"]",
"for",
"el",
"in",
"tr",
":",
"if",
"el",
"in",
"visited_nodes",
":",
"continue",
"visited_nodes",
".",
"append",
"(",
"el",
")",
"# Find the table cells.",
"if",
"el",
".",
"tag",
"==",
"'%stc'",
"%",
"w_namespace",
":",
"v_merge",
"=",
"get_v_merge",
"(",
"el",
")",
"# If there is a v_merge and it is not restart then this cell can be",
"# ignored.",
"if",
"(",
"v_merge",
"is",
"not",
"None",
"and",
"v_merge",
".",
"get",
"(",
"'%sval'",
"%",
"w_namespace",
")",
"!=",
"'restart'",
")",
":",
"continue",
"# Loop through each and build a list of all the content.",
"texts",
"=",
"[",
"]",
"for",
"td_content",
"in",
"el",
":",
"# Since we are doing look-a-heads in this loop we need to check",
"# again to see if we have already visited the node.",
"if",
"td_content",
"in",
"visited_nodes",
":",
"continue",
"# Check to see if it is a list or a regular paragraph.",
"if",
"is_li",
"(",
"td_content",
",",
"meta_data",
")",
":",
"# If it is a list, create the list and update",
"# visited_nodes.",
"li_nodes",
"=",
"get_single_list_nodes_data",
"(",
"td_content",
",",
"meta_data",
",",
")",
"list_el",
",",
"list_visited_nodes",
"=",
"build_list",
"(",
"li_nodes",
",",
"meta_data",
",",
")",
"visited_nodes",
".",
"extend",
"(",
"list_visited_nodes",
")",
"texts",
".",
"append",
"(",
"etree",
".",
"tostring",
"(",
"list_el",
")",
")",
"elif",
"td_content",
".",
"tag",
"==",
"'%stbl'",
"%",
"w_namespace",
":",
"table_el",
",",
"table_visited_nodes",
"=",
"build_table",
"(",
"td_content",
",",
"meta_data",
",",
")",
"visited_nodes",
".",
"extend",
"(",
"table_visited_nodes",
")",
"texts",
".",
"append",
"(",
"etree",
".",
"tostring",
"(",
"table_el",
")",
")",
"elif",
"td_content",
".",
"tag",
"==",
"'%stcPr'",
"%",
"w_namespace",
":",
"# Do nothing",
"visited_nodes",
".",
"append",
"(",
"td_content",
")",
"continue",
"else",
":",
"text",
"=",
"get_element_content",
"(",
"td_content",
",",
"meta_data",
",",
"is_td",
"=",
"True",
",",
")",
"texts",
".",
"append",
"(",
"text",
")",
"data",
"=",
"'<br />'",
".",
"join",
"(",
"t",
"for",
"t",
"in",
"texts",
"if",
"t",
"is",
"not",
"None",
")",
"td_el",
"=",
"etree",
".",
"XML",
"(",
"'<td>%s</td>'",
"%",
"data",
")",
"# if there is a colspan then set it here.",
"colspan",
"=",
"get_grid_span",
"(",
"el",
")",
"if",
"colspan",
">",
"1",
":",
"td_el",
".",
"set",
"(",
"'colspan'",
",",
"'%d'",
"%",
"colspan",
")",
"v_merge",
"=",
"get_v_merge",
"(",
"el",
")",
"# If this td has a v_merge and it is restart then set the rowspan",
"# here.",
"if",
"(",
"v_merge",
"is",
"not",
"None",
"and",
"v_merge",
".",
"get",
"(",
"'%sval'",
"%",
"w_namespace",
")",
"==",
"'restart'",
")",
":",
"rowspan",
"=",
"next",
"(",
"row_spans",
")",
"td_el",
".",
"set",
"(",
"'rowspan'",
",",
"'%d'",
"%",
"rowspan",
")",
"tr_el",
".",
"append",
"(",
"td_el",
")",
"return",
"tr_el"
] |
2dc4afd1e3a3f2f0b357d0bff903eb58bcc94429
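Background on the vMerge handling, as a self-contained sketch: in WordprocessingML the first cell of a vertical merge carries w:vMerge w:val="restart", while continuation cells carry a bare <w:vMerge/>; build_tr emits only the restart cell (with a rowspan) and skips the rest:

from lxml import etree

W = 'http://schemas.openxmlformats.org/wordprocessingml/2006/main'
tc = etree.fromstring(
    '<w:tc xmlns:w="%s"><w:tcPr><w:vMerge w:val="restart"/></w:tcPr></w:tc>' % W
)
v_merge = tc.find('.//{%s}vMerge' % W)  # roughly what get_v_merge looks up
assert v_merge.get('{%s}val' % W) == 'restart'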
|
test
|
build_table
|
This returns a table object with all rows and cells correctly populated.
|
docx2html/core.py
|
def build_table(table, meta_data):
"""
This returns a table object with all rows and cells correctly populated.
"""
# Create a blank table element.
table_el = etree.Element('table')
w_namespace = get_namespace(table, 'w')
# Get the rowspan values for cells that have a rowspan.
row_spans = get_rowspan_data(table)
for el in table:
if el.tag == '%str' % w_namespace:
# Create the tr element.
tr_el = build_tr(
el,
meta_data,
row_spans,
)
# And append it to the table.
table_el.append(tr_el)
visited_nodes = list(table.iter())
return table_el, visited_nodes
|
def build_table(table, meta_data):
"""
This returns a table object with all rows and cells correctly populated.
"""
# Create a blank table element.
table_el = etree.Element('table')
w_namespace = get_namespace(table, 'w')
# Get the rowspan values for cells that have a rowspan.
row_spans = get_rowspan_data(table)
for el in table:
if el.tag == '%str' % w_namespace:
# Create the tr element.
tr_el = build_tr(
el,
meta_data,
row_spans,
)
# And append it to the table.
table_el.append(tr_el)
visited_nodes = list(table.iter())
return table_el, visited_nodes
|
[
"This",
"returns",
"a",
"table",
"object",
"with",
"all",
"rows",
"and",
"cells",
"correctly",
"populated",
"."
] |
PolicyStat/docx2html
|
python
|
https://github.com/PolicyStat/docx2html/blob/2dc4afd1e3a3f2f0b357d0bff903eb58bcc94429/docx2html/core.py#L1137-L1160
|
[
"def",
"build_table",
"(",
"table",
",",
"meta_data",
")",
":",
"# Create a blank table element.",
"table_el",
"=",
"etree",
".",
"Element",
"(",
"'table'",
")",
"w_namespace",
"=",
"get_namespace",
"(",
"table",
",",
"'w'",
")",
"# Get the rowspan values for cells that have a rowspan.",
"row_spans",
"=",
"get_rowspan_data",
"(",
"table",
")",
"for",
"el",
"in",
"table",
":",
"if",
"el",
".",
"tag",
"==",
"'%str'",
"%",
"w_namespace",
":",
"# Create the tr element.",
"tr_el",
"=",
"build_tr",
"(",
"el",
",",
"meta_data",
",",
"row_spans",
",",
")",
"# And append it to the table.",
"table_el",
".",
"append",
"(",
"tr_el",
")",
"visited_nodes",
"=",
"list",
"(",
"table",
".",
"iter",
"(",
")",
")",
"return",
"table_el",
",",
"visited_nodes"
] |
2dc4afd1e3a3f2f0b357d0bff903eb58bcc94429
|
test
|
get_t_tag_content
|
Generate the string data for this particular t tag.
|
docx2html/core.py
|
def get_t_tag_content(
t, parent, remove_bold, remove_italics, meta_data):
"""
Generate the string data for this particular t tag.
"""
if t is None or t.text is None:
return ''
# Need to escape the text so that we do not accidentally put in text
# that is not valid XML.
# cgi will replace things like & < > with & < >
text = cgi.escape(t.text)
# Wrap the text with any modifiers it might have (bold, italics or
# underline)
el_is_bold = not remove_bold and (
is_bold(parent) or
is_underlined(parent)
)
el_is_italics = not remove_italics and is_italics(parent)
if el_is_bold:
text = '<strong>%s</strong>' % text
if el_is_italics:
text = '<em>%s</em>' % text
return text
|
def get_t_tag_content(
t, parent, remove_bold, remove_italics, meta_data):
"""
Generate the string data for this particular t tag.
"""
if t is None or t.text is None:
return ''
# Need to escape the text so that we do not accidentally put in text
# that is not valid XML.
# cgi will replace things like & < > with &amp; &lt; &gt;
text = cgi.escape(t.text)
# Wrap the text with any modifiers it might have (bold, italics or
# underline)
el_is_bold = not remove_bold and (
is_bold(parent) or
is_underlined(parent)
)
el_is_italics = not remove_italics and is_italics(parent)
if el_is_bold:
text = '<strong>%s</strong>' % text
if el_is_italics:
text = '<em>%s</em>' % text
return text
|
[
"Generate",
"the",
"string",
"data",
"that",
"for",
"this",
"particular",
"t",
"tag",
"."
] |
PolicyStat/docx2html
|
python
|
https://github.com/PolicyStat/docx2html/blob/2dc4afd1e3a3f2f0b357d0bff903eb58bcc94429/docx2html/core.py#L1164-L1188
|
[
"def",
"get_t_tag_content",
"(",
"t",
",",
"parent",
",",
"remove_bold",
",",
"remove_italics",
",",
"meta_data",
")",
":",
"if",
"t",
"is",
"None",
"or",
"t",
".",
"text",
"is",
"None",
":",
"return",
"''",
"# Need to escape the text so that we do not accidentally put in text",
"# that is not valid XML.",
"# cgi will replace things like & < > with & < >",
"text",
"=",
"cgi",
".",
"escape",
"(",
"t",
".",
"text",
")",
"# Wrap the text with any modifiers it might have (bold, italics or",
"# underline)",
"el_is_bold",
"=",
"not",
"remove_bold",
"and",
"(",
"is_bold",
"(",
"parent",
")",
"or",
"is_underlined",
"(",
"parent",
")",
")",
"el_is_italics",
"=",
"not",
"remove_italics",
"and",
"is_italics",
"(",
"parent",
")",
"if",
"el_is_bold",
":",
"text",
"=",
"'<strong>%s</strong>'",
"%",
"text",
"if",
"el_is_italics",
":",
"text",
"=",
"'<em>%s</em>'",
"%",
"text",
"return",
"text"
] |
2dc4afd1e3a3f2f0b357d0bff903eb58bcc94429
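The escape-then-wrap order in get_t_tag_content matters: escaping after wrapping would mangle the <strong>/<em> tags themselves. A standalone sketch of the same logic; note that cgi.escape was removed in Python 3.8, so this uses html.escape, its documented replacement.

import html  # cgi.escape is gone since Python 3.8; html.escape replaces it

def wrap_text(text, bold=False, italics=False):
    text = html.escape(text)  # escape first so raw <, > and & cannot break markup
    if bold:
        text = '<strong>%s</strong>' % text
    if italics:
        text = '<em>%s</em>' % text
    return text

print(wrap_text('a < b & c', bold=True))
# -> <strong>a &lt; b &amp; c</strong>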
|
test
|
get_element_content
|
P tags are made up of several runs (r tags) of text. This function takes a
p tag and constructs the text that should be part of the p tag.
image_handler should be a callable that returns the desired ``src``
attribute for a given image.
|
docx2html/core.py
|
def get_element_content(
p,
meta_data,
is_td=False,
remove_italics=False,
remove_bold=False,
):
"""
P tags are made up of several runs (r tags) of text. This function takes a
p tag and constructs the text that should be part of the p tag.
image_handler should be a callable that returns the desired ``src``
attribute for a given image.
"""
# Only remove bold or italics if this tag is an h tag.
# Td elements have the same look and feel as p/h elements. Right now we are
# never putting h tags in td elements, as such if we are in a td we will
# never be stripping bold/italics since that is only done on h tags
if not is_td and is_header(p, meta_data):
# Check to see if the whole line is bold or italics.
remove_bold, remove_italics = whole_line_styled(p)
p_text = ''
w_namespace = get_namespace(p, 'w')
if len(p) == 0:
return ''
# Only these tags contain text that we care about (eg. We don't care about
# delete tags)
content_tags = (
'%sr' % w_namespace,
'%shyperlink' % w_namespace,
'%sins' % w_namespace,
'%ssmartTag' % w_namespace,
)
elements_with_content = []
for child in p:
if child is None:
break
if child.tag in content_tags:
elements_with_content.append(child)
# Gather the content from all of the children
for el in elements_with_content:
# Hyperlinks and insert tags need to be handled differently than
# r and smart tags.
if el.tag in ('%sins' % w_namespace, '%ssmartTag' % w_namespace):
p_text += get_element_content(
el,
meta_data,
remove_bold=remove_bold,
remove_italics=remove_italics,
)
elif el.tag == '%shyperlink' % w_namespace:
p_text += build_hyperlink(el, meta_data)
elif el.tag == '%sr' % w_namespace:
p_text += get_text_run_content(
el,
meta_data,
remove_bold=remove_bold,
remove_italics=remove_italics,
)
else:
raise SyntaxNotSupported(
'Content element "%s" not handled.' % el.tag
)
# This function does not return a p tag since other tag types need this as
# well (td, li).
return p_text
|
def get_element_content(
p,
meta_data,
is_td=False,
remove_italics=False,
remove_bold=False,
):
"""
P tags are made up of several runs (r tags) of text. This function takes a
p tag and constructs the text that should be part of the p tag.
image_handler should be a callable that returns the desired ``src``
attribute for a given image.
"""
# Only remove bold or italics if this tag is an h tag.
# Td elements have the same look and feel as p/h elements. Right now we are
# never putting h tags in td elements, as such if we are in a td we will
# never be stripping bold/italics since that is only done on h tags
if not is_td and is_header(p, meta_data):
# Check to see if the whole line is bold or italics.
remove_bold, remove_italics = whole_line_styled(p)
p_text = ''
w_namespace = get_namespace(p, 'w')
if len(p) == 0:
return ''
# Only these tags contain text that we care about (eg. We don't care about
# delete tags)
content_tags = (
'%sr' % w_namespace,
'%shyperlink' % w_namespace,
'%sins' % w_namespace,
'%ssmartTag' % w_namespace,
)
elements_with_content = []
for child in p:
if child is None:
break
if child.tag in content_tags:
elements_with_content.append(child)
# Gather the content from all of the children
for el in elements_with_content:
# Hyperlinks and insert tags need to be handled differently than
# r and smart tags.
if el.tag in ('%sins' % w_namespace, '%ssmartTag' % w_namespace):
p_text += get_element_content(
el,
meta_data,
remove_bold=remove_bold,
remove_italics=remove_italics,
)
elif el.tag == '%shyperlink' % w_namespace:
p_text += build_hyperlink(el, meta_data)
elif el.tag == '%sr' % w_namespace:
p_text += get_text_run_content(
el,
meta_data,
remove_bold=remove_bold,
remove_italics=remove_italics,
)
else:
raise SyntaxNotSupported(
'Content element "%s" not handled.' % el.tag
)
# This function does not return a p tag since other tag types need this as
# well (td, li).
return p_text
|
[
"P",
"tags",
"are",
"made",
"up",
"of",
"several",
"runs",
"(",
"r",
"tags",
")",
"of",
"text",
".",
"This",
"function",
"takes",
"a",
"p",
"tag",
"and",
"constructs",
"the",
"text",
"that",
"should",
"be",
"part",
"of",
"the",
"p",
"tag",
"."
] |
PolicyStat/docx2html
|
python
|
https://github.com/PolicyStat/docx2html/blob/2dc4afd1e3a3f2f0b357d0bff903eb58bcc94429/docx2html/core.py#L1272-L1341
|
[
"def",
"get_element_content",
"(",
"p",
",",
"meta_data",
",",
"is_td",
"=",
"False",
",",
"remove_italics",
"=",
"False",
",",
"remove_bold",
"=",
"False",
",",
")",
":",
"# Only remove bold or italics if this tag is an h tag.",
"# Td elements have the same look and feel as p/h elements. Right now we are",
"# never putting h tags in td elements, as such if we are in a td we will",
"# never be stripping bold/italics since that is only done on h tags",
"if",
"not",
"is_td",
"and",
"is_header",
"(",
"p",
",",
"meta_data",
")",
":",
"# Check to see if the whole line is bold or italics.",
"remove_bold",
",",
"remove_italics",
"=",
"whole_line_styled",
"(",
"p",
")",
"p_text",
"=",
"''",
"w_namespace",
"=",
"get_namespace",
"(",
"p",
",",
"'w'",
")",
"if",
"len",
"(",
"p",
")",
"==",
"0",
":",
"return",
"''",
"# Only these tags contain text that we care about (eg. We don't care about",
"# delete tags)",
"content_tags",
"=",
"(",
"'%sr'",
"%",
"w_namespace",
",",
"'%shyperlink'",
"%",
"w_namespace",
",",
"'%sins'",
"%",
"w_namespace",
",",
"'%ssmartTag'",
"%",
"w_namespace",
",",
")",
"elements_with_content",
"=",
"[",
"]",
"for",
"child",
"in",
"p",
":",
"if",
"child",
"is",
"None",
":",
"break",
"if",
"child",
".",
"tag",
"in",
"content_tags",
":",
"elements_with_content",
".",
"append",
"(",
"child",
")",
"# Gather the content from all of the children",
"for",
"el",
"in",
"elements_with_content",
":",
"# Hyperlinks and insert tags need to be handled differently than",
"# r and smart tags.",
"if",
"el",
".",
"tag",
"in",
"(",
"'%sins'",
"%",
"w_namespace",
",",
"'%ssmartTag'",
"%",
"w_namespace",
")",
":",
"p_text",
"+=",
"get_element_content",
"(",
"el",
",",
"meta_data",
",",
"remove_bold",
"=",
"remove_bold",
",",
"remove_italics",
"=",
"remove_italics",
",",
")",
"elif",
"el",
".",
"tag",
"==",
"'%shyperlink'",
"%",
"w_namespace",
":",
"p_text",
"+=",
"build_hyperlink",
"(",
"el",
",",
"meta_data",
")",
"elif",
"el",
".",
"tag",
"==",
"'%sr'",
"%",
"w_namespace",
":",
"p_text",
"+=",
"get_text_run_content",
"(",
"el",
",",
"meta_data",
",",
"remove_bold",
"=",
"remove_bold",
",",
"remove_italics",
"=",
"remove_italics",
",",
")",
"else",
":",
"raise",
"SyntaxNotSupported",
"(",
"'Content element \"%s\" not handled.'",
"%",
"el",
".",
"tag",
")",
"# This function does not return a p tag since other tag types need this as",
"# well (td, li).",
"return",
"p_text"
] |
2dc4afd1e3a3f2f0b357d0bff903eb58bcc94429
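A self-contained sketch of the run-gathering idea in get_element_content, applied to a hand-built w:p element (the XML is illustrative, not library API):

from lxml import etree

NS = 'http://schemas.openxmlformats.org/wordprocessingml/2006/main'
W = '{%s}' % NS
p = etree.fromstring(
    '<w:p xmlns:w="%s">'
    '<w:r><w:t>Hello </w:t></w:r><w:r><w:t>world</w:t></w:r>'
    '</w:p>' % NS
)
# Keep only the child tags that get_element_content treats as content.
content_tags = ('%sr' % W, '%shyperlink' % W, '%sins' % W, '%ssmartTag' % W)
text = ''.join(
    t.text or ''
    for child in p if child.tag in content_tags
    for t in child.iter('%st' % W)
)
print(text)  # -> Hello world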
|
test
|
_strip_tag
|
Remove all tags that have the tag name ``tag``
|
docx2html/core.py
|
def _strip_tag(tree, tag):
"""
Remove all tags that have the tag name ``tag``
"""
for el in tree.iter():
if el.tag == tag:
el.getparent().remove(el)
|
def _strip_tag(tree, tag):
"""
Remove all tags that have the tag name ``tag``
"""
for el in tree.iter():
if el.tag == tag:
el.getparent().remove(el)
|
[
"Remove",
"all",
"tags",
"that",
"have",
"the",
"tag",
"name",
"tag"
] |
PolicyStat/docx2html
|
python
|
https://github.com/PolicyStat/docx2html/blob/2dc4afd1e3a3f2f0b357d0bff903eb58bcc94429/docx2html/core.py#L1344-L1350
|
[
"def",
"_strip_tag",
"(",
"tree",
",",
"tag",
")",
":",
"for",
"el",
"in",
"tree",
".",
"iter",
"(",
")",
":",
"if",
"el",
".",
"tag",
"==",
"tag",
":",
"el",
".",
"getparent",
"(",
")",
".",
"remove",
"(",
"el",
")"
] |
2dc4afd1e3a3f2f0b357d0bff903eb58bcc94429
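Note that getparent() is an lxml extension (the standard-library xml.etree keeps no parent pointers), which is why _strip_tag can detach matches in place. A quick sketch:

from lxml import etree

tree = etree.fromstring('<root><keep/><drop/><keep><drop/></keep></root>')
# Materialize the iterator first: removing nodes from a live lxml tree
# while iterating it can otherwise skip siblings.
for el in list(tree.iter('drop')):
    el.getparent().remove(el)
print(etree.tostring(tree))  # -> b'<root><keep/><keep/></root>'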
|
test
|
convert
|
``file_path`` is a path to the file on the file system that you want to be
converted to html.
``image_handler`` is a function that takes an image_id and a
relationship_dict to generate the src attribute for images. (see readme
for more details)
``fall_back`` is a function that takes a ``file_path``. This function will
only be called if for whatever reason the conversion fails.
``converter`` is a function to convert a document that is not docx to docx
(examples in docx2html.converters)
Returns html extracted from ``file_path``
|
docx2html/core.py
|
def convert(file_path, image_handler=None, fall_back=None, converter=None):
"""
``file_path`` is a path to the file on the file system that you want to be
converted to html.
``image_handler`` is a function that takes an image_id and a
relationship_dict to generate the src attribute for images. (see readme
for more details)
``fall_back`` is a function that takes a ``file_path``. This function will
only be called if for whatever reason the conversion fails.
``converter`` is a function to convert a document that is not docx to docx
(examples in docx2html.converters)
Returns html extracted from ``file_path``
"""
file_base, extension = os.path.splitext(os.path.basename(file_path))
if extension == '.html' or extension == '.htm':
return read_html_file(file_path)
# Create the converted file as a file in the same dir with the
# same name only with a .docx extension
docx_path = replace_ext(file_path, '.docx')
if extension == '.docx':
# If the file is already a docx, just leave it in place.
docx_path = file_path
else:
if converter is None:
raise FileNotDocx('The file passed in is not a docx.')
converter(docx_path, file_path)
if not os.path.isfile(docx_path):
if fall_back is None:
raise ConversionFailed('Conversion to docx failed.')
else:
return fall_back(file_path)
try:
# Docx files are actually just zip files.
zf = get_zip_file_handler(docx_path)
except BadZipfile:
raise MalformedDocx('This file is not a docx')
# Need to populate the xml based on word/document.xml
tree, meta_data = _get_document_data(zf, image_handler)
return create_html(tree, meta_data)
|
def convert(file_path, image_handler=None, fall_back=None, converter=None):
"""
``file_path`` is a path to the file on the file system that you want to be
converted to html.
``image_handler`` is a function that takes an image_id and a
relationship_dict to generate the src attribute for images. (see readme
for more details)
``fall_back`` is a function that takes a ``file_path``. This function will
only be called if for whatever reason the conversion fails.
``converter`` is a function to convert a document that is not docx to docx
(examples in docx2html.converters)
Returns html extracted from ``file_path``
"""
file_base, extension = os.path.splitext(os.path.basename(file_path))
if extension == '.html' or extension == '.htm':
return read_html_file(file_path)
# Create the converted file as a file in the same dir with the
# same name only with a .docx extension
docx_path = replace_ext(file_path, '.docx')
if extension == '.docx':
# If the file is already a docx, just leave it in place.
docx_path = file_path
else:
if converter is None:
raise FileNotDocx('The file passed in is not a docx.')
converter(docx_path, file_path)
if not os.path.isfile(docx_path):
if fall_back is None:
raise ConversionFailed('Conversion to docx failed.')
else:
return fall_back(file_path)
try:
# Docx files are actually just zip files.
zf = get_zip_file_handler(docx_path)
except BadZipfile:
raise MalformedDocx('This file is not a docx')
# Need to populate the xml based on word/document.xml
tree, meta_data = _get_document_data(zf, image_handler)
return create_html(tree, meta_data)
|
[
"file_path",
"is",
"a",
"path",
"to",
"the",
"file",
"on",
"the",
"file",
"system",
"that",
"you",
"want",
"to",
"be",
"converted",
"to",
"html",
".",
"image_handler",
"is",
"a",
"function",
"that",
"takes",
"an",
"image_id",
"and",
"a",
"relationship_dict",
"to",
"generate",
"the",
"src",
"attribute",
"for",
"images",
".",
"(",
"see",
"readme",
"for",
"more",
"details",
")",
"fall_back",
"is",
"a",
"function",
"that",
"takes",
"a",
"file_path",
".",
"This",
"function",
"will",
"only",
"be",
"called",
"if",
"for",
"whatever",
"reason",
"the",
"conversion",
"fails",
".",
"converter",
"is",
"a",
"function",
"to",
"convert",
"a",
"document",
"that",
"is",
"not",
"docx",
"to",
"docx",
"(",
"examples",
"in",
"docx2html",
".",
"converters",
")"
] |
PolicyStat/docx2html
|
python
|
https://github.com/PolicyStat/docx2html/blob/2dc4afd1e3a3f2f0b357d0bff903eb58bcc94429/docx2html/core.py#L1363-L1406
|
[
"def",
"convert",
"(",
"file_path",
",",
"image_handler",
"=",
"None",
",",
"fall_back",
"=",
"None",
",",
"converter",
"=",
"None",
")",
":",
"file_base",
",",
"extension",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"os",
".",
"path",
".",
"basename",
"(",
"file_path",
")",
")",
"if",
"extension",
"==",
"'.html'",
"or",
"extension",
"==",
"'.htm'",
":",
"return",
"read_html_file",
"(",
"file_path",
")",
"# Create the converted file as a file in the same dir with the",
"# same name only with a .docx extension",
"docx_path",
"=",
"replace_ext",
"(",
"file_path",
",",
"'.docx'",
")",
"if",
"extension",
"==",
"'.docx'",
":",
"# If the file is already html, just leave it in place.",
"docx_path",
"=",
"file_path",
"else",
":",
"if",
"converter",
"is",
"None",
":",
"raise",
"FileNotDocx",
"(",
"'The file passed in is not a docx.'",
")",
"converter",
"(",
"docx_path",
",",
"file_path",
")",
"if",
"not",
"os",
".",
"path",
".",
"isfile",
"(",
"docx_path",
")",
":",
"if",
"fall_back",
"is",
"None",
":",
"raise",
"ConversionFailed",
"(",
"'Conversion to docx failed.'",
")",
"else",
":",
"return",
"fall_back",
"(",
"file_path",
")",
"try",
":",
"# Docx files are actually just zip files.",
"zf",
"=",
"get_zip_file_handler",
"(",
"docx_path",
")",
"except",
"BadZipfile",
":",
"raise",
"MalformedDocx",
"(",
"'This file is not a docx'",
")",
"# Need to populate the xml based on word/document.xml",
"tree",
",",
"meta_data",
"=",
"_get_document_data",
"(",
"zf",
",",
"image_handler",
")",
"return",
"create_html",
"(",
"tree",
",",
"meta_data",
")"
] |
2dc4afd1e3a3f2f0b357d0bff903eb58bcc94429
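A hypothetical end-to-end call of convert, showing the three callable hooks with the signatures the docstring and body imply. None of these helpers ship with the library, 'report.docx' is a stand-in path, and the import assumes the package re-exports convert at the top level.

from docx2html import convert

def image_handler(image_id, relationship_dict):
    # Map the docx-internal image id to a URL your application can serve.
    return '/media/%s' % relationship_dict[image_id]

def fall_back(file_path):
    return '<p>conversion of %s failed</p>' % file_path

def to_docx(docx_path, file_path):
    # Called as converter(docx_path, file_path): produce docx_path from
    # file_path, e.g. by shelling out to a headless office suite.
    raise NotImplementedError

html = convert('report.docx', image_handler=image_handler,
               fall_back=fall_back, converter=to_docx)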
|
test
|
find
|
Find the location of a dataset on disk, downloading if needed.
|
examples/utils.py
|
def find(dataset, url):
'''Find the location of a dataset on disk, downloading if needed.'''
fn = os.path.join(DATASETS, dataset)
dn = os.path.dirname(fn)
if not os.path.exists(dn):
print('creating dataset directory: %s' % dn)
os.makedirs(dn)
if not os.path.exists(fn):
if sys.version_info < (3, ):
urllib.urlretrieve(url, fn)
else:
urllib.request.urlretrieve(url, fn)
return fn
|
def find(dataset, url):
'''Find the location of a dataset on disk, downloading if needed.'''
fn = os.path.join(DATASETS, dataset)
dn = os.path.dirname(fn)
if not os.path.exists(dn):
print('creating dataset directory: %s' % dn)
os.makedirs(dn)
if not os.path.exists(fn):
if sys.version_info < (3, ):
urllib.urlretrieve(url, fn)
else:
urllib.request.urlretrieve(url, fn)
return fn
|
[
"Find",
"the",
"location",
"of",
"a",
"dataset",
"on",
"disk",
"downloading",
"if",
"needed",
"."
] |
lmjohns3/theanets
|
python
|
https://github.com/lmjohns3/theanets/blob/79db9f878ef2071f2f576a1cf5d43a752a55894a/examples/utils.py#L18-L30
|
[
"def",
"find",
"(",
"dataset",
",",
"url",
")",
":",
"fn",
"=",
"os",
".",
"path",
".",
"join",
"(",
"DATASETS",
",",
"dataset",
")",
"dn",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"fn",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"dn",
")",
":",
"print",
"(",
"'creating dataset directory: %s'",
",",
"dn",
")",
"os",
".",
"makedirs",
"(",
"dn",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"fn",
")",
":",
"if",
"sys",
".",
"version_info",
"<",
"(",
"3",
",",
")",
":",
"urllib",
".",
"urlretrieve",
"(",
"url",
",",
"fn",
")",
"else",
":",
"urllib",
".",
"request",
".",
"urlretrieve",
"(",
"url",
",",
"fn",
")",
"return",
"fn"
] |
79db9f878ef2071f2f576a1cf5d43a752a55894a
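The same 2/3-compatible download-and-cache pattern as find, in isolation; DATASETS here is an assumed cache directory standing in for the example module's constant.

import os
import sys
if sys.version_info < (3,):
    from urllib import urlretrieve
else:
    from urllib.request import urlretrieve

DATASETS = os.path.expanduser('~/.cache/datasets')  # assumed cache root

def fetch(name, url):
    path = os.path.join(DATASETS, name)
    if not os.path.exists(os.path.dirname(path)):
        os.makedirs(os.path.dirname(path))  # create the cache dir once
    if not os.path.exists(path):
        urlretrieve(url, path)  # download only on the first call
    return path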
|
test
|
load_mnist
|
Load the MNIST digits dataset.
|
examples/utils.py
|
def load_mnist(flatten=True, labels=False):
'''Load the MNIST digits dataset.'''
fn = find('mnist.pkl.gz', 'http://deeplearning.net/data/mnist/mnist.pkl.gz')
h = gzip.open(fn, 'rb')
if sys.version_info < (3, ):
(timg, tlab), (vimg, vlab), (simg, slab) = pickle.load(h)
else:
(timg, tlab), (vimg, vlab), (simg, slab) = pickle.load(h, encoding='bytes')
h.close()
if not flatten:
timg = timg.reshape((-1, 28, 28, 1))
vimg = vimg.reshape((-1, 28, 28, 1))
simg = simg.reshape((-1, 28, 28, 1))
if labels:
return ((timg, tlab.astype('i')),
(vimg, vlab.astype('i')),
(simg, slab.astype('i')))
return (timg, ), (vimg, ), (simg, )
|
def load_mnist(flatten=True, labels=False):
'''Load the MNIST digits dataset.'''
fn = find('mnist.pkl.gz', 'http://deeplearning.net/data/mnist/mnist.pkl.gz')
h = gzip.open(fn, 'rb')
if sys.version_info < (3, ):
(timg, tlab), (vimg, vlab), (simg, slab) = pickle.load(h)
else:
(timg, tlab), (vimg, vlab), (simg, slab) = pickle.load(h, encoding='bytes')
h.close()
if not flatten:
timg = timg.reshape((-1, 28, 28, 1))
vimg = vimg.reshape((-1, 28, 28, 1))
simg = simg.reshape((-1, 28, 28, 1))
if labels:
return ((timg, tlab.astype('i')),
(vimg, vlab.astype('i')),
(simg, slab.astype('i')))
return (timg, ), (vimg, ), (simg, )
|
[
"Load",
"the",
"MNIST",
"digits",
"dataset",
"."
] |
lmjohns3/theanets
|
python
|
https://github.com/lmjohns3/theanets/blob/79db9f878ef2071f2f576a1cf5d43a752a55894a/examples/utils.py#L33-L50
|
[
"def",
"load_mnist",
"(",
"flatten",
"=",
"True",
",",
"labels",
"=",
"False",
")",
":",
"fn",
"=",
"find",
"(",
"'mnist.pkl.gz'",
",",
"'http://deeplearning.net/data/mnist/mnist.pkl.gz'",
")",
"h",
"=",
"gzip",
".",
"open",
"(",
"fn",
",",
"'rb'",
")",
"if",
"sys",
".",
"version_info",
"<",
"(",
"3",
",",
")",
":",
"(",
"timg",
",",
"tlab",
")",
",",
"(",
"vimg",
",",
"vlab",
")",
",",
"(",
"simg",
",",
"slab",
")",
"=",
"pickle",
".",
"load",
"(",
"h",
")",
"else",
":",
"(",
"timg",
",",
"tlab",
")",
",",
"(",
"vimg",
",",
"vlab",
")",
",",
"(",
"simg",
",",
"slab",
")",
"=",
"pickle",
".",
"load",
"(",
"h",
",",
"encoding",
"=",
"'bytes'",
")",
"h",
".",
"close",
"(",
")",
"if",
"not",
"flatten",
":",
"timg",
"=",
"timg",
".",
"reshape",
"(",
"(",
"-",
"1",
",",
"28",
",",
"28",
",",
"1",
")",
")",
"vimg",
"=",
"vimg",
".",
"reshape",
"(",
"(",
"-",
"1",
",",
"28",
",",
"28",
",",
"1",
")",
")",
"simg",
"=",
"simg",
".",
"reshape",
"(",
"(",
"-",
"1",
",",
"28",
",",
"28",
",",
"1",
")",
")",
"if",
"labels",
":",
"return",
"(",
"(",
"timg",
",",
"tlab",
".",
"astype",
"(",
"'i'",
")",
")",
",",
"(",
"vimg",
",",
"vlab",
".",
"astype",
"(",
"'i'",
")",
")",
",",
"(",
"simg",
",",
"slab",
".",
"astype",
"(",
"'i'",
")",
")",
")",
"return",
"(",
"timg",
",",
")",
",",
"(",
"vimg",
",",
")",
",",
"(",
"simg",
",",
")"
] |
79db9f878ef2071f2f576a1cf5d43a752a55894a
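Usage sketch for load_mnist; the shapes follow the standard mnist.pkl.gz splits of 50k/10k/10k, assuming the download succeeds.

(timg, tlab), (vimg, vlab), (simg, slab) = load_mnist(flatten=False, labels=True)
print(timg.shape, tlab.shape)  # -> (50000, 28, 28, 1) (50000,)
print(vimg.shape, simg.shape)  # -> (10000, 28, 28, 1) (10000, 28, 28, 1)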
|
test
|
load_cifar
|
Load the CIFAR10 image dataset.
|
examples/utils.py
|
def load_cifar(flatten=True, labels=False):
'''Load the CIFAR10 image dataset.'''
def extract(name):
print('extracting data from {}'.format(name))
h = tar.extractfile(name)
if sys.version_info < (3, ):
d = pickle.load(h)
else:
d = pickle.load(h, encoding='bytes')
for k in list(d):
d[k.decode('utf8')] = d[k]
h.close()
img = d['data'].reshape(
(-1, 3, 32, 32)).transpose((0, 2, 3, 1)).astype('f') / 128 - 1
if flatten:
img = img.reshape((-1, 32 * 32 * 3))
d['data'] = img
return d
fn = find('cifar10.tar.gz', 'http://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz')
tar = tarfile.open(fn)
imgs = []
labs = []
for i in range(1, 6):
d = extract('cifar-10-batches-py/data_batch_{}'.format(i))
imgs.extend(d['data'])
labs.extend(d['labels'])
timg = np.asarray(imgs[:40000])
tlab = np.asarray(labs[:40000], 'i')
vimg = np.asarray(imgs[40000:])
vlab = np.asarray(labs[40000:], 'i')
d = extract('cifar-10-batches-py/test_batch')
simg = d['data']
slab = d['labels']
tar.close()
if labels:
return (timg, tlab), (vimg, vlab), (simg, slab)
return (timg, ), (vimg, ), (simg, )
|
def load_cifar(flatten=True, labels=False):
'''Load the CIFAR10 image dataset.'''
def extract(name):
print('extracting data from {}'.format(name))
h = tar.extractfile(name)
if sys.version_info < (3, ):
d = pickle.load(h)
else:
d = pickle.load(h, encoding='bytes')
for k in list(d):
d[k.decode('utf8')] = d[k]
h.close()
img = d['data'].reshape(
(-1, 3, 32, 32)).transpose((0, 2, 3, 1)).astype('f') / 128 - 1
if flatten:
img = img.reshape((-1, 32 * 32 * 3))
d['data'] = img
return d
fn = find('cifar10.tar.gz', 'http://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz')
tar = tarfile.open(fn)
imgs = []
labs = []
for i in range(1, 6):
d = extract('cifar-10-batches-py/data_batch_{}'.format(i))
imgs.extend(d['data'])
labs.extend(d['labels'])
timg = np.asarray(imgs[:40000])
tlab = np.asarray(labs[:40000], 'i')
vimg = np.asarray(imgs[40000:])
vlab = np.asarray(labs[40000:], 'i')
d = extract('cifar-10-batches-py/test_batch')
simg = d['data']
slab = d['labels']
tar.close()
if labels:
return (timg, tlab), (vimg, vlab), (simg, slab)
return (timg, ), (vimg, ), (simg, )
|
[
"Load",
"the",
"CIFAR10",
"image",
"dataset",
"."
] |
lmjohns3/theanets
|
python
|
https://github.com/lmjohns3/theanets/blob/79db9f878ef2071f2f576a1cf5d43a752a55894a/examples/utils.py#L53-L94
|
[
"def",
"load_cifar",
"(",
"flatten",
"=",
"True",
",",
"labels",
"=",
"False",
")",
":",
"def",
"extract",
"(",
"name",
")",
":",
"print",
"(",
"'extracting data from {}'",
".",
"format",
"(",
"name",
")",
")",
"h",
"=",
"tar",
".",
"extractfile",
"(",
"name",
")",
"if",
"sys",
".",
"version_info",
"<",
"(",
"3",
",",
")",
":",
"d",
"=",
"pickle",
".",
"load",
"(",
"h",
")",
"else",
":",
"d",
"=",
"pickle",
".",
"load",
"(",
"h",
",",
"encoding",
"=",
"'bytes'",
")",
"for",
"k",
"in",
"list",
"(",
"d",
")",
":",
"d",
"[",
"k",
".",
"decode",
"(",
"'utf8'",
")",
"]",
"=",
"d",
"[",
"k",
"]",
"h",
".",
"close",
"(",
")",
"img",
"=",
"d",
"[",
"'data'",
"]",
".",
"reshape",
"(",
"(",
"-",
"1",
",",
"3",
",",
"32",
",",
"32",
")",
")",
".",
"transpose",
"(",
"(",
"0",
",",
"2",
",",
"3",
",",
"1",
")",
")",
".",
"astype",
"(",
"'f'",
")",
"/",
"128",
"-",
"1",
"if",
"flatten",
":",
"img",
"=",
"img",
".",
"reshape",
"(",
"(",
"-",
"1",
",",
"32",
"*",
"32",
"*",
"3",
")",
")",
"d",
"[",
"'data'",
"]",
"=",
"img",
"return",
"d",
"fn",
"=",
"find",
"(",
"'cifar10.tar.gz'",
",",
"'http://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz'",
")",
"tar",
"=",
"tarfile",
".",
"open",
"(",
"fn",
")",
"imgs",
"=",
"[",
"]",
"labs",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"1",
",",
"6",
")",
":",
"d",
"=",
"extract",
"(",
"'cifar-10-batches-py/data_batch_{}'",
".",
"format",
"(",
"i",
")",
")",
"imgs",
".",
"extend",
"(",
"d",
"[",
"'data'",
"]",
")",
"labs",
".",
"extend",
"(",
"d",
"[",
"'labels'",
"]",
")",
"timg",
"=",
"np",
".",
"asarray",
"(",
"imgs",
"[",
":",
"40000",
"]",
")",
"tlab",
"=",
"np",
".",
"asarray",
"(",
"labs",
"[",
":",
"40000",
"]",
",",
"'i'",
")",
"vimg",
"=",
"np",
".",
"asarray",
"(",
"imgs",
"[",
"40000",
":",
"]",
")",
"vlab",
"=",
"np",
".",
"asarray",
"(",
"labs",
"[",
"40000",
":",
"]",
",",
"'i'",
")",
"d",
"=",
"extract",
"(",
"'cifar-10-batches-py/test_batch'",
")",
"simg",
"=",
"d",
"[",
"'data'",
"]",
"slab",
"=",
"d",
"[",
"'labels'",
"]",
"tar",
".",
"close",
"(",
")",
"if",
"labels",
":",
"return",
"(",
"timg",
",",
"tlab",
")",
",",
"(",
"vimg",
",",
"vlab",
")",
",",
"(",
"simg",
",",
"slab",
")",
"return",
"(",
"timg",
",",
")",
",",
"(",
"vimg",
",",
")",
",",
"(",
"simg",
",",
")"
] |
79db9f878ef2071f2f576a1cf5d43a752a55894a
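Usage sketch for load_cifar: extract scales pixels to roughly [-1, 1), and the 50k training images are split 40k/10k into train and validation sets.

(timg, tlab), (vimg, vlab), (simg, slab) = load_cifar(flatten=True, labels=True)
print(timg.shape, vimg.shape)                # -> (40000, 3072) (10000, 3072)
print(float(timg.min()), float(timg.max()))  # about -1.0 and just under 1.0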
|
test
|
plot_images
|
Plot an array of images.
We assume that we are given a matrix of data whose shape is (n*n, s*s*c) --
that is, there are n^2 images along the first axis of the array, and each
image is c squares measuring s pixels on a side. Each row of the input will
be plotted as a sub-region within a single image array containing an n x n
grid of images.
|
examples/utils.py
|
def plot_images(imgs, loc, title=None, channels=1):
'''Plot an array of images.
We assume that we are given a matrix of data whose shape is (n*n, s*s*c) --
that is, there are n^2 images along the first axis of the array, and each
image is c squares measuring s pixels on a side. Each row of the input will
be plotted as a sub-region within a single image array containing an n x n
grid of images.
'''
n = int(np.sqrt(len(imgs)))
assert n * n == len(imgs), 'images array must contain a square number of rows!'
s = int(np.sqrt(len(imgs[0]) / channels))
assert s * s == len(imgs[0]) / channels, 'images must be square!'
img = np.zeros(((s+1) * n - 1, (s+1) * n - 1, channels), dtype=imgs[0].dtype)
for i, pix in enumerate(imgs):
r, c = divmod(i, n)
img[r * (s+1):(r+1) * (s+1) - 1,
c * (s+1):(c+1) * (s+1) - 1] = pix.reshape((s, s, channels))
img -= img.min()
img /= img.max()
ax = plt.gcf().add_subplot(loc)
ax.xaxis.set_visible(False)
ax.yaxis.set_visible(False)
ax.set_frame_on(False)
ax.imshow(img.squeeze(), cmap=plt.cm.gray)
if title:
ax.set_title(title)
|
def plot_images(imgs, loc, title=None, channels=1):
'''Plot an array of images.
We assume that we are given a matrix of data whose shape is (n*n, s*s*c) --
that is, there are n^2 images along the first axis of the array, and each
image is c squares measuring s pixels on a side. Each row of the input will
be plotted as a sub-region within a single image array containing an n x n
grid of images.
'''
n = int(np.sqrt(len(imgs)))
assert n * n == len(imgs), 'images array must contain a square number of rows!'
s = int(np.sqrt(len(imgs[0]) / channels))
assert s * s == len(imgs[0]) / channels, 'images must be square!'
img = np.zeros(((s+1) * n - 1, (s+1) * n - 1, channels), dtype=imgs[0].dtype)
for i, pix in enumerate(imgs):
r, c = divmod(i, n)
img[r * (s+1):(r+1) * (s+1) - 1,
c * (s+1):(c+1) * (s+1) - 1] = pix.reshape((s, s, channels))
img -= img.min()
img /= img.max()
ax = plt.gcf().add_subplot(loc)
ax.xaxis.set_visible(False)
ax.yaxis.set_visible(False)
ax.set_frame_on(False)
ax.imshow(img.squeeze(), cmap=plt.cm.gray)
if title:
ax.set_title(title)
|
[
"Plot",
"an",
"array",
"of",
"images",
"."
] |
lmjohns3/theanets
|
python
|
https://github.com/lmjohns3/theanets/blob/79db9f878ef2071f2f576a1cf5d43a752a55894a/examples/utils.py#L97-L126
|
[
"def",
"plot_images",
"(",
"imgs",
",",
"loc",
",",
"title",
"=",
"None",
",",
"channels",
"=",
"1",
")",
":",
"n",
"=",
"int",
"(",
"np",
".",
"sqrt",
"(",
"len",
"(",
"imgs",
")",
")",
")",
"assert",
"n",
"*",
"n",
"==",
"len",
"(",
"imgs",
")",
",",
"'images array must contain a square number of rows!'",
"s",
"=",
"int",
"(",
"np",
".",
"sqrt",
"(",
"len",
"(",
"imgs",
"[",
"0",
"]",
")",
"/",
"channels",
")",
")",
"assert",
"s",
"*",
"s",
"==",
"len",
"(",
"imgs",
"[",
"0",
"]",
")",
"/",
"channels",
",",
"'images must be square!'",
"img",
"=",
"np",
".",
"zeros",
"(",
"(",
"(",
"s",
"+",
"1",
")",
"*",
"n",
"-",
"1",
",",
"(",
"s",
"+",
"1",
")",
"*",
"n",
"-",
"1",
",",
"channels",
")",
",",
"dtype",
"=",
"imgs",
"[",
"0",
"]",
".",
"dtype",
")",
"for",
"i",
",",
"pix",
"in",
"enumerate",
"(",
"imgs",
")",
":",
"r",
",",
"c",
"=",
"divmod",
"(",
"i",
",",
"n",
")",
"img",
"[",
"r",
"*",
"(",
"s",
"+",
"1",
")",
":",
"(",
"r",
"+",
"1",
")",
"*",
"(",
"s",
"+",
"1",
")",
"-",
"1",
",",
"c",
"*",
"(",
"s",
"+",
"1",
")",
":",
"(",
"c",
"+",
"1",
")",
"*",
"(",
"s",
"+",
"1",
")",
"-",
"1",
"]",
"=",
"pix",
".",
"reshape",
"(",
"(",
"s",
",",
"s",
",",
"channels",
")",
")",
"img",
"-=",
"img",
".",
"min",
"(",
")",
"img",
"/=",
"img",
".",
"max",
"(",
")",
"ax",
"=",
"plt",
".",
"gcf",
"(",
")",
".",
"add_subplot",
"(",
"loc",
")",
"ax",
".",
"xaxis",
".",
"set_visible",
"(",
"False",
")",
"ax",
".",
"yaxis",
".",
"set_visible",
"(",
"False",
")",
"ax",
".",
"set_frame_on",
"(",
"False",
")",
"ax",
".",
"imshow",
"(",
"img",
".",
"squeeze",
"(",
")",
",",
"cmap",
"=",
"plt",
".",
"cm",
".",
"gray",
")",
"if",
"title",
":",
"ax",
".",
"set_title",
"(",
"title",
")"
] |
79db9f878ef2071f2f576a1cf5d43a752a55894a
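The canvas arithmetic in plot_images: n tiles of side s per row, separated by one-pixel gutters, gives (s+1)*n - 1 pixels per side. A quick check:

import numpy as np

imgs = np.random.rand(16, 8 * 8)  # 16 fake 8x8 grayscale images
n = int(np.sqrt(len(imgs)))       # 4 tiles per side
s = int(np.sqrt(imgs.shape[1]))   # 8 pixels per tile
print((s + 1) * n - 1)            # -> 35, the canvas side length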
|
test
|
plot_layers
|
Create a plot of weights, visualized as "bottom-level" pixel arrays.
|
examples/utils.py
|
def plot_layers(weights, tied_weights=False, channels=1):
'''Create a plot of weights, visualized as "bottom-level" pixel arrays.'''
if hasattr(weights[0], 'get_value'):
weights = [w.get_value() for w in weights]
k = min(len(weights), 9)
imgs = np.eye(weights[0].shape[0])
for i, weight in enumerate(weights[:-1]):
imgs = np.dot(weight.T, imgs)
plot_images(imgs,
100 + 10 * k + i + 1,
channels=channels,
title='Layer {}'.format(i+1))
weight = weights[-1]
n = weight.shape[1] / channels
if int(np.sqrt(n)) ** 2 != n:
return
if tied_weights:
imgs = np.dot(weight.T, imgs)
plot_images(imgs,
100 + 10 * k + k,
channels=channels,
title='Layer {}'.format(k))
else:
plot_images(weight,
100 + 10 * k + k,
channels=channels,
title='Decoding weights')
|
def plot_layers(weights, tied_weights=False, channels=1):
'''Create a plot of weights, visualized as "bottom-level" pixel arrays.'''
if hasattr(weights[0], 'get_value'):
weights = [w.get_value() for w in weights]
k = min(len(weights), 9)
imgs = np.eye(weights[0].shape[0])
for i, weight in enumerate(weights[:-1]):
imgs = np.dot(weight.T, imgs)
plot_images(imgs,
100 + 10 * k + i + 1,
channels=channels,
title='Layer {}'.format(i+1))
weight = weights[-1]
n = weight.shape[1] / channels
if int(np.sqrt(n)) ** 2 != n:
return
if tied_weights:
imgs = np.dot(weight.T, imgs)
plot_images(imgs,
100 + 10 * k + k,
channels=channels,
title='Layer {}'.format(k))
else:
plot_images(weight,
100 + 10 * k + k,
channels=channels,
title='Decoding weights')
|
[
"Create",
"a",
"plot",
"of",
"weights",
"visualized",
"as",
"bottom",
"-",
"level",
"pixel",
"arrays",
"."
] |
lmjohns3/theanets
|
python
|
https://github.com/lmjohns3/theanets/blob/79db9f878ef2071f2f576a1cf5d43a752a55894a/examples/utils.py#L129-L155
|
[
"def",
"plot_layers",
"(",
"weights",
",",
"tied_weights",
"=",
"False",
",",
"channels",
"=",
"1",
")",
":",
"if",
"hasattr",
"(",
"weights",
"[",
"0",
"]",
",",
"'get_value'",
")",
":",
"weights",
"=",
"[",
"w",
".",
"get_value",
"(",
")",
"for",
"w",
"in",
"weights",
"]",
"k",
"=",
"min",
"(",
"len",
"(",
"weights",
")",
",",
"9",
")",
"imgs",
"=",
"np",
".",
"eye",
"(",
"weights",
"[",
"0",
"]",
".",
"shape",
"[",
"0",
"]",
")",
"for",
"i",
",",
"weight",
"in",
"enumerate",
"(",
"weights",
"[",
":",
"-",
"1",
"]",
")",
":",
"imgs",
"=",
"np",
".",
"dot",
"(",
"weight",
".",
"T",
",",
"imgs",
")",
"plot_images",
"(",
"imgs",
",",
"100",
"+",
"10",
"*",
"k",
"+",
"i",
"+",
"1",
",",
"channels",
"=",
"channels",
",",
"title",
"=",
"'Layer {}'",
".",
"format",
"(",
"i",
"+",
"1",
")",
")",
"weight",
"=",
"weights",
"[",
"-",
"1",
"]",
"n",
"=",
"weight",
".",
"shape",
"[",
"1",
"]",
"/",
"channels",
"if",
"int",
"(",
"np",
".",
"sqrt",
"(",
"n",
")",
")",
"**",
"2",
"!=",
"n",
":",
"return",
"if",
"tied_weights",
":",
"imgs",
"=",
"np",
".",
"dot",
"(",
"weight",
".",
"T",
",",
"imgs",
")",
"plot_images",
"(",
"imgs",
",",
"100",
"+",
"10",
"*",
"k",
"+",
"k",
",",
"channels",
"=",
"channels",
",",
"title",
"=",
"'Layer {}'",
".",
"format",
"(",
"k",
")",
")",
"else",
":",
"plot_images",
"(",
"weight",
",",
"100",
"+",
"10",
"*",
"k",
"+",
"k",
",",
"channels",
"=",
"channels",
",",
"title",
"=",
"'Decoding weights'",
")"
] |
79db9f878ef2071f2f576a1cf5d43a752a55894a
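The loc values plot_layers passes to plot_images are matplotlib's three-digit subplot codes: for k panels in a single row, panel i (1-based) is 100 + 10*k + i.

k = 3
print([100 + 10 * k + i + 1 for i in range(k)])  # -> [131, 132, 133]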
|
test
|
plot_filters
|
Create a plot of conv filters, visualized as pixel arrays.
|
examples/utils.py
|
def plot_filters(filters):
'''Create a plot of conv filters, visualized as pixel arrays.'''
imgs = filters.get_value()
N, channels, x, y = imgs.shape
n = int(np.sqrt(N))
assert n * n == N, 'filters must contain a square number of rows!'
assert channels == 1 or channels == 3, 'can only plot grayscale or rgb filters!'
img = np.zeros(((y+1) * n - 1, (x+1) * n - 1, channels), dtype=imgs[0].dtype)
for i, pix in enumerate(imgs):
r, c = divmod(i, n)
img[r * (y+1):(r+1) * (y+1) - 1,
c * (x+1):(c+1) * (x+1) - 1] = pix.transpose((1, 2, 0))
img -= img.min()
img /= img.max()
ax = plt.gcf().add_subplot(111)
ax.xaxis.set_visible(False)
ax.yaxis.set_visible(False)
ax.set_frame_on(False)
ax.imshow(img.squeeze(), cmap=plt.cm.gray)
|
def plot_filters(filters):
'''Create a plot of conv filters, visualized as pixel arrays.'''
imgs = filters.get_value()
N, channels, x, y = imgs.shape
n = int(np.sqrt(N))
assert n * n == N, 'filters must contain a square number of rows!'
assert channels == 1 or channels == 3, 'can only plot grayscale or rgb filters!'
img = np.zeros(((y+1) * n - 1, (x+1) * n - 1, channels), dtype=imgs[0].dtype)
for i, pix in enumerate(imgs):
r, c = divmod(i, n)
img[r * (y+1):(r+1) * (y+1) - 1,
c * (x+1):(c+1) * (x+1) - 1] = pix.transpose((1, 2, 0))
img -= img.min()
img /= img.max()
ax = plt.gcf().add_subplot(111)
ax.xaxis.set_visible(False)
ax.yaxis.set_visible(False)
ax.set_frame_on(False)
ax.imshow(img.squeeze(), cmap=plt.cm.gray)
|
[
"Create",
"a",
"plot",
"of",
"conv",
"filters",
"visualized",
"as",
"pixel",
"arrays",
"."
] |
lmjohns3/theanets
|
python
|
https://github.com/lmjohns3/theanets/blob/79db9f878ef2071f2f576a1cf5d43a752a55894a/examples/utils.py#L158-L180
|
[
"def",
"plot_filters",
"(",
"filters",
")",
":",
"imgs",
"=",
"filters",
".",
"get_value",
"(",
")",
"N",
",",
"channels",
",",
"x",
",",
"y",
"=",
"imgs",
".",
"shape",
"n",
"=",
"int",
"(",
"np",
".",
"sqrt",
"(",
"N",
")",
")",
"assert",
"n",
"*",
"n",
"==",
"N",
",",
"'filters must contain a square number of rows!'",
"assert",
"channels",
"==",
"1",
"or",
"channels",
"==",
"3",
",",
"'can only plot grayscale or rgb filters!'",
"img",
"=",
"np",
".",
"zeros",
"(",
"(",
"(",
"y",
"+",
"1",
")",
"*",
"n",
"-",
"1",
",",
"(",
"x",
"+",
"1",
")",
"*",
"n",
"-",
"1",
",",
"channels",
")",
",",
"dtype",
"=",
"imgs",
"[",
"0",
"]",
".",
"dtype",
")",
"for",
"i",
",",
"pix",
"in",
"enumerate",
"(",
"imgs",
")",
":",
"r",
",",
"c",
"=",
"divmod",
"(",
"i",
",",
"n",
")",
"img",
"[",
"r",
"*",
"(",
"y",
"+",
"1",
")",
":",
"(",
"r",
"+",
"1",
")",
"*",
"(",
"y",
"+",
"1",
")",
"-",
"1",
",",
"c",
"*",
"(",
"x",
"+",
"1",
")",
":",
"(",
"c",
"+",
"1",
")",
"*",
"(",
"x",
"+",
"1",
")",
"-",
"1",
"]",
"=",
"pix",
".",
"transpose",
"(",
"(",
"1",
",",
"2",
",",
"0",
")",
")",
"img",
"-=",
"img",
".",
"min",
"(",
")",
"img",
"/=",
"img",
".",
"max",
"(",
")",
"ax",
"=",
"plt",
".",
"gcf",
"(",
")",
".",
"add_subplot",
"(",
"111",
")",
"ax",
".",
"xaxis",
".",
"set_visible",
"(",
"False",
")",
"ax",
".",
"yaxis",
".",
"set_visible",
"(",
"False",
")",
"ax",
".",
"set_frame_on",
"(",
"False",
")",
"ax",
".",
"imshow",
"(",
"img",
".",
"squeeze",
"(",
")",
",",
"cmap",
"=",
"plt",
".",
"cm",
".",
"gray",
")"
] |
79db9f878ef2071f2f576a1cf5d43a752a55894a
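plot_filters expects a shared variable whose value has shape (N, channels, height, width), with square N and 1 or 3 channels, per its assertions. A shape check using a stand-in for theano.shared (illustration only):

import numpy as np

class FakeShared(object):
    # Stand-in exposing only the get_value() call that plot_filters uses.
    def __init__(self, arr):
        self._arr = arr
    def get_value(self):
        return self._arr

filters = FakeShared(np.random.randn(16, 1, 5, 5))
N, channels, x, y = filters.get_value().shape
print(N, channels, x, y)  # -> 16 1 5 5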
|
test
|
batches
|
Create a callable that generates samples from a dataset.
Parameters
----------
arrays : list of ndarray (time-steps, data-dimensions)
Arrays of data. Rows in these arrays are assumed to correspond to time
steps, and columns to variables. Multiple arrays can be given; in such
a case, these arrays usually correspond to [input, output]---for
example, for a recurrent regression problem---or [input, output,
weights]---for a weighted regression or classification problem.
steps : int, optional
Generate samples of this many time steps. Defaults to 100.
batch_size : int, optional
Generate this many samples per call. Defaults to 64. This must match the
batch_size parameter that was used when creating the recurrent network
that will process the data.
rng : :class:`numpy.random.RandomState` or int, optional
A random number generator, or an integer seed for a random number
generator. If not provided, the random number generator will be created
with an automatically chosen seed.
Returns
-------
callable :
A callable that can be used inside a dataset for training a recurrent
network.
|
theanets/recurrent.py
|
def batches(arrays, steps=100, batch_size=64, rng=None):
'''Create a callable that generates samples from a dataset.
Parameters
----------
arrays : list of ndarray (time-steps, data-dimensions)
Arrays of data. Rows in these arrays are assumed to correspond to time
steps, and columns to variables. Multiple arrays can be given; in such
a case, these arrays usually correspond to [input, output]---for
example, for a recurrent regression problem---or [input, output,
weights]---for a weighted regression or classification problem.
steps : int, optional
Generate samples of this many time steps. Defaults to 100.
batch_size : int, optional
Generate this many samples per call. Defaults to 64. This must match the
batch_size parameter that was used when creating the recurrent network
that will process the data.
rng : :class:`numpy.random.RandomState` or int, optional
A random number generator, or an integer seed for a random number
generator. If not provided, the random number generator will be created
with an automatically chosen seed.
Returns
-------
callable :
A callable that can be used inside a dataset for training a recurrent
network.
'''
assert batch_size >= 2, 'batch_size must be at least 2!'
assert isinstance(arrays, (tuple, list)), 'arrays must be a tuple or list!'
if rng is None or isinstance(rng, int):
rng = np.random.RandomState(rng)
def sample():
xs = [np.zeros((batch_size, steps, a.shape[1]), a.dtype) for a in arrays]
for i in range(batch_size):
j = rng.randint(len(arrays[0]) - steps)
for x, a in zip(xs, arrays):
x[i] = a[j:j+steps]
return xs
return sample
|
def batches(arrays, steps=100, batch_size=64, rng=None):
'''Create a callable that generates samples from a dataset.
Parameters
----------
arrays : list of ndarray (time-steps, data-dimensions)
Arrays of data. Rows in these arrays are assumed to correspond to time
steps, and columns to variables. Multiple arrays can be given; in such
a case, these arrays usually correspond to [input, output]---for
example, for a recurrent regression problem---or [input, output,
weights]---for a weighted regression or classification problem.
steps : int, optional
Generate samples of this many time steps. Defaults to 100.
batch_size : int, optional
Generate this many samples per call. Defaults to 64. This must match the
batch_size parameter that was used when creating the recurrent network
that will process the data.
rng : :class:`numpy.random.RandomState` or int, optional
A random number generator, or an integer seed for a random number
generator. If not provided, the random number generator will be created
with an automatically chosen seed.
Returns
-------
callable :
A callable that can be used inside a dataset for training a recurrent
network.
'''
assert batch_size >= 2, 'batch_size must be at least 2!'
assert isinstance(arrays, (tuple, list)), 'arrays must be a tuple or list!'
if rng is None or isinstance(rng, int):
rng = np.random.RandomState(rng)
def sample():
xs = [np.zeros((batch_size, steps, a.shape[1]), a.dtype) for a in arrays]
for i in range(batch_size):
j = rng.randint(len(arrays[0]) - steps)
for x, a in zip(xs, arrays):
x[i] = a[j:j+steps]
return xs
return sample
|
[
"Create",
"a",
"callable",
"that",
"generates",
"samples",
"from",
"a",
"dataset",
"."
] |
lmjohns3/theanets
|
python
|
https://github.com/lmjohns3/theanets/blob/79db9f878ef2071f2f576a1cf5d43a752a55894a/theanets/recurrent.py#L12-L54
|
[
"def",
"batches",
"(",
"arrays",
",",
"steps",
"=",
"100",
",",
"batch_size",
"=",
"64",
",",
"rng",
"=",
"None",
")",
":",
"assert",
"batch_size",
">=",
"2",
",",
"'batch_size must be at least 2!'",
"assert",
"isinstance",
"(",
"arrays",
",",
"(",
"tuple",
",",
"list",
")",
")",
",",
"'arrays must be a tuple or list!'",
"if",
"rng",
"is",
"None",
"or",
"isinstance",
"(",
"rng",
",",
"int",
")",
":",
"rng",
"=",
"np",
".",
"random",
".",
"RandomState",
"(",
"rng",
")",
"def",
"sample",
"(",
")",
":",
"xs",
"=",
"[",
"np",
".",
"zeros",
"(",
"(",
"batch_size",
",",
"steps",
",",
"a",
".",
"shape",
"[",
"1",
"]",
")",
",",
"a",
".",
"dtype",
")",
"for",
"a",
"in",
"arrays",
"]",
"for",
"i",
"in",
"range",
"(",
"batch_size",
")",
":",
"j",
"=",
"rng",
".",
"randint",
"(",
"len",
"(",
"arrays",
"[",
"0",
"]",
")",
"-",
"steps",
")",
"for",
"x",
",",
"a",
"in",
"zip",
"(",
"xs",
",",
"arrays",
")",
":",
"x",
"[",
"i",
"]",
"=",
"a",
"[",
"j",
":",
"j",
"+",
"steps",
"]",
"return",
"xs",
"return",
"sample"
] |
79db9f878ef2071f2f576a1cf5d43a752a55894a
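Usage sketch for batches, with two aligned arrays (input and target over the same time axis):

import numpy as np

inputs = np.random.randn(1000, 3).astype('f')  # 1000 steps, 3 variables
targets = np.random.randn(1000, 1).astype('f')
sample = batches([inputs, targets], steps=50, batch_size=8, rng=13)
x, y = sample()          # a fresh random slice on every call
print(x.shape, y.shape)  # -> (8, 50, 3) (8, 50, 1)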
|
test
|
Text.encode
|
Encode a text string by replacing characters with alphabet index.
Parameters
----------
txt : str
A string to encode.
Returns
-------
classes : list of int
A sequence of alphabet index values corresponding to the given text.
|
theanets/recurrent.py
|
def encode(self, txt):
'''Encode a text string by replacing characters with alphabet index.
Parameters
----------
txt : str
A string to encode.
Returns
-------
classes : list of int
A sequence of alphabet index values corresponding to the given text.
'''
return list(self._fwd_index.get(c, 0) for c in txt)
|
def encode(self, txt):
'''Encode a text string by replacing characters with alphabet index.
Parameters
----------
txt : str
A string to encode.
Returns
-------
classes : list of int
A sequence of alphabet index values corresponding to the given text.
'''
return list(self._fwd_index.get(c, 0) for c in txt)
|
[
"Encode",
"a",
"text",
"string",
"by",
"replacing",
"characters",
"with",
"alphabet",
"index",
"."
] |
lmjohns3/theanets
|
python
|
https://github.com/lmjohns3/theanets/blob/79db9f878ef2071f2f576a1cf5d43a752a55894a/theanets/recurrent.py#L97-L110
|
[
"def",
"encode",
"(",
"self",
",",
"txt",
")",
":",
"return",
"list",
"(",
"self",
".",
"_fwd_index",
".",
"get",
"(",
"c",
",",
"0",
")",
"for",
"c",
"in",
"txt",
")"
] |
79db9f878ef2071f2f576a1cf5d43a752a55894a
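Defaulting to 0 makes index 0 the out-of-alphabet class, consistent with the 1 + len(self.alpha) input width used by classifier_batches below. The same pattern in isolation (a plausible construction of _fwd_index; the real one is not shown in this record):

alpha = 'abc'
# 0 is reserved for "unknown"; alphabet characters map to 1..len(alpha).
fwd_index = {c: i + 1 for i, c in enumerate(alpha)}
print([fwd_index.get(c, 0) for c in 'cab!'])  # -> [3, 1, 2, 0]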
|
test
|
Text.classifier_batches
|
Create a callable that returns a batch of training data.
Parameters
----------
steps : int
Number of time steps in each batch.
batch_size : int
Number of training examples per batch.
rng : :class:`numpy.random.RandomState` or int, optional
A random number generator, or an integer seed for a random number
generator. If not provided, the random number generator will be
created with an automatically chosen seed.
Returns
-------
batch : callable
A callable that, when called, returns a batch of data that can be
used to train a classifier model.
|
theanets/recurrent.py
|
def classifier_batches(self, steps, batch_size, rng=None):
'''Create a callable that returns a batch of training data.
Parameters
----------
steps : int
Number of time steps in each batch.
batch_size : int
Number of training examples per batch.
rng : :class:`numpy.random.RandomState` or int, optional
A random number generator, or an integer seed for a random number
generator. If not provided, the random number generator will be
created with an automatically chosen seed.
Returns
-------
batch : callable
A callable that, when called, returns a batch of data that can be
used to train a classifier model.
'''
assert batch_size >= 2, 'batch_size must be at least 2!'
if rng is None or isinstance(rng, int):
rng = np.random.RandomState(rng)
T = np.arange(steps)
def batch():
inputs = np.zeros((batch_size, steps, 1 + len(self.alpha)), 'f')
outputs = np.zeros((batch_size, steps), 'i')
for b in range(batch_size):
offset = rng.randint(len(self.text) - steps - 1)
enc = self.encode(self.text[offset:offset + steps + 1])
inputs[b, T, enc[:-1]] = 1
outputs[b, T] = enc[1:]
return [inputs, outputs]
return batch
|
def classifier_batches(self, steps, batch_size, rng=None):
'''Create a callable that returns a batch of training data.
Parameters
----------
steps : int
Number of time steps in each batch.
batch_size : int
Number of training examples per batch.
rng : :class:`numpy.random.RandomState` or int, optional
A random number generator, or an integer seed for a random number
generator. If not provided, the random number generator will be
created with an automatically chosen seed.
Returns
-------
batch : callable
A callable that, when called, returns a batch of data that can be
used to train a classifier model.
'''
assert batch_size >= 2, 'batch_size must be at least 2!'
if rng is None or isinstance(rng, int):
rng = np.random.RandomState(rng)
T = np.arange(steps)
def batch():
inputs = np.zeros((batch_size, steps, 1 + len(self.alpha)), 'f')
outputs = np.zeros((batch_size, steps), 'i')
for b in range(batch_size):
offset = rng.randint(len(self.text) - steps - 1)
enc = self.encode(self.text[offset:offset + steps + 1])
inputs[b, T, enc[:-1]] = 1
outputs[b, T] = enc[1:]
return [inputs, outputs]
return batch
|
[
"Create",
"a",
"callable",
"that",
"returns",
"a",
"batch",
"of",
"training",
"data",
"."
] |
lmjohns3/theanets
|
python
|
https://github.com/lmjohns3/theanets/blob/79db9f878ef2071f2f576a1cf5d43a752a55894a/theanets/recurrent.py#L127-L164
|
[
"def",
"classifier_batches",
"(",
"self",
",",
"steps",
",",
"batch_size",
",",
"rng",
"=",
"None",
")",
":",
"assert",
"batch_size",
">=",
"2",
",",
"'batch_size must be at least 2!'",
"if",
"rng",
"is",
"None",
"or",
"isinstance",
"(",
"rng",
",",
"int",
")",
":",
"rng",
"=",
"np",
".",
"random",
".",
"RandomState",
"(",
"rng",
")",
"T",
"=",
"np",
".",
"arange",
"(",
"steps",
")",
"def",
"batch",
"(",
")",
":",
"inputs",
"=",
"np",
".",
"zeros",
"(",
"(",
"batch_size",
",",
"steps",
",",
"1",
"+",
"len",
"(",
"self",
".",
"alpha",
")",
")",
",",
"'f'",
")",
"outputs",
"=",
"np",
".",
"zeros",
"(",
"(",
"batch_size",
",",
"steps",
")",
",",
"'i'",
")",
"for",
"b",
"in",
"range",
"(",
"batch_size",
")",
":",
"offset",
"=",
"rng",
".",
"randint",
"(",
"len",
"(",
"self",
".",
"text",
")",
"-",
"steps",
"-",
"1",
")",
"enc",
"=",
"self",
".",
"encode",
"(",
"self",
".",
"text",
"[",
"offset",
":",
"offset",
"+",
"steps",
"+",
"1",
"]",
")",
"inputs",
"[",
"b",
",",
"T",
",",
"enc",
"[",
":",
"-",
"1",
"]",
"]",
"=",
"1",
"outputs",
"[",
"b",
",",
"T",
"]",
"=",
"enc",
"[",
"1",
":",
"]",
"return",
"[",
"inputs",
",",
"outputs",
"]",
"return",
"batch"
] |
79db9f878ef2071f2f576a1cf5d43a752a55894a
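The fancy-indexing one-hot trick in classifier_batches, in isolation: inputs[b, T, enc[:-1]] = 1 marks character enc[t] at time step t, and outputs[b, T] = enc[1:] makes the target the next character.

import numpy as np

steps, n_classes = 5, 4
T = np.arange(steps)
enc = [2, 0, 3, 1, 2, 0]            # steps + 1 encoded characters
inputs = np.zeros((steps, n_classes), 'f')
inputs[T, enc[:-1]] = 1             # one-hot rows for characters 0..steps-1
outputs = np.asarray(enc[1:], 'i')  # targets: the following character
print(inputs.argmax(axis=1))        # -> [2 0 3 1 2]
print(outputs)                      # -> [0 3 1 2 0]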
|
test
|
Classifier.predict_sequence
|
Draw a sequential sample of class labels from this network.
Parameters
----------
labels : list of int
A list of integer class labels to get the classifier started.
steps : int
The number of time steps to sample.
streams : int, optional
Number of parallel streams to sample from the model. Defaults to 1.
rng : :class:`numpy.random.RandomState` or int, optional
A random number generator, or an integer seed for a random number
generator. If not provided, the random number generator will be
created with an automatically chosen seed.
Yields
------
label(s) : int or list of int
Yields at each time step an integer class label sampled sequentially
from the model. If the number of requested streams is greater than
1, this will be a list containing the corresponding number of class
labels.
|
theanets/recurrent.py
|
def predict_sequence(self, labels, steps, streams=1, rng=None):
'''Draw a sequential sample of class labels from this network.
Parameters
----------
labels : list of int
A list of integer class labels to get the classifier started.
steps : int
The number of time steps to sample.
streams : int, optional
Number of parallel streams to sample from the model. Defaults to 1.
rng : :class:`numpy.random.RandomState` or int, optional
A random number generator, or an integer seed for a random number
generator. If not provided, the random number generator will be
created with an automatically chosen seed.
Yields
------
label(s) : int or list of int
Yields at each time step an integer class label sampled sequentially
from the model. If the number of requested streams is greater than
1, this will be a list containing the corresponding number of class
labels.
'''
if rng is None or isinstance(rng, int):
rng = np.random.RandomState(rng)
offset = len(labels)
batch = max(2, streams)
inputs = np.zeros((batch, offset + steps, self.layers[0].output_size), 'f')
inputs[:, np.arange(offset), labels] = 1
for i in range(offset, offset + steps):
chars = []
for pdf in self.predict_proba(inputs[:i])[:, -1]:
try:
c = rng.multinomial(1, pdf).argmax(axis=-1)
except ValueError:
# sometimes the pdf triggers a normalization error. just
# choose greedily in this case.
c = pdf.argmax(axis=-1)
chars.append(int(c))
inputs[np.arange(batch), i, chars] = 1
yield chars[0] if streams == 1 else chars
|
def predict_sequence(self, labels, steps, streams=1, rng=None):
'''Draw a sequential sample of class labels from this network.
Parameters
----------
labels : list of int
A list of integer class labels to get the classifier started.
steps : int
The number of time steps to sample.
streams : int, optional
Number of parallel streams to sample from the model. Defaults to 1.
rng : :class:`numpy.random.RandomState` or int, optional
A random number generator, or an integer seed for a random number
generator. If not provided, the random number generator will be
created with an automatically chosen seed.
Yields
------
label(s) : int or list of int
Yields at each time step an integer class label sampled sequentially
from the model. If the number of requested streams is greater than
1, this will be a list containing the corresponding number of class
labels.
'''
if rng is None or isinstance(rng, int):
rng = np.random.RandomState(rng)
offset = len(labels)
batch = max(2, streams)
inputs = np.zeros((batch, offset + steps, self.layers[0].output_size), 'f')
inputs[:, np.arange(offset), labels] = 1
for i in range(offset, offset + steps):
chars = []
for pdf in self.predict_proba(inputs[:i])[:, -1]:
try:
c = rng.multinomial(1, pdf).argmax(axis=-1)
except ValueError:
# sometimes the pdf triggers a normalization error. just
# choose greedily in this case.
c = pdf.argmax(axis=-1)
chars.append(int(c))
inputs[np.arange(batch), i, chars] = 1
yield chars[0] if streams == 1 else chars
|
[
"Draw",
"a",
"sequential",
"sample",
"of",
"class",
"labels",
"from",
"this",
"network",
"."
] |
lmjohns3/theanets
|
python
|
https://github.com/lmjohns3/theanets/blob/79db9f878ef2071f2f576a1cf5d43a752a55894a/theanets/recurrent.py#L392-L433
|
[
"def",
"predict_sequence",
"(",
"self",
",",
"labels",
",",
"steps",
",",
"streams",
"=",
"1",
",",
"rng",
"=",
"None",
")",
":",
"if",
"rng",
"is",
"None",
"or",
"isinstance",
"(",
"rng",
",",
"int",
")",
":",
"rng",
"=",
"np",
".",
"random",
".",
"RandomState",
"(",
"rng",
")",
"offset",
"=",
"len",
"(",
"labels",
")",
"batch",
"=",
"max",
"(",
"2",
",",
"streams",
")",
"inputs",
"=",
"np",
".",
"zeros",
"(",
"(",
"batch",
",",
"offset",
"+",
"steps",
",",
"self",
".",
"layers",
"[",
"0",
"]",
".",
"output_size",
")",
",",
"'f'",
")",
"inputs",
"[",
":",
",",
"np",
".",
"arange",
"(",
"offset",
")",
",",
"labels",
"]",
"=",
"1",
"for",
"i",
"in",
"range",
"(",
"offset",
",",
"offset",
"+",
"steps",
")",
":",
"chars",
"=",
"[",
"]",
"for",
"pdf",
"in",
"self",
".",
"predict_proba",
"(",
"inputs",
"[",
":",
"i",
"]",
")",
"[",
":",
",",
"-",
"1",
"]",
":",
"try",
":",
"c",
"=",
"rng",
".",
"multinomial",
"(",
"1",
",",
"pdf",
")",
".",
"argmax",
"(",
"axis",
"=",
"-",
"1",
")",
"except",
"ValueError",
":",
"# sometimes the pdf triggers a normalization error. just",
"# choose greedily in this case.",
"c",
"=",
"pdf",
".",
"argmax",
"(",
"axis",
"=",
"-",
"1",
")",
"chars",
".",
"append",
"(",
"int",
"(",
"c",
")",
")",
"inputs",
"[",
"np",
".",
"arange",
"(",
"batch",
")",
",",
"i",
",",
"chars",
"]",
"=",
"1",
"yield",
"chars",
"[",
"0",
"]",
"if",
"streams",
"==",
"1",
"else",
"chars"
] |
79db9f878ef2071f2f576a1cf5d43a752a55894a
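A hypothetical sampling loop: net is assumed to be a trained recurrent Classifier and txt the Text helper above; txt.decode (assumed to invert encode) turns labels back into characters.

seed = txt.encode('the quick ')     # prime the model with some context
labels = list(net.predict_sequence(seed, 100, rng=42))
print(''.join(txt.decode(labels)))  # 100 sampled characters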
|
test
|
Convolution.add_conv_weights
|
Add a convolutional weight array to this layer's parameters.
Parameters
----------
name : str
Name of the parameter to add.
mean : float, optional
Mean value for randomly-initialized weights. Defaults to 0.
std : float, optional
Standard deviation of initial matrix values. Defaults to
:math:`1 / sqrt(n_i + n_o)`.
sparsity : float, optional
Fraction of weights to set to zero. Defaults to 0.
|
theanets/layers/convolution.py
|
def add_conv_weights(self, name, mean=0, std=None, sparsity=0):
'''Add a convolutional weight array to this layer's parameters.
Parameters
----------
name : str
Name of the parameter to add.
mean : float, optional
Mean value for randomly-initialized weights. Defaults to 0.
std : float, optional
Standard deviation of initial matrix values. Defaults to
:math:`1 / sqrt(n_i + n_o)`.
sparsity : float, optional
Fraction of weights to set to zero. Defaults to 0.
'''
nin = self.input_size
nout = self.output_size
mean = self.kwargs.get(
'mean_{}'.format(name),
self.kwargs.get('mean', mean))
std = self.kwargs.get(
'std_{}'.format(name),
self.kwargs.get('std', std or 1 / np.sqrt(nin + nout)))
sparsity = self.kwargs.get(
'sparsity_{}'.format(name),
self.kwargs.get('sparsity', sparsity))
arr = np.zeros((nout, nin) + self.filter_size, util.FLOAT)
for r in range(self.filter_size[0]):
for c in range(self.filter_size[1]):
arr[:, :, r, c] = util.random_matrix(
nout, nin, mean, std, sparsity=sparsity, rng=self.rng)
self._params.append(theano.shared(arr, name=self._fmt(name)))
|
def add_conv_weights(self, name, mean=0, std=None, sparsity=0):
'''Add a convolutional weight array to this layer's parameters.
Parameters
----------
name : str
Name of the parameter to add.
mean : float, optional
Mean value for randomly-initialized weights. Defaults to 0.
std : float, optional
Standard deviation of initial matrix values. Defaults to
:math:`1 / sqrt(n_i + n_o)`.
sparsity : float, optional
Fraction of weights to set to zero. Defaults to 0.
'''
nin = self.input_size
nout = self.output_size
mean = self.kwargs.get(
'mean_{}'.format(name),
self.kwargs.get('mean', mean))
std = self.kwargs.get(
'std_{}'.format(name),
self.kwargs.get('std', std or 1 / np.sqrt(nin + nout)))
sparsity = self.kwargs.get(
'sparsity_{}'.format(name),
self.kwargs.get('sparsity', sparsity))
arr = np.zeros((nout, nin) + self.filter_size, util.FLOAT)
for r in range(self.filter_size[0]):
for c in range(self.filter_size[1]):
arr[:, :, r, c] = util.random_matrix(
nout, nin, mean, std, sparsity=sparsity, rng=self.rng)
self._params.append(theano.shared(arr, name=self._fmt(name)))
|
[
"Add",
"a",
"convolutional",
"weight",
"array",
"to",
"this",
"layer",
"s",
"parameters",
"."
] |
lmjohns3/theanets
|
python
|
https://github.com/lmjohns3/theanets/blob/79db9f878ef2071f2f576a1cf5d43a752a55894a/theanets/layers/convolution.py#L53-L84
|
[
"def",
"add_conv_weights",
"(",
"self",
",",
"name",
",",
"mean",
"=",
"0",
",",
"std",
"=",
"None",
",",
"sparsity",
"=",
"0",
")",
":",
"nin",
"=",
"self",
".",
"input_size",
"nout",
"=",
"self",
".",
"output_size",
"mean",
"=",
"self",
".",
"kwargs",
".",
"get",
"(",
"'mean_{}'",
".",
"format",
"(",
"name",
")",
",",
"self",
".",
"kwargs",
".",
"get",
"(",
"'mean'",
",",
"mean",
")",
")",
"std",
"=",
"self",
".",
"kwargs",
".",
"get",
"(",
"'std_{}'",
".",
"format",
"(",
"name",
")",
",",
"self",
".",
"kwargs",
".",
"get",
"(",
"'std'",
",",
"std",
"or",
"1",
"/",
"np",
".",
"sqrt",
"(",
"nin",
"+",
"nout",
")",
")",
")",
"sparsity",
"=",
"self",
".",
"kwargs",
".",
"get",
"(",
"'sparsity_{}'",
".",
"format",
"(",
"name",
")",
",",
"self",
".",
"kwargs",
".",
"get",
"(",
"'sparsity'",
",",
"sparsity",
")",
")",
"arr",
"=",
"np",
".",
"zeros",
"(",
"(",
"nout",
",",
"nin",
")",
"+",
"self",
".",
"filter_size",
",",
"util",
".",
"FLOAT",
")",
"for",
"r",
"in",
"range",
"(",
"self",
".",
"filter_size",
"[",
"0",
"]",
")",
":",
"for",
"c",
"in",
"range",
"(",
"self",
".",
"filter_size",
"[",
"1",
"]",
")",
":",
"arr",
"[",
":",
",",
":",
",",
"r",
",",
"c",
"]",
"=",
"util",
".",
"random_matrix",
"(",
"nout",
",",
"nin",
",",
"mean",
",",
"std",
",",
"sparsity",
"=",
"sparsity",
",",
"rng",
"=",
"self",
".",
"rng",
")",
"self",
".",
"_params",
".",
"append",
"(",
"theano",
".",
"shared",
"(",
"arr",
",",
"name",
"=",
"self",
".",
"_fmt",
"(",
"name",
")",
")",
")"
] |
79db9f878ef2071f2f576a1cf5d43a752a55894a
|
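A minimal NumPy-only sketch of the initialization logic in `add_conv_weights` above, runnable without Theano; `nin`, `nout`, and `filter_size` here are made-up stand-ins for the layer attributes:
import numpy as np

nin, nout, filter_size = 8, 16, (3, 3)
std = 1 / np.sqrt(nin + nout)          # the default std used above
rng = np.random.RandomState(13)
arr = np.zeros((nout, nin) + filter_size, 'f')
for r in range(filter_size[0]):
    for c in range(filter_size[1]):
        arr[:, :, r, c] = std * rng.randn(nout, nin)
print(arr.shape)  # (16, 8, 3, 3): one random matrix per filter position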
test
|
Autoencoder.encode
|
Encode a dataset using the hidden layer activations of our network.
Parameters
----------
x : ndarray
A dataset to encode. Rows of this dataset capture individual data
points, while columns represent the variables in each data point.
layer : str, optional
The name of the hidden layer output to use. By default, we use
the "middle" hidden layer---for example, for a 4,2,4 or 4,3,2,3,4
autoencoder, we use the layer with size 2.
sample : bool, optional
If True, then draw a sample using the hidden activations as
independent Bernoulli probabilities for the encoded data. This
assumes the hidden layer has a logistic sigmoid activation function.
Returns
-------
ndarray :
The given dataset, encoded by the appropriate hidden layer
activation.
|
theanets/feedforward.py
|
def encode(self, x, layer=None, sample=False, **kwargs):
'''Encode a dataset using the hidden layer activations of our network.
Parameters
----------
x : ndarray
A dataset to encode. Rows of this dataset capture individual data
points, while columns represent the variables in each data point.
layer : str, optional
The name of the hidden layer output to use. By default, we use
the "middle" hidden layer---for example, for a 4,2,4 or 4,3,2,3,4
autoencoder, we use the layer with size 2.
sample : bool, optional
If True, then draw a sample using the hidden activations as
independent Bernoulli probabilities for the encoded data. This
assumes the hidden layer has a logistic sigmoid activation function.
Returns
-------
ndarray :
The given dataset, encoded by the appropriate hidden layer
activation.
'''
enc = self.feed_forward(x, **kwargs)[self._find_output(layer)]
if sample:
return np.random.binomial(n=1, p=enc).astype(np.uint8)
return enc
|
def encode(self, x, layer=None, sample=False, **kwargs):
'''Encode a dataset using the hidden layer activations of our network.
Parameters
----------
x : ndarray
A dataset to encode. Rows of this dataset capture individual data
points, while columns represent the variables in each data point.
layer : str, optional
The name of the hidden layer output to use. By default, we use
the "middle" hidden layer---for example, for a 4,2,4 or 4,3,2,3,4
autoencoder, we use the layer with size 2.
sample : bool, optional
If True, then draw a sample using the hidden activations as
independent Bernoulli probabilities for the encoded data. This
assumes the hidden layer has a logistic sigmoid activation function.
Returns
-------
ndarray :
The given dataset, encoded by the appropriate hidden layer
activation.
'''
enc = self.feed_forward(x, **kwargs)[self._find_output(layer)]
if sample:
return np.random.binomial(n=1, p=enc).astype(np.uint8)
return enc
|
[
"Encode",
"a",
"dataset",
"using",
"the",
"hidden",
"layer",
"activations",
"of",
"our",
"network",
"."
] |
lmjohns3/theanets
|
python
|
https://github.com/lmjohns3/theanets/blob/79db9f878ef2071f2f576a1cf5d43a752a55894a/theanets/feedforward.py#L125-L153
|
[
"def",
"encode",
"(",
"self",
",",
"x",
",",
"layer",
"=",
"None",
",",
"sample",
"=",
"False",
",",
"*",
"*",
"kwargs",
")",
":",
"enc",
"=",
"self",
".",
"feed_forward",
"(",
"x",
",",
"*",
"*",
"kwargs",
")",
"[",
"self",
".",
"_find_output",
"(",
"layer",
")",
"]",
"if",
"sample",
":",
"return",
"np",
".",
"random",
".",
"binomial",
"(",
"n",
"=",
"1",
",",
"p",
"=",
"enc",
")",
".",
"astype",
"(",
"np",
".",
"uint8",
")",
"return",
"enc"
] |
79db9f878ef2071f2f576a1cf5d43a752a55894a
|
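A hypothetical usage sketch for `encode` (assumes theanets is installed; the model here is untrained, so this only illustrates the shapes involved):
import numpy as np
import theanets

net = theanets.Autoencoder([4, 2, 4])     # middle layer has size 2
x = np.random.randn(100, 4).astype('f')
z = net.encode(x)                         # middle-layer activations
print(z.shape)                            # (100, 2)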
test
|
Autoencoder.decode
|
Decode an encoded dataset by computing the output layer activation.
Parameters
----------
z : ndarray
A matrix containing encoded data from this autoencoder.
layer : int or str or :class:`Layer <layers.Layer>`, optional
The index or name of the hidden layer that was used to encode `z`.
Returns
-------
decoded : ndarray
The decoded dataset.
|
theanets/feedforward.py
|
def decode(self, z, layer=None, **kwargs):
'''Decode an encoded dataset by computing the output layer activation.
Parameters
----------
z : ndarray
A matrix containing encoded data from this autoencoder.
layer : int or str or :class:`Layer <layers.Layer>`, optional
The index or name of the hidden layer that was used to encode `z`.
Returns
-------
decoded : ndarray
The decoded dataset.
'''
key = self._find_output(layer)
if key not in self._functions:
regs = regularizers.from_kwargs(self, **kwargs)
outputs, updates = self.build_graph(regs)
self._functions[key] = theano.function(
[outputs[key]],
[outputs[self.layers[-1].output_name]],
updates=updates)
return self._functions[key](z)[0]
|
def decode(self, z, layer=None, **kwargs):
'''Decode an encoded dataset by computing the output layer activation.
Parameters
----------
z : ndarray
A matrix containing encoded data from this autoencoder.
layer : int or str or :class:`Layer <layers.Layer>`, optional
The index or name of the hidden layer that was used to encode `z`.
Returns
-------
decoded : ndarray
The decoded dataset.
'''
key = self._find_output(layer)
if key not in self._functions:
regs = regularizers.from_kwargs(self, **kwargs)
outputs, updates = self.build_graph(regs)
self._functions[key] = theano.function(
[outputs[key]],
[outputs[self.layers[-1].output_name]],
updates=updates)
return self._functions[key](z)[0]
|
[
"Decode",
"an",
"encoded",
"dataset",
"by",
"computing",
"the",
"output",
"layer",
"activation",
"."
] |
lmjohns3/theanets
|
python
|
https://github.com/lmjohns3/theanets/blob/79db9f878ef2071f2f576a1cf5d43a752a55894a/theanets/feedforward.py#L155-L178
|
[
"def",
"decode",
"(",
"self",
",",
"z",
",",
"layer",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"key",
"=",
"self",
".",
"_find_output",
"(",
"layer",
")",
"if",
"key",
"not",
"in",
"self",
".",
"_functions",
":",
"regs",
"=",
"regularizers",
".",
"from_kwargs",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
"outputs",
",",
"updates",
"=",
"self",
".",
"build_graph",
"(",
"regs",
")",
"self",
".",
"_functions",
"[",
"key",
"]",
"=",
"theano",
".",
"function",
"(",
"[",
"outputs",
"[",
"key",
"]",
"]",
",",
"[",
"outputs",
"[",
"self",
".",
"layers",
"[",
"-",
"1",
"]",
".",
"output_name",
"]",
"]",
",",
"updates",
"=",
"updates",
")",
"return",
"self",
".",
"_functions",
"[",
"key",
"]",
"(",
"z",
")",
"[",
"0",
"]"
] |
79db9f878ef2071f2f576a1cf5d43a752a55894a
|
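Continuing the hypothetical encode sketch above, `decode` maps the middle-layer codes back through the output layer; the round trip only approximates the input after training:
x_hat = net.decode(z)   # decode the size-2 codes produced by encode
print(x_hat.shape)      # (100, 4)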
test
|
Autoencoder._find_output
|
Find a layer output name for the given layer specifier.
Parameters
----------
layer : None, int, str, or :class:`theanets.layers.Layer`
A layer specification. If this is None, the "middle" layer in the
network will be used (i.e., the layer at the middle index in the
list of network layers). If this is an integer, the corresponding
layer in the network's layer list will be used. If this is a string,
the layer with the corresponding name will be returned.
Returns
-------
name : str
The fully-scoped output name for the desired layer.
|
theanets/feedforward.py
|
def _find_output(self, layer):
'''Find a layer output name for the given layer specifier.
Parameters
----------
layer : None, int, str, or :class:`theanets.layers.Layer`
A layer specification. If this is None, the "middle" layer in the
network will be used (i.e., the layer at the middle index in the
list of network layers). If this is an integer, the corresponding
layer in the network's layer list will be used. If this is a string,
the layer with the corresponding name will be returned.
Returns
-------
name : str
The fully-scoped output name for the desired layer.
'''
if layer is None:
layer = len(self.layers) // 2
if isinstance(layer, int):
layer = self.layers[layer]
if isinstance(layer, util.basestring):
try:
layer = [l for l in self.layers if l.name == layer][0]
except IndexError:
pass
if isinstance(layer, layers.Layer):
layer = layer.output_name
return layer
|
def _find_output(self, layer):
'''Find a layer output name for the given layer specifier.
Parameters
----------
layer : None, int, str, or :class:`theanets.layers.Layer`
A layer specification. If this is None, the "middle" layer in the
network will be used (i.e., the layer at the middle index in the
list of network layers). If this is an integer, the corresponding
layer in the network's layer list will be used. If this is a string,
the layer with the corresponding name will be returned.
Returns
-------
name : str
The fully-scoped output name for the desired layer.
'''
if layer is None:
layer = len(self.layers) // 2
if isinstance(layer, int):
layer = self.layers[layer]
if isinstance(layer, util.basestring):
try:
layer = [l for l in self.layers if l.name == layer][0]
except IndexError:
pass
if isinstance(layer, layers.Layer):
layer = layer.output_name
return layer
|
[
"Find",
"a",
"layer",
"output",
"name",
"for",
"the",
"given",
"layer",
"specifier",
"."
] |
lmjohns3/theanets
|
python
|
https://github.com/lmjohns3/theanets/blob/79db9f878ef2071f2f576a1cf5d43a752a55894a/theanets/feedforward.py#L180-L208
|
[
"def",
"_find_output",
"(",
"self",
",",
"layer",
")",
":",
"if",
"layer",
"is",
"None",
":",
"layer",
"=",
"len",
"(",
"self",
".",
"layers",
")",
"//",
"2",
"if",
"isinstance",
"(",
"layer",
",",
"int",
")",
":",
"layer",
"=",
"self",
".",
"layers",
"[",
"layer",
"]",
"if",
"isinstance",
"(",
"layer",
",",
"util",
".",
"basestring",
")",
":",
"try",
":",
"layer",
"=",
"[",
"l",
"for",
"l",
"in",
"self",
".",
"layers",
"if",
"l",
".",
"name",
"==",
"layer",
"]",
"[",
"0",
"]",
"except",
"IndexError",
":",
"pass",
"if",
"isinstance",
"(",
"layer",
",",
"layers",
".",
"Layer",
")",
":",
"layer",
"=",
"layer",
".",
"output_name",
"return",
"layer"
] |
79db9f878ef2071f2f576a1cf5d43a752a55894a
|
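A stand-alone sketch of the specifier-resolution order in `_find_output`, using tiny stand-in objects so it runs without theanets:
class FakeLayer:
    def __init__(self, name):
        self.name = name
        self.output_name = name + ':out'

layers_ = [FakeLayer(n) for n in ('in', 'hid', 'out')]
print(layers_[len(layers_) // 2].output_name)  # None -> "middle" layer: hid:out
print(layers_[1].output_name)                  # int -> positional lookup: hid:out
print([l for l in layers_ if l.name == 'out'][0].output_name)  # str -> out:out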
test
|
Autoencoder.score
|
Compute R^2 coefficient of determination for a given input.
Parameters
----------
x : ndarray (num-examples, num-inputs)
An array containing data to be fed into the network. Multiple
examples are arranged as rows in this array, with columns containing
the variables for each example.
Returns
-------
r2 : float
The R^2 correlation between the prediction of this network and its
input. This can serve as one measure of the information loss of the
autoencoder.
|
theanets/feedforward.py
|
def score(self, x, w=None, **kwargs):
'''Compute R^2 coefficient of determination for a given input.
Parameters
----------
x : ndarray (num-examples, num-inputs)
An array containing data to be fed into the network. Multiple
examples are arranged as rows in this array, with columns containing
the variables for each example.
Returns
-------
r2 : float
The R^2 correlation between the prediction of this network and its
input. This can serve as one measure of the information loss of the
autoencoder.
'''
return super(Autoencoder, self).score(x, x, w=w, **kwargs)
|
def score(self, x, w=None, **kwargs):
'''Compute R^2 coefficient of determination for a given input.
Parameters
----------
x : ndarray (num-examples, num-inputs)
An array containing data to be fed into the network. Multiple
examples are arranged as rows in this array, with columns containing
the variables for each example.
Returns
-------
r2 : float
The R^2 correlation between the prediction of this network and its
input. This can serve as one measure of the information loss of the
autoencoder.
'''
return super(Autoencoder, self).score(x, x, w=w, **kwargs)
|
[
"Compute",
"R^2",
"coefficient",
"of",
"determination",
"for",
"a",
"given",
"input",
"."
] |
lmjohns3/theanets
|
python
|
https://github.com/lmjohns3/theanets/blob/79db9f878ef2071f2f576a1cf5d43a752a55894a/theanets/feedforward.py#L210-L227
|
[
"def",
"score",
"(",
"self",
",",
"x",
",",
"w",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"super",
"(",
"Autoencoder",
",",
"self",
")",
".",
"score",
"(",
"x",
",",
"x",
",",
"w",
"=",
"w",
",",
"*",
"*",
"kwargs",
")"
] |
79db9f878ef2071f2f576a1cf5d43a752a55894a
|
test
|
Classifier.monitors
|
Return expressions that should be computed to monitor training.
Returns
-------
monitors : list of (name, expression) pairs
A list of named monitor expressions to compute for this network.
|
theanets/feedforward.py
|
def monitors(self, **kwargs):
'''Return expressions that should be computed to monitor training.
Returns
-------
monitors : list of (name, expression) pairs
A list of named monitor expressions to compute for this network.
'''
monitors = super(Classifier, self).monitors(**kwargs)
regs = regularizers.from_kwargs(self, **kwargs)
outputs, _ = self.build_graph(regs)
return monitors + [('acc', self.losses[0].accuracy(outputs))]
|
def monitors(self, **kwargs):
'''Return expressions that should be computed to monitor training.
Returns
-------
monitors : list of (name, expression) pairs
A list of named monitor expressions to compute for this network.
'''
monitors = super(Classifier, self).monitors(**kwargs)
regs = regularizers.from_kwargs(self, **kwargs)
outputs, _ = self.build_graph(regs)
return monitors + [('acc', self.losses[0].accuracy(outputs))]
|
[
"Return",
"expressions",
"that",
"should",
"be",
"computed",
"to",
"monitor",
"training",
"."
] |
lmjohns3/theanets
|
python
|
https://github.com/lmjohns3/theanets/blob/79db9f878ef2071f2f576a1cf5d43a752a55894a/theanets/feedforward.py#L363-L374
|
[
"def",
"monitors",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"monitors",
"=",
"super",
"(",
"Classifier",
",",
"self",
")",
".",
"monitors",
"(",
"*",
"*",
"kwargs",
")",
"regs",
"=",
"regularizers",
".",
"from_kwargs",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
"outputs",
",",
"_",
"=",
"self",
".",
"build_graph",
"(",
"regs",
")",
"return",
"monitors",
"+",
"[",
"(",
"'acc'",
",",
"self",
".",
"losses",
"[",
"0",
"]",
".",
"accuracy",
"(",
"outputs",
")",
")",
"]"
] |
79db9f878ef2071f2f576a1cf5d43a752a55894a
|
test
|
Classifier.predict
|
Compute a greedy classification for the given set of data.
Parameters
----------
x : ndarray (num-examples, num-variables)
An array containing examples to classify. Examples are given as the
rows in this array.
Returns
-------
k : ndarray (num-examples, )
A vector of class index values, one per row of input data.
|
theanets/feedforward.py
|
def predict(self, x, **kwargs):
'''Compute a greedy classification for the given set of data.
Parameters
----------
x : ndarray (num-examples, num-variables)
An array containing examples to classify. Examples are given as the
rows in this array.
Returns
-------
k : ndarray (num-examples, )
A vector of class index values, one per row of input data.
'''
outputs = self.feed_forward(x, **kwargs)
return outputs[self.layers[-1].output_name].argmax(axis=-1)
|
def predict(self, x, **kwargs):
'''Compute a greedy classification for the given set of data.
Parameters
----------
x : ndarray (num-examples, num-variables)
An array containing examples to classify. Examples are given as the
rows in this array.
Returns
-------
k : ndarray (num-examples, )
A vector of class index values, one per row of input data.
'''
outputs = self.feed_forward(x, **kwargs)
return outputs[self.layers[-1].output_name].argmax(axis=-1)
|
[
"Compute",
"a",
"greedy",
"classification",
"for",
"the",
"given",
"set",
"of",
"data",
"."
] |
lmjohns3/theanets
|
python
|
https://github.com/lmjohns3/theanets/blob/79db9f878ef2071f2f576a1cf5d43a752a55894a/theanets/feedforward.py#L376-L391
|
[
"def",
"predict",
"(",
"self",
",",
"x",
",",
"*",
"*",
"kwargs",
")",
":",
"outputs",
"=",
"self",
".",
"feed_forward",
"(",
"x",
",",
"*",
"*",
"kwargs",
")",
"return",
"outputs",
"[",
"self",
".",
"layers",
"[",
"-",
"1",
"]",
".",
"output_name",
"]",
".",
"argmax",
"(",
"axis",
"=",
"-",
"1",
")"
] |
79db9f878ef2071f2f576a1cf5d43a752a55894a
|
test
|
Classifier.predict_proba
|
Compute class posterior probabilities for the given set of data.
Parameters
----------
x : ndarray (num-examples, num-variables)
An array containing examples to predict. Examples are given as the
rows in this array.
Returns
-------
p : ndarray (num-examples, num-classes)
An array of class posterior probability values, one per row of input
data.
|
theanets/feedforward.py
|
def predict_proba(self, x, **kwargs):
'''Compute class posterior probabilities for the given set of data.
Parameters
----------
x : ndarray (num-examples, num-variables)
An array containing examples to predict. Examples are given as the
rows in this array.
Returns
-------
p : ndarray (num-examples, num-classes)
An array of class posterior probability values, one per row of input
data.
'''
return self.feed_forward(x, **kwargs)[self.layers[-1].output_name]
|
def predict_proba(self, x, **kwargs):
'''Compute class posterior probabilities for the given set of data.
Parameters
----------
x : ndarray (num-examples, num-variables)
An array containing examples to predict. Examples are given as the
rows in this array.
Returns
-------
p : ndarray (num-examples, num-classes)
An array of class posterior probability values, one per row of input
data.
'''
return self.feed_forward(x, **kwargs)[self.layers[-1].output_name]
|
[
"Compute",
"class",
"posterior",
"probabilities",
"for",
"the",
"given",
"set",
"of",
"data",
"."
] |
lmjohns3/theanets
|
python
|
https://github.com/lmjohns3/theanets/blob/79db9f878ef2071f2f576a1cf5d43a752a55894a/theanets/feedforward.py#L398-L413
|
[
"def",
"predict_proba",
"(",
"self",
",",
"x",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"self",
".",
"feed_forward",
"(",
"x",
",",
"*",
"*",
"kwargs",
")",
"[",
"self",
".",
"layers",
"[",
"-",
"1",
"]",
".",
"output_name",
"]"
] |
79db9f878ef2071f2f576a1cf5d43a752a55894a
|
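The relationship between `predict` and `predict_proba` above is just an argmax; a NumPy-only illustration with made-up posteriors:
import numpy as np

proba = np.array([[0.1, 0.7, 0.2],
                  [0.6, 0.3, 0.1]])  # stand-in for predict_proba output
print(proba.argmax(axis=-1))         # [1 0], what predict() returns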
test
|
Classifier.predict_logit
|
Compute the logit values that underlie the softmax output.
Parameters
----------
x : ndarray (num-examples, num-variables)
An array containing examples to classify. Examples are given as the
rows in this array.
Returns
-------
l : ndarray (num-examples, num-classes)
An array of posterior class logit values, one row of logit values
per row of input data.
|
theanets/feedforward.py
|
def predict_logit(self, x, **kwargs):
'''Compute the logit values that underlie the softmax output.
Parameters
----------
x : ndarray (num-examples, num-variables)
An array containing examples to classify. Examples are given as the
rows in this array.
Returns
-------
l : ndarray (num-examples, num-classes)
An array of posterior class logit values, one row of logit values
per row of input data.
'''
return self.feed_forward(x, **kwargs)[self.layers[-1].full_name('pre')]
|
def predict_logit(self, x, **kwargs):
'''Compute the logit values that underlie the softmax output.
Parameters
----------
x : ndarray (num-examples, num-variables)
An array containing examples to classify. Examples are given as the
rows in this array.
Returns
-------
l : ndarray (num-examples, num-classes)
An array of posterior class logit values, one row of logit values
per row of input data.
'''
return self.feed_forward(x, **kwargs)[self.layers[-1].full_name('pre')]
|
[
"Compute",
"the",
"logit",
"values",
"that",
"underlie",
"the",
"softmax",
"output",
"."
] |
lmjohns3/theanets
|
python
|
https://github.com/lmjohns3/theanets/blob/79db9f878ef2071f2f576a1cf5d43a752a55894a/theanets/feedforward.py#L415-L430
|
[
"def",
"predict_logit",
"(",
"self",
",",
"x",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"self",
".",
"feed_forward",
"(",
"x",
",",
"*",
"*",
"kwargs",
")",
"[",
"self",
".",
"layers",
"[",
"-",
"1",
"]",
".",
"full_name",
"(",
"'pre'",
")",
"]"
] |
79db9f878ef2071f2f576a1cf5d43a752a55894a
|
test
|
Classifier.score
|
Compute the mean accuracy on a set of labeled data.
Parameters
----------
x : ndarray (num-examples, num-variables)
An array containing examples to classify. Examples are given as the
rows in this array.
y : ndarray (num-examples, )
A vector of integer class labels, one for each row of input data.
w : ndarray (num-examples, )
A vector of weights, one for each row of input data.
Returns
-------
score : float
The (possibly weighted) mean accuracy of the model on the data.
|
theanets/feedforward.py
|
def score(self, x, y, w=None, **kwargs):
'''Compute the mean accuracy on a set of labeled data.
Parameters
----------
x : ndarray (num-examples, num-variables)
An array containing examples to classify. Examples are given as the
rows in this array.
y : ndarray (num-examples, )
A vector of integer class labels, one for each row of input data.
w : ndarray (num-examples, )
A vector of weights, one for each row of input data.
Returns
-------
score : float
The (possibly weighted) mean accuracy of the model on the data.
'''
eq = y == self.predict(x, **kwargs)
if w is not None:
return (w * eq).sum() / w.sum()
return eq.mean()
|
def score(self, x, y, w=None, **kwargs):
'''Compute the mean accuracy on a set of labeled data.
Parameters
----------
x : ndarray (num-examples, num-variables)
An array containing examples to classify. Examples are given as the
rows in this array.
y : ndarray (num-examples, )
A vector of integer class labels, one for each row of input data.
w : ndarray (num-examples, )
A vector of weights, one for each row of input data.
Returns
-------
score : float
The (possibly weighted) mean accuracy of the model on the data.
'''
eq = y == self.predict(x, **kwargs)
if w is not None:
return (w * eq).sum() / w.sum()
return eq.mean()
|
[
"Compute",
"the",
"mean",
"accuracy",
"on",
"a",
"set",
"of",
"labeled",
"data",
"."
] |
lmjohns3/theanets
|
python
|
https://github.com/lmjohns3/theanets/blob/79db9f878ef2071f2f576a1cf5d43a752a55894a/theanets/feedforward.py#L432-L453
|
[
"def",
"score",
"(",
"self",
",",
"x",
",",
"y",
",",
"w",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"eq",
"=",
"y",
"==",
"self",
".",
"predict",
"(",
"x",
",",
"*",
"*",
"kwargs",
")",
"if",
"w",
"is",
"not",
"None",
":",
"return",
"(",
"w",
"*",
"eq",
")",
".",
"sum",
"(",
")",
"/",
"w",
".",
"sum",
"(",
")",
"return",
"eq",
".",
"mean",
"(",
")"
] |
79db9f878ef2071f2f576a1cf5d43a752a55894a
|
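The scoring arithmetic above in isolation: a plain mean of correct predictions, or a weighted mean when per-example weights are given:
import numpy as np

y    = np.array([0, 1, 1, 2])
pred = np.array([0, 1, 2, 2])       # stand-in for self.predict(x)
eq = y == pred
print(eq.mean())                    # 0.75, the unweighted accuracy
w = np.array([1.0, 1.0, 2.0, 1.0])  # up-weight the one mistake
print((w * eq).sum() / w.sum())     # 0.6, the weighted accuracy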
test
|
batch_at
|
Extract a single batch of data to pass to the model being trained.
Parameters
----------
features, labels : ndarray
Arrays of the input features and target labels.
seq_begins : ndarray
Array of the start offsets of the speech segments to include.
seq_lengths : ndarray
Array of the lengths of the speech segments to include in the batch.
Returns
-------
features, labels, mask : ndarrays
A triple of arrays for training a network. The first element contains
input features, the second contains target labels, and the third
contains a "mask" consisting of ones where there is valid data and zeros
everywhere else.
|
examples/lstm-chime.py
|
def batch_at(features, labels, seq_begins, seq_lengths):
'''Extract a single batch of data to pass to the model being trained.
Parameters
----------
features, labels : ndarray
Arrays of the input features and target labels.
seq_begins : ndarray
Array of the start offsets of the speech segments to include.
seq_lengths : ndarray
Array of the lengths of the speech segments to include in the batch.
Returns
-------
features, labels, mask : ndarrays
A triple of arrays for training a network. The first element contains
input features, the second contains target labels, and the third
contains a "mask" consisting of ones where there is valid data and zeros
everywhere else.
'''
length = seq_lengths.max()
feat = np.zeros((BATCH_SIZE, length, features.shape[-1]), 'f')
labl = np.zeros((BATCH_SIZE, length), 'i')
mask = np.zeros((BATCH_SIZE, length), 'f')
for b, (begin, length) in enumerate(zip(seq_begins, seq_lengths)):
feat[b, :length] = features[begin:begin+length]
labl[b, :length] = labels[begin:begin+length]
mask[b, :length] = 1
return [feat, labl, mask]
|
def batch_at(features, labels, seq_begins, seq_lengths):
'''Extract a single batch of data to pass to the model being trained.
Parameters
----------
features, labels : ndarray
Arrays of the input features and target labels.
seq_begins : ndarray
Array of the start offsets of the speech segments to include.
seq_lengths : ndarray
Array of the lengths of the speech segments to include in the batch.
Returns
-------
features, labels, mask : ndarrays
A triple of arrays for training a network. The first element contains
input features, the second contains target labels, and the third
contains a "mask" consisting of ones where there is valid data and zeros
everywhere else.
'''
length = seq_lengths.max()
feat = np.zeros((BATCH_SIZE, length, features.shape[-1]), 'f')
labl = np.zeros((BATCH_SIZE, length), 'i')
mask = np.zeros((BATCH_SIZE, length), 'f')
for b, (begin, length) in enumerate(zip(seq_begins, seq_lengths)):
feat[b, :length] = features[begin:begin+length]
labl[b, :length] = labels[begin:begin+length]
mask[b, :length] = 1
return [feat, labl, mask]
|
[
"Extract",
"a",
"single",
"batch",
"of",
"data",
"to",
"pass",
"to",
"the",
"model",
"being",
"trained",
"."
] |
lmjohns3/theanets
|
python
|
https://github.com/lmjohns3/theanets/blob/79db9f878ef2071f2f576a1cf5d43a752a55894a/examples/lstm-chime.py#L40-L68
|
[
"def",
"batch_at",
"(",
"features",
",",
"labels",
",",
"seq_begins",
",",
"seq_lengths",
")",
":",
"length",
"=",
"seq_lengths",
".",
"max",
"(",
")",
"feat",
"=",
"np",
".",
"zeros",
"(",
"(",
"BATCH_SIZE",
",",
"length",
",",
"features",
".",
"shape",
"[",
"-",
"1",
"]",
")",
",",
"'f'",
")",
"labl",
"=",
"np",
".",
"zeros",
"(",
"(",
"BATCH_SIZE",
",",
"length",
")",
",",
"'i'",
")",
"mask",
"=",
"np",
".",
"zeros",
"(",
"(",
"BATCH_SIZE",
",",
"length",
")",
",",
"'f'",
")",
"for",
"b",
",",
"(",
"begin",
",",
"length",
")",
"in",
"enumerate",
"(",
"zip",
"(",
"seq_begins",
",",
"seq_lengths",
")",
")",
":",
"feat",
"[",
"b",
",",
":",
"length",
"]",
"=",
"features",
"[",
"begin",
":",
"begin",
"+",
"length",
"]",
"labl",
"[",
"b",
",",
":",
"length",
"]",
"=",
"labels",
"[",
"begin",
":",
"begin",
"+",
"length",
"]",
"mask",
"[",
"b",
",",
":",
"length",
"]",
"=",
"1",
"return",
"[",
"feat",
",",
"labl",
",",
"mask",
"]"
] |
79db9f878ef2071f2f576a1cf5d43a752a55894a
|
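A toy run of the padding-and-masking idea in `batch_at`, with BATCH_SIZE fixed to 2 and made-up arrays so the sketch is self-contained:
import numpy as np

BATCH_SIZE = 2
features = np.arange(10, dtype='f').reshape(5, 2)  # 5 frames, 2 features
labels = np.array([0, 1, 0, 1, 0], 'i')
seq_begins, seq_lengths = np.array([0, 3]), np.array([3, 2])

length = seq_lengths.max()
feat = np.zeros((BATCH_SIZE, length, features.shape[-1]), 'f')
labl = np.zeros((BATCH_SIZE, length), 'i')
mask = np.zeros((BATCH_SIZE, length), 'f')
for b, (begin, n) in enumerate(zip(seq_begins, seq_lengths)):
    feat[b, :n] = features[begin:begin + n]
    labl[b, :n] = labels[begin:begin + n]
    mask[b, :n] = 1
print(mask)  # [[1. 1. 1.] [1. 1. 0.]] -- zeros mark the padding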
test
|
batches
|
Returns a callable that chooses sequences from netcdf data.
|
examples/lstm-chime.py
|
def batches(dataset):
'''Returns a callable that chooses sequences from netcdf data.'''
seq_lengths = dataset.variables['seqLengths'].data
seq_begins = np.concatenate(([0], np.cumsum(seq_lengths)[:-1]))
def sample():
chosen = np.random.choice(
list(range(len(seq_lengths))), BATCH_SIZE, replace=False)
return batch_at(dataset.variables['inputs'].data,
dataset.variables['targetClasses'].data,
seq_begins[chosen],
seq_lengths[chosen])
return sample
|
def batches(dataset):
'''Returns a callable that chooses sequences from netcdf data.'''
seq_lengths = dataset.variables['seqLengths'].data
seq_begins = np.concatenate(([0], np.cumsum(seq_lengths)[:-1]))
def sample():
chosen = np.random.choice(
list(range(len(seq_lengths))), BATCH_SIZE, replace=False)
return batch_at(dataset.variables['inputs'].data,
dataset.variables['targetClasses'].data,
seq_begins[chosen],
seq_lengths[chosen])
return sample
|
[
"Returns",
"a",
"callable",
"that",
"chooses",
"sequences",
"from",
"netcdf",
"data",
"."
] |
lmjohns3/theanets
|
python
|
https://github.com/lmjohns3/theanets/blob/79db9f878ef2071f2f576a1cf5d43a752a55894a/examples/lstm-chime.py#L71-L84
|
[
"def",
"batches",
"(",
"dataset",
")",
":",
"seq_lengths",
"=",
"dataset",
".",
"variables",
"[",
"'seqLengths'",
"]",
".",
"data",
"seq_begins",
"=",
"np",
".",
"concatenate",
"(",
"(",
"[",
"0",
"]",
",",
"np",
".",
"cumsum",
"(",
"seq_lengths",
")",
"[",
":",
"-",
"1",
"]",
")",
")",
"def",
"sample",
"(",
")",
":",
"chosen",
"=",
"np",
".",
"random",
".",
"choice",
"(",
"list",
"(",
"range",
"(",
"len",
"(",
"seq_lengths",
")",
")",
")",
",",
"BATCH_SIZE",
",",
"replace",
"=",
"False",
")",
"return",
"batch_at",
"(",
"dataset",
".",
"variables",
"[",
"'inputs'",
"]",
".",
"data",
",",
"dataset",
".",
"variables",
"[",
"'targetClasses'",
"]",
".",
"data",
",",
"seq_begins",
"[",
"chosen",
"]",
",",
"seq_lengths",
"[",
"chosen",
"]",
")",
"return",
"sample"
] |
79db9f878ef2071f2f576a1cf5d43a752a55894a
|
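The offset computation inside `batches` in miniature (the array below stands in for dataset.variables['seqLengths'].data):
import numpy as np

seq_lengths = np.array([3, 2, 4])
seq_begins = np.concatenate(([0], np.cumsum(seq_lengths)[:-1]))
print(seq_begins)  # [0 3 5] -- start offset of each variable-length sequence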
test
|
Experiment.load
|
Load a saved network from a pickle file on disk.
This method sets the ``network`` attribute of the experiment to the
loaded network model.
Parameters
----------
filename : str
Load the keyword arguments and parameters of a network from a pickle
file at the named path. If this name ends in ".gz" then the input
will automatically be gunzipped; otherwise the input will be treated
as a "raw" pickle.
Returns
-------
network : :class:`Network <graph.Network>`
A newly-constructed network, with topology and parameters loaded
from the given pickle file.
|
theanets/main.py
|
def load(self, path):
'''Load a saved network from a pickle file on disk.
This method sets the ``network`` attribute of the experiment to the
loaded network model.
Parameters
----------
filename : str
Load the keyword arguments and parameters of a network from a pickle
file at the named path. If this name ends in ".gz" then the input
will automatically be gunzipped; otherwise the input will be treated
as a "raw" pickle.
Returns
-------
network : :class:`Network <graph.Network>`
A newly-constructed network, with topology and parameters loaded
from the given pickle file.
'''
self.network = graph.Network.load(path)
return self.network
|
def load(self, path):
'''Load a saved network from a pickle file on disk.
This method sets the ``network`` attribute of the experiment to the
loaded network model.
Parameters
----------
filename : str
Load the keyword arguments and parameters of a network from a pickle
file at the named path. If this name ends in ".gz" then the input
will automatically be gunzipped; otherwise the input will be treated
as a "raw" pickle.
Returns
-------
network : :class:`Network <graph.Network>`
A newly-constructed network, with topology and parameters loaded
from the given pickle file.
'''
self.network = graph.Network.load(path)
return self.network
|
[
"Load",
"a",
"saved",
"network",
"from",
"a",
"pickle",
"file",
"on",
"disk",
"."
] |
lmjohns3/theanets
|
python
|
https://github.com/lmjohns3/theanets/blob/79db9f878ef2071f2f576a1cf5d43a752a55894a/theanets/main.py#L86-L107
|
[
"def",
"load",
"(",
"self",
",",
"path",
")",
":",
"self",
".",
"network",
"=",
"graph",
".",
"Network",
".",
"load",
"(",
"path",
")",
"return",
"self",
".",
"network"
] |
79db9f878ef2071f2f576a1cf5d43a752a55894a
|
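Hypothetical usage of `load` (the filename is made up; per the docstring, a ".gz" suffix triggers gunzipping):
import theanets

exp = theanets.Experiment(theanets.Regressor, layers=(4, 8, 2))
net = exp.load('model.pkl.gz')  # sets exp.network and returns it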
test
|
random_matrix
|
Create a matrix of randomly-initialized weights.
Parameters
----------
rows : int
Number of rows of the weight matrix -- equivalently, the number of
"input" units that the weight matrix connects.
cols : int
Number of columns of the weight matrix -- equivalently, the number
of "output" units that the weight matrix connects.
mean : float, optional
Draw initial weight values from a normal with this mean. Defaults to 0.
std : float, optional
Draw initial weight values from a normal with this standard deviation.
Defaults to 1.
sparsity : float in (0, 1), optional
If given, ensure that the given fraction of the weight matrix is
set to zero. Defaults to 0, meaning all weights are nonzero.
radius : float, optional
If given, rescale the initial weights to have this spectral radius.
No scaling is performed by default.
diagonal : float, optional
If nonzero, create a matrix containing all zeros except for this value
along the diagonal. If nonzero, other arguments (except for rows and
cols) will be ignored.
rng : :class:`numpy.random.RandomState` or int, optional
A random number generator, or an integer seed for a random number
generator. If not provided, the random number generator will be created
with an automatically chosen seed.
Returns
-------
matrix : numpy array
An array containing random values. These often represent the weights
connecting each "input" unit to each "output" unit in a layer.
|
theanets/util.py
|
def random_matrix(rows, cols, mean=0, std=1, sparsity=0, radius=0, diagonal=0, rng=None):
'''Create a matrix of randomly-initialized weights.
Parameters
----------
rows : int
Number of rows of the weight matrix -- equivalently, the number of
"input" units that the weight matrix connects.
cols : int
Number of columns of the weight matrix -- equivalently, the number
of "output" units that the weight matrix connects.
mean : float, optional
Draw initial weight values from a normal with this mean. Defaults to 0.
std : float, optional
Draw initial weight values from a normal with this standard deviation.
Defaults to 1.
sparsity : float in (0, 1), optional
If given, ensure that the given fraction of the weight matrix is
set to zero. Defaults to 0, meaning all weights are nonzero.
radius : float, optional
If given, rescale the initial weights to have this spectral radius.
No scaling is performed by default.
diagonal : float, optional
If nonzero, create a matrix containing all zeros except for this value
along the diagonal. If nonzero, other arguments (except for rows and
cols) will be ignored.
rng : :class:`numpy.random.RandomState` or int, optional
A random number generator, or an integer seed for a random number
generator. If not provided, the random number generator will be created
with an automatically chosen seed.
Returns
-------
matrix : numpy array
An array containing random values. These often represent the weights
connecting each "input" unit to each "output" unit in a layer.
'''
if rng is None or isinstance(rng, int):
rng = np.random.RandomState(rng)
arr = mean + std * rng.randn(rows, cols)
if 1 > sparsity > 0:
k = min(rows, cols)
mask = rng.binomial(n=1, p=1 - sparsity, size=(rows, cols)).astype(bool)
mask[:k, :k] |= np.eye(k).astype(bool)
arr *= mask
if radius > 0:
# rescale weights to have the appropriate spectral radius.
u, s, vT = np.linalg.svd(arr, full_matrices=False)
arr = np.dot(np.dot(u, np.diag(radius * s / abs(s[0]))), vT)
if diagonal != 0:
# generate a diagonal weight matrix. ignore other options.
arr = diagonal * np.eye(max(rows, cols))[:rows, :cols]
return arr.astype(FLOAT)
|
def random_matrix(rows, cols, mean=0, std=1, sparsity=0, radius=0, diagonal=0, rng=None):
'''Create a matrix of randomly-initialized weights.
Parameters
----------
rows : int
Number of rows of the weight matrix -- equivalently, the number of
"input" units that the weight matrix connects.
cols : int
Number of columns of the weight matrix -- equivalently, the number
of "output" units that the weight matrix connects.
mean : float, optional
Draw initial weight values from a normal with this mean. Defaults to 0.
std : float, optional
Draw initial weight values from a normal with this standard deviation.
Defaults to 1.
sparsity : float in (0, 1), optional
If given, ensure that the given fraction of the weight matrix is
set to zero. Defaults to 0, meaning all weights are nonzero.
radius : float, optional
If given, rescale the initial weights to have this spectral radius.
No scaling is performed by default.
diagonal : float, optional
If nonzero, create a matrix containing all zeros except for this value
along the diagonal. If nonzero, other arguments (except for rows and
cols) will be ignored.
rng : :class:`numpy.random.RandomState` or int, optional
A random number generator, or an integer seed for a random number
generator. If not provided, the random number generator will be created
with an automatically chosen seed.
Returns
-------
matrix : numpy array
An array containing random values. These often represent the weights
connecting each "input" unit to each "output" unit in a layer.
'''
if rng is None or isinstance(rng, int):
rng = np.random.RandomState(rng)
arr = mean + std * rng.randn(rows, cols)
if 1 > sparsity > 0:
k = min(rows, cols)
mask = rng.binomial(n=1, p=1 - sparsity, size=(rows, cols)).astype(bool)
mask[:k, :k] |= np.eye(k).astype(bool)
arr *= mask
if radius > 0:
# rescale weights to have the appropriate spectral radius.
u, s, vT = np.linalg.svd(arr, full_matrices=False)
arr = np.dot(np.dot(u, np.diag(radius * s / abs(s[0]))), vT)
if diagonal != 0:
# generate a diagonal weight matrix. ignore other options.
arr = diagonal * np.eye(max(rows, cols))[:rows, :cols]
return arr.astype(FLOAT)
|
[
"Create",
"a",
"matrix",
"of",
"randomly",
"-",
"initialized",
"weights",
"."
] |
lmjohns3/theanets
|
python
|
https://github.com/lmjohns3/theanets/blob/79db9f878ef2071f2f576a1cf5d43a752a55894a/theanets/util.py#L55-L107
|
[
"def",
"random_matrix",
"(",
"rows",
",",
"cols",
",",
"mean",
"=",
"0",
",",
"std",
"=",
"1",
",",
"sparsity",
"=",
"0",
",",
"radius",
"=",
"0",
",",
"diagonal",
"=",
"0",
",",
"rng",
"=",
"None",
")",
":",
"if",
"rng",
"is",
"None",
"or",
"isinstance",
"(",
"rng",
",",
"int",
")",
":",
"rng",
"=",
"np",
".",
"random",
".",
"RandomState",
"(",
"rng",
")",
"arr",
"=",
"mean",
"+",
"std",
"*",
"rng",
".",
"randn",
"(",
"rows",
",",
"cols",
")",
"if",
"1",
">",
"sparsity",
">",
"0",
":",
"k",
"=",
"min",
"(",
"rows",
",",
"cols",
")",
"mask",
"=",
"rng",
".",
"binomial",
"(",
"n",
"=",
"1",
",",
"p",
"=",
"1",
"-",
"sparsity",
",",
"size",
"=",
"(",
"rows",
",",
"cols",
")",
")",
".",
"astype",
"(",
"bool",
")",
"mask",
"[",
":",
"k",
",",
":",
"k",
"]",
"|=",
"np",
".",
"eye",
"(",
"k",
")",
".",
"astype",
"(",
"bool",
")",
"arr",
"*=",
"mask",
"if",
"radius",
">",
"0",
":",
"# rescale weights to have the appropriate spectral radius.",
"u",
",",
"s",
",",
"vT",
"=",
"np",
".",
"linalg",
".",
"svd",
"(",
"arr",
",",
"full_matrices",
"=",
"False",
")",
"arr",
"=",
"np",
".",
"dot",
"(",
"np",
".",
"dot",
"(",
"u",
",",
"np",
".",
"diag",
"(",
"radius",
"*",
"s",
"/",
"abs",
"(",
"s",
"[",
"0",
"]",
")",
")",
")",
",",
"vT",
")",
"if",
"diagonal",
"!=",
"0",
":",
"# generate a diagonal weight matrix. ignore other options.",
"arr",
"=",
"diagonal",
"*",
"np",
".",
"eye",
"(",
"max",
"(",
"rows",
",",
"cols",
")",
")",
"[",
":",
"rows",
",",
":",
"cols",
"]",
"return",
"arr",
".",
"astype",
"(",
"FLOAT",
")"
] |
79db9f878ef2071f2f576a1cf5d43a752a55894a
|
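A quick NumPy check of the spectral-rescaling branch of `random_matrix`: after the SVD step, the largest singular value of the matrix equals `radius`:
import numpy as np

rng = np.random.RandomState(13)
arr = rng.randn(5, 5)
radius = 1.1
u, s, vT = np.linalg.svd(arr, full_matrices=False)
arr = np.dot(np.dot(u, np.diag(radius * s / abs(s[0]))), vT)
print(np.linalg.svd(arr, compute_uv=False)[0])  # ~1.1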
test
|
random_vector
|
Create a vector of randomly-initialized values.
Parameters
----------
size : int
Length of vector to create.
mean : float, optional
Mean value for initial vector values. Defaults to 0.
std : float, optional
Standard deviation for initial vector values. Defaults to 1.
rng : :class:`numpy.random.RandomState` or int, optional
A random number generator, or an integer seed for a random number
generator. If not provided, the random number generator will be created
with an automatically chosen seed.
Returns
-------
vector : numpy array
An array containing random values. This often represents the bias for a
layer of computation units.
|
theanets/util.py
|
def random_vector(size, mean=0, std=1, rng=None):
'''Create a vector of randomly-initialized values.
Parameters
----------
size : int
Length of vector to create.
mean : float, optional
Mean value for initial vector values. Defaults to 0.
std : float, optional
Standard deviation for initial vector values. Defaults to 1.
rng : :class:`numpy.random.RandomState` or int, optional
A random number generator, or an integer seed for a random number
generator. If not provided, the random number generator will be created
with an automatically chosen seed.
Returns
-------
vector : numpy array
An array containing random values. This often represents the bias for a
layer of computation units.
'''
if rng is None or isinstance(rng, int):
rng = np.random.RandomState(rng)
return (mean + std * rng.randn(size)).astype(FLOAT)
|
def random_vector(size, mean=0, std=1, rng=None):
'''Create a vector of randomly-initialized values.
Parameters
----------
size : int
Length of vector to create.
mean : float, optional
Mean value for initial vector values. Defaults to 0.
std : float, optional
Standard deviation for initial vector values. Defaults to 1.
rng : :class:`numpy.random.RandomState` or int, optional
A random number generator, or an integer seed for a random number
generator. If not provided, the random number generator will be created
with an automatically chosen seed.
Returns
-------
vector : numpy array
An array containing random values. This often represents the bias for a
layer of computation units.
'''
if rng is None or isinstance(rng, int):
rng = np.random.RandomState(rng)
return (mean + std * rng.randn(size)).astype(FLOAT)
|
[
"Create",
"a",
"vector",
"of",
"randomly",
"-",
"initialized",
"values",
"."
] |
lmjohns3/theanets
|
python
|
https://github.com/lmjohns3/theanets/blob/79db9f878ef2071f2f576a1cf5d43a752a55894a/theanets/util.py#L110-L134
|
[
"def",
"random_vector",
"(",
"size",
",",
"mean",
"=",
"0",
",",
"std",
"=",
"1",
",",
"rng",
"=",
"None",
")",
":",
"if",
"rng",
"is",
"None",
"or",
"isinstance",
"(",
"rng",
",",
"int",
")",
":",
"rng",
"=",
"np",
".",
"random",
".",
"RandomState",
"(",
"rng",
")",
"return",
"(",
"mean",
"+",
"std",
"*",
"rng",
".",
"randn",
"(",
"size",
")",
")",
".",
"astype",
"(",
"FLOAT",
")"
] |
79db9f878ef2071f2f576a1cf5d43a752a55894a
|
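`random_vector` in miniature: a scaled and shifted standard-normal draw:
import numpy as np

rng = np.random.RandomState(13)
print((2.0 + 0.5 * rng.randn(4)).astype('f'))  # mean 2, std 0.5, length 4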
test
|
outputs_matching
|
Get the outputs from a network that match a pattern.
Parameters
----------
outputs : dict or sequence of (str, theano expression)
Output expressions to filter for matches. If this is a dictionary, its
``items()`` will be processed for matches.
patterns : sequence of str
A sequence of glob-style patterns to match against. Any parameter
matching any pattern in this sequence will be included in the match.
Yields
------
matches : pair of str, theano expression
Generates a sequence of (name, expression) pairs. The name is the name
of the output that matched, and the expression is the symbolic output in
the network graph.
|
theanets/util.py
|
def outputs_matching(outputs, patterns):
'''Get the outputs from a network that match a pattern.
Parameters
----------
outputs : dict or sequence of (str, theano expression)
Output expressions to filter for matches. If this is a dictionary, its
``items()`` will be processed for matches.
patterns : sequence of str
A sequence of glob-style patterns to match against. Any parameter
matching any pattern in this sequence will be included in the match.
Yields
------
matches : pair of str, theano expression
Generates a sequence of (name, expression) pairs. The name is the name
of the output that matched, and the expression is the symbolic output in
the network graph.
'''
if isinstance(patterns, basestring):
patterns = (patterns, )
if isinstance(outputs, dict):
outputs = outputs.items()
for name, expr in outputs:
for pattern in patterns:
if fnmatch.fnmatch(name, pattern):
yield name, expr
break
|
def outputs_matching(outputs, patterns):
'''Get the outputs from a network that match a pattern.
Parameters
----------
outputs : dict or sequence of (str, theano expression)
Output expressions to filter for matches. If this is a dictionary, its
``items()`` will be processed for matches.
patterns : sequence of str
A sequence of glob-style patterns to match against. Any parameter
matching any pattern in this sequence will be included in the match.
Yields
------
matches : pair of str, theano expression
Generates a sequence of (name, expression) pairs. The name is the name
of the output that matched, and the expression is the symbolic output in
the network graph.
'''
if isinstance(patterns, basestring):
patterns = (patterns, )
if isinstance(outputs, dict):
outputs = outputs.items()
for name, expr in outputs:
for pattern in patterns:
if fnmatch.fnmatch(name, pattern):
yield name, expr
break
|
[
"Get",
"the",
"outputs",
"from",
"a",
"network",
"that",
"match",
"a",
"pattern",
"."
] |
lmjohns3/theanets
|
python
|
https://github.com/lmjohns3/theanets/blob/79db9f878ef2071f2f576a1cf5d43a752a55894a/theanets/util.py#L137-L164
|
[
"def",
"outputs_matching",
"(",
"outputs",
",",
"patterns",
")",
":",
"if",
"isinstance",
"(",
"patterns",
",",
"basestring",
")",
":",
"patterns",
"=",
"(",
"patterns",
",",
")",
"if",
"isinstance",
"(",
"outputs",
",",
"dict",
")",
":",
"outputs",
"=",
"outputs",
".",
"items",
"(",
")",
"for",
"name",
",",
"expr",
"in",
"outputs",
":",
"for",
"pattern",
"in",
"patterns",
":",
"if",
"fnmatch",
".",
"fnmatch",
"(",
"name",
",",
"pattern",
")",
":",
"yield",
"name",
",",
"expr",
"break"
] |
79db9f878ef2071f2f576a1cf5d43a752a55894a
|
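The glob matching in `outputs_matching`, run over a plain dict of made-up output names (expressions replaced by strings so no Theano is needed):
import fnmatch

outputs = {'hid1:out': 'expr1', 'hid2:out': 'expr2', 'out:pre': 'expr3'}
for name, expr in outputs.items():
    if any(fnmatch.fnmatch(name, p) for p in ('hid*',)):
        print(name, expr)  # matches hid1:out and hid2:out only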
test
|
params_matching
|
Get the parameters from a network that match a pattern.
Parameters
----------
layers : list of :class:`theanets.layers.Layer`
A list of network layers to retrieve parameters from.
patterns : sequence of str
A sequence of glob-style patterns to match against. Any parameter
matching any pattern in this sequence will be included in the match.
Yields
------
matches : pair of str, theano expression
Generates a sequence of (name, expression) pairs. The name is the name
of the parameter that matched, and the expression represents the
parameter symbolically.
|
theanets/util.py
|
def params_matching(layers, patterns):
'''Get the parameters from a network that match a pattern.
Parameters
----------
layers : list of :class:`theanets.layers.Layer`
A list of network layers to retrieve parameters from.
patterns : sequence of str
A sequence of glob-style patterns to match against. Any parameter
matching any pattern in this sequence will be included in the match.
Yields
------
matches : pair of str, theano expression
Generates a sequence of (name, expression) pairs. The name is the name
of the parameter that matched, and the expression represents the
parameter symbolically.
'''
if isinstance(patterns, basestring):
patterns = (patterns, )
for layer in layers:
for param in layer.params:
name = param.name
for pattern in patterns:
if fnmatch.fnmatch(name, pattern):
yield name, param
break
|
def params_matching(layers, patterns):
'''Get the parameters from a network that match a pattern.
Parameters
----------
layers : list of :class:`theanets.layers.Layer`
A list of network layers to retrieve parameters from.
patterns : sequence of str
A sequence of glob-style patterns to match against. Any parameter
matching any pattern in this sequence will be included in the match.
Yields
------
matches : pair of str, theano expression
Generates a sequence of (name, expression) pairs. The name is the name
of the parameter that matched, and the expression represents the
parameter symbolically.
'''
if isinstance(patterns, basestring):
patterns = (patterns, )
for layer in layers:
for param in layer.params:
name = param.name
for pattern in patterns:
if fnmatch.fnmatch(name, pattern):
yield name, param
break
|
[
"Get",
"the",
"parameters",
"from",
"a",
"network",
"that",
"match",
"a",
"pattern",
"."
] |
lmjohns3/theanets
|
python
|
https://github.com/lmjohns3/theanets/blob/79db9f878ef2071f2f576a1cf5d43a752a55894a/theanets/util.py#L167-L193
|
[
"def",
"params_matching",
"(",
"layers",
",",
"patterns",
")",
":",
"if",
"isinstance",
"(",
"patterns",
",",
"basestring",
")",
":",
"patterns",
"=",
"(",
"patterns",
",",
")",
"for",
"layer",
"in",
"layers",
":",
"for",
"param",
"in",
"layer",
".",
"params",
":",
"name",
"=",
"param",
".",
"name",
"for",
"pattern",
"in",
"patterns",
":",
"if",
"fnmatch",
".",
"fnmatch",
"(",
"name",
",",
"pattern",
")",
":",
"yield",
"name",
",",
"param",
"break"
] |
79db9f878ef2071f2f576a1cf5d43a752a55894a
|
test
|
from_kwargs
|
Construct common regularizers from a set of keyword arguments.
Keyword arguments not listed below will be passed to
:func:`Regularizer.build` if they specify the name of a registered
:class:`Regularizer`.
Parameters
----------
graph : :class:`theanets.graph.Network`
A network graph to regularize.
regularizers : dict or tuple/list of :class:`Regularizer`, optional
If this is a list or a tuple, the contents of the list will be returned
as the regularizers. This is to permit custom lists of regularizers to
be passed easily.
If this is a dict, its contents will be added to the other keyword
arguments passed in.
rng : int or theano RandomStreams, optional
If an integer is provided, it will be used to seed the random number
generators for the dropout or noise regularizers. If a theano
RandomStreams object is provided, it will be used directly. Defaults to
13.
input_dropout : float, optional
Apply dropout to input layers in the network graph, with this dropout
rate. Defaults to 0 (no dropout).
hidden_dropout : float, optional
Apply dropout to hidden layers in the network graph, with this dropout
rate. Defaults to 0 (no dropout).
output_dropout : float, optional
Apply dropout to the output layer in the network graph, with this
dropout rate. Defaults to 0 (no dropout).
input_noise : float, optional
Apply noise to input layers in the network graph, with this standard
deviation. Defaults to 0 (no noise).
hidden_noise : float, optional
Apply noise to hidden layers in the network graph, with this standard
deviation. Defaults to 0 (no noise).
output_noise : float, optional
Apply noise to the output layer in the network graph, with this
standard deviation. Defaults to 0 (no noise).
Returns
-------
regs : list of :class:`Regularizer`
A list of regularizers to apply to the given network graph.
|
theanets/regularizers.py
|
def from_kwargs(graph, **kwargs):
'''Construct common regularizers from a set of keyword arguments.
Keyword arguments not listed below will be passed to
:func:`Regularizer.build` if they specify the name of a registered
:class:`Regularizer`.
Parameters
----------
graph : :class:`theanets.graph.Network`
A network graph to regularize.
regularizers : dict or tuple/list of :class:`Regularizer`, optional
If this is a list or a tuple, the contents of the list will be returned
as the regularizers. This is to permit custom lists of regularizers to
be passed easily.
If this is a dict, its contents will be added to the other keyword
arguments passed in.
rng : int or theano RandomStreams, optional
If an integer is provided, it will be used to seed the random number
generators for the dropout or noise regularizers. If a theano
RandomStreams object is provided, it will be used directly. Defaults to
13.
input_dropout : float, optional
Apply dropout to input layers in the network graph, with this dropout
rate. Defaults to 0 (no dropout).
hidden_dropout : float, optional
Apply dropout to hidden layers in the network graph, with this dropout
rate. Defaults to 0 (no dropout).
output_dropout : float, optional
Apply dropout to the output layer in the network graph, with this
dropout rate. Defaults to 0 (no dropout).
input_noise : float, optional
Apply noise to input layers in the network graph, with this standard
deviation. Defaults to 0 (no noise).
hidden_noise : float, optional
Apply noise to hidden layers in the network graph, with this standard
deviation. Defaults to 0 (no noise).
output_noise : float, optional
Apply noise to the output layer in the network graph, with this
standard deviation. Defaults to 0 (no noise).
Returns
-------
regs : list of :class:`Regularizer`
A list of regularizers to apply to the given network graph.
'''
if 'regularizers' in kwargs:
regs = kwargs['regularizers']
if isinstance(regs, (tuple, list)):
return regs
if isinstance(regs, dict):
kwargs.update(regs)
regs = []
rng = kwargs.get('rng', 13)
def pattern(ls):
return tuple(l.output_name for l in ls)
inputs = pattern([l for l in graph.layers if isinstance(l, layers.Input)])
hiddens = pattern(graph.layers[1:-1])
outputs = pattern([graph.layers[-1]])
# create regularizers for different types of canned dropout.
spec = {inputs: kwargs.get('input_dropout', 0),
hiddens: kwargs.get('hidden_dropout', 0),
outputs: kwargs.get('output_dropout', 0)}
spec.update(kwargs.get('dropout', {}))
for pattern, w in spec.items():
if w:
regs.append(BernoulliDropout(pattern=pattern, weight=w, rng=rng))
# create regularizers for different types of canned noise.
spec = {inputs: kwargs.get('input_noise', 0),
hiddens: kwargs.get('hidden_noise', 0),
outputs: kwargs.get('output_noise', 0)}
spec.update(kwargs.get('noise', {}))
for pattern, w in spec.items():
if w:
regs.append(GaussianNoise(pattern=pattern, weight=w, rng=rng))
# create regularizers based on other keyword arguments.
for key, value in kwargs.items():
if Regularizer.is_registered(key):
if not isinstance(value, dict):
value = dict(weight=value)
regs.append(Regularizer.build(key, **value))
return regs
|
def from_kwargs(graph, **kwargs):
'''Construct common regularizers from a set of keyword arguments.
Keyword arguments not listed below will be passed to
:func:`Regularizer.build` if they specify the name of a registered
:class:`Regularizer`.
Parameters
----------
graph : :class:`theanets.graph.Network`
A network graph to regularize.
regularizers : dict or tuple/list of :class:`Regularizer`, optional
If this is a list or a tuple, the contents of the list will be returned
as the regularizers. This is to permit custom lists of regularizers to
be passed easily.
If this is a dict, its contents will be added to the other keyword
arguments passed in.
rng : int or theano RandomStreams, optional
If an integer is provided, it will be used to seed the random number
generators for the dropout or noise regularizers. If a theano
RandomStreams object is provided, it will be used directly. Defaults to
13.
input_dropout : float, optional
Apply dropout to input layers in the network graph, with this dropout
rate. Defaults to 0 (no dropout).
hidden_dropout : float, optional
Apply dropout to hidden layers in the network graph, with this dropout
rate. Defaults to 0 (no dropout).
output_dropout : float, optional
Apply dropout to the output layer in the network graph, with this
dropout rate. Defaults to 0 (no dropout).
input_noise : float, optional
Apply noise to input layers in the network graph, with this standard
deviation. Defaults to 0 (no noise).
hidden_noise : float, optional
Apply noise to hidden layers in the network graph, with this standard
deviation. Defaults to 0 (no noise).
output_noise : float, optional
Apply noise to the output layer in the network graph, with this
standard deviation. Defaults to 0 (no noise).
Returns
-------
regs : list of :class:`Regularizer`
A list of regularizers to apply to the given network graph.
'''
if 'regularizers' in kwargs:
regs = kwargs['regularizers']
if isinstance(regs, (tuple, list)):
return regs
if isinstance(regs, dict):
kwargs.update(regs)
regs = []
rng = kwargs.get('rng', 13)
def pattern(ls):
return tuple(l.output_name for l in ls)
inputs = pattern([l for l in graph.layers if isinstance(l, layers.Input)])
hiddens = pattern(graph.layers[1:-1])
outputs = pattern([graph.layers[-1]])
# create regularizers for different types of canned dropout.
spec = {inputs: kwargs.get('input_dropout', 0),
hiddens: kwargs.get('hidden_dropout', 0),
outputs: kwargs.get('output_dropout', 0)}
spec.update(kwargs.get('dropout', {}))
for pattern, w in spec.items():
if w:
regs.append(BernoulliDropout(pattern=pattern, weight=w, rng=rng))
# create regularizers for different types of canned noise.
spec = {inputs: kwargs.get('input_noise', 0),
hiddens: kwargs.get('hidden_noise', 0),
outputs: kwargs.get('output_noise', 0)}
spec.update(kwargs.get('noise', {}))
for pattern, w in spec.items():
if w:
regs.append(GaussianNoise(pattern=pattern, weight=w, rng=rng))
# create regularizers based on other keyword arguments.
for key, value in kwargs.items():
if Regularizer.is_registered(key):
if not isinstance(value, dict):
value = dict(weight=value)
regs.append(Regularizer.build(key, **value))
return regs
|
[
"Construct",
"common",
"regularizers",
"from",
"a",
"set",
"of",
"keyword",
"arguments",
"."
] |
lmjohns3/theanets
|
python
|
https://github.com/lmjohns3/theanets/blob/79db9f878ef2071f2f576a1cf5d43a752a55894a/theanets/regularizers.py#L23-L121
|
[
"def",
"from_kwargs",
"(",
"graph",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"'regularizers'",
"in",
"kwargs",
":",
"regs",
"=",
"kwargs",
"[",
"'regularizers'",
"]",
"if",
"isinstance",
"(",
"regs",
",",
"(",
"tuple",
",",
"list",
")",
")",
":",
"return",
"regs",
"if",
"isinstance",
"(",
"regs",
",",
"dict",
")",
":",
"kwargs",
".",
"update",
"(",
"regs",
")",
"regs",
"=",
"[",
"]",
"rng",
"=",
"kwargs",
".",
"get",
"(",
"'rng'",
",",
"13",
")",
"def",
"pattern",
"(",
"ls",
")",
":",
"return",
"tuple",
"(",
"l",
".",
"output_name",
"for",
"l",
"in",
"ls",
")",
"inputs",
"=",
"pattern",
"(",
"[",
"l",
"for",
"l",
"in",
"graph",
".",
"layers",
"if",
"isinstance",
"(",
"l",
",",
"layers",
".",
"Input",
")",
"]",
")",
"hiddens",
"=",
"pattern",
"(",
"graph",
".",
"layers",
"[",
"1",
":",
"-",
"1",
"]",
")",
"outputs",
"=",
"pattern",
"(",
"[",
"graph",
".",
"layers",
"[",
"-",
"1",
"]",
"]",
")",
"# create regularizers for different types of canned dropout.",
"spec",
"=",
"{",
"inputs",
":",
"kwargs",
".",
"get",
"(",
"'input_dropout'",
",",
"0",
")",
",",
"hiddens",
":",
"kwargs",
".",
"get",
"(",
"'hidden_dropout'",
",",
"0",
")",
",",
"outputs",
":",
"kwargs",
".",
"get",
"(",
"'output_dropout'",
",",
"0",
")",
"}",
"spec",
".",
"update",
"(",
"kwargs",
".",
"get",
"(",
"'dropout'",
",",
"{",
"}",
")",
")",
"for",
"pattern",
",",
"w",
"in",
"spec",
".",
"items",
"(",
")",
":",
"if",
"w",
":",
"regs",
".",
"append",
"(",
"BernoulliDropout",
"(",
"pattern",
"=",
"pattern",
",",
"weight",
"=",
"w",
",",
"rng",
"=",
"rng",
")",
")",
"# create regularizers for different types of canned noise.",
"spec",
"=",
"{",
"inputs",
":",
"kwargs",
".",
"get",
"(",
"'input_noise'",
",",
"0",
")",
",",
"hiddens",
":",
"kwargs",
".",
"get",
"(",
"'hidden_noise'",
",",
"0",
")",
",",
"outputs",
":",
"kwargs",
".",
"get",
"(",
"'output_noise'",
",",
"0",
")",
"}",
"spec",
".",
"update",
"(",
"kwargs",
".",
"get",
"(",
"'noise'",
",",
"{",
"}",
")",
")",
"for",
"pattern",
",",
"w",
"in",
"spec",
".",
"items",
"(",
")",
":",
"if",
"w",
":",
"regs",
".",
"append",
"(",
"GaussianNoise",
"(",
"pattern",
"=",
"pattern",
",",
"weight",
"=",
"w",
",",
"rng",
"=",
"rng",
")",
")",
"# create regularizers based on other keyword arguments.",
"for",
"key",
",",
"value",
"in",
"kwargs",
".",
"items",
"(",
")",
":",
"if",
"Regularizer",
".",
"is_registered",
"(",
"key",
")",
":",
"if",
"not",
"isinstance",
"(",
"value",
",",
"dict",
")",
":",
"value",
"=",
"dict",
"(",
"weight",
"=",
"value",
")",
"regs",
".",
"append",
"(",
"Regularizer",
".",
"build",
"(",
"key",
",",
"*",
"*",
"value",
")",
")",
"return",
"regs"
] |
79db9f878ef2071f2f576a1cf5d43a752a55894a
|
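The spec-merging pattern in the record above (default per-layer dropout rates, overridden by an explicit dropout dict keyed on output-name patterns) can be illustrated without theanets. A minimal standalone sketch follows; make_specs and the example layer patterns are invented for illustration and are not part of the theanets API.

# Standalone sketch of the spec-merging logic in from_kwargs, using plain
# dicts in place of theanets layers and regularizer classes.
def make_specs(inputs, hiddens, outputs, **kwargs):
    spec = {inputs: kwargs.get('input_dropout', 0),
            hiddens: kwargs.get('hidden_dropout', 0),
            outputs: kwargs.get('output_dropout', 0)}
    spec.update(kwargs.get('dropout', {}))  # per-pattern overrides win
    return {p: w for p, w in spec.items() if w}  # zero rates create nothing

print(make_specs(('in:out',), ('hid1:out',), ('out:out',),
                 input_dropout=0.2, dropout={('hid1:out',): 0.5}))
# -> {('in:out',): 0.2, ('hid1:out',): 0.5}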
test
|
Loss.variables
|
A list of Theano variables used in this loss.
|
theanets/losses.py
|
def variables(self):
'''A list of Theano variables used in this loss.'''
result = [self._target]
if self._weights is not None:
result.append(self._weights)
return result
|
def variables(self):
'''A list of Theano variables used in this loss.'''
result = [self._target]
if self._weights is not None:
result.append(self._weights)
return result
|
[
"A",
"list",
"of",
"Theano",
"variables",
"used",
"in",
"this",
"loss",
"."
] |
lmjohns3/theanets
|
python
|
https://github.com/lmjohns3/theanets/blob/79db9f878ef2071f2f576a1cf5d43a752a55894a/theanets/losses.py#L54-L59
|
[
"def",
"variables",
"(",
"self",
")",
":",
"result",
"=",
"[",
"self",
".",
"_target",
"]",
"if",
"self",
".",
"_weights",
"is",
"not",
"None",
":",
"result",
".",
"append",
"(",
"self",
".",
"_weights",
")",
"return",
"result"
] |
79db9f878ef2071f2f576a1cf5d43a752a55894a
|
test
|
CrossEntropy.accuracy
|
Build a Theano expression for computing the accuracy of graph output.
Parameters
----------
outputs : dict of Theano expressions
A dictionary mapping network output names to Theano expressions
representing the outputs of a computation graph.
Returns
-------
acc : Theano expression
A Theano expression representing the accuracy of the output compared
to the target data.
|
theanets/losses.py
|
def accuracy(self, outputs):
'''Build a Theano expression for computing the accuracy of graph output.
Parameters
----------
outputs : dict of Theano expressions
A dictionary mapping network output names to Theano expressions
representing the outputs of a computation graph.
Returns
-------
acc : Theano expression
A Theano expression representing the accuracy of the output compared
to the target data.
'''
output = outputs[self.output_name]
predict = TT.argmax(output, axis=-1)
correct = TT.eq(predict, self._target)
acc = correct.mean()
if self._weights is not None:
acc = (self._weights * correct).sum() / self._weights.sum()
return acc
|
def accuracy(self, outputs):
'''Build a Theano expression for computing the accuracy of graph output.
Parameters
----------
outputs : dict of Theano expressions
A dictionary mapping network output names to Theano expressions
representing the outputs of a computation graph.
Returns
-------
acc : Theano expression
A Theano expression representing the accuracy of the output compared
to the target data.
'''
output = outputs[self.output_name]
predict = TT.argmax(output, axis=-1)
correct = TT.eq(predict, self._target)
acc = correct.mean()
if self._weights is not None:
acc = (self._weights * correct).sum() / self._weights.sum()
return acc
|
[
"Build",
"a",
"Theano",
"expression",
"for",
"computing",
"the",
"accuracy",
"of",
"graph",
"output",
"."
] |
lmjohns3/theanets
|
python
|
https://github.com/lmjohns3/theanets/blob/79db9f878ef2071f2f576a1cf5d43a752a55894a/theanets/losses.py#L517-L538
|
[
"def",
"accuracy",
"(",
"self",
",",
"outputs",
")",
":",
"output",
"=",
"outputs",
"[",
"self",
".",
"output_name",
"]",
"predict",
"=",
"TT",
".",
"argmax",
"(",
"output",
",",
"axis",
"=",
"-",
"1",
")",
"correct",
"=",
"TT",
".",
"eq",
"(",
"predict",
",",
"self",
".",
"_target",
")",
"acc",
"=",
"correct",
".",
"mean",
"(",
")",
"if",
"self",
".",
"_weights",
"is",
"not",
"None",
":",
"acc",
"=",
"(",
"self",
".",
"_weights",
"*",
"correct",
")",
".",
"sum",
"(",
")",
"/",
"self",
".",
"_weights",
".",
"sum",
"(",
")",
"return",
"acc"
] |
79db9f878ef2071f2f576a1cf5d43a752a55894a
|
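The weighted-accuracy expression in CrossEntropy.accuracy above translates directly to numpy. A Theano-free sketch of the same computation; the scores, targets, and weights are invented:

import numpy as np

output = np.array([[0.1, 0.9], [0.8, 0.2], [0.3, 0.7]])  # class scores
target = np.array([1, 0, 0])
weights = np.array([1.0, 1.0, 0.5])

predict = output.argmax(axis=-1)            # mirrors TT.argmax(output, axis=-1)
correct = (predict == target).astype(float)
acc = (weights * correct).sum() / weights.sum()  # weighted mean of hits
print(acc)  # 0.8: the third example is wrong but carries only half weight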
test
|
Recurrent.add_weights
|
Helper method to create a new weight matrix.
Parameters
----------
name : str
Name of parameter to define.
nin : int, optional
Size of "input" for this weight matrix. Defaults to self.nin.
nout : int, optional
Size of "output" for this weight matrix. Defaults to self.nout.
mean : float, optional
Mean of initial matrix values. Defaults to 0.
std : float, optional
Standard deviation of initial matrix values. Defaults to
:math:`1 / sqrt(n_i + n_o)`.
sparsity : float, optional
Fraction of weights to set randomly to zero. Defaults to 0.
radius : float, optional
If nonzero, rescale initial weights to have this spectral radius.
Defaults to 0.
|
theanets/layers/recurrent.py
|
def add_weights(self, name, nin, nout, mean=0, std=0, sparsity=0, radius=0,
diagonal=0):
'''Helper method to create a new weight matrix.
Parameters
----------
name : str
Name of parameter to define.
nin : int, optional
Size of "input" for this weight matrix. Defaults to self.nin.
nout : int, optional
Size of "output" for this weight matrix. Defaults to self.nout.
mean : float, optional
Mean of initial matrix values. Defaults to 0.
std : float, optional
Standard deviation of initial matrix values. Defaults to
:math:`1 / sqrt(n_i + n_o)`.
sparsity : float, optional
Fraction of weights to set randomly to zero. Defaults to 0.
radius : float, optional
If nonzero, rescale initial weights to have this spectral radius.
Defaults to 0.
'''
glorot = 1 / np.sqrt(nin + nout)
mean = self.kwargs.get(
'mean_{}'.format(name), self.kwargs.get('mean', mean))
std = self.kwargs.get(
'std_{}'.format(name), self.kwargs.get('std', std or glorot))
s = self.kwargs.get(
'sparsity_{}'.format(name), self.kwargs.get('sparsity', sparsity))
r = self.kwargs.get(
'radius_{}'.format(name), self.kwargs.get('radius', radius))
d = self.kwargs.get(
'diagonal_{}'.format(name), self.kwargs.get('diagonal', diagonal))
if nin == self.output_size and nout % nin == 0:
arr = np.concatenate([
util.random_matrix(nin, nin, mean, std, sparsity=s, radius=r,
diagonal=d, rng=self.rng)
for _ in range(nout // nin)], axis=1)
else:
arr = util.random_matrix(nin, nout, mean, std, sparsity=s, rng=self.rng)
self._params.append(theano.shared(arr, name=self._fmt(name)))
|
def add_weights(self, name, nin, nout, mean=0, std=0, sparsity=0, radius=0,
diagonal=0):
'''Helper method to create a new weight matrix.
Parameters
----------
name : str
Name of parameter to define.
nin : int, optional
Size of "input" for this weight matrix. Defaults to self.nin.
nout : int, optional
Size of "output" for this weight matrix. Defaults to self.nout.
mean : float, optional
Mean of initial matrix values. Defaults to 0.
std : float, optional
Standard deviation of initial matrix values. Defaults to
:math:`1 / sqrt(n_i + n_o)`.
sparsity : float, optional
Fraction of weights to set randomly to zero. Defaults to 0.
radius : float, optional
If nonzero, rescale initial weights to have this spectral radius.
Defaults to 0.
'''
glorot = 1 / np.sqrt(nin + nout)
mean = self.kwargs.get(
'mean_{}'.format(name), self.kwargs.get('mean', mean))
std = self.kwargs.get(
'std_{}'.format(name), self.kwargs.get('std', std or glorot))
s = self.kwargs.get(
'sparsity_{}'.format(name), self.kwargs.get('sparsity', sparsity))
r = self.kwargs.get(
'radius_{}'.format(name), self.kwargs.get('radius', radius))
d = self.kwargs.get(
'diagonal_{}'.format(name), self.kwargs.get('diagonal', diagonal))
if nin == self.output_size and nout % nin == 0:
arr = np.concatenate([
util.random_matrix(nin, nin, mean, std, sparsity=s, radius=r,
diagonal=d, rng=self.rng)
for _ in range(nout // nin)], axis=1)
else:
arr = util.random_matrix(nin, nout, mean, std, sparsity=s, rng=self.rng)
self._params.append(theano.shared(arr, name=self._fmt(name)))
|
[
"Helper",
"method",
"to",
"create",
"a",
"new",
"weight",
"matrix",
"."
] |
lmjohns3/theanets
|
python
|
https://github.com/lmjohns3/theanets/blob/79db9f878ef2071f2f576a1cf5d43a752a55894a/theanets/layers/recurrent.py#L78-L119
|
[
"def",
"add_weights",
"(",
"self",
",",
"name",
",",
"nin",
",",
"nout",
",",
"mean",
"=",
"0",
",",
"std",
"=",
"0",
",",
"sparsity",
"=",
"0",
",",
"radius",
"=",
"0",
",",
"diagonal",
"=",
"0",
")",
":",
"glorot",
"=",
"1",
"/",
"np",
".",
"sqrt",
"(",
"nin",
"+",
"nout",
")",
"mean",
"=",
"self",
".",
"kwargs",
".",
"get",
"(",
"'mean_{}'",
".",
"format",
"(",
"name",
")",
",",
"self",
".",
"kwargs",
".",
"get",
"(",
"'mean'",
",",
"mean",
")",
")",
"std",
"=",
"self",
".",
"kwargs",
".",
"get",
"(",
"'std_{}'",
".",
"format",
"(",
"name",
")",
",",
"self",
".",
"kwargs",
".",
"get",
"(",
"'std'",
",",
"std",
"or",
"glorot",
")",
")",
"s",
"=",
"self",
".",
"kwargs",
".",
"get",
"(",
"'sparsity_{}'",
".",
"format",
"(",
"name",
")",
",",
"self",
".",
"kwargs",
".",
"get",
"(",
"'sparsity'",
",",
"sparsity",
")",
")",
"r",
"=",
"self",
".",
"kwargs",
".",
"get",
"(",
"'radius_{}'",
".",
"format",
"(",
"name",
")",
",",
"self",
".",
"kwargs",
".",
"get",
"(",
"'radius'",
",",
"radius",
")",
")",
"d",
"=",
"self",
".",
"kwargs",
".",
"get",
"(",
"'diagonal_{}'",
".",
"format",
"(",
"name",
")",
",",
"self",
".",
"kwargs",
".",
"get",
"(",
"'diagonal'",
",",
"diagonal",
")",
")",
"if",
"nin",
"==",
"self",
".",
"output_size",
"and",
"nout",
"%",
"nin",
"==",
"0",
":",
"arr",
"=",
"np",
".",
"concatenate",
"(",
"[",
"util",
".",
"random_matrix",
"(",
"nin",
",",
"nin",
",",
"mean",
",",
"std",
",",
"sparsity",
"=",
"s",
",",
"radius",
"=",
"r",
",",
"diagonal",
"=",
"d",
",",
"rng",
"=",
"self",
".",
"rng",
")",
"for",
"_",
"in",
"range",
"(",
"nout",
"//",
"nin",
")",
"]",
",",
"axis",
"=",
"1",
")",
"else",
":",
"arr",
"=",
"util",
".",
"random_matrix",
"(",
"nin",
",",
"nout",
",",
"mean",
",",
"std",
",",
"sparsity",
"=",
"s",
",",
"rng",
"=",
"self",
".",
"rng",
")",
"self",
".",
"_params",
".",
"append",
"(",
"theano",
".",
"shared",
"(",
"arr",
",",
"name",
"=",
"self",
".",
"_fmt",
"(",
"name",
")",
")",
")"
] |
79db9f878ef2071f2f576a1cf5d43a752a55894a
|
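add_weights resolves each hyperparameter through a per-name override cascade: a 'std_<name>' keyword beats a bare 'std', which beats the Glorot default 1 / sqrt(nin + nout). A small numpy sketch of that lookup; resolve and the parameter name 'xh' are invented for illustration:

import numpy as np

def resolve(kwargs, key, name, default):
    # '<key>_<name>' overrides '<key>', which overrides the default
    return kwargs.get('{}_{}'.format(key, name), kwargs.get(key, default))

nin, nout = 64, 128
glorot = 1 / np.sqrt(nin + nout)
print(resolve(dict(std=0.1), 'std', 'xh', glorot))               # 0.1
print(resolve(dict(std_xh=0.02, std=0.1), 'std', 'xh', glorot))  # 0.02
print(resolve({}, 'std', 'xh', glorot))                          # ~0.0722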
test
|
Recurrent._scan
|
Helper method for defining a basic loop in theano.
Parameters
----------
inputs : sequence of theano expressions
Inputs to the scan operation.
outputs : sequence of output specifiers
Specifiers for the outputs of the scan operation. This should be a
sequence containing:
- None for values that are output by the scan but not tapped as
inputs,
- an integer or theano scalar (``ndim == 0``) indicating the batch
size for initial zero state,
- a theano tensor variable (``ndim > 0``) containing initial state
data, or
- a dictionary containing a full output specifier. See
``outputs_info`` in the Theano documentation for ``scan``.
name : str, optional
Name of the scan variable to create. Defaults to ``'scan'``.
step : callable, optional
The callable to apply in the loop. Defaults to :func:`self._step`.
constants : sequence of tensor, optional
A sequence of parameters, if any, needed by the step function.
Returns
-------
output(s) : theano expression(s)
Theano expression(s) representing output(s) from the scan.
updates : sequence of update tuples
A sequence of updates to apply inside a theano function.
|
theanets/layers/recurrent.py
|
def _scan(self, inputs, outputs, name='scan', step=None, constants=None):
'''Helper method for defining a basic loop in theano.
Parameters
----------
inputs : sequence of theano expressions
Inputs to the scan operation.
outputs : sequence of output specifiers
Specifiers for the outputs of the scan operation. This should be a
sequence containing:
- None for values that are output by the scan but not tapped as
inputs,
- an integer or theano scalar (``ndim == 0``) indicating the batch
size for initial zero state,
- a theano tensor variable (``ndim > 0``) containing initial state
data, or
- a dictionary containing a full output specifier. See
``outputs_info`` in the Theano documentation for ``scan``.
name : str, optional
Name of the scan variable to create. Defaults to ``'scan'``.
step : callable, optional
The callable to apply in the loop. Defaults to :func:`self._step`.
constants : sequence of tensor, optional
A sequence of parameters, if any, needed by the step function.
Returns
-------
output(s) : theano expression(s)
Theano expression(s) representing output(s) from the scan.
updates : sequence of update tuples
A sequence of updates to apply inside a theano function.
'''
init = []
for i, x in enumerate(outputs):
ndim = getattr(x, 'ndim', -1)
if x is None or isinstance(x, dict) or ndim > 0:
init.append(x)
continue
if isinstance(x, int) or ndim == 0:
init.append(TT.repeat(theano.shared(
np.zeros((1, self.output_size), util.FLOAT),
name=self._fmt('init{}'.format(i))), x, axis=0))
continue
raise ValueError('cannot handle input {} for scan!'.format(x))
return theano.scan(
step or self._step,
name=self._fmt(name),
sequences=inputs,
outputs_info=init,
non_sequences=constants,
go_backwards='back' in self.kwargs.get('direction', '').lower(),
truncate_gradient=self.kwargs.get('bptt_limit', -1),
)
|
def _scan(self, inputs, outputs, name='scan', step=None, constants=None):
'''Helper method for defining a basic loop in theano.
Parameters
----------
inputs : sequence of theano expressions
Inputs to the scan operation.
outputs : sequence of output specifiers
Specifiers for the outputs of the scan operation. This should be a
sequence containing:
- None for values that are output by the scan but not tapped as
inputs,
- an integer or theano scalar (``ndim == 0``) indicating the batch
size for initial zero state,
- a theano tensor variable (``ndim > 0``) containing initial state
data, or
- a dictionary containing a full output specifier. See
``outputs_info`` in the Theano documentation for ``scan``.
name : str, optional
Name of the scan variable to create. Defaults to ``'scan'``.
step : callable, optional
The callable to apply in the loop. Defaults to :func:`self._step`.
constants : sequence of tensor, optional
A sequence of parameters, if any, needed by the step function.
Returns
-------
output(s) : theano expression(s)
Theano expression(s) representing output(s) from the scan.
updates : sequence of update tuples
A sequence of updates to apply inside a theano function.
'''
init = []
for i, x in enumerate(outputs):
ndim = getattr(x, 'ndim', -1)
if x is None or isinstance(x, dict) or ndim > 0:
init.append(x)
continue
if isinstance(x, int) or ndim == 0:
init.append(TT.repeat(theano.shared(
np.zeros((1, self.output_size), util.FLOAT),
name=self._fmt('init{}'.format(i))), x, axis=0))
continue
raise ValueError('cannot handle input {} for scan!'.format(x))
return theano.scan(
step or self._step,
name=self._fmt(name),
sequences=inputs,
outputs_info=init,
non_sequences=constants,
go_backwards='back' in self.kwargs.get('direction', '').lower(),
truncate_gradient=self.kwargs.get('bptt_limit', -1),
)
|
[
"Helper",
"method",
"for",
"defining",
"a",
"basic",
"loop",
"in",
"theano",
"."
] |
lmjohns3/theanets
|
python
|
https://github.com/lmjohns3/theanets/blob/79db9f878ef2071f2f576a1cf5d43a752a55894a/theanets/layers/recurrent.py#L121-L173
|
[
"def",
"_scan",
"(",
"self",
",",
"inputs",
",",
"outputs",
",",
"name",
"=",
"'scan'",
",",
"step",
"=",
"None",
",",
"constants",
"=",
"None",
")",
":",
"init",
"=",
"[",
"]",
"for",
"i",
",",
"x",
"in",
"enumerate",
"(",
"outputs",
")",
":",
"ndim",
"=",
"getattr",
"(",
"x",
",",
"'ndim'",
",",
"-",
"1",
")",
"if",
"x",
"is",
"None",
"or",
"isinstance",
"(",
"x",
",",
"dict",
")",
"or",
"ndim",
">",
"0",
":",
"init",
".",
"append",
"(",
"x",
")",
"continue",
"if",
"isinstance",
"(",
"x",
",",
"int",
")",
"or",
"ndim",
"==",
"0",
":",
"init",
".",
"append",
"(",
"TT",
".",
"repeat",
"(",
"theano",
".",
"shared",
"(",
"np",
".",
"zeros",
"(",
"(",
"1",
",",
"self",
".",
"output_size",
")",
",",
"util",
".",
"FLOAT",
")",
",",
"name",
"=",
"self",
".",
"_fmt",
"(",
"'init{}'",
".",
"format",
"(",
"i",
")",
")",
")",
",",
"x",
",",
"axis",
"=",
"0",
")",
")",
"continue",
"raise",
"ValueError",
"(",
"'cannot handle input {} for scan!'",
".",
"format",
"(",
"x",
")",
")",
"return",
"theano",
".",
"scan",
"(",
"step",
"or",
"self",
".",
"_step",
",",
"name",
"=",
"self",
".",
"_fmt",
"(",
"name",
")",
",",
"sequences",
"=",
"inputs",
",",
"outputs_info",
"=",
"init",
",",
"non_sequences",
"=",
"constants",
",",
"go_backwards",
"=",
"'back'",
"in",
"self",
".",
"kwargs",
".",
"get",
"(",
"'direction'",
",",
"''",
")",
".",
"lower",
"(",
")",
",",
"truncate_gradient",
"=",
"self",
".",
"kwargs",
".",
"get",
"(",
"'bptt_limit'",
",",
"-",
"1",
")",
",",
")"
] |
79db9f878ef2071f2f576a1cf5d43a752a55894a
|
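_scan normalizes its output specifiers before handing them to theano.scan: None, dicts, and tensors pass through untouched, while an integer (or scalar) batch size becomes a zero-filled initial state. A Theano-free sketch of that normalization, with numpy arrays standing in for shared variables; normalize is an invented name:

import numpy as np

def normalize(outputs, output_size):
    init = []
    for x in outputs:
        ndim = getattr(x, 'ndim', -1)
        if x is None or isinstance(x, dict) or ndim > 0:
            init.append(x)  # full specifier: pass through as-is
        elif isinstance(x, int) or ndim == 0:
            init.append(np.zeros((x, output_size)))  # batch of zero states
        else:
            raise ValueError('cannot handle input {} for scan!'.format(x))
    return init

print([getattr(i, 'shape', i) for i in normalize([None, 4], output_size=3)])
# -> [None, (4, 3)]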
test
|
Recurrent._create_rates
|
Create a rate parameter (usually for a recurrent network layer).
Parameters
----------
dist : {'uniform', 'log'}, optional
Distribution of rate values. Defaults to ``'uniform'``.
size : int, optional
Number of rates to create. Defaults to ``self.output_size``.
eps : float, optional
A "buffer" preventing rate values from getting too close to 0 or 1.
Defaults to 1e-4.
Returns
-------
rates : theano shared or None
A vector of rate parameters for certain types of recurrent layers.
|
theanets/layers/recurrent.py
|
def _create_rates(self, dist='uniform', size=None, eps=1e-4):
'''Create a rate parameter (usually for a recurrent network layer).
Parameters
----------
dist : {'uniform', 'log'}, optional
Distribution of rate values. Defaults to ``'uniform'``.
size : int, optional
Number of rates to create. Defaults to ``self.output_size``.
eps : float, optional
A "buffer" preventing rate values from getting too close to 0 or 1.
Defaults to 1e-4.
Returns
-------
rates : theano shared or None
A vector of rate parameters for certain types of recurrent layers.
'''
if size is None:
size = self.output_size
if dist == 'uniform':
z = np.random.uniform(eps, 1 - eps, size=size).astype(util.FLOAT)
return theano.shared(z, name=self._fmt('rate'))
if dist == 'log':
z = np.random.uniform(-6, -eps, size=size).astype(util.FLOAT)
return theano.shared(np.exp(z), name=self._fmt('rate'))
return None
|
def _create_rates(self, dist='uniform', size=None, eps=1e-4):
'''Create a rate parameter (usually for a recurrent network layer).
Parameters
----------
dist : {'uniform', 'log'}, optional
Distribution of rate values. Defaults to ``'uniform'``.
size : int, optional
Number of rates to create. Defaults to ``self.output_size``.
eps : float, optional
A "buffer" preventing rate values from getting too close to 0 or 1.
Defaults to 1e-4.
Returns
-------
rates : theano shared or None
A vector of rate parameters for certain types of recurrent layers.
'''
if size is None:
size = self.output_size
if dist == 'uniform':
z = np.random.uniform(eps, 1 - eps, size=size).astype(util.FLOAT)
return theano.shared(z, name=self._fmt('rate'))
if dist == 'log':
z = np.random.uniform(-6, -eps, size=size).astype(util.FLOAT)
return theano.shared(np.exp(z), name=self._fmt('rate'))
return None
|
[
"Create",
"a",
"rate",
"parameter",
"(",
"usually",
"for",
"a",
"recurrent",
"network",
"layer",
")",
"."
] |
lmjohns3/theanets
|
python
|
https://github.com/lmjohns3/theanets/blob/79db9f878ef2071f2f576a1cf5d43a752a55894a/theanets/layers/recurrent.py#L175-L201
|
[
"def",
"_create_rates",
"(",
"self",
",",
"dist",
"=",
"'uniform'",
",",
"size",
"=",
"None",
",",
"eps",
"=",
"1e-4",
")",
":",
"if",
"size",
"is",
"None",
":",
"size",
"=",
"self",
".",
"output_size",
"if",
"dist",
"==",
"'uniform'",
":",
"z",
"=",
"np",
".",
"random",
".",
"uniform",
"(",
"eps",
",",
"1",
"-",
"eps",
",",
"size",
"=",
"size",
")",
".",
"astype",
"(",
"util",
".",
"FLOAT",
")",
"return",
"theano",
".",
"shared",
"(",
"z",
",",
"name",
"=",
"self",
".",
"_fmt",
"(",
"'rate'",
")",
")",
"if",
"dist",
"==",
"'log'",
":",
"z",
"=",
"np",
".",
"random",
".",
"uniform",
"(",
"-",
"6",
",",
"-",
"eps",
",",
"size",
"=",
"size",
")",
".",
"astype",
"(",
"util",
".",
"FLOAT",
")",
"return",
"theano",
".",
"shared",
"(",
"np",
".",
"exp",
"(",
"z",
")",
",",
"name",
"=",
"self",
".",
"_fmt",
"(",
"'rate'",
")",
")",
"return",
"None"
] |
79db9f878ef2071f2f576a1cf5d43a752a55894a
|
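The two initializations in _create_rates differ in where the rates concentrate: 'uniform' spreads them flat over (eps, 1 - eps), while 'log' draws exponents in (-6, -eps) and exponentiates, biasing the rates toward zero. A numpy sketch (the seed and size are arbitrary):

import numpy as np

rng = np.random.RandomState(13)
eps, size = 1e-4, 5
uniform_rates = rng.uniform(eps, 1 - eps, size=size)
log_rates = np.exp(rng.uniform(-6, -eps, size=size))
print(uniform_rates)  # roughly flat over (0, 1)
print(log_rates)      # concentrated near 0, max just under 1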
test
|
build
|
Construct an activation function by name.
Parameters
----------
name : str or :class:`Activation`
The name of the type of activation function to build, or an
already-created instance of an activation function.
layer : :class:`theanets.layers.Layer`
The layer to which this activation will be applied.
kwargs : dict
Additional named arguments to pass to the activation constructor.
Returns
-------
activation : :class:`Activation`
A neural network activation function instance.
|
theanets/activations.py
|
def build(name, layer, **kwargs):
'''Construct an activation function by name.
Parameters
----------
name : str or :class:`Activation`
The name of the type of activation function to build, or an
already-created instance of an activation function.
layer : :class:`theanets.layers.Layer`
The layer to which this activation will be applied.
kwargs : dict
Additional named arguments to pass to the activation constructor.
Returns
-------
activation : :class:`Activation`
A neural network activation function instance.
'''
if isinstance(name, Activation):
return name
if '+' in name:
return functools.reduce(
Compose, (build(n, layer, **kwargs) for n in name.split('+')))
act = COMMON.get(name)
if act is not None:
act.name = name
act.params = []
return act
if name.lower().startswith('maxout') and ':' in name:
name, pieces = name.split(':', 1)
kwargs['pieces'] = int(pieces)
kwargs['name'] = name
kwargs['layer'] = layer
return Activation.build(name, **kwargs)
|
def build(name, layer, **kwargs):
'''Construct an activation function by name.
Parameters
----------
name : str or :class:`Activation`
The name of the type of activation function to build, or an
already-created instance of an activation function.
layer : :class:`theanets.layers.Layer`
The layer to which this activation will be applied.
kwargs : dict
Additional named arguments to pass to the activation constructor.
Returns
-------
activation : :class:`Activation`
A neural network activation function instance.
'''
if isinstance(name, Activation):
return name
if '+' in name:
return functools.reduce(
Compose, (build(n, layer, **kwargs) for n in name.split('+')))
act = COMMON.get(name)
if act is not None:
act.name = name
act.params = []
return act
if name.lower().startswith('maxout') and ':' in name:
name, pieces = name.split(':', 1)
kwargs['pieces'] = int(pieces)
kwargs['name'] = name
kwargs['layer'] = layer
return Activation.build(name, **kwargs)
|
[
"Construct",
"an",
"activation",
"function",
"by",
"name",
"."
] |
lmjohns3/theanets
|
python
|
https://github.com/lmjohns3/theanets/blob/79db9f878ef2071f2f576a1cf5d43a752a55894a/theanets/activations.py#L89-L125
|
[
"def",
"build",
"(",
"name",
",",
"layer",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"isinstance",
"(",
"name",
",",
"Activation",
")",
":",
"return",
"name",
"if",
"'+'",
"in",
"name",
":",
"return",
"functools",
".",
"reduce",
"(",
"Compose",
",",
"(",
"build",
"(",
"n",
",",
"layer",
",",
"*",
"*",
"kwargs",
")",
"for",
"n",
"in",
"name",
".",
"split",
"(",
"'+'",
")",
")",
")",
"act",
"=",
"COMMON",
".",
"get",
"(",
"name",
")",
"if",
"act",
"is",
"not",
"None",
":",
"act",
".",
"name",
"=",
"name",
"act",
".",
"params",
"=",
"[",
"]",
"return",
"act",
"if",
"name",
".",
"lower",
"(",
")",
".",
"startswith",
"(",
"'maxout'",
")",
"and",
"':'",
"in",
"name",
":",
"name",
",",
"pieces",
"=",
"name",
".",
"split",
"(",
"':'",
",",
"1",
")",
"kwargs",
"[",
"'pieces'",
"]",
"=",
"int",
"(",
"pieces",
")",
"kwargs",
"[",
"'name'",
"]",
"=",
"name",
"kwargs",
"[",
"'layer'",
"]",
"=",
"layer",
"return",
"Activation",
".",
"build",
"(",
"name",
",",
"*",
"*",
"kwargs",
")"
] |
79db9f878ef2071f2f576a1cf5d43a752a55894a
|
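The build function above supports two bits of name syntax: '+' composes sub-activations via functools.reduce, and a 'maxout:k' suffix splits off a piece count. A toy sketch of both parses; the left-to-right composition order shown here is an assumption, since the actual ordering lives in theanets' Compose class, which this record does not include:

import functools

def compose(f, g):
    return lambda x: g(f(x))  # assumed order: apply f, then g

acts = {'relu': lambda x: max(x, 0.0), 'neg': lambda x: -x}

def build_toy(name):
    if '+' in name:
        return functools.reduce(compose, (build_toy(n) for n in name.split('+')))
    return acts[name]

print(build_toy('relu+neg')(3.0))  # relu then neg -> -3.0

name = 'maxout:4'
base, pieces = name.split(':', 1)  # mirrors the maxout piece-count parse
print(base, int(pieces))           # -> maxout 4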
test
|
DownhillTrainer.itertrain
|
Train a model using a training and validation set.
This method yields a series of monitor values to the caller. After every
iteration, a pair of monitor dictionaries is generated: one evaluated on
the training dataset, and another evaluated on the validation dataset.
The validation monitors might not be updated during every training
iteration; in this case, the most recent validation monitors will be
yielded along with the training monitors.
Parameters
----------
train : :class:`Dataset <theanets.dataset.Dataset>`
A set of training data for computing updates to model parameters.
valid : :class:`Dataset <theanets.dataset.Dataset>`
A set of validation data for computing monitor values and
determining when the loss has stopped improving.
Yields
------
training : dict
A dictionary mapping monitor names to values, evaluated on the
training dataset.
validation : dict
A dictionary containing monitor values evaluated on the validation
dataset.
|
theanets/trainer.py
|
def itertrain(self, train, valid=None, **kwargs):
'''Train a model using a training and validation set.
This method yields a series of monitor values to the caller. After every
iteration, a pair of monitor dictionaries is generated: one evaluated on
the training dataset, and another evaluated on the validation dataset.
The validation monitors might not be updated during every training
iteration; in this case, the most recent validation monitors will be
yielded along with the training monitors.
Parameters
----------
train : :class:`Dataset <theanets.dataset.Dataset>`
A set of training data for computing updates to model parameters.
valid : :class:`Dataset <theanets.dataset.Dataset>`
A set of validation data for computing monitor values and
determining when the loss has stopped improving.
Yields
------
training : dict
A dictionary mapping monitor names to values, evaluated on the
training dataset.
validation : dict
A dictionary containing monitor values evaluated on the validation
dataset.
'''
for monitors in downhill.build(
algo=self.algo,
loss=self.network.loss(**kwargs),
updates=self.network.updates(**kwargs),
monitors=self.network.monitors(**kwargs),
inputs=self.network.variables,
params=self.network.params,
monitor_gradients=kwargs.get('monitor_gradients', False),
).iterate(train, valid=valid, **kwargs):
yield monitors
|
def itertrain(self, train, valid=None, **kwargs):
'''Train a model using a training and validation set.
This method yields a series of monitor values to the caller. After every
iteration, a pair of monitor dictionaries is generated: one evaluated on
the training dataset, and another evaluated on the validation dataset.
The validation monitors might not be updated during every training
iteration; in this case, the most recent validation monitors will be
yielded along with the training monitors.
Parameters
----------
train : :class:`Dataset <theanets.dataset.Dataset>`
A set of training data for computing updates to model parameters.
valid : :class:`Dataset <theanets.dataset.Dataset>`
A set of validation data for computing monitor values and
determining when the loss has stopped improving.
Yields
------
training : dict
A dictionary mapping monitor names to values, evaluated on the
training dataset.
validation : dict
A dictionary containing monitor values evaluated on the validation
dataset.
'''
for monitors in downhill.build(
algo=self.algo,
loss=self.network.loss(**kwargs),
updates=self.network.updates(**kwargs),
monitors=self.network.monitors(**kwargs),
inputs=self.network.variables,
params=self.network.params,
monitor_gradients=kwargs.get('monitor_gradients', False),
).iterate(train, valid=valid, **kwargs):
yield monitors
|
[
"Train",
"a",
"model",
"using",
"a",
"training",
"and",
"validation",
"set",
"."
] |
lmjohns3/theanets
|
python
|
https://github.com/lmjohns3/theanets/blob/79db9f878ef2071f2f576a1cf5d43a752a55894a/theanets/trainer.py#L28-L64
|
[
"def",
"itertrain",
"(",
"self",
",",
"train",
",",
"valid",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"for",
"monitors",
"in",
"downhill",
".",
"build",
"(",
"algo",
"=",
"self",
".",
"algo",
",",
"loss",
"=",
"self",
".",
"network",
".",
"loss",
"(",
"*",
"*",
"kwargs",
")",
",",
"updates",
"=",
"self",
".",
"network",
".",
"updates",
"(",
"*",
"*",
"kwargs",
")",
",",
"monitors",
"=",
"self",
".",
"network",
".",
"monitors",
"(",
"*",
"*",
"kwargs",
")",
",",
"inputs",
"=",
"self",
".",
"network",
".",
"variables",
",",
"params",
"=",
"self",
".",
"network",
".",
"params",
",",
"monitor_gradients",
"=",
"kwargs",
".",
"get",
"(",
"'monitor_gradients'",
",",
"False",
")",
",",
")",
".",
"iterate",
"(",
"train",
",",
"valid",
"=",
"valid",
",",
"*",
"*",
"kwargs",
")",
":",
"yield",
"monitors"
] |
79db9f878ef2071f2f576a1cf5d43a752a55894a
|
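A hedged usage sketch of the generator contract documented above: each iteration yields a pair of monitor dicts, one evaluated on training data and one on validation data. This assumes theanets and its downhill dependency are installed and that 'loss' is among the monitor keys; the network shape, algorithm, and learning rate are invented for illustration:

import numpy as np
import theanets

X = np.random.randn(100, 20).astype('float32')
net = theanets.Autoencoder(layers=(20, 10, 20))
for train_m, valid_m in net.itertrain([X], algo='sgd', learning_rate=0.01):
    print(train_m['loss'], valid_m['loss'])
    break  # stop after one pair of monitor dicts for the demo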
test
|
SampleTrainer.reservoir
|
Select a random sample of n items from xs.
|
theanets/trainer.py
|
def reservoir(xs, n, rng):
'''Select a random sample of n items from xs.'''
pool = []
for i, x in enumerate(xs):
if len(pool) < n:
pool.append(x / np.linalg.norm(x))
continue
j = rng.randint(i + 1)
if j < n:
pool[j] = x / np.linalg.norm(x)
# if the pool still has fewer than n items, pad with distorted random
# duplicates from the source data.
L = len(pool)
S = np.std(pool, axis=0)
while len(pool) < n:
x = pool[rng.randint(L)]
pool.append(x + S * rng.randn(*x.shape))
return np.array(pool, dtype=pool[0].dtype)
|
def reservoir(xs, n, rng):
'''Select a random sample of n items from xs.'''
pool = []
for i, x in enumerate(xs):
if len(pool) < n:
pool.append(x / np.linalg.norm(x))
continue
j = rng.randint(i + 1)
if j < n:
pool[j] = x / np.linalg.norm(x)
# if the pool still has fewer than n items, pad with distorted random
# duplicates from the source data.
L = len(pool)
S = np.std(pool, axis=0)
while len(pool) < n:
x = pool[rng.randint(L)]
pool.append(x + S * rng.randn(*x.shape))
return np.array(pool, dtype=pool[0].dtype)
|
[
"Select",
"a",
"random",
"sample",
"of",
"n",
"items",
"from",
"xs",
"."
] |
lmjohns3/theanets
|
python
|
https://github.com/lmjohns3/theanets/blob/79db9f878ef2071f2f576a1cf5d43a752a55894a/theanets/trainer.py#L71-L88
|
[
"def",
"reservoir",
"(",
"xs",
",",
"n",
",",
"rng",
")",
":",
"pool",
"=",
"[",
"]",
"for",
"i",
",",
"x",
"in",
"enumerate",
"(",
"xs",
")",
":",
"if",
"len",
"(",
"pool",
")",
"<",
"n",
":",
"pool",
".",
"append",
"(",
"x",
"/",
"np",
".",
"linalg",
".",
"norm",
"(",
"x",
")",
")",
"continue",
"j",
"=",
"rng",
".",
"randint",
"(",
"i",
"+",
"1",
")",
"if",
"j",
"<",
"n",
":",
"pool",
"[",
"j",
"]",
"=",
"x",
"/",
"np",
".",
"linalg",
".",
"norm",
"(",
"x",
")",
"# if the pool still has fewer than n items, pad with distorted random",
"# duplicates from the source data.",
"L",
"=",
"len",
"(",
"pool",
")",
"S",
"=",
"np",
".",
"std",
"(",
"pool",
",",
"axis",
"=",
"0",
")",
"while",
"len",
"(",
"pool",
")",
"<",
"n",
":",
"x",
"=",
"pool",
"[",
"rng",
".",
"randint",
"(",
"L",
")",
"]",
"pool",
".",
"append",
"(",
"x",
"+",
"S",
"*",
"rng",
".",
"randn",
"(",
"*",
"x",
".",
"shape",
")",
")",
"return",
"np",
".",
"array",
"(",
"pool",
",",
"dtype",
"=",
"pool",
"[",
"0",
"]",
".",
"dtype",
")"
] |
79db9f878ef2071f2f576a1cf5d43a752a55894a
|
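reservoir above is classic reservoir sampling: the first n items fill the pool, and each item i thereafter replaces a uniformly chosen slot with probability n / (i + 1), so every stream element lands in the sample with equal probability. A standalone numpy demonstration of the sampling-and-normalization path (the padding branch is omitted since this stream is longer than n; the data is invented):

import numpy as np

def sample_stream(xs, n, rng):
    pool = []
    for i, x in enumerate(xs):
        if len(pool) < n:
            pool.append(x / np.linalg.norm(x))  # fill the pool first
            continue
        j = rng.randint(i + 1)
        if j < n:  # replace a slot with probability n / (i + 1)
            pool[j] = x / np.linalg.norm(x)
    return np.array(pool)

rng = np.random.RandomState(13)
xs = rng.randn(1000, 4)
sample = sample_stream(xs, 10, rng)
print(sample.shape, np.allclose(np.linalg.norm(sample, axis=1), 1.0))
# -> (10, 4) True: ten unit-norm rows drawn uniformly from the stream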