| column | dtype | lengths / values |
|---|---|---|
| id | int32 | 0 to 252k |
| repo | string | 7 to 55 chars |
| path | string | 4 to 127 chars |
| func_name | string | 1 to 88 chars |
| original_string | string | 75 to 19.8k chars |
| language | string | 1 class (python) |
| code | string | 75 to 19.8k chars |
| code_tokens | list | tokenized code |
| docstring | string | 3 to 17.3k chars |
| docstring_tokens | list | tokenized docstring |
| sha | string | 40 chars |
| url | string | 87 to 242 chars |
id: 12,200 | repo: optimizely/python-sdk | path: optimizely/helpers/condition_tree_evaluator.py | func_name: evaluate | language: python
def evaluate(conditions, leaf_evaluator):
""" Top level method to evaluate conditions.
Args:
conditions: Nested array of and/or conditions, or a single leaf condition value of any type.
Example: ['and', '0', ['or', '1', '2']]
leaf_evaluator: Function which will be called to evaluate leaf condition values.
Returns:
Boolean: Result of evaluating the conditions using the operator rules and the leaf evaluator.
None: if conditions couldn't be evaluated.
"""
if isinstance(conditions, list):
if conditions[0] in list(EVALUATORS_BY_OPERATOR_TYPE.keys()):
return EVALUATORS_BY_OPERATOR_TYPE[conditions[0]](conditions[1:], leaf_evaluator)
else:
# assume OR when operator is not explicit.
return EVALUATORS_BY_OPERATOR_TYPE[ConditionOperatorTypes.OR](conditions, leaf_evaluator)
leaf_condition = conditions
return leaf_evaluator(leaf_condition)
sha: ec028d9efcf22498c3820f2650fa10f5c30bec90 | url: https://github.com/optimizely/python-sdk/blob/ec028d9efcf22498c3820f2650fa10f5c30bec90/optimizely/helpers/condition_tree_evaluator.py#L97-L119
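For context, a minimal usage sketch of the evaluate() helper above (not part of the dataset row): the import path follows the `path` field, the results dict and leaf_evaluator are invented, and the expected output assumes the registered 'and'/'or' operators follow ordinary boolean semantics.

```python
# Hypothetical usage sketch for evaluate(); the lookup dict below is made up.
from optimizely.helpers.condition_tree_evaluator import evaluate

results = {'0': True, '1': False, '2': True}

def leaf_evaluator(condition):
    # Return True/False for a known leaf, or None if it cannot be evaluated.
    return results.get(condition)

# ['and', '0', ['or', '1', '2']] -> True and (False or True) -> expected True
print(evaluate(['and', '0', ['or', '1', '2']], leaf_evaluator))
```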
id: 12,201 | repo: Parisson/TimeSide | path: timeside/core/analyzer.py | func_name: data_objet_class | language: python
def data_objet_class(data_mode='value', time_mode='framewise'):
"""
Factory function for Analyzer result
"""
classes_table = {('value', 'global'): GlobalValueObject,
('value', 'event'): EventValueObject,
('value', 'segment'): SegmentValueObject,
('value', 'framewise'): FrameValueObject,
('label', 'global'): GlobalLabelObject,
('label', 'event'): EventLabelObject,
('label', 'segment'): SegmentLabelObject,
('label', 'framewise'): FrameLabelObject}
try:
return classes_table[(data_mode, time_mode)]
except KeyError as e:
raise ValueError('Wrong arguments')
sha: 0618d75cd2f16021afcfd3d5b77f692adad76ea5 | url: https://github.com/Parisson/TimeSide/blob/0618d75cd2f16021afcfd3d5b77f692adad76ea5/timeside/core/analyzer.py#L511-L527
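As a quick illustration of the factory row above (a sketch, not from the dataset: it assumes timeside.core.analyzer imports cleanly and that the result classes listed in the mapping exist), each supported (data_mode, time_mode) pair yields one class and any other pair raises ValueError:

```python
# Illustrative sketch only; class and argument names come from the code above.
from timeside.core.analyzer import data_objet_class

cls = data_objet_class(data_mode='label', time_mode='segment')
print(cls.__name__)  # SegmentLabelObject

try:
    data_objet_class(data_mode='value', time_mode='monthly')  # unsupported pair
except ValueError as err:
    print(err)  # Wrong arguments
```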
id: 12,202 | repo: Parisson/TimeSide | path: timeside/core/analyzer.py | func_name: JSON_NumpyArrayEncoder | language: python
def JSON_NumpyArrayEncoder(obj):
'''Define Specialize JSON encoder for numpy array'''
if isinstance(obj, np.ndarray):
return {'numpyArray': obj.tolist(),
'dtype': obj.dtype.__str__()}
elif isinstance(obj, np.generic):
return np.asscalar(obj)
else:
print type(obj)
raise TypeError(repr(obj) + " is not JSON serializable")
sha: 0618d75cd2f16021afcfd3d5b77f692adad76ea5 | url: https://github.com/Parisson/TimeSide/blob/0618d75cd2f16021afcfd3d5b77f692adad76ea5/timeside/core/analyzer.py#L1047-L1056
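The function above is shaped like a `default` hook for the json module. A hedged sketch of that usage follows (the payload is invented, and since the surrounding module uses Python 2 `print` syntax, the import is assumed to run under Python 2):

```python
# Sketch: serialize numpy data by passing the encoder as json.dumps' default hook.
import json
import numpy as np
from timeside.core.analyzer import JSON_NumpyArrayEncoder

payload = {'samples': np.arange(4, dtype=np.float32)}
print(json.dumps(payload, default=JSON_NumpyArrayEncoder))
# e.g. {"samples": {"numpyArray": [0.0, 1.0, 2.0, 3.0], "dtype": "float32"}}
```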
id: 12,203 | repo: Parisson/TimeSide | path: timeside/core/analyzer.py | func_name: AnalyzerResult.render | language: python
def render(self):
'''Render a matplotlib figure from the analyzer result
Return the figure, use fig.show() to display if neeeded
'''
fig, ax = plt.subplots()
self.data_object._render_plot(ax)
return fig
sha: 0618d75cd2f16021afcfd3d5b77f692adad76ea5 | url: https://github.com/Parisson/TimeSide/blob/0618d75cd2f16021afcfd3d5b77f692adad76ea5/timeside/core/analyzer.py#L670-L678
id: 12,204 | repo: Parisson/TimeSide | path: timeside/core/analyzer.py | func_name: Analyzer.new_result | language: python
def new_result(self, data_mode='value', time_mode='framewise'):
'''
Create a new result
Attributes
----------
data_object : MetadataObject
id_metadata : MetadataObject
audio_metadata : MetadataObject
frame_metadata : MetadataObject
label_metadata : MetadataObject
parameters : dict
'''
from datetime import datetime
result = AnalyzerResult(data_mode=data_mode,
time_mode=time_mode)
# Automatically write known metadata
result.id_metadata.date = datetime.now().replace(
microsecond=0).isoformat(' ')
result.id_metadata.version = timeside.core.__version__
result.id_metadata.author = 'TimeSide'
result.id_metadata.id = self.id()
result.id_metadata.name = self.name()
result.id_metadata.description = self.description()
result.id_metadata.unit = self.unit()
result.id_metadata.proc_uuid = self.uuid()
result.audio_metadata.uri = self.mediainfo()['uri']
result.audio_metadata.sha1 = self.mediainfo()['sha1']
result.audio_metadata.start = self.mediainfo()['start']
result.audio_metadata.duration = self.mediainfo()['duration']
result.audio_metadata.is_segment = self.mediainfo()['is_segment']
result.audio_metadata.channels = self.channels()
result.parameters = Parameters(self.get_parameters())
if time_mode == 'framewise':
result.data_object.frame_metadata.samplerate = self.result_samplerate
result.data_object.frame_metadata.blocksize = self.result_blocksize
result.data_object.frame_metadata.stepsize = self.result_stepsize
return result
sha: 0618d75cd2f16021afcfd3d5b77f692adad76ea5 | url: https://github.com/Parisson/TimeSide/blob/0618d75cd2f16021afcfd3d5b77f692adad76ea5/timeside/core/analyzer.py#L1279-L1324
id: 12,205 | repo: Parisson/TimeSide | path: timeside/core/preprocessors.py | func_name: downmix_to_mono | language: python
def downmix_to_mono(process_func):
'''
Pre-processing decorator that downmixes frames from multi-channel to mono
Downmix is achieved by averaging all channels
>>> from timeside.core.preprocessors import downmix_to_mono
>>> @downmix_to_mono
... def process(analyzer,frames,eod):
... print 'Frames, eod inside process :'
... print frames, eod
... return frames, eod
...
>>> import numpy as np
>>> frames = np.asarray([[1,2],[3,4],[5,6],[7,8],[9,10]])
>>> eod = False
>>> frames_, eod_ = process(object(),frames,eod)
Frames, eod inside process :
[1.5 3.5 5.5 7.5 9.5] False
Outside Process frames and eod are preserved :
>>> frames_
array([[ 1, 2],
[ 3, 4],
[ 5, 6],
[ 7, 8],
[ 9, 10]])
>>> eod_
False
'''
import functools
@functools.wraps(process_func)
def wrapper(analyzer, frames, eod):
# Pre-processing
if frames.ndim > 1:
downmix_frames = frames.mean(axis=-1)
else:
downmix_frames = frames
# Processing
process_func(analyzer, downmix_frames, eod)
return frames, eod
return wrapper
sha: 0618d75cd2f16021afcfd3d5b77f692adad76ea5 | url: https://github.com/Parisson/TimeSide/blob/0618d75cd2f16021afcfd3d5b77f692adad76ea5/timeside/core/preprocessors.py#L32-L77
id: 12,206 | repo: Parisson/TimeSide | path: timeside/core/preprocessors.py | func_name: frames_adapter | language: python
def frames_adapter(process_func):
'''
Pre-processing decorator that adapt frames to match input_blocksize and
input_stepsize of the decorated analyzer
>>> from timeside.core.preprocessors import frames_adapter
>>> @frames_adapter
... def process(analyzer,frames,eod):
... analyzer.frames.append(frames)
... return frames, eod
>>> class Fake_Analyzer(object):
... def __init__(self):
... self.input_blocksize = 4
... self.input_stepsize = 3
... self.frames = [] # Container for the frame as viewed by process
... @staticmethod
... def id():
... return 'fake_analyzer'
>>> import numpy as np
>>> analyzer = Fake_Analyzer()
>>> frames = np.asarray(range(0,12))
>>> eod = False
>>> frames_, eod_ = process(analyzer,frames,eod)
Inside the process the frames have been adapted to match input_blocksize
and input_stepsize
>>> analyzer.frames
[array([0, 1, 2, 3]), array([3, 4, 5, 6]), array([6, 7, 8, 9])]
Outside the process, the original frames and eod are preserved:
>>> frames_
array([ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11])
>>> eod_
False
Releasing the process with eod=True will zeropad the last frame if necessary
>>> frames = np.asarray(range(12,14))
>>> eod = True
>>> frames_, eod_ = process(analyzer,frames,eod)
>>> analyzer.frames
[array([0, 1, 2, 3]), array([3, 4, 5, 6]), array([6, 7, 8, 9]), array([ 9, 10, 11, 12]), array([12, 13, 0, 0])]
'''
import functools
import numpy as np
class framesBuffer(object):
def __init__(self, blocksize, stepsize):
self.blocksize = blocksize
self.stepsize = stepsize
self.buffer = None
def frames(self, frames, eod):
if self.buffer is not None:
stack = np.concatenate([self.buffer, frames])
else:
stack = frames.copy()
stack_length = len(stack)
nb_frames = (
stack_length - self.blocksize + self.stepsize) // self.stepsize
nb_frames = max(nb_frames, 0)
frames_length = nb_frames * self.stepsize + \
self.blocksize - self.stepsize
last_block_size = stack_length - frames_length
if eod:
# Final zeropadding
pad_shape = tuple(
self.blocksize - last_block_size if i == 0 else x
for i, x in enumerate(frames.shape))
stack = np.concatenate([stack, np.zeros(pad_shape,
dtype=frames.dtype)])
nb_frames += 1
self.buffer = stack[nb_frames * self.stepsize:]
eod_list = np.repeat(False, nb_frames)
if eod and len(eod_list):
eod_list[-1] = eod
for index, eod in zip(xrange(0, nb_frames * self.stepsize, self.stepsize), eod_list):
yield (stack[index:index + self.blocksize], eod)
aubio_analyzers = ['aubio_melenergy', 'aubio_mfcc', 'aubio_pitch', 'aubio_specdesc', 'aubio_temporal']
@functools.wraps(process_func)
def wrapper(analyzer, frames, eod):
# Pre-processing
if not hasattr(analyzer, 'frames_buffer'):
if analyzer.id() in aubio_analyzers:
# Aubio analyzers are waiting for stepsize length block
# and reconstructs blocksize length frames itself
# thus frames_adapter has to provide Aubio Pitch blocksize=stepsize length frames
analyzer.frames_buffer = framesBuffer(analyzer.input_stepsize,
analyzer.input_stepsize)
else:
analyzer.frames_buffer = framesBuffer(analyzer.input_blocksize,
analyzer.input_stepsize)
# Processing
for adapted_frames, adapted_eod in analyzer.frames_buffer.frames(frames, eod):
process_func(analyzer, adapted_frames, adapted_eod)
return frames, eod
return wrapper
sha: 0618d75cd2f16021afcfd3d5b77f692adad76ea5 | url: https://github.com/Parisson/TimeSide/blob/0618d75cd2f16021afcfd3d5b77f692adad76ea5/timeside/core/preprocessors.py#L80-L190
id: 12,207 | repo: Parisson/TimeSide | path: timeside/server/models.py | func_name: Item.get_uri | language: python
def get_uri(self):
"""Return the Item source"""
if self.source_file and os.path.exists(self.source_file.path):
return self.source_file.path
elif self.source_url:
return self.source_url
return None
sha: 0618d75cd2f16021afcfd3d5b77f692adad76ea5 | url: https://github.com/Parisson/TimeSide/blob/0618d75cd2f16021afcfd3d5b77f692adad76ea5/timeside/server/models.py#L184-L190
id: 12,208 | repo: Parisson/TimeSide | path: timeside/server/models.py | func_name: Item.get_audio_duration | language: python
def get_audio_duration(self):
"""
Return item audio duration
"""
decoder = timeside.core.get_processor('file_decoder')(
uri=self.get_uri())
return decoder.uri_total_duration
sha: 0618d75cd2f16021afcfd3d5b77f692adad76ea5 | url: https://github.com/Parisson/TimeSide/blob/0618d75cd2f16021afcfd3d5b77f692adad76ea5/timeside/server/models.py#L192-L198
id: 12,209 | repo: Parisson/TimeSide | path: timeside/server/models.py | func_name: Item.get_results_path | language: python
def get_results_path(self):
"""
Return Item result path
"""
result_path = os.path.join(RESULTS_ROOT, self.uuid)
if not os.path.exists(result_path):
os.makedirs(result_path)
return result_path
sha: 0618d75cd2f16021afcfd3d5b77f692adad76ea5 | url: https://github.com/Parisson/TimeSide/blob/0618d75cd2f16021afcfd3d5b77f692adad76ea5/timeside/server/models.py#L200-L207
id: 12,210 | repo: Parisson/TimeSide | path: timeside/plugins/decoder/utils.py | func_name: get_uri | language: python
def get_uri(source):
"""
Check a media source as a valid file or uri and return the proper uri
"""
import gst
src_info = source_info(source)
if src_info['is_file']: # Is this a file?
return get_uri(src_info['uri'])
elif gst.uri_is_valid(source): # Is this a valid URI source for Gstreamer
uri_protocol = gst.uri_get_protocol(source)
if gst.uri_protocol_is_supported(gst.URI_SRC, uri_protocol):
return source
else:
raise IOError('Invalid URI source for Gstreamer')
else:
raise IOError('Failed getting uri for path %s: no such file' % source)
sha: 0618d75cd2f16021afcfd3d5b77f692adad76ea5 | url: https://github.com/Parisson/TimeSide/blob/0618d75cd2f16021afcfd3d5b77f692adad76ea5/timeside/plugins/decoder/utils.py#L100-L119
id: 12,211 | repo: Parisson/TimeSide | path: timeside/plugins/decoder/utils.py | func_name: sha1sum_file | language: python
def sha1sum_file(filename):
'''
Return the secure hash digest with sha1 algorithm for a given file
>>> from timeside.core.tools.test_samples import samples
>>> wav_file = samples["C4_scale.wav"]
>>> print sha1sum_file(wav_file)
a598e78d0b5c90da54a77e34c083abdcd38d42ba
'''
import hashlib
import io
sha1 = hashlib.sha1()
chunk_size = sha1.block_size * io.DEFAULT_BUFFER_SIZE
with open(filename, 'rb') as f:
for chunk in iter(lambda: f.read(chunk_size), b''):
sha1.update(chunk)
return sha1.hexdigest()
sha: 0618d75cd2f16021afcfd3d5b77f692adad76ea5 | url: https://github.com/Parisson/TimeSide/blob/0618d75cd2f16021afcfd3d5b77f692adad76ea5/timeside/plugins/decoder/utils.py#L180-L198
id: 12,212 | repo: Parisson/TimeSide | path: timeside/plugins/decoder/utils.py | func_name: sha1sum_url | language: python
def sha1sum_url(url):
'''Return the secure hash digest with sha1 algorithm for a given url
>>> url = "https://github.com/yomguy/timeside-samples/raw/master/samples/guitar.wav"
>>> print sha1sum_url(url)
08301c3f9a8d60926f31e253825cc74263e52ad1
'''
import hashlib
import urllib
from contextlib import closing
sha1 = hashlib.sha1()
chunk_size = sha1.block_size * 8192
max_file_size = 10 * 1024 * 1024 # 10Mo limit in case of very large file
total_read = 0
with closing(urllib.urlopen(url)) as url_obj:
for chunk in iter(lambda: url_obj.read(chunk_size), b''):
sha1.update(chunk)
total_read += chunk_size
if total_read > max_file_size:
break
return sha1.hexdigest()
sha: 0618d75cd2f16021afcfd3d5b77f692adad76ea5 | url: https://github.com/Parisson/TimeSide/blob/0618d75cd2f16021afcfd3d5b77f692adad76ea5/timeside/plugins/decoder/utils.py#L201-L226
id: 12,213 | repo: Parisson/TimeSide | path: timeside/plugins/decoder/utils.py | func_name: sha1sum_numpy | language: python
def sha1sum_numpy(np_array):
'''
Return the secure hash digest with sha1 algorithm for a numpy array
'''
import hashlib
return hashlib.sha1(np_array.view(np.uint8)).hexdigest()
sha: 0618d75cd2f16021afcfd3d5b77f692adad76ea5 | url: https://github.com/Parisson/TimeSide/blob/0618d75cd2f16021afcfd3d5b77f692adad76ea5/timeside/plugins/decoder/utils.py#L229-L234
id: 12,214 | repo: Parisson/TimeSide | path: timeside/core/tools/package.py | func_name: import_module_with_exceptions | language: python
def import_module_with_exceptions(name, package=None):
"""Wrapper around importlib.import_module to import TimeSide subpackage
and ignoring ImportError if Aubio, Yaafe and Vamp Host are not available"""
from timeside.core import _WITH_AUBIO, _WITH_YAAFE, _WITH_VAMP
if name.count('.server.'):
# TODO:
# Temporary skip all timeside.server submodules before check dependencies
return
try:
import_module(name, package)
except VampImportError:
# No Vamp Host
if _WITH_VAMP:
raise VampImportError
else:
# Ignore Vamp ImportError
return
except ImportError as e:
if str(e).count('yaafelib') and not _WITH_YAAFE:
# Ignore Yaafe ImportError
return
elif str(e).count('aubio') and not _WITH_AUBIO:
# Ignore Aubio ImportError
return
elif str(e).count('DJANGO_SETTINGS_MODULE'):
# Ignore module requiring DJANGO_SETTINGS_MODULE in environnement
return
else:
print (name, package)
raise e
return name
sha: 0618d75cd2f16021afcfd3d5b77f692adad76ea5 | url: https://github.com/Parisson/TimeSide/blob/0618d75cd2f16021afcfd3d5b77f692adad76ea5/timeside/core/tools/package.py#L50-L82
id: 12,215 | repo: Parisson/TimeSide | path: timeside/core/tools/package.py | func_name: check_vamp | language: python
def check_vamp():
"Check Vamp host availability"
try:
from timeside.plugins.analyzer.externals import vamp_plugin
except VampImportError:
warnings.warn('Vamp host is not available', ImportWarning,
stacklevel=2)
_WITH_VAMP = False
else:
_WITH_VAMP = True
del vamp_plugin
return _WITH_VAMP
sha: 0618d75cd2f16021afcfd3d5b77f692adad76ea5 | url: https://github.com/Parisson/TimeSide/blob/0618d75cd2f16021afcfd3d5b77f692adad76ea5/timeside/core/tools/package.py#L115-L128
id: 12,216 | repo: Parisson/TimeSide | path: timeside/plugins/grapher/utils.py | func_name: im_watermark | language: python
def im_watermark(im, inputtext, font=None, color=None, opacity=.6, margin=(30, 30)):
"""imprints a PIL image with the indicated text in lower-right corner"""
if im.mode != "RGBA":
im = im.convert("RGBA")
textlayer = Image.new("RGBA", im.size, (0, 0, 0, 0))
textdraw = ImageDraw.Draw(textlayer)
textsize = textdraw.textsize(inputtext, font=font)
textpos = [im.size[i] - textsize[i] - margin[i] for i in [0, 1]]
textdraw.text(textpos, inputtext, font=font, fill=color)
if opacity != 1:
textlayer = reduce_opacity(textlayer, opacity)
return Image.composite(textlayer, im, textlayer)
sha: 0618d75cd2f16021afcfd3d5b77f692adad76ea5 | url: https://github.com/Parisson/TimeSide/blob/0618d75cd2f16021afcfd3d5b77f692adad76ea5/timeside/plugins/grapher/utils.py#L168-L179
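A short, hedged usage sketch for the watermark helper above; the file names are invented, and it assumes a PIL/Pillow version that still provides ImageDraw.textsize, which the function relies on.

```python
# Hypothetical usage: stamp a caption into the lower-right corner of an image.
from PIL import Image
from timeside.plugins.grapher.utils import im_watermark

im = Image.open('waveform.png')  # any RGB/RGBA image
marked = im_watermark(im, 'TimeSide', opacity=0.6, margin=(30, 30))
marked.save('waveform_marked.png')
```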
id: 12,217 | repo: Parisson/TimeSide | path: timeside/plugins/analyzer/utils.py | func_name: nextpow2 | language: python
def nextpow2(value):
"""Compute the nearest power of two greater or equal to the input value"""
if value >= 1:
return 2**np.ceil(np.log2(value)).astype(int)
elif value > 0:
return 1
elif value == 0:
return 0
else:
raise ValueError('Value must be positive')
sha: 0618d75cd2f16021afcfd3d5b77f692adad76ea5 | url: https://github.com/Parisson/TimeSide/blob/0618d75cd2f16021afcfd3d5b77f692adad76ea5/timeside/plugins/analyzer/utils.py#L65-L74
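A few spot checks of the behaviour documented above (illustrative values only, assuming the module imports as its path suggests):

```python
# nextpow2 rounds up to the nearest power of two for values >= 1,
# returns 1 for 0 < value < 1, returns 0 for 0, and raises for negative input.
from timeside.plugins.analyzer.utils import nextpow2

print(nextpow2(5))     # 8, since 2**ceil(log2(5)) == 2**3
print(nextpow2(1024))  # 1024, already a power of two
print(nextpow2(0.25))  # 1
print(nextpow2(0))     # 0
# nextpow2(-3) raises ValueError('Value must be positive')
```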
id: 12,218 | repo: Parisson/TimeSide | path: timeside/core/processor.py | func_name: FixedSizeInputAdapter.blocksize | language: python
def blocksize(self, input_totalframes):
"""Return the total number of frames that this adapter will output
according to the input_totalframes argument"""
blocksize = input_totalframes
if self.pad:
mod = input_totalframes % self.buffer_size
if mod:
blocksize += self.buffer_size - mod
return blocksize
sha: 0618d75cd2f16021afcfd3d5b77f692adad76ea5 | url: https://github.com/Parisson/TimeSide/blob/0618d75cd2f16021afcfd3d5b77f692adad76ea5/timeside/core/processor.py#L218-L228
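The padding arithmetic in blocksize() rounds the input length up to the next multiple of the adapter's buffer size when pad is set. A standalone sketch with made-up numbers, independent of the class itself:

```python
# Hypothetical values: buffer_size=1024, pad=True, 2500 input frames.
# 2500 % 1024 == 452, so the output grows by 1024 - 452 == 572 frames.
buffer_size, input_totalframes = 1024, 2500
mod = input_totalframes % buffer_size
blocksize = input_totalframes + (buffer_size - mod) if mod else input_totalframes
print(blocksize)  # 3072, i.e. 3 * 1024
```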
id: 12,219 | repo: Parisson/TimeSide | path: timeside/core/processor.py | func_name: ProcessPipe.append_processor | language: python
def append_processor(self, proc, source_proc=None):
"Append a new processor to the pipe"
if source_proc is None and len(self.processors):
source_proc = self.processors[0]
if source_proc and not isinstance(source_proc, Processor):
raise TypeError('source_proc must be a Processor or None')
if not isinstance(proc, Processor):
raise TypeError('proc must be a Processor or None')
if proc.type == 'decoder' and len(self.processors):
raise ValueError('Only the first processor in a pipe could be a Decoder')
# TODO : check if the processor is already in the pipe
if source_proc:
for child in self._graph.neighbors_iter(source_proc.uuid()):
child_proc = self._graph.node[child]['processor']
if proc == child_proc:
proc._uuid = child_proc.uuid()
proc.process_pipe = self
break
if not self._graph.has_node(proc.uuid()):
self.processors.append(proc) # Add processor to the pipe
self._graph.add_node(proc.uuid(), processor=proc, id=proc.id())
if source_proc:
self._graph.add_edge(self.processors[0].uuid(), proc.uuid(),
type='audio_source')
proc.process_pipe = self
# Add an edge between each parent and proc
for parent in proc.parents.values():
self._graph.add_edge(parent.uuid(), proc.uuid(),
type='data_source')
sha: 0618d75cd2f16021afcfd3d5b77f692adad76ea5 | url: https://github.com/Parisson/TimeSide/blob/0618d75cd2f16021afcfd3d5b77f692adad76ea5/timeside/core/processor.py#L337-L369
id: 12,220 | repo: Parisson/TimeSide | path: timeside/plugins/analyzer/externals/vamp_plugin.py | func_name: simple_host_process | language: python
def simple_host_process(argslist):
"""Call vamp-simple-host"""
vamp_host = 'vamp-simple-host'
command = [vamp_host]
command.extend(argslist)
# try ?
stdout = subprocess.check_output(command,
stderr=subprocess.STDOUT).splitlines()
return stdout
sha: 0618d75cd2f16021afcfd3d5b77f692adad76ea5 | url: https://github.com/Parisson/TimeSide/blob/0618d75cd2f16021afcfd3d5b77f692adad76ea5/timeside/plugins/analyzer/externals/vamp_plugin.py#L33-L43
id: 12,221 | repo: Parisson/TimeSide | path: timeside/plugins/grapher/spectrogram_lin.py | func_name: SpectrogramLinear.set_scale | language: python
def set_scale(self):
"""generate the lookup which translates y-coordinate to fft-bin"""
f_min = float(self.lower_freq)
f_max = float(self.higher_freq)
y_min = f_min
y_max = f_max
for y in range(self.image_height):
freq = y_min + y / (self.image_height - 1.0) * (y_max - y_min)
fft_bin = freq / f_max * (self.fft_size / 2 + 1)
if fft_bin < self.fft_size / 2:
alpha = fft_bin - int(fft_bin)
self.y_to_bin.append((int(fft_bin), alpha * 255))
|
python
|
def set_scale(self):
"""generate the lookup which translates y-coordinate to fft-bin"""
f_min = float(self.lower_freq)
f_max = float(self.higher_freq)
y_min = f_min
y_max = f_max
for y in range(self.image_height):
freq = y_min + y / (self.image_height - 1.0) * (y_max - y_min)
fft_bin = freq / f_max * (self.fft_size / 2 + 1)
if fft_bin < self.fft_size / 2:
alpha = fft_bin - int(fft_bin)
self.y_to_bin.append((int(fft_bin), alpha * 255))
|
[
"def",
"set_scale",
"(",
"self",
")",
":",
"f_min",
"=",
"float",
"(",
"self",
".",
"lower_freq",
")",
"f_max",
"=",
"float",
"(",
"self",
".",
"higher_freq",
")",
"y_min",
"=",
"f_min",
"y_max",
"=",
"f_max",
"for",
"y",
"in",
"range",
"(",
"self",
".",
"image_height",
")",
":",
"freq",
"=",
"y_min",
"+",
"y",
"/",
"(",
"self",
".",
"image_height",
"-",
"1.0",
")",
"*",
"(",
"y_max",
"-",
"y_min",
")",
"fft_bin",
"=",
"freq",
"/",
"f_max",
"*",
"(",
"self",
".",
"fft_size",
"/",
"2",
"+",
"1",
")",
"if",
"fft_bin",
"<",
"self",
".",
"fft_size",
"/",
"2",
":",
"alpha",
"=",
"fft_bin",
"-",
"int",
"(",
"fft_bin",
")",
"self",
".",
"y_to_bin",
".",
"append",
"(",
"(",
"int",
"(",
"fft_bin",
")",
",",
"alpha",
"*",
"255",
")",
")"
] |
generate the lookup which translates y-coordinate to fft-bin
|
[
"generate",
"the",
"lookup",
"which",
"translates",
"y",
"-",
"coordinate",
"to",
"fft",
"-",
"bin"
] |
0618d75cd2f16021afcfd3d5b77f692adad76ea5
|
https://github.com/Parisson/TimeSide/blob/0618d75cd2f16021afcfd3d5b77f692adad76ea5/timeside/plugins/grapher/spectrogram_lin.py#L55-L67
|
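The y-to-bin lookup above is plain arithmetic, so it can be reproduced outside the grapher class. A standalone sketch of the same linear mapping; the image height, FFT size and frequency bounds below are placeholders, not TimeSide defaults:

# Standalone illustration of the linear y -> FFT-bin lookup used above.
image_height = 4
fft_size = 2048
f_min, f_max = 0.0, 16000.0   # made-up frequency bounds

y_to_bin = []
for y in range(image_height):
    freq = f_min + y / (image_height - 1.0) * (f_max - f_min)
    fft_bin = freq / f_max * (fft_size / 2 + 1)
    if fft_bin < fft_size / 2:
        alpha = fft_bin - int(fft_bin)
        y_to_bin.append((int(fft_bin), alpha * 255))

print(y_to_bin)  # (bin index, 0-255 interpolation weight) per image row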
12,222
|
Parisson/TimeSide
|
timeside/core/tools/hdf5.py
|
dict_from_hdf5
|
def dict_from_hdf5(dict_like, h5group):
"""
Load a dictionnary-like object from a h5 file group
"""
# Read attributes
for name, value in h5group.attrs.items():
dict_like[name] = value
|
python
|
def dict_from_hdf5(dict_like, h5group):
"""
Load a dictionnary-like object from a h5 file group
"""
# Read attributes
for name, value in h5group.attrs.items():
dict_like[name] = value
|
[
"def",
"dict_from_hdf5",
"(",
"dict_like",
",",
"h5group",
")",
":",
"# Read attributes",
"for",
"name",
",",
"value",
"in",
"h5group",
".",
"attrs",
".",
"items",
"(",
")",
":",
"dict_like",
"[",
"name",
"]",
"=",
"value"
] |
Load a dictionnary-like object from a h5 file group
|
[
"Load",
"a",
"dictionnary",
"-",
"like",
"object",
"from",
"a",
"h5",
"file",
"group"
] |
0618d75cd2f16021afcfd3d5b77f692adad76ea5
|
https://github.com/Parisson/TimeSide/blob/0618d75cd2f16021afcfd3d5b77f692adad76ea5/timeside/core/tools/hdf5.py#L34-L40
|
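A short usage sketch for the loader above, assuming `h5py` is installed; the file name and attribute values are made up:

# Sketch: round-trip a couple of attributes through an HDF5 group.
import h5py
from timeside.core.tools.hdf5 import dict_from_hdf5

with h5py.File('example.h5', 'w') as h5_file:       # hypothetical file name
    group = h5_file.create_group('metadata')
    group.attrs['id'] = 'aubio_pitch'                # made-up attributes
    group.attrs['samplerate'] = 44100

    restored = {}
    dict_from_hdf5(restored, group)                  # copies the group attrs into the dict
    print(restored)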
12,223
|
Parisson/TimeSide
|
timeside/plugins/decoder/array.py
|
ArrayDecoder.get_frames
|
def get_frames(self):
"Define an iterator that will return frames at the given blocksize"
nb_frames = self.input_totalframes // self.output_blocksize
if self.input_totalframes % self.output_blocksize == 0:
nb_frames -= 1 # Last frame must send eod=True
for index in xrange(0,
nb_frames * self.output_blocksize,
self.output_blocksize):
yield (self.samples[index:index + self.output_blocksize], False)
yield (self.samples[nb_frames * self.output_blocksize:], True)
|
python
|
def get_frames(self):
"Define an iterator that will return frames at the given blocksize"
nb_frames = self.input_totalframes // self.output_blocksize
if self.input_totalframes % self.output_blocksize == 0:
nb_frames -= 1 # Last frame must send eod=True
for index in xrange(0,
nb_frames * self.output_blocksize,
self.output_blocksize):
yield (self.samples[index:index + self.output_blocksize], False)
yield (self.samples[nb_frames * self.output_blocksize:], True)
|
[
"def",
"get_frames",
"(",
"self",
")",
":",
"nb_frames",
"=",
"self",
".",
"input_totalframes",
"//",
"self",
".",
"output_blocksize",
"if",
"self",
".",
"input_totalframes",
"%",
"self",
".",
"output_blocksize",
"==",
"0",
":",
"nb_frames",
"-=",
"1",
"# Last frame must send eod=True",
"for",
"index",
"in",
"xrange",
"(",
"0",
",",
"nb_frames",
"*",
"self",
".",
"output_blocksize",
",",
"self",
".",
"output_blocksize",
")",
":",
"yield",
"(",
"self",
".",
"samples",
"[",
"index",
":",
"index",
"+",
"self",
".",
"output_blocksize",
"]",
",",
"False",
")",
"yield",
"(",
"self",
".",
"samples",
"[",
"nb_frames",
"*",
"self",
".",
"output_blocksize",
":",
"]",
",",
"True",
")"
] |
Define an iterator that will return frames at the given blocksize
|
[
"Define",
"an",
"iterator",
"that",
"will",
"return",
"frames",
"at",
"the",
"given",
"blocksize"
] |
0618d75cd2f16021afcfd3d5b77f692adad76ea5
|
https://github.com/Parisson/TimeSide/blob/0618d75cd2f16021afcfd3d5b77f692adad76ea5/timeside/plugins/decoder/array.py#L113-L125
|
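The frame iterator above is fixed-size chunking with an end-of-data flag on the last block. A standalone sketch of the same pattern, detached from the decoder class (numpy assumed, numbers made up):

# Standalone illustration of block iteration with an eod flag, as in get_frames().
import numpy as np

def iter_blocks(samples, blocksize):
    """Yield (block, eod) pairs; eod is True only for the final block."""
    nb_frames = len(samples) // blocksize
    if len(samples) % blocksize == 0:
        nb_frames -= 1  # the last full block must still carry eod=True
    for index in range(0, nb_frames * blocksize, blocksize):
        yield samples[index:index + blocksize], False
    yield samples[nb_frames * blocksize:], True

samples = np.arange(10)             # made-up data
for block, eod in iter_blocks(samples, blocksize=4):
    print(len(block), eod)          # 4 False / 4 False / 2 True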
12,224
|
Parisson/TimeSide
|
timeside/core/component.py
|
implementations
|
def implementations(interface, recurse=True, abstract=False):
"""Returns the components implementing interface, and if recurse, any of
the descendants of interface. If abstract is True, also return the
abstract implementations."""
result = []
find_implementations(interface, recurse, abstract, result)
return result
|
python
|
def implementations(interface, recurse=True, abstract=False):
"""Returns the components implementing interface, and if recurse, any of
the descendants of interface. If abstract is True, also return the
abstract implementations."""
result = []
find_implementations(interface, recurse, abstract, result)
return result
|
[
"def",
"implementations",
"(",
"interface",
",",
"recurse",
"=",
"True",
",",
"abstract",
"=",
"False",
")",
":",
"result",
"=",
"[",
"]",
"find_implementations",
"(",
"interface",
",",
"recurse",
",",
"abstract",
",",
"result",
")",
"return",
"result"
] |
Returns the components implementing interface, and if recurse, any of
the descendants of interface. If abstract is True, also return the
abstract implementations.
|
[
"Returns",
"the",
"components",
"implementing",
"interface",
"and",
"if",
"recurse",
"any",
"of",
"the",
"descendants",
"of",
"interface",
".",
"If",
"abstract",
"is",
"True",
"also",
"return",
"the",
"abstract",
"implementations",
"."
] |
0618d75cd2f16021afcfd3d5b77f692adad76ea5
|
https://github.com/Parisson/TimeSide/blob/0618d75cd2f16021afcfd3d5b77f692adad76ea5/timeside/core/component.py#L65-L71
|
12,225
|
Parisson/TimeSide
|
timeside/core/component.py
|
find_implementations
|
def find_implementations(interface, recurse, abstract, result):
"""Find implementations of an interface or of one of its descendants and
extend result with the classes found."""
for item in MetaComponent.implementations:
if (item['interface'] == interface and (abstract or not item['abstract'])):
extend_unique(result, [item['class']])
if recurse:
subinterfaces = interface.__subclasses__()
if subinterfaces:
for i in subinterfaces:
find_implementations(i, recurse, abstract, result)
|
python
|
def find_implementations(interface, recurse, abstract, result):
"""Find implementations of an interface or of one of its descendants and
extend result with the classes found."""
for item in MetaComponent.implementations:
if (item['interface'] == interface and (abstract or not item['abstract'])):
extend_unique(result, [item['class']])
if recurse:
subinterfaces = interface.__subclasses__()
if subinterfaces:
for i in subinterfaces:
find_implementations(i, recurse, abstract, result)
|
[
"def",
"find_implementations",
"(",
"interface",
",",
"recurse",
",",
"abstract",
",",
"result",
")",
":",
"for",
"item",
"in",
"MetaComponent",
".",
"implementations",
":",
"if",
"(",
"item",
"[",
"'interface'",
"]",
"==",
"interface",
"and",
"(",
"abstract",
"or",
"not",
"item",
"[",
"'abstract'",
"]",
")",
")",
":",
"extend_unique",
"(",
"result",
",",
"[",
"item",
"[",
"'class'",
"]",
"]",
")",
"if",
"recurse",
":",
"subinterfaces",
"=",
"interface",
".",
"__subclasses__",
"(",
")",
"if",
"subinterfaces",
":",
"for",
"i",
"in",
"subinterfaces",
":",
"find_implementations",
"(",
"i",
",",
"recurse",
",",
"abstract",
",",
"result",
")"
] |
Find implementations of an interface or of one of its descendants and
extend result with the classes found.
|
[
"Find",
"implementations",
"of",
"an",
"interface",
"or",
"of",
"one",
"of",
"its",
"descendants",
"and",
"extend",
"result",
"with",
"the",
"classes",
"found",
"."
] |
0618d75cd2f16021afcfd3d5b77f692adad76ea5
|
https://github.com/Parisson/TimeSide/blob/0618d75cd2f16021afcfd3d5b77f692adad76ea5/timeside/core/component.py#L141-L152
|
12,226
|
Parisson/TimeSide
|
timeside/core/grapher.py
|
Grapher.draw_peaks
|
def draw_peaks(self, x, peaks, line_color):
"""Draw 2 peaks at x"""
y1 = self.image_height * 0.5 - peaks[0] * (self.image_height - 4) * 0.5
y2 = self.image_height * 0.5 - peaks[1] * (self.image_height - 4) * 0.5
if self.previous_y:
self.draw.line(
[self.previous_x, self.previous_y, x, y1, x, y2], line_color)
else:
self.draw.line([x, y1, x, y2], line_color)
self.draw_anti_aliased_pixels(x, y1, y2, line_color)
self.previous_x, self.previous_y = x, y2
|
python
|
def draw_peaks(self, x, peaks, line_color):
"""Draw 2 peaks at x"""
y1 = self.image_height * 0.5 - peaks[0] * (self.image_height - 4) * 0.5
y2 = self.image_height * 0.5 - peaks[1] * (self.image_height - 4) * 0.5
if self.previous_y:
self.draw.line(
[self.previous_x, self.previous_y, x, y1, x, y2], line_color)
else:
self.draw.line([x, y1, x, y2], line_color)
self.draw_anti_aliased_pixels(x, y1, y2, line_color)
self.previous_x, self.previous_y = x, y2
|
[
"def",
"draw_peaks",
"(",
"self",
",",
"x",
",",
"peaks",
",",
"line_color",
")",
":",
"y1",
"=",
"self",
".",
"image_height",
"*",
"0.5",
"-",
"peaks",
"[",
"0",
"]",
"*",
"(",
"self",
".",
"image_height",
"-",
"4",
")",
"*",
"0.5",
"y2",
"=",
"self",
".",
"image_height",
"*",
"0.5",
"-",
"peaks",
"[",
"1",
"]",
"*",
"(",
"self",
".",
"image_height",
"-",
"4",
")",
"*",
"0.5",
"if",
"self",
".",
"previous_y",
":",
"self",
".",
"draw",
".",
"line",
"(",
"[",
"self",
".",
"previous_x",
",",
"self",
".",
"previous_y",
",",
"x",
",",
"y1",
",",
"x",
",",
"y2",
"]",
",",
"line_color",
")",
"else",
":",
"self",
".",
"draw",
".",
"line",
"(",
"[",
"x",
",",
"y1",
",",
"x",
",",
"y2",
"]",
",",
"line_color",
")",
"self",
".",
"draw_anti_aliased_pixels",
"(",
"x",
",",
"y1",
",",
"y2",
",",
"line_color",
")",
"self",
".",
"previous_x",
",",
"self",
".",
"previous_y",
"=",
"x",
",",
"y2"
] |
Draw 2 peaks at x
|
[
"Draw",
"2",
"peaks",
"at",
"x"
] |
0618d75cd2f16021afcfd3d5b77f692adad76ea5
|
https://github.com/Parisson/TimeSide/blob/0618d75cd2f16021afcfd3d5b77f692adad76ea5/timeside/core/grapher.py#L193-L206
|
12,227
|
Parisson/TimeSide
|
timeside/core/grapher.py
|
Grapher.draw_peaks_inverted
|
def draw_peaks_inverted(self, x, peaks, line_color):
"""Draw 2 inverted peaks at x"""
y1 = self.image_height * 0.5 - peaks[0] * (self.image_height - 4) * 0.5
y2 = self.image_height * 0.5 - peaks[1] * (self.image_height - 4) * 0.5
if self.previous_y and x < self.image_width - 1:
if y1 < y2:
self.draw.line((x, 0, x, y1), line_color)
self.draw.line((x, self.image_height, x, y2), line_color)
else:
self.draw.line((x, 0, x, y2), line_color)
self.draw.line((x, self.image_height, x, y1), line_color)
else:
self.draw.line((x, 0, x, self.image_height), line_color)
self.draw_anti_aliased_pixels(x, y1, y2, line_color)
self.previous_x, self.previous_y = x, y1
|
python
|
def draw_peaks_inverted(self, x, peaks, line_color):
"""Draw 2 inverted peaks at x"""
y1 = self.image_height * 0.5 - peaks[0] * (self.image_height - 4) * 0.5
y2 = self.image_height * 0.5 - peaks[1] * (self.image_height - 4) * 0.5
if self.previous_y and x < self.image_width - 1:
if y1 < y2:
self.draw.line((x, 0, x, y1), line_color)
self.draw.line((x, self.image_height, x, y2), line_color)
else:
self.draw.line((x, 0, x, y2), line_color)
self.draw.line((x, self.image_height, x, y1), line_color)
else:
self.draw.line((x, 0, x, self.image_height), line_color)
self.draw_anti_aliased_pixels(x, y1, y2, line_color)
self.previous_x, self.previous_y = x, y1
|
[
"def",
"draw_peaks_inverted",
"(",
"self",
",",
"x",
",",
"peaks",
",",
"line_color",
")",
":",
"y1",
"=",
"self",
".",
"image_height",
"*",
"0.5",
"-",
"peaks",
"[",
"0",
"]",
"*",
"(",
"self",
".",
"image_height",
"-",
"4",
")",
"*",
"0.5",
"y2",
"=",
"self",
".",
"image_height",
"*",
"0.5",
"-",
"peaks",
"[",
"1",
"]",
"*",
"(",
"self",
".",
"image_height",
"-",
"4",
")",
"*",
"0.5",
"if",
"self",
".",
"previous_y",
"and",
"x",
"<",
"self",
".",
"image_width",
"-",
"1",
":",
"if",
"y1",
"<",
"y2",
":",
"self",
".",
"draw",
".",
"line",
"(",
"(",
"x",
",",
"0",
",",
"x",
",",
"y1",
")",
",",
"line_color",
")",
"self",
".",
"draw",
".",
"line",
"(",
"(",
"x",
",",
"self",
".",
"image_height",
",",
"x",
",",
"y2",
")",
",",
"line_color",
")",
"else",
":",
"self",
".",
"draw",
".",
"line",
"(",
"(",
"x",
",",
"0",
",",
"x",
",",
"y2",
")",
",",
"line_color",
")",
"self",
".",
"draw",
".",
"line",
"(",
"(",
"x",
",",
"self",
".",
"image_height",
",",
"x",
",",
"y1",
")",
",",
"line_color",
")",
"else",
":",
"self",
".",
"draw",
".",
"line",
"(",
"(",
"x",
",",
"0",
",",
"x",
",",
"self",
".",
"image_height",
")",
",",
"line_color",
")",
"self",
".",
"draw_anti_aliased_pixels",
"(",
"x",
",",
"y1",
",",
"y2",
",",
"line_color",
")",
"self",
".",
"previous_x",
",",
"self",
".",
"previous_y",
"=",
"x",
",",
"y1"
] |
Draw 2 inverted peaks at x
|
[
"Draw",
"2",
"inverted",
"peaks",
"at",
"x"
] |
0618d75cd2f16021afcfd3d5b77f692adad76ea5
|
https://github.com/Parisson/TimeSide/blob/0618d75cd2f16021afcfd3d5b77f692adad76ea5/timeside/core/grapher.py#L208-L224
|
12,228
|
Parisson/TimeSide
|
timeside/core/grapher.py
|
Grapher.draw_anti_aliased_pixels
|
def draw_anti_aliased_pixels(self, x, y1, y2, color):
""" vertical anti-aliasing at y1 and y2 """
y_max = max(y1, y2)
y_max_int = int(y_max)
alpha = y_max - y_max_int
if alpha > 0.0 and alpha < 1.0 and y_max_int + 1 < self.image_height:
current_pix = self.pixel[int(x), y_max_int + 1]
r = int((1 - alpha) * current_pix[0] + alpha * color[0])
g = int((1 - alpha) * current_pix[1] + alpha * color[1])
b = int((1 - alpha) * current_pix[2] + alpha * color[2])
self.pixel[x, y_max_int + 1] = (r, g, b)
y_min = min(y1, y2)
y_min_int = int(y_min)
alpha = 1.0 - (y_min - y_min_int)
if alpha > 0.0 and alpha < 1.0 and y_min_int - 1 >= 0:
current_pix = self.pixel[x, y_min_int - 1]
r = int((1 - alpha) * current_pix[0] + alpha * color[0])
g = int((1 - alpha) * current_pix[1] + alpha * color[1])
b = int((1 - alpha) * current_pix[2] + alpha * color[2])
self.pixel[x, y_min_int - 1] = (r, g, b)
|
python
|
def draw_anti_aliased_pixels(self, x, y1, y2, color):
""" vertical anti-aliasing at y1 and y2 """
y_max = max(y1, y2)
y_max_int = int(y_max)
alpha = y_max - y_max_int
if alpha > 0.0 and alpha < 1.0 and y_max_int + 1 < self.image_height:
current_pix = self.pixel[int(x), y_max_int + 1]
r = int((1 - alpha) * current_pix[0] + alpha * color[0])
g = int((1 - alpha) * current_pix[1] + alpha * color[1])
b = int((1 - alpha) * current_pix[2] + alpha * color[2])
self.pixel[x, y_max_int + 1] = (r, g, b)
y_min = min(y1, y2)
y_min_int = int(y_min)
alpha = 1.0 - (y_min - y_min_int)
if alpha > 0.0 and alpha < 1.0 and y_min_int - 1 >= 0:
current_pix = self.pixel[x, y_min_int - 1]
r = int((1 - alpha) * current_pix[0] + alpha * color[0])
g = int((1 - alpha) * current_pix[1] + alpha * color[1])
b = int((1 - alpha) * current_pix[2] + alpha * color[2])
self.pixel[x, y_min_int - 1] = (r, g, b)
|
[
"def",
"draw_anti_aliased_pixels",
"(",
"self",
",",
"x",
",",
"y1",
",",
"y2",
",",
"color",
")",
":",
"y_max",
"=",
"max",
"(",
"y1",
",",
"y2",
")",
"y_max_int",
"=",
"int",
"(",
"y_max",
")",
"alpha",
"=",
"y_max",
"-",
"y_max_int",
"if",
"alpha",
">",
"0.0",
"and",
"alpha",
"<",
"1.0",
"and",
"y_max_int",
"+",
"1",
"<",
"self",
".",
"image_height",
":",
"current_pix",
"=",
"self",
".",
"pixel",
"[",
"int",
"(",
"x",
")",
",",
"y_max_int",
"+",
"1",
"]",
"r",
"=",
"int",
"(",
"(",
"1",
"-",
"alpha",
")",
"*",
"current_pix",
"[",
"0",
"]",
"+",
"alpha",
"*",
"color",
"[",
"0",
"]",
")",
"g",
"=",
"int",
"(",
"(",
"1",
"-",
"alpha",
")",
"*",
"current_pix",
"[",
"1",
"]",
"+",
"alpha",
"*",
"color",
"[",
"1",
"]",
")",
"b",
"=",
"int",
"(",
"(",
"1",
"-",
"alpha",
")",
"*",
"current_pix",
"[",
"2",
"]",
"+",
"alpha",
"*",
"color",
"[",
"2",
"]",
")",
"self",
".",
"pixel",
"[",
"x",
",",
"y_max_int",
"+",
"1",
"]",
"=",
"(",
"r",
",",
"g",
",",
"b",
")",
"y_min",
"=",
"min",
"(",
"y1",
",",
"y2",
")",
"y_min_int",
"=",
"int",
"(",
"y_min",
")",
"alpha",
"=",
"1.0",
"-",
"(",
"y_min",
"-",
"y_min_int",
")",
"if",
"alpha",
">",
"0.0",
"and",
"alpha",
"<",
"1.0",
"and",
"y_min_int",
"-",
"1",
">=",
"0",
":",
"current_pix",
"=",
"self",
".",
"pixel",
"[",
"x",
",",
"y_min_int",
"-",
"1",
"]",
"r",
"=",
"int",
"(",
"(",
"1",
"-",
"alpha",
")",
"*",
"current_pix",
"[",
"0",
"]",
"+",
"alpha",
"*",
"color",
"[",
"0",
"]",
")",
"g",
"=",
"int",
"(",
"(",
"1",
"-",
"alpha",
")",
"*",
"current_pix",
"[",
"1",
"]",
"+",
"alpha",
"*",
"color",
"[",
"1",
"]",
")",
"b",
"=",
"int",
"(",
"(",
"1",
"-",
"alpha",
")",
"*",
"current_pix",
"[",
"2",
"]",
"+",
"alpha",
"*",
"color",
"[",
"2",
"]",
")",
"self",
".",
"pixel",
"[",
"x",
",",
"y_min_int",
"-",
"1",
"]",
"=",
"(",
"r",
",",
"g",
",",
"b",
")"
] |
vertical anti-aliasing at y1 and y2
|
[
"vertical",
"anti",
"-",
"aliasing",
"at",
"y1",
"and",
"y2"
] |
0618d75cd2f16021afcfd3d5b77f692adad76ea5
|
https://github.com/Parisson/TimeSide/blob/0618d75cd2f16021afcfd3d5b77f692adad76ea5/timeside/core/grapher.py#L226-L249
|
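The per-channel blend in draw_anti_aliased_pixels is a linear interpolation between the existing pixel and the line colour. A standalone sketch with made-up colour values:

# Standalone illustration of the alpha blend applied to the neighbouring pixel.
def blend(current_pix, color, alpha):
    """Linear interpolation of two RGB tuples; alpha in (0, 1) weights `color`."""
    return tuple(int((1 - alpha) * c + alpha * k) for c, k in zip(current_pix, color))

background = (0, 0, 0)        # made-up existing pixel
line_color = (255, 136, 0)    # made-up waveform colour
print(blend(background, line_color, 0.25))   # (63, 34, 0)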
12,229
|
Parisson/TimeSide
|
timeside/plugins/grapher/spectrogram_log.py
|
SpectrogramLog.post_process
|
def post_process(self):
""" Apply last 2D transforms"""
self.image.putdata(self.pixels)
self.image = self.image.transpose(Image.ROTATE_90)
|
python
|
def post_process(self):
""" Apply last 2D transforms"""
self.image.putdata(self.pixels)
self.image = self.image.transpose(Image.ROTATE_90)
|
[
"def",
"post_process",
"(",
"self",
")",
":",
"self",
".",
"image",
".",
"putdata",
"(",
"self",
".",
"pixels",
")",
"self",
".",
"image",
"=",
"self",
".",
"image",
".",
"transpose",
"(",
"Image",
".",
"ROTATE_90",
")"
] |
Apply last 2D transforms
|
[
"Apply",
"last",
"2D",
"transforms"
] |
0618d75cd2f16021afcfd3d5b77f692adad76ea5
|
https://github.com/Parisson/TimeSide/blob/0618d75cd2f16021afcfd3d5b77f692adad76ea5/timeside/plugins/grapher/spectrogram_log.py#L105-L108
|
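post_process above relies on two Pillow calls, putdata() and transpose(). A minimal sketch assuming Pillow is installed; dimensions and colours are made up:

# Sketch: write a flat pixel list into a Pillow image, then rotate it 90 degrees,
# mirroring what SpectrogramLog.post_process() does with its accumulated columns.
from PIL import Image

width, height = 4, 2                              # made-up size
pixels = [(x * 60, 0, y * 120) for y in range(height) for x in range(width)]

image = Image.new('RGB', (width, height))
image.putdata(pixels)                             # fill row by row
# Image.ROTATE_90 (as in the code above) on older Pillow, Image.Transpose.ROTATE_90 on newer.
rotate_90 = getattr(Image, 'Transpose', Image).ROTATE_90
image = image.transpose(rotate_90)
print(image.size)                                 # (2, 4)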
12,230
|
Parisson/TimeSide
|
timeside/plugins/encoder/mp3.py
|
Mp3Encoder.write_metadata
|
def write_metadata(self):
"""Write all ID3v2.4 tags to file from self.metadata"""
import mutagen
from mutagen import id3
id3 = id3.ID3(self.filename)
for tag in self.metadata.keys():
value = self.metadata[tag]
frame = mutagen.id3.Frames[tag](3, value)
try:
id3.add(frame)
except:
raise IOError('EncoderError: cannot tag "' + tag + '"')
try:
id3.save()
except:
raise IOError('EncoderError: cannot write tags')
|
python
|
def write_metadata(self):
"""Write all ID3v2.4 tags to file from self.metadata"""
import mutagen
from mutagen import id3
id3 = id3.ID3(self.filename)
for tag in self.metadata.keys():
value = self.metadata[tag]
frame = mutagen.id3.Frames[tag](3, value)
try:
id3.add(frame)
except:
raise IOError('EncoderError: cannot tag "' + tag + '"')
try:
id3.save()
except:
raise IOError('EncoderError: cannot write tags')
|
[
"def",
"write_metadata",
"(",
"self",
")",
":",
"import",
"mutagen",
"from",
"mutagen",
"import",
"id3",
"id3",
"=",
"id3",
".",
"ID3",
"(",
"self",
".",
"filename",
")",
"for",
"tag",
"in",
"self",
".",
"metadata",
".",
"keys",
"(",
")",
":",
"value",
"=",
"self",
".",
"metadata",
"[",
"tag",
"]",
"frame",
"=",
"mutagen",
".",
"id3",
".",
"Frames",
"[",
"tag",
"]",
"(",
"3",
",",
"value",
")",
"try",
":",
"id3",
".",
"add",
"(",
"frame",
")",
"except",
":",
"raise",
"IOError",
"(",
"'EncoderError: cannot tag \"'",
"+",
"tag",
"+",
"'\"'",
")",
"try",
":",
"id3",
".",
"save",
"(",
")",
"except",
":",
"raise",
"IOError",
"(",
"'EncoderError: cannot write tags'",
")"
] |
Write all ID3v2.4 tags to file from self.metadata
|
[
"Write",
"all",
"ID3v2",
".",
"4",
"tags",
"to",
"file",
"from",
"self",
".",
"metadata"
] |
0618d75cd2f16021afcfd3d5b77f692adad76ea5
|
https://github.com/Parisson/TimeSide/blob/0618d75cd2f16021afcfd3d5b77f692adad76ea5/timeside/plugins/encoder/mp3.py#L94-L110
|
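write_metadata above drives mutagen's ID3 frame classes. A standalone sketch of the same tagging step, assuming mutagen is installed and that 'output.mp3' already exists with an ID3 header (both are assumptions):

# Sketch: tag an existing MP3 with mutagen, roughly what write_metadata() does
# for each (frame name, value) pair in self.metadata.
from mutagen.id3 import ID3, Frames

filename = 'output.mp3'                  # hypothetical; must already carry an ID3 tag
tags = ID3(filename)
for frame_name, value in {'TIT2': 'My title', 'TPE1': 'My artist'}.items():
    # encoding=3 selects UTF-8, matching the `3` passed positionally above
    tags.add(Frames[frame_name](encoding=3, text=value))
tags.save()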
12,231
|
btimby/fulltext
|
fulltext/__main__.py
|
main
|
def main(args=sys.argv[1:]):
"""Extract text from a file.
Commands:
extract - extract text from path
check - make sure all deps are installed
Usage:
fulltext extract [-v] [-f] <path>...
fulltext check [-t]
Options:
-f, --file Open file first.
-t, --title Check deps for title.
-v, --verbose More verbose output.
"""
opt = docopt(main.__doc__.strip(), args, options_first=True)
config_logging(opt['--verbose'])
if opt['check']:
check_backends(opt['--title'])
elif opt['extract']:
handler = fulltext.get
if opt['--file']:
handler = _handle_open
for path in opt['<path>']:
print(handler(path))
else:
# we should never get here
raise ValueError("don't know how to handle cmd")
|
python
|
def main(args=sys.argv[1:]):
"""Extract text from a file.
Commands:
extract - extract text from path
check - make sure all deps are installed
Usage:
fulltext extract [-v] [-f] <path>...
fulltext check [-t]
Options:
-f, --file Open file first.
-t, --title Check deps for title.
-v, --verbose More verbose output.
"""
opt = docopt(main.__doc__.strip(), args, options_first=True)
config_logging(opt['--verbose'])
if opt['check']:
check_backends(opt['--title'])
elif opt['extract']:
handler = fulltext.get
if opt['--file']:
handler = _handle_open
for path in opt['<path>']:
print(handler(path))
else:
# we should never get here
raise ValueError("don't know how to handle cmd")
|
[
"def",
"main",
"(",
"args",
"=",
"sys",
".",
"argv",
"[",
"1",
":",
"]",
")",
":",
"opt",
"=",
"docopt",
"(",
"main",
".",
"__doc__",
".",
"strip",
"(",
")",
",",
"args",
",",
"options_first",
"=",
"True",
")",
"config_logging",
"(",
"opt",
"[",
"'--verbose'",
"]",
")",
"if",
"opt",
"[",
"'check'",
"]",
":",
"check_backends",
"(",
"opt",
"[",
"'--title'",
"]",
")",
"elif",
"opt",
"[",
"'extract'",
"]",
":",
"handler",
"=",
"fulltext",
".",
"get",
"if",
"opt",
"[",
"'--file'",
"]",
":",
"handler",
"=",
"_handle_open",
"for",
"path",
"in",
"opt",
"[",
"'<path>'",
"]",
":",
"print",
"(",
"handler",
"(",
"path",
")",
")",
"else",
":",
"# we should never get here",
"raise",
"ValueError",
"(",
"\"don't know how to handle cmd\"",
")"
] |
Extract text from a file.
Commands:
extract - extract text from path
check - make sure all deps are installed
Usage:
fulltext extract [-v] [-f] <path>...
fulltext check [-t]
Options:
-f, --file Open file first.
-t, --title Check deps for title.
-v, --verbose More verbose output.
|
[
"Extract",
"text",
"from",
"a",
"file",
"."
] |
9234cc1e2099209430e20317649549026de283ce
|
https://github.com/btimby/fulltext/blob/9234cc1e2099209430e20317649549026de283ce/fulltext/__main__.py#L71-L103
|
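The docopt interface above can be exercised from the shell via `python -m fulltext ...` or by calling main() with an argument list. A quick sketch; the path is made up:

# Programmatic equivalent of the shell commands
#   python -m fulltext check
#   python -m fulltext extract README.md
from fulltext.__main__ import main

main(['extract', 'README.md'])     # hypothetical path; prints the extracted text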
12,232
|
btimby/fulltext
|
fulltext/__init__.py
|
is_binary
|
def is_binary(f):
"""Return True if binary mode."""
# NOTE: order matters here. We don't bail on Python 2 just yet. Both
# codecs.open() and io.open() can open in text mode, both set the encoding
# attribute. We must do that check first.
# If it has a decoding attribute with a value, it is text mode.
if getattr(f, "encoding", None):
return False
# Python 2 makes no further distinction.
if not PY3:
return True
# If the file has a mode, and it contains b, it is binary.
try:
if 'b' in getattr(f, 'mode', ''):
return True
except TypeError:
import gzip
if isinstance(f, gzip.GzipFile):
return True # in gzip mode is an integer
raise
# Can we sniff?
try:
f.seek(0, os.SEEK_CUR)
except (AttributeError, IOError):
return False
# Finally, let's sniff by reading a byte.
byte = f.read(1)
f.seek(-1, os.SEEK_CUR)
return hasattr(byte, 'decode')
|
python
|
def is_binary(f):
"""Return True if binary mode."""
# NOTE: order matters here. We don't bail on Python 2 just yet. Both
# codecs.open() and io.open() can open in text mode, both set the encoding
# attribute. We must do that check first.
# If it has a decoding attribute with a value, it is text mode.
if getattr(f, "encoding", None):
return False
# Python 2 makes no further distinction.
if not PY3:
return True
# If the file has a mode, and it contains b, it is binary.
try:
if 'b' in getattr(f, 'mode', ''):
return True
except TypeError:
import gzip
if isinstance(f, gzip.GzipFile):
return True # in gzip mode is an integer
raise
# Can we sniff?
try:
f.seek(0, os.SEEK_CUR)
except (AttributeError, IOError):
return False
# Finally, let's sniff by reading a byte.
byte = f.read(1)
f.seek(-1, os.SEEK_CUR)
return hasattr(byte, 'decode')
|
[
"def",
"is_binary",
"(",
"f",
")",
":",
"# NOTE: order matters here. We don't bail on Python 2 just yet. Both",
"# codecs.open() and io.open() can open in text mode, both set the encoding",
"# attribute. We must do that check first.",
"# If it has a decoding attribute with a value, it is text mode.",
"if",
"getattr",
"(",
"f",
",",
"\"encoding\"",
",",
"None",
")",
":",
"return",
"False",
"# Python 2 makes no further distinction.",
"if",
"not",
"PY3",
":",
"return",
"True",
"# If the file has a mode, and it contains b, it is binary.",
"try",
":",
"if",
"'b'",
"in",
"getattr",
"(",
"f",
",",
"'mode'",
",",
"''",
")",
":",
"return",
"True",
"except",
"TypeError",
":",
"import",
"gzip",
"if",
"isinstance",
"(",
"f",
",",
"gzip",
".",
"GzipFile",
")",
":",
"return",
"True",
"# in gzip mode is an integer",
"raise",
"# Can we sniff?",
"try",
":",
"f",
".",
"seek",
"(",
"0",
",",
"os",
".",
"SEEK_CUR",
")",
"except",
"(",
"AttributeError",
",",
"IOError",
")",
":",
"return",
"False",
"# Finally, let's sniff by reading a byte.",
"byte",
"=",
"f",
".",
"read",
"(",
"1",
")",
"f",
".",
"seek",
"(",
"-",
"1",
",",
"os",
".",
"SEEK_CUR",
")",
"return",
"hasattr",
"(",
"byte",
",",
"'decode'",
")"
] |
Return True if binary mode.
|
[
"Return",
"True",
"if",
"binary",
"mode",
"."
] |
9234cc1e2099209430e20317649549026de283ce
|
https://github.com/btimby/fulltext/blob/9234cc1e2099209430e20317649549026de283ce/fulltext/__init__.py#L329-L362
|
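A quick check of the binary-mode detection above, assuming Python 3 and an installed fulltext package:

# Sketch: is_binary() distinguishes binary-mode from text-mode file objects.
from fulltext import is_binary

with open(__file__, 'rb') as f:                      # binary mode: no .encoding, 'b' in mode
    print(is_binary(f))                              # True

with open(__file__, 'r', encoding='utf8') as f:      # text mode sets .encoding
    print(is_binary(f))                              # False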
12,233
|
btimby/fulltext
|
fulltext/__init__.py
|
handle_path
|
def handle_path(backend_inst, path, **kwargs):
"""
Handle a path.
Called by `get()` when provided a path. This function will prefer the
backend's `handle_path()` if one is provided Otherwise, it will open the
given path then use `handle_fobj()`.
"""
if callable(getattr(backend_inst, 'handle_path', None)):
# Prefer handle_path() if present.
LOGGER.debug("using handle_path")
return backend_inst.handle_path(path)
elif callable(getattr(backend_inst, 'handle_fobj', None)):
# Fallback to handle_fobj(). No warning here since the performance hit
# is minimal.
LOGGER.debug("using handle_fobj")
with open(path, 'rb') as f:
return backend_inst.handle_fobj(f)
else:
raise AssertionError(
'Backend %s has no _get functions' % backend_inst.__name__)
|
python
|
def handle_path(backend_inst, path, **kwargs):
"""
Handle a path.
Called by `get()` when provided a path. This function will prefer the
backend's `handle_path()` if one is provided Otherwise, it will open the
given path then use `handle_fobj()`.
"""
if callable(getattr(backend_inst, 'handle_path', None)):
# Prefer handle_path() if present.
LOGGER.debug("using handle_path")
return backend_inst.handle_path(path)
elif callable(getattr(backend_inst, 'handle_fobj', None)):
# Fallback to handle_fobj(). No warning here since the performance hit
# is minimal.
LOGGER.debug("using handle_fobj")
with open(path, 'rb') as f:
return backend_inst.handle_fobj(f)
else:
raise AssertionError(
'Backend %s has no _get functions' % backend_inst.__name__)
|
[
"def",
"handle_path",
"(",
"backend_inst",
",",
"path",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"callable",
"(",
"getattr",
"(",
"backend_inst",
",",
"'handle_path'",
",",
"None",
")",
")",
":",
"# Prefer handle_path() if present.",
"LOGGER",
".",
"debug",
"(",
"\"using handle_path\"",
")",
"return",
"backend_inst",
".",
"handle_path",
"(",
"path",
")",
"elif",
"callable",
"(",
"getattr",
"(",
"backend_inst",
",",
"'handle_fobj'",
",",
"None",
")",
")",
":",
"# Fallback to handle_fobj(). No warning here since the performance hit",
"# is minimal.",
"LOGGER",
".",
"debug",
"(",
"\"using handle_fobj\"",
")",
"with",
"open",
"(",
"path",
",",
"'rb'",
")",
"as",
"f",
":",
"return",
"backend_inst",
".",
"handle_fobj",
"(",
"f",
")",
"else",
":",
"raise",
"AssertionError",
"(",
"'Backend %s has no _get functions'",
"%",
"backend_inst",
".",
"__name__",
")"
] |
Handle a path.
Called by `get()` when provided a path. This function will prefer the
backend's `handle_path()` if one is provided Otherwise, it will open the
given path then use `handle_fobj()`.
|
[
"Handle",
"a",
"path",
"."
] |
9234cc1e2099209430e20317649549026de283ce
|
https://github.com/btimby/fulltext/blob/9234cc1e2099209430e20317649549026de283ce/fulltext/__init__.py#L365-L387
|
12,234
|
btimby/fulltext
|
fulltext/__init__.py
|
handle_fobj
|
def handle_fobj(backend, f, **kwargs):
"""
Handle a file-like object.
Called by `get()` when provided a file-like. This function will prefer the
backend's `handle_fobj()` if one is provided. Otherwise, it will write the
data to a temporary file and call `handle_path()`.
"""
if not is_binary(f):
raise AssertionError('File must be opened in binary mode.')
if callable(getattr(backend, 'handle_fobj', None)):
# Prefer handle_fobj() if present.
LOGGER.debug("using handle_fobj")
return backend.handle_fobj(f)
elif callable(getattr(backend, 'handle_path', None)):
# Fallback to handle_path(). Warn user since this is potentially
# expensive.
LOGGER.debug("using handle_path")
LOGGER.warning(
"Using disk, %r backend does not provide `handle_fobj()`", backend)
ext = ''
if 'ext' in kwargs:
ext = '.' + kwargs['ext']
with fobj_to_tempfile(f, suffix=ext) as fname:
return backend.handle_path(fname, **kwargs)
else:
raise AssertionError(
'Backend %s has no _get functions' % backend.__name__)
|
python
|
def handle_fobj(backend, f, **kwargs):
"""
Handle a file-like object.
Called by `get()` when provided a file-like. This function will prefer the
backend's `handle_fobj()` if one is provided. Otherwise, it will write the
data to a temporary file and call `handle_path()`.
"""
if not is_binary(f):
raise AssertionError('File must be opened in binary mode.')
if callable(getattr(backend, 'handle_fobj', None)):
# Prefer handle_fobj() if present.
LOGGER.debug("using handle_fobj")
return backend.handle_fobj(f)
elif callable(getattr(backend, 'handle_path', None)):
# Fallback to handle_path(). Warn user since this is potentially
# expensive.
LOGGER.debug("using handle_path")
LOGGER.warning(
"Using disk, %r backend does not provide `handle_fobj()`", backend)
ext = ''
if 'ext' in kwargs:
ext = '.' + kwargs['ext']
with fobj_to_tempfile(f, suffix=ext) as fname:
return backend.handle_path(fname, **kwargs)
else:
raise AssertionError(
'Backend %s has no _get functions' % backend.__name__)
|
[
"def",
"handle_fobj",
"(",
"backend",
",",
"f",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"not",
"is_binary",
"(",
"f",
")",
":",
"raise",
"AssertionError",
"(",
"'File must be opened in binary mode.'",
")",
"if",
"callable",
"(",
"getattr",
"(",
"backend",
",",
"'handle_fobj'",
",",
"None",
")",
")",
":",
"# Prefer handle_fobj() if present.",
"LOGGER",
".",
"debug",
"(",
"\"using handle_fobj\"",
")",
"return",
"backend",
".",
"handle_fobj",
"(",
"f",
")",
"elif",
"callable",
"(",
"getattr",
"(",
"backend",
",",
"'handle_path'",
",",
"None",
")",
")",
":",
"# Fallback to handle_path(). Warn user since this is potentially",
"# expensive.",
"LOGGER",
".",
"debug",
"(",
"\"using handle_path\"",
")",
"LOGGER",
".",
"warning",
"(",
"\"Using disk, %r backend does not provide `handle_fobj()`\"",
",",
"backend",
")",
"ext",
"=",
"''",
"if",
"'ext'",
"in",
"kwargs",
":",
"ext",
"=",
"'.'",
"+",
"kwargs",
"[",
"'ext'",
"]",
"with",
"fobj_to_tempfile",
"(",
"f",
",",
"suffix",
"=",
"ext",
")",
"as",
"fname",
":",
"return",
"backend",
".",
"handle_path",
"(",
"fname",
",",
"*",
"*",
"kwargs",
")",
"else",
":",
"raise",
"AssertionError",
"(",
"'Backend %s has no _get functions'",
"%",
"backend",
".",
"__name__",
")"
] |
Handle a file-like object.
Called by `get()` when provided a file-like. This function will prefer the
backend's `handle_fobj()` if one is provided. Otherwise, it will write the
data to a temporary file and call `handle_path()`.
|
[
"Handle",
"a",
"file",
"-",
"like",
"object",
"."
] |
9234cc1e2099209430e20317649549026de283ce
|
https://github.com/btimby/fulltext/blob/9234cc1e2099209430e20317649549026de283ce/fulltext/__init__.py#L390-L421
|
12,235
|
btimby/fulltext
|
fulltext/__init__.py
|
backend_from_mime
|
def backend_from_mime(mime):
"""Determine backend module object from a mime string."""
try:
mod_name = MIMETYPE_TO_BACKENDS[mime]
except KeyError:
msg = "No handler for %r, defaulting to %r" % (mime, DEFAULT_MIME)
if 'FULLTEXT_TESTING' in os.environ:
warn(msg)
else:
LOGGER.debug(msg)
mod_name = MIMETYPE_TO_BACKENDS[DEFAULT_MIME]
mod = import_mod(mod_name)
return mod
|
python
|
def backend_from_mime(mime):
"""Determine backend module object from a mime string."""
try:
mod_name = MIMETYPE_TO_BACKENDS[mime]
except KeyError:
msg = "No handler for %r, defaulting to %r" % (mime, DEFAULT_MIME)
if 'FULLTEXT_TESTING' in os.environ:
warn(msg)
else:
LOGGER.debug(msg)
mod_name = MIMETYPE_TO_BACKENDS[DEFAULT_MIME]
mod = import_mod(mod_name)
return mod
|
[
"def",
"backend_from_mime",
"(",
"mime",
")",
":",
"try",
":",
"mod_name",
"=",
"MIMETYPE_TO_BACKENDS",
"[",
"mime",
"]",
"except",
"KeyError",
":",
"msg",
"=",
"\"No handler for %r, defaulting to %r\"",
"%",
"(",
"mime",
",",
"DEFAULT_MIME",
")",
"if",
"'FULLTEXT_TESTING'",
"in",
"os",
".",
"environ",
":",
"warn",
"(",
"msg",
")",
"else",
":",
"LOGGER",
".",
"debug",
"(",
"msg",
")",
"mod_name",
"=",
"MIMETYPE_TO_BACKENDS",
"[",
"DEFAULT_MIME",
"]",
"mod",
"=",
"import_mod",
"(",
"mod_name",
")",
"return",
"mod"
] |
Determine backend module object from a mime string.
|
[
"Determine",
"backend",
"module",
"object",
"from",
"a",
"mime",
"string",
"."
] |
9234cc1e2099209430e20317649549026de283ce
|
https://github.com/btimby/fulltext/blob/9234cc1e2099209430e20317649549026de283ce/fulltext/__init__.py#L428-L442
|
12,236
|
btimby/fulltext
|
fulltext/__init__.py
|
backend_from_fname
|
def backend_from_fname(name):
"""Determine backend module object from a file name."""
ext = splitext(name)[1]
try:
mime = EXTS_TO_MIMETYPES[ext]
except KeyError:
try:
f = open(name, 'rb')
except IOError as e:
# The file may not exist, we are being asked to determine it's type
# from it's name. Other errors are unexpected.
if e.errno != errno.ENOENT:
raise
# We will have to fall back upon the default backend.
msg = "No handler for %r, defaulting to %r" % (ext, DEFAULT_MIME)
if 'FULLTEXT_TESTING' in os.environ:
warn(msg)
else:
LOGGER.debug(msg)
mod_name = MIMETYPE_TO_BACKENDS[DEFAULT_MIME]
else:
with f:
return backend_from_fobj(f)
else:
mod_name = MIMETYPE_TO_BACKENDS[mime]
mod = import_mod(mod_name)
return mod
|
python
|
def backend_from_fname(name):
"""Determine backend module object from a file name."""
ext = splitext(name)[1]
try:
mime = EXTS_TO_MIMETYPES[ext]
except KeyError:
try:
f = open(name, 'rb')
except IOError as e:
# The file may not exist, we are being asked to determine it's type
# from it's name. Other errors are unexpected.
if e.errno != errno.ENOENT:
raise
# We will have to fall back upon the default backend.
msg = "No handler for %r, defaulting to %r" % (ext, DEFAULT_MIME)
if 'FULLTEXT_TESTING' in os.environ:
warn(msg)
else:
LOGGER.debug(msg)
mod_name = MIMETYPE_TO_BACKENDS[DEFAULT_MIME]
else:
with f:
return backend_from_fobj(f)
else:
mod_name = MIMETYPE_TO_BACKENDS[mime]
mod = import_mod(mod_name)
return mod
|
[
"def",
"backend_from_fname",
"(",
"name",
")",
":",
"ext",
"=",
"splitext",
"(",
"name",
")",
"[",
"1",
"]",
"try",
":",
"mime",
"=",
"EXTS_TO_MIMETYPES",
"[",
"ext",
"]",
"except",
"KeyError",
":",
"try",
":",
"f",
"=",
"open",
"(",
"name",
",",
"'rb'",
")",
"except",
"IOError",
"as",
"e",
":",
"# The file may not exist, we are being asked to determine it's type",
"# from it's name. Other errors are unexpected.",
"if",
"e",
".",
"errno",
"!=",
"errno",
".",
"ENOENT",
":",
"raise",
"# We will have to fall back upon the default backend.",
"msg",
"=",
"\"No handler for %r, defaulting to %r\"",
"%",
"(",
"ext",
",",
"DEFAULT_MIME",
")",
"if",
"'FULLTEXT_TESTING'",
"in",
"os",
".",
"environ",
":",
"warn",
"(",
"msg",
")",
"else",
":",
"LOGGER",
".",
"debug",
"(",
"msg",
")",
"mod_name",
"=",
"MIMETYPE_TO_BACKENDS",
"[",
"DEFAULT_MIME",
"]",
"else",
":",
"with",
"f",
":",
"return",
"backend_from_fobj",
"(",
"f",
")",
"else",
":",
"mod_name",
"=",
"MIMETYPE_TO_BACKENDS",
"[",
"mime",
"]",
"mod",
"=",
"import_mod",
"(",
"mod_name",
")",
"return",
"mod"
] |
Determine backend module object from a file name.
|
[
"Determine",
"backend",
"module",
"object",
"from",
"a",
"file",
"name",
"."
] |
9234cc1e2099209430e20317649549026de283ce
|
https://github.com/btimby/fulltext/blob/9234cc1e2099209430e20317649549026de283ce/fulltext/__init__.py#L445-L479
|
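A small sketch of the extension-based dispatch above. It assumes '.pdf' and '.txt' are registered in EXTS_TO_MIMETYPES (likely, but not shown in this record); unknown extensions fall back to sniffing the file or to the default backend, so the paths below do not need to exist:

# Sketch: pick a backend module purely from a file name.
from fulltext import backend_from_fname

print(backend_from_fname('report.pdf').__name__)   # a fulltext.backends.* module (exact name assumed)
print(backend_from_fname('notes.txt').__name__)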
12,237
|
btimby/fulltext
|
fulltext/__init__.py
|
backend_from_fobj
|
def backend_from_fobj(f):
"""Determine backend module object from a file object."""
if magic is None:
warn("magic lib is not installed; assuming mime type %r" % (
DEFAULT_MIME))
return backend_from_mime(DEFAULT_MIME)
else:
offset = f.tell()
try:
f.seek(0)
chunk = f.read(MAGIC_BUFFER_SIZE)
mime = magic.from_buffer(chunk, mime=True)
return backend_from_mime(mime)
finally:
f.seek(offset)
|
python
|
def backend_from_fobj(f):
"""Determine backend module object from a file object."""
if magic is None:
warn("magic lib is not installed; assuming mime type %r" % (
DEFAULT_MIME))
return backend_from_mime(DEFAULT_MIME)
else:
offset = f.tell()
try:
f.seek(0)
chunk = f.read(MAGIC_BUFFER_SIZE)
mime = magic.from_buffer(chunk, mime=True)
return backend_from_mime(mime)
finally:
f.seek(offset)
|
[
"def",
"backend_from_fobj",
"(",
"f",
")",
":",
"if",
"magic",
"is",
"None",
":",
"warn",
"(",
"\"magic lib is not installed; assuming mime type %r\"",
"%",
"(",
"DEFAULT_MIME",
")",
")",
"return",
"backend_from_mime",
"(",
"DEFAULT_MIME",
")",
"else",
":",
"offset",
"=",
"f",
".",
"tell",
"(",
")",
"try",
":",
"f",
".",
"seek",
"(",
"0",
")",
"chunk",
"=",
"f",
".",
"read",
"(",
"MAGIC_BUFFER_SIZE",
")",
"mime",
"=",
"magic",
".",
"from_buffer",
"(",
"chunk",
",",
"mime",
"=",
"True",
")",
"return",
"backend_from_mime",
"(",
"mime",
")",
"finally",
":",
"f",
".",
"seek",
"(",
"offset",
")"
] |
Determine backend module object from a file object.
|
[
"Determine",
"backend",
"module",
"object",
"from",
"a",
"file",
"object",
"."
] |
9234cc1e2099209430e20317649549026de283ce
|
https://github.com/btimby/fulltext/blob/9234cc1e2099209430e20317649549026de283ce/fulltext/__init__.py#L482-L496
|
12,238
|
btimby/fulltext
|
fulltext/__init__.py
|
backend_inst_from_mod
|
def backend_inst_from_mod(mod, encoding, encoding_errors, kwargs):
"""Given a mod and a set of opts return an instantiated
Backend class.
"""
kw = dict(encoding=encoding, encoding_errors=encoding_errors,
kwargs=kwargs)
try:
klass = getattr(mod, "Backend")
except AttributeError:
raise AttributeError("%r mod does not define any backend class" % mod)
inst = klass(**kw)
try:
inst.check(title=False)
except Exception as err:
bin_mod = "fulltext.backends.__bin"
warn("can't use %r due to %r; use %r backend instead" % (
mod, str(err), bin_mod))
inst = import_mod(bin_mod).Backend(**kw)
inst.check(title=False)
LOGGER.debug("using %r" % inst)
return inst
|
python
|
def backend_inst_from_mod(mod, encoding, encoding_errors, kwargs):
"""Given a mod and a set of opts return an instantiated
Backend class.
"""
kw = dict(encoding=encoding, encoding_errors=encoding_errors,
kwargs=kwargs)
try:
klass = getattr(mod, "Backend")
except AttributeError:
raise AttributeError("%r mod does not define any backend class" % mod)
inst = klass(**kw)
try:
inst.check(title=False)
except Exception as err:
bin_mod = "fulltext.backends.__bin"
warn("can't use %r due to %r; use %r backend instead" % (
mod, str(err), bin_mod))
inst = import_mod(bin_mod).Backend(**kw)
inst.check(title=False)
LOGGER.debug("using %r" % inst)
return inst
|
[
"def",
"backend_inst_from_mod",
"(",
"mod",
",",
"encoding",
",",
"encoding_errors",
",",
"kwargs",
")",
":",
"kw",
"=",
"dict",
"(",
"encoding",
"=",
"encoding",
",",
"encoding_errors",
"=",
"encoding_errors",
",",
"kwargs",
"=",
"kwargs",
")",
"try",
":",
"klass",
"=",
"getattr",
"(",
"mod",
",",
"\"Backend\"",
")",
"except",
"AttributeError",
":",
"raise",
"AttributeError",
"(",
"\"%r mod does not define any backend class\"",
"%",
"mod",
")",
"inst",
"=",
"klass",
"(",
"*",
"*",
"kw",
")",
"try",
":",
"inst",
".",
"check",
"(",
"title",
"=",
"False",
")",
"except",
"Exception",
"as",
"err",
":",
"bin_mod",
"=",
"\"fulltext.backends.__bin\"",
"warn",
"(",
"\"can't use %r due to %r; use %r backend instead\"",
"%",
"(",
"mod",
",",
"str",
"(",
"err",
")",
",",
"bin_mod",
")",
")",
"inst",
"=",
"import_mod",
"(",
"bin_mod",
")",
".",
"Backend",
"(",
"*",
"*",
"kw",
")",
"inst",
".",
"check",
"(",
"title",
"=",
"False",
")",
"LOGGER",
".",
"debug",
"(",
"\"using %r\"",
"%",
"inst",
")",
"return",
"inst"
] |
Given a mod and a set of opts return an instantiated
Backend class.
|
[
"Given",
"a",
"mod",
"and",
"a",
"set",
"of",
"opts",
"return",
"an",
"instantiated",
"Backend",
"class",
"."
] |
9234cc1e2099209430e20317649549026de283ce
|
https://github.com/btimby/fulltext/blob/9234cc1e2099209430e20317649549026de283ce/fulltext/__init__.py#L499-L519
|
12,239
|
btimby/fulltext
|
fulltext/__init__.py
|
get
|
def get(path_or_file, default=SENTINAL, mime=None, name=None, backend=None,
encoding=None, encoding_errors=None, kwargs=None,
_wtitle=False):
"""
Get document full text.
Accepts a path or file-like object.
* If given, `default` is returned instead of an error.
* `backend` is either a module object or a string specifying which
default backend to use (e.g. "doc"); take a look at backends
directory to see a list of default backends.
* `mime` and `name` should be passed if the information
is available to caller, otherwise a best guess is made.
If both are specified `mime` takes precedence.
* `encoding` and `encoding_errors` are used to handle text encoding.
They are taken into consideration mostly only by pure-python
backends which do not rely on CLI tools.
Default to "utf8" and "strict" respectively.
* `kwargs` are passed to the underlying backend.
"""
try:
text, title = _get(
path_or_file, default=default, mime=mime, name=name,
backend=backend, kwargs=kwargs, encoding=encoding,
encoding_errors=encoding_errors, _wtitle=_wtitle)
if _wtitle:
return (text, title)
else:
return text
except Exception as e:
if default is not SENTINAL:
LOGGER.exception(e)
return default
raise
|
python
|
def get(path_or_file, default=SENTINAL, mime=None, name=None, backend=None,
encoding=None, encoding_errors=None, kwargs=None,
_wtitle=False):
"""
Get document full text.
Accepts a path or file-like object.
* If given, `default` is returned instead of an error.
* `backend` is either a module object or a string specifying which
default backend to use (e.g. "doc"); take a look at backends
directory to see a list of default backends.
* `mime` and `name` should be passed if the information
is available to caller, otherwise a best guess is made.
If both are specified `mime` takes precedence.
* `encoding` and `encoding_errors` are used to handle text encoding.
They are taken into consideration mostly only by pure-python
backends which do not rely on CLI tools.
Default to "utf8" and "strict" respectively.
* `kwargs` are passed to the underlying backend.
"""
try:
text, title = _get(
path_or_file, default=default, mime=mime, name=name,
backend=backend, kwargs=kwargs, encoding=encoding,
encoding_errors=encoding_errors, _wtitle=_wtitle)
if _wtitle:
return (text, title)
else:
return text
except Exception as e:
if default is not SENTINAL:
LOGGER.exception(e)
return default
raise
|
[
"def",
"get",
"(",
"path_or_file",
",",
"default",
"=",
"SENTINAL",
",",
"mime",
"=",
"None",
",",
"name",
"=",
"None",
",",
"backend",
"=",
"None",
",",
"encoding",
"=",
"None",
",",
"encoding_errors",
"=",
"None",
",",
"kwargs",
"=",
"None",
",",
"_wtitle",
"=",
"False",
")",
":",
"try",
":",
"text",
",",
"title",
"=",
"_get",
"(",
"path_or_file",
",",
"default",
"=",
"default",
",",
"mime",
"=",
"mime",
",",
"name",
"=",
"name",
",",
"backend",
"=",
"backend",
",",
"kwargs",
"=",
"kwargs",
",",
"encoding",
"=",
"encoding",
",",
"encoding_errors",
"=",
"encoding_errors",
",",
"_wtitle",
"=",
"_wtitle",
")",
"if",
"_wtitle",
":",
"return",
"(",
"text",
",",
"title",
")",
"else",
":",
"return",
"text",
"except",
"Exception",
"as",
"e",
":",
"if",
"default",
"is",
"not",
"SENTINAL",
":",
"LOGGER",
".",
"exception",
"(",
"e",
")",
"return",
"default",
"raise"
] |
Get document full text.
Accepts a path or file-like object.
* If given, `default` is returned instead of an error.
* `backend` is either a module object or a string specifying which
default backend to use (e.g. "doc"); take a look at backends
directory to see a list of default backends.
* `mime` and `name` should be passed if the information
is available to caller, otherwise a best guess is made.
If both are specified `mime` takes precedence.
* `encoding` and `encoding_errors` are used to handle text encoding.
They are taken into consideration mostly only by pure-python
backends which do not rely on CLI tools.
Default to "utf8" and "strict" respectively.
* `kwargs` are passed to the underlying backend.
|
[
"Get",
"document",
"full",
"text",
"."
] |
9234cc1e2099209430e20317649549026de283ce
|
https://github.com/btimby/fulltext/blob/9234cc1e2099209430e20317649549026de283ce/fulltext/__init__.py#L585-L618
|
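get() above is the package's main entry point. A minimal usage sketch; the path is made up:

# Sketch: extract plain text from a document, falling back to '' on failure.
import fulltext

text = fulltext.get('report.pdf', default='')        # hypothetical path
print(text[:200])

# Mime/name hints can be passed when the caller already knows them:
with open('report.pdf', 'rb') as f:                  # binary mode is required for file objects
    text = fulltext.get(f, name='report.pdf', mime='application/pdf', default='')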
12,240
|
btimby/fulltext
|
fulltext/util.py
|
hilite
|
def hilite(s, ok=True, bold=False):
"""Return an highlighted version of 'string'."""
if not term_supports_colors():
return s
attr = []
if ok is None: # no color
pass
elif ok: # green
attr.append('32')
else: # red
attr.append('31')
if bold:
attr.append('1')
return '\x1b[%sm%s\x1b[0m' % (';'.join(attr), s)
|
python
|
def hilite(s, ok=True, bold=False):
"""Return an highlighted version of 'string'."""
if not term_supports_colors():
return s
attr = []
if ok is None: # no color
pass
elif ok: # green
attr.append('32')
else: # red
attr.append('31')
if bold:
attr.append('1')
return '\x1b[%sm%s\x1b[0m' % (';'.join(attr), s)
|
[
"def",
"hilite",
"(",
"s",
",",
"ok",
"=",
"True",
",",
"bold",
"=",
"False",
")",
":",
"if",
"not",
"term_supports_colors",
"(",
")",
":",
"return",
"s",
"attr",
"=",
"[",
"]",
"if",
"ok",
"is",
"None",
":",
"# no color",
"pass",
"elif",
"ok",
":",
"# green",
"attr",
".",
"append",
"(",
"'32'",
")",
"else",
":",
"# red",
"attr",
".",
"append",
"(",
"'31'",
")",
"if",
"bold",
":",
"attr",
".",
"append",
"(",
"'1'",
")",
"return",
"'\\x1b[%sm%s\\x1b[0m'",
"%",
"(",
"';'",
".",
"join",
"(",
"attr",
")",
",",
"s",
")"
] |
Return an highlighted version of 'string'.
|
[
"Return",
"an",
"highlighted",
"version",
"of",
"string",
"."
] |
9234cc1e2099209430e20317649549026de283ce
|
https://github.com/btimby/fulltext/blob/9234cc1e2099209430e20317649549026de283ce/fulltext/util.py#L254-L267
|
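A quick sketch of the ANSI helper above; colour codes are emitted only when the terminal supports them:

# Sketch: green/red/bold terminal highlighting.
from fulltext.util import hilite

print(hilite('installed', ok=True))            # green
print(hilite('missing', ok=False, bold=True))  # bold red
print(hilite('n/a', ok=None))                  # neither green nor red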
12,241
|
btimby/fulltext
|
fulltext/util.py
|
fobj_to_tempfile
|
def fobj_to_tempfile(f, suffix=''):
"""Context manager which copies a file object to disk and return its
name. When done the file is deleted.
"""
with tempfile.NamedTemporaryFile(
dir=TEMPDIR, suffix=suffix, delete=False) as t:
shutil.copyfileobj(f, t)
try:
yield t.name
finally:
os.remove(t.name)
|
python
|
def fobj_to_tempfile(f, suffix=''):
"""Context manager which copies a file object to disk and return its
name. When done the file is deleted.
"""
with tempfile.NamedTemporaryFile(
dir=TEMPDIR, suffix=suffix, delete=False) as t:
shutil.copyfileobj(f, t)
try:
yield t.name
finally:
os.remove(t.name)
|
[
"def",
"fobj_to_tempfile",
"(",
"f",
",",
"suffix",
"=",
"''",
")",
":",
"with",
"tempfile",
".",
"NamedTemporaryFile",
"(",
"dir",
"=",
"TEMPDIR",
",",
"suffix",
"=",
"suffix",
",",
"delete",
"=",
"False",
")",
"as",
"t",
":",
"shutil",
".",
"copyfileobj",
"(",
"f",
",",
"t",
")",
"try",
":",
"yield",
"t",
".",
"name",
"finally",
":",
"os",
".",
"remove",
"(",
"t",
".",
"name",
")"
] |
Context manager which copies a file object to disk and return its
name. When done the file is deleted.
|
[
"Context",
"manager",
"which",
"copies",
"a",
"file",
"object",
"to",
"disk",
"and",
"return",
"its",
"name",
".",
"When",
"done",
"the",
"file",
"is",
"deleted",
"."
] |
9234cc1e2099209430e20317649549026de283ce
|
https://github.com/btimby/fulltext/blob/9234cc1e2099209430e20317649549026de283ce/fulltext/util.py#L308-L318
|
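Usage sketch for the context manager above; the input file name is made up:

# Sketch: spill a file object to a named temporary file for tools that need a path.
from fulltext.util import fobj_to_tempfile

with open('archive.zip', 'rb') as f:                    # hypothetical input
    with fobj_to_tempfile(f, suffix='.zip') as tmp_path:
        print(tmp_path)      # a real on-disk path for the duration of the block
# the temporary copy is removed once the inner block exits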
12,242
|
btimby/fulltext
|
fulltext/data/winmake.py
|
rm
|
def rm(pattern):
"""Recursively remove a file or dir by pattern."""
paths = glob.glob(pattern)
for path in paths:
if path.startswith('.git/'):
continue
if os.path.isdir(path):
def onerror(fun, path, excinfo):
exc = excinfo[1]
if exc.errno != errno.ENOENT:
raise
safe_print("rmdir -f %s" % path)
shutil.rmtree(path, onerror=onerror)
else:
safe_print("rm %s" % path)
os.remove(path)
|
python
|
def rm(pattern):
"""Recursively remove a file or dir by pattern."""
paths = glob.glob(pattern)
for path in paths:
if path.startswith('.git/'):
continue
if os.path.isdir(path):
def onerror(fun, path, excinfo):
exc = excinfo[1]
if exc.errno != errno.ENOENT:
raise
safe_print("rmdir -f %s" % path)
shutil.rmtree(path, onerror=onerror)
else:
safe_print("rm %s" % path)
os.remove(path)
|
[
"def",
"rm",
"(",
"pattern",
")",
":",
"paths",
"=",
"glob",
".",
"glob",
"(",
"pattern",
")",
"for",
"path",
"in",
"paths",
":",
"if",
"path",
".",
"startswith",
"(",
"'.git/'",
")",
":",
"continue",
"if",
"os",
".",
"path",
".",
"isdir",
"(",
"path",
")",
":",
"def",
"onerror",
"(",
"fun",
",",
"path",
",",
"excinfo",
")",
":",
"exc",
"=",
"excinfo",
"[",
"1",
"]",
"if",
"exc",
".",
"errno",
"!=",
"errno",
".",
"ENOENT",
":",
"raise",
"safe_print",
"(",
"\"rmdir -f %s\"",
"%",
"path",
")",
"shutil",
".",
"rmtree",
"(",
"path",
",",
"onerror",
"=",
"onerror",
")",
"else",
":",
"safe_print",
"(",
"\"rm %s\"",
"%",
"path",
")",
"os",
".",
"remove",
"(",
"path",
")"
] |
Recursively remove a file or dir by pattern.
|
[
"Recursively",
"remove",
"a",
"file",
"or",
"dir",
"by",
"pattern",
"."
] |
9234cc1e2099209430e20317649549026de283ce
|
https://github.com/btimby/fulltext/blob/9234cc1e2099209430e20317649549026de283ce/fulltext/data/winmake.py#L106-L122
|
12,243
|
btimby/fulltext
|
fulltext/data/winmake.py
|
help
|
def help():
"""Print this help"""
safe_print('Run "make [-p <PYTHON>] <target>" where <target> is one of:')
for name in sorted(_cmds):
safe_print(
" %-20s %s" % (name.replace('_', '-'), _cmds[name] or ''))
sys.exit(1)
|
python
|
def help():
"""Print this help"""
safe_print('Run "make [-p <PYTHON>] <target>" where <target> is one of:')
for name in sorted(_cmds):
safe_print(
" %-20s %s" % (name.replace('_', '-'), _cmds[name] or ''))
sys.exit(1)
|
[
"def",
"help",
"(",
")",
":",
"safe_print",
"(",
"'Run \"make [-p <PYTHON>] <target>\" where <target> is one of:'",
")",
"for",
"name",
"in",
"sorted",
"(",
"_cmds",
")",
":",
"safe_print",
"(",
"\" %-20s %s\"",
"%",
"(",
"name",
".",
"replace",
"(",
"'_'",
",",
"'-'",
")",
",",
"_cmds",
"[",
"name",
"]",
"or",
"''",
")",
")",
"sys",
".",
"exit",
"(",
"1",
")"
] |
Print this help
|
[
"Print",
"this",
"help"
] |
9234cc1e2099209430e20317649549026de283ce
|
https://github.com/btimby/fulltext/blob/9234cc1e2099209430e20317649549026de283ce/fulltext/data/winmake.py#L149-L155
|
12,244
|
btimby/fulltext
|
fulltext/data/winmake.py
|
clean
|
def clean():
"""Deletes dev files"""
rm("$testfn*")
rm("*.bak")
rm("*.core")
rm("*.egg-info")
rm("*.orig")
rm("*.pyc")
rm("*.pyd")
rm("*.pyo")
rm("*.rej")
rm("*.so")
rm("*.~")
rm("*__pycache__")
rm(".coverage")
rm(".tox")
rm(".coverage")
rm("build")
rm("dist")
rm("docs/_build")
rm("htmlcov")
rm("tmp")
rm("venv")
|
python
|
def clean():
"""Deletes dev files"""
rm("$testfn*")
rm("*.bak")
rm("*.core")
rm("*.egg-info")
rm("*.orig")
rm("*.pyc")
rm("*.pyd")
rm("*.pyo")
rm("*.rej")
rm("*.so")
rm("*.~")
rm("*__pycache__")
rm(".coverage")
rm(".tox")
rm(".coverage")
rm("build")
rm("dist")
rm("docs/_build")
rm("htmlcov")
rm("tmp")
rm("venv")
|
[
"def",
"clean",
"(",
")",
":",
"rm",
"(",
"\"$testfn*\"",
")",
"rm",
"(",
"\"*.bak\"",
")",
"rm",
"(",
"\"*.core\"",
")",
"rm",
"(",
"\"*.egg-info\"",
")",
"rm",
"(",
"\"*.orig\"",
")",
"rm",
"(",
"\"*.pyc\"",
")",
"rm",
"(",
"\"*.pyd\"",
")",
"rm",
"(",
"\"*.pyo\"",
")",
"rm",
"(",
"\"*.rej\"",
")",
"rm",
"(",
"\"*.so\"",
")",
"rm",
"(",
"\"*.~\"",
")",
"rm",
"(",
"\"*__pycache__\"",
")",
"rm",
"(",
"\".coverage\"",
")",
"rm",
"(",
"\".tox\"",
")",
"rm",
"(",
"\".coverage\"",
")",
"rm",
"(",
"\"build\"",
")",
"rm",
"(",
"\"dist\"",
")",
"rm",
"(",
"\"docs/_build\"",
")",
"rm",
"(",
"\"htmlcov\"",
")",
"rm",
"(",
"\"tmp\"",
")",
"rm",
"(",
"\"venv\"",
")"
] |
Deletes dev files
|
[
"Deletes",
"dev",
"files"
] |
9234cc1e2099209430e20317649549026de283ce
|
https://github.com/btimby/fulltext/blob/9234cc1e2099209430e20317649549026de283ce/fulltext/data/winmake.py#L200-L222
|
12,245
|
btimby/fulltext
|
fulltext/data/winmake.py
|
lint
|
def lint():
"""Run flake8 against all py files"""
py_files = subprocess.check_output("git ls-files")
if PY3:
py_files = py_files.decode()
py_files = [x for x in py_files.split() if x.endswith('.py')]
py_files = ' '.join(py_files)
sh("%s -m flake8 %s" % (PYTHON, py_files), nolog=True)
|
python
|
def lint():
"""Run flake8 against all py files"""
py_files = subprocess.check_output("git ls-files")
if PY3:
py_files = py_files.decode()
py_files = [x for x in py_files.split() if x.endswith('.py')]
py_files = ' '.join(py_files)
sh("%s -m flake8 %s" % (PYTHON, py_files), nolog=True)
|
[
"def",
"lint",
"(",
")",
":",
"py_files",
"=",
"subprocess",
".",
"check_output",
"(",
"\"git ls-files\"",
")",
"if",
"PY3",
":",
"py_files",
"=",
"py_files",
".",
"decode",
"(",
")",
"py_files",
"=",
"[",
"x",
"for",
"x",
"in",
"py_files",
".",
"split",
"(",
")",
"if",
"x",
".",
"endswith",
"(",
"'.py'",
")",
"]",
"py_files",
"=",
"' '",
".",
"join",
"(",
"py_files",
")",
"sh",
"(",
"\"%s -m flake8 %s\"",
"%",
"(",
"PYTHON",
",",
"py_files",
")",
",",
"nolog",
"=",
"True",
")"
] |
Run flake8 against all py files
|
[
"Run",
"flake8",
"against",
"all",
"py",
"files"
] |
9234cc1e2099209430e20317649549026de283ce
|
https://github.com/btimby/fulltext/blob/9234cc1e2099209430e20317649549026de283ce/fulltext/data/winmake.py#L234-L241
|
12,246
|
btimby/fulltext
|
fulltext/data/winmake.py
|
coverage
|
def coverage():
"""Run coverage tests."""
# Note: coverage options are controlled by .coveragerc file
install()
test_setup()
sh("%s -m coverage run %s" % (PYTHON, TEST_SCRIPT))
sh("%s -m coverage report" % PYTHON)
sh("%s -m coverage html" % PYTHON)
sh("%s -m webbrowser -t htmlcov/index.html" % PYTHON)
|
python
|
def coverage():
"""Run coverage tests."""
# Note: coverage options are controlled by .coveragerc file
install()
test_setup()
sh("%s -m coverage run %s" % (PYTHON, TEST_SCRIPT))
sh("%s -m coverage report" % PYTHON)
sh("%s -m coverage html" % PYTHON)
sh("%s -m webbrowser -t htmlcov/index.html" % PYTHON)
|
[
"def",
"coverage",
"(",
")",
":",
"# Note: coverage options are controlled by .coveragerc file",
"install",
"(",
")",
"test_setup",
"(",
")",
"sh",
"(",
"\"%s -m coverage run %s\"",
"%",
"(",
"PYTHON",
",",
"TEST_SCRIPT",
")",
")",
"sh",
"(",
"\"%s -m coverage report\"",
"%",
"PYTHON",
")",
"sh",
"(",
"\"%s -m coverage html\"",
"%",
"PYTHON",
")",
"sh",
"(",
"\"%s -m webbrowser -t htmlcov/index.html\"",
"%",
"PYTHON",
")"
] |
Run coverage tests.
|
[
"Run",
"coverage",
"tests",
"."
] |
9234cc1e2099209430e20317649549026de283ce
|
https://github.com/btimby/fulltext/blob/9234cc1e2099209430e20317649549026de283ce/fulltext/data/winmake.py#L261-L269
|
12,247
|
btimby/fulltext
|
fulltext/data/winmake.py
|
venv
|
def venv():
"""Install venv + deps."""
try:
import virtualenv # NOQA
except ImportError:
sh("%s -m pip install virtualenv" % PYTHON)
if not os.path.isdir("venv"):
sh("%s -m virtualenv venv" % PYTHON)
sh("venv\\Scripts\\pip install -r %s" % (REQUIREMENTS_TXT))
|
python
|
def venv():
"""Install venv + deps."""
try:
import virtualenv # NOQA
except ImportError:
sh("%s -m pip install virtualenv" % PYTHON)
if not os.path.isdir("venv"):
sh("%s -m virtualenv venv" % PYTHON)
sh("venv\\Scripts\\pip install -r %s" % (REQUIREMENTS_TXT))
|
[
"def",
"venv",
"(",
")",
":",
"try",
":",
"import",
"virtualenv",
"# NOQA",
"except",
"ImportError",
":",
"sh",
"(",
"\"%s -m pip install virtualenv\"",
"%",
"PYTHON",
")",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"\"venv\"",
")",
":",
"sh",
"(",
"\"%s -m virtualenv venv\"",
"%",
"PYTHON",
")",
"sh",
"(",
"\"venv\\\\Scripts\\\\pip install -r %s\"",
"%",
"(",
"REQUIREMENTS_TXT",
")",
")"
] |
Install venv + deps.
|
[
"Install",
"venv",
"+",
"deps",
"."
] |
9234cc1e2099209430e20317649549026de283ce
|
https://github.com/btimby/fulltext/blob/9234cc1e2099209430e20317649549026de283ce/fulltext/data/winmake.py#L311-L319
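A rough equivalent using only the standard-library venv module instead of virtualenv (a sketch: the directory name and requirements file are assumptions, and the pip path shown is the POSIX layout rather than the venv\Scripts layout used above):

import os
import subprocess
import venv  # standard library since Python 3.3

ENV_DIR = "venv"                    # hypothetical environment directory
REQUIREMENTS = "requirements.txt"   # hypothetical requirements file

# Create the environment once, with pip bootstrapped into it.
if not os.path.isdir(ENV_DIR):
    venv.create(ENV_DIR, with_pip=True)

# Install dependencies with the environment's own pip.
pip = os.path.join(ENV_DIR, "bin", "pip")
subprocess.check_call([pip, "install", "-r", REQUIREMENTS])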
|
12,248
|
pschmitt/pykeepass
|
pykeepass/kdbx_parsing/kdbx4.py
|
compute_header_hmac_hash
|
def compute_header_hmac_hash(context):
"""Compute HMAC-SHA256 hash of header.
Used to prevent header tampering."""
return hmac.new(
hashlib.sha512(
b'\xff' * 8 +
hashlib.sha512(
context._.header.value.dynamic_header.master_seed.data +
context.transformed_key +
b'\x01'
).digest()
).digest(),
context._.header.data,
hashlib.sha256
).digest()
|
python
|
def compute_header_hmac_hash(context):
"""Compute HMAC-SHA256 hash of header.
Used to prevent header tampering."""
return hmac.new(
hashlib.sha512(
b'\xff' * 8 +
hashlib.sha512(
context._.header.value.dynamic_header.master_seed.data +
context.transformed_key +
b'\x01'
).digest()
).digest(),
context._.header.data,
hashlib.sha256
).digest()
|
[
"def",
"compute_header_hmac_hash",
"(",
"context",
")",
":",
"return",
"hmac",
".",
"new",
"(",
"hashlib",
".",
"sha512",
"(",
"b'\\xff'",
"*",
"8",
"+",
"hashlib",
".",
"sha512",
"(",
"context",
".",
"_",
".",
"header",
".",
"value",
".",
"dynamic_header",
".",
"master_seed",
".",
"data",
"+",
"context",
".",
"transformed_key",
"+",
"b'\\x01'",
")",
".",
"digest",
"(",
")",
")",
".",
"digest",
"(",
")",
",",
"context",
".",
"_",
".",
"header",
".",
"data",
",",
"hashlib",
".",
"sha256",
")",
".",
"digest",
"(",
")"
] |
Compute HMAC-SHA256 hash of header.
Used to prevent header tampering.
|
[
"Compute",
"HMAC",
"-",
"SHA256",
"hash",
"of",
"header",
".",
"Used",
"to",
"prevent",
"header",
"tampering",
"."
] |
85da3630d6e410b2a10d3e711cd69308b51d401d
|
https://github.com/pschmitt/pykeepass/blob/85da3630d6e410b2a10d3e711cd69308b51d401d/pykeepass/kdbx_parsing/kdbx4.py#L64-L79
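The key construction used above can be exercised on its own; the sketch below substitutes random bytes for the master seed, transformed key, and serialized header, and reproduces the sha512-derived HMAC key and the HMAC-SHA256 over the header:

import hashlib
import hmac
import os

# Stand-ins for the fields a parser would pull out of a real KDBX4 file.
master_seed = os.urandom(32)
transformed_key = os.urandom(32)
header_bytes = b"raw header bytes go here"

# Derive the 64-byte HMAC key exactly as compute_header_hmac_hash does.
hmac_key = hashlib.sha512(
    b"\xff" * 8 +
    hashlib.sha512(master_seed + transformed_key + b"\x01").digest()
).digest()

# MAC the serialized header with HMAC-SHA256.
header_hmac = hmac.new(hmac_key, header_bytes, hashlib.sha256).digest()
print(header_hmac.hex())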
|
12,249
|
pschmitt/pykeepass
|
pykeepass/kdbx_parsing/kdbx4.py
|
compute_payload_block_hash
|
def compute_payload_block_hash(this):
"""Compute hash of each payload block.
Used to prevent payload corruption and tampering."""
return hmac.new(
hashlib.sha512(
struct.pack('<Q', this._index) +
hashlib.sha512(
this._._.header.value.dynamic_header.master_seed.data +
this._.transformed_key + b'\x01'
).digest()
).digest(),
struct.pack('<Q', this._index) +
struct.pack('<I', len(this.block_data)) +
this.block_data, hashlib.sha256
).digest()
|
python
|
def compute_payload_block_hash(this):
"""Compute hash of each payload block.
Used to prevent payload corruption and tampering."""
return hmac.new(
hashlib.sha512(
struct.pack('<Q', this._index) +
hashlib.sha512(
this._._.header.value.dynamic_header.master_seed.data +
this._.transformed_key + b'\x01'
).digest()
).digest(),
struct.pack('<Q', this._index) +
struct.pack('<I', len(this.block_data)) +
this.block_data, hashlib.sha256
).digest()
|
[
"def",
"compute_payload_block_hash",
"(",
"this",
")",
":",
"return",
"hmac",
".",
"new",
"(",
"hashlib",
".",
"sha512",
"(",
"struct",
".",
"pack",
"(",
"'<Q'",
",",
"this",
".",
"_index",
")",
"+",
"hashlib",
".",
"sha512",
"(",
"this",
".",
"_",
".",
"_",
".",
"header",
".",
"value",
".",
"dynamic_header",
".",
"master_seed",
".",
"data",
"+",
"this",
".",
"_",
".",
"transformed_key",
"+",
"b'\\x01'",
")",
".",
"digest",
"(",
")",
")",
".",
"digest",
"(",
")",
",",
"struct",
".",
"pack",
"(",
"'<Q'",
",",
"this",
".",
"_index",
")",
"+",
"struct",
".",
"pack",
"(",
"'<I'",
",",
"len",
"(",
"this",
".",
"block_data",
")",
")",
"+",
"this",
".",
"block_data",
",",
"hashlib",
".",
"sha256",
")",
".",
"digest",
"(",
")"
] |
Compute hash of each payload block.
Used to prevent payload corruption and tampering.
|
[
"Compute",
"hash",
"of",
"each",
"payload",
"block",
".",
"Used",
"to",
"prevent",
"payload",
"corruption",
"and",
"tampering",
"."
] |
85da3630d6e410b2a10d3e711cd69308b51d401d
|
https://github.com/pschmitt/pykeepass/blob/85da3630d6e410b2a10d3e711cd69308b51d401d/pykeepass/kdbx_parsing/kdbx4.py#L156-L171
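The per-block variant differs mainly in that the 64-bit block index is mixed into both the HMAC key and the MAC'd message (index, length, data), so blocks cannot be reordered or truncated unnoticed; a sketch with dummy values:

import hashlib
import hmac
import os
import struct

master_seed = os.urandom(32)
transformed_key = os.urandom(32)
block_index = 0
block_data = os.urandom(64)

# The key depends on the block index.
block_key = hashlib.sha512(
    struct.pack("<Q", block_index) +
    hashlib.sha512(master_seed + transformed_key + b"\x01").digest()
).digest()

# The MAC covers index, length and data of the block.
message = (struct.pack("<Q", block_index) +
           struct.pack("<I", len(block_data)) +
           block_data)
block_hmac = hmac.new(block_key, message, hashlib.sha256).digest()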
|
12,250
|
pschmitt/pykeepass
|
pykeepass/kdbx_parsing/pytwofish.py
|
Twofish.decrypt
|
def decrypt(self, block):
"""Decrypt blocks."""
if len(block) % 16:
raise ValueError("block size must be a multiple of 16")
plaintext = b''
while block:
a, b, c, d = struct.unpack("<4L", block[:16])
temp = [a, b, c, d]
decrypt(self.context, temp)
plaintext += struct.pack("<4L", *temp)
block = block[16:]
return plaintext
|
python
|
def decrypt(self, block):
"""Decrypt blocks."""
if len(block) % 16:
raise ValueError("block size must be a multiple of 16")
plaintext = b''
while block:
a, b, c, d = struct.unpack("<4L", block[:16])
temp = [a, b, c, d]
decrypt(self.context, temp)
plaintext += struct.pack("<4L", *temp)
block = block[16:]
return plaintext
|
[
"def",
"decrypt",
"(",
"self",
",",
"block",
")",
":",
"if",
"len",
"(",
"block",
")",
"%",
"16",
":",
"raise",
"ValueError",
"(",
"\"block size must be a multiple of 16\"",
")",
"plaintext",
"=",
"b''",
"while",
"block",
":",
"a",
",",
"b",
",",
"c",
",",
"d",
"=",
"struct",
".",
"unpack",
"(",
"\"<4L\"",
",",
"block",
"[",
":",
"16",
"]",
")",
"temp",
"=",
"[",
"a",
",",
"b",
",",
"c",
",",
"d",
"]",
"decrypt",
"(",
"self",
".",
"context",
",",
"temp",
")",
"plaintext",
"+=",
"struct",
".",
"pack",
"(",
"\"<4L\"",
",",
"*",
"temp",
")",
"block",
"=",
"block",
"[",
"16",
":",
"]",
"return",
"plaintext"
] |
Decrypt blocks.
|
[
"Decrypt",
"blocks",
"."
] |
85da3630d6e410b2a10d3e711cd69308b51d401d
|
https://github.com/pschmitt/pykeepass/blob/85da3630d6e410b2a10d3e711cd69308b51d401d/pykeepass/kdbx_parsing/pytwofish.py#L81-L96
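The block handling around the cipher core can be checked in isolation: each 16-byte block is unpacked into four little-endian 32-bit words and repacked afterwards. A sketch of just that framing (the decrypt(self.context, temp) call itself lives in the surrounding module and is omitted here):

import struct

block = bytes(range(16))              # one 16-byte block of dummy data

# Split into four little-endian 32-bit words, as the loop above does.
a, b, c, d = struct.unpack("<4L", block)
words = [a, b, c, d]

# ...the real code mutates `words` in place via decrypt(self.context, words)...

# Repacking the words restores the original 16 bytes.
assert struct.pack("<4L", *words) == block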
|
12,251
|
pschmitt/pykeepass
|
pykeepass/kdbx_parsing/pytwofish.py
|
Twofish.encrypt
|
def encrypt(self, block):
"""Encrypt blocks."""
if len(block) % 16:
raise ValueError("block size must be a multiple of 16")
ciphertext = b''
while block:
a, b, c, d = struct.unpack("<4L", block[0:16])
temp = [a, b, c, d]
encrypt(self.context, temp)
ciphertext += struct.pack("<4L", *temp)
block = block[16:]
return ciphertext
|
python
|
def encrypt(self, block):
"""Encrypt blocks."""
if len(block) % 16:
raise ValueError("block size must be a multiple of 16")
ciphertext = b''
while block:
a, b, c, d = struct.unpack("<4L", block[0:16])
temp = [a, b, c, d]
encrypt(self.context, temp)
ciphertext += struct.pack("<4L", *temp)
block = block[16:]
return ciphertext
|
[
"def",
"encrypt",
"(",
"self",
",",
"block",
")",
":",
"if",
"len",
"(",
"block",
")",
"%",
"16",
":",
"raise",
"ValueError",
"(",
"\"block size must be a multiple of 16\"",
")",
"ciphertext",
"=",
"b''",
"while",
"block",
":",
"a",
",",
"b",
",",
"c",
",",
"d",
"=",
"struct",
".",
"unpack",
"(",
"\"<4L\"",
",",
"block",
"[",
"0",
":",
"16",
"]",
")",
"temp",
"=",
"[",
"a",
",",
"b",
",",
"c",
",",
"d",
"]",
"encrypt",
"(",
"self",
".",
"context",
",",
"temp",
")",
"ciphertext",
"+=",
"struct",
".",
"pack",
"(",
"\"<4L\"",
",",
"*",
"temp",
")",
"block",
"=",
"block",
"[",
"16",
":",
"]",
"return",
"ciphertext"
] |
Encrypt blocks.
|
[
"Encrypt",
"blocks",
"."
] |
85da3630d6e410b2a10d3e711cd69308b51d401d
|
https://github.com/pschmitt/pykeepass/blob/85da3630d6e410b2a10d3e711cd69308b51d401d/pykeepass/kdbx_parsing/pytwofish.py#L99-L114
|
12,252
|
pschmitt/pykeepass
|
pykeepass/kdbx_parsing/common.py
|
aes_kdf
|
def aes_kdf(key, rounds, password=None, keyfile=None):
"""Set up a context for AES128-ECB encryption to find transformed_key"""
cipher = AES.new(key, AES.MODE_ECB)
key_composite = compute_key_composite(
password=password,
keyfile=keyfile
)
# get the number of rounds from the header and transform the key_composite
transformed_key = key_composite
for _ in range(0, rounds):
transformed_key = cipher.encrypt(transformed_key)
return hashlib.sha256(transformed_key).digest()
|
python
|
def aes_kdf(key, rounds, password=None, keyfile=None):
"""Set up a context for AES128-ECB encryption to find transformed_key"""
cipher = AES.new(key, AES.MODE_ECB)
key_composite = compute_key_composite(
password=password,
keyfile=keyfile
)
# get the number of rounds from the header and transform the key_composite
transformed_key = key_composite
for _ in range(0, rounds):
transformed_key = cipher.encrypt(transformed_key)
return hashlib.sha256(transformed_key).digest()
|
[
"def",
"aes_kdf",
"(",
"key",
",",
"rounds",
",",
"password",
"=",
"None",
",",
"keyfile",
"=",
"None",
")",
":",
"cipher",
"=",
"AES",
".",
"new",
"(",
"key",
",",
"AES",
".",
"MODE_ECB",
")",
"key_composite",
"=",
"compute_key_composite",
"(",
"password",
"=",
"password",
",",
"keyfile",
"=",
"keyfile",
")",
"# get the number of rounds from the header and transform the key_composite",
"transformed_key",
"=",
"key_composite",
"for",
"_",
"in",
"range",
"(",
"0",
",",
"rounds",
")",
":",
"transformed_key",
"=",
"cipher",
".",
"encrypt",
"(",
"transformed_key",
")",
"return",
"hashlib",
".",
"sha256",
"(",
"transformed_key",
")",
".",
"digest",
"(",
")"
] |
Set up a context for AES128-ECB encryption to find transformed_key
|
[
"Set",
"up",
"a",
"context",
"for",
"AES128",
"-",
"ECB",
"encryption",
"to",
"find",
"transformed_key"
] |
85da3630d6e410b2a10d3e711cd69308b51d401d
|
https://github.com/pschmitt/pykeepass/blob/85da3630d6e410b2a10d3e711cd69308b51d401d/pykeepass/kdbx_parsing/common.py#L84-L98
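A sketch of the same AES-KDF loop with dummy key material, assuming pycryptodome provides the AES import used by the parser; real databases use a much larger round count than shown here:

import hashlib
import os
from Crypto.Cipher import AES  # pycryptodome

transform_seed = os.urandom(32)   # dummy stand-in for the header's transform seed
key_composite = os.urandom(32)    # dummy stand-in for compute_key_composite(...)
rounds = 6000                     # illustrative only

cipher = AES.new(transform_seed, AES.MODE_ECB)
transformed = key_composite
for _ in range(rounds):
    transformed = cipher.encrypt(transformed)

transformed_key = hashlib.sha256(transformed).digest()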
|
12,253
|
pschmitt/pykeepass
|
pykeepass/kdbx_parsing/common.py
|
compute_key_composite
|
def compute_key_composite(password=None, keyfile=None):
"""Compute composite key.
Used in header verification and payload decryption."""
# hash the password
if password:
password_composite = hashlib.sha256(password.encode('utf-8')).digest()
else:
password_composite = b''
# hash the keyfile
if keyfile:
# try to read XML keyfile
try:
with open(keyfile, 'r') as f:
tree = etree.parse(f).getroot()
keyfile_composite = base64.b64decode(tree.find('Key/Data').text)
# otherwise, try to read plain keyfile
except (etree.XMLSyntaxError, UnicodeDecodeError):
try:
with open(keyfile, 'rb') as f:
key = f.read()
try:
int(key, 16)
is_hex = True
except ValueError:
is_hex = False
# if the length is 32 bytes we assume it is the key
if len(key) == 32:
keyfile_composite = key
# if the length is 64 bytes we assume the key is hex encoded
elif len(key) == 64 and is_hex:
keyfile_composite = codecs.decode(key, 'hex')
# anything else may be a file to hash for the key
else:
keyfile_composite = hashlib.sha256(key).digest()
except:
raise IOError('Could not read keyfile')
else:
keyfile_composite = b''
# create composite key from password and keyfile composites
return hashlib.sha256(password_composite + keyfile_composite).digest()
|
python
|
def compute_key_composite(password=None, keyfile=None):
"""Compute composite key.
Used in header verification and payload decryption."""
# hash the password
if password:
password_composite = hashlib.sha256(password.encode('utf-8')).digest()
else:
password_composite = b''
# hash the keyfile
if keyfile:
# try to read XML keyfile
try:
with open(keyfile, 'r') as f:
tree = etree.parse(f).getroot()
keyfile_composite = base64.b64decode(tree.find('Key/Data').text)
# otherwise, try to read plain keyfile
except (etree.XMLSyntaxError, UnicodeDecodeError):
try:
with open(keyfile, 'rb') as f:
key = f.read()
try:
int(key, 16)
is_hex = True
except ValueError:
is_hex = False
# if the length is 32 bytes we assume it is the key
if len(key) == 32:
keyfile_composite = key
# if the length is 64 bytes we assume the key is hex encoded
elif len(key) == 64 and is_hex:
keyfile_composite = codecs.decode(key, 'hex')
# anything else may be a file to hash for the key
else:
keyfile_composite = hashlib.sha256(key).digest()
except:
raise IOError('Could not read keyfile')
else:
keyfile_composite = b''
# create composite key from password and keyfile composites
return hashlib.sha256(password_composite + keyfile_composite).digest()
|
[
"def",
"compute_key_composite",
"(",
"password",
"=",
"None",
",",
"keyfile",
"=",
"None",
")",
":",
"# hash the password",
"if",
"password",
":",
"password_composite",
"=",
"hashlib",
".",
"sha256",
"(",
"password",
".",
"encode",
"(",
"'utf-8'",
")",
")",
".",
"digest",
"(",
")",
"else",
":",
"password_composite",
"=",
"b''",
"# hash the keyfile",
"if",
"keyfile",
":",
"# try to read XML keyfile",
"try",
":",
"with",
"open",
"(",
"keyfile",
",",
"'r'",
")",
"as",
"f",
":",
"tree",
"=",
"etree",
".",
"parse",
"(",
"f",
")",
".",
"getroot",
"(",
")",
"keyfile_composite",
"=",
"base64",
".",
"b64decode",
"(",
"tree",
".",
"find",
"(",
"'Key/Data'",
")",
".",
"text",
")",
"# otherwise, try to read plain keyfile",
"except",
"(",
"etree",
".",
"XMLSyntaxError",
",",
"UnicodeDecodeError",
")",
":",
"try",
":",
"with",
"open",
"(",
"keyfile",
",",
"'rb'",
")",
"as",
"f",
":",
"key",
"=",
"f",
".",
"read",
"(",
")",
"try",
":",
"int",
"(",
"key",
",",
"16",
")",
"is_hex",
"=",
"True",
"except",
"ValueError",
":",
"is_hex",
"=",
"False",
"# if the length is 32 bytes we assume it is the key",
"if",
"len",
"(",
"key",
")",
"==",
"32",
":",
"keyfile_composite",
"=",
"key",
"# if the length is 64 bytes we assume the key is hex encoded",
"elif",
"len",
"(",
"key",
")",
"==",
"64",
"and",
"is_hex",
":",
"keyfile_composite",
"=",
"codecs",
".",
"decode",
"(",
"key",
",",
"'hex'",
")",
"# anything else may be a file to hash for the key",
"else",
":",
"keyfile_composite",
"=",
"hashlib",
".",
"sha256",
"(",
"key",
")",
".",
"digest",
"(",
")",
"except",
":",
"raise",
"IOError",
"(",
"'Could not read keyfile'",
")",
"else",
":",
"keyfile_composite",
"=",
"b''",
"# create composite key from password and keyfile composites",
"return",
"hashlib",
".",
"sha256",
"(",
"password_composite",
"+",
"keyfile_composite",
")",
".",
"digest",
"(",
")"
] |
Compute composite key.
Used in header verification and payload decryption.
|
[
"Compute",
"composite",
"key",
".",
"Used",
"in",
"header",
"verification",
"and",
"payload",
"decryption",
"."
] |
85da3630d6e410b2a10d3e711cd69308b51d401d
|
https://github.com/pschmitt/pykeepass/blob/85da3630d6e410b2a10d3e711cd69308b51d401d/pykeepass/kdbx_parsing/common.py#L101-L144
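For the common password-only case the composite reduces to two sha256 passes with an empty keyfile part; a sketch with a hypothetical password:

import hashlib

password = "correct horse battery staple"   # hypothetical credentials

# Password-only composite: sha256(sha256(password) + b''); keyfile omitted.
password_composite = hashlib.sha256(password.encode("utf-8")).digest()
keyfile_composite = b""
key_composite = hashlib.sha256(password_composite + keyfile_composite).digest()
print(key_composite.hex())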
|
12,254
|
pschmitt/pykeepass
|
pykeepass/kdbx_parsing/common.py
|
compute_master
|
def compute_master(context):
"""Computes master key from transformed key and master seed.
Used in payload decryption."""
# combine the transformed key with the header master seed to find the master_key
master_key = hashlib.sha256(
context._.header.value.dynamic_header.master_seed.data +
context.transformed_key).digest()
return master_key
|
python
|
def compute_master(context):
"""Computes master key from transformed key and master seed.
Used in payload decryption."""
# combine the transformed key with the header master seed to find the master_key
master_key = hashlib.sha256(
context._.header.value.dynamic_header.master_seed.data +
context.transformed_key).digest()
return master_key
|
[
"def",
"compute_master",
"(",
"context",
")",
":",
"# combine the transformed key with the header master seed to find the master_key",
"master_key",
"=",
"hashlib",
".",
"sha256",
"(",
"context",
".",
"_",
".",
"header",
".",
"value",
".",
"dynamic_header",
".",
"master_seed",
".",
"data",
"+",
"context",
".",
"transformed_key",
")",
".",
"digest",
"(",
")",
"return",
"master_key"
] |
Computes master key from transformed key and master seed.
Used in payload decryption.
|
[
"Computes",
"master",
"key",
"from",
"transformed",
"key",
"and",
"master",
"seed",
".",
"Used",
"in",
"payload",
"decryption",
"."
] |
85da3630d6e410b2a10d3e711cd69308b51d401d
|
https://github.com/pschmitt/pykeepass/blob/85da3630d6e410b2a10d3e711cd69308b51d401d/pykeepass/kdbx_parsing/common.py#L146-L154
|
12,255
|
pschmitt/pykeepass
|
pykeepass/kdbx_parsing/common.py
|
Unprotect
|
def Unprotect(protected_stream_id, protected_stream_key, subcon):
"""Select stream cipher based on protected_stream_id"""
return Switch(
protected_stream_id,
{'arcfourvariant': ARCFourVariantStream(protected_stream_key, subcon),
'salsa20': Salsa20Stream(protected_stream_key, subcon),
'chacha20': ChaCha20Stream(protected_stream_key, subcon),
},
default=subcon
)
|
python
|
def Unprotect(protected_stream_id, protected_stream_key, subcon):
"""Select stream cipher based on protected_stream_id"""
return Switch(
protected_stream_id,
{'arcfourvariant': ARCFourVariantStream(protected_stream_key, subcon),
'salsa20': Salsa20Stream(protected_stream_key, subcon),
'chacha20': ChaCha20Stream(protected_stream_key, subcon),
},
default=subcon
)
|
[
"def",
"Unprotect",
"(",
"protected_stream_id",
",",
"protected_stream_key",
",",
"subcon",
")",
":",
"return",
"Switch",
"(",
"protected_stream_id",
",",
"{",
"'arcfourvariant'",
":",
"ARCFourVariantStream",
"(",
"protected_stream_key",
",",
"subcon",
")",
",",
"'salsa20'",
":",
"Salsa20Stream",
"(",
"protected_stream_key",
",",
"subcon",
")",
",",
"'chacha20'",
":",
"ChaCha20Stream",
"(",
"protected_stream_key",
",",
"subcon",
")",
",",
"}",
",",
"default",
"=",
"subcon",
")"
] |
Select stream cipher based on protected_stream_id
|
[
"Select",
"stream",
"cipher",
"based",
"on",
"protected_stream_id"
] |
85da3630d6e410b2a10d3e711cd69308b51d401d
|
https://github.com/pschmitt/pykeepass/blob/85da3630d6e410b2a10d3e711cd69308b51d401d/pykeepass/kdbx_parsing/common.py#L231-L241
|
12,256
|
pschmitt/pykeepass
|
pykeepass/kdbx_parsing/twofish.py
|
BlockCipher.encrypt
|
def encrypt(self,plaintext,n=''):
"""Encrypt some plaintext
plaintext = a string of binary data
n = the 'tweak' value when the chaining mode is XTS
The encrypt function will encrypt the supplied plaintext.
The behavior varies slightly depending on the chaining mode.
ECB, CBC:
---------
When the supplied plaintext is not a multiple of the blocksize
of the cipher, then the remaining plaintext will be cached.
The next time the encrypt function is called with some plaintext,
the new plaintext will be concatenated to the cache and then
cache+plaintext will be encrypted.
CFB, OFB, CTR:
--------------
When the chaining mode allows the cipher to act as a stream cipher,
the encrypt function will always encrypt all of the supplied
plaintext immediately. No cache will be kept.
XTS:
----
Because the handling of the last two blocks is linked,
it needs the whole block of plaintext to be supplied at once.
Every encrypt function called on a XTS cipher will output
an encrypted block based on the current supplied plaintext block.
CMAC:
-----
Everytime the function is called, the hash from the input data is calculated.
No finalizing needed.
The hashlength is equal to block size of the used block cipher.
"""
#self.ed = 'e' if chain is encrypting, 'd' if decrypting,
# None if nothing happened with the chain yet
#assert self.ed in ('e',None)
# makes sure you don't encrypt with a cipher that has started decrypting
self.ed = 'e'
if self.mode == MODE_XTS:
# data sequence number (or 'tweak') has to be provided when in XTS mode
return self.chain.update(plaintext,'e',n)
else:
return self.chain.update(plaintext,'e')
|
python
|
def encrypt(self,plaintext,n=''):
"""Encrypt some plaintext
plaintext = a string of binary data
n = the 'tweak' value when the chaining mode is XTS
The encrypt function will encrypt the supplied plaintext.
The behavior varies slightly depending on the chaining mode.
ECB, CBC:
---------
When the supplied plaintext is not a multiple of the blocksize
of the cipher, then the remaining plaintext will be cached.
The next time the encrypt function is called with some plaintext,
the new plaintext will be concatenated to the cache and then
cache+plaintext will be encrypted.
CFB, OFB, CTR:
--------------
When the chaining mode allows the cipher to act as a stream cipher,
the encrypt function will always encrypt all of the supplied
plaintext immediately. No cache will be kept.
XTS:
----
Because the handling of the last two blocks is linked,
it needs the whole block of plaintext to be supplied at once.
Every encrypt function called on a XTS cipher will output
an encrypted block based on the current supplied plaintext block.
CMAC:
-----
Everytime the function is called, the hash from the input data is calculated.
No finalizing needed.
The hashlength is equal to block size of the used block cipher.
"""
#self.ed = 'e' if chain is encrypting, 'd' if decrypting,
# None if nothing happened with the chain yet
#assert self.ed in ('e',None)
# makes sure you don't encrypt with a cipher that has started decrypting
self.ed = 'e'
if self.mode == MODE_XTS:
# data sequence number (or 'tweak') has to be provided when in XTS mode
return self.chain.update(plaintext,'e',n)
else:
return self.chain.update(plaintext,'e')
|
[
"def",
"encrypt",
"(",
"self",
",",
"plaintext",
",",
"n",
"=",
"''",
")",
":",
"#self.ed = 'e' if chain is encrypting, 'd' if decrypting,",
"# None if nothing happened with the chain yet",
"#assert self.ed in ('e',None) ",
"# makes sure you don't encrypt with a cipher that has started decrypting",
"self",
".",
"ed",
"=",
"'e'",
"if",
"self",
".",
"mode",
"==",
"MODE_XTS",
":",
"# data sequence number (or 'tweak') has to be provided when in XTS mode",
"return",
"self",
".",
"chain",
".",
"update",
"(",
"plaintext",
",",
"'e'",
",",
"n",
")",
"else",
":",
"return",
"self",
".",
"chain",
".",
"update",
"(",
"plaintext",
",",
"'e'",
")"
] |
Encrypt some plaintext
plaintext = a string of binary data
n = the 'tweak' value when the chaining mode is XTS
The encrypt function will encrypt the supplied plaintext.
The behavior varies slightly depending on the chaining mode.
ECB, CBC:
---------
When the supplied plaintext is not a multiple of the blocksize
of the cipher, then the remaining plaintext will be cached.
The next time the encrypt function is called with some plaintext,
the new plaintext will be concatenated to the cache and then
cache+plaintext will be encrypted.
CFB, OFB, CTR:
--------------
When the chaining mode allows the cipher to act as a stream cipher,
the encrypt function will always encrypt all of the supplied
plaintext immediately. No cache will be kept.
XTS:
----
Because the handling of the last two blocks is linked,
it needs the whole block of plaintext to be supplied at once.
Every encrypt function called on a XTS cipher will output
an encrypted block based on the current supplied plaintext block.
CMAC:
-----
Everytime the function is called, the hash from the input data is calculated.
No finalizing needed.
The hashlength is equal to block size of the used block cipher.
|
[
"Encrypt",
"some",
"plaintext"
] |
85da3630d6e410b2a10d3e711cd69308b51d401d
|
https://github.com/pschmitt/pykeepass/blob/85da3630d6e410b2a10d3e711cd69308b51d401d/pykeepass/kdbx_parsing/twofish.py#L114-L159
|
12,257
|
pschmitt/pykeepass
|
pykeepass/kdbx_parsing/twofish.py
|
BlockCipher.decrypt
|
def decrypt(self,ciphertext,n=''):
"""Decrypt some ciphertext
ciphertext = a string of binary data
n = the 'tweak' value when the chaining mode is XTS
The decrypt function will decrypt the supplied ciphertext.
The behavior varies slightly depending on the chaining mode.
ECB, CBC:
---------
When the supplied ciphertext is not a multiple of the blocksize
of the cipher, then the remaining ciphertext will be cached.
The next time the decrypt function is called with some ciphertext,
the new ciphertext will be concatenated to the cache and then
cache+ciphertext will be decrypted.
CFB, OFB, CTR:
--------------
When the chaining mode allows the cipher to act as a stream cipher,
the decrypt function will always decrypt all of the supplied
ciphertext immediately. No cache will be kept.
XTS:
----
Because the handling of the last two blocks is linked,
it needs the whole block of ciphertext to be supplied at once.
Every decrypt function called on a XTS cipher will output
a decrypted block based on the current supplied ciphertext block.
CMAC:
-----
Mode not supported for decryption as this does not make sense.
"""
#self.ed = 'e' if chain is encrypting, 'd' if decrypting,
# None if nothing happened with the chain yet
#assert self.ed in ('d',None)
# makes sure you don't decrypt with a cipher that has started encrypting
self.ed = 'd'
if self.mode == MODE_XTS:
# data sequence number (or 'tweak') has to be provided when in XTS mode
return self.chain.update(ciphertext,'d',n)
else:
return self.chain.update(ciphertext,'d')
|
python
|
def decrypt(self,ciphertext,n=''):
"""Decrypt some ciphertext
ciphertext = a string of binary data
n = the 'tweak' value when the chaining mode is XTS
The decrypt function will decrypt the supplied ciphertext.
The behavior varies slightly depending on the chaining mode.
ECB, CBC:
---------
When the supplied ciphertext is not a multiple of the blocksize
of the cipher, then the remaining ciphertext will be cached.
The next time the decrypt function is called with some ciphertext,
the new ciphertext will be concatenated to the cache and then
cache+ciphertext will be decrypted.
CFB, OFB, CTR:
--------------
When the chaining mode allows the cipher to act as a stream cipher,
the decrypt function will always decrypt all of the supplied
ciphertext immediately. No cache will be kept.
XTS:
----
Because the handling of the last two blocks is linked,
it needs the whole block of ciphertext to be supplied at once.
Every decrypt function called on a XTS cipher will output
a decrypted block based on the current supplied ciphertext block.
CMAC:
-----
Mode not supported for decryption as this does not make sense.
"""
#self.ed = 'e' if chain is encrypting, 'd' if decrypting,
# None if nothing happened with the chain yet
#assert self.ed in ('d',None)
# makes sure you don't decrypt with a cipher that has started encrypting
self.ed = 'd'
if self.mode == MODE_XTS:
# data sequence number (or 'tweak') has to be provided when in XTS mode
return self.chain.update(ciphertext,'d',n)
else:
return self.chain.update(ciphertext,'d')
|
[
"def",
"decrypt",
"(",
"self",
",",
"ciphertext",
",",
"n",
"=",
"''",
")",
":",
"#self.ed = 'e' if chain is encrypting, 'd' if decrypting,",
"# None if nothing happened with the chain yet",
"#assert self.ed in ('d',None)",
"# makes sure you don't decrypt with a cipher that has started encrypting",
"self",
".",
"ed",
"=",
"'d'",
"if",
"self",
".",
"mode",
"==",
"MODE_XTS",
":",
"# data sequence number (or 'tweak') has to be provided when in XTS mode",
"return",
"self",
".",
"chain",
".",
"update",
"(",
"ciphertext",
",",
"'d'",
",",
"n",
")",
"else",
":",
"return",
"self",
".",
"chain",
".",
"update",
"(",
"ciphertext",
",",
"'d'",
")"
] |
Decrypt some ciphertext
ciphertext = a string of binary data
n = the 'tweak' value when the chaining mode is XTS
The decrypt function will decrypt the supplied ciphertext.
The behavior varies slightly depending on the chaining mode.
ECB, CBC:
---------
When the supplied ciphertext is not a multiple of the blocksize
of the cipher, then the remaining ciphertext will be cached.
The next time the decrypt function is called with some ciphertext,
the new ciphertext will be concatenated to the cache and then
cache+ciphertext will be decrypted.
CFB, OFB, CTR:
--------------
When the chaining mode allows the cipher to act as a stream cipher,
the decrypt function will always decrypt all of the supplied
ciphertext immediately. No cache will be kept.
XTS:
----
Because the handling of the last two blocks is linked,
it needs the whole block of ciphertext to be supplied at once.
Every decrypt function called on a XTS cipher will output
a decrypted block based on the current supplied ciphertext block.
CMAC:
-----
Mode not supported for decryption as this does not make sense.
|
[
"Decrypt",
"some",
"ciphertext"
] |
85da3630d6e410b2a10d3e711cd69308b51d401d
|
https://github.com/pschmitt/pykeepass/blob/85da3630d6e410b2a10d3e711cd69308b51d401d/pykeepass/kdbx_parsing/twofish.py#L161-L204
|
12,258
|
pschmitt/pykeepass
|
pykeepass/kdbx_parsing/twofish.py
|
BlockCipher.final
|
def final(self,style='pkcs7'):
# TODO: after calling final, reset the IV? so the cipher is as good as new?
"""Finalizes the encryption by padding the cache
padfct = padding function
import from CryptoPlus.Util.padding
For ECB, CBC: the remaining bytes in the cache will be padded and
encrypted.
For OFB,CFB, CTR: an encrypted padding will be returned, making the
total outputed bytes since construction of the cipher
a multiple of the blocksize of that cipher.
If the cipher has been used for decryption, the final function won't do
anything. You have to manually unpad if necessary.
After finalization, the chain can still be used but the IV, counter etc
aren't reset but just continue as they were after the last step (finalization step).
"""
assert self.mode not in (MODE_XTS, MODE_CMAC) # finalizing (=padding) doesn't make sense when in XTS or CMAC mode
if self.ed == b'e':
# when the chain is in encryption mode, finalizing will pad the cache and encrypt this last block
if self.mode in (MODE_OFB,MODE_CFB,MODE_CTR):
dummy = b'0'*(self.chain.totalbytes%self.blocksize) # a dummy string that will be used to get a valid padding
else: #ECB, CBC
dummy = self.chain.cache
pdata = pad(dummy,self.blocksize,style=style)[len(dummy):]
#~ pad = padfct(dummy,padding.PAD,self.blocksize)[len(dummy):] # construct the padding necessary
return self.chain.update(pdata,b'e') # supply the padding to the update function => chain cache will be "cache+padding"
else:
# final function doesn't make sense when decrypting => padding should be removed manually
pass
|
python
|
def final(self,style='pkcs7'):
# TODO: after calling final, reset the IV? so the cipher is as good as new?
"""Finalizes the encryption by padding the cache
padfct = padding function
import from CryptoPlus.Util.padding
For ECB, CBC: the remaining bytes in the cache will be padded and
encrypted.
For OFB,CFB, CTR: an encrypted padding will be returned, making the
total outputed bytes since construction of the cipher
a multiple of the blocksize of that cipher.
If the cipher has been used for decryption, the final function won't do
anything. You have to manually unpad if necessary.
After finalization, the chain can still be used but the IV, counter etc
aren't reset but just continue as they were after the last step (finalization step).
"""
assert self.mode not in (MODE_XTS, MODE_CMAC) # finalizing (=padding) doesn't make sense when in XTS or CMAC mode
if self.ed == b'e':
# when the chain is in encryption mode, finalizing will pad the cache and encrypt this last block
if self.mode in (MODE_OFB,MODE_CFB,MODE_CTR):
dummy = b'0'*(self.chain.totalbytes%self.blocksize) # a dummy string that will be used to get a valid padding
else: #ECB, CBC
dummy = self.chain.cache
pdata = pad(dummy,self.blocksize,style=style)[len(dummy):]
#~ pad = padfct(dummy,padding.PAD,self.blocksize)[len(dummy):] # construct the padding necessary
return self.chain.update(pdata,b'e') # supply the padding to the update function => chain cache will be "cache+padding"
else:
# final function doesn't make sense when decrypting => padding should be removed manually
pass
|
[
"def",
"final",
"(",
"self",
",",
"style",
"=",
"'pkcs7'",
")",
":",
"# TODO: after calling final, reset the IV? so the cipher is as good as new?",
"assert",
"self",
".",
"mode",
"not",
"in",
"(",
"MODE_XTS",
",",
"MODE_CMAC",
")",
"# finalizing (=padding) doesn't make sense when in XTS or CMAC mode",
"if",
"self",
".",
"ed",
"==",
"b'e'",
":",
"# when the chain is in encryption mode, finalizing will pad the cache and encrypt this last block",
"if",
"self",
".",
"mode",
"in",
"(",
"MODE_OFB",
",",
"MODE_CFB",
",",
"MODE_CTR",
")",
":",
"dummy",
"=",
"b'0'",
"*",
"(",
"self",
".",
"chain",
".",
"totalbytes",
"%",
"self",
".",
"blocksize",
")",
"# a dummy string that will be used to get a valid padding",
"else",
":",
"#ECB, CBC",
"dummy",
"=",
"self",
".",
"chain",
".",
"cache",
"pdata",
"=",
"pad",
"(",
"dummy",
",",
"self",
".",
"blocksize",
",",
"style",
"=",
"style",
")",
"[",
"len",
"(",
"dummy",
")",
":",
"]",
"#~ pad = padfct(dummy,padding.PAD,self.blocksize)[len(dummy):] # construct the padding necessary",
"return",
"self",
".",
"chain",
".",
"update",
"(",
"pdata",
",",
"b'e'",
")",
"# supply the padding to the update function => chain cache will be \"cache+padding\"",
"else",
":",
"# final function doesn't make sense when decrypting => padding should be removed manually",
"pass"
] |
Finalizes the encryption by padding the cache
padfct = padding function
import from CryptoPlus.Util.padding
For ECB, CBC: the remaining bytes in the cache will be padded and
encrypted.
For OFB,CFB, CTR: an encrypted padding will be returned, making the
total outputed bytes since construction of the cipher
a multiple of the blocksize of that cipher.
If the cipher has been used for decryption, the final function won't do
anything. You have to manually unpad if necessary.
After finalization, the chain can still be used but the IV, counter etc
aren't reset but just continue as they were after the last step (finalization step).
|
[
"Finalizes",
"the",
"encryption",
"by",
"padding",
"the",
"cache"
] |
85da3630d6e410b2a10d3e711cd69308b51d401d
|
https://github.com/pschmitt/pykeepass/blob/85da3630d6e410b2a10d3e711cd69308b51d401d/pykeepass/kdbx_parsing/twofish.py#L206-L237
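The pad() helper used above comes from the surrounding package; for reference, a hand-rolled PKCS#7 pad/unpad pair for a 16-byte block size behaves like this (a sketch, not the module's own implementation):

def pkcs7_pad(data: bytes, blocksize: int = 16) -> bytes:
    # Always append 1..blocksize bytes, each equal to the pad length.
    pad_len = blocksize - (len(data) % blocksize)
    return data + bytes([pad_len]) * pad_len

def pkcs7_unpad(data: bytes) -> bytes:
    # The last byte says how many padding bytes to strip.
    return data[:-data[-1]]

padded = pkcs7_pad(b"14 byte cache!")     # -> 16 bytes ending in b"\x02\x02"
assert pkcs7_unpad(padded) == b"14 byte cache!"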
|
12,259
|
pschmitt/pykeepass
|
pykeepass/baseelement.py
|
BaseElement._datetime_to_utc
|
def _datetime_to_utc(self, dt):
"""Convert naive datetimes to UTC"""
if not dt.tzinfo:
dt = dt.replace(tzinfo=tz.gettz())
return dt.astimezone(tz.gettz('UTC'))
|
python
|
def _datetime_to_utc(self, dt):
"""Convert naive datetimes to UTC"""
if not dt.tzinfo:
dt = dt.replace(tzinfo=tz.gettz())
return dt.astimezone(tz.gettz('UTC'))
|
[
"def",
"_datetime_to_utc",
"(",
"self",
",",
"dt",
")",
":",
"if",
"not",
"dt",
".",
"tzinfo",
":",
"dt",
"=",
"dt",
".",
"replace",
"(",
"tzinfo",
"=",
"tz",
".",
"gettz",
"(",
")",
")",
"return",
"dt",
".",
"astimezone",
"(",
"tz",
".",
"gettz",
"(",
"'UTC'",
")",
")"
] |
Convert naive datetimes to UTC
|
[
"Convert",
"naive",
"datetimes",
"to",
"UTC"
] |
85da3630d6e410b2a10d3e711cd69308b51d401d
|
https://github.com/pschmitt/pykeepass/blob/85da3630d6e410b2a10d3e711cd69308b51d401d/pykeepass/baseelement.py#L92-L97
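A standalone check of the same conversion, assuming python-dateutil (which the class already imports as tz): a naive datetime gets the local zone attached and is then expressed in UTC:

from datetime import datetime
from dateutil import tz

naive = datetime(2024, 1, 2, 15, 30)            # no tzinfo attached

# Attach the local zone, then convert to UTC, as _datetime_to_utc does.
local = naive.replace(tzinfo=tz.gettz())
utc = local.astimezone(tz.gettz("UTC"))
print(utc.isoformat())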
|
12,260
|
pschmitt/pykeepass
|
pykeepass/baseelement.py
|
BaseElement._encode_time
|
def _encode_time(self, value):
"""Convert datetime to base64 or plaintext string"""
if self._kp.version >= (4, 0):
diff_seconds = int(
(
self._datetime_to_utc(value) -
datetime(
year=1,
month=1,
day=1,
tzinfo=tz.gettz('UTC')
)
).total_seconds()
)
return base64.b64encode(
struct.pack('<Q', diff_seconds)
).decode('utf-8')
else:
return self._datetime_to_utc(value).isoformat()
|
python
|
def _encode_time(self, value):
"""Convert datetime to base64 or plaintext string"""
if self._kp.version >= (4, 0):
diff_seconds = int(
(
self._datetime_to_utc(value) -
datetime(
year=1,
month=1,
day=1,
tzinfo=tz.gettz('UTC')
)
).total_seconds()
)
return base64.b64encode(
struct.pack('<Q', diff_seconds)
).decode('utf-8')
else:
return self._datetime_to_utc(value).isoformat()
|
[
"def",
"_encode_time",
"(",
"self",
",",
"value",
")",
":",
"if",
"self",
".",
"_kp",
".",
"version",
">=",
"(",
"4",
",",
"0",
")",
":",
"diff_seconds",
"=",
"int",
"(",
"(",
"self",
".",
"_datetime_to_utc",
"(",
"value",
")",
"-",
"datetime",
"(",
"year",
"=",
"1",
",",
"month",
"=",
"1",
",",
"day",
"=",
"1",
",",
"tzinfo",
"=",
"tz",
".",
"gettz",
"(",
"'UTC'",
")",
")",
")",
".",
"total_seconds",
"(",
")",
")",
"return",
"base64",
".",
"b64encode",
"(",
"struct",
".",
"pack",
"(",
"'<Q'",
",",
"diff_seconds",
")",
")",
".",
"decode",
"(",
"'utf-8'",
")",
"else",
":",
"return",
"self",
".",
"_datetime_to_utc",
"(",
"value",
")",
".",
"isoformat",
"(",
")"
] |
Convert datetime to base64 or plaintext string
|
[
"Convert",
"datetime",
"to",
"base64",
"or",
"plaintext",
"string"
] |
85da3630d6e410b2a10d3e711cd69308b51d401d
|
https://github.com/pschmitt/pykeepass/blob/85da3630d6e410b2a10d3e711cd69308b51d401d/pykeepass/baseelement.py#L99-L118
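The KDBX4 branch stores a timestamp as the base64 of its little-endian second count since year 1; a stdlib-only round-trip sketch (using datetime.timezone.utc in place of dateutil for brevity):

import base64
import struct
from datetime import datetime, timedelta, timezone

epoch = datetime(1, 1, 1, tzinfo=timezone.utc)
value = datetime(2024, 1, 2, 15, 30, tzinfo=timezone.utc)

# Encode: whole seconds since year 1, packed as <Q, then base64.
seconds = int((value - epoch).total_seconds())
encoded = base64.b64encode(struct.pack("<Q", seconds)).decode("utf-8")

# Decode: reverse the steps and compare.
decoded = epoch + timedelta(seconds=struct.unpack("<Q", base64.b64decode(encoded))[0])
assert decoded == value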
|
12,261
|
pschmitt/pykeepass
|
pykeepass/baseelement.py
|
BaseElement._decode_time
|
def _decode_time(self, text):
"""Convert base64 time or plaintext time to datetime"""
if self._kp.version >= (4, 0):
# decode KDBX4 date from b64 format
try:
return (
datetime(year=1, month=1, day=1, tzinfo=tz.gettz('UTC')) +
timedelta(
seconds = struct.unpack('<Q', base64.b64decode(text))[0]
)
)
except BinasciiError:
return parser.parse(
text,
tzinfos={'UTC':tz.gettz('UTC')}
)
else:
return parser.parse(
text,
tzinfos={'UTC':tz.gettz('UTC')}
)
|
python
|
def _decode_time(self, text):
"""Convert base64 time or plaintext time to datetime"""
if self._kp.version >= (4, 0):
# decode KDBX4 date from b64 format
try:
return (
datetime(year=1, month=1, day=1, tzinfo=tz.gettz('UTC')) +
timedelta(
seconds = struct.unpack('<Q', base64.b64decode(text))[0]
)
)
except BinasciiError:
return parser.parse(
text,
tzinfos={'UTC':tz.gettz('UTC')}
)
else:
return parser.parse(
text,
tzinfos={'UTC':tz.gettz('UTC')}
)
|
[
"def",
"_decode_time",
"(",
"self",
",",
"text",
")",
":",
"if",
"self",
".",
"_kp",
".",
"version",
">=",
"(",
"4",
",",
"0",
")",
":",
"# decode KDBX4 date from b64 format",
"try",
":",
"return",
"(",
"datetime",
"(",
"year",
"=",
"1",
",",
"month",
"=",
"1",
",",
"day",
"=",
"1",
",",
"tzinfo",
"=",
"tz",
".",
"gettz",
"(",
"'UTC'",
")",
")",
"+",
"timedelta",
"(",
"seconds",
"=",
"struct",
".",
"unpack",
"(",
"'<Q'",
",",
"base64",
".",
"b64decode",
"(",
"text",
")",
")",
"[",
"0",
"]",
")",
")",
"except",
"BinasciiError",
":",
"return",
"parser",
".",
"parse",
"(",
"text",
",",
"tzinfos",
"=",
"{",
"'UTC'",
":",
"tz",
".",
"gettz",
"(",
"'UTC'",
")",
"}",
")",
"else",
":",
"return",
"parser",
".",
"parse",
"(",
"text",
",",
"tzinfos",
"=",
"{",
"'UTC'",
":",
"tz",
".",
"gettz",
"(",
"'UTC'",
")",
"}",
")"
] |
Convert base64 time or plaintext time to datetime
|
[
"Convert",
"base64",
"time",
"or",
"plaintext",
"time",
"to",
"datetime"
] |
85da3630d6e410b2a10d3e711cd69308b51d401d
|
https://github.com/pschmitt/pykeepass/blob/85da3630d6e410b2a10d3e711cd69308b51d401d/pykeepass/baseelement.py#L120-L141
|
12,262
|
thunder-project/thunder
|
thunder/images/readers.py
|
fromrdd
|
def fromrdd(rdd, dims=None, nrecords=None, dtype=None, labels=None, ordered=False):
"""
Load images from a Spark RDD.
Input RDD must be a collection of key-value pairs
where keys are singleton tuples indexing images,
and values are 2d or 3d ndarrays.
Parameters
----------
rdd : SparkRDD
An RDD containing the images.
dims : tuple or array, optional, default = None
Image dimensions (if provided will avoid check).
nrecords : int, optional, default = None
Number of images (if provided will avoid check).
dtype : string, default = None
Data numerical type (if provided will avoid check)
labels : array, optional, default = None
Labels for records. If provided, should be one-dimensional.
ordered : boolean, optional, default = False
Whether or not the rdd is ordered by key
"""
from .images import Images
from bolt.spark.array import BoltArraySpark
if dims is None or dtype is None:
item = rdd.values().first()
dtype = item.dtype
dims = item.shape
if nrecords is None:
nrecords = rdd.count()
def process_keys(record):
k, v = record
if isinstance(k, int):
k = (k,)
return k, v
values = BoltArraySpark(rdd.map(process_keys), shape=(nrecords,) + tuple(dims), dtype=dtype, split=1, ordered=ordered)
return Images(values, labels=labels)
|
python
|
def fromrdd(rdd, dims=None, nrecords=None, dtype=None, labels=None, ordered=False):
"""
Load images from a Spark RDD.
Input RDD must be a collection of key-value pairs
where keys are singleton tuples indexing images,
and values are 2d or 3d ndarrays.
Parameters
----------
rdd : SparkRDD
An RDD containing the images.
dims : tuple or array, optional, default = None
Image dimensions (if provided will avoid check).
nrecords : int, optional, default = None
Number of images (if provided will avoid check).
dtype : string, default = None
Data numerical type (if provided will avoid check)
labels : array, optional, default = None
Labels for records. If provided, should be one-dimensional.
ordered : boolean, optional, default = False
Whether or not the rdd is ordered by key
"""
from .images import Images
from bolt.spark.array import BoltArraySpark
if dims is None or dtype is None:
item = rdd.values().first()
dtype = item.dtype
dims = item.shape
if nrecords is None:
nrecords = rdd.count()
def process_keys(record):
k, v = record
if isinstance(k, int):
k = (k,)
return k, v
values = BoltArraySpark(rdd.map(process_keys), shape=(nrecords,) + tuple(dims), dtype=dtype, split=1, ordered=ordered)
return Images(values, labels=labels)
|
[
"def",
"fromrdd",
"(",
"rdd",
",",
"dims",
"=",
"None",
",",
"nrecords",
"=",
"None",
",",
"dtype",
"=",
"None",
",",
"labels",
"=",
"None",
",",
"ordered",
"=",
"False",
")",
":",
"from",
".",
"images",
"import",
"Images",
"from",
"bolt",
".",
"spark",
".",
"array",
"import",
"BoltArraySpark",
"if",
"dims",
"is",
"None",
"or",
"dtype",
"is",
"None",
":",
"item",
"=",
"rdd",
".",
"values",
"(",
")",
".",
"first",
"(",
")",
"dtype",
"=",
"item",
".",
"dtype",
"dims",
"=",
"item",
".",
"shape",
"if",
"nrecords",
"is",
"None",
":",
"nrecords",
"=",
"rdd",
".",
"count",
"(",
")",
"def",
"process_keys",
"(",
"record",
")",
":",
"k",
",",
"v",
"=",
"record",
"if",
"isinstance",
"(",
"k",
",",
"int",
")",
":",
"k",
"=",
"(",
"k",
",",
")",
"return",
"k",
",",
"v",
"values",
"=",
"BoltArraySpark",
"(",
"rdd",
".",
"map",
"(",
"process_keys",
")",
",",
"shape",
"=",
"(",
"nrecords",
",",
")",
"+",
"tuple",
"(",
"dims",
")",
",",
"dtype",
"=",
"dtype",
",",
"split",
"=",
"1",
",",
"ordered",
"=",
"ordered",
")",
"return",
"Images",
"(",
"values",
",",
"labels",
"=",
"labels",
")"
] |
Load images from a Spark RDD.
Input RDD must be a collection of key-value pairs
where keys are singleton tuples indexing images,
and values are 2d or 3d ndarrays.
Parameters
----------
rdd : SparkRDD
An RDD containing the images.
dims : tuple or array, optional, default = None
Image dimensions (if provided will avoid check).
nrecords : int, optional, default = None
Number of images (if provided will avoid check).
dtype : string, default = None
Data numerical type (if provided will avoid check)
labels : array, optional, default = None
Labels for records. If provided, should be one-dimensional.
ordered : boolean, optional, default = False
Whether or not the rdd is ordered by key
|
[
"Load",
"images",
"from",
"a",
"Spark",
"RDD",
"."
] |
967ff8f3e7c2fabe1705743d95eb2746d4329786
|
https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/images/readers.py#L10-L56
|
12,263
|
thunder-project/thunder
|
thunder/images/readers.py
|
fromarray
|
def fromarray(values, labels=None, npartitions=None, engine=None):
"""
Load images from an array.
First dimension will be used to index images,
so remaining dimensions after the first should
be the dimensions of the images,
e.g. (3, 100, 200) for 3 x (100, 200) images
Parameters
----------
values : array-like
The array of images. Can be a numpy array,
a bolt array, or an array-like.
labels : array, optional, default = None
Labels for records. If provided, should be one-dimensional.
npartitions : int, default = None
Number of partitions for parallelization (spark only)
engine : object, default = None
Computational engine (e.g. a SparkContext for spark)
"""
from .images import Images
import bolt
if isinstance(values, bolt.spark.array.BoltArraySpark):
return Images(values)
values = asarray(values)
if values.ndim < 2:
raise ValueError('Array for images must have at least 2 dimensions, got %g' % values.ndim)
if values.ndim == 2:
values = expand_dims(values, 0)
shape = None
dtype = None
for im in values:
if shape is None:
shape = im.shape
dtype = im.dtype
if not im.shape == shape:
raise ValueError('Arrays must all be of same shape; got both %s and %s' %
(str(shape), str(im.shape)))
if not im.dtype == dtype:
raise ValueError('Arrays must all be of same data type; got both %s and %s' %
(str(dtype), str(im.dtype)))
if spark and isinstance(engine, spark):
if not npartitions:
npartitions = engine.defaultParallelism
values = bolt.array(values, context=engine, npartitions=npartitions, axis=(0,))
values._ordered = True
return Images(values)
return Images(values, labels=labels)
|
python
|
def fromarray(values, labels=None, npartitions=None, engine=None):
"""
Load images from an array.
First dimension will be used to index images,
so remaining dimensions after the first should
be the dimensions of the images,
e.g. (3, 100, 200) for 3 x (100, 200) images
Parameters
----------
values : array-like
The array of images. Can be a numpy array,
a bolt array, or an array-like.
labels : array, optional, default = None
Labels for records. If provided, should be one-dimensional.
npartitions : int, default = None
Number of partitions for parallelization (spark only)
engine : object, default = None
Computational engine (e.g. a SparkContext for spark)
"""
from .images import Images
import bolt
if isinstance(values, bolt.spark.array.BoltArraySpark):
return Images(values)
values = asarray(values)
if values.ndim < 2:
raise ValueError('Array for images must have at least 2 dimensions, got %g' % values.ndim)
if values.ndim == 2:
values = expand_dims(values, 0)
shape = None
dtype = None
for im in values:
if shape is None:
shape = im.shape
dtype = im.dtype
if not im.shape == shape:
raise ValueError('Arrays must all be of same shape; got both %s and %s' %
(str(shape), str(im.shape)))
if not im.dtype == dtype:
raise ValueError('Arrays must all be of same data type; got both %s and %s' %
(str(dtype), str(im.dtype)))
if spark and isinstance(engine, spark):
if not npartitions:
npartitions = engine.defaultParallelism
values = bolt.array(values, context=engine, npartitions=npartitions, axis=(0,))
values._ordered = True
return Images(values)
return Images(values, labels=labels)
|
[
"def",
"fromarray",
"(",
"values",
",",
"labels",
"=",
"None",
",",
"npartitions",
"=",
"None",
",",
"engine",
"=",
"None",
")",
":",
"from",
".",
"images",
"import",
"Images",
"import",
"bolt",
"if",
"isinstance",
"(",
"values",
",",
"bolt",
".",
"spark",
".",
"array",
".",
"BoltArraySpark",
")",
":",
"return",
"Images",
"(",
"values",
")",
"values",
"=",
"asarray",
"(",
"values",
")",
"if",
"values",
".",
"ndim",
"<",
"2",
":",
"raise",
"ValueError",
"(",
"'Array for images must have at least 2 dimensions, got %g'",
"%",
"values",
".",
"ndim",
")",
"if",
"values",
".",
"ndim",
"==",
"2",
":",
"values",
"=",
"expand_dims",
"(",
"values",
",",
"0",
")",
"shape",
"=",
"None",
"dtype",
"=",
"None",
"for",
"im",
"in",
"values",
":",
"if",
"shape",
"is",
"None",
":",
"shape",
"=",
"im",
".",
"shape",
"dtype",
"=",
"im",
".",
"dtype",
"if",
"not",
"im",
".",
"shape",
"==",
"shape",
":",
"raise",
"ValueError",
"(",
"'Arrays must all be of same shape; got both %s and %s'",
"%",
"(",
"str",
"(",
"shape",
")",
",",
"str",
"(",
"im",
".",
"shape",
")",
")",
")",
"if",
"not",
"im",
".",
"dtype",
"==",
"dtype",
":",
"raise",
"ValueError",
"(",
"'Arrays must all be of same data type; got both %s and %s'",
"%",
"(",
"str",
"(",
"dtype",
")",
",",
"str",
"(",
"im",
".",
"dtype",
")",
")",
")",
"if",
"spark",
"and",
"isinstance",
"(",
"engine",
",",
"spark",
")",
":",
"if",
"not",
"npartitions",
":",
"npartitions",
"=",
"engine",
".",
"defaultParallelism",
"values",
"=",
"bolt",
".",
"array",
"(",
"values",
",",
"context",
"=",
"engine",
",",
"npartitions",
"=",
"npartitions",
",",
"axis",
"=",
"(",
"0",
",",
")",
")",
"values",
".",
"_ordered",
"=",
"True",
"return",
"Images",
"(",
"values",
")",
"return",
"Images",
"(",
"values",
",",
"labels",
"=",
"labels",
")"
] |
Load images from an array.
First dimension will be used to index images,
so remaining dimensions after the first should
be the dimensions of the images,
e.g. (3, 100, 200) for 3 x (100, 200) images
Parameters
----------
values : array-like
The array of images. Can be a numpy array,
a bolt array, or an array-like.
labels : array, optional, default = None
Labels for records. If provided, should be one-dimensional.
npartitions : int, default = None
Number of partitions for parallelization (spark only)
engine : object, default = None
Computational engine (e.g. a SparkContext for spark)
|
[
"Load",
"images",
"from",
"an",
"array",
"."
] |
967ff8f3e7c2fabe1705743d95eb2746d4329786
|
https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/images/readers.py#L58-L116
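A hedged local-mode usage sketch, assuming numpy and thunder are installed; with no engine argument the data stays numpy-backed and Spark is not required (the import path simply mirrors the module shown above):

import numpy as np
from thunder.images.readers import fromarray

# Three 100x200 images stacked along the first axis.
stack = np.random.rand(3, 100, 200)
images = fromarray(stack, labels=[0, 1, 2])   # labels are optional, one per image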
|
12,264
|
thunder-project/thunder
|
thunder/images/readers.py
|
fromlist
|
def fromlist(items, accessor=None, keys=None, dims=None, dtype=None, labels=None, npartitions=None, engine=None):
"""
Load images from a list of items using the given accessor.
Parameters
----------
accessor : function
Apply to each item from the list to yield an image.
keys : list, optional, default=None
An optional list of keys.
dims : tuple, optional, default=None
Specify a known image dimension to avoid computation.
labels : array, optional, default = None
Labels for records. If provided, should be one-dimensional.
npartitions : int
Number of partitions for computational engine.
"""
if spark and isinstance(engine, spark):
nrecords = len(items)
if keys:
items = zip(keys, items)
else:
keys = [(i,) for i in range(nrecords)]
items = zip(keys, items)
if not npartitions:
npartitions = engine.defaultParallelism
rdd = engine.parallelize(items, npartitions)
if accessor:
rdd = rdd.mapValues(accessor)
return fromrdd(rdd, nrecords=nrecords, dims=dims, dtype=dtype, labels=labels, ordered=True)
else:
if accessor:
items = asarray([accessor(i) for i in items])
return fromarray(items, labels=labels)
|
python
|
def fromlist(items, accessor=None, keys=None, dims=None, dtype=None, labels=None, npartitions=None, engine=None):
"""
Load images from a list of items using the given accessor.
Parameters
----------
accessor : function
Apply to each item from the list to yield an image.
keys : list, optional, default=None
An optional list of keys.
dims : tuple, optional, default=None
Specify a known image dimension to avoid computation.
labels : array, optional, default = None
Labels for records. If provided, should be one-dimensional.
npartitions : int
Number of partitions for computational engine.
"""
if spark and isinstance(engine, spark):
nrecords = len(items)
if keys:
items = zip(keys, items)
else:
keys = [(i,) for i in range(nrecords)]
items = zip(keys, items)
if not npartitions:
npartitions = engine.defaultParallelism
rdd = engine.parallelize(items, npartitions)
if accessor:
rdd = rdd.mapValues(accessor)
return fromrdd(rdd, nrecords=nrecords, dims=dims, dtype=dtype, labels=labels, ordered=True)
else:
if accessor:
items = asarray([accessor(i) for i in items])
return fromarray(items, labels=labels)
|
[
"def",
"fromlist",
"(",
"items",
",",
"accessor",
"=",
"None",
",",
"keys",
"=",
"None",
",",
"dims",
"=",
"None",
",",
"dtype",
"=",
"None",
",",
"labels",
"=",
"None",
",",
"npartitions",
"=",
"None",
",",
"engine",
"=",
"None",
")",
":",
"if",
"spark",
"and",
"isinstance",
"(",
"engine",
",",
"spark",
")",
":",
"nrecords",
"=",
"len",
"(",
"items",
")",
"if",
"keys",
":",
"items",
"=",
"zip",
"(",
"keys",
",",
"items",
")",
"else",
":",
"keys",
"=",
"[",
"(",
"i",
",",
")",
"for",
"i",
"in",
"range",
"(",
"nrecords",
")",
"]",
"items",
"=",
"zip",
"(",
"keys",
",",
"items",
")",
"if",
"not",
"npartitions",
":",
"npartitions",
"=",
"engine",
".",
"defaultParallelism",
"rdd",
"=",
"engine",
".",
"parallelize",
"(",
"items",
",",
"npartitions",
")",
"if",
"accessor",
":",
"rdd",
"=",
"rdd",
".",
"mapValues",
"(",
"accessor",
")",
"return",
"fromrdd",
"(",
"rdd",
",",
"nrecords",
"=",
"nrecords",
",",
"dims",
"=",
"dims",
",",
"dtype",
"=",
"dtype",
",",
"labels",
"=",
"labels",
",",
"ordered",
"=",
"True",
")",
"else",
":",
"if",
"accessor",
":",
"items",
"=",
"asarray",
"(",
"[",
"accessor",
"(",
"i",
")",
"for",
"i",
"in",
"items",
"]",
")",
"return",
"fromarray",
"(",
"items",
",",
"labels",
"=",
"labels",
")"
] |
Load images from a list of items using the given accessor.
Parameters
----------
accessor : function
Apply to each item from the list to yield an image.
keys : list, optional, default=None
An optional list of keys.
dims : tuple, optional, default=None
Specify a known image dimension to avoid computation.
labels : array, optional, default = None
Labels for records. If provided, should be one-dimensional.
npartitions : int
Number of partitions for computational engine.
|
[
"Load",
"images",
"from",
"a",
"list",
"of",
"items",
"using",
"the",
"given",
"accessor",
"."
] |
967ff8f3e7c2fabe1705743d95eb2746d4329786
|
https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/images/readers.py#L119-L157
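A local-mode sketch in which the items and the accessor are both hypothetical: each list entry stands in for something loadable, and the accessor turns it into a 2d array before the stack is handed to fromarray internally:

import numpy as np
from thunder.images.readers import fromlist

items = ["frame-0", "frame-1", "frame-2"]      # hypothetical identifiers

def accessor(item):
    # A real accessor would load the image named by `item`;
    # this one fabricates a deterministic 32x32 array instead.
    value = int(item.split("-")[1])
    return np.full((32, 32), value, dtype="float64")

images = fromlist(items, accessor=accessor)    # no engine: runs locally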
|
12,265
|
thunder-project/thunder
|
thunder/images/readers.py
|
frompath
|
def frompath(path, accessor=None, ext=None, start=None, stop=None, recursive=False, npartitions=None, dims=None, dtype=None, labels=None, recount=False, engine=None, credentials=None):
"""
Load images from a path using the given accessor.
Supports both local and remote filesystems.
Parameters
----------
accessor : function
Apply to each item after loading to yield an image.
ext : str, optional, default=None
File extension.
npartitions : int, optional, default=None
Number of partitions for computational engine,
if None will use default for engine.
dims : tuple, optional, default=None
Dimensions of images.
dtype : str, optional, default=None
Numerical type of images.
labels : array, optional, default = None
Labels for records. If provided, should be one-dimensional.
start, stop : nonnegative int, optional, default=None
Indices of files to load, interpreted using Python slicing conventions.
recursive : boolean, optional, default=False
If true, will recursively descend directories from path, loading all files
with an extension matching 'ext'.
recount : boolean, optional, default=False
Force subsequent record counting.
"""
from thunder.readers import get_parallel_reader
reader = get_parallel_reader(path)(engine, credentials=credentials)
data = reader.read(path, ext=ext, start=start, stop=stop,
recursive=recursive, npartitions=npartitions)
if spark and isinstance(engine, spark):
if accessor:
data = data.flatMap(accessor)
if recount:
nrecords = None
def switch(record):
ary, idx = record
return (idx,), ary
data = data.values().zipWithIndex().map(switch)
else:
nrecords = reader.nfiles
return fromrdd(data, nrecords=nrecords, dims=dims, dtype=dtype, labels=labels, ordered=True)
else:
if accessor:
data = [accessor(d) for d in data]
flattened = list(itertools.chain(*data))
values = [kv[1] for kv in flattened]
return fromarray(values, labels=labels)
|
python
|
def frompath(path, accessor=None, ext=None, start=None, stop=None, recursive=False, npartitions=None, dims=None, dtype=None, labels=None, recount=False, engine=None, credentials=None):
"""
Load images from a path using the given accessor.
Supports both local and remote filesystems.
Parameters
----------
accessor : function
Apply to each item after loading to yield an image.
ext : str, optional, default=None
File extension.
npartitions : int, optional, default=None
Number of partitions for computational engine,
if None will use default for engine.
dims : tuple, optional, default=None
Dimensions of images.
dtype : str, optional, default=None
Numerical type of images.
labels : array, optional, default = None
Labels for records. If provided, should be one-dimensional.
start, stop : nonnegative int, optional, default=None
Indices of files to load, interpreted using Python slicing conventions.
recursive : boolean, optional, default=False
If true, will recursively descend directories from path, loading all files
with an extension matching 'ext'.
recount : boolean, optional, default=False
Force subsequent record counting.
"""
from thunder.readers import get_parallel_reader
reader = get_parallel_reader(path)(engine, credentials=credentials)
data = reader.read(path, ext=ext, start=start, stop=stop,
recursive=recursive, npartitions=npartitions)
if spark and isinstance(engine, spark):
if accessor:
data = data.flatMap(accessor)
if recount:
nrecords = None
def switch(record):
ary, idx = record
return (idx,), ary
data = data.values().zipWithIndex().map(switch)
else:
nrecords = reader.nfiles
return fromrdd(data, nrecords=nrecords, dims=dims, dtype=dtype, labels=labels, ordered=True)
else:
if accessor:
data = [accessor(d) for d in data]
flattened = list(itertools.chain(*data))
values = [kv[1] for kv in flattened]
return fromarray(values, labels=labels)
|
[
"def",
"frompath",
"(",
"path",
",",
"accessor",
"=",
"None",
",",
"ext",
"=",
"None",
",",
"start",
"=",
"None",
",",
"stop",
"=",
"None",
",",
"recursive",
"=",
"False",
",",
"npartitions",
"=",
"None",
",",
"dims",
"=",
"None",
",",
"dtype",
"=",
"None",
",",
"labels",
"=",
"None",
",",
"recount",
"=",
"False",
",",
"engine",
"=",
"None",
",",
"credentials",
"=",
"None",
")",
":",
"from",
"thunder",
".",
"readers",
"import",
"get_parallel_reader",
"reader",
"=",
"get_parallel_reader",
"(",
"path",
")",
"(",
"engine",
",",
"credentials",
"=",
"credentials",
")",
"data",
"=",
"reader",
".",
"read",
"(",
"path",
",",
"ext",
"=",
"ext",
",",
"start",
"=",
"start",
",",
"stop",
"=",
"stop",
",",
"recursive",
"=",
"recursive",
",",
"npartitions",
"=",
"npartitions",
")",
"if",
"spark",
"and",
"isinstance",
"(",
"engine",
",",
"spark",
")",
":",
"if",
"accessor",
":",
"data",
"=",
"data",
".",
"flatMap",
"(",
"accessor",
")",
"if",
"recount",
":",
"nrecords",
"=",
"None",
"def",
"switch",
"(",
"record",
")",
":",
"ary",
",",
"idx",
"=",
"record",
"return",
"(",
"idx",
",",
")",
",",
"ary",
"data",
"=",
"data",
".",
"values",
"(",
")",
".",
"zipWithIndex",
"(",
")",
".",
"map",
"(",
"switch",
")",
"else",
":",
"nrecords",
"=",
"reader",
".",
"nfiles",
"return",
"fromrdd",
"(",
"data",
",",
"nrecords",
"=",
"nrecords",
",",
"dims",
"=",
"dims",
",",
"dtype",
"=",
"dtype",
",",
"labels",
"=",
"labels",
",",
"ordered",
"=",
"True",
")",
"else",
":",
"if",
"accessor",
":",
"data",
"=",
"[",
"accessor",
"(",
"d",
")",
"for",
"d",
"in",
"data",
"]",
"flattened",
"=",
"list",
"(",
"itertools",
".",
"chain",
"(",
"*",
"data",
")",
")",
"values",
"=",
"[",
"kv",
"[",
"1",
"]",
"for",
"kv",
"in",
"flattened",
"]",
"return",
"fromarray",
"(",
"values",
",",
"labels",
"=",
"labels",
")"
] |
Load images from a path using the given accessor.
Supports both local and remote filesystems.
Parameters
----------
accessor : function
Apply to each item after loading to yield an image.
ext : str, optional, default=None
File extension.
npartitions : int, optional, default=None
Number of partitions for computational engine,
if None will use default for engine.
dims : tuple, optional, default=None
Dimensions of images.
dtype : str, optional, default=None
Numerical type of images.
labels : array, optional, default = None
Labels for records. If provided, should be one-dimensional.
start, stop : nonnegative int, optional, default=None
Indices of files to load, interpreted using Python slicing conventions.
recursive : boolean, optional, default=False
If true, will recursively descend directories from path, loading all files
with an extension matching 'ext'.
recount : boolean, optional, default=False
Force subsequent record counting.
|
[
"Load",
"images",
"from",
"a",
"path",
"using",
"the",
"given",
"accessor",
"."
] |
967ff8f3e7c2fabe1705743d95eb2746d4329786
|
https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/images/readers.py#L159-L221
|
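Editorial usage sketch for the frompath loader above (not a field of the dataset record): it loads raw .npy arrays through a custom accessor, mirroring the accessor pattern used by the PNG/TIF readers later in this file. The './arrays' directory and the .npy extension are hypothetical choices; the example assumes thunder and numpy are installed.

from io import BytesIO
import numpy as np
from thunder.images.readers import frompath

def getarray(idx_buffer_filename):
    # the accessor receives (index, raw bytes, filename) and yields (key, image) pairs
    idx, buf, _ = idx_buffer_filename
    yield (idx,), np.load(BytesIO(buf))

imgs = frompath('./arrays', accessor=getarray, ext='npy')  # hypothetical local directory
print(imgs.shape)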
12,266
|
thunder-project/thunder
|
thunder/images/readers.py
|
fromtif
|
def fromtif(path, ext='tif', start=None, stop=None, recursive=False, nplanes=None, npartitions=None, labels=None, engine=None, credentials=None, discard_extra=False):
"""
Loads images from single or multi-page TIF files.
Parameters
----------
path : str
Path to data files or directory, specified as either a local filesystem path
or in a URI-like format, including scheme. May include a single '*' wildcard character.
ext : string, optional, default = 'tif'
Extension required on data files to be loaded.
start, stop : nonnegative int, optional, default = None
Indices of the first and last-plus-one file to load, relative to the sorted
filenames matching 'path' and 'ext'. Interpreted using python slice indexing conventions.
recursive : boolean, optional, default = False
If true, will recursively descend directories from path, loading all files
with an extension matching 'ext'.
nplanes : positive integer, optional, default = None
If passed, will cause single files to be subdivided into nplanes separate images.
Otherwise, each file is taken to represent one image.
npartitions : int, optional, default = None
Number of partitions for computational engine,
if None will use default for engine.
labels : array, optional, default = None
Labels for records. If provided, should be one-dimensional.
discard_extra : boolean, optional, default = False
If True and nplanes doesn't divide by the number of pages in a multi-page tiff, the reminder will
be discarded and a warning will be shown. If False, it will raise an error
"""
from tifffile import TiffFile
if nplanes is not None and nplanes <= 0:
raise ValueError('nplanes must be positive if passed, got %d' % nplanes)
def getarray(idx_buffer_filename):
idx, buf, fname = idx_buffer_filename
fbuf = BytesIO(buf)
tfh = TiffFile(fbuf)
ary = tfh.asarray()
pageCount = ary.shape[0]
if nplanes is not None:
extra = pageCount % nplanes
if extra:
if discard_extra:
pageCount = pageCount - extra
logging.getLogger('thunder').warn('Ignored %d pages in file %s' % (extra, fname))
else:
raise ValueError("nplanes '%d' does not evenly divide '%d in file %s'" % (nplanes, pageCount,
fname))
values = [ary[i:(i+nplanes)] for i in range(0, pageCount, nplanes)]
else:
values = [ary]
tfh.close()
if ary.ndim == 3:
values = [val.squeeze() for val in values]
nvals = len(values)
keys = [(idx*nvals + timepoint,) for timepoint in range(nvals)]
return zip(keys, values)
recount = False if nplanes is None else True
data = frompath(path, accessor=getarray, ext=ext, start=start, stop=stop,
recursive=recursive, npartitions=npartitions, recount=recount,
labels=labels, engine=engine, credentials=credentials)
if engine is not None and npartitions is not None and data.npartitions() < npartitions:
data = data.repartition(npartitions)
return data
|
python
|
def fromtif(path, ext='tif', start=None, stop=None, recursive=False, nplanes=None, npartitions=None, labels=None, engine=None, credentials=None, discard_extra=False):
"""
Loads images from single or multi-page TIF files.
Parameters
----------
path : str
Path to data files or directory, specified as either a local filesystem path
or in a URI-like format, including scheme. May include a single '*' wildcard character.
ext : string, optional, default = 'tif'
Extension required on data files to be loaded.
start, stop : nonnegative int, optional, default = None
Indices of the first and last-plus-one file to load, relative to the sorted
filenames matching 'path' and 'ext'. Interpreted using python slice indexing conventions.
recursive : boolean, optional, default = False
If true, will recursively descend directories from path, loading all files
with an extension matching 'ext'.
nplanes : positive integer, optional, default = None
If passed, will cause single files to be subdivided into nplanes separate images.
Otherwise, each file is taken to represent one image.
npartitions : int, optional, default = None
Number of partitions for computational engine,
if None will use default for engine.
labels : array, optional, default = None
Labels for records. If provided, should be one-dimensional.
discard_extra : boolean, optional, default = False
If True and nplanes doesn't divide by the number of pages in a multi-page tiff, the reminder will
be discarded and a warning will be shown. If False, it will raise an error
"""
from tifffile import TiffFile
if nplanes is not None and nplanes <= 0:
raise ValueError('nplanes must be positive if passed, got %d' % nplanes)
def getarray(idx_buffer_filename):
idx, buf, fname = idx_buffer_filename
fbuf = BytesIO(buf)
tfh = TiffFile(fbuf)
ary = tfh.asarray()
pageCount = ary.shape[0]
if nplanes is not None:
extra = pageCount % nplanes
if extra:
if discard_extra:
pageCount = pageCount - extra
logging.getLogger('thunder').warn('Ignored %d pages in file %s' % (extra, fname))
else:
raise ValueError("nplanes '%d' does not evenly divide '%d in file %s'" % (nplanes, pageCount,
fname))
values = [ary[i:(i+nplanes)] for i in range(0, pageCount, nplanes)]
else:
values = [ary]
tfh.close()
if ary.ndim == 3:
values = [val.squeeze() for val in values]
nvals = len(values)
keys = [(idx*nvals + timepoint,) for timepoint in range(nvals)]
return zip(keys, values)
recount = False if nplanes is None else True
data = frompath(path, accessor=getarray, ext=ext, start=start, stop=stop,
recursive=recursive, npartitions=npartitions, recount=recount,
labels=labels, engine=engine, credentials=credentials)
if engine is not None and npartitions is not None and data.npartitions() < npartitions:
data = data.repartition(npartitions)
return data
|
[
"def",
"fromtif",
"(",
"path",
",",
"ext",
"=",
"'tif'",
",",
"start",
"=",
"None",
",",
"stop",
"=",
"None",
",",
"recursive",
"=",
"False",
",",
"nplanes",
"=",
"None",
",",
"npartitions",
"=",
"None",
",",
"labels",
"=",
"None",
",",
"engine",
"=",
"None",
",",
"credentials",
"=",
"None",
",",
"discard_extra",
"=",
"False",
")",
":",
"from",
"tifffile",
"import",
"TiffFile",
"if",
"nplanes",
"is",
"not",
"None",
"and",
"nplanes",
"<=",
"0",
":",
"raise",
"ValueError",
"(",
"'nplanes must be positive if passed, got %d'",
"%",
"nplanes",
")",
"def",
"getarray",
"(",
"idx_buffer_filename",
")",
":",
"idx",
",",
"buf",
",",
"fname",
"=",
"idx_buffer_filename",
"fbuf",
"=",
"BytesIO",
"(",
"buf",
")",
"tfh",
"=",
"TiffFile",
"(",
"fbuf",
")",
"ary",
"=",
"tfh",
".",
"asarray",
"(",
")",
"pageCount",
"=",
"ary",
".",
"shape",
"[",
"0",
"]",
"if",
"nplanes",
"is",
"not",
"None",
":",
"extra",
"=",
"pageCount",
"%",
"nplanes",
"if",
"extra",
":",
"if",
"discard_extra",
":",
"pageCount",
"=",
"pageCount",
"-",
"extra",
"logging",
".",
"getLogger",
"(",
"'thunder'",
")",
".",
"warn",
"(",
"'Ignored %d pages in file %s'",
"%",
"(",
"extra",
",",
"fname",
")",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"nplanes '%d' does not evenly divide '%d in file %s'\"",
"%",
"(",
"nplanes",
",",
"pageCount",
",",
"fname",
")",
")",
"values",
"=",
"[",
"ary",
"[",
"i",
":",
"(",
"i",
"+",
"nplanes",
")",
"]",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"pageCount",
",",
"nplanes",
")",
"]",
"else",
":",
"values",
"=",
"[",
"ary",
"]",
"tfh",
".",
"close",
"(",
")",
"if",
"ary",
".",
"ndim",
"==",
"3",
":",
"values",
"=",
"[",
"val",
".",
"squeeze",
"(",
")",
"for",
"val",
"in",
"values",
"]",
"nvals",
"=",
"len",
"(",
"values",
")",
"keys",
"=",
"[",
"(",
"idx",
"*",
"nvals",
"+",
"timepoint",
",",
")",
"for",
"timepoint",
"in",
"range",
"(",
"nvals",
")",
"]",
"return",
"zip",
"(",
"keys",
",",
"values",
")",
"recount",
"=",
"False",
"if",
"nplanes",
"is",
"None",
"else",
"True",
"data",
"=",
"frompath",
"(",
"path",
",",
"accessor",
"=",
"getarray",
",",
"ext",
"=",
"ext",
",",
"start",
"=",
"start",
",",
"stop",
"=",
"stop",
",",
"recursive",
"=",
"recursive",
",",
"npartitions",
"=",
"npartitions",
",",
"recount",
"=",
"recount",
",",
"labels",
"=",
"labels",
",",
"engine",
"=",
"engine",
",",
"credentials",
"=",
"credentials",
")",
"if",
"engine",
"is",
"not",
"None",
"and",
"npartitions",
"is",
"not",
"None",
"and",
"data",
".",
"npartitions",
"(",
")",
"<",
"npartitions",
":",
"data",
"=",
"data",
".",
"repartition",
"(",
"npartitions",
")",
"return",
"data"
] |
Loads images from single or multi-page TIF files.
Parameters
----------
path : str
Path to data files or directory, specified as either a local filesystem path
or in a URI-like format, including scheme. May include a single '*' wildcard character.
ext : string, optional, default = 'tif'
Extension required on data files to be loaded.
start, stop : nonnegative int, optional, default = None
Indices of the first and last-plus-one file to load, relative to the sorted
filenames matching 'path' and 'ext'. Interpreted using python slice indexing conventions.
recursive : boolean, optional, default = False
If true, will recursively descend directories from path, loading all files
with an extension matching 'ext'.
nplanes : positive integer, optional, default = None
If passed, will cause single files to be subdivided into nplanes separate images.
Otherwise, each file is taken to represent one image.
npartitions : int, optional, default = None
Number of partitions for computational engine,
if None will use default for engine.
labels : array, optional, default = None
Labels for records. If provided, should be one-dimensional.
discard_extra : boolean, optional, default = False
If True and nplanes does not evenly divide the number of pages in a multi-page tiff, the remainder will
be discarded and a warning will be shown. If False, an error will be raised.
|
[
"Loads",
"images",
"from",
"single",
"or",
"multi",
"-",
"page",
"TIF",
"files",
"."
] |
967ff8f3e7c2fabe1705743d95eb2746d4329786
|
https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/images/readers.py#L323-L397
|
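Editorial usage sketch for fromtif: the './stacks' path is hypothetical and assumed to hold multi-page TIF files, and tifffile must be installed for the reader above to work.

from thunder.images.readers import fromtif

# split each multi-page TIF into 4-plane volumes, dropping any leftover pages
imgs = fromtif('./stacks', nplanes=4, discard_extra=True)
print(imgs.shape)  # (nrecords, 4, height, width)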
12,267
|
thunder-project/thunder
|
thunder/images/readers.py
|
frompng
|
def frompng(path, ext='png', start=None, stop=None, recursive=False, npartitions=None, labels=None, engine=None, credentials=None):
"""
Load images from PNG files.
Parameters
----------
path : str
Path to data files or directory, specified as either a local filesystem path
or in a URI-like format, including scheme. May include a single '*' wildcard character.
ext : string, optional, default = 'tif'
Extension required on data files to be loaded.
start, stop : nonnegative int, optional, default = None
Indices of the first and last-plus-one file to load, relative to the sorted
filenames matching `path` and `ext`. Interpreted using python slice indexing conventions.
recursive : boolean, optional, default = False
If true, will recursively descend directories from path, loading all files
with an extension matching 'ext'.
npartitions : int, optional, default = None
Number of partitions for computational engine,
if None will use default for engine.
labels : array, optional, default = None
Labels for records. If provided, should be one-dimensional.
"""
from scipy.misc import imread
def getarray(idx_buffer_filename):
idx, buf, _ = idx_buffer_filename
fbuf = BytesIO(buf)
yield (idx,), imread(fbuf)
return frompath(path, accessor=getarray, ext=ext, start=start,
stop=stop, recursive=recursive, npartitions=npartitions,
labels=labels, engine=engine, credentials=credentials)
|
python
|
def frompng(path, ext='png', start=None, stop=None, recursive=False, npartitions=None, labels=None, engine=None, credentials=None):
"""
Load images from PNG files.
Parameters
----------
path : str
Path to data files or directory, specified as either a local filesystem path
or in a URI-like format, including scheme. May include a single '*' wildcard character.
ext : string, optional, default = 'tif'
Extension required on data files to be loaded.
start, stop : nonnegative int, optional, default = None
Indices of the first and last-plus-one file to load, relative to the sorted
filenames matching `path` and `ext`. Interpreted using python slice indexing conventions.
recursive : boolean, optional, default = False
If true, will recursively descend directories from path, loading all files
with an extension matching 'ext'.
npartitions : int, optional, default = None
Number of partitions for computational engine,
if None will use default for engine.
labels : array, optional, default = None
Labels for records. If provided, should be one-dimensional.
"""
from scipy.misc import imread
def getarray(idx_buffer_filename):
idx, buf, _ = idx_buffer_filename
fbuf = BytesIO(buf)
yield (idx,), imread(fbuf)
return frompath(path, accessor=getarray, ext=ext, start=start,
stop=stop, recursive=recursive, npartitions=npartitions,
labels=labels, engine=engine, credentials=credentials)
|
[
"def",
"frompng",
"(",
"path",
",",
"ext",
"=",
"'png'",
",",
"start",
"=",
"None",
",",
"stop",
"=",
"None",
",",
"recursive",
"=",
"False",
",",
"npartitions",
"=",
"None",
",",
"labels",
"=",
"None",
",",
"engine",
"=",
"None",
",",
"credentials",
"=",
"None",
")",
":",
"from",
"scipy",
".",
"misc",
"import",
"imread",
"def",
"getarray",
"(",
"idx_buffer_filename",
")",
":",
"idx",
",",
"buf",
",",
"_",
"=",
"idx_buffer_filename",
"fbuf",
"=",
"BytesIO",
"(",
"buf",
")",
"yield",
"(",
"idx",
",",
")",
",",
"imread",
"(",
"fbuf",
")",
"return",
"frompath",
"(",
"path",
",",
"accessor",
"=",
"getarray",
",",
"ext",
"=",
"ext",
",",
"start",
"=",
"start",
",",
"stop",
"=",
"stop",
",",
"recursive",
"=",
"recursive",
",",
"npartitions",
"=",
"npartitions",
",",
"labels",
"=",
"labels",
",",
"engine",
"=",
"engine",
",",
"credentials",
"=",
"credentials",
")"
] |
Load images from PNG files.
Parameters
----------
path : str
Path to data files or directory, specified as either a local filesystem path
or in a URI-like format, including scheme. May include a single '*' wildcard character.
ext : string, optional, default = 'png'
Extension required on data files to be loaded.
start, stop : nonnegative int, optional, default = None
Indices of the first and last-plus-one file to load, relative to the sorted
filenames matching `path` and `ext`. Interpreted using python slice indexing conventions.
recursive : boolean, optional, default = False
If true, will recursively descend directories from path, loading all files
with an extension matching 'ext'.
npartitions : int, optional, default = None
Number of partitions for computational engine,
if None will use default for engine.
labels : array, optional, default = None
Labels for records. If provided, should be one-dimensional.
|
[
"Load",
"images",
"from",
"PNG",
"files",
"."
] |
967ff8f3e7c2fabe1705743d95eb2746d4329786
|
https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/images/readers.py#L399-L436
|
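Editorial usage sketch for frompng: the './frames' directory is hypothetical, and note that the reader above depends on scipy.misc.imread, which only exists in older SciPy releases.

from thunder.images.readers import frompng

imgs = frompng('./frames')  # hypothetical directory of PNG frames
print(imgs.shape)           # (nrecords, height, width[, channels])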
12,268
|
thunder-project/thunder
|
thunder/images/readers.py
|
fromrandom
|
def fromrandom(shape=(10, 50, 50), npartitions=1, seed=42, engine=None):
"""
Generate random image data.
Parameters
----------
shape : tuple, optional, default=(10, 50, 50)
Dimensions of images.
npartitions : int, optional, default=1
Number of partitions.
seed : int, optional, default=42
Random seed.
"""
seed = hash(seed)
def generate(v):
random.seed(seed + v)
return random.randn(*shape[1:])
return fromlist(range(shape[0]), accessor=generate, npartitions=npartitions, engine=engine)
|
python
|
def fromrandom(shape=(10, 50, 50), npartitions=1, seed=42, engine=None):
"""
Generate random image data.
Parameters
----------
shape : tuple, optional, default=(10, 50, 50)
Dimensions of images.
npartitions : int, optional, default=1
Number of partitions.
seed : int, optional, default=42
Random seed.
"""
seed = hash(seed)
def generate(v):
random.seed(seed + v)
return random.randn(*shape[1:])
return fromlist(range(shape[0]), accessor=generate, npartitions=npartitions, engine=engine)
|
[
"def",
"fromrandom",
"(",
"shape",
"=",
"(",
"10",
",",
"50",
",",
"50",
")",
",",
"npartitions",
"=",
"1",
",",
"seed",
"=",
"42",
",",
"engine",
"=",
"None",
")",
":",
"seed",
"=",
"hash",
"(",
"seed",
")",
"def",
"generate",
"(",
"v",
")",
":",
"random",
".",
"seed",
"(",
"seed",
"+",
"v",
")",
"return",
"random",
".",
"randn",
"(",
"*",
"shape",
"[",
"1",
":",
"]",
")",
"return",
"fromlist",
"(",
"range",
"(",
"shape",
"[",
"0",
"]",
")",
",",
"accessor",
"=",
"generate",
",",
"npartitions",
"=",
"npartitions",
",",
"engine",
"=",
"engine",
")"
] |
Generate random image data.
Parameters
----------
shape : tuple, optional, default=(10, 50, 50)
Dimensions of images.
npartitions : int, optional, default=1
Number of partitions.
seed : int, optional, default=42
Random seed.
|
[
"Generate",
"random",
"image",
"data",
"."
] |
967ff8f3e7c2fabe1705743d95eb2746d4329786
|
https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/images/readers.py#L438-L459
|
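Editorial usage sketch for fromrandom; it needs no external data, so it runs locally with only thunder installed.

from thunder.images.readers import fromrandom

imgs = fromrandom(shape=(5, 32, 32), seed=0)  # 5 random 32x32 images, deterministic per seed
print(imgs.shape)                             # (5, 32, 32)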
12,269
|
thunder-project/thunder
|
thunder/images/readers.py
|
fromexample
|
def fromexample(name=None, engine=None):
"""
Load example image data.
Data are downloaded from S3, so this method requires an internet connection.
Parameters
----------
name : str
Name of dataset, if not specified will print options.
engine : object, default = None
Computational engine (e.g. a SparkContext for Spark)
"""
datasets = ['mouse', 'fish']
if name is None:
print('Availiable example image datasets')
for d in datasets:
print('- ' + d)
return
check_options(name, datasets)
path = 's3n://thunder-sample-data/images/' + name
if name == 'mouse':
data = frombinary(path=path, npartitions=1, order='F', engine=engine)
if name == 'fish':
data = fromtif(path=path, npartitions=1, engine=engine)
if spark and isinstance(engine, spark):
data.cache()
data.compute()
return data
|
python
|
def fromexample(name=None, engine=None):
"""
Load example image data.
Data are downloaded from S3, so this method requires an internet connection.
Parameters
----------
name : str
Name of dataset, if not specified will print options.
engine : object, default = None
Computational engine (e.g. a SparkContext for Spark)
"""
datasets = ['mouse', 'fish']
if name is None:
print('Availiable example image datasets')
for d in datasets:
print('- ' + d)
return
check_options(name, datasets)
path = 's3n://thunder-sample-data/images/' + name
if name == 'mouse':
data = frombinary(path=path, npartitions=1, order='F', engine=engine)
if name == 'fish':
data = fromtif(path=path, npartitions=1, engine=engine)
if spark and isinstance(engine, spark):
data.cache()
data.compute()
return data
|
[
"def",
"fromexample",
"(",
"name",
"=",
"None",
",",
"engine",
"=",
"None",
")",
":",
"datasets",
"=",
"[",
"'mouse'",
",",
"'fish'",
"]",
"if",
"name",
"is",
"None",
":",
"print",
"(",
"'Availiable example image datasets'",
")",
"for",
"d",
"in",
"datasets",
":",
"print",
"(",
"'- '",
"+",
"d",
")",
"return",
"check_options",
"(",
"name",
",",
"datasets",
")",
"path",
"=",
"'s3n://thunder-sample-data/images/'",
"+",
"name",
"if",
"name",
"==",
"'mouse'",
":",
"data",
"=",
"frombinary",
"(",
"path",
"=",
"path",
",",
"npartitions",
"=",
"1",
",",
"order",
"=",
"'F'",
",",
"engine",
"=",
"engine",
")",
"if",
"name",
"==",
"'fish'",
":",
"data",
"=",
"fromtif",
"(",
"path",
"=",
"path",
",",
"npartitions",
"=",
"1",
",",
"engine",
"=",
"engine",
")",
"if",
"spark",
"and",
"isinstance",
"(",
"engine",
",",
"spark",
")",
":",
"data",
".",
"cache",
"(",
")",
"data",
".",
"compute",
"(",
")",
"return",
"data"
] |
Load example image data.
Data are downloaded from S3, so this method requires an internet connection.
Parameters
----------
name : str
Name of dataset, if not specified will print options.
engine : object, default = None
Computational engine (e.g. a SparkContext for Spark)
|
[
"Load",
"example",
"image",
"data",
"."
] |
967ff8f3e7c2fabe1705743d95eb2746d4329786
|
https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/images/readers.py#L461-L497
|
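Editorial usage sketch for fromexample; downloading the sample data requires an internet connection and access to the public S3 bucket referenced above.

from thunder.images.readers import fromexample

fromexample()               # prints the available dataset names ('mouse', 'fish')
fish = fromexample('fish')  # downloads and loads the sample TIF stack from S3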
12,270
|
thunder-project/thunder
|
thunder/blocks/local.py
|
LocalChunks.unchunk
|
def unchunk(self):
"""
Reconstitute the chunked array back into a full ndarray.
Returns
-------
ndarray
"""
if self.padding != len(self.shape)*(0,):
shape = self.values.shape
arr = empty(shape, dtype=object)
for inds in product(*[arange(s) for s in shape]):
slices = []
for i, p, n in zip(inds, self.padding, shape):
start = None if (i == 0 or p == 0) else p
stop = None if (i == n-1 or p == 0) else -p
slices.append(slice(start, stop, None))
arr[inds] = self.values[inds][tuple(slices)]
else:
arr = self.values
return allstack(arr.tolist())
|
python
|
def unchunk(self):
"""
Reconstitute the chunked array back into a full ndarray.
Returns
-------
ndarray
"""
if self.padding != len(self.shape)*(0,):
shape = self.values.shape
arr = empty(shape, dtype=object)
for inds in product(*[arange(s) for s in shape]):
slices = []
for i, p, n in zip(inds, self.padding, shape):
start = None if (i == 0 or p == 0) else p
stop = None if (i == n-1 or p == 0) else -p
slices.append(slice(start, stop, None))
arr[inds] = self.values[inds][tuple(slices)]
else:
arr = self.values
return allstack(arr.tolist())
|
[
"def",
"unchunk",
"(",
"self",
")",
":",
"if",
"self",
".",
"padding",
"!=",
"len",
"(",
"self",
".",
"shape",
")",
"*",
"(",
"0",
",",
")",
":",
"shape",
"=",
"self",
".",
"values",
".",
"shape",
"arr",
"=",
"empty",
"(",
"shape",
",",
"dtype",
"=",
"object",
")",
"for",
"inds",
"in",
"product",
"(",
"*",
"[",
"arange",
"(",
"s",
")",
"for",
"s",
"in",
"shape",
"]",
")",
":",
"slices",
"=",
"[",
"]",
"for",
"i",
",",
"p",
",",
"n",
"in",
"zip",
"(",
"inds",
",",
"self",
".",
"padding",
",",
"shape",
")",
":",
"start",
"=",
"None",
"if",
"(",
"i",
"==",
"0",
"or",
"p",
"==",
"0",
")",
"else",
"p",
"stop",
"=",
"None",
"if",
"(",
"i",
"==",
"n",
"-",
"1",
"or",
"p",
"==",
"0",
")",
"else",
"-",
"p",
"slices",
".",
"append",
"(",
"slice",
"(",
"start",
",",
"stop",
",",
"None",
")",
")",
"arr",
"[",
"inds",
"]",
"=",
"self",
".",
"values",
"[",
"inds",
"]",
"[",
"tuple",
"(",
"slices",
")",
"]",
"else",
":",
"arr",
"=",
"self",
".",
"values",
"return",
"allstack",
"(",
"arr",
".",
"tolist",
"(",
")",
")"
] |
Reconstitute the chunked array back into a full ndarray.
Returns
-------
ndarray
|
[
"Reconstitute",
"the",
"chunked",
"array",
"back",
"into",
"a",
"full",
"ndarray",
"."
] |
967ff8f3e7c2fabe1705743d95eb2746d4329786
|
https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/blocks/local.py#L54-L75
|
12,271
|
thunder-project/thunder
|
thunder/blocks/local.py
|
LocalChunks.chunk
|
def chunk(arr, chunk_size="150", padding=None):
"""
Created a chunked array from a full array and a chunk size.
Parameters
----------
array : ndarray
Array that will be broken into chunks
chunk_size : string or tuple, default = '150'
Size of each image chunk.
If a str, size of memory footprint in KB.
If a tuple, then the dimensions of each chunk.
If an int, then all dimensions will use this number
padding : tuple or int
Amount of padding along each dimensions for chunks. If an int, then
the same amount of padding is used for all dimensions
Returns
-------
LocalChunks
"""
plan, _ = LocalChunks.getplan(chunk_size, arr.shape[1:], arr.dtype)
plan = r_[arr.shape[0], plan]
if padding is None:
pad = arr.ndim*(0,)
elif isinstance(padding, int):
pad = (0,) + (arr.ndim-1)*(padding,)
else:
pad = (0,) + padding
shape = arr.shape
if any([x + y > z for x, y, z in zip(plan, pad, shape)]):
raise ValueError("Chunk sizes %s plus padding sizes %s cannot exceed value dimensions %s along any axis"
% (tuple(plan), tuple(pad), tuple(shape)))
if any([x > y for x, y in zip(pad, plan)]):
raise ValueError("Padding sizes %s cannot exceed chunk sizes %s along any axis"
% (tuple(pad), tuple(plan)))
def rectify(x):
x[x<0] = 0
return x
breaks = [r_[arange(0, n, s), n] for n, s in zip(shape, plan)]
limits = [zip(rectify(b[:-1]-p), b[1:]+p) for b, p in zip(breaks, pad)]
slices = product(*[[slice(x[0], x[1]) for x in l] for l in limits])
vals = [arr[s] for s in slices]
newarr = empty(len(vals), dtype=object)
for i in range(len(vals)):
newarr[i] = vals[i]
newsize = [b.shape[0]-1 for b in breaks]
newarr = newarr.reshape(*newsize)
return LocalChunks(newarr, shape, plan, dtype=arr.dtype, padding=pad)
|
python
|
def chunk(arr, chunk_size="150", padding=None):
"""
Created a chunked array from a full array and a chunk size.
Parameters
----------
array : ndarray
Array that will be broken into chunks
chunk_size : string or tuple, default = '150'
Size of each image chunk.
If a str, size of memory footprint in KB.
If a tuple, then the dimensions of each chunk.
If an int, then all dimensions will use this number
padding : tuple or int
Amount of padding along each dimensions for chunks. If an int, then
the same amount of padding is used for all dimensions
Returns
-------
LocalChunks
"""
plan, _ = LocalChunks.getplan(chunk_size, arr.shape[1:], arr.dtype)
plan = r_[arr.shape[0], plan]
if padding is None:
pad = arr.ndim*(0,)
elif isinstance(padding, int):
pad = (0,) + (arr.ndim-1)*(padding,)
else:
pad = (0,) + padding
shape = arr.shape
if any([x + y > z for x, y, z in zip(plan, pad, shape)]):
raise ValueError("Chunk sizes %s plus padding sizes %s cannot exceed value dimensions %s along any axis"
% (tuple(plan), tuple(pad), tuple(shape)))
if any([x > y for x, y in zip(pad, plan)]):
raise ValueError("Padding sizes %s cannot exceed chunk sizes %s along any axis"
% (tuple(pad), tuple(plan)))
def rectify(x):
x[x<0] = 0
return x
breaks = [r_[arange(0, n, s), n] for n, s in zip(shape, plan)]
limits = [zip(rectify(b[:-1]-p), b[1:]+p) for b, p in zip(breaks, pad)]
slices = product(*[[slice(x[0], x[1]) for x in l] for l in limits])
vals = [arr[s] for s in slices]
newarr = empty(len(vals), dtype=object)
for i in range(len(vals)):
newarr[i] = vals[i]
newsize = [b.shape[0]-1 for b in breaks]
newarr = newarr.reshape(*newsize)
return LocalChunks(newarr, shape, plan, dtype=arr.dtype, padding=pad)
|
[
"def",
"chunk",
"(",
"arr",
",",
"chunk_size",
"=",
"\"150\"",
",",
"padding",
"=",
"None",
")",
":",
"plan",
",",
"_",
"=",
"LocalChunks",
".",
"getplan",
"(",
"chunk_size",
",",
"arr",
".",
"shape",
"[",
"1",
":",
"]",
",",
"arr",
".",
"dtype",
")",
"plan",
"=",
"r_",
"[",
"arr",
".",
"shape",
"[",
"0",
"]",
",",
"plan",
"]",
"if",
"padding",
"is",
"None",
":",
"pad",
"=",
"arr",
".",
"ndim",
"*",
"(",
"0",
",",
")",
"elif",
"isinstance",
"(",
"padding",
",",
"int",
")",
":",
"pad",
"=",
"(",
"0",
",",
")",
"+",
"(",
"arr",
".",
"ndim",
"-",
"1",
")",
"*",
"(",
"padding",
",",
")",
"else",
":",
"pad",
"=",
"(",
"0",
",",
")",
"+",
"padding",
"shape",
"=",
"arr",
".",
"shape",
"if",
"any",
"(",
"[",
"x",
"+",
"y",
">",
"z",
"for",
"x",
",",
"y",
",",
"z",
"in",
"zip",
"(",
"plan",
",",
"pad",
",",
"shape",
")",
"]",
")",
":",
"raise",
"ValueError",
"(",
"\"Chunk sizes %s plus padding sizes %s cannot exceed value dimensions %s along any axis\"",
"%",
"(",
"tuple",
"(",
"plan",
")",
",",
"tuple",
"(",
"pad",
")",
",",
"tuple",
"(",
"shape",
")",
")",
")",
"if",
"any",
"(",
"[",
"x",
">",
"y",
"for",
"x",
",",
"y",
"in",
"zip",
"(",
"pad",
",",
"plan",
")",
"]",
")",
":",
"raise",
"ValueError",
"(",
"\"Padding sizes %s cannot exceed chunk sizes %s along any axis\"",
"%",
"(",
"tuple",
"(",
"pad",
")",
",",
"tuple",
"(",
"plan",
")",
")",
")",
"def",
"rectify",
"(",
"x",
")",
":",
"x",
"[",
"x",
"<",
"0",
"]",
"=",
"0",
"return",
"x",
"breaks",
"=",
"[",
"r_",
"[",
"arange",
"(",
"0",
",",
"n",
",",
"s",
")",
",",
"n",
"]",
"for",
"n",
",",
"s",
"in",
"zip",
"(",
"shape",
",",
"plan",
")",
"]",
"limits",
"=",
"[",
"zip",
"(",
"rectify",
"(",
"b",
"[",
":",
"-",
"1",
"]",
"-",
"p",
")",
",",
"b",
"[",
"1",
":",
"]",
"+",
"p",
")",
"for",
"b",
",",
"p",
"in",
"zip",
"(",
"breaks",
",",
"pad",
")",
"]",
"slices",
"=",
"product",
"(",
"*",
"[",
"[",
"slice",
"(",
"x",
"[",
"0",
"]",
",",
"x",
"[",
"1",
"]",
")",
"for",
"x",
"in",
"l",
"]",
"for",
"l",
"in",
"limits",
"]",
")",
"vals",
"=",
"[",
"arr",
"[",
"s",
"]",
"for",
"s",
"in",
"slices",
"]",
"newarr",
"=",
"empty",
"(",
"len",
"(",
"vals",
")",
",",
"dtype",
"=",
"object",
")",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"vals",
")",
")",
":",
"newarr",
"[",
"i",
"]",
"=",
"vals",
"[",
"i",
"]",
"newsize",
"=",
"[",
"b",
".",
"shape",
"[",
"0",
"]",
"-",
"1",
"for",
"b",
"in",
"breaks",
"]",
"newarr",
"=",
"newarr",
".",
"reshape",
"(",
"*",
"newsize",
")",
"return",
"LocalChunks",
"(",
"newarr",
",",
"shape",
",",
"plan",
",",
"dtype",
"=",
"arr",
".",
"dtype",
",",
"padding",
"=",
"pad",
")"
] |
Created a chunked array from a full array and a chunk size.
Parameters
----------
array : ndarray
Array that will be broken into chunks
chunk_size : string or tuple, default = '150'
Size of each image chunk.
If a str, size of memory footprint in KB.
If a tuple, then the dimensions of each chunk.
If an int, then all dimensions will use this number
padding : tuple or int
Amount of padding along each dimension for chunks. If an int, then
the same amount of padding is used for all dimensions.
Returns
-------
LocalChunks
|
[
"Created",
"a",
"chunked",
"array",
"from",
"a",
"full",
"array",
"and",
"a",
"chunk",
"size",
"."
] |
967ff8f3e7c2fabe1705743d95eb2746d4329786
|
https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/blocks/local.py#L121-L178
|
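Editorial round-trip sketch covering LocalChunks.chunk together with the unchunk method recorded earlier; it assumes getplan accepts an explicit per-axis chunk shape, as the chunk_size docstring states.

import numpy as np
from thunder.blocks.local import LocalChunks

arr = np.arange(4 * 32 * 32).reshape(4, 32, 32)
# 16x16 spatial chunks with 2 pixels of padding along each spatial axis
chunks = LocalChunks.chunk(arr, chunk_size=(16, 16), padding=2)
restored = chunks.unchunk()            # padding is trimmed before reassembly
assert np.array_equal(arr, restored)   # lossless round trip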
12,272
|
thunder-project/thunder
|
thunder/base.py
|
Data.filter
|
def filter(self, func):
"""
Filter array along an axis.
Applies a function which should evaluate to boolean,
along a single axis or multiple axes. Array will be
aligned so that the desired set of axes are in the
keys, which may require a transpose/reshape.
Parameters
----------
func : function
Function to apply, should return boolean
"""
if self.mode == 'local':
reshaped = self._align(self.baseaxes)
filtered = asarray(list(filter(func, reshaped)))
if self.labels is not None:
mask = asarray(list(map(func, reshaped)))
if self.mode == 'spark':
sort = False if self.labels is None else True
filtered = self.values.filter(func, axis=self.baseaxes, sort=sort)
if self.labels is not None:
keys, vals = zip(*self.values.map(func, axis=self.baseaxes, value_shape=(1,)).tordd().collect())
perm = sorted(range(len(keys)), key=keys.__getitem__)
mask = asarray(vals)[perm]
if self.labels is not None:
s1 = prod(self.baseshape)
newlabels = self.labels.reshape(s1, 1)[mask].squeeze()
else:
newlabels = None
return self._constructor(filtered, labels=newlabels).__finalize__(self, noprop=('labels',))
|
python
|
def filter(self, func):
"""
Filter array along an axis.
Applies a function which should evaluate to boolean,
along a single axis or multiple axes. Array will be
aligned so that the desired set of axes are in the
keys, which may require a transpose/reshape.
Parameters
----------
func : function
Function to apply, should return boolean
"""
if self.mode == 'local':
reshaped = self._align(self.baseaxes)
filtered = asarray(list(filter(func, reshaped)))
if self.labels is not None:
mask = asarray(list(map(func, reshaped)))
if self.mode == 'spark':
sort = False if self.labels is None else True
filtered = self.values.filter(func, axis=self.baseaxes, sort=sort)
if self.labels is not None:
keys, vals = zip(*self.values.map(func, axis=self.baseaxes, value_shape=(1,)).tordd().collect())
perm = sorted(range(len(keys)), key=keys.__getitem__)
mask = asarray(vals)[perm]
if self.labels is not None:
s1 = prod(self.baseshape)
newlabels = self.labels.reshape(s1, 1)[mask].squeeze()
else:
newlabels = None
return self._constructor(filtered, labels=newlabels).__finalize__(self, noprop=('labels',))
|
[
"def",
"filter",
"(",
"self",
",",
"func",
")",
":",
"if",
"self",
".",
"mode",
"==",
"'local'",
":",
"reshaped",
"=",
"self",
".",
"_align",
"(",
"self",
".",
"baseaxes",
")",
"filtered",
"=",
"asarray",
"(",
"list",
"(",
"filter",
"(",
"func",
",",
"reshaped",
")",
")",
")",
"if",
"self",
".",
"labels",
"is",
"not",
"None",
":",
"mask",
"=",
"asarray",
"(",
"list",
"(",
"map",
"(",
"func",
",",
"reshaped",
")",
")",
")",
"if",
"self",
".",
"mode",
"==",
"'spark'",
":",
"sort",
"=",
"False",
"if",
"self",
".",
"labels",
"is",
"None",
"else",
"True",
"filtered",
"=",
"self",
".",
"values",
".",
"filter",
"(",
"func",
",",
"axis",
"=",
"self",
".",
"baseaxes",
",",
"sort",
"=",
"sort",
")",
"if",
"self",
".",
"labels",
"is",
"not",
"None",
":",
"keys",
",",
"vals",
"=",
"zip",
"(",
"*",
"self",
".",
"values",
".",
"map",
"(",
"func",
",",
"axis",
"=",
"self",
".",
"baseaxes",
",",
"value_shape",
"=",
"(",
"1",
",",
")",
")",
".",
"tordd",
"(",
")",
".",
"collect",
"(",
")",
")",
"perm",
"=",
"sorted",
"(",
"range",
"(",
"len",
"(",
"keys",
")",
")",
",",
"key",
"=",
"keys",
".",
"__getitem__",
")",
"mask",
"=",
"asarray",
"(",
"vals",
")",
"[",
"perm",
"]",
"if",
"self",
".",
"labels",
"is",
"not",
"None",
":",
"s1",
"=",
"prod",
"(",
"self",
".",
"baseshape",
")",
"newlabels",
"=",
"self",
".",
"labels",
".",
"reshape",
"(",
"s1",
",",
"1",
")",
"[",
"mask",
"]",
".",
"squeeze",
"(",
")",
"else",
":",
"newlabels",
"=",
"None",
"return",
"self",
".",
"_constructor",
"(",
"filtered",
",",
"labels",
"=",
"newlabels",
")",
".",
"__finalize__",
"(",
"self",
",",
"noprop",
"=",
"(",
"'labels'",
",",
")",
")"
] |
Filter array along an axis.
Applies a function which should evaluate to boolean,
along a single axis or multiple axes. Array will be
aligned so that the desired set of axes are in the
keys, which may require a transpose/reshape.
Parameters
----------
func : function
Function to apply, should return boolean
|
[
"Filter",
"array",
"along",
"an",
"axis",
"."
] |
967ff8f3e7c2fabe1705743d95eb2746d4329786
|
https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/base.py#L372-L410
|
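Editorial usage sketch for Data.filter; because filter lives on the Data base class it is available on Images and Series alike, and random data keeps the example fully local.

from thunder.images.readers import fromrandom

imgs = fromrandom(shape=(10, 16, 16), seed=1)
bright = imgs.filter(lambda im: im.mean() > 0)  # keep only images with positive mean
print(bright.shape)                             # (n_kept, 16, 16)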
12,273
|
thunder-project/thunder
|
thunder/base.py
|
Data.map
|
def map(self, func, value_shape=None, dtype=None, with_keys=False):
"""
Apply an array -> array function across an axis.
Array will be aligned so that the desired set of axes
are in the keys, which may require a transpose/reshape.
Parameters
----------
func : function
Function of a single array to apply. If with_keys=True,
function should be of a (tuple, array) pair.
axis : tuple or int, optional, default=(0,)
Axis or multiple axes to apply function along.
value_shape : tuple, optional, default=None
Known shape of values resulting from operation. Only
valid in spark mode.
dtype : numpy dtype, optional, default=None
Known shape of dtype resulting from operation. Only
valid in spark mode.
with_keys : bool, optional, default=False
Include keys as an argument to the function
"""
axis = self.baseaxes
if self.mode == 'local':
axes = sorted(tupleize(axis))
key_shape = [self.shape[axis] for axis in axes]
reshaped = self._align(axes, key_shape=key_shape)
if with_keys:
keys = zip(*unravel_index(range(prod(key_shape)), key_shape))
mapped = asarray(list(map(func, zip(keys, reshaped))))
else:
mapped = asarray(list(map(func, reshaped)))
try:
elem_shape = mapped[0].shape
except:
elem_shape = (1,)
expand = list(elem_shape)
expand = [1] if len(expand) == 0 else expand
# invert the previous reshape operation, using the shape of the map result
linearized_shape_inv = key_shape + expand
reordered = mapped.reshape(*linearized_shape_inv)
return self._constructor(reordered, mode=self.mode).__finalize__(self, noprop=('index'))
if self.mode == 'spark':
expand = lambda x: array(func(x), ndmin=1)
mapped = self.values.map(expand, axis, value_shape, dtype, with_keys)
return self._constructor(mapped, mode=self.mode).__finalize__(self, noprop=('index',))
|
python
|
def map(self, func, value_shape=None, dtype=None, with_keys=False):
"""
Apply an array -> array function across an axis.
Array will be aligned so that the desired set of axes
are in the keys, which may require a transpose/reshape.
Parameters
----------
func : function
Function of a single array to apply. If with_keys=True,
function should be of a (tuple, array) pair.
axis : tuple or int, optional, default=(0,)
Axis or multiple axes to apply function along.
value_shape : tuple, optional, default=None
Known shape of values resulting from operation. Only
valid in spark mode.
dtype : numpy dtype, optional, default=None
Known shape of dtype resulting from operation. Only
valid in spark mode.
with_keys : bool, optional, default=False
Include keys as an argument to the function
"""
axis = self.baseaxes
if self.mode == 'local':
axes = sorted(tupleize(axis))
key_shape = [self.shape[axis] for axis in axes]
reshaped = self._align(axes, key_shape=key_shape)
if with_keys:
keys = zip(*unravel_index(range(prod(key_shape)), key_shape))
mapped = asarray(list(map(func, zip(keys, reshaped))))
else:
mapped = asarray(list(map(func, reshaped)))
try:
elem_shape = mapped[0].shape
except:
elem_shape = (1,)
expand = list(elem_shape)
expand = [1] if len(expand) == 0 else expand
# invert the previous reshape operation, using the shape of the map result
linearized_shape_inv = key_shape + expand
reordered = mapped.reshape(*linearized_shape_inv)
return self._constructor(reordered, mode=self.mode).__finalize__(self, noprop=('index'))
if self.mode == 'spark':
expand = lambda x: array(func(x), ndmin=1)
mapped = self.values.map(expand, axis, value_shape, dtype, with_keys)
return self._constructor(mapped, mode=self.mode).__finalize__(self, noprop=('index',))
|
[
"def",
"map",
"(",
"self",
",",
"func",
",",
"value_shape",
"=",
"None",
",",
"dtype",
"=",
"None",
",",
"with_keys",
"=",
"False",
")",
":",
"axis",
"=",
"self",
".",
"baseaxes",
"if",
"self",
".",
"mode",
"==",
"'local'",
":",
"axes",
"=",
"sorted",
"(",
"tupleize",
"(",
"axis",
")",
")",
"key_shape",
"=",
"[",
"self",
".",
"shape",
"[",
"axis",
"]",
"for",
"axis",
"in",
"axes",
"]",
"reshaped",
"=",
"self",
".",
"_align",
"(",
"axes",
",",
"key_shape",
"=",
"key_shape",
")",
"if",
"with_keys",
":",
"keys",
"=",
"zip",
"(",
"*",
"unravel_index",
"(",
"range",
"(",
"prod",
"(",
"key_shape",
")",
")",
",",
"key_shape",
")",
")",
"mapped",
"=",
"asarray",
"(",
"list",
"(",
"map",
"(",
"func",
",",
"zip",
"(",
"keys",
",",
"reshaped",
")",
")",
")",
")",
"else",
":",
"mapped",
"=",
"asarray",
"(",
"list",
"(",
"map",
"(",
"func",
",",
"reshaped",
")",
")",
")",
"try",
":",
"elem_shape",
"=",
"mapped",
"[",
"0",
"]",
".",
"shape",
"except",
":",
"elem_shape",
"=",
"(",
"1",
",",
")",
"expand",
"=",
"list",
"(",
"elem_shape",
")",
"expand",
"=",
"[",
"1",
"]",
"if",
"len",
"(",
"expand",
")",
"==",
"0",
"else",
"expand",
"# invert the previous reshape operation, using the shape of the map result",
"linearized_shape_inv",
"=",
"key_shape",
"+",
"expand",
"reordered",
"=",
"mapped",
".",
"reshape",
"(",
"*",
"linearized_shape_inv",
")",
"return",
"self",
".",
"_constructor",
"(",
"reordered",
",",
"mode",
"=",
"self",
".",
"mode",
")",
".",
"__finalize__",
"(",
"self",
",",
"noprop",
"=",
"(",
"'index'",
")",
")",
"if",
"self",
".",
"mode",
"==",
"'spark'",
":",
"expand",
"=",
"lambda",
"x",
":",
"array",
"(",
"func",
"(",
"x",
")",
",",
"ndmin",
"=",
"1",
")",
"mapped",
"=",
"self",
".",
"values",
".",
"map",
"(",
"expand",
",",
"axis",
",",
"value_shape",
",",
"dtype",
",",
"with_keys",
")",
"return",
"self",
".",
"_constructor",
"(",
"mapped",
",",
"mode",
"=",
"self",
".",
"mode",
")",
".",
"__finalize__",
"(",
"self",
",",
"noprop",
"=",
"(",
"'index'",
",",
")",
")"
] |
Apply an array -> array function across an axis.
Array will be aligned so that the desired set of axes
are in the keys, which may require a transpose/reshape.
Parameters
----------
func : function
Function of a single array to apply. If with_keys=True,
function should be of a (tuple, array) pair.
axis : tuple or int, optional, default=(0,)
Axis or multiple axes to apply function along.
value_shape : tuple, optional, default=None
Known shape of values resulting from operation. Only
valid in spark mode.
dtype : numpy dtype, optional, default=None
Known dtype resulting from operation. Only
valid in spark mode.
with_keys : bool, optional, default=False
Include keys as an argument to the function
|
[
"Apply",
"an",
"array",
"-",
">",
"array",
"function",
"across",
"an",
"axis",
"."
] |
967ff8f3e7c2fabe1705743d95eb2746d4329786
|
https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/base.py#L412-L469
|
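Editorial usage sketch for Data.map with an array -> array function applied to every record; runs locally on random data.

from thunder.images.readers import fromrandom

imgs = fromrandom(shape=(10, 16, 16), seed=2)
squared = imgs.map(lambda im: im ** 2)  # same-shaped output for every image
print(squared.shape)                    # (10, 16, 16)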
12,274
|
thunder-project/thunder
|
thunder/base.py
|
Data._reduce
|
def _reduce(self, func, axis=0):
"""
Reduce an array along an axis.
Applies an associative/commutative function of two arguments
cumulatively to all arrays along an axis. Array will be aligned
so that the desired set of axes are in the keys, which may
require a transpose/reshape.
Parameters
----------
func : function
Function of two arrays that returns a single array
axis : tuple or int, optional, default=(0,)
Axis or multiple axes to reduce along.
"""
if self.mode == 'local':
axes = sorted(tupleize(axis))
# if the function is a ufunc, it can automatically handle reducing over multiple axes
if isinstance(func, ufunc):
inshape(self.shape, axes)
reduced = func.reduce(self, axis=tuple(axes))
else:
reshaped = self._align(axes)
reduced = reduce(func, reshaped)
# ensure that the shape of the reduced array is valid
expected_shape = [self.shape[i] for i in range(len(self.shape)) if i not in axes]
if reduced.shape != tuple(expected_shape):
raise ValueError("reduce did not yield an array with valid dimensions")
return self._constructor(reduced[newaxis, :]).__finalize__(self)
if self.mode == 'spark':
reduced = self.values.reduce(func, axis, keepdims=True)
return self._constructor(reduced).__finalize__(self)
|
python
|
def _reduce(self, func, axis=0):
"""
Reduce an array along an axis.
Applies an associative/commutative function of two arguments
cumulatively to all arrays along an axis. Array will be aligned
so that the desired set of axes are in the keys, which may
require a transpose/reshape.
Parameters
----------
func : function
Function of two arrays that returns a single array
axis : tuple or int, optional, default=(0,)
Axis or multiple axes to reduce along.
"""
if self.mode == 'local':
axes = sorted(tupleize(axis))
# if the function is a ufunc, it can automatically handle reducing over multiple axes
if isinstance(func, ufunc):
inshape(self.shape, axes)
reduced = func.reduce(self, axis=tuple(axes))
else:
reshaped = self._align(axes)
reduced = reduce(func, reshaped)
# ensure that the shape of the reduced array is valid
expected_shape = [self.shape[i] for i in range(len(self.shape)) if i not in axes]
if reduced.shape != tuple(expected_shape):
raise ValueError("reduce did not yield an array with valid dimensions")
return self._constructor(reduced[newaxis, :]).__finalize__(self)
if self.mode == 'spark':
reduced = self.values.reduce(func, axis, keepdims=True)
return self._constructor(reduced).__finalize__(self)
|
[
"def",
"_reduce",
"(",
"self",
",",
"func",
",",
"axis",
"=",
"0",
")",
":",
"if",
"self",
".",
"mode",
"==",
"'local'",
":",
"axes",
"=",
"sorted",
"(",
"tupleize",
"(",
"axis",
")",
")",
"# if the function is a ufunc, it can automatically handle reducing over multiple axes",
"if",
"isinstance",
"(",
"func",
",",
"ufunc",
")",
":",
"inshape",
"(",
"self",
".",
"shape",
",",
"axes",
")",
"reduced",
"=",
"func",
".",
"reduce",
"(",
"self",
",",
"axis",
"=",
"tuple",
"(",
"axes",
")",
")",
"else",
":",
"reshaped",
"=",
"self",
".",
"_align",
"(",
"axes",
")",
"reduced",
"=",
"reduce",
"(",
"func",
",",
"reshaped",
")",
"# ensure that the shape of the reduced array is valid",
"expected_shape",
"=",
"[",
"self",
".",
"shape",
"[",
"i",
"]",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"self",
".",
"shape",
")",
")",
"if",
"i",
"not",
"in",
"axes",
"]",
"if",
"reduced",
".",
"shape",
"!=",
"tuple",
"(",
"expected_shape",
")",
":",
"raise",
"ValueError",
"(",
"\"reduce did not yield an array with valid dimensions\"",
")",
"return",
"self",
".",
"_constructor",
"(",
"reduced",
"[",
"newaxis",
",",
":",
"]",
")",
".",
"__finalize__",
"(",
"self",
")",
"if",
"self",
".",
"mode",
"==",
"'spark'",
":",
"reduced",
"=",
"self",
".",
"values",
".",
"reduce",
"(",
"func",
",",
"axis",
",",
"keepdims",
"=",
"True",
")",
"return",
"self",
".",
"_constructor",
"(",
"reduced",
")",
".",
"__finalize__",
"(",
"self",
")"
] |
Reduce an array along an axis.
Applies an associative/commutative function of two arguments
cumulatively to all arrays along an axis. Array will be aligned
so that the desired set of axes are in the keys, which may
require a transpose/reshape.
Parameters
----------
func : function
Function of two arrays that returns a single array
axis : tuple or int, optional, default=(0,)
Axis or multiple axes to reduce along.
|
[
"Reduce",
"an",
"array",
"along",
"an",
"axis",
"."
] |
967ff8f3e7c2fabe1705743d95eb2746d4329786
|
https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/base.py#L471-L508
|
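Editorial sketch for Data._reduce. The method is private and is normally reached through public reductions such as sum or mean; it is called directly here only to illustrate the record, using operator.add as the associative binary function.

from operator import add
from thunder.images.readers import fromrandom

imgs = fromrandom(shape=(10, 16, 16), seed=3)
total = imgs._reduce(add, axis=0)  # elementwise cumulative add across the record axis
print(total.shape)                 # (1, 16, 16) -- the reduced axis is kept as a singleton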
12,275
|
thunder-project/thunder
|
thunder/base.py
|
Data.element_wise
|
def element_wise(self, other, op):
"""
Apply an elementwise operation to data.
Both self and other data must have the same mode.
If self is in local mode, other can also be a numpy array.
Self and other must have the same shape, or other must be a scalar.
Parameters
----------
other : Data or numpy array
Data to apply elementwise operation to
op : function
Binary operator to use for elementwise operations, e.g. add, subtract
"""
if not isscalar(other) and not self.shape == other.shape:
raise ValueError("shapes %s and %s must be equal" % (self.shape, other.shape))
if not isscalar(other) and isinstance(other, Data) and not self.mode == other.mode:
raise NotImplementedError
if isscalar(other):
return self.map(lambda x: op(x, other))
if self.mode == 'local' and isinstance(other, ndarray):
return self._constructor(op(self.values, other)).__finalize__(self)
if self.mode == 'local' and isinstance(other, Data):
return self._constructor(op(self.values, other.values)).__finalize__(self)
if self.mode == 'spark' and isinstance(other, Data):
def func(record):
(k1, x), (k2, y) = record
return k1, op(x, y)
rdd = self.tordd().zip(other.tordd()).map(func)
barray = BoltArraySpark(rdd, shape=self.shape, dtype=self.dtype, split=self.values.split)
return self._constructor(barray).__finalize__(self)
|
python
|
def element_wise(self, other, op):
"""
Apply an elementwise operation to data.
Both self and other data must have the same mode.
If self is in local mode, other can also be a numpy array.
Self and other must have the same shape, or other must be a scalar.
Parameters
----------
other : Data or numpy array
Data to apply elementwise operation to
op : function
Binary operator to use for elementwise operations, e.g. add, subtract
"""
if not isscalar(other) and not self.shape == other.shape:
raise ValueError("shapes %s and %s must be equal" % (self.shape, other.shape))
if not isscalar(other) and isinstance(other, Data) and not self.mode == other.mode:
raise NotImplementedError
if isscalar(other):
return self.map(lambda x: op(x, other))
if self.mode == 'local' and isinstance(other, ndarray):
return self._constructor(op(self.values, other)).__finalize__(self)
if self.mode == 'local' and isinstance(other, Data):
return self._constructor(op(self.values, other.values)).__finalize__(self)
if self.mode == 'spark' and isinstance(other, Data):
def func(record):
(k1, x), (k2, y) = record
return k1, op(x, y)
rdd = self.tordd().zip(other.tordd()).map(func)
barray = BoltArraySpark(rdd, shape=self.shape, dtype=self.dtype, split=self.values.split)
return self._constructor(barray).__finalize__(self)
|
[
"def",
"element_wise",
"(",
"self",
",",
"other",
",",
"op",
")",
":",
"if",
"not",
"isscalar",
"(",
"other",
")",
"and",
"not",
"self",
".",
"shape",
"==",
"other",
".",
"shape",
":",
"raise",
"ValueError",
"(",
"\"shapes %s and %s must be equal\"",
"%",
"(",
"self",
".",
"shape",
",",
"other",
".",
"shape",
")",
")",
"if",
"not",
"isscalar",
"(",
"other",
")",
"and",
"isinstance",
"(",
"other",
",",
"Data",
")",
"and",
"not",
"self",
".",
"mode",
"==",
"other",
".",
"mode",
":",
"raise",
"NotImplementedError",
"if",
"isscalar",
"(",
"other",
")",
":",
"return",
"self",
".",
"map",
"(",
"lambda",
"x",
":",
"op",
"(",
"x",
",",
"other",
")",
")",
"if",
"self",
".",
"mode",
"==",
"'local'",
"and",
"isinstance",
"(",
"other",
",",
"ndarray",
")",
":",
"return",
"self",
".",
"_constructor",
"(",
"op",
"(",
"self",
".",
"values",
",",
"other",
")",
")",
".",
"__finalize__",
"(",
"self",
")",
"if",
"self",
".",
"mode",
"==",
"'local'",
"and",
"isinstance",
"(",
"other",
",",
"Data",
")",
":",
"return",
"self",
".",
"_constructor",
"(",
"op",
"(",
"self",
".",
"values",
",",
"other",
".",
"values",
")",
")",
".",
"__finalize__",
"(",
"self",
")",
"if",
"self",
".",
"mode",
"==",
"'spark'",
"and",
"isinstance",
"(",
"other",
",",
"Data",
")",
":",
"def",
"func",
"(",
"record",
")",
":",
"(",
"k1",
",",
"x",
")",
",",
"(",
"k2",
",",
"y",
")",
"=",
"record",
"return",
"k1",
",",
"op",
"(",
"x",
",",
"y",
")",
"rdd",
"=",
"self",
".",
"tordd",
"(",
")",
".",
"zip",
"(",
"other",
".",
"tordd",
"(",
")",
")",
".",
"map",
"(",
"func",
")",
"barray",
"=",
"BoltArraySpark",
"(",
"rdd",
",",
"shape",
"=",
"self",
".",
"shape",
",",
"dtype",
"=",
"self",
".",
"dtype",
",",
"split",
"=",
"self",
".",
"values",
".",
"split",
")",
"return",
"self",
".",
"_constructor",
"(",
"barray",
")",
".",
"__finalize__",
"(",
"self",
")"
] |
Apply an elementwise operation to data.
Both self and other data must have the same mode.
If self is in local mode, other can also be a numpy array.
Self and other must have the same shape, or other must be a scalar.
Parameters
----------
other : Data or numpy array
Data to apply elementwise operation to
op : function
Binary operator to use for elementwise operations, e.g. add, subtract
|
[
"Apply",
"an",
"elementwise",
"operation",
"to",
"data",
"."
] |
967ff8f3e7c2fabe1705743d95eb2746d4329786
|
https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/base.py#L510-L549
|
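Editorial sketch calling Data.element_wise directly with a standard binary operator; both operands are local-mode Images of the same shape, as the method requires.

from operator import sub
from thunder.images.readers import fromrandom

a = fromrandom(shape=(5, 8, 8), seed=4)
b = fromrandom(shape=(5, 8, 8), seed=5)
diff = a.element_wise(b, sub)  # elementwise subtraction of two same-shaped datasets
print(diff.shape)              # (5, 8, 8)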
12,276
|
thunder-project/thunder
|
thunder/base.py
|
Data.clip
|
def clip(self, min=None, max=None):
"""
Clip values above and below.
Parameters
----------
min : scalar or array-like
Minimum value. If array, will be broadcasted
max : scalar or array-like
Maximum value. If array, will be broadcasted.
"""
return self._constructor(
self.values.clip(min=min, max=max)).__finalize__(self)
|
python
|
def clip(self, min=None, max=None):
"""
Clip values above and below.
Parameters
----------
min : scalar or array-like
Minimum value. If array, will be broadcasted
max : scalar or array-like
Maximum value. If array, will be broadcasted.
"""
return self._constructor(
self.values.clip(min=min, max=max)).__finalize__(self)
|
[
"def",
"clip",
"(",
"self",
",",
"min",
"=",
"None",
",",
"max",
"=",
"None",
")",
":",
"return",
"self",
".",
"_constructor",
"(",
"self",
".",
"values",
".",
"clip",
"(",
"min",
"=",
"min",
",",
"max",
"=",
"max",
")",
")",
".",
"__finalize__",
"(",
"self",
")"
] |
Clip values above and below.
Parameters
----------
min : scalar or array-like
Minimum value. If array, will be broadcasted
max : scalar or array-like
Maximum value. If array, will be broadcasted.
|
[
"Clip",
"values",
"above",
"and",
"below",
"."
] |
967ff8f3e7c2fabe1705743d95eb2746d4329786
|
https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/base.py#L575-L588
|
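Editorial usage sketch for Data.clip; runs locally on random data.

from thunder.images.readers import fromrandom

imgs = fromrandom(shape=(5, 8, 8), seed=6)
clipped = imgs.clip(min=-1, max=1)  # bound all values to the interval [-1, 1]
print(clipped.shape)                # (5, 8, 8)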
12,277
|
thunder-project/thunder
|
thunder/series/readers.py
|
fromrdd
|
def fromrdd(rdd, nrecords=None, shape=None, index=None, labels=None, dtype=None, ordered=False):
"""
Load series data from a Spark RDD.
Assumes keys are tuples with increasing and unique indices,
and values are 1d ndarrays. Will try to infer properties
that are not explicitly provided.
Parameters
----------
rdd : SparkRDD
An RDD containing series data.
shape : tuple or array, optional, default = None
Total shape of data (if provided will avoid check).
nrecords : int, optional, default = None
Number of records (if provided will avoid check).
index : array, optional, default = None
Index for records, if not provided will use (0, 1, ...)
labels : array, optional, default = None
Labels for records. If provided, should have shape of shape[:-1].
dtype : string, default = None
Data numerical type (if provided will avoid check)
ordered : boolean, optional, default = False
Whether or not the rdd is ordered by key
"""
from .series import Series
from bolt.spark.array import BoltArraySpark
if index is None or dtype is None:
item = rdd.values().first()
if index is None:
index = range(len(item))
if dtype is None:
dtype = item.dtype
if nrecords is None and shape is not None:
nrecords = prod(shape[:-1])
if nrecords is None:
nrecords = rdd.count()
if shape is None:
shape = (nrecords, asarray(index).shape[0])
def process_keys(record):
k, v = record
if isinstance(k, int):
k = (k,)
return k, v
values = BoltArraySpark(rdd.map(process_keys), shape=shape, dtype=dtype, split=len(shape)-1, ordered=ordered)
return Series(values, index=index, labels=labels)
|
python
|
def fromrdd(rdd, nrecords=None, shape=None, index=None, labels=None, dtype=None, ordered=False):
"""
Load series data from a Spark RDD.
Assumes keys are tuples with increasing and unique indices,
and values are 1d ndarrays. Will try to infer properties
that are not explicitly provided.
Parameters
----------
rdd : SparkRDD
An RDD containing series data.
shape : tuple or array, optional, default = None
Total shape of data (if provided will avoid check).
nrecords : int, optional, default = None
Number of records (if provided will avoid check).
index : array, optional, default = None
Index for records, if not provided will use (0, 1, ...)
labels : array, optional, default = None
Labels for records. If provided, should have shape of shape[:-1].
dtype : string, default = None
Data numerical type (if provided will avoid check)
ordered : boolean, optional, default = False
Whether or not the rdd is ordered by key
"""
from .series import Series
from bolt.spark.array import BoltArraySpark
if index is None or dtype is None:
item = rdd.values().first()
if index is None:
index = range(len(item))
if dtype is None:
dtype = item.dtype
if nrecords is None and shape is not None:
nrecords = prod(shape[:-1])
if nrecords is None:
nrecords = rdd.count()
if shape is None:
shape = (nrecords, asarray(index).shape[0])
def process_keys(record):
k, v = record
if isinstance(k, int):
k = (k,)
return k, v
values = BoltArraySpark(rdd.map(process_keys), shape=shape, dtype=dtype, split=len(shape)-1, ordered=ordered)
return Series(values, index=index, labels=labels)
|
[
"def",
"fromrdd",
"(",
"rdd",
",",
"nrecords",
"=",
"None",
",",
"shape",
"=",
"None",
",",
"index",
"=",
"None",
",",
"labels",
"=",
"None",
",",
"dtype",
"=",
"None",
",",
"ordered",
"=",
"False",
")",
":",
"from",
".",
"series",
"import",
"Series",
"from",
"bolt",
".",
"spark",
".",
"array",
"import",
"BoltArraySpark",
"if",
"index",
"is",
"None",
"or",
"dtype",
"is",
"None",
":",
"item",
"=",
"rdd",
".",
"values",
"(",
")",
".",
"first",
"(",
")",
"if",
"index",
"is",
"None",
":",
"index",
"=",
"range",
"(",
"len",
"(",
"item",
")",
")",
"if",
"dtype",
"is",
"None",
":",
"dtype",
"=",
"item",
".",
"dtype",
"if",
"nrecords",
"is",
"None",
"and",
"shape",
"is",
"not",
"None",
":",
"nrecords",
"=",
"prod",
"(",
"shape",
"[",
":",
"-",
"1",
"]",
")",
"if",
"nrecords",
"is",
"None",
":",
"nrecords",
"=",
"rdd",
".",
"count",
"(",
")",
"if",
"shape",
"is",
"None",
":",
"shape",
"=",
"(",
"nrecords",
",",
"asarray",
"(",
"index",
")",
".",
"shape",
"[",
"0",
"]",
")",
"def",
"process_keys",
"(",
"record",
")",
":",
"k",
",",
"v",
"=",
"record",
"if",
"isinstance",
"(",
"k",
",",
"int",
")",
":",
"k",
"=",
"(",
"k",
",",
")",
"return",
"k",
",",
"v",
"values",
"=",
"BoltArraySpark",
"(",
"rdd",
".",
"map",
"(",
"process_keys",
")",
",",
"shape",
"=",
"shape",
",",
"dtype",
"=",
"dtype",
",",
"split",
"=",
"len",
"(",
"shape",
")",
"-",
"1",
",",
"ordered",
"=",
"ordered",
")",
"return",
"Series",
"(",
"values",
",",
"index",
"=",
"index",
",",
"labels",
"=",
"labels",
")"
] |
Load series data from a Spark RDD.
Assumes keys are tuples with increasing and unique indices,
and values are 1d ndarrays. Will try to infer properties
that are not explicitly provided.
Parameters
----------
rdd : SparkRDD
An RDD containing series data.
shape : tuple or array, optional, default = None
Total shape of data (if provided will avoid check).
nrecords : int, optional, default = None
Number of records (if provided will avoid check).
index : array, optional, default = None
Index for records, if not provided will use (0, 1, ...)
labels : array, optional, default = None
Labels for records. If provided, should have shape of shape[:-1].
dtype : string, default = None
Data numerical type (if provided will avoid check)
ordered : boolean, optional, default = False
Whether or not the rdd is ordered by key
|
[
"Load",
"series",
"data",
"from",
"a",
"Spark",
"RDD",
"."
] |
967ff8f3e7c2fabe1705743d95eb2746d4329786
|
https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/series/readers.py#L13-L72
|
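A hedged sketch of calling fromrdd. The SparkContext named sc is assumed to already exist; keys are 1-tuples and values are 1d arrays, matching the contract described in the docstring above.

# Sketch only: 'sc' is an assumed, pre-existing SparkContext.
from numpy import array
from thunder.series.readers import fromrdd

rdd = sc.parallelize([((0,), array([1.0, 2.0, 3.0])),
                      ((1,), array([4.0, 5.0, 6.0]))])
series = fromrdd(rdd, nrecords=2, index=[0, 1, 2], ordered=True)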
12,278
|
thunder-project/thunder
|
thunder/series/readers.py
|
fromarray
|
def fromarray(values, index=None, labels=None, npartitions=None, engine=None):
"""
Load series data from an array.
Assumes that all but final dimension index the records,
and the size of the final dimension is the length of each record,
e.g. a (2, 3, 4) array will be treated as 2 x 3 records of size (4,)
Parameters
----------
values : array-like
An array containing the data. Can be a numpy array,
a bolt array, or an array-like.
index : array, optional, default = None
Index for records, if not provided will use (0,1,...,N)
where N is the length of each record.
labels : array, optional, default = None
Labels for records. If provided, should have same shape as values.shape[:-1].
npartitions : int, default = None
Number of partitions for parallelization (Spark only)
engine : object, default = None
Computational engine (e.g. a SparkContext for Spark)
"""
from .series import Series
import bolt
if isinstance(values, bolt.spark.array.BoltArraySpark):
return Series(values)
values = asarray(values)
if values.ndim < 2:
values = expand_dims(values, 0)
if index is not None and not asarray(index).shape[0] == values.shape[-1]:
raise ValueError('Index length %s not equal to record length %s'
% (asarray(index).shape[0], values.shape[-1]))
if index is None:
index = arange(values.shape[-1])
if spark and isinstance(engine, spark):
axis = tuple(range(values.ndim - 1))
values = bolt.array(values, context=engine, npartitions=npartitions, axis=axis)
values._ordered = True
return Series(values, index=index)
return Series(values, index=index, labels=labels)
|
python
|
def fromarray(values, index=None, labels=None, npartitions=None, engine=None):
"""
Load series data from an array.
Assumes that all but final dimension index the records,
and the size of the final dimension is the length of each record,
e.g. a (2, 3, 4) array will be treated as 2 x 3 records of size (4,)
Parameters
----------
values : array-like
An array containing the data. Can be a numpy array,
a bolt array, or an array-like.
index : array, optional, default = None
Index for records, if not provided will use (0,1,...,N)
where N is the length of each record.
labels : array, optional, default = None
Labels for records. If provided, should have same shape as values.shape[:-1].
npartitions : int, default = None
Number of partitions for parallelization (Spark only)
engine : object, default = None
Computational engine (e.g. a SparkContext for Spark)
"""
from .series import Series
import bolt
if isinstance(values, bolt.spark.array.BoltArraySpark):
return Series(values)
values = asarray(values)
if values.ndim < 2:
values = expand_dims(values, 0)
if index is not None and not asarray(index).shape[0] == values.shape[-1]:
raise ValueError('Index length %s not equal to record length %s'
% (asarray(index).shape[0], values.shape[-1]))
if index is None:
index = arange(values.shape[-1])
if spark and isinstance(engine, spark):
axis = tuple(range(values.ndim - 1))
values = bolt.array(values, context=engine, npartitions=npartitions, axis=axis)
values._ordered = True
return Series(values, index=index)
return Series(values, index=index, labels=labels)
|
[
"def",
"fromarray",
"(",
"values",
",",
"index",
"=",
"None",
",",
"labels",
"=",
"None",
",",
"npartitions",
"=",
"None",
",",
"engine",
"=",
"None",
")",
":",
"from",
".",
"series",
"import",
"Series",
"import",
"bolt",
"if",
"isinstance",
"(",
"values",
",",
"bolt",
".",
"spark",
".",
"array",
".",
"BoltArraySpark",
")",
":",
"return",
"Series",
"(",
"values",
")",
"values",
"=",
"asarray",
"(",
"values",
")",
"if",
"values",
".",
"ndim",
"<",
"2",
":",
"values",
"=",
"expand_dims",
"(",
"values",
",",
"0",
")",
"if",
"index",
"is",
"not",
"None",
"and",
"not",
"asarray",
"(",
"index",
")",
".",
"shape",
"[",
"0",
"]",
"==",
"values",
".",
"shape",
"[",
"-",
"1",
"]",
":",
"raise",
"ValueError",
"(",
"'Index length %s not equal to record length %s'",
"%",
"(",
"asarray",
"(",
"index",
")",
".",
"shape",
"[",
"0",
"]",
",",
"values",
".",
"shape",
"[",
"-",
"1",
"]",
")",
")",
"if",
"index",
"is",
"None",
":",
"index",
"=",
"arange",
"(",
"values",
".",
"shape",
"[",
"-",
"1",
"]",
")",
"if",
"spark",
"and",
"isinstance",
"(",
"engine",
",",
"spark",
")",
":",
"axis",
"=",
"tuple",
"(",
"range",
"(",
"values",
".",
"ndim",
"-",
"1",
")",
")",
"values",
"=",
"bolt",
".",
"array",
"(",
"values",
",",
"context",
"=",
"engine",
",",
"npartitions",
"=",
"npartitions",
",",
"axis",
"=",
"axis",
")",
"values",
".",
"_ordered",
"=",
"True",
"return",
"Series",
"(",
"values",
",",
"index",
"=",
"index",
")",
"return",
"Series",
"(",
"values",
",",
"index",
"=",
"index",
",",
"labels",
"=",
"labels",
")"
] |
Load series data from an array.
Assumes that all but final dimension index the records,
and the size of the final dimension is the length of each record,
e.g. a (2, 3, 4) array will be treated as 2 x 3 records of size (4,)
Parameters
----------
values : array-like
An array containing the data. Can be a numpy array,
a bolt array, or an array-like.
index : array, optional, default = None
Index for records, if not provided will use (0,1,...,N)
where N is the length of each record.
labels : array, optional, default = None
Labels for records. If provided, should have same shape as values.shape[:-1].
npartitions : int, default = None
Number of partitions for parallelization (Spark only)
engine : object, default = None
Computational engine (e.g. a SparkContext for Spark)
|
[
"Load",
"series",
"data",
"from",
"an",
"array",
"."
] |
967ff8f3e7c2fabe1705743d95eb2746d4329786
|
https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/series/readers.py#L74-L124
|
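A minimal local-mode sketch for fromarray; the toy array and index values are illustrative.

# A (2, 3, 4) array becomes 2 x 3 records of length 4.
from numpy import arange
from thunder.series.readers import fromarray

values = arange(24).reshape(2, 3, 4)
series = fromarray(values, index=[0, 1, 2, 3])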
12,279
|
thunder-project/thunder
|
thunder/series/readers.py
|
fromlist
|
def fromlist(items, accessor=None, index=None, labels=None, dtype=None, npartitions=None, engine=None):
"""
Load series data from a list with an optional accessor function.
Will call accessor function on each item from the list,
providing a generic interface for data loading.
Parameters
----------
items : list
A list of items to load.
accessor : function, optional, default = None
A function to apply to each item in the list during loading.
index : array, optional, default = None
Index for records, if not provided will use (0,1,...,N)
where N is the length of each record.
labels : array, optional, default = None
Labels for records. If provided, should have same length as items.
dtype : string, default = None
Data numerical type (if provided will avoid check)
npartitions : int, default = None
Number of partitions for parallelization (Spark only)
engine : object, default = None
Computational engine (e.g. a SparkContext for Spark)
"""
if spark and isinstance(engine, spark):
if dtype is None:
dtype = accessor(items[0]).dtype if accessor else items[0].dtype
nrecords = len(items)
keys = map(lambda k: (k, ), range(len(items)))
if not npartitions:
npartitions = engine.defaultParallelism
items = zip(keys, items)
rdd = engine.parallelize(items, npartitions)
if accessor:
rdd = rdd.mapValues(accessor)
return fromrdd(rdd, nrecords=nrecords, index=index, labels=labels, dtype=dtype, ordered=True)
else:
if accessor:
items = [accessor(i) for i in items]
return fromarray(items, index=index, labels=labels)
|
python
|
def fromlist(items, accessor=None, index=None, labels=None, dtype=None, npartitions=None, engine=None):
"""
Load series data from a list with an optional accessor function.
Will call accessor function on each item from the list,
providing a generic interface for data loading.
Parameters
----------
items : list
A list of items to load.
accessor : function, optional, default = None
A function to apply to each item in the list during loading.
index : array, optional, default = None
Index for records, if not provided will use (0,1,...,N)
where N is the length of each record.
labels : array, optional, default = None
Labels for records. If provided, should have same length as items.
dtype : string, default = None
Data numerical type (if provided will avoid check)
npartitions : int, default = None
Number of partitions for parallelization (Spark only)
engine : object, default = None
Computational engine (e.g. a SparkContext for Spark)
"""
if spark and isinstance(engine, spark):
if dtype is None:
dtype = accessor(items[0]).dtype if accessor else items[0].dtype
nrecords = len(items)
keys = map(lambda k: (k, ), range(len(items)))
if not npartitions:
npartitions = engine.defaultParallelism
items = zip(keys, items)
rdd = engine.parallelize(items, npartitions)
if accessor:
rdd = rdd.mapValues(accessor)
return fromrdd(rdd, nrecords=nrecords, index=index, labels=labels, dtype=dtype, ordered=True)
else:
if accessor:
items = [accessor(i) for i in items]
return fromarray(items, index=index, labels=labels)
|
[
"def",
"fromlist",
"(",
"items",
",",
"accessor",
"=",
"None",
",",
"index",
"=",
"None",
",",
"labels",
"=",
"None",
",",
"dtype",
"=",
"None",
",",
"npartitions",
"=",
"None",
",",
"engine",
"=",
"None",
")",
":",
"if",
"spark",
"and",
"isinstance",
"(",
"engine",
",",
"spark",
")",
":",
"if",
"dtype",
"is",
"None",
":",
"dtype",
"=",
"accessor",
"(",
"items",
"[",
"0",
"]",
")",
".",
"dtype",
"if",
"accessor",
"else",
"items",
"[",
"0",
"]",
".",
"dtype",
"nrecords",
"=",
"len",
"(",
"items",
")",
"keys",
"=",
"map",
"(",
"lambda",
"k",
":",
"(",
"k",
",",
")",
",",
"range",
"(",
"len",
"(",
"items",
")",
")",
")",
"if",
"not",
"npartitions",
":",
"npartitions",
"=",
"engine",
".",
"defaultParallelism",
"items",
"=",
"zip",
"(",
"keys",
",",
"items",
")",
"rdd",
"=",
"engine",
".",
"parallelize",
"(",
"items",
",",
"npartitions",
")",
"if",
"accessor",
":",
"rdd",
"=",
"rdd",
".",
"mapValues",
"(",
"accessor",
")",
"return",
"fromrdd",
"(",
"rdd",
",",
"nrecords",
"=",
"nrecords",
",",
"index",
"=",
"index",
",",
"labels",
"=",
"labels",
",",
"dtype",
"=",
"dtype",
",",
"ordered",
"=",
"True",
")",
"else",
":",
"if",
"accessor",
":",
"items",
"=",
"[",
"accessor",
"(",
"i",
")",
"for",
"i",
"in",
"items",
"]",
"return",
"fromarray",
"(",
"items",
",",
"index",
"=",
"index",
",",
"labels",
"=",
"labels",
")"
] |
Load series data from a list with an optional accessor function.
Will call accessor function on each item from the list,
providing a generic interface for data loading.
Parameters
----------
items : list
A list of items to load.
accessor : function, optional, default = None
A function to apply to each item in the list during loading.
index : array, optional, default = None
Index for records, if not provided will use (0,1,...,N)
where N is the length of each record.
labels : array, optional, default = None
Labels for records. If provided, should have same length as items.
dtype : string, default = None
Data numerical type (if provided will avoid check)
npartitions : int, default = None
Number of partitions for parallelization (Spark only)
engine : object, default = None
Computational engine (e.g. a SparkContext for Spark)
|
[
"Load",
"series",
"data",
"from",
"a",
"list",
"with",
"an",
"optional",
"accessor",
"function",
"."
] |
967ff8f3e7c2fabe1705743d95eb2746d4329786
|
https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/series/readers.py#L126-L173
|
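A minimal local-mode sketch for fromlist; the items and the accessor are illustrative.

# Load two records from a plain list, converting each item with an accessor.
from numpy import array
from thunder.series.readers import fromlist

items = [[1, 2, 3], [4, 5, 6]]
series = fromlist(items, accessor=lambda x: array(x, dtype='float64'))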
12,280
|
thunder-project/thunder
|
thunder/series/readers.py
|
fromtext
|
def fromtext(path, ext='txt', dtype='float64', skip=0, shape=None, index=None, labels=None, npartitions=None, engine=None, credentials=None):
"""
Loads series data from text files.
Assumes data are formatted as rows, where each record is a row
of numbers separated by spaces e.g. 'v v v v v'. You can
optionally specify a fixed number of initial items per row to skip / discard.
Parameters
----------
path : string
Directory to load from, can be a URI string with scheme
(e.g. 'file://', 's3n://', or 'gs://'), or a single file,
or a directory, or a directory with a single wildcard character.
ext : str, optional, default = 'txt'
File extension.
dtype : dtype or dtype specifier, default 'float64'
Numerical type to use for data after converting from text.
skip : int, optional, default = 0
Number of items in each record to skip.
shape : tuple or list, optional, default = None
Shape of data if known, will be inferred otherwise.
index : array, optional, default = None
Index for records, if not provided will use (0, 1, ...)
labels : array, optional, default = None
Labels for records. If provided, should have length equal to number of rows.
npartitions : int, default = None
Number of partitions for parallelization (Spark only)
engine : object, default = None
Computational engine (e.g. a SparkContext for Spark)
credentials : dict, default = None
Credentials for remote storage (e.g. S3) in the form {access: ***, secret: ***}
"""
from thunder.readers import normalize_scheme, get_parallel_reader
path = normalize_scheme(path, ext)
if spark and isinstance(engine, spark):
def parse(line, skip):
vec = [float(x) for x in line.split(' ')]
return array(vec[skip:], dtype=dtype)
lines = engine.textFile(path, npartitions)
data = lines.map(lambda x: parse(x, skip))
def switch(record):
ary, idx = record
return (idx,), ary
rdd = data.zipWithIndex().map(switch)
return fromrdd(rdd, dtype=str(dtype), shape=shape, index=index, ordered=True)
else:
reader = get_parallel_reader(path)(engine, credentials=credentials)
data = reader.read(path, ext=ext)
values = []
for kv in data:
for line in str(kv[1].decode('utf-8')).split('\n')[:-1]:
values.append(fromstring(line, sep=' '))
values = asarray(values)
if skip > 0:
values = values[:, skip:]
if shape:
values = values.reshape(shape)
return fromarray(values, index=index, labels=labels)
|
python
|
def fromtext(path, ext='txt', dtype='float64', skip=0, shape=None, index=None, labels=None, npartitions=None, engine=None, credentials=None):
"""
Loads series data from text files.
Assumes data are formatted as rows, where each record is a row
of numbers separated by spaces e.g. 'v v v v v'. You can
optionally specify a fixed number of initial items per row to skip / discard.
Parameters
----------
path : string
Directory to load from, can be a URI string with scheme
(e.g. 'file://', 's3n://', or 'gs://'), or a single file,
or a directory, or a directory with a single wildcard character.
ext : str, optional, default = 'txt'
File extension.
dtype : dtype or dtype specifier, default 'float64'
Numerical type to use for data after converting from text.
skip : int, optional, default = 0
Number of items in each record to skip.
shape : tuple or list, optional, default = None
Shape of data if known, will be inferred otherwise.
index : array, optional, default = None
Index for records, if not provided will use (0, 1, ...)
labels : array, optional, default = None
Labels for records. If provided, should have length equal to number of rows.
npartitions : int, default = None
Number of partitions for parallelization (Spark only)
engine : object, default = None
Computational engine (e.g. a SparkContext for Spark)
credentials : dict, default = None
Credentials for remote storage (e.g. S3) in the form {access: ***, secret: ***}
"""
from thunder.readers import normalize_scheme, get_parallel_reader
path = normalize_scheme(path, ext)
if spark and isinstance(engine, spark):
def parse(line, skip):
vec = [float(x) for x in line.split(' ')]
return array(vec[skip:], dtype=dtype)
lines = engine.textFile(path, npartitions)
data = lines.map(lambda x: parse(x, skip))
def switch(record):
ary, idx = record
return (idx,), ary
rdd = data.zipWithIndex().map(switch)
return fromrdd(rdd, dtype=str(dtype), shape=shape, index=index, ordered=True)
else:
reader = get_parallel_reader(path)(engine, credentials=credentials)
data = reader.read(path, ext=ext)
values = []
for kv in data:
for line in str(kv[1].decode('utf-8')).split('\n')[:-1]:
values.append(fromstring(line, sep=' '))
values = asarray(values)
if skip > 0:
values = values[:, skip:]
if shape:
values = values.reshape(shape)
return fromarray(values, index=index, labels=labels)
|
[
"def",
"fromtext",
"(",
"path",
",",
"ext",
"=",
"'txt'",
",",
"dtype",
"=",
"'float64'",
",",
"skip",
"=",
"0",
",",
"shape",
"=",
"None",
",",
"index",
"=",
"None",
",",
"labels",
"=",
"None",
",",
"npartitions",
"=",
"None",
",",
"engine",
"=",
"None",
",",
"credentials",
"=",
"None",
")",
":",
"from",
"thunder",
".",
"readers",
"import",
"normalize_scheme",
",",
"get_parallel_reader",
"path",
"=",
"normalize_scheme",
"(",
"path",
",",
"ext",
")",
"if",
"spark",
"and",
"isinstance",
"(",
"engine",
",",
"spark",
")",
":",
"def",
"parse",
"(",
"line",
",",
"skip",
")",
":",
"vec",
"=",
"[",
"float",
"(",
"x",
")",
"for",
"x",
"in",
"line",
".",
"split",
"(",
"' '",
")",
"]",
"return",
"array",
"(",
"vec",
"[",
"skip",
":",
"]",
",",
"dtype",
"=",
"dtype",
")",
"lines",
"=",
"engine",
".",
"textFile",
"(",
"path",
",",
"npartitions",
")",
"data",
"=",
"lines",
".",
"map",
"(",
"lambda",
"x",
":",
"parse",
"(",
"x",
",",
"skip",
")",
")",
"def",
"switch",
"(",
"record",
")",
":",
"ary",
",",
"idx",
"=",
"record",
"return",
"(",
"idx",
",",
")",
",",
"ary",
"rdd",
"=",
"data",
".",
"zipWithIndex",
"(",
")",
".",
"map",
"(",
"switch",
")",
"return",
"fromrdd",
"(",
"rdd",
",",
"dtype",
"=",
"str",
"(",
"dtype",
")",
",",
"shape",
"=",
"shape",
",",
"index",
"=",
"index",
",",
"ordered",
"=",
"True",
")",
"else",
":",
"reader",
"=",
"get_parallel_reader",
"(",
"path",
")",
"(",
"engine",
",",
"credentials",
"=",
"credentials",
")",
"data",
"=",
"reader",
".",
"read",
"(",
"path",
",",
"ext",
"=",
"ext",
")",
"values",
"=",
"[",
"]",
"for",
"kv",
"in",
"data",
":",
"for",
"line",
"in",
"str",
"(",
"kv",
"[",
"1",
"]",
".",
"decode",
"(",
"'utf-8'",
")",
")",
".",
"split",
"(",
"'\\n'",
")",
"[",
":",
"-",
"1",
"]",
":",
"values",
".",
"append",
"(",
"fromstring",
"(",
"line",
",",
"sep",
"=",
"' '",
")",
")",
"values",
"=",
"asarray",
"(",
"values",
")",
"if",
"skip",
">",
"0",
":",
"values",
"=",
"values",
"[",
":",
",",
"skip",
":",
"]",
"if",
"shape",
":",
"values",
"=",
"values",
".",
"reshape",
"(",
"shape",
")",
"return",
"fromarray",
"(",
"values",
",",
"index",
"=",
"index",
",",
"labels",
"=",
"labels",
")"
] |
Loads series data from text files.
Assumes data are formatted as rows, where each record is a row
of numbers separated by spaces e.g. 'v v v v v'. You can
optionally specify a fixed number of initial items per row to skip / discard.
Parameters
----------
path : string
Directory to load from, can be a URI string with scheme
(e.g. 'file://', 's3n://', or 'gs://'), or a single file,
or a directory, or a directory with a single wildcard character.
ext : str, optional, default = 'txt'
File extension.
dtype : dtype or dtype specifier, default 'float64'
Numerical type to use for data after converting from text.
skip : int, optional, default = 0
Number of items in each record to skip.
shape : tuple or list, optional, default = None
Shape of data if known, will be inferred otherwise.
index : array, optional, default = None
Index for records, if not provided will use (0, 1, ...)
labels : array, optional, default = None
Labels for records. If provided, should have length equal to number of rows.
npartitions : int, default = None
Number of partitions for parallelization (Spark only)
engine : object, default = None
Computational engine (e.g. a SparkContext for Spark)
credentials : dict, default = None
Credentials for remote storage (e.g. S3) in the form {access: ***, secret: ***}
|
[
"Loads",
"series",
"data",
"from",
"text",
"files",
"."
] |
967ff8f3e7c2fabe1705743d95eb2746d4329786
|
https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/series/readers.py#L175-L252
|
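A hedged local-mode sketch for fromtext; the path is a placeholder, and skip=1 discards the first item of each row as the docstring describes.

# Placeholder path; each line is expected to look like 'v v v v v'.
from thunder.series.readers import fromtext

series = fromtext('path/to/text/records', ext='txt', skip=1, dtype='float32')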
12,281
|
thunder-project/thunder
|
thunder/series/readers.py
|
frombinary
|
def frombinary(path, ext='bin', conf='conf.json', dtype=None, shape=None, skip=0, index=None, labels=None, engine=None, credentials=None):
"""
Load series data from flat binary files.
Parameters
----------
path : string URI or local filesystem path
Directory to load from, can be a URI string with scheme
(e.g. 'file://', 's3n://', or 'gs://'), or a single file,
or a directory, or a directory with a single wildcard character.
ext : str, optional, default = 'bin'
Optional file extension specifier.
conf : str, optional, default = 'conf.json'
Name of conf file with type and size information.
dtype : dtype or dtype specifier, default 'float64'
Numerical type to use for data after converting from text.
shape : tuple or list, optional, default = None
Shape of data if known, will be inferred otherwise.
skip : int, optional, default = 0
Number of items in each record to skip.
index : array, optional, default = None
Index for records, if not provided will use (0, 1, ...)
labels : array, optional, default = None
Labels for records. If provided, should have shape of shape[:-1].
engine : object, default = None
Computational engine (e.g. a SparkContext for Spark)
credentials : dict, default = None
Credentials for remote storage (e.g. S3) in the form {access: ***, secret: ***}
"""
shape, dtype = _binaryconfig(path, conf, dtype, shape, credentials)
from thunder.readers import normalize_scheme, get_parallel_reader
path = normalize_scheme(path, ext)
from numpy import dtype as dtype_func
nelements = shape[-1] + skip
recordsize = dtype_func(dtype).itemsize * nelements
if spark and isinstance(engine, spark):
lines = engine.binaryRecords(path, recordsize)
raw = lines.map(lambda x: frombuffer(buffer(x), offset=0, count=nelements, dtype=dtype)[skip:])
def switch(record):
ary, idx = record
return (idx,), ary
rdd = raw.zipWithIndex().map(switch)
if shape and len(shape) > 2:
expand = lambda k: unravel_index(k[0], shape[0:-1])
rdd = rdd.map(lambda kv: (expand(kv[0]), kv[1]))
if not index:
index = arange(shape[-1])
return fromrdd(rdd, dtype=dtype, shape=shape, index=index, ordered=True)
else:
reader = get_parallel_reader(path)(engine, credentials=credentials)
data = reader.read(path, ext=ext)
values = []
for record in data:
buf = record[1]
offset = 0
while offset < len(buf):
v = frombuffer(buffer(buf), offset=offset, count=nelements, dtype=dtype)
values.append(v[skip:])
offset += recordsize
if not len(values) == prod(shape[0:-1]):
raise ValueError('Unexpected shape, got %g records but expected %g'
% (len(values), prod(shape[0:-1])))
values = asarray(values, dtype=dtype)
if shape:
values = values.reshape(shape)
return fromarray(values, index=index, labels=labels)
|
python
|
def frombinary(path, ext='bin', conf='conf.json', dtype=None, shape=None, skip=0, index=None, labels=None, engine=None, credentials=None):
"""
Load series data from flat binary files.
Parameters
----------
path : string URI or local filesystem path
Directory to load from, can be a URI string with scheme
(e.g. 'file://', 's3n://', or 'gs://'), or a single file,
or a directory, or a directory with a single wildcard character.
ext : str, optional, default = 'bin'
Optional file extension specifier.
conf : str, optional, default = 'conf.json'
Name of conf file with type and size information.
dtype : dtype or dtype specifier, default 'float64'
Numerical type to use for data after converting from text.
shape : tuple or list, optional, default = None
Shape of data if known, will be inferred otherwise.
skip : int, optional, default = 0
Number of items in each record to skip.
index : array, optional, default = None
Index for records, if not provided will use (0, 1, ...)
labels : array, optional, default = None
Labels for records. If provided, should have shape of shape[:-1].
engine : object, default = None
Computational engine (e.g. a SparkContext for Spark)
credentials : dict, default = None
Credentials for remote storage (e.g. S3) in the form {access: ***, secret: ***}
"""
shape, dtype = _binaryconfig(path, conf, dtype, shape, credentials)
from thunder.readers import normalize_scheme, get_parallel_reader
path = normalize_scheme(path, ext)
from numpy import dtype as dtype_func
nelements = shape[-1] + skip
recordsize = dtype_func(dtype).itemsize * nelements
if spark and isinstance(engine, spark):
lines = engine.binaryRecords(path, recordsize)
raw = lines.map(lambda x: frombuffer(buffer(x), offset=0, count=nelements, dtype=dtype)[skip:])
def switch(record):
ary, idx = record
return (idx,), ary
rdd = raw.zipWithIndex().map(switch)
if shape and len(shape) > 2:
expand = lambda k: unravel_index(k[0], shape[0:-1])
rdd = rdd.map(lambda kv: (expand(kv[0]), kv[1]))
if not index:
index = arange(shape[-1])
return fromrdd(rdd, dtype=dtype, shape=shape, index=index, ordered=True)
else:
reader = get_parallel_reader(path)(engine, credentials=credentials)
data = reader.read(path, ext=ext)
values = []
for record in data:
buf = record[1]
offset = 0
while offset < len(buf):
v = frombuffer(buffer(buf), offset=offset, count=nelements, dtype=dtype)
values.append(v[skip:])
offset += recordsize
if not len(values) == prod(shape[0:-1]):
raise ValueError('Unexpected shape, got %g records but expected %g'
% (len(values), prod(shape[0:-1])))
values = asarray(values, dtype=dtype)
if shape:
values = values.reshape(shape)
return fromarray(values, index=index, labels=labels)
|
[
"def",
"frombinary",
"(",
"path",
",",
"ext",
"=",
"'bin'",
",",
"conf",
"=",
"'conf.json'",
",",
"dtype",
"=",
"None",
",",
"shape",
"=",
"None",
",",
"skip",
"=",
"0",
",",
"index",
"=",
"None",
",",
"labels",
"=",
"None",
",",
"engine",
"=",
"None",
",",
"credentials",
"=",
"None",
")",
":",
"shape",
",",
"dtype",
"=",
"_binaryconfig",
"(",
"path",
",",
"conf",
",",
"dtype",
",",
"shape",
",",
"credentials",
")",
"from",
"thunder",
".",
"readers",
"import",
"normalize_scheme",
",",
"get_parallel_reader",
"path",
"=",
"normalize_scheme",
"(",
"path",
",",
"ext",
")",
"from",
"numpy",
"import",
"dtype",
"as",
"dtype_func",
"nelements",
"=",
"shape",
"[",
"-",
"1",
"]",
"+",
"skip",
"recordsize",
"=",
"dtype_func",
"(",
"dtype",
")",
".",
"itemsize",
"*",
"nelements",
"if",
"spark",
"and",
"isinstance",
"(",
"engine",
",",
"spark",
")",
":",
"lines",
"=",
"engine",
".",
"binaryRecords",
"(",
"path",
",",
"recordsize",
")",
"raw",
"=",
"lines",
".",
"map",
"(",
"lambda",
"x",
":",
"frombuffer",
"(",
"buffer",
"(",
"x",
")",
",",
"offset",
"=",
"0",
",",
"count",
"=",
"nelements",
",",
"dtype",
"=",
"dtype",
")",
"[",
"skip",
":",
"]",
")",
"def",
"switch",
"(",
"record",
")",
":",
"ary",
",",
"idx",
"=",
"record",
"return",
"(",
"idx",
",",
")",
",",
"ary",
"rdd",
"=",
"raw",
".",
"zipWithIndex",
"(",
")",
".",
"map",
"(",
"switch",
")",
"if",
"shape",
"and",
"len",
"(",
"shape",
")",
">",
"2",
":",
"expand",
"=",
"lambda",
"k",
":",
"unravel_index",
"(",
"k",
"[",
"0",
"]",
",",
"shape",
"[",
"0",
":",
"-",
"1",
"]",
")",
"rdd",
"=",
"rdd",
".",
"map",
"(",
"lambda",
"kv",
":",
"(",
"expand",
"(",
"kv",
"[",
"0",
"]",
")",
",",
"kv",
"[",
"1",
"]",
")",
")",
"if",
"not",
"index",
":",
"index",
"=",
"arange",
"(",
"shape",
"[",
"-",
"1",
"]",
")",
"return",
"fromrdd",
"(",
"rdd",
",",
"dtype",
"=",
"dtype",
",",
"shape",
"=",
"shape",
",",
"index",
"=",
"index",
",",
"ordered",
"=",
"True",
")",
"else",
":",
"reader",
"=",
"get_parallel_reader",
"(",
"path",
")",
"(",
"engine",
",",
"credentials",
"=",
"credentials",
")",
"data",
"=",
"reader",
".",
"read",
"(",
"path",
",",
"ext",
"=",
"ext",
")",
"values",
"=",
"[",
"]",
"for",
"record",
"in",
"data",
":",
"buf",
"=",
"record",
"[",
"1",
"]",
"offset",
"=",
"0",
"while",
"offset",
"<",
"len",
"(",
"buf",
")",
":",
"v",
"=",
"frombuffer",
"(",
"buffer",
"(",
"buf",
")",
",",
"offset",
"=",
"offset",
",",
"count",
"=",
"nelements",
",",
"dtype",
"=",
"dtype",
")",
"values",
".",
"append",
"(",
"v",
"[",
"skip",
":",
"]",
")",
"offset",
"+=",
"recordsize",
"if",
"not",
"len",
"(",
"values",
")",
"==",
"prod",
"(",
"shape",
"[",
"0",
":",
"-",
"1",
"]",
")",
":",
"raise",
"ValueError",
"(",
"'Unexpected shape, got %g records but expected %g'",
"%",
"(",
"len",
"(",
"values",
")",
",",
"prod",
"(",
"shape",
"[",
"0",
":",
"-",
"1",
"]",
")",
")",
")",
"values",
"=",
"asarray",
"(",
"values",
",",
"dtype",
"=",
"dtype",
")",
"if",
"shape",
":",
"values",
"=",
"values",
".",
"reshape",
"(",
"shape",
")",
"return",
"fromarray",
"(",
"values",
",",
"index",
"=",
"index",
",",
"labels",
"=",
"labels",
")"
] |
Load series data from flat binary files.
Parameters
----------
path : string URI or local filesystem path
Directory to load from, can be a URI string with scheme
(e.g. 'file://', 's3n://', or 'gs://'), or a single file,
or a directory, or a directory with a single wildcard character.
ext : str, optional, default = 'bin'
Optional file extension specifier.
conf : str, optional, default = 'conf.json'
Name of conf file with type and size information.
dtype : dtype or dtype specifier, default 'float64'
Numerical type to use for data after converting from text.
shape : tuple or list, optional, default = None
Shape of data if known, will be inferred otherwise.
skip : int, optional, default = 0
Number of items in each record to skip.
index : array, optional, default = None
Index for records, if not provided will use (0, 1, ...)
labels : array, optional, default = None
Labels for records. If provided, should have shape of shape[:-1].
engine : object, default = None
Computational engine (e.g. a SparkContext for Spark)
credentials : dict, default = None
Credentials for remote storage (e.g. S3) in the form {access: ***, secret: ***}
|
[
"Load",
"series",
"data",
"from",
"flat",
"binary",
"files",
"."
] |
967ff8f3e7c2fabe1705743d95eb2746d4329786
|
https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/series/readers.py#L254-L342
|
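A hedged sketch for frombinary; the directory is a placeholder and is expected to contain the conf.json described above (or explicit shape and dtype arguments).

# Placeholder path; shape and dtype come from conf.json unless passed explicitly.
from thunder.series.readers import frombinary

series = frombinary('path/to/binary/series', ext='bin', conf='conf.json')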
12,282
|
thunder-project/thunder
|
thunder/series/readers.py
|
_binaryconfig
|
def _binaryconfig(path, conf, dtype=None, shape=None, credentials=None):
"""
Collects parameters to use for binary series loading.
"""
import json
from thunder.readers import get_file_reader, FileNotFoundError
reader = get_file_reader(path)(credentials=credentials)
try:
buf = reader.read(path, filename=conf)
params = json.loads(str(buf.decode('utf-8')))
except FileNotFoundError:
params = {}
if dtype:
params['dtype'] = dtype
if shape:
params['shape'] = shape
if 'dtype' not in params.keys():
raise ValueError('dtype not specified either in conf.json or as argument')
if 'shape' not in params.keys():
raise ValueError('shape not specified either in conf.json or as argument')
return params['shape'], params['dtype']
|
python
|
def _binaryconfig(path, conf, dtype=None, shape=None, credentials=None):
"""
Collects parameters to use for binary series loading.
"""
import json
from thunder.readers import get_file_reader, FileNotFoundError
reader = get_file_reader(path)(credentials=credentials)
try:
buf = reader.read(path, filename=conf)
params = json.loads(str(buf.decode('utf-8')))
except FileNotFoundError:
params = {}
if dtype:
params['dtype'] = dtype
if shape:
params['shape'] = shape
if 'dtype' not in params.keys():
raise ValueError('dtype not specified either in conf.json or as argument')
if 'shape' not in params.keys():
raise ValueError('shape not specified either in conf.json or as argument')
return params['shape'], params['dtype']
|
[
"def",
"_binaryconfig",
"(",
"path",
",",
"conf",
",",
"dtype",
"=",
"None",
",",
"shape",
"=",
"None",
",",
"credentials",
"=",
"None",
")",
":",
"import",
"json",
"from",
"thunder",
".",
"readers",
"import",
"get_file_reader",
",",
"FileNotFoundError",
"reader",
"=",
"get_file_reader",
"(",
"path",
")",
"(",
"credentials",
"=",
"credentials",
")",
"try",
":",
"buf",
"=",
"reader",
".",
"read",
"(",
"path",
",",
"filename",
"=",
"conf",
")",
"params",
"=",
"json",
".",
"loads",
"(",
"str",
"(",
"buf",
".",
"decode",
"(",
"'utf-8'",
")",
")",
")",
"except",
"FileNotFoundError",
":",
"params",
"=",
"{",
"}",
"if",
"dtype",
":",
"params",
"[",
"'dtype'",
"]",
"=",
"dtype",
"if",
"shape",
":",
"params",
"[",
"'shape'",
"]",
"=",
"shape",
"if",
"'dtype'",
"not",
"in",
"params",
".",
"keys",
"(",
")",
":",
"raise",
"ValueError",
"(",
"'dtype not specified either in conf.json or as argument'",
")",
"if",
"'shape'",
"not",
"in",
"params",
".",
"keys",
"(",
")",
":",
"raise",
"ValueError",
"(",
"'shape not specified either in conf.json or as argument'",
")",
"return",
"params",
"[",
"'shape'",
"]",
",",
"params",
"[",
"'dtype'",
"]"
] |
Collects parameters to use for binary series loading.
|
[
"Collects",
"parameters",
"to",
"use",
"for",
"binary",
"series",
"loading",
"."
] |
967ff8f3e7c2fabe1705743d95eb2746d4329786
|
https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/series/readers.py#L344-L370
|
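The helper above validates exactly two keys, shape and dtype. The following sketch writes a minimal conf.json with placeholder values in that format.

# Write a minimal conf.json containing the two keys _binaryconfig requires.
import json

with open('conf.json', 'w') as f:
    json.dump({'shape': [100, 10], 'dtype': 'float64'}, f, indent=2)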
12,283
|
thunder-project/thunder
|
thunder/series/readers.py
|
fromexample
|
def fromexample(name=None, engine=None):
"""
Load example series data.
Data are downloaded from S3, so this method requires an internet connection.
Parameters
----------
name : str
Name of dataset, options include 'iris' | 'mouse' | 'fish'.
If not specified will print options.
engine : object, default = None
Computational engine (e.g. a SparkContext for Spark)
"""
import os
import tempfile
import shutil
from boto.s3.connection import S3Connection
datasets = ['iris', 'mouse', 'fish']
if name is None:
print('Available example series datasets')
for d in datasets:
print('- ' + d)
return
check_options(name, datasets)
d = tempfile.mkdtemp()
try:
os.mkdir(os.path.join(d, 'series'))
os.mkdir(os.path.join(d, 'series', name))
conn = S3Connection(anon=True)
bucket = conn.get_bucket('thunder-sample-data')
for key in bucket.list(os.path.join('series', name) + '/'):
if not key.name.endswith('/'):
key.get_contents_to_filename(os.path.join(d, key.name))
data = frombinary(os.path.join(d, 'series', name), engine=engine)
if spark and isinstance(engine, spark):
data.cache()
data.compute()
finally:
shutil.rmtree(d)
return data
|
python
|
def fromexample(name=None, engine=None):
"""
Load example series data.
Data are downloaded from S3, so this method requires an internet connection.
Parameters
----------
name : str
Name of dataset, options include 'iris' | 'mouse' | 'fish'.
If not specified will print options.
engine : object, default = None
Computational engine (e.g. a SparkContext for Spark)
"""
import os
import tempfile
import shutil
from boto.s3.connection import S3Connection
datasets = ['iris', 'mouse', 'fish']
if name is None:
print('Available example series datasets')
for d in datasets:
print('- ' + d)
return
check_options(name, datasets)
d = tempfile.mkdtemp()
try:
os.mkdir(os.path.join(d, 'series'))
os.mkdir(os.path.join(d, 'series', name))
conn = S3Connection(anon=True)
bucket = conn.get_bucket('thunder-sample-data')
for key in bucket.list(os.path.join('series', name) + '/'):
if not key.name.endswith('/'):
key.get_contents_to_filename(os.path.join(d, key.name))
data = frombinary(os.path.join(d, 'series', name), engine=engine)
if spark and isinstance(engine, spark):
data.cache()
data.compute()
finally:
shutil.rmtree(d)
return data
|
[
"def",
"fromexample",
"(",
"name",
"=",
"None",
",",
"engine",
"=",
"None",
")",
":",
"import",
"os",
"import",
"tempfile",
"import",
"shutil",
"from",
"boto",
".",
"s3",
".",
"connection",
"import",
"S3Connection",
"datasets",
"=",
"[",
"'iris'",
",",
"'mouse'",
",",
"'fish'",
"]",
"if",
"name",
"is",
"None",
":",
"print",
"(",
"'Availiable example series datasets'",
")",
"for",
"d",
"in",
"datasets",
":",
"print",
"(",
"'- '",
"+",
"d",
")",
"return",
"check_options",
"(",
"name",
",",
"datasets",
")",
"d",
"=",
"tempfile",
".",
"mkdtemp",
"(",
")",
"try",
":",
"os",
".",
"mkdir",
"(",
"os",
".",
"path",
".",
"join",
"(",
"d",
",",
"'series'",
")",
")",
"os",
".",
"mkdir",
"(",
"os",
".",
"path",
".",
"join",
"(",
"d",
",",
"'series'",
",",
"name",
")",
")",
"conn",
"=",
"S3Connection",
"(",
"anon",
"=",
"True",
")",
"bucket",
"=",
"conn",
".",
"get_bucket",
"(",
"'thunder-sample-data'",
")",
"for",
"key",
"in",
"bucket",
".",
"list",
"(",
"os",
".",
"path",
".",
"join",
"(",
"'series'",
",",
"name",
")",
"+",
"'/'",
")",
":",
"if",
"not",
"key",
".",
"name",
".",
"endswith",
"(",
"'/'",
")",
":",
"key",
".",
"get_contents_to_filename",
"(",
"os",
".",
"path",
".",
"join",
"(",
"d",
",",
"key",
".",
"name",
")",
")",
"data",
"=",
"frombinary",
"(",
"os",
".",
"path",
".",
"join",
"(",
"d",
",",
"'series'",
",",
"name",
")",
",",
"engine",
"=",
"engine",
")",
"if",
"spark",
"and",
"isinstance",
"(",
"engine",
",",
"spark",
")",
":",
"data",
".",
"cache",
"(",
")",
"data",
".",
"compute",
"(",
")",
"finally",
":",
"shutil",
".",
"rmtree",
"(",
"d",
")",
"return",
"data"
] |
Load example series data.
Data are downloaded from S3, so this method requires an internet connection.
Parameters
----------
name : str
Name of dataset, options include 'iris' | 'mouse' | 'fish'.
If not specified will print options.
engine : object, default = None
Computational engine (e.g. a SparkContext for Spark)
|
[
"Load",
"example",
"series",
"data",
"."
] |
967ff8f3e7c2fabe1705743d95eb2746d4329786
|
https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/series/readers.py#L398-L447
|
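A short sketch for fromexample; it needs an internet connection, as the docstring notes, and 'iris' is one of the dataset names the record lists.

from thunder.series.readers import fromexample

fromexample()               # with no name, prints the available dataset names
data = fromexample('iris')  # downloads the 'iris' example series from S3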
12,284
|
thunder-project/thunder
|
thunder/series/writers.py
|
tobinary
|
def tobinary(series, path, prefix='series', overwrite=False, credentials=None):
"""
Writes out data to binary format.
Parameters
----------
series : Series
The data to write
path : string path or URI to directory to be created
Output files will be written underneath path.
Directory will be created as a result of this call.
prefix : str, optional, default = 'series'
String prefix for files.
overwrite : bool
If true, path and all its contents will be deleted and
recreated as part of this call.
"""
from six import BytesIO
from thunder.utils import check_path
from thunder.writers import get_parallel_writer
if not overwrite:
check_path(path, credentials=credentials)
overwrite = True
def tobuffer(kv):
firstkey = None
buf = BytesIO()
for k, v in kv:
if firstkey is None:
firstkey = k
buf.write(v.tostring())
val = buf.getvalue()
buf.close()
if firstkey is None:
return iter([])
else:
label = prefix + '-' + getlabel(firstkey) + ".bin"
return iter([(label, val)])
writer = get_parallel_writer(path)(path, overwrite=overwrite, credentials=credentials)
if series.mode == 'spark':
binary = series.values.tordd().sortByKey().mapPartitions(tobuffer)
binary.foreach(writer.write)
else:
basedims = [series.shape[d] for d in series.baseaxes]
def split(k):
ind = unravel_index(k, basedims)
return ind, series.values[ind]
buf = tobuffer([split(i) for i in range(prod(basedims))])
[writer.write(b) for b in buf]
shape = series.shape
dtype = series.dtype
write_config(path, shape=shape, dtype=dtype, overwrite=overwrite, credentials=credentials)
|
python
|
def tobinary(series, path, prefix='series', overwrite=False, credentials=None):
"""
Writes out data to binary format.
Parameters
----------
series : Series
The data to write
path : string path or URI to directory to be created
Output files will be written underneath path.
Directory will be created as a result of this call.
prefix : str, optional, default = 'series'
String prefix for files.
overwrite : bool
If true, path and all its contents will be deleted and
recreated as part of this call.
"""
from six import BytesIO
from thunder.utils import check_path
from thunder.writers import get_parallel_writer
if not overwrite:
check_path(path, credentials=credentials)
overwrite = True
def tobuffer(kv):
firstkey = None
buf = BytesIO()
for k, v in kv:
if firstkey is None:
firstkey = k
buf.write(v.tostring())
val = buf.getvalue()
buf.close()
if firstkey is None:
return iter([])
else:
label = prefix + '-' + getlabel(firstkey) + ".bin"
return iter([(label, val)])
writer = get_parallel_writer(path)(path, overwrite=overwrite, credentials=credentials)
if series.mode == 'spark':
binary = series.values.tordd().sortByKey().mapPartitions(tobuffer)
binary.foreach(writer.write)
else:
basedims = [series.shape[d] for d in series.baseaxes]
def split(k):
ind = unravel_index(k, basedims)
return ind, series.values[ind]
buf = tobuffer([split(i) for i in range(prod(basedims))])
[writer.write(b) for b in buf]
shape = series.shape
dtype = series.dtype
write_config(path, shape=shape, dtype=dtype, overwrite=overwrite, credentials=credentials)
|
[
"def",
"tobinary",
"(",
"series",
",",
"path",
",",
"prefix",
"=",
"'series'",
",",
"overwrite",
"=",
"False",
",",
"credentials",
"=",
"None",
")",
":",
"from",
"six",
"import",
"BytesIO",
"from",
"thunder",
".",
"utils",
"import",
"check_path",
"from",
"thunder",
".",
"writers",
"import",
"get_parallel_writer",
"if",
"not",
"overwrite",
":",
"check_path",
"(",
"path",
",",
"credentials",
"=",
"credentials",
")",
"overwrite",
"=",
"True",
"def",
"tobuffer",
"(",
"kv",
")",
":",
"firstkey",
"=",
"None",
"buf",
"=",
"BytesIO",
"(",
")",
"for",
"k",
",",
"v",
"in",
"kv",
":",
"if",
"firstkey",
"is",
"None",
":",
"firstkey",
"=",
"k",
"buf",
".",
"write",
"(",
"v",
".",
"tostring",
"(",
")",
")",
"val",
"=",
"buf",
".",
"getvalue",
"(",
")",
"buf",
".",
"close",
"(",
")",
"if",
"firstkey",
"is",
"None",
":",
"return",
"iter",
"(",
"[",
"]",
")",
"else",
":",
"label",
"=",
"prefix",
"+",
"'-'",
"+",
"getlabel",
"(",
"firstkey",
")",
"+",
"\".bin\"",
"return",
"iter",
"(",
"[",
"(",
"label",
",",
"val",
")",
"]",
")",
"writer",
"=",
"get_parallel_writer",
"(",
"path",
")",
"(",
"path",
",",
"overwrite",
"=",
"overwrite",
",",
"credentials",
"=",
"credentials",
")",
"if",
"series",
".",
"mode",
"==",
"'spark'",
":",
"binary",
"=",
"series",
".",
"values",
".",
"tordd",
"(",
")",
".",
"sortByKey",
"(",
")",
".",
"mapPartitions",
"(",
"tobuffer",
")",
"binary",
".",
"foreach",
"(",
"writer",
".",
"write",
")",
"else",
":",
"basedims",
"=",
"[",
"series",
".",
"shape",
"[",
"d",
"]",
"for",
"d",
"in",
"series",
".",
"baseaxes",
"]",
"def",
"split",
"(",
"k",
")",
":",
"ind",
"=",
"unravel_index",
"(",
"k",
",",
"basedims",
")",
"return",
"ind",
",",
"series",
".",
"values",
"[",
"ind",
"]",
"buf",
"=",
"tobuffer",
"(",
"[",
"split",
"(",
"i",
")",
"for",
"i",
"in",
"range",
"(",
"prod",
"(",
"basedims",
")",
")",
"]",
")",
"[",
"writer",
".",
"write",
"(",
"b",
")",
"for",
"b",
"in",
"buf",
"]",
"shape",
"=",
"series",
".",
"shape",
"dtype",
"=",
"series",
".",
"dtype",
"write_config",
"(",
"path",
",",
"shape",
"=",
"shape",
",",
"dtype",
"=",
"dtype",
",",
"overwrite",
"=",
"overwrite",
",",
"credentials",
"=",
"credentials",
")"
] |
Writes out data to binary format.
Parameters
----------
series : Series
The data to write
path : string path or URI to directory to be created
Output files will be written underneath path.
Directory will be created as a result of this call.
prefix : str, optional, default = 'series'
String prefix for files.
overwrite : bool
If true, path and all its contents will be deleted and
recreated as part of this call.
|
[
"Writes",
"out",
"data",
"to",
"binary",
"format",
"."
] |
967ff8f3e7c2fabe1705743d95eb2746d4329786
|
https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/series/writers.py#L3-L65
|
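A hedged round-trip sketch for tobinary in local mode; the output directory is a placeholder and the Series construction reuses fromarray from earlier in this file.

# Placeholder output path; writes series-*.bin files plus conf.json and SUCCESS.
from numpy import arange
from thunder.series.readers import fromarray, frombinary
from thunder.series.writers import tobinary

series = fromarray(arange(24).reshape(2, 3, 4))
tobinary(series, 'path/to/output', overwrite=True)
restored = frombinary('path/to/output')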
12,285
|
thunder-project/thunder
|
thunder/series/writers.py
|
write_config
|
def write_config(path, shape=None, dtype=None, name="conf.json", overwrite=True, credentials=None):
"""
Write a conf.json file with required information to load Series binary data.
"""
import json
from thunder.writers import get_file_writer
writer = get_file_writer(path)
conf = {'shape': shape, 'dtype': str(dtype)}
confwriter = writer(path, name, overwrite=overwrite, credentials=credentials)
confwriter.write(json.dumps(conf, indent=2))
successwriter = writer(path, "SUCCESS", overwrite=overwrite, credentials=credentials)
successwriter.write('')
|
python
|
def write_config(path, shape=None, dtype=None, name="conf.json", overwrite=True, credentials=None):
"""
Write a conf.json file with required information to load Series binary data.
"""
import json
from thunder.writers import get_file_writer
writer = get_file_writer(path)
conf = {'shape': shape, 'dtype': str(dtype)}
confwriter = writer(path, name, overwrite=overwrite, credentials=credentials)
confwriter.write(json.dumps(conf, indent=2))
successwriter = writer(path, "SUCCESS", overwrite=overwrite, credentials=credentials)
successwriter.write('')
|
[
"def",
"write_config",
"(",
"path",
",",
"shape",
"=",
"None",
",",
"dtype",
"=",
"None",
",",
"name",
"=",
"\"conf.json\"",
",",
"overwrite",
"=",
"True",
",",
"credentials",
"=",
"None",
")",
":",
"import",
"json",
"from",
"thunder",
".",
"writers",
"import",
"get_file_writer",
"writer",
"=",
"get_file_writer",
"(",
"path",
")",
"conf",
"=",
"{",
"'shape'",
":",
"shape",
",",
"'dtype'",
":",
"str",
"(",
"dtype",
")",
"}",
"confwriter",
"=",
"writer",
"(",
"path",
",",
"name",
",",
"overwrite",
"=",
"overwrite",
",",
"credentials",
"=",
"credentials",
")",
"confwriter",
".",
"write",
"(",
"json",
".",
"dumps",
"(",
"conf",
",",
"indent",
"=",
"2",
")",
")",
"successwriter",
"=",
"writer",
"(",
"path",
",",
"\"SUCCESS\"",
",",
"overwrite",
"=",
"overwrite",
",",
"credentials",
"=",
"credentials",
")",
"successwriter",
".",
"write",
"(",
"''",
")"
] |
Write a conf.json file with required information to load Series binary data.
|
[
"Write",
"a",
"conf",
".",
"json",
"file",
"with",
"required",
"information",
"to",
"load",
"Series",
"binary",
"data",
"."
] |
967ff8f3e7c2fabe1705743d95eb2746d4329786
|
https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/series/writers.py#L67-L81
|
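A minimal sketch for write_config; the path, shape, and dtype are placeholders.

# Writes conf.json (with shape and dtype) and an empty SUCCESS marker.
from thunder.series.writers import write_config

write_config('path/to/output', shape=(2, 3, 4), dtype='float64')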
12,286
|
thunder-project/thunder
|
thunder/images/images.py
|
Images.toblocks
|
def toblocks(self, chunk_size='auto', padding=None):
"""
Convert to blocks which represent subdivisions of the images data.
Parameters
----------
chunk_size : str or tuple, size of image chunk used during conversion, default = 'auto'
String interpreted as memory size (in kilobytes, e.g. '64').
The exception is the string 'auto'. In spark mode, 'auto' will choose a chunk size to make the
resulting blocks ~100 MB in size. In local mode, 'auto' will create a single block.
Tuple of ints interpreted as 'pixels per dimension'.
padding : tuple or int
Amount of padding along each dimensions for blocks. If an int, then
the same amount of padding is used for all dimensions
"""
from thunder.blocks.blocks import Blocks
from thunder.blocks.local import LocalChunks
if self.mode == 'spark':
if chunk_size is 'auto':
chunk_size = str(max([int(1e5/self.shape[0]), 1]))
chunks = self.values.chunk(chunk_size, padding=padding).keys_to_values((0,))
if self.mode == 'local':
if chunk_size is 'auto':
chunk_size = self.shape[1:]
chunks = LocalChunks.chunk(self.values, chunk_size, padding=padding)
return Blocks(chunks)
|
python
|
def toblocks(self, chunk_size='auto', padding=None):
"""
Convert to blocks which represent subdivisions of the images data.
Parameters
----------
chunk_size : str or tuple, size of image chunk used during conversion, default = 'auto'
String interpreted as memory size (in kilobytes, e.g. '64').
The exception is the string 'auto'. In spark mode, 'auto' will choose a chunk size to make the
resulting blocks ~100 MB in size. In local mode, 'auto' will create a single block.
Tuple of ints interpreted as 'pixels per dimension'.
padding : tuple or int
Amount of padding along each dimensions for blocks. If an int, then
the same amount of padding is used for all dimensions
"""
from thunder.blocks.blocks import Blocks
from thunder.blocks.local import LocalChunks
if self.mode == 'spark':
if chunk_size is 'auto':
chunk_size = str(max([int(1e5/self.shape[0]), 1]))
chunks = self.values.chunk(chunk_size, padding=padding).keys_to_values((0,))
if self.mode == 'local':
if chunk_size is 'auto':
chunk_size = self.shape[1:]
chunks = LocalChunks.chunk(self.values, chunk_size, padding=padding)
return Blocks(chunks)
|
[
"def",
"toblocks",
"(",
"self",
",",
"chunk_size",
"=",
"'auto'",
",",
"padding",
"=",
"None",
")",
":",
"from",
"thunder",
".",
"blocks",
".",
"blocks",
"import",
"Blocks",
"from",
"thunder",
".",
"blocks",
".",
"local",
"import",
"LocalChunks",
"if",
"self",
".",
"mode",
"==",
"'spark'",
":",
"if",
"chunk_size",
"is",
"'auto'",
":",
"chunk_size",
"=",
"str",
"(",
"max",
"(",
"[",
"int",
"(",
"1e5",
"/",
"self",
".",
"shape",
"[",
"0",
"]",
")",
",",
"1",
"]",
")",
")",
"chunks",
"=",
"self",
".",
"values",
".",
"chunk",
"(",
"chunk_size",
",",
"padding",
"=",
"padding",
")",
".",
"keys_to_values",
"(",
"(",
"0",
",",
")",
")",
"if",
"self",
".",
"mode",
"==",
"'local'",
":",
"if",
"chunk_size",
"is",
"'auto'",
":",
"chunk_size",
"=",
"self",
".",
"shape",
"[",
"1",
":",
"]",
"chunks",
"=",
"LocalChunks",
".",
"chunk",
"(",
"self",
".",
"values",
",",
"chunk_size",
",",
"padding",
"=",
"padding",
")",
"return",
"Blocks",
"(",
"chunks",
")"
] |
Convert to blocks which represent subdivisions of the images data.
Parameters
----------
chunk_size : str or tuple, size of image chunk used during conversion, default = 'auto'
String interpreted as memory size (in kilobytes, e.g. '64').
The exception is the string 'auto'. In spark mode, 'auto' will choose a chunk size to make the
resulting blocks ~100 MB in size. In local mode, 'auto' will create a single block.
Tuple of ints interpreted as 'pixels per dimension'.
padding : tuple or int
Amount of padding along each dimensions for blocks. If an int, then
the same amount of padding is used for all dimensions
|
[
"Convert",
"to",
"blocks",
"which",
"represent",
"subdivisions",
"of",
"the",
"images",
"data",
"."
] |
967ff8f3e7c2fabe1705743d95eb2746d4329786
|
https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/images/images.py#L60-L89
|
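A hedged local-mode sketch for toblocks. Building the Images object with thunder.images.readers.fromarray from a bare numpy array is an assumption suggested by the tospark source later in this file.

# Three 4x4 images; 'auto' chunking yields a single block in local mode.
from numpy import arange
from thunder.images.readers import fromarray

images = fromarray(arange(3 * 4 * 4).reshape(3, 4, 4))
blocks = images.toblocks()
# blocks = images.toblocks(chunk_size=(2, 2))  # or pixels per dimension, per the docstring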
12,287
|
thunder-project/thunder
|
thunder/images/images.py
|
Images.toseries
|
def toseries(self, chunk_size='auto'):
"""
Converts to series data.
This method is equivalent to images.toblocks(size).toSeries().
Parameters
----------
chunk_size : str or tuple, size of image chunk used during conversion, default = 'auto'
String interpreted as memory size (in kilobytes, e.g. '64').
The exception is the string 'auto', which will choose a chunk size to make the
resulting blocks ~100 MB in size. Tuple of ints interpreted as 'pixels per dimension'.
Only valid in spark mode.
"""
from thunder.series.series import Series
if chunk_size is 'auto':
chunk_size = str(max([int(1e5/self.shape[0]), 1]))
n = len(self.shape) - 1
index = arange(self.shape[0])
if self.mode == 'spark':
return Series(self.values.swap((0,), tuple(range(n)), size=chunk_size), index=index)
if self.mode == 'local':
return Series(self.values.transpose(tuple(range(1, n+1)) + (0,)), index=index)
|
python
|
def toseries(self, chunk_size='auto'):
"""
Converts to series data.
This method is equivalent to images.toblocks(size).toSeries().
Parameters
----------
chunk_size : str or tuple, size of image chunk used during conversion, default = 'auto'
String interpreted as memory size (in kilobytes, e.g. '64').
The exception is the string 'auto', which will choose a chunk size to make the
resulting blocks ~100 MB in size. Tuple of ints interpreted as 'pixels per dimension'.
Only valid in spark mode.
"""
from thunder.series.series import Series
if chunk_size is 'auto':
chunk_size = str(max([int(1e5/self.shape[0]), 1]))
n = len(self.shape) - 1
index = arange(self.shape[0])
if self.mode == 'spark':
return Series(self.values.swap((0,), tuple(range(n)), size=chunk_size), index=index)
if self.mode == 'local':
return Series(self.values.transpose(tuple(range(1, n+1)) + (0,)), index=index)
|
[
"def",
"toseries",
"(",
"self",
",",
"chunk_size",
"=",
"'auto'",
")",
":",
"from",
"thunder",
".",
"series",
".",
"series",
"import",
"Series",
"if",
"chunk_size",
"is",
"'auto'",
":",
"chunk_size",
"=",
"str",
"(",
"max",
"(",
"[",
"int",
"(",
"1e5",
"/",
"self",
".",
"shape",
"[",
"0",
"]",
")",
",",
"1",
"]",
")",
")",
"n",
"=",
"len",
"(",
"self",
".",
"shape",
")",
"-",
"1",
"index",
"=",
"arange",
"(",
"self",
".",
"shape",
"[",
"0",
"]",
")",
"if",
"self",
".",
"mode",
"==",
"'spark'",
":",
"return",
"Series",
"(",
"self",
".",
"values",
".",
"swap",
"(",
"(",
"0",
",",
")",
",",
"tuple",
"(",
"range",
"(",
"n",
")",
")",
",",
"size",
"=",
"chunk_size",
")",
",",
"index",
"=",
"index",
")",
"if",
"self",
".",
"mode",
"==",
"'local'",
":",
"return",
"Series",
"(",
"self",
".",
"values",
".",
"transpose",
"(",
"tuple",
"(",
"range",
"(",
"1",
",",
"n",
"+",
"1",
")",
")",
"+",
"(",
"0",
",",
")",
")",
",",
"index",
"=",
"index",
")"
] |
Converts to series data.
This method is equivalent to images.toblocks(size).toSeries().
Parameters
----------
chunk_size : str or tuple, size of image chunk used during conversion, default = 'auto'
String interpreted as memory size (in kilobytes, e.g. '64').
The exception is the string 'auto', which will choose a chunk size to make the
resulting blocks ~100 MB in size. Tuple of ints interpreted as 'pixels per dimension'.
Only valid in spark mode.
|
[
"Converts",
"to",
"series",
"data",
"."
] |
967ff8f3e7c2fabe1705743d95eb2746d4329786
|
https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/images/images.py#L91-L117
|
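A hedged local-mode sketch for toseries; the Images construction rests on the same assumption as the toblocks sketch above.

# Each pixel becomes one record, indexed by position in the image stack.
from numpy import arange
from thunder.images.readers import fromarray

images = fromarray(arange(3 * 4 * 4).reshape(3, 4, 4))
series = images.toseries()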
12,288
|
thunder-project/thunder
|
thunder/images/images.py
|
Images.tospark
|
def tospark(self, engine=None):
"""
Convert to distributed spark mode.
"""
from thunder.images.readers import fromarray
if self.mode == 'spark':
logging.getLogger('thunder').warn('images already in spark mode')
pass
if engine is None:
raise ValueError('Must provide a SparkContext')
return fromarray(self.toarray(), engine=engine)
|
python
|
def tospark(self, engine=None):
"""
Convert to distributed spark mode.
"""
from thunder.images.readers import fromarray
if self.mode == 'spark':
logging.getLogger('thunder').warn('images already in spark mode')
pass
if engine is None:
raise ValueError('Must provide a SparkContext')
return fromarray(self.toarray(), engine=engine)
|
[
"def",
"tospark",
"(",
"self",
",",
"engine",
"=",
"None",
")",
":",
"from",
"thunder",
".",
"images",
".",
"readers",
"import",
"fromarray",
"if",
"self",
".",
"mode",
"==",
"'spark'",
":",
"logging",
".",
"getLogger",
"(",
"'thunder'",
")",
".",
"warn",
"(",
"'images already in spark mode'",
")",
"pass",
"if",
"engine",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"'Must provide a SparkContext'",
")",
"return",
"fromarray",
"(",
"self",
".",
"toarray",
"(",
")",
",",
"engine",
"=",
"engine",
")"
] |
Convert to distributed spark mode.
|
[
"Convert",
"to",
"distributed",
"spark",
"mode",
"."
] |
967ff8f3e7c2fabe1705743d95eb2746d4329786
|
https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/images/images.py#L131-L144
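A hedged sketch of the tospark record above; it assumes a running SparkContext, which the method itself requires (it raises ValueError when engine is None). Names and the random test array are illustrative:

import numpy as np
from pyspark import SparkContext
from thunder.images.readers import fromarray

sc = SparkContext(master='local[2]', appName='tospark-sketch')  # illustrative context

imgs = fromarray(np.random.rand(3, 4, 5))   # starts in local mode
distributed = imgs.tospark(engine=sc)       # same images, now distributed via Spark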
|
12,289
|
thunder-project/thunder
|
thunder/images/images.py
|
Images.foreach
|
def foreach(self, func):
"""
Execute a function on each image.
Functions can have side effects. There is no return value.
"""
if self.mode == 'spark':
self.values.tordd().map(lambda kv: (kv[0][0], kv[1])).foreach(func)
else:
[func(kv) for kv in enumerate(self.values)]
|
python
|
def foreach(self, func):
"""
Execute a function on each image.
Functions can have side effects. There is no return value.
"""
if self.mode == 'spark':
self.values.tordd().map(lambda kv: (kv[0][0], kv[1])).foreach(func)
else:
[func(kv) for kv in enumerate(self.values)]
|
[
"def",
"foreach",
"(",
"self",
",",
"func",
")",
":",
"if",
"self",
".",
"mode",
"==",
"'spark'",
":",
"self",
".",
"values",
".",
"tordd",
"(",
")",
".",
"map",
"(",
"lambda",
"kv",
":",
"(",
"kv",
"[",
"0",
"]",
"[",
"0",
"]",
",",
"kv",
"[",
"1",
"]",
")",
")",
".",
"foreach",
"(",
"func",
")",
"else",
":",
"[",
"func",
"(",
"kv",
")",
"for",
"kv",
"in",
"enumerate",
"(",
"self",
".",
"values",
")",
"]"
] |
Execute a function on each image.
Functions can have side effects. There is no return value.
|
[
"Execute",
"a",
"function",
"on",
"each",
"image",
"."
] |
967ff8f3e7c2fabe1705743d95eb2746d4329786
|
https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/images/images.py#L146-L155
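A small sketch of foreach from the record above; in local mode each call receives a (key, image) pair from enumerate, so the function indexes into that tuple (test data and names are illustrative):

import numpy as np
from thunder.images.readers import fromarray

imgs = fromarray(np.random.rand(3, 4, 5))

# side effect only, no return value: print each image's index and mean intensity
imgs.foreach(lambda kv: print(kv[0], kv[1].mean()))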
|
12,290
|
thunder-project/thunder
|
thunder/images/images.py
|
Images.sample
|
def sample(self, nsamples=100, seed=None):
"""
Extract a random sample of images.
Parameters
----------
nsamples : int, optional, default = 100
The number of data points to sample.
seed : int, optional, default = None
Random seed.
"""
if nsamples < 1:
raise ValueError("Number of samples must be larger than 0, got '%g'" % nsamples)
if seed is None:
seed = random.randint(0, 2 ** 32)
if self.mode == 'spark':
result = asarray(self.values.tordd().values().takeSample(False, nsamples, seed))
else:
inds = [int(k) for k in random.rand(nsamples) * self.shape[0]]
result = asarray([self.values[i] for i in inds])
return self._constructor(result)
|
python
|
def sample(self, nsamples=100, seed=None):
"""
Extract a random sample of images.
Parameters
----------
nsamples : int, optional, default = 100
The number of data points to sample.
seed : int, optional, default = None
Random seed.
"""
if nsamples < 1:
raise ValueError("Number of samples must be larger than 0, got '%g'" % nsamples)
if seed is None:
seed = random.randint(0, 2 ** 32)
if self.mode == 'spark':
result = asarray(self.values.tordd().values().takeSample(False, nsamples, seed))
else:
inds = [int(k) for k in random.rand(nsamples) * self.shape[0]]
result = asarray([self.values[i] for i in inds])
return self._constructor(result)
|
[
"def",
"sample",
"(",
"self",
",",
"nsamples",
"=",
"100",
",",
"seed",
"=",
"None",
")",
":",
"if",
"nsamples",
"<",
"1",
":",
"raise",
"ValueError",
"(",
"\"Number of samples must be larger than 0, got '%g'\"",
"%",
"nsamples",
")",
"if",
"seed",
"is",
"None",
":",
"seed",
"=",
"random",
".",
"randint",
"(",
"0",
",",
"2",
"**",
"32",
")",
"if",
"self",
".",
"mode",
"==",
"'spark'",
":",
"result",
"=",
"asarray",
"(",
"self",
".",
"values",
".",
"tordd",
"(",
")",
".",
"values",
"(",
")",
".",
"takeSample",
"(",
"False",
",",
"nsamples",
",",
"seed",
")",
")",
"else",
":",
"inds",
"=",
"[",
"int",
"(",
"k",
")",
"for",
"k",
"in",
"random",
".",
"rand",
"(",
"nsamples",
")",
"*",
"self",
".",
"shape",
"[",
"0",
"]",
"]",
"result",
"=",
"asarray",
"(",
"[",
"self",
".",
"values",
"[",
"i",
"]",
"for",
"i",
"in",
"inds",
"]",
")",
"return",
"self",
".",
"_constructor",
"(",
"result",
")"
] |
Extract a random sample of images.
Parameters
----------
nsamples : int, optional, default = 100
The number of data points to sample.
seed : int, optional, default = None
Random seed.
|
[
"Extract",
"a",
"random",
"sample",
"of",
"images",
"."
] |
967ff8f3e7c2fabe1705743d95eb2746d4329786
|
https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/images/images.py#L157-L182
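Usage sketch for sample above, in local mode with an illustrative random array:

import numpy as np
from thunder.images.readers import fromarray

imgs = fromarray(np.random.rand(100, 8, 8))

# draw 10 of the 100 images at random
subset = imgs.sample(nsamples=10)
print(subset.shape)  # (10, 8, 8)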
|
12,291
|
thunder-project/thunder
|
thunder/images/images.py
|
Images.var
|
def var(self):
"""
Compute the variance across images.
"""
return self._constructor(self.values.var(axis=0, keepdims=True))
|
python
|
def var(self):
"""
Compute the variance across images.
"""
return self._constructor(self.values.var(axis=0, keepdims=True))
|
[
"def",
"var",
"(",
"self",
")",
":",
"return",
"self",
".",
"_constructor",
"(",
"self",
".",
"values",
".",
"var",
"(",
"axis",
"=",
"0",
",",
"keepdims",
"=",
"True",
")",
")"
] |
Compute the variance across images.
|
[
"Compute",
"the",
"variance",
"across",
"images",
"."
] |
967ff8f3e7c2fabe1705743d95eb2746d4329786
|
https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/images/images.py#L201-L205
|
12,292
|
thunder-project/thunder
|
thunder/images/images.py
|
Images.std
|
def std(self):
"""
Compute the standard deviation across images.
"""
return self._constructor(self.values.std(axis=0, keepdims=True))
|
python
|
def std(self):
"""
Compute the standard deviation across images.
"""
return self._constructor(self.values.std(axis=0, keepdims=True))
|
[
"def",
"std",
"(",
"self",
")",
":",
"return",
"self",
".",
"_constructor",
"(",
"self",
".",
"values",
".",
"std",
"(",
"axis",
"=",
"0",
",",
"keepdims",
"=",
"True",
")",
")"
] |
Compute the standard deviation across images.
|
[
"Compute",
"the",
"standard",
"deviation",
"across",
"images",
"."
] |
967ff8f3e7c2fabe1705743d95eb2746d4329786
|
https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/images/images.py#L207-L211
|
12,293
|
thunder-project/thunder
|
thunder/images/images.py
|
Images.squeeze
|
def squeeze(self):
"""
Remove single-dimensional axes from images.
"""
axis = tuple(range(1, len(self.shape) - 1)) if prod(self.shape[1:]) == 1 else None
return self.map(lambda x: x.squeeze(axis=axis))
|
python
|
def squeeze(self):
"""
Remove single-dimensional axes from images.
"""
axis = tuple(range(1, len(self.shape) - 1)) if prod(self.shape[1:]) == 1 else None
return self.map(lambda x: x.squeeze(axis=axis))
|
[
"def",
"squeeze",
"(",
"self",
")",
":",
"axis",
"=",
"tuple",
"(",
"range",
"(",
"1",
",",
"len",
"(",
"self",
".",
"shape",
")",
"-",
"1",
")",
")",
"if",
"prod",
"(",
"self",
".",
"shape",
"[",
"1",
":",
"]",
")",
"==",
"1",
"else",
"None",
"return",
"self",
".",
"map",
"(",
"lambda",
"x",
":",
"x",
".",
"squeeze",
"(",
"axis",
"=",
"axis",
")",
")"
] |
Remove single-dimensional axes from images.
|
[
"Remove",
"single",
"-",
"dimensional",
"axes",
"from",
"images",
"."
] |
967ff8f3e7c2fabe1705743d95eb2746d4329786
|
https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/images/images.py#L231-L236
|
12,294
|
thunder-project/thunder
|
thunder/images/images.py
|
Images.max_projection
|
def max_projection(self, axis=2):
"""
Compute maximum projections of images along a dimension.
Parameters
----------
axis : int, optional, default = 2
Which axis to compute projection along.
"""
if axis >= size(self.value_shape):
raise Exception('Axis for projection (%s) exceeds '
'image dimensions (%s-%s)' % (axis, 0, size(self.value_shape)-1))
new_value_shape = list(self.value_shape)
del new_value_shape[axis]
return self.map(lambda x: amax(x, axis), value_shape=new_value_shape)
|
python
|
def max_projection(self, axis=2):
"""
Compute maximum projections of images along a dimension.
Parameters
----------
axis : int, optional, default = 2
Which axis to compute projection along.
"""
if axis >= size(self.value_shape):
raise Exception('Axis for projection (%s) exceeds '
'image dimensions (%s-%s)' % (axis, 0, size(self.value_shape)-1))
new_value_shape = list(self.value_shape)
del new_value_shape[axis]
return self.map(lambda x: amax(x, axis), value_shape=new_value_shape)
|
[
"def",
"max_projection",
"(",
"self",
",",
"axis",
"=",
"2",
")",
":",
"if",
"axis",
">=",
"size",
"(",
"self",
".",
"value_shape",
")",
":",
"raise",
"Exception",
"(",
"'Axis for projection (%s) exceeds '",
"'image dimensions (%s-%s)'",
"%",
"(",
"axis",
",",
"0",
",",
"size",
"(",
"self",
".",
"value_shape",
")",
"-",
"1",
")",
")",
"new_value_shape",
"=",
"list",
"(",
"self",
".",
"value_shape",
")",
"del",
"new_value_shape",
"[",
"axis",
"]",
"return",
"self",
".",
"map",
"(",
"lambda",
"x",
":",
"amax",
"(",
"x",
",",
"axis",
")",
",",
"value_shape",
"=",
"new_value_shape",
")"
] |
Compute maximum projections of images along a dimension.
Parameters
----------
axis : int, optional, default = 2
Which axis to compute projection along.
|
[
"Compute",
"maximum",
"projections",
"of",
"images",
"along",
"a",
"dimension",
"."
] |
967ff8f3e7c2fabe1705743d95eb2746d4329786
|
https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/images/images.py#L258-L273
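Sketch for max_projection above; axis indexes the per-image (value) shape, so axis=2 collapses the third spatial dimension of each volume (test data illustrative):

import numpy as np
from thunder.images.readers import fromarray

# three 4x5x6 volumes
imgs = fromarray(np.random.rand(3, 4, 5, 6))

proj = imgs.max_projection(axis=2)
print(proj.shape)  # (3, 4, 5): the 6-voxel axis is collapsed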
|
12,295
|
thunder-project/thunder
|
thunder/images/images.py
|
Images.max_min_projection
|
def max_min_projection(self, axis=2):
"""
Compute maximum-minimum projection along a dimension.
This computes the sum of the maximum and minimum values.
Parameters
----------
axis : int, optional, default = 2
Which axis to compute projection along.
"""
if axis >= size(self.value_shape):
raise Exception('Axis for projection (%s) exceeds '
'image dimensions (%s-%s)' % (axis, 0, size(self.value_shape)-1))
new_value_shape = list(self.value_shape)
del new_value_shape[axis]
return self.map(lambda x: amax(x, axis) + amin(x, axis), value_shape=new_value_shape)
|
python
|
def max_min_projection(self, axis=2):
"""
Compute maximum-minimum projection along a dimension.
This computes the sum of the maximum and minimum values.
Parameters
----------
axis : int, optional, default = 2
Which axis to compute projection along.
"""
if axis >= size(self.value_shape):
raise Exception('Axis for projection (%s) exceeds '
'image dimensions (%s-%s)' % (axis, 0, size(self.value_shape)-1))
new_value_shape = list(self.value_shape)
del new_value_shape[axis]
return self.map(lambda x: amax(x, axis) + amin(x, axis), value_shape=new_value_shape)
|
[
"def",
"max_min_projection",
"(",
"self",
",",
"axis",
"=",
"2",
")",
":",
"if",
"axis",
">=",
"size",
"(",
"self",
".",
"value_shape",
")",
":",
"raise",
"Exception",
"(",
"'Axis for projection (%s) exceeds '",
"'image dimensions (%s-%s)'",
"%",
"(",
"axis",
",",
"0",
",",
"size",
"(",
"self",
".",
"value_shape",
")",
"-",
"1",
")",
")",
"new_value_shape",
"=",
"list",
"(",
"self",
".",
"value_shape",
")",
"del",
"new_value_shape",
"[",
"axis",
"]",
"return",
"self",
".",
"map",
"(",
"lambda",
"x",
":",
"amax",
"(",
"x",
",",
"axis",
")",
"+",
"amin",
"(",
"x",
",",
"axis",
")",
",",
"value_shape",
"=",
"new_value_shape",
")"
] |
Compute maximum-minimum projection along a dimension.
This computes the sum of the maximum and minimum values.
Parameters
----------
axis : int, optional, default = 2
Which axis to compute projection along.
|
[
"Compute",
"maximum",
"-",
"minimum",
"projection",
"along",
"a",
"dimension",
"."
] |
967ff8f3e7c2fabe1705743d95eb2746d4329786
|
https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/images/images.py#L275-L292
|
12,296
|
thunder-project/thunder
|
thunder/images/images.py
|
Images.subsample
|
def subsample(self, factor):
"""
Downsample images by an integer factor.
Parameters
----------
factor : positive int or tuple of positive ints
Stride to use in subsampling. If a single int is passed,
each dimension of the image will be downsampled by this factor.
If a tuple is passed, each dimension will be downsampled by the given factor.
"""
value_shape = self.value_shape
ndims = len(value_shape)
if not hasattr(factor, '__len__'):
factor = [factor] * ndims
factor = [int(sf) for sf in factor]
if any((sf <= 0 for sf in factor)):
raise ValueError('All sampling factors must be positive; got ' + str(factor))
def roundup(a, b):
return (a + b - 1) // b
slices = [slice(0, value_shape[i], factor[i]) for i in range(ndims)]
new_value_shape = tuple([roundup(value_shape[i], factor[i]) for i in range(ndims)])
return self.map(lambda v: v[slices], value_shape=new_value_shape)
|
python
|
def subsample(self, factor):
"""
Downsample images by an integer factor.
Parameters
----------
factor : positive int or tuple of positive ints
Stride to use in subsampling. If a single int is passed,
each dimension of the image will be downsampled by this factor.
If a tuple is passed, each dimension will be downsampled by the given factor.
"""
value_shape = self.value_shape
ndims = len(value_shape)
if not hasattr(factor, '__len__'):
factor = [factor] * ndims
factor = [int(sf) for sf in factor]
if any((sf <= 0 for sf in factor)):
raise ValueError('All sampling factors must be positive; got ' + str(factor))
def roundup(a, b):
return (a + b - 1) // b
slices = [slice(0, value_shape[i], factor[i]) for i in range(ndims)]
new_value_shape = tuple([roundup(value_shape[i], factor[i]) for i in range(ndims)])
return self.map(lambda v: v[slices], value_shape=new_value_shape)
|
[
"def",
"subsample",
"(",
"self",
",",
"factor",
")",
":",
"value_shape",
"=",
"self",
".",
"value_shape",
"ndims",
"=",
"len",
"(",
"value_shape",
")",
"if",
"not",
"hasattr",
"(",
"factor",
",",
"'__len__'",
")",
":",
"factor",
"=",
"[",
"factor",
"]",
"*",
"ndims",
"factor",
"=",
"[",
"int",
"(",
"sf",
")",
"for",
"sf",
"in",
"factor",
"]",
"if",
"any",
"(",
"(",
"sf",
"<=",
"0",
"for",
"sf",
"in",
"factor",
")",
")",
":",
"raise",
"ValueError",
"(",
"'All sampling factors must be positive; got '",
"+",
"str",
"(",
"factor",
")",
")",
"def",
"roundup",
"(",
"a",
",",
"b",
")",
":",
"return",
"(",
"a",
"+",
"b",
"-",
"1",
")",
"//",
"b",
"slices",
"=",
"[",
"slice",
"(",
"0",
",",
"value_shape",
"[",
"i",
"]",
",",
"factor",
"[",
"i",
"]",
")",
"for",
"i",
"in",
"range",
"(",
"ndims",
")",
"]",
"new_value_shape",
"=",
"tuple",
"(",
"[",
"roundup",
"(",
"value_shape",
"[",
"i",
"]",
",",
"factor",
"[",
"i",
"]",
")",
"for",
"i",
"in",
"range",
"(",
"ndims",
")",
"]",
")",
"return",
"self",
".",
"map",
"(",
"lambda",
"v",
":",
"v",
"[",
"slices",
"]",
",",
"value_shape",
"=",
"new_value_shape",
")"
] |
Downsample images by an integer factor.
Parameters
----------
factor : positive int or tuple of positive ints
Stride to use in subsampling. If a single int is passed,
each dimension of the image will be downsampled by this factor.
If a tuple is passed, each dimension will be downsampled by the given factor.
|
[
"Downsample",
"images",
"by",
"an",
"integer",
"factor",
"."
] |
967ff8f3e7c2fabe1705743d95eb2746d4329786
|
https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/images/images.py#L294-L320
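Sketch for subsample above; a scalar factor strides every spatial axis, and the output shape follows the roundup helper in the record (test data illustrative):

import numpy as np
from thunder.images.readers import fromarray

imgs = fromarray(np.random.rand(3, 100, 100))

# keep every 4th pixel along both spatial axes
small = imgs.subsample(4)
print(small.shape)  # (3, 25, 25)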
|
12,297
|
thunder-project/thunder
|
thunder/images/images.py
|
Images.gaussian_filter
|
def gaussian_filter(self, sigma=2, order=0):
"""
Spatially smooth images with a gaussian filter.
Filtering will be applied to every image in the collection.
Parameters
----------
sigma : scalar or sequence of scalars, default = 2
Size of the filter size as standard deviation in pixels.
A sequence is interpreted as the standard deviation for each axis.
A single scalar is applied equally to all axes.
order : choice of 0 / 1 / 2 / 3 or sequence from same set, optional, default = 0
Order of the gaussian kernel, 0 is a gaussian,
higher numbers correspond to derivatives of a gaussian.
"""
from scipy.ndimage.filters import gaussian_filter
return self.map(lambda v: gaussian_filter(v, sigma, order), value_shape=self.value_shape)
|
python
|
def gaussian_filter(self, sigma=2, order=0):
"""
Spatially smooth images with a gaussian filter.
Filtering will be applied to every image in the collection.
Parameters
----------
sigma : scalar or sequence of scalars, default = 2
Size of the filter size as standard deviation in pixels.
A sequence is interpreted as the standard deviation for each axis.
A single scalar is applied equally to all axes.
order : choice of 0 / 1 / 2 / 3 or sequence from same set, optional, default = 0
Order of the gaussian kernel, 0 is a gaussian,
higher numbers correspond to derivatives of a gaussian.
"""
from scipy.ndimage.filters import gaussian_filter
return self.map(lambda v: gaussian_filter(v, sigma, order), value_shape=self.value_shape)
|
[
"def",
"gaussian_filter",
"(",
"self",
",",
"sigma",
"=",
"2",
",",
"order",
"=",
"0",
")",
":",
"from",
"scipy",
".",
"ndimage",
".",
"filters",
"import",
"gaussian_filter",
"return",
"self",
".",
"map",
"(",
"lambda",
"v",
":",
"gaussian_filter",
"(",
"v",
",",
"sigma",
",",
"order",
")",
",",
"value_shape",
"=",
"self",
".",
"value_shape",
")"
] |
Spatially smooth images with a gaussian filter.
Filtering will be applied to every image in the collection.
Parameters
----------
sigma : scalar or sequence of scalars, default = 2
Size of the filter size as standard deviation in pixels.
A sequence is interpreted as the standard deviation for each axis.
A single scalar is applied equally to all axes.
order : choice of 0 / 1 / 2 / 3 or sequence from same set, optional, default = 0
Order of the gaussian kernel, 0 is a gaussian,
higher numbers correspond to derivatives of a gaussian.
|
[
"Spatially",
"smooth",
"images",
"with",
"a",
"gaussian",
"filter",
"."
] |
967ff8f3e7c2fabe1705743d95eb2746d4329786
|
https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/images/images.py#L322-L341
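Sketch for gaussian_filter above; sigma can be a scalar (isotropic) or a per-axis sequence, as the docstring states (test data illustrative):

import numpy as np
from thunder.images.readers import fromarray

imgs = fromarray(np.random.rand(3, 64, 64))

smoothed = imgs.gaussian_filter(sigma=2)          # same sigma on every spatial axis
smoothed_aniso = imgs.gaussian_filter(sigma=(3, 1))  # heavier smoothing along the first axis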
|
12,298
|
thunder-project/thunder
|
thunder/images/images.py
|
Images._image_filter
|
def _image_filter(self, filter=None, size=2):
"""
Generic function for maping a filtering operation over images.
Parameters
----------
filter : string
Which filter to use.
size : int or tuple
Size parameter for filter.
"""
from numpy import isscalar
from scipy.ndimage.filters import median_filter, uniform_filter
FILTERS = {
'median': median_filter,
'uniform': uniform_filter
}
func = FILTERS[filter]
mode = self.mode
value_shape = self.value_shape
ndims = len(value_shape)
if ndims == 3 and isscalar(size) == 1:
size = [size, size, size]
if ndims == 3 and size[2] == 0:
def filter_(im):
if mode == 'spark':
im.setflags(write=True)
else:
im = im.copy()
for z in arange(0, value_shape[2]):
im[:, :, z] = func(im[:, :, z], size[0:2])
return im
else:
filter_ = lambda x: func(x, size)
return self.map(lambda v: filter_(v), value_shape=self.value_shape)
|
python
|
def _image_filter(self, filter=None, size=2):
"""
Generic function for maping a filtering operation over images.
Parameters
----------
filter : string
Which filter to use.
size : int or tuple
Size parameter for filter.
"""
from numpy import isscalar
from scipy.ndimage.filters import median_filter, uniform_filter
FILTERS = {
'median': median_filter,
'uniform': uniform_filter
}
func = FILTERS[filter]
mode = self.mode
value_shape = self.value_shape
ndims = len(value_shape)
if ndims == 3 and isscalar(size) == 1:
size = [size, size, size]
if ndims == 3 and size[2] == 0:
def filter_(im):
if mode == 'spark':
im.setflags(write=True)
else:
im = im.copy()
for z in arange(0, value_shape[2]):
im[:, :, z] = func(im[:, :, z], size[0:2])
return im
else:
filter_ = lambda x: func(x, size)
return self.map(lambda v: filter_(v), value_shape=self.value_shape)
|
[
"def",
"_image_filter",
"(",
"self",
",",
"filter",
"=",
"None",
",",
"size",
"=",
"2",
")",
":",
"from",
"numpy",
"import",
"isscalar",
"from",
"scipy",
".",
"ndimage",
".",
"filters",
"import",
"median_filter",
",",
"uniform_filter",
"FILTERS",
"=",
"{",
"'median'",
":",
"median_filter",
",",
"'uniform'",
":",
"uniform_filter",
"}",
"func",
"=",
"FILTERS",
"[",
"filter",
"]",
"mode",
"=",
"self",
".",
"mode",
"value_shape",
"=",
"self",
".",
"value_shape",
"ndims",
"=",
"len",
"(",
"value_shape",
")",
"if",
"ndims",
"==",
"3",
"and",
"isscalar",
"(",
"size",
")",
"==",
"1",
":",
"size",
"=",
"[",
"size",
",",
"size",
",",
"size",
"]",
"if",
"ndims",
"==",
"3",
"and",
"size",
"[",
"2",
"]",
"==",
"0",
":",
"def",
"filter_",
"(",
"im",
")",
":",
"if",
"mode",
"==",
"'spark'",
":",
"im",
".",
"setflags",
"(",
"write",
"=",
"True",
")",
"else",
":",
"im",
"=",
"im",
".",
"copy",
"(",
")",
"for",
"z",
"in",
"arange",
"(",
"0",
",",
"value_shape",
"[",
"2",
"]",
")",
":",
"im",
"[",
":",
",",
":",
",",
"z",
"]",
"=",
"func",
"(",
"im",
"[",
":",
",",
":",
",",
"z",
"]",
",",
"size",
"[",
"0",
":",
"2",
"]",
")",
"return",
"im",
"else",
":",
"filter_",
"=",
"lambda",
"x",
":",
"func",
"(",
"x",
",",
"size",
")",
"return",
"self",
".",
"map",
"(",
"lambda",
"v",
":",
"filter_",
"(",
"v",
")",
",",
"value_shape",
"=",
"self",
".",
"value_shape",
")"
] |
Generic function for maping a filtering operation over images.
Parameters
----------
filter : string
Which filter to use.
size : int or tuple
Size parameter for filter.
|
[
"Generic",
"function",
"for",
"maping",
"a",
"filtering",
"operation",
"over",
"images",
"."
] |
967ff8f3e7c2fabe1705743d95eb2746d4329786
|
https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/images/images.py#L373-L414
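_image_filter is a private helper; the sketch below assumes public median_filter and uniform_filter wrappers dispatch into it with the matching FILTERS key (uniform_filter is also what the localcorr record below calls). Test data is illustrative:

import numpy as np
from thunder.images.readers import fromarray

imgs = fromarray(np.random.rand(3, 64, 64))

denoised = imgs.median_filter(size=3)   # 'median' branch
blurred = imgs.uniform_filter(size=3)   # 'uniform' branch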
|
12,299
|
thunder-project/thunder
|
thunder/images/images.py
|
Images.localcorr
|
def localcorr(self, size=2):
"""
Correlate every pixel in an image sequence to the average of its local neighborhood.
This algorithm computes, for every pixel, the correlation coefficient
between the sequence of values for that pixel, and the average of all pixels
in a local neighborhood. It does this by blurring the image(s) with a uniform filter,
and then correlates the original sequence with the blurred sequence.
Parameters
----------
size : int or tuple, optional, default = 2
Size of the filter in pixels. If a scalar, will use the same filter size
along each dimension.
"""
from thunder.images.readers import fromarray, fromrdd
from numpy import corrcoef, concatenate
nimages = self.shape[0]
# spatially average the original image set over the specified neighborhood
blurred = self.uniform_filter(size)
# union the averaged images with the originals to create an
# Images object containing 2N images (where N is the original number of images),
# ordered such that the first N images are the averaged ones.
if self.mode == 'spark':
combined = self.values.concatenate(blurred.values)
combined_images = fromrdd(combined.tordd())
else:
combined = concatenate((self.values, blurred.values), axis=0)
combined_images = fromarray(combined)
# correlate the first N (averaged) records with the last N (original) records
series = combined_images.toseries()
corr = series.map(lambda x: corrcoef(x[:nimages], x[nimages:])[0, 1])
return corr.toarray()
|
python
|
def localcorr(self, size=2):
"""
Correlate every pixel in an image sequence to the average of its local neighborhood.
This algorithm computes, for every pixel, the correlation coefficient
between the sequence of values for that pixel, and the average of all pixels
in a local neighborhood. It does this by blurring the image(s) with a uniform filter,
and then correlates the original sequence with the blurred sequence.
Parameters
----------
size : int or tuple, optional, default = 2
Size of the filter in pixels. If a scalar, will use the same filter size
along each dimension.
"""
from thunder.images.readers import fromarray, fromrdd
from numpy import corrcoef, concatenate
nimages = self.shape[0]
# spatially average the original image set over the specified neighborhood
blurred = self.uniform_filter(size)
# union the averaged images with the originals to create an
# Images object containing 2N images (where N is the original number of images),
# ordered such that the first N images are the averaged ones.
if self.mode == 'spark':
combined = self.values.concatenate(blurred.values)
combined_images = fromrdd(combined.tordd())
else:
combined = concatenate((self.values, blurred.values), axis=0)
combined_images = fromarray(combined)
# correlate the first N (averaged) records with the last N (original) records
series = combined_images.toseries()
corr = series.map(lambda x: corrcoef(x[:nimages], x[nimages:])[0, 1])
return corr.toarray()
|
[
"def",
"localcorr",
"(",
"self",
",",
"size",
"=",
"2",
")",
":",
"from",
"thunder",
".",
"images",
".",
"readers",
"import",
"fromarray",
",",
"fromrdd",
"from",
"numpy",
"import",
"corrcoef",
",",
"concatenate",
"nimages",
"=",
"self",
".",
"shape",
"[",
"0",
"]",
"# spatially average the original image set over the specified neighborhood",
"blurred",
"=",
"self",
".",
"uniform_filter",
"(",
"size",
")",
"# union the averaged images with the originals to create an",
"# Images object containing 2N images (where N is the original number of images),",
"# ordered such that the first N images are the averaged ones.",
"if",
"self",
".",
"mode",
"==",
"'spark'",
":",
"combined",
"=",
"self",
".",
"values",
".",
"concatenate",
"(",
"blurred",
".",
"values",
")",
"combined_images",
"=",
"fromrdd",
"(",
"combined",
".",
"tordd",
"(",
")",
")",
"else",
":",
"combined",
"=",
"concatenate",
"(",
"(",
"self",
".",
"values",
",",
"blurred",
".",
"values",
")",
",",
"axis",
"=",
"0",
")",
"combined_images",
"=",
"fromarray",
"(",
"combined",
")",
"# correlate the first N (averaged) records with the last N (original) records",
"series",
"=",
"combined_images",
".",
"toseries",
"(",
")",
"corr",
"=",
"series",
".",
"map",
"(",
"lambda",
"x",
":",
"corrcoef",
"(",
"x",
"[",
":",
"nimages",
"]",
",",
"x",
"[",
"nimages",
":",
"]",
")",
"[",
"0",
",",
"1",
"]",
")",
"return",
"corr",
".",
"toarray",
"(",
")"
] |
Correlate every pixel in an image sequence to the average of its local neighborhood.
This algorithm computes, for every pixel, the correlation coefficient
between the sequence of values for that pixel, and the average of all pixels
in a local neighborhood. It does this by blurring the image(s) with a uniform filter,
and then correlates the original sequence with the blurred sequence.
Parameters
----------
size : int or tuple, optional, default = 2
Size of the filter in pixels. If a scalar, will use the same filter size
along each dimension.
|
[
"Correlate",
"every",
"pixel",
"in",
"an",
"image",
"sequence",
"to",
"the",
"average",
"of",
"its",
"local",
"neighborhood",
"."
] |
967ff8f3e7c2fabe1705743d95eb2746d4329786
|
https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/images/images.py#L416-L454
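Sketch for localcorr above; it returns a plain array shaped like one image, holding each pixel's correlation against its blurred neighborhood average (test data illustrative):

import numpy as np
from thunder.images.readers import fromarray

# 50 frames of 32x32 pixels
imgs = fromarray(np.random.rand(50, 32, 32))

corr_map = imgs.localcorr(size=2)
print(corr_map.shape)  # (32, 32)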
|