Dataset schema (column types with value or length ranges):

column            type    statistics
id                int32   values 0 to 252k
repo              string  lengths 7 to 55
path              string  lengths 4 to 127
func_name         string  lengths 1 to 88
original_string   string  lengths 75 to 19.8k
language          string  1 distinct value
code              string  lengths 75 to 19.8k
code_tokens       list
docstring         string  lengths 3 to 17.3k
docstring_tokens  list
sha               string  lengths 40 to 40
url               string  lengths 87 to 242
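These columns follow the raw CodeSearchNet-style record layout. As a minimal sketch of pulling rows like the ones below with the Hugging Face datasets library; the hub id here is a placeholder, not a confirmed location of this particular dump:

from datasets import load_dataset

# "some-org/code-search-net-python" is a hypothetical hub id; mirrors of
# raw CodeSearchNet dumps keep the field names listed in the schema above.
ds = load_dataset("some-org/code-search-net-python", split="train")

row = ds[0]
print(row["repo"], row["path"], row["func_name"])
print(row["docstring"])
print(row["code"][:200])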
244500
scottgigante/tasklogger
tasklogger/api.py
log_warning
python
def log_warning(msg, logger="TaskLogger"):
    """Log a WARNING message

    Convenience function to log a message to the default Logger

    Parameters
    ----------
    msg : str
        Message to be logged
    logger : str, optional (default: "TaskLogger")
        Unique name of the logger to retrieve

    Returns
    -------
    logger : TaskLogger
    """
    tasklogger = get_tasklogger(logger)
    tasklogger.warning(msg)
    return tasklogger
[ "def", "log_warning", "(", "msg", ",", "logger", "=", "\"TaskLogger\"", ")", ":", "tasklogger", "=", "get_tasklogger", "(", "logger", ")", "tasklogger", ".", "warning", "(", "msg", ")", "return", "tasklogger" ]
Log a WARNING message

Convenience function to log a message to the default Logger

Parameters
----------
msg : str
    Message to be logged
logger : str, optional (default: "TaskLogger")
    Unique name of the logger to retrieve

Returns
-------
logger : TaskLogger
[ "Log", "a", "WARNING", "message" ]
06a263715d2db0653615c17b2df14b8272967b8d
https://github.com/scottgigante/tasklogger/blob/06a263715d2db0653615c17b2df14b8272967b8d/tasklogger/api.py#L110-L128
244501
scottgigante/tasklogger
tasklogger/api.py
log_error
python
def log_error(msg, logger="TaskLogger"):
    """Log an ERROR message

    Convenience function to log a message to the default Logger

    Parameters
    ----------
    msg : str
        Message to be logged
    logger : str, optional (default: "TaskLogger")
        Unique name of the logger to retrieve

    Returns
    -------
    logger : TaskLogger
    """
    tasklogger = get_tasklogger(logger)
    tasklogger.error(msg)
    return tasklogger
[ "def", "log_error", "(", "msg", ",", "logger", "=", "\"TaskLogger\"", ")", ":", "tasklogger", "=", "get_tasklogger", "(", "logger", ")", "tasklogger", ".", "error", "(", "msg", ")", "return", "tasklogger" ]
Log an ERROR message

Convenience function to log a message to the default Logger

Parameters
----------
msg : str
    Message to be logged
logger : str, optional (default: "TaskLogger")
    Unique name of the logger to retrieve

Returns
-------
logger : TaskLogger
[ "Log", "an", "ERROR", "message" ]
06a263715d2db0653615c17b2df14b8272967b8d
https://github.com/scottgigante/tasklogger/blob/06a263715d2db0653615c17b2df14b8272967b8d/tasklogger/api.py#L131-L149
244502
scottgigante/tasklogger
tasklogger/api.py
log_critical
python
def log_critical(msg, logger="TaskLogger"):
    """Log a CRITICAL message

    Convenience function to log a message to the default Logger

    Parameters
    ----------
    msg : str
        Message to be logged
    name : `str`, optional (default: "TaskLogger")
        Name used to retrieve the unique TaskLogger

    Returns
    -------
    logger : TaskLogger
    """
    tasklogger = get_tasklogger(logger)
    tasklogger.critical(msg)
    return tasklogger
[ "def", "log_critical", "(", "msg", ",", "logger", "=", "\"TaskLogger\"", ")", ":", "tasklogger", "=", "get_tasklogger", "(", "logger", ")", "tasklogger", ".", "critical", "(", "msg", ")", "return", "tasklogger" ]
Log a CRITICAL message

Convenience function to log a message to the default Logger

Parameters
----------
msg : str
    Message to be logged
name : `str`, optional (default: "TaskLogger")
    Name used to retrieve the unique TaskLogger

Returns
-------
logger : TaskLogger
[ "Log", "a", "CRITICAL", "message" ]
06a263715d2db0653615c17b2df14b8272967b8d
https://github.com/scottgigante/tasklogger/blob/06a263715d2db0653615c17b2df14b8272967b8d/tasklogger/api.py#L152-L170
244503
scottgigante/tasklogger
tasklogger/api.py
set_indent
python
def set_indent(indent=2, logger="TaskLogger"):
    """Set the indent function

    Convenience function to set the indent size

    Parameters
    ----------
    indent : int, optional (default: 2)
        number of spaces by which to indent based on the number of
        tasks currently running`

    Returns
    -------
    logger : TaskLogger
    """
    tasklogger = get_tasklogger(logger)
    tasklogger.set_indent(indent)
    return tasklogger
[ "def", "set_indent", "(", "indent", "=", "2", ",", "logger", "=", "\"TaskLogger\"", ")", ":", "tasklogger", "=", "get_tasklogger", "(", "logger", ")", "tasklogger", ".", "set_indent", "(", "indent", ")", "return", "tasklogger" ]
Set the indent function

Convenience function to set the indent size

Parameters
----------
indent : int, optional (default: 2)
    number of spaces by which to indent based on the number of tasks
    currently running`

Returns
-------
logger : TaskLogger
[ "Set", "the", "indent", "function" ]
06a263715d2db0653615c17b2df14b8272967b8d
https://github.com/scottgigante/tasklogger/blob/06a263715d2db0653615c17b2df14b8272967b8d/tasklogger/api.py#L214-L231
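The four tasklogger/api.py records above form a small convenience layer over named TaskLogger instances. A minimal usage sketch, assuming these helpers are re-exported at the package top level (that re-export is an assumption; only the signatures come from the records):

import tasklogger  # assumes the api helpers are exposed at package level

# Each helper fetches (or creates) the named TaskLogger, acts on it,
# and returns it, so calls can be chained.
tasklogger.set_indent(4)
tasklogger.log_warning("disk almost full")
tasklogger.log_error("write failed", logger="io")
tasklogger.log_critical("shutting down", logger="io")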
244504
etcher-be/emiz
emiz/parse_parking_spots.py
main
python
def main(miz_path):
    """
    Artifact from earlier development
    """
    from emiz.miz import Miz
    with Miz(miz_path) as m:
        mis = m.mission
    result = defaultdict(dict)
    for unit in mis.units:
        airport, spot = unit.group_name.split('#')
        spot = int(spot)
        # print(airport, int(spot), unit.unit_position)
        result[airport][spot] = unit.unit_position
    import pickle  # nosec
    with open('_parking_spots.py', mode='w') as f:
        f.write('parkings = {}\n'.format(pickle.dumps(result)))
[ "def", "main", "(", "miz_path", ")", ":", "from", "emiz", ".", "miz", "import", "Miz", "with", "Miz", "(", "miz_path", ")", "as", "m", ":", "mis", "=", "m", ".", "mission", "result", "=", "defaultdict", "(", "dict", ")", "for", "unit", "in", "mis", ".", "units", ":", "airport", ",", "spot", "=", "unit", ".", "group_name", ".", "split", "(", "'#'", ")", "spot", "=", "int", "(", "spot", ")", "# print(airport, int(spot), unit.unit_position)", "result", "[", "airport", "]", "[", "spot", "]", "=", "unit", ".", "unit_position", "import", "pickle", "# nosec", "with", "open", "(", "'_parking_spots.py'", ",", "mode", "=", "'w'", ")", "as", "f", ":", "f", ".", "write", "(", "'parkings = {}\\n'", ".", "format", "(", "pickle", ".", "dumps", "(", "result", ")", ")", ")" ]
Artifact from earlier development
[ "Artifact", "from", "earlier", "development" ]
1c3e32711921d7e600e85558ffe5d337956372de
https://github.com/etcher-be/emiz/blob/1c3e32711921d7e600e85558ffe5d337956372de/emiz/parse_parking_spots.py#L15-L32
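One quirk in the function above: under Python 3, pickle.dumps returns bytes, so the generated _parking_spots.py would hold a bytes literal rather than a plain dict. A hedged alternative sketch (not the project's code) that keeps the artifact readable writes the dict's repr instead:

# Sketch only: emit the mapping as a literal that later code can import.
with open('_parking_spots.py', mode='w') as f:
    f.write('parkings = {!r}\n'.format(dict(result)))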
244505
PSU-OIT-ARC/django-local-settings
local_settings/settings.py
DottedAccessMixin._traverse
python
def _traverse(self, name, create_missing=False, action=None, value=NO_DEFAULT):
    """Traverse to the item specified by ``name``.

    To create missing items on the way to the ``name``d item, pass
    ``create_missing=True``. This will insert an item for each missing
    segment in ``name``. The type and value of item that will be
    inserted for a missing segment depends on the *next* segment. If a
    ``default`` value is passed, the ``name``d item will be set to this
    value; otherwise, a default default will be used. See
    :meth:`_create_segment` for more info.

    """
    obj = self
    segments = self._parse_path(name)
    for segment, next_segment in zip(segments, segments[1:] + [None]):
        last = next_segment is None
        if create_missing:
            self._create_segment(obj, segment, next_segment)
        try:
            next_obj = obj[segment]
        except IndexError:
            raise KeyError(segment)
        if not last:
            obj = next_obj
        else:
            if action:
                value = action(obj, segment)
            elif value is not NO_DEFAULT:
                obj[segment] = value
            else:
                value = obj[segment]
    return value
[ "def", "_traverse", "(", "self", ",", "name", ",", "create_missing", "=", "False", ",", "action", "=", "None", ",", "value", "=", "NO_DEFAULT", ")", ":", "obj", "=", "self", "segments", "=", "self", ".", "_parse_path", "(", "name", ")", "for", "segment", ",", "next_segment", "in", "zip", "(", "segments", ",", "segments", "[", "1", ":", "]", "+", "[", "None", "]", ")", ":", "last", "=", "next_segment", "is", "None", "if", "create_missing", ":", "self", ".", "_create_segment", "(", "obj", ",", "segment", ",", "next_segment", ")", "try", ":", "next_obj", "=", "obj", "[", "segment", "]", "except", "IndexError", ":", "raise", "KeyError", "(", "segment", ")", "if", "not", "last", ":", "obj", "=", "next_obj", "else", ":", "if", "action", ":", "value", "=", "action", "(", "obj", ",", "segment", ")", "elif", "value", "is", "not", "NO_DEFAULT", ":", "obj", "[", "segment", "]", "=", "value", "else", ":", "value", "=", "obj", "[", "segment", "]", "return", "value" ]
Traverse to the item specified by ``name``.

To create missing items on the way to the ``name``d item, pass
``create_missing=True``. This will insert an item for each missing
segment in ``name``. The type and value of item that will be inserted
for a missing segment depends on the *next* segment. If a ``default``
value is passed, the ``name``d item will be set to this value;
otherwise, a default default will be used. See :meth:`_create_segment`
for more info.
[ "Traverse", "to", "the", "item", "specified", "by", "name", "." ]
758810fbd9411c2046a187afcac6532155cac694
https://github.com/PSU-OIT-ARC/django-local-settings/blob/758810fbd9411c2046a187afcac6532155cac694/local_settings/settings.py#L73-L109
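The traversal above generalizes beyond this settings class. A self-contained sketch of the same idea over plain dicts and lists; parse() is a simplified stand-in for _parse_path (no parenthesized or interpolation groups):

def parse(path):
    # Simplified stand-in: split on dots, convert digit-only segments to ints.
    return [int(s) if s.isdigit() else s for s in path.split('.')]

def traverse(obj, path):
    # Walk each segment; IndexError on lists is surfaced as KeyError,
    # mirroring the behavior of _traverse above.
    for segment in parse(path):
        try:
            obj = obj[segment]
        except IndexError:
            raise KeyError(segment)
    return obj

settings = {'LOGGING': {'handlers': ['console', 'file']}}
assert traverse(settings, 'LOGGING.handlers.1') == 'file'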
244506
PSU-OIT-ARC/django-local-settings
local_settings/settings.py
DottedAccessMixin._parse_path
python
def _parse_path(self, path):
    """Parse ``path`` into segments.

    Paths must start with a WORD (i.e., a top level Django setting
    name). Path segments are separated by dots. Compound path segments
    (i.e., a name with a dot in it) can be grouped inside parentheses.

    Examples::

        >>> settings = Settings()
        >>> settings._parse_path('WORD')
        ['WORD']
        >>> settings._parse_path('WORD.x')
        ['WORD', 'x']
        >>> settings._parse_path('WORD.(x)')
        ['WORD', 'x']
        >>> settings._parse_path('WORD.(x.y)')
        ['WORD', 'x.y']
        >>> settings._parse_path('WORD.(x.y).z')
        ['WORD', 'x.y', 'z']
        >>> settings._parse_path('WORD.0.z')
        ['WORD', 0, 'z']
        >>> settings._parse_path('WORD.(0).z')
        ['WORD', '0', 'z']
        >>> settings._parse_path('WORD.(0)X.z')
        ['WORD', '0X', 'z']

    An example of where compound names are actually useful is in
    logger settings::

        LOGGING.loggers.(package.module).handlers = ["console"]
        LOGGING.loggers.(package.module).level = "DEBUG"

    Paths may also contain interpolation groups. Dotted names in these
    groups will not be split (so there's no need to group them inside
    parentheses)::

        >>> settings = Settings()
        >>> settings._parse_path('WORD.{{x}}')
        ['WORD', '{{x}}']
        >>> settings._parse_path('WORD.{{x.y}}')
        ['WORD', '{{x.y}}']
        >>> settings._parse_path('WORD.{{x.y.z}}XYZ')
        ['WORD', '{{x.y.z}}XYZ']

    Interpolation groups *can* be wrapped in parentheses, but doing so
    is redundant::

        >>> settings._parse_path('WORD.({{x.y.z}}XYZ)')
        ['WORD', '{{x.y.z}}XYZ']

    Any segment that A) looks like an int and B) does *not* contain a
    (...) or {{...}} group will be converted to an int. Segments that
    start with a leading "0" followed by other digits will not be
    converted.

    """
    if not path:
        raise ValueError('path cannot be empty')
    segments = []
    path_iter = zip(iter(path), chain(path[1:], (None,)))
    if six.PY2:  # zip() returns a list on Python 2
        path_iter = iter(path_iter)
    convert_name = self._convert_name
    current_segment = []
    current_segment_contains_group = False

    def append_segment():
        segment = ''.join(current_segment)
        if not current_segment_contains_group:
            segment = convert_name(segment)
        segments.append(segment)
        del current_segment[:]

    for c, d in path_iter:
        if c == '.':
            append_segment()
            current_segment_contains_group = False
        elif c == '(':
            nested = 0
            for c, d in path_iter:
                current_segment.append(c)
                if c == '(':
                    nested += 1
                elif c == ')':
                    if nested:
                        nested -= 1
                    else:
                        current_segment.pop()  # Remove the closing paren
                        current_segment_contains_group = True
                        break
            else:
                raise ValueError('Unclosed (...) in %s' % path)
        elif c == '{' and d == '{':
            current_segment_contains_group = True
            current_segment.append(c)
            for c, d in path_iter:
                current_segment.append(c)
                if c == '}' and d == '}':
                    current_segment_contains_group = True
                    break
            else:
                raise ValueError('Unclosed {{...}} in %s' % path)
        else:
            current_segment.append(c)
    if current_segment:
        append_segment()
    return segments
[ "def", "_parse_path", "(", "self", ",", "path", ")", ":", "if", "not", "path", ":", "raise", "ValueError", "(", "'path cannot be empty'", ")", "segments", "=", "[", "]", "path_iter", "=", "zip", "(", "iter", "(", "path", ")", ",", "chain", "(", "path", "[", "1", ":", "]", ",", "(", "None", ",", ")", ")", ")", "if", "six", ".", "PY2", ":", "# zip() returns a list on Python 2", "path_iter", "=", "iter", "(", "path_iter", ")", "convert_name", "=", "self", ".", "_convert_name", "current_segment", "=", "[", "]", "current_segment_contains_group", "=", "False", "def", "append_segment", "(", ")", ":", "segment", "=", "''", ".", "join", "(", "current_segment", ")", "if", "not", "current_segment_contains_group", ":", "segment", "=", "convert_name", "(", "segment", ")", "segments", ".", "append", "(", "segment", ")", "del", "current_segment", "[", ":", "]", "for", "c", ",", "d", "in", "path_iter", ":", "if", "c", "==", "'.'", ":", "append_segment", "(", ")", "current_segment_contains_group", "=", "False", "elif", "c", "==", "'('", ":", "nested", "=", "0", "for", "c", ",", "d", "in", "path_iter", ":", "current_segment", ".", "append", "(", "c", ")", "if", "c", "==", "'('", ":", "nested", "+=", "1", "elif", "c", "==", "')'", ":", "if", "nested", ":", "nested", "-=", "1", "else", ":", "current_segment", ".", "pop", "(", ")", "# Remove the closing paren", "current_segment_contains_group", "=", "True", "break", "else", ":", "raise", "ValueError", "(", "'Unclosed (...) in %s'", "%", "path", ")", "elif", "c", "==", "'{'", "and", "d", "==", "'{'", ":", "current_segment_contains_group", "=", "True", "current_segment", ".", "append", "(", "c", ")", "for", "c", ",", "d", "in", "path_iter", ":", "current_segment", ".", "append", "(", "c", ")", "if", "c", "==", "'}'", "and", "d", "==", "'}'", ":", "current_segment_contains_group", "=", "True", "break", "else", ":", "raise", "ValueError", "(", "'Unclosed {{...}} in %s'", "%", "path", ")", "else", ":", "current_segment", ".", "append", "(", "c", ")", "if", "current_segment", ":", "append_segment", "(", ")", "return", "segments" ]
Parse ``path`` into segments.

Paths must start with a WORD (i.e., a top level Django setting name).
Path segments are separated by dots. Compound path segments (i.e., a
name with a dot in it) can be grouped inside parentheses.

Examples::

    >>> settings = Settings()
    >>> settings._parse_path('WORD')
    ['WORD']
    >>> settings._parse_path('WORD.x')
    ['WORD', 'x']
    >>> settings._parse_path('WORD.(x)')
    ['WORD', 'x']
    >>> settings._parse_path('WORD.(x.y)')
    ['WORD', 'x.y']
    >>> settings._parse_path('WORD.(x.y).z')
    ['WORD', 'x.y', 'z']
    >>> settings._parse_path('WORD.0.z')
    ['WORD', 0, 'z']
    >>> settings._parse_path('WORD.(0).z')
    ['WORD', '0', 'z']
    >>> settings._parse_path('WORD.(0)X.z')
    ['WORD', '0X', 'z']

An example of where compound names are actually useful is in logger
settings::

    LOGGING.loggers.(package.module).handlers = ["console"]
    LOGGING.loggers.(package.module).level = "DEBUG"

Paths may also contain interpolation groups. Dotted names in these
groups will not be split (so there's no need to group them inside
parentheses)::

    >>> settings = Settings()
    >>> settings._parse_path('WORD.{{x}}')
    ['WORD', '{{x}}']
    >>> settings._parse_path('WORD.{{x.y}}')
    ['WORD', '{{x.y}}']
    >>> settings._parse_path('WORD.{{x.y.z}}XYZ')
    ['WORD', '{{x.y.z}}XYZ']

Interpolation groups *can* be wrapped in parentheses, but doing so is
redundant::

    >>> settings._parse_path('WORD.({{x.y.z}}XYZ)')
    ['WORD', '{{x.y.z}}XYZ']

Any segment that A) looks like an int and B) does *not* contain a
(...) or {{...}} group will be converted to an int. Segments that
start with a leading "0" followed by other digits will not be
converted.
[ "Parse", "path", "into", "segments", "." ]
758810fbd9411c2046a187afcac6532155cac694
https://github.com/PSU-OIT-ARC/django-local-settings/blob/758810fbd9411c2046a187afcac6532155cac694/local_settings/settings.py#L137-L249
244507
PSU-OIT-ARC/django-local-settings
local_settings/settings.py
DottedAccessMixin._convert_name
python
def _convert_name(self, name):
    """Convert ``name`` to int if it looks like an int.

    Otherwise, return it as is.

    """
    if re.search('^\d+$', name):
        if len(name) > 1 and name[0] == '0':
            # Don't treat strings beginning with "0" as ints
            return name
        return int(name)
    return name
[ "def", "_convert_name", "(", "self", ",", "name", ")", ":", "if", "re", ".", "search", "(", "'^\\d+$'", ",", "name", ")", ":", "if", "len", "(", "name", ")", ">", "1", "and", "name", "[", "0", "]", "==", "'0'", ":", "# Don't treat strings beginning with \"0\" as ints", "return", "name", "return", "int", "(", "name", ")", "return", "name" ]
Convert ``name`` to int if it looks like an int. Otherwise, return it as is.
[ "Convert", "name", "to", "int", "if", "it", "looks", "like", "an", "int", "." ]
758810fbd9411c2046a187afcac6532155cac694
https://github.com/PSU-OIT-ARC/django-local-settings/blob/758810fbd9411c2046a187afcac6532155cac694/local_settings/settings.py#L251-L262
244508
jucacrispim/asyncamqp
asyncamqp/channel.py
Channel.basic_consume
python
async def basic_consume(self, queue_name='', consumer_tag='',
                        no_local=False, no_ack=False, exclusive=False,
                        no_wait=False, arguments=None, wait_message=True,
                        timeout=0):
    """Starts the consumption of message into a queue.

    the callback will be called each time we're receiving a message.

    Args:
        queue_name: str, the queue to receive message from
        consumer_tag: str, optional consumer tag
        no_local: bool, if set the server will not send messages
            to the connection that published them.
        no_ack: bool, if set the server does not expect
            acknowledgements for messages
        exclusive: bool, request exclusive consumer access, meaning
            only this consumer can access the queue
        no_wait: bool, if set, the server will not respond to the method
        arguments: dict, AMQP arguments to be passed to the server
        wait_message: Indicates if the consumer should wait for new
            messages in the queue or simply return None if the queue
            is empty.
        timeout: A timeout for waiting messages. ``wait_message`` has
            precendence over timeout.
    """
    # If a consumer tag was not passed, create one
    consumer_tag = consumer_tag or 'ctag%i.%s' % (
        self.channel_id, uuid.uuid4().hex)

    if arguments is None:
        arguments = {}

    frame = amqp_frame.AmqpRequest(
        self.protocol._stream_writer, amqp_constants.TYPE_METHOD,
        self.channel_id)
    frame.declare_method(
        amqp_constants.CLASS_BASIC, amqp_constants.BASIC_CONSUME)

    request = amqp_frame.AmqpEncoder()
    request.write_short(0)
    request.write_shortstr(queue_name)
    request.write_shortstr(consumer_tag)
    request.write_bits(no_local, no_ack, exclusive, no_wait)
    request.write_table(arguments)

    self.consumer_queues[consumer_tag] = asyncio.Queue(self.max_queue_size)
    self.last_consumer_tag = consumer_tag

    consumer = self.CONSUMER_CLASS(
        self, self.consumer_queues[consumer_tag], consumer_tag,
        nowait=not wait_message, timeout=timeout)

    await self._write_frame_awaiting_response(
        'basic_consume', frame, request, no_wait)

    if not no_wait:
        self._ctag_events[consumer_tag].set()

    return consumer
[ "async", "def", "basic_consume", "(", "self", ",", "queue_name", "=", "''", ",", "consumer_tag", "=", "''", ",", "no_local", "=", "False", ",", "no_ack", "=", "False", ",", "exclusive", "=", "False", ",", "no_wait", "=", "False", ",", "arguments", "=", "None", ",", "wait_message", "=", "True", ",", "timeout", "=", "0", ")", ":", "# If a consumer tag was not passed, create one", "consumer_tag", "=", "consumer_tag", "or", "'ctag%i.%s'", "%", "(", "self", ".", "channel_id", ",", "uuid", ".", "uuid4", "(", ")", ".", "hex", ")", "if", "arguments", "is", "None", ":", "arguments", "=", "{", "}", "frame", "=", "amqp_frame", ".", "AmqpRequest", "(", "self", ".", "protocol", ".", "_stream_writer", ",", "amqp_constants", ".", "TYPE_METHOD", ",", "self", ".", "channel_id", ")", "frame", ".", "declare_method", "(", "amqp_constants", ".", "CLASS_BASIC", ",", "amqp_constants", ".", "BASIC_CONSUME", ")", "request", "=", "amqp_frame", ".", "AmqpEncoder", "(", ")", "request", ".", "write_short", "(", "0", ")", "request", ".", "write_shortstr", "(", "queue_name", ")", "request", ".", "write_shortstr", "(", "consumer_tag", ")", "request", ".", "write_bits", "(", "no_local", ",", "no_ack", ",", "exclusive", ",", "no_wait", ")", "request", ".", "write_table", "(", "arguments", ")", "self", ".", "consumer_queues", "[", "consumer_tag", "]", "=", "asyncio", ".", "Queue", "(", "self", ".", "max_queue_size", ")", "self", ".", "last_consumer_tag", "=", "consumer_tag", "consumer", "=", "self", ".", "CONSUMER_CLASS", "(", "self", ",", "self", ".", "consumer_queues", "[", "consumer_tag", "]", ",", "consumer_tag", ",", "nowait", "=", "not", "wait_message", ",", "timeout", "=", "timeout", ")", "await", "self", ".", "_write_frame_awaiting_response", "(", "'basic_consume'", ",", "frame", ",", "request", ",", "no_wait", ")", "if", "not", "no_wait", ":", "self", ".", "_ctag_events", "[", "consumer_tag", "]", ".", "set", "(", ")", "return", "consumer" ]
Starts the consumption of message into a queue.

the callback will be called each time we're receiving a message.

Args:
    queue_name: str, the queue to receive message from
    consumer_tag: str, optional consumer tag
    no_local: bool, if set the server will not send messages
        to the connection that published them.
    no_ack: bool, if set the server does not expect
        acknowledgements for messages
    exclusive: bool, request exclusive consumer access, meaning
        only this consumer can access the queue
    no_wait: bool, if set, the server will not respond to the method
    arguments: dict, AMQP arguments to be passed to the server
    wait_message: Indicates if the consumer should wait for new
        messages in the queue or simply return None if the queue
        is empty.
    timeout: A timeout for waiting messages. ``wait_message`` has
        precendence over timeout.
[ "Starts", "the", "consumption", "of", "message", "into", "a", "queue", ".", "the", "callback", "will", "be", "called", "each", "time", "we", "re", "receiving", "a", "message", "." ]
fa50541ab528f4e0753cb9562c29b0d89d738889
https://github.com/jucacrispim/asyncamqp/blob/fa50541ab528f4e0753cb9562c29b0d89d738889/asyncamqp/channel.py#L48-L106
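The wait_message/timeout semantics documented above ultimately govern how messages are pulled from the consumer's asyncio.Queue. One plausible reading of those semantics, sketched against a plain queue rather than asyncamqp's Consumer class (whose internals are not shown in this record):

import asyncio

async def fetch(queue, wait_message=True, timeout=0):
    # Mirrors the documented behavior: block while wait_message is set
    # (it takes precedence over timeout); otherwise honor a timeout, or
    # return None immediately on an empty queue.
    if wait_message:
        return await queue.get()
    if timeout:
        try:
            return await asyncio.wait_for(queue.get(), timeout)
        except asyncio.TimeoutError:
            return None
    try:
        return queue.get_nowait()
    except asyncio.QueueEmpty:
        return None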
244509
SergeySatskiy/cdm-gc-plugin
setup.py
getPluginVersion
python
def getPluginVersion():
    """The version must be updated in the .cdmp file"""
    desc_file = os.path.join('cdmplugins', 'gc', plugin_desc_file)
    if not os.path.exists(desc_file):
        print('Cannot find the plugin description file. Expected here: ' +
              desc_file, file=sys.stderr)
        sys.exit(1)
    with open(desc_file) as dec_file:
        for line in dec_file:
            line = line.strip()
            if line.startswith('Version'):
                return line.split('=')[1].strip()
    print('Cannot find a version line in the ' + desc_file,
          file=sys.stderr)
    sys.exit(1)
[ "def", "getPluginVersion", "(", ")", ":", "desc_file", "=", "os", ".", "path", ".", "join", "(", "'cdmplugins'", ",", "'gc'", ",", "plugin_desc_file", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "desc_file", ")", ":", "print", "(", "'Cannot find the plugin description file. Expected here: '", "+", "desc_file", ",", "file", "=", "sys", ".", "stderr", ")", "sys", ".", "exit", "(", "1", ")", "with", "open", "(", "desc_file", ")", "as", "dec_file", ":", "for", "line", "in", "dec_file", ":", "line", "=", "line", ".", "strip", "(", ")", "if", "line", ".", "startswith", "(", "'Version'", ")", ":", "return", "line", ".", "split", "(", "'='", ")", "[", "1", "]", ".", "strip", "(", ")", "print", "(", "'Cannot find a version line in the '", "+", "desc_file", ",", "file", "=", "sys", ".", "stderr", ")", "sys", ".", "exit", "(", "1", ")" ]
The version must be updated in the .cdmp file
[ "The", "version", "must", "be", "updated", "in", "the", ".", "cdmp", "file" ]
f6dd59d5dc80d3f8f5ca5bcf49bad86c88be38df
https://github.com/SergeySatskiy/cdm-gc-plugin/blob/f6dd59d5dc80d3f8f5ca5bcf49bad86c88be38df/setup.py#L30-L45
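The parser above only needs a 'Version = x.y' line somewhere in the plugin description file. A small sketch with a hypothetical .cdmp body, reusing the same line-scanning logic:

import io

# Hypothetical description-file body; only the Version line matters here.
desc = io.StringIO("[Core]\nName = garbage collector\nVersion = 1.0.1\n")

version = None
for line in desc:
    line = line.strip()
    if line.startswith('Version'):
        version = line.split('=')[1].strip()
        break
assert version == '1.0.1'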
244510
fusionapp/fusion-util
fusion_util/cert.py
chainCerts
python
def chainCerts(data):
    """
    Matches and returns any certificates found except the first match.

    Regex code copied from L{twisted.internet.endpoints._parseSSL}.
    Related ticket: https://twistedmatrix.com/trac/ticket/7732

    @type path: L{bytes}
    @param data: PEM-encoded data containing the certificates.

    @rtype: L{list} containing L{Certificate}s.
    """
    matches = re.findall(
        r'(-----BEGIN CERTIFICATE-----\n.+?\n-----END CERTIFICATE-----)',
        data, flags=re.DOTALL)
    chainCertificates = [
        Certificate.loadPEM(chainCertPEM).original
        for chainCertPEM in matches]
    return chainCertificates[1:]
[ "def", "chainCerts", "(", "data", ")", ":", "matches", "=", "re", ".", "findall", "(", "r'(-----BEGIN CERTIFICATE-----\\n.+?\\n-----END CERTIFICATE-----)'", ",", "data", ",", "flags", "=", "re", ".", "DOTALL", ")", "chainCertificates", "=", "[", "Certificate", ".", "loadPEM", "(", "chainCertPEM", ")", ".", "original", "for", "chainCertPEM", "in", "matches", "]", "return", "chainCertificates", "[", "1", ":", "]" ]
Matches and returns any certificates found except the first match.

Regex code copied from L{twisted.internet.endpoints._parseSSL}.
Related ticket: https://twistedmatrix.com/trac/ticket/7732

@type path: L{bytes}
@param data: PEM-encoded data containing the certificates.

@rtype: L{list} containing L{Certificate}s.
[ "Matches", "and", "returns", "any", "certificates", "found", "except", "the", "first", "match", "." ]
089c525799926c8b8bf1117ab22ed055dc99c7e6
https://github.com/fusionapp/fusion-util/blob/089c525799926c8b8bf1117ab22ed055dc99c7e6/fusion_util/cert.py#L13-L32
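The regex does the splitting in chainCerts; Certificate.loadPEM then parses each match. A sketch of just the splitting step on a fabricated two-certificate bundle (the bodies are placeholders, so the Twisted loading step is left out):

import re

pem = ('-----BEGIN CERTIFICATE-----\nAAAA\n-----END CERTIFICATE-----\n'
       '-----BEGIN CERTIFICATE-----\nBBBB\n-----END CERTIFICATE-----\n')

matches = re.findall(
    r'(-----BEGIN CERTIFICATE-----\n.+?\n-----END CERTIFICATE-----)',
    pem, flags=re.DOTALL)

assert len(matches) == 2
# chainCerts would drop the first match (the leaf) and load the rest.
chain = matches[1:]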
244511
stephanepechard/projy
projy/templates/DjangoProjectTemplate.py
DjangoProjectTemplate.directories
python
def directories(self):
    """ Return the names of directories to be created. """
    directories_description = [
        self.project_name,
        self.project_name + '/conf',
        self.project_name + '/static',
    ]
    return directories_description
[ "def", "directories", "(", "self", ")", ":", "directories_description", "=", "[", "self", ".", "project_name", ",", "self", ".", "project_name", "+", "'/conf'", ",", "self", ".", "project_name", "+", "'/static'", ",", "]", "return", "directories_description" ]
Return the names of directories to be created.
[ "Return", "the", "names", "of", "directories", "to", "be", "created", "." ]
3146b0e3c207b977e1b51fcb33138746dae83c23
https://github.com/stephanepechard/projy/blob/3146b0e3c207b977e1b51fcb33138746dae83c23/projy/templates/DjangoProjectTemplate.py#L23-L30
244512
alexpearce/jobmonitor
jobmonitor/FlaskWithJobResolvers.py
FlaskWithJobResolvers.remove_job_resolver
python
def remove_job_resolver(self, job_resolver):
    """Remove job_resolver from the list of job resolvers.

    Keyword arguments:
    job_resolver -- Function reference of the job resolver to be removed.
    """
    for i, r in enumerate(self.job_resolvers()):
        if job_resolver == r:
            del self._job_resolvers[i]
[ "def", "remove_job_resolver", "(", "self", ",", "job_resolver", ")", ":", "for", "i", ",", "r", "in", "enumerate", "(", "self", ".", "job_resolvers", "(", ")", ")", ":", "if", "job_resolver", "==", "r", ":", "del", "self", ".", "_job_resolvers", "[", "i", "]" ]
Remove job_resolver from the list of job resolvers.

Keyword arguments:
job_resolver -- Function reference of the job resolver to be removed.
[ "Remove", "job_resolver", "from", "the", "list", "of", "job", "resolvers", "." ]
c08955ed3c357b2b3518aa0853b43bc237bc0814
https://github.com/alexpearce/jobmonitor/blob/c08955ed3c357b2b3518aa0853b43bc237bc0814/jobmonitor/FlaskWithJobResolvers.py#L60-L68
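An aside on the loop above: deleting by index while enumerating the same underlying list can skip the element that shifts into the deleted slot when several entries match. A common alternative (a sketch, not the project's code) rebuilds the list:

# Sketch: filter instead of deleting during iteration.
self._job_resolvers = [r for r in self._job_resolvers
                       if r != job_resolver]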
244513
alexpearce/jobmonitor
jobmonitor/FlaskWithJobResolvers.py
FlaskWithJobResolvers.resolve_job
python
def resolve_job(self, name):
    """Attempt to resolve the task name in to a job name.

    If no job resolver can resolve the task, i.e. they all return None,
    return None.

    Keyword arguments:
    name -- Name of the task to be resolved.
    """
    for r in self.job_resolvers():
        resolved_name = r(name)
        if resolved_name is not None:
            return resolved_name
    return None
[ "def", "resolve_job", "(", "self", ",", "name", ")", ":", "for", "r", "in", "self", ".", "job_resolvers", "(", ")", ":", "resolved_name", "=", "r", "(", "name", ")", "if", "resolved_name", "is", "not", "None", ":", "return", "resolved_name", "return", "None" ]
Attempt to resolve the task name in to a job name.

If no job resolver can resolve the task, i.e. they all return None,
return None.

Keyword arguments:
name -- Name of the task to be resolved.
[ "Attempt", "to", "resolve", "the", "task", "name", "in", "to", "a", "job", "name", "." ]
c08955ed3c357b2b3518aa0853b43bc237bc0814
https://github.com/alexpearce/jobmonitor/blob/c08955ed3c357b2b3518aa0853b43bc237bc0814/jobmonitor/FlaskWithJobResolvers.py#L70-L83
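resolve_job is a first-match resolver chain. The same pattern, as a standalone sketch with two toy resolvers:

def resolve(name, resolvers):
    # Return the first non-None resolution, else None.
    for r in resolvers:
        resolved = r(name)
        if resolved is not None:
            return resolved
    return None

resolvers = [lambda n: 'hlt_job' if n.startswith('hlt_') else None,
             lambda n: n.lower()]
assert resolve('hlt_track_fit', resolvers) == 'hlt_job'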
244514
tripzero/python-photons
photons/lightprotocol.py
LightProtocol.setColor
python
def setColor(self, id, color):
    """
    Command 0x01 sets the color of a specific light

    Data: [Command][Number_Lights_to_set][id_1][r][g][b][id_n][r][g][b]...
    """
    header = bytearray()
    header.append(LightProtocolCommand.SetColor)

    if not isinstance(id, list):
        id = [id]

    if not isinstance(color, list):
        color = [color]

    header.extend(struct.pack('<H', len(id)))

    i = 0
    light = bytearray()
    for curr_id in id:
        light.extend(struct.pack('<H', curr_id))
        light.extend(color[i])
        i += 1

    buff = header + light

    return self.send(buff)
[ "def", "setColor", "(", "self", ",", "id", ",", "color", ")", ":", "header", "=", "bytearray", "(", ")", "header", ".", "append", "(", "LightProtocolCommand", ".", "SetColor", ")", "if", "not", "isinstance", "(", "id", ",", "list", ")", ":", "id", "=", "[", "id", "]", "if", "not", "isinstance", "(", "color", ",", "list", ")", ":", "color", "=", "[", "color", "]", "header", ".", "extend", "(", "struct", ".", "pack", "(", "'<H'", ",", "len", "(", "id", ")", ")", ")", "i", "=", "0", "light", "=", "bytearray", "(", ")", "for", "curr_id", "in", "id", ":", "light", ".", "extend", "(", "struct", ".", "pack", "(", "'<H'", ",", "curr_id", ")", ")", "light", ".", "extend", "(", "color", "[", "i", "]", ")", "i", "+=", "1", "buff", "=", "header", "+", "light", "return", "self", ".", "send", "(", "buff", ")" ]
Command 0x01 sets the color of a specific light

Data: [Command][Number_Lights_to_set][id_1][r][g][b][id_n][r][g][b]...
[ "Command", "0x01", "sets", "the", "color", "of", "a", "specific", "light" ]
e185bbb55881189c7deeb599384944e61cf6048d
https://github.com/tripzero/python-photons/blob/e185bbb55881189c7deeb599384944e61cf6048d/photons/lightprotocol.py#L160-L190
244515
tripzero/python-photons
photons/lightprotocol.py
LightProtocol.setSeries
python
def setSeries(self, startId, length, color):
    """
    Command 0x07 sets all lights in the series starting from "startId"
    to "endId" to "color"

    Data: [0x07][startId][length][r][g][b]
    """
    buff = bytearray()
    buff.append(LightProtocolCommand.SetSeries)
    buff.extend(struct.pack('<H', startId))
    buff.extend(struct.pack('<H', length))
    buff.extend(color)

    return self.send(buff)
[ "def", "setSeries", "(", "self", ",", "startId", ",", "length", ",", "color", ")", ":", "buff", "=", "bytearray", "(", ")", "buff", ".", "append", "(", "LightProtocolCommand", ".", "SetSeries", ")", "buff", ".", "extend", "(", "struct", ".", "pack", "(", "'<H'", ",", "startId", ")", ")", "buff", ".", "extend", "(", "struct", ".", "pack", "(", "'<H'", ",", "length", ")", ")", "buff", ".", "extend", "(", "color", ")", "return", "self", ".", "send", "(", "buff", ")" ]
Command 0x07 sets all lights in the series starting from "startId" to
"endId" to "color"

Data: [0x07][startId][length][r][g][b]
[ "Command", "0x07", "sets", "all", "lights", "in", "the", "series", "starting", "from", "startId", "to", "endId", "to", "color" ]
e185bbb55881189c7deeb599384944e61cf6048d
https://github.com/tripzero/python-photons/blob/e185bbb55881189c7deeb599384944e61cf6048d/photons/lightprotocol.py#L192-L207
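Both photons methods above assemble little-endian frames by hand. A standalone sketch of the byte layouts they produce, assuming SetColor is 0x01 and SetSeries is 0x07 as the docstrings state:

import struct

def set_color_frame(ids_and_colors):
    # [0x01][u16 count][u16 id][r][g][b]...
    buff = bytearray([0x01])
    buff.extend(struct.pack('<H', len(ids_and_colors)))
    for light_id, (r, g, b) in ids_and_colors:
        buff.extend(struct.pack('<H', light_id))
        buff.extend((r, g, b))
    return bytes(buff)

def set_series_frame(start_id, length, color):
    # [0x07][u16 startId][u16 length][r][g][b]
    return bytes(bytearray([0x07]) + struct.pack('<H', start_id)
                 + struct.pack('<H', length) + bytearray(color))

assert set_color_frame([(3, (255, 0, 0))]).hex() == '0101000300ff0000'
assert set_series_frame(0, 5, (0, 255, 0)).hex() == '070000050000ff00'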
244516
dossier/dossier.models
dossier/models/web/routes.py
v0_highlighter_post
python
def v0_highlighter_post(request, response, tfidf, cid):
    '''Obtain highlights for a document POSTed as the body, which is
    the pre-design-thinking structure of the highlights API.  See v1
    below.

    NB: This end point will soon be deleted.

    The route for this endpoint is:
    ``POST /dossier/v0/highlighter/<cid>``.

    ``content_id`` is the id to associate with the given feature
    collection. The feature collection should be in the request body
    serialized as JSON.
    '''
    logger.info('got %r', cid)
    tfidf = tfidf or None
    content_type = request.headers.get('content-type', '')
    if not content_type.startswith('text/html'):
        logger.critical('content-type=%r', content_type)
        response.status = 415
        return {'error': {
            'code': 0,
            'message': 'content_type=%r and should be text/html' % content_type}}
    url = urllib.unquote(cid.split('|', 1)[1])
    body = request.body.read()
    if len(body) == 0:
        response.status = 420
        return {'error': {'code': 1, 'message': 'empty body'}}
    logger.info('parsing %d bytes for url: %r', len(body), url)
    fc = etl.create_fc_from_html(url, body, tfidf=tfidf)
    if fc is None:
        logger.critical('failed to get FC using %d bytes from %r',
                        len(body), url)
        response.status = 506
        return {'error': {
            'code': 2,
            'message': 'FC not generated for that content'}}
    highlights = dict()
    for feature_name, pretty_name in feature_pretty_names:
        # Each type of string is
        if feature_name not in fc:
            continue
        total = sum(fc[feature_name].values())
        highlights[pretty_name] = [
            (phrase, count / total, [], [])
            for phrase, count in sorted(fc[feature_name].items(),
                                        key=itemgetter(1), reverse=True)]
        logger.info('%r and %d keys', feature_name,
                    len(highlights[pretty_name]))
    return {'highlights': highlights}
[ "def", "v0_highlighter_post", "(", "request", ",", "response", ",", "tfidf", ",", "cid", ")", ":", "logger", ".", "info", "(", "'got %r'", ",", "cid", ")", "tfidf", "=", "tfidf", "or", "None", "content_type", "=", "request", ".", "headers", ".", "get", "(", "'content-type'", ",", "''", ")", "if", "not", "content_type", ".", "startswith", "(", "'text/html'", ")", ":", "logger", ".", "critical", "(", "'content-type=%r'", ",", "content_type", ")", "response", ".", "status", "=", "415", "return", "{", "'error'", ":", "{", "'code'", ":", "0", ",", "'message'", ":", "'content_type=%r and should be text/html'", "%", "content_type", "}", "}", "url", "=", "urllib", ".", "unquote", "(", "cid", ".", "split", "(", "'|'", ",", "1", ")", "[", "1", "]", ")", "body", "=", "request", ".", "body", ".", "read", "(", ")", "if", "len", "(", "body", ")", "==", "0", ":", "response", ".", "status", "=", "420", "return", "{", "'error'", ":", "{", "'code'", ":", "1", ",", "'message'", ":", "'empty body'", "}", "}", "logger", ".", "info", "(", "'parsing %d bytes for url: %r'", ",", "len", "(", "body", ")", ",", "url", ")", "fc", "=", "etl", ".", "create_fc_from_html", "(", "url", ",", "body", ",", "tfidf", "=", "tfidf", ")", "if", "fc", "is", "None", ":", "logger", ".", "critical", "(", "'failed to get FC using %d bytes from %r'", ",", "len", "(", "body", ")", ",", "url", ")", "response", ".", "status", "=", "506", "return", "{", "'error'", ":", "{", "'code'", ":", "2", ",", "'message'", ":", "'FC not generated for that content'", "}", "}", "highlights", "=", "dict", "(", ")", "for", "feature_name", ",", "pretty_name", "in", "feature_pretty_names", ":", "# Each type of string is", "if", "feature_name", "not", "in", "fc", ":", "continue", "total", "=", "sum", "(", "fc", "[", "feature_name", "]", ".", "values", "(", ")", ")", "highlights", "[", "pretty_name", "]", "=", "[", "(", "phrase", ",", "count", "/", "total", ",", "[", "]", ",", "[", "]", ")", "for", "phrase", ",", "count", "in", "sorted", "(", "fc", "[", "feature_name", "]", ".", "items", "(", ")", ",", "key", "=", "itemgetter", "(", "1", ")", ",", "reverse", "=", "True", ")", "]", "logger", ".", "info", "(", "'%r and %d keys'", ",", "feature_name", ",", "len", "(", "highlights", "[", "pretty_name", "]", ")", ")", "return", "{", "'highlights'", ":", "highlights", "}" ]
Obtain highlights for a document POSTed as the body, which is the
pre-design-thinking structure of the highlights API.  See v1 below.

NB: This end point will soon be deleted.

The route for this endpoint is:
``POST /dossier/v0/highlighter/<cid>``.

``content_id`` is the id to associate with the given feature
collection. The feature collection should be in the request body
serialized as JSON.
[ "Obtain", "highlights", "for", "a", "document", "POSTed", "as", "the", "body", "which", "is", "the", "pre", "-", "design", "-", "thinking", "structure", "of", "the", "highlights", "API", ".", "See", "v1", "below", "." ]
c9e282f690eab72963926329efe1600709e48b13
https://github.com/dossier/dossier.models/blob/c9e282f690eab72963926329efe1600709e48b13/dossier/models/web/routes.py#L289-L331
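The core transform in the endpoint above turns a feature's raw counts into frequency-sorted weights. A minimal Python 3 sketch of that step (the route itself is Python 2 code, where count / total needs true division imported to behave this way):

from operator import itemgetter

counts = {'bob': 3, 'alice': 1}
total = sum(counts.values())
weighted = [(phrase, count / total)
            for phrase, count in sorted(counts.items(),
                                        key=itemgetter(1), reverse=True)]
assert weighted == [('bob', 0.75), ('alice', 0.25)]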
244517
dossier/dossier.models
dossier/models/web/routes.py
v1_highlights_get
python
def v1_highlights_get(response, kvlclient, file_id_str, max_elapsed=300):
    '''Obtain highlights for a document POSTed previously to this end
    point.  See documentation for v1_highlights_post for further
    details.  If the `state` is still `pending` for more than
    `max_elapsed` after the start of the `WorkUnit`, then this reports
    an error, although the `WorkUnit` may continue in the background.
    '''
    file_id = make_file_id(file_id_str)
    kvlclient.setup_namespace(highlights_kvlayer_tables)
    payload_strs = list(kvlclient.get('highlights', file_id))
    if not (payload_strs and payload_strs[0][1]):
        response.status = 500
        payload = {
            'state': ERROR,
            'error': {'code': 8, 'message': 'unknown error'}}
        logger.critical('got bogus info for %r: %r', file_id, payload_strs)
    else:
        payload_str = payload_strs[0][1]
        try:
            payload = json.loads(payload_str)
            if payload['state'] == HIGHLIGHTS_PENDING:
                elapsed = time.time() - payload.get('start', 0)
                if elapsed > max_elapsed:
                    response.status = 500
                    payload = {
                        'state': ERROR,
                        'error': {'code': 8, 'message': 'hit timeout'}}
                    logger.critical('hit timeout on %r', file_id)
                    kvlclient.put('highlights', (file_id, json.dumps(payload)))
                else:
                    payload['elapsed'] = elapsed
            logger.info('returning stored payload for %r', file_id)
        except Exception, exc:
            logger.critical('failed to decode out of %r', payload_str,
                            exc_info=True)
            response.status = 400
            payload = {
                'state': ERROR,
                'error': {'code': 9,
                          'message': 'nothing known about file_id=%r'
                                     % file_id}}

    # only place where payload is returned
    return payload
[ "def", "v1_highlights_get", "(", "response", ",", "kvlclient", ",", "file_id_str", ",", "max_elapsed", "=", "300", ")", ":", "file_id", "=", "make_file_id", "(", "file_id_str", ")", "kvlclient", ".", "setup_namespace", "(", "highlights_kvlayer_tables", ")", "payload_strs", "=", "list", "(", "kvlclient", ".", "get", "(", "'highlights'", ",", "file_id", ")", ")", "if", "not", "(", "payload_strs", "and", "payload_strs", "[", "0", "]", "[", "1", "]", ")", ":", "response", ".", "status", "=", "500", "payload", "=", "{", "'state'", ":", "ERROR", ",", "'error'", ":", "{", "'code'", ":", "8", ",", "'message'", ":", "'unknown error'", "}", "}", "logger", ".", "critical", "(", "'got bogus info for %r: %r'", ",", "file_id", ",", "payload_strs", ")", "else", ":", "payload_str", "=", "payload_strs", "[", "0", "]", "[", "1", "]", "try", ":", "payload", "=", "json", ".", "loads", "(", "payload_str", ")", "if", "payload", "[", "'state'", "]", "==", "HIGHLIGHTS_PENDING", ":", "elapsed", "=", "time", ".", "time", "(", ")", "-", "payload", ".", "get", "(", "'start'", ",", "0", ")", "if", "elapsed", ">", "max_elapsed", ":", "response", ".", "status", "=", "500", "payload", "=", "{", "'state'", ":", "ERROR", ",", "'error'", ":", "{", "'code'", ":", "8", ",", "'message'", ":", "'hit timeout'", "}", "}", "logger", ".", "critical", "(", "'hit timeout on %r'", ",", "file_id", ")", "kvlclient", ".", "put", "(", "'highlights'", ",", "(", "file_id", ",", "json", ".", "dumps", "(", "payload", ")", ")", ")", "else", ":", "payload", "[", "'elapsed'", "]", "=", "elapsed", "logger", ".", "info", "(", "'returning stored payload for %r'", ",", "file_id", ")", "except", "Exception", ",", "exc", ":", "logger", ".", "critical", "(", "'failed to decode out of %r'", ",", "payload_str", ",", "exc_info", "=", "True", ")", "response", ".", "status", "=", "400", "payload", "=", "{", "'state'", ":", "ERROR", ",", "'error'", ":", "{", "'code'", ":", "9", ",", "'message'", ":", "'nothing known about file_id=%r'", "%", "file_id", "}", "}", "# only place where payload is returned", "return", "payload" ]
Obtain highlights for a document POSTed previously to this end point.
See documentation for v1_highlights_post for further details.

If the `state` is still `pending` for more than `max_elapsed` after
the start of the `WorkUnit`, then this reports an error, although the
`WorkUnit` may continue in the background.
[ "Obtain", "highlights", "for", "a", "document", "POSTed", "previously", "to", "this", "end", "point", ".", "See", "documentation", "for", "v1_highlights_post", "for", "further", "details", ".", "If", "the", "state", "is", "still", "pending", "for", "more", "than", "max_elapsed", "after", "the", "start", "of", "the", "WorkUnit", "then", "this", "reports", "an", "error", "although", "the", "WorkUnit", "may", "continue", "in", "the", "background", "." ]
c9e282f690eab72963926329efe1600709e48b13
https://github.com/dossier/dossier.models/blob/c9e282f690eab72963926329efe1600709e48b13/dossier/models/web/routes.py#L345-L393
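Stripped of the kvlayer storage calls, the endpoint's staleness rule for a stored record is compact. A sketch using plain strings in place of the HIGHLIGHTS_PENDING and ERROR constants:

import time

def effective_state(payload, max_elapsed=300):
    # A pending job older than max_elapsed seconds is reported as an
    # error, even though the background WorkUnit may still be running.
    if payload.get('state') == 'pending':
        elapsed = time.time() - payload.get('start', 0)
        if elapsed > max_elapsed:
            return {'state': 'error',
                    'error': {'code': 8, 'message': 'hit timeout'}}
        payload['elapsed'] = elapsed
    return payload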
244518
dossier/dossier.models
dossier/models/web/routes.py
create_highlights
python
def create_highlights(data, tfidf):
    '''compute highlights for `data`, store it in the store using
    `kvlclient`, and return a `highlights` response payload.
    '''
    try:
        fc = etl.create_fc_from_html(
            data['content-location'], data['body'], tfidf=tfidf, encoding=None)
    except Exception, exc:
        logger.critical('failed to build FC', exc_info=True)
        return {
            'state': ERROR,
            'error': {'code': 7,
                      'message': 'internal error: %s' % traceback.format_exc(exc),
                      }
        }
    if fc is None:
        logger.critical('failed to get FC using %d bytes from %r',
                        len(data['body']), data['content-location'])
        response.status = 500
        return {
            'state': ERROR,
            'error': {
                'code': 7,
                'message': 'internal error: FC not generated for that content',
            },
        }
    try:
        highlights = dict()
        for feature_name, pretty_name in feature_pretty_names:
            # Each type of string is
            if feature_name not in fc:
                continue
            total = sum(fc[feature_name].values())
            bow = sorted(fc[feature_name].items(), key=itemgetter(1),
                         reverse=True)
            highlights[pretty_name] = [(phrase, count / total)
                                       for phrase, count in bow]
            logger.info('%r and %d keys', feature_name,
                        len(highlights[pretty_name]))
        highlight_objs = build_highlight_objects(data['body'], highlights)
    except Exception, exc:
        logger.critical('failed to build highlights', exc_info=True)
        return {
            'state': ERROR,
            'error': {'code': 7,
                      'message': 'internal error: %s' % traceback.format_exc(exc),
                      }
        }
    payload = {
        'highlights': highlight_objs,
        'state': COMPLETED,
    }
    return payload
[ "def", "create_highlights", "(", "data", ",", "tfidf", ")", ":", "try", ":", "fc", "=", "etl", ".", "create_fc_from_html", "(", "data", "[", "'content-location'", "]", ",", "data", "[", "'body'", "]", ",", "tfidf", "=", "tfidf", ",", "encoding", "=", "None", ")", "except", "Exception", ",", "exc", ":", "logger", ".", "critical", "(", "'failed to build FC'", ",", "exc_info", "=", "True", ")", "return", "{", "'state'", ":", "ERROR", ",", "'error'", ":", "{", "'code'", ":", "7", ",", "'message'", ":", "'internal error: %s'", "%", "traceback", ".", "format_exc", "(", "exc", ")", ",", "}", "}", "if", "fc", "is", "None", ":", "logger", ".", "critical", "(", "'failed to get FC using %d bytes from %r'", ",", "len", "(", "data", "[", "'body'", "]", ")", ",", "data", "[", "'content-location'", "]", ")", "response", ".", "status", "=", "500", "return", "{", "'state'", ":", "ERROR", ",", "'error'", ":", "{", "'code'", ":", "7", ",", "'message'", ":", "'internal error: FC not generated for that content'", ",", "}", ",", "}", "try", ":", "highlights", "=", "dict", "(", ")", "for", "feature_name", ",", "pretty_name", "in", "feature_pretty_names", ":", "# Each type of string is", "if", "feature_name", "not", "in", "fc", ":", "continue", "total", "=", "sum", "(", "fc", "[", "feature_name", "]", ".", "values", "(", ")", ")", "bow", "=", "sorted", "(", "fc", "[", "feature_name", "]", ".", "items", "(", ")", ",", "key", "=", "itemgetter", "(", "1", ")", ",", "reverse", "=", "True", ")", "highlights", "[", "pretty_name", "]", "=", "[", "(", "phrase", ",", "count", "/", "total", ")", "for", "phrase", ",", "count", "in", "bow", "]", "logger", ".", "info", "(", "'%r and %d keys'", ",", "feature_name", ",", "len", "(", "highlights", "[", "pretty_name", "]", ")", ")", "highlight_objs", "=", "build_highlight_objects", "(", "data", "[", "'body'", "]", ",", "highlights", ")", "except", "Exception", ",", "exc", ":", "logger", ".", "critical", "(", "'failed to build highlights'", ",", "exc_info", "=", "True", ")", "return", "{", "'state'", ":", "ERROR", ",", "'error'", ":", "{", "'code'", ":", "7", ",", "'message'", ":", "'internal error: %s'", "%", "traceback", ".", "format_exc", "(", "exc", ")", ",", "}", "}", "payload", "=", "{", "'highlights'", ":", "highlight_objs", ",", "'state'", ":", "COMPLETED", ",", "}", "return", "payload" ]
compute highlights for `data`, store it in the store using `kvlclient`, and return a `highlights` response payload.
[ "compute", "highlights", "for", "data", "store", "it", "in", "the", "store", "using", "kvlclient", "and", "return", "a", "highlights", "response", "payload", "." ]
c9e282f690eab72963926329efe1600709e48b13
https://github.com/dossier/dossier.models/blob/c9e282f690eab72963926329efe1600709e48b13/dossier/models/web/routes.py#L744-L798
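The highlight weights computed above are just feature counts normalized by their sum. A standalone sketch of that step, with a plain dict standing in for a dossier FeatureCollection feature (note the float() cast, which sidesteps Python 2 integer division):

from operator import itemgetter

def rank_phrases(counter):
    # Sort phrases by raw count, then normalize the counts into weights.
    total = float(sum(counter.values()))
    bow = sorted(counter.items(), key=itemgetter(1), reverse=True)
    return [(phrase, count / total) for phrase, count in bow]

print(rank_phrases({'acid': 3, 'base': 1}))  # [('acid', 0.75), ('base', 0.25)]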
244,519
dossier/dossier.models
dossier/models/web/routes.py
make_xpath_ranges
def make_xpath_ranges(html, phrase):
    '''Given an HTML string and a `phrase`, build a regex to find
    offsets for the phrase, and then build a list of `XPathRange`
    objects for it.  If this fails, return an empty list.
    '''
    if not html:
        return []
    if not isinstance(phrase, unicode):
        try:
            phrase = phrase.decode('utf8')
        except:
            logger.info('failed %r.decode("utf8")', exc_info=True)
            return []
    phrase_re = re.compile(
        phrase, flags=re.UNICODE | re.IGNORECASE | re.MULTILINE)
    spans = []
    for match in phrase_re.finditer(html, overlapped=False):
        spans.append(match.span())  # a list of tuple(start, end) char indexes
    # now run fancy aligner magic to get xpath info and format them as
    # XPathRange per above
    try:
        xpath_ranges = list(char_offsets_to_xpaths(html, spans))
    except:
        logger.info('failed to get xpaths', exc_info=True)
        return []
    ranges = []
    for xpath_range in filter(None, xpath_ranges):
        ranges.append(dict(
            start=dict(node=xpath_range.start_xpath,
                       idx=xpath_range.start_offset),
            end=dict(node=xpath_range.end_xpath,
                     idx=xpath_range.end_offset)))
    return ranges
python
def make_xpath_ranges(html, phrase):
    '''Given an HTML string and a `phrase`, build a regex to find
    offsets for the phrase, and then build a list of `XPathRange`
    objects for it.  If this fails, return an empty list.
    '''
    if not html:
        return []
    if not isinstance(phrase, unicode):
        try:
            phrase = phrase.decode('utf8')
        except:
            logger.info('failed %r.decode("utf8")', exc_info=True)
            return []
    phrase_re = re.compile(
        phrase, flags=re.UNICODE | re.IGNORECASE | re.MULTILINE)
    spans = []
    for match in phrase_re.finditer(html, overlapped=False):
        spans.append(match.span())  # a list of tuple(start, end) char indexes
    # now run fancy aligner magic to get xpath info and format them as
    # XPathRange per above
    try:
        xpath_ranges = list(char_offsets_to_xpaths(html, spans))
    except:
        logger.info('failed to get xpaths', exc_info=True)
        return []
    ranges = []
    for xpath_range in filter(None, xpath_ranges):
        ranges.append(dict(
            start=dict(node=xpath_range.start_xpath,
                       idx=xpath_range.start_offset),
            end=dict(node=xpath_range.end_xpath,
                     idx=xpath_range.end_offset)))
    return ranges
[ "def", "make_xpath_ranges", "(", "html", ",", "phrase", ")", ":", "if", "not", "html", ":", "return", "[", "]", "if", "not", "isinstance", "(", "phrase", ",", "unicode", ")", ":", "try", ":", "phrase", "=", "phrase", ".", "decode", "(", "'utf8'", ")", "except", ":", "logger", ".", "info", "(", "'failed %r.decode(\"utf8\")'", ",", "exc_info", "=", "True", ")", "return", "[", "]", "phrase_re", "=", "re", ".", "compile", "(", "phrase", ",", "flags", "=", "re", ".", "UNICODE", "|", "re", ".", "IGNORECASE", "|", "re", ".", "MULTILINE", ")", "spans", "=", "[", "]", "for", "match", "in", "phrase_re", ".", "finditer", "(", "html", ",", "overlapped", "=", "False", ")", ":", "spans", ".", "append", "(", "match", ".", "span", "(", ")", ")", "# a list of tuple(start, end) char indexes", "# now run fancy aligner magic to get xpath info and format them as", "# XPathRange per above", "try", ":", "xpath_ranges", "=", "list", "(", "char_offsets_to_xpaths", "(", "html", ",", "spans", ")", ")", "except", ":", "logger", ".", "info", "(", "'failed to get xpaths'", ",", "exc_info", "=", "True", ")", "return", "[", "]", "ranges", "=", "[", "]", "for", "xpath_range", "in", "filter", "(", "None", ",", "xpath_ranges", ")", ":", "ranges", ".", "append", "(", "dict", "(", "start", "=", "dict", "(", "node", "=", "xpath_range", ".", "start_xpath", ",", "idx", "=", "xpath_range", ".", "start_offset", ")", ",", "end", "=", "dict", "(", "node", "=", "xpath_range", ".", "end_xpath", ",", "idx", "=", "xpath_range", ".", "end_offset", ")", ")", ")", "return", "ranges" ]
Given an HTML string and a `phrase`, build a regex to find offsets
for the phrase, and then build a list of `XPathRange` objects for it.
If this fails, return an empty list.
[ "Given", "an", "HTML", "string", "and", "a", "phrase", "build", "a", "regex", "to", "find", "offsets", "for", "the", "phrase", "and", "then", "build", "a", "list", "of", "XPathRange", "objects", "for", "it", ".", "If", "this", "fails", "return", "an", "empty", "list", "." ]
c9e282f690eab72963926329efe1600709e48b13
https://github.com/dossier/dossier.models/blob/c9e282f690eab72963926329efe1600709e48b13/dossier/models/web/routes.py#L835-L871
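Note that finditer(..., overlapped=False) is the signature of the third-party regex package, not the standard-library re module, so routes.py presumably imports the former. A small sketch of the span-collection step under that assumption (escaping added here for safety; the function above compiles the phrase verbatim):

import regex  # pip install regex; stdlib re has no `overlapped` keyword

def phrase_spans(html, phrase):
    # Collect (start, end) character offsets of every case-insensitive match.
    pattern = regex.compile(regex.escape(phrase),
                            flags=regex.UNICODE | regex.IGNORECASE)
    return [m.span() for m in pattern.finditer(html, overlapped=False)]

print(phrase_spans(u'<p>Cat and cat</p>', u'cat'))  # [(3, 6), (11, 14)]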
244,520
lwcook/horsetail-matching
horsetailmatching/surrogates.py
eval_poly
def eval_poly(uvec, nvec, Jvec):
    '''Evaluate multi-dimensional polynomials through tensor multiplication.

    :param list uvec: vector value of the uncertain parameters at which
        to evaluate the polynomial
    :param list nvec: order in each dimension at which to evaluate the
        polynomial
    :param list Jvec: Jacobi matrix of each dimension's 1D polynomial

    :return: poly_value - value of the polynomial evaluated at uvec
    :rtype: float
    '''
    us = _makeIter(uvec)
    ns = _makeIter(nvec)
    Js = _makeIter(Jvec)
    return np.prod([_eval_poly_1D(u, n, J) for u, n, J in zip(us, ns, Js)])
python
def eval_poly(uvec, nvec, Jvec):
    '''Evaluate multi-dimensional polynomials through tensor multiplication.

    :param list uvec: vector value of the uncertain parameters at which
        to evaluate the polynomial
    :param list nvec: order in each dimension at which to evaluate the
        polynomial
    :param list Jvec: Jacobi matrix of each dimension's 1D polynomial

    :return: poly_value - value of the polynomial evaluated at uvec
    :rtype: float
    '''
    us = _makeIter(uvec)
    ns = _makeIter(nvec)
    Js = _makeIter(Jvec)
    return np.prod([_eval_poly_1D(u, n, J) for u, n, J in zip(us, ns, Js)])
[ "def", "eval_poly", "(", "uvec", ",", "nvec", ",", "Jvec", ")", ":", "us", "=", "_makeIter", "(", "uvec", ")", "ns", "=", "_makeIter", "(", "nvec", ")", "Js", "=", "_makeIter", "(", "Jvec", ")", "return", "np", ".", "prod", "(", "[", "_eval_poly_1D", "(", "u", ",", "n", ",", "J", ")", "for", "u", ",", "n", ",", "J", "in", "zip", "(", "us", ",", "ns", ",", "Js", ")", "]", ")" ]
Evaluate multi-dimensional polynomials through tensor multiplication.

:param list uvec: vector value of the uncertain parameters at which
    to evaluate the polynomial
:param list nvec: order in each dimension at which to evaluate the
    polynomial
:param list Jvec: Jacobi matrix of each dimension's 1D polynomial

:return: poly_value - value of the polynomial evaluated at uvec
:rtype: float
[ "Evaluate", "multi", "-", "dimensional", "polynomials", "through", "tensor", "multiplication", "." ]
f3d5f8d01249debbca978f412ce4eae017458119
https://github.com/lwcook/horsetail-matching/blob/f3d5f8d01249debbca978f412ce4eae017458119/horsetailmatching/surrogates.py#L186-L204
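_eval_poly_1D is not shown in this file; a plausible sketch of such a building block, evaluating an orthonormal polynomial from the three-term recurrence encoded in a symmetric tridiagonal Jacobi matrix J (diagonal a_k, off-diagonal b_k; the exact normalization used by the library is an assumption):

import numpy as np

def eval_poly_1d(u, n, J):
    # p_{k+1}(u) = ((u - a_k) p_k(u) - b_k p_{k-1}(u)) / b_{k+1},
    # with p_0 = 1, p_{-1} = 0, a_k = J[k, k] and b_k = J[k, k-1].
    p_prev, p = 0.0, 1.0
    for k in range(n):
        a_k = J[k, k]
        b_k = J[k, k - 1] if k > 0 else 0.0
        p_prev, p = p, ((u - a_k) * p - b_k * p_prev) / J[k + 1, k]
    return p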
244,521
lwcook/horsetail-matching
horsetailmatching/surrogates.py
PolySurrogate.train
def train(self, ftrain):
    '''Trains the polynomial expansion.

    :param numpy.ndarray/function ftrain: output values corresponding to
        the quadrature points given by the getQuadraturePoints method to
        which the expansion should be trained. Or a function that should
        be evaluated at the quadrature points to give these output values.

    *Sample Usage*::

        >>> thePC = PolySurrogate(dimensions=2)
        >>> thePC.train(myFunc)
        >>> predicted_q = thePC.predict([0, 1])

        >>> thePC = PolySurrogate(dimensions=2)
        >>> U = thePC.getQuadraturePoints()
        >>> Q = [myFunc(u) for u in U]
        >>> thePC.train(Q)
        >>> predicted_q = thePC.predict([0, 1])
    '''
    self.coeffs = 0*self.coeffs
    upoints, wpoints = self.getQuadraturePointsAndWeights()
    try:
        fpoints = [ftrain(u) for u in upoints]
    except TypeError:
        fpoints = ftrain

    for ipoly in np.arange(self.N_poly):
        inds = tuple(self.index_polys[ipoly])
        coeff = 0.0
        for (u, q, w) in zip(upoints, fpoints, wpoints):
            coeff += eval_poly(u, inds, self.J_list)*q*np.prod(w)
        self.coeffs[inds] = coeff

    return None
python
def train(self, ftrain):
    '''Trains the polynomial expansion.

    :param numpy.ndarray/function ftrain: output values corresponding to
        the quadrature points given by the getQuadraturePoints method to
        which the expansion should be trained. Or a function that should
        be evaluated at the quadrature points to give these output values.

    *Sample Usage*::

        >>> thePC = PolySurrogate(dimensions=2)
        >>> thePC.train(myFunc)
        >>> predicted_q = thePC.predict([0, 1])

        >>> thePC = PolySurrogate(dimensions=2)
        >>> U = thePC.getQuadraturePoints()
        >>> Q = [myFunc(u) for u in U]
        >>> thePC.train(Q)
        >>> predicted_q = thePC.predict([0, 1])
    '''
    self.coeffs = 0*self.coeffs
    upoints, wpoints = self.getQuadraturePointsAndWeights()
    try:
        fpoints = [ftrain(u) for u in upoints]
    except TypeError:
        fpoints = ftrain

    for ipoly in np.arange(self.N_poly):
        inds = tuple(self.index_polys[ipoly])
        coeff = 0.0
        for (u, q, w) in zip(upoints, fpoints, wpoints):
            coeff += eval_poly(u, inds, self.J_list)*q*np.prod(w)
        self.coeffs[inds] = coeff

    return None
[ "def", "train", "(", "self", ",", "ftrain", ")", ":", "self", ".", "coeffs", "=", "0", "*", "self", ".", "coeffs", "upoints", ",", "wpoints", "=", "self", ".", "getQuadraturePointsAndWeights", "(", ")", "try", ":", "fpoints", "=", "[", "ftrain", "(", "u", ")", "for", "u", "in", "upoints", "]", "except", "TypeError", ":", "fpoints", "=", "ftrain", "for", "ipoly", "in", "np", ".", "arange", "(", "self", ".", "N_poly", ")", ":", "inds", "=", "tuple", "(", "self", ".", "index_polys", "[", "ipoly", "]", ")", "coeff", "=", "0.0", "for", "(", "u", ",", "q", ",", "w", ")", "in", "zip", "(", "upoints", ",", "fpoints", ",", "wpoints", ")", ":", "coeff", "+=", "eval_poly", "(", "u", ",", "inds", ",", "self", ".", "J_list", ")", "*", "q", "*", "np", ".", "prod", "(", "w", ")", "self", ".", "coeffs", "[", "inds", "]", "=", "coeff", "return", "None" ]
Trains the polynomial expansion.

:param numpy.ndarray/function ftrain: output values corresponding to
    the quadrature points given by the getQuadraturePoints method to
    which the expansion should be trained. Or a function that should be
    evaluated at the quadrature points to give these output values.

*Sample Usage*::

    >>> thePC = PolySurrogate(dimensions=2)
    >>> thePC.train(myFunc)
    >>> predicted_q = thePC.predict([0, 1])

    >>> thePC = PolySurrogate(dimensions=2)
    >>> U = thePC.getQuadraturePoints()
    >>> Q = [myFunc(u) for u in U]
    >>> thePC.train(Q)
    >>> predicted_q = thePC.predict([0, 1])
[ "Trains", "the", "polynomial", "expansion", "." ]
f3d5f8d01249debbca978f412ce4eae017458119
https://github.com/lwcook/horsetail-matching/blob/f3d5f8d01249debbca978f412ce4eae017458119/horsetailmatching/surrogates.py#L100-L138
244,522
lwcook/horsetail-matching
horsetailmatching/surrogates.py
PolySurrogate.getQuadraturePointsAndWeights
def getQuadraturePointsAndWeights(self):
    '''Gets the quadrature points and weights for gaussian quadrature
    integration of inner products from the definition of the polynomials
    in each dimension.

    :return: (u_points, w_points) - np.ndarray of shape
        (num_polynomials, num_dimensions) and a np.ndarray of size
        (num_polynomials)
    :rtype: (np.ndarray, np.ndarray)
    '''
    qw_list, qp_list = [], []
    for ii in np.arange(len(self.J_list)):
        d, Q = np.linalg.eig(self.J_list[ii])
        qp, qpi = d[np.argsort(d)].reshape([d.size, 1]), np.argsort(d)
        qw = (Q[0, qpi]**2).reshape([d.size, 1])
        qw_list.append(qw)
        qp_list.append(qp)

    umesh = np.meshgrid(*qp_list)
    upoints = np.vstack([m.flatten() for m in umesh]).T
    wmesh = np.meshgrid(*qw_list)
    wpoints = np.vstack([m.flatten() for m in wmesh]).T
    return upoints, wpoints
python
def getQuadraturePointsAndWeights(self):
    '''Gets the quadrature points and weights for gaussian quadrature
    integration of inner products from the definition of the polynomials
    in each dimension.

    :return: (u_points, w_points) - np.ndarray of shape
        (num_polynomials, num_dimensions) and a np.ndarray of size
        (num_polynomials)
    :rtype: (np.ndarray, np.ndarray)
    '''
    qw_list, qp_list = [], []
    for ii in np.arange(len(self.J_list)):
        d, Q = np.linalg.eig(self.J_list[ii])
        qp, qpi = d[np.argsort(d)].reshape([d.size, 1]), np.argsort(d)
        qw = (Q[0, qpi]**2).reshape([d.size, 1])
        qw_list.append(qw)
        qp_list.append(qp)

    umesh = np.meshgrid(*qp_list)
    upoints = np.vstack([m.flatten() for m in umesh]).T
    wmesh = np.meshgrid(*qw_list)
    wpoints = np.vstack([m.flatten() for m in wmesh]).T
    return upoints, wpoints
[ "def", "getQuadraturePointsAndWeights", "(", "self", ")", ":", "qw_list", ",", "qp_list", "=", "[", "]", ",", "[", "]", "for", "ii", "in", "np", ".", "arange", "(", "len", "(", "self", ".", "J_list", ")", ")", ":", "d", ",", "Q", "=", "np", ".", "linalg", ".", "eig", "(", "self", ".", "J_list", "[", "ii", "]", ")", "qp", ",", "qpi", "=", "d", "[", "np", ".", "argsort", "(", "d", ")", "]", ".", "reshape", "(", "[", "d", ".", "size", ",", "1", "]", ")", ",", "np", ".", "argsort", "(", "d", ")", "qw", "=", "(", "Q", "[", "0", ",", "qpi", "]", "**", "2", ")", ".", "reshape", "(", "[", "d", ".", "size", ",", "1", "]", ")", "qw_list", ".", "append", "(", "qw", ")", "qp_list", ".", "append", "(", "qp", ")", "umesh", "=", "np", ".", "meshgrid", "(", "*", "qp_list", ")", "upoints", "=", "np", ".", "vstack", "(", "[", "m", ".", "flatten", "(", ")", "for", "m", "in", "umesh", "]", ")", ".", "T", "wmesh", "=", "np", ".", "meshgrid", "(", "*", "qw_list", ")", "wpoints", "=", "np", ".", "vstack", "(", "[", "m", ".", "flatten", "(", ")", "for", "m", "in", "wmesh", "]", ")", ".", "T", "return", "upoints", ",", "wpoints" ]
Gets the quadrature points and weights for gaussian quadrature
integration of inner products from the definition of the polynomials
in each dimension.

:return: (u_points, w_points) - np.ndarray of shape
    (num_polynomials, num_dimensions) and a np.ndarray of size
    (num_polynomials)
:rtype: (np.ndarray, np.ndarray)
[ "Gets", "the", "quadrature", "points", "and", "weights", "for", "gaussian", "quadrature", "integration", "of", "inner", "products", "from", "the", "definition", "of", "the", "polynomials", "in", "each", "dimension", "." ]
f3d5f8d01249debbca978f412ce4eae017458119
https://github.com/lwcook/horsetail-matching/blob/f3d5f8d01249debbca978f412ce4eae017458119/horsetailmatching/surrogates.py#L140-L169
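This is the Golub–Welsch construction: the eigenvalues of each Jacobi matrix are the 1D quadrature nodes, and the squared first components of the normalized eigenvectors (scaled by the zeroth moment of the weight function) are the weights. A quick cross-check against numpy's Gauss–Legendre rule:

import numpy as np

def golub_welsch(J, mu0=1.0):
    # eigh suits the symmetric tridiagonal J and sorts eigenvalues ascending.
    d, Q = np.linalg.eigh(J)
    return d, mu0 * Q[0, :] ** 2

n = 5
k = np.arange(1, n)
b = k / np.sqrt(4.0 * k**2 - 1.0)          # Legendre recurrence coefficients
J = np.diag(b, 1) + np.diag(b, -1)
x, w = golub_welsch(J, mu0=2.0)            # mu0 = integral of 1 over [-1, 1]
xr, wr = np.polynomial.legendre.leggauss(n)
assert np.allclose(x, xr) and np.allclose(w, wr)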
244,523
kolypto/py-exdoc
exdoc/sa/__init__.py
_model_columns
def _model_columns(ins):
    """ Get columns info

    :type ins: sqlalchemy.orm.mapper.Mapper
    :rtype: list[SaColumnDoc]
    """
    columns = []
    for c in ins.column_attrs:
        # Skip protected
        if c.key.startswith('_'):
            continue

        # Collect
        columns.append(SaColumnDoc(
            key=c.key,
            doc=c.doc or '',
            type=str(c.columns[0].type),  # FIXME: support multi-column properties
            null=c.columns[0].nullable,
        ))
    return columns
python
def _model_columns(ins):
    """ Get columns info

    :type ins: sqlalchemy.orm.mapper.Mapper
    :rtype: list[SaColumnDoc]
    """
    columns = []
    for c in ins.column_attrs:
        # Skip protected
        if c.key.startswith('_'):
            continue

        # Collect
        columns.append(SaColumnDoc(
            key=c.key,
            doc=c.doc or '',
            type=str(c.columns[0].type),  # FIXME: support multi-column properties
            null=c.columns[0].nullable,
        ))
    return columns
[ "def", "_model_columns", "(", "ins", ")", ":", "columns", "=", "[", "]", "for", "c", "in", "ins", ".", "column_attrs", ":", "# Skip protected", "if", "c", ".", "key", ".", "startswith", "(", "'_'", ")", ":", "continue", "# Collect", "columns", ".", "append", "(", "SaColumnDoc", "(", "key", "=", "c", ".", "key", ",", "doc", "=", "c", ".", "doc", "or", "''", ",", "type", "=", "str", "(", "c", ".", "columns", "[", "0", "]", ".", "type", ")", ",", "# FIXME: support multi-column properties", "null", "=", "c", ".", "columns", "[", "0", "]", ".", "nullable", ",", ")", ")", "return", "columns" ]
Get columns info

:type ins: sqlalchemy.orm.mapper.Mapper
:rtype: list[SaColumnDoc]
[ "Get", "columns", "info" ]
516526c01c203271410e7d7340024ef9f0bfa46a
https://github.com/kolypto/py-exdoc/blob/516526c01c203271410e7d7340024ef9f0bfa46a/exdoc/sa/__init__.py#L55-L74
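For context, a minimal hypothetical declarative model showing what inspect(Model) hands to these helpers:

from sqlalchemy import Column, Integer, String, inspect
from sqlalchemy.ext.declarative import declarative_base

Base = declarative_base()

class User(Base):
    __tablename__ = 'users'
    id = Column(Integer, primary_key=True)
    name = Column(String(50), nullable=False, doc='Display name')
    _secret = Column(String(50))  # leading underscore: skipped as protected

print([(c.key, str(c.columns[0].type)) for c in inspect(User).column_attrs])
# [('id', 'INTEGER'), ('name', 'VARCHAR(50)'), ('_secret', 'VARCHAR(50)')]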
244,524
kolypto/py-exdoc
exdoc/sa/__init__.py
_model_foreign
def _model_foreign(ins):
    """ Get foreign keys info

    :type ins: sqlalchemy.orm.mapper.Mapper
    :rtype: list[SaForeignkeyDoc]
    """
    fks = []
    for t in ins.tables:
        fks.extend([
            SaForeignkeyDoc(
                key=fk.column.key,
                target=fk.target_fullname,
                onupdate=fk.onupdate,
                ondelete=fk.ondelete
            ) for fk in t.foreign_keys])
    return fks
python
def _model_foreign(ins):
    """ Get foreign keys info

    :type ins: sqlalchemy.orm.mapper.Mapper
    :rtype: list[SaForeignkeyDoc]
    """
    fks = []
    for t in ins.tables:
        fks.extend([
            SaForeignkeyDoc(
                key=fk.column.key,
                target=fk.target_fullname,
                onupdate=fk.onupdate,
                ondelete=fk.ondelete
            ) for fk in t.foreign_keys])
    return fks
[ "def", "_model_foreign", "(", "ins", ")", ":", "fks", "=", "[", "]", "for", "t", "in", "ins", ".", "tables", ":", "fks", ".", "extend", "(", "[", "SaForeignkeyDoc", "(", "key", "=", "fk", ".", "column", ".", "key", ",", "target", "=", "fk", ".", "target_fullname", ",", "onupdate", "=", "fk", ".", "onupdate", ",", "ondelete", "=", "fk", ".", "ondelete", ")", "for", "fk", "in", "t", ".", "foreign_keys", "]", ")", "return", "fks" ]
Get foreign keys info

:type ins: sqlalchemy.orm.mapper.Mapper
:rtype: list[SaForeignkeyDoc]
[ "Get", "foreign", "keys", "info" ]
516526c01c203271410e7d7340024ef9f0bfa46a
https://github.com/kolypto/py-exdoc/blob/516526c01c203271410e7d7340024ef9f0bfa46a/exdoc/sa/__init__.py#L86-L102
244,525
kolypto/py-exdoc
exdoc/sa/__init__.py
_model_unique
def _model_unique(ins):
    """ Get unique constraints info

    :type ins: sqlalchemy.orm.mapper.Mapper
    :rtype: list[tuple[str]]
    """
    unique = []
    for t in ins.tables:
        for c in t.constraints:
            if isinstance(c, UniqueConstraint):
                unique.append(tuple(col.key for col in c.columns))
    return unique
python
def _model_unique(ins):
    """ Get unique constraints info

    :type ins: sqlalchemy.orm.mapper.Mapper
    :rtype: list[tuple[str]]
    """
    unique = []
    for t in ins.tables:
        for c in t.constraints:
            if isinstance(c, UniqueConstraint):
                unique.append(tuple(col.key for col in c.columns))
    return unique
[ "def", "_model_unique", "(", "ins", ")", ":", "unique", "=", "[", "]", "for", "t", "in", "ins", ".", "tables", ":", "for", "c", "in", "t", ".", "constraints", ":", "if", "isinstance", "(", "c", ",", "UniqueConstraint", ")", ":", "unique", ".", "append", "(", "tuple", "(", "col", ".", "key", "for", "col", "in", "c", ".", "columns", ")", ")", "return", "unique" ]
Get unique constraints info

:type ins: sqlalchemy.orm.mapper.Mapper
:rtype: list[tuple[str]]
[ "Get", "unique", "constraints", "info" ]
516526c01c203271410e7d7340024ef9f0bfa46a
https://github.com/kolypto/py-exdoc/blob/516526c01c203271410e7d7340024ef9f0bfa46a/exdoc/sa/__init__.py#L105-L116
244,526
kolypto/py-exdoc
exdoc/sa/__init__.py
_model_relations
def _model_relations(ins):
    """ Get relationships info

    :type ins: sqlalchemy.orm.mapper.Mapper
    :rtype: list[SaRelationshipDoc]
    """
    relations = []
    for r in ins.relationships:
        # Hard times with the foreign model :)
        if isinstance(r.argument, Mapper):
            model_name = r.argument.class_.__name__
        elif hasattr(r.argument, 'arg'):
            model_name = r.argument.arg
        else:
            model_name = r.argument.__name__

        # Format
        relations.append(SaRelationshipDoc(
            key=r.key,
            doc=r.doc or '',
            model=model_name,
            pairs=map(lambda a_b_tuple:
                      a_b_tuple[0].key
                      if a_b_tuple[0].key == a_b_tuple[1].key
                      else '{}={}'.format(a_b_tuple[0].key, a_b_tuple[1].key),
                      r.local_remote_pairs),
            uselist=r.uselist
        ))
    return relations
python
def _model_relations(ins):
    """ Get relationships info

    :type ins: sqlalchemy.orm.mapper.Mapper
    :rtype: list[SaRelationshipDoc]
    """
    relations = []
    for r in ins.relationships:
        # Hard times with the foreign model :)
        if isinstance(r.argument, Mapper):
            model_name = r.argument.class_.__name__
        elif hasattr(r.argument, 'arg'):
            model_name = r.argument.arg
        else:
            model_name = r.argument.__name__

        # Format
        relations.append(SaRelationshipDoc(
            key=r.key,
            doc=r.doc or '',
            model=model_name,
            pairs=map(lambda a_b_tuple:
                      a_b_tuple[0].key
                      if a_b_tuple[0].key == a_b_tuple[1].key
                      else '{}={}'.format(a_b_tuple[0].key, a_b_tuple[1].key),
                      r.local_remote_pairs),
            uselist=r.uselist
        ))
    return relations
[ "def", "_model_relations", "(", "ins", ")", ":", "relations", "=", "[", "]", "for", "r", "in", "ins", ".", "relationships", ":", "# Hard times with the foreign model :)", "if", "isinstance", "(", "r", ".", "argument", ",", "Mapper", ")", ":", "model_name", "=", "r", ".", "argument", ".", "class_", ".", "__name__", "elif", "hasattr", "(", "r", ".", "argument", ",", "'arg'", ")", ":", "model_name", "=", "r", ".", "argument", ".", "arg", "else", ":", "model_name", "=", "r", ".", "argument", ".", "__name__", "# Format", "relations", ".", "append", "(", "SaRelationshipDoc", "(", "key", "=", "r", ".", "key", ",", "doc", "=", "r", ".", "doc", "or", "''", ",", "model", "=", "model_name", ",", "pairs", "=", "map", "(", "lambda", "a_b_tuple", ":", "a_b_tuple", "[", "0", "]", ".", "key", "if", "a_b_tuple", "[", "0", "]", ".", "key", "==", "a_b_tuple", "[", "1", "]", ".", "key", "else", "'{}={}'", ".", "format", "(", "a_b_tuple", "[", "0", "]", ".", "key", ",", "a_b_tuple", "[", "1", "]", ".", "key", ")", ",", "r", ".", "local_remote_pairs", ")", ",", "uselist", "=", "r", ".", "uselist", ")", ")", "return", "relations" ]
Get relationships info

:type ins: sqlalchemy.orm.mapper.Mapper
:rtype: list[SaRelationshipDoc]
[ "Get", "relationships", "info" ]
516526c01c203271410e7d7340024ef9f0bfa46a
https://github.com/kolypto/py-exdoc/blob/516526c01c203271410e7d7340024ef9f0bfa46a/exdoc/sa/__init__.py#L119-L143
244,527
kolypto/py-exdoc
exdoc/sa/__init__.py
doc
def doc(model):
    """ Get documentation object for an SQLAlchemy model

    :param model: Model
    :type model: sqlalchemy.ext.declarative.DeclarativeBase
    :rtype: SaModelDoc
    """
    ins = inspect(model)

    return SaModelDoc(
        name=model.__name__,
        table=[t.name for t in ins.tables],
        doc=getdoc(ins.class_),
        columns=_model_columns(ins),
        primary=_model_primary(ins),
        foreign=_model_foreign(ins),
        unique=_model_unique(ins),
        relations=_model_relations(ins)
    )
python
def doc(model):
    """ Get documentation object for an SQLAlchemy model

    :param model: Model
    :type model: sqlalchemy.ext.declarative.DeclarativeBase
    :rtype: SaModelDoc
    """
    ins = inspect(model)

    return SaModelDoc(
        name=model.__name__,
        table=[t.name for t in ins.tables],
        doc=getdoc(ins.class_),
        columns=_model_columns(ins),
        primary=_model_primary(ins),
        foreign=_model_foreign(ins),
        unique=_model_unique(ins),
        relations=_model_relations(ins)
    )
[ "def", "doc", "(", "model", ")", ":", "ins", "=", "inspect", "(", "model", ")", "return", "SaModelDoc", "(", "name", "=", "model", ".", "__name__", ",", "table", "=", "[", "t", ".", "name", "for", "t", "in", "ins", ".", "tables", "]", ",", "doc", "=", "getdoc", "(", "ins", ".", "class_", ")", ",", "columns", "=", "_model_columns", "(", "ins", ")", ",", "primary", "=", "_model_primary", "(", "ins", ")", ",", "foreign", "=", "_model_foreign", "(", "ins", ")", ",", "unique", "=", "_model_unique", "(", "ins", ")", ",", "relations", "=", "_model_relations", "(", "ins", ")", ")" ]
Get documentation object for an SQLAlchemy model

:param model: Model
:type model: sqlalchemy.ext.declarative.DeclarativeBase
:rtype: SaModelDoc
[ "Get", "documentation", "object", "for", "an", "SQLAlchemy", "model" ]
516526c01c203271410e7d7340024ef9f0bfa46a
https://github.com/kolypto/py-exdoc/blob/516526c01c203271410e7d7340024ef9f0bfa46a/exdoc/sa/__init__.py#L146-L164
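A short usage sketch of the public entry point, reusing the hypothetical User model from the note above and assuming SaModelDoc exposes its constructor kwargs as attributes:

model_doc = doc(User)
print(model_doc.name)   # 'User'
print(model_doc.table)  # ['users']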
244,528
rameshg87/pyremotevbox
pyremotevbox/ZSI/generate/utility.py
GetModuleBaseNameFromWSDL
def GetModuleBaseNameFromWSDL(wsdl):
    """By default try to construct a reasonable base name for all
    generated modules.  Otherwise return None.
    """
    base_name = wsdl.name or wsdl.services[0].name
    base_name = SplitQName(base_name)[1]
    if base_name is None:
        return None
    return NCName_to_ModuleName(base_name)
python
def GetModuleBaseNameFromWSDL(wsdl):
    """By default try to construct a reasonable base name for all
    generated modules.  Otherwise return None.
    """
    base_name = wsdl.name or wsdl.services[0].name
    base_name = SplitQName(base_name)[1]
    if base_name is None:
        return None
    return NCName_to_ModuleName(base_name)
[ "def", "GetModuleBaseNameFromWSDL", "(", "wsdl", ")", ":", "base_name", "=", "wsdl", ".", "name", "or", "wsdl", ".", "services", "[", "0", "]", ".", "name", "base_name", "=", "SplitQName", "(", "base_name", ")", "[", "1", "]", "if", "base_name", "is", "None", ":", "return", "None", "return", "NCName_to_ModuleName", "(", "base_name", ")" ]
By default try to construct a reasonable base name for all generated modules. Otherwise return None.
[ "By", "default", "try", "to", "construct", "a", "reasonable", "base", "name", "for", "all", "generated", "modules", ".", "Otherwise", "return", "None", "." ]
123dffff27da57c8faa3ac1dd4c68b1cf4558b1a
https://github.com/rameshg87/pyremotevbox/blob/123dffff27da57c8faa3ac1dd4c68b1cf4558b1a/pyremotevbox/ZSI/generate/utility.py#L26-L34
244,529
jeroyang/txttk
txttk/retools.py
condense
def condense(ss_unescaped):
    """
    Given multiple strings, returns a compressed regular expression just
    for these strings

    >>> condense(['she', 'he', 'her', 'hemoglobin'])
    'he(moglobin|r)?|she'
    """
    def estimated_len(longg, short):
        return (3 + len(short)
                + sum(map(len, longg))
                - len(longg) * (len(short) - 1)
                - 1)

    def stupid_len(longg):
        return sum(map(len, longg)) + len(longg)

    ss = [re.escape(s) for s in set(ss_unescaped)]
    ss.sort(key=len)
    short2long = defaultdict(lambda: {'p': [], 's': []})
    for short, longg in combinations(ss, 2):
        if longg.startswith(short):
            short2long[short]['p'].append(longg)
        if longg.endswith(short):
            short2long[short]['s'].append(longg)
    short2long = sorted(list(short2long.items()),
                        key=lambda x: len(x[0]), reverse=True)
    output = []
    objs = set(ss)
    for s, pre_sur in short2long:
        pp = set(pre_sur['p']) & objs
        ss = set(pre_sur['s']) & objs
        if ((stupid_len(pp) - estimated_len(pp, s))
                < (stupid_len(ss) - estimated_len(ss, s))):
            reg = (r'({heads})?{surfix}'
                   .format(surfix=s,
                           heads='|'.join(sorted([p[:-len(s)] for p in ss],
                                                 key=len, reverse=True))))
            assert len(reg) == estimated_len(ss, s)
            output.append(reg)
            objs -= (ss | set([s]))
        elif ((stupid_len(pp) - estimated_len(pp, s))
                > (stupid_len(ss) - estimated_len(ss, s))):
            reg = (r'{prefix}({tails})?'
                   .format(prefix=s,
                           tails='|'.join(sorted([p[len(s):] for p in pp],
                                                 key=len, reverse=True))))
            assert len(reg) == estimated_len(pp, s)
            output.append(reg)
            objs -= (pp | set([s]))
    for residual in objs:
        output.append(residual)
    return re.sub(r'\(([^)])\)\?', r'\1?', r'|'.join(output))
python
def condense(ss_unescaped):
    """
    Given multiple strings, returns a compressed regular expression just
    for these strings

    >>> condense(['she', 'he', 'her', 'hemoglobin'])
    'he(moglobin|r)?|she'
    """
    def estimated_len(longg, short):
        return (3 + len(short)
                + sum(map(len, longg))
                - len(longg) * (len(short) - 1)
                - 1)

    def stupid_len(longg):
        return sum(map(len, longg)) + len(longg)

    ss = [re.escape(s) for s in set(ss_unescaped)]
    ss.sort(key=len)
    short2long = defaultdict(lambda: {'p': [], 's': []})
    for short, longg in combinations(ss, 2):
        if longg.startswith(short):
            short2long[short]['p'].append(longg)
        if longg.endswith(short):
            short2long[short]['s'].append(longg)
    short2long = sorted(list(short2long.items()),
                        key=lambda x: len(x[0]), reverse=True)
    output = []
    objs = set(ss)
    for s, pre_sur in short2long:
        pp = set(pre_sur['p']) & objs
        ss = set(pre_sur['s']) & objs
        if ((stupid_len(pp) - estimated_len(pp, s))
                < (stupid_len(ss) - estimated_len(ss, s))):
            reg = (r'({heads})?{surfix}'
                   .format(surfix=s,
                           heads='|'.join(sorted([p[:-len(s)] for p in ss],
                                                 key=len, reverse=True))))
            assert len(reg) == estimated_len(ss, s)
            output.append(reg)
            objs -= (ss | set([s]))
        elif ((stupid_len(pp) - estimated_len(pp, s))
                > (stupid_len(ss) - estimated_len(ss, s))):
            reg = (r'{prefix}({tails})?'
                   .format(prefix=s,
                           tails='|'.join(sorted([p[len(s):] for p in pp],
                                                 key=len, reverse=True))))
            assert len(reg) == estimated_len(pp, s)
            output.append(reg)
            objs -= (pp | set([s]))
    for residual in objs:
        output.append(residual)
    return re.sub(r'\(([^)])\)\?', r'\1?', r'|'.join(output))
[ "def", "condense", "(", "ss_unescaped", ")", ":", "def", "estimated_len", "(", "longg", ",", "short", ")", ":", "return", "(", "3", "+", "len", "(", "short", ")", "+", "sum", "(", "map", "(", "len", ",", "longg", ")", ")", "-", "len", "(", "longg", ")", "*", "(", "len", "(", "short", ")", "-", "1", ")", "-", "1", ")", "def", "stupid_len", "(", "longg", ")", ":", "return", "sum", "(", "map", "(", "len", ",", "longg", ")", ")", "+", "len", "(", "longg", ")", "ss", "=", "[", "re", ".", "escape", "(", "s", ")", "for", "s", "in", "set", "(", "ss_unescaped", ")", "]", "ss", ".", "sort", "(", "key", "=", "len", ")", "short2long", "=", "defaultdict", "(", "lambda", ":", "{", "'p'", ":", "[", "]", ",", "'s'", ":", "[", "]", "}", ")", "for", "short", ",", "longg", "in", "combinations", "(", "ss", ",", "2", ")", ":", "if", "longg", ".", "startswith", "(", "short", ")", ":", "short2long", "[", "short", "]", "[", "'p'", "]", ".", "append", "(", "longg", ")", "if", "longg", ".", "endswith", "(", "short", ")", ":", "short2long", "[", "short", "]", "[", "'s'", "]", ".", "append", "(", "longg", ")", "short2long", "=", "sorted", "(", "list", "(", "short2long", ".", "items", "(", ")", ")", ",", "key", "=", "lambda", "x", ":", "len", "(", "x", "[", "0", "]", ")", ",", "reverse", "=", "True", ")", "output", "=", "[", "]", "objs", "=", "set", "(", "ss", ")", "for", "s", ",", "pre_sur", "in", "short2long", ":", "pp", "=", "set", "(", "pre_sur", "[", "'p'", "]", ")", "&", "objs", "ss", "=", "set", "(", "pre_sur", "[", "'s'", "]", ")", "&", "objs", "if", "(", "(", "stupid_len", "(", "pp", ")", "-", "estimated_len", "(", "pp", ",", "s", ")", ")", "<", "(", "stupid_len", "(", "ss", ")", "-", "estimated_len", "(", "ss", ",", "s", ")", ")", ")", ":", "reg", "=", "(", "r'({heads})?{surfix}'", ".", "format", "(", "surfix", "=", "s", ",", "heads", "=", "'|'", ".", "join", "(", "sorted", "(", "[", "p", "[", ":", "-", "len", "(", "s", ")", "]", "for", "p", "in", "ss", "]", ",", "key", "=", "len", ",", "reverse", "=", "True", ")", ")", ")", ")", "assert", "len", "(", "reg", ")", "==", "estimated_len", "(", "ss", ",", "s", ")", "output", ".", "append", "(", "reg", ")", "objs", "-=", "(", "ss", "|", "set", "(", "[", "s", "]", ")", ")", "elif", "(", "(", "stupid_len", "(", "pp", ")", "-", "estimated_len", "(", "pp", ",", "s", ")", ")", ">", "(", "stupid_len", "(", "ss", ")", "-", "estimated_len", "(", "ss", ",", "s", ")", ")", ")", ":", "reg", "=", "(", "r'{prefix}({tails})?'", ".", "format", "(", "prefix", "=", "s", ",", "tails", "=", "'|'", ".", "join", "(", "sorted", "(", "[", "p", "[", "len", "(", "s", ")", ":", "]", "for", "p", "in", "pp", "]", ",", "key", "=", "len", ",", "reverse", "=", "True", ")", ")", ")", ")", "assert", "len", "(", "reg", ")", "==", "estimated_len", "(", "pp", ",", "s", ")", "output", ".", "append", "(", "reg", ")", "objs", "-=", "(", "pp", "|", "set", "(", "[", "s", "]", ")", ")", "for", "residual", "in", "objs", ":", "output", ".", "append", "(", "residual", ")", "return", "re", ".", "sub", "(", "r'\\(([^)])\\)\\?'", ",", "r'\\1?'", ",", "r'|'", ".", "join", "(", "output", ")", ")" ]
Given multiple strings, returns a compressed regular expression just
for these strings

>>> condense(['she', 'he', 'her', 'hemoglobin'])
'he(moglobin|r)?|she'
[ "Given", "multiple", "strings", "returns", "a", "compressed", "regular", "expression", "just", "for", "these", "strings" ]
8e6daf9cbb7dfbc4900870fb365add17929bd4ab
https://github.com/jeroyang/txttk/blob/8e6daf9cbb7dfbc4900870fb365add17929bd4ab/txttk/retools.py#L10-L73
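Verifying the doctest claim above, and confirming that the condensed pattern still matches exactly the original strings:

import re

words = ['she', 'he', 'her', 'hemoglobin']
pattern = condense(words)                       # 'he(moglobin|r)?|she'
for w in words:
    assert re.match('^(%s)$' % pattern, w)      # every input still matches
assert not re.match('^(%s)$' % pattern, 'hem')  # non-members do not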
244,530
jeroyang/txttk
txttk/retools.py
is_solid
def is_solid(regex):
    """
    Check the given regular expression is solid.

    >>> is_solid(r'a')
    True
    >>> is_solid(r'[ab]')
    True
    >>> is_solid(r'(a|b|c)')
    True
    >>> is_solid(r'(a|b|c)?')
    True
    >>> is_solid(r'(a|b)(c)')
    False
    >>> is_solid(r'(a|b)(c)?')
    False
    """
    shape = re.sub(r'(\\.|[^\[\]\(\)\|\?\+\*])', '#', regex)
    skeleton = shape.replace('#', '')
    if len(shape) <= 1:
        return True
    if re.match(r'^\[[^\]]*\][\*\+\?]?$', shape):
        return True
    if re.match(r'^\([^\(]*\)[\*\+\?]?$', shape):
        return True
    if re.match(r'^\(\)#*?\)\)', skeleton):
        return True
    else:
        return False
python
def is_solid(regex):
    """
    Check the given regular expression is solid.

    >>> is_solid(r'a')
    True
    >>> is_solid(r'[ab]')
    True
    >>> is_solid(r'(a|b|c)')
    True
    >>> is_solid(r'(a|b|c)?')
    True
    >>> is_solid(r'(a|b)(c)')
    False
    >>> is_solid(r'(a|b)(c)?')
    False
    """
    shape = re.sub(r'(\\.|[^\[\]\(\)\|\?\+\*])', '#', regex)
    skeleton = shape.replace('#', '')
    if len(shape) <= 1:
        return True
    if re.match(r'^\[[^\]]*\][\*\+\?]?$', shape):
        return True
    if re.match(r'^\([^\(]*\)[\*\+\?]?$', shape):
        return True
    if re.match(r'^\(\)#*?\)\)', skeleton):
        return True
    else:
        return False
[ "def", "is_solid", "(", "regex", ")", ":", "shape", "=", "re", ".", "sub", "(", "r'(\\\\.|[^\\[\\]\\(\\)\\|\\?\\+\\*])'", ",", "'#'", ",", "regex", ")", "skeleton", "=", "shape", ".", "replace", "(", "'#'", ",", "''", ")", "if", "len", "(", "shape", ")", "<=", "1", ":", "return", "True", "if", "re", ".", "match", "(", "r'^\\[[^\\]]*\\][\\*\\+\\?]?$'", ",", "shape", ")", ":", "return", "True", "if", "re", ".", "match", "(", "r'^\\([^\\(]*\\)[\\*\\+\\?]?$'", ",", "shape", ")", ":", "return", "True", "if", "re", ".", "match", "(", "r'^\\(\\)#*?\\)\\)'", ",", "skeleton", ")", ":", "return", "True", "else", ":", "return", "False" ]
Check the given regular expression is solid.

>>> is_solid(r'a')
True
>>> is_solid(r'[ab]')
True
>>> is_solid(r'(a|b|c)')
True
>>> is_solid(r'(a|b|c)?')
True
>>> is_solid(r'(a|b)(c)')
False
>>> is_solid(r'(a|b)(c)?')
False
[ "Check", "the", "given", "regular", "expression", "is", "solid", "." ]
8e6daf9cbb7dfbc4900870fb365add17929bd4ab
https://github.com/jeroyang/txttk/blob/8e6daf9cbb7dfbc4900870fb365add17929bd4ab/txttk/retools.py#L75-L104
244,531
jeroyang/txttk
txttk/retools.py
danger_unpack
def danger_unpack(regex):
    """
    Remove the outermost parens

    >>> danger_unpack(r'(abc)')
    'abc'
    >>> danger_unpack(r'(?:abc)')
    'abc'
    >>> danger_unpack(r'(?P<xyz>abc)')
    'abc'
    >>> danger_unpack(r'[abc]')
    '[abc]'
    """
    if is_packed(regex):
        return re.sub(r'^\((\?(:|P<.*?>))?(?P<content>.*?)\)$',
                      r'\g<content>', regex)
    else:
        return regex
python
def danger_unpack(regex):
    """
    Remove the outermost parens

    >>> danger_unpack(r'(abc)')
    'abc'
    >>> danger_unpack(r'(?:abc)')
    'abc'
    >>> danger_unpack(r'(?P<xyz>abc)')
    'abc'
    >>> danger_unpack(r'[abc]')
    '[abc]'
    """
    if is_packed(regex):
        return re.sub(r'^\((\?(:|P<.*?>))?(?P<content>.*?)\)$',
                      r'\g<content>', regex)
    else:
        return regex
[ "def", "danger_unpack", "(", "regex", ")", ":", "if", "is_packed", "(", "regex", ")", ":", "return", "re", ".", "sub", "(", "r'^\\((\\?(:|P<.*?>))?(?P<content>.*?)\\)$'", ",", "r'\\g<content>'", ",", "regex", ")", "else", ":", "return", "regex" ]
Remove the outermost parens

>>> danger_unpack(r'(abc)')
'abc'
>>> danger_unpack(r'(?:abc)')
'abc'
>>> danger_unpack(r'(?P<xyz>abc)')
'abc'
>>> danger_unpack(r'[abc]')
'[abc]'
[ "Remove", "the", "outermost", "parens" ]
8e6daf9cbb7dfbc4900870fb365add17929bd4ab
https://github.com/jeroyang/txttk/blob/8e6daf9cbb7dfbc4900870fb365add17929bd4ab/txttk/retools.py#L122-L139
244,532
jeroyang/txttk
txttk/retools.py
concat
def concat(regex_list):
    """
    Concat multiple regular expressions into one; if the given regular
    expression is not packed, a pair of parens will be added.

    >>> reg_1 = r'a|b'
    >>> reg_2 = r'(c|d|e)'
    >>> concat([reg_1, reg_2])
    '(a|b)(c|d|e)'
    """
    output_list = []
    for regex in regex_list:
        output_list.append(consolidate(regex))
    return r''.join(output_list)
python
def concat(regex_list):
    """
    Concat multiple regular expressions into one; if the given regular
    expression is not packed, a pair of parens will be added.

    >>> reg_1 = r'a|b'
    >>> reg_2 = r'(c|d|e)'
    >>> concat([reg_1, reg_2])
    '(a|b)(c|d|e)'
    """
    output_list = []
    for regex in regex_list:
        output_list.append(consolidate(regex))
    return r''.join(output_list)
[ "def", "concat", "(", "regex_list", ")", ":", "output_list", "=", "[", "]", "for", "regex", "in", "regex_list", ":", "output_list", ".", "append", "(", "consolidate", "(", "regex", ")", ")", "return", "r''", ".", "join", "(", "output_list", ")" ]
Concat multiple regular expressions into one; if the given regular
expression is not packed, a pair of parens will be added.

>>> reg_1 = r'a|b'
>>> reg_2 = r'(c|d|e)'
>>> concat([reg_1, reg_2])
'(a|b)(c|d|e)'
[ "Concat", "multiple", "regular", "expressions", "into", "one", "if", "the", "given", "regular", "expression", "is", "not", "packed", "a", "pair", "of", "parens", "will", "be", "added", "." ]
8e6daf9cbb7dfbc4900870fb365add17929bd4ab
https://github.com/jeroyang/txttk/blob/8e6daf9cbb7dfbc4900870fb365add17929bd4ab/txttk/retools.py#L188-L202
244,533
jic-dtool/dtool-config
dtool_config/utils.py
set_cache
def set_cache(config_fpath, cache_dir):
    """Write the cache directory to the dtool config file.

    :param config_fpath: path to the dtool config file
    :param cache_dir: the path to the dtool cache directory
    """
    cache_dir = os.path.abspath(cache_dir)
    return write_config_value_to_file(
        CACHE_DIRECTORY_KEY,
        cache_dir,
        config_fpath
    )
python
def set_cache(config_fpath, cache_dir):
    """Write the cache directory to the dtool config file.

    :param config_fpath: path to the dtool config file
    :param cache_dir: the path to the dtool cache directory
    """
    cache_dir = os.path.abspath(cache_dir)
    return write_config_value_to_file(
        CACHE_DIRECTORY_KEY,
        cache_dir,
        config_fpath
    )
[ "def", "set_cache", "(", "config_fpath", ",", "cache_dir", ")", ":", "cache_dir", "=", "os", ".", "path", ".", "abspath", "(", "cache_dir", ")", "return", "write_config_value_to_file", "(", "CACHE_DIRECTORY_KEY", ",", "cache_dir", ",", "config_fpath", ")" ]
Write the cache directory to the dtool config file.

:param config_fpath: path to the dtool config file
:param cache_dir: the path to the dtool cache directory
[ "Write", "the", "cache", "directory", "to", "the", "dtool", "config", "file", "." ]
21afa99a6794909e1d0180a45895492b4b726a51
https://github.com/jic-dtool/dtool-config/blob/21afa99a6794909e1d0180a45895492b4b726a51/dtool_config/utils.py#L167-L178
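write_config_value_to_file is referenced throughout but not shown here; a plausible sketch of such a helper as a read-modify-write on a JSON config file (name and return value assumed):

import json
import os

def write_config_value_to_file(key, value, config_fpath):
    # Read the existing JSON config (if any), set one key, write it back.
    config = {}
    if os.path.isfile(config_fpath):
        with open(config_fpath) as fh:
            config = json.load(fh)
    config[key] = value
    with open(config_fpath, 'w') as fh:
        json.dump(config, fh, indent=2, sort_keys=True)
    return value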
244,534
jic-dtool/dtool-config
dtool_config/utils.py
set_azure_secret_access_key
def set_azure_secret_access_key(config_fpath, container, az_secret_access_key):
    """Write the Azure secret access key to the dtool config file.

    :param config_fpath: path to the dtool config file
    :param container: azure storage container name
    :param az_secret_access_key: azure secret access key for the container
    """
    key = AZURE_KEY_PREFIX + container
    return write_config_value_to_file(key, az_secret_access_key, config_fpath)
python
def set_azure_secret_access_key(config_fpath, container, az_secret_access_key):
    """Write the Azure secret access key to the dtool config file.

    :param config_fpath: path to the dtool config file
    :param container: azure storage container name
    :param az_secret_access_key: azure secret access key for the container
    """
    key = AZURE_KEY_PREFIX + container
    return write_config_value_to_file(key, az_secret_access_key, config_fpath)
[ "def", "set_azure_secret_access_key", "(", "config_fpath", ",", "container", ",", "az_secret_access_key", ")", ":", "key", "=", "AZURE_KEY_PREFIX", "+", "container", "return", "write_config_value_to_file", "(", "key", ",", "az_secret_access_key", ",", "config_fpath", ")" ]
Write the Azure secret access key to the dtool config file.

:param config_fpath: path to the dtool config file
:param container: azure storage container name
:param az_secret_access_key: azure secret access key for the container
[ "Write", "the", "Azure", "secret", "access", "key", "to", "the", "dtool", "config", "file", "." ]
21afa99a6794909e1d0180a45895492b4b726a51
https://github.com/jic-dtool/dtool-config/blob/21afa99a6794909e1d0180a45895492b4b726a51/dtool_config/utils.py#L192-L200
244,535
jic-dtool/dtool-config
dtool_config/utils.py
list_azure_containers
def list_azure_containers(config_fpath):
    """List the azure storage containers in the config file.

    :param config_fpath: path to the dtool config file
    :returns: the list of azure storage container names
    """
    config_content = _get_config_dict_from_file(config_fpath)

    az_container_names = []
    for key in config_content.keys():
        if key.startswith(AZURE_KEY_PREFIX):
            name = key[len(AZURE_KEY_PREFIX):]
            az_container_names.append(name)

    return sorted(az_container_names)
python
def list_azure_containers(config_fpath):
    """List the azure storage containers in the config file.

    :param config_fpath: path to the dtool config file
    :returns: the list of azure storage container names
    """
    config_content = _get_config_dict_from_file(config_fpath)

    az_container_names = []
    for key in config_content.keys():
        if key.startswith(AZURE_KEY_PREFIX):
            name = key[len(AZURE_KEY_PREFIX):]
            az_container_names.append(name)

    return sorted(az_container_names)
[ "def", "list_azure_containers", "(", "config_fpath", ")", ":", "config_content", "=", "_get_config_dict_from_file", "(", "config_fpath", ")", "az_container_names", "=", "[", "]", "for", "key", "in", "config_content", ".", "keys", "(", ")", ":", "if", "key", ".", "startswith", "(", "AZURE_KEY_PREFIX", ")", ":", "name", "=", "key", "[", "len", "(", "AZURE_KEY_PREFIX", ")", ":", "]", "az_container_names", ".", "append", "(", "name", ")", "return", "sorted", "(", "az_container_names", ")" ]
List the azure storage containers in the config file.

:param config_fpath: path to the dtool config file
:returns: the list of azure storage container names
[ "List", "the", "azure", "storage", "containers", "in", "the", "config", "file", "." ]
21afa99a6794909e1d0180a45895492b4b726a51
https://github.com/jic-dtool/dtool-config/blob/21afa99a6794909e1d0180a45895492b4b726a51/dtool_config/utils.py#L203-L215
244,536
voidpp/python-tools
voidpp_tools/config_loader.py
ConfigLoader.load
def load(self, filename, create=None, default_conf={}):
    """Load the config file

    Args:
        filename (str): the filename of the config, without any path
        create (str): if the config file is not found and this parameter
            is not None, a config file will be created with the content
            of default_conf
        default_conf (dict): content of the default config data

    Returns:
        Return value of the ConfigFormatter.decode or the default_conf value

    Raises:
        ConfigFileNotFoundException: if the config file is not found
    """
    filenames, tries = self.__search_config_files(filename)

    if len(filenames):
        self.__loaded_config_file = filenames if self.__nested else filenames[0]
        return self.__load_config_files(filenames if self.__nested else filenames[:1])

    if create is not None:
        self.__loaded_config_file = os.path.join(create, filename)
        self.save(default_conf)
        return default_conf

    raise ConfigFileNotFoundException("Config file not found in: %s" % tries)
python
def load(self, filename, create=None, default_conf={}):
    """Load the config file

    Args:
        filename (str): the filename of the config, without any path
        create (str): if the config file is not found and this parameter
            is not None, a config file will be created with the content
            of default_conf
        default_conf (dict): content of the default config data

    Returns:
        Return value of the ConfigFormatter.decode or the default_conf value

    Raises:
        ConfigFileNotFoundException: if the config file is not found
    """
    filenames, tries = self.__search_config_files(filename)

    if len(filenames):
        self.__loaded_config_file = filenames if self.__nested else filenames[0]
        return self.__load_config_files(filenames if self.__nested else filenames[:1])

    if create is not None:
        self.__loaded_config_file = os.path.join(create, filename)
        self.save(default_conf)
        return default_conf

    raise ConfigFileNotFoundException("Config file not found in: %s" % tries)
[ "def", "load", "(", "self", ",", "filename", ",", "create", "=", "None", ",", "default_conf", "=", "{", "}", ")", ":", "filenames", ",", "tries", "=", "self", ".", "__search_config_files", "(", "filename", ")", "if", "len", "(", "filenames", ")", ":", "self", ".", "__loaded_config_file", "=", "filenames", "if", "self", ".", "__nested", "else", "filenames", "[", "0", "]", "return", "self", ".", "__load_config_files", "(", "filenames", "if", "self", ".", "__nested", "else", "filenames", "[", ":", "1", "]", ")", "if", "create", "is", "not", "None", ":", "self", ".", "__loaded_config_file", "=", "os", ".", "path", ".", "join", "(", "create", ",", "filename", ")", "self", ".", "save", "(", "default_conf", ")", "return", "default_conf", "raise", "ConfigFileNotFoundException", "(", "\"Config file not found in: %s\"", "%", "tries", ")" ]
Load the config file

Args:
    filename (str): the filename of the config, without any path
    create (str): if the config file is not found and this parameter is
        not None, a config file will be created with the content of
        default_conf
    default_conf (dict): content of the default config data

Returns:
    Return value of the ConfigFormatter.decode or the default_conf value

Raises:
    ConfigFileNotFoundException: if the config file is not found
[ "Load", "the", "config", "file" ]
0fc7460c827b02d8914411cedddadc23ccb3cc73
https://github.com/voidpp/python-tools/blob/0fc7460c827b02d8914411cedddadc23ccb3cc73/voidpp_tools/config_loader.py#L79-L106
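A hedged usage sketch; the ConfigLoader constructor signature and the JSONConfigFormatter name are assumptions, not taken from this file:

import os

loader = ConfigLoader(JSONConfigFormatter(), ['/etc', os.path.expanduser('~')])
config = loader.load('myapp.json', create='/etc', default_conf={'debug': False})
print(config['debug'])  # False on first run, when the default file is created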
244,537
voidpp/python-tools
voidpp_tools/config_loader.py
ConfigLoader.save
def save(self, data):
    """Save the config data

    Args:
        data: any serializable config data

    Raises:
        ConfigLoaderException: if ConfigLoader.load has not been called yet
            (so there is no config file name), if the data is not
            serializable, or if the loader is nested
    """
    if self.__nested:
        raise ConfigLoaderException("Cannot save the config if the 'nested' parameter is True!")

    if self.__loaded_config_file is None:
        raise ConfigLoaderException("Load not called yet!")

    try:
        with open(self.__loaded_config_file, 'w') as f:
            f.write(self.__formatter.encode(data))
    except Exception as e:
        raise ConfigLoaderException("Config data is not serializable: %s" % e)
python
def save(self, data):
    """Save the config data

    Args:
        data: any serializable config data

    Raises:
        ConfigLoaderException: if ConfigLoader.load has not been called yet
            (so there is no config file name), if the data is not
            serializable, or if the loader is nested
    """
    if self.__nested:
        raise ConfigLoaderException("Cannot save the config if the 'nested' parameter is True!")

    if self.__loaded_config_file is None:
        raise ConfigLoaderException("Load not called yet!")

    try:
        with open(self.__loaded_config_file, 'w') as f:
            f.write(self.__formatter.encode(data))
    except Exception as e:
        raise ConfigLoaderException("Config data is not serializable: %s" % e)
[ "def", "save", "(", "self", ",", "data", ")", ":", "if", "self", ".", "__nested", ":", "raise", "ConfigLoaderException", "(", "\"Cannot save the config if the 'nested' parameter is True!\"", ")", "if", "self", ".", "__loaded_config_file", "is", "None", ":", "raise", "ConfigLoaderException", "(", "\"Load not called yet!\"", ")", "try", ":", "with", "open", "(", "self", ".", "__loaded_config_file", ",", "'w'", ")", "as", "f", ":", "f", ".", "write", "(", "self", ".", "__formatter", ".", "encode", "(", "data", ")", ")", "except", "Exception", "as", "e", ":", "raise", "ConfigLoaderException", "(", "\"Config data is not serializable: %s\"", "%", "e", ")" ]
Save the config data

Args:
    data: any serializable config data

Raises:
    ConfigLoaderException: if ConfigLoader.load has not been called yet
        (so there is no config file name), if the data is not serializable,
        or if the loader is nested
[ "Save", "the", "config", "data" ]
0fc7460c827b02d8914411cedddadc23ccb3cc73
https://github.com/voidpp/python-tools/blob/0fc7460c827b02d8914411cedddadc23ccb3cc73/voidpp_tools/config_loader.py#L108-L128
244,538
etcher-be/emiz
emiz/weather/avwx/avwx.py
AVWX.metar_to_speech
def metar_to_speech(metar: str) -> str:
    """
    Creates a speakable text from a METAR

    Args:
        metar: METAR string to use

    Returns: speakable METAR for TTS
    """
    LOGGER.info('getting speech text from METAR: %s', metar)
    metar_data, metar_units = emiz.avwx.metar.parse_in(metar)
    speech = emiz.avwx.speech.metar(metar_data, metar_units)
    speech = str(speech).replace('Altimeter', 'Q N H')
    LOGGER.debug('resulting speech: %s', speech)
    return speech
python
def metar_to_speech(metar: str) -> str:
    """
    Creates a speakable text from a METAR

    Args:
        metar: METAR string to use

    Returns: speakable METAR for TTS
    """
    LOGGER.info('getting speech text from METAR: %s', metar)
    metar_data, metar_units = emiz.avwx.metar.parse_in(metar)
    speech = emiz.avwx.speech.metar(metar_data, metar_units)
    speech = str(speech).replace('Altimeter', 'Q N H')
    LOGGER.debug('resulting speech: %s', speech)
    return speech
[ "def", "metar_to_speech", "(", "metar", ":", "str", ")", "->", "str", ":", "LOGGER", ".", "info", "(", "'getting speech text from METAR: %s'", ",", "metar", ")", "metar_data", ",", "metar_units", "=", "emiz", ".", "avwx", ".", "metar", ".", "parse_in", "(", "metar", ")", "speech", "=", "emiz", ".", "avwx", ".", "speech", ".", "metar", "(", "metar_data", ",", "metar_units", ")", "speech", "=", "str", "(", "speech", ")", ".", "replace", "(", "'Altimeter'", ",", "'Q N H'", ")", "LOGGER", ".", "debug", "(", "'resulting speech: %s'", ",", "speech", ")", "return", "speech" ]
Creates a speakable text from a METAR

Args:
    metar: METAR string to use

Returns: speakable METAR for TTS
[ "Creates", "a", "speakable", "text", "from", "a", "METAR" ]
1c3e32711921d7e600e85558ffe5d337956372de
https://github.com/etcher-be/emiz/blob/1c3e32711921d7e600e85558ffe5d337956372de/emiz/weather/avwx/avwx.py#L22-L37
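A short usage sketch, assuming the method is exposed statically as its self-less signature suggests (the METAR string is illustrative and the exact wording depends on the bundled avwx version):

raw = 'UGTB 211300Z 09014KT CAVOK 22/07 Q1020 NOSIG'
print(AVWX.metar_to_speech(raw))
# e.g. '... Q N H one zero two zero ...' after the Altimeter substitution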
244,539
kallimachos/chios
chios/remoteinclude/__init__.py
RemoteInclude.run
def run(self):
    """Return rel path to a downloaded file as `include` node argument."""
    document = self.state.document
    env = document.settings.env
    buildpath = env.app.outdir
    link = self.arguments[0]
    try:
        r = requests.get(link)
        r.raise_for_status()
        downloadpath = os.path.join(buildpath, '_downloads')
        if not os.path.isdir(downloadpath):
            os.makedirs(downloadpath)
        rstfile = os.path.join(downloadpath, os.path.basename(link))
        with open(rstfile, 'w') as f:
            f.write(r.text)
        rstfile = os.path.relpath(rstfile,
                                  os.path.dirname(env.doc2path(env.docname)))
        self.arguments = [rstfile]
        return super(RemoteInclude, self).run()
    except Exception:
        err = 'Unable to resolve ' + link
        return [document.reporter.warning(str(err), line=self.lineno)]
python
def run(self):
    """Return rel path to a downloaded file as `include` node argument."""
    document = self.state.document
    env = document.settings.env
    buildpath = env.app.outdir
    link = self.arguments[0]
    try:
        r = requests.get(link)
        r.raise_for_status()
        downloadpath = os.path.join(buildpath, '_downloads')
        if not os.path.isdir(downloadpath):
            os.makedirs(downloadpath)
        rstfile = os.path.join(downloadpath, os.path.basename(link))
        with open(rstfile, 'w') as f:
            f.write(r.text)
        rstfile = os.path.relpath(rstfile,
                                  os.path.dirname(env.doc2path(env.docname)))
        self.arguments = [rstfile]
        return super(RemoteInclude, self).run()
    except Exception:
        err = 'Unable to resolve ' + link
        return [document.reporter.warning(str(err), line=self.lineno)]
[ "def", "run", "(", "self", ")", ":", "document", "=", "self", ".", "state", ".", "document", "env", "=", "document", ".", "settings", ".", "env", "buildpath", "=", "env", ".", "app", ".", "outdir", "link", "=", "self", ".", "arguments", "[", "0", "]", "try", ":", "r", "=", "requests", ".", "get", "(", "link", ")", "r", ".", "raise_for_status", "(", ")", "downloadpath", "=", "os", ".", "path", ".", "join", "(", "buildpath", ",", "'_downloads'", ")", "if", "not", "os", ".", "path", ".", "isdir", "(", "downloadpath", ")", ":", "os", ".", "makedirs", "(", "downloadpath", ")", "rstfile", "=", "os", ".", "path", ".", "join", "(", "downloadpath", ",", "os", ".", "path", ".", "basename", "(", "link", ")", ")", "with", "open", "(", "rstfile", ",", "'w'", ")", "as", "f", ":", "f", ".", "write", "(", "r", ".", "text", ")", "rstfile", "=", "os", ".", "path", ".", "relpath", "(", "rstfile", ",", "os", ".", "path", ".", "dirname", "(", "env", ".", "doc2path", "(", "env", ".", "docname", ")", ")", ")", "self", ".", "arguments", "=", "[", "rstfile", "]", "return", "super", "(", "RemoteInclude", ",", "self", ")", ".", "run", "(", ")", "except", "Exception", ":", "err", "=", "'Unable to resolve '", "+", "link", "return", "[", "document", ".", "reporter", ".", "warning", "(", "str", "(", "err", ")", ",", "line", "=", "self", ".", "lineno", ")", "]" ]
Return rel path to a downloaded file as `include` node argument.
[ "Return", "rel", "path", "to", "a", "downloaded", "file", "as", "include", "node", "argument", "." ]
e14044e4019d57089c625d4ad2f73ccb66b8b7b8
https://github.com/kallimachos/chios/blob/e14044e4019d57089c625d4ad2f73ccb66b8b7b8/chios/remoteinclude/__init__.py#L35-L57
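For context, a hedged sketch of how such a directive is typically registered with Sphinx; the directive name and setup() body are assumptions, not taken from this file:

def setup(app):
    # Register the directive so documents can use it, e.g.:
    #   .. remote-include:: https://example.com/snippets/intro.rst
    app.add_directive('remote-include', RemoteInclude)
    return {'parallel_read_safe': True}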
244,540
sassoo/goldman
goldman/deserializers/jsonapi.py
Deserializer.normalize
def normalize(self, body):
    """ Invoke the JSON API normalizer

    Perform the following:

        * add the type as a rtype property
        * flatten the payload
        * add the id as a rid property ONLY if present

    We don't need to vet the inputs much because the Parser
    has already done all the work.

    :param body: the already vetted & parsed payload
    :return: normalized dict
    """
    resource = body['data']
    data = {'rtype': resource['type']}

    if 'attributes' in resource:
        attributes = resource['attributes']
        attributes = self._normalize_attributes(attributes)
        data.update(attributes)

    if 'relationships' in resource:
        relationships = resource['relationships']
        relationships = self._normalize_relationships(relationships)
        data.update(relationships)

    if resource.get('id'):
        data['rid'] = resource['id']

    return data
python
def normalize(self, body): """ Invoke the JSON API normalizer Perform the following: * add the type as a rtype property * flatten the payload * add the id as a rid property ONLY if present We don't need to vet the inputs much because the Parser has already done all the work. :param body: the already vetted & parsed payload :return: normalized dict """ resource = body['data'] data = {'rtype': resource['type']} if 'attributes' in resource: attributes = resource['attributes'] attributes = self._normalize_attributes(attributes) data.update(attributes) if 'relationships' in resource: relationships = resource['relationships'] relationships = self._normalize_relationships(relationships) data.update(relationships) if resource.get('id'): data['rid'] = resource['id'] return data
[ "def", "normalize", "(", "self", ",", "body", ")", ":", "resource", "=", "body", "[", "'data'", "]", "data", "=", "{", "'rtype'", ":", "resource", "[", "'type'", "]", "}", "if", "'attributes'", "in", "resource", ":", "attributes", "=", "resource", "[", "'attributes'", "]", "attributes", "=", "self", ".", "_normalize_attributes", "(", "attributes", ")", "data", ".", "update", "(", "attributes", ")", "if", "'relationships'", "in", "resource", ":", "relationships", "=", "resource", "[", "'relationships'", "]", "relationships", "=", "self", ".", "_normalize_relationships", "(", "relationships", ")", "data", ".", "update", "(", "relationships", ")", "if", "resource", ".", "get", "(", "'id'", ")", ":", "data", "[", "'rid'", "]", "=", "resource", "[", "'id'", "]", "return", "data" ]
Invoke the JSON API normalizer Perform the following: * add the type as a rtype property * flatten the payload * add the id as a rid property ONLY if present We don't need to vet the inputs much because the Parser has already done all the work. :param body: the already vetted & parsed payload :return: normalized dict
[ "Invoke", "the", "JSON", "API", "normalizer" ]
b72540c9ad06b5c68aadb1b4fa8cb0b716260bf2
https://github.com/sassoo/goldman/blob/b72540c9ad06b5c68aadb1b4fa8cb0b716260bf2/goldman/deserializers/jsonapi.py#L85-L121
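An illustrative input/output pair for normalize(), assuming the _normalize_attributes and _normalize_relationships helpers (not shown in this record) pass scalar values through unchanged:

body = {'data': {'type': 'articles', 'id': '1',
                 'attributes': {'title': 'Hello'}}}
# normalize(body) would flatten this to roughly:
# {'rtype': 'articles', 'title': 'Hello', 'rid': '1'}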
244,541
sassoo/goldman
goldman/deserializers/jsonapi.py
Deserializer._parse_attributes
def _parse_attributes(self, attributes): """ Ensure compliance with the spec's attributes section Specifically, the attributes object of the single resource object. This contains the key / values to be mapped to the model. :param attributes: dict JSON API attributes object """ link = 'jsonapi.org/format/#document-resource-object-attributes' if not isinstance(attributes, dict): self.fail('The JSON API resource object attributes key MUST ' 'be a hash.', link) elif 'id' in attributes or 'type' in attributes: self.fail('A field name of `id` or `type` is not allowed in ' 'the attributes object. They should be top-level ' 'keys.', link)
python
def _parse_attributes(self, attributes): """ Ensure compliance with the spec's attributes section Specifically, the attributes object of the single resource object. This contains the key / values to be mapped to the model. :param attributes: dict JSON API attributes object """ link = 'jsonapi.org/format/#document-resource-object-attributes' if not isinstance(attributes, dict): self.fail('The JSON API resource object attributes key MUST ' 'be a hash.', link) elif 'id' in attributes or 'type' in attributes: self.fail('A field name of `id` or `type` is not allowed in ' 'the attributes object. They should be top-level ' 'keys.', link)
[ "def", "_parse_attributes", "(", "self", ",", "attributes", ")", ":", "link", "=", "'jsonapi.org/format/#document-resource-object-attributes'", "if", "not", "isinstance", "(", "attributes", ",", "dict", ")", ":", "self", ".", "fail", "(", "'The JSON API resource object attributes key MUST '", "'be a hash.'", ",", "link", ")", "elif", "'id'", "in", "attributes", "or", "'type'", "in", "attributes", ":", "self", ".", "fail", "(", "'A field name of `id` or `type` is not allowed in '", "'the attributes object. They should be top-level '", "'keys.'", ",", "link", ")" ]
Ensure compliance with the spec's attributes section Specifically, the attributes object of the single resource object. This contains the key / values to be mapped to the model. :param attributes: dict JSON API attributes object
[ "Ensure", "compliance", "with", "the", "spec", "s", "attributes", "section" ]
b72540c9ad06b5c68aadb1b4fa8cb0b716260bf2
https://github.com/sassoo/goldman/blob/b72540c9ad06b5c68aadb1b4fa8cb0b716260bf2/goldman/deserializers/jsonapi.py#L123-L142
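A sketch of payloads against this check; fail() raises a spec-violation error pointing at the documented link:

ok_attributes = {'title': 'Hello', 'word_count': 42}   # plain hash: passes
not_a_hash = ['title', 'Hello']                        # fails: attributes MUST be a hash
reserved_key = {'id': '1', 'title': 'Hello'}           # fails: 'id'/'type' belong at the top level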
244,542
sassoo/goldman
goldman/deserializers/jsonapi.py
Deserializer._parse_relationships
def _parse_relationships(self, relationships): """ Ensure compliance with the spec's relationships section Specifically, the relationships object of the single resource object. For modifications we only support relationships via the `data` key referred to as Resource Linkage. :param relationships: dict JSON API relationships object """ link = 'jsonapi.org/format/#document-resource-object-relationships' if not isinstance(relationships, dict): self.fail('The JSON API resource object relationships key MUST ' 'be a hash & comply with the spec\'s resource linkage ' 'section.', link) for key, val in relationships.items(): if not isinstance(val, dict) or 'data' not in val: self.fail('Relationship key %s MUST be a hash & contain ' 'a `data` field compliant with the spec\'s ' 'resource linkage section.' % key, link) elif isinstance(val['data'], dict): data = val['data'] rid = isinstance(data.get('id'), unicode) rtype = isinstance(data.get('type'), unicode) if not rid or not rtype: self.fail('%s relationship\'s resource linkage MUST ' 'contain `id` & `type` fields. Additionally, ' 'they must both be strings.' % key, link) elif isinstance(val['data'], list): abort(exceptions.ModificationDenied(**{ 'detail': 'Modifying the %s relationship or any to-many ' 'relationships for that matter are is not ' 'currently supported. Instead, modify the ' 'to-one side directly.' % key, 'links': link, })) elif val['data']: self.fail('The relationship key %s is malformed & impossible ' 'for us to understand your intentions. It MUST be ' 'a hash & contain a `data` field compliant with ' 'the spec\'s resource linkage section or null if ' 'you want to unset the relationship.' % key, link)
python
def _parse_relationships(self, relationships): """ Ensure compliance with the spec's relationships section Specifically, the relationships object of the single resource object. For modifications we only support relationships via the `data` key referred to as Resource Linkage. :param relationships: dict JSON API relationships object """ link = 'jsonapi.org/format/#document-resource-object-relationships' if not isinstance(relationships, dict): self.fail('The JSON API resource object relationships key MUST ' 'be a hash & comply with the spec\'s resource linkage ' 'section.', link) for key, val in relationships.items(): if not isinstance(val, dict) or 'data' not in val: self.fail('Relationship key %s MUST be a hash & contain ' 'a `data` field compliant with the spec\'s ' 'resource linkage section.' % key, link) elif isinstance(val['data'], dict): data = val['data'] rid = isinstance(data.get('id'), unicode) rtype = isinstance(data.get('type'), unicode) if not rid or not rtype: self.fail('%s relationship\'s resource linkage MUST ' 'contain `id` & `type` fields. Additionally, ' 'they must both be strings.' % key, link) elif isinstance(val['data'], list): abort(exceptions.ModificationDenied(**{ 'detail': 'Modifying the %s relationship or any to-many ' 'relationships for that matter are is not ' 'currently supported. Instead, modify the ' 'to-one side directly.' % key, 'links': link, })) elif val['data']: self.fail('The relationship key %s is malformed & impossible ' 'for us to understand your intentions. It MUST be ' 'a hash & contain a `data` field compliant with ' 'the spec\'s resource linkage section or null if ' 'you want to unset the relationship.' % key, link)
[ "def", "_parse_relationships", "(", "self", ",", "relationships", ")", ":", "link", "=", "'jsonapi.org/format/#document-resource-object-relationships'", "if", "not", "isinstance", "(", "relationships", ",", "dict", ")", ":", "self", ".", "fail", "(", "'The JSON API resource object relationships key MUST '", "'be a hash & comply with the spec\\'s resource linkage '", "'section.'", ",", "link", ")", "for", "key", ",", "val", "in", "relationships", ".", "items", "(", ")", ":", "if", "not", "isinstance", "(", "val", ",", "dict", ")", "or", "'data'", "not", "in", "val", ":", "self", ".", "fail", "(", "'Relationship key %s MUST be a hash & contain '", "'a `data` field compliant with the spec\\'s '", "'resource linkage section.'", "%", "key", ",", "link", ")", "elif", "isinstance", "(", "val", "[", "'data'", "]", ",", "dict", ")", ":", "data", "=", "val", "[", "'data'", "]", "rid", "=", "isinstance", "(", "data", ".", "get", "(", "'id'", ")", ",", "unicode", ")", "rtype", "=", "isinstance", "(", "data", ".", "get", "(", "'type'", ")", ",", "unicode", ")", "if", "not", "rid", "or", "not", "rtype", ":", "self", ".", "fail", "(", "'%s relationship\\'s resource linkage MUST '", "'contain `id` & `type` fields. Additionally, '", "'they must both be strings.'", "%", "key", ",", "link", ")", "elif", "isinstance", "(", "val", "[", "'data'", "]", ",", "list", ")", ":", "abort", "(", "exceptions", ".", "ModificationDenied", "(", "*", "*", "{", "'detail'", ":", "'Modifying the %s relationship or any to-many '", "'relationships for that matter are is not '", "'currently supported. Instead, modify the '", "'to-one side directly.'", "%", "key", ",", "'links'", ":", "link", ",", "}", ")", ")", "elif", "val", "[", "'data'", "]", ":", "self", ".", "fail", "(", "'The relationship key %s is malformed & impossible '", "'for us to understand your intentions. It MUST be '", "'a hash & contain a `data` field compliant with '", "'the spec\\'s resource linkage section or null if '", "'you want to unset the relationship.'", "%", "key", ",", "link", ")" ]
Ensure compliance with the spec's relationships section Specifically, the relationships object of the single resource object. For modifications we only support relationships via the `data` key referred to as Resource Linkage. :param relationships: dict JSON API relationships object
[ "Ensure", "compliance", "with", "the", "spec", "s", "relationships", "section" ]
b72540c9ad06b5c68aadb1b4fa8cb0b716260bf2
https://github.com/sassoo/goldman/blob/b72540c9ad06b5c68aadb1b4fa8cb0b716260bf2/goldman/deserializers/jsonapi.py#L144-L189
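Illustrative relationship payloads against the rules above; note the code checks `id` and `type` with isinstance(..., unicode), i.e. the strings JSON parsing yields under Python 2:

to_one = {'author': {'data': {'type': u'people', 'id': u'9'}}}  # passes
unset = {'author': {'data': None}}                              # passes: unsets the relationship
to_many = {'tags': {'data': [{'type': u'tags', 'id': u'2'}]}}   # aborts: to-many modification unsupported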
244,543
sassoo/goldman
goldman/deserializers/jsonapi.py
Deserializer._parse_resource
def _parse_resource(self, resource): """ Ensure compliance with the spec's resource objects section :param resource: dict JSON API resource object """ link = 'jsonapi.org/format/#document-resource-objects' rid = isinstance(resource.get('id'), unicode) rtype = isinstance(resource.get('type'), unicode) if not rtype or (self.req.is_patching and not rid): self.fail('JSON API requires that every resource object MUST ' 'contain a `type` top-level key. Additionally, when ' 'modifying an existing resource object an `id` ' 'top-level key is required. The values of both keys ' 'MUST be strings. Your request did not comply with ' 'one or more of these 3 rules', link) elif 'attributes' not in resource and 'relationships' not in resource: self.fail('Modifiying or creating resources require at minimum ' 'an attributes object and/or relationship object.', link) elif rid and self.req.is_posting: abort(exceptions.ModificationDenied(**{ 'detail': 'Our API does not support client-generated ID\'s ' 'when creating NEW resources. Instead, our API ' 'will generate one for you & return it in the ' 'response.', 'links': 'jsonapi.org/format/#crud-creating-client-ids', }))
python
def _parse_resource(self, resource): """ Ensure compliance with the spec's resource objects section :param resource: dict JSON API resource object """ link = 'jsonapi.org/format/#document-resource-objects' rid = isinstance(resource.get('id'), unicode) rtype = isinstance(resource.get('type'), unicode) if not rtype or (self.req.is_patching and not rid): self.fail('JSON API requires that every resource object MUST ' 'contain a `type` top-level key. Additionally, when ' 'modifying an existing resource object an `id` ' 'top-level key is required. The values of both keys ' 'MUST be strings. Your request did not comply with ' 'one or more of these 3 rules', link) elif 'attributes' not in resource and 'relationships' not in resource: self.fail('Modifiying or creating resources require at minimum ' 'an attributes object and/or relationship object.', link) elif rid and self.req.is_posting: abort(exceptions.ModificationDenied(**{ 'detail': 'Our API does not support client-generated ID\'s ' 'when creating NEW resources. Instead, our API ' 'will generate one for you & return it in the ' 'response.', 'links': 'jsonapi.org/format/#crud-creating-client-ids', }))
[ "def", "_parse_resource", "(", "self", ",", "resource", ")", ":", "link", "=", "'jsonapi.org/format/#document-resource-objects'", "rid", "=", "isinstance", "(", "resource", ".", "get", "(", "'id'", ")", ",", "unicode", ")", "rtype", "=", "isinstance", "(", "resource", ".", "get", "(", "'type'", ")", ",", "unicode", ")", "if", "not", "rtype", "or", "(", "self", ".", "req", ".", "is_patching", "and", "not", "rid", ")", ":", "self", ".", "fail", "(", "'JSON API requires that every resource object MUST '", "'contain a `type` top-level key. Additionally, when '", "'modifying an existing resource object an `id` '", "'top-level key is required. The values of both keys '", "'MUST be strings. Your request did not comply with '", "'one or more of these 3 rules'", ",", "link", ")", "elif", "'attributes'", "not", "in", "resource", "and", "'relationships'", "not", "in", "resource", ":", "self", ".", "fail", "(", "'Modifiying or creating resources require at minimum '", "'an attributes object and/or relationship object.'", ",", "link", ")", "elif", "rid", "and", "self", ".", "req", ".", "is_posting", ":", "abort", "(", "exceptions", ".", "ModificationDenied", "(", "*", "*", "{", "'detail'", ":", "'Our API does not support client-generated ID\\'s '", "'when creating NEW resources. Instead, our API '", "'will generate one for you & return it in the '", "'response.'", ",", "'links'", ":", "'jsonapi.org/format/#crud-creating-client-ids'", ",", "}", ")", ")" ]
Ensure compliance with the spec's resource objects section :param resource: dict JSON API resource object
[ "Ensure", "compliance", "with", "the", "spec", "s", "resource", "objects", "section" ]
b72540c9ad06b5c68aadb1b4fa8cb0b716260bf2
https://github.com/sassoo/goldman/blob/b72540c9ad06b5c68aadb1b4fa8cb0b716260bf2/goldman/deserializers/jsonapi.py#L191-L219
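Illustrative resource objects; what is acceptable depends on the request-method flags on self.req:

create = {'type': 'articles', 'attributes': {'title': 'Hello'}}   # valid for POST
update = {'type': 'articles', 'id': '1',
          'attributes': {'title': 'Hello'}}                       # valid for PATCH
# supplying an 'id' on a POST aborts with ModificationDenied:
# client-generated IDs are rejected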
244,544
sassoo/goldman
goldman/deserializers/jsonapi.py
Deserializer._parse_top_level
def _parse_top_level(self, body): """ Ensure compliance with the spec's top-level section """ link = 'jsonapi.org/format/#document-top-level' try: if not isinstance(body['data'], dict): raise TypeError except (KeyError, TypeError): self.fail('JSON API payloads MUST be a hash at the most ' 'top-level; rooted at a key named `data` where the ' 'value must be a hash. Currently, we only support ' 'JSON API payloads that comply with the single ' 'Resource Object section.', link) if 'errors' in body: self.fail('JSON API payloads MUST not have both `data` & ' '`errors` top-level keys.', link)
python
def _parse_top_level(self, body): """ Ensure compliance with the spec's top-level section """ link = 'jsonapi.org/format/#document-top-level' try: if not isinstance(body['data'], dict): raise TypeError except (KeyError, TypeError): self.fail('JSON API payloads MUST be a hash at the most ' 'top-level; rooted at a key named `data` where the ' 'value must be a hash. Currently, we only support ' 'JSON API payloads that comply with the single ' 'Resource Object section.', link) if 'errors' in body: self.fail('JSON API payloads MUST not have both `data` & ' '`errors` top-level keys.', link)
[ "def", "_parse_top_level", "(", "self", ",", "body", ")", ":", "link", "=", "'jsonapi.org/format/#document-top-level'", "try", ":", "if", "not", "isinstance", "(", "body", "[", "'data'", "]", ",", "dict", ")", ":", "raise", "TypeError", "except", "(", "KeyError", ",", "TypeError", ")", ":", "self", ".", "fail", "(", "'JSON API payloads MUST be a hash at the most '", "'top-level; rooted at a key named `data` where the '", "'value must be a hash. Currently, we only support '", "'JSON API payloads that comply with the single '", "'Resource Object section.'", ",", "link", ")", "if", "'errors'", "in", "body", ":", "self", ".", "fail", "(", "'JSON API payloads MUST not have both `data` & '", "'`errors` top-level keys.'", ",", "link", ")" ]
Ensure compliance with the spec's top-level section
[ "Ensure", "compliance", "with", "the", "spec", "s", "top", "-", "level", "section" ]
b72540c9ad06b5c68aadb1b4fa8cb0b716260bf2
https://github.com/sassoo/goldman/blob/b72540c9ad06b5c68aadb1b4fa8cb0b716260bf2/goldman/deserializers/jsonapi.py#L221-L238
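Illustrative top-level documents against this check:

ok = {'data': {'type': 'articles'}}            # hash rooted at 'data': passes
collection = {'data': [{'type': 'articles'}]}  # fails: only single resource objects supported
mixed = {'data': {}, 'errors': []}             # fails: 'data' and 'errors' are mutually exclusive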
244,545
sassoo/goldman
goldman/deserializers/jsonapi.py
Deserializer.parse
def parse(self, body): """ Invoke the JSON API spec compliant parser Order is important. Start from the request body root key & work your way down so exception handling is easier to follow. :return: the parsed & vetted request body """ self._parse_top_level(body) self._parse_resource(body['data']) resource = body['data'] if 'attributes' in resource: self._parse_attributes(resource['attributes']) if 'relationships' in resource: self._parse_relationships(resource['relationships'])
python
def parse(self, body): """ Invoke the JSON API spec compliant parser Order is important. Start from the request body root key & work your way down so exception handling is easier to follow. :return: the parsed & vetted request body """ self._parse_top_level(body) self._parse_resource(body['data']) resource = body['data'] if 'attributes' in resource: self._parse_attributes(resource['attributes']) if 'relationships' in resource: self._parse_relationships(resource['relationships'])
[ "def", "parse", "(", "self", ",", "body", ")", ":", "self", ".", "_parse_top_level", "(", "body", ")", "self", ".", "_parse_resource", "(", "body", "[", "'data'", "]", ")", "resource", "=", "body", "[", "'data'", "]", "if", "'attributes'", "in", "resource", ":", "self", ".", "_parse_attributes", "(", "resource", "[", "'attributes'", "]", ")", "if", "'relationships'", "in", "resource", ":", "self", ".", "_parse_relationships", "(", "resource", "[", "'relationships'", "]", ")" ]
Invoke the JSON API spec compliant parser Order is important. Start from the request body root key & work your way down so exception handling is easier to follow. :return: the parsed & vetted request body
[ "Invoke", "the", "JSON", "API", "spec", "compliant", "parser" ]
b72540c9ad06b5c68aadb1b4fa8cb0b716260bf2
https://github.com/sassoo/goldman/blob/b72540c9ad06b5c68aadb1b4fa8cb0b716260bf2/goldman/deserializers/jsonapi.py#L240-L259
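A minimal body that clears every stage of parse() in order, assuming a PATCH-style request (so both `type` and `id` are required):

body = {
    'data': {
        'type': 'articles',
        'id': '1',
        'attributes': {'title': 'Hello'},
        'relationships': {'author': {'data': {'type': 'people', 'id': '9'}}},
    }
}
# deserializer.parse(body) runs the top-level, resource, attributes,
# and relationships checks in that order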
244,546
baliame/http-hmac-python
httphmac/compat.py
get_signer_by_version
def get_signer_by_version(digest, ver): """Returns a new signer object for a digest and version combination. Keyword arguments: digest -- a callable that may be passed to the initializer of any Signer object in this library. The callable must return a hasher object when called with no arguments. ver -- the version of the signature. This may be any value convertible to an int. """ if int(ver) == 1: return V1Signer(digest) elif int(ver) == 2: return V2Signer(digest) else: return None
python
def get_signer_by_version(digest, ver): """Returns a new signer object for a digest and version combination. Keyword arguments: digest -- a callable that may be passed to the initializer of any Signer object in this library. The callable must return a hasher object when called with no arguments. ver -- the version of the signature. This may be any value convertible to an int. """ if int(ver) == 1: return V1Signer(digest) elif int(ver) == 2: return V2Signer(digest) else: return None
[ "def", "get_signer_by_version", "(", "digest", ",", "ver", ")", ":", "if", "int", "(", "ver", ")", "==", "1", ":", "return", "V1Signer", "(", "digest", ")", "elif", "int", "(", "ver", ")", "==", "2", ":", "return", "V2Signer", "(", "digest", ")", "else", ":", "return", "None" ]
Returns a new signer object for a digest and version combination. Keyword arguments: digest -- a callable that may be passed to the initializer of any Signer object in this library. The callable must return a hasher object when called with no arguments. ver -- the version of the signature. This may be any value convertible to an int.
[ "Returns", "a", "new", "signer", "object", "for", "a", "digest", "and", "version", "combination", "." ]
9884c0cbfdb712f9f37080a8efbfdce82850785f
https://github.com/baliame/http-hmac-python/blob/9884c0cbfdb712f9f37080a8efbfdce82850785f/httphmac/compat.py#L6-L19
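A usage sketch; hashlib's digest constructors satisfy the "callable returning a hasher when called with no arguments" contract, and the import path follows the record's module path:

import hashlib

from httphmac.compat import get_signer_by_version  # path per this record

v2 = get_signer_by_version(hashlib.sha256, 2)    # a V2Signer instance
v1 = get_signer_by_version(hashlib.sha256, '1')  # '1' is coerced via int()
assert get_signer_by_version(hashlib.sha256, 3) is None  # unknown version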
244,547
baliame/http-hmac-python
httphmac/compat.py
SignatureIdentifier.identify
def identify(self, header): """Identifies a signature and returns the appropriate Signer object. This is done by reading an authorization header and matching it to signature characteristics. None is returned if the authorization header does not match the format of any signature identified by this identifier. Keyword arguments: header -- the Authorization header of a request. """ for ver, signer in self.signers.items(): if signer.matches(header): return signer return None
python
def identify(self, header): """Identifies a signature and returns the appropriate Signer object. This is done by reading an authorization header and matching it to signature characteristics. None is returned if the authorization header does not match the format of any signature identified by this identifier. Keyword arguments: header -- the Authorization header of a request. """ for ver, signer in self.signers.items(): if signer.matches(header): return signer return None
[ "def", "identify", "(", "self", ",", "header", ")", ":", "for", "ver", ",", "signer", "in", "self", ".", "signers", ".", "items", "(", ")", ":", "if", "signer", ".", "matches", "(", "header", ")", ":", "return", "signer", "return", "None" ]
Identifies a signature and returns the appropriate Signer object. This is done by reading an authorization header and matching it to signature characteristics. None is returned if the authorization header does not match the format of any signature identified by this identifier. Keyword arguments: header -- the Authorization header of a request.
[ "Identifies", "a", "signature", "and", "returns", "the", "appropriate", "Signer", "object", ".", "This", "is", "done", "by", "reading", "an", "authorization", "header", "and", "matching", "it", "to", "signature", "characteristics", ".", "None", "is", "returned", "if", "the", "authorization", "header", "does", "not", "match", "the", "format", "of", "any", "signature", "identified", "by", "this", "identifier", "." ]
9884c0cbfdb712f9f37080a8efbfdce82850785f
https://github.com/baliame/http-hmac-python/blob/9884c0cbfdb712f9f37080a8efbfdce82850785f/httphmac/compat.py#L41-L52
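A dispatch sketch; how the SignatureIdentifier is constructed is not shown in this record, so the instance below is assumed to exist already:

auth_header = request_headers['Authorization']  # hypothetical raw header value
signer = identifier.identify(auth_header)       # identifier: a SignatureIdentifier
if signer is None:
    raise ValueError('no registered signer matches this header format')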
244,548
keik/audiotrans
audiotrans/load_transforms.py
load_transforms
def load_transforms(transforms): """ Load transform modules and return instance of transform class. Parameters ---------- transforms : [str] or [[str]] array of transform module name, or nested array of transform module name with argv to load Returns ------- array of transform instance """ from . import Transform import inspect # normalize arguments to form as [(name, [option, ...]), ...] transforms_with_argv = map(lambda t: (t[0], t[1:]) if isinstance(t, list) else (t, []), transforms) def instantiate_transform(module_name, argv): tr_module = __import__( module_name if module_name.count('.') > 0 else TRANSFORM_MODULE_PREFIX + module_name, fromlist=['dummy']) tr_classes = inspect.getmembers( tr_module, lambda c: issubclass(c if inspect.isclass(c) else None.__class__, Transform)) if len(tr_classes) != 1: raise TypeError('Transform module must have only one subclass of Transform') tr_class = tr_classes[0] return tr_class[1](argv) return [instantiate_transform(tr[0], tr[1]) for tr in transforms_with_argv]
python
def load_transforms(transforms): """ Load transform modules and return instance of transform class. Parameters ---------- transforms : [str] or [[str]] array of transform module name, or nested array of transform module name with argv to load Returns ------- array of transform instance """ from . import Transform import inspect # normalize arguments to form as [(name, [option, ...]), ...] transforms_with_argv = map(lambda t: (t[0], t[1:]) if isinstance(t, list) else (t, []), transforms) def instantiate_transform(module_name, argv): tr_module = __import__( module_name if module_name.count('.') > 0 else TRANSFORM_MODULE_PREFIX + module_name, fromlist=['dummy']) tr_classes = inspect.getmembers( tr_module, lambda c: issubclass(c if inspect.isclass(c) else None.__class__, Transform)) if len(tr_classes) != 1: raise TypeError('Transform module must have only one subclass of Transform') tr_class = tr_classes[0] return tr_class[1](argv) return [instantiate_transform(tr[0], tr[1]) for tr in transforms_with_argv]
[ "def", "load_transforms", "(", "transforms", ")", ":", "from", ".", "import", "Transform", "import", "inspect", "# normalize arguments to form as [(name, [option, ...]), ...]", "transforms_with_argv", "=", "map", "(", "lambda", "t", ":", "(", "t", "[", "0", "]", ",", "t", "[", "1", ":", "]", ")", "if", "isinstance", "(", "t", ",", "list", ")", "else", "(", "t", ",", "[", "]", ")", ",", "transforms", ")", "def", "instantiate_transform", "(", "module_name", ",", "argv", ")", ":", "tr_module", "=", "__import__", "(", "module_name", "if", "module_name", ".", "count", "(", "'.'", ")", ">", "0", "else", "TRANSFORM_MODULE_PREFIX", "+", "module_name", ",", "fromlist", "=", "[", "'dummy'", "]", ")", "tr_classes", "=", "inspect", ".", "getmembers", "(", "tr_module", ",", "lambda", "c", ":", "issubclass", "(", "c", "if", "inspect", ".", "isclass", "(", "c", ")", "else", "None", ".", "__class__", ",", "Transform", ")", ")", "if", "len", "(", "tr_classes", ")", "!=", "1", ":", "raise", "TypeError", "(", "'Transform module must have only one subclass of Transform'", ")", "tr_class", "=", "tr_classes", "[", "0", "]", "return", "tr_class", "[", "1", "]", "(", "argv", ")", "return", "[", "instantiate_transform", "(", "tr", "[", "0", "]", ",", "tr", "[", "1", "]", ")", "for", "tr", "in", "transforms_with_argv", "]" ]
Load transform modules and return instance of transform class. Parameters ---------- transforms : [str] or [[str]] array of transform module name, or nested array of transform module name with argv to load Returns ------- array of transform instance
[ "Load", "transform", "modules", "and", "return", "instance", "of", "transform", "class", "." ]
0849e32b9eacc3256a9da48d1cab8935448fa1e7
https://github.com/keik/audiotrans/blob/0849e32b9eacc3256a9da48d1cab8935448fa1e7/audiotrans/load_transforms.py#L4-L42
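A usage sketch with hypothetical transform module names; bare names get TRANSFORM_MODULE_PREFIX prepended, and a list entry carries argv for the transform's constructor:

from audiotrans.load_transforms import load_transforms  # path per this record

transforms = load_transforms([
    'stft',                  # hypothetical module name, resolved via the prefix
    ['istft', '-H', '256'],  # hypothetical module name plus its argv
])
# each module must define exactly one Transform subclass, which is
# instantiated with the argv list (TypeError otherwise)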
244,549
fakedrake/overlay_parse
overlay_parse/dates.py
date_tuple
def date_tuple(ovls): """ We should have a list of overlays from which to extract day month year. """ day = month = year = 0 for o in ovls: if 'day' in o.props: day = o.value if 'month' in o.props: month = o.value if 'year' in o.props: year = o.value if 'date' in o.props: day, month, year = [(o or n) for o, n in zip((day, month, year), o.value)] return (day, month, year)
python
def date_tuple(ovls): """ We should have a list of overlays from which to extract day month year. """ day = month = year = 0 for o in ovls: if 'day' in o.props: day = o.value if 'month' in o.props: month = o.value if 'year' in o.props: year = o.value if 'date' in o.props: day, month, year = [(o or n) for o, n in zip((day, month, year), o.value)] return (day, month, year)
[ "def", "date_tuple", "(", "ovls", ")", ":", "day", "=", "month", "=", "year", "=", "0", "for", "o", "in", "ovls", ":", "if", "'day'", "in", "o", ".", "props", ":", "day", "=", "o", ".", "value", "if", "'month'", "in", "o", ".", "props", ":", "month", "=", "o", ".", "value", "if", "'year'", "in", "o", ".", "props", ":", "year", "=", "o", ".", "value", "if", "'date'", "in", "o", ".", "props", ":", "day", ",", "month", ",", "year", "=", "[", "(", "o", "or", "n", ")", "for", "o", ",", "n", "in", "zip", "(", "(", "day", ",", "month", ",", "year", ")", ",", "o", ".", "value", ")", "]", "return", "(", "day", ",", "month", ",", "year", ")" ]
We should have a list of overlays from which to extract day month year.
[ "We", "should", "have", "a", "list", "of", "overlays", "from", "which", "to", "extract", "day", "month", "year", "." ]
9ac362d6aef1ea41aff7375af088c6ebef93d0cd
https://github.com/fakedrake/overlay_parse/blob/9ac362d6aef1ea41aff7375af088c6ebef93d0cd/overlay_parse/dates.py#L57-L78
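A runnable sketch with a namedtuple standing in for the real overlay type, since only .props and .value are consumed here:

from collections import namedtuple

from overlay_parse.dates import date_tuple  # path per this record

Ovl = namedtuple('Ovl', ['props', 'value'])  # stand-in for the real overlay class
date_tuple([Ovl({'day'}, 4), Ovl({'month'}, 7), Ovl({'year'}, 1776)])
# -> (4, 7, 1776); a 'date' overlay only fills fields that are still 0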
244,550
fakedrake/overlay_parse
overlay_parse/dates.py
longest_overlap
def longest_overlap(ovls): """ From a list of overlays if any overlap keep the longest. """ # Ovls know how to compare to each other. ovls = sorted(ovls) # I know this could be better but ovls wont be more than 50 or so. for i, s in enumerate(ovls): passing = True for l in ovls[i + 1:]: if s.start in Rng(l.start, l.end, rng=(True, True)) or \ s.end in Rng(l.start, l.end, rng=(True, True)): passing = False break if passing: yield s
python
def longest_overlap(ovls): """ From a list of overlays if any overlap keep the longest. """ # Ovls know how to compare to each other. ovls = sorted(ovls) # I know this could be better but ovls wont be more than 50 or so. for i, s in enumerate(ovls): passing = True for l in ovls[i + 1:]: if s.start in Rng(l.start, l.end, rng=(True, True)) or \ s.end in Rng(l.start, l.end, rng=(True, True)): passing = False break if passing: yield s
[ "def", "longest_overlap", "(", "ovls", ")", ":", "# Ovls know how to compare to each other.", "ovls", "=", "sorted", "(", "ovls", ")", "# I know this could be better but ovls wont be more than 50 or so.", "for", "i", ",", "s", "in", "enumerate", "(", "ovls", ")", ":", "passing", "=", "True", "for", "l", "in", "ovls", "[", "i", "+", "1", ":", "]", ":", "if", "s", ".", "start", "in", "Rng", "(", "l", ".", "start", ",", "l", ".", "end", ",", "rng", "=", "(", "True", ",", "True", ")", ")", "or", "s", ".", "end", "in", "Rng", "(", "l", ".", "start", ",", "l", ".", "end", ",", "rng", "=", "(", "True", ",", "True", ")", ")", ":", "passing", "=", "False", "break", "if", "passing", ":", "yield", "s" ]
From a list of overlays if any overlap keep the longest.
[ "From", "a", "list", "of", "overlays", "if", "any", "overlap", "keep", "the", "longest", "." ]
9ac362d6aef1ea41aff7375af088c6ebef93d0cd
https://github.com/fakedrake/overlay_parse/blob/9ac362d6aef1ea41aff7375af088c6ebef93d0cd/overlay_parse/dates.py#L81-L100
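The same filtering idea restated on plain (start, end) tuples, since the real function depends on overlay objects and the Rng helper; this rendition assumes overlays sort shortest-first, which the keep-the-longest behavior implies:

def keep_longest(spans):
    spans = sorted(spans, key=lambda se: se[1] - se[0])  # shortest first
    for i, (s, e) in enumerate(spans):
        # keep a span only if neither endpoint falls inside a later
        # (longer) span's inclusive range
        if not any(l <= s <= r or l <= e <= r for l, r in spans[i + 1:]):
            yield (s, e)

list(keep_longest([(1, 2), (0, 3), (5, 6)]))   # -> [(5, 6), (0, 3)]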
244,551
rameshg87/pyremotevbox
pyremotevbox/ZSI/wstools/XMLSchema.py
GetSchema
def GetSchema(component):
    """convenience function for finding the parent XMLSchema instance.
    """
    parent = component
    while not isinstance(parent, XMLSchema):
        parent = parent._parent()
    return parent
python
def GetSchema(component):
    """convenience function for finding the parent XMLSchema instance.
    """
    parent = component
    while not isinstance(parent, XMLSchema):
        parent = parent._parent()
    return parent
[ "def", "GetSchema", "(", "component", ")", ":", "parent", "=", "component", "while", "not", "isinstance", "(", "parent", ",", "XMLSchema", ")", ":", "parent", "=", "parent", ".", "_parent", "(", ")", "return", "parent" ]
convenience function for finding the parent XMLSchema instance.
[ "convience", "function", "for", "finding", "the", "parent", "XMLSchema", "instance", "." ]
123dffff27da57c8faa3ac1dd4c68b1cf4558b1a
https://github.com/rameshg87/pyremotevbox/blob/123dffff27da57c8faa3ac1dd4c68b1cf4558b1a/pyremotevbox/ZSI/wstools/XMLSchema.py#L42-L48
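A usage sketch; `component` stands for any object from a parsed schema tree:

schema = GetSchema(component)   # climbs _parent() links until an XMLSchema
# `schema` is the XMLSchema instance that ultimately owns `component`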
244,552
rameshg87/pyremotevbox
pyremotevbox/ZSI/wstools/XMLSchema.py
XMLSchemaComponent.getXMLNS
def getXMLNS(self, prefix=None):
    """dereference prefix or by default xmlns, returns namespace.
    """
    if prefix == XMLSchemaComponent.xml:
        return XMLNS.XML
    parent = self
    ns = self.attributes[XMLSchemaComponent.xmlns].get(prefix or\
            XMLSchemaComponent.xmlns_key)
    while not ns:
        parent = parent._parent()
        ns = parent.attributes[XMLSchemaComponent.xmlns].get(prefix or\
                XMLSchemaComponent.xmlns_key)
        if not ns and isinstance(parent, WSDLToolsAdapter):
            if prefix is None:
                return ''
            raise SchemaError, 'unknown prefix %s' %prefix
    return ns
python
def getXMLNS(self, prefix=None):
    """dereference prefix or by default xmlns, returns namespace.
    """
    if prefix == XMLSchemaComponent.xml:
        return XMLNS.XML
    parent = self
    ns = self.attributes[XMLSchemaComponent.xmlns].get(prefix or\
            XMLSchemaComponent.xmlns_key)
    while not ns:
        parent = parent._parent()
        ns = parent.attributes[XMLSchemaComponent.xmlns].get(prefix or\
                XMLSchemaComponent.xmlns_key)
        if not ns and isinstance(parent, WSDLToolsAdapter):
            if prefix is None:
                return ''
            raise SchemaError, 'unknown prefix %s' %prefix
    return ns
[ "def", "getXMLNS", "(", "self", ",", "prefix", "=", "None", ")", ":", "if", "prefix", "==", "XMLSchemaComponent", ".", "xml", ":", "return", "XMLNS", ".", "XML", "parent", "=", "self", "ns", "=", "self", ".", "attributes", "[", "XMLSchemaComponent", ".", "xmlns", "]", ".", "get", "(", "prefix", "or", "XMLSchemaComponent", ".", "xmlns_key", ")", "while", "not", "ns", ":", "parent", "=", "parent", ".", "_parent", "(", ")", "ns", "=", "parent", ".", "attributes", "[", "XMLSchemaComponent", ".", "xmlns", "]", ".", "get", "(", "prefix", "or", "XMLSchemaComponent", ".", "xmlns_key", ")", "if", "not", "ns", "and", "isinstance", "(", "parent", ",", "WSDLToolsAdapter", ")", ":", "if", "prefix", "is", "None", ":", "return", "''", "raise", "SchemaError", ",", "'unknown prefix %s'", "%", "prefix", "return", "ns" ]
dereference prefix or by default xmlns, returns namespace.
[ "deference", "prefix", "or", "by", "default", "xmlns", "returns", "namespace", "." ]
123dffff27da57c8faa3ac1dd4c68b1cf4558b1a
https://github.com/rameshg87/pyremotevbox/blob/123dffff27da57c8faa3ac1dd4c68b1cf4558b1a/pyremotevbox/ZSI/wstools/XMLSchema.py#L648-L664
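A lookup sketch on a hypothetical schema component:

tns = component.getXMLNS('tns')   # resolves 'tns' here or on an ancestor
default = component.getXMLNS()    # default xmlns; '' if undeclared all the way up
# an unknown explicit prefix raises SchemaError once the WSDL adapter is reached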
244,553
rameshg87/pyremotevbox
pyremotevbox/ZSI/wstools/XMLSchema.py
XMLSchemaComponent.getAttribute
def getAttribute(self, attribute): """return requested attribute value or None """ if type(attribute) in (list, tuple): if len(attribute) != 2: raise LookupError, 'To access attributes must use name or (namespace,name)' ns_dict = self.attributes.get(attribute[0]) if ns_dict is None: return None return ns_dict.get(attribute[1]) return self.attributes.get(attribute)
python
def getAttribute(self, attribute): """return requested attribute value or None """ if type(attribute) in (list, tuple): if len(attribute) != 2: raise LookupError, 'To access attributes must use name or (namespace,name)' ns_dict = self.attributes.get(attribute[0]) if ns_dict is None: return None return ns_dict.get(attribute[1]) return self.attributes.get(attribute)
[ "def", "getAttribute", "(", "self", ",", "attribute", ")", ":", "if", "type", "(", "attribute", ")", "in", "(", "list", ",", "tuple", ")", ":", "if", "len", "(", "attribute", ")", "!=", "2", ":", "raise", "LookupError", ",", "'To access attributes must use name or (namespace,name)'", "ns_dict", "=", "self", ".", "attributes", ".", "get", "(", "attribute", "[", "0", "]", ")", "if", "ns_dict", "is", "None", ":", "return", "None", "return", "ns_dict", ".", "get", "(", "attribute", "[", "1", "]", ")", "return", "self", ".", "attributes", ".", "get", "(", "attribute", ")" ]
return requested attribute value or None
[ "return", "requested", "attribute", "value", "or", "None" ]
123dffff27da57c8faa3ac1dd4c68b1cf4558b1a
https://github.com/rameshg87/pyremotevbox/blob/123dffff27da57c8faa3ac1dd4c68b1cf4558b1a/pyremotevbox/ZSI/wstools/XMLSchema.py#L666-L679
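A lookup sketch; the namespace URI below is illustrative only:

name = component.getAttribute('name')   # plain lookup, None if absent
role = component.getAttribute(('http://example.org/ns', 'role'))
# a (namespace, name) pair is resolved through that namespace's sub-dict;
# any other tuple length raises LookupError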
244,554
rameshg87/pyremotevbox
pyremotevbox/ZSI/wstools/XMLSchema.py
XMLSchemaComponent.__setAttributeDefaults
def __setAttributeDefaults(self): """Looks for default values for unset attributes. If class variable representing attribute is None, then it must be defined as an instance variable. """ for k,v in self.__class__.attributes.items(): if v is not None and self.attributes.has_key(k) is False: if isinstance(v, types.FunctionType): self.attributes[k] = v(self) else: self.attributes[k] = v
python
def __setAttributeDefaults(self): """Looks for default values for unset attributes. If class variable representing attribute is None, then it must be defined as an instance variable. """ for k,v in self.__class__.attributes.items(): if v is not None and self.attributes.has_key(k) is False: if isinstance(v, types.FunctionType): self.attributes[k] = v(self) else: self.attributes[k] = v
[ "def", "__setAttributeDefaults", "(", "self", ")", ":", "for", "k", ",", "v", "in", "self", ".", "__class__", ".", "attributes", ".", "items", "(", ")", ":", "if", "v", "is", "not", "None", "and", "self", ".", "attributes", ".", "has_key", "(", "k", ")", "is", "False", ":", "if", "isinstance", "(", "v", ",", "types", ".", "FunctionType", ")", ":", "self", ".", "attributes", "[", "k", "]", "=", "v", "(", "self", ")", "else", ":", "self", ".", "attributes", "[", "k", "]", "=", "v" ]
Looks for default values for unset attributes. If class variable representing attribute is None, then it must be defined as an instance variable.
[ "Looks", "for", "default", "values", "for", "unset", "attributes", ".", "If", "class", "variable", "representing", "attribute", "is", "None", "then", "it", "must", "be", "defined", "as", "an", "instance", "variable", "." ]
123dffff27da57c8faa3ac1dd4c68b1cf4558b1a
https://github.com/rameshg87/pyremotevbox/blob/123dffff27da57c8faa3ac1dd4c68b1cf4558b1a/pyremotevbox/ZSI/wstools/XMLSchema.py#L760-L770
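The pattern this private helper implements, restated as a small self-contained Python 3 sketch (the original tests types.FunctionType rather than callable()):

class Demo(object):
    attributes = {'minOccurs': '1',                  # plain default, copied in
                  'form': lambda self: 'qualified'}  # callable default, called with self

    def __init__(self):
        self.attributes = {}
        for k, v in Demo.attributes.items():
            if v is not None and k not in self.attributes:
                self.attributes[k] = v(self) if callable(v) else v

Demo().attributes   # -> {'minOccurs': '1', 'form': 'qualified'}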
244,555
rameshg87/pyremotevbox
pyremotevbox/ZSI/wstools/XMLSchema.py
LocalElementDeclaration.isQualified
def isQualified(self):
    """
    Local elements can be qualified or unqualified according to the
    attribute form, or the elementFormDefault.  By default local
    elements are unqualified.
    """
    form = self.getAttribute('form')
    if form == 'qualified':
        return True
    if form == 'unqualified':
        return False
    raise SchemaError, 'Bad form (%s) for element: %s' %(form, self.getItemTrace())
python
def isQualified(self):
    """
    Local elements can be qualified or unqualified according to the
    attribute form, or the elementFormDefault.  By default local
    elements are unqualified.
    """
    form = self.getAttribute('form')
    if form == 'qualified':
        return True
    if form == 'unqualified':
        return False
    raise SchemaError, 'Bad form (%s) for element: %s' %(form, self.getItemTrace())
[ "def", "isQualified", "(", "self", ")", ":", "form", "=", "self", ".", "getAttribute", "(", "'form'", ")", "if", "form", "==", "'qualified'", ":", "return", "True", "if", "form", "==", "'unqualified'", ":", "return", "False", "raise", "SchemaError", ",", "'Bad form (%s) for element: %s'", "%", "(", "form", ",", "self", ".", "getItemTrace", "(", ")", ")" ]
Local elements can be qualified or unqualified according to the attribute form, or the elementFormDefault. By default local elements are unqualified.
[ "Local", "elements", "can", "be", "qualified", "or", "unqualifed", "according", "to", "the", "attribute", "form", "or", "the", "elementFormDefault", ".", "By", "default", "local", "elements", "are", "unqualified", "." ]
123dffff27da57c8faa3ac1dd4c68b1cf4558b1a
https://github.com/rameshg87/pyremotevbox/blob/123dffff27da57c8faa3ac1dd4c68b1cf4558b1a/pyremotevbox/ZSI/wstools/XMLSchema.py#L2019-L2030
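A usage sketch on a hypothetical local element declaration:

form = element.getAttribute('form')   # 'qualified' or 'unqualified' after defaults
qualified = element.isQualified()     # True/False; any other form value raises SchemaError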
244,556
dreipol/meta-tagger
meta_tagger/templatetags/meta_tagger_tags.py
render_title_tag
def render_title_tag(context, is_og=False): """ Returns the title as string or a complete open graph meta tag. """ request = context['request'] content = '' # Try to get the title from the context object (e.g. DetailViews). if context.get('object'): try: content = context['object'].get_meta_title() except AttributeError: pass elif context.get('meta_tagger'): content = context['meta_tagger'].get('title') if not content: # Try to get the title from the cms page. try: content = request.current_page.get_page_title() # Try the `page_title` before the `title of the CMS page. if not content: content = request.current_page.get_title() except (AttributeError, NoReverseMatch): pass if not is_og: return content else: return mark_safe('<meta property="og:title" content="{content}">'.format(content=content))
python
def render_title_tag(context, is_og=False): """ Returns the title as string or a complete open graph meta tag. """ request = context['request'] content = '' # Try to get the title from the context object (e.g. DetailViews). if context.get('object'): try: content = context['object'].get_meta_title() except AttributeError: pass elif context.get('meta_tagger'): content = context['meta_tagger'].get('title') if not content: # Try to get the title from the cms page. try: content = request.current_page.get_page_title() # Try the `page_title` before the `title of the CMS page. if not content: content = request.current_page.get_title() except (AttributeError, NoReverseMatch): pass if not is_og: return content else: return mark_safe('<meta property="og:title" content="{content}">'.format(content=content))
[ "def", "render_title_tag", "(", "context", ",", "is_og", "=", "False", ")", ":", "request", "=", "context", "[", "'request'", "]", "content", "=", "''", "# Try to get the title from the context object (e.g. DetailViews).", "if", "context", ".", "get", "(", "'object'", ")", ":", "try", ":", "content", "=", "context", "[", "'object'", "]", ".", "get_meta_title", "(", ")", "except", "AttributeError", ":", "pass", "elif", "context", ".", "get", "(", "'meta_tagger'", ")", ":", "content", "=", "context", "[", "'meta_tagger'", "]", ".", "get", "(", "'title'", ")", "if", "not", "content", ":", "# Try to get the title from the cms page.", "try", ":", "content", "=", "request", ".", "current_page", ".", "get_page_title", "(", ")", "# Try the `page_title` before the `title of the CMS page.", "if", "not", "content", ":", "content", "=", "request", ".", "current_page", ".", "get_title", "(", ")", "except", "(", "AttributeError", ",", "NoReverseMatch", ")", ":", "pass", "if", "not", "is_og", ":", "return", "content", "else", ":", "return", "mark_safe", "(", "'<meta property=\"og:title\" content=\"{content}\">'", ".", "format", "(", "content", "=", "content", ")", ")" ]
Returns the title as string or a complete open graph meta tag.
[ "Returns", "the", "title", "as", "string", "or", "a", "complete", "open", "graph", "meta", "tag", "." ]
e00a8c774d7b45882f068ba6d2706bb559de2b80
https://github.com/dreipol/meta-tagger/blob/e00a8c774d7b45882f068ba6d2706bb559de2b80/meta_tagger/templatetags/meta_tagger_tags.py#L29-L57
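Rendering the tag from Python via Django's template machinery; the library name follows the record's file name, and `request` stands for an incoming HttpRequest:

from django.template import Context, Template

tpl = Template('{% load meta_tagger_tags %}<title>{% render_title_tag %}</title>')
html = tpl.render(Context({'request': request}))    # plain title text
og = Template('{% load meta_tagger_tags %}{% render_title_tag is_og=True %}')
og_html = og.render(Context({'request': request}))  # '<meta property="og:title" ...>'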
244,557
dreipol/meta-tagger
meta_tagger/templatetags/meta_tagger_tags.py
render_description_meta_tag
def render_description_meta_tag(context, is_og=False): """ Returns the description as meta or open graph tag. """ request = context['request'] content = '' # Try to get the description from the context object (e.g. DetailViews). if context.get('object'): try: content = context['object'].get_meta_description() except AttributeError: pass elif context.get('meta_tagger'): content = context['meta_tagger'].get('description') if not content: try: # Try for the meta description of the cms page. content = request.current_page.get_meta_description() except (AttributeError, NoReverseMatch): pass if content: return mark_safe('<meta {attr_name}="{tag_name}" content="{content}">'.format( attr_name='name' if not is_og else 'property', tag_name='description' if not is_og else 'og:description', content=content )) else: return ''
python
def render_description_meta_tag(context, is_og=False): """ Returns the description as meta or open graph tag. """ request = context['request'] content = '' # Try to get the description from the context object (e.g. DetailViews). if context.get('object'): try: content = context['object'].get_meta_description() except AttributeError: pass elif context.get('meta_tagger'): content = context['meta_tagger'].get('description') if not content: try: # Try for the meta description of the cms page. content = request.current_page.get_meta_description() except (AttributeError, NoReverseMatch): pass if content: return mark_safe('<meta {attr_name}="{tag_name}" content="{content}">'.format( attr_name='name' if not is_og else 'property', tag_name='description' if not is_og else 'og:description', content=content )) else: return ''
[ "def", "render_description_meta_tag", "(", "context", ",", "is_og", "=", "False", ")", ":", "request", "=", "context", "[", "'request'", "]", "content", "=", "''", "# Try to get the description from the context object (e.g. DetailViews).", "if", "context", ".", "get", "(", "'object'", ")", ":", "try", ":", "content", "=", "context", "[", "'object'", "]", ".", "get_meta_description", "(", ")", "except", "AttributeError", ":", "pass", "elif", "context", ".", "get", "(", "'meta_tagger'", ")", ":", "content", "=", "context", "[", "'meta_tagger'", "]", ".", "get", "(", "'description'", ")", "if", "not", "content", ":", "try", ":", "# Try for the meta description of the cms page.", "content", "=", "request", ".", "current_page", ".", "get_meta_description", "(", ")", "except", "(", "AttributeError", ",", "NoReverseMatch", ")", ":", "pass", "if", "content", ":", "return", "mark_safe", "(", "'<meta {attr_name}=\"{tag_name}\" content=\"{content}\">'", ".", "format", "(", "attr_name", "=", "'name'", "if", "not", "is_og", "else", "'property'", ",", "tag_name", "=", "'description'", "if", "not", "is_og", "else", "'og:description'", ",", "content", "=", "content", ")", ")", "else", ":", "return", "''" ]
Returns the description as meta or open graph tag.
[ "Returns", "the", "description", "as", "meta", "or", "open", "graph", "tag", "." ]
e00a8c774d7b45882f068ba6d2706bb559de2b80
https://github.com/dreipol/meta-tagger/blob/e00a8c774d7b45882f068ba6d2706bb559de2b80/meta_tagger/templatetags/meta_tagger_tags.py#L61-L91
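Same invocation pattern as above; the output switches between name= and property= attributes:

tpl = Template('{% load meta_tagger_tags %}{% render_description_meta_tag %}')
tpl.render(Context({'request': request}))
# -> '<meta name="description" content="...">' when a description is found,
#    '<meta property="og:description" ...>' with is_og=True, or '' otherwise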
244,558
dreipol/meta-tagger
meta_tagger/templatetags/meta_tagger_tags.py
render_robots_meta_tag
def render_robots_meta_tag(context): """ Returns the robots meta tag. """ request = context['request'] robots_indexing = None robots_following = None # Prevent indexing any unwanted domains (e.g. staging). if context.request.get_host() in settings.META_TAGGER_ROBOTS_DOMAIN_WHITELIST: # Try to get the title from the context object (e.g. DetailViews). if context.get('object'): try: robots_indexing = context['object'].get_robots_indexing() robots_following = context['object'].get_robots_following() except AttributeError: pass elif context.get('meta_tagger'): robots_indexing = context['meta_tagger'].get('robots_indexing', robots_indexing) robots_following = context['meta_tagger'].get('robots_following', robots_following) # Try fetching the robots values of the cms page. if robots_indexing is None: try: robots_indexing = request.current_page.metatagpageextension.robots_indexing except (AttributeError, NoReverseMatch, MetaTagPageExtension.DoesNotExist): robots_indexing = True if robots_following is None: try: robots_following = request.current_page.metatagpageextension.robots_following except (AttributeError, NoReverseMatch, MetaTagPageExtension.DoesNotExist): robots_following = True return mark_safe('<meta name="robots" content="{robots_indexing}, {robots_following}">'.format( robots_indexing='index' if robots_indexing else 'noindex', robots_following='follow' if robots_following else 'nofollow' ))
python
def render_robots_meta_tag(context): """ Returns the robots meta tag. """ request = context['request'] robots_indexing = None robots_following = None # Prevent indexing any unwanted domains (e.g. staging). if context.request.get_host() in settings.META_TAGGER_ROBOTS_DOMAIN_WHITELIST: # Try to get the title from the context object (e.g. DetailViews). if context.get('object'): try: robots_indexing = context['object'].get_robots_indexing() robots_following = context['object'].get_robots_following() except AttributeError: pass elif context.get('meta_tagger'): robots_indexing = context['meta_tagger'].get('robots_indexing', robots_indexing) robots_following = context['meta_tagger'].get('robots_following', robots_following) # Try fetching the robots values of the cms page. if robots_indexing is None: try: robots_indexing = request.current_page.metatagpageextension.robots_indexing except (AttributeError, NoReverseMatch, MetaTagPageExtension.DoesNotExist): robots_indexing = True if robots_following is None: try: robots_following = request.current_page.metatagpageextension.robots_following except (AttributeError, NoReverseMatch, MetaTagPageExtension.DoesNotExist): robots_following = True return mark_safe('<meta name="robots" content="{robots_indexing}, {robots_following}">'.format( robots_indexing='index' if robots_indexing else 'noindex', robots_following='follow' if robots_following else 'nofollow' ))
[ "def", "render_robots_meta_tag", "(", "context", ")", ":", "request", "=", "context", "[", "'request'", "]", "robots_indexing", "=", "None", "robots_following", "=", "None", "# Prevent indexing any unwanted domains (e.g. staging).", "if", "context", ".", "request", ".", "get_host", "(", ")", "in", "settings", ".", "META_TAGGER_ROBOTS_DOMAIN_WHITELIST", ":", "# Try to get the title from the context object (e.g. DetailViews).", "if", "context", ".", "get", "(", "'object'", ")", ":", "try", ":", "robots_indexing", "=", "context", "[", "'object'", "]", ".", "get_robots_indexing", "(", ")", "robots_following", "=", "context", "[", "'object'", "]", ".", "get_robots_following", "(", ")", "except", "AttributeError", ":", "pass", "elif", "context", ".", "get", "(", "'meta_tagger'", ")", ":", "robots_indexing", "=", "context", "[", "'meta_tagger'", "]", ".", "get", "(", "'robots_indexing'", ",", "robots_indexing", ")", "robots_following", "=", "context", "[", "'meta_tagger'", "]", ".", "get", "(", "'robots_following'", ",", "robots_following", ")", "# Try fetching the robots values of the cms page.", "if", "robots_indexing", "is", "None", ":", "try", ":", "robots_indexing", "=", "request", ".", "current_page", ".", "metatagpageextension", ".", "robots_indexing", "except", "(", "AttributeError", ",", "NoReverseMatch", ",", "MetaTagPageExtension", ".", "DoesNotExist", ")", ":", "robots_indexing", "=", "True", "if", "robots_following", "is", "None", ":", "try", ":", "robots_following", "=", "request", ".", "current_page", ".", "metatagpageextension", ".", "robots_following", "except", "(", "AttributeError", ",", "NoReverseMatch", ",", "MetaTagPageExtension", ".", "DoesNotExist", ")", ":", "robots_following", "=", "True", "return", "mark_safe", "(", "'<meta name=\"robots\" content=\"{robots_indexing}, {robots_following}\">'", ".", "format", "(", "robots_indexing", "=", "'index'", "if", "robots_indexing", "else", "'noindex'", ",", "robots_following", "=", "'follow'", "if", "robots_following", "else", "'nofollow'", ")", ")" ]
Returns the robots meta tag.
[ "Returns", "the", "robots", "meta", "tag", "." ]
e00a8c774d7b45882f068ba6d2706bb559de2b80
https://github.com/dreipol/meta-tagger/blob/e00a8c774d7b45882f068ba6d2706bb559de2b80/meta_tagger/templatetags/meta_tagger_tags.py#L95-L133
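Illustrative outputs; hosts outside META_TAGGER_ROBOTS_DOMAIN_WHITELIST never reach the lookups, so both flags stay falsy:

tpl = Template('{% load meta_tagger_tags %}{% render_robots_meta_tag %}')
tpl.render(Context({'request': request}))
# whitelisted host, no overrides -> '<meta name="robots" content="index, follow">'
# non-whitelisted host           -> '<meta name="robots" content="noindex, nofollow">'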
244,559
collectiveacuity/labPack
labpack/storage/aws/s3.py
_s3Client.list_buckets
def list_buckets(self): ''' a method to retrieve a list of buckets on s3 :return: list of buckets ''' title = '%s.list_buckets' % self.__class__.__name__ bucket_list = [] # send request to s3 try: response = self.connection.list_buckets() except: raise AWSConnectionError(title) # create list from response for bucket in response['Buckets']: bucket_list.append(bucket['Name']) self.bucket_list = bucket_list return self.bucket_list
python
def list_buckets(self): ''' a method to retrieve a list of buckets on s3 :return: list of buckets ''' title = '%s.list_buckets' % self.__class__.__name__ bucket_list = [] # send request to s3 try: response = self.connection.list_buckets() except: raise AWSConnectionError(title) # create list from response for bucket in response['Buckets']: bucket_list.append(bucket['Name']) self.bucket_list = bucket_list return self.bucket_list
[ "def", "list_buckets", "(", "self", ")", ":", "title", "=", "'%s.list_buckets'", "%", "self", ".", "__class__", ".", "__name__", "bucket_list", "=", "[", "]", "# send request to s3", "try", ":", "response", "=", "self", ".", "connection", ".", "list_buckets", "(", ")", "except", ":", "raise", "AWSConnectionError", "(", "title", ")", "# create list from response", "for", "bucket", "in", "response", "[", "'Buckets'", "]", ":", "bucket_list", ".", "append", "(", "bucket", "[", "'Name'", "]", ")", "self", ".", "bucket_list", "=", "bucket_list", "return", "self", ".", "bucket_list" ]
a method to retrieve a list of buckets on s3 :return: list of buckets
[ "a", "method", "to", "retrieve", "a", "list", "of", "buckets", "on", "s3" ]
52949ece35e72e3cc308f54d9ffa6bfbd96805b8
https://github.com/collectiveacuity/labPack/blob/52949ece35e72e3cc308f54d9ffa6bfbd96805b8/labpack/storage/aws/s3.py#L87-L111
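A usage sketch; the _s3Client constructor is not shown in this record, so its configuration is assumed:

client = _s3Client(**aws_config)   # aws_config: hypothetical credential mapping
names = client.list_buckets()      # e.g. ['my-data-bucket', 'my-log-bucket']
# the result is also cached on client.bucket_list for later existence checks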
244,560
collectiveacuity/labPack
labpack/storage/aws/s3.py
_s3Client.delete_bucket
def delete_bucket(self, bucket_name): ''' a method to delete a bucket in s3 and all its contents :param bucket_name: string with name of bucket :return: string with status of method ''' title = '%s.delete_bucket' % self.__class__.__name__ # validate inputs input_fields = { 'bucket_name': bucket_name } for key, value in input_fields.items(): object_title = '%s(%s=%s)' % (title, key, str(value)) self.fields.validate(value, '.%s' % key, object_title) # check for existence of bucket if not bucket_name in self.bucket_list: if not bucket_name in self.list_buckets(): status_msg = 'S3 bucket "%s" does not exist.' % bucket_name self.iam.printer(status_msg) return status_msg # retrieve list of records in bucket record_keys = [] record_list, next_key = self.list_versions(bucket_name) for record in record_list: details = { 'Key': record['key'], 'VersionId': record['version_id'] } record_keys.append(details) # delete records in bucket kw_args = { 'Bucket': bucket_name, 'Delete': { 'Objects': record_keys } } if record_keys: try: response = self.connection.delete_objects(**kw_args) except: raise AWSConnectionError(title) # continue deleting objects in bucket until empty if next_key: while next_key: record_keys = [] record_list, next_key = self.list_versions(bucket_name, starting_key=next_key['key'], starting_version=next_key['version_id']) for record in record_list: details = { 'Key': record['key'], 'VersionId': record['version_id'] } record_keys.append(details) kw_args = { 'Bucket': bucket_name, 'Delete': { 'Objects': record_keys } } try: response = self.connection.delete_objects(**kw_args) except: raise AWSConnectionError(title) # send delete bucket request try: self.connection.delete_bucket( Bucket=bucket_name ) except: raise AWSConnectionError(title) # report result and return true status_msg = 'S3 bucket "%s" deleted.' % bucket_name self.iam.printer(status_msg) return status_msg
python
def delete_bucket(self, bucket_name): ''' a method to delete a bucket in s3 and all its contents :param bucket_name: string with name of bucket :return: string with status of method ''' title = '%s.delete_bucket' % self.__class__.__name__ # validate inputs input_fields = { 'bucket_name': bucket_name } for key, value in input_fields.items(): object_title = '%s(%s=%s)' % (title, key, str(value)) self.fields.validate(value, '.%s' % key, object_title) # check for existence of bucket if not bucket_name in self.bucket_list: if not bucket_name in self.list_buckets(): status_msg = 'S3 bucket "%s" does not exist.' % bucket_name self.iam.printer(status_msg) return status_msg # retrieve list of records in bucket record_keys = [] record_list, next_key = self.list_versions(bucket_name) for record in record_list: details = { 'Key': record['key'], 'VersionId': record['version_id'] } record_keys.append(details) # delete records in bucket kw_args = { 'Bucket': bucket_name, 'Delete': { 'Objects': record_keys } } if record_keys: try: response = self.connection.delete_objects(**kw_args) except: raise AWSConnectionError(title) # continue deleting objects in bucket until empty if next_key: while next_key: record_keys = [] record_list, next_key = self.list_versions(bucket_name, starting_key=next_key['key'], starting_version=next_key['version_id']) for record in record_list: details = { 'Key': record['key'], 'VersionId': record['version_id'] } record_keys.append(details) kw_args = { 'Bucket': bucket_name, 'Delete': { 'Objects': record_keys } } try: response = self.connection.delete_objects(**kw_args) except: raise AWSConnectionError(title) # send delete bucket request try: self.connection.delete_bucket( Bucket=bucket_name ) except: raise AWSConnectionError(title) # report result and return true status_msg = 'S3 bucket "%s" deleted.' % bucket_name self.iam.printer(status_msg) return status_msg
[ "def", "delete_bucket", "(", "self", ",", "bucket_name", ")", ":", "title", "=", "'%s.delete_bucket'", "%", "self", ".", "__class__", ".", "__name__", "# validate inputs", "input_fields", "=", "{", "'bucket_name'", ":", "bucket_name", "}", "for", "key", ",", "value", "in", "input_fields", ".", "items", "(", ")", ":", "object_title", "=", "'%s(%s=%s)'", "%", "(", "title", ",", "key", ",", "str", "(", "value", ")", ")", "self", ".", "fields", ".", "validate", "(", "value", ",", "'.%s'", "%", "key", ",", "object_title", ")", "# check for existence of bucket", "if", "not", "bucket_name", "in", "self", ".", "bucket_list", ":", "if", "not", "bucket_name", "in", "self", ".", "list_buckets", "(", ")", ":", "status_msg", "=", "'S3 bucket \"%s\" does not exist.'", "%", "bucket_name", "self", ".", "iam", ".", "printer", "(", "status_msg", ")", "return", "status_msg", "# retrieve list of records in bucket", "record_keys", "=", "[", "]", "record_list", ",", "next_key", "=", "self", ".", "list_versions", "(", "bucket_name", ")", "for", "record", "in", "record_list", ":", "details", "=", "{", "'Key'", ":", "record", "[", "'key'", "]", ",", "'VersionId'", ":", "record", "[", "'version_id'", "]", "}", "record_keys", ".", "append", "(", "details", ")", "# delete records in bucket", "kw_args", "=", "{", "'Bucket'", ":", "bucket_name", ",", "'Delete'", ":", "{", "'Objects'", ":", "record_keys", "}", "}", "if", "record_keys", ":", "try", ":", "response", "=", "self", ".", "connection", ".", "delete_objects", "(", "*", "*", "kw_args", ")", "except", ":", "raise", "AWSConnectionError", "(", "title", ")", "# continue deleting objects in bucket until empty", "if", "next_key", ":", "while", "next_key", ":", "record_keys", "=", "[", "]", "record_list", ",", "next_key", "=", "self", ".", "list_versions", "(", "bucket_name", ",", "starting_key", "=", "next_key", "[", "'key'", "]", ",", "starting_version", "=", "next_key", "[", "'version_id'", "]", ")", "for", "record", "in", "record_list", ":", "details", "=", "{", "'Key'", ":", "record", "[", "'key'", "]", ",", "'VersionId'", ":", "record", "[", "'version_id'", "]", "}", "record_keys", ".", "append", "(", "details", ")", "kw_args", "=", "{", "'Bucket'", ":", "bucket_name", ",", "'Delete'", ":", "{", "'Objects'", ":", "record_keys", "}", "}", "try", ":", "response", "=", "self", ".", "connection", ".", "delete_objects", "(", "*", "*", "kw_args", ")", "except", ":", "raise", "AWSConnectionError", "(", "title", ")", "# send delete bucket request", "try", ":", "self", ".", "connection", ".", "delete_bucket", "(", "Bucket", "=", "bucket_name", ")", "except", ":", "raise", "AWSConnectionError", "(", "title", ")", "# report result and return true", "status_msg", "=", "'S3 bucket \"%s\" deleted.'", "%", "bucket_name", "self", ".", "iam", ".", "printer", "(", "status_msg", ")", "return", "status_msg" ]
a method to delete a bucket in s3 and all its contents :param bucket_name: string with name of bucket :return: string with status of method
[ "a", "method", "to", "delete", "a", "bucket", "in", "s3", "and", "all", "its", "contents" ]
52949ece35e72e3cc308f54d9ffa6bfbd96805b8
https://github.com/collectiveacuity/labPack/blob/52949ece35e72e3cc308f54d9ffa6bfbd96805b8/labpack/storage/aws/s3.py#L790-L867
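For readers skimming these records, the empty-then-delete pattern in delete_bucket can be reduced to a short boto3 sketch; the bucket name is a placeholder and the paginator-based loop is our simplification, not labPack's own code:

```python
# Minimal sketch: purge every object version, then delete the bucket.
# Assumes valid AWS credentials; 'example-bucket' is hypothetical.
import boto3

s3 = boto3.client('s3')
bucket_name = 'example-bucket'

paginator = s3.get_paginator('list_object_versions')
for page in paginator.paginate(Bucket=bucket_name):
    # versions and delete markers must both be removed before bucket deletion
    targets = [{'Key': v['Key'], 'VersionId': v['VersionId']}
               for v in page.get('Versions', []) + page.get('DeleteMarkers', [])]
    if targets:
        s3.delete_objects(Bucket=bucket_name, Delete={'Objects': targets})

s3.delete_bucket(Bucket=bucket_name)
```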
244,561
collectiveacuity/labPack
labpack/storage/aws/s3.py
_s3Client.read_headers
def read_headers(self, bucket_name, record_key, record_version='', version_check=False): ''' a method for retrieving the headers of a record from s3 :param bucket_name: string with name of bucket :param record_key: string with key value of record :param record_version: [optional] string with aws id of version of record :param version_check: [optional] boolean to enable current version check :return: dictionary with headers of record ''' title = '%s.read_headers' % self.__class__.__name__ from datetime import datetime from dateutil.tz import tzutc # validate inputs input_fields = { 'bucket_name': bucket_name, 'record_key': record_key, 'record_version': record_version } for key, value in input_fields.items(): if value: object_title = '%s(%s=%s)' % (title, key, str(value)) self.fields.validate(value, '.%s' % key, object_title) # verify existence of bucket if not bucket_name in self.bucket_list: if not bucket_name in self.list_buckets(): raise ValueError('S3 bucket "%s" does not exist in aws region %s.' % (bucket_name, self.iam.region_name)) # create key word argument dictionary headers_kwargs = { 'Bucket': bucket_name, 'Key': record_key } if record_version: headers_kwargs['VersionId'] = record_version # create metadata default metadata_details = {} # send request for record header try: record = self.connection.head_object(**headers_kwargs) except Exception as err: try: import requests requests.get('https://www.google.com') return metadata_details except: raise AWSConnectionError(title, captured_error=err) # create metadata from response metadata_details = { 'key': record_key, 'version_id': '', 'current_version': True, 'content_type': '', 'content_encoding': '', 'metadata': {} } metadata_details = self.iam.ingest(record, metadata_details) epoch_zero = datetime.fromtimestamp(0).replace(tzinfo=tzutc()) metadata_details['last_modified'] = (metadata_details['last_modified'] - epoch_zero).total_seconds() if 'response_metadata' in metadata_details.keys(): del metadata_details['response_metadata'] # determine current version from version id if record_version and version_check: version_kwargs = { 'Bucket': bucket_name, 'Prefix': record_key } try: version_check = self.connection.list_object_versions(**version_kwargs) for version in version_check['Versions']: if version['VersionId'] == metadata_details['version_id']: metadata_details['current_version'] = version['IsLatest'] break except: raise AWSConnectionError(title) return metadata_details
python
def read_headers(self, bucket_name, record_key, record_version='', version_check=False): ''' a method for retrieving the headers of a record from s3 :param bucket_name: string with name of bucket :param record_key: string with key value of record :param record_version: [optional] string with aws id of version of record :param version_check: [optional] boolean to enable current version check :return: dictionary with headers of record ''' title = '%s.read_headers' % self.__class__.__name__ from datetime import datetime from dateutil.tz import tzutc # validate inputs input_fields = { 'bucket_name': bucket_name, 'record_key': record_key, 'record_version': record_version } for key, value in input_fields.items(): if value: object_title = '%s(%s=%s)' % (title, key, str(value)) self.fields.validate(value, '.%s' % key, object_title) # verify existence of bucket if not bucket_name in self.bucket_list: if not bucket_name in self.list_buckets(): raise ValueError('S3 bucket "%s" does not exist in aws region %s.' % (bucket_name, self.iam.region_name)) # create key word argument dictionary headers_kwargs = { 'Bucket': bucket_name, 'Key': record_key } if record_version: headers_kwargs['VersionId'] = record_version # create metadata default metadata_details = {} # send request for record header try: record = self.connection.head_object(**headers_kwargs) except Exception as err: try: import requests requests.get('https://www.google.com') return metadata_details except: raise AWSConnectionError(title, captured_error=err) # create metadata from response metadata_details = { 'key': record_key, 'version_id': '', 'current_version': True, 'content_type': '', 'content_encoding': '', 'metadata': {} } metadata_details = self.iam.ingest(record, metadata_details) epoch_zero = datetime.fromtimestamp(0).replace(tzinfo=tzutc()) metadata_details['last_modified'] = (metadata_details['last_modified'] - epoch_zero).total_seconds() if 'response_metadata' in metadata_details.keys(): del metadata_details['response_metadata'] # determine current version from version id if record_version and version_check: version_kwargs = { 'Bucket': bucket_name, 'Prefix': record_key } try: version_check = self.connection.list_object_versions(**version_kwargs) for version in version_check['Versions']: if version['VersionId'] == metadata_details['version_id']: metadata_details['current_version'] = version['IsLatest'] break except: raise AWSConnectionError(title) return metadata_details
[ "def", "read_headers", "(", "self", ",", "bucket_name", ",", "record_key", ",", "record_version", "=", "''", ",", "version_check", "=", "False", ")", ":", "title", "=", "'%s.read_headers'", "%", "self", ".", "__class__", ".", "__name__", "from", "datetime", "import", "datetime", "from", "dateutil", ".", "tz", "import", "tzutc", "# validate inputs", "input_fields", "=", "{", "'bucket_name'", ":", "bucket_name", ",", "'record_key'", ":", "record_key", ",", "'record_version'", ":", "record_version", "}", "for", "key", ",", "value", "in", "input_fields", ".", "items", "(", ")", ":", "if", "value", ":", "object_title", "=", "'%s(%s=%s)'", "%", "(", "title", ",", "key", ",", "str", "(", "value", ")", ")", "self", ".", "fields", ".", "validate", "(", "value", ",", "'.%s'", "%", "key", ",", "object_title", ")", "# verify existence of bucket", "if", "not", "bucket_name", "in", "self", ".", "bucket_list", ":", "if", "not", "bucket_name", "in", "self", ".", "list_buckets", "(", ")", ":", "raise", "ValueError", "(", "'S3 bucket \"%s\" does not exist in aws region %s.'", "%", "(", "bucket_name", ",", "self", ".", "iam", ".", "region_name", ")", ")", "# create key word argument dictionary", "headers_kwargs", "=", "{", "'Bucket'", ":", "bucket_name", ",", "'Key'", ":", "record_key", "}", "if", "record_version", ":", "headers_kwargs", "[", "'VersionId'", "]", "=", "record_version", "# create metadata default", "metadata_details", "=", "{", "}", "# send request for record header", "try", ":", "record", "=", "self", ".", "connection", ".", "head_object", "(", "*", "*", "headers_kwargs", ")", "except", "Exception", "as", "err", ":", "try", ":", "import", "requests", "requests", ".", "get", "(", "'https://www.google.com'", ")", "return", "metadata_details", "except", ":", "raise", "AWSConnectionError", "(", "title", ",", "captured_error", "=", "err", ")", "# create metadata from response", "metadata_details", "=", "{", "'key'", ":", "record_key", ",", "'version_id'", ":", "''", ",", "'current_version'", ":", "True", ",", "'content_type'", ":", "''", ",", "'content_encoding'", ":", "''", ",", "'metadata'", ":", "{", "}", "}", "metadata_details", "=", "self", ".", "iam", ".", "ingest", "(", "record", ",", "metadata_details", ")", "epoch_zero", "=", "datetime", ".", "fromtimestamp", "(", "0", ")", ".", "replace", "(", "tzinfo", "=", "tzutc", "(", ")", ")", "metadata_details", "[", "'last_modified'", "]", "=", "(", "metadata_details", "[", "'last_modified'", "]", "-", "epoch_zero", ")", ".", "total_seconds", "(", ")", "if", "'response_metadata'", "in", "metadata_details", ".", "keys", "(", ")", ":", "del", "metadata_details", "[", "'response_metadata'", "]", "# determine current version from version id", "if", "record_version", "and", "version_check", ":", "version_kwargs", "=", "{", "'Bucket'", ":", "bucket_name", ",", "'Prefix'", ":", "record_key", "}", "try", ":", "version_check", "=", "self", ".", "connection", ".", "list_object_versions", "(", "*", "*", "version_kwargs", ")", "for", "version", "in", "version_check", "[", "'Versions'", "]", ":", "if", "version", "[", "'VersionId'", "]", "==", "metadata_details", "[", "'version_id'", "]", ":", "metadata_details", "[", "'current_version'", "]", "=", "version", "[", "'IsLatest'", "]", "break", "except", ":", "raise", "AWSConnectionError", "(", "title", ")", "return", "metadata_details" ]
a method for retrieving the headers of a record from s3 :param bucket_name: string with name of bucket :param record_key: string with key value of record :param record_version: [optional] string with aws id of version of record :param version_check: [optional] boolean to enable current version check :return: dictionary with headers of record
[ "a", "method", "for", "retrieving", "the", "headers", "of", "a", "record", "from", "s3" ]
52949ece35e72e3cc308f54d9ffa6bfbd96805b8
https://github.com/collectiveacuity/labPack/blob/52949ece35e72e3cc308f54d9ffa6bfbd96805b8/labpack/storage/aws/s3.py#L1167-L1252
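The core of read_headers is a single head_object call plus the epoch-seconds conversion; a minimal sketch, assuming boto3, valid credentials, and placeholder bucket and key names:

```python
# Fetch object headers and normalize LastModified to epoch seconds,
# mirroring the conversion in read_headers above.
from datetime import datetime, timezone
import boto3

s3 = boto3.client('s3')
response = s3.head_object(Bucket='example-bucket', Key='example-key')

epoch_zero = datetime.fromtimestamp(0, tz=timezone.utc)
last_modified = (response['LastModified'] - epoch_zero).total_seconds()
print(last_modified, response.get('ContentType'), response.get('Metadata'))
```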
244,562
collectiveacuity/labPack
labpack/storage/aws/s3.py
_s3Client.delete_record
def delete_record(self, bucket_name, record_key, record_version=''): ''' a method for deleting an object record in s3 :param bucket_name: string with name of bucket :param record_key: string with key value of record :param record_version: [optional] string with aws id of version of record :return: dictionary with status of delete request ''' title = '%s.delete_record' % self.__class__.__name__ # validate inputs input_fields = { 'bucket_name': bucket_name, 'record_key': record_key, 'record_version': record_version } for key, value in input_fields.items(): if value: object_title = '%s(%s=%s)' % (title, key, str(value)) self.fields.validate(value, '.%s' % key, object_title) # verify existence of bucket if not bucket_name in self.bucket_list: if not bucket_name in self.list_buckets(): raise ValueError('S3 bucket "%s" does not exist in aws region %s.' % (bucket_name, self.iam.region_name)) # create key word argument dictionary delete_kwargs = { 'Bucket': bucket_name, 'Key': record_key } if record_version: delete_kwargs['VersionId'] = record_version # send request to delete record try: response = self.connection.delete_object(**delete_kwargs) except: raise AWSConnectionError(title) # report status response_details = { 'version_id': '' } response_details = self.iam.ingest(response, response_details) if 'response_metadata' in response_details.keys(): del response_details['response_metadata'] return response_details
python
def delete_record(self, bucket_name, record_key, record_version=''): ''' a method for deleting an object record in s3 :param bucket_name: string with name of bucket :param record_key: string with key value of record :param record_version: [optional] string with aws id of version of record :return: dictionary with status of delete request ''' title = '%s.delete_record' % self.__class__.__name__ # validate inputs input_fields = { 'bucket_name': bucket_name, 'record_key': record_key, 'record_version': record_version } for key, value in input_fields.items(): if value: object_title = '%s(%s=%s)' % (title, key, str(value)) self.fields.validate(value, '.%s' % key, object_title) # verify existence of bucket if not bucket_name in self.bucket_list: if not bucket_name in self.list_buckets(): raise ValueError('S3 bucket "%s" does not exist in aws region %s.' % (bucket_name, self.iam.region_name)) # create key word argument dictionary delete_kwargs = { 'Bucket': bucket_name, 'Key': record_key } if record_version: delete_kwargs['VersionId'] = record_version # send request to delete record try: response = self.connection.delete_object(**delete_kwargs) except: raise AWSConnectionError(title) # report status response_details = { 'version_id': '' } response_details = self.iam.ingest(response, response_details) if 'response_metadata' in response_details.keys(): del response_details['response_metadata'] return response_details
[ "def", "delete_record", "(", "self", ",", "bucket_name", ",", "record_key", ",", "record_version", "=", "''", ")", ":", "title", "=", "'%s.delete_record'", "%", "self", ".", "__class__", ".", "__name__", "# validate inputs", "input_fields", "=", "{", "'bucket_name'", ":", "bucket_name", ",", "'record_key'", ":", "record_key", ",", "'record_version'", ":", "record_version", "}", "for", "key", ",", "value", "in", "input_fields", ".", "items", "(", ")", ":", "if", "value", ":", "object_title", "=", "'%s(%s=%s)'", "%", "(", "title", ",", "key", ",", "str", "(", "value", ")", ")", "self", ".", "fields", ".", "validate", "(", "value", ",", "'.%s'", "%", "key", ",", "object_title", ")", "# verify existence of bucket", "if", "not", "bucket_name", "in", "self", ".", "bucket_list", ":", "if", "not", "bucket_name", "in", "self", ".", "list_buckets", "(", ")", ":", "raise", "ValueError", "(", "'S3 bucket \"%s\" does not exist in aws region %s.'", "%", "(", "bucket_name", ",", "self", ".", "iam", ".", "region_name", ")", ")", "# create key word argument dictionary", "delete_kwargs", "=", "{", "'Bucket'", ":", "bucket_name", ",", "'Key'", ":", "record_key", "}", "if", "record_version", ":", "delete_kwargs", "[", "'VersionId'", "]", "=", "record_version", "# send request to delete record", "try", ":", "response", "=", "self", ".", "connection", ".", "delete_object", "(", "*", "*", "delete_kwargs", ")", "except", ":", "raise", "AWSConnectionError", "(", "title", ")", "# report status", "response_details", "=", "{", "'version_id'", ":", "''", "}", "response_details", "=", "self", ".", "iam", ".", "ingest", "(", "response", ",", "response_details", ")", "if", "'response_metadata'", "in", "response_details", ".", "keys", "(", ")", ":", "del", "response_details", "[", "'response_metadata'", "]", "return", "response_details" ]
a method for deleting an object record in s3 :param bucket_name: string with name of bucket :param record_key: string with key value of record :param record_version: [optional] string with aws id of version of record :return: dictionary with status of delete request
[ "a", "method", "for", "deleting", "an", "object", "record", "in", "s3" ]
52949ece35e72e3cc308f54d9ffa6bfbd96805b8
https://github.com/collectiveacuity/labPack/blob/52949ece35e72e3cc308f54d9ffa6bfbd96805b8/labpack/storage/aws/s3.py#L1345-L1396
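delete_record is a thin wrapper over delete_object; a hedged boto3 equivalent with placeholder names (on a versioned bucket the call leaves a delete marker rather than erasing history):

```python
import boto3

s3 = boto3.client('s3')
# omitting VersionId deletes the current version (or adds a delete marker)
response = s3.delete_object(Bucket='example-bucket', Key='example-key')
print(response.get('VersionId'), response.get('DeleteMarker'))
```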
244,563
collectiveacuity/labPack
labpack/storage/aws/s3.py
_s3Client.import_records
def import_records(self, bucket_name, import_path='', overwrite=True): ''' a method to import records from local files to a bucket :param bucket_name: string with name of bucket :param import_path: [optional] string with path to root directory of files :param overwrite: [optional] boolean to overwrite existing files matching records :return: True ''' title = '%s.import_records' % self.__class__.__name__ # validate inputs input_fields = { 'bucket_name': bucket_name, 'import_path': import_path } for key, value in input_fields.items(): if value: object_title = '%s(%s=%s)' % (title, key, str(value)) self.fields.validate(value, '.%s' % key, object_title) # validate path from os import path if not import_path: import_path = './' if not path.exists(import_path): raise ValueError('%s(import_path="%s") is not a valid path.' % (title, import_path)) elif not path.isdir(import_path): raise ValueError('%s(import_path="%s") must be a directory.' % (title, import_path)) # verify existence of bucket if not bucket_name in self.bucket_list: if not bucket_name in self.list_buckets(): raise ValueError('S3 bucket "%s" does not exist in aws region %s.' % (bucket_name, self.iam.region_name)) # create records from walk of local path self.iam.printer('Importing records from path "%s" to bucket "%s".' % (import_path, bucket_name), flush=True) from labpack.platforms.localhost import localhostClient localhost_client = localhostClient() import_path = path.abspath(import_path) for file_path in localhost_client.walk(import_path): relative_path = path.relpath(file_path, import_path) try: byte_data = open(file_path, 'rb').read() self.create_record(bucket_name, relative_path, byte_data, overwrite=overwrite) self.iam.printer('.', flush=True) except ValueError as err: if str(err).find('already contains') > -1: self.iam.printer('.\n%s already exists. Record skipped. Continuing.' % relative_path, flush=True) else: raise except: raise AWSConnectionError(title) # report completion and return true self.iam.printer(' done.') return True
python
def import_records(self, bucket_name, import_path='', overwrite=True): ''' a method to import records from local files to a bucket :param bucket_name: string with name of bucket :param import_path: [optional] string with path to root directory of files :param overwrite: [optional] boolean to overwrite existing files matching records :return: True ''' title = '%s.import_records' % self.__class__.__name__ # validate inputs input_fields = { 'bucket_name': bucket_name, 'import_path': import_path } for key, value in input_fields.items(): if value: object_title = '%s(%s=%s)' % (title, key, str(value)) self.fields.validate(value, '.%s' % key, object_title) # validate path from os import path if not import_path: import_path = './' if not path.exists(import_path): raise ValueError('%s(import_path="%s") is not a valid path.' % (title, import_path)) elif not path.isdir(import_path): raise ValueError('%s(import_path="%s") must be a directory.' % (title, import_path)) # verify existence of bucket if not bucket_name in self.bucket_list: if not bucket_name in self.list_buckets(): raise ValueError('S3 bucket "%s" does not exist in aws region %s.' % (bucket_name, self.iam.region_name)) # create records from walk of local path self.iam.printer('Importing records from path "%s" to bucket "%s".' % (import_path, bucket_name), flush=True) from labpack.platforms.localhost import localhostClient localhost_client = localhostClient() import_path = path.abspath(import_path) for file_path in localhost_client.walk(import_path): relative_path = path.relpath(file_path, import_path) try: byte_data = open(file_path, 'rb').read() self.create_record(bucket_name, relative_path, byte_data, overwrite=overwrite) self.iam.printer('.', flush=True) except ValueError as err: if str(err).find('already contains') > -1: self.iam.printer('.\n%s already exists. Record skipped. Continuing.' % relative_path, flush=True) else: raise except: raise AWSConnectionError(title) # report completion and return true self.iam.printer(' done.') return True
[ "def", "import_records", "(", "self", ",", "bucket_name", ",", "import_path", "=", "''", ",", "overwrite", "=", "True", ")", ":", "title", "=", "'%s.import_records'", "%", "self", ".", "__class__", ".", "__name__", "# validate inputs", "input_fields", "=", "{", "'bucket_name'", ":", "bucket_name", ",", "'import_path'", ":", "import_path", "}", "for", "key", ",", "value", "in", "input_fields", ".", "items", "(", ")", ":", "if", "value", ":", "object_title", "=", "'%s(%s=%s)'", "%", "(", "title", ",", "key", ",", "str", "(", "value", ")", ")", "self", ".", "fields", ".", "validate", "(", "value", ",", "'.%s'", "%", "key", ",", "object_title", ")", "# validate path", "from", "os", "import", "path", "if", "not", "import_path", ":", "import_path", "=", "'./'", "if", "not", "path", ".", "exists", "(", "import_path", ")", ":", "raise", "ValueError", "(", "'%s(import_path=\"%s\") is not a valid path.'", "%", "(", "title", ",", "import_path", ")", ")", "elif", "not", "path", ".", "isdir", "(", "import_path", ")", ":", "raise", "ValueError", "(", "'%s(import_path=\"%s\") must be a directory.'", "%", "(", "title", ",", "import_path", ")", ")", "# verify existence of bucket", "if", "not", "bucket_name", "in", "self", ".", "bucket_list", ":", "if", "not", "bucket_name", "in", "self", ".", "list_buckets", "(", ")", ":", "raise", "ValueError", "(", "'S3 bucket \"%s\" does not exist in aws region %s.'", "%", "(", "bucket_name", ",", "self", ".", "iam", ".", "region_name", ")", ")", "# create records from walk of local path", "self", ".", "iam", ".", "printer", "(", "'Importing records from path \"%s\" to bucket \"%s\".'", "%", "(", "import_path", ",", "bucket_name", ")", ",", "flush", "=", "True", ")", "from", "labpack", ".", "platforms", ".", "localhost", "import", "localhostClient", "localhost_client", "=", "localhostClient", "(", ")", "import_path", "=", "path", ".", "abspath", "(", "import_path", ")", "for", "file_path", "in", "localhost_client", ".", "walk", "(", "import_path", ")", ":", "relative_path", "=", "path", ".", "relpath", "(", "file_path", ",", "import_path", ")", "try", ":", "byte_data", "=", "open", "(", "file_path", ",", "'rb'", ")", ".", "read", "(", ")", "self", ".", "create_record", "(", "bucket_name", ",", "relative_path", ",", "byte_data", ",", "overwrite", "=", "overwrite", ")", "self", ".", "iam", ".", "printer", "(", "'.'", ",", "flush", "=", "True", ")", "except", "ValueError", "as", "err", ":", "if", "str", "(", "err", ")", ".", "find", "(", "'already contains'", ")", ">", "-", "1", ":", "self", ".", "iam", ".", "printer", "(", "'.\\n%s already exists. Record skipped. Continuing.'", "%", "relative_path", ",", "flush", "=", "True", ")", "else", ":", "raise", "except", ":", "raise", "AWSConnectionError", "(", "title", ")", "# report completion and return true", "self", ".", "iam", ".", "printer", "(", "' done.'", ")", "return", "True" ]
a method to import records from local files to a bucket :param bucket_name: string with name of bucket :param import_path: [optional] string with path to root directory of files :param overwrite: [optional] boolean to overwrite existing files matching records :return: True
[ "a", "method", "to", "importing", "records", "from", "local", "files", "to", "a", "bucket" ]
52949ece35e72e3cc308f54d9ffa6bfbd96805b8
https://github.com/collectiveacuity/labPack/blob/52949ece35e72e3cc308f54d9ffa6bfbd96805b8/labpack/storage/aws/s3.py#L1490-L1548
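The import loop above walks a local directory and uploads each file under its relative path; a rough equivalent using os.walk in place of labPack's localhostClient.walk, with placeholder bucket and directory names:

```python
import os
import boto3

s3 = boto3.client('s3')
import_path = os.path.abspath('./data')

for root, _dirs, files in os.walk(import_path):
    for name in files:
        file_path = os.path.join(root, name)
        # key mirrors the file's path relative to the import root;
        # normalize separators so keys use '/' on every platform
        record_key = os.path.relpath(file_path, import_path).replace(os.sep, '/')
        with open(file_path, 'rb') as f:
            s3.put_object(Bucket='example-bucket', Key=record_key, Body=f.read())
```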
244,564
collectiveacuity/labPack
labpack/storage/aws/s3.py
s3Client.save
def save(self, record_key, record_data, overwrite=True, secret_key=''): ''' a method to create a file in the collection folder on S3 :param record_key: string with name to assign to record (see NOTES below) :param record_data: byte data for record body :param overwrite: [optional] boolean to overwrite records with same name :param secret_key: [optional] string with key to encrypt data :return: string with name of record NOTE: record_key may only contain alphanumeric, /, _, . or - characters and may not begin with the . or / character. NOTE: using one or more / characters splits the key into separate segments. these segments will appear as sub directories inside the record collection and each segment is used as a separate index for that record when using the list method eg. lab/unittests/1473719695.2165067.json is indexed: [ 'lab', 'unittests', '1473719695.2165067', '.json' ] ''' title = '%s.save' % self.__class__.__name__ # validate inputs input_fields = { 'record_key': record_key, 'secret_key': secret_key } for key, value in input_fields.items(): if value: object_title = '%s(%s=%s)' % (title, key, str(value)) self.fields.validate(value, '.%s' % key, object_title) # validate byte data if not isinstance(record_data, bytes): raise ValueError('%s(record_data=b"...") must be byte data.' % title) # encrypt data if secret_key: from labpack.encryption import cryptolab record_data, secret_key = cryptolab.encrypt(record_data, secret_key) # define keyword arguments from time import time create_kwargs = { 'bucket_name': self.bucket_name, 'record_key': record_key, 'record_data': record_data, 'overwrite': overwrite, 'record_metadata': { 'last_modified': str(time()) } } # add encryption metadata if secret_key: create_kwargs['record_metadata']['encryption'] = 'lab512' # add record mimetype and encoding import mimetypes guess_mimetype, guess_encoding = mimetypes.guess_type(record_key) if not guess_mimetype: if record_key.find('.yaml') > -1 or record_key.find('.yml') > -1: guess_mimetype = 'application/x-yaml' if record_key.find('.drep') > -1: guess_mimetype = 'application/x-drep' if guess_mimetype: create_kwargs['record_mimetype'] = guess_mimetype if guess_encoding: create_kwargs['record_encoding'] = guess_encoding # create record self.s3.create_record(**create_kwargs) return record_key
python
def save(self, record_key, record_data, overwrite=True, secret_key=''): ''' a method to create a file in the collection folder on S3 :param record_key: string with name to assign to record (see NOTES below) :param record_data: byte data for record body :param overwrite: [optional] boolean to overwrite records with same name :param secret_key: [optional] string with key to encrypt data :return: string with name of record NOTE: record_key may only contain alphanumeric, /, _, . or - characters and may not begin with the . or / character. NOTE: using one or more / characters splits the key into separate segments. these segments will appear as sub directories inside the record collection and each segment is used as a separate index for that record when using the list method eg. lab/unittests/1473719695.2165067.json is indexed: [ 'lab', 'unittests', '1473719695.2165067', '.json' ] ''' title = '%s.save' % self.__class__.__name__ # validate inputs input_fields = { 'record_key': record_key, 'secret_key': secret_key } for key, value in input_fields.items(): if value: object_title = '%s(%s=%s)' % (title, key, str(value)) self.fields.validate(value, '.%s' % key, object_title) # validate byte data if not isinstance(record_data, bytes): raise ValueError('%s(record_data=b"...") must be byte data.' % title) # encrypt data if secret_key: from labpack.encryption import cryptolab record_data, secret_key = cryptolab.encrypt(record_data, secret_key) # define keyword arguments from time import time create_kwargs = { 'bucket_name': self.bucket_name, 'record_key': record_key, 'record_data': record_data, 'overwrite': overwrite, 'record_metadata': { 'last_modified': str(time()) } } # add encryption metadata if secret_key: create_kwargs['record_metadata']['encryption'] = 'lab512' # add record mimetype and encoding import mimetypes guess_mimetype, guess_encoding = mimetypes.guess_type(record_key) if not guess_mimetype: if record_key.find('.yaml') > -1 or record_key.find('.yml') > -1: guess_mimetype = 'application/x-yaml' if record_key.find('.drep') > -1: guess_mimetype = 'application/x-drep' if guess_mimetype: create_kwargs['record_mimetype'] = guess_mimetype if guess_encoding: create_kwargs['record_encoding'] = guess_encoding # create record self.s3.create_record(**create_kwargs) return record_key
[ "def", "save", "(", "self", ",", "record_key", ",", "record_data", ",", "overwrite", "=", "True", ",", "secret_key", "=", "''", ")", ":", "title", "=", "'%s.save'", "%", "self", ".", "__class__", ".", "__name__", "# validate inputs", "input_fields", "=", "{", "'record_key'", ":", "record_key", ",", "'secret_key'", ":", "secret_key", "}", "for", "key", ",", "value", "in", "input_fields", ".", "items", "(", ")", ":", "if", "value", ":", "object_title", "=", "'%s(%s=%s)'", "%", "(", "title", ",", "key", ",", "str", "(", "value", ")", ")", "self", ".", "fields", ".", "validate", "(", "value", ",", "'.%s'", "%", "key", ",", "object_title", ")", "# validate byte data", "if", "not", "isinstance", "(", "record_data", ",", "bytes", ")", ":", "raise", "ValueError", "(", "'%s(record_data=b\"...\") must be byte data.'", "%", "title", ")", "# encrypt data", "if", "secret_key", ":", "from", "labpack", ".", "encryption", "import", "cryptolab", "record_data", ",", "secret_key", "=", "cryptolab", ".", "encrypt", "(", "record_data", ",", "secret_key", ")", "# define keyword arguments", "from", "time", "import", "time", "create_kwargs", "=", "{", "'bucket_name'", ":", "self", ".", "bucket_name", ",", "'record_key'", ":", "record_key", ",", "'record_data'", ":", "record_data", ",", "'overwrite'", ":", "overwrite", ",", "'record_metadata'", ":", "{", "'last_modified'", ":", "str", "(", "time", "(", ")", ")", "}", "}", "# add encryption metadata", "if", "secret_key", ":", "create_kwargs", "[", "'record_metadata'", "]", "[", "'encryption'", "]", "=", "'lab512'", "# add record mimetype and encoding", "import", "mimetypes", "guess_mimetype", ",", "guess_encoding", "=", "mimetypes", ".", "guess_type", "(", "record_key", ")", "if", "not", "guess_mimetype", ":", "if", "record_key", ".", "find", "(", "'.yaml'", ")", "or", "record_key", ".", "find", "(", "'.yml'", ")", ":", "guess_mimetype", "=", "'application/x-yaml'", "if", "record_key", ".", "find", "(", "'.drep'", ")", ":", "guess_mimetype", "=", "'application/x-drep'", "if", "guess_mimetype", ":", "create_kwargs", "[", "'record_mimetype'", "]", "=", "guess_mimetype", "if", "guess_encoding", ":", "create_kwargs", "[", "'record_encoding'", "]", "=", "guess_encoding", "# create record", "self", ".", "s3", ".", "create_record", "(", "*", "*", "create_kwargs", ")", "return", "record_key" ]
a method to create a file in the collection folder on S3 :param record_key: string with name to assign to record (see NOTES below) :param record_data: byte data for record body :param overwrite: [optional] boolean to overwrite records with same name :param secret_key: [optional] string with key to encrypt data :return: string with name of record NOTE: record_key may only contain alphanumeric, /, _, . or - characters and may not begin with the . or / character. NOTE: using one or more / characters splits the key into separate segments. these segments will appear as sub directories inside the record collection and each segment is used as a separate index for that record when using the list method eg. lab/unittests/1473719695.2165067.json is indexed: [ 'lab', 'unittests', '1473719695.2165067', '.json' ]
[ "a", "method", "to", "create", "a", "file", "in", "the", "collection", "folder", "on", "S3" ]
52949ece35e72e3cc308f54d9ffa6bfbd96805b8
https://github.com/collectiveacuity/labPack/blob/52949ece35e72e3cc308f54d9ffa6bfbd96805b8/labpack/storage/aws/s3.py#L1722-L1796
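The mimetype fallback in save exists because mimetypes.guess_type typically has no entry for .yaml or .drep files; the corrected comparison (str.find returns -1, which is truthy, when the substring is absent) can be checked in isolation:

```python
import mimetypes

for key in ['data/report.json', 'data/config.yaml', 'data/record.drep']:
    mimetype, encoding = mimetypes.guess_type(key)
    if not mimetype:
        # str.find returns -1 on a miss, so compare explicitly
        if key.find('.yaml') > -1 or key.find('.yml') > -1:
            mimetype = 'application/x-yaml'
        if key.find('.drep') > -1:
            mimetype = 'application/x-drep'
    print(key, mimetype, encoding)
```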
244,565
collectiveacuity/labPack
labpack/storage/aws/s3.py
s3Client.load
def load(self, record_key, secret_key=''): ''' a method to retrieve byte data of an S3 record :param record_key: string with name of record :param secret_key: [optional] string used to decrypt data :return: byte data for record body ''' title = '%s.load' % self.__class__.__name__ # validate inputs input_fields = { 'record_key': record_key, 'secret_key': secret_key } for key, value in input_fields.items(): if value: object_title = '%s(%s=%s)' % (title, key, str(value)) self.fields.validate(value, '.%s' % key, object_title) # retrieve record data from s3 record_data, record_metadata = self.s3.read_record(self.bucket_name, record_key) # validate secret key error_msg = '%s(secret_key="...") required to decrypt record "%s"' % (title, record_key) if 'encryption' in record_metadata['metadata'].keys(): if record_metadata['metadata']['encryption'] == 'lab512': if not secret_key: raise Exception(error_msg) else: self.s3.iam.printer('[WARNING]: %s uses unrecognized encryption method. Decryption skipped.' % record_key) secret_key = '' # decrypt (if necessary) if secret_key: from labpack.encryption import cryptolab record_data = cryptolab.decrypt(record_data, secret_key) return record_data
python
def load(self, record_key, secret_key=''): ''' a method to retrieve byte data of an S3 record :param record_key: string with name of record :param secret_key: [optional] string used to decrypt data :return: byte data for record body ''' title = '%s.load' % self.__class__.__name__ # validate inputs input_fields = { 'record_key': record_key, 'secret_key': secret_key } for key, value in input_fields.items(): if value: object_title = '%s(%s=%s)' % (title, key, str(value)) self.fields.validate(value, '.%s' % key, object_title) # retrieve record data from s3 record_data, record_metadata = self.s3.read_record(self.bucket_name, record_key) # validate secret key error_msg = '%s(secret_key="...") required to decrypt record "%s"' % (title, record_key) if 'encryption' in record_metadata['metadata'].keys(): if record_metadata['metadata']['encryption'] == 'lab512': if not secret_key: raise Exception(error_msg) else: self.s3.iam.printer('[WARNING]: %s uses unrecognized encryption method. Decryption skipped.' % record_key) secret_key = '' # decrypt (if necessary) if secret_key: from labpack.encryption import cryptolab record_data = cryptolab.decrypt(record_data, secret_key) return record_data
[ "def", "load", "(", "self", ",", "record_key", ",", "secret_key", "=", "''", ")", ":", "title", "=", "'%s.load'", "%", "self", ".", "__class__", ".", "__name__", "# validate inputs", "input_fields", "=", "{", "'record_key'", ":", "record_key", ",", "'secret_key'", ":", "secret_key", "}", "for", "key", ",", "value", "in", "input_fields", ".", "items", "(", ")", ":", "if", "value", ":", "object_title", "=", "'%s(%s=%s)'", "%", "(", "title", ",", "key", ",", "str", "(", "value", ")", ")", "self", ".", "fields", ".", "validate", "(", "value", ",", "'.%s'", "%", "key", ",", "object_title", ")", "# retrieve record data from s3", "record_data", ",", "record_metadata", "=", "self", ".", "s3", ".", "read_record", "(", "self", ".", "bucket_name", ",", "record_key", ")", "# validate secret key", "error_msg", "=", "'%s(secret_key=\"...\") required to decrypt record \"%s\"'", "%", "(", "title", ",", "record_key", ")", "if", "'encryption'", "in", "record_metadata", "[", "'metadata'", "]", ".", "keys", "(", ")", ":", "if", "record_metadata", "[", "'metadata'", "]", "[", "'encryption'", "]", "==", "'lab512'", ":", "if", "not", "secret_key", ":", "raise", "Exception", "(", "error_msg", ")", "else", ":", "self", ".", "s3", ".", "iam", ".", "printer", "(", "'[WARNING]: %s uses unrecognized encryption method. Decryption skipped.'", "%", "record_key", ")", "secret_key", "=", "''", "# decrypt (if necessary)", "if", "secret_key", ":", "from", "labpack", ".", "encryption", "import", "cryptolab", "record_data", "=", "cryptolab", ".", "decrypt", "(", "record_data", ",", "secret_key", ")", "return", "record_data" ]
a method to retrieve byte data of an S3 record :param record_key: string with name of record :param secret_key: [optional] string used to decrypt data :return: byte data for record body
[ "a", "method", "to", "retrieve", "byte", "data", "of", "an", "S3", "record" ]
52949ece35e72e3cc308f54d9ffa6bfbd96805b8
https://github.com/collectiveacuity/labPack/blob/52949ece35e72e3cc308f54d9ffa6bfbd96805b8/labpack/storage/aws/s3.py#L1798-L1838
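Inferring the cryptolab interface from the call sites in save and load above (encrypt returns the ciphertext together with the key actually used; decrypt reverses it), a round trip would look like the following; this is an assumption drawn from the shown calls, not documented API:

```python
from labpack.encryption import cryptolab

ciphertext, used_key = cryptolab.encrypt(b'{"test": true}', 'my-secret')
plaintext = cryptolab.decrypt(ciphertext, used_key)
assert plaintext == b'{"test": true}'
```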
244,566
collectiveacuity/labPack
labpack/storage/aws/s3.py
s3Client.list
def list(self, prefix='', delimiter='', filter_function=None, max_results=1, previous_key=''): ''' a method to list keys in the collection :param prefix: string with prefix value to filter results :param delimiter: string with value results must not contain (after prefix) :param filter_function: (positional arguments) function used to filter results :param max_results: integer with maximum number of results to return :param previous_key: string with key in collection to begin search after :return: list of key strings NOTE: each key string can be divided into one or more segments based upon the / characters which occur in the key string as well as its file extension type. if the key string represents a file path, then each directory in the path, the file name and the file extension are all separate indexed values. eg. lab/unittests/1473719695.2165067.json is indexed: [ 'lab', 'unittests', '1473719695.2165067', '.json' ] it is possible to filter the records in the collection according to one or more of these path segments using a filter_function. NOTE: the filter_function must be able to accept an array of positional arguments and return a value that can evaluate to true or false. while searching the records, list produces an array of strings which represent the directory structure in relative path of each key string. if a filter_function is provided, this list of strings is fed to the filter function. if the function evaluates this input and returns a true value the file will be included in the list results. ''' title = '%s.list' % self.__class__.__name__ # validate input input_fields = { 'prefix': prefix, 'delimiter': delimiter, 'max_results': max_results, 'record_key': previous_key } for key, value in input_fields.items(): if value: object_title = '%s(%s=%s)' % (title, key, str(value)) self.fields.validate(value, '.%s' % key, object_title) # construct default response results_list = [] # handle filter function filter if filter_function: # validate filter function try: path_segments = [ 'lab', 'unittests', '1473719695.2165067', '.json' ] filter_function(*path_segments) except: err_msg = '%s(filter_function=%s)' % (title, filter_function.__class__.__name__) raise TypeError('%s must accept positional arguments.' % err_msg) # construct keyword arguments list_kwargs = { 'bucket_name': self.bucket_name, 'prefix': prefix, 'delimiter': delimiter } # determine starting key starting_key = '1' if previous_key: previous_kwargs = {} previous_kwargs.update(**list_kwargs) previous_kwargs['max_results'] = 1 previous_kwargs['starting_key'] = previous_key search_list, next_key = self.s3.list_records(**previous_kwargs) list_kwargs['starting_key'] = next_key # iterate filter over collection import os while starting_key: search_list, starting_key = self.s3.list_records(**list_kwargs) for record in search_list: record_key = record['key'] path_segments = record_key.split(os.sep) if filter_function(*path_segments): results_list.append(record_key) if len(results_list) == max_results: return results_list # handle other filters else: # construct keyword arguments list_kwargs = { 'bucket_name': self.bucket_name, 'prefix': prefix, 'delimiter': delimiter, 'max_results': max_results } # determine starting key if previous_key: previous_kwargs = {} previous_kwargs.update(**list_kwargs) previous_kwargs['max_results'] = 1 previous_kwargs['starting_key'] = previous_key search_list, starting_key = self.s3.list_records(**previous_kwargs) list_kwargs['starting_key'] = starting_key # retrieve results search_list, starting_key = self.s3.list_records(**list_kwargs) # construct result list for record in search_list: results_list.append(record['key']) return results_list
python
def list(self, prefix='', delimiter='', filter_function=None, max_results=1, previous_key=''): ''' a method to list keys in the collection :param prefix: string with prefix value to filter results :param delimiter: string with value results must not contain (after prefix) :param filter_function: (positional arguments) function used to filter results :param max_results: integer with maximum number of results to return :param previous_key: string with key in collection to begin search after :return: list of key strings NOTE: each key string can be divided into one or more segments based upon the / characters which occur in the key string as well as its file extension type. if the key string represents a file path, then each directory in the path, the file name and the file extension are all separate indexed values. eg. lab/unittests/1473719695.2165067.json is indexed: [ 'lab', 'unittests', '1473719695.2165067', '.json' ] it is possible to filter the records in the collection according to one or more of these path segments using a filter_function. NOTE: the filter_function must be able to accept an array of positional arguments and return a value that can evaluate to true or false. while searching the records, list produces an array of strings which represent the directory structure in relative path of each key string. if a filter_function is provided, this list of strings is fed to the filter function. if the function evaluates this input and returns a true value the file will be included in the list results. ''' title = '%s.list' % self.__class__.__name__ # validate input input_fields = { 'prefix': prefix, 'delimiter': delimiter, 'max_results': max_results, 'record_key': previous_key } for key, value in input_fields.items(): if value: object_title = '%s(%s=%s)' % (title, key, str(value)) self.fields.validate(value, '.%s' % key, object_title) # construct default response results_list = [] # handle filter function filter if filter_function: # validate filter function try: path_segments = [ 'lab', 'unittests', '1473719695.2165067', '.json' ] filter_function(*path_segments) except: err_msg = '%s(filter_function=%s)' % (title, filter_function.__class__.__name__) raise TypeError('%s must accept positional arguments.' % err_msg) # construct keyword arguments list_kwargs = { 'bucket_name': self.bucket_name, 'prefix': prefix, 'delimiter': delimiter } # determine starting key starting_key = '1' if previous_key: previous_kwargs = {} previous_kwargs.update(**list_kwargs) previous_kwargs['max_results'] = 1 previous_kwargs['starting_key'] = previous_key search_list, next_key = self.s3.list_records(**previous_kwargs) list_kwargs['starting_key'] = next_key # iterate filter over collection import os while starting_key: search_list, starting_key = self.s3.list_records(**list_kwargs) for record in search_list: record_key = record['key'] path_segments = record_key.split(os.sep) if filter_function(*path_segments): results_list.append(record_key) if len(results_list) == max_results: return results_list # handle other filters else: # construct keyword arguments list_kwargs = { 'bucket_name': self.bucket_name, 'prefix': prefix, 'delimiter': delimiter, 'max_results': max_results } # determine starting key if previous_key: previous_kwargs = {} previous_kwargs.update(**list_kwargs) previous_kwargs['max_results'] = 1 previous_kwargs['starting_key'] = previous_key search_list, starting_key = self.s3.list_records(**previous_kwargs) list_kwargs['starting_key'] = starting_key # retrieve results search_list, starting_key = self.s3.list_records(**list_kwargs) # construct result list for record in search_list: results_list.append(record['key']) return results_list
[ "def", "list", "(", "self", ",", "prefix", "=", "''", ",", "delimiter", "=", "''", ",", "filter_function", "=", "None", ",", "max_results", "=", "1", ",", "previous_key", "=", "''", ")", ":", "title", "=", "'%s.list'", "%", "self", ".", "__class__", ".", "__name__", "# validate input", "input_fields", "=", "{", "'prefix'", ":", "prefix", ",", "'delimiter'", ":", "delimiter", ",", "'max_results'", ":", "max_results", ",", "'record_key'", ":", "previous_key", "}", "for", "key", ",", "value", "in", "input_fields", ".", "items", "(", ")", ":", "if", "value", ":", "object_title", "=", "'%s(%s=%s)'", "%", "(", "title", ",", "key", ",", "str", "(", "value", ")", ")", "self", ".", "fields", ".", "validate", "(", "value", ",", "'.%s'", "%", "key", ",", "object_title", ")", "# construct default response", "results_list", "=", "[", "]", "# handle filter function filter", "if", "filter_function", ":", "# validate filter function", "try", ":", "path_segments", "=", "[", "'lab'", ",", "'unittests'", ",", "'1473719695.2165067'", ",", "'.json'", "]", "filter_function", "(", "*", "path_segments", ")", "except", ":", "err_msg", "=", "'%s(filter_function=%s)'", "%", "(", "title", ",", "filter_function", ".", "__class__", ".", "__name__", ")", "raise", "TypeError", "(", "'%s must accept positional arguments.'", "%", "err_msg", ")", "# construct keyword arguments", "list_kwargs", "=", "{", "'bucket_name'", ":", "self", ".", "bucket_name", ",", "'prefix'", ":", "prefix", ",", "'delimiter'", ":", "delimiter", "}", "# determine starting key", "starting_key", "=", "'1'", "if", "previous_key", ":", "previous_kwargs", "=", "{", "}", "previous_kwargs", ".", "update", "(", "*", "*", "list_kwargs", ")", "previous_kwargs", "[", "'max_results'", "]", "=", "1", "previous_kwargs", "[", "'starting_key'", "]", "=", "previous_key", "search_list", ",", "next_key", "=", "self", ".", "s3", ".", "list_records", "(", "*", "*", "list_kwargs", ")", "list_kwargs", "[", "'starting_key'", "]", "=", "next_key", "# iterate filter over collection", "import", "os", "while", "starting_key", ":", "search_list", ",", "starting_key", "=", "self", ".", "s3", ".", "list_records", "(", "*", "*", "list_kwargs", ")", "for", "record", "in", "search_list", ":", "record_key", "=", "record", "[", "'key'", "]", "path_segments", "=", "record_key", ".", "split", "(", "os", ".", "sep", ")", "if", "filter_function", "(", "*", "path_segments", ")", ":", "results_list", ".", "append", "(", "record_key", ")", "if", "len", "(", "results_list", ")", "==", "max_results", ":", "return", "results_list", "# handle other filters", "else", ":", "# construct keyword arguments", "list_kwargs", "=", "{", "'bucket_name'", ":", "self", ".", "bucket_name", ",", "'prefix'", ":", "prefix", ",", "'delimiter'", ":", "delimiter", ",", "'max_results'", ":", "max_results", "}", "# determine starting key", "if", "previous_key", ":", "previous_kwargs", "=", "{", "}", "previous_kwargs", ".", "update", "(", "*", "*", "list_kwargs", ")", "previous_kwargs", "[", "'max_results'", "]", "=", "1", "previous_kwargs", "[", "'starting_key'", "]", "=", "previous_key", "search_list", ",", "starting_key", "=", "self", ".", "s3", ".", "list_records", "(", "*", "*", "list_kwargs", ")", "list_kwargs", "[", "'starting_key'", "]", "=", "starting_key", "# retrieve results ", "search_list", ",", "starting_key", "=", "self", ".", "s3", ".", "list_records", "(", "*", "*", "list_kwargs", ")", "# construct result list", "for", "record", "in", "search_list", ":", "results_list", ".", "append", "(", 
"record", "[", "'key'", "]", ")", "return", "results_list" ]
a method to list keys in the collection :param prefix: string with prefix value to filter results :param delimiter: string with value results must not contain (after prefix) :param filter_function: (positional arguments) function used to filter results :param max_results: integer with maximum number of results to return :param previous_key: string with key in collection to begin search after :return: list of key strings NOTE: each key string can be divided into one or more segments based upon the / characters which occur in the key string as well as its file extension type. if the key string represents a file path, then each directory in the path, the file name and the file extension are all separate indexed values. eg. lab/unittests/1473719695.2165067.json is indexed: [ 'lab', 'unittests', '1473719695.2165067', '.json' ] it is possible to filter the records in the collection according to one or more of these path segments using a filter_function. NOTE: the filter_function must be able to accept an array of positional arguments and return a value that can evaluate to true or false. while searching the records, list produces an array of strings which represent the directory structure in relative path of each key string. if a filter_function is provided, this list of strings is fed to the filter function. if the function evaluates this input and returns a true value the file will be included in the list results.
[ "a", "method", "to", "list", "keys", "in", "the", "collection" ]
52949ece35e72e3cc308f54d9ffa6bfbd96805b8
https://github.com/collectiveacuity/labPack/blob/52949ece35e72e3cc308f54d9ffa6bfbd96805b8/labpack/storage/aws/s3.py#L1873-L1991
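Per the docstring, a filter_function receives each key's path segments as positional arguments and returns a truthy value to keep the record; a small example in that shape (the commented usage line assumes an existing collection client and is hypothetical):

```python
def json_unittests_only(*segments):
    # keep keys whose second segment is 'unittests' and that end in .json
    return len(segments) >= 2 and segments[1] == 'unittests' \
        and segments[-1].endswith('.json')

print(json_unittests_only('lab', 'unittests', '1473719695.2165067', '.json'))  # True
# keys = collection.list(filter_function=json_unittests_only, max_results=10)
```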
244,567
collectiveacuity/labPack
labpack/storage/aws/s3.py
s3Client.delete
def delete(self, record_key): ''' a method to delete a record from S3 :param record_key: string with key of record :return: string reporting outcome ''' title = '%s.delete' % self.__class__.__name__ # validate inputs input_fields = { 'record_key': record_key } for key, value in input_fields.items(): object_title = '%s(%s=%s)' % (title, key, str(value)) self.fields.validate(value, '.%s' % key, object_title) # delete record try: self.s3.delete_record(self.bucket_name, record_key) except: if not self.exists(record_key): exit_msg = '%s does not exist.' % record_key return exit_msg raise exit_msg = '%s has been deleted.' % record_key return exit_msg
python
def delete(self, record_key): ''' a method to delete a record from S3 :param record_key: string with key of record :return: string reporting outcome ''' title = '%s.delete' % self.__class__.__name__ # validate inputs input_fields = { 'record_key': record_key } for key, value in input_fields.items(): object_title = '%s(%s=%s)' % (title, key, str(value)) self.fields.validate(value, '.%s' % key, object_title) # delete record try: self.s3.delete_record(self.bucket_name, record_key) except: if not self.exists(record_key): exit_msg = '%s does not exist.' % record_key return exit_msg raise exit_msg = '%s has been deleted.' % record_key return exit_msg
[ "def", "delete", "(", "self", ",", "record_key", ")", ":", "title", "=", "'%s.delete'", "%", "self", ".", "__class__", ".", "__name__", "# validate inputs", "input_fields", "=", "{", "'record_key'", ":", "record_key", "}", "for", "key", ",", "value", "in", "input_fields", ".", "items", "(", ")", ":", "object_title", "=", "'%s(%s=%s)'", "%", "(", "title", ",", "key", ",", "str", "(", "value", ")", ")", "self", ".", "fields", ".", "validate", "(", "value", ",", "'.%s'", "%", "key", ",", "object_title", ")", "# delete record", "try", ":", "self", ".", "s3", ".", "delete_record", "(", "self", ".", "bucket_name", ",", "record_key", ")", "except", ":", "if", "not", "self", ".", "exists", "(", "record_key", ")", ":", "exit_msg", "=", "'%s does not exist.'", "%", "record_key", "return", "exit_msg", "raise", "exit_msg", "=", "'%s has been deleted.'", "%", "record_key", "return", "exit_msg" ]
a method to delete a record from S3 :param record_key: string with key of record :return: string reporting outcome
[ "a", "method", "to", "delete", "a", "record", "from", "S3" ]
52949ece35e72e3cc308f54d9ffa6bfbd96805b8
https://github.com/collectiveacuity/labPack/blob/52949ece35e72e3cc308f54d9ffa6bfbd96805b8/labpack/storage/aws/s3.py#L1993-L2021
244,568
collectiveacuity/labPack
labpack/storage/aws/s3.py
s3Client.remove
def remove(self): ''' a method to remove collection and all records in the collection :return: string with confirmation of deletion ''' title = '%s.remove' % self.__class__.__name__ # request bucket delete self.s3.delete_bucket(self.bucket_name) # return confirmation exit_msg = '%s collection has been removed from S3.' % self.bucket_name return exit_msg
python
def remove(self): ''' a method to remove collection and all records in the collection :return: string with confirmation of deletion ''' title = '%s.remove' % self.__class__.__name__ # request bucket delete self.s3.delete_bucket(self.bucket_name) # return confirmation exit_msg = '%s collection has been removed from S3.' % self.bucket_name return exit_msg
[ "def", "remove", "(", "self", ")", ":", "title", "=", "'%s.remove'", "%", "self", ".", "__class__", ".", "__name__", "# request bucket delete ", "self", ".", "s3", ".", "delete_bucket", "(", "self", ".", "bucket_name", ")", "# return confirmation", "exit_msg", "=", "'%s collection has been removed from S3.'", "%", "self", ".", "bucket_name", "return", "exit_msg" ]
a method to remove collection and all records in the collection :return: string with confirmation of deletion
[ "a", "method", "to", "remove", "collection", "and", "all", "records", "in", "the", "collection" ]
52949ece35e72e3cc308f54d9ffa6bfbd96805b8
https://github.com/collectiveacuity/labPack/blob/52949ece35e72e3cc308f54d9ffa6bfbd96805b8/labpack/storage/aws/s3.py#L2023-L2038
244,569
nefarioustim/parker
parker/crawlpage.py
CrawlPage.get_uris
def get_uris(self, base_uri, filter_list=None): """Return a set of internal URIs.""" return { re.sub(r'^/', base_uri, link.attrib['href']) for link in self.parsedpage.get_nodes_by_selector('a') if 'href' in link.attrib and ( link.attrib['href'].startswith(base_uri) or link.attrib['href'].startswith('/') ) and not is_uri_to_be_filtered(link.attrib['href'], filter_list) }
python
def get_uris(self, base_uri, filter_list=None): """Return a set of internal URIs.""" return { re.sub(r'^/', base_uri, link.attrib['href']) for link in self.parsedpage.get_nodes_by_selector('a') if 'href' in link.attrib and ( link.attrib['href'].startswith(base_uri) or link.attrib['href'].startswith('/') ) and not is_uri_to_be_filtered(link.attrib['href'], filter_list) }
[ "def", "get_uris", "(", "self", ",", "base_uri", ",", "filter_list", "=", "None", ")", ":", "return", "{", "re", ".", "sub", "(", "r'^/'", ",", "base_uri", ",", "link", ".", "attrib", "[", "'href'", "]", ")", "for", "link", "in", "self", ".", "parsedpage", ".", "get_nodes_by_selector", "(", "'a'", ")", "if", "'href'", "in", "link", ".", "attrib", "and", "(", "link", ".", "attrib", "[", "'href'", "]", ".", "startswith", "(", "base_uri", ")", "or", "link", ".", "attrib", "[", "'href'", "]", ".", "startswith", "(", "'/'", ")", ")", "and", "not", "is_uri_to_be_filtered", "(", "link", ".", "attrib", "[", "'href'", "]", ",", "filter_list", ")", "}" ]
Return a set of internal URIs.
[ "Return", "a", "set", "of", "internal", "URIs", "." ]
ccc1de1ac6bfb5e0a8cfa4fdebb2f38f2ee027d6
https://github.com/nefarioustim/parker/blob/ccc1de1ac6bfb5e0a8cfa4fdebb2f38f2ee027d6/parker/crawlpage.py#L53-L63
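The href rewrite in get_uris replaces a leading '/' with the base URI, so root-relative links become absolute; note this presumes base_uri carries its own trailing slash. A quick demonstration with placeholder URLs:

```python
import re

base_uri = 'http://example.com/'
for href in ['/about', 'http://example.com/contact', '/blog/post-1']:
    print(re.sub(r'^/', base_uri, href))
# http://example.com/about
# http://example.com/contact  (already absolute, unchanged)
# http://example.com/blog/post-1
```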
244,570
lwcook/horsetail-matching
horsetailmatching/demoproblems.py
TP0
def TP0(dv, u): '''Demo problem 0 for horsetail matching, takes two input vectors of any size and returns a single output''' return np.linalg.norm(np.array(dv)) + np.linalg.norm(np.array(u))
python
def TP0(dv, u): '''Demo problem 0 for horsetail matching, takes two input vectors of any size and returns a single output''' return np.linalg.norm(np.array(dv)) + np.linalg.norm(np.array(u))
[ "def", "TP0", "(", "dv", ",", "u", ")", ":", "return", "np", ".", "linalg", ".", "norm", "(", "np", ".", "array", "(", "dv", ")", ")", "+", "np", ".", "linalg", ".", "norm", "(", "np", ".", "array", "(", "u", ")", ")" ]
Demo problem 0 for horsetail matching, takes two input vectors of any size and returns a single output
[ "Demo", "problem", "0", "for", "horsetail", "matching", "takes", "two", "input", "vectors", "of", "any", "size", "and", "returns", "a", "single", "output" ]
f3d5f8d01249debbca978f412ce4eae017458119
https://github.com/lwcook/horsetail-matching/blob/f3d5f8d01249debbca978f412ce4eae017458119/horsetailmatching/demoproblems.py#L3-L6
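A quick usage sketch of TP0, with inputs chosen so the norms are easy to check by hand (3-4-5 and 5-12-13 right triangles):

```python
import numpy as np

def TP0(dv, u):
    return np.linalg.norm(np.array(dv)) + np.linalg.norm(np.array(u))

# ||(3, 4)|| + ||(5, 12)|| = 5.0 + 13.0 = 18.0
print(TP0([3, 4], [5, 12]))  # 18.0
```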
244,571
lwcook/horsetail-matching
horsetailmatching/demoproblems.py
TP1
def TP1(x, u, jac=False): '''Demo problem 1 for horsetail matching, takes two input vectors of size 2 and returns just the qoi if jac is False or the qoi and its gradient if jac is True''' factor = 0.1*(u[0]**2 + 2*u[0]*u[1] + u[1]**2) q = 0 + factor*(x[0]**2 + 2*x[1]*x[0] + x[1]**2) if not jac: return q else: grad = [factor*(2*x[0] + 2*x[1]), factor*(2*x[0] + 2*x[1])] return q, grad
python
def TP1(x, u, jac=False): '''Demo problem 1 for horsetail matching, takes two input vectors of size 2 and returns just the qoi if jac is False or the qoi and its gradient if jac is True''' factor = 0.1*(u[0]**2 + 2*u[0]*u[1] + u[1]**2) q = 0 + factor*(x[0]**2 + 2*x[1]*x[0] + x[1]**2) if not jac: return q else: grad = [factor*(2*x[0] + 2*x[1]), factor*(2*x[0] + 2*x[1])] return q, grad
[ "def", "TP1", "(", "x", ",", "u", ",", "jac", "=", "False", ")", ":", "factor", "=", "0.1", "*", "(", "u", "[", "0", "]", "**", "2", "+", "2", "*", "u", "[", "0", "]", "*", "u", "[", "1", "]", "+", "u", "[", "1", "]", "**", "2", ")", "q", "=", "0", "+", "factor", "*", "(", "x", "[", "0", "]", "**", "2", "+", "2", "*", "x", "[", "1", "]", "*", "x", "[", "0", "]", "+", "x", "[", "1", "]", "**", "2", ")", "if", "not", "jac", ":", "return", "q", "else", ":", "grad", "=", "[", "factor", "*", "(", "2", "*", "x", "[", "0", "]", "+", "2", "*", "x", "[", "1", "]", ")", ",", "factor", "*", "(", "2", "*", "x", "[", "0", "]", "+", "2", "*", "x", "[", "1", "]", ")", "]", "return", "q", ",", "grad" ]
Demo problem 1 for horsetail matching, takes two input vectors of size 2 and returns just the qoi if jac is False or the qoi and its gradient if jac is True
[ "Demo", "problem", "1", "for", "horsetail", "matching", "takes", "two", "input", "vectors", "of", "size", "2", "and", "returns", "just", "the", "qoi", "if", "jac", "is", "False", "or", "the", "qoi", "and", "its", "gradient", "if", "jac", "is", "True" ]
f3d5f8d01249debbca978f412ce4eae017458119
https://github.com/lwcook/horsetail-matching/blob/f3d5f8d01249debbca978f412ce4eae017458119/horsetailmatching/demoproblems.py#L8-L18
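A small sanity check for TP1's analytic gradient against a forward finite difference; the test point and step size are arbitrary choices for illustration:

```python
def TP1(x, u, jac=False):
    factor = 0.1 * (u[0]**2 + 2*u[0]*u[1] + u[1]**2)
    q = factor * (x[0]**2 + 2*x[1]*x[0] + x[1]**2)
    if not jac:
        return q
    return q, [factor * (2*x[0] + 2*x[1]), factor * (2*x[0] + 2*x[1])]

x, u, h = [1.0, 2.0], [0.5, 0.3], 1e-6
q, grad = TP1(x, u, jac=True)
fd = [(TP1([x[0] + h, x[1]], u) - q) / h,
      (TP1([x[0], x[1] + h], u) - q) / h]
print(grad)  # [0.384, 0.384] -- analytic gradient
print(fd)    # ~[0.384, 0.384] -- finite-difference estimate agrees
```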
244,572
lwcook/horsetail-matching
horsetailmatching/demoproblems.py
TP2
def TP2(dv, u, jac=False): '''Demo problem 2 for horsetail matching, takes two input vectors of size 2 and returns just the qoi if jac is False or the qoi and its gradient if jac is True''' y = dv[0]/2. z = dv[1]/2. + 12 q = 0.25*((y**2 + z**2)/10 + 5*u[0]*u[1] - z*u[1]**2) + 0.2*z*u[1]**3 + 7 if not jac: return q else: dqdx1 = (1./8.)*( 2*y/10. ) dqdx2 = (1./8.)*( 2*z/10. - u[1]**2) + 0.1*u[1]**3 return q, [dqdx1, dqdx2]
python
def TP2(dv, u, jac=False): '''Demo problem 2 for horsetail matching, takes two input vectors of size 2 and returns just the qoi if jac is False or the qoi and its gradient if jac is True''' y = dv[0]/2. z = dv[1]/2. + 12 q = 0.25*((y**2 + z**2)/10 + 5*u[0]*u[1] - z*u[1]**2) + 0.2*z*u[1]**3 + 7 if not jac: return q else: dqdx1 = (1./8.)*( 2*y/10. ) dqdx2 = (1./8.)*( 2*z/10. - u[1]**2) + 0.1*u[1]**3 return q, [dqdx1, dqdx2]
[ "def", "TP2", "(", "dv", ",", "u", ",", "jac", "=", "False", ")", ":", "y", "=", "dv", "[", "0", "]", "/", "2.", "z", "=", "dv", "[", "1", "]", "/", "2.", "+", "12", "q", "=", "0.25", "*", "(", "(", "y", "**", "2", "+", "z", "**", "2", ")", "/", "10", "+", "5", "*", "u", "[", "0", "]", "*", "u", "[", "1", "]", "-", "z", "*", "u", "[", "1", "]", "**", "2", ")", "+", "0.2", "*", "z", "*", "u", "[", "1", "]", "**", "3", "+", "7", "if", "not", "jac", ":", "return", "q", "else", ":", "dqdx1", "=", "(", "1.", "/", "8.", ")", "*", "(", "2", "*", "y", "/", "10.", ")", "dqdx2", "=", "(", "1.", "/", "8.", ")", "*", "(", "2", "*", "z", "/", "10.", "-", "u", "[", "1", "]", "**", "2", ")", "+", "0.1", "*", "u", "[", "1", "]", "**", "3", "return", "q", ",", "[", "dqdx1", ",", "dqdx2", "]" ]
Demo problem 2 for horsetail matching, takes two input vectors of size 2 and returns just the qoi if jac is False or the qoi and its gradient if jac is True
[ "Demo", "problem", "2", "for", "horsetail", "matching", "takes", "two", "input", "vectors", "of", "size", "2", "and", "returns", "just", "the", "qoi", "if", "jac", "is", "False", "or", "the", "qoi", "and", "its", "gradient", "if", "jac", "is", "True" ]
f3d5f8d01249debbca978f412ce4eae017458119
https://github.com/lwcook/horsetail-matching/blob/f3d5f8d01249debbca978f412ce4eae017458119/horsetailmatching/demoproblems.py#L20-L34
244,573
lwcook/horsetail-matching
horsetailmatching/demoproblems.py
TP3
def TP3(x, u, jac=False): '''Demo problem 3 for horsetail matching, takes two input values of size 1''' q = 2 + 0.5*x + 1.5*(1-x)*u if not jac: return q else: grad = 0.5 - 1.5*u return q, grad
python
def TP3(x, u, jac=False): '''Demo problem 3 for horsetail matching, takes two input values of size 1''' q = 2 + 0.5*x + 1.5*(1-x)*u if not jac: return q else: grad = 0.5 - 1.5*u return q, grad
[ "def", "TP3", "(", "x", ",", "u", ",", "jac", "=", "False", ")", ":", "q", "=", "2", "+", "0.5", "*", "x", "+", "1.5", "*", "(", "1", "-", "x", ")", "*", "u", "if", "not", "jac", ":", "return", "q", "else", ":", "grad", "=", "0.5", "-", "1.5", "*", "u", "return", "q", ",", "grad" ]
Demo problem 3 for horsetail matching, takes two input values of size 1
[ "Demo", "problem", "1", "for", "horsetail", "matching", "takes", "two", "input", "values", "of", "size", "1" ]
f3d5f8d01249debbca978f412ce4eae017458119
https://github.com/lwcook/horsetail-matching/blob/f3d5f8d01249debbca978f412ce4eae017458119/horsetailmatching/demoproblems.py#L53-L62
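The same finite-difference sanity check for TP3; note the gradient 0.5 - 1.5*u does not depend on x:

```python
def TP3(x, u, jac=False):
    q = 2 + 0.5*x + 1.5*(1 - x)*u
    if not jac:
        return q
    return q, 0.5 - 1.5*u

# dq/dx = 0.5 - 1.5*u, independent of x
h = 1e-6
q, grad = TP3(1.0, 0.2, jac=True)
print(grad)                         # 0.2
print((TP3(1.0 + h, 0.2) - q) / h)  # ~0.2
```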
244,574
sassoo/goldman
goldman/stores/base.py
Cache.get
def get(self, key, bucket): """ Get a cached item by key If the cached item isn't found, then return None. """ try: return self._cache[bucket][key] except (KeyError, TypeError): return None
python
def get(self, key, bucket): """ Get a cached item by key If the cached item isn't found, then return None. """ try: return self._cache[bucket][key] except (KeyError, TypeError): return None
[ "def", "get", "(", "self", ",", "key", ",", "bucket", ")", ":", "try", ":", "return", "self", ".", "_cache", "[", "bucket", "]", "[", "key", "]", "except", "(", "KeyError", ",", "TypeError", ")", ":", "return", "None" ]
Get a cached item by key If the cached item isn't found, then return None.
[ "Get", "a", "cached", "item", "by", "key" ]
b72540c9ad06b5c68aadb1b4fa8cb0b716260bf2
https://github.com/sassoo/goldman/blob/b72540c9ad06b5c68aadb1b4fa8cb0b716260bf2/goldman/stores/base.py#L24-L33
244,575
sassoo/goldman
goldman/stores/base.py
Cache.set
def set(self, key, val, bucket): """ Set a cached item by key WARN: Regardless of whether the item is already in the cache, it will be updated with the new value. """ if bucket not in self._cache: self._cache[bucket] = {} self._cache[bucket][key] = val
python
def set(self, key, val, bucket): """ Set a cached item by key WARN: Regardless of whether the item is already in the cache, it will be updated with the new value. """ if bucket not in self._cache: self._cache[bucket] = {} self._cache[bucket][key] = val
[ "def", "set", "(", "self", ",", "key", ",", "val", ",", "bucket", ")", ":", "if", "bucket", "not", "in", "self", ".", "_cache", ":", "self", ".", "_cache", "[", "bucket", "]", "=", "{", "}", "self", ".", "_cache", "[", "bucket", "]", "[", "key", "]", "=", "val" ]
Set a cached item by key WARN: Regardless of whether the item is already in the cache, it will be updated with the new value.
[ "Set", "a", "cached", "item", "by", "key" ]
b72540c9ad06b5c68aadb1b4fa8cb0b716260bf2
https://github.com/sassoo/goldman/blob/b72540c9ad06b5c68aadb1b4fa8cb0b716260bf2/goldman/stores/base.py#L35-L45
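A self-contained sketch combining Cache.get and Cache.set from the two records above; the __init__ with an empty backing dict is an assumption, since only the two methods appear in this dataset:

```python
class Cache(object):
    def __init__(self):
        # backing dict of buckets is assumed; only get/set appear above
        self._cache = {}

    def get(self, key, bucket):
        try:
            return self._cache[bucket][key]
        except (KeyError, TypeError):
            return None

    def set(self, key, val, bucket):
        if bucket not in self._cache:
            self._cache[bucket] = {}
        self._cache[bucket][key] = val

cache = Cache()
print(cache.get('user:1', 'users'))           # None -- cache miss
cache.set('user:1', {'name': 'ann'}, 'users')
print(cache.get('user:1', 'users'))           # {'name': 'ann'}
cache.set('user:1', {'name': 'amy'}, 'users')
print(cache.get('user:1', 'users'))           # {'name': 'amy'} -- overwritten
```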
244,576
stephanepechard/projy
projy/templates/ProjyTemplate.py
ProjyTemplate.create
def create(self, project_name, template_name, substitutions): """ Launch the project creation. """ self.project_name = project_name self.template_name = template_name # create substitutions dictionary from user arguments # TODO: check what is given for subs in substitutions: current_sub = subs.split(',') current_key = current_sub[0].strip() current_val = current_sub[1].strip() self.substitutes_dict[current_key] = current_val self.term.print_info(u"Creating project '{0}' with template {1}" .format(self.term.text_in_color(project_name, TERM_PINK), template_name)) self.make_directories() self.make_files() self.make_posthook()
python
def create(self, project_name, template_name, substitutions): """ Launch the project creation. """ self.project_name = project_name self.template_name = template_name # create substitutions dictionary from user arguments # TODO: check what is given for subs in substitutions: current_sub = subs.split(',') current_key = current_sub[0].strip() current_val = current_sub[1].strip() self.substitutes_dict[current_key] = current_val self.term.print_info(u"Creating project '{0}' with template {1}" .format(self.term.text_in_color(project_name, TERM_PINK), template_name)) self.make_directories() self.make_files() self.make_posthook()
[ "def", "create", "(", "self", ",", "project_name", ",", "template_name", ",", "substitutions", ")", ":", "self", ".", "project_name", "=", "project_name", "self", ".", "template_name", "=", "template_name", "# create substitutions dictionary from user arguments", "# TODO: check what is given", "for", "subs", "in", "substitutions", ":", "current_sub", "=", "subs", ".", "split", "(", "','", ")", "current_key", "=", "current_sub", "[", "0", "]", ".", "strip", "(", ")", "current_val", "=", "current_sub", "[", "1", "]", ".", "strip", "(", ")", "self", ".", "substitutes_dict", "[", "current_key", "]", "=", "current_val", "self", ".", "term", ".", "print_info", "(", "u\"Creating project '{0}' with template {1}\"", ".", "format", "(", "self", ".", "term", ".", "text_in_color", "(", "project_name", ",", "TERM_PINK", ")", ",", "template_name", ")", ")", "self", ".", "make_directories", "(", ")", "self", ".", "make_files", "(", ")", "self", ".", "make_posthook", "(", ")" ]
Launch the project creation.
[ "Launch", "the", "project", "creation", "." ]
3146b0e3c207b977e1b51fcb33138746dae83c23
https://github.com/stephanepechard/projy/blob/3146b0e3c207b977e1b51fcb33138746dae83c23/projy/templates/ProjyTemplate.py#L27-L45
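A sketch of the substitution parsing done inside ProjyTemplate.create, isolated as a plain function; splitting on the first comma only is a small hardening over the original [0]/[1] indexing, not the repo's actual code:

```python
def parse_substitutions(substitutions):
    # Each item is "key, value"; split once and strip whitespace,
    # so values that themselves contain commas survive intact.
    subs = {}
    for item in substitutions:
        key, val = item.split(',', 1)
        subs[key.strip()] = val.strip()
    return subs

print(parse_substitutions(['author, Jane Doe', 'license, BSD']))
# {'author': 'Jane Doe', 'license': 'BSD'}
```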
244,577
stephanepechard/projy
projy/templates/ProjyTemplate.py
ProjyTemplate.make_directories
def make_directories(self): """ Create the directories of the template. """ # get the directories from the template directories = [] try: directories = self.directories() except AttributeError: self.term.print_info(u"No directory in the template.") working_dir = os.getcwd() # iteratively create the directories for directory in directories: dir_name = working_dir + '/' + directory if not os.path.isdir(dir_name): try: os.makedirs(dir_name) except OSError as error: if error.errno != errno.EEXIST: raise else: self.term.print_error_and_exit(u"The directory {0} already exists." .format(directory)) self.term.print_info(u"Creating directory '{0}'" .format(self.term.text_in_color(directory, TERM_GREEN)))
python
def make_directories(self): """ Create the directories of the template. """ # get the directories from the template directories = [] try: directories = self.directories() except AttributeError: self.term.print_info(u"No directory in the template.") working_dir = os.getcwd() # iteratively create the directories for directory in directories: dir_name = working_dir + '/' + directory if not os.path.isdir(dir_name): try: os.makedirs(dir_name) except OSError as error: if error.errno != errno.EEXIST: raise else: self.term.print_error_and_exit(u"The directory {0} already exists." .format(directory)) self.term.print_info(u"Creating directory '{0}'" .format(self.term.text_in_color(directory, TERM_GREEN)))
[ "def", "make_directories", "(", "self", ")", ":", "# get the directories from the template", "directories", "=", "[", "]", "try", ":", "directories", "=", "self", ".", "directories", "(", ")", "except", "AttributeError", ":", "self", ".", "term", ".", "print_info", "(", "u\"No directory in the template.\"", ")", "working_dir", "=", "os", ".", "getcwd", "(", ")", "# iteratively create the directories", "for", "directory", "in", "directories", ":", "dir_name", "=", "working_dir", "+", "'/'", "+", "directory", "if", "not", "os", ".", "path", ".", "isdir", "(", "dir_name", ")", ":", "try", ":", "os", ".", "makedirs", "(", "dir_name", ")", "except", "OSError", "as", "error", ":", "if", "error", ".", "errno", "!=", "errno", ".", "EEXIST", ":", "raise", "else", ":", "self", ".", "term", ".", "print_error_and_exit", "(", "u\"The directory {0} already exists.\"", ".", "format", "(", "directory", ")", ")", "self", ".", "term", ".", "print_info", "(", "u\"Creating directory '{0}'\"", ".", "format", "(", "self", ".", "term", ".", "text_in_color", "(", "directory", ",", "TERM_GREEN", ")", ")", ")" ]
Create the directories of the template.
[ "Create", "the", "directories", "of", "the", "template", "." ]
3146b0e3c207b977e1b51fcb33138746dae83c23
https://github.com/stephanepechard/projy/blob/3146b0e3c207b977e1b51fcb33138746dae83c23/projy/templates/ProjyTemplate.py#L48-L73
244,578
stephanepechard/projy
projy/templates/ProjyTemplate.py
ProjyTemplate.make_files
def make_files(self): """ Create the files of the template. """ # get the files from the template files = [] try: files = self.files() except AttributeError: self.term.print_info(u"No file in the template. Weird, but why not?") # get the substitutes intersecting the template and the cli try: for key in self.substitutes().keys(): if key not in self.substitutes_dict: self.substitutes_dict[key] = self.substitutes()[key] except AttributeError: self.term.print_info(u"No substitute in the template.") working_dir = os.getcwd() # iteratively create the files for directory, name, template_file in files: if directory: filepath = working_dir + '/' + directory + '/' + name filename = directory + '/' + name else: filepath = working_dir + '/' + name filename = name # open the file to write into try: output = open(filepath, 'w') except IOError: self.term.print_error_and_exit(u"Can't create destination"\ " file: {0}".format(filepath)) # open the template to read from if template_file: input_file = join(dirname(__file__), template_file + '.txt') # write each line of input file into output file, # templatized with substitutes try: with open(input_file, 'r') as line: template_line = Template(line.read()) try: output.write(template_line. safe_substitute(self.substitutes_dict).encode('utf-8')) except TypeError: output.write(template_line. safe_substitute(self.substitutes_dict)) output.close() except IOError: self.term.print_error_and_exit(u"Can't create template file"\ ": {0}".format(input_file)) else: output.close() # the file is empty, but still created self.term.print_info(u"Creating file '{0}'" .format(self.term.text_in_color(filename, TERM_YELLOW)))
python
def make_files(self): """ Create the files of the template. """ # get the files from the template files = [] try: files = self.files() except AttributeError: self.term.print_info(u"No file in the template. Weird, but why not?") # get the substitutes intersecting the template and the cli try: for key in self.substitutes().keys(): if key not in self.substitutes_dict: self.substitutes_dict[key] = self.substitutes()[key] except AttributeError: self.term.print_info(u"No substitute in the template.") working_dir = os.getcwd() # iteratively create the files for directory, name, template_file in files: if directory: filepath = working_dir + '/' + directory + '/' + name filename = directory + '/' + name else: filepath = working_dir + '/' + name filename = name # open the file to write into try: output = open(filepath, 'w') except IOError: self.term.print_error_and_exit(u"Can't create destination"\ " file: {0}".format(filepath)) # open the template to read from if template_file: input_file = join(dirname(__file__), template_file + '.txt') # write each line of input file into output file, # templatized with substitutes try: with open(input_file, 'r') as line: template_line = Template(line.read()) try: output.write(template_line. safe_substitute(self.substitutes_dict).encode('utf-8')) except TypeError: output.write(template_line. safe_substitute(self.substitutes_dict)) output.close() except IOError: self.term.print_error_and_exit(u"Can't create template file"\ ": {0}".format(input_file)) else: output.close() # the file is empty, but still created self.term.print_info(u"Creating file '{0}'" .format(self.term.text_in_color(filename, TERM_YELLOW)))
[ "def", "make_files", "(", "self", ")", ":", "# get the files from the template", "files", "=", "[", "]", "try", ":", "files", "=", "self", ".", "files", "(", ")", "except", "AttributeError", ":", "self", ".", "term", ".", "print_info", "(", "u\"No file in the template. Weird, but why not?\"", ")", "# get the substitutes intersecting the template and the cli", "try", ":", "for", "key", "in", "self", ".", "substitutes", "(", ")", ".", "keys", "(", ")", ":", "if", "key", "not", "in", "self", ".", "substitutes_dict", ":", "self", ".", "substitutes_dict", "[", "key", "]", "=", "self", ".", "substitutes", "(", ")", "[", "key", "]", "except", "AttributeError", ":", "self", ".", "term", ".", "print_info", "(", "u\"No substitute in the template.\"", ")", "working_dir", "=", "os", ".", "getcwd", "(", ")", "# iteratively create the files", "for", "directory", ",", "name", ",", "template_file", "in", "files", ":", "if", "directory", ":", "filepath", "=", "working_dir", "+", "'/'", "+", "directory", "+", "'/'", "+", "name", "filename", "=", "directory", "+", "'/'", "+", "name", "else", ":", "filepath", "=", "working_dir", "+", "'/'", "+", "name", "filename", "=", "name", "# open the file to write into", "try", ":", "output", "=", "open", "(", "filepath", ",", "'w'", ")", "except", "IOError", ":", "self", ".", "term", ".", "print_error_and_exit", "(", "u\"Can't create destination\"", "\" file: {0}\"", ".", "format", "(", "filepath", ")", ")", "# open the template to read from", "if", "template_file", ":", "input_file", "=", "join", "(", "dirname", "(", "__file__", ")", ",", "template_file", "+", "'.txt'", ")", "# write each line of input file into output file,", "# templatized with substitutes", "try", ":", "with", "open", "(", "input_file", ",", "'r'", ")", "as", "line", ":", "template_line", "=", "Template", "(", "line", ".", "read", "(", ")", ")", "try", ":", "output", ".", "write", "(", "template_line", ".", "safe_substitute", "(", "self", ".", "substitutes_dict", ")", ".", "encode", "(", "'utf-8'", ")", ")", "except", "TypeError", ":", "output", ".", "write", "(", "template_line", ".", "safe_substitute", "(", "self", ".", "substitutes_dict", ")", ")", "output", ".", "close", "(", ")", "except", "IOError", ":", "self", ".", "term", ".", "print_error_and_exit", "(", "u\"Can't create template file\"", "\": {0}\"", ".", "format", "(", "input_file", ")", ")", "else", ":", "output", ".", "close", "(", ")", "# the file is empty, but still created", "self", ".", "term", ".", "print_info", "(", "u\"Creating file '{0}'\"", ".", "format", "(", "self", ".", "term", ".", "text_in_color", "(", "filename", ",", "TERM_YELLOW", ")", ")", ")" ]
Create the files of the template.
[ "Create", "the", "files", "of", "the", "template", "." ]
3146b0e3c207b977e1b51fcb33138746dae83c23
https://github.com/stephanepechard/projy/blob/3146b0e3c207b977e1b51fcb33138746dae83c23/projy/templates/ProjyTemplate.py#L76-L135
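The templating step in make_files relies on string.Template.safe_substitute, which fills known keys and leaves unknown placeholders untouched instead of raising; a minimal standalone demonstration:

```python
from string import Template

text = "Project: $project\nAuthor: $author\nLeft alone: $missing"
template = Template(text)
# safe_substitute fills 'project' and 'author' but keeps '$missing'
# verbatim rather than raising KeyError, which is why make_files can
# work from a partial substitutes dictionary.
print(template.safe_substitute({'project': 'demo', 'author': 'Jane'}))
```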
244,579
stephanepechard/projy
projy/templates/ProjyTemplate.py
ProjyTemplate.make_posthook
def make_posthook(self): """ Run the post hook into the project directory. """ if self.posthook: os.chdir(self.project_name) # enter the project main directory self.posthook()
python
def make_posthook(self): """ Run the post hook into the project directory. """ if self.posthook: os.chdir(self.project_name) # enter the project main directory self.posthook()
[ "def", "make_posthook", "(", "self", ")", ":", "print", "(", "id", "(", "self", ".", "posthook", ")", ",", "self", ".", "posthook", ")", "print", "(", "id", "(", "super", "(", "self", ".", "__class__", ",", "self", ")", ".", "posthook", ")", ",", "super", "(", "self", ".", "__class__", ",", "self", ")", ".", "posthook", ")", "import", "ipdb", "ipdb", ".", "set_trace", "(", ")", "if", "self", ".", "posthook", ":", "os", ".", "chdir", "(", "self", ".", "project_name", ")", "# enter the project main directory", "self", ".", "posthook", "(", ")" ]
Run the post hook into the project directory.
[ "Run", "the", "post", "hook", "into", "the", "project", "directory", "." ]
3146b0e3c207b977e1b51fcb33138746dae83c23
https://github.com/stephanepechard/projy/blob/3146b0e3c207b977e1b51fcb33138746dae83c23/projy/templates/ProjyTemplate.py#L138-L145
244,580
stephanepechard/projy
projy/templates/ProjyTemplate.py
ProjyTemplate.replace_in_file
def replace_in_file(self, file_path, old_exp, new_exp): """ In the given file, replace all 'old_exp' by 'new_exp'. """ self.term.print_info(u"Making replacement into {}" .format(self.term.text_in_color(file_path, TERM_GREEN))) # write the new version into a temporary file tmp_file = tempfile.NamedTemporaryFile(mode='w+t', delete=False) for filelineno, line in enumerate(io.open(file_path, encoding="utf-8")): if old_exp in line: line = line.replace(old_exp, new_exp) try: tmp_file.write(line.encode('utf-8')) except TypeError: tmp_file.write(line) name = tmp_file.name # keep the name tmp_file.close() shutil.copy(name, file_path) # replace the original one os.remove(name)
python
def replace_in_file(self, file_path, old_exp, new_exp): """ In the given file, replace all 'old_exp' by 'new_exp'. """ self.term.print_info(u"Making replacement into {}" .format(self.term.text_in_color(file_path, TERM_GREEN))) # write the new version into a temporary file tmp_file = tempfile.NamedTemporaryFile(mode='w+t', delete=False) for filelineno, line in enumerate(io.open(file_path, encoding="utf-8")): if old_exp in line: line = line.replace(old_exp, new_exp) try: tmp_file.write(line.encode('utf-8')) except TypeError: tmp_file.write(line) name = tmp_file.name # keep the name tmp_file.close() shutil.copy(name, file_path) # replace the original one os.remove(name)
[ "def", "replace_in_file", "(", "self", ",", "file_path", ",", "old_exp", ",", "new_exp", ")", ":", "self", ".", "term", ".", "print_info", "(", "u\"Making replacement into {}\"", ".", "format", "(", "self", ".", "term", ".", "text_in_color", "(", "file_path", ",", "TERM_GREEN", ")", ")", ")", "# write the new version into a temporary file", "tmp_file", "=", "tempfile", ".", "NamedTemporaryFile", "(", "mode", "=", "'w+t'", ",", "delete", "=", "False", ")", "for", "filelineno", ",", "line", "in", "enumerate", "(", "io", ".", "open", "(", "file_path", ",", "encoding", "=", "\"utf-8\"", ")", ")", ":", "if", "old_exp", "in", "line", ":", "line", "=", "line", ".", "replace", "(", "old_exp", ",", "new_exp", ")", "try", ":", "tmp_file", ".", "write", "(", "line", ".", "encode", "(", "'utf-8'", ")", ")", "except", "TypeError", ":", "tmp_file", ".", "write", "(", "line", ")", "name", "=", "tmp_file", ".", "name", "# keep the name", "tmp_file", ".", "close", "(", ")", "shutil", ".", "copy", "(", "name", ",", "file_path", ")", "# replace the original one", "os", ".", "remove", "(", "name", ")" ]
In the given file, replace all 'old_exp' by 'new_exp'.
[ "In", "the", "given", "file", "replace", "all", "old_exp", "by", "new_exp", "." ]
3146b0e3c207b977e1b51fcb33138746dae83c23
https://github.com/stephanepechard/projy/blob/3146b0e3c207b977e1b51fcb33138746dae83c23/projy/templates/ProjyTemplate.py#L148-L166
244,581
dossier/dossier.models
dossier/models/openquery/fetcher.py
ChunkRoller.add
def add(self, si): '''puts `si` into the currently open chunk, which it creates if necessary. If this item causes the chunk to cross chunk_max, then the chunk is closed after adding. ''' if self.o_chunk is None: if os.path.exists(self.t_path): os.remove(self.t_path) self.o_chunk = streamcorpus.Chunk(self.t_path, mode='wb') self.o_chunk.add(si) logger.debug('added %d-th item to chunk', len(self.o_chunk)) if len(self.o_chunk) == self.chunk_max: self.close()
python
def add(self, si): '''puts `si` into the currently open chunk, which it creates if necessary. If this item causes the chunk to cross chunk_max, then the chunk is closed after adding. ''' if self.o_chunk is None: if os.path.exists(self.t_path): os.remove(self.t_path) self.o_chunk = streamcorpus.Chunk(self.t_path, mode='wb') self.o_chunk.add(si) logger.debug('added %d-th item to chunk', len(self.o_chunk)) if len(self.o_chunk) == self.chunk_max: self.close()
[ "def", "add", "(", "self", ",", "si", ")", ":", "if", "self", ".", "o_chunk", "is", "None", ":", "if", "os", ".", "path", ".", "exists", "(", "self", ".", "t_path", ")", ":", "os", ".", "remove", "(", "self", ".", "t_path", ")", "self", ".", "o_chunk", "=", "streamcorpus", ".", "Chunk", "(", "self", ".", "t_path", ",", "mode", "=", "'wb'", ")", "self", ".", "o_chunk", ".", "add", "(", "si", ")", "logger", ".", "debug", "(", "'added %d-th item to chunk'", ",", "len", "(", "self", ".", "o_chunk", ")", ")", "if", "len", "(", "self", ".", "o_chunk", ")", "==", "self", ".", "chunk_max", ":", "self", ".", "close", "(", ")" ]
puts `si` into the currently open chunk, which it creates if necessary. If this item causes the chunk to cross chunk_max, then the chunk is closed after adding.
[ "puts", "si", "into", "the", "currently", "open", "chunk", "which", "it", "creates", "if", "necessary", ".", "If", "this", "item", "causes", "the", "chunk", "to", "cross", "chunk_max", "then", "the", "chunk", "closed", "after", "adding", "." ]
c9e282f690eab72963926329efe1600709e48b13
https://github.com/dossier/dossier.models/blob/c9e282f690eab72963926329efe1600709e48b13/dossier/models/openquery/fetcher.py#L108-L121
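An illustrative in-memory analogue of the ChunkRoller pattern, batching plain items instead of writing streamcorpus chunk files (class and attribute names here are invented for the sketch):

```python
class ListRoller(object):
    """Illustrative stand-in for ChunkRoller: buffers items and
    closes a batch whenever it reaches batch_max."""

    def __init__(self, batch_max):
        self.batch_max = batch_max
        self.batch = None
        self.closed = []

    def add(self, item):
        if self.batch is None:
            self.batch = []
        self.batch.append(item)
        # close the batch as soon as it fills, like chunk_max above
        if len(self.batch) == self.batch_max:
            self.close()

    def close(self):
        if self.batch:
            self.closed.append(self.batch)
            self.batch = None

roller = ListRoller(batch_max=3)
for i in range(7):
    roller.add(i)
roller.close()  # flush the final partial batch
print(roller.closed)  # [[0, 1, 2], [3, 4, 5], [6]]
```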
244,582
ikalnytskyi/dooku
dooku/algorithm.py
find_if
def find_if(pred, iterable, default=None): """ Returns a reference to the first element in the ``iterable`` range for which ``pred`` returns ``True``. If no such element is found, the function returns ``default``. >>> find_if(lambda x: x == 3, [1, 2, 3, 4]) 3 :param pred: a predicate function to check a value from the iterable range :param iterable: an iterable range to check in :param default: a value that will be returned if no elements were found :returns: a reference to the first found element or default """ return next((i for i in iterable if pred(i)), default)
python
def find_if(pred, iterable, default=None): """ Returns a reference to the first element in the ``iterable`` range for which ``pred`` returns ``True``. If no such element is found, the function returns ``default``. >>> find_if(lambda x: x == 3, [1, 2, 3, 4]) 3 :param pred: a predicate function to check a value from the iterable range :param iterable: an iterable range to check in :param default: a value that will be returned if no elements were found :returns: a reference to the first found element or default """ return next((i for i in iterable if pred(i)), default)
[ "def", "find_if", "(", "pred", ",", "iterable", ",", "default", "=", "None", ")", ":", "return", "next", "(", "(", "i", "for", "i", "in", "iterable", "if", "pred", "(", "i", ")", ")", ",", "default", ")" ]
Returns a reference to the first element in the ``iterable`` range for which ``pred`` returns ``True``. If no such element is found, the function returns ``default``. >>> find_if(lambda x: x == 3, [1, 2, 3, 4]) 3 :param pred: a predicate function to check a value from the iterable range :param iterable: an iterable range to check in :param default: a value that will be returned if no elements were found :returns: a reference to the first found element or default
[ "Returns", "a", "reference", "to", "the", "first", "element", "in", "the", "iterable", "range", "for", "which", "pred", "returns", "True", ".", "If", "no", "such", "element", "is", "found", "the", "function", "returns", "default", "." ]
77e6c82c9c41211c86ee36ae5e591d477945fedf
https://github.com/ikalnytskyi/dooku/blob/77e6c82c9c41211c86ee36ae5e591d477945fedf/dooku/algorithm.py#L43-L57
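Beyond the doctest above, find_if is handy for scanning records; a short usage sketch:

```python
def find_if(pred, iterable, default=None):
    return next((i for i in iterable if pred(i)), default)

users = [{'id': 1, 'name': 'ann'}, {'id': 2, 'name': 'bob'}]
print(find_if(lambda u: u['id'] == 2, users))          # {'id': 2, 'name': 'bob'}
print(find_if(lambda u: u['id'] == 9, users, 'none'))  # 'none' -- the default
```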
244,583
twidi/py-dataql
dataql/parsers/generic.py
DataQLParser.visit_root
def visit_root(self, _, children): """The main node holding all the query. Arguments --------- _ (node) : parsimonious.nodes.Node. children : list - 0: for ``WS`` (whitespace): ``None``. - 1: for ``NAMED_RESOURCE``: an instance of a subclass of ``.resources.Resource``. - 2: for ``WS`` (whitespace): ``None``. Returns ------- .resources.Resource An instance of a subclass of ``.resources.Resource``, with ``is_root`` set to ``True``. Example ------- >>> data = DataQLParser(r''' ... foo ... ''', default_rule='ROOT').data >>> data <Field[foo] /> >>> data.is_root True >>> data = DataQLParser(r''' ... bar[name] ... ''', default_rule='ROOT').data >>> data <List[bar]> <Field[name] /> </List[bar]> >>> data.is_root True >>> data = DataQLParser(r''' ... baz{name} ... ''', default_rule='ROOT').data >>> data <Object[baz]> <Field[name] /> </Object[baz]> >>> data.is_root True """ resource = children[1] resource.is_root = True return resource
python
def visit_root(self, _, children): """The main node holding all the query. Arguments --------- _ (node) : parsimonious.nodes.Node. children : list - 0: for ``WS`` (whitespace): ``None``. - 1: for ``NAMED_RESOURCE``: an instance of a subclass of ``.resources.Resource``. - 2: for ``WS`` (whitespace): ``None``. Returns ------- .resources.Resource An instance of a subclass of ``.resources.Resource``, with ``is_root`` set to ``True``. Example ------- >>> data = DataQLParser(r''' ... foo ... ''', default_rule='ROOT').data >>> data <Field[foo] /> >>> data.is_root True >>> data = DataQLParser(r''' ... bar[name] ... ''', default_rule='ROOT').data >>> data <List[bar]> <Field[name] /> </List[bar]> >>> data.is_root True >>> data = DataQLParser(r''' ... baz{name} ... ''', default_rule='ROOT').data >>> data <Object[baz]> <Field[name] /> </Object[baz]> >>> data.is_root True """ resource = children[1] resource.is_root = True return resource
[ "def", "visit_root", "(", "self", ",", "_", ",", "children", ")", ":", "resource", "=", "children", "[", "1", "]", "resource", ".", "is_root", "=", "True", "return", "resource" ]
The main node holding all the query. Arguments --------- _ (node) : parsimonious.nodes.Node. children : list - 0: for ``WS`` (whitespace): ``None``. - 1: for ``NAMED_RESOURCE``: an instance of a subclass of ``.resources.Resource``. - 2: for ``WS`` (whitespace): ``None``. Returns ------- .resources.Resource An instance of a subclass of ``.resources.Resource``, with ``is_root`` set to ``True``. Example ------- >>> data = DataQLParser(r''' ... foo ... ''', default_rule='ROOT').data >>> data <Field[foo] /> >>> data.is_root True >>> data = DataQLParser(r''' ... bar[name] ... ''', default_rule='ROOT').data >>> data <List[bar]> <Field[name] /> </List[bar]> >>> data.is_root True >>> data = DataQLParser(r''' ... baz{name} ... ''', default_rule='ROOT').data >>> data <Object[baz]> <Field[name] /> </Object[baz]> >>> data.is_root True
[ "The", "main", "node", "holding", "all", "the", "query", "." ]
5841a3fd559829193ed709c255166085bdde1c52
https://github.com/twidi/py-dataql/blob/5841a3fd559829193ed709c255166085bdde1c52/dataql/parsers/generic.py#L78-L127
244,584
twidi/py-dataql
dataql/parsers/generic.py
DataQLParser.visit_named_resource
def visit_named_resource(self, _, children): """A resource in the query with its optional name. Arguments --------- _ (node) : parsimonious.nodes.Node. children : list - 0: for ``OPTIONAL_RESOURCE_NAME``: str, the name of the resource, or ``None`` if not set in the query. - 1: for ``RESOURCE``: an instance of a subclass of ``.resources.Resource``. Returns ------- .resources.Resource An instance of a subclass of ``.resources.Resource``, with its ``name`` field set to the name in the query if set. Example ------- >>> DataQLParser(r'bar', default_rule='NAMED_RESOURCE').data <Field[bar] /> >>> DataQLParser(r'foo:bar', default_rule='NAMED_RESOURCE').data <Field[foo] .bar /> >>> DataQLParser(r'foo : bar', default_rule='NAMED_RESOURCE').data <Field[foo] .bar /> """ name, resource = children if name: resource.name = name return resource
python
def visit_named_resource(self, _, children): """A resource in the query with its optional name. Arguments --------- _ (node) : parsimonious.nodes.Node. children : list - 0: for ``OPTIONAL_RESOURCE_NAME``: str, the name of the resource, or ``None`` if not set in the query. - 1: for ``RESOURCE``: an instance of a subclass of ``.resources.Resource``. Returns ------- .resources.Resource An instance of a subclass of ``.resources.Resource``, with its ``name`` field set to the name in the query if set. Example ------- >>> DataQLParser(r'bar', default_rule='NAMED_RESOURCE').data <Field[bar] /> >>> DataQLParser(r'foo:bar', default_rule='NAMED_RESOURCE').data <Field[foo] .bar /> >>> DataQLParser(r'foo : bar', default_rule='NAMED_RESOURCE').data <Field[foo] .bar /> """ name, resource = children if name: resource.name = name return resource
[ "def", "visit_named_resource", "(", "self", ",", "_", ",", "children", ")", ":", "name", ",", "resource", "=", "children", "if", "name", ":", "resource", ".", "name", "=", "name", "return", "resource" ]
A resource in the query with its optional name. Arguments --------- _ (node) : parsimonious.nodes.Node. children : list - 0: for ``OPTIONAL_RESOURCE_NAME``: str, the name of the resource, or ``None`` if not set in the query. - 1: for ``RESOURCE``: an instance of a subclass of ``.resources.Resource``. Returns ------- .resources.Resource An instance of a subclass of ``.resources.Resource``, with its ``name`` field set to the name in the query if set. Example ------- >>> DataQLParser(r'bar', default_rule='NAMED_RESOURCE').data <Field[bar] /> >>> DataQLParser(r'foo:bar', default_rule='NAMED_RESOURCE').data <Field[foo] .bar /> >>> DataQLParser(r'foo : bar', default_rule='NAMED_RESOURCE').data <Field[foo] .bar />
[ "A", "resource", "in", "the", "query", "with", "its", "optional", "name", "." ]
5841a3fd559829193ed709c255166085bdde1c52
https://github.com/twidi/py-dataql/blob/5841a3fd559829193ed709c255166085bdde1c52/dataql/parsers/generic.py#L167-L199
244,585
twidi/py-dataql
dataql/parsers/generic.py
DataQLParser.visit_field
def visit_field(self, _, children): """A simple field. Arguments --------- _ (node) : parsimonious.nodes.Node. children : list - 0: for ``FILTERS``: list of instances of ``.resources.Field``. Returns ------- .resources.Field An instance of ``.resources.Field`` with the correct name. Example ------- >>> DataQLParser(r'foo', default_rule='FIELD').data <Field[foo] /> >>> DataQLParser(r'foo(1)', default_rule='FIELD').data <Field[foo] .foo(1) /> >>> DataQLParser(r'foo.bar()', default_rule='FIELD').data <Field[foo] .foo.bar() /> """ filters = children[0] return self.Field(getattr(filters[0], 'name', None), filters=filters)
python
def visit_field(self, _, children): """A simple field. Arguments --------- _ (node) : parsimonious.nodes.Node. children : list - 0: for ``FILTERS``: list of instances of ``.resources.Field``. Returns ------- .resources.Field An instance of ``.resources.Field`` with the correct name. Example ------- >>> DataQLParser(r'foo', default_rule='FIELD').data <Field[foo] /> >>> DataQLParser(r'foo(1)', default_rule='FIELD').data <Field[foo] .foo(1) /> >>> DataQLParser(r'foo.bar()', default_rule='FIELD').data <Field[foo] .foo.bar() /> """ filters = children[0] return self.Field(getattr(filters[0], 'name', None), filters=filters)
[ "def", "visit_field", "(", "self", ",", "_", ",", "children", ")", ":", "filters", "=", "children", "[", "0", "]", "return", "self", ".", "Field", "(", "getattr", "(", "filters", "[", "0", "]", ",", "'name'", ",", "None", ")", ",", "filters", "=", "filters", ")" ]
A simple field. Arguments --------- _ (node) : parsimonious.nodes.Node. children : list - 0: for ``FILTERS``: list of instances of ``.resources.Field``. Returns ------- .resources.Field An instance of ``.resources.Field`` with the correct name. Example ------- >>> DataQLParser(r'foo', default_rule='FIELD').data <Field[foo] /> >>> DataQLParser(r'foo(1)', default_rule='FIELD').data <Field[foo] .foo(1) /> >>> DataQLParser(r'foo.bar()', default_rule='FIELD').data <Field[foo] .foo.bar() />
[ "A", "simple", "field", "." ]
5841a3fd559829193ed709c255166085bdde1c52
https://github.com/twidi/py-dataql/blob/5841a3fd559829193ed709c255166085bdde1c52/dataql/parsers/generic.py#L373-L399
244,586
twidi/py-dataql
dataql/parsers/generic.py
DataQLParser.visit_named_object
def visit_named_object(self, _, children): """Manage an object, represented by a ``.resources.Object`` instance. This object is populated with data from the result of the ``FILTERS``. Arguments --------- _ (node) : parsimonious.nodes.Node. children : list - 0: for ``FILTERS``: list of instances of ``.resources.Field``. - 1: for ``OBJECT``: an ``Object`` resource Example ------- >>> DataQLParser(r'foo{name}', default_rule='NAMED_OBJECT').data <Object[foo]> <Field[name] /> </Object[foo]> """ filters, resource = children resource.name = filters[0].name resource.filters = filters return resource
python
def visit_named_object(self, _, children): """Manage an object, represented by a ``.resources.Object`` instance. This object is populated with data from the result of the ``FILTERS``. Arguments --------- _ (node) : parsimonious.nodes.Node. children : list - 0: for ``FILTERS``: list of instances of ``.resources.Field``. - 1: for ``OBJECT``: an ``Object`` resource Example ------- >>> DataQLParser(r'foo{name}', default_rule='NAMED_OBJECT').data <Object[foo]> <Field[name] /> </Object[foo]> """ filters, resource = children resource.name = filters[0].name resource.filters = filters return resource
[ "def", "visit_named_object", "(", "self", ",", "_", ",", "children", ")", ":", "filters", ",", "resource", "=", "children", "resource", ".", "name", "=", "filters", "[", "0", "]", ".", "name", "resource", ".", "filters", "=", "filters", "return", "resource" ]
Manage an object, represented by a ``.resources.Object`` instance. This object is populated with data from the result of the ``FILTERS``. Arguments --------- _ (node) : parsimonious.nodes.Node. children : list - 0: for ``FILTERS``: list of instances of ``.resources.Field``. - 1: for ``OBJECT``: an ``Object`` resource Example ------- >>> DataQLParser(r'foo{name}', default_rule='NAMED_OBJECT').data <Object[foo]> <Field[name] /> </Object[foo]>
[ "Manage", "an", "object", "represented", "by", "a", ".", "resources", ".", "Object", "instance", "." ]
5841a3fd559829193ed709c255166085bdde1c52
https://github.com/twidi/py-dataql/blob/5841a3fd559829193ed709c255166085bdde1c52/dataql/parsers/generic.py#L402-L428
244,587
twidi/py-dataql
dataql/parsers/generic.py
DataQLParser.visit_named_list
def visit_named_list(self, _, children): """Manage a list, represented by a ``.resources.List`` instance. This list is populated with data from the result of the ``FILTERS``. Arguments --------- _ (node) : parsimonious.nodes.Node. children : list - 0: for ``FILTERS``: list of instances of ``.resources.Field``. - 1: for ``LIST``: a ``List`` resource Example ------- >>> DataQLParser(r'foo(1)[name]', default_rule='NAMED_LIST').data <List[foo] .foo(1)> <Field[name] /> </List[foo]> """ filters, resource = children resource.name = filters[0].name resource.filters = filters return resource
python
def visit_named_list(self, _, children): """Manage a list, represented by a ``.resources.List`` instance. This list is populated with data from the result of the ``FILTERS``. Arguments --------- _ (node) : parsimonious.nodes.Node. children : list - 0: for ``FILTERS``: list of instances of ``.resources.Field``. - 1: for ``LIST``: a ``List`` resource Example ------- >>> DataQLParser(r'foo(1)[name]', default_rule='NAMED_LIST').data <List[foo] .foo(1)> <Field[name] /> </List[foo]> """ filters, resource = children resource.name = filters[0].name resource.filters = filters return resource
[ "def", "visit_named_list", "(", "self", ",", "_", ",", "children", ")", ":", "filters", ",", "resource", "=", "children", "resource", ".", "name", "=", "filters", "[", "0", "]", ".", "name", "resource", ".", "filters", "=", "filters", "return", "resource" ]
Manage a list, represented by a ``.resources.List`` instance. This list is populated with data from the result of the ``FILTERS``. Arguments --------- _ (node) : parsimonious.nodes.Node. children : list - 0: for ``FILTERS``: list of instances of ``.resources.Field``. - 1: for ``LIST``: a ``List`` resource Example ------- >>> DataQLParser(r'foo(1)[name]', default_rule='NAMED_LIST').data <List[foo] .foo(1)> <Field[name] /> </List[foo]>
[ "Manage", "a", "list", "represented", "by", "a", ".", "resources", ".", "List", "instance", "." ]
5841a3fd559829193ed709c255166085bdde1c52
https://github.com/twidi/py-dataql/blob/5841a3fd559829193ed709c255166085bdde1c52/dataql/parsers/generic.py#L456-L482
244,588
CS207-Final-Project-Group-10/cs207-FinalProject
solar_system/solar_system.py
load_constants
def load_constants(): """Load physical constants to simulate the earth-sun system""" # The universal gravitational constant # https://en.wikipedia.org/wiki/Gravitational_constant G: float = 6.67408E-11 # The names of the celestial bodies body_name = \ ['sun', 'moon', 'mercury', 'venus', 'earth', 'mars', 'jupiter', 'saturn', 'uranus', 'neptune'] # The mass of the celestial bodies # https://en.wikipedia.org/wiki/Earth_mass mass_earth: float = 5.9722E24 mass: Dict[str, float] = \ {'sun': mass_earth * 332946.0487, 'moon' : mass_earth * 0.012300037, 'mercury': mass_earth * 0.0553, 'venus': mass_earth * 0.815, 'earth': mass_earth * 1.0000, 'mars': mass_earth * 0.107, 'jupiter': mass_earth * 317.8, 'saturn': mass_earth * 95.2, 'uranus': mass_earth * 14.5, 'neptune': mass_earth * 17.1, } # The radii of the celestial bodies # https://nineplanets.org/data1.html radius: Dict[str, float] = \ {'sun': 695000e3, 'moon': 1738e3, 'mercury': 2440e3, 'venus': 6052e3, 'earth': 6378e3, 'mars': 3397e3, 'jupiter': 71492e3, 'saturn': 60268e3, 'uranus': 35559e3, 'neptune': 24766e3 } return G, body_name, mass, radius
python
def load_constants(): """Load physical constants to simulate the earth-sun system""" # The universal gravitational constant # https://en.wikipedia.org/wiki/Gravitational_constant G: float = 6.67408E-11 # The names of the celestial bodies body_name = \ ['sun', 'moon', 'mercury', 'venus', 'earth', 'mars', 'jupiter', 'saturn', 'uranus', 'neptune'] # The mass of the celestial bodies # https://en.wikipedia.org/wiki/Earth_mass mass_earth: float = 5.9722E24 mass: Dict[str, float] = \ {'sun': mass_earth * 332946.0487, 'moon' : mass_earth * 0.012300037, 'mercury': mass_earth * 0.0553, 'venus': mass_earth * 0.815, 'earth': mass_earth * 1.0000, 'mars': mass_earth * 0.107, 'jupiter': mass_earth * 317.8, 'saturn': mass_earth * 95.2, 'uranus': mass_earth * 14.5, 'neptune': mass_earth * 17.1, } # The radii of the celestial bodies # https://nineplanets.org/data1.html radius: Dict[str, float] = \ {'sun': 695000e3, 'moon': 1738e3, 'mercury': 2440e3, 'venus': 6052e3, 'earth': 6378e3, 'mars': 3397e3, 'jupiter': 71492e3, 'saturn': 60268e3, 'uranus': 35559e3, 'neptune': 24766e3 } return G, body_name, mass, radius
[ "def", "load_constants", "(", ")", ":", "# The universal gravitational constant", "# https://en.wikipedia.org/wiki/Gravitational_constant", "G", ":", "float", "=", "6.67408E-11", "# The names of the celestial bodies", "body_name", "=", "[", "'sun'", ",", "'moon'", ",", "'mercury'", ",", "'venus'", ",", "'earth'", ",", "'mars'", ",", "'jupiter'", ",", "'saturn'", ",", "'uranus'", ",", "'neptune'", "]", "# The mass of the celestial bodies", "# https://en.wikipedia.org/wiki/Earth_mass", "mass_earth", ":", "float", "=", "5.9722E24", "mass", ":", "Dict", "[", "str", ",", "float", "]", "=", "{", "'sun'", ":", "mass_earth", "*", "332946.0487", ",", "'moon'", ":", "mass_earth", "*", "0.012300037", ",", "'mercury'", ":", "mass_earth", "*", "0.0553", ",", "'venus'", ":", "mass_earth", "*", "0.815", ",", "'earth'", ":", "mass_earth", "*", "1.0000", ",", "'mars'", ":", "mass_earth", "*", "0.107", ",", "'jupiter'", ":", "mass_earth", "*", "317.8", ",", "'saturn'", ":", "mass_earth", "*", "95.2", ",", "'uranus'", ":", "mass_earth", "*", "14.5", ",", "'neptune'", ":", "mass_earth", "*", "17.1", ",", "}", "# The radii of the celestial bodiea", "# https://nineplanets.org/data1.html", "radius", ":", "Dict", "[", "str", ",", "float", "]", "=", "{", "'sun'", ":", "695000e3", ",", "'moon'", ":", "1738e3", ",", "'mercury'", ":", "2440e3", ",", "'venus'", ":", "6052e3", ",", "'earth'", ":", "6378e3", ",", "'mars'", ":", "3397e3", ",", "'jupiter'", ":", "71492e3", ",", "'saturn'", ":", "60268e3", ",", "'uranus'", ":", "35559e3", ",", "'neptune'", ":", "24766e3", "}", "return", "G", ",", "body_name", ",", "mass", ",", "radius" ]
Load physical constants to simulate the earth-sun system
[ "Load", "physical", "constants", "to", "simulate", "the", "earth", "-", "sun", "system" ]
842e9c2d3ca1490cef18c086dfde81856d8d3a82
https://github.com/CS207-Final-Project-Group-10/cs207-FinalProject/blob/842e9c2d3ca1490cef18c086dfde81856d8d3a82/solar_system/solar_system.py#L71-L122
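A usage sketch assuming load_constants (and its typing.Dict import) is in scope: surface gravity g = G * m / r**2 recovers familiar values, which doubles as a check on the constants:

```python
G, body_name, mass, radius = load_constants()

# Surface gravity g = G * m / r**2; Earth should land near 9.8 m/s^2
for body in ('earth', 'mars', 'jupiter'):
    g = G * mass[body] / radius[body] ** 2
    print('%-8s %6.2f m/s^2' % (body, g))
# earth      9.80 m/s^2
# mars       3.70 m/s^2
# jupiter   24.79 m/s^2
```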
244,589
CS207-Final-Project-Group-10/cs207-FinalProject
solar_system/solar_system.py
julian_day
def julian_day(t: date) -> int: """Convert a Python datetime to a Julian day""" # Compute the number of days from January 1, 2000 to date t dt = t - julian_base_date # Add the julian base number to the number of days from the julian base date to date t return julian_base_number + dt.days
python
def julian_day(t: date) -> int: """Convert a Python datetime to a Julian day""" # Compute the number of days from January 1, 2000 to date t dt = t - julian_base_date # Add the julian base number to the number of days from the julian base date to date t return julian_base_number + dt.days
[ "def", "julian_day", "(", "t", ":", "date", ")", "->", "int", ":", "# Compute the number of days from January 1, 2000 to date t", "dt", "=", "t", "-", "julian_base_date", "# Add the julian base number to the number of days from the julian base date to date t", "return", "julian_base_number", "+", "dt", ".", "days" ]
Convert a Python datetime to a Julian day
[ "Convert", "a", "Python", "datetime", "to", "a", "Julian", "day" ]
842e9c2d3ca1490cef18c086dfde81856d8d3a82
https://github.com/CS207-Final-Project-Group-10/cs207-FinalProject/blob/842e9c2d3ca1490cef18c086dfde81856d8d3a82/solar_system/solar_system.py#L126-L131
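julian_day references the module globals julian_base_date and julian_base_number, which are not part of this record; the J2000-style values below are assumed for illustration:

```python
from datetime import date

# Assumed module globals: January 1, 2000 taken as Julian day 2451545
julian_base_date = date(2000, 1, 1)
julian_base_number = 2451545

def julian_day(t):
    # days elapsed since the base date, offset by the base Julian number
    dt = t - julian_base_date
    return julian_base_number + dt.days

print(julian_day(date(2000, 1, 1)))   # 2451545
print(julian_day(date(2000, 1, 31)))  # 2451575
```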
244,590
CS207-Final-Project-Group-10/cs207-FinalProject
solar_system/solar_system.py
calc_mse
def calc_mse(q1, q2): """Compare the results of two simulations""" # Difference in positions between two simulations dq = q2 - q1 # Root mean squared error in AUs return np.sqrt(np.mean(dq*dq))/au2m
python
def calc_mse(q1, q2): """Compare the results of two simulations""" # Difference in positions between two simulations dq = q2 - q1 # Root mean squared error in AUs return np.sqrt(np.mean(dq*dq))/au2m
[ "def", "calc_mse", "(", "q1", ",", "q2", ")", ":", "# Difference in positions between two simulations", "dq", "=", "q2", "-", "q1", "# Mean squared error in AUs", "return", "np", ".", "sqrt", "(", "np", ".", "mean", "(", "dq", "*", "dq", ")", ")", "/", "au2m" ]
Compare the results of two simulations
[ "Compare", "the", "results", "of", "two", "simulations" ]
842e9c2d3ca1490cef18c086dfde81856d8d3a82
https://github.com/CS207-Final-Project-Group-10/cs207-FinalProject/blob/842e9c2d3ca1490cef18c086dfde81856d8d3a82/solar_system/solar_system.py#L134-L139
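Despite its name, calc_mse returns a root mean squared error normalized to astronomical units; a standalone sketch with au2m assumed (it is a module global not shown in this record):

```python
import numpy as np

au2m = 149597870700.0  # meters per astronomical unit (assumed global)

def calc_mse(q1, q2):
    dq = q2 - q1
    # sqrt of the mean squared difference, i.e. an RMSE, scaled to AUs
    return np.sqrt(np.mean(dq * dq)) / au2m

q1 = np.zeros(4)
q2 = np.full(4, au2m)  # every coordinate off by exactly one AU
print(calc_mse(q1, q2))  # 1.0
```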
244,591
CS207-Final-Project-Group-10/cs207-FinalProject
solar_system/solar_system.py
flux_v2
def flux_v2(v_vars: List[fl.Var], i: int): """Make Fluxion with the speed squared of body i""" # Index with the base of (v_x, v_y, v_z) for body i k = 3*i # The speed squared of body i return fl.square(v_vars[k+0]) + fl.square(v_vars[k+1]) + fl.square(v_vars[k+2])
python
def flux_v2(v_vars: List[fl.Var], i: int): """Make Fluxion with the speed squared of body i""" # Index with the base of (v_x, v_y, v_z) for body i k = 3*i # The speed squared of body i return fl.square(v_vars[k+0]) + fl.square(v_vars[k+1]) + fl.square(v_vars[k+2])
[ "def", "flux_v2", "(", "v_vars", ":", "List", "[", "fl", ".", "Var", "]", ",", "i", ":", "int", ")", ":", "# Index with the base of (v_x, v_y, v_z) for body i", "k", "=", "3", "*", "i", "# The speed squared of body i", "return", "fl", ".", "square", "(", "v_vars", "[", "k", "+", "0", "]", ")", "+", "fl", ".", "square", "(", "v_vars", "[", "k", "+", "1", "]", ")", "+", "fl", ".", "square", "(", "v_vars", "[", "k", "+", "2", "]", ")" ]
Make Fluxion with the speed squared of body i
[ "Make", "Fluxion", "with", "the", "speed", "squared", "of", "body", "i" ]
842e9c2d3ca1490cef18c086dfde81856d8d3a82
https://github.com/CS207-Final-Project-Group-10/cs207-FinalProject/blob/842e9c2d3ca1490cef18c086dfde81856d8d3a82/solar_system/solar_system.py#L213-L218
244,592
CS207-Final-Project-Group-10/cs207-FinalProject
solar_system/solar_system.py
U_ij
def U_ij(q_vars: List[fl.Var], mass: np.ndarray, i: int, j: int): """Make Fluxion with the gravitational potential energy between body i and j""" # Check that the lengths are consistent assert len(q_vars) == 3 * len(mass) # Masses of the bodies i and j mi = mass[i] mj = mass[j] # Gravitational potential is -G * m1 * m2 / r U = -(G * mi * mj) / flux_r(q_vars, i, j) return U
python
def U_ij(q_vars: List[fl.Var], mass: np.ndarray, i: int, j: int): """Make Fluxion with the gravitational potential energy between body i and j""" # Check that the lengths are consistent assert len(q_vars) == 3 * len(mass) # Masses of the bodies i and j mi = mass[i] mj = mass[j] # Gravitational potential is -G * m1 * m2 / r U = -(G * mi * mj) / flux_r(q_vars, i, j) return U
[ "def", "U_ij", "(", "q_vars", ":", "List", "[", "fl", ".", "Var", "]", ",", "mass", ":", "np", ".", "ndarray", ",", "i", ":", "int", ",", "j", ":", "int", ")", ":", "# Check that the lengths are consistent", "assert", "len", "(", "q_vars", ")", "==", "3", "*", "len", "(", "mass", ")", "# Masses of the bodies i and j", "mi", "=", "mass", "[", "i", "]", "mj", "=", "mass", "[", "j", "]", "# Gravitational potential is -G * m1 * m2 / r", "U", "=", "-", "(", "G", "*", "mi", "*", "mj", ")", "/", "flux_r", "(", "q_vars", ",", "i", ",", "j", ")", "return", "U" ]
Make Fluxion with the gravitational potential energy between body i and j
[ "Make", "Fluxion", "with", "the", "gratiational", "potential", "energy", "beween", "body", "i", "and", "j" ]
842e9c2d3ca1490cef18c086dfde81856d8d3a82
https://github.com/CS207-Final-Project-Group-10/cs207-FinalProject/blob/842e9c2d3ca1490cef18c086dfde81856d8d3a82/solar_system/solar_system.py#L225-L236
244,593
CS207-Final-Project-Group-10/cs207-FinalProject
solar_system/solar_system.py
T_i
def T_i(v_vars: List[fl.Var], mass: np.ndarray, i: int): """Make Fluxion with the kinetic energy of body i""" # Check that the lengths are consistent assert len(v_vars) == 3 * len(mass) # Mass of the body i m = mass[i] # kinetic energy = 1/2 * mass * speed^2 T = (0.5 * m) * flux_v2(v_vars, i) return T
python
def T_i(v_vars: List[fl.Var], mass: np.ndarray, i: int): """Make Fluxion with the kinetic energy of body i""" # Check that the lengths are consistent assert len(v_vars) == 3 * len(mass) # Mass of the body i m = mass[i] # kinetic energy = 1/2 * mass * speed^2 T = (0.5 * m) * flux_v2(v_vars, i) return T
[ "def", "T_i", "(", "v_vars", ":", "List", "[", "fl", ".", "Var", "]", ",", "mass", ":", "np", ".", "ndarray", ",", "i", ":", "int", ")", ":", "# Check that the lengths are consistent", "assert", "len", "(", "v_vars", ")", "==", "3", "*", "len", "(", "mass", ")", "# Mass of the body i", "m", "=", "mass", "[", "i", "]", "# kineteic energy = 1/2 * mass * speed^2", "T", "=", "(", "0.5", "*", "m", ")", "*", "flux_v2", "(", "v_vars", ",", "i", ")", "return", "T" ]
Make Fluxion with the kinetic energy of body i
[ "Make", "Fluxion", "with", "the", "kinetic", "energy", "of", "body", "i" ]
842e9c2d3ca1490cef18c086dfde81856d8d3a82
https://github.com/CS207-Final-Project-Group-10/cs207-FinalProject/blob/842e9c2d3ca1490cef18c086dfde81856d8d3a82/solar_system/solar_system.py#L239-L249
244,594
CS207-Final-Project-Group-10/cs207-FinalProject
solar_system/solar_system.py
plot_energy
def plot_energy(time, H, T, U): """Plot kinetic and potential energy of system over time""" # Normalize energy to initial KE T0 = T[0] H = H / T0 T = T / T0 U = U / T0 # Plot fig, ax = plt.subplots(figsize=[16,8]) ax.set_title('System Energy vs. Time') ax.set_xlabel('Time in Days') ax.set_ylabel('Energy (Ratio Initial KE)') ax.plot(time, T, label='T', color='r') ax.plot(time, U, label='U', color='b') ax.plot(time, H, label='H', color='k') ax.legend() ax.grid() plt.show()
python
def plot_energy(time, H, T, U): """Plot kinetic and potential energy of system over time""" # Normalize energy to initial KE T0 = T[0] H = H / T0 T = T / T0 U = U / T0 # Plot fig, ax = plt.subplots(figsize=[16,8]) ax.set_title('System Energy vs. Time') ax.set_xlabel('Time in Days') ax.set_ylabel('Energy (Ratio Initial KE)') ax.plot(time, T, label='T', color='r') ax.plot(time, U, label='U', color='b') ax.plot(time, H, label='H', color='k') ax.legend() ax.grid() plt.show()
[ "def", "plot_energy", "(", "time", ",", "H", ",", "T", ",", "U", ")", ":", "# Normalize energy to initial KE", "T0", "=", "T", "[", "0", "]", "H", "=", "H", "/", "T0", "T", "=", "T", "/", "T0", "U", "=", "U", "/", "T0", "# Plot", "fig", ",", "ax", "=", "plt", ".", "subplots", "(", "figsize", "=", "[", "16", ",", "8", "]", ")", "ax", ".", "set_title", "(", "'System Energy vs. Time'", ")", "ax", ".", "set_xlabel", "(", "'Time in Days'", ")", "ax", ".", "set_ylabel", "(", "'Energy (Ratio Initial KE)'", ")", "ax", ".", "plot", "(", "time", ",", "T", ",", "label", "=", "'T'", ",", "color", "=", "'r'", ")", "ax", ".", "plot", "(", "time", ",", "U", ",", "label", "=", "'U'", ",", "color", "=", "'b'", ")", "ax", ".", "plot", "(", "time", ",", "H", ",", "label", "=", "'H'", ",", "color", "=", "'k'", ")", "ax", ".", "legend", "(", ")", "ax", ".", "grid", "(", ")", "plt", ".", "show", "(", ")" ]
Plot kinetic and potential energy of system over time
[ "Plot", "kinetic", "and", "potential", "energy", "of", "system", "over", "time" ]
842e9c2d3ca1490cef18c086dfde81856d8d3a82
https://github.com/CS207-Final-Project-Group-10/cs207-FinalProject/blob/842e9c2d3ca1490cef18c086dfde81856d8d3a82/solar_system/solar_system.py#L253-L271
244,595
carlosp420/dataset-creator
dataset_creator/dataset.py
Dataset.sort_seq_records
def sort_seq_records(self, seq_records): """Checks that SeqExpandedRecords are sorted by gene_code and then by voucher code. The dashes in taxon names need to be converted to underscores so the dataset will be accepted by Biopython to do format conversions. """ for seq_record in seq_records: seq_record.voucher_code = seq_record.voucher_code.replace("-", "_") unsorted_gene_codes = set([i.gene_code for i in seq_records]) sorted_gene_codes = list(unsorted_gene_codes) sorted_gene_codes.sort(key=lambda x: x.lower()) unsorted_voucher_codes = set([i.voucher_code for i in seq_records]) sorted_voucher_codes = list(unsorted_voucher_codes) sorted_voucher_codes.sort(key=lambda x: x.lower()) sorted_seq_records = [] for gene_code in sorted_gene_codes: for voucher_code in sorted_voucher_codes: for seq_record in seq_records: should_be_done = ( seq_record.gene_code == gene_code and seq_record.voucher_code == voucher_code ) if should_be_done: sorted_seq_records.append(seq_record) return sorted_seq_records
python
def sort_seq_records(self, seq_records): """Checks that SeqExpandedRecords are sorted by gene_code and then by voucher code. The dashes in taxon names need to be converted to underscores so the dataset will be accepted by Biopython to do format conversions. """ for seq_record in seq_records: seq_record.voucher_code = seq_record.voucher_code.replace("-", "_") unsorted_gene_codes = set([i.gene_code for i in seq_records]) sorted_gene_codes = list(unsorted_gene_codes) sorted_gene_codes.sort(key=lambda x: x.lower()) unsorted_voucher_codes = set([i.voucher_code for i in seq_records]) sorted_voucher_codes = list(unsorted_voucher_codes) sorted_voucher_codes.sort(key=lambda x: x.lower()) sorted_seq_records = [] for gene_code in sorted_gene_codes: for voucher_code in sorted_voucher_codes: for seq_record in seq_records: should_be_done = ( seq_record.gene_code == gene_code and seq_record.voucher_code == voucher_code ) if should_be_done: sorted_seq_records.append(seq_record) return sorted_seq_records
[ "def", "sort_seq_records", "(", "self", ",", "seq_records", ")", ":", "for", "seq_record", "in", "seq_records", ":", "seq_record", ".", "voucher_code", "=", "seq_record", ".", "voucher_code", ".", "replace", "(", "\"-\"", ",", "\"_\"", ")", "unsorted_gene_codes", "=", "set", "(", "[", "i", ".", "gene_code", "for", "i", "in", "seq_records", "]", ")", "sorted_gene_codes", "=", "list", "(", "unsorted_gene_codes", ")", "sorted_gene_codes", ".", "sort", "(", "key", "=", "lambda", "x", ":", "x", ".", "lower", "(", ")", ")", "unsorted_voucher_codes", "=", "set", "(", "[", "i", ".", "voucher_code", "for", "i", "in", "seq_records", "]", ")", "sorted_voucher_codes", "=", "list", "(", "unsorted_voucher_codes", ")", "sorted_voucher_codes", ".", "sort", "(", "key", "=", "lambda", "x", ":", "x", ".", "lower", "(", ")", ")", "sorted_seq_records", "=", "[", "]", "for", "gene_code", "in", "sorted_gene_codes", ":", "for", "voucher_code", "in", "sorted_voucher_codes", ":", "for", "seq_record", "in", "seq_records", ":", "should_be_done", "=", "(", "seq_record", ".", "gene_code", "==", "gene_code", "and", "seq_record", ".", "voucher_code", "==", "voucher_code", ")", "if", "should_be_done", ":", "sorted_seq_records", ".", "append", "(", "seq_record", ")", "return", "sorted_seq_records" ]
Checks that SeqExpandedRecords are sorted by gene_code and then by
voucher code.

The dashes in taxon names need to be converted to underscores so the
dataset will be accepted by Biopython to do format conversions.
[ "Checks", "that", "SeqExpandedRecords", "are", "sorted", "by", "gene_code", "and", "then", "by", "voucher", "code", "." ]
ea27340b145cb566a36c1836ff42263f1b2003a0
https://github.com/carlosp420/dataset-creator/blob/ea27340b145cb566a36c1836ff42263f1b2003a0/dataset_creator/dataset.py#L81-L109
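Despite its name, this method returns a re-sorted copy of the records rather than merely checking their order, and the triple nested loop rescans every record once per (gene, voucher) pair. A sketch of an equivalent single-pass sort, assuming only that each record carries gene_code and voucher_code attributes and that codes are unique up to case (sort_seq_records_flat is a hypothetical standalone name, not part of the library):

def sort_seq_records_flat(seq_records):
    # Same dash-to-underscore normalization as the original method.
    for seq_record in seq_records:
        seq_record.voucher_code = seq_record.voucher_code.replace("-", "_")
    # A stable sort on a (gene_code, voucher_code) key tuple reproduces
    # the gene-then-voucher ordering of the nested loops in one pass.
    return sorted(
        seq_records,
        key=lambda rec: (rec.gene_code.lower(), rec.voucher_code.lower()),
    )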
244,596
carlosp420/dataset-creator
dataset_creator/dataset.py
Dataset._validate_outgroup
def _validate_outgroup(self, outgroup):
    """All voucher codes in our datasets have dashes converted to
    underscores."""
    if outgroup:
        outgroup = outgroup.replace("-", "_")
        good_outgroup = False
        for seq_record in self.seq_records:
            if seq_record.voucher_code == outgroup:
                good_outgroup = True
                break
        if good_outgroup:
            self.outgroup = outgroup
        else:
            raise ValueError("The given outgroup {0!r} cannot be found in the "
                             "input sequence records.".format(outgroup))
    else:
        self.outgroup = None
python
def _validate_outgroup(self, outgroup):
    """All voucher codes in our datasets have dashes converted to
    underscores."""
    if outgroup:
        outgroup = outgroup.replace("-", "_")
        good_outgroup = False
        for seq_record in self.seq_records:
            if seq_record.voucher_code == outgroup:
                good_outgroup = True
                break
        if good_outgroup:
            self.outgroup = outgroup
        else:
            raise ValueError("The given outgroup {0!r} cannot be found in the "
                             "input sequence records.".format(outgroup))
    else:
        self.outgroup = None
[ "def", "_validate_outgroup", "(", "self", ",", "outgroup", ")", ":", "if", "outgroup", ":", "outgroup", "=", "outgroup", ".", "replace", "(", "\"-\"", ",", "\"_\"", ")", "good_outgroup", "=", "False", "for", "seq_record", "in", "self", ".", "seq_records", ":", "if", "seq_record", ".", "voucher_code", "==", "outgroup", ":", "good_outgroup", "=", "True", "break", "if", "good_outgroup", ":", "self", ".", "outgroup", "=", "outgroup", "else", ":", "raise", "ValueError", "(", "\"The given outgroup {0!r} cannot be found in the \"", "\"input sequence records.\"", ".", "format", "(", "outgroup", ")", ")", "else", ":", "self", ".", "outgroup", "=", "None" ]
All voucher codes in our datasets have dashes converted to underscores.
[ "All", "voucher", "codes", "in", "our", "datasets", "have", "dashes", "converted", "to", "underscores", "." ]
ea27340b145cb566a36c1836ff42263f1b2003a0
https://github.com/carlosp420/dataset-creator/blob/ea27340b145cb566a36c1836ff42263f1b2003a0/dataset_creator/dataset.py#L131-L146
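An equivalent sketch that replaces the flag-and-break scan with any(); this is an illustrative rewrite under the same behavior, not the library's code, and it assumes self.seq_records is already populated:

def _validate_outgroup(self, outgroup):
    if not outgroup:
        self.outgroup = None
        return
    # Match the dataset convention: dashes become underscores.
    outgroup = outgroup.replace("-", "_")
    # any() over a generator short-circuits on the first match,
    # just like the explicit break in the original loop.
    if any(rec.voucher_code == outgroup for rec in self.seq_records):
        self.outgroup = outgroup
    else:
        raise ValueError(
            "The given outgroup {0!r} cannot be found in the "
            "input sequence records.".format(outgroup)
        )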
244,597
carlosp420/dataset-creator
dataset_creator/dataset.py
Dataset._prepare_data
def _prepare_data(self):
    """
    Creates named tuple with info needed to create a dataset.

    :return: named tuple
    """
    self._extract_genes()
    self._extract_total_number_of_chars()
    self._extract_number_of_taxa()
    self._extract_reading_frames()

    Data = namedtuple('Data', ['gene_codes', 'number_taxa', 'number_chars',
                               'seq_records', 'gene_codes_and_lengths',
                               'reading_frames'])
    self.data = Data(self.gene_codes, self.number_taxa, self.number_chars,
                     self.seq_records, self._gene_codes_and_lengths,
                     self.reading_frames)
python
def _prepare_data(self):
    """
    Creates named tuple with info needed to create a dataset.

    :return: named tuple
    """
    self._extract_genes()
    self._extract_total_number_of_chars()
    self._extract_number_of_taxa()
    self._extract_reading_frames()

    Data = namedtuple('Data', ['gene_codes', 'number_taxa', 'number_chars',
                               'seq_records', 'gene_codes_and_lengths',
                               'reading_frames'])
    self.data = Data(self.gene_codes, self.number_taxa, self.number_chars,
                     self.seq_records, self._gene_codes_and_lengths,
                     self.reading_frames)
[ "def", "_prepare_data", "(", "self", ")", ":", "self", ".", "_extract_genes", "(", ")", "self", ".", "_extract_total_number_of_chars", "(", ")", "self", ".", "_extract_number_of_taxa", "(", ")", "self", ".", "_extract_reading_frames", "(", ")", "Data", "=", "namedtuple", "(", "'Data'", ",", "[", "'gene_codes'", ",", "'number_taxa'", ",", "'number_chars'", ",", "'seq_records'", ",", "'gene_codes_and_lengths'", ",", "'reading_frames'", "]", ")", "self", ".", "data", "=", "Data", "(", "self", ".", "gene_codes", ",", "self", ".", "number_taxa", ",", "self", ".", "number_chars", ",", "self", ".", "seq_records", ",", "self", ".", "_gene_codes_and_lengths", ",", "self", ".", "reading_frames", ")" ]
Creates named tuple with info needed to create a dataset.

:return: named tuple
[ "Creates", "named", "tuple", "with", "info", "needed", "to", "create", "a", "dataset", "." ]
ea27340b145cb566a36c1836ff42263f1b2003a0
https://github.com/carlosp420/dataset-creator/blob/ea27340b145cb566a36c1836ff42263f1b2003a0/dataset_creator/dataset.py#L148-L164
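Once _prepare_data has run, the extracted values travel together as one immutable bundle on self.data. A small consumption sketch (the dataset variable and the dimensions line are invented for illustration):

# dataset is a hypothetical, already-initialized Dataset instance.
dataset._prepare_data()
data = dataset.data
# namedtuple fields are read by attribute; number_taxa and number_chars
# are stored as strings, so they interpolate directly into text output.
dimensions = "ntax={0} nchar={1}".format(data.number_taxa, data.number_chars)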
244,598
carlosp420/dataset-creator
dataset_creator/dataset.py
Dataset._extract_total_number_of_chars
def _extract_total_number_of_chars(self):
    """
    sets `self.number_chars` to the number of characters as string.
    """
    self._get_gene_codes_and_seq_lengths()
    sum = 0
    for seq_length in self._gene_codes_and_lengths.values():
        sum += sorted(seq_length, reverse=True)[0]
    self.number_chars = str(sum)
python
def _extract_total_number_of_chars(self):
    """
    sets `self.number_chars` to the number of characters as string.
    """
    self._get_gene_codes_and_seq_lengths()
    sum = 0
    for seq_length in self._gene_codes_and_lengths.values():
        sum += sorted(seq_length, reverse=True)[0]
    self.number_chars = str(sum)
[ "def", "_extract_total_number_of_chars", "(", "self", ")", ":", "self", ".", "_get_gene_codes_and_seq_lengths", "(", ")", "sum", "=", "0", "for", "seq_length", "in", "self", ".", "_gene_codes_and_lengths", ".", "values", "(", ")", ":", "sum", "+=", "sorted", "(", "seq_length", ",", "reverse", "=", "True", ")", "[", "0", "]", "self", ".", "number_chars", "=", "str", "(", "sum", ")" ]
sets `self.number_chars` to the number of characters as string.
[ "sets", "self", ".", "number_chars", "to", "the", "number", "of", "characters", "as", "string", "." ]
ea27340b145cb566a36c1836ff42263f1b2003a0
https://github.com/carlosp420/dataset-creator/blob/ea27340b145cb566a36c1836ff42263f1b2003a0/dataset_creator/dataset.py#L174-L183
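The method sums, over genes, the longest sequence observed for each gene; note that sorted(seq_length, reverse=True)[0] is simply max(seq_length), and the local name sum shadows the builtin. A worked standalone example with invented lengths:

gene_codes_and_lengths = {
    "COI": [658, 654, 658],   # longest: 658
    "EF1a": [1240, 1100],     # longest: 1240
}
# Sum of per-gene maxima, the same quantity the method computes.
total = sum(max(lengths) for lengths in gene_codes_and_lengths.values())
assert total == 1898
number_chars = str(total)  # stored as a string, as in the method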
244,599
carlosp420/dataset-creator
dataset_creator/dataset.py
Dataset._extract_number_of_taxa
def _extract_number_of_taxa(self):
    """
    sets `self.number_taxa` to the number of taxa as string
    """
    n_taxa = dict()
    for i in self.seq_records:
        if i.gene_code not in n_taxa:
            n_taxa[i.gene_code] = 0
        n_taxa[i.gene_code] += 1
    number_taxa = sorted([i for i in n_taxa.values()], reverse=True)[0]
    self.number_taxa = str(number_taxa)
python
def _extract_number_of_taxa(self):
    """
    sets `self.number_taxa` to the number of taxa as string
    """
    n_taxa = dict()
    for i in self.seq_records:
        if i.gene_code not in n_taxa:
            n_taxa[i.gene_code] = 0
        n_taxa[i.gene_code] += 1
    number_taxa = sorted([i for i in n_taxa.values()], reverse=True)[0]
    self.number_taxa = str(number_taxa)
[ "def", "_extract_number_of_taxa", "(", "self", ")", ":", "n_taxa", "=", "dict", "(", ")", "for", "i", "in", "self", ".", "seq_records", ":", "if", "i", ".", "gene_code", "not", "in", "n_taxa", ":", "n_taxa", "[", "i", ".", "gene_code", "]", "=", "0", "n_taxa", "[", "i", ".", "gene_code", "]", "+=", "1", "number_taxa", "=", "sorted", "(", "[", "i", "for", "i", "in", "n_taxa", ".", "values", "(", ")", "]", ",", "reverse", "=", "True", ")", "[", "0", "]", "self", ".", "number_taxa", "=", "str", "(", "number_taxa", ")" ]
sets `self.number_taxa` to the number of taxa as string
[ "sets", "self", ".", "number_taxa", "to", "the", "number", "of", "taxa", "as", "string" ]
ea27340b145cb566a36c1836ff42263f1b2003a0
https://github.com/carlosp420/dataset-creator/blob/ea27340b145cb566a36c1836ff42263f1b2003a0/dataset_creator/dataset.py#L201-L211
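Counting records per gene and taking the largest count is exactly what collections.Counter provides; an equivalent standalone sketch (the function name is hypothetical, not part of the library):

from collections import Counter

def extract_number_of_taxa(seq_records):
    # One record per (gene, taxon) pair, so the gene with the most
    # records fixes the taxon count reported for the dataset.
    counts = Counter(rec.gene_code for rec in seq_records)
    return str(max(counts.values()))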