Dataset columns (type and observed range per column):

repository_name: string, length 7 to 55
func_path_in_repository: string, length 4 to 223
func_name: string, length 1 to 134
whole_func_string: string, length 75 to 104k
language: string, 1 distinct value
func_code_string: string, length 75 to 104k
func_code_tokens: list, length 19 to 28.4k
func_documentation_string: string, length 1 to 46.9k
func_documentation_tokens: list, length 1 to 1.97k
split_name: string, 1 distinct value
func_code_url: string, length 87 to 315
kata198/AdvancedHTMLParser
AdvancedHTMLParser/SpecialAttributes.py
StyleAttribute.styleToDict
def styleToDict(styleStr):
    '''
        styleToDict - Gets a dictionary of style attribute/value pairs.

          NOTE: dash-names (like padding-top) are used here

        @return <OrderedDict> - Ordered mapping of style attribute names to values.
    '''
    styleStr = styleStr.strip()
    styles = styleStr.split(';') # Won't work for values containing a semicolon..
    styleDict = OrderedDict()

    for item in styles:
        try:
            splitIdx = item.index(':')
            name = item[:splitIdx].strip().lower()
            value = item[splitIdx+1:].strip()
            styleDict[name] = value
        except ValueError:
            # No ':' in this segment (e.g. a trailing empty chunk); skip it.
            continue

    return styleDict
python
styleToDict - Gets a dictionary of style attribute/value pairs. NOTE: dash-names (like padding-top) are used here @return <OrderedDict> - Ordered mapping of style attribute names to values.
[ "getStyleDict", "-", "Gets", "a", "dictionary", "of", "style", "attribute", "/", "value", "pairs", "." ]
train
https://github.com/kata198/AdvancedHTMLParser/blob/06aeea5d8e2ea86e155aae0fc237623d3e9b7f9d/AdvancedHTMLParser/SpecialAttributes.py#L638-L659
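A quick usage sketch for the record above (assuming "from collections import OrderedDict" is in scope, as in the source module): names are lower-cased, values keep their case, and empty segments such as a trailing semicolon are skipped.

from collections import OrderedDict  # required by styleToDict

print(styleToDict('Padding-Top: 4px; color: RED;'))
# OrderedDict([('padding-top', '4px'), ('color', 'RED')])

As the code's own comment warns, the naive split(';') would also break apart values that legitimately contain semicolons, for example data: URIs in a background property.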
kata198/AdvancedHTMLParser
AdvancedHTMLParser/SpecialAttributes.py
StyleAttribute._asStr
def _asStr(self):
    '''
        _asStr - Get the string representation of this style

            @return <str> - A string representation of this style (semicolon separated, key: value format)
    '''
    styleDict = self._styleDict
    if styleDict:
        return '; '.join([name + ': ' + value for name, value in styleDict.items()])
    return ''
python
_asStr - Get the string representation of this style @return <str> - A string representation of this style (semicolon separated, key: value format)
[ "_asStr", "-", "Get", "the", "string", "representation", "of", "this", "style" ]
train
https://github.com/kata198/AdvancedHTMLParser/blob/06aeea5d8e2ea86e155aae0fc237623d3e9b7f9d/AdvancedHTMLParser/SpecialAttributes.py#L738-L748
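A round-trip sketch tying this record to styleToDict above (assuming both helpers are imported from SpecialAttributes): the join below is exactly what _asStr does with a non-empty _styleDict.

from collections import OrderedDict

styleDict = OrderedDict([('padding-top', '4px'), ('color', 'red')])
print('; '.join([name + ': ' + value for name, value in styleDict.items()]))
# padding-top: 4px; color: red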
kata198/AdvancedHTMLParser
AdvancedHTMLParser/constants.py
_special_value_rows
def _special_value_rows(em):
    '''
        _special_value_rows - Handle "rows" special attribute, which differs if tagName is a textarea or frameset
    '''
    if em.tagName == 'textarea':
        return convertToIntRange(em.getAttribute('rows', 2), minValue=1, maxValue=None, invalidDefault=2)
    else:
        # frameset
        return em.getAttribute('rows', '')
python
_special_value_rows - Handle "rows" special attribute, which differs if tagName is a textarea or frameset
[ "_special_value_rows", "-", "Handle", "rows", "special", "attribute", "which", "differs", "if", "tagName", "is", "a", "textarea", "or", "frameset" ]
train
https://github.com/kata198/AdvancedHTMLParser/blob/06aeea5d8e2ea86e155aae0fc237623d3e9b7f9d/AdvancedHTMLParser/constants.py#L248-L256
kata198/AdvancedHTMLParser
AdvancedHTMLParser/constants.py
_special_value_cols
def _special_value_cols(em):
    '''
        _special_value_cols - Handle "cols" special attribute, which differs if tagName is a textarea or frameset
    '''
    if em.tagName == 'textarea':
        return convertToIntRange(em.getAttribute('cols', 20), minValue=1, maxValue=None, invalidDefault=20)
    else:
        # frameset
        return em.getAttribute('cols', '')
python
_special_value_cols - Handle "cols" special attribute, which differs if tagName is a textarea or frameset
[ "_special_value_cols", "-", "Handle", "cols", "special", "attribute", "which", "differs", "if", "tagName", "is", "a", "textarea", "or", "frameset" ]
train
https://github.com/kata198/AdvancedHTMLParser/blob/06aeea5d8e2ea86e155aae0fc237623d3e9b7f9d/AdvancedHTMLParser/constants.py#L258-L266
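convertToIntRange is defined elsewhere in constants.py and is not shown here; a simplified stand-in, inferred from the call sites in the two records above (an assumption, not the library's implementation), illustrates the clamping that the textarea branch relies on.

def convert_to_int_range(value, minValue, maxValue, invalidDefault):
    # Simplified stand-in: coerce to int, fall back to invalidDefault
    # when parsing fails or the result lands outside [minValue, maxValue].
    try:
        ret = int(value)
    except (TypeError, ValueError):
        return invalidDefault
    if minValue is not None and ret < minValue:
        return invalidDefault
    if maxValue is not None and ret > maxValue:
        return invalidDefault
    return ret

print(convert_to_int_range('5', 1, None, 2))     # 5
print(convert_to_int_range('-3', 1, None, 2))    # 2 (below minValue)
print(convert_to_int_range('abc', 1, None, 20))  # 20 (unparseable)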
kata198/AdvancedHTMLParser
AdvancedHTMLParser/constants.py
_special_value_autocomplete
def _special_value_autocomplete(em):
    '''
        handle "autocomplete" property, which has different behaviour for form vs input
    '''
    if em.tagName == 'form':
        return convertPossibleValues(em.getAttribute('autocomplete', 'on'), POSSIBLE_VALUES_ON_OFF, invalidDefault='on', emptyValue=EMPTY_IS_INVALID)

    # else: input
    return convertPossibleValues(em.getAttribute('autocomplete', ''), POSSIBLE_VALUES_ON_OFF, invalidDefault="", emptyValue='')
python
handle "autocomplete" property, which has different behaviour for form vs input"
[ "handle", "autocomplete", "property", "which", "has", "different", "behaviour", "for", "form", "vs", "input" ]
train
https://github.com/kata198/AdvancedHTMLParser/blob/06aeea5d8e2ea86e155aae0fc237623d3e9b7f9d/AdvancedHTMLParser/constants.py#L268-L275
kata198/AdvancedHTMLParser
AdvancedHTMLParser/constants.py
_special_value_size
def _special_value_size(em):
    '''
        handle "size" property, which has different behaviour for input vs everything else
    '''
    if em.tagName == 'input':
        # TODO: "size" on an input is implemented very weirdly. Negative values are treated as invalid,
        # A value of "0" raises an exception (and does not set HTML attribute)
        # No upper limit.
        return convertToPositiveInt(em.getAttribute('size', 20), invalidDefault=20)

    return em.getAttribute('size', '')
python
handle "size" property, which has different behaviour for input vs everything else
[ "handle", "size", "property", "which", "has", "different", "behaviour", "for", "input", "vs", "everything", "else" ]
train
https://github.com/kata198/AdvancedHTMLParser/blob/06aeea5d8e2ea86e155aae0fc237623d3e9b7f9d/AdvancedHTMLParser/constants.py#L277-L286
kata198/AdvancedHTMLParser
AdvancedHTMLParser/constants.py
_special_value_maxLength
def _special_value_maxLength(em, newValue=NOT_PROVIDED):
    '''
        _special_value_maxLength - Handle the special "maxLength" property

        @param em <AdvancedTag> - The tag element

        @param newValue - Default NOT_PROVIDED. If provided, that value is used
          instead of the tag's current .getAttribute value, because this method
          serves both validation (on set) and access (on get).
    '''
    if newValue is NOT_PROVIDED:
        if not em.hasAttribute('maxlength'):
            return -1
        curValue = em.getAttribute('maxlength', '-1')
        # If we are accessing, the invalid default should be negative
        invalidDefault = -1
    else:
        curValue = newValue
        # If we are setting, we should raise an exception upon invalid value
        invalidDefault = IndexSizeErrorException

    return convertToIntRange(curValue, minValue=0, maxValue=None, emptyValue='0', invalidDefault=invalidDefault)
python
_special_value_maxLength - Handle the special "maxLength" property @param em <AdvancedTag> - The tag element @param newValue - Default NOT_PROVIDED. If provided, that value is used instead of the tag's current .getAttribute value, because this method serves both validation (on set) and access (on get).
[ "_special_value_maxLength", "-", "Handle", "the", "special", "maxLength", "property" ]
train
https://github.com/kata198/AdvancedHTMLParser/blob/06aeea5d8e2ea86e155aae0fc237623d3e9b7f9d/AdvancedHTMLParser/constants.py#L300-L327
StanfordBioinformatics/loom
server/loomengine_server/api/models/data_objects.py
DataObject.get_by_value
def get_by_value(cls, value, type):
    """ Converts a value into a corresponding data object.
    For files, this looks up a file DataObject by name, uuid, and/or md5.
    For other types, it creates a new DataObject.
    """
    if type == 'file':
        return cls._get_file_by_value(value)
    else:
        data_object = DataObject(
            data={'value': cls._type_cast(value, type)},
            type=type)
        data_object.full_clean()
        data_object.save()
        return data_object
python
Converts a value into a corresponding data object. For files, this looks up a file DataObject by name, uuid, and/or md5. For other types, it creates a new DataObject.
[ "Converts", "a", "value", "into", "a", "corresponding", "data", "object", ".", "For", "files", "this", "looks", "up", "a", "file", "DataObject", "by", "name", "uuid", "and", "/", "or", "md5", ".", "For", "other", "types", "it", "creates", "a", "new", "DataObject", "." ]
train
https://github.com/StanfordBioinformatics/loom/blob/db2031a1a87124fee1aeb7414a668c03d774a698/server/loomengine_server/api/models/data_objects.py#L55-L67
StanfordBioinformatics/loom
server/loomengine_server/api/models/data_objects.py
DataObject._get_file_by_value
def _get_file_by_value(cls, value):
    """Look up a file DataObject by name, uuid, and/or md5.
    """
    # Ignore any FileResource with no DataObject. This is a typical state
    # for a deleted file that has not yet been cleaned up.
    queryset = FileResource.objects.exclude(data_object__isnull=True)
    matches = FileResource.filter_by_name_or_id_or_tag_or_hash(
        value, queryset=queryset)
    if matches.count() == 0:
        raise ValidationError(
            'No file found that matches value "%s"' % value)
    elif matches.count() > 1:
        match_id_list = ['%s@%s' % (match.filename, match.get_uuid())
                         for match in matches]
        match_id_string = ('", "'.join(match_id_list))
        raise ValidationError(
            'Multiple files were found matching value "%s": "%s". '
            'Use a more precise identifier to select just one file.' % (
                value, match_id_string))
    return matches.first().data_object
python
Look up a file DataObject by name, uuid, and/or md5.
[ "Look", "up", "a", "file", "DataObject", "by", "name", "uuid", "and", "/", "or", "md5", "." ]
train
https://github.com/StanfordBioinformatics/loom/blob/db2031a1a87124fee1aeb7414a668c03d774a698/server/loomengine_server/api/models/data_objects.py#L87-L106
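The '", "'.join call above is what renders the ambiguous matches as individually quoted identifiers inside the error message; a standalone illustration with made-up filenames:

match_id_list = ['input.fastq@1a2b3c4d', 'input.fastq@5e6f7a8b']
match_id_string = ('", "'.join(match_id_list))
print('Multiple files were found matching value "%s": "%s". '
      'Use a more precise identifier to select just one file.'
      % ('input.fastq', match_id_string))
# Multiple files were found matching value "input.fastq":
# "input.fastq@1a2b3c4d", "input.fastq@5e6f7a8b". Use a more precise
# identifier to select just one file.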
StanfordBioinformatics/loom
server/loomengine_server/api/models/data_objects.py
FileResource._get_run_breadcrumbs
def _get_run_breadcrumbs(cls, source_type, data_object, task_attempt):
    """Create a path for a given file, in such a way that files end up
    being organized and browsable by run
    """
    # We cannot generate the path unless connected to a TaskAttempt
    # and a run
    if not task_attempt:
        return []

    # If multiple tasks exist, use the original.
    task = task_attempt.tasks.earliest('datetime_created')
    if task is None:
        return []

    run = task.run
    if run is None:
        return []

    breadcrumbs = [
        run.name,
        "task-%s" % str(task.uuid)[0:8],
        "attempt-%s" % str(task_attempt.uuid)[0:8],
    ]

    # Include any ancestors if run is nested
    while run.parent is not None:
        run = run.parent
        breadcrumbs = [run.name] + breadcrumbs

    # Prepend first breadcrumb with datetime and id
    breadcrumbs[0] = "%s-%s-%s" % (
        run.datetime_created.strftime('%Y-%m-%dT%H.%M.%SZ'),
        str(run.uuid)[0:8],
        breadcrumbs[0])

    breadcrumbs = ['runs'] + breadcrumbs
    return breadcrumbs
python
Create a path for a given file, in such a way that files end up being organized and browsable by run
[ "Create", "a", "path", "for", "a", "given", "file", "in", "such", "a", "way", "that", "files", "end", "up", "being", "organized", "and", "browsable", "by", "run" ]
train
https://github.com/StanfordBioinformatics/loom/blob/db2031a1a87124fee1aeb7414a668c03d774a698/server/loomengine_server/api/models/data_objects.py#L384-L418
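For a task whose run is nested one level deep, the method returns a list of this shape (values illustrative, not taken from a real run):

['runs',
 '2019-01-01T00.00.00Z-aaaaaaaa-parent-workflow',  # root run: timestamp, uuid prefix, name
 'child-step',                                     # nested run name
 'task-bbbbbbbb',                                  # task uuid prefix
 'attempt-cccccccc']                               # attempt uuid prefix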
StanfordBioinformatics/loom
server/loomengine_server/api/models/base.py
FilterHelper.filter_by_name_or_id_or_tag
def filter_by_name_or_id_or_tag(self, query_string, queryset=None):
    """Find objects that match the identifier of form {name}@{ID},
    {name}, or @{ID}, where ID may be truncated
    """
    assert self.Model.NAME_FIELD, \
        'NAME_FIELD is missing on model %s' % self.Model.__name__
    assert self.Model.ID_FIELD, \
        'ID_FIELD is missing on model %s' % self.Model.__name__
    assert self.Model.TAG_FIELD, \
        'TAG_FIELD is missing on model %s' % self.Model.__name__

    filter_args = {}
    name, uuid, tag = self._parse_as_name_or_id_or_tag(query_string)
    if name is not None:
        filter_args[self.Model.NAME_FIELD] = name
    if uuid is not None:
        filter_args[self.Model.ID_FIELD+'__startswith'] = uuid
    if tag is not None:
        filter_args[self.Model.TAG_FIELD] = tag
    if queryset is None:
        queryset = self.Model.objects.all()
    return queryset.filter(**filter_args)
python
Find objects that match the identifier of form {name}@{ID}, {name}, or @{ID}, where ID may be truncated
[ "Find", "objects", "that", "match", "the", "identifier", "of", "form", "{", "name", "}" ]
train
https://github.com/StanfordBioinformatics/loom/blob/db2031a1a87124fee1aeb7414a668c03d774a698/server/loomengine_server/api/models/base.py#L126-L147
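_parse_as_name_or_id_or_tag is not shown in this file; a hypothetical minimal version covering only the name/ID forms (tags omitted) conveys the split the docstring describes:

def parse_as_name_or_id(query_string):
    # Hypothetical stand-in: split '{name}@{ID}', '{name}', or '@{ID}'.
    if '@' in query_string:
        name, _, uuid = query_string.partition('@')
        return (name or None), (uuid or None)
    return query_string, None

print(parse_as_name_or_id('sample.fastq@1a2b'))  # ('sample.fastq', '1a2b')
print(parse_as_name_or_id('@1a2b'))              # (None, '1a2b')
print(parse_as_name_or_id('sample.fastq'))       # ('sample.fastq', None)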
StanfordBioinformatics/loom
server/loomengine_server/api/models/base.py
BaseModel.save
def save(self, *args, **kwargs):
    """
    This save method protects against two processes concurrently modifying
    the same object. Normally the second save would silently overwrite the
    changes from the first. Instead we raise a ConcurrentModificationError.
    """
    cls = self.__class__
    if self.pk:
        rows = cls.objects.filter(
            pk=self.pk, _change=self._change).update(
                _change=self._change + 1)
        if not rows:
            raise ConcurrentModificationError(cls.__name__, self.pk)
        self._change += 1
    count = 0
    max_retries = 3
    while True:
        try:
            return super(BaseModel, self).save(*args, **kwargs)
        except django.db.utils.OperationalError:
            if count >= max_retries:
                raise
            count += 1
python
This save method protects against two processes concurrently modifying the same object. Normally the second save would silently overwrite the changes from the first. Instead we raise a ConcurrentModificationError.
[ "This", "save", "method", "protects", "against", "two", "processesses", "concurrently", "modifying", "the", "same", "object", ".", "Normally", "the", "second", "save", "would", "silently", "overwrite", "the", "changes", "from", "the", "first", ".", "Instead", "we", "raise", "a", "ConcurrentModificationError", "." ]
train
https://github.com/StanfordBioinformatics/loom/blob/db2031a1a87124fee1aeb7414a668c03d774a698/server/loomengine_server/api/models/base.py#L225-L248
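The filter(pk=..., _change=...).update(_change=... + 1) call is a database-side compare-and-swap: the UPDATE only touches a row whose version still matches the one this writer last read. A toy in-memory illustration of the same guard:

class ConcurrentModificationError(Exception):
    pass

row = {'pk': 1, '_change': 0}  # stands in for the database row

def guarded_update(expected_change):
    # Mirrors the filtered UPDATE: succeed only if the stored version
    # still equals the version this writer last saw, then bump it.
    if row['_change'] != expected_change:
        raise ConcurrentModificationError('BaseModel', row['pk'])
    row['_change'] = expected_change + 1

guarded_update(0)        # first writer succeeds; _change is now 1
try:
    guarded_update(0)    # second writer still holds stale version 0
except ConcurrentModificationError:
    print('stale write rejected')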
StanfordBioinformatics/loom
server/loomengine_server/api/models/base.py
BaseModel.setattrs_and_save_with_retries
def setattrs_and_save_with_retries(self, assignments, max_retries=5):
    """
    If the object is being edited by other processes, save may fail due to
    concurrent modification. This method recovers and retries the edit.

    assignments is a dict of {attribute: value}
    """
    count = 0
    obj = self
    while True:
        for attribute, value in assignments.iteritems():
            setattr(obj, attribute, value)
        try:
            obj.full_clean()
            obj.save()
        except ConcurrentModificationError:
            if count >= max_retries:
                raise SaveRetriesExceededError(
                    'Exceeded retries when saving "%s" of id "%s" '
                    'with assigned values "%s"' %
                    (self.__class__, self.id, assignments))
            count += 1
            obj = self.__class__.objects.get(id=self.id)
            continue
        return obj
python
If the object is being edited by other processes, save may fail due to concurrent modification. This method recovers and retries the edit. assignments is a dict of {attribute: value}
[ "If", "the", "object", "is", "being", "edited", "by", "other", "processes", "save", "may", "fail", "due", "to", "concurrent", "modification", ".", "This", "method", "recovers", "and", "retries", "the", "edit", "." ]
train
https://github.com/StanfordBioinformatics/loom/blob/db2031a1a87124fee1aeb7414a668c03d774a698/server/loomengine_server/api/models/base.py#L250-L275
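On a conflict the method re-fetches a fresh copy and ultimately returns it, so call sites should rebind rather than keep using the stale instance. A hypothetical call (the attribute name is illustrative, not taken from the model above):

task = task.setattrs_and_save_with_retries({'status': 'running'})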
StanfordBioinformatics/loom
server/loomengine_server/api/models/base.py
BaseModel.delete
def delete(self, *args, **kwargs):
    """
    This method implements retries for object deletion.
    """
    count = 0
    max_retries = 3
    while True:
        try:
            return super(BaseModel, self).delete(*args, **kwargs)
        except django.db.utils.OperationalError:
            if count >= max_retries:
                raise
            count += 1
python
This method implements retries for object deletion.
[ "This", "method", "implements", "retries", "for", "object", "deletion", "." ]
train
https://github.com/StanfordBioinformatics/loom/blob/db2031a1a87124fee1aeb7414a668c03d774a698/server/loomengine_server/api/models/base.py#L277-L289
StanfordBioinformatics/loom
client/loomengine/common.py
get_server_type
def get_server_type():
    """Checks server.ini for server type."""
    server_location_file = os.path.expanduser(SERVER_LOCATION_FILE)
    if not os.path.exists(server_location_file):
        raise Exception(
            "%s not found. Please run 'loom server set "
            "<servertype>' first." % server_location_file)
    config = ConfigParser.SafeConfigParser()
    config.read(server_location_file)
    server_type = config.get('server', 'type')
    return server_type
python
Checks server.ini for server type.
[ "Checks", "server", ".", "ini", "for", "server", "type", "." ]
train
https://github.com/StanfordBioinformatics/loom/blob/db2031a1a87124fee1aeb7414a668c03d774a698/client/loomengine/common.py#L110-L120
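The file read here is a stock INI file with a [server] section; a sketch that also tolerates the Python 3 module name (the code above uses the Python 2 spelling):

try:
    import configparser as ConfigParser  # Python 3
except ImportError:
    import ConfigParser                  # Python 2, as in the code above

# server.ini is expected to look like (value illustrative):
#   [server]
#   type = gcloud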
StanfordBioinformatics/loom
utils/loomengine_utils/import_manager.py
ImportManager._set_upload_status
def _set_upload_status(self, file_data_object, upload_status):
    """ Set file_data_object.file_resource.upload_status
    """
    uuid = file_data_object['uuid']
    return self.connection.update_data_object(
        uuid,
        {'uuid': uuid, 'value': {'upload_status': upload_status}}
    )
python
Set file_data_object.file_resource.upload_status
[ "Set", "file_data_object", ".", "file_resource", ".", "upload_status" ]
train
https://github.com/StanfordBioinformatics/loom/blob/db2031a1a87124fee1aeb7414a668c03d774a698/utils/loomengine_utils/import_manager.py#L406-L413
StanfordBioinformatics/loom
utils/loomengine_utils/import_manager.py
ImportManager._substitute_file_uuids_throughout_template
def _substitute_file_uuids_throughout_template(self, template, file_dependencies):
    """Anywhere "template" refers to a data object without giving a
    specific UUID, if a matching file can be found in "file_dependencies",
    change the data object reference to use that UUID. That way templates
    prefer to connect to files nested under their ".dependencies" over
    files that were previously imported to the server.
    """
    if not isinstance(template, dict):
        # Nothing to do if this is a reference to a previously imported template.
        return
    for input in template.get('inputs', []):
        self._substitute_file_uuids_in_input(input, file_dependencies)
    for step in template.get('steps', []):
        self._substitute_file_uuids_throughout_template(step, file_dependencies)
python
Anywhere in "template" that refers to a data object but does not give a specific UUID, if a matching file can be found in "file_dependencies", we will change the data object reference to use that UUID. That way templates have a preference to connect to files nested under their ".dependencies" over files that were previously imported to the server.
[ "Anywhere", "in", "template", "that", "refers", "to", "a", "data", "object", "but", "does", "not", "give", "a", "specific", "UUID", "if", "a", "matching", "file", "can", "be", "found", "in", "file_dependencies", "we", "will", "change", "the", "data", "object", "reference", "to", "use", "that", "UUID", ".", "That", "way", "templates", "have", "a", "preference", "to", "connect", "to", "files", "nested", "under", "their", ".", "dependencies", "over", "files", "that", "were", "previously", "imported", "to", "the", "server", "." ]
train
https://github.com/StanfordBioinformatics/loom/blob/db2031a1a87124fee1aeb7414a668c03d774a698/utils/loomengine_utils/import_manager.py#L614-L627
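The walker expects a dict of this shape; a hypothetical template fragment (field names inferred from the code above, contents illustrative):

template = {
    'inputs': [
        {'channel': 'reads', 'data': {'contents': 'sample.fastq'}},
    ],
    'steps': [
        # Each step is itself a template, so substitution recurses into it.
        {'inputs': [{'channel': 'ref', 'data': {'contents': 'genome.fa'}}]},
        # A non-dict entry is a reference to an already imported template
        # and is skipped by the isinstance check.
        'already-imported-template@1a2b3c4d',
    ],
}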
StanfordBioinformatics/loom
client/loomengine/run.py
RunStart._get_inputs
def _get_inputs(self):
    """Converts command line args into a list of template inputs
    """
    # Convert file inputs to a dict, to make it easier to override
    # them with commandline inputs
    file_inputs = self._get_file_inputs()
    try:
        jsonschema.validate(file_inputs, file_input_schema)
    except jsonschema.ValidationError:
        raise SystemExit("ERROR! Input file was invalid")
    input_dict = {}
    for (channel, input_id) in file_inputs.iteritems():
        input_dict[channel] = input_id

    if self.args.inputs:
        for kv_pair in self.args.inputs:
            (channel, input_id) = kv_pair.split('=')
            input_dict[channel] = self._parse_string_to_nested_lists(
                input_id)

    inputs = []
    for (channel, contents) in input_dict.iteritems():
        inputs.append({
            'channel': channel,
            'data': {'contents': contents}
        })
    return inputs
python
Converts command line args into a list of template inputs
[ "Converts", "command", "line", "args", "into", "a", "list", "of", "template", "inputs" ]
train
https://github.com/StanfordBioinformatics/loom/blob/db2031a1a87124fee1aeb7414a668c03d774a698/client/loomengine/run.py#L113-L141
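Stripped of schema validation and nested-list parsing, the merge reduces to "file inputs first, command-line inputs win"; a simplified sketch:

file_inputs = {'reads': 'old.fastq'}           # from the user inputs file
cli_inputs = ['reads=new.fastq', 'threads=4']  # from --inputs on the CLI

input_dict = dict(file_inputs)
for kv_pair in cli_inputs:
    channel, _, input_id = kv_pair.partition('=')
    input_dict[channel] = input_id             # CLI value overrides the file value

inputs = [{'channel': c, 'data': {'contents': v}}
          for c, v in sorted(input_dict.items())]
print([i['channel'] for i in inputs])
# ['reads', 'threads']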
StanfordBioinformatics/loom
client/loomengine/run.py
RunStart._parse_string_to_nested_lists
def _parse_string_to_nested_lists(self, value):
    """e.g., convert "[[a,b,c],[d,e],[f,g]]"
    into [["a","b","c"],["d","e"],["f","g"]]
    """
    if not re.match(r'\[.*\]', value.strip()):
        if '[' in value or ']' in value or ',' in value:
            raise Exception('Missing outer brace')
        elif len(value.strip()) == 0:
            raise Exception('Missing value')
        else:
            terms = value.split(',')
            terms = [term.strip() for term in terms]
            if len(terms) == 1:
                return terms[0]
            else:
                return terms

    # Remove outer braces, then split on top-level commas and recurse
    # into each term.
    value = value[1:-1]
    terms = []
    depth = 0
    leftmost = 0
    for i in range(len(value)):
        if value[i] == ',' and depth == 0:
            terms.append(
                self._parse_string_to_nested_lists(value[leftmost:i]))
            leftmost = i+1
        if value[i] == '[':
            depth += 1
        if value[i] == ']':
            depth -= 1
            if depth < 0:
                raise Exception('Unbalanced close brace')
    if depth > 0:
        raise Exception('Expected "]"')
    terms.append(
        self._parse_string_to_nested_lists(value[leftmost:len(value)]))
    return terms
python
e.g., convert "[[a,b,c],[d,e],[f,g]]" into [["a","b","c"],["d","e"],["f","g"]]
[ "e", ".", "g", ".", "convert", "[[", "a", "b", "c", "]", "[", "d", "e", "]", "[", "f", "g", "]]", "into", "[[", "a", "b", "c", "]", "[", "d", "e", "]", "[", "f", "g", "]]" ]
train
https://github.com/StanfordBioinformatics/loom/blob/db2031a1a87124fee1aeb7414a668c03d774a698/client/loomengine/run.py#L179-L221
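Observed behaviour on a few inputs (shown as comments, since the method only uses self for recursion): a bare scalar comes back as a string rather than a one-element list, and a comma or brace outside a complete [...] wrapper is rejected.

# parse('[[a,b,c],[d,e],[f,g]]')  ->  [['a', 'b', 'c'], ['d', 'e'], ['f', 'g']]
# parse('[a,[b,c]]')              ->  ['a', ['b', 'c']]
# parse('[a,b,c]')                ->  ['a', 'b', 'c']
# parse('a')                      ->  'a'   (bare scalar, not a list)
# parse('a,b')                    ->  raises Exception('Missing outer brace')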
StanfordBioinformatics/loom
client/loomengine/run.py
RunRestart._get_inputs
def _get_inputs(self, old_inputs):
    """Converts command line args into a list of template inputs
    """
    # Convert inputs to dict to facilitate overriding by channel name.
    # Also, drop DataNode ID and keep only contents.
    input_dict = {}
    for input in old_inputs:
        # Strip out DataNode UUID and URL
        input['data'] = {'contents': input['data']['contents']}
        input_dict[input['channel']] = input

    file_inputs = self._get_file_inputs()
    try:
        jsonschema.validate(file_inputs, file_input_schema)
    except jsonschema.ValidationError:
        raise SystemExit("ERROR! User inputs file is not valid")
    for (channel, input_id) in file_inputs.iteritems():
        input_dict[channel] = {
            'channel': channel,
            'data': {'contents': input_id}
        }

    # Override with cli user inputs if specified
    if self.args.inputs:
        for kv_pair in self.args.inputs:
            (channel, input_id) = kv_pair.split('=')
            input_dict[channel] = {
                'channel': channel,
                'data': {
                    'contents': self._parse_string_to_nested_lists(input_id)}
            }
    return input_dict.values()
python
Converts command line args into a list of template inputs
[ "Converts", "command", "line", "args", "into", "a", "list", "of", "template", "inputs" ]
train
https://github.com/StanfordBioinformatics/loom/blob/db2031a1a87124fee1aeb7414a668c03d774a698/client/loomengine/run.py#L285-L316
StanfordBioinformatics/loom
server/loomengine_server/api/serializers/__init__.py
CreateWithParentModelSerializer.create
def create(self, validated_data):
    """ This is a standard method called indirectly by calling
    'save' on the serializer.

    This method expects the 'parent_field' and 'parent_instance' to
    be included in the Serializer context.
    """
    if self.context.get('parent_field') \
       and self.context.get('parent_instance'):
        validated_data.update({
            self.context.get('parent_field'):
            self.context.get('parent_instance')})
    instance = self.Meta.model(**validated_data)
    instance.full_clean()
    instance.save()
    return instance
python
This is a standard method called indirectly by calling 'save' on the serializer. This method expects the 'parent_field' and 'parent_instance' to be included in the Serializer context.
[ "This", "is", "a", "standard", "method", "called", "indirectly", "by", "calling", "save", "on", "the", "serializer", "." ]
train
https://github.com/StanfordBioinformatics/loom/blob/db2031a1a87124fee1aeb7414a668c03d774a698/server/loomengine_server/api/serializers/__init__.py#L42-L57
StanfordBioinformatics/loom
utils/loomengine_utils/connection.py
disable_insecure_request_warning
def disable_insecure_request_warning():
    """Suppress warning about untrusted SSL certificate."""
    import requests
    from requests.packages.urllib3.exceptions import InsecureRequestWarning
    requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
python
Suppress warning about untrusted SSL certificate.
[ "Suppress", "warning", "about", "untrusted", "SSL", "certificate", "." ]
train
https://github.com/StanfordBioinformatics/loom/blob/db2031a1a87124fee1aeb7414a668c03d774a698/utils/loomengine_utils/connection.py#L14-L18
StanfordBioinformatics/loom
utils/loomengine_utils/connection.py
Connection._make_request_to_server
def _make_request_to_server(self, query_function, raise_for_status=True,
                            time_limit_seconds=2, retry_delay_seconds=0.2):
    """Retry sending request until timeout or until receiving a response.
    """
    start_time = datetime.datetime.now()
    while datetime.datetime.now() - start_time < datetime.timedelta(
            0, time_limit_seconds):
        error = None
        response = None
        try:
            response = query_function()
        except requests.exceptions.ConnectionError as e:
            error = ServerConnectionError(
                "No response from server.\n%s" % e)
        except:
            if response:
                logger.info(response.text)
            raise
        if response is not None and raise_for_status:
            # raises requests.exceptions.HTTPError
            self._raise_for_status(response)
        if error:
            time.sleep(retry_delay_seconds)
            continue
        else:
            return response
    raise error
python
Retry sending request until timeout or until receiving a response.
[ "Retry", "sending", "request", "until", "timeout", "or", "until", "receiving", "a", "response", "." ]
train
https://github.com/StanfordBioinformatics/loom/blob/db2031a1a87124fee1aeb7414a668c03d774a698/utils/loomengine_utils/connection.py#L108-L134
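A standalone sketch of the retry pattern used above, with illustrative names (not part of the Loom API): poll until the deadline, sleep between failed attempts, and re-raise the last connection error on timeout.
import datetime
import time

def poll_until_deadline(query_function, time_limit_seconds=2,
                        retry_delay_seconds=0.2):
    # Same shape as _make_request_to_server: keep calling until the
    # deadline passes, retrying only on connection errors.
    start_time = datetime.datetime.now()
    error = None
    while datetime.datetime.now() - start_time < datetime.timedelta(
            0, time_limit_seconds):
        try:
            return query_function()
        except ConnectionError as e:  # builtin in Python 3
            error = e
            time.sleep(retry_delay_seconds)
    raise error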
StanfordBioinformatics/loom
utils/loomengine_utils/connection.py
Connection._get_resource
def _get_resource(self, relative_url, params=None): """Convenience function for retrieving a resource. If resource does not exist, return None. """ response = self._get(relative_url, params=params, raise_for_status=False) if response.status_code == 404: return None self._raise_for_status(response) return response.json()
python
def _get_resource(self, relative_url, params=None): """Convenience function for retrieving a resource. If resource does not exist, return None. """ response = self._get(relative_url, params=params, raise_for_status=False) if response.status_code == 404: return None self._raise_for_status(response) return response.json()
[ "def", "_get_resource", "(", "self", ",", "relative_url", ",", "params", "=", "None", ")", ":", "response", "=", "self", ".", "_get", "(", "relative_url", ",", "params", "=", "params", ",", "raise_for_status", "=", "False", ")", "if", "response", ".", "status_code", "==", "404", ":", "return", "None", "self", ".", "_raise_for_status", "(", "response", ")", "return", "response", ".", "json", "(", ")" ]
Convenience function for retrieving a resource. If resource does not exist, return None.
[ "Convenience", "function", "for", "retrieving", "a", "resource", ".", "If", "resource", "does", "not", "exist", "return", "None", "." ]
train
https://github.com/StanfordBioinformatics/loom/blob/db2031a1a87124fee1aeb7414a668c03d774a698/utils/loomengine_utils/connection.py#L172-L180
StanfordBioinformatics/loom
worker/loomengine_worker/inputs.py
TaskAttemptInput
def TaskAttemptInput(input, task_attempt): """Returns the correct Input class for a given data type and gather mode """ (data_type, mode) = _get_input_info(input) if data_type != 'file': return NoOpInput(None, task_attempt) if mode == 'no_gather': return FileInput(input['data']['contents'], task_attempt) else: assert mode.startswith('gather') return FileListInput(input['data']['contents'], task_attempt)
python
def TaskAttemptInput(input, task_attempt): """Returns the correct Input class for a given data type and gather mode """ (data_type, mode) = _get_input_info(input) if data_type != 'file': return NoOpInput(None, task_attempt) if mode == 'no_gather': return FileInput(input['data']['contents'], task_attempt) else: assert mode.startswith('gather') return FileListInput(input['data']['contents'], task_attempt)
[ "def", "TaskAttemptInput", "(", "input", ",", "task_attempt", ")", ":", "(", "data_type", ",", "mode", ")", "=", "_get_input_info", "(", "input", ")", "if", "data_type", "!=", "'file'", ":", "return", "NoOpInput", "(", "None", ",", "task_attempt", ")", "if", "mode", "==", "'no_gather'", ":", "return", "FileInput", "(", "input", "[", "'data'", "]", "[", "'contents'", "]", ",", "task_attempt", ")", "else", ":", "assert", "mode", ".", "startswith", "(", "'gather'", ")", "return", "FileListInput", "(", "input", "[", "'data'", "]", "[", "'contents'", "]", ",", "task_attempt", ")" ]
Returns the correct Input class for a given data type and gather mode
[ "Returns", "the", "correct", "Input", "class", "for", "a", "given", "data", "type", "and", "gather", "mode" ]
train
https://github.com/StanfordBioinformatics/loom/blob/db2031a1a87124fee1aeb7414a668c03d774a698/worker/loomengine_worker/inputs.py#L83-L97
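A minimal standalone restatement of the dispatch, with class names as strings (a sketch, not the Loom classes themselves):
def pick_input_class(data_type, mode):
    # Mirrors the branches in TaskAttemptInput above.
    if data_type != 'file':
        return 'NoOpInput'
    if mode == 'no_gather':
        return 'FileInput'
    assert mode.startswith('gather')
    return 'FileListInput'

print(pick_input_class('string', 'no_gather'))  # NoOpInput
print(pick_input_class('file', 'gather'))       # FileListInput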
StanfordBioinformatics/loom
server/loomengine_server/api/async.py
execute
def execute(task_function, *args, **kwargs): """Run a task asynchronously """ if get_setting('TEST_DISABLE_ASYNC_DELAY'): # Delay disabled, run synchronously logger.debug('Running function "%s" synchronously because '\ 'TEST_DISABLE_ASYNC_DELAY is True' % task_function.__name__) return task_function(*args, **kwargs) db.connections.close_all() task_function.delay(*args, **kwargs)
python
def execute(task_function, *args, **kwargs): """Run a task asynchronously """ if get_setting('TEST_DISABLE_ASYNC_DELAY'): # Delay disabled, run synchronously logger.debug('Running function "%s" synchronously because '\ 'TEST_DISABLE_ASYNC_DELAY is True' % task_function.__name__) return task_function(*args, **kwargs) db.connections.close_all() task_function.delay(*args, **kwargs)
[ "def", "execute", "(", "task_function", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "get_setting", "(", "'TEST_DISABLE_ASYNC_DELAY'", ")", ":", "# Delay disabled, run synchronously", "logger", ".", "debug", "(", "'Running function \"%s\" synchronously because '", "'TEST_DISABLE_ASYNC_DELAY is True'", "%", "task_function", ".", "__name__", ")", "return", "task_function", "(", "*", "args", ",", "*", "*", "kwargs", ")", "db", ".", "connections", ".", "close_all", "(", ")", "task_function", ".", "delay", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
Run a task asynchronously
[ "Run", "a", "task", "asynchronously" ]
train
https://github.com/StanfordBioinformatics/loom/blob/db2031a1a87124fee1aeb7414a668c03d774a698/server/loomengine_server/api/async.py#L24-L36
StanfordBioinformatics/loom
server/loomengine_server/api/async.py
execute_with_delay
def execute_with_delay(task_function, *args, **kwargs): """Run a task asynchronously after at least delay_seconds """ delay = kwargs.pop('delay', 0) if get_setting('TEST_DISABLE_ASYNC_DELAY'): # Delay disabled, run synchronously logger.debug('Running function "%s" synchronously because '\ 'TEST_DISABLE_ASYNC_DELAY is True' % task_function.__name__) return task_function(*args, **kwargs) db.connections.close_all() task_function.apply_async(args=args, kwargs=kwargs, countdown=delay)
python
def execute_with_delay(task_function, *args, **kwargs): """Run a task asynchronously after at least delay_seconds """ delay = kwargs.pop('delay', 0) if get_setting('TEST_DISABLE_ASYNC_DELAY'): # Delay disabled, run synchronously logger.debug('Running function "%s" synchronously because '\ 'TEST_DISABLE_ASYNC_DELAY is True' % task_function.__name__) return task_function(*args, **kwargs) db.connections.close_all() task_function.apply_async(args=args, kwargs=kwargs, countdown=delay)
[ "def", "execute_with_delay", "(", "task_function", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "delay", "=", "kwargs", ".", "pop", "(", "'delay'", ",", "0", ")", "if", "get_setting", "(", "'TEST_DISABLE_ASYNC_DELAY'", ")", ":", "# Delay disabled, run synchronously", "logger", ".", "debug", "(", "'Running function \"%s\" synchronously because '", "'TEST_DISABLE_ASYNC_DELAY is True'", "%", "task_function", ".", "__name__", ")", "return", "task_function", "(", "*", "args", ",", "*", "*", "kwargs", ")", "db", ".", "connections", ".", "close_all", "(", ")", "task_function", ".", "apply_async", "(", "args", "=", "args", ",", "kwargs", "=", "kwargs", ",", "countdown", "=", "delay", ")" ]
Run a task asynchronously after at least delay_seconds
[ "Run", "a", "task", "asynchronously", "after", "at", "least", "delay_seconds" ]
train
https://github.com/StanfordBioinformatics/loom/blob/db2031a1a87124fee1aeb7414a668c03d774a698/server/loomengine_server/api/async.py#L38-L50
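Note that 'delay' rides along in kwargs and is popped before dispatch, so the wrapped task never receives it. A runnable sketch of that pattern (names here are illustrative):
def dispatch_with_delay(task_function, *args, **kwargs):
    delay = kwargs.pop('delay', 0)  # consumed here, not forwarded
    print('would schedule %s in %ss with kwargs %s'
          % (task_function.__name__, delay, kwargs))

def send_report(report_id):  # stand-in for a real Celery task
    pass

dispatch_with_delay(send_report, report_id=42, delay=30)
# -> would schedule send_report in 30s with kwargs {'report_id': 42}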
StanfordBioinformatics/loom
server/loomengine_server/api/async.py
check_for_stalled_tasks
def check_for_stalled_tasks(): """Check for tasks that are no longer sending a heartbeat """ from api.models.tasks import Task for task in Task.objects.filter(status_is_running=True): if not task.is_responsive(): task.system_error() if task.is_timed_out(): task.timeout_error()
python
def check_for_stalled_tasks(): """Check for tasks that are no longer sending a heartbeat """ from api.models.tasks import Task for task in Task.objects.filter(status_is_running=True): if not task.is_responsive(): task.system_error() if task.is_timed_out(): task.timeout_error()
[ "def", "check_for_stalled_tasks", "(", ")", ":", "from", "api", ".", "models", ".", "tasks", "import", "Task", "for", "task", "in", "Task", ".", "objects", ".", "filter", "(", "status_is_running", "=", "True", ")", ":", "if", "not", "task", ".", "is_responsive", "(", ")", ":", "task", ".", "system_error", "(", ")", "if", "task", ".", "is_timed_out", "(", ")", ":", "task", ".", "timeout_error", "(", ")" ]
Check for tasks that are no longer sending a heartbeat
[ "Check", "for", "tasks", "that", "are", "no", "longer", "sending", "a", "heartbeat" ]
train
https://github.com/StanfordBioinformatics/loom/blob/db2031a1a87124fee1aeb7414a668c03d774a698/server/loomengine_server/api/async.py#L55-L63
StanfordBioinformatics/loom
server/loomengine_server/api/async.py
check_for_missed_cleanup
def check_for_missed_cleanup(): """Check for TaskAttempts that were never cleaned up """ if get_setting('PRESERVE_ALL'): return from api.models.tasks import TaskAttempt if get_setting('PRESERVE_ON_FAILURE'): for task_attempt in TaskAttempt.objects.filter( status_is_running=False).filter( status_is_cleaned_up=False).exclude( status_is_failed=True): task_attempt.cleanup() else: for task_attempt in TaskAttempt.objects.filter( status_is_running=False).filter(status_is_cleaned_up=False): task_attempt.cleanup()
python
def check_for_missed_cleanup(): """Check for TaskAttempts that were never cleaned up """ if get_setting('PRESERVE_ALL'): return from api.models.tasks import TaskAttempt if get_setting('PRESERVE_ON_FAILURE'): for task_attempt in TaskAttempt.objects.filter( status_is_running=False).filter( status_is_cleaned_up=False).exclude( status_is_failed=True): task_attempt.cleanup() else: for task_attempt in TaskAttempt.objects.filter( status_is_running=False).filter(status_is_cleaned_up=False): task_attempt.cleanup()
[ "def", "check_for_missed_cleanup", "(", ")", ":", "if", "get_setting", "(", "'PRESERVE_ALL'", ")", ":", "return", "from", "api", ".", "models", ".", "tasks", "import", "TaskAttempt", "if", "get_setting", "(", "'PRESERVE_ON_FAILURE'", ")", ":", "for", "task_attempt", "in", "TaskAttempt", ".", "objects", ".", "filter", "(", "status_is_running", "=", "False", ")", ".", "filter", "(", "status_is_cleaned_up", "=", "False", ")", ".", "exclude", "(", "status_is_failed", "=", "True", ")", ":", "task_attempt", ".", "cleanup", "(", ")", "else", ":", "for", "task_attempt", "in", "TaskAttempt", ".", "objects", ".", "filter", "(", "status_is_running", "=", "False", ")", ".", "filter", "(", "status_is_cleaned_up", "=", "False", ")", ":", "task_attempt", ".", "cleanup", "(", ")" ]
Check for TaskAttempts that were never cleaned up
[ "Check", "for", "TaskAttempts", "that", "were", "never", "cleaned", "up" ]
train
https://github.com/StanfordBioinformatics/loom/blob/db2031a1a87124fee1aeb7414a668c03d774a698/server/loomengine_server/api/async.py#L66-L81
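The filter chain keeps exactly the attempts that are finished, not yet cleaned up, and (when PRESERVE_ON_FAILURE is set) not failed. As a plain-Python predicate:
def should_clean(running, cleaned_up, failed, preserve_on_failure):
    # Equivalent to the two querysets above.
    if running or cleaned_up:
        return False
    if preserve_on_failure and failed:
        return False
    return True

print(should_clean(False, False, True, True))   # False: kept for debugging
print(should_clean(False, False, True, False))  # True: cleaned up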
StanfordBioinformatics/loom
utils/loomengine_utils/__init__.py
execute_with_retries
def execute_with_retries(retryable_function,
                         retryable_errors,
                         logger,
                         human_readable_action_name='Action',
                         nonretryable_errors=None):
    """This attempts to execute "retryable_function" with exponential backoff
    on delay time.
    10 retries adds up to about 34 minutes total delay before the last attempt.
    "human_readable_action_name" is an optional input to customize retry
    message.
    """
    max_retries = 10
    attempt = 0
    if not nonretryable_errors:
        nonretryable_errors = ()
    while True:
        try:
            return retryable_function()
        except tuple(nonretryable_errors):
            raise
        except tuple(retryable_errors) as e:
            attempt += 1
            if attempt > max_retries:
                raise
            # Exponential backoff on retry delay as suggested by
            # https://cloud.google.com/storage/docs/exponential-backoff
            delay = 2**attempt + random.random()
            logger.info('"%s" failed with error "%s". '\
                        'Retry number %s of %s in %s seconds'
                        % (human_readable_action_name, str(e),
                           attempt, max_retries, delay))
            time.sleep(delay)
python
def execute_with_retries(retryable_function,
                         retryable_errors,
                         logger,
                         human_readable_action_name='Action',
                         nonretryable_errors=None):
    """This attempts to execute "retryable_function" with exponential backoff
    on delay time.
    10 retries adds up to about 34 minutes total delay before the last attempt.
    "human_readable_action_name" is an optional input to customize retry
    message.
    """
    max_retries = 10
    attempt = 0
    if not nonretryable_errors:
        nonretryable_errors = ()
    while True:
        try:
            return retryable_function()
        except tuple(nonretryable_errors):
            raise
        except tuple(retryable_errors) as e:
            attempt += 1
            if attempt > max_retries:
                raise
            # Exponential backoff on retry delay as suggested by
            # https://cloud.google.com/storage/docs/exponential-backoff
            delay = 2**attempt + random.random()
            logger.info('"%s" failed with error "%s". '\
                        'Retry number %s of %s in %s seconds'
                        % (human_readable_action_name, str(e),
                           attempt, max_retries, delay))
            time.sleep(delay)
[ "def", "execute_with_retries", "(", "retryable_function", ",", "retryable_errors", ",", "logger", ",", "human_readable_action_name", "=", "'Action'", ",", "nonretryable_errors", "=", "None", ")", ":", "max_retries", "=", "10", "attempt", "=", "0", "if", "not", "nonretryable_errors", ":", "nonretryable_errors", "=", "(", ")", "while", "True", ":", "try", ":", "return", "retryable_function", "(", ")", "except", "tuple", "(", "nonretryable_errors", ")", ":", "raise", "except", "tuple", "(", "retryable_errors", ")", "as", "e", ":", "attempt", "+=", "1", "if", "attempt", ">", "max_retries", ":", "raise", "# Exponential backoff on retry delay as suggested by", "# https://cloud.google.com/storage/docs/exponential-backoff", "delay", "=", "2", "**", "attempt", "+", "random", ".", "random", "(", ")", "logger", ".", "info", "(", "'\"%s\" failed with error \"%s\". '", "'Retry number %s of %s in %s seconds'", "%", "(", "human_readable_action_name", ",", "str", "(", "e", ")", ",", "attempt", ",", "max_retries", ",", "delay", ")", ")", "time", ".", "sleep", "(", "delay", ")" ]
This attempts to execute "retryable_function" with exponential backoff on delay time. 10 retries adds up to about 34 minutes total delay before the last attempt. "human_readable_action_name" is an optional input to customize retry message.
[ "This", "attempts", "to", "execute", "retryable_function", "with", "exponential", "backoff", "on", "delay", "time", ".", "10", "retries", "adds", "up", "to", "about", "34", "minutes", "total", "delay", "before", "the", "last", "attempt", ".", "human_readable_action_name", "is", "an", "optional", "input", "to", "customize", "retry", "message", "." ]
train
https://github.com/StanfordBioinformatics/loom/blob/db2031a1a87124fee1aeb7414a668c03d774a698/utils/loomengine_utils/__init__.py#L18-L48
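The docstring's "about 34 minutes" follows from the backoff schedule: delays of 2**1 through 2**10 seconds plus jitter sum to roughly 2046 seconds. A quick check:
import random

total = sum(2**attempt + random.random() for attempt in range(1, 11))
print('total back-off before the final attempt: ~%d seconds' % total)
# ~2050 seconds, i.e. about 34 minutes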
StanfordBioinformatics/loom
utils/loomengine_utils/export_manager.py
ExportManager.export_file
def export_file(self, data_object, destination_directory=None, destination_filename=None, retry=False, export_metadata=False, export_raw_file=True): """Export a file from Loom to some file storage location. Default destination_directory is cwd. Default destination_filename is the filename from the file data object associated with the given file_id. """ if not destination_directory: destination_directory = os.getcwd() # We get filename from the dataobject if not destination_filename: destination_filename = data_object['value']['filename'] destination_file_url = os.path.join(destination_directory, destination_filename) logger.info('Exporting file %s@%s ...' % ( data_object['value']['filename'], data_object['uuid'])) if export_raw_file: destination = File( destination_file_url, self.storage_settings, retry=retry) if destination.exists(): raise FileAlreadyExistsError( 'File already exists at %s' % destination_file_url) logger.info('...copying file to %s' % ( destination.get_url())) # Copy from the first file location file_resource = data_object.get('value') md5 = file_resource.get('md5') source_url = data_object['value']['file_url'] File(source_url, self.storage_settings, retry=retry).copy_to( destination, expected_md5=md5) data_object['value'] = self._create_new_file_resource( data_object['value'], destination.get_url()) else: logger.info('...skipping raw file') if export_metadata: data_object['value'].pop('link', None) data_object['value'].pop('upload_status', None) destination_metadata_url = os.path.join( destination_file_url + '.metadata.yaml') logger.info('...writing metadata to %s' % destination_metadata_url) metadata = yaml.safe_dump(data_object, default_flow_style=False) metadata_file = File(destination_metadata_url, self.storage_settings, retry=retry) metadata_file.write(metadata) else: logger.info('...skipping metadata') logger.info('...finished file export')
python
def export_file(self, data_object, destination_directory=None, destination_filename=None, retry=False, export_metadata=False, export_raw_file=True): """Export a file from Loom to some file storage location. Default destination_directory is cwd. Default destination_filename is the filename from the file data object associated with the given file_id. """ if not destination_directory: destination_directory = os.getcwd() # We get filename from the dataobject if not destination_filename: destination_filename = data_object['value']['filename'] destination_file_url = os.path.join(destination_directory, destination_filename) logger.info('Exporting file %s@%s ...' % ( data_object['value']['filename'], data_object['uuid'])) if export_raw_file: destination = File( destination_file_url, self.storage_settings, retry=retry) if destination.exists(): raise FileAlreadyExistsError( 'File already exists at %s' % destination_file_url) logger.info('...copying file to %s' % ( destination.get_url())) # Copy from the first file location file_resource = data_object.get('value') md5 = file_resource.get('md5') source_url = data_object['value']['file_url'] File(source_url, self.storage_settings, retry=retry).copy_to( destination, expected_md5=md5) data_object['value'] = self._create_new_file_resource( data_object['value'], destination.get_url()) else: logger.info('...skipping raw file') if export_metadata: data_object['value'].pop('link', None) data_object['value'].pop('upload_status', None) destination_metadata_url = os.path.join( destination_file_url + '.metadata.yaml') logger.info('...writing metadata to %s' % destination_metadata_url) metadata = yaml.safe_dump(data_object, default_flow_style=False) metadata_file = File(destination_metadata_url, self.storage_settings, retry=retry) metadata_file.write(metadata) else: logger.info('...skipping metadata') logger.info('...finished file export')
[ "def", "export_file", "(", "self", ",", "data_object", ",", "destination_directory", "=", "None", ",", "destination_filename", "=", "None", ",", "retry", "=", "False", ",", "export_metadata", "=", "False", ",", "export_raw_file", "=", "True", ")", ":", "if", "not", "destination_directory", ":", "destination_directory", "=", "os", ".", "getcwd", "(", ")", "# We get filename from the dataobject", "if", "not", "destination_filename", ":", "destination_filename", "=", "data_object", "[", "'value'", "]", "[", "'filename'", "]", "destination_file_url", "=", "os", ".", "path", ".", "join", "(", "destination_directory", ",", "destination_filename", ")", "logger", ".", "info", "(", "'Exporting file %s@%s ...'", "%", "(", "data_object", "[", "'value'", "]", "[", "'filename'", "]", ",", "data_object", "[", "'uuid'", "]", ")", ")", "if", "export_raw_file", ":", "destination", "=", "File", "(", "destination_file_url", ",", "self", ".", "storage_settings", ",", "retry", "=", "retry", ")", "if", "destination", ".", "exists", "(", ")", ":", "raise", "FileAlreadyExistsError", "(", "'File already exists at %s'", "%", "destination_file_url", ")", "logger", ".", "info", "(", "'...copying file to %s'", "%", "(", "destination", ".", "get_url", "(", ")", ")", ")", "# Copy from the first file location", "file_resource", "=", "data_object", ".", "get", "(", "'value'", ")", "md5", "=", "file_resource", ".", "get", "(", "'md5'", ")", "source_url", "=", "data_object", "[", "'value'", "]", "[", "'file_url'", "]", "File", "(", "source_url", ",", "self", ".", "storage_settings", ",", "retry", "=", "retry", ")", ".", "copy_to", "(", "destination", ",", "expected_md5", "=", "md5", ")", "data_object", "[", "'value'", "]", "=", "self", ".", "_create_new_file_resource", "(", "data_object", "[", "'value'", "]", ",", "destination", ".", "get_url", "(", ")", ")", "else", ":", "logger", ".", "info", "(", "'...skipping raw file'", ")", "if", "export_metadata", ":", "data_object", "[", "'value'", "]", ".", "pop", "(", "'link'", ",", "None", ")", "data_object", "[", "'value'", "]", ".", "pop", "(", "'upload_status'", ",", "None", ")", "destination_metadata_url", "=", "os", ".", "path", ".", "join", "(", "destination_file_url", "+", "'.metadata.yaml'", ")", "logger", ".", "info", "(", "'...writing metadata to %s'", "%", "destination_metadata_url", ")", "metadata", "=", "yaml", ".", "safe_dump", "(", "data_object", ",", "default_flow_style", "=", "False", ")", "metadata_file", "=", "File", "(", "destination_metadata_url", ",", "self", ".", "storage_settings", ",", "retry", "=", "retry", ")", "metadata_file", ".", "write", "(", "metadata", ")", "else", ":", "logger", ".", "info", "(", "'...skipping metadata'", ")", "logger", ".", "info", "(", "'...finished file export'", ")" ]
Export a file from Loom to some file storage location. Default destination_directory is cwd. Default destination_filename is the filename from the file data object associated with the given file_id.
[ "Export", "a", "file", "from", "Loom", "to", "some", "file", "storage", "location", ".", "Default", "destination_directory", "is", "cwd", ".", "Default", "destination_filename", "is", "the", "filename", "from", "the", "file", "data", "object", "associated", "with", "the", "given", "file_id", "." ]
train
https://github.com/StanfordBioinformatics/loom/blob/db2031a1a87124fee1aeb7414a668c03d774a698/utils/loomengine_utils/export_manager.py#L47-L101
StanfordBioinformatics/loom
utils/loomengine_utils/file_utils.py
_urlparse
def _urlparse(path): """Like urlparse except it assumes 'file://' if no scheme is specified """ url = urlparse.urlparse(path) _validate_url(url) if not url.scheme or url.scheme == 'file://': # Normalize path, and set scheme to "file" if missing path = os.path.abspath( os.path.expanduser(path)) url = urlparse.urlparse('file://'+path) return url
python
def _urlparse(path): """Like urlparse except it assumes 'file://' if no scheme is specified """ url = urlparse.urlparse(path) _validate_url(url) if not url.scheme or url.scheme == 'file://': # Normalize path, and set scheme to "file" if missing path = os.path.abspath( os.path.expanduser(path)) url = urlparse.urlparse('file://'+path) return url
[ "def", "_urlparse", "(", "path", ")", ":", "url", "=", "urlparse", ".", "urlparse", "(", "path", ")", "_validate_url", "(", "url", ")", "if", "not", "url", ".", "scheme", "or", "url", ".", "scheme", "==", "'file://'", ":", "# Normalize path, and set scheme to \"file\" if missing", "path", "=", "os", ".", "path", ".", "abspath", "(", "os", ".", "path", ".", "expanduser", "(", "path", ")", ")", "url", "=", "urlparse", ".", "urlparse", "(", "'file://'", "+", "path", ")", "return", "url" ]
Like urlparse except it assumes 'file://' if no scheme is specified
[ "Like", "urlparse", "except", "it", "assumes", "file", ":", "//", "if", "no", "scheme", "is", "specified" ]
train
https://github.com/StanfordBioinformatics/loom/blob/db2031a1a87124fee1aeb7414a668c03d774a698/utils/loomengine_utils/file_utils.py#L68-L78
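So bare paths gain a file scheme and become absolute, while explicit schemes pass through. An illustration using the Python 3 standard library (the code above uses the Python 2 urlparse module):
import os
from urllib.parse import urlparse

path = '~/data/reads.fastq'
url = urlparse(path)
if not url.scheme:
    url = urlparse('file://' + os.path.abspath(os.path.expanduser(path)))
print(url.scheme)                          # file
print(urlparse('gs://bucket/key').scheme)  # gs -- left as-is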
StanfordBioinformatics/loom
utils/loomengine_utils/file_utils.py
FilePattern
def FilePattern(pattern, settings, **kwargs): """Factory method returns LocalFilePattern or GoogleStorageFilePattern """ url = _urlparse(pattern) if url.scheme == 'gs': return GoogleStorageFilePattern(pattern, settings, **kwargs) else: assert url.scheme == 'file' return LocalFilePattern(pattern, settings, **kwargs)
python
def FilePattern(pattern, settings, **kwargs): """Factory method returns LocalFilePattern or GoogleStorageFilePattern """ url = _urlparse(pattern) if url.scheme == 'gs': return GoogleStorageFilePattern(pattern, settings, **kwargs) else: assert url.scheme == 'file' return LocalFilePattern(pattern, settings, **kwargs)
[ "def", "FilePattern", "(", "pattern", ",", "settings", ",", "*", "*", "kwargs", ")", ":", "url", "=", "_urlparse", "(", "pattern", ")", "if", "url", ".", "scheme", "==", "'gs'", ":", "return", "GoogleStorageFilePattern", "(", "pattern", ",", "settings", ",", "*", "*", "kwargs", ")", "else", ":", "assert", "url", ".", "scheme", "==", "'file'", "return", "LocalFilePattern", "(", "pattern", ",", "settings", ",", "*", "*", "kwargs", ")" ]
Factory method returns LocalFilePattern or GoogleStorageFilePattern
[ "Factory", "method", "returns", "LocalFilePattern", "or", "GoogleStorageFilePattern" ]
train
https://github.com/StanfordBioinformatics/loom/blob/db2031a1a87124fee1aeb7414a668c03d774a698/utils/loomengine_utils/file_utils.py#L119-L127
StanfordBioinformatics/loom
utils/loomengine_utils/file_utils.py
File
def File(url, settings, retry=False): """Factory method """ parsed_url = _urlparse(url) if parsed_url.scheme == 'gs': return GoogleStorageFile(url, settings, retry=retry) elif parsed_url.scheme == 'file': if parsed_url.hostname == 'localhost' or parsed_url.hostname is None: return LocalFile(url, settings, retry=retry) else: raise FileUtilsError( "Cannot process file url %s. Remote file hosts not supported." % url) else: raise FileUtilsError('Unsupported scheme "%s" in file "%s"' % (parsed_url.scheme, url))
python
def File(url, settings, retry=False): """Factory method """ parsed_url = _urlparse(url) if parsed_url.scheme == 'gs': return GoogleStorageFile(url, settings, retry=retry) elif parsed_url.scheme == 'file': if parsed_url.hostname == 'localhost' or parsed_url.hostname is None: return LocalFile(url, settings, retry=retry) else: raise FileUtilsError( "Cannot process file url %s. Remote file hosts not supported." % url) else: raise FileUtilsError('Unsupported scheme "%s" in file "%s"' % (parsed_url.scheme, url))
[ "def", "File", "(", "url", ",", "settings", ",", "retry", "=", "False", ")", ":", "parsed_url", "=", "_urlparse", "(", "url", ")", "if", "parsed_url", ".", "scheme", "==", "'gs'", ":", "return", "GoogleStorageFile", "(", "url", ",", "settings", ",", "retry", "=", "retry", ")", "elif", "parsed_url", ".", "scheme", "==", "'file'", ":", "if", "parsed_url", ".", "hostname", "==", "'localhost'", "or", "parsed_url", ".", "hostname", "is", "None", ":", "return", "LocalFile", "(", "url", ",", "settings", ",", "retry", "=", "retry", ")", "else", ":", "raise", "FileUtilsError", "(", "\"Cannot process file url %s. Remote file hosts not supported.\"", "%", "url", ")", "else", ":", "raise", "FileUtilsError", "(", "'Unsupported scheme \"%s\" in file \"%s\"'", "%", "(", "parsed_url", ".", "scheme", ",", "url", ")", ")" ]
Factory method
[ "Factory", "method" ]
train
https://github.com/StanfordBioinformatics/loom/blob/db2031a1a87124fee1aeb7414a668c03d774a698/utils/loomengine_utils/file_utils.py#L291-L307
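A standalone restatement of the scheme dispatch, with class names as strings (a simplified sketch; the real File() first normalizes the URL through _urlparse):
from urllib.parse import urlparse

def pick_file_class(url):
    parsed = urlparse(url)
    if parsed.scheme == 'gs':
        return 'GoogleStorageFile'
    if parsed.scheme == 'file':
        if parsed.hostname in (None, 'localhost'):
            return 'LocalFile'
        raise ValueError('remote file hosts not supported: %s' % url)
    raise ValueError('unsupported scheme %r' % parsed.scheme)

print(pick_file_class('gs://bucket/sample.bam'))  # GoogleStorageFile
print(pick_file_class('file:///tmp/sample.bam'))  # LocalFile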
StanfordBioinformatics/loom
utils/loomengine_utils/file_utils.py
Copier
def Copier(source, destination): """Factory method to select the right copier for a given source and destination. """ if source.type == 'local' and destination.type == 'local': return LocalCopier(source, destination) elif source.type == 'local' and destination.type == 'google_storage': return Local2GoogleStorageCopier(source, destination) elif source.type == 'google_storage' and destination.type == 'local': return GoogleStorage2LocalCopier(source, destination) elif source.type == 'google_storage' and destination.type == 'google_storage': return GoogleStorageCopier(source, destination) else: raise FileUtilsError('Could not find method to copy from source '\ '"%s" to destination "%s".' % (source, destination))
python
def Copier(source, destination): """Factory method to select the right copier for a given source and destination. """ if source.type == 'local' and destination.type == 'local': return LocalCopier(source, destination) elif source.type == 'local' and destination.type == 'google_storage': return Local2GoogleStorageCopier(source, destination) elif source.type == 'google_storage' and destination.type == 'local': return GoogleStorage2LocalCopier(source, destination) elif source.type == 'google_storage' and destination.type == 'google_storage': return GoogleStorageCopier(source, destination) else: raise FileUtilsError('Could not find method to copy from source '\ '"%s" to destination "%s".' % (source, destination))
[ "def", "Copier", "(", "source", ",", "destination", ")", ":", "if", "source", ".", "type", "==", "'local'", "and", "destination", ".", "type", "==", "'local'", ":", "return", "LocalCopier", "(", "source", ",", "destination", ")", "elif", "source", ".", "type", "==", "'local'", "and", "destination", ".", "type", "==", "'google_storage'", ":", "return", "Local2GoogleStorageCopier", "(", "source", ",", "destination", ")", "elif", "source", ".", "type", "==", "'google_storage'", "and", "destination", ".", "type", "==", "'local'", ":", "return", "GoogleStorage2LocalCopier", "(", "source", ",", "destination", ")", "elif", "source", ".", "type", "==", "'google_storage'", "and", "destination", ".", "type", "==", "'google_storage'", ":", "return", "GoogleStorageCopier", "(", "source", ",", "destination", ")", "else", ":", "raise", "FileUtilsError", "(", "'Could not find method to copy from source '", "'\"%s\" to destination \"%s\".'", "%", "(", "source", ",", "destination", ")", ")" ]
Factory method to select the right copier for a given source and destination.
[ "Factory", "method", "to", "select", "the", "right", "copier", "for", "a", "given", "source", "and", "destination", "." ]
train
https://github.com/StanfordBioinformatics/loom/blob/db2031a1a87124fee1aeb7414a668c03d774a698/utils/loomengine_utils/file_utils.py#L496-L510
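The four-way if/elif is equivalent to a lookup keyed on the (source.type, destination.type) pair; a dict-based sketch of the same dispatch, with class names as placeholder strings:
COPIERS = {
    ('local', 'local'): 'LocalCopier',
    ('local', 'google_storage'): 'Local2GoogleStorageCopier',
    ('google_storage', 'local'): 'GoogleStorage2LocalCopier',
    ('google_storage', 'google_storage'): 'GoogleStorageCopier',
}

def pick_copier(source_type, destination_type):
    try:
        return COPIERS[(source_type, destination_type)]
    except KeyError:
        raise ValueError('no copier from "%s" to "%s"'
                         % (source_type, destination_type))

print(pick_copier('local', 'google_storage'))  # Local2GoogleStorageCopier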
StanfordBioinformatics/loom
server/loomengine_server/api/models/input_calculator.py
InputSetGeneratorNode.create_from_data_channel
def create_from_data_channel(cls, data_channel): """Scan the data tree on the given data_channel to create a corresponding InputSetGenerator tree. """ gather_depth = cls._get_gather_depth(data_channel) generator = InputSetGeneratorNode() for (data_path, data_node) in data_channel.get_ready_data_nodes( [], gather_depth): flat_data_node = data_node.flattened_clone(save=False) input_item = InputItem( flat_data_node, data_channel.channel, data_channel.as_channel, mode=data_channel.mode) generator._add_input_item(data_path, input_item) return generator
python
def create_from_data_channel(cls, data_channel): """Scan the data tree on the given data_channel to create a corresponding InputSetGenerator tree. """ gather_depth = cls._get_gather_depth(data_channel) generator = InputSetGeneratorNode() for (data_path, data_node) in data_channel.get_ready_data_nodes( [], gather_depth): flat_data_node = data_node.flattened_clone(save=False) input_item = InputItem( flat_data_node, data_channel.channel, data_channel.as_channel, mode=data_channel.mode) generator._add_input_item(data_path, input_item) return generator
[ "def", "create_from_data_channel", "(", "cls", ",", "data_channel", ")", ":", "gather_depth", "=", "cls", ".", "_get_gather_depth", "(", "data_channel", ")", "generator", "=", "InputSetGeneratorNode", "(", ")", "for", "(", "data_path", ",", "data_node", ")", "in", "data_channel", ".", "get_ready_data_nodes", "(", "[", "]", ",", "gather_depth", ")", ":", "flat_data_node", "=", "data_node", ".", "flattened_clone", "(", "save", "=", "False", ")", "input_item", "=", "InputItem", "(", "flat_data_node", ",", "data_channel", ".", "channel", ",", "data_channel", ".", "as_channel", ",", "mode", "=", "data_channel", ".", "mode", ")", "generator", ".", "_add_input_item", "(", "data_path", ",", "input_item", ")", "return", "generator" ]
Scan the data tree on the given data_channel to create a corresponding InputSetGenerator tree.
[ "Scan", "the", "data", "tree", "on", "the", "given", "data_channel", "to", "create", "a", "corresponding", "InputSetGenerator", "tree", "." ]
train
https://github.com/StanfordBioinformatics/loom/blob/db2031a1a87124fee1aeb7414a668c03d774a698/server/loomengine_server/api/models/input_calculator.py#L119-L133
StanfordBioinformatics/loom
worker/loomengine_worker/outputs.py
TaskAttemptOutput
def TaskAttemptOutput(output, task_attempt): """Returns the correct Output class for a given data type, source type, and scatter mode """ (data_type, mode, source_type) = _get_output_info(output) if data_type == 'file': if mode == 'scatter': assert source_type in ['filenames', 'glob'], \ 'source type "%s" not allowed' % source_type if source_type == 'filenames': return FileListScatterOutput(output, task_attempt) return GlobScatterOutput(output, task_attempt) else: assert mode == 'no_scatter' assert source_type == 'filename', \ 'source type "%s" not allowed' % source_type return FileOutput(output, task_attempt) else: # data_type is non-file if mode == 'scatter': assert source_type in [ 'filename', 'filenames', 'glob', 'stream'], \ 'source type "%s" not allowed' % source_type if source_type == 'filename': return FileContentsScatterOutput(output, task_attempt) if source_type == 'filenames': return FileListContentsScatterOutput(output, task_attempt) if source_type == 'glob': return GlobContentsScatterOutput(output, task_attempt) assert source_type == 'stream' return StreamScatterOutput(output, task_attempt) else: assert mode == 'no_scatter' assert source_type in ['filename', 'stream'], \ 'source type "%s" not allowed' % source_type if source_type == 'filename': return FileContentsOutput(output, task_attempt) assert source_type == 'stream' return StreamOutput(output, task_attempt)
python
def TaskAttemptOutput(output, task_attempt): """Returns the correct Output class for a given data type, source type, and scatter mode """ (data_type, mode, source_type) = _get_output_info(output) if data_type == 'file': if mode == 'scatter': assert source_type in ['filenames', 'glob'], \ 'source type "%s" not allowed' % source_type if source_type == 'filenames': return FileListScatterOutput(output, task_attempt) return GlobScatterOutput(output, task_attempt) else: assert mode == 'no_scatter' assert source_type == 'filename', \ 'source type "%s" not allowed' % source_type return FileOutput(output, task_attempt) else: # data_type is non-file if mode == 'scatter': assert source_type in [ 'filename', 'filenames', 'glob', 'stream'], \ 'source type "%s" not allowed' % source_type if source_type == 'filename': return FileContentsScatterOutput(output, task_attempt) if source_type == 'filenames': return FileListContentsScatterOutput(output, task_attempt) if source_type == 'glob': return GlobContentsScatterOutput(output, task_attempt) assert source_type == 'stream' return StreamScatterOutput(output, task_attempt) else: assert mode == 'no_scatter' assert source_type in ['filename', 'stream'], \ 'source type "%s" not allowed' % source_type if source_type == 'filename': return FileContentsOutput(output, task_attempt) assert source_type == 'stream' return StreamOutput(output, task_attempt)
[ "def", "TaskAttemptOutput", "(", "output", ",", "task_attempt", ")", ":", "(", "data_type", ",", "mode", ",", "source_type", ")", "=", "_get_output_info", "(", "output", ")", "if", "data_type", "==", "'file'", ":", "if", "mode", "==", "'scatter'", ":", "assert", "source_type", "in", "[", "'filenames'", ",", "'glob'", "]", ",", "'source type \"%s\" not allowed'", "%", "source_type", "if", "source_type", "==", "'filenames'", ":", "return", "FileListScatterOutput", "(", "output", ",", "task_attempt", ")", "return", "GlobScatterOutput", "(", "output", ",", "task_attempt", ")", "else", ":", "assert", "mode", "==", "'no_scatter'", "assert", "source_type", "==", "'filename'", ",", "'source type \"%s\" not allowed'", "%", "source_type", "return", "FileOutput", "(", "output", ",", "task_attempt", ")", "else", ":", "# data_type is non-file", "if", "mode", "==", "'scatter'", ":", "assert", "source_type", "in", "[", "'filename'", ",", "'filenames'", ",", "'glob'", ",", "'stream'", "]", ",", "'source type \"%s\" not allowed'", "%", "source_type", "if", "source_type", "==", "'filename'", ":", "return", "FileContentsScatterOutput", "(", "output", ",", "task_attempt", ")", "if", "source_type", "==", "'filenames'", ":", "return", "FileListContentsScatterOutput", "(", "output", ",", "task_attempt", ")", "if", "source_type", "==", "'glob'", ":", "return", "GlobContentsScatterOutput", "(", "output", ",", "task_attempt", ")", "assert", "source_type", "==", "'stream'", "return", "StreamScatterOutput", "(", "output", ",", "task_attempt", ")", "else", ":", "assert", "mode", "==", "'no_scatter'", "assert", "source_type", "in", "[", "'filename'", ",", "'stream'", "]", ",", "'source type \"%s\" not allowed'", "%", "source_type", "if", "source_type", "==", "'filename'", ":", "return", "FileContentsOutput", "(", "output", ",", "task_attempt", ")", "assert", "source_type", "==", "'stream'", "return", "StreamOutput", "(", "output", ",", "task_attempt", ")" ]
Returns the correct Output class for a given data type, source type, and scatter mode
[ "Returns", "the", "correct", "Output", "class", "for", "a", "given", "data", "type", "source", "type", "and", "scatter", "mode" ]
train
https://github.com/StanfordBioinformatics/loom/blob/db2031a1a87124fee1aeb7414a668c03d774a698/worker/loomengine_worker/outputs.py#L181-L220
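Written out as a table, the branches above admit exactly these (data_type, mode, source_type) combinations; anything else trips an assertion. Here 'non-file' stands for any data type other than 'file':
OUTPUT_CLASSES = {
    ('file',     'scatter',    'filenames'): 'FileListScatterOutput',
    ('file',     'scatter',    'glob'):      'GlobScatterOutput',
    ('file',     'no_scatter', 'filename'):  'FileOutput',
    ('non-file', 'scatter',    'filename'):  'FileContentsScatterOutput',
    ('non-file', 'scatter',    'filenames'): 'FileListContentsScatterOutput',
    ('non-file', 'scatter',    'glob'):      'GlobContentsScatterOutput',
    ('non-file', 'scatter',    'stream'):    'StreamScatterOutput',
    ('non-file', 'no_scatter', 'filename'):  'FileContentsOutput',
    ('non-file', 'no_scatter', 'stream'):    'StreamOutput',
}
print(OUTPUT_CLASSES[('file', 'scatter', 'glob')])  # GlobScatterOutput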
StanfordBioinformatics/loom
server/loomengine_server/api/models/data_nodes.py
DataNode.add_leaf
def add_leaf(self, index, data_object, save=False): """Adds a new leaf node at the given index with the given data_object """ assert self.type == data_object.type, 'data type mismatch' if self._get_child_by_index(index) is not None: raise NodeAlreadyExistsError( 'Leaf data node already exists at this index') else: data_node = DataNode( parent=self, index=index, data_object=data_object, type=self.type) if save: data_node.full_clean() data_node.save() self._add_unsaved_child(data_node) return data_node
python
def add_leaf(self, index, data_object, save=False): """Adds a new leaf node at the given index with the given data_object """ assert self.type == data_object.type, 'data type mismatch' if self._get_child_by_index(index) is not None: raise NodeAlreadyExistsError( 'Leaf data node already exists at this index') else: data_node = DataNode( parent=self, index=index, data_object=data_object, type=self.type) if save: data_node.full_clean() data_node.save() self._add_unsaved_child(data_node) return data_node
[ "def", "add_leaf", "(", "self", ",", "index", ",", "data_object", ",", "save", "=", "False", ")", ":", "assert", "self", ".", "type", "==", "data_object", ".", "type", ",", "'data type mismatch'", "if", "self", ".", "_get_child_by_index", "(", "index", ")", "is", "not", "None", ":", "raise", "NodeAlreadyExistsError", "(", "'Leaf data node already exists at this index'", ")", "else", ":", "data_node", "=", "DataNode", "(", "parent", "=", "self", ",", "index", "=", "index", ",", "data_object", "=", "data_object", ",", "type", "=", "self", ".", "type", ")", "if", "save", ":", "data_node", ".", "full_clean", "(", ")", "data_node", ".", "save", "(", ")", "self", ".", "_add_unsaved_child", "(", "data_node", ")", "return", "data_node" ]
Adds a new leaf node at the given index with the given data_object
[ "Adds", "a", "new", "leaf", "node", "at", "the", "given", "index", "with", "the", "given", "data_object" ]
train
https://github.com/StanfordBioinformatics/loom/blob/db2031a1a87124fee1aeb7414a668c03d774a698/server/loomengine_server/api/models/data_nodes.py#L103-L120
StanfordBioinformatics/loom
server/loomengine_server/api/models/data_nodes.py
DataNode.get_ready_data_nodes
def get_ready_data_nodes(self, seed_path, gather_depth): """Returns a list [(path1,data_node1),...] with entries only for existing nodes with DataObjects where is_ready==True. Missing nodes or those with non-ready or non-existing data are ignored. """ try: seed_node = self.get_node(seed_path) except MissingBranchError: return [] all_paths = seed_node._get_all_paths(seed_path, gather_depth) ready_data_nodes = [] for path in all_paths: if self.is_ready(data_path=path): ready_data_nodes.append((path, self.get_node(path))) return ready_data_nodes
python
def get_ready_data_nodes(self, seed_path, gather_depth): """Returns a list [(path1,data_node1),...] with entries only for existing nodes with DataObjects where is_ready==True. Missing nodes or those with non-ready or non-existing data are ignored. """ try: seed_node = self.get_node(seed_path) except MissingBranchError: return [] all_paths = seed_node._get_all_paths(seed_path, gather_depth) ready_data_nodes = [] for path in all_paths: if self.is_ready(data_path=path): ready_data_nodes.append((path, self.get_node(path))) return ready_data_nodes
[ "def", "get_ready_data_nodes", "(", "self", ",", "seed_path", ",", "gather_depth", ")", ":", "try", ":", "seed_node", "=", "self", ".", "get_node", "(", "seed_path", ")", "except", "MissingBranchError", ":", "return", "[", "]", "all_paths", "=", "seed_node", ".", "_get_all_paths", "(", "seed_path", ",", "gather_depth", ")", "ready_data_nodes", "=", "[", "]", "for", "path", "in", "all_paths", ":", "if", "self", ".", "is_ready", "(", "data_path", "=", "path", ")", ":", "ready_data_nodes", ".", "append", "(", "(", "path", ",", "self", ".", "get_node", "(", "path", ")", ")", ")", "return", "ready_data_nodes" ]
Returns a list [(path1,data_node1),...] with entries only for existing nodes with DataObjects where is_ready==True. Missing nodes or those with non-ready or non-existing data are ignored.
[ "Returns", "a", "list", "[", "(", "path1", "data_node1", ")", "...", "]", "with", "entries", "only", "for", "existing", "nodes", "with", "DataObjects", "where", "is_ready", "==", "True", ".", "Missing", "nodes", "or", "those", "with", "non", "-", "ready", "or", "non", "-", "existing", "data", "are", "ignored", "." ]
train
https://github.com/StanfordBioinformatics/loom/blob/db2031a1a87124fee1aeb7414a668c03d774a698/server/loomengine_server/api/models/data_nodes.py#L183-L197
StanfordBioinformatics/loom
server/loomengine_server/api/models/data_nodes.py
DataNode._check_index
def _check_index(self, index): """Verify that the given index is consistent with the degree of the node. """ if self.degree is None: raise UnknownDegreeError( 'Cannot access child DataNode on a parent with degree of None. '\ 'Set the degree on the parent first.') if index < 0 or index >= self.degree: raise IndexOutOfRangeError( 'Out of range index %s. DataNode parent has degree %s, so index '\ 'should be in the range 0 to %s' % ( index, self.degree, self.degree-1))
python
def _check_index(self, index): """Verify that the given index is consistent with the degree of the node. """ if self.degree is None: raise UnknownDegreeError( 'Cannot access child DataNode on a parent with degree of None. '\ 'Set the degree on the parent first.') if index < 0 or index >= self.degree: raise IndexOutOfRangeError( 'Out of range index %s. DataNode parent has degree %s, so index '\ 'should be in the range 0 to %s' % ( index, self.degree, self.degree-1))
[ "def", "_check_index", "(", "self", ",", "index", ")", ":", "if", "self", ".", "degree", "is", "None", ":", "raise", "UnknownDegreeError", "(", "'Cannot access child DataNode on a parent with degree of None. '", "'Set the degree on the parent first.'", ")", "if", "index", "<", "0", "or", "index", ">=", "self", ".", "degree", ":", "raise", "IndexOutOfRangeError", "(", "'Out of range index %s. DataNode parent has degree %s, so index '", "'should be in the range 0 to %s'", "%", "(", "index", ",", "self", ".", "degree", ",", "self", ".", "degree", "-", "1", ")", ")" ]
Verify that the given index is consistent with the degree of the node.
[ "Verify", "that", "the", "given", "index", "is", "consistent", "with", "the", "degree", "of", "the", "node", "." ]
train
https://github.com/StanfordBioinformatics/loom/blob/db2031a1a87124fee1aeb7414a668c03d774a698/server/loomengine_server/api/models/data_nodes.py#L328-L339
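For example, a parent of degree 3 accepts indices 0 through 2 and rejects everything else. A standalone restatement with plain ValueErrors:
def check_index(index, degree):
    # Same checks as DataNode._check_index above.
    if degree is None:
        raise ValueError('set the degree on the parent first')
    if index < 0 or index >= degree:
        raise ValueError('out of range index %s; expected 0 to %s'
                         % (index, degree - 1))

check_index(2, 3)    # ok
# check_index(3, 3)  # raises: out of range index 3; expected 0 to 2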
StanfordBioinformatics/loom
client/loomengine/playbooks/files/gcloud_utils.py
on_gcloud_vm
def on_gcloud_vm():
    """ Determines if we're running on a GCE instance."""
    r = None
    try:
        r = requests.get('http://metadata.google.internal')
    except requests.ConnectionError:
        return False

    try:
        if r.headers['Metadata-Flavor'] == 'Google' and \
           r.headers['Server'] == 'Metadata Server for VM':
            return True
    except KeyError:
        return False
    return False
python
def on_gcloud_vm():
    """ Determines if we're running on a GCE instance."""
    r = None
    try:
        r = requests.get('http://metadata.google.internal')
    except requests.ConnectionError:
        return False

    try:
        if r.headers['Metadata-Flavor'] == 'Google' and \
           r.headers['Server'] == 'Metadata Server for VM':
            return True
    except KeyError:
        return False
    return False
[ "def", "on_gcloud_vm", "(", ")", ":", "r", "=", "None", "try", ":", "r", "=", "requests", ".", "get", "(", "'http://metadata.google.internal'", ")", "except", "requests", ".", "ConnectionError", ":", "return", "False", "try", ":", "if", "r", ".", "headers", "[", "'Metadata-Flavor'", "]", "==", "'Google'", "and", "r", ".", "headers", "[", "'Server'", "]", "==", "'Metadata Server for VM'", ":", "return", "True", "except", "KeyError", ":", "return", "False", "return", "False" ]
Determines if we're running on a GCE instance.
[ "Determines", "if", "we", "re", "running", "on", "a", "GCE", "instance", "." ]
train
https://github.com/StanfordBioinformatics/loom/blob/db2031a1a87124fee1aeb7414a668c03d774a698/client/loomengine/playbooks/files/gcloud_utils.py#L11-L24
StanfordBioinformatics/loom
client/loomengine/playbooks/files/gcloud_utils.py
get_cheapest_instance_type
def get_cheapest_instance_type(cores, memory):
    """Determine the cheapest instance type given a minimum number of cores
    and minimum amount of RAM (in GB).
    """
    pricelist = get_gcloud_pricelist()

    # Filter out preemptible, shared-CPU, and non-US instance types
    us_instance_types = {k: v for k, v in pricelist.items()
                         if k.startswith('CP-COMPUTEENGINE-VMIMAGE-')
                         and not k.endswith('-PREEMPTIBLE')
                         and 'us' in v and v['cores'] != 'shared'}

    # Convert to array and add keys (instance type names) as type names
    price_array = []
    for key in us_instance_types:
        value = us_instance_types[key]
        value.update({'name': key.replace(
            'CP-COMPUTEENGINE-VMIMAGE-', '').lower()})
        price_array.append(value)

    # Sort by price in US
    price_array.sort(key=lambda x: x['us'])

    # Look for an instance type that satisfies requested
    # cores and memory; first will be cheapest
    for instance_type in price_array:
        if int(instance_type['cores']) >= int(cores) \
                and float(instance_type['memory']) >= float(memory):
            print(instance_type['name'])
            return instance_type['name']

    # No instance type found that can fulfill requested cores and memory
    raise Exception('No instance type found with at least %d cores '
                    'and %f GB of RAM.' % (cores, memory))
python
def get_cheapest_instance_type(cores, memory):
    """Determine the cheapest instance type given a minimum number of cores
    and minimum amount of RAM (in GB).
    """
    pricelist = get_gcloud_pricelist()

    # Filter out preemptible, shared-CPU, and non-US instance types
    us_instance_types = {k: v for k, v in pricelist.items()
                         if k.startswith('CP-COMPUTEENGINE-VMIMAGE-')
                         and not k.endswith('-PREEMPTIBLE')
                         and 'us' in v and v['cores'] != 'shared'}

    # Convert to array and add keys (instance type names) as type names
    price_array = []
    for key in us_instance_types:
        value = us_instance_types[key]
        value.update({'name': key.replace(
            'CP-COMPUTEENGINE-VMIMAGE-', '').lower()})
        price_array.append(value)

    # Sort by price in US
    price_array.sort(key=lambda x: x['us'])

    # Look for an instance type that satisfies requested
    # cores and memory; first will be cheapest
    for instance_type in price_array:
        if int(instance_type['cores']) >= int(cores) \
                and float(instance_type['memory']) >= float(memory):
            print(instance_type['name'])
            return instance_type['name']

    # No instance type found that can fulfill requested cores and memory
    raise Exception('No instance type found with at least %d cores '
                    'and %f GB of RAM.' % (cores, memory))
[ "def", "get_cheapest_instance_type", "(", "cores", ",", "memory", ")", ":", "pricelist", "=", "get_gcloud_pricelist", "(", ")", "# Filter out preemptible, shared-CPU, and non-US instance types", "us_instance_types", "=", "{", "k", ":", "v", "for", "k", ",", "v", "in", "pricelist", ".", "items", "(", ")", "if", "k", ".", "startswith", "(", "'CP-COMPUTEENGINE-VMIMAGE-'", ")", "and", "not", "k", ".", "endswith", "(", "'-PREEMPTIBLE'", ")", "and", "'us'", "in", "v", "and", "v", "[", "'cores'", "]", "!=", "'shared'", "}", "# Convert to array and add keys (instance type names) as type names", "price_array", "=", "[", "]", "for", "key", "in", "us_instance_types", ":", "value", "=", "us_instance_types", "[", "key", "]", "value", ".", "update", "(", "{", "'name'", ":", "key", ".", "replace", "(", "'CP-COMPUTEENGINE-VMIMAGE-'", ",", "''", ")", ".", "lower", "(", ")", "}", ")", "price_array", ".", "append", "(", "value", ")", "# Sort by price in US", "price_array", ".", "sort", "(", "key", "=", "lambda", "x", ":", "x", "[", "'us'", "]", ")", "# Look for an instance type that satisfies requested", "# cores and memory; first will be cheapest", "for", "instance_type", "in", "price_array", ":", "if", "int", "(", "instance_type", "[", "'cores'", "]", ")", ">=", "int", "(", "cores", ")", "and", "float", "(", "instance_type", "[", "'memory'", "]", ")", ">=", "float", "(", "memory", ")", ":", "print", "(", "instance_type", "[", "'name'", "]", ")", "return", "instance_type", "[", "'name'", "]", "# No instance type found that can fulfill requested cores and memory", "raise", "Exception", "(", "'No instance type found with at least %d cores '", "'and %f GB of RAM.'", "%", "(", "cores", ",", "memory", ")", ")" ]
Determine the cheapest instance type given a minimum number of cores and minimum amount of RAM (in GB).
[ "Determine", "the", "cheapest", "instance", "type", "given", "a", "minimum", "number", "of", "cores", "and", "minimum", "amount", "of", "RAM", "(", "in", "GB", ")", "." ]
train
https://github.com/StanfordBioinformatics/loom/blob/db2031a1a87124fee1aeb7414a668c03d774a698/client/loomengine/playbooks/files/gcloud_utils.py#L27-L61
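The selection itself is filter, sort by price, first fit. With a toy pricelist (made-up numbers, not real GCE prices):
instance_types = [
    {'name': 'n1-standard-1', 'cores': 1, 'memory': 3.75, 'us': 0.05},
    {'name': 'n1-standard-2', 'cores': 2, 'memory': 7.5,  'us': 0.10},
    {'name': 'n1-highmem-2',  'cores': 2, 'memory': 13.0, 'us': 0.12},
]
instance_types.sort(key=lambda t: t['us'])  # cheapest first
choice = next(t for t in instance_types
              if t['cores'] >= 2 and t['memory'] >= 8.0)
print(choice['name'])  # n1-highmem-2: cheapest type with 2 cores, 8 GB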
StanfordBioinformatics/loom
client/loomengine/playbooks/files/gcloud_utils.py
get_gcloud_pricelist
def get_gcloud_pricelist():
    """Retrieve latest pricelist from Google Cloud, or use cached copy
    if not reachable.
    """
    try:
        r = requests.get('http://cloudpricingcalculator.appspot.com'
                         '/static/data/pricelist.json')
        content = json.loads(r.content)
    except requests.ConnectionError:
        logger.warning(
            "Couldn't get updated pricelist from "
            "http://cloudpricingcalculator.appspot.com"
            "/static/data/pricelist.json. Falling back to cached "
            "copy, but prices may be out of date.")
        with open('gcloudpricelist.json') as infile:
            content = json.load(infile)
    pricelist = content['gcp_price_list']
    return pricelist
python
def get_gcloud_pricelist():
    """Retrieve latest pricelist from Google Cloud, or use cached copy
    if not reachable.
    """
    try:
        r = requests.get('http://cloudpricingcalculator.appspot.com'
                         '/static/data/pricelist.json')
        content = json.loads(r.content)
    except requests.ConnectionError:
        logger.warning(
            "Couldn't get updated pricelist from "
            "http://cloudpricingcalculator.appspot.com"
            "/static/data/pricelist.json. Falling back to cached "
            "copy, but prices may be out of date.")
        with open('gcloudpricelist.json') as infile:
            content = json.load(infile)
    pricelist = content['gcp_price_list']
    return pricelist
[ "def", "get_gcloud_pricelist", "(", ")", ":", "try", ":", "r", "=", "requests", ".", "get", "(", "'http://cloudpricingcalculator.appspot.com'", "'/static/data/pricelist.json'", ")", "content", "=", "json", ".", "loads", "(", "r", ".", "content", ")", "except", "requests", ".", "ConnectionError", ":", "logger", ".", "warning", "(", "\"Couldn't get updated pricelist from \"", "\"http://cloudpricingcalculator.appspot.com\"", "\"/static/data/pricelist.json. Falling back to cached \"", "\"copy, but prices may be out of date.\"", ")", "with", "open", "(", "'gcloudpricelist.json'", ")", "as", "infile", ":", "content", "=", "json", ".", "load", "(", "infile", ")", "pricelist", "=", "content", "[", "'gcp_price_list'", "]", "return", "pricelist" ]
Retrieve latest pricelist from Google Cloud, or use cached copy if not reachable.
[ "Retrieve", "latest", "pricelist", "from", "Google", "Cloud", "or", "use", "cached", "copy", "if", "not", "reachable", "." ]
train
https://github.com/StanfordBioinformatics/loom/blob/db2031a1a87124fee1aeb7414a668c03d774a698/client/loomengine/playbooks/files/gcloud_utils.py#L64-L82
StanfordBioinformatics/loom
client/loomengine/playbooks/files/gcloud_utils.py
_get_base_name
def _get_base_name(hostname, step_name, attempt_id, max_length): """Create a base name for the worker instance that will run the specified task run attempt, from this server. Since hostname and step name will be duplicated across workers (reruns, etc.), ensure that at least MIN_TASK_ID_CHARS are preserved in the instance name. Also, prevent names from ending with dashes. """ max_length = int(max_length) if len(hostname)+len(step_name)+MIN_TASK_ID_CHARS+2 > max_length: # round with ceil/floor such that extra char goes to hostname if odd hostname_chars = int(math.ceil( (max_length-MIN_TASK_ID_CHARS-2)/float(2))) step_name_chars = int(math.floor( (max_length-MIN_TASK_ID_CHARS-2)/float(2))) hostname = hostname[:hostname_chars] step_name = step_name[:step_name_chars] name_base = '-'.join([hostname, step_name, attempt_id]) return _sanitize_instance_name(name_base, max_length)
python
def _get_base_name(hostname, step_name, attempt_id, max_length): """Create a base name for the worker instance that will run the specified task run attempt, from this server. Since hostname and step name will be duplicated across workers (reruns, etc.), ensure that at least MIN_TASK_ID_CHARS are preserved in the instance name. Also, prevent names from ending with dashes. """ max_length = int(max_length) if len(hostname)+len(step_name)+MIN_TASK_ID_CHARS+2 > max_length: # round with ceil/floor such that extra char goes to hostname if odd hostname_chars = int(math.ceil( (max_length-MIN_TASK_ID_CHARS-2)/float(2))) step_name_chars = int(math.floor( (max_length-MIN_TASK_ID_CHARS-2)/float(2))) hostname = hostname[:hostname_chars] step_name = step_name[:step_name_chars] name_base = '-'.join([hostname, step_name, attempt_id]) return _sanitize_instance_name(name_base, max_length)
[ "def", "_get_base_name", "(", "hostname", ",", "step_name", ",", "attempt_id", ",", "max_length", ")", ":", "max_length", "=", "int", "(", "max_length", ")", "if", "len", "(", "hostname", ")", "+", "len", "(", "step_name", ")", "+", "MIN_TASK_ID_CHARS", "+", "2", ">", "max_length", ":", "# round with ceil/floor such that extra char goes to hostname if odd", "hostname_chars", "=", "int", "(", "math", ".", "ceil", "(", "(", "max_length", "-", "MIN_TASK_ID_CHARS", "-", "2", ")", "/", "float", "(", "2", ")", ")", ")", "step_name_chars", "=", "int", "(", "math", ".", "floor", "(", "(", "max_length", "-", "MIN_TASK_ID_CHARS", "-", "2", ")", "/", "float", "(", "2", ")", ")", ")", "hostname", "=", "hostname", "[", ":", "hostname_chars", "]", "step_name", "=", "step_name", "[", ":", "step_name_chars", "]", "name_base", "=", "'-'", ".", "join", "(", "[", "hostname", ",", "step_name", ",", "attempt_id", "]", ")", "return", "_sanitize_instance_name", "(", "name_base", ",", "max_length", ")" ]
Create a base name for the worker instance that will run the specified task run attempt, from this server. Since hostname and step name will be duplicated across workers (reruns, etc.), ensure that at least MIN_TASK_ID_CHARS are preserved in the instance name. Also, prevent names from ending with dashes.
[ "Create", "a", "base", "name", "for", "the", "worker", "instance", "that", "will", "run", "the", "specified", "task", "run", "attempt", "from", "this", "server", ".", "Since", "hostname", "and", "step", "name", "will", "be", "duplicated", "across", "workers", "(", "reruns", "etc", ".", ")", "ensure", "that", "at", "least", "MIN_TASK_ID_CHARS", "are", "preserved", "in", "the", "instance", "name", ".", "Also", "prevent", "names", "from", "ending", "with", "dashes", "." ]
train
https://github.com/StanfordBioinformatics/loom/blob/db2031a1a87124fee1aeb7414a668c03d774a698/client/loomengine/playbooks/files/gcloud_utils.py#L88-L105
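Worked example of the truncation arithmetic, assuming MIN_TASK_ID_CHARS is 8 (the constant's actual value lives elsewhere in the module, so 8 is an assumption here): with max_length=32, each of hostname and step name gets (32-8-2)/2 = 11 characters.
import math

MIN_TASK_ID_CHARS = 8  # assumed value, for illustration only
max_length = 32
budget = max_length - MIN_TASK_ID_CHARS - 2
print(int(math.ceil(budget / 2.0)),
      int(math.floor(budget / 2.0)))             # 11 11
print('loom-master-prod'[:11], 'align-reads-step'[:11])
# loom-master align-reads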
StanfordBioinformatics/loom
client/loomengine/playbooks/files/gcloud_utils.py
_sanitize_instance_name
def _sanitize_instance_name(name, max_length): """Instance names must start with a lowercase letter. All following characters must be a dash, lowercase letter, or digit. """ name = str(name).lower() # make all letters lowercase name = re.sub(r'[^-a-z0-9]', '', name) # remove invalid characters # remove non-lowercase letters from the beginning name = re.sub(r'^[^a-z]+', '', name) name = name[:max_length] name = re.sub(r'-+$', '', name) # remove hyphens from the end return name
python
def _sanitize_instance_name(name, max_length): """Instance names must start with a lowercase letter. All following characters must be a dash, lowercase letter, or digit. """ name = str(name).lower() # make all letters lowercase name = re.sub(r'[^-a-z0-9]', '', name) # remove invalid characters # remove non-lowercase letters from the beginning name = re.sub(r'^[^a-z]+', '', name) name = name[:max_length] name = re.sub(r'-+$', '', name) # remove hyphens from the end return name
[ "def", "_sanitize_instance_name", "(", "name", ",", "max_length", ")", ":", "name", "=", "str", "(", "name", ")", ".", "lower", "(", ")", "# make all letters lowercase", "name", "=", "re", ".", "sub", "(", "r'[^-a-z0-9]'", ",", "''", ",", "name", ")", "# remove invalid characters", "# remove non-lowercase letters from the beginning", "name", "=", "re", ".", "sub", "(", "r'^[^a-z]+'", ",", "''", ",", "name", ")", "name", "=", "name", "[", ":", "max_length", "]", "name", "=", "re", ".", "sub", "(", "r'-+$'", ",", "''", ",", "name", ")", "# remove hyphens from the end", "return", "name" ]
Instance names must start with a lowercase letter. All following characters must be a dash, lowercase letter, or digit.
[ "Instance", "names", "must", "start", "with", "a", "lowercase", "letter", ".", "All", "following", "characters", "must", "be", "a", "dash", "lowercase", "letter", "or", "digit", "." ]
train
https://github.com/StanfordBioinformatics/loom/blob/db2031a1a87124fee1aeb7414a668c03d774a698/client/loomengine/playbooks/files/gcloud_utils.py#L115-L126
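A quick demonstration of the three sanitization steps (GCE requires a lowercase-letter start followed only by dashes, lowercase letters, and digits); this re-implements the same regexes standalone for illustration:

import re

def sanitize(name, max_length):
    name = str(name).lower()                 # lowercase everything
    name = re.sub(r'[^-a-z0-9]', '', name)   # drop invalid characters
    name = re.sub(r'^[^a-z]+', '', name)     # must begin with a lowercase letter
    name = name[:max_length]
    return re.sub(r'-+$', '', name)          # no trailing dashes

print(sanitize('42-My_Host.example', 10))  # -> 'myhostexam'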
StanfordBioinformatics/loom
client/loomengine/playbooks/gce.py
CloudInventoryCache.is_valid
def is_valid(self, max_age=None): ''' Determines if the cache file has expired, or if it is still valid ''' if max_age is None: max_age = self.cache_max_age if os.path.isfile(self.cache_path_cache): mod_time = os.path.getmtime(self.cache_path_cache) current_time = time() if (mod_time + max_age) > current_time: return True return False
python
def is_valid(self, max_age=None): ''' Determines if the cache file has expired, or if it is still valid ''' if max_age is None: max_age = self.cache_max_age if os.path.isfile(self.cache_path_cache): mod_time = os.path.getmtime(self.cache_path_cache) current_time = time() if (mod_time + max_age) > current_time: return True return False
[ "def", "is_valid", "(", "self", ",", "max_age", "=", "None", ")", ":", "if", "max_age", "is", "None", ":", "max_age", "=", "self", ".", "cache_max_age", "if", "os", ".", "path", ".", "isfile", "(", "self", ".", "cache_path_cache", ")", ":", "mod_time", "=", "os", ".", "path", ".", "getmtime", "(", "self", ".", "cache_path_cache", ")", "current_time", "=", "time", "(", ")", "if", "(", "mod_time", "+", "max_age", ")", ">", "current_time", ":", "return", "True", "return", "False" ]
Determines if the cache file has expired, or if it is still valid
[ "Determines", "if", "the", "cache", "files", "have", "expired", "or", "if", "it", "is", "still", "valid" ]
train
https://github.com/StanfordBioinformatics/loom/blob/db2031a1a87124fee1aeb7414a668c03d774a698/client/loomengine/playbooks/gce.py#L123-L135
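The freshness test above is just an mtime comparison; the same check as a standalone sketch:

import os
from time import time

def cache_is_fresh(path, max_age=300):
    # Fresh if the file exists and was modified within the last max_age seconds.
    return os.path.isfile(path) and (os.path.getmtime(path) + max_age) > time()

print(cache_is_fresh('/tmp/ansible-gce.cache'))  # False unless the file exists and is recent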
StanfordBioinformatics/loom
client/loomengine/playbooks/gce.py
CloudInventoryCache.get_all_data_from_cache
def get_all_data_from_cache(self, filename=''): ''' Reads the JSON inventory from the cache file. Returns Python dictionary. ''' data = '' if not filename: filename = self.cache_path_cache with open(filename, 'r') as cache: data = cache.read() return json.loads(data)
python
def get_all_data_from_cache(self, filename=''): ''' Reads the JSON inventory from the cache file. Returns Python dictionary. ''' data = '' if not filename: filename = self.cache_path_cache with open(filename, 'r') as cache: data = cache.read() return json.loads(data)
[ "def", "get_all_data_from_cache", "(", "self", ",", "filename", "=", "''", ")", ":", "data", "=", "''", "if", "not", "filename", ":", "filename", "=", "self", ".", "cache_path_cache", "with", "open", "(", "filename", ",", "'r'", ")", "as", "cache", ":", "data", "=", "cache", ".", "read", "(", ")", "return", "json", ".", "loads", "(", "data", ")" ]
Reads the JSON inventory from the cache file. Returns Python dictionary.
[ "Reads", "the", "JSON", "inventory", "from", "the", "cache", "file", ".", "Returns", "Python", "dictionary", "." ]
train
https://github.com/StanfordBioinformatics/loom/blob/db2031a1a87124fee1aeb7414a668c03d774a698/client/loomengine/playbooks/gce.py#L137-L145
StanfordBioinformatics/loom
client/loomengine/playbooks/gce.py
CloudInventoryCache.write_to_cache
def write_to_cache(self, data, filename=''): ''' Writes data to file as JSON. Returns True. ''' if not filename: filename = self.cache_path_cache json_data = json.dumps(data) with open(filename, 'w') as cache: cache.write(json_data) return True
python
def write_to_cache(self, data, filename=''): ''' Writes data to file as JSON. Returns True. ''' if not filename: filename = self.cache_path_cache json_data = json.dumps(data) with open(filename, 'w') as cache: cache.write(json_data) return True
[ "def", "write_to_cache", "(", "self", ",", "data", ",", "filename", "=", "''", ")", ":", "if", "not", "filename", ":", "filename", "=", "self", ".", "cache_path_cache", "json_data", "=", "json", ".", "dumps", "(", "data", ")", "with", "open", "(", "filename", ",", "'w'", ")", "as", "cache", ":", "cache", ".", "write", "(", "json_data", ")", "return", "True" ]
Writes data to file as JSON. Returns True.
[ "Writes", "data", "to", "file", "as", "JSON", ".", "Returns", "True", "." ]
train
https://github.com/StanfordBioinformatics/loom/blob/db2031a1a87124fee1aeb7414a668c03d774a698/client/loomengine/playbooks/gce.py#L147-L154
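Together with get_all_data_from_cache, this is a plain JSON round-trip through a file; for example:

import json

data = {'_meta': {'hostvars': {}}, 'status_running': ['worker-1']}
with open('/tmp/ansible-gce.cache', 'w') as cache:
    cache.write(json.dumps(data))
with open('/tmp/ansible-gce.cache', 'r') as cache:
    assert json.loads(cache.read()) == data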
StanfordBioinformatics/loom
client/loomengine/playbooks/gce.py
GceInventory.get_config
def get_config(self): """ Reads the settings from the gce.ini file. Populates a SafeConfigParser object with defaults and attempts to read an .ini-style configuration from the filename specified in GCE_INI_PATH. If the environment variable is not present, the filename defaults to gce.ini in the current working directory. """ gce_ini_default_path = os.path.join( os.path.dirname(os.path.realpath(__file__)), "gce.ini") gce_ini_path = os.environ.get('GCE_INI_PATH', gce_ini_default_path) # Create a ConfigParser. # This provides empty defaults to each key, so that environment # variable configuration (as opposed to INI configuration) is able # to work. config = ConfigParser.SafeConfigParser(defaults={ 'gce_service_account_email_address': '', 'gce_service_account_pem_file_path': '', 'gce_project_id': '', 'libcloud_secrets': '', 'inventory_ip_type': '', 'cache_path': '~/.ansible/tmp', 'cache_max_age': '300' }) if 'gce' not in config.sections(): config.add_section('gce') if 'inventory' not in config.sections(): config.add_section('inventory') if 'cache' not in config.sections(): config.add_section('cache') config.read(gce_ini_path) ######### # Section added for processing ini settings ######### # Set the instance_states filter based on config file options self.instance_states = [] if config.has_option('gce', 'instance_states'): states = config.get('gce', 'instance_states') # Ignore if instance_states is an empty string. if states: self.instance_states = states.split(',') # Caching cache_path = config.get('cache', 'cache_path') cache_max_age = config.getint('cache', 'cache_max_age') # TODO(supertom): support project-specific caches cache_name = 'ansible-gce.cache' self.cache = CloudInventoryCache(cache_path=cache_path, cache_max_age=cache_max_age, cache_name=cache_name) return config
python
def get_config(self): """ Reads the settings from the gce.ini file. Populates a SafeConfigParser object with defaults and attempts to read an .ini-style configuration from the filename specified in GCE_INI_PATH. If the environment variable is not present, the filename defaults to gce.ini in the current working directory. """ gce_ini_default_path = os.path.join( os.path.dirname(os.path.realpath(__file__)), "gce.ini") gce_ini_path = os.environ.get('GCE_INI_PATH', gce_ini_default_path) # Create a ConfigParser. # This provides empty defaults to each key, so that environment # variable configuration (as opposed to INI configuration) is able # to work. config = ConfigParser.SafeConfigParser(defaults={ 'gce_service_account_email_address': '', 'gce_service_account_pem_file_path': '', 'gce_project_id': '', 'libcloud_secrets': '', 'inventory_ip_type': '', 'cache_path': '~/.ansible/tmp', 'cache_max_age': '300' }) if 'gce' not in config.sections(): config.add_section('gce') if 'inventory' not in config.sections(): config.add_section('inventory') if 'cache' not in config.sections(): config.add_section('cache') config.read(gce_ini_path) ######### # Section added for processing ini settings ######### # Set the instance_states filter based on config file options self.instance_states = [] if config.has_option('gce', 'instance_states'): states = config.get('gce', 'instance_states') # Ignore if instance_states is an empty string. if states: self.instance_states = states.split(',') # Caching cache_path = config.get('cache', 'cache_path') cache_max_age = config.getint('cache', 'cache_max_age') # TODO(supertom): support project-specific caches cache_name = 'ansible-gce.cache' self.cache = CloudInventoryCache(cache_path=cache_path, cache_max_age=cache_max_age, cache_name=cache_name) return config
[ "def", "get_config", "(", "self", ")", ":", "gce_ini_default_path", "=", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "dirname", "(", "os", ".", "path", ".", "realpath", "(", "__file__", ")", ")", ",", "\"gce.ini\"", ")", "gce_ini_path", "=", "os", ".", "environ", ".", "get", "(", "'GCE_INI_PATH'", ",", "gce_ini_default_path", ")", "# Create a ConfigParser.", "# This provides empty defaults to each key, so that environment", "# variable configuration (as opposed to INI configuration) is able", "# to work.", "config", "=", "ConfigParser", ".", "SafeConfigParser", "(", "defaults", "=", "{", "'gce_service_account_email_address'", ":", "''", ",", "'gce_service_account_pem_file_path'", ":", "''", ",", "'gce_project_id'", ":", "''", ",", "'libcloud_secrets'", ":", "''", ",", "'inventory_ip_type'", ":", "''", ",", "'cache_path'", ":", "'~/.ansible/tmp'", ",", "'cache_max_age'", ":", "'300'", "}", ")", "if", "'gce'", "not", "in", "config", ".", "sections", "(", ")", ":", "config", ".", "add_section", "(", "'gce'", ")", "if", "'inventory'", "not", "in", "config", ".", "sections", "(", ")", ":", "config", ".", "add_section", "(", "'inventory'", ")", "if", "'cache'", "not", "in", "config", ".", "sections", "(", ")", ":", "config", ".", "add_section", "(", "'cache'", ")", "config", ".", "read", "(", "gce_ini_path", ")", "#########", "# Section added for processing ini settings", "#########", "# Set the instance_states filter based on config file options", "self", ".", "instance_states", "=", "[", "]", "if", "config", ".", "has_option", "(", "'gce'", ",", "'instance_states'", ")", ":", "states", "=", "config", ".", "get", "(", "'gce'", ",", "'instance_states'", ")", "# Ignore if instance_states is an empty string.", "if", "states", ":", "self", ".", "instance_states", "=", "states", ".", "split", "(", "','", ")", "# Caching", "cache_path", "=", "config", ".", "get", "(", "'cache'", ",", "'cache_path'", ")", "cache_max_age", "=", "config", ".", "getint", "(", "'cache'", ",", "'cache_max_age'", ")", "# TOOD(supertom): support project-specific caches", "cache_name", "=", "'ansible-gce.cache'", "self", ".", "cache", "=", "CloudInventoryCache", "(", "cache_path", "=", "cache_path", ",", "cache_max_age", "=", "cache_max_age", ",", "cache_name", "=", "cache_name", ")", "return", "config" ]
Reads the settings from the gce.ini file. Populates a SafeConfigParser object with defaults and attempts to read an .ini-style configuration from the filename specified in GCE_INI_PATH. If the environment variable is not present, the filename defaults to gce.ini in the current working directory.
[ "Reads", "the", "settings", "from", "the", "gce", ".", "ini", "file", "." ]
train
https://github.com/StanfordBioinformatics/loom/blob/db2031a1a87124fee1aeb7414a668c03d774a698/client/loomengine/playbooks/gce.py#L198-L254
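The defaults-plus-empty-sections pattern above guarantees that every lookup resolves even when no ini file exists. A minimal sketch of the same idea, written against Python 3's configparser (the source targets Python 2's ConfigParser.SafeConfigParser):

import configparser

config = configparser.ConfigParser(defaults={'cache_max_age': '300'})
if 'cache' not in config.sections():
    config.add_section('cache')
config.read('gce.ini')  # a missing file is silently ignored
print(config.getint('cache', 'cache_max_age'))  # -> 300 unless the ini overrides it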
StanfordBioinformatics/loom
client/loomengine/playbooks/gce.py
GceInventory.get_inventory_options
def get_inventory_options(self): """Determine inventory options. Environment variables always take precedence over configuration files.""" ip_type = self.config.get('inventory', 'inventory_ip_type') # If the appropriate environment variables are set, they override # other configuration ip_type = os.environ.get('INVENTORY_IP_TYPE', ip_type) return ip_type
python
def get_inventory_options(self): """Determine inventory options. Environment variables always take precedence over configuration files.""" ip_type = self.config.get('inventory', 'inventory_ip_type') # If the appropriate environment variables are set, they override # other configuration ip_type = os.environ.get('INVENTORY_IP_TYPE', ip_type) return ip_type
[ "def", "get_inventory_options", "(", "self", ")", ":", "ip_type", "=", "self", ".", "config", ".", "get", "(", "'inventory'", ",", "'inventory_ip_type'", ")", "# If the appropriate environment variables are set, they override", "# other configuration", "ip_type", "=", "os", ".", "environ", ".", "get", "(", "'INVENTORY_IP_TYPE'", ",", "ip_type", ")", "return", "ip_type" ]
Determine inventory options. Environment variables always take precedence over configuration files.
[ "Determine", "inventory", "options", ".", "Environment", "variables", "always", "take", "precedence", "over", "configuration", "files", "." ]
train
https://github.com/StanfordBioinformatics/loom/blob/db2031a1a87124fee1aeb7414a668c03d774a698/client/loomengine/playbooks/gce.py#L256-L263
StanfordBioinformatics/loom
client/loomengine/playbooks/gce.py
GceInventory.get_gce_driver
def get_gce_driver(self): """Determine the GCE authorization settings and return a libcloud driver. """ # Attempt to get GCE params from a configuration file, if one # exists. secrets_path = self.config.get('gce', 'libcloud_secrets') secrets_found = False try: import secrets args = list(getattr(secrets, 'GCE_PARAMS', [])) kwargs = getattr(secrets, 'GCE_KEYWORD_PARAMS', {}) secrets_found = True except: pass if not secrets_found and secrets_path: if not secrets_path.endswith('secrets.py'): err = "Must specify libcloud secrets file as " err += "/absolute/path/to/secrets.py" sys.exit(err) sys.path.append(os.path.dirname(secrets_path)) try: import secrets args = list(getattr(secrets, 'GCE_PARAMS', [])) kwargs = getattr(secrets, 'GCE_KEYWORD_PARAMS', {}) secrets_found = True except: pass if not secrets_found: args = [ self.config.get('gce','gce_service_account_email_address'), self.config.get('gce','gce_service_account_pem_file_path') ] kwargs = {'project': self.config.get('gce', 'gce_project_id')} # If the appropriate environment variables are set, they override # other configuration; process those into our args and kwargs. args[0] = os.environ.get('GCE_EMAIL', args[0]) args[1] = os.environ.get('GCE_PEM_FILE_PATH', args[1]) kwargs['project'] = os.environ.get('GCE_PROJECT', kwargs['project']) # Retrieve and return the GCE driver. gce = get_driver(Provider.GCE)(*args, **kwargs) gce.connection.user_agent_append( '%s/%s' % (USER_AGENT_PRODUCT, USER_AGENT_VERSION), ) return gce
python
def get_gce_driver(self): """Determine the GCE authorization settings and return a libcloud driver. """ # Attempt to get GCE params from a configuration file, if one # exists. secrets_path = self.config.get('gce', 'libcloud_secrets') secrets_found = False try: import secrets args = list(getattr(secrets, 'GCE_PARAMS', [])) kwargs = getattr(secrets, 'GCE_KEYWORD_PARAMS', {}) secrets_found = True except: pass if not secrets_found and secrets_path: if not secrets_path.endswith('secrets.py'): err = "Must specify libcloud secrets file as " err += "/absolute/path/to/secrets.py" sys.exit(err) sys.path.append(os.path.dirname(secrets_path)) try: import secrets args = list(getattr(secrets, 'GCE_PARAMS', [])) kwargs = getattr(secrets, 'GCE_KEYWORD_PARAMS', {}) secrets_found = True except: pass if not secrets_found: args = [ self.config.get('gce','gce_service_account_email_address'), self.config.get('gce','gce_service_account_pem_file_path') ] kwargs = {'project': self.config.get('gce', 'gce_project_id')} # If the appropriate environment variables are set, they override # other configuration; process those into our args and kwargs. args[0] = os.environ.get('GCE_EMAIL', args[0]) args[1] = os.environ.get('GCE_PEM_FILE_PATH', args[1]) kwargs['project'] = os.environ.get('GCE_PROJECT', kwargs['project']) # Retrieve and return the GCE driver. gce = get_driver(Provider.GCE)(*args, **kwargs) gce.connection.user_agent_append( '%s/%s' % (USER_AGENT_PRODUCT, USER_AGENT_VERSION), ) return gce
[ "def", "get_gce_driver", "(", "self", ")", ":", "# Attempt to get GCE params from a configuration file, if one", "# exists.", "secrets_path", "=", "self", ".", "config", ".", "get", "(", "'gce'", ",", "'libcloud_secrets'", ")", "secrets_found", "=", "False", "try", ":", "import", "secrets", "args", "=", "list", "(", "getattr", "(", "secrets", ",", "'GCE_PARAMS'", ",", "[", "]", ")", ")", "kwargs", "=", "getattr", "(", "secrets", ",", "'GCE_KEYWORD_PARAMS'", ",", "{", "}", ")", "secrets_found", "=", "True", "except", ":", "pass", "if", "not", "secrets_found", "and", "secrets_path", ":", "if", "not", "secrets_path", ".", "endswith", "(", "'secrets.py'", ")", ":", "err", "=", "\"Must specify libcloud secrets file as \"", "err", "+=", "\"/absolute/path/to/secrets.py\"", "sys", ".", "exit", "(", "err", ")", "sys", ".", "path", ".", "append", "(", "os", ".", "path", ".", "dirname", "(", "secrets_path", ")", ")", "try", ":", "import", "secrets", "args", "=", "list", "(", "getattr", "(", "secrets", ",", "'GCE_PARAMS'", ",", "[", "]", ")", ")", "kwargs", "=", "getattr", "(", "secrets", ",", "'GCE_KEYWORD_PARAMS'", ",", "{", "}", ")", "secrets_found", "=", "True", "except", ":", "pass", "if", "not", "secrets_found", ":", "args", "=", "[", "self", ".", "config", ".", "get", "(", "'gce'", ",", "'gce_service_account_email_address'", ")", ",", "self", ".", "config", ".", "get", "(", "'gce'", ",", "'gce_service_account_pem_file_path'", ")", "]", "kwargs", "=", "{", "'project'", ":", "self", ".", "config", ".", "get", "(", "'gce'", ",", "'gce_project_id'", ")", "}", "# If the appropriate environment variables are set, they override", "# other configuration; process those into our args and kwargs.", "args", "[", "0", "]", "=", "os", ".", "environ", ".", "get", "(", "'GCE_EMAIL'", ",", "args", "[", "0", "]", ")", "args", "[", "1", "]", "=", "os", ".", "environ", ".", "get", "(", "'GCE_PEM_FILE_PATH'", ",", "args", "[", "1", "]", ")", "kwargs", "[", "'project'", "]", "=", "os", ".", "environ", ".", "get", "(", "'GCE_PROJECT'", ",", "kwargs", "[", "'project'", "]", ")", "# Retrieve and return the GCE driver.", "gce", "=", "get_driver", "(", "Provider", ".", "GCE", ")", "(", "*", "args", ",", "*", "*", "kwargs", ")", "gce", ".", "connection", ".", "user_agent_append", "(", "'%s/%s'", "%", "(", "USER_AGENT_PRODUCT", ",", "USER_AGENT_VERSION", ")", ",", ")", "return", "gce" ]
Determine the GCE authorization settings and return a libcloud driver.
[ "Determine", "the", "GCE", "authorization", "settings", "and", "return", "a", "libcloud", "driver", "." ]
train
https://github.com/StanfordBioinformatics/loom/blob/db2031a1a87124fee1aeb7414a668c03d774a698/client/loomengine/playbooks/gce.py#L265-L312
StanfordBioinformatics/loom
client/loomengine/playbooks/gce.py
GceInventory.parse_env_zones
def parse_env_zones(self): '''returns a list of comma-separated zones parsed from the GCE_ZONE environment variable. If provided, this will be used to filter the results of the group_instances call''' import csv reader = csv.reader([os.environ.get('GCE_ZONE',"")], skipinitialspace=True) zones = [r for r in reader] return [z for z in zones[0]]
python
def parse_env_zones(self): '''returns a list of comma-separated zones parsed from the GCE_ZONE environment variable. If provided, this will be used to filter the results of the group_instances call''' import csv reader = csv.reader([os.environ.get('GCE_ZONE',"")], skipinitialspace=True) zones = [r for r in reader] return [z for z in zones[0]]
[ "def", "parse_env_zones", "(", "self", ")", ":", "import", "csv", "reader", "=", "csv", ".", "reader", "(", "[", "os", ".", "environ", ".", "get", "(", "'GCE_ZONE'", ",", "\"\"", ")", "]", ",", "skipinitialspace", "=", "True", ")", "zones", "=", "[", "r", "for", "r", "in", "reader", "]", "return", "[", "z", "for", "z", "in", "zones", "[", "0", "]", "]" ]
returns a list of comma-separated zones parsed from the GCE_ZONE environment variable. If provided, this will be used to filter the results of the group_instances call
[ "returns", "a", "list", "of", "comma", "separated", "zones", "parsed", "from", "the", "GCE_ZONE", "environment", "variable", ".", "If", "provided", "this", "will", "be", "used", "to", "filter", "the", "results", "of", "the", "grouped_instances", "call" ]
train
https://github.com/StanfordBioinformatics/loom/blob/db2031a1a87124fee1aeb7414a668c03d774a698/client/loomengine/playbooks/gce.py#L314-L320
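csv.reader with skipinitialspace=True is a compact way to split a comma-separated environment variable while tolerating spaces after the commas; for example:

import csv

raw = 'us-central1-a, us-central1-b,europe-west1-d'
zones = next(csv.reader([raw], skipinitialspace=True))
print(zones)  # -> ['us-central1-a', 'us-central1-b', 'europe-west1-d']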
StanfordBioinformatics/loom
client/loomengine/playbooks/gce.py
GceInventory.parse_cli_args
def parse_cli_args(self): ''' Command line argument processing ''' parser = argparse.ArgumentParser( description='Produce an Ansible Inventory file based on GCE') parser.add_argument('--list', action='store_true', default=True, help='List instances (default: True)') parser.add_argument('--host', action='store', help='Get all information about an instance') parser.add_argument('--pretty', action='store_true', default=False, help='Pretty format (default: False)') parser.add_argument( '--refresh-cache', action='store_true', default=False, help='Force refresh of cache by making API requests (default: False - use cache files)') self.args = parser.parse_args()
python
def parse_cli_args(self): ''' Command line argument processing ''' parser = argparse.ArgumentParser( description='Produce an Ansible Inventory file based on GCE') parser.add_argument('--list', action='store_true', default=True, help='List instances (default: True)') parser.add_argument('--host', action='store', help='Get all information about an instance') parser.add_argument('--pretty', action='store_true', default=False, help='Pretty format (default: False)') parser.add_argument( '--refresh-cache', action='store_true', default=False, help='Force refresh of cache by making API requests (default: False - use cache files)') self.args = parser.parse_args()
[ "def", "parse_cli_args", "(", "self", ")", ":", "parser", "=", "argparse", ".", "ArgumentParser", "(", "description", "=", "'Produce an Ansible Inventory file based on GCE'", ")", "parser", ".", "add_argument", "(", "'--list'", ",", "action", "=", "'store_true'", ",", "default", "=", "True", ",", "help", "=", "'List instances (default: True)'", ")", "parser", ".", "add_argument", "(", "'--host'", ",", "action", "=", "'store'", ",", "help", "=", "'Get all information about an instance'", ")", "parser", ".", "add_argument", "(", "'--pretty'", ",", "action", "=", "'store_true'", ",", "default", "=", "False", ",", "help", "=", "'Pretty format (default: False)'", ")", "parser", ".", "add_argument", "(", "'--refresh-cache'", ",", "action", "=", "'store_true'", ",", "default", "=", "False", ",", "help", "=", "'Force refresh of cache by making API requests (default: False - use cache files)'", ")", "self", ".", "args", "=", "parser", ".", "parse_args", "(", ")" ]
Command line argument processing
[ "Command", "line", "argument", "processing" ]
train
https://github.com/StanfordBioinformatics/loom/blob/db2031a1a87124fee1aeb7414a668c03d774a698/client/loomengine/playbooks/gce.py#L322-L336
StanfordBioinformatics/loom
client/loomengine/playbooks/gce.py
GceInventory.load_inventory_from_cache
def load_inventory_from_cache(self): ''' Loads inventory from JSON on disk. ''' try: self.inventory = self.cache.get_all_data_from_cache() hosts = self.inventory['_meta']['hostvars'] except Exception as e: print( "Invalid inventory file %s. Please rebuild with --refresh-cache option." % (self.cache.cache_path_cache)) raise
python
def load_inventory_from_cache(self): ''' Loads inventory from JSON on disk. ''' try: self.inventory = self.cache.get_all_data_from_cache() hosts = self.inventory['_meta']['hostvars'] except Exception as e: print( "Invalid inventory file %s. Please rebuild with --refresh-cache option." % (self.cache.cache_path_cache)) raise
[ "def", "load_inventory_from_cache", "(", "self", ")", ":", "try", ":", "self", ".", "inventory", "=", "self", ".", "cache", ".", "get_all_data_from_cache", "(", ")", "hosts", "=", "self", ".", "inventory", "[", "'_meta'", "]", "[", "'hostvars'", "]", "except", "Exception", "as", "e", ":", "print", "(", "\"Invalid inventory file %s. Please rebuild with -refresh-cache option.\"", "%", "(", "self", ".", "cache", ".", "cache_path_cache", ")", ")", "raise" ]
Loads inventory from JSON on disk.
[ "Loads", "inventory", "from", "JSON", "on", "disk", "." ]
train
https://github.com/StanfordBioinformatics/loom/blob/db2031a1a87124fee1aeb7414a668c03d774a698/client/loomengine/playbooks/gce.py#L374-L384
StanfordBioinformatics/loom
client/loomengine/playbooks/gce.py
GceInventory.do_api_calls_update_cache
def do_api_calls_update_cache(self): ''' Do API calls and save data in cache. ''' zones = self.parse_env_zones() data = self.group_instances(zones) self.cache.write_to_cache(data) self.inventory = data
python
def do_api_calls_update_cache(self): ''' Do API calls and save data in cache. ''' zones = self.parse_env_zones() data = self.group_instances(zones) self.cache.write_to_cache(data) self.inventory = data
[ "def", "do_api_calls_update_cache", "(", "self", ")", ":", "zones", "=", "self", ".", "parse_env_zones", "(", ")", "data", "=", "self", ".", "group_instances", "(", "zones", ")", "self", ".", "cache", ".", "write_to_cache", "(", "data", ")", "self", ".", "inventory", "=", "data" ]
Do API calls and save data in cache.
[ "Do", "API", "calls", "and", "save", "data", "in", "cache", "." ]
train
https://github.com/StanfordBioinformatics/loom/blob/db2031a1a87124fee1aeb7414a668c03d774a698/client/loomengine/playbooks/gce.py#L386-L391
StanfordBioinformatics/loom
client/loomengine/playbooks/gce.py
GceInventory.group_instances
def group_instances(self, zones=None): '''Group all instances''' groups = {} meta = {} meta["hostvars"] = {} for node in self.list_nodes(): # This check filters on the desired instance states defined in the # config file with the instance_states config option. # # If the instance_states list is _empty_ then _ALL_ states are returned. # # If the instance_states list is _populated_ then check the current # state against the instance_states list if self.instance_states and not node.extra['status'] in self.instance_states: continue name = node.name meta["hostvars"][name] = self.node_to_dict(node) zone = node.extra['zone'].name # To avoid making multiple requests per zone # we list all nodes and then filter the results if zones and zone not in zones: continue if zone in groups: groups[zone].append(name) else: groups[zone] = [name] tags = node.extra['tags'] for t in tags: if t.startswith('group-'): tag = t[6:] else: tag = 'tag_%s' % t if tag in groups: groups[tag].append(name) else: groups[tag] = [name] net = node.extra['networkInterfaces'][0]['network'].split('/')[-1] net = 'network_%s' % net if net in groups: groups[net].append(name) else: groups[net] = [name] machine_type = node.size if machine_type in groups: groups[machine_type].append(name) else: groups[machine_type] = [name] image = node.image and node.image or 'persistent_disk' if image in groups: groups[image].append(name) else: groups[image] = [name] status = node.extra['status'] stat = 'status_%s' % status.lower() if stat in groups: groups[stat].append(name) else: groups[stat] = [name] groups["_meta"] = meta return groups
python
def group_instances(self, zones=None): '''Group all instances''' groups = {} meta = {} meta["hostvars"] = {} for node in self.list_nodes(): # This check filters on the desired instance states defined in the # config file with the instance_states config option. # # If the instance_states list is _empty_ then _ALL_ states are returned. # # If the instance_states list is _populated_ then check the current # state against the instance_states list if self.instance_states and not node.extra['status'] in self.instance_states: continue name = node.name meta["hostvars"][name] = self.node_to_dict(node) zone = node.extra['zone'].name # To avoid making multiple requests per zone # we list all nodes and then filter the results if zones and zone not in zones: continue if zone in groups: groups[zone].append(name) else: groups[zone] = [name] tags = node.extra['tags'] for t in tags: if t.startswith('group-'): tag = t[6:] else: tag = 'tag_%s' % t if tag in groups: groups[tag].append(name) else: groups[tag] = [name] net = node.extra['networkInterfaces'][0]['network'].split('/')[-1] net = 'network_%s' % net if net in groups: groups[net].append(name) else: groups[net] = [name] machine_type = node.size if machine_type in groups: groups[machine_type].append(name) else: groups[machine_type] = [name] image = node.image and node.image or 'persistent_disk' if image in groups: groups[image].append(name) else: groups[image] = [name] status = node.extra['status'] stat = 'status_%s' % status.lower() if stat in groups: groups[stat].append(name) else: groups[stat] = [name] groups["_meta"] = meta return groups
[ "def", "group_instances", "(", "self", ",", "zones", "=", "None", ")", ":", "groups", "=", "{", "}", "meta", "=", "{", "}", "meta", "[", "\"hostvars\"", "]", "=", "{", "}", "for", "node", "in", "self", ".", "list_nodes", "(", ")", ":", "# This check filters on the desired instance states defined in the", "# config file with the instance_states config option.", "#", "# If the instance_states list is _empty_ then _ALL_ states are returned.", "#", "# If the instance_states list is _populated_ then check the current", "# state against the instance_states list", "if", "self", ".", "instance_states", "and", "not", "node", ".", "extra", "[", "'status'", "]", "in", "self", ".", "instance_states", ":", "continue", "name", "=", "node", ".", "name", "meta", "[", "\"hostvars\"", "]", "[", "name", "]", "=", "self", ".", "node_to_dict", "(", "node", ")", "zone", "=", "node", ".", "extra", "[", "'zone'", "]", ".", "name", "# To avoid making multiple requests per zone", "# we list all nodes and then filter the results", "if", "zones", "and", "zone", "not", "in", "zones", ":", "continue", "if", "zone", "in", "groups", ":", "groups", "[", "zone", "]", ".", "append", "(", "name", ")", "else", ":", "groups", "[", "zone", "]", "=", "[", "name", "]", "tags", "=", "node", ".", "extra", "[", "'tags'", "]", "for", "t", "in", "tags", ":", "if", "t", ".", "startswith", "(", "'group-'", ")", ":", "tag", "=", "t", "[", "6", ":", "]", "else", ":", "tag", "=", "'tag_%s'", "%", "t", "if", "tag", "in", "groups", ":", "groups", "[", "tag", "]", ".", "append", "(", "name", ")", "else", ":", "groups", "[", "tag", "]", "=", "[", "name", "]", "net", "=", "node", ".", "extra", "[", "'networkInterfaces'", "]", "[", "0", "]", "[", "'network'", "]", ".", "split", "(", "'/'", ")", "[", "-", "1", "]", "net", "=", "'network_%s'", "%", "net", "if", "net", "in", "groups", ":", "groups", "[", "net", "]", ".", "append", "(", "name", ")", "else", ":", "groups", "[", "net", "]", "=", "[", "name", "]", "machine_type", "=", "node", ".", "size", "if", "machine_type", "in", "groups", ":", "groups", "[", "machine_type", "]", ".", "append", "(", "name", ")", "else", ":", "groups", "[", "machine_type", "]", "=", "[", "name", "]", "image", "=", "node", ".", "image", "and", "node", ".", "image", "or", "'persistent_disk'", "if", "image", "in", "groups", ":", "groups", "[", "image", "]", ".", "append", "(", "name", ")", "else", ":", "groups", "[", "image", "]", "=", "[", "name", "]", "status", "=", "node", ".", "extra", "[", "'status'", "]", "stat", "=", "'status_%s'", "%", "status", ".", "lower", "(", ")", "if", "stat", "in", "groups", ":", "groups", "[", "stat", "]", ".", "append", "(", "name", ")", "else", ":", "groups", "[", "stat", "]", "=", "[", "name", "]", "groups", "[", "\"_meta\"", "]", "=", "meta", "return", "groups" ]
Group all instances
[ "Group", "all", "instances" ]
train
https://github.com/StanfordBioinformatics/loom/blob/db2031a1a87124fee1aeb7414a668c03d774a698/client/loomengine/playbooks/gce.py#L402-L463
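The repeated append-or-create branches above are the classic grouping idiom; collections.defaultdict expresses the same bookkeeping more compactly, shown here on made-up instance data:

from collections import defaultdict

instances = [('w1', 'us-central1-a', 'RUNNING'), ('w2', 'us-central1-a', 'TERMINATED')]
groups = defaultdict(list)
for name, zone, status in instances:
    groups[zone].append(name)
    groups['status_%s' % status.lower()].append(name)

print(dict(groups))
# -> {'us-central1-a': ['w1', 'w2'], 'status_running': ['w1'], 'status_terminated': ['w2']}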
StanfordBioinformatics/loom
client/loomengine/playbooks/gce.py
GceInventory.json_format_dict
def json_format_dict(self, data, pretty=False): ''' Converts a dict to a JSON object and dumps it as a formatted string ''' if pretty: return json.dumps(data, sort_keys=True, indent=2) else: return json.dumps(data)
python
def json_format_dict(self, data, pretty=False): ''' Converts a dict to a JSON object and dumps it as a formatted string ''' if pretty: return json.dumps(data, sort_keys=True, indent=2) else: return json.dumps(data)
[ "def", "json_format_dict", "(", "self", ",", "data", ",", "pretty", "=", "False", ")", ":", "if", "pretty", ":", "return", "json", ".", "dumps", "(", "data", ",", "sort_keys", "=", "True", ",", "indent", "=", "2", ")", "else", ":", "return", "json", ".", "dumps", "(", "data", ")" ]
Converts a dict to a JSON object and dumps it as a formatted string
[ "Converts", "a", "dict", "to", "a", "JSON", "object", "and", "dumps", "it", "as", "a", "formatted", "string" ]
train
https://github.com/StanfordBioinformatics/loom/blob/db2031a1a87124fee1aeb7414a668c03d774a698/client/loomengine/playbooks/gce.py#L465-L472
StanfordBioinformatics/loom
worker/loomengine_worker/task_monitor.py
TaskMonitor._stream_docker_logs
def _stream_docker_logs(self): """Stream stdout and stderr from the task container to this process's stdout and stderr, respectively. """ thread = threading.Thread(target=self._stderr_stream_worker) thread.start() for line in self.docker_client.logs(self.container, stdout=True, stderr=False, stream=True): sys.stdout.write(line) thread.join()
python
def _stream_docker_logs(self): """Stream stdout and stderr from the task container to this process's stdout and stderr, respectively. """ thread = threading.Thread(target=self._stderr_stream_worker) thread.start() for line in self.docker_client.logs(self.container, stdout=True, stderr=False, stream=True): sys.stdout.write(line) thread.join()
[ "def", "_stream_docker_logs", "(", "self", ")", ":", "thread", "=", "threading", ".", "Thread", "(", "target", "=", "self", ".", "_stderr_stream_worker", ")", "thread", ".", "start", "(", ")", "for", "line", "in", "self", ".", "docker_client", ".", "logs", "(", "self", ".", "container", ",", "stdout", "=", "True", ",", "stderr", "=", "False", ",", "stream", "=", "True", ")", ":", "sys", ".", "stdout", ".", "write", "(", "line", ")", "thread", ".", "join", "(", ")" ]
Stream stdout and stderr from the task container to this process's stdout and stderr, respectively.
[ "Stream", "stdout", "and", "stderr", "from", "the", "task", "container", "to", "this", "process", "s", "stdout", "and", "stderr", "respectively", "." ]
train
https://github.com/StanfordBioinformatics/loom/blob/db2031a1a87124fee1aeb7414a668c03d774a698/worker/loomengine_worker/task_monitor.py#L286-L295
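The pattern here is to drain one blocking stream on a background thread while the main thread drains the other, then join; a self-contained sketch with plain iterables standing in for the Docker log streams (the source's _stderr_stream_worker is assumed to do the stderr half):

import sys
import threading

def pump(stream, sink):
    # Copy a line-oriented stream to a file-like sink until it is exhausted.
    for line in stream:
        sink.write(line)

stdout_stream = iter(['out 1\n', 'out 2\n'])  # stand-in for container stdout
stderr_stream = iter(['err 1\n'])             # stand-in for container stderr

worker = threading.Thread(target=pump, args=(stderr_stream, sys.stderr))
worker.start()
pump(stdout_stream, sys.stdout)
worker.join()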
StanfordBioinformatics/loom
server/loomengine_server/api/serializers/templates.py
TemplateSerializer.to_internal_value
def to_internal_value(self, data): """Because we allow template ID string values, where serializers normally expect a dict """ converted_data = _convert_template_id_to_dict(data) return super(TemplateSerializer, self)\ .to_internal_value(converted_data)
python
def to_internal_value(self, data): """Because we allow template ID string values, where serializers normally expect a dict """ converted_data = _convert_template_id_to_dict(data) return super(TemplateSerializer, self)\ .to_internal_value(converted_data)
[ "def", "to_internal_value", "(", "self", ",", "data", ")", ":", "converted_data", "=", "_convert_template_id_to_dict", "(", "data", ")", "return", "super", "(", "TemplateSerializer", ",", "self", ")", ".", "to_internal_value", "(", "converted_data", ")" ]
Because we allow template ID string values, where serializers normally expect a dict
[ "Because", "we", "allow", "template", "ID", "string", "values", "where", "serializers", "normally", "expect", "a", "dict" ]
train
https://github.com/StanfordBioinformatics/loom/blob/db2031a1a87124fee1aeb7414a668c03d774a698/server/loomengine_server/api/serializers/templates.py#L105-L111
rbarrois/python-semanticversion
semantic_version/base.py
identifier_cmp
def identifier_cmp(a, b): """Compare two identifiers (for pre-release/build components).""" a_cmp, a_is_int = _to_int(a) b_cmp, b_is_int = _to_int(b) if a_is_int and b_is_int: # Numeric identifiers are compared as integers return base_cmp(a_cmp, b_cmp) elif a_is_int: # Numeric identifiers have lower precedence return -1 elif b_is_int: return 1 else: # Non-numeric identifiers are compared lexicographically return base_cmp(a_cmp, b_cmp)
python
def identifier_cmp(a, b): """Compare two identifiers (for pre-release/build components).""" a_cmp, a_is_int = _to_int(a) b_cmp, b_is_int = _to_int(b) if a_is_int and b_is_int: # Numeric identifiers are compared as integers return base_cmp(a_cmp, b_cmp) elif a_is_int: # Numeric identifiers have lower precedence return -1 elif b_is_int: return 1 else: # Non-numeric identifiers are compared lexicographically return base_cmp(a_cmp, b_cmp)
[ "def", "identifier_cmp", "(", "a", ",", "b", ")", ":", "a_cmp", ",", "a_is_int", "=", "_to_int", "(", "a", ")", "b_cmp", ",", "b_is_int", "=", "_to_int", "(", "b", ")", "if", "a_is_int", "and", "b_is_int", ":", "# Numeric identifiers are compared as integers", "return", "base_cmp", "(", "a_cmp", ",", "b_cmp", ")", "elif", "a_is_int", ":", "# Numeric identifiers have lower precedence", "return", "-", "1", "elif", "b_is_int", ":", "return", "1", "else", ":", "# Non-numeric identifiers are compared lexicographically", "return", "base_cmp", "(", "a_cmp", ",", "b_cmp", ")" ]
Compare two identifiers (for pre-release/build components).
[ "Compare", "two", "identifier", "(", "for", "pre", "-", "release", "/", "build", "components", ")", "." ]
train
https://github.com/rbarrois/python-semanticversion/blob/fdef1e9cdae901d095d8e8c9cd6fa6adcfe02074/semantic_version/base.py#L28-L44
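Per the semver rules encoded here, numeric identifiers compare as integers and sort before alphanumeric ones, which otherwise compare lexicographically. A standalone re-implementation for illustration (the module's _to_int and base_cmp helpers are private):

def identifier_cmp_demo(a, b):
    a_num, b_num = a.isdigit(), b.isdigit()
    if a_num and b_num:
        return (int(a) > int(b)) - (int(a) < int(b))  # numeric comparison
    if a_num != b_num:
        return -1 if a_num else 1                     # numeric sorts lower
    return (a > b) - (a < b)                          # lexicographic

print(identifier_cmp_demo('2', '11'))        # -> -1 (2 < 11 as integers)
print(identifier_cmp_demo('2', 'alpha'))     # -> -1 (numeric < alphanumeric)
print(identifier_cmp_demo('beta', 'alpha'))  # -> 1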
rbarrois/python-semanticversion
semantic_version/base.py
identifier_list_cmp
def identifier_list_cmp(a, b): """Compare two identifier lists (pre-release/build components). The rule is: - Identifiers are paired between lists - They are compared from left to right - If all first identifiers match, the longest list is greater. >>> identifier_list_cmp(['1', '2'], ['1', '2']) 0 >>> identifier_list_cmp(['1', '2a'], ['1', '2b']) -1 >>> identifier_list_cmp(['1'], ['1', '2']) -1 """ identifier_pairs = zip(a, b) for id_a, id_b in identifier_pairs: cmp_res = identifier_cmp(id_a, id_b) if cmp_res != 0: return cmp_res # alpha1.3 < alpha1.3.1 return base_cmp(len(a), len(b))
python
def identifier_list_cmp(a, b): """Compare two identifier lists (pre-release/build components). The rule is: - Identifiers are paired between lists - They are compared from left to right - If all first identifiers match, the longest list is greater. >>> identifier_list_cmp(['1', '2'], ['1', '2']) 0 >>> identifier_list_cmp(['1', '2a'], ['1', '2b']) -1 >>> identifier_list_cmp(['1'], ['1', '2']) -1 """ identifier_pairs = zip(a, b) for id_a, id_b in identifier_pairs: cmp_res = identifier_cmp(id_a, id_b) if cmp_res != 0: return cmp_res # alpha1.3 < alpha1.3.1 return base_cmp(len(a), len(b))
[ "def", "identifier_list_cmp", "(", "a", ",", "b", ")", ":", "identifier_pairs", "=", "zip", "(", "a", ",", "b", ")", "for", "id_a", ",", "id_b", "in", "identifier_pairs", ":", "cmp_res", "=", "identifier_cmp", "(", "id_a", ",", "id_b", ")", "if", "cmp_res", "!=", "0", ":", "return", "cmp_res", "# alpha1.3 < alpha1.3.1", "return", "base_cmp", "(", "len", "(", "a", ")", ",", "len", "(", "b", ")", ")" ]
Compare two identifier lists (pre-release/build components). The rule is: - Identifiers are paired between lists - They are compared from left to right - If all first identifiers match, the longest list is greater. >>> identifier_list_cmp(['1', '2'], ['1', '2']) 0 >>> identifier_list_cmp(['1', '2a'], ['1', '2b']) -1 >>> identifier_list_cmp(['1'], ['1', '2']) -1
[ "Compare", "two", "identifier", "list", "(", "pre", "-", "release", "/", "build", "components", ")", "." ]
train
https://github.com/rbarrois/python-semanticversion/blob/fdef1e9cdae901d095d8e8c9cd6fa6adcfe02074/semantic_version/base.py#L47-L68
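Because these cmp-style functions return -1/0/1, they plug directly into sorting through functools.cmp_to_key; for example, ordering prerelease component lists (reusing the identifier_cmp_demo sketch above as the element comparator):

import functools

def identifier_list_cmp_demo(a, b):
    for id_a, id_b in zip(a, b):
        res = identifier_cmp_demo(id_a, id_b)
        if res != 0:
            return res
    return (len(a) > len(b)) - (len(a) < len(b))  # shorter list sorts first

lists = [['alpha', '2'], ['alpha', '10'], ['alpha']]
print(sorted(lists, key=functools.cmp_to_key(identifier_list_cmp_demo)))
# -> [['alpha'], ['alpha', '2'], ['alpha', '10']]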
rbarrois/python-semanticversion
semantic_version/base.py
Version.coerce
def coerce(cls, version_string, partial=False): """Coerce an arbitrary version string into a semver-compatible one. The rule is: - If not enough components, fill minor/patch with zeroes; unless partial=True - If more than 3 dot-separated components, extra components are "build" data. If some "build" data already appeared, append it to the extra components Examples: >>> Version.coerce('0.1') Version(0, 1, 0) >>> Version.coerce('0.1.2.3') Version(0, 1, 2, (), ('3',)) >>> Version.coerce('0.1.2.3+4') Version(0, 1, 2, (), ('3', '4')) >>> Version.coerce('0.1+2-3+4_5') Version(0, 1, 0, (), ('2-3', '4-5')) """ base_re = re.compile(r'^\d+(?:\.\d+(?:\.\d+)?)?') match = base_re.match(version_string) if not match: raise ValueError( "Version string lacks a numerical component: %r" % version_string ) version = version_string[:match.end()] if not partial: # We need a not-partial version. while version.count('.') < 2: version += '.0' if match.end() == len(version_string): return Version(version, partial=partial) rest = version_string[match.end():] # Cleanup the 'rest' rest = re.sub(r'[^a-zA-Z0-9+.-]', '-', rest) if rest[0] == '+': # A 'build' component prerelease = '' build = rest[1:] elif rest[0] == '.': # An extra version component, probably 'build' prerelease = '' build = rest[1:] elif rest[0] == '-': rest = rest[1:] if '+' in rest: prerelease, build = rest.split('+', 1) else: prerelease, build = rest, '' elif '+' in rest: prerelease, build = rest.split('+', 1) else: prerelease, build = rest, '' build = build.replace('+', '.') if prerelease: version = '%s-%s' % (version, prerelease) if build: version = '%s+%s' % (version, build) return cls(version, partial=partial)
python
def coerce(cls, version_string, partial=False): """Coerce an arbitrary version string into a semver-compatible one. The rule is: - If not enough components, fill minor/patch with zeroes; unless partial=True - If more than 3 dot-separated components, extra components are "build" data. If some "build" data already appeared, append it to the extra components Examples: >>> Version.coerce('0.1') Version(0, 1, 0) >>> Version.coerce('0.1.2.3') Version(0, 1, 2, (), ('3',)) >>> Version.coerce('0.1.2.3+4') Version(0, 1, 2, (), ('3', '4')) >>> Version.coerce('0.1+2-3+4_5') Version(0, 1, 0, (), ('2-3', '4-5')) """ base_re = re.compile(r'^\d+(?:\.\d+(?:\.\d+)?)?') match = base_re.match(version_string) if not match: raise ValueError( "Version string lacks a numerical component: %r" % version_string ) version = version_string[:match.end()] if not partial: # We need a not-partial version. while version.count('.') < 2: version += '.0' if match.end() == len(version_string): return Version(version, partial=partial) rest = version_string[match.end():] # Cleanup the 'rest' rest = re.sub(r'[^a-zA-Z0-9+.-]', '-', rest) if rest[0] == '+': # A 'build' component prerelease = '' build = rest[1:] elif rest[0] == '.': # An extra version component, probably 'build' prerelease = '' build = rest[1:] elif rest[0] == '-': rest = rest[1:] if '+' in rest: prerelease, build = rest.split('+', 1) else: prerelease, build = rest, '' elif '+' in rest: prerelease, build = rest.split('+', 1) else: prerelease, build = rest, '' build = build.replace('+', '.') if prerelease: version = '%s-%s' % (version, prerelease) if build: version = '%s+%s' % (version, build) return cls(version, partial=partial)
[ "def", "coerce", "(", "cls", ",", "version_string", ",", "partial", "=", "False", ")", ":", "base_re", "=", "re", ".", "compile", "(", "r'^\\d+(?:\\.\\d+(?:\\.\\d+)?)?'", ")", "match", "=", "base_re", ".", "match", "(", "version_string", ")", "if", "not", "match", ":", "raise", "ValueError", "(", "\"Version string lacks a numerical component: %r\"", "%", "version_string", ")", "version", "=", "version_string", "[", ":", "match", ".", "end", "(", ")", "]", "if", "not", "partial", ":", "# We need a not-partial version.", "while", "version", ".", "count", "(", "'.'", ")", "<", "2", ":", "version", "+=", "'.0'", "if", "match", ".", "end", "(", ")", "==", "len", "(", "version_string", ")", ":", "return", "Version", "(", "version", ",", "partial", "=", "partial", ")", "rest", "=", "version_string", "[", "match", ".", "end", "(", ")", ":", "]", "# Cleanup the 'rest'", "rest", "=", "re", ".", "sub", "(", "r'[^a-zA-Z0-9+.-]'", ",", "'-'", ",", "rest", ")", "if", "rest", "[", "0", "]", "==", "'+'", ":", "# A 'build' component", "prerelease", "=", "''", "build", "=", "rest", "[", "1", ":", "]", "elif", "rest", "[", "0", "]", "==", "'.'", ":", "# An extra version component, probably 'build'", "prerelease", "=", "''", "build", "=", "rest", "[", "1", ":", "]", "elif", "rest", "[", "0", "]", "==", "'-'", ":", "rest", "=", "rest", "[", "1", ":", "]", "if", "'+'", "in", "rest", ":", "prerelease", ",", "build", "=", "rest", ".", "split", "(", "'+'", ",", "1", ")", "else", ":", "prerelease", ",", "build", "=", "rest", ",", "''", "elif", "'+'", "in", "rest", ":", "prerelease", ",", "build", "=", "rest", ".", "split", "(", "'+'", ",", "1", ")", "else", ":", "prerelease", ",", "build", "=", "rest", ",", "''", "build", "=", "build", ".", "replace", "(", "'+'", ",", "'.'", ")", "if", "prerelease", ":", "version", "=", "'%s-%s'", "%", "(", "version", ",", "prerelease", ")", "if", "build", ":", "version", "=", "'%s+%s'", "%", "(", "version", ",", "build", ")", "return", "cls", "(", "version", ",", "partial", "=", "partial", ")" ]
Coerce an arbitrary version string into a semver-compatible one. The rule is: - If not enough components, fill minor/patch with zeroes; unless partial=True - If more than 3 dot-separated components, extra components are "build" data. If some "build" data already appeared, append it to the extra components Examples: >>> Version.coerce('0.1') Version(0, 1, 0) >>> Version.coerce('0.1.2.3') Version(0, 1, 2, (), ('3',)) >>> Version.coerce('0.1.2.3+4') Version(0, 1, 2, (), ('3', '4')) >>> Version.coerce('0.1+2-3+4_5') Version(0, 1, 0, (), ('2-3', '4-5'))
[ "Coerce", "an", "arbitrary", "version", "string", "into", "a", "semver", "-", "compatible", "one", "." ]
train
https://github.com/rbarrois/python-semanticversion/blob/fdef1e9cdae901d095d8e8c9cd6fa6adcfe02074/semantic_version/base.py#L114-L183
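A quick usage sketch matching the doctests above (Version.coerce is a classmethod on the library's public Version type):

from semantic_version import Version

print(Version.coerce('0.1'))         # -> 0.1.0
print(Version.coerce('0.1.2.3+4'))   # -> 0.1.2+3.4
print(Version('0.1.1') < Version.coerce('0.2'))  # -> True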
rbarrois/python-semanticversion
semantic_version/base.py
Version.parse
def parse(cls, version_string, partial=False, coerce=False): """Parse a version string into a Version() object. Args: version_string (str), the version string to parse partial (bool), whether to accept incomplete input coerce (bool), whether to try to map the passed in string into a valid Version. """ if not version_string: raise ValueError('Invalid empty version string: %r' % version_string) if partial: version_re = cls.partial_version_re else: version_re = cls.version_re match = version_re.match(version_string) if not match: raise ValueError('Invalid version string: %r' % version_string) major, minor, patch, prerelease, build = match.groups() if _has_leading_zero(major): raise ValueError("Invalid leading zero in major: %r" % version_string) if _has_leading_zero(minor): raise ValueError("Invalid leading zero in minor: %r" % version_string) if _has_leading_zero(patch): raise ValueError("Invalid leading zero in patch: %r" % version_string) major = int(major) minor = cls._coerce(minor, partial) patch = cls._coerce(patch, partial) if prerelease is None: if partial and (build is None): # No build info, strip here return (major, minor, patch, None, None) else: prerelease = () elif prerelease == '': prerelease = () else: prerelease = tuple(prerelease.split('.')) cls._validate_identifiers(prerelease, allow_leading_zeroes=False) if build is None: if partial: build = None else: build = () elif build == '': build = () else: build = tuple(build.split('.')) cls._validate_identifiers(build, allow_leading_zeroes=True) return (major, minor, patch, prerelease, build)
python
def parse(cls, version_string, partial=False, coerce=False): """Parse a version string into a Version() object. Args: version_string (str), the version string to parse partial (bool), whether to accept incomplete input coerce (bool), whether to try to map the passed in string into a valid Version. """ if not version_string: raise ValueError('Invalid empty version string: %r' % version_string) if partial: version_re = cls.partial_version_re else: version_re = cls.version_re match = version_re.match(version_string) if not match: raise ValueError('Invalid version string: %r' % version_string) major, minor, patch, prerelease, build = match.groups() if _has_leading_zero(major): raise ValueError("Invalid leading zero in major: %r" % version_string) if _has_leading_zero(minor): raise ValueError("Invalid leading zero in minor: %r" % version_string) if _has_leading_zero(patch): raise ValueError("Invalid leading zero in patch: %r" % version_string) major = int(major) minor = cls._coerce(minor, partial) patch = cls._coerce(patch, partial) if prerelease is None: if partial and (build is None): # No build info, strip here return (major, minor, patch, None, None) else: prerelease = () elif prerelease == '': prerelease = () else: prerelease = tuple(prerelease.split('.')) cls._validate_identifiers(prerelease, allow_leading_zeroes=False) if build is None: if partial: build = None else: build = () elif build == '': build = () else: build = tuple(build.split('.')) cls._validate_identifiers(build, allow_leading_zeroes=True) return (major, minor, patch, prerelease, build)
[ "def", "parse", "(", "cls", ",", "version_string", ",", "partial", "=", "False", ",", "coerce", "=", "False", ")", ":", "if", "not", "version_string", ":", "raise", "ValueError", "(", "'Invalid empty version string: %r'", "%", "version_string", ")", "if", "partial", ":", "version_re", "=", "cls", ".", "partial_version_re", "else", ":", "version_re", "=", "cls", ".", "version_re", "match", "=", "version_re", ".", "match", "(", "version_string", ")", "if", "not", "match", ":", "raise", "ValueError", "(", "'Invalid version string: %r'", "%", "version_string", ")", "major", ",", "minor", ",", "patch", ",", "prerelease", ",", "build", "=", "match", ".", "groups", "(", ")", "if", "_has_leading_zero", "(", "major", ")", ":", "raise", "ValueError", "(", "\"Invalid leading zero in major: %r\"", "%", "version_string", ")", "if", "_has_leading_zero", "(", "minor", ")", ":", "raise", "ValueError", "(", "\"Invalid leading zero in minor: %r\"", "%", "version_string", ")", "if", "_has_leading_zero", "(", "patch", ")", ":", "raise", "ValueError", "(", "\"Invalid leading zero in patch: %r\"", "%", "version_string", ")", "major", "=", "int", "(", "major", ")", "minor", "=", "cls", ".", "_coerce", "(", "minor", ",", "partial", ")", "patch", "=", "cls", ".", "_coerce", "(", "patch", ",", "partial", ")", "if", "prerelease", "is", "None", ":", "if", "partial", "and", "(", "build", "is", "None", ")", ":", "# No build info, strip here", "return", "(", "major", ",", "minor", ",", "patch", ",", "None", ",", "None", ")", "else", ":", "prerelease", "=", "(", ")", "elif", "prerelease", "==", "''", ":", "prerelease", "=", "(", ")", "else", ":", "prerelease", "=", "tuple", "(", "prerelease", ".", "split", "(", "'.'", ")", ")", "cls", ".", "_validate_identifiers", "(", "prerelease", ",", "allow_leading_zeroes", "=", "False", ")", "if", "build", "is", "None", ":", "if", "partial", ":", "build", "=", "None", "else", ":", "build", "=", "(", ")", "elif", "build", "==", "''", ":", "build", "=", "(", ")", "else", ":", "build", "=", "tuple", "(", "build", ".", "split", "(", "'.'", ")", ")", "cls", ".", "_validate_identifiers", "(", "build", ",", "allow_leading_zeroes", "=", "True", ")", "return", "(", "major", ",", "minor", ",", "patch", ",", "prerelease", ",", "build", ")" ]
Parse a version string into a Version() object. Args: version_string (str), the version string to parse partial (bool), whether to accept incomplete input coerce (bool), whether to try to map the passed in string into a valid Version.
[ "Parse", "a", "version", "string", "into", "a", "Version", "()", "object", "." ]
train
https://github.com/rbarrois/python-semanticversion/blob/fdef1e9cdae901d095d8e8c9cd6fa6adcfe02074/semantic_version/base.py#L186-L243
rbarrois/python-semanticversion
semantic_version/base.py
Version._comparison_functions
def _comparison_functions(cls, partial=False): """Retrieve comparison methods to apply on version components. This is a private API. Args: partial (bool): whether to provide 'partial' or 'strict' matching. Returns: 5-tuple of cmp-like functions. """ def prerelease_cmp(a, b): """Compare prerelease components. Special rule: a version without prerelease component has higher precedence than one with a prerelease component. """ if a and b: return identifier_list_cmp(a, b) elif a: # Versions with prerelease field have lower precedence return -1 elif b: return 1 else: return 0 def build_cmp(a, b): """Compare build metadata. Special rule: there is no ordering on build metadata. """ if a == b: return 0 else: return NotImplemented def make_optional(orig_cmp_fun): """Convert a cmp-like function to consider 'None == *'.""" @functools.wraps(orig_cmp_fun) def alt_cmp_fun(a, b): if a is None or b is None: return 0 return orig_cmp_fun(a, b) return alt_cmp_fun if partial: return [ base_cmp, # Major is still mandatory make_optional(base_cmp), make_optional(base_cmp), make_optional(prerelease_cmp), make_optional(build_cmp), ] else: return [ base_cmp, base_cmp, base_cmp, prerelease_cmp, build_cmp, ]
python
def _comparison_functions(cls, partial=False): """Retrieve comparison methods to apply on version components. This is a private API. Args: partial (bool): whether to provide 'partial' or 'strict' matching. Returns: 5-tuple of cmp-like functions. """ def prerelease_cmp(a, b): """Compare prerelease components. Special rule: a version without prerelease component has higher precedence than one with a prerelease component. """ if a and b: return identifier_list_cmp(a, b) elif a: # Versions with prerelease field have lower precedence return -1 elif b: return 1 else: return 0 def build_cmp(a, b): """Compare build metadata. Special rule: there is no ordering on build metadata. """ if a == b: return 0 else: return NotImplemented def make_optional(orig_cmp_fun): """Convert a cmp-like function to consider 'None == *'.""" @functools.wraps(orig_cmp_fun) def alt_cmp_fun(a, b): if a is None or b is None: return 0 return orig_cmp_fun(a, b) return alt_cmp_fun if partial: return [ base_cmp, # Major is still mandatory make_optional(base_cmp), make_optional(base_cmp), make_optional(prerelease_cmp), make_optional(build_cmp), ] else: return [ base_cmp, base_cmp, base_cmp, prerelease_cmp, build_cmp, ]
[ "def", "_comparison_functions", "(", "cls", ",", "partial", "=", "False", ")", ":", "def", "prerelease_cmp", "(", "a", ",", "b", ")", ":", "\"\"\"Compare prerelease components.\n\n Special rule: a version without prerelease component has higher\n precedence than one with a prerelease component.\n \"\"\"", "if", "a", "and", "b", ":", "return", "identifier_list_cmp", "(", "a", ",", "b", ")", "elif", "a", ":", "# Versions with prerelease field have lower precedence", "return", "-", "1", "elif", "b", ":", "return", "1", "else", ":", "return", "0", "def", "build_cmp", "(", "a", ",", "b", ")", ":", "\"\"\"Compare build metadata.\n\n Special rule: there is no ordering on build metadata.\n \"\"\"", "if", "a", "==", "b", ":", "return", "0", "else", ":", "return", "NotImplemented", "def", "make_optional", "(", "orig_cmp_fun", ")", ":", "\"\"\"Convert a cmp-like function to consider 'None == *'.\"\"\"", "@", "functools", ".", "wraps", "(", "orig_cmp_fun", ")", "def", "alt_cmp_fun", "(", "a", ",", "b", ")", ":", "if", "a", "is", "None", "or", "b", "is", "None", ":", "return", "0", "return", "orig_cmp_fun", "(", "a", ",", "b", ")", "return", "alt_cmp_fun", "if", "partial", ":", "return", "[", "base_cmp", ",", "# Major is still mandatory", "make_optional", "(", "base_cmp", ")", ",", "make_optional", "(", "base_cmp", ")", ",", "make_optional", "(", "prerelease_cmp", ")", ",", "make_optional", "(", "build_cmp", ")", ",", "]", "else", ":", "return", "[", "base_cmp", ",", "base_cmp", ",", "base_cmp", ",", "prerelease_cmp", ",", "build_cmp", ",", "]" ]
Retrieve comparison methods to apply on version components. This is a private API. Args: partial (bool): whether to provide 'partial' or 'strict' matching. Returns: 5-tuple of cmp-like functions.
[ "Retrieve", "comparison", "methods", "to", "apply", "on", "version", "components", "." ]
train
https://github.com/rbarrois/python-semanticversion/blob/fdef1e9cdae901d095d8e8c9cd6fa6adcfe02074/semantic_version/base.py#L281-L344
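A minimal usage sketch of the precedence rules implemented above, through the public semantic_version API (assuming the package is importable; the version values are illustrative):

import semantic_version

# A prerelease sorts below the same version without one (prerelease_cmp).
assert semantic_version.Version('1.0.0-alpha') < semantic_version.Version('1.0.0')

# Prerelease identifiers compare item by item via identifier_list_cmp;
# numeric identifiers rank below alphanumeric ones.
assert semantic_version.Version('1.0.0-alpha.1') < semantic_version.Version('1.0.0-alpha.beta')

# build_cmp returns NotImplemented for differing build metadata, so under
# this scheme the versions are neither ordered nor equal.
assert semantic_version.Version('1.0.0+build.1') != semantic_version.Version('1.0.0+build.2')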
rbarrois/python-semanticversion
semantic_version/base.py
Version.__compare_helper
def __compare_helper(self, other, condition, notimpl_target): """Helper for comparison. Allows the caller to provide: - The condition - The return value if the comparison is meaningless (ie versions with build metadata). """ if not isinstance(other, self.__class__): return NotImplemented cmp_res = self.__cmp__(other) if cmp_res is NotImplemented: return notimpl_target return condition(cmp_res)
python
def __compare_helper(self, other, condition, notimpl_target): """Helper for comparison. Allows the caller to provide: - The condition - The return value if the comparison is meaningless (ie versions with build metadata). """ if not isinstance(other, self.__class__): return NotImplemented cmp_res = self.__cmp__(other) if cmp_res is NotImplemented: return notimpl_target return condition(cmp_res)
[ "def", "__compare_helper", "(", "self", ",", "other", ",", "condition", ",", "notimpl_target", ")", ":", "if", "not", "isinstance", "(", "other", ",", "self", ".", "__class__", ")", ":", "return", "NotImplemented", "cmp_res", "=", "self", ".", "__cmp__", "(", "other", ")", "if", "cmp_res", "is", "NotImplemented", ":", "return", "notimpl_target", "return", "condition", "(", "cmp_res", ")" ]
Helper for comparison. Allows the caller to provide: - The condition - The return value if the comparison is meaningless (ie versions with build metadata).
[ "Helper", "for", "comparison", "." ]
train
https://github.com/rbarrois/python-semanticversion/blob/fdef1e9cdae901d095d8e8c9cd6fa6adcfe02074/semantic_version/base.py#L365-L380
rbarrois/python-semanticversion
semantic_version/base.py
Spec.match
def match(self, version): """Check whether a Version satisfies the Spec.""" return all(spec.match(version) for spec in self.specs)
python
def match(self, version): """Check whether a Version satisfies the Spec.""" return all(spec.match(version) for spec in self.specs)
[ "def", "match", "(", "self", ",", "version", ")", ":", "return", "all", "(", "spec", ".", "match", "(", "version", ")", "for", "spec", "in", "self", ".", "specs", ")" ]
Check whether a Version satisfies the Spec.
[ "Check", "whether", "a", "Version", "satisfies", "the", "Spec", "." ]
train
https://github.com/rbarrois/python-semanticversion/blob/fdef1e9cdae901d095d8e8c9cd6fa6adcfe02074/semantic_version/base.py#L514-L516
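A short sketch of Spec.match in use, assuming the package's comma-separated spec syntax (each comma-separated clause must be satisfied):

import semantic_version

spec = semantic_version.Spec('>=0.1.1,<0.3.0')
assert spec.match(semantic_version.Version('0.2.5'))      # every clause satisfied
assert not spec.match(semantic_version.Version('0.3.0'))  # upper bound excluded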
rbarrois/python-semanticversion
semantic_version/base.py
Spec.select
def select(self, versions): """Select the best compatible version among an iterable of options.""" options = list(self.filter(versions)) if options: return max(options) return None
python
def select(self, versions): """Select the best compatible version among an iterable of options.""" options = list(self.filter(versions)) if options: return max(options) return None
[ "def", "select", "(", "self", ",", "versions", ")", ":", "options", "=", "list", "(", "self", ".", "filter", "(", "versions", ")", ")", "if", "options", ":", "return", "max", "(", "options", ")", "return", "None" ]
Select the best compatible version among an iterable of options.
[ "Select", "the", "best", "compatible", "version", "among", "an", "iterable", "of", "options", "." ]
train
https://github.com/rbarrois/python-semanticversion/blob/fdef1e9cdae901d095d8e8c9cd6fa6adcfe02074/semantic_version/base.py#L524-L529
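A sketch of Spec.select picking the best match from candidate versions (the values are illustrative):

import semantic_version

spec = semantic_version.Spec('>=0.1.0,<0.4.0')
candidates = [semantic_version.Version(v) for v in ('0.1.0', '0.2.0', '0.3.5', '0.4.0')]
assert spec.select(candidates) == semantic_version.Version('0.3.5')  # highest compatible
assert spec.select([semantic_version.Version('1.0.0')]) is None      # nothing matches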
rbarrois/python-semanticversion
semantic_version/django_fields.py
VersionField.deconstruct
def deconstruct(self): """Handle django.db.migrations.""" name, path, args, kwargs = super(VersionField, self).deconstruct() kwargs['partial'] = self.partial kwargs['coerce'] = self.coerce return name, path, args, kwargs
python
def deconstruct(self): """Handle django.db.migrations.""" name, path, args, kwargs = super(VersionField, self).deconstruct() kwargs['partial'] = self.partial kwargs['coerce'] = self.coerce return name, path, args, kwargs
[ "def", "deconstruct", "(", "self", ")", ":", "name", ",", "path", ",", "args", ",", "kwargs", "=", "super", "(", "VersionField", ",", "self", ")", ".", "deconstruct", "(", ")", "kwargs", "[", "'partial'", "]", "=", "self", ".", "partial", "kwargs", "[", "'coerce'", "]", "=", "self", ".", "coerce", "return", "name", ",", "path", ",", "args", ",", "kwargs" ]
Handle django.db.migrations.
[ "Handle", "django", ".", "db", ".", "migrations", "." ]
train
https://github.com/rbarrois/python-semanticversion/blob/fdef1e9cdae901d095d8e8c9cd6fa6adcfe02074/semantic_version/django_fields.py#L53-L58
rbarrois/python-semanticversion
semantic_version/django_fields.py
VersionField.to_python
def to_python(self, value): """Converts any value to a base.Version field.""" if value is None or value == '': return value if isinstance(value, base.Version): return value if self.coerce: return base.Version.coerce(value, partial=self.partial) else: return base.Version(value, partial=self.partial)
python
def to_python(self, value): """Converts any value to a base.Version field.""" if value is None or value == '': return value if isinstance(value, base.Version): return value if self.coerce: return base.Version.coerce(value, partial=self.partial) else: return base.Version(value, partial=self.partial)
[ "def", "to_python", "(", "self", ",", "value", ")", ":", "if", "value", "is", "None", "or", "value", "==", "''", ":", "return", "value", "if", "isinstance", "(", "value", ",", "base", ".", "Version", ")", ":", "return", "value", "if", "self", ".", "coerce", ":", "return", "base", ".", "Version", ".", "coerce", "(", "value", ",", "partial", "=", "self", ".", "partial", ")", "else", ":", "return", "base", ".", "Version", "(", "value", ",", "partial", "=", "self", ".", "partial", ")" ]
Converts any value to a base.Version field.
[ "Converts", "any", "value", "to", "a", "base", ".", "Version", "field", "." ]
train
https://github.com/rbarrois/python-semanticversion/blob/fdef1e9cdae901d095d8e8c9cd6fa6adcfe02074/semantic_version/django_fields.py#L60-L69
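A hedged sketch of wiring the field into a model (the Package model and the max_length value are illustrative, not part of the library):

from django.db import models
from semantic_version.django_fields import VersionField

class Package(models.Model):  # hypothetical model, for illustration only
    # coerce=True routes loose inputs like '0.8' through Version.coerce();
    # partial=True would accept partial versions instead.
    version = VersionField(max_length=200, coerce=True)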
rbarrois/python-semanticversion
semantic_version/django_fields.py
SpecField.to_python
def to_python(self, value): """Converts any value to a base.Spec field.""" if value is None or value == '': return value if isinstance(value, base.Spec): return value return base.Spec(value)
python
def to_python(self, value): """Converts any value to a base.Spec field.""" if value is None or value == '': return value if isinstance(value, base.Spec): return value return base.Spec(value)
[ "def", "to_python", "(", "self", ",", "value", ")", ":", "if", "value", "is", "None", "or", "value", "==", "''", ":", "return", "value", "if", "isinstance", "(", "value", ",", "base", ".", "Spec", ")", ":", "return", "value", "return", "base", ".", "Spec", "(", "value", ")" ]
Converts any value to a base.Spec field.
[ "Converts", "any", "value", "to", "a", "base", ".", "Spec", "field", "." ]
train
https://github.com/rbarrois/python-semanticversion/blob/fdef1e9cdae901d095d8e8c9cd6fa6adcfe02074/semantic_version/django_fields.py#L78-L84
fkmclane/python-ardrone
ardrone/drone.py
ARDrone.move_left
def move_left(self): """Make the drone move left.""" self.at(ardrone.at.pcmd, True, -self.speed, 0, 0, 0)
python
def move_left(self): """Make the drone move left.""" self.at(ardrone.at.pcmd, True, -self.speed, 0, 0, 0)
[ "def", "move_left", "(", "self", ")", ":", "self", ".", "at", "(", "ardrone", ".", "at", ".", "pcmd", ",", "True", ",", "-", "self", ".", "speed", ",", "0", ",", "0", ",", "0", ")" ]
Make the drone move left.
[ "Make", "the", "drone", "move", "left", "." ]
train
https://github.com/fkmclane/python-ardrone/blob/def437148a114f66d1ca30bf2398a017002b2cd6/ardrone/drone.py#L55-L57
fkmclane/python-ardrone
ardrone/drone.py
ARDrone.move_right
def move_right(self): """Make the drone move right.""" self.at(ardrone.at.pcmd, True, self.speed, 0, 0, 0)
python
def move_right(self): """Make the drone move right.""" self.at(ardrone.at.pcmd, True, self.speed, 0, 0, 0)
[ "def", "move_right", "(", "self", ")", ":", "self", ".", "at", "(", "ardrone", ".", "at", ".", "pcmd", ",", "True", ",", "self", ".", "speed", ",", "0", ",", "0", ",", "0", ")" ]
Make the drone move right.
[ "Make", "the", "drone", "move", "right", "." ]
train
https://github.com/fkmclane/python-ardrone/blob/def437148a114f66d1ca30bf2398a017002b2cd6/ardrone/drone.py#L59-L61
fkmclane/python-ardrone
ardrone/drone.py
ARDrone.move_up
def move_up(self): """Make the drone rise upwards.""" self.at(ardrone.at.pcmd, True, 0, 0, self.speed, 0)
python
def move_up(self): """Make the drone rise upwards.""" self.at(ardrone.at.pcmd, True, 0, 0, self.speed, 0)
[ "def", "move_up", "(", "self", ")", ":", "self", ".", "at", "(", "ardrone", ".", "at", ".", "pcmd", ",", "True", ",", "0", ",", "0", ",", "self", ".", "speed", ",", "0", ")" ]
Make the drone rise upwards.
[ "Make", "the", "drone", "rise", "upwards", "." ]
train
https://github.com/fkmclane/python-ardrone/blob/def437148a114f66d1ca30bf2398a017002b2cd6/ardrone/drone.py#L63-L65
fkmclane/python-ardrone
ardrone/drone.py
ARDrone.move_down
def move_down(self): """Make the drone descend downwards.""" self.at(ardrone.at.pcmd, True, 0, 0, -self.speed, 0)
python
def move_down(self): """Make the drone descend downwards.""" self.at(ardrone.at.pcmd, True, 0, 0, -self.speed, 0)
[ "def", "move_down", "(", "self", ")", ":", "self", ".", "at", "(", "ardrone", ".", "at", ".", "pcmd", ",", "True", ",", "0", ",", "0", ",", "-", "self", ".", "speed", ",", "0", ")" ]
Make the drone descend downwards.
[ "Make", "the", "drone", "descend", "downwards", "." ]
train
https://github.com/fkmclane/python-ardrone/blob/def437148a114f66d1ca30bf2398a017002b2cd6/ardrone/drone.py#L67-L69
fkmclane/python-ardrone
ardrone/drone.py
ARDrone.move_forward
def move_forward(self): """Make the drone move forward.""" self.at(ardrone.at.pcmd, True, 0, -self.speed, 0, 0)
python
def move_forward(self): """Make the drone move forward.""" self.at(ardrone.at.pcmd, True, 0, -self.speed, 0, 0)
[ "def", "move_forward", "(", "self", ")", ":", "self", ".", "at", "(", "ardrone", ".", "at", ".", "pcmd", ",", "True", ",", "0", ",", "-", "self", ".", "speed", ",", "0", ",", "0", ")" ]
Make the drone move forward.
[ "Make", "the", "drone", "move", "forward", "." ]
train
https://github.com/fkmclane/python-ardrone/blob/def437148a114f66d1ca30bf2398a017002b2cd6/ardrone/drone.py#L71-L73
fkmclane/python-ardrone
ardrone/drone.py
ARDrone.move_backward
def move_backward(self): """Make the drone move backwards.""" self.at(ardrone.at.pcmd, True, 0, self.speed, 0, 0)
python
def move_backward(self): """Make the drone move backwards.""" self.at(ardrone.at.pcmd, True, 0, self.speed, 0, 0)
[ "def", "move_backward", "(", "self", ")", ":", "self", ".", "at", "(", "ardrone", ".", "at", ".", "pcmd", ",", "True", ",", "0", ",", "self", ".", "speed", ",", "0", ",", "0", ")" ]
Make the drone move backwards.
[ "Make", "the", "drone", "move", "backwards", "." ]
train
https://github.com/fkmclane/python-ardrone/blob/def437148a114f66d1ca30bf2398a017002b2cd6/ardrone/drone.py#L75-L77
fkmclane/python-ardrone
ardrone/drone.py
ARDrone.turn_left
def turn_left(self): """Make the drone rotate left.""" self.at(ardrone.at.pcmd, True, 0, 0, 0, -self.speed)
python
def turn_left(self): """Make the drone rotate left.""" self.at(ardrone.at.pcmd, True, 0, 0, 0, -self.speed)
[ "def", "turn_left", "(", "self", ")", ":", "self", ".", "at", "(", "ardrone", ".", "at", ".", "pcmd", ",", "True", ",", "0", ",", "0", ",", "0", ",", "-", "self", ".", "speed", ")" ]
Make the drone rotate left.
[ "Make", "the", "drone", "rotate", "left", "." ]
train
https://github.com/fkmclane/python-ardrone/blob/def437148a114f66d1ca30bf2398a017002b2cd6/ardrone/drone.py#L79-L81
fkmclane/python-ardrone
ardrone/drone.py
ARDrone.turn_right
def turn_right(self): """Make the drone rotate right.""" self.at(ardrone.at.pcmd, True, 0, 0, 0, self.speed)
python
def turn_right(self): """Make the drone rotate right.""" self.at(ardrone.at.pcmd, True, 0, 0, 0, self.speed)
[ "def", "turn_right", "(", "self", ")", ":", "self", ".", "at", "(", "ardrone", ".", "at", ".", "pcmd", ",", "True", ",", "0", ",", "0", ",", "0", ",", "self", ".", "speed", ")" ]
Make the drone rotate right.
[ "Make", "the", "drone", "rotate", "right", "." ]
train
https://github.com/fkmclane/python-ardrone/blob/def437148a114f66d1ca30bf2398a017002b2cd6/ardrone/drone.py#L83-L85
fkmclane/python-ardrone
ardrone/drone.py
ARDrone.reset
def reset(self): """Toggle the drone's emergency state.""" self.at(ardrone.at.ref, False, True) time.sleep(0.1) self.at(ardrone.at.ref, False, False)
python
def reset(self): """Toggle the drone's emergency state.""" self.at(ardrone.at.ref, False, True) time.sleep(0.1) self.at(ardrone.at.ref, False, False)
[ "def", "reset", "(", "self", ")", ":", "self", ".", "at", "(", "ardrone", ".", "at", ".", "ref", ",", "False", ",", "True", ")", "time", ".", "sleep", "(", "0.1", ")", "self", ".", "at", "(", "ardrone", ".", "at", ".", "ref", ",", "False", ",", "False", ")" ]
Toggle the drone's emergency state.
[ "Toggle", "the", "drone", "s", "emergency", "state", "." ]
train
https://github.com/fkmclane/python-ardrone/blob/def437148a114f66d1ca30bf2398a017002b2cd6/ardrone/drone.py#L87-L91
fkmclane/python-ardrone
ardrone/drone.py
ARDrone.at
def at(self, cmd, *args, **kwargs): """Wrapper for the low level at commands. This method takes care that the sequence number is increased after each at command and the watchdog timer is started to make sure the drone receives a command at least every second. """ with self.lock: self.com_watchdog_timer.cancel() cmd(self.host, self.sequence, *args, **kwargs) self.sequence += 1 self.com_watchdog_timer = threading.Timer(self.timer, self.commwdg) self.com_watchdog_timer.start()
python
def at(self, cmd, *args, **kwargs): """Wrapper for the low level at commands. This method takes care that the sequence number is increased after each at command and the watchdog timer is started to make sure the drone receives a command at least every second. """ with self.lock: self.com_watchdog_timer.cancel() cmd(self.host, self.sequence, *args, **kwargs) self.sequence += 1 self.com_watchdog_timer = threading.Timer(self.timer, self.commwdg) self.com_watchdog_timer.start()
[ "def", "at", "(", "self", ",", "cmd", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "with", "self", ".", "lock", ":", "self", ".", "com_watchdog_timer", ".", "cancel", "(", ")", "cmd", "(", "self", ".", "host", ",", "self", ".", "sequence", ",", "*", "args", ",", "*", "*", "kwargs", ")", "self", ".", "sequence", "+=", "1", "self", ".", "com_watchdog_timer", "=", "threading", ".", "Timer", "(", "self", ".", "timer", ",", "self", ".", "commwdg", ")", "self", ".", "com_watchdog_timer", ".", "start", "(", ")" ]
Wrapper for the low level at commands. This method takes care that the sequence number is increased after each at command and the watchdog timer is started to make sure the drone receives a command at least every second.
[ "Wrapper", "for", "the", "low", "level", "at", "commands", "." ]
train
https://github.com/fkmclane/python-ardrone/blob/def437148a114f66d1ca30bf2398a017002b2cd6/ardrone/drone.py#L111-L123
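A simplified sketch of the keep-alive pattern behind ARDrone.at (illustrative only, not the library's exact code): every real command cancels and re-arms the timer, so the watchdog callback only fires when the link has been idle for a full period.

import threading

class KeepAlive(object):
    def __init__(self, interval, heartbeat):
        # heartbeat is any zero-argument callable, e.g. a COMWDG sender.
        self.interval = interval
        self.heartbeat = heartbeat
        self.timer = threading.Timer(interval, self._fire)
        self.timer.start()

    def _fire(self):
        self.heartbeat()        # nothing sent recently: ping the drone
        self._rearm()

    def sent_command(self):
        self.timer.cancel()     # a real command counts as traffic
        self._rearm()

    def _rearm(self):
        self.timer = threading.Timer(self.interval, self._fire)
        self.timer.start()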
fkmclane/python-ardrone
ardrone/drone.py
ARDrone.halt
def halt(self): """Shut down the drone. This method does not land or halt the actual drone, but the communication with the drone. You should call it at the end of your application to close all sockets, pipes, processes and threads related to this object. """ with self.lock: self.com_watchdog_timer.cancel() self.ipc_thread.stop() self.ipc_thread.join() self.network_process.terminate() self.network_process.join()
python
def halt(self): """Shut down the drone. This method does not land or halt the actual drone, but the communication with the drone. You should call it at the end of your application to close all sockets, pipes, processes and threads related to this object. """ with self.lock: self.com_watchdog_timer.cancel() self.ipc_thread.stop() self.ipc_thread.join() self.network_process.terminate() self.network_process.join()
[ "def", "halt", "(", "self", ")", ":", "with", "self", ".", "lock", ":", "self", ".", "com_watchdog_timer", ".", "cancel", "(", ")", "self", ".", "ipc_thread", ".", "stop", "(", ")", "self", ".", "ipc_thread", ".", "join", "(", ")", "self", ".", "network_process", ".", "terminate", "(", ")", "self", ".", "network_process", ".", "join", "(", ")" ]
Shut down the drone. This method does not land or halt the actual drone, but the communication with the drone. You should call it at the end of your application to close all sockets, pipes, processes and threads related to this object.
[ "Shut", "down", "the", "drone", "." ]
train
https://github.com/fkmclane/python-ardrone/blob/def437148a114f66d1ca30bf2398a017002b2cd6/ardrone/drone.py#L133-L146
fkmclane/python-ardrone
ardrone/drone.py
ARDrone.move
def move(self, lr, fb, vv, va): """Makes the drone move (translate/rotate). Parameters: lr -- left-right tilt: float [-1..1] negative: left, positive: right fb -- front-back tilt: float [-1..1] negative: forwards, positive: backwards vv -- vertical speed: float [-1..1] negative: go down, positive: rise va -- angular speed: float [-1..1] negative: spin left, positive: spin right""" self.at(ardrone.at.pcmd, True, lr, fb, vv, va)
python
def move(self, lr, fb, vv, va): """Makes the drone move (translate/rotate). Parameters: lr -- left-right tilt: float [-1..1] negative: left, positive: right fb -- front-back tilt: float [-1..1] negative: forwards, positive: backwards vv -- vertical speed: float [-1..1] negative: go down, positive: rise va -- angular speed: float [-1..1] negative: spin left, positive: spin right""" self.at(ardrone.at.pcmd, True, lr, fb, vv, va)
[ "def", "move", "(", "self", ",", "lr", ",", "fb", ",", "vv", ",", "va", ")", ":", "self", ".", "at", "(", "ardrone", ".", "at", ".", "pcmd", ",", "True", ",", "lr", ",", "fb", ",", "vv", ",", "va", ")" ]
Makes the drone move (translate/rotate). Parameters: lr -- left-right tilt: float [-1..1] negative: left, positive: right fb -- front-back tilt: float [-1..1] negative: forwards, positive: backwards vv -- vertical speed: float [-1..1] negative: go down, positive: rise va -- angular speed: float [-1..1] negative: spin left, positive: spin right
[ "Makes", "the", "drone", "move", "(", "translate", "/", "rotate", ")", "." ]
train
https://github.com/fkmclane/python-ardrone/blob/def437148a114f66d1ca30bf2398a017002b2cd6/ardrone/drone.py#L148-L158
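A hypothetical flight sketch tying the movement helpers together. It assumes a drone reachable at its default address, that ARDrone() connects without arguments, and that the class exposes takeoff()/land() alongside the methods shown above; treat it as untested pseudocode:

import time
import ardrone

drone = ardrone.ARDrone()
drone.takeoff()
time.sleep(5)
drone.move(lr=0.0, fb=-0.2, vv=0.0, va=0.0)  # drift gently forwards
time.sleep(2)
drone.land()
drone.halt()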
fkmclane/python-ardrone
ardrone/at.py
ref
def ref(host, seq, takeoff, emergency=False): """ Basic behaviour of the drone: take-off/landing, emergency stop/reset Parameters: seq -- sequence number takeoff -- True: Takeoff / False: Land emergency -- True: Turn off the engines """ p = 0b10001010101000000000000000000 if takeoff: p |= 0b1000000000 if emergency: p |= 0b100000000 at(host, 'REF', seq, [p])
python
def ref(host, seq, takeoff, emergency=False): """ Basic behaviour of the drone: take-off/landing, emergency stop/reset Parameters: seq -- sequence number takeoff -- True: Takeoff / False: Land emergency -- True: Turn off the engines """ p = 0b10001010101000000000000000000 if takeoff: p |= 0b1000000000 if emergency: p |= 0b100000000 at(host, 'REF', seq, [p])
[ "def", "ref", "(", "host", ",", "seq", ",", "takeoff", ",", "emergency", "=", "False", ")", ":", "p", "=", "0b10001010101000000000000000000", "if", "takeoff", ":", "p", "|=", "0b1000000000", "if", "emergency", ":", "p", "|=", "0b100000000", "at", "(", "host", ",", "'REF'", ",", "seq", ",", "[", "p", "]", ")" ]
Basic behaviour of the drone: take-off/landing, emergency stop/reset Parameters: seq -- sequence number takeoff -- True: Takeoff / False: Land emergency -- True: Turn off the engines
[ "Basic", "behaviour", "of", "the", "drone", ":", "take", "-", "off", "/", "landing", "emergency", "stop", "/", "reset" ]
train
https://github.com/fkmclane/python-ardrone/blob/def437148a114f66d1ca30bf2398a017002b2cd6/ardrone/at.py#L16-L30
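A worked example of the REF flag word; the always-set base bits come straight from the function above, and the takeoff/emergency bits are bit 9 and bit 8 respectively:

base = 0b10001010101000000000000000000  # constant bits always present in REF
takeoff_bit = 1 << 9                    # same as 0b1000000000
emergency_bit = 1 << 8                  # same as 0b100000000

ref_takeoff = base | takeoff_bit        # payload for takeoff
ref_land = base                         # payload for landing
ref_emergency = base | emergency_bit    # payload to cut the engines
assert ref_takeoff == 0b10001010101000000001000000000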
fkmclane/python-ardrone
ardrone/at.py
pcmd
def pcmd(host, seq, progressive, lr, fb, vv, va): """ Makes the drone move (translate/rotate). Parameters: seq -- sequence number progressive -- True: enable progressive commands, False: disable (i.e. enable hovering mode) lr -- left-right tilt: float [-1..1] negative: left, positive: right fb -- front-back tilt: float [-1..1] negative: forwards, positive: backwards vv -- vertical speed: float [-1..1] negative: go down, positive: rise va -- angular speed: float [-1..1] negative: spin left, positive: spin right The above float values are a percentage of the maximum speed. """ p = 1 if progressive else 0 at(host, 'PCMD', seq, [p, float(lr), float(fb), float(vv), float(va)])
python
def pcmd(host, seq, progressive, lr, fb, vv, va): """ Makes the drone move (translate/rotate). Parameters: seq -- sequence number progressive -- True: enable progressive commands, False: disable (i.e. enable hovering mode) lr -- left-right tilt: float [-1..1] negative: left, positive: right fb -- front-back tilt: float [-1..1] negative: forwards, positive: backwards vv -- vertical speed: float [-1..1] negative: go down, positive: rise va -- angular speed: float [-1..1] negative: spin left, positive: spin right The above float values are a percentage of the maximum speed. """ p = 1 if progressive else 0 at(host, 'PCMD', seq, [p, float(lr), float(fb), float(vv), float(va)])
[ "def", "pcmd", "(", "host", ",", "seq", ",", "progressive", ",", "lr", ",", "fb", ",", "vv", ",", "va", ")", ":", "p", "=", "1", "if", "progressive", "else", "0", "at", "(", "host", ",", "'PCMD'", ",", "seq", ",", "[", "p", ",", "float", "(", "lr", ")", ",", "float", "(", "fb", ")", ",", "float", "(", "vv", ")", ",", "float", "(", "va", ")", "]", ")" ]
Makes the drone move (translate/rotate). Parameters: seq -- sequence number progressive -- True: enable progressive commands, False: disable (i.e. enable hovering mode) lr -- left-right tilt: float [-1..1] negative: left, positive: right fb -- front-back tilt: float [-1..1] negative: forwards, positive: backwards vv -- vertical speed: float [-1..1] negative: go down, positive: rise va -- angular speed: float [-1..1] negative: spin left, positive: spin right The above float values are a percentage of the maximum speed.
[ "Makes", "the", "drone", "move", "(", "translate", "/", "rotate", ")", "." ]
train
https://github.com/fkmclane/python-ardrone/blob/def437148a114f66d1ca30bf2398a017002b2cd6/ardrone/at.py#L33-L51
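The float arguments end up on the wire as integers: the f2i helper used by at() is not shown in this excerpt, but it is conventionally the IEEE-754 bit pattern of a 32-bit float reinterpreted as a signed 32-bit int. A sketch under that assumption:

import struct

def f2i(f):
    # Reinterpret the bits of a 32-bit float as a signed 32-bit integer.
    return struct.unpack('i', struct.pack('f', f))[0]

assert f2i(0.0) == 0
assert f2i(-0.5) == -1090519040  # 0xBF000000 read as a signed int32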
fkmclane/python-ardrone
ardrone/at.py
config
def config(host, seq, option, value): """Set configuration parameters of the drone.""" at(host, 'CONFIG', seq, [str(option), str(value)])
python
def config(host, seq, option, value): """Set configuration parameters of the drone.""" at(host, 'CONFIG', seq, [str(option), str(value)])
[ "def", "config", "(", "host", ",", "seq", ",", "option", ",", "value", ")", ":", "at", "(", "host", ",", "'CONFIG'", ",", "seq", ",", "[", "str", "(", "option", ")", ",", "str", "(", "value", ")", "]", ")" ]
Set configuration parameters of the drone.
[ "Set", "configuration", "parameters", "of", "the", "drone", "." ]
train
https://github.com/fkmclane/python-ardrone/blob/def437148a114f66d1ca30bf2398a017002b2cd6/ardrone/at.py#L76-L78
fkmclane/python-ardrone
ardrone/at.py
pwm
def pwm(host, seq, m1, m2, m3, m4): """ Sends control values directly to the engines, overriding control loops. Parameters: seq -- sequence number m1 -- Integer: front left command m2 -- Integer: front right command m3 -- Integer: back right command m4 -- Integer: back left command """ at(host, 'PWM', seq, [m1, m2, m3, m4])
python
def pwm(host, seq, m1, m2, m3, m4): """ Sends control values directly to the engines, overriding control loops. Parameters: seq -- sequence number m1 -- Integer: front left command m2 -- Integer: front right command m3 -- Integer: back right command m4 -- Integer: back left command """ at(host, 'PWM', seq, [m1, m2, m3, m4])
[ "def", "pwm", "(", "host", ",", "seq", ",", "m1", ",", "m2", ",", "m3", ",", "m4", ")", ":", "at", "(", "host", ",", "'PWM'", ",", "seq", ",", "[", "m1", ",", "m2", ",", "m3", ",", "m4", "]", ")" ]
Sends control values directly to the engines, overriding control loops. Parameters: seq -- sequence number m1 -- Integer: front left command m2 -- Integer: front right command m3 -- Integer: back right command m4 -- Integer: back left command
[ "Sends", "control", "values", "directly", "to", "the", "engines", "overriding", "control", "loops", "." ]
train
https://github.com/fkmclane/python-ardrone/blob/def437148a114f66d1ca30bf2398a017002b2cd6/ardrone/at.py#L100-L111
fkmclane/python-ardrone
ardrone/at.py
led
def led(host, seq, anim, f, d): """ Control the drone's LED. Parameters: seq -- sequence number anim -- Integer: animation to play f -- Float: frequency in Hz of the animation d -- Integer: total duration in seconds of the animation """ at(host, 'LED', seq, [anim, float(f), d])
python
def led(host, seq, anim, f, d): """ Control the drone's LED. Parameters: seq -- sequence number anim -- Integer: animation to play f -- Float: frequency in Hz of the animation d -- Integer: total duration in seconds of the animation """ at(host, 'LED', seq, [anim, float(f), d])
[ "def", "led", "(", "host", ",", "seq", ",", "anim", ",", "f", ",", "d", ")", ":", "at", "(", "host", ",", "'LED'", ",", "seq", ",", "[", "anim", ",", "float", "(", "f", ")", ",", "d", "]", ")" ]
Control the drone's LED. Parameters: seq -- sequence number anim -- Integer: animation to play f -- Float: frequency in Hz of the animation d -- Integer: total duration in seconds of the animation
[ "Control", "the", "drone's", "LED", "." ]
train
https://github.com/fkmclane/python-ardrone/blob/def437148a114f66d1ca30bf2398a017002b2cd6/ardrone/at.py#L114-L124
fkmclane/python-ardrone
ardrone/at.py
anim
def anim(host, seq, anim, d): """ Makes the drone execute a predefined movement (animation). Parameters: seq -- sequence number anim -- Integer: animation to play d -- Integer: total duration in seconds of the animation """ at(host, 'ANIM', seq, [anim, d])
python
def anim(host, seq, anim, d): """ Makes the drone execute a predefined movement (animation). Parameters: seq -- sequence number anim -- Integer: animation to play d -- Integer: total duration in seconds of the animation """ at(host, 'ANIM', seq, [anim, d])
[ "def", "anim", "(", "host", ",", "seq", ",", "anim", ",", "d", ")", ":", "at", "(", "host", ",", "'ANIM'", ",", "seq", ",", "[", "anim", ",", "d", "]", ")" ]
Makes the drone execute a predefined movement (animation). Parameters: seq -- sequence number anim -- Integer: animation to play d -- Integer: total duration in seconds of the animation
[ "Makes", "the", "drone", "execute", "a", "predefined", "movement", "(", "animation", ")", "." ]
train
https://github.com/fkmclane/python-ardrone/blob/def437148a114f66d1ca30bf2398a017002b2cd6/ardrone/at.py#L127-L136
fkmclane/python-ardrone
ardrone/at.py
at
def at(host, command, seq, params): """ Parameters: command -- the command seq -- the sequence number params -- a list of elements which can be either int, float or string """ params_str = [] for p in params: if type(p) == int: params_str.append('{:d}'.format(p)) elif type(p) == float: params_str.append('{:d}'.format(f2i(p))) elif type(p) == str: params_str.append('"{:s}"'.format(p)) msg = 'AT*{:s}={:d},{:s}\r'.format(command, seq, ','.join(params_str)) sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) sock.sendto(msg.encode(), (host, ardrone.constant.COMMAND_PORT))
python
def at(host, command, seq, params): """ Parameters: command -- the command seq -- the sequence number params -- a list of elements which can be either int, float or string """ params_str = [] for p in params: if type(p) == int: params_str.append('{:d}'.format(p)) elif type(p) == float: params_str.append('{:d}'.format(f2i(p))) elif type(p) == str: params_str.append('"{:s}"'.format(p)) msg = 'AT*{:s}={:d},{:s}\r'.format(command, seq, ','.join(params_str)) sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) sock.sendto(msg.encode(), (host, ardrone.constant.COMMAND_PORT))
[ "def", "at", "(", "host", ",", "command", ",", "seq", ",", "params", ")", ":", "params_str", "=", "[", "]", "for", "p", "in", "params", ":", "if", "type", "(", "p", ")", "==", "int", ":", "params_str", ".", "append", "(", "'{:d}'", ".", "format", "(", "p", ")", ")", "elif", "type", "(", "p", ")", "==", "float", ":", "params_str", ".", "append", "(", "'{:d}'", ".", "format", "(", "f2i", "(", "p", ")", ")", ")", "elif", "type", "(", "p", ")", "==", "str", ":", "params_str", ".", "append", "(", "'\"{:s}\"'", ".", "format", "(", "p", ")", ")", "msg", "=", "'AT*{:s}={:d},{:s}\\r'", ".", "format", "(", "command", ",", "seq", ",", "','", ".", "join", "(", "params_str", ")", ")", "sock", "=", "socket", ".", "socket", "(", "socket", ".", "AF_INET", ",", "socket", ".", "SOCK_DGRAM", ")", "sock", ".", "sendto", "(", "msg", ".", "encode", "(", ")", ",", "(", "host", ",", "ardrone", ".", "constant", ".", "COMMAND_PORT", ")", ")" ]
Parameters: command -- the command seq -- the sequence number params -- a list of elements which can be either int, float or string
[ "Parameters", ":", "command", "--", "the", "command", "seq", "--", "the", "sequence", "number", "params", "--", "a", "list", "of", "elements", "which", "can", "be", "either", "int", "float", "or", "string" ]
train
https://github.com/fkmclane/python-ardrone/blob/def437148a114f66d1ca30bf2398a017002b2cd6/ardrone/at.py#L138-L155
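A worked example of the wire format produced by at(), reusing the f2i sketch above so that nothing touches a socket; ints pass through as decimal, floats as their bit pattern, and strings would be wrapped in double quotes:

import struct

def f2i(f):
    return struct.unpack('i', struct.pack('f', f))[0]

params = [1, -0.5, 0.0, 0.0, 0.0]
params_str = []
for p in params:
    if type(p) == int:
        params_str.append('{:d}'.format(p))
    elif type(p) == float:
        params_str.append('{:d}'.format(f2i(p)))
msg = 'AT*{:s}={:d},{:s}\r'.format('PCMD', 5, ','.join(params_str))
assert msg == 'AT*PCMD=5,1,-1090519040,0,0,0\r'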
fkmclane/python-ardrone
ardrone/navdata.py
decode
def decode(packet): """Decode a navdata packet.""" offset = 0 _ = struct.unpack_from('IIII', packet, offset) s = _[1] state = dict() state['fly'] = s & 1 # FLY MASK : (0) ardrone is landed, (1) ardrone is flying state['video'] = s >> 1 & 1 # VIDEO MASK : (0) video disable, (1) video enable state['vision'] = s >> 2 & 1 # VISION MASK : (0) vision disable, (1) vision enable state['control'] = s >> 3 & 1 # CONTROL ALGO (0) euler angles control, (1) angular speed control state['altitude'] = s >> 4 & 1 # ALTITUDE CONTROL ALGO : (0) altitude control inactive (1) altitude control active state['user_feedback_start'] = s >> 5 & 1 # USER feedback : Start button state state['command'] = s >> 6 & 1 # Control command ACK : (0) None, (1) one received state['fw_file'] = s >> 7 & 1 # Firmware file is good (1) state['fw_ver'] = s >> 8 & 1 # Firmware update is newer (1) state['fw_upd'] = s >> 9 & 1 # Firmware update is ongoing (1) state['navdata_demo'] = s >> 10 & 1 # Navdata demo : (0) All navdata, (1) only navdata demo state['navdata_bootstrap'] = s >> 11 & 1 # Navdata bootstrap : (0) options sent in all or demo mode, (1) no navdata options sent state['motors'] = s >> 12 & 1 # Motor status : (0) Ok, (1) Motors problem state['com_lost'] = s >> 13 & 1 # Communication lost : (1) com problem, (0) Com is ok state['vbat_low'] = s >> 15 & 1 # VBat low : (1) too low, (0) Ok state['user_el'] = s >> 16 & 1 # User Emergency Landing : (1) User EL is ON, (0) User EL is OFF state['timer_elapsed'] = s >> 17 & 1 # Timer elapsed : (1) elapsed, (0) not elapsed state['angles_out_of_range'] = s >> 19 & 1 # Angles : (0) Ok, (1) out of range state['ultrasound'] = s >> 21 & 1 # Ultrasonic sensor : (0) Ok, (1) deaf state['cutout'] = s >> 22 & 1 # Cutout system detection : (0) Not detected, (1) detected state['pic_version'] = s >> 23 & 1 # PIC Version number OK : (0) a bad version number, (1) version number is OK state['atcodec_thread_on'] = s >> 24 & 1 # ATCodec thread ON : (0) thread OFF (1) thread ON state['navdata_thread_on'] = s >> 25 & 1 # Navdata thread ON : (0) thread OFF (1) thread ON state['video_thread_on'] = s >> 26 & 1 # Video thread ON : (0) thread OFF (1) thread ON state['acq_thread_on'] = s >> 27 & 1 # Acquisition thread ON : (0) thread OFF (1) thread ON state['ctrl_watchdog'] = s >> 28 & 1 # CTRL watchdog : (1) delay in control execution (> 5ms), (0) control is well scheduled state['adc_watchdog'] = s >> 29 & 1 # ADC Watchdog : (1) delay in uart2 dsr (> 5ms), (0) uart2 is good state['com_watchdog'] = s >> 30 & 1 # Communication Watchdog : (1) com problem, (0) Com is ok state['emergency'] = s >> 31 & 1 # Emergency landing : (0) no emergency, (1) emergency data = dict() data['state'] = state data['header'] = _[0] data['sequence'] = _[2] data['vision'] = _[3] offset += struct.calcsize('IIII') demo_fields = [ 'ctrl_state', 'battery', 'theta', 'phi', 'psi', 'altitude', 'vx', 'vy', 'vz', 'num_frames' ] angles = ['theta', 'phi', 'psi'] while True: try: id_nr, size = struct.unpack_from('HH', packet, offset) offset += struct.calcsize('HH') except struct.error: break values = [] for i in range(size - struct.calcsize('HH')): values.append(struct.unpack_from('c', packet, offset)[0]) offset += struct.calcsize('c') if id_nr == 0: values = struct.unpack_from('IIfffIfffI', b''.join(values)) demo = dict(zip(demo_fields, values)) for a in angles: demo[a] = int(demo[a] / 1000) data['demo'] = demo return data
python
def decode(packet): """Decode a navdata packet.""" offset = 0 _ = struct.unpack_from('IIII', packet, offset) s = _[1] state = dict() state['fly'] = s & 1 # FLY MASK : (0) ardrone is landed, (1) ardrone is flying state['video'] = s >> 1 & 1 # VIDEO MASK : (0) video disable, (1) video enable state['vision'] = s >> 2 & 1 # VISION MASK : (0) vision disable, (1) vision enable state['control'] = s >> 3 & 1 # CONTROL ALGO (0) euler angles control, (1) angular speed control state['altitude'] = s >> 4 & 1 # ALTITUDE CONTROL ALGO : (0) altitude control inactive (1) altitude control active state['user_feedback_start'] = s >> 5 & 1 # USER feedback : Start button state state['command'] = s >> 6 & 1 # Control command ACK : (0) None, (1) one received state['fw_file'] = s >> 7 & 1 # Firmware file is good (1) state['fw_ver'] = s >> 8 & 1 # Firmware update is newer (1) state['fw_upd'] = s >> 9 & 1 # Firmware update is ongoing (1) state['navdata_demo'] = s >> 10 & 1 # Navdata demo : (0) All navdata, (1) only navdata demo state['navdata_bootstrap'] = s >> 11 & 1 # Navdata bootstrap : (0) options sent in all or demo mode, (1) no navdata options sent state['motors'] = s >> 12 & 1 # Motor status : (0) Ok, (1) Motors problem state['com_lost'] = s >> 13 & 1 # Communication lost : (1) com problem, (0) Com is ok state['vbat_low'] = s >> 15 & 1 # VBat low : (1) too low, (0) Ok state['user_el'] = s >> 16 & 1 # User Emergency Landing : (1) User EL is ON, (0) User EL is OFF state['timer_elapsed'] = s >> 17 & 1 # Timer elapsed : (1) elapsed, (0) not elapsed state['angles_out_of_range'] = s >> 19 & 1 # Angles : (0) Ok, (1) out of range state['ultrasound'] = s >> 21 & 1 # Ultrasonic sensor : (0) Ok, (1) deaf state['cutout'] = s >> 22 & 1 # Cutout system detection : (0) Not detected, (1) detected state['pic_version'] = s >> 23 & 1 # PIC Version number OK : (0) a bad version number, (1) version number is OK state['atcodec_thread_on'] = s >> 24 & 1 # ATCodec thread ON : (0) thread OFF (1) thread ON state['navdata_thread_on'] = s >> 25 & 1 # Navdata thread ON : (0) thread OFF (1) thread ON state['video_thread_on'] = s >> 26 & 1 # Video thread ON : (0) thread OFF (1) thread ON state['acq_thread_on'] = s >> 27 & 1 # Acquisition thread ON : (0) thread OFF (1) thread ON state['ctrl_watchdog'] = s >> 28 & 1 # CTRL watchdog : (1) delay in control execution (> 5ms), (0) control is well scheduled state['adc_watchdog'] = s >> 29 & 1 # ADC Watchdog : (1) delay in uart2 dsr (> 5ms), (0) uart2 is good state['com_watchdog'] = s >> 30 & 1 # Communication Watchdog : (1) com problem, (0) Com is ok state['emergency'] = s >> 31 & 1 # Emergency landing : (0) no emergency, (1) emergency data = dict() data['state'] = state data['header'] = _[0] data['sequence'] = _[2] data['vision'] = _[3] offset += struct.calcsize('IIII') demo_fields = [ 'ctrl_state', 'battery', 'theta', 'phi', 'psi', 'altitude', 'vx', 'vy', 'vz', 'num_frames' ] angles = ['theta', 'phi', 'psi'] while True: try: id_nr, size = struct.unpack_from('HH', packet, offset) offset += struct.calcsize('HH') except struct.error: break values = [] for i in range(size - struct.calcsize('HH')): values.append(struct.unpack_from('c', packet, offset)[0]) offset += struct.calcsize('c') if id_nr == 0: values = struct.unpack_from('IIfffIfffI', b''.join(values)) demo = dict(zip(demo_fields, values)) for a in angles: demo[a] = int(demo[a] / 1000) data['demo'] = demo return data
[ "def", "decode", "(", "packet", ")", ":", "offset", "=", "0", "_", "=", "struct", ".", "unpack_from", "(", "'IIII'", ",", "packet", ",", "offset", ")", "s", "=", "_", "[", "1", "]", "state", "=", "dict", "(", ")", "state", "[", "'fly'", "]", "=", "s", "&", "1", "# FLY MASK : (0) ardrone is landed, (1) ardrone is flying", "state", "[", "'video'", "]", "=", "s", ">>", "1", "&", "1", "# VIDEO MASK : (0) video disable, (1) video enable", "state", "[", "'vision'", "]", "=", "s", ">>", "2", "&", "1", "# VISION MASK : (0) vision disable, (1) vision enable", "state", "[", "'control'", "]", "=", "s", ">>", "3", "&", "1", "# CONTROL ALGO (0) euler angles control, (1) angular speed control", "state", "[", "'altitude'", "]", "=", "s", ">>", "4", "&", "1", "# ALTITUDE CONTROL ALGO : (0) altitude control inactive (1) altitude control active", "state", "[", "'user_feedback_start'", "]", "=", "s", ">>", "5", "&", "1", "# USER feedback : Start button state", "state", "[", "'command'", "]", "=", "s", ">>", "6", "&", "1", "# Control command ACK : (0) None, (1) one received", "state", "[", "'fw_file'", "]", "=", "s", ">>", "7", "&", "1", "# Firmware file is good (1)", "state", "[", "'fw_ver'", "]", "=", "s", ">>", "8", "&", "1", "# Firmware update is newer (1)", "state", "[", "'fw_upd'", "]", "=", "s", ">>", "9", "&", "1", "# Firmware update is ongoing (1)", "state", "[", "'navdata_demo'", "]", "=", "s", ">>", "10", "&", "1", "# Navdata demo : (0) All navdata, (1) only navdata demo", "state", "[", "'navdata_bootstrap'", "]", "=", "s", ">>", "11", "&", "1", "# Navdata bootstrap : (0) options sent in all or demo mode, (1) no navdata options sent", "state", "[", "'motors'", "]", "=", "s", ">>", "12", "&", "1", "# Motor status : (0) Ok, (1) Motors problem", "state", "[", "'com_lost'", "]", "=", "s", ">>", "13", "&", "1", "# Communication lost : (1) com problem, (0) Com is ok", "state", "[", "'vbat_low'", "]", "=", "s", ">>", "15", "&", "1", "# VBat low : (1) too low, (0) Ok", "state", "[", "'user_el'", "]", "=", "s", ">>", "16", "&", "1", "# User Emergency Landing : (1) User EL is ON, (0) User EL is OFF", "state", "[", "'timer_elapsed'", "]", "=", "s", ">>", "17", "&", "1", "# Timer elapsed : (1) elapsed, (0) not elapsed", "state", "[", "'angles_out_of_range'", "]", "=", "s", ">>", "19", "&", "1", "# Angles : (0) Ok, (1) out of range", "state", "[", "'ultrasound'", "]", "=", "s", ">>", "21", "&", "1", "# Ultrasonic sensor : (0) Ok, (1) deaf", "state", "[", "'cutout'", "]", "=", "s", ">>", "22", "&", "1", "# Cutout system detection : (0) Not detected, (1) detected", "state", "[", "'pic_version'", "]", "=", "s", ">>", "23", "&", "1", "# PIC Version number OK : (0) a bad version number, (1) version number is OK", "state", "[", "'atcodec_thread_on'", "]", "=", "s", ">>", "24", "&", "1", "# ATCodec thread ON : (0) thread OFF (1) thread ON", "state", "[", "'navdata_thread_on'", "]", "=", "s", ">>", "25", "&", "1", "# Navdata thread ON : (0) thread OFF (1) thread ON", "state", "[", "'video_thread_on'", "]", "=", "s", ">>", "26", "&", "1", "# Video thread ON : (0) thread OFF (1) thread ON", "state", "[", "'acq_thread_on'", "]", "=", "s", ">>", "27", "&", "1", "# Acquisition thread ON : (0) thread OFF (1) thread ON", "state", "[", "'ctrl_watchdog'", "]", "=", "s", ">>", "28", "&", "1", "# CTRL watchdog : (1) delay in control execution (> 5ms), (0) control is well scheduled", "state", "[", "'adc_watchdog'", "]", "=", "s", ">>", "29", "&", "1", "# ADC Watchdog : (1) delay in uart2 dsr (> 5ms), (0) uart2 is good", "state", 
"[", "'com_watchdog'", "]", "=", "s", ">>", "30", "&", "1", "# Communication Watchdog : (1) com problem, (0) Com is ok", "state", "[", "'emergency'", "]", "=", "s", ">>", "31", "&", "1", "# Emergency landing : (0) no emergency, (1) emergency", "data", "=", "dict", "(", ")", "data", "[", "'state'", "]", "=", "state", "data", "[", "'header'", "]", "=", "_", "[", "0", "]", "data", "[", "'sequence'", "]", "=", "_", "[", "2", "]", "data", "[", "'vision'", "]", "=", "_", "[", "3", "]", "offset", "+=", "struct", ".", "calcsize", "(", "'IIII'", ")", "demo_fields", "=", "[", "'ctrl_state'", ",", "'battery'", ",", "'theta'", ",", "'phi'", ",", "'psi'", ",", "'altitude'", ",", "'vx'", ",", "'vy'", ",", "'vz'", ",", "'num_frames'", "]", "angles", "=", "[", "'theta'", ",", "'phi'", ",", "'psi'", "]", "while", "True", ":", "try", ":", "id_nr", ",", "size", "=", "struct", ".", "unpack_from", "(", "'HH'", ",", "packet", ",", "offset", ")", "offset", "+=", "struct", ".", "calcsize", "(", "'HH'", ")", "except", "struct", ".", "error", ":", "break", "values", "=", "[", "]", "for", "i", "in", "range", "(", "size", "-", "struct", ".", "calcsize", "(", "'HH'", ")", ")", ":", "values", ".", "append", "(", "struct", ".", "unpack_from", "(", "'c'", ",", "packet", ",", "offset", ")", "[", "0", "]", ")", "offset", "+=", "struct", ".", "calcsize", "(", "'c'", ")", "if", "id_nr", "==", "0", ":", "values", "=", "struct", ".", "unpack_from", "(", "'IIfffIfffI'", ",", "b''", ".", "join", "(", "values", ")", ")", "demo", "=", "dict", "(", "zip", "(", "demo_fields", ",", "values", ")", ")", "for", "a", "in", "angles", ":", "demo", "[", "a", "]", "=", "int", "(", "demo", "[", "a", "]", "/", "1000", ")", "data", "[", "'demo'", "]", "=", "demo", "return", "data" ]
Decode a navdata packet.
[ "Decode", "a", "navdata", "packet", "." ]
train
https://github.com/fkmclane/python-ardrone/blob/def437148a114f66d1ca30bf2398a017002b2cd6/ardrone/navdata.py#L4-L82
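The 32-bit state word unpacked above is a plain bitfield; a small sketch of pulling a few named flags out of a raw value, mirroring decode() (the sample value is illustrative, not from a real drone):

state_word = 0x80000001  # illustrative: bit 0 (fly) and bit 31 (emergency) set

fly = state_word & 1
com_watchdog = (state_word >> 30) & 1
emergency = (state_word >> 31) & 1

assert (fly, com_watchdog, emergency) == (1, 0, 1)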
pearu/pyvtk
pyvtk/__init__.py
VtkData.tofile
def tofile(self, filename, format = 'ascii'): """Save VTK data to file. """ if not common.is_string(filename): raise TypeError('argument filename must be string but got %s'%(type(filename))) if format not in ['ascii','binary']: raise TypeError('argument format must be ascii | binary') filename = filename.strip() if not filename: raise ValueError('filename must be non-empty string') if filename[-4:]!='.vtk': filename += '.vtk' f = open(filename,'wb') f.write(self.to_string(format)) f.close()
python
def tofile(self, filename, format = 'ascii'): """Save VTK data to file. """ if not common.is_string(filename): raise TypeError('argument filename must be string but got %s'%(type(filename))) if format not in ['ascii','binary']: raise TypeError('argument format must be ascii | binary') filename = filename.strip() if not filename: raise ValueError('filename must be non-empty string') if filename[-4:]!='.vtk': filename += '.vtk' f = open(filename,'wb') f.write(self.to_string(format)) f.close()
[ "def", "tofile", "(", "self", ",", "filename", ",", "format", "=", "'ascii'", ")", ":", "if", "not", "common", ".", "is_string", "(", "filename", ")", ":", "raise", "TypeError", "(", "'argument filename must be string but got %s'", "%", "(", "type", "(", "filename", ")", ")", ")", "if", "format", "not", "in", "[", "'ascii'", ",", "'binary'", "]", ":", "raise", "TypeError", "(", "'argument format must be ascii | binary'", ")", "filename", "=", "filename", ".", "strip", "(", ")", "if", "not", "filename", ":", "raise", "ValueError", "(", "'filename must be non-empty string'", ")", "if", "filename", "[", "-", "4", ":", "]", "!=", "'.vtk'", ":", "filename", "+=", "'.vtk'", "f", "=", "open", "(", "filename", ",", "'wb'", ")", "f", ".", "write", "(", "self", ".", "to_string", "(", "format", ")", ")", "f", ".", "close", "(", ")" ]
Save VTK data to file.
[ "Save", "VTK", "data", "to", "file", "." ]
train
https://github.com/pearu/pyvtk/blob/b004ec3c03299a2d75338a4be93dd29f076b70ab/pyvtk/__init__.py#L190-L204
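A hedged usage sketch of tofile with a minimal dataset, assuming pyvtk's StructuredPoints/PointData/Scalars classes (the grid and scalar values are illustrative):

import pyvtk

data = pyvtk.VtkData(
    pyvtk.StructuredPoints([2, 2, 1]),                     # 2x2x1 grid
    pyvtk.PointData(pyvtk.Scalars([0.0, 1.0, 2.0, 3.0])),  # one scalar per point
)
data.tofile('example', format='ascii')  # the '.vtk' suffix is appended automatically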
GoodCloud/django-zebra
zebra/mixins.py
_get_attr_value
def _get_attr_value(instance, attr, default=None): """ Simple helper to get the value of an instance's attribute if it exists. If the instance attribute is callable it will be called and the result will be returned. Optionally accepts a default value to return if the attribute is missing. Defaults to `None` >>> class Foo(object): ... bar = 'baz' ... def hi(self): ... return 'hi' >>> f = Foo() >>> _get_attr_value(f, 'bar') 'baz' >>> _get_attr_value(f, 'xyz') >>> _get_attr_value(f, 'xyz', False) False >>> _get_attr_value(f, 'hi') 'hi' """ value = default if hasattr(instance, attr): value = getattr(instance, attr) if callable(value): value = value() return value
python
def _get_attr_value(instance, attr, default=None): """ Simple helper to get the value of an instance's attribute if it exists. If the instance attribute is callable it will be called and the result will be returned. Optionally accepts a default value to return if the attribute is missing. Defaults to `None` >>> class Foo(object): ... bar = 'baz' ... def hi(self): ... return 'hi' >>> f = Foo() >>> _get_attr_value(f, 'bar') 'baz' >>> _get_attr_value(f, 'xyz') >>> _get_attr_value(f, 'xyz', False) False >>> _get_attr_value(f, 'hi') 'hi' """ value = default if hasattr(instance, attr): value = getattr(instance, attr) if callable(value): value = value() return value
[ "def", "_get_attr_value", "(", "instance", ",", "attr", ",", "default", "=", "None", ")", ":", "value", "=", "default", "if", "hasattr", "(", "instance", ",", "attr", ")", ":", "value", "=", "getattr", "(", "instance", ",", "attr", ")", "if", "callable", "(", "value", ")", ":", "value", "=", "value", "(", ")", "return", "value" ]
Simple helper to get the value of an instance's attribute if it exists. If the instance attribute is callable it will be called and the result will be returned. Optionally accepts a default value to return if the attribute is missing. Defaults to `None` >>> class Foo(object): ... bar = 'baz' ... def hi(self): ... return 'hi' >>> f = Foo() >>> _get_attr_value(f, 'bar') 'baz' >>> _get_attr_value(f, 'xyz') >>> _get_attr_value(f, 'xyz', False) False >>> _get_attr_value(f, 'hi') 'hi'
[ "Simple", "helper", "to", "get", "the", "value", "of", "an", "instance", "s", "attribute", "if", "it", "exists", "." ]
train
https://github.com/GoodCloud/django-zebra/blob/fdb2f2eb9eddb83eebb339359e629c88f4f6ba17/zebra/mixins.py#L6-L35
GoodCloud/django-zebra
zebra/utils.py
audit_customer_subscription
def audit_customer_subscription(customer, unknown=True): """ Audits the provided customer's subscription against stripe and returns a pair that contains a boolean and a result type. Default result types can be found in zebra.conf.defaults and can be overridden in your project's settings. """ if (hasattr(customer, 'suspended') and customer.suspended): result = AUDIT_RESULTS['suspended'] else: if hasattr(customer, 'subscription'): try: result = AUDIT_RESULTS[customer.subscription.status] except KeyError, err: # TODO should this be a more specific exception class? raise Exception("Unable to locate a result set for subscription status %s in ZEBRA_AUDIT_RESULTS" % str(err)) else: result = AUDIT_RESULTS['no_subscription'] return result
python
def audit_customer_subscription(customer, unknown=True): """ Audits the provided customer's subscription against stripe and returns a pair that contains a boolean and a result type. Default result types can be found in zebra.conf.defaults and can be overridden in your project's settings. """ if (hasattr(customer, 'suspended') and customer.suspended): result = AUDIT_RESULTS['suspended'] else: if hasattr(customer, 'subscription'): try: result = AUDIT_RESULTS[customer.subscription.status] except KeyError, err: # TODO should this be a more specific exception class? raise Exception("Unable to locate a result set for subscription status %s in ZEBRA_AUDIT_RESULTS" % str(err)) else: result = AUDIT_RESULTS['no_subscription'] return result
[ "def", "audit_customer_subscription", "(", "customer", ",", "unknown", "=", "True", ")", ":", "if", "(", "hasattr", "(", "customer", ",", "'suspended'", ")", "and", "customer", ".", "suspended", ")", ":", "result", "=", "AUDIT_RESULTS", "[", "'suspended'", "]", "else", ":", "if", "hasattr", "(", "customer", ",", "'subscription'", ")", ":", "try", ":", "result", "=", "AUDIT_RESULTS", "[", "customer", ".", "subscription", ".", "status", "]", "except", "KeyError", ",", "err", ":", "# TODO should this be a more specific exception class?", "raise", "Exception", "(", "\"Unable to locate a result set for \\\nsubscription status %s in ZEBRA_AUDIT_RESULTS\"", ")", "%", "str", "(", "err", ")", "else", ":", "result", "=", "AUDIT_RESULTS", "[", "'no_subscription'", "]", "return", "result" ]
Audits the provided customer's subscription against stripe and returns a pair that contains a boolean and a result type. Default result types can be found in zebra.conf.defaults and can be overridden in your project's settings.
[ "Audits", "the", "provided", "customer", "s", "subscription", "against", "stripe", "and", "returns", "a", "pair", "that", "contains", "a", "boolean", "and", "a", "result", "type", "." ]
train
https://github.com/GoodCloud/django-zebra/blob/fdb2f2eb9eddb83eebb339359e629c88f4f6ba17/zebra/utils.py#L6-L26
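A hedged sketch of overriding the audit mapping from a project's settings; the keys and pair values below are illustrative placeholders, and the real defaults live in zebra.conf.defaults as noted in the docstring:

# settings.py (illustrative values only)
ZEBRA_AUDIT_RESULTS = {
    'active': (True, 'active'),
    'trialing': (True, 'trialing'),
    'past_due': (False, 'past_due'),
    'canceled': (False, 'canceled'),
    'no_subscription': (False, 'no_subscription'),
    'suspended': (False, 'suspended'),
}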
pearu/pyvtk
pyvtk/PolyData.py
polydata_fromfile
def polydata_fromfile(f, self): """Use VtkData(<filename>).""" points = [] data = dict(vertices=[], lines=[], polygons=[], triangle_strips=[]) l = common._getline(f).decode('ascii') k,n,datatype = [s.strip().lower() for s in l.split(' ')] if k!='points': raise ValueError('expected points but got %s'%(repr(k))) n = int(n) assert datatype in ['bit','unsigned_char','char','unsigned_short','short','unsigned_int','int','unsigned_long','long','float','double'],repr(datatype) log.debug('\tgetting %s points'%n) while len(points) < 3*n: l = common._getline(f).decode('ascii') points += map(eval,l.split(' ')) assert len(points)==3*n while 1: l = common._getline(f) if l is None: break l = l.decode('ascii') sl = l.split(' ') k = sl[0].strip().lower() if k not in ['vertices','lines','polygons','triangle_strips']: break assert len(sl)==3 n = int(sl[1]) size = int(sl[2]) lst = [] while len(lst) < size: l = common._getline(f).decode('ascii') lst += map(eval, l.split(' ')) assert len(lst)==size lst2 = [] j = 0 for i in range(n): lst2.append(lst[j+1:j+lst[j]+1]) j += lst[j]+1 data[k] = lst2 return PolyData(points,data['vertices'], data['lines'], data['polygons'], data['triangle_strips']), l.encode()
python
def polydata_fromfile(f, self): """Use VtkData(<filename>).""" points = [] data = dict(vertices=[], lines=[], polygons=[], triangle_strips=[]) l = common._getline(f).decode('ascii') k,n,datatype = [s.strip().lower() for s in l.split(' ')] if k!='points': raise ValueError('expected points but got %s'%(repr(k))) n = int(n) assert datatype in ['bit','unsigned_char','char','unsigned_short','short','unsigned_int','int','unsigned_long','long','float','double'],repr(datatype) log.debug('\tgetting %s points'%n) while len(points) < 3*n: l = common._getline(f).decode('ascii') points += map(eval,l.split(' ')) assert len(points)==3*n while 1: l = common._getline(f) if l is None: break l = l.decode('ascii') sl = l.split(' ') k = sl[0].strip().lower() if k not in ['vertices','lines','polygons','triangle_strips']: break assert len(sl)==3 n = int(sl[1]) size = int(sl[2]) lst = [] while len(lst) < size: l = common._getline(f).decode('ascii') lst += map(eval, l.split(' ')) assert len(lst)==size lst2 = [] j = 0 for i in range(n): lst2.append(lst[j+1:j+lst[j]+1]) j += lst[j]+1 data[k] = lst2 return PolyData(points,data['vertices'], data['lines'], data['polygons'], data['triangle_strips']), l.encode()
[ "def", "polydata_fromfile", "(", "f", ",", "self", ")", ":", "points", "=", "[", "]", "data", "=", "dict", "(", "vertices", "=", "[", "]", ",", "lines", "=", "[", "]", ",", "polygons", "=", "[", "]", ",", "triangle_strips", "=", "[", "]", ")", "l", "=", "common", ".", "_getline", "(", "f", ")", ".", "decode", "(", "'ascii'", ")", "k", ",", "n", ",", "datatype", "=", "[", "s", ".", "strip", "(", ")", ".", "lower", "(", ")", "for", "s", "in", "l", ".", "split", "(", "' '", ")", "]", "if", "k", "!=", "'points'", ":", "raise", "ValueError", "(", "'expected points but got %s'", "%", "(", "repr", "(", "k", ")", ")", ")", "n", "=", "int", "(", "n", ")", "assert", "datatype", "in", "[", "'bit'", ",", "'unsigned_char'", ",", "'char'", ",", "'unsigned_short'", ",", "'short'", ",", "'unsigned_int'", ",", "'int'", ",", "'unsigned_long'", ",", "'long'", ",", "'float'", ",", "'double'", "]", ",", "repr", "(", "datatype", ")", "log", ".", "debug", "(", "'\\tgetting %s points'", "%", "n", ")", "while", "len", "(", "points", ")", "<", "3", "*", "n", ":", "l", "=", "common", ".", "_getline", "(", "f", ")", ".", "decode", "(", "'ascii'", ")", "points", "+=", "map", "(", "eval", ",", "l", ".", "split", "(", "' '", ")", ")", "assert", "len", "(", "points", ")", "==", "3", "*", "n", "while", "1", ":", "l", "=", "common", ".", "_getline", "(", "f", ")", "if", "l", "is", "None", ":", "break", "l", "=", "l", ".", "decode", "(", "'ascii'", ")", "sl", "=", "l", ".", "split", "(", "' '", ")", "k", "=", "sl", "[", "0", "]", ".", "strip", "(", ")", ".", "lower", "(", ")", "if", "k", "not", "in", "[", "'vertices'", ",", "'lines'", ",", "'polygons'", ",", "'triangle_strips'", "]", ":", "break", "assert", "len", "(", "sl", ")", "==", "3", "n", "=", "int", "(", "sl", "[", "1", "]", ")", "size", "=", "int", "(", "sl", "[", "2", "]", ")", "lst", "=", "[", "]", "while", "len", "(", "lst", ")", "<", "size", ":", "l", "=", "common", ".", "_getline", "(", "f", ")", ".", "decode", "(", "'ascii'", ")", "lst", "+=", "map", "(", "eval", ",", "l", ".", "split", "(", "' '", ")", ")", "assert", "len", "(", "lst", ")", "==", "size", "lst2", "=", "[", "]", "j", "=", "0", "for", "i", "in", "range", "(", "n", ")", ":", "lst2", ".", "append", "(", "lst", "[", "j", "+", "1", ":", "j", "+", "lst", "[", "j", "]", "+", "1", "]", ")", "j", "+=", "lst", "[", "j", "]", "+", "1", "data", "[", "k", "]", "=", "lst2", "return", "PolyData", "(", "points", ",", "data", "[", "'vertices'", "]", ",", "data", "[", "'lines'", "]", ",", "data", "[", "'polygons'", "]", ",", "data", "[", "'triangle_strips'", "]", ")", ",", "l", ".", "encode", "(", ")" ]
Use VtkData(<filename>).
[ "Use", "VtkData", "(", "<filename", ">", ")", "." ]
train
https://github.com/pearu/pyvtk/blob/b004ec3c03299a2d75338a4be93dd29f076b70ab/pyvtk/PolyData.py#L73-L113
GoodCloud/django-zebra
zebra/views.py
webhooks
def webhooks(request): """ Handles all known webhooks from stripe, and calls signals. Plug in as you need. """ if request.method != "POST": return HttpResponse("Invalid Request.", status=400) json = simplejson.loads(request.POST["json"]) if json["event"] == "recurring_payment_failed": zebra_webhook_recurring_payment_failed.send(sender=None, customer=_try_to_get_customer_from_customer_id(json["customer"]), full_json=json) elif json["event"] == "invoice_ready": zebra_webhook_invoice_ready.send(sender=None, customer=_try_to_get_customer_from_customer_id(json["customer"]), full_json=json) elif json["event"] == "recurring_payment_succeeded": zebra_webhook_recurring_payment_succeeded.send(sender=None, customer=_try_to_get_customer_from_customer_id(json["customer"]), full_json=json) elif json["event"] == "subscription_trial_ending": zebra_webhook_subscription_trial_ending.send(sender=None, customer=_try_to_get_customer_from_customer_id(json["customer"]), full_json=json) elif json["event"] == "subscription_final_payment_attempt_failed": zebra_webhook_subscription_final_payment_attempt_failed.send(sender=None, customer=_try_to_get_customer_from_customer_id(json["customer"]), full_json=json) elif json["event"] == "ping": zebra_webhook_subscription_ping_sent.send(sender=None) else: return HttpResponse(status=400) return HttpResponse(status=200)
python
def webhooks(request): """ Handles all known webhooks from stripe, and calls signals. Plug in as you need. """ if request.method != "POST": return HttpResponse("Invalid Request.", status=400) json = simplejson.loads(request.POST["json"]) if json["event"] == "recurring_payment_failed": zebra_webhook_recurring_payment_failed.send(sender=None, customer=_try_to_get_customer_from_customer_id(json["customer"]), full_json=json) elif json["event"] == "invoice_ready": zebra_webhook_invoice_ready.send(sender=None, customer=_try_to_get_customer_from_customer_id(json["customer"]), full_json=json) elif json["event"] == "recurring_payment_succeeded": zebra_webhook_recurring_payment_succeeded.send(sender=None, customer=_try_to_get_customer_from_customer_id(json["customer"]), full_json=json) elif json["event"] == "subscription_trial_ending": zebra_webhook_subscription_trial_ending.send(sender=None, customer=_try_to_get_customer_from_customer_id(json["customer"]), full_json=json) elif json["event"] == "subscription_final_payment_attempt_failed": zebra_webhook_subscription_final_payment_attempt_failed.send(sender=None, customer=_try_to_get_customer_from_customer_id(json["customer"]), full_json=json) elif json["event"] == "ping": zebra_webhook_subscription_ping_sent.send(sender=None) else: return HttpResponse(status=400) return HttpResponse(status=200)
[ "def", "webhooks", "(", "request", ")", ":", "if", "request", ".", "method", "!=", "\"POST\"", ":", "return", "HttpResponse", "(", "\"Invalid Request.\"", ",", "status", "=", "400", ")", "json", "=", "simplejson", ".", "loads", "(", "request", ".", "POST", "[", "\"json\"", "]", ")", "if", "json", "[", "\"event\"", "]", "==", "\"recurring_payment_failed\"", ":", "zebra_webhook_recurring_payment_failed", ".", "send", "(", "sender", "=", "None", ",", "customer", "=", "_try_to_get_customer_from_customer_id", "(", "json", "[", "\"customer\"", "]", ")", ",", "full_json", "=", "json", ")", "elif", "json", "[", "\"event\"", "]", "==", "\"invoice_ready\"", ":", "zebra_webhook_invoice_ready", ".", "send", "(", "sender", "=", "None", ",", "customer", "=", "_try_to_get_customer_from_customer_id", "(", "json", "[", "\"customer\"", "]", ")", ",", "full_json", "=", "json", ")", "elif", "json", "[", "\"event\"", "]", "==", "\"recurring_payment_succeeded\"", ":", "zebra_webhook_recurring_payment_succeeded", ".", "send", "(", "sender", "=", "None", ",", "customer", "=", "_try_to_get_customer_from_customer_id", "(", "json", "[", "\"customer\"", "]", ")", ",", "full_json", "=", "json", ")", "elif", "json", "[", "\"event\"", "]", "==", "\"subscription_trial_ending\"", ":", "zebra_webhook_subscription_trial_ending", ".", "send", "(", "sender", "=", "None", ",", "customer", "=", "_try_to_get_customer_from_customer_id", "(", "json", "[", "\"customer\"", "]", ")", ",", "full_json", "=", "json", ")", "elif", "json", "[", "\"event\"", "]", "==", "\"subscription_final_payment_attempt_failed\"", ":", "zebra_webhook_subscription_final_payment_attempt_failed", ".", "send", "(", "sender", "=", "None", ",", "customer", "=", "_try_to_get_customer_from_customer_id", "(", "json", "[", "\"customer\"", "]", ")", ",", "full_json", "=", "json", ")", "elif", "json", "[", "\"event\"", "]", "==", "\"ping\"", ":", "zebra_webhook_subscription_ping_sent", ".", "send", "(", "sender", "=", "None", ")", "else", ":", "return", "HttpResponse", "(", "status", "=", "400", ")", "return", "HttpResponse", "(", "status", "=", "200", ")" ]
Handles all known webhooks from stripe, and calls signals. Plug in as you need.
[ "Handles", "all", "known", "webhooks", "from", "stripe", "and", "calls", "signals", ".", "Plug", "in", "as", "you", "need", "." ]
train
https://github.com/GoodCloud/django-zebra/blob/fdb2f2eb9eddb83eebb339359e629c88f4f6ba17/zebra/views.py#L28-L60
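Because this view only re-emits Stripe events as Django signals, consumers hook in via receivers. A hedged sketch of one such receiver; the signal import path (zebra.signals) and the handler name are assumptions for illustration, not taken from the source:

# Illustrative receiver; zebra.signals is an assumed module path.
from django.dispatch import receiver
from zebra.signals import zebra_webhook_recurring_payment_failed

@receiver(zebra_webhook_recurring_payment_failed)
def on_recurring_payment_failed(sender, customer=None, full_json=None, **kwargs):
    # React to the failed payment, e.g. flag the customer's account.
    if customer is not None:
        print('recurring payment failed for', customer)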
GoodCloud/django-zebra
zebra/views.py
webhooks_v2
def webhooks_v2(request): """ Handles all known webhooks from stripe, and calls signals. Plug in as you need. """ if request.method != "POST": return HttpResponse("Invalid Request.", status=400) try: event_json = simplejson.loads(request.body) except AttributeError: # Backwards compatibility # Prior to Django 1.4, request.body was named request.raw_post_data event_json = simplejson.loads(request.raw_post_data) event_key = event_json['type'].replace('.', '_') if event_key in WEBHOOK_MAP: WEBHOOK_MAP[event_key].send(sender=None, full_json=event_json) return HttpResponse(status=200)
python
def webhooks_v2(request): """ Handles all known webhooks from stripe, and calls signals. Plug in as you need. """ if request.method != "POST": return HttpResponse("Invalid Request.", status=400) try: event_json = simplejson.loads(request.body) except AttributeError: # Backwards compatibility # Prior to Django 1.4, request.body was named request.raw_post_data event_json = simplejson.loads(request.raw_post_data) event_key = event_json['type'].replace('.', '_') if event_key in WEBHOOK_MAP: WEBHOOK_MAP[event_key].send(sender=None, full_json=event_json) return HttpResponse(status=200)
[ "def", "webhooks_v2", "(", "request", ")", ":", "if", "request", ".", "method", "!=", "\"POST\"", ":", "return", "HttpResponse", "(", "\"Invalid Request.\"", ",", "status", "=", "400", ")", "try", ":", "event_json", "=", "simplejson", ".", "loads", "(", "request", ".", "body", ")", "except", "AttributeError", ":", "# Backwards compatibility", "# Prior to Django 1.4, request.body was named request.raw_post_data", "event_json", "=", "simplejson", ".", "loads", "(", "request", ".", "raw_post_data", ")", "event_key", "=", "event_json", "[", "'type'", "]", ".", "replace", "(", "'.'", ",", "'_'", ")", "if", "event_key", "in", "WEBHOOK_MAP", ":", "WEBHOOK_MAP", "[", "event_key", "]", ".", "send", "(", "sender", "=", "None", ",", "full_json", "=", "event_json", ")", "return", "HttpResponse", "(", "status", "=", "200", ")" ]
Handles all known webhooks from stripe, and calls signals. Plug in as you need.
[ "Handles", "all", "known", "webhooks", "from", "stripe", "and", "calls", "signals", ".", "Plug", "in", "as", "you", "need", "." ]
train
https://github.com/GoodCloud/django-zebra/blob/fdb2f2eb9eddb83eebb339359e629c88f4f6ba17/zebra/views.py#L63-L82
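Unlike the v1 view, webhooks_v2 reads the raw request body, so it can be exercised with Django's test client. A sketch under the assumption that the view is routed at /zebra/webhooks/v2/ (the URL is illustrative):

# Simulated Stripe event POST; the route is a hypothetical urlconf entry.
import json
from django.test import Client

client = Client()
response = client.post(
    '/zebra/webhooks/v2/',
    data=json.dumps({'type': 'invoice.payment_succeeded'}),
    content_type='application/json',
)
assert response.status_code == 200  # the view returns 200 even for unmapped events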
pearu/pyvtk
pyvtk/common.py
is_number
def is_number(obj): """Check if obj is number.""" return isinstance(obj, (int, float, np.int_, np.float_))
python
def is_number(obj): """Check if obj is number.""" return isinstance(obj, (int, float, np.int_, np.float_))
[ "def", "is_number", "(", "obj", ")", ":", "return", "isinstance", "(", "obj", ",", "(", "int", ",", "float", ",", "np", ".", "int_", ",", "np", ".", "float_", ")", ")" ]
Check if obj is number.
[ "Check", "if", "obj", "is", "number", "." ]
train
https://github.com/pearu/pyvtk/blob/b004ec3c03299a2d75338a4be93dd29f076b70ab/pyvtk/common.py#L34-L36
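A quick behavior check, derived directly from the isinstance tuple above; note that bool values pass, since bool subclasses int:

# Importable as written, since is_number is a module-level function
# in pyvtk/common.py.
from pyvtk.common import is_number

assert is_number(3) and is_number(2.5)
assert not is_number('3') and not is_number([1, 2])
assert is_number(True)  # bool is a subclass of int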
pearu/pyvtk
pyvtk/common.py
Common.get_seq
def get_seq(self,obj,default=None): """Return sequence.""" if is_sequence(obj): return obj if is_number(obj): return [obj] if obj is None and default is not None: log.warning('using default value (%s)'%(default)) return self.get_seq(default) raise ValueError('expected sequence|number but got %s'%(type(obj)))
python
def get_seq(self,obj,default=None): """Return sequence.""" if is_sequence(obj): return obj if is_number(obj): return [obj] if obj is None and default is not None: log.warning('using default value (%s)'%(default)) return self.get_seq(default) raise ValueError('expected sequence|number but got %s'%(type(obj)))
[ "def", "get_seq", "(", "self", ",", "obj", ",", "default", "=", "None", ")", ":", "if", "is_sequence", "(", "obj", ")", ":", "return", "obj", "if", "is_number", "(", "obj", ")", ":", "return", "[", "obj", "]", "if", "obj", "is", "None", "and", "default", "is", "not", "None", ":", "log", ".", "warning", "(", "'using default value (%s)'", "%", "(", "default", ")", ")", "return", "self", ".", "get_seq", "(", "default", ")", "raise", "ValueError", "(", "'expected sequence|number but got %s'", "%", "(", "type", "(", "obj", ")", ")", ")" ]
Return sequence.
[ "Return", "sequence", "." ]
train
https://github.com/pearu/pyvtk/blob/b004ec3c03299a2d75338a4be93dd29f076b70ab/pyvtk/common.py#L130-L138
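Tracing the three branches above gives the following behavior; this assumes Common can be instantiated directly, which its role as a plain mixin suggests:

from pyvtk.common import Common

c = Common()
assert c.get_seq([1, 2, 3]) == [1, 2, 3]   # sequences pass through unchanged
assert c.get_seq(5) == [5]                 # a bare number is wrapped in a list
assert c.get_seq(None, default=0) == [0]   # None falls back to default (logs a warning)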
pearu/pyvtk
pyvtk/common.py
Common.get_seq_seq
def get_seq_seq(self,obj,default=None): """Return sequence of sequences.""" if is_sequence2(obj): return [self.get_seq(o,default) for o in obj] else: return [self.get_seq(obj,default)]
python
def get_seq_seq(self,obj,default=None): """Return sequence of sequences.""" if is_sequence2(obj): return [self.get_seq(o,default) for o in obj] else: return [self.get_seq(obj,default)]
[ "def", "get_seq_seq", "(", "self", ",", "obj", ",", "default", "=", "None", ")", ":", "if", "is_sequence2", "(", "obj", ")", ":", "return", "[", "self", ".", "get_seq", "(", "o", ",", "default", ")", "for", "o", "in", "obj", "]", "else", ":", "return", "[", "self", ".", "get_seq", "(", "obj", ",", "default", ")", "]" ]
Return sequence of sequences.
[ "Return", "sequence", "of", "sequences", "." ]
train
https://github.com/pearu/pyvtk/blob/b004ec3c03299a2d75338a4be93dd29f076b70ab/pyvtk/common.py#L139-L144
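A short sketch of the nesting normalization, assuming is_sequence2 reports False for a flat list of numbers (as its use here implies):

from pyvtk.common import Common

c = Common()
assert c.get_seq_seq([[1, 2], [3]]) == [[1, 2], [3]]  # already nested: each inner item normalized
assert c.get_seq_seq([1, 2, 3]) == [[1, 2, 3]]        # flat sequence: wrapped one level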
pearu/pyvtk
pyvtk/common.py
Common.get_3_tuple
def get_3_tuple(self,obj,default=None): """Return 3-tuple from number -> (obj,default[1],default[2]) 0-sequence|None -> default 1-sequence -> (obj[0],default[1],default[2]) 2-sequence -> (obj[0],obj[1],default[2]) (3 or more)-sequence -> (obj[0],obj[1],obj[2]) """ if not (default is not None \ and type(default) is tuple \ and len(default)==3): raise ValueError('argument default must be 3-tuple|None but got %s'%(default)) if is_sequence(obj): n = len(obj) if n>3: log.warning('expected 3-sequence but got %s-%s'%(n,type(obj))) if n>=3: return tuple(obj) log.warning('filling with default value (%s) to obtain size=3'%(default[0])) if default is not None: if n==0: return default elif n==1: return (obj[0],default[1],default[2]) elif n==2: return (obj[0],obj[1],default[2]) elif is_number(obj) and default is not None: log.warning('filling with default value (%s) to obtain size=3'%(default[0])) return (obj,default[1],default[2]) elif obj is None and default is not None: log.warning('filling with default value (%s) to obtain size=3'%(default[0])) return default raise ValueError('failed to construct 3-tuple from %s'%(type(obj)))
python
def get_3_tuple(self,obj,default=None): """Return 3-tuple from number -> (obj,default[1],default[2]) 0-sequence|None -> default 1-sequence -> (obj[0],default[1],default[2]) 2-sequence -> (obj[0],obj[1],default[2]) (3 or more)-sequence -> (obj[0],obj[1],obj[2]) """ if not (default is not None \ and type(default) is tuple \ and len(default)==3): raise ValueError('argument default must be 3-tuple|None but got %s'%(default)) if is_sequence(obj): n = len(obj) if n>3: log.warning('expected 3-sequence but got %s-%s'%(n,type(obj))) if n>=3: return tuple(obj) log.warning('filling with default value (%s) to obtain size=3'%(default[0])) if default is not None: if n==0: return default elif n==1: return (obj[0],default[1],default[2]) elif n==2: return (obj[0],obj[1],default[2]) elif is_number(obj) and default is not None: log.warning('filling with default value (%s) to obtain size=3'%(default[0])) return (obj,default[1],default[2]) elif obj is None and default is not None: log.warning('filling with default value (%s) to obtain size=3'%(default[0])) return default raise ValueError('failed to construct 3-tuple from %s'%(type(obj)))
[ "def", "get_3_tuple", "(", "self", ",", "obj", ",", "default", "=", "None", ")", ":", "if", "not", "(", "default", "is", "not", "None", "and", "type", "(", "default", ")", "is", "tuple", "and", "len", "(", "default", ")", "==", "3", ")", ":", "raise", "ValueError", "(", "'argument default must be 3-tuple|None but got %s'", "%", "(", "default", ")", ")", "if", "is_sequence", "(", "obj", ")", ":", "n", "=", "len", "(", "obj", ")", "if", "n", ">", "3", ":", "log", ".", "warning", "(", "'expected 3-sequence but got %s-%s'", "%", "(", "n", ",", "type", "(", "obj", ")", ")", ")", "if", "n", ">=", "3", ":", "return", "tuple", "(", "obj", ")", "log", ".", "warning", "(", "'filling with default value (%s) to obtain size=3'", "%", "(", "default", "[", "0", "]", ")", ")", "if", "default", "is", "not", "None", ":", "if", "n", "==", "0", ":", "return", "default", "elif", "n", "==", "1", ":", "return", "(", "obj", "[", "0", "]", ",", "default", "[", "1", "]", ",", "default", "[", "2", "]", ")", "elif", "n", "==", "2", ":", "return", "(", "obj", "[", "0", "]", ",", "obj", "[", "1", "]", ",", "default", "[", "2", "]", ")", "elif", "is_number", "(", "obj", ")", "and", "default", "is", "not", "None", ":", "log", ".", "warning", "(", "'filling with default value (%s) to obtain size=3'", "%", "(", "default", "[", "0", "]", ")", ")", "return", "(", "obj", ",", "default", "[", "1", "]", ",", "default", "[", "2", "]", ")", "elif", "obj", "is", "None", "and", "default", "is", "not", "None", ":", "log", ".", "warning", "(", "'filling with default value (%s) to obtain size=3'", "%", "(", "default", "[", "0", "]", ")", ")", "return", "default", "raise", "ValueError", "(", "'failed to construct 3-tuple from %s'", "%", "(", "type", "(", "obj", ")", ")", ")" ]
Return 3-tuple from number -> (obj,default[1],default[2]) 0-sequence|None -> default 1-sequence -> (obj[0],default[1],default[2]) 2-sequence -> (obj[0],obj[1],default[2]) (3 or more)-sequence -> (obj[0],obj[1],obj[2])
[ "Return", "3", "-", "tuple", "from", "number", "-", ">", "(", "obj", "default", "[", "1", "]", "default", "[", "2", "]", ")", "0", "-", "sequence|None", "-", ">", "default", "1", "-", "sequence", "-", ">", "(", "obj", "[", "0", "]", "default", "[", "1", "]", "default", "[", "2", "]", ")", "2", "-", "sequence", "-", ">", "(", "obj", "[", "0", "]", "obj", "[", "1", "]", "default", "[", "2", "]", ")", "(", "3", "or", "more", ")", "-", "sequence", "-", ">", "(", "obj", "[", "0", "]", "obj", "[", "1", "]", "obj", "[", "2", "]", ")" ]
train
https://github.com/pearu/pyvtk/blob/b004ec3c03299a2d75338a4be93dd29f076b70ab/pyvtk/common.py#L163-L195
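The padding behavior, traced from the branches above (each padded case also logs a warning); assumes Common is directly instantiable:

from pyvtk.common import Common

c = Common()
assert c.get_3_tuple((1, 2), default=(0, 0, 0)) == (1, 2, 0)   # 2-sequence padded
assert c.get_3_tuple(7, default=(0, 0, 0)) == (7, 0, 0)        # bare number padded
assert c.get_3_tuple(None, default=(0, 0, 0)) == (0, 0, 0)     # None -> default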