Dataset schema (column: type, observed range):

repository_name: string, 7 to 55 chars
func_path_in_repository: string, 4 to 223 chars
func_name: string, 1 to 134 chars
whole_func_string: string, 75 to 104k chars
language: string, 1 distinct value
func_code_string: string, 75 to 104k chars
func_code_tokens: list, 19 to 28.4k items
func_documentation_string: string, 1 to 46.9k chars
func_documentation_tokens: list, 1 to 1.97k items
split_name: string, 1 distinct value
func_code_url: string, 87 to 315 chars
benoitkugler/abstractDataLibrary
pyDLib/Core/groups.py
sortableListe.index_from_id
python
def index_from_id(self, Id):
    """Return the row of the given Id if it exists, otherwise None.
    Only works with pseudo-acces."""
    try:
        return [a.Id for a in self].index(Id)
    except ValueError:  # list.index raises ValueError (not IndexError) on a missing Id
        return None
[ "def", "index_from_id", "(", "self", ",", "Id", ")", ":", "try", ":", "return", "[", "a", ".", "Id", "for", "a", "in", "self", "]", ".", "index", "(", "Id", ")", "except", "IndexError", ":", "return" ]
Return the row of given Id if it'exists, otherwise None. Only works with pseudo-acces
[ "Return", "the", "row", "of", "given", "Id", "if", "it", "exists", "otherwise", "None", ".", "Only", "works", "with", "pseudo", "-", "acces" ]
train
https://github.com/benoitkugler/abstractDataLibrary/blob/16be28e99837e40287a63803bbfdf67ac1806b7b/pyDLib/Core/groups.py#L38-L43
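A minimal usage sketch, assuming sortableListe accepts an iterable as list does; FakeAcces is a hypothetical stand-in for the library's acces objects, which expose an .Id attribute:

# Hypothetical stand-in for an acces object.
class FakeAcces:
    def __init__(self, Id):
        self.Id = Id

liste = sortableListe([FakeAcces(3), FakeAcces(7)])
assert liste.index_from_id(7) == 1
assert liste.index_from_id(99) is None  # a missing Id yields None rather than raising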
benoitkugler/abstractDataLibrary
pyDLib/Core/groups.py
Collection.append
python
def append(self, acces, **kwargs):
    """Append acces to the list. Quite slow, since it checks uniqueness.

    kwargs may set `info` for this acces.
    """
    if acces.Id in set(ac.Id for ac in self):
        raise ValueError("Acces id already in list !")
    list.append(self, acces)
    if kwargs:
        self.infos[acces.Id] = kwargs
[ "def", "append", "(", "self", ",", "acces", ",", "*", "*", "kwargs", ")", ":", "if", "acces", ".", "Id", "in", "set", "(", "ac", ".", "Id", "for", "ac", "in", "self", ")", ":", "raise", "ValueError", "(", "\"Acces id already in list !\"", ")", "list", ".", "append", "(", "self", ",", "acces", ")", "if", "kwargs", ":", "self", ".", "infos", "[", "acces", ".", "Id", "]", "=", "kwargs" ]
Append acces to list. Quite slow since it checks uniqueness. kwargs may set `info` for this acces.
[ "Append", "acces", "to", "list", ".", "Quite", "slow", "since", "it", "checks", "uniqueness", ".", "kwargs", "may", "set", "info", "for", "this", "acces", "." ]
train
https://github.com/benoitkugler/abstractDataLibrary/blob/16be28e99837e40287a63803bbfdf67ac1806b7b/pyDLib/Core/groups.py#L65-L73
benoitkugler/abstractDataLibrary
pyDLib/Core/groups.py
Collection.remove_id
python
def remove_id(self, key):
    """Remove the acces with Id == key."""
    self.infos.pop(key, "")
    new_l = [a for a in self if a.Id != key]
    list.__init__(self, new_l)
[ "def", "remove_id", "(", "self", ",", "key", ")", ":", "self", ".", "infos", ".", "pop", "(", "key", ",", "\"\"", ")", "new_l", "=", "[", "a", "for", "a", "in", "self", "if", "not", "(", "a", ".", "Id", "==", "key", ")", "]", "list", ".", "__init__", "(", "self", ",", "new_l", ")" ]
Suppress acces with id = key
[ "Suppress", "acces", "with", "id", "=", "key" ]
train
https://github.com/benoitkugler/abstractDataLibrary/blob/16be28e99837e40287a63803bbfdf67ac1806b7b/pyDLib/Core/groups.py#L75-L79
benoitkugler/abstractDataLibrary
pyDLib/Core/groups.py
Collection.get_info
python
def get_info(self, key=None, Id=None) -> dict:
    """Returns the information associated with an Id or a list index."""
    if key is not None:
        Id = self[key].Id
    return self.infos.get(Id, {})
[ "def", "get_info", "(", "self", ",", "key", "=", "None", ",", "Id", "=", "None", ")", "->", "dict", ":", "if", "key", "is", "not", "None", ":", "Id", "=", "self", "[", "key", "]", ".", "Id", "return", "self", ".", "infos", ".", "get", "(", "Id", ",", "{", "}", ")" ]
Returns information associated with Id or list index
[ "Returns", "information", "associated", "with", "Id", "or", "list", "index" ]
train
https://github.com/benoitkugler/abstractDataLibrary/blob/16be28e99837e40287a63803bbfdf67ac1806b7b/pyDLib/Core/groups.py#L91-L95
benoitkugler/abstractDataLibrary
pyDLib/Core/groups.py
Collection.recherche
python
def recherche(self, pattern, entete):
    """Performs a search field by field, using the functions defined in formats.
    Matches are marked with info[`font`].

    :param pattern: String to look for
    :param entete: Fields to look into
    :return: Nothing. The collection is changed in place.
    """
    new_liste = []
    sub_patterns = pattern.split(" ")
    for p in self:
        d_font = {att: False for att in entete}
        row_valid = True
        for sub_pattern in sub_patterns:
            found = False
            for att in entete:
                fonction_recherche = formats.ASSOCIATION[att][1]
                attr_found = bool(fonction_recherche(p[att], sub_pattern))
                if attr_found:
                    found = True
                    d_font[att] = True
            if not found:
                row_valid = False
                break
        if row_valid:
            new_liste.append(p)
            info = dict(self.get_info(Id=p.Id), font=d_font)
            self.infos[p.Id] = info
    list.__init__(self, new_liste)
[ "def", "recherche", "(", "self", ",", "pattern", ",", "entete", ")", ":", "new_liste", "=", "[", "]", "sub_patterns", "=", "pattern", ".", "split", "(", "\" \"", ")", "for", "p", "in", "self", ":", "d_font", "=", "{", "att", ":", "False", "for", "att", "in", "entete", "}", "row_valid", "=", "True", "for", "sub_pattern", "in", "sub_patterns", ":", "found", "=", "False", "for", "att", "in", "entete", ":", "fonction_recherche", "=", "formats", ".", "ASSOCIATION", "[", "att", "]", "[", "1", "]", "attr_found", "=", "bool", "(", "fonction_recherche", "(", "p", "[", "att", "]", ",", "sub_pattern", ")", ")", "if", "attr_found", ":", "found", "=", "True", "d_font", "[", "att", "]", "=", "True", "if", "not", "found", ":", "row_valid", "=", "False", "break", "if", "row_valid", ":", "new_liste", ".", "append", "(", "p", ")", "info", "=", "dict", "(", "self", ".", "get_info", "(", "Id", "=", "p", ".", "Id", ")", ",", "font", "=", "d_font", ")", "self", ".", "infos", "[", "p", ".", "Id", "]", "=", "info", "list", ".", "__init__", "(", "self", ",", "new_liste", ")" ]
Performs a search field by field, using functions defined in formats. Matchs are marked with info[`font`] :param pattern: String to look for :param entete: Fields to look into :return: Nothing. The collection is changed in place
[ "Performs", "a", "search", "field", "by", "field", "using", "functions", "defined", "in", "formats", ".", "Matchs", "are", "marked", "with", "info", "[", "font", "]" ]
train
https://github.com/benoitkugler/abstractDataLibrary/blob/16be28e99837e40287a63803bbfdf67ac1806b7b/pyDLib/Core/groups.py#L102-L132
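A rough sketch of the contract recherche relies on. The real formats.ASSOCIATION table is defined elsewhere in the library; the entry shape and the case-insensitive matcher below are assumptions for illustration only:

# Hypothetical formats.ASSOCIATION entry: field -> (renderer, search_function).
# recherche only uses index [1], a predicate (field_value, sub_pattern) -> truthy.
def search_in_text(value, sub_pattern):
    return sub_pattern.lower() in str(value).lower()

ASSOCIATION = {"nom": (str, search_in_text), "ville": (str, search_in_text)}

# collection.recherche("dupont paris", ["nom", "ville"]) would then keep only the
# rows where every sub-pattern matches at least one of the given fields, recording
# which fields matched in infos[Id]["font"].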
benoitkugler/abstractDataLibrary
pyDLib/Core/groups.py
Collection.extend
python
def extend(self, collection):
    """Merges collections, ensuring uniqueness of Ids."""
    l_ids = set(a.Id for a in self)
    for acces in collection:
        if acces.Id not in l_ids:
            list.append(self, acces)
            info = collection.get_info(Id=acces.Id)
            if info:
                self.infos[acces.Id] = info
[ "def", "extend", "(", "self", ",", "collection", ")", ":", "l_ids", "=", "set", "(", "[", "a", ".", "Id", "for", "a", "in", "self", "]", ")", "for", "acces", "in", "collection", ":", "if", "not", "acces", ".", "Id", "in", "l_ids", ":", "list", ".", "append", "(", "self", ",", "acces", ")", "info", "=", "collection", ".", "get_info", "(", "Id", "=", "acces", ".", "Id", ")", "if", "info", ":", "self", ".", "infos", "[", "acces", ".", "Id", "]", "=", "info" ]
Merges collections. Ensure uniqueness of ids
[ "Merges", "collections", ".", "Ensure", "uniqueness", "of", "ids" ]
train
https://github.com/benoitkugler/abstractDataLibrary/blob/16be28e99837e40287a63803bbfdf67ac1806b7b/pyDLib/Core/groups.py#L135-L143
QunarOPS/qg.core
qg/core/timeutils.py
isotime
python
def isotime(at=None, subsecond=False):
    """Stringify time in ISO 8601 format."""
    if not at:
        at = utcnow()
    st = at.strftime(_ISO8601_TIME_FORMAT
                     if not subsecond
                     else _ISO8601_TIME_FORMAT_SUBSECOND)
    tz = at.tzinfo.tzname(None) if at.tzinfo else 'UTC'
    st += ('Z' if tz == 'UTC' else tz)
    return st
[ "def", "isotime", "(", "at", "=", "None", ",", "subsecond", "=", "False", ")", ":", "if", "not", "at", ":", "at", "=", "utcnow", "(", ")", "st", "=", "at", ".", "strftime", "(", "_ISO8601_TIME_FORMAT", "if", "not", "subsecond", "else", "_ISO8601_TIME_FORMAT_SUBSECOND", ")", "tz", "=", "at", ".", "tzinfo", ".", "tzname", "(", "None", ")", "if", "at", ".", "tzinfo", "else", "'UTC'", "st", "+=", "(", "'Z'", "if", "tz", "==", "'UTC'", "else", "tz", ")", "return", "st" ]
Stringify time in ISO 8601 format.
[ "Stringify", "time", "in", "ISO", "8601", "format", "." ]
train
https://github.com/QunarOPS/qg.core/blob/d5d7e36ea140cfe73e1b1850e8c96960b02a1ed3/qg/core/timeutils.py#L34-L43
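The format constants are defined elsewhere in the module; assuming the usual oslo-style values ('%Y-%m-%dT%H:%M:%S' and a '.%f' subsecond variant), a quick sketch of the output:

import datetime

naive = datetime.datetime(2014, 5, 1, 12, 30, 45)
# isotime(naive)                 -> '2014-05-01T12:30:45Z'  (naive times are labelled UTC)
# isotime(naive, subsecond=True) -> '2014-05-01T12:30:45.000000Z'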
QunarOPS/qg.core
qg/core/timeutils.py
parse_isotime
python
def parse_isotime(timestr):
    """Parse time from ISO 8601 format."""
    try:
        return iso8601.parse_date(timestr)
    except (iso8601.ParseError, TypeError) as e:
        # Normalize both failure modes to ValueError for callers.
        raise ValueError(six.text_type(e))
[ "def", "parse_isotime", "(", "timestr", ")", ":", "try", ":", "return", "iso8601", ".", "parse_date", "(", "timestr", ")", "except", "iso8601", ".", "ParseError", "as", "e", ":", "raise", "ValueError", "(", "six", ".", "text_type", "(", "e", ")", ")", "except", "TypeError", "as", "e", ":", "raise", "ValueError", "(", "six", ".", "text_type", "(", "e", ")", ")" ]
Parse time from ISO 8601 format.
[ "Parse", "time", "from", "ISO", "8601", "format", "." ]
train
https://github.com/QunarOPS/qg.core/blob/d5d7e36ea140cfe73e1b1850e8c96960b02a1ed3/qg/core/timeutils.py#L46-L53
QunarOPS/qg.core
qg/core/timeutils.py
strtime
python
def strtime(at=None, fmt=PERFECT_TIME_FORMAT):
    """Returns formatted utcnow."""
    if not at:
        at = utcnow()
    return at.strftime(fmt)
[ "def", "strtime", "(", "at", "=", "None", ",", "fmt", "=", "PERFECT_TIME_FORMAT", ")", ":", "if", "not", "at", ":", "at", "=", "utcnow", "(", ")", "return", "at", ".", "strftime", "(", "fmt", ")" ]
Returns formatted utcnow.
[ "Returns", "formatted", "utcnow", "." ]
train
https://github.com/QunarOPS/qg.core/blob/d5d7e36ea140cfe73e1b1850e8c96960b02a1ed3/qg/core/timeutils.py#L56-L60
QunarOPS/qg.core
qg/core/timeutils.py
normalize_time
python
def normalize_time(timestamp):
    """Normalize a time in an arbitrary timezone to a naive UTC object."""
    offset = timestamp.utcoffset()
    if offset is None:
        return timestamp
    return timestamp.replace(tzinfo=None) - offset
[ "def", "normalize_time", "(", "timestamp", ")", ":", "offset", "=", "timestamp", ".", "utcoffset", "(", ")", "if", "offset", "is", "None", ":", "return", "timestamp", "return", "timestamp", ".", "replace", "(", "tzinfo", "=", "None", ")", "-", "offset" ]
Normalize time in arbitrary timezone to UTC naive object.
[ "Normalize", "time", "in", "arbitrary", "timezone", "to", "UTC", "naive", "object", "." ]
train
https://github.com/QunarOPS/qg.core/blob/d5d7e36ea140cfe73e1b1850e8c96960b02a1ed3/qg/core/timeutils.py#L68-L73
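A worked example of the offset arithmetic, using a standard-library timezone for illustration:

import datetime

# 12:00 at UTC+02:00 ...
paris = datetime.datetime(2014, 5, 1, 12, 0,
                          tzinfo=datetime.timezone(datetime.timedelta(hours=2)))
# ... utcoffset() is +2h, so normalize_time drops the tzinfo and subtracts the offset:
# normalize_time(paris) == datetime.datetime(2014, 5, 1, 10, 0)   (naive UTC wall time)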
QunarOPS/qg.core
qg/core/timeutils.py
is_older_than
python
def is_older_than(before, seconds):
    """Return True if before is older than seconds."""
    if isinstance(before, six.string_types):
        before = parse_strtime(before).replace(tzinfo=None)
    else:
        before = before.replace(tzinfo=None)
    return utcnow() - before > datetime.timedelta(seconds=seconds)
[ "def", "is_older_than", "(", "before", ",", "seconds", ")", ":", "if", "isinstance", "(", "before", ",", "six", ".", "string_types", ")", ":", "before", "=", "parse_strtime", "(", "before", ")", ".", "replace", "(", "tzinfo", "=", "None", ")", "else", ":", "before", "=", "before", ".", "replace", "(", "tzinfo", "=", "None", ")", "return", "utcnow", "(", ")", "-", "before", ">", "datetime", ".", "timedelta", "(", "seconds", "=", "seconds", ")" ]
Return True if before is older than seconds.
[ "Return", "True", "if", "before", "is", "older", "than", "seconds", "." ]
train
https://github.com/QunarOPS/qg.core/blob/d5d7e36ea140cfe73e1b1850e8c96960b02a1ed3/qg/core/timeutils.py#L76-L83
QunarOPS/qg.core
qg/core/timeutils.py
is_newer_than
python
def is_newer_than(after, seconds):
    """Return True if after is newer than seconds."""
    if isinstance(after, six.string_types):
        after = parse_strtime(after).replace(tzinfo=None)
    else:
        after = after.replace(tzinfo=None)
    return after - utcnow() > datetime.timedelta(seconds=seconds)
[ "def", "is_newer_than", "(", "after", ",", "seconds", ")", ":", "if", "isinstance", "(", "after", ",", "six", ".", "string_types", ")", ":", "after", "=", "parse_strtime", "(", "after", ")", ".", "replace", "(", "tzinfo", "=", "None", ")", "else", ":", "after", "=", "after", ".", "replace", "(", "tzinfo", "=", "None", ")", "return", "after", "-", "utcnow", "(", ")", ">", "datetime", ".", "timedelta", "(", "seconds", "=", "seconds", ")" ]
Return True if after is newer than seconds.
[ "Return", "True", "if", "after", "is", "newer", "than", "seconds", "." ]
train
https://github.com/QunarOPS/qg.core/blob/d5d7e36ea140cfe73e1b1850e8c96960b02a1ed3/qg/core/timeutils.py#L86-L93
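Both helpers compare against the (overridable) utcnow(); a small sketch of their semantics:

import datetime

now = datetime.datetime.utcnow()
# is_older_than(now - datetime.timedelta(seconds=90), 60)  -> True   (90s in the past > 60s)
# is_newer_than(now + datetime.timedelta(seconds=90), 60)  -> True   (90s in the future > 60s)
# is_newer_than(now + datetime.timedelta(seconds=30), 60)  -> False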
QunarOPS/qg.core
qg/core/timeutils.py
utcnow_ts
python
def utcnow_ts():
    """Timestamp version of our utcnow function."""
    if utcnow.override_time is None:
        # NOTE(kgriffs): This is several times faster
        # than going through calendar.timegm(...)
        return int(time.time())
    return calendar.timegm(utcnow().timetuple())
[ "def", "utcnow_ts", "(", ")", ":", "if", "utcnow", ".", "override_time", "is", "None", ":", "# NOTE(kgriffs): This is several times faster", "# than going through calendar.timegm(...)", "return", "int", "(", "time", ".", "time", "(", ")", ")", "return", "calendar", ".", "timegm", "(", "utcnow", "(", ")", ".", "timetuple", "(", ")", ")" ]
Timestamp version of our utcnow function.
[ "Timestamp", "version", "of", "our", "utcnow", "function", "." ]
train
https://github.com/QunarOPS/qg.core/blob/d5d7e36ea140cfe73e1b1850e8c96960b02a1ed3/qg/core/timeutils.py#L96-L103
QunarOPS/qg.core
qg/core/timeutils.py
utcnow
python
def utcnow():
    """Overridable version of utils.utcnow."""
    if utcnow.override_time:
        try:
            return utcnow.override_time.pop(0)
        except AttributeError:
            return utcnow.override_time
    return datetime.datetime.utcnow()
[ "def", "utcnow", "(", ")", ":", "if", "utcnow", ".", "override_time", ":", "try", ":", "return", "utcnow", ".", "override_time", ".", "pop", "(", "0", ")", "except", "AttributeError", ":", "return", "utcnow", ".", "override_time", "return", "datetime", ".", "datetime", ".", "utcnow", "(", ")" ]
Overridable version of utils.utcnow.
[ "Overridable", "version", "of", "utils", ".", "utcnow", "." ]
train
https://github.com/QunarOPS/qg.core/blob/d5d7e36ea140cfe73e1b1850e8c96960b02a1ed3/qg/core/timeutils.py#L106-L113
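The override hook is just a function attribute; tests can pin the clock to a single datetime or to a queue of them. The companion set_time_override/clear_time_override helpers are not shown in this excerpt, so this sketch sets the attribute directly:

import datetime

utcnow.override_time = datetime.datetime(2014, 1, 1)       # fixed clock
# utcnow() -> datetime(2014, 1, 1) on every call (the AttributeError branch: no .pop on datetime)

utcnow.override_time = [datetime.datetime(2014, 1, 1),
                        datetime.datetime(2014, 1, 2)]      # scripted clock
# utcnow() -> 2014-01-01, then 2014-01-02, consumed via list.pop(0)

utcnow.override_time = None                                 # back to the real clock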
QunarOPS/qg.core
qg/core/timeutils.py
advance_time_delta
python
def advance_time_delta(timedelta):
    """Advance overridden time using a datetime.timedelta."""
    assert utcnow.override_time is not None
    try:
        # datetimes are immutable, so write the shifted values back into the
        # list; `dt += timedelta` on the loop variable alone would be a no-op.
        for i, dt in enumerate(utcnow.override_time):
            utcnow.override_time[i] = dt + timedelta
    except TypeError:
        # override_time is a single datetime, not a list of them.
        utcnow.override_time += timedelta
[ "def", "advance_time_delta", "(", "timedelta", ")", ":", "assert", "(", "utcnow", ".", "override_time", "is", "not", "None", ")", "try", ":", "for", "dt", "in", "utcnow", ".", "override_time", ":", "dt", "+=", "timedelta", "except", "TypeError", ":", "utcnow", ".", "override_time", "+=", "timedelta" ]
Advance overridden time using a datetime.timedelta.
[ "Advance", "overridden", "time", "using", "a", "datetime", ".", "timedelta", "." ]
train
https://github.com/QunarOPS/qg.core/blob/d5d7e36ea140cfe73e1b1850e8c96960b02a1ed3/qg/core/timeutils.py#L135-L142
QunarOPS/qg.core
qg/core/timeutils.py
marshall_now
python
def marshall_now(now=None):
    """Make an rpc-safe datetime with microseconds.

    Note: tzinfo is stripped, but not required for relative times.
    """
    if not now:
        now = utcnow()
    return dict(day=now.day, month=now.month, year=now.year, hour=now.hour,
                minute=now.minute, second=now.second,
                microsecond=now.microsecond)
[ "def", "marshall_now", "(", "now", "=", "None", ")", ":", "if", "not", "now", ":", "now", "=", "utcnow", "(", ")", "return", "dict", "(", "day", "=", "now", ".", "day", ",", "month", "=", "now", ".", "month", ",", "year", "=", "now", ".", "year", ",", "hour", "=", "now", ".", "hour", ",", "minute", "=", "now", ".", "minute", ",", "second", "=", "now", ".", "second", ",", "microsecond", "=", "now", ".", "microsecond", ")" ]
Make an rpc-safe datetime with microseconds. Note: tzinfo is stripped, but not required for relative times.
[ "Make", "an", "rpc", "-", "safe", "datetime", "with", "microseconds", "." ]
train
https://github.com/QunarOPS/qg.core/blob/d5d7e36ea140cfe73e1b1850e8c96960b02a1ed3/qg/core/timeutils.py#L155-L164
QunarOPS/qg.core
qg/core/timeutils.py
unmarshall_time
python
def unmarshall_time(tyme):
    """Unmarshall a datetime dict."""
    return datetime.datetime(day=tyme['day'],
                             month=tyme['month'],
                             year=tyme['year'],
                             hour=tyme['hour'],
                             minute=tyme['minute'],
                             second=tyme['second'],
                             microsecond=tyme['microsecond'])
[ "def", "unmarshall_time", "(", "tyme", ")", ":", "return", "datetime", ".", "datetime", "(", "day", "=", "tyme", "[", "'day'", "]", ",", "month", "=", "tyme", "[", "'month'", "]", ",", "year", "=", "tyme", "[", "'year'", "]", ",", "hour", "=", "tyme", "[", "'hour'", "]", ",", "minute", "=", "tyme", "[", "'minute'", "]", ",", "second", "=", "tyme", "[", "'second'", "]", ",", "microsecond", "=", "tyme", "[", "'microsecond'", "]", ")" ]
Unmarshall a datetime dict.
[ "Unmarshall", "a", "datetime", "dict", "." ]
train
https://github.com/QunarOPS/qg.core/blob/d5d7e36ea140cfe73e1b1850e8c96960b02a1ed3/qg/core/timeutils.py#L167-L175
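The two functions are inverses for naive datetimes; a round-trip sketch:

import datetime

dt = datetime.datetime(2014, 5, 1, 12, 30, 45, 123456)
d = marshall_now(dt)
# d == {'day': 1, 'month': 5, 'year': 2014, 'hour': 12,
#       'minute': 30, 'second': 45, 'microsecond': 123456}
assert unmarshall_time(d) == dt  # any tzinfo would have been stripped on the way in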
QunarOPS/qg.core
qg/core/timeutils.py
total_seconds
python
def total_seconds(delta):
    """Return the total seconds of a datetime.timedelta object.

    datetime.timedelta has no total_seconds() method in Python 2.6,
    so fall back to computing it manually.
    """
    try:
        return delta.total_seconds()
    except AttributeError:
        return ((delta.days * 24 * 3600) + delta.seconds +
                float(delta.microseconds) / (10 ** 6))
[ "def", "total_seconds", "(", "delta", ")", ":", "try", ":", "return", "delta", ".", "total_seconds", "(", ")", "except", "AttributeError", ":", "return", "(", "(", "delta", ".", "days", "*", "24", "*", "3600", ")", "+", "delta", ".", "seconds", "+", "float", "(", "delta", ".", "microseconds", ")", "/", "(", "10", "**", "6", ")", ")" ]
Return the total seconds of datetime.timedelta object. Compute total seconds of datetime.timedelta, datetime.timedelta doesn't have method total_seconds in Python2.6, calculate it manually.
[ "Return", "the", "total", "seconds", "of", "datetime", ".", "timedelta", "object", "." ]
train
https://github.com/QunarOPS/qg.core/blob/d5d7e36ea140cfe73e1b1850e8c96960b02a1ed3/qg/core/timeutils.py#L188-L198
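The fallback arithmetic, worked through on a concrete value:

import datetime

delta = datetime.timedelta(days=1, seconds=30, microseconds=500000)
# manual path: 1*24*3600 + 30 + 500000/10**6 = 86400 + 30 + 0.5 = 86430.5
assert total_seconds(delta) == 86430.5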
QunarOPS/qg.core
qg/core/timeutils.py
is_soon
python
def is_soon(dt, window):
    """Determines if a time is going to happen in the next window seconds.

    :param dt: the time
    :param window: minimum seconds to remain to consider the time not soon
    :return: True if expiration is within the given duration
    """
    soon = (utcnow() + datetime.timedelta(seconds=window))
    return normalize_time(dt) <= soon
[ "def", "is_soon", "(", "dt", ",", "window", ")", ":", "soon", "=", "(", "utcnow", "(", ")", "+", "datetime", ".", "timedelta", "(", "seconds", "=", "window", ")", ")", "return", "normalize_time", "(", "dt", ")", "<=", "soon" ]
Determines if time is going to happen in the next window seconds. :params dt: the time :params window: minimum seconds to remain to consider the time not soon :return: True if expiration is within the given duration
[ "Determines", "if", "time", "is", "going", "to", "happen", "in", "the", "next", "window", "seconds", "." ]
train
https://github.com/QunarOPS/qg.core/blob/d5d7e36ea140cfe73e1b1850e8c96960b02a1ed3/qg/core/timeutils.py#L201-L210
tsnaomi/finnsyll
ez_setup.py
download_file_powershell
python
def download_file_powershell(url, target):
    '''
    Download the file at url to target using Powershell (which will
    validate trust). Raise an exception if the command cannot complete.
    '''
    target = os.path.abspath(target)
    cmd = [
        'powershell',
        '-Command',
        '(new-object System.Net.WebClient).DownloadFile(%(url)r, %(target)r)' % vars(),
    ]
    subprocess.check_call(cmd)
[ "def", "download_file_powershell", "(", "url", ",", "target", ")", ":", "target", "=", "os", ".", "path", ".", "abspath", "(", "target", ")", "cmd", "=", "[", "'powershell'", ",", "'-Command'", ",", "'(new-object System.Net.WebClient).DownloadFile(%(url)r, %(target)r)'", "%", "vars", "(", ")", ",", "]", "subprocess", ".", "check_call", "(", "cmd", ")" ]
Download the file at url to target using Powershell (which will validate trust). Raise an exception if the command cannot complete.
[ "Download", "the", "file", "at", "url", "to", "target", "using", "Powershell", "(", "which", "will", "validate", "trust", ")", ".", "Raise", "an", "exception", "if", "the", "command", "cannot", "complete", "." ]
train
https://github.com/tsnaomi/finnsyll/blob/6a42740311688c946a636a3e2304866c7aa041b3/ez_setup.py#L162-L173
tsnaomi/finnsyll
ez_setup.py
download_file_insecure
python
def download_file_insecure(url, target):
    '''
    Use Python to download the file, even though it cannot authenticate the
    connection.
    '''
    try:
        from urllib.request import urlopen
    except ImportError:
        from urllib2 import urlopen
    src = dst = None
    try:
        src = urlopen(url)
        # Read/write all in one block, so we don't create a corrupt file
        # if the download is interrupted.
        data = src.read()
        dst = open(target, 'wb')
        dst.write(data)
    finally:
        if src:
            src.close()
        if dst:
            dst.close()
[ "def", "download_file_insecure", "(", "url", ",", "target", ")", ":", "try", ":", "from", "urllib", ".", "request", "import", "urlopen", "except", "ImportError", ":", "from", "urllib2", "import", "urlopen", "src", "=", "dst", "=", "None", "try", ":", "src", "=", "urlopen", "(", "url", ")", "# Read/write all in one block, so we don't create a corrupt file", "# if the download is interrupted.", "data", "=", "src", ".", "read", "(", ")", "dst", "=", "open", "(", "target", ",", "'wb'", ")", "dst", ".", "write", "(", "data", ")", "finally", ":", "if", "src", ":", "src", ".", "close", "(", ")", "if", "dst", ":", "dst", ".", "close", "(", ")" ]
Use Python to download the file, even though it cannot authenticate the connection.
[ "Use", "Python", "to", "download", "the", "file", "even", "though", "it", "cannot", "authenticate", "the", "connection", "." ]
train
https://github.com/tsnaomi/finnsyll/blob/6a42740311688c946a636a3e2304866c7aa041b3/ez_setup.py#L233-L254
tsnaomi/finnsyll
ez_setup.py
_build_install_args
python
def _build_install_args(options):
    '''
    Build the arguments to 'python setup.py install' on the setuptools package
    '''
    install_args = []
    if options.user_install:
        if sys.version_info < (2, 6):
            log.warn('--user requires Python 2.6 or later')
            raise SystemExit(1)
        install_args.append('--user')
    return install_args
[ "def", "_build_install_args", "(", "options", ")", ":", "install_args", "=", "[", "]", "if", "options", ".", "user_install", ":", "if", "sys", ".", "version_info", "<", "(", "2", ",", "6", ")", ":", "log", ".", "warn", "(", "'--user requires Python 2.6 or later'", ")", "raise", "SystemExit", "(", "1", ")", "install_args", ".", "append", "(", "'--user'", ")", "return", "install_args" ]
Build the arguments to 'python setup.py install' on the setuptools package
[ "Build", "the", "arguments", "to", "python", "setup", ".", "py", "install", "on", "the", "setuptools", "package" ]
train
https://github.com/tsnaomi/finnsyll/blob/6a42740311688c946a636a3e2304866c7aa041b3/ez_setup.py#L345-L355
MarcMeszaros/envitro
envitro/decorators.py
write
python
def write(name, value):
    """Temporarily change or set the environment variable during the execution of a function.

    Args:
        name: The name of the environment variable
        value: A value to set for the environment variable

    Returns:
        The function return value.
    """
    def wrapped(func):
        @functools.wraps(func)
        def _decorator(*args, **kwargs):
            existing_env = core.read(name, allow_none=True)
            core.write(name, value)
            try:
                return func(*args, **kwargs)
            finally:
                # Restore the previous value even if func raises.
                core.write(name, existing_env)
        return _decorator
    return wrapped
[ "def", "write", "(", "name", ",", "value", ")", ":", "def", "wrapped", "(", "func", ")", ":", "@", "functools", ".", "wraps", "(", "func", ")", "def", "_decorator", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "existing_env", "=", "core", ".", "read", "(", "name", ",", "allow_none", "=", "True", ")", "core", ".", "write", "(", "name", ",", "value", ")", "func_val", "=", "func", "(", "*", "args", ",", "*", "*", "kwargs", ")", "core", ".", "write", "(", "name", ",", "existing_env", ")", "return", "func_val", "return", "_decorator", "return", "wrapped" ]
Temporarily change or set the environment variable during the execution of a function. Args: name: The name of the environment variable value: A value to set for the environment variable Returns: The function return value.
[ "Temporarily", "change", "or", "set", "the", "environment", "variable", "during", "the", "execution", "of", "a", "function", "." ]
train
https://github.com/MarcMeszaros/envitro/blob/19e925cd152c08d4db8126542afed35188cafff4/envitro/decorators.py#L17-L36
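Typical use, assuming envitro's core.read/core.write are backed by os.environ (as the decorator module implies); APP_ENV is a hypothetical variable name:

import os

@write('APP_ENV', 'testing')
def run_checks():
    return os.environ.get('APP_ENV')

assert run_checks() == 'testing'
# outside the call, APP_ENV is back to whatever it was before (or unset)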
MarcMeszaros/envitro
envitro/decorators.py
isset
python
def isset(name):
    """Only execute the function if the variable is set.

    Args:
        name: The name of the environment variable

    Returns:
        The function return value or `None` if the function was skipped.
    """
    def wrapped(func):
        @functools.wraps(func)
        def _decorator(*args, **kwargs):
            if core.isset(name):
                return func(*args, **kwargs)
        return _decorator
    return wrapped
[ "def", "isset", "(", "name", ")", ":", "def", "wrapped", "(", "func", ")", ":", "@", "functools", ".", "wraps", "(", "func", ")", "def", "_decorator", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "core", ".", "isset", "(", "name", ")", ":", "return", "func", "(", "*", "args", ",", "*", "*", "kwargs", ")", "return", "_decorator", "return", "wrapped" ]
Only execute the function if the variable is set. Args: name: The name of the environment variable Returns: The function return value or `None` if the function was skipped.
[ "Only", "execute", "the", "function", "if", "the", "variable", "is", "set", "." ]
train
https://github.com/MarcMeszaros/envitro/blob/19e925cd152c08d4db8126542afed35188cafff4/envitro/decorators.py#L44-L59
MarcMeszaros/envitro
envitro/decorators.py
bool
python
def bool(name, execute_bool=True, default=None):
    """Only execute the function if the boolean variable is set.

    Args:
        name: The name of the environment variable
        execute_bool: The boolean value to execute the function on
        default: The default value if the environment variable is not set
            (respects `execute_bool`)

    Returns:
        The function return value or `None` if the function was skipped.
    """
    def wrapped(func):
        @functools.wraps(func)
        def _decorator(*args, **kwargs):
            if core.isset(name) and core.bool(name) == execute_bool:
                return func(*args, **kwargs)
            elif default is not None and default == execute_bool:
                return func(*args, **kwargs)
        return _decorator
    return wrapped
[ "def", "bool", "(", "name", ",", "execute_bool", "=", "True", ",", "default", "=", "None", ")", ":", "def", "wrapped", "(", "func", ")", ":", "@", "functools", ".", "wraps", "(", "func", ")", "def", "_decorator", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "core", ".", "isset", "(", "name", ")", "and", "core", ".", "bool", "(", "name", ")", "==", "execute_bool", ":", "return", "func", "(", "*", "args", ",", "*", "*", "kwargs", ")", "elif", "default", "is", "not", "None", "and", "default", "==", "execute_bool", ":", "return", "func", "(", "*", "args", ",", "*", "*", "kwargs", ")", "return", "_decorator", "return", "wrapped" ]
Only execute the function if the boolean variable is set. Args: name: The name of the environment variable execute_bool: The boolean value to execute the function on default: The default value if the environment variable is not set (respects `execute_bool`) Returns: The function return value or `None` if the function was skipped.
[ "Only", "execute", "the", "function", "if", "the", "boolean", "variable", "is", "set", "." ]
train
https://github.com/MarcMeszaros/envitro/blob/19e925cd152c08d4db8126542afed35188cafff4/envitro/decorators.py#L62-L81
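A usage sketch (ENABLE_CLEANUP is a hypothetical variable; note this decorator shadows the builtin bool, so it is usually referenced through its module):

# Run cleanup only when ENABLE_CLEANUP is set and truthy,
# defaulting to skipping it when the variable is absent.
@bool('ENABLE_CLEANUP', execute_bool=True, default=False)
def cleanup():
    print('cleaning up')

cleanup()  # returns None (skipped) unless ENABLE_CLEANUP holds a truthy value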
kappius/pyheaderfile
pyheaderfile/drive.py
GSheet.read_cell
python
def read_cell(self, x, y):
    """Reads the cell at position x+1 and y+1; returns its value.

    :param x: line index
    :param y: column index
    :return: {header: value}
    """
    if isinstance(self.header[y], tuple):
        header = self.header[y][0]
    else:
        header = self.header[y]
    x += 1
    y += 1
    value = self._sheet.cell(x, y).value
    if self.strip:
        # Strip whitespace before returning; the original stripped in place
        # and fell through without returning anything in this branch.
        value = value.strip()
    return {header: value}
[ "def", "read_cell", "(", "self", ",", "x", ",", "y", ")", ":", "if", "isinstance", "(", "self", ".", "header", "[", "y", "]", ",", "tuple", ")", ":", "header", "=", "self", ".", "header", "[", "y", "]", "[", "0", "]", "else", ":", "header", "=", "self", ".", "header", "[", "y", "]", "x", "+=", "1", "y", "+=", "1", "if", "self", ".", "strip", ":", "self", ".", "_sheet", ".", "cell", "(", "x", ",", "y", ")", ".", "value", "=", "self", ".", "_sheet", ".", "cell", "(", "x", ",", "y", ")", ".", "value", ".", "strip", "(", ")", "else", ":", "return", "{", "header", ":", "self", ".", "_sheet", ".", "cell", "(", "x", ",", "y", ")", ".", "value", "}" ]
Reads the cell at position x+1 and y+1; return value :param x: line index :param y: coll index :return: {header: value}
[ "Reads", "the", "cell", "at", "position", "x", "+", "1", "and", "y", "+", "1", ";", "return", "value", ":", "param", "x", ":", "line", "index", ":", "param", "y", ":", "coll", "index", ":", "return", ":", "{", "header", ":", "value", "}" ]
train
https://github.com/kappius/pyheaderfile/blob/8d587dadae538adcec527fd8e74ad89ed5e2006a/pyheaderfile/drive.py#L33-L49
kappius/pyheaderfile
pyheaderfile/drive.py
GSheet.write_cell
python
def write_cell(self, x, y, value):
    """Writes value into the cell at position x+1 and y+1.

    :param x: line index
    :param y: column index
    :param value: value to be written
    """
    x += 1
    y += 1
    self._sheet.update_cell(x, y, value)
[ "def", "write_cell", "(", "self", ",", "x", ",", "y", ",", "value", ")", ":", "x", "+=", "1", "y", "+=", "1", "self", ".", "_sheet", ".", "update_cell", "(", "x", ",", "y", ",", "value", ")" ]
Writing value in the cell of x+1 and y+1 position :param x: line index :param y: coll index :param value: value to be written :return:
[ "Writing", "value", "in", "the", "cell", "of", "x", "+", "1", "and", "y", "+", "1", "position", ":", "param", "x", ":", "line", "index", ":", "param", "y", ":", "coll", "index", ":", "param", "value", ":", "value", "to", "be", "written", ":", "return", ":" ]
train
https://github.com/kappius/pyheaderfile/blob/8d587dadae538adcec527fd8e74ad89ed5e2006a/pyheaderfile/drive.py#L51-L61
kappius/pyheaderfile
pyheaderfile/drive.py
GSheet._open
python
def _open(self):
    """Open the file and get its sheets."""
    if not hasattr(self, '_file'):
        self._file = self.gc.open(self.name)
    self.sheet_names = self._file.worksheets()
[ "def", "_open", "(", "self", ")", ":", "if", "not", "hasattr", "(", "self", ",", "'_file'", ")", ":", "self", ".", "_file", "=", "self", ".", "gc", ".", "open", "(", "self", ".", "name", ")", "self", ".", "sheet_names", "=", "self", ".", "_file", ".", "worksheets", "(", ")" ]
Open the file; get sheets :return:
[ "Open", "the", "file", ";", "get", "sheets", ":", "return", ":" ]
train
https://github.com/kappius/pyheaderfile/blob/8d587dadae538adcec527fd8e74ad89ed5e2006a/pyheaderfile/drive.py#L73-L80
kappius/pyheaderfile
pyheaderfile/drive.py
GSheet._open_sheet
python
def _open_sheet(self):
    """Read the sheet: get the header values and the number of columns and rows."""
    if self.sheet_name and not self.header:
        self._sheet = self._file.worksheet(self.sheet_name.title)
        self.ncols = self._sheet.col_count
        self.nrows = self._sheet.row_count
        for i in range(1, self.ncols + 1):
            self.header = self.header + [self._sheet.cell(1, i).value]
[ "def", "_open_sheet", "(", "self", ")", ":", "if", "self", ".", "sheet_name", "and", "not", "self", ".", "header", ":", "self", ".", "_sheet", "=", "self", ".", "_file", ".", "worksheet", "(", "self", ".", "sheet_name", ".", "title", ")", "self", ".", "ncols", "=", "self", ".", "_sheet", ".", "col_count", "self", ".", "nrows", "=", "self", ".", "_sheet", ".", "row_count", "for", "i", "in", "range", "(", "1", ",", "self", ".", "ncols", "+", "1", ")", ":", "self", ".", "header", "=", "self", ".", "header", "+", "[", "self", ".", "_sheet", ".", "cell", "(", "1", ",", "i", ")", ".", "value", "]" ]
Read the sheet, get value the header, get number columns and rows :return:
[ "Read", "the", "sheet", "get", "value", "the", "header", "get", "number", "columns", "and", "rows", ":", "return", ":" ]
train
https://github.com/kappius/pyheaderfile/blob/8d587dadae538adcec527fd8e74ad89ed5e2006a/pyheaderfile/drive.py#L82-L92
kappius/pyheaderfile
pyheaderfile/drive.py
GSheet._import
python
def _import(self):
    """Perform the lazy imports, then log in."""
    import os.path
    import gspread
    self.path = os.path
    self.gspread = gspread
    self._login()
[ "def", "_import", "(", "self", ")", ":", "import", "os", ".", "path", "import", "gspread", "self", ".", "path", "=", "os", ".", "path", "self", ".", "gspread", "=", "gspread", "self", ".", "_login", "(", ")" ]
Makes imports :return:
[ "Makes", "imports", ":", "return", ":" ]
train
https://github.com/kappius/pyheaderfile/blob/8d587dadae538adcec527fd8e74ad89ed5e2006a/pyheaderfile/drive.py#L101-L110
kappius/pyheaderfile
pyheaderfile/drive.py
GSheet._login
python
def _login(self):
    """Log in with your Google account."""
    # TODO(dmvieira) login changed to oauth2
    self.gc = self.gspread.login(self.email, self.password)
[ "def", "_login", "(", "self", ")", ":", "# TODO(dmvieira) login changed to oauth2", "self", ".", "gc", "=", "self", ".", "gspread", ".", "login", "(", "self", ".", "email", ",", "self", ".", "password", ")" ]
Login with your Google account :return:
[ "Login", "with", "your", "Google", "account", ":", "return", ":" ]
train
https://github.com/kappius/pyheaderfile/blob/8d587dadae538adcec527fd8e74ad89ed5e2006a/pyheaderfile/drive.py#L112-L118
benoitkugler/abstractDataLibrary
pyDLib/GUI/list_views.py
abstractModel.flags
python
def flags(self, index: QModelIndex):
    """All fields are selectable."""
    if self.IS_EDITABLE and self.header[index.column()] in self.EDITABLE_FIELDS:
        return Qt.ItemIsEnabled | Qt.ItemIsSelectable | Qt.ItemIsEditable
    else:
        return super().flags(index) | Qt.ItemIsSelectable
[ "def", "flags", "(", "self", ",", "index", ":", "QModelIndex", ")", ":", "if", "self", ".", "IS_EDITABLE", "and", "self", ".", "header", "[", "index", ".", "column", "(", ")", "]", "in", "self", ".", "EDITABLE_FIELDS", ":", "return", "Qt", ".", "ItemIsEnabled", "|", "Qt", ".", "ItemIsSelectable", "|", "Qt", ".", "ItemIsEditable", "else", ":", "return", "super", "(", ")", ".", "flags", "(", "index", ")", "|", "Qt", ".", "ItemIsSelectable" ]
All fields are selectable
[ "All", "fields", "are", "selectable" ]
train
https://github.com/benoitkugler/abstractDataLibrary/blob/16be28e99837e40287a63803bbfdf67ac1806b7b/pyDLib/GUI/list_views.py#L118-L123
benoitkugler/abstractDataLibrary
pyDLib/GUI/list_views.py
abstractModel.sort
python
def sort(self, section: int, order=None):
    """Order is defined by the current state of sorting."""
    attr = self.header[section]
    old_i, old_sort = self.sort_state
    self.beginResetModel()
    if section == old_i:
        self.collection.sort(attr, not old_sort)
        self.sort_state = (section, not old_sort)
    else:
        self.collection.sort(attr, True)
        self.sort_state = (section, True)
    self.endResetModel()
[ "def", "sort", "(", "self", ",", "section", ":", "int", ",", "order", "=", "None", ")", ":", "attr", "=", "self", ".", "header", "[", "section", "]", "old_i", ",", "old_sort", "=", "self", ".", "sort_state", "self", ".", "beginResetModel", "(", ")", "if", "section", "==", "old_i", ":", "self", ".", "collection", ".", "sort", "(", "attr", ",", "not", "old_sort", ")", "self", ".", "sort_state", "=", "(", "section", ",", "not", "old_sort", ")", "else", ":", "self", ".", "collection", ".", "sort", "(", "attr", ",", "True", ")", "self", ".", "sort_state", "=", "(", "section", ",", "True", ")", "self", ".", "endResetModel", "(", ")" ]
Order is defined by the current state of sorting
[ "Order", "is", "defined", "by", "the", "current", "state", "of", "sorting" ]
train
https://github.com/benoitkugler/abstractDataLibrary/blob/16be28e99837e40287a63803bbfdf67ac1806b7b/pyDLib/GUI/list_views.py#L125-L136
benoitkugler/abstractDataLibrary
pyDLib/GUI/list_views.py
abstractModel.remove_line
python
def remove_line(self, section):
    """Base implementation just pops the item from the collection.
    Re-implement to add global behaviour.
    """
    self.beginResetModel()
    self.collection.pop(section)
    self.endResetModel()
[ "def", "remove_line", "(", "self", ",", "section", ")", ":", "self", ".", "beginResetModel", "(", ")", "self", ".", "collection", ".", "pop", "(", "section", ")", "self", ".", "endResetModel", "(", ")" ]
Base implementation just pops the item from collection. Re-implements to add global behaviour
[ "Base", "implementation", "just", "pops", "the", "item", "from", "collection", ".", "Re", "-", "implements", "to", "add", "global", "behaviour" ]
train
https://github.com/benoitkugler/abstractDataLibrary/blob/16be28e99837e40287a63803bbfdf67ac1806b7b/pyDLib/GUI/list_views.py#L139-L145
benoitkugler/abstractDataLibrary
pyDLib/GUI/list_views.py
abstractModel._update
python
def _update(self):
    """Emit dataChanged signal on all cells."""
    self.dataChanged.emit(self.createIndex(0, 0),
                          self.createIndex(len(self.collection),
                                           len(self.header)))
[ "def", "_update", "(", "self", ")", ":", "self", ".", "dataChanged", ".", "emit", "(", "self", ".", "createIndex", "(", "0", ",", "0", ")", ",", "self", ".", "createIndex", "(", "len", "(", "self", ".", "collection", ")", ",", "len", "(", "self", ".", "header", ")", ")", ")" ]
Emit dataChanged signal on all cells
[ "Emit", "dataChanged", "signal", "on", "all", "cells" ]
train
https://github.com/benoitkugler/abstractDataLibrary/blob/16be28e99837e40287a63803bbfdf67ac1806b7b/pyDLib/GUI/list_views.py#L147-L150
benoitkugler/abstractDataLibrary
pyDLib/GUI/list_views.py
abstractModel.get_item
python
def get_item(self, index):
    """Acces shortcut.

    :param index: Number of row or index of cell
    :return: Dict-like item
    """
    row = index.row() if hasattr(index, "row") else index
    try:
        return self.collection[row]
    except IndexError:  # invalid index, for example
        return None
[ "def", "get_item", "(", "self", ",", "index", ")", ":", "row", "=", "index", ".", "row", "(", ")", "if", "hasattr", "(", "index", ",", "\"row\"", ")", "else", "index", "try", ":", "return", "self", ".", "collection", "[", "row", "]", "except", "IndexError", ":", "# invalid index for exemple", "return", "None" ]
Acces shortcut :param index: Number of row or index of cell :return: Dict-like item
[ "Acces", "shortcut" ]
train
https://github.com/benoitkugler/abstractDataLibrary/blob/16be28e99837e40287a63803bbfdf67ac1806b7b/pyDLib/GUI/list_views.py#L157-L167
benoitkugler/abstractDataLibrary
pyDLib/GUI/list_views.py
abstractModel.set_collection
python
def set_collection(self, collection):
    """Reset sort state, set collection and emit modelReset signal."""
    self.beginResetModel()
    self.collection = collection
    self.sort_state = (-1, False)
    self.endResetModel()
[ "def", "set_collection", "(", "self", ",", "collection", ")", ":", "self", ".", "beginResetModel", "(", ")", "self", ".", "collection", "=", "collection", "self", ".", "sort_state", "=", "(", "-", "1", ",", "False", ")", "self", ".", "endResetModel", "(", ")" ]
Reset sort state, set collection and emit resetModel signal
[ "Reset", "sort", "state", "set", "collection", "and", "emit", "resetModel", "signal" ]
train
https://github.com/benoitkugler/abstractDataLibrary/blob/16be28e99837e40287a63803bbfdf67ac1806b7b/pyDLib/GUI/list_views.py#L169-L174
benoitkugler/abstractDataLibrary
pyDLib/GUI/list_views.py
InternalDataModel.set_item
python
def set_item(self, index, new_item):
    """Changes the item at index in the collection. Emits dataChanged signal.

    :param index: Number of row or index of cell
    :param new_item: Dict-like object
    """
    row = index.row() if hasattr(index, "row") else index
    self.collection[row] = new_item
    # The changed range spans the columns of one row, so the bottom-right
    # index should use the column count (the original used rowCount here).
    self.dataChanged.emit(self.index(row, 0),
                          self.index(row, self.columnCount() - 1))
[ "def", "set_item", "(", "self", ",", "index", ",", "new_item", ")", ":", "row", "=", "index", ".", "row", "(", ")", "if", "hasattr", "(", "index", ",", "\"row\"", ")", "else", "index", "self", ".", "collection", "[", "row", "]", "=", "new_item", "self", ".", "dataChanged", ".", "emit", "(", "self", ".", "index", "(", "row", ",", "0", ")", ",", "self", ".", "index", "(", "row", ",", "self", ".", "rowCount", "(", ")", "-", "1", ")", ")" ]
Changes item at index in collection. Emit dataChanged signal. :param index: Number of row or index of cell :param new_item: Dict-like object
[ "Changes", "item", "at", "index", "in", "collection", ".", "Emit", "dataChanged", "signal", "." ]
train
https://github.com/benoitkugler/abstractDataLibrary/blob/16be28e99837e40287a63803bbfdf67ac1806b7b/pyDLib/GUI/list_views.py#L191-L200
benoitkugler/abstractDataLibrary
pyDLib/GUI/list_views.py
ExternalDataModel.set_data
python
def set_data(self, index, value):
    """Uses the given data setter, and emits the modelReset signal."""
    acces, field = self.get_item(index), self.header[index.column()]
    self.beginResetModel()
    self.set_data_hook(acces, field, value)
    self.endResetModel()
[ "def", "set_data", "(", "self", ",", "index", ",", "value", ")", ":", "acces", ",", "field", "=", "self", ".", "get_item", "(", "index", ")", ",", "self", ".", "header", "[", "index", ".", "column", "(", ")", "]", "self", ".", "beginResetModel", "(", ")", "self", ".", "set_data_hook", "(", "acces", ",", "field", ",", "value", ")", "self", ".", "endResetModel", "(", ")" ]
Uses given data setter, and emit modelReset signal
[ "Uses", "given", "data", "setter", "and", "emit", "modelReset", "signal" ]
train
https://github.com/benoitkugler/abstractDataLibrary/blob/16be28e99837e40287a63803bbfdf67ac1806b7b/pyDLib/GUI/list_views.py#L223-L228
benoitkugler/abstractDataLibrary
pyDLib/GUI/list_views.py
MultiSelectModel._set_id
python
def _set_id(self, Id, is_added, index):
    """Update selected_ids and emit dataChanged."""
    if is_added:
        self.selected_ids.add(Id)
    else:
        self.selected_ids.remove(Id)
    self.dataChanged.emit(index, index)
[ "def", "_set_id", "(", "self", ",", "Id", ",", "is_added", ",", "index", ")", ":", "if", "is_added", ":", "self", ".", "selected_ids", ".", "add", "(", "Id", ")", "else", ":", "self", ".", "selected_ids", ".", "remove", "(", "Id", ")", "self", ".", "dataChanged", ".", "emit", "(", "index", ",", "index", ")" ]
Update selected_ids and emit dataChanged
[ "Update", "selected_ids", "and", "emit", "dataChanged" ]
train
https://github.com/benoitkugler/abstractDataLibrary/blob/16be28e99837e40287a63803bbfdf67ac1806b7b/pyDLib/GUI/list_views.py#L259-L265
benoitkugler/abstractDataLibrary
pyDLib/GUI/list_views.py
MultiSelectModel.setData
python
def setData(self, index: QModelIndex, value, role=None):
    """Update selected_ids on click on the index cell."""
    if not (index.isValid() and role == Qt.CheckStateRole):
        return False
    c_id = self.get_item(index).Id
    self._set_id(c_id, value == Qt.Checked, index)
    return True
[ "def", "setData", "(", "self", ",", "index", ":", "QModelIndex", ",", "value", ",", "role", "=", "None", ")", ":", "if", "not", "(", "index", ".", "isValid", "(", ")", "and", "role", "==", "Qt", ".", "CheckStateRole", ")", ":", "return", "False", "c_id", "=", "self", ".", "get_item", "(", "index", ")", ".", "Id", "self", ".", "_set_id", "(", "c_id", ",", "value", "==", "Qt", ".", "Checked", ",", "index", ")", "return", "True" ]
Update selected_ids on click on index cell.
[ "Update", "selected_ids", "on", "click", "on", "index", "cell", "." ]
train
https://github.com/benoitkugler/abstractDataLibrary/blob/16be28e99837e40287a63803bbfdf67ac1806b7b/pyDLib/GUI/list_views.py#L267-L273
benoitkugler/abstractDataLibrary
pyDLib/GUI/list_views.py
MultiSelectModel.set_by_Id
python
def set_by_Id(self, Id, is_added):
    """Update selected_ids with the given Id."""
    row = self.collection.index_from_id(Id)
    if row is None:
        return
    self._set_id(Id, is_added, self.index(row, 0))
[ "def", "set_by_Id", "(", "self", ",", "Id", ",", "is_added", ")", ":", "row", "=", "self", ".", "collection", ".", "index_from_id", "(", "Id", ")", "if", "row", "is", "None", ":", "return", "self", ".", "_set_id", "(", "Id", ",", "is_added", ",", "self", ".", "index", "(", "row", ",", "0", ")", ")" ]
Update selected_ids with given Id
[ "Update", "selected_ids", "with", "given", "Id" ]
train
https://github.com/benoitkugler/abstractDataLibrary/blob/16be28e99837e40287a63803bbfdf67ac1806b7b/pyDLib/GUI/list_views.py#L275-L280
benoitkugler/abstractDataLibrary
pyDLib/GUI/list_views.py
abstractList._setup_delegate
def _setup_delegate(self): """Add resize behavior on edit""" delegate = self.DELEGATE_CLASS(self) self.setItemDelegate(delegate) delegate.sizeHintChanged.connect( lambda index: self.resizeRowToContents(index.row())) if self.RESIZE_COLUMN: delegate.sizeHintChanged.connect( lambda index: self.resizeColumnToContents(index.column())) delegate.closeEditor.connect( lambda ed: self.resizeRowToContents(delegate.row_done_))
python
def _setup_delegate(self): """Add resize behavior on edit""" delegate = self.DELEGATE_CLASS(self) self.setItemDelegate(delegate) delegate.sizeHintChanged.connect( lambda index: self.resizeRowToContents(index.row())) if self.RESIZE_COLUMN: delegate.sizeHintChanged.connect( lambda index: self.resizeColumnToContents(index.column())) delegate.closeEditor.connect( lambda ed: self.resizeRowToContents(delegate.row_done_))
[ "def", "_setup_delegate", "(", "self", ")", ":", "delegate", "=", "self", ".", "DELEGATE_CLASS", "(", "self", ")", "self", ".", "setItemDelegate", "(", "delegate", ")", "delegate", ".", "sizeHintChanged", ".", "connect", "(", "lambda", "index", ":", "self", ".", "resizeRowToContents", "(", "index", ".", "row", "(", ")", ")", ")", "if", "self", ".", "RESIZE_COLUMN", ":", "delegate", ".", "sizeHintChanged", ".", "connect", "(", "lambda", "index", ":", "self", ".", "resizeColumnToContents", "(", "index", ".", "column", "(", ")", ")", ")", "delegate", ".", "closeEditor", ".", "connect", "(", "lambda", "ed", ":", "self", ".", "resizeRowToContents", "(", "delegate", ".", "row_done_", ")", ")" ]
Add resize behavior on edit
[ "Add", "resize", "behavior", "on", "edit" ]
train
https://github.com/benoitkugler/abstractDataLibrary/blob/16be28e99837e40287a63803bbfdf67ac1806b7b/pyDLib/GUI/list_views.py#L346-L356
benoitkugler/abstractDataLibrary
pyDLib/GUI/list_views.py
abstractList._draw_placeholder
def _draw_placeholder(self): """To be used in QTreeView""" if self.model().rowCount() == 0: painter = QPainter(self.viewport()) painter.setFont(_custom_font(is_italic=True)) painter.drawText(self.rect().adjusted(0, 0, -5, -5), Qt.AlignCenter | Qt.TextWordWrap, self.PLACEHOLDER)
python
def _draw_placeholder(self): """To be used in QTreeView""" if self.model().rowCount() == 0: painter = QPainter(self.viewport()) painter.setFont(_custom_font(is_italic=True)) painter.drawText(self.rect().adjusted(0, 0, -5, -5), Qt.AlignCenter | Qt.TextWordWrap, self.PLACEHOLDER)
[ "def", "_draw_placeholder", "(", "self", ")", ":", "if", "self", ".", "model", "(", ")", ".", "rowCount", "(", ")", "==", "0", ":", "painter", "=", "QPainter", "(", "self", ".", "viewport", "(", ")", ")", "painter", ".", "setFont", "(", "_custom_font", "(", "is_italic", "=", "True", ")", ")", "painter", ".", "drawText", "(", "self", ".", "rect", "(", ")", ".", "adjusted", "(", "0", ",", "0", ",", "-", "5", ",", "-", "5", ")", ",", "Qt", ".", "AlignCenter", "|", "Qt", ".", "TextWordWrap", ",", "self", ".", "PLACEHOLDER", ")" ]
To be used in QTreeView
[ "To", "be", "used", "in", "QTreeView" ]
train
https://github.com/benoitkugler/abstractDataLibrary/blob/16be28e99837e40287a63803bbfdf67ac1806b7b/pyDLib/GUI/list_views.py#L361-L367
benoitkugler/abstractDataLibrary
pyDLib/GUI/list_views.py
abstractList.get_current_item
def get_current_item(self): """Returns (first) selected item or None""" l = self.selectedIndexes() if len(l) > 0: return self.model().get_item(l[0])
python
def get_current_item(self): """Returns (first) selected item or None""" l = self.selectedIndexes() if len(l) > 0: return self.model().get_item(l[0])
[ "def", "get_current_item", "(", "self", ")", ":", "l", "=", "self", ".", "selectedIndexes", "(", ")", "if", "len", "(", "l", ")", ">", "0", ":", "return", "self", ".", "model", "(", ")", ".", "get_item", "(", "l", "[", "0", "]", ")" ]
Returns (first) selected item or None
[ "Returns", "(", "first", ")", "selected", "item", "or", "None" ]
train
https://github.com/benoitkugler/abstractDataLibrary/blob/16be28e99837e40287a63803bbfdf67ac1806b7b/pyDLib/GUI/list_views.py#L391-L395
benoitkugler/abstractDataLibrary
pyDLib/GUI/list_views.py
MultiSelectList.model_from_list
def model_from_list(l, header): """Return a model with a collection from a list of entries""" col = groups.sortableListe(PseudoAccesCategorie(n) for n in l) return MultiSelectModel(col, header)
python
def model_from_list(l, header): """Return a model with a collection from a list of entries""" col = groups.sortableListe(PseudoAccesCategorie(n) for n in l) return MultiSelectModel(col, header)
[ "def", "model_from_list", "(", "l", ",", "header", ")", ":", "col", "=", "groups", ".", "sortableListe", "(", "PseudoAccesCategorie", "(", "n", ")", "for", "n", "in", "l", ")", "return", "MultiSelectModel", "(", "col", ",", "header", ")" ]
Return a model with a collection from a list of entries
[ "Return", "a", "model", "with", "a", "collection", "from", "a", "list", "of", "entries" ]
train
https://github.com/benoitkugler/abstractDataLibrary/blob/16be28e99837e40287a63803bbfdf67ac1806b7b/pyDLib/GUI/list_views.py#L409-L412
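A hedged usage sketch for model_from_list; it assumes, as the code suggests, that PseudoAccesCategorie uses the raw entry as the item's Id, so the entries double as selection keys:

model = model_from_list(["rouge", "vert", "bleu"], "Couleur")   # header is passed through unchanged
model.set_by_Id("rouge", True)
print(sorted(model.selected_ids))   # -> ['rouge'], under the Id assumption above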
runfalk/loopialib
loopialib/client.py
_parse_status_code
def _parse_status_code(response): """ Return error string code if the response is an error, otherwise ``"OK"`` """ # This happens when a status response is expected if isinstance(response, string_types): return response # This happens when a list of structs are expected is_single_list = isinstance(response, list) and len(response) == 1 if is_single_list and isinstance(response[0], string_types): return response[0] # This happens when a struct of any kind is returned return "OK"
python
def _parse_status_code(response): """ Return error string code if the response is an error, otherwise ``"OK"`` """ # This happens when a status response is expected if isinstance(response, string_types): return response # This happens when a list of structs are expected is_single_list = isinstance(response, list) and len(response) == 1 if is_single_list and isinstance(response[0], string_types): return response[0] # This happens when a struct of any kind is returned return "OK"
[ "def", "_parse_status_code", "(", "response", ")", ":", "# This happens when a status response is expected", "if", "isinstance", "(", "response", ",", "string_types", ")", ":", "return", "response", "# This happens when a list of structs are expected", "is_single_list", "=", "isinstance", "(", "response", ",", "list", ")", "and", "len", "(", "response", ")", "==", "1", "if", "is_single_list", "and", "isinstance", "(", "response", "[", "0", "]", ",", "string_types", ")", ":", "return", "response", "[", "0", "]", "# This happens when a struct of any kind is returned", "return", "\"OK\"" ]
Return error string code if the response is an error, otherwise ``"OK"``
[ "Return", "error", "string", "code", "if", "the", "response", "is", "an", "error", "otherwise", "OK" ]
train
https://github.com/runfalk/loopialib/blob/58d99c56b0e9eb62f40d76b7b4ad5fdd4b079155/loopialib/client.py#L6-L21
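The three branches of _parse_status_code map onto the three response shapes the Loopia XML-RPC API can produce; the inputs below are illustrative rather than real API traffic:

assert _parse_status_code("AUTH_ERROR") == "AUTH_ERROR"          # bare status string
assert _parse_status_code(["UNKNOWN_ERROR"]) == "UNKNOWN_ERROR"  # single string wrapped in a list
assert _parse_status_code([{"record_id": 1}]) == "OK"            # list of structs means success
assert _parse_status_code({"domain": "example.com"}) == "OK"     # any other struct means success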
runfalk/loopialib
loopialib/client.py
Loopia.remove_zone_record
def remove_zone_record(self, id, domain, subdomain=None): """ Remove the zone record with the given ID that belongs to the given domain and sub domain. If no sub domain is given the wildcard sub-domain is assumed. """ if subdomain is None: subdomain = "@" _validate_int("id", id) self._call("removeZoneRecord", domain, subdomain, id)
python
def remove_zone_record(self, id, domain, subdomain=None): """ Remove the zone record with the given ID that belongs to the given domain and sub domain. If no sub domain is given the wildcard sub-domain is assumed. """ if subdomain is None: subdomain = "@" _validate_int("id", id) self._call("removeZoneRecord", domain, subdomain, id)
[ "def", "remove_zone_record", "(", "self", ",", "id", ",", "domain", ",", "subdomain", "=", "None", ")", ":", "if", "subdomain", "is", "None", ":", "subdomain", "=", "\"@\"", "_validate_int", "(", "\"id\"", ",", "id", ")", "self", ".", "_call", "(", "\"removeZoneRecord\"", ",", "domain", ",", "subdomain", ",", "id", ")" ]
Remove the zone record with the given ID that belongs to the given domain and sub domain. If no sub domain is given the wildcard sub-domain is assumed.
[ "Remove", "the", "zone", "record", "with", "the", "given", "ID", "that", "belongs", "to", "the", "given", "domain", "and", "sub", "domain", ".", "If", "no", "sub", "domain", "is", "given", "the", "wildcard", "sub", "-", "domain", "is", "assumed", "." ]
train
https://github.com/runfalk/loopialib/blob/58d99c56b0e9eb62f40d76b7b4ad5fdd4b079155/loopialib/client.py#L100-L112
plandes/actioncli
src/python/zensols/actioncli/factory.py
ClassImporter.parse_module_class
def parse_module_class(self): """Parse the module and class name part of the fully qualified class name. """ cname = self.class_name match = re.match(self.CLASS_REGEX, cname) if not match: raise ValueError(f'not a fully qualified class name: {cname}') return match.groups()
python
def parse_module_class(self): """Parse the module and class name part of the fully qualified class name. """ cname = self.class_name match = re.match(self.CLASS_REGEX, cname) if not match: raise ValueError(f'not a fully qualified class name: {cname}') return match.groups()
[ "def", "parse_module_class", "(", "self", ")", ":", "cname", "=", "self", ".", "class_name", "match", "=", "re", ".", "match", "(", "self", ".", "CLASS_REGEX", ",", "cname", ")", "if", "not", "match", ":", "raise", "ValueError", "(", "f'not a fully qualified class name: {cname}'", ")", "return", "match", ".", "groups", "(", ")" ]
Parse the module and class name part of the fully qualified class name.
[ "Parse", "the", "module", "and", "class", "name", "part", "of", "the", "fully", "qualified", "class", "name", "." ]
train
https://github.com/plandes/actioncli/blob/d1c4ea27e6f3394b30a1652ddd4b916160662773/src/python/zensols/actioncli/factory.py#L33-L41
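CLASS_REGEX itself is defined elsewhere in the module and not shown here, but a pattern of roughly this shape reproduces the observed behaviour (illustrative, not the actual constant):

import re
CLASS_REGEX = re.compile(r'^(.+)\.(.+?)$')   # hypothetical: greedy module part, trailing class part
match = CLASS_REGEX.match('zensols.actioncli.factory.ClassImporter')
print(match.groups())   # -> ('zensols.actioncli.factory', 'ClassImporter')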
plandes/actioncli
src/python/zensols/actioncli/factory.py
ClassImporter.get_module_class
def get_module_class(self): """Return the module and class as a tuple of the given class in the initializer. :param reload: if ``True`` then reload the module before returning the class """ pkg, cname = self.parse_module_class() logger.debug(f'pkg: {pkg}, class: {cname}') pkg = pkg.split('.') mod = reduce(lambda m, n: getattr(m, n), pkg[1:], __import__(pkg[0])) logger.debug(f'mod: {mod}') if self.reload: importlib.reload(mod) cls = getattr(mod, cname) logger.debug(f'class: {cls}') return mod, cls
python
def get_module_class(self): """Return the module and class as a tuple of the given class in the initializer. :param reload: if ``True`` then reload the module before returning the class """ pkg, cname = self.parse_module_class() logger.debug(f'pkg: {pkg}, class: {cname}') pkg = pkg.split('.') mod = reduce(lambda m, n: getattr(m, n), pkg[1:], __import__(pkg[0])) logger.debug(f'mod: {mod}') if self.reload: importlib.reload(mod) cls = getattr(mod, cname) logger.debug(f'class: {cls}') return mod, cls
[ "def", "get_module_class", "(", "self", ")", ":", "pkg", ",", "cname", "=", "self", ".", "parse_module_class", "(", ")", "logger", ".", "debug", "(", "f'pkg: {pkg}, class: {cname}'", ")", "pkg", "=", "pkg", ".", "split", "(", "'.'", ")", "mod", "=", "reduce", "(", "lambda", "m", ",", "n", ":", "getattr", "(", "m", ",", "n", ")", ",", "pkg", "[", "1", ":", "]", ",", "__import__", "(", "pkg", "[", "0", "]", ")", ")", "logger", ".", "debug", "(", "f'mod: {mod}'", ")", "if", "self", ".", "reload", ":", "importlib", ".", "reload", "(", "mod", ")", "cls", "=", "getattr", "(", "mod", ",", "cname", ")", "logger", ".", "debug", "(", "f'class: {cls}'", ")", "return", "mod", ",", "cls" ]
Return the module and class as a tuple of the given class in the initializer. :param reload: if ``True`` then reload the module before returning the class
[ "Return", "the", "module", "and", "class", "as", "a", "tuple", "of", "the", "given", "class", "in", "the", "initializer", "." ]
train
https://github.com/plandes/actioncli/blob/d1c4ea27e6f3394b30a1652ddd4b916160662773/src/python/zensols/actioncli/factory.py#L43-L60
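The reduce over __import__ walks down from the top-level package one attribute at a time; the same idea as a self-contained helper using only the standard library:

from functools import reduce

def resolve(fqcn):
    """Return (module, class) for a fully qualified class name."""
    mod_name, _, cls_name = fqcn.rpartition('.')
    parts = mod_name.split('.')
    # __import__('a.b') imports both modules but returns the top-level 'a';
    # getattr then walks back down to the leaf module.
    mod = reduce(lambda m, n: getattr(m, n), parts[1:], __import__(mod_name))
    return mod, getattr(mod, cls_name)

mod, cls = resolve('json.decoder.JSONDecoder')
print(cls.__name__)   # -> JSONDecoder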
plandes/actioncli
src/python/zensols/actioncli/factory.py
ClassImporter.instance
def instance(self, *args, **kwargs): """Create an instance of the specified class in the initializer. :param args: the arguments given to the initializer of the new class :param kwargs: the keyword arguments given to the initializer of the new class """ mod, cls = self.get_module_class() inst = cls(*args, **kwargs) logger.debug(f'inst: {inst}') return inst
python
def instance(self, *args, **kwargs): """Create an instance of the specified class in the initializer. :param args: the arguments given to the initializer of the new class :param kwargs: the keyword arguments given to the initializer of the new class """ mod, cls = self.get_module_class() inst = cls(*args, **kwargs) logger.debug(f'inst: {inst}') return inst
[ "def", "instance", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "mod", ",", "cls", "=", "self", ".", "get_module_class", "(", ")", "inst", "=", "cls", "(", "*", "args", ",", "*", "*", "kwargs", ")", "logger", ".", "debug", "(", "f'inst: {inst}'", ")", "return", "inst" ]
Create an instance of the specified class in the initializer. :param args: the arguments given to the initializer of the new class :param kwargs: the keyword arguments given to the initializer of the new class
[ "Create", "an", "instance", "of", "the", "specified", "class", "in", "the", "initializer", "." ]
train
https://github.com/plandes/actioncli/blob/d1c4ea27e6f3394b30a1652ddd4b916160662773/src/python/zensols/actioncli/factory.py#L62-L73
plandes/actioncli
src/python/zensols/actioncli/factory.py
ClassImporter.set_log_level
def set_log_level(self, level=logging.INFO): """Convenience method to set the log level of the module given in the initializer of this class. :param level: an instance of ``logging.<level>`` """ mod, cls = self.parse_module_class() logging.getLogger(mod).setLevel(level)
python
def set_log_level(self, level=logging.INFO): """Convenience method to set the log level of the module given in the initializer of this class. :param level: an instance of ``logging.<level>`` """ mod, cls = self.parse_module_class() logging.getLogger(mod).setLevel(level)
[ "def", "set_log_level", "(", "self", ",", "level", "=", "logging", ".", "INFO", ")", ":", "mod", ",", "cls", "=", "self", ".", "parse_module_class", "(", ")", "logging", ".", "getLogger", "(", "mod", ")", ".", "setLevel", "(", "level", ")" ]
Convenience method to set the log level of the module given in the initializer of this class. :param level: an instance of ``logging.<level>``
[ "Convenience", "method", "to", "set", "the", "log", "level", "of", "the", "module", "given", "in", "the", "initializer", "of", "this", "class", "." ]
train
https://github.com/plandes/actioncli/blob/d1c4ea27e6f3394b30a1652ddd4b916160662773/src/python/zensols/actioncli/factory.py#L75-L82
plandes/actioncli
src/python/zensols/actioncli/factory.py
ConfigFactory.register
def register(cls, instance_class, name=None): """Register a class with the factory. :param instance_class: the class to register with the factory (not a string) :param name: the name to use as the key for instance class lookups; defaults to the name of the class """ if name is None: name = instance_class.__name__ cls.INSTANCE_CLASSES[name] = instance_class
python
def register(cls, instance_class, name=None): """Register a class with the factory. :param instance_class: the class to register with the factory (not a string) :param name: the name to use as the key for instance class lookups; defaults to the name of the class """ if name is None: name = instance_class.__name__ cls.INSTANCE_CLASSES[name] = instance_class
[ "def", "register", "(", "cls", ",", "instance_class", ",", "name", "=", "None", ")", ":", "if", "name", "is", "None", ":", "name", "=", "instance_class", ".", "__name__", "cls", ".", "INSTANCE_CLASSES", "[", "name", "]", "=", "instance_class" ]
Register a class with the factory. :param instance_class: the class to register with the factory (not a string) :param name: the name to use as the key for instance class lookups; defaults to the name of the class
[ "Register", "a", "class", "with", "the", "factory", "." ]
train
https://github.com/plandes/actioncli/blob/d1c4ea27e6f3394b30a1652ddd4b916160662773/src/python/zensols/actioncli/factory.py#L120-L131
plandes/actioncli
src/python/zensols/actioncli/factory.py
ConfigFactory._find_class
def _find_class(self, class_name): "Resolve the class from the name." classes = {} classes.update(globals()) classes.update(self.INSTANCE_CLASSES) logger.debug(f'looking up class: {class_name}') cls = classes[class_name] logger.debug(f'found class: {cls}') return cls
python
def _find_class(self, class_name): "Resolve the class from the name." classes = {} classes.update(globals()) classes.update(self.INSTANCE_CLASSES) logger.debug(f'looking up class: {class_name}') cls = classes[class_name] logger.debug(f'found class: {cls}') return cls
[ "def", "_find_class", "(", "self", ",", "class_name", ")", ":", "classes", "=", "{", "}", "classes", ".", "update", "(", "globals", "(", ")", ")", "classes", ".", "update", "(", "self", ".", "INSTANCE_CLASSES", ")", "logger", ".", "debug", "(", "f'looking up class: {class_name}'", ")", "cls", "=", "classes", "[", "class_name", "]", "logger", ".", "debug", "(", "f'found class: {cls}'", ")", "return", "cls" ]
Resolve the class from the name.
[ "Resolve", "the", "class", "from", "the", "name", "." ]
train
https://github.com/plandes/actioncli/blob/d1c4ea27e6f3394b30a1652ddd4b916160662773/src/python/zensols/actioncli/factory.py#L133-L141
plandes/actioncli
src/python/zensols/actioncli/factory.py
ConfigFactory._class_name_params
def _class_name_params(self, name): "Get the class name and parameters to use for ``__init__``." sec = self.pattern.format(**{'name': name}) logger.debug(f'section: {sec}') params = {} params.update(self.config.populate({}, section=sec)) class_name = params['class_name'] del params['class_name'] return class_name, params
python
def _class_name_params(self, name): "Get the class name and parameters to use for ``__init__``." sec = self.pattern.format(**{'name': name}) logger.debug(f'section: {sec}') params = {} params.update(self.config.populate({}, section=sec)) class_name = params['class_name'] del params['class_name'] return class_name, params
[ "def", "_class_name_params", "(", "self", ",", "name", ")", ":", "sec", "=", "self", ".", "pattern", ".", "format", "(", "*", "*", "{", "'name'", ":", "name", "}", ")", "logger", ".", "debug", "(", "f'section: {sec}'", ")", "params", "=", "{", "}", "params", ".", "update", "(", "self", ".", "config", ".", "populate", "(", "{", "}", ",", "section", "=", "sec", ")", ")", "class_name", "=", "params", "[", "'class_name'", "]", "del", "params", "[", "'class_name'", "]", "return", "class_name", ",", "params" ]
Get the class name and parameters to use for ``__init__``.
[ "Get", "the", "class", "name", "and", "parameters", "to", "use", "for", "__init__", "." ]
train
https://github.com/plandes/actioncli/blob/d1c4ea27e6f3394b30a1652ddd4b916160662773/src/python/zensols/actioncli/factory.py#L143-L151
plandes/actioncli
src/python/zensols/actioncli/factory.py
ConfigFactory._has_init_config
def _has_init_config(self, cls): """Return whether the class has a ``config`` parameter in the ``__init__`` method. """ args = inspect.signature(cls.__init__) return self.config_param_name in args.parameters
python
def _has_init_config(self, cls): """Return whether the class has a ``config`` parameter in the ``__init__`` method. """ args = inspect.signature(cls.__init__) return self.config_param_name in args.parameters
[ "def", "_has_init_config", "(", "self", ",", "cls", ")", ":", "args", "=", "inspect", ".", "signature", "(", "cls", ".", "__init__", ")", "return", "self", ".", "config_param_name", "in", "args", ".", "parameters" ]
Return whether the class has a ``config`` parameter in the ``__init__`` method.
[ "Return", "whether", "the", "class", "has", "a", "config", "parameter", "in", "the", "__init__", "method", "." ]
train
https://github.com/plandes/actioncli/blob/d1c4ea27e6f3394b30a1652ddd4b916160662773/src/python/zensols/actioncli/factory.py#L153-L159
plandes/actioncli
src/python/zensols/actioncli/factory.py
ConfigFactory._has_init_name
def _has_init_name(self, cls): """Return whether the class has a ``name`` parameter in the ``__init__`` method. """ args = inspect.signature(cls.__init__) return self.name_param_name in args.parameters
python
def _has_init_name(self, cls): """Return whether the class has a ``name`` parameter in the ``__init__`` method. """ args = inspect.signature(cls.__init__) return self.name_param_name in args.parameters
[ "def", "_has_init_name", "(", "self", ",", "cls", ")", ":", "args", "=", "inspect", ".", "signature", "(", "cls", ".", "__init__", ")", "return", "self", ".", "name_param_name", "in", "args", ".", "parameters" ]
Return whether the class has a ``name`` parameter in the ``__init__`` method.
[ "Return", "whether", "the", "class", "has", "a", "name", "parameter", "in", "the", "__init__", "method", "." ]
train
https://github.com/plandes/actioncli/blob/d1c4ea27e6f3394b30a1652ddd4b916160662773/src/python/zensols/actioncli/factory.py#L161-L167
plandes/actioncli
src/python/zensols/actioncli/factory.py
ConfigFactory._instance
def _instance(self, cls, *args, **kwargs): """Return the instance. :param cls: the class to create the instance from :param args: given to the ``__init__`` method :param kwargs: given to the ``__init__`` method """ logger.debug(f'args: {args}, kwargs: {kwargs}') return cls(*args, **kwargs)
python
def _instance(self, cls, *args, **kwargs): """Return the instance. :param cls: the class to create the instance from :param args: given to the ``__init__`` method :param kwargs: given to the ``__init__`` method """ logger.debug(f'args: {args}, kwargs: {kwargs}') return cls(*args, **kwargs)
[ "def", "_instance", "(", "self", ",", "cls", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "logger", ".", "debug", "(", "f'args: {args}, kwargs: {kwargs}'", ")", "return", "cls", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
Return the instance. :param cls: the class to create the instance from :param args: given to the ``__init__`` method :param kwargs: given to the ``__init__`` method
[ "Return", "the", "instance", "." ]
train
https://github.com/plandes/actioncli/blob/d1c4ea27e6f3394b30a1652ddd4b916160662773/src/python/zensols/actioncli/factory.py#L169-L177
plandes/actioncli
src/python/zensols/actioncli/factory.py
ConfigFactory.instance
def instance(self, name=None, *args, **kwargs): """Create a new instance using key ``name``. :param name: the name of the class (by default) or the key name of the class used to find the class :param args: given to the ``__init__`` method :param kwargs: given to the ``__init__`` method """ logger.info(f'new instance of {name}') t0 = time() name = self.default_name if name is None else name logger.debug(f'creating instance of {name}') class_name, params = self._class_name_params(name) cls = self._find_class(class_name) params.update(kwargs) if self._has_init_config(cls): logger.debug(f'found config parameter') params['config'] = self.config if self._has_init_name(cls): logger.debug(f'found name parameter') params['name'] = name if logger.level >= logging.DEBUG: for k, v in params.items(): logger.debug(f'populating {k} -> {v} ({type(v)})') inst = self._instance(cls, *args, **params) logger.info(f'created {name} instance of {cls.__name__} ' + f'in {(time() - t0):.2f}s') return inst
python
def instance(self, name=None, *args, **kwargs): """Create a new instance using key ``name``. :param name: the name of the class (by default) or the key name of the class used to find the class :param args: given to the ``__init__`` method :param kwargs: given to the ``__init__`` method """ logger.info(f'new instance of {name}') t0 = time() name = self.default_name if name is None else name logger.debug(f'creating instance of {name}') class_name, params = self._class_name_params(name) cls = self._find_class(class_name) params.update(kwargs) if self._has_init_config(cls): logger.debug(f'found config parameter') params['config'] = self.config if self._has_init_name(cls): logger.debug(f'found name parameter') params['name'] = name if logger.level >= logging.DEBUG: for k, v in params.items(): logger.debug(f'populating {k} -> {v} ({type(v)})') inst = self._instance(cls, *args, **params) logger.info(f'created {name} instance of {cls.__name__} ' + f'in {(time() - t0):.2f}s') return inst
[ "def", "instance", "(", "self", ",", "name", "=", "None", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "logger", ".", "info", "(", "f'new instance of {name}'", ")", "t0", "=", "time", "(", ")", "name", "=", "self", ".", "default_name", "if", "name", "is", "None", "else", "name", "logger", ".", "debug", "(", "f'creating instance of {name}'", ")", "class_name", ",", "params", "=", "self", ".", "_class_name_params", "(", "name", ")", "cls", "=", "self", ".", "_find_class", "(", "class_name", ")", "params", ".", "update", "(", "kwargs", ")", "if", "self", ".", "_has_init_config", "(", "cls", ")", ":", "logger", ".", "debug", "(", "f'found config parameter'", ")", "params", "[", "'config'", "]", "=", "self", ".", "config", "if", "self", ".", "_has_init_name", "(", "cls", ")", ":", "logger", ".", "debug", "(", "f'found name parameter'", ")", "params", "[", "'name'", "]", "=", "name", "if", "logger", ".", "level", ">=", "logging", ".", "DEBUG", ":", "for", "k", ",", "v", "in", "params", ".", "items", "(", ")", ":", "logger", ".", "debug", "(", "f'populating {k} -> {v} ({type(v)})'", ")", "inst", "=", "self", ".", "_instance", "(", "cls", ",", "*", "args", ",", "*", "*", "params", ")", "logger", ".", "info", "(", "f'created {name} instance of {cls.__name__} '", "+", "f'in {(time() - t0):.2f}s'", ")", "return", "inst" ]
Create a new instance using key ``name``. :param name: the name of the class (by default) or the key name of the class used to find the class :param args: given to the ``__init__`` method :param kwargs: given to the ``__init__`` method
[ "Create", "a", "new", "instance", "using", "key", "name", "." ]
train
https://github.com/plandes/actioncli/blob/d1c4ea27e6f3394b30a1652ddd4b916160662773/src/python/zensols/actioncli/factory.py#L179-L207
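End to end, ConfigFactory.instance maps a name to a config section via the pattern, reads class_name plus constructor parameters from that section, and instantiates. A hypothetical sketch; the section layout and the factory constructor shape are assumptions, not taken from this file:

# Assumed INI-style configuration read through _class_name_params:
#   [parser_inst]
#   class_name = TokenParser
#   encoding = utf-8
class TokenParser:
    def __init__(self, encoding):
        self.encoding = encoding

ConfigFactory.register(TokenParser)                     # now resolvable by _find_class
factory = ConfigFactory(config, pattern='{name}_inst')  # 'config' constructed elsewhere
parser = factory.instance('parser')                     # reads [parser_inst] -> TokenParser(encoding='utf-8')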
plandes/actioncli
src/python/zensols/actioncli/factory.py
ConfigManager.load
def load(self, name=None, *args, **kwargs): "Load the instance of the object from the stash." inst = self.stash.load(name) if inst is None: inst = self.instance(name, *args, **kwargs) logger.debug(f'loaded (conf mng) instance: {inst}') return inst
python
def load(self, name=None, *args, **kwargs): "Load the instance of the object from the stash." inst = self.stash.load(name) if inst is None: inst = self.instance(name, *args, **kwargs) logger.debug(f'loaded (conf mng) instance: {inst}') return inst
[ "def", "load", "(", "self", ",", "name", "=", "None", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "inst", "=", "self", ".", "stash", ".", "load", "(", "name", ")", "if", "inst", "is", "None", ":", "inst", "=", "self", ".", "instance", "(", "name", ",", "*", "args", ",", "*", "*", "kwargs", ")", "logger", ".", "debug", "(", "f'loaded (conf mng) instance: {inst}'", ")", "return", "inst" ]
Load the instance of the object from the stash.
[ "Load", "the", "instance", "of", "the", "object", "from", "the", "stash", "." ]
train
https://github.com/plandes/actioncli/blob/d1c4ea27e6f3394b30a1652ddd4b916160662773/src/python/zensols/actioncli/factory.py#L225-L231
plandes/actioncli
src/python/zensols/actioncli/factory.py
ConfigManager.dump
def dump(self, name: str, inst): "Save the object instance to the stash." self.stash.dump(name, inst)
python
def dump(self, name: str, inst): "Save the object instance to the stash." self.stash.dump(name, inst)
[ "def", "dump", "(", "self", ",", "name", ":", "str", ",", "inst", ")", ":", "self", ".", "stash", ".", "dump", "(", "name", ",", "inst", ")" ]
Save the object instance to the stash.
[ "Save", "the", "object", "instance", "to", "the", "stash", "." ]
train
https://github.com/plandes/actioncli/blob/d1c4ea27e6f3394b30a1652ddd4b916160662773/src/python/zensols/actioncli/factory.py#L241-L243
saltant-org/saltant-py
saltant/client.py
Client.from_env
def from_env(cls, default_timeout=DEFAULT_TIMEOUT_SECONDS): """Return a client configured from environment variables. Essentially copying this: https://github.com/docker/docker-py/blob/master/docker/client.py#L43. The environment variables looked for are the following: .. envvar:: SALTANT_API_URL The URL of the saltant API. For example, https://shahlabjobs.ca/api/. .. envvar:: SALTANT_AUTH_TOKEN The registered saltant user's authentication token. Example: >>> from saltant.client import from_env >>> client = from_env() Args: default_timeout (int, optional): The maximum number of seconds to wait for a request to complete. Defaults to 90 seconds. Returns: :class:`Client`: A saltant API client object. Raises: :class:`saltant.exceptions.BadEnvironmentError`: The user has an incorrectly configured environment. """ # Get variables from environment try: base_api_url = os.environ["SALTANT_API_URL"] except KeyError: raise BadEnvironmentError("SALTANT_API_URL not defined!") try: # Try to get an auth token auth_token = os.environ["SALTANT_AUTH_TOKEN"] except KeyError: raise BadEnvironmentError("SALTANT_AUTH_TOKEN not defined!") # Return the configured client return cls( base_api_url=base_api_url, auth_token=auth_token, default_timeout=default_timeout, )
python
def from_env(cls, default_timeout=DEFAULT_TIMEOUT_SECONDS): """Return a client configured from environment variables. Essentially copying this: https://github.com/docker/docker-py/blob/master/docker/client.py#L43. The environment variables looked for are the following: .. envvar:: SALTANT_API_URL The URL of the saltant API. For example, https://shahlabjobs.ca/api/. .. envvar:: SALTANT_AUTH_TOKEN The registered saltant user's authentication token. Example: >>> from saltant.client import from_env >>> client = from_env() Args: default_timeout (int, optional): The maximum number of seconds to wait for a request to complete. Defaults to 90 seconds. Returns: :class:`Client`: A saltant API client object. Raises: :class:`saltant.exceptions.BadEnvironmentError`: The user has an incorrectly configured environment. """ # Get variables from environment try: base_api_url = os.environ["SALTANT_API_URL"] except KeyError: raise BadEnvironmentError("SALTANT_API_URL not defined!") try: # Try to get an auth token auth_token = os.environ["SALTANT_AUTH_TOKEN"] except KeyError: raise BadEnvironmentError("SALTANT_AUTH_TOKEN not defined!") # Return the configured client return cls( base_api_url=base_api_url, auth_token=auth_token, default_timeout=default_timeout, )
[ "def", "from_env", "(", "cls", ",", "default_timeout", "=", "DEFAULT_TIMEOUT_SECONDS", ")", ":", "# Get variables from environment", "try", ":", "base_api_url", "=", "os", ".", "environ", "[", "\"SALTANT_API_URL\"", "]", "except", "KeyError", ":", "raise", "BadEnvironmentError", "(", "\"SALTANT_API_URL not defined!\"", ")", "try", ":", "# Try to get an auth token", "auth_token", "=", "os", ".", "environ", "[", "\"SALTANT_AUTH_TOKEN\"", "]", "except", "KeyError", ":", "raise", "BadEnvironmentError", "(", "\"SALTANT_AUTH_TOKEN not defined!\"", ")", "# Return the configured client", "return", "cls", "(", "base_api_url", "=", "base_api_url", ",", "auth_token", "=", "auth_token", ",", "default_timeout", "=", "default_timeout", ",", ")" ]
Return a client configured from environment variables. Essentially copying this: https://github.com/docker/docker-py/blob/master/docker/client.py#L43. The environment variables looked for are the following: .. envvar:: SALTANT_API_URL The URL of the saltant API. For example, https://shahlabjobs.ca/api/. .. envvar:: SALTANT_AUTH_TOKEN The registered saltant user's authentication token. Example: >>> from saltant.client import from_env >>> client = from_env() Args: default_timeout (int, optional): The maximum number of seconds to wait for a request to complete. Defaults to 90 seconds. Returns: :class:`Client`: A saltant API client object. Raises: :class:`saltant.exceptions.BadEnvironmentError`: The user has an incorrectly configured environment.
[ "Return", "a", "client", "configured", "from", "environment", "variables", "." ]
train
https://github.com/saltant-org/saltant-py/blob/bf3bdbc4ec9c772c7f621f8bd6a76c5932af68be/saltant/client.py#L127-L178
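A short usage sketch for from_env; the URL and token below are placeholders, not working credentials:

import os
os.environ["SALTANT_API_URL"] = "https://example.com/api/"
os.environ["SALTANT_AUTH_TOKEN"] = "not-a-real-token"
client = Client.from_env(default_timeout=30)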
plandes/actioncli
src/python/zensols/actioncli/persist.py
PersistedWork.clear_global
def clear_global(self): """Clear only any cached global data. """ vname = self.varname logger.debug(f'global clearing {vname}') if vname in globals(): logger.debug('removing global instance var: {}'.format(vname)) del globals()[vname]
python
def clear_global(self): """Clear only any cached global data. """ vname = self.varname logger.debug(f'global clearing {vname}') if vname in globals(): logger.debug('removing global instance var: {}'.format(vname)) del globals()[vname]
[ "def", "clear_global", "(", "self", ")", ":", "vname", "=", "self", ".", "varname", "logger", ".", "debug", "(", "f'global clearing {vname}'", ")", "if", "vname", "in", "globals", "(", ")", ":", "logger", ".", "debug", "(", "'removing global instance var: {}'", ".", "format", "(", "vname", ")", ")", "del", "globals", "(", ")", "[", "vname", "]" ]
Clear only any cached global data.
[ "Clear", "only", "any", "cached", "global", "data", "." ]
train
https://github.com/plandes/actioncli/blob/d1c4ea27e6f3394b30a1652ddd4b916160662773/src/python/zensols/actioncli/persist.py#L64-L72
plandes/actioncli
src/python/zensols/actioncli/persist.py
PersistedWork.clear
def clear(self): """Clear the data, and thus, force it to be created on the next fetch. This is done by removing the attribute from ``owner``, deleting it from globals and removing the file from the disk. """ vname = self.varname if self.path.exists(): logger.debug('deleting cached work: {}'.format(self.path)) self.path.unlink() if self.owner is not None and hasattr(self.owner, vname): logger.debug('removing instance var: {}'.format(vname)) delattr(self.owner, vname) self.clear_global()
python
def clear(self): """Clear the data, and thus, force it to be created on the next fetch. This is done by removing the attribute from ``owner``, deleting it from globals and removing the file from the disk. """ vname = self.varname if self.path.exists(): logger.debug('deleting cached work: {}'.format(self.path)) self.path.unlink() if self.owner is not None and hasattr(self.owner, vname): logger.debug('removing instance var: {}'.format(vname)) delattr(self.owner, vname) self.clear_global()
[ "def", "clear", "(", "self", ")", ":", "vname", "=", "self", ".", "varname", "if", "self", ".", "path", ".", "exists", "(", ")", ":", "logger", ".", "debug", "(", "'deleting cached work: {}'", ".", "format", "(", "self", ".", "path", ")", ")", "self", ".", "path", ".", "unlink", "(", ")", "if", "self", ".", "owner", "is", "not", "None", "and", "hasattr", "(", "self", ".", "owner", ",", "vname", ")", ":", "logger", ".", "debug", "(", "'removing instance var: {}'", ".", "format", "(", "vname", ")", ")", "delattr", "(", "self", ".", "owner", ",", "vname", ")", "self", ".", "clear_global", "(", ")" ]
Clear the data, and thus, force it to be created on the next fetch. This is done by removing the attribute from ``owner``, deleting it from globals and removing the file from the disk.
[ "Clear", "the", "data", "and", "thus", "force", "it", "to", "be", "created", "on", "the", "next", "fetch", ".", "This", "is", "done", "by", "removing", "the", "attribute", "from", "owner", "deleting", "it", "from", "globals", "and", "removing", "the", "file", "from", "the", "disk", "." ]
train
https://github.com/plandes/actioncli/blob/d1c4ea27e6f3394b30a1652ddd4b916160662773/src/python/zensols/actioncli/persist.py#L74-L87
plandes/actioncli
src/python/zensols/actioncli/persist.py
PersistedWork._load_or_create
def _load_or_create(self, *argv, **kwargs): """Invoke the file system operations to get the data, or create work. If the file does not exist, call ``__do_work__`` and save the result. """ if self.path.exists(): self._info('loading work from {}'.format(self.path)) with open(self.path, 'rb') as f: obj = pickle.load(f) else: self._info('saving work to {}'.format(self.path)) with open(self.path, 'wb') as f: obj = self._do_work(*argv, **kwargs) pickle.dump(obj, f) return obj
python
def _load_or_create(self, *argv, **kwargs): """Invoke the file system operations to get the data, or create work. If the file does not exist, call ``__do_work__`` and save the result. """ if self.path.exists(): self._info('loading work from {}'.format(self.path)) with open(self.path, 'rb') as f: obj = pickle.load(f) else: self._info('saving work to {}'.format(self.path)) with open(self.path, 'wb') as f: obj = self._do_work(*argv, **kwargs) pickle.dump(obj, f) return obj
[ "def", "_load_or_create", "(", "self", ",", "*", "argv", ",", "*", "*", "kwargs", ")", ":", "if", "self", ".", "path", ".", "exists", "(", ")", ":", "self", ".", "_info", "(", "'loading work from {}'", ".", "format", "(", "self", ".", "path", ")", ")", "with", "open", "(", "self", ".", "path", ",", "'rb'", ")", "as", "f", ":", "obj", "=", "pickle", ".", "load", "(", "f", ")", "else", ":", "self", ".", "_info", "(", "'saving work to {}'", ".", "format", "(", "self", ".", "path", ")", ")", "with", "open", "(", "self", ".", "path", ",", "'wb'", ")", "as", "f", ":", "obj", "=", "self", ".", "_do_work", "(", "*", "argv", ",", "*", "*", "kwargs", ")", "pickle", ".", "dump", "(", "obj", ",", "f", ")", "return", "obj" ]
Invoke the file system operations to get the data, or create work. If the file does not exist, call ``__do_work__`` and save the result.
[ "Invoke", "the", "file", "system", "operations", "to", "get", "the", "data", "or", "create", "work", "." ]
train
https://github.com/plandes/actioncli/blob/d1c4ea27e6f3394b30a1652ddd4b916160662773/src/python/zensols/actioncli/persist.py#L96-L110
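The logic above is a classic pickle cache; the same pattern as a free-standing function, independent of PersistedWork:

import pickle
from pathlib import Path

def load_or_create(path: Path, factory):
    """Return the object cached at path, computing and pickling it on first use."""
    if path.exists():
        with open(path, 'rb') as f:
            return pickle.load(f)
    obj = factory()
    with open(path, 'wb') as f:
        pickle.dump(obj, f)
    return obj

total = load_or_create(Path('/tmp/expensive.dat'), lambda: sum(range(10_000_000)))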
plandes/actioncli
src/python/zensols/actioncli/persist.py
PreemptiveStash.has_data
def has_data(self): """Return whether or not the stash has any data available.""" if not hasattr(self, '_has_data'): try: next(iter(self.delegate.keys())) self._has_data = True except StopIteration: self._has_data = False return self._has_data
python
def has_data(self): """Return whether or not the stash has any data available.""" if not hasattr(self, '_has_data'): try: next(iter(self.delegate.keys())) self._has_data = True except StopIteration: self._has_data = False return self._has_data
[ "def", "has_data", "(", "self", ")", ":", "if", "not", "hasattr", "(", "self", ",", "'_has_data'", ")", ":", "try", ":", "next", "(", "iter", "(", "self", ".", "delegate", ".", "keys", "(", ")", ")", ")", "self", ".", "_has_data", "=", "True", "except", "StopIteration", ":", "self", ".", "_has_data", "=", "False", "return", "self", ".", "_has_data" ]
Return whether or not the stash has any data available.
[ "Return", "whether", "or", "not", "the", "stash", "has", "any", "data", "available", "." ]
train
https://github.com/plandes/actioncli/blob/d1c4ea27e6f3394b30a1652ddd4b916160662773/src/python/zensols/actioncli/persist.py#L393-L401
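The next(iter(...)) probe answers "is there at least one key?" without materializing the whole key sequence; the idiom in isolation:

def is_empty(iterable):
    try:
        next(iter(iterable))
        return False
    except StopIteration:
        return True

assert is_empty(iter([]))
assert not is_empty(k for k in ('a', 'b'))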
plandes/actioncli
src/python/zensols/actioncli/persist.py
DirectoryStash._get_instance_path
def _get_instance_path(self, name): "Return a path to the pickled data with key ``name``." fname = self.pattern.format(**{'name': name}) logger.debug(f'path {self.create_path}: {self.create_path.exists()}') self._create_path_dir() return Path(self.create_path, fname)
python
def _get_instance_path(self, name): "Return a path to the pickled data with key ``name``." fname = self.pattern.format(**{'name': name}) logger.debug(f'path {self.create_path}: {self.create_path.exists()}') self._create_path_dir() return Path(self.create_path, fname)
[ "def", "_get_instance_path", "(", "self", ",", "name", ")", ":", "fname", "=", "self", ".", "pattern", ".", "format", "(", "*", "*", "{", "'name'", ":", "name", "}", ")", "logger", ".", "debug", "(", "f'path {self.create_path}: {self.create_path.exists()}'", ")", "self", ".", "_create_path_dir", "(", ")", "return", "Path", "(", "self", ".", "create_path", ",", "fname", ")" ]
Return a path to the pickled data with key ``name``.
[ "Return", "a", "path", "to", "the", "pickled", "data", "with", "key", "name", "." ]
train
https://github.com/plandes/actioncli/blob/d1c4ea27e6f3394b30a1652ddd4b916160662773/src/python/zensols/actioncli/persist.py#L533-L538
plandes/actioncli
src/python/zensols/actioncli/persist.py
ShelveStash.shelve
def shelve(self): """Return an opened shelve object. """ logger.info('creating shelve data') fname = str(self.create_path.absolute()) inst = sh.open(fname, writeback=self.writeback) self.is_open = True return inst
python
def shelve(self): """Return an opened shelve object. """ logger.info('creating shelve data') fname = str(self.create_path.absolute()) inst = sh.open(fname, writeback=self.writeback) self.is_open = True return inst
[ "def", "shelve", "(", "self", ")", ":", "logger", ".", "info", "(", "'creating shelve data'", ")", "fname", "=", "str", "(", "self", ".", "create_path", ".", "absolute", "(", ")", ")", "inst", "=", "sh", ".", "open", "(", "fname", ",", "writeback", "=", "self", ".", "writeback", ")", "self", ".", "is_open", "=", "True", "return", "inst" ]
Return an opened shelve object.
[ "Return", "an", "opened", "shelve", "object", "." ]
train
https://github.com/plandes/actioncli/blob/d1c4ea27e6f3394b30a1652ddd4b916160662773/src/python/zensols/actioncli/persist.py#L602-L610
plandes/actioncli
src/python/zensols/actioncli/persist.py
ShelveStash.delete
def delete(self, name=None): "Delete the shelve data file." logger.info('clearing shelve data') self.close() for path in Path(self.create_path.parent, self.create_path.name), \ Path(self.create_path.parent, self.create_path.name + '.db'): logger.debug(f'clearing {path} if exists: {path.exists()}') if path.exists(): path.unlink() break
python
def delete(self, name=None): "Delete the shelve data file." logger.info('clearing shelve data') self.close() for path in Path(self.create_path.parent, self.create_path.name), \ Path(self.create_path.parent, self.create_path.name + '.db'): logger.debug(f'clearing {path} if exists: {path.exists()}') if path.exists(): path.unlink() break
[ "def", "delete", "(", "self", ",", "name", "=", "None", ")", ":", "logger", ".", "info", "(", "'clearing shelve data'", ")", "self", ".", "close", "(", ")", "for", "path", "in", "Path", "(", "self", ".", "create_path", ".", "parent", ",", "self", ".", "create_path", ".", "name", ")", ",", "Path", "(", "self", ".", "create_path", ".", "parent", ",", "self", ".", "create_path", ".", "name", "+", "'.db'", ")", ":", "logger", ".", "debug", "(", "f'clearing {path} if exists: {path.exists()}'", ")", "if", "path", ".", "exists", "(", ")", ":", "path", ".", "unlink", "(", ")", "break" ]
Delete the shelve data file.
[ "Delete", "the", "shelve", "data", "file", "." ]
train
https://github.com/plandes/actioncli/blob/d1c4ea27e6f3394b30a1652ddd4b916160662773/src/python/zensols/actioncli/persist.py#L625-L634
plandes/actioncli
src/python/zensols/actioncli/persist.py
ShelveStash.close
def close(self): "Close the shelve object, which is needed for data consistency." if self.is_open: logger.info('closing shelve data') try: self.shelve.close() self._shelve.clear() except Exception: self.is_open = False
python
def close(self): "Close the shelve object, which is needed for data consistency." if self.is_open: logger.info('closing shelve data') try: self.shelve.close() self._shelve.clear() except Exception: self.is_open = False
[ "def", "close", "(", "self", ")", ":", "if", "self", ".", "is_open", ":", "logger", ".", "info", "(", "'closing shelve data'", ")", "try", ":", "self", ".", "shelve", ".", "close", "(", ")", "self", ".", "_shelve", ".", "clear", "(", ")", "except", "Exception", ":", "self", ".", "is_open", "=", "False" ]
Close the shelve object, which is needed for data consistency.
[ "Close", "the", "shelve", "object", "which", "is", "needed", "for", "data", "consistency", "." ]
train
https://github.com/plandes/actioncli/blob/d1c4ea27e6f3394b30a1652ddd4b916160662773/src/python/zensols/actioncli/persist.py#L636-L644
plandes/actioncli
src/python/zensols/actioncli/persist.py
MultiThreadedPoolStash._map
def _map(self, data_item): "Map ``data_item`` separately in each thread." delegate = self.delegate logger.debug(f'mapping: {data_item}') if self.clobber or not self.exists(data_item.id): logger.debug(f'exist: {data_item.id}: {self.exists(data_item.id)}') delegate.dump(data_item.id, data_item)
python
def _map(self, data_item): "Map ``data_item`` separately in each thread." delegate = self.delegate logger.debug(f'mapping: {data_item}') if self.clobber or not self.exists(data_item.id): logger.debug(f'exist: {data_item.id}: {self.exists(data_item.id)}') delegate.dump(data_item.id, data_item)
[ "def", "_map", "(", "self", ",", "data_item", ")", ":", "delegate", "=", "self", ".", "delegate", "logger", ".", "debug", "(", "f'mapping: {data_item}'", ")", "if", "self", ".", "clobber", "or", "not", "self", ".", "exists", "(", "data_item", ".", "id", ")", ":", "logger", ".", "debug", "(", "f'exist: {data_item.id}: {self.exists(data_item.id)}'", ")", "delegate", ".", "dump", "(", "data_item", ".", "id", ",", "data_item", ")" ]
Map ``data_item`` separately in each thread.
[ "Map", "data_item", "separately", "in", "each", "thread", "." ]
train
https://github.com/plandes/actioncli/blob/d1c4ea27e6f3394b30a1652ddd4b916160662773/src/python/zensols/actioncli/persist.py#L685-L691
plandes/actioncli
src/python/zensols/actioncli/persist.py
MultiThreadedPoolStash.load_all
def load_all(self, workers=None, limit=None, n_expected=None): """Load all instances with multiple threads. :param workers: number of workers to use to load instances, which defaults to what was given in the class initializer :param limit: return at most ``limit`` instances; defaults to no limit :param n_expected: rerun the iteration on the data if the number of found data points is less than ``n_expected``; defaults to all """ if not self.has_data: self._preempt(True) # we did the best we could (avoid repeat later in this method) n_expected = 0 keys = tuple(self.delegate.keys()) if n_expected is not None and len(keys) < n_expected: self._preempt(True) keys = self.delegate.keys() keys = it.islice(keys, limit) if limit is not None else keys pool = self._create_thread_pool(workers) logger.debug(f'workers={workers}, keys: {keys}') try: return iter(pool.map(self.delegate.load, keys)) finally: pool.close()
python
def load_all(self, workers=None, limit=None, n_expected=None): """Load all instances with multiple threads. :param workers: number of workers to use to load instances, which defaults to what was given in the class initializer :param limit: return at most ``limit`` instances; defaults to no limit :param n_expected: rerun the iteration on the data if the number of found data points is less than ``n_expected``; defaults to all """ if not self.has_data: self._preempt(True) # we did the best we could (avoid repeat later in this method) n_expected = 0 keys = tuple(self.delegate.keys()) if n_expected is not None and len(keys) < n_expected: self._preempt(True) keys = self.delegate.keys() keys = it.islice(keys, limit) if limit is not None else keys pool = self._create_thread_pool(workers) logger.debug(f'workers={workers}, keys: {keys}') try: return iter(pool.map(self.delegate.load, keys)) finally: pool.close()
[ "def", "load_all", "(", "self", ",", "workers", "=", "None", ",", "limit", "=", "None", ",", "n_expected", "=", "None", ")", ":", "if", "not", "self", ".", "has_data", ":", "self", ".", "_preempt", "(", "True", ")", "# we did the best we could (avoid repeat later in this method)", "n_expected", "=", "0", "keys", "=", "tuple", "(", "self", ".", "delegate", ".", "keys", "(", ")", ")", "if", "n_expected", "is", "not", "None", "and", "len", "(", "keys", ")", "<", "n_expected", ":", "self", ".", "_preempt", "(", "True", ")", "keys", "=", "self", ".", "delegate", ".", "keys", "(", ")", "keys", "=", "it", ".", "islice", "(", "keys", ",", "limit", ")", "if", "limit", "is", "not", "None", "else", "keys", "pool", "=", "self", ".", "_create_thread_pool", "(", "workers", ")", "logger", ".", "debug", "(", "f'workers={workers}, keys: {keys}'", ")", "try", ":", "return", "iter", "(", "pool", ".", "map", "(", "self", ".", "delegate", ".", "load", ",", "keys", ")", ")", "finally", ":", "pool", ".", "close", "(", ")" ]
Load all instances with multiple threads. :param workers: number of workers to use to load instances, which defaults to what was given in the class initializer :param limit: return at most ``limit`` instances; defaults to no limit :param n_expected: rerun the iteration on the data if the number of found data points is less than ``n_expected``; defaults to all
[ "Load", "all", "instances", "with", "multiple", "threads", "." ]
train
https://github.com/plandes/actioncli/blob/d1c4ea27e6f3394b30a1652ddd4b916160662773/src/python/zensols/actioncli/persist.py#L714-L741
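Stripped of the stash machinery, load_all is pool.map over the delegate's keys; a minimal standalone rendering of that core, with islice applied in its standard argument order:

import itertools as it
from multiprocessing.pool import ThreadPool

def load_all(load, keys, workers=4, limit=None):
    keys = it.islice(keys, limit) if limit is not None else keys
    pool = ThreadPool(workers)
    try:
        return iter(pool.map(load, keys))
    finally:
        pool.close()

print(list(load_all(str.upper, ['a', 'b', 'c'], limit=2)))   # -> ['A', 'B']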
pymacaron/pymacaron-core
pymacaron_core/swagger/api.py
API._make_persistent
def _make_persistent(self, model_name, pkg_name): """Monkey-patch object persistence (ex: to/from database) into a bravado-core model class""" # # WARNING: ugly piece of monkey-patching below. Hopefully will replace # with native bravado-core code in the future... # # Load class at path pkg_name c = get_function(pkg_name) for name in ('load_from_db', 'save_to_db'): if not hasattr(c, name): raise PyMacaronCoreException("Class %s has no static method '%s'" % (pkg_name, name)) log.info("Making %s persistent via %s" % (model_name, pkg_name)) # Replace model generator with one that adds 'save_to_db' to every instance model = getattr(self.model, model_name) n = self._wrap_bravado_model_generator(model, c.save_to_db, pkg_name) setattr(self.model, model_name, n) # Add class method load_from_db to model generator model = getattr(self.model, model_name) setattr(model, 'load_from_db', c.load_from_db)
python
def _make_persistent(self, model_name, pkg_name): """Monkey-patch object persistence (ex: to/from database) into a bravado-core model class""" # # WARNING: ugly piece of monkey-patching below. Hopefully will replace # with native bravado-core code in the future... # # Load class at path pkg_name c = get_function(pkg_name) for name in ('load_from_db', 'save_to_db'): if not hasattr(c, name): raise PyMacaronCoreException("Class %s has no static method '%s'" % (pkg_name, name)) log.info("Making %s persistent via %s" % (model_name, pkg_name)) # Replace model generator with one that adds 'save_to_db' to every instance model = getattr(self.model, model_name) n = self._wrap_bravado_model_generator(model, c.save_to_db, pkg_name) setattr(self.model, model_name, n) # Add class method load_from_db to model generator model = getattr(self.model, model_name) setattr(model, 'load_from_db', c.load_from_db)
[ "def", "_make_persistent", "(", "self", ",", "model_name", ",", "pkg_name", ")", ":", "#", "# WARNING: ugly piece of monkey-patching below. Hopefully will replace", "# with native bravado-core code in the future...", "#", "# Load class at path pkg_name", "c", "=", "get_function", "(", "pkg_name", ")", "for", "name", "in", "(", "'load_from_db'", ",", "'save_to_db'", ")", ":", "if", "not", "hasattr", "(", "c", ",", "name", ")", ":", "raise", "PyMacaronCoreException", "(", "\"Class %s has no static method '%s'\"", "%", "(", "pkg_name", ",", "name", ")", ")", "log", ".", "info", "(", "\"Making %s persistent via %s\"", "%", "(", "model_name", ",", "pkg_name", ")", ")", "# Replace model generator with one that adds 'save_to_db' to every instance", "model", "=", "getattr", "(", "self", ".", "model", ",", "model_name", ")", "n", "=", "self", ".", "_wrap_bravado_model_generator", "(", "model", ",", "c", ".", "save_to_db", ",", "pkg_name", ")", "setattr", "(", "self", ".", "model", ",", "model_name", ",", "n", ")", "# Add class method load_from_db to model generator", "model", "=", "getattr", "(", "self", ".", "model", ",", "model_name", ")", "setattr", "(", "model", ",", "'load_from_db'", ",", "c", ".", "load_from_db", ")" ]
Monkey-patch object persistence (ex: to/from database) into a bravado-core model class
[ "Monkey", "-", "patch", "object", "persistence", "(", "ex", ":", "to", "/", "from", "database", ")", "into", "a", "bravado", "-", "core", "model", "class" ]
train
https://github.com/pymacaron/pymacaron-core/blob/95070a39ed7065a84244ff5601fea4d54cc72b66/pymacaron_core/swagger/api.py#L121-L145
pymacaron/pymacaron-core
pymacaron_core/swagger/api.py
API.spawn_api
def spawn_api(self, app, decorator=None): """Auto-generate server endpoints implementing the API into this Flask app""" if decorator: assert type(decorator).__name__ == 'function' self.is_server = True self.app = app if self.local: # Re-generate client callers, this time as local and passing them the app self._generate_client_callers(app) return spawn_server_api(self.name, app, self.api_spec, self.error_callback, decorator)
python
def spawn_api(self, app, decorator=None): """Auto-generate server endpoints implementing the API into this Flask app""" if decorator: assert type(decorator).__name__ == 'function' self.is_server = True self.app = app if self.local: # Re-generate client callers, this time as local and passing them the app self._generate_client_callers(app) return spawn_server_api(self.name, app, self.api_spec, self.error_callback, decorator)
[ "def", "spawn_api", "(", "self", ",", "app", ",", "decorator", "=", "None", ")", ":", "if", "decorator", ":", "assert", "type", "(", "decorator", ")", ".", "__name__", "==", "'function'", "self", ".", "is_server", "=", "True", "self", ".", "app", "=", "app", "if", "self", ".", "local", ":", "# Re-generate client callers, this time as local and passing them the app", "self", ".", "_generate_client_callers", "(", "app", ")", "return", "spawn_server_api", "(", "self", ".", "name", ",", "app", ",", "self", ".", "api_spec", ",", "self", ".", "error_callback", ",", "decorator", ")" ]
Auto-generate server endpoints implementing the API into this Flask app
[ "Auto", "-", "generate", "server", "endpoints", "implementing", "the", "API", "into", "this", "Flask", "app" ]
train
https://github.com/pymacaron/pymacaron-core/blob/95070a39ed7065a84244ff5601fea4d54cc72b66/pymacaron_core/swagger/api.py#L161-L172
pymacaron/pymacaron-core
pymacaron_core/swagger/api.py
API.json_to_model
def json_to_model(self, model_name, j, validate=False): """Take a json struct and a model name, and return a model instance""" if validate: self.api_spec.validate(model_name, j) return self.api_spec.json_to_model(model_name, j)
python
def json_to_model(self, model_name, j, validate=False): """Take a json struct and a model name, and return a model instance""" if validate: self.api_spec.validate(model_name, j) return self.api_spec.json_to_model(model_name, j)
[ "def", "json_to_model", "(", "self", ",", "model_name", ",", "j", ",", "validate", "=", "False", ")", ":", "if", "validate", ":", "self", ".", "api_spec", ".", "validate", "(", "model_name", ",", "j", ")", "return", "self", ".", "api_spec", ".", "json_to_model", "(", "model_name", ",", "j", ")" ]
Take a json struct and a model name, and return a model instance
[ "Take", "a", "json", "struct", "and", "a", "model", "name", "and", "return", "a", "model", "instance" ]
train
https://github.com/pymacaron/pymacaron-core/blob/95070a39ed7065a84244ff5601fea4d54cc72b66/pymacaron_core/swagger/api.py#L185-L189
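A hedged usage sketch for json_to_model; the API constructor shape, model name, and payload are placeholders for whatever the loaded Swagger spec actually defines:

api = API('pets', yaml_path='pets.yaml')   # assumed constructor signature
pet = api.json_to_model('Pet', {'name': 'rex'}, validate=True)
print(pet.name)   # -> rex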
4degrees/clique
source/clique/__init__.py
assemble
def assemble( iterable, patterns=None, minimum_items=2, case_sensitive=True, assume_padded_when_ambiguous=False ): '''Assemble items in *iterable* into discreet collections. *patterns* may be specified as a list of regular expressions to limit the returned collection possibilities. Use this when interested in collections that only match specific patterns. Each pattern must contain the expression from :py:data:`DIGITS_PATTERN` exactly once. A selection of common expressions are available in :py:data:`PATTERNS`. .. note:: If a pattern is supplied as a string it will be automatically compiled to a :py:class:`re.RegexObject` instance for convenience. When *patterns* is not specified, collections are formed by examining all possible groupings of the items in *iterable* based around common numerical components. *minimum_items* dictates the minimum number of items a collection must have in order to be included in the result. The default is 2, filtering out single item collections. If *case_sensitive* is False, then items will be treated as part of the same collection when they only differ in casing. To avoid ambiguity, the resulting collection will always be lowercase. For example, "item.0001.dpx" and "Item.0002.dpx" would be part of the same collection, "item.%04d.dpx". .. note:: Any compiled *patterns* will also respect the set case sensitivity. For certain collections it may be ambiguous whether they are padded or not. For example, 1000-1010 can be considered either an unpadded collection or a four padded collection. By default, Clique is conservative and assumes that the collection is unpadded. To change this behaviour, set *assume_padded_when_ambiguous* to True and any ambiguous collection will have a relevant padding set. .. note:: *assume_padded_when_ambiguous* has no effect on collections that are unambiguous. For example, 1-100 will always be considered unpadded regardless of the *assume_padded_when_ambiguous* setting. Return tuple of two lists (collections, remainder) where 'collections' is a list of assembled :py:class:`~clique.collection.Collection` instances and 'remainder' is a list of items that did not belong to any collection. ''' collection_map = defaultdict(set) collections = [] remainder = [] # Compile patterns. flags = 0 if not case_sensitive: flags |= re.IGNORECASE compiled_patterns = [] if patterns is not None: if not patterns: return collections, list(iterable) for pattern in patterns: if isinstance(pattern, basestring): compiled_patterns.append(re.compile(pattern, flags=flags)) else: compiled_patterns.append(pattern) else: compiled_patterns.append(re.compile(DIGITS_PATTERN, flags=flags)) # Process iterable. for item in iterable: matched = False for pattern in compiled_patterns: for match in pattern.finditer(item): index = match.group('index') head = item[:match.start('index')] tail = item[match.end('index'):] if not case_sensitive: head = head.lower() tail = tail.lower() padding = match.group('padding') if padding: padding = len(index) else: padding = 0 key = (head, tail, padding) collection_map[key].add(int(index)) matched = True if not matched: remainder.append(item) # Form collections. merge_candidates = [] for (head, tail, padding), indexes in collection_map.items(): collection = Collection(head, tail, padding, indexes) collections.append(collection) if collection.padding == 0: merge_candidates.append(collection) # Merge together collections that align on padding boundaries. For example, # 0998-0999 and 1000-1001 can be merged into 0998-1001. Note that only # indexes within the padding width limit are merged. If a collection is # entirely merged into another then it will not be included as a separate # collection in the results. fully_merged = [] for collection in collections: if collection.padding == 0: continue for candidate in merge_candidates: if ( candidate.head == collection.head and candidate.tail == collection.tail ): merged_index_count = 0 for index in candidate.indexes: if len(str(abs(index))) == collection.padding: collection.indexes.add(index) merged_index_count += 1 if merged_index_count == len(candidate.indexes): fully_merged.append(candidate) # Filter out fully merged collections. collections = [collection for collection in collections if collection not in fully_merged] # Filter out collections that do not have at least as many indexes as # minimum_items. In addition, add any members of a filtered collection, # which are not members of an unfiltered collection, to the remainder. filtered = [] remainder_candidates = [] for collection in collections: if len(collection.indexes) >= minimum_items: filtered.append(collection) else: for member in collection: remainder_candidates.append(member) for candidate in remainder_candidates: # Check if candidate has already been added to remainder to avoid # duplicate entries. if candidate in remainder: continue has_membership = False for collection in filtered: if candidate in collection: has_membership = True break if not has_membership: remainder.append(candidate) # Set padding for all ambiguous collections according to the # assume_padded_when_ambiguous setting. if assume_padded_when_ambiguous: for collection in filtered: if ( not collection.padding and collection.indexes ): indexes = list(collection.indexes) first_index_width = len(str(indexes[0])) last_index_width = len(str(indexes[-1])) if first_index_width == last_index_width: collection.padding = first_index_width return filtered, remainder
python
def assemble(
    iterable, patterns=None, minimum_items=2, case_sensitive=True,
    assume_padded_when_ambiguous=False
):
    '''Assemble items in *iterable* into discrete collections.

    *patterns* may be specified as a list of regular expressions to limit
    the returned collection possibilities. Use this when interested in
    collections that only match specific patterns. Each pattern must contain
    the expression from :py:data:`DIGITS_PATTERN` exactly once.

    A selection of common expressions is available in :py:data:`PATTERNS`.

    .. note::

        If a pattern is supplied as a string it will be automatically
        compiled to a :py:class:`re.RegexObject` instance for convenience.

    When *patterns* is not specified, collections are formed by examining all
    possible groupings of the items in *iterable* based around common
    numerical components.

    *minimum_items* dictates the minimum number of items a collection must
    have in order to be included in the result. The default is 2, filtering
    out single item collections.

    If *case_sensitive* is False, then items will be treated as part of the
    same collection when they only differ in casing. To avoid ambiguity, the
    resulting collection will always be lowercase. For example,
    "item.0001.dpx" and "Item.0002.dpx" would be part of the same collection,
    "item.%04d.dpx".

    .. note::

        Any compiled *patterns* will also respect the set case sensitivity.

    For certain collections it may be ambiguous whether they are padded or
    not. For example, 1000-1010 can be considered either an unpadded
    collection or a four padded collection. By default, Clique is
    conservative and assumes that the collection is unpadded. To change this
    behaviour, set *assume_padded_when_ambiguous* to True and any ambiguous
    collection will have a relevant padding set.

    .. note::

        *assume_padded_when_ambiguous* has no effect on collections that are
        unambiguous. For example, 1-100 will always be considered unpadded
        regardless of the *assume_padded_when_ambiguous* setting.

    Return tuple of two lists (collections, remainder) where 'collections'
    is a list of assembled :py:class:`~clique.collection.Collection`
    instances and 'remainder' is a list of items that did not belong to any
    collection.

    '''
    collection_map = defaultdict(set)
    collections = []
    remainder = []

    # Compile patterns.
    flags = 0
    if not case_sensitive:
        flags |= re.IGNORECASE

    compiled_patterns = []

    if patterns is not None:
        if not patterns:
            return collections, list(iterable)

        for pattern in patterns:
            if isinstance(pattern, basestring):
                compiled_patterns.append(re.compile(pattern, flags=flags))
            else:
                compiled_patterns.append(pattern)

    else:
        compiled_patterns.append(re.compile(DIGITS_PATTERN, flags=flags))

    # Process iterable.
    for item in iterable:
        matched = False

        for pattern in compiled_patterns:
            for match in pattern.finditer(item):
                index = match.group('index')

                head = item[:match.start('index')]
                tail = item[match.end('index'):]

                if not case_sensitive:
                    head = head.lower()
                    tail = tail.lower()

                padding = match.group('padding')
                if padding:
                    padding = len(index)
                else:
                    padding = 0

                key = (head, tail, padding)
                collection_map[key].add(int(index))
                matched = True

        if not matched:
            remainder.append(item)

    # Form collections.
    merge_candidates = []
    for (head, tail, padding), indexes in collection_map.items():
        collection = Collection(head, tail, padding, indexes)
        collections.append(collection)

        if collection.padding == 0:
            merge_candidates.append(collection)

    # Merge together collections that align on padding boundaries. For
    # example, 0998-0999 and 1000-1001 can be merged into 0998-1001. Note
    # that only indexes within the padding width limit are merged. If a
    # collection is entirely merged into another then it will not be
    # included as a separate collection in the results.
    fully_merged = []
    for collection in collections:
        if collection.padding == 0:
            continue

        for candidate in merge_candidates:
            if (
                candidate.head == collection.head and
                candidate.tail == collection.tail
            ):
                merged_index_count = 0
                for index in candidate.indexes:
                    if len(str(abs(index))) == collection.padding:
                        collection.indexes.add(index)
                        merged_index_count += 1

                if merged_index_count == len(candidate.indexes):
                    fully_merged.append(candidate)

    # Filter out fully merged collections.
    collections = [collection for collection in collections
                   if collection not in fully_merged]

    # Filter out collections that do not have at least as many indexes as
    # minimum_items. In addition, add any members of a filtered collection,
    # which are not members of an unfiltered collection, to the remainder.
    filtered = []
    remainder_candidates = []
    for collection in collections:
        if len(collection.indexes) >= minimum_items:
            filtered.append(collection)
        else:
            for member in collection:
                remainder_candidates.append(member)

    for candidate in remainder_candidates:
        # Check if candidate has already been added to remainder to avoid
        # duplicate entries.
        if candidate in remainder:
            continue

        has_membership = False
        for collection in filtered:
            if candidate in collection:
                has_membership = True
                break

        if not has_membership:
            remainder.append(candidate)

    # Set padding for all ambiguous collections according to the
    # assume_padded_when_ambiguous setting.
    if assume_padded_when_ambiguous:
        for collection in filtered:
            if (
                not collection.padding and collection.indexes
            ):
                indexes = list(collection.indexes)
                first_index_width = len(str(indexes[0]))
                last_index_width = len(str(indexes[-1]))
                if first_index_width == last_index_width:
                    collection.padding = first_index_width

    return filtered, remainder
[ "def", "assemble", "(", "iterable", ",", "patterns", "=", "None", ",", "minimum_items", "=", "2", ",", "case_sensitive", "=", "True", ",", "assume_padded_when_ambiguous", "=", "False", ")", ":", "collection_map", "=", "defaultdict", "(", "set", ")", "collections", "=", "[", "]", "remainder", "=", "[", "]", "# Compile patterns.", "flags", "=", "0", "if", "not", "case_sensitive", ":", "flags", "|=", "re", ".", "IGNORECASE", "compiled_patterns", "=", "[", "]", "if", "patterns", "is", "not", "None", ":", "if", "not", "patterns", ":", "return", "collections", ",", "list", "(", "iterable", ")", "for", "pattern", "in", "patterns", ":", "if", "isinstance", "(", "pattern", ",", "basestring", ")", ":", "compiled_patterns", ".", "append", "(", "re", ".", "compile", "(", "pattern", ",", "flags", "=", "flags", ")", ")", "else", ":", "compiled_patterns", ".", "append", "(", "pattern", ")", "else", ":", "compiled_patterns", ".", "append", "(", "re", ".", "compile", "(", "DIGITS_PATTERN", ",", "flags", "=", "flags", ")", ")", "# Process iterable.", "for", "item", "in", "iterable", ":", "matched", "=", "False", "for", "pattern", "in", "compiled_patterns", ":", "for", "match", "in", "pattern", ".", "finditer", "(", "item", ")", ":", "index", "=", "match", ".", "group", "(", "'index'", ")", "head", "=", "item", "[", ":", "match", ".", "start", "(", "'index'", ")", "]", "tail", "=", "item", "[", "match", ".", "end", "(", "'index'", ")", ":", "]", "if", "not", "case_sensitive", ":", "head", "=", "head", ".", "lower", "(", ")", "tail", "=", "tail", ".", "lower", "(", ")", "padding", "=", "match", ".", "group", "(", "'padding'", ")", "if", "padding", ":", "padding", "=", "len", "(", "index", ")", "else", ":", "padding", "=", "0", "key", "=", "(", "head", ",", "tail", ",", "padding", ")", "collection_map", "[", "key", "]", ".", "add", "(", "int", "(", "index", ")", ")", "matched", "=", "True", "if", "not", "matched", ":", "remainder", ".", "append", "(", "item", ")", "# Form collections.", "merge_candidates", "=", "[", "]", "for", "(", "head", ",", "tail", ",", "padding", ")", ",", "indexes", "in", "collection_map", ".", "items", "(", ")", ":", "collection", "=", "Collection", "(", "head", ",", "tail", ",", "padding", ",", "indexes", ")", "collections", ".", "append", "(", "collection", ")", "if", "collection", ".", "padding", "==", "0", ":", "merge_candidates", ".", "append", "(", "collection", ")", "# Merge together collections that align on padding boundaries. For example,", "# 0998-0999 and 1000-1001 can be merged into 0998-1001. Note that only", "# indexes within the padding width limit are merged. 
If a collection is", "# entirely merged into another then it will not be included as a separate", "# collection in the results.", "fully_merged", "=", "[", "]", "for", "collection", "in", "collections", ":", "if", "collection", ".", "padding", "==", "0", ":", "continue", "for", "candidate", "in", "merge_candidates", ":", "if", "(", "candidate", ".", "head", "==", "collection", ".", "head", "and", "candidate", ".", "tail", "==", "collection", ".", "tail", ")", ":", "merged_index_count", "=", "0", "for", "index", "in", "candidate", ".", "indexes", ":", "if", "len", "(", "str", "(", "abs", "(", "index", ")", ")", ")", "==", "collection", ".", "padding", ":", "collection", ".", "indexes", ".", "add", "(", "index", ")", "merged_index_count", "+=", "1", "if", "merged_index_count", "==", "len", "(", "candidate", ".", "indexes", ")", ":", "fully_merged", ".", "append", "(", "candidate", ")", "# Filter out fully merged collections.", "collections", "=", "[", "collection", "for", "collection", "in", "collections", "if", "collection", "not", "in", "fully_merged", "]", "# Filter out collections that do not have at least as many indexes as", "# minimum_items. In addition, add any members of a filtered collection,", "# which are not members of an unfiltered collection, to the remainder.", "filtered", "=", "[", "]", "remainder_candidates", "=", "[", "]", "for", "collection", "in", "collections", ":", "if", "len", "(", "collection", ".", "indexes", ")", ">=", "minimum_items", ":", "filtered", ".", "append", "(", "collection", ")", "else", ":", "for", "member", "in", "collection", ":", "remainder_candidates", ".", "append", "(", "member", ")", "for", "candidate", "in", "remainder_candidates", ":", "# Check if candidate has already been added to remainder to avoid", "# duplicate entries.", "if", "candidate", "in", "remainder", ":", "continue", "has_membership", "=", "False", "for", "collection", "in", "filtered", ":", "if", "candidate", "in", "collection", ":", "has_membership", "=", "True", "break", "if", "not", "has_membership", ":", "remainder", ".", "append", "(", "candidate", ")", "# Set padding for all ambiguous collections according to the", "# assume_padded_when_ambiguous setting.", "if", "assume_padded_when_ambiguous", ":", "for", "collection", "in", "filtered", ":", "if", "(", "not", "collection", ".", "padding", "and", "collection", ".", "indexes", ")", ":", "indexes", "=", "list", "(", "collection", ".", "indexes", ")", "first_index_width", "=", "len", "(", "str", "(", "indexes", "[", "0", "]", ")", ")", "last_index_width", "=", "len", "(", "str", "(", "indexes", "[", "-", "1", "]", ")", ")", "if", "first_index_width", "==", "last_index_width", ":", "collection", ".", "padding", "=", "first_index_width", "return", "filtered", ",", "remainder" ]
Assemble items in *iterable* into discrete collections.

*patterns* may be specified as a list of regular expressions to limit
the returned collection possibilities. Use this when interested in
collections that only match specific patterns. Each pattern must contain
the expression from :py:data:`DIGITS_PATTERN` exactly once.

A selection of common expressions is available in :py:data:`PATTERNS`.

.. note::

    If a pattern is supplied as a string it will be automatically compiled
    to a :py:class:`re.RegexObject` instance for convenience.

When *patterns* is not specified, collections are formed by examining all
possible groupings of the items in *iterable* based around common numerical
components.

*minimum_items* dictates the minimum number of items a collection must have
in order to be included in the result. The default is 2, filtering out
single item collections.

If *case_sensitive* is False, then items will be treated as part of the same
collection when they only differ in casing. To avoid ambiguity, the
resulting collection will always be lowercase. For example, "item.0001.dpx"
and "Item.0002.dpx" would be part of the same collection, "item.%04d.dpx".

.. note::

    Any compiled *patterns* will also respect the set case sensitivity.

For certain collections it may be ambiguous whether they are padded or not.
For example, 1000-1010 can be considered either an unpadded collection or a
four padded collection. By default, Clique is conservative and assumes that
the collection is unpadded. To change this behaviour, set
*assume_padded_when_ambiguous* to True and any ambiguous collection will
have a relevant padding set.

.. note::

    *assume_padded_when_ambiguous* has no effect on collections that are
    unambiguous. For example, 1-100 will always be considered unpadded
    regardless of the *assume_padded_when_ambiguous* setting.

Return tuple of two lists (collections, remainder) where 'collections' is a
list of assembled :py:class:`~clique.collection.Collection` instances and
'remainder' is a list of items that did not belong to any collection.
[ "Assemble", "items", "in", "*", "iterable", "*", "into", "discreet", "collections", "." ]
train
https://github.com/4degrees/clique/blob/af1d4fef1d60c30a870257199a4d98597d15417d/source/clique/__init__.py#L23-L206
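A minimal usage sketch for assemble (not part of the dataset record; the filenames are illustrative and assume clique is importable):

import clique

items = ['file.0001.jpg', 'file.0002.jpg', 'scene.mb']
collections, remainder = clique.assemble(items)
for collection in collections:
    print(collection.format())  # 'file.%04d.jpg [1-2]'
print(remainder)                # ['scene.mb']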
4degrees/clique
source/clique/__init__.py
parse
def parse(value, pattern='{head}{padding}{tail} [{ranges}]'):
    '''Parse *value* into a :py:class:`~clique.collection.Collection`.

    Use *pattern* to extract information from *value*. It may make use of the
    following keys:

        * *head* - Common leading part of the collection.
        * *tail* - Common trailing part of the collection.
        * *padding* - Padding value in ``%0d`` format.
        * *range* - Total range in the form ``start-end``.
        * *ranges* - Comma separated ranges of indexes.
        * *holes* - Comma separated ranges of missing indexes.

    .. note::

        *holes* only makes sense if *range* or *ranges* is also present.

    '''
    # Construct regular expression for given pattern.
    expressions = {
        'head': '(?P<head>.*)',
        'tail': '(?P<tail>.*)',
        'padding': '%(?P<padding>\d*)d',
        'range': '(?P<range>\d+-\d+)?',
        'ranges': '(?P<ranges>[\d ,\-]+)?',
        'holes': '(?P<holes>[\d ,\-]+)'
    }

    pattern_regex = re.escape(pattern)
    for key, expression in expressions.items():
        pattern_regex = pattern_regex.replace(
            '\{{{0}\}}'.format(key),
            expression
        )
    pattern_regex = '^{0}$'.format(pattern_regex)

    # Match pattern against value and use results to construct collection.
    match = re.search(pattern_regex, value)
    if match is None:
        raise ValueError('Value did not match pattern.')

    groups = match.groupdict()
    if 'padding' in groups and groups['padding']:
        groups['padding'] = int(groups['padding'])
    else:
        groups['padding'] = 0

    # Create collection and then add indexes.
    collection = Collection(
        groups.get('head', ''),
        groups.get('tail', ''),
        groups['padding']
    )

    if groups.get('range', None) is not None:
        start, end = map(int, groups['range'].split('-'))
        collection.indexes.update(range(start, end + 1))

    if groups.get('ranges', None) is not None:
        parts = [part.strip() for part in groups['ranges'].split(',')]
        for part in parts:
            index_range = list(map(int, part.split('-', 2)))

            if len(index_range) > 1:
                # Index range.
                for index in range(index_range[0], index_range[1] + 1):
                    collection.indexes.add(index)
            else:
                # Single index.
                collection.indexes.add(index_range[0])

    if 'holes' in groups:
        parts = [part.strip() for part in groups['holes'].split(',')]
        for part in parts:
            # Wrap in list() so len() below also works under Python 3,
            # where map() returns an iterator.
            index_range = list(map(int, part.split('-', 2)))

            if len(index_range) > 1:
                # Index range.
                for index in range(index_range[0], index_range[1] + 1):
                    collection.indexes.remove(index)
            else:
                # Single index.
                collection.indexes.remove(index_range[0])

    return collection
python
def parse(value, pattern='{head}{padding}{tail} [{ranges}]'):
    '''Parse *value* into a :py:class:`~clique.collection.Collection`.

    Use *pattern* to extract information from *value*. It may make use of the
    following keys:

        * *head* - Common leading part of the collection.
        * *tail* - Common trailing part of the collection.
        * *padding* - Padding value in ``%0d`` format.
        * *range* - Total range in the form ``start-end``.
        * *ranges* - Comma separated ranges of indexes.
        * *holes* - Comma separated ranges of missing indexes.

    .. note::

        *holes* only makes sense if *range* or *ranges* is also present.

    '''
    # Construct regular expression for given pattern.
    expressions = {
        'head': '(?P<head>.*)',
        'tail': '(?P<tail>.*)',
        'padding': '%(?P<padding>\d*)d',
        'range': '(?P<range>\d+-\d+)?',
        'ranges': '(?P<ranges>[\d ,\-]+)?',
        'holes': '(?P<holes>[\d ,\-]+)'
    }

    pattern_regex = re.escape(pattern)
    for key, expression in expressions.items():
        pattern_regex = pattern_regex.replace(
            '\{{{0}\}}'.format(key),
            expression
        )
    pattern_regex = '^{0}$'.format(pattern_regex)

    # Match pattern against value and use results to construct collection.
    match = re.search(pattern_regex, value)
    if match is None:
        raise ValueError('Value did not match pattern.')

    groups = match.groupdict()
    if 'padding' in groups and groups['padding']:
        groups['padding'] = int(groups['padding'])
    else:
        groups['padding'] = 0

    # Create collection and then add indexes.
    collection = Collection(
        groups.get('head', ''),
        groups.get('tail', ''),
        groups['padding']
    )

    if groups.get('range', None) is not None:
        start, end = map(int, groups['range'].split('-'))
        collection.indexes.update(range(start, end + 1))

    if groups.get('ranges', None) is not None:
        parts = [part.strip() for part in groups['ranges'].split(',')]
        for part in parts:
            index_range = list(map(int, part.split('-', 2)))

            if len(index_range) > 1:
                # Index range.
                for index in range(index_range[0], index_range[1] + 1):
                    collection.indexes.add(index)
            else:
                # Single index.
                collection.indexes.add(index_range[0])

    if 'holes' in groups:
        parts = [part.strip() for part in groups['holes'].split(',')]
        for part in parts:
            # Wrap in list() so len() below also works under Python 3,
            # where map() returns an iterator.
            index_range = list(map(int, part.split('-', 2)))

            if len(index_range) > 1:
                # Index range.
                for index in range(index_range[0], index_range[1] + 1):
                    collection.indexes.remove(index)
            else:
                # Single index.
                collection.indexes.remove(index_range[0])

    return collection
[ "def", "parse", "(", "value", ",", "pattern", "=", "'{head}{padding}{tail} [{ranges}]'", ")", ":", "# Construct regular expression for given pattern.", "expressions", "=", "{", "'head'", ":", "'(?P<head>.*)'", ",", "'tail'", ":", "'(?P<tail>.*)'", ",", "'padding'", ":", "'%(?P<padding>\\d*)d'", ",", "'range'", ":", "'(?P<range>\\d+-\\d+)?'", ",", "'ranges'", ":", "'(?P<ranges>[\\d ,\\-]+)?'", ",", "'holes'", ":", "'(?P<holes>[\\d ,\\-]+)'", "}", "pattern_regex", "=", "re", ".", "escape", "(", "pattern", ")", "for", "key", ",", "expression", "in", "expressions", ".", "items", "(", ")", ":", "pattern_regex", "=", "pattern_regex", ".", "replace", "(", "'\\{{{0}\\}}'", ".", "format", "(", "key", ")", ",", "expression", ")", "pattern_regex", "=", "'^{0}$'", ".", "format", "(", "pattern_regex", ")", "# Match pattern against value and use results to construct collection.", "match", "=", "re", ".", "search", "(", "pattern_regex", ",", "value", ")", "if", "match", "is", "None", ":", "raise", "ValueError", "(", "'Value did not match pattern.'", ")", "groups", "=", "match", ".", "groupdict", "(", ")", "if", "'padding'", "in", "groups", "and", "groups", "[", "'padding'", "]", ":", "groups", "[", "'padding'", "]", "=", "int", "(", "groups", "[", "'padding'", "]", ")", "else", ":", "groups", "[", "'padding'", "]", "=", "0", "# Create collection and then add indexes.", "collection", "=", "Collection", "(", "groups", ".", "get", "(", "'head'", ",", "''", ")", ",", "groups", ".", "get", "(", "'tail'", ",", "''", ")", ",", "groups", "[", "'padding'", "]", ")", "if", "groups", ".", "get", "(", "'range'", ",", "None", ")", "is", "not", "None", ":", "start", ",", "end", "=", "map", "(", "int", ",", "groups", "[", "'range'", "]", ".", "split", "(", "'-'", ")", ")", "collection", ".", "indexes", ".", "update", "(", "range", "(", "start", ",", "end", "+", "1", ")", ")", "if", "groups", ".", "get", "(", "'ranges'", ",", "None", ")", "is", "not", "None", ":", "parts", "=", "[", "part", ".", "strip", "(", ")", "for", "part", "in", "groups", "[", "'ranges'", "]", ".", "split", "(", "','", ")", "]", "for", "part", "in", "parts", ":", "index_range", "=", "list", "(", "map", "(", "int", ",", "part", ".", "split", "(", "'-'", ",", "2", ")", ")", ")", "if", "len", "(", "index_range", ")", ">", "1", ":", "# Index range.", "for", "index", "in", "range", "(", "index_range", "[", "0", "]", ",", "index_range", "[", "1", "]", "+", "1", ")", ":", "collection", ".", "indexes", ".", "add", "(", "index", ")", "else", ":", "# Single index.", "collection", ".", "indexes", ".", "add", "(", "index_range", "[", "0", "]", ")", "if", "'holes'", "in", "groups", ":", "parts", "=", "[", "part", ".", "strip", "(", ")", "for", "part", "in", "groups", "[", "'holes'", "]", ".", "split", "(", "','", ")", "]", "for", "part", "in", "parts", ":", "index_range", "=", "map", "(", "int", ",", "part", ".", "split", "(", "'-'", ",", "2", ")", ")", "if", "len", "(", "index_range", ")", ">", "1", ":", "# Index range.", "for", "index", "in", "range", "(", "index_range", "[", "0", "]", ",", "index_range", "[", "1", "]", "+", "1", ")", ":", "collection", ".", "indexes", ".", "remove", "(", "index", ")", "else", ":", "# Single index.", "collection", ".", "indexes", ".", "remove", "(", "index_range", "[", "0", "]", ")", "return", "collection" ]
Parse *value* into a :py:class:`~clique.collection.Collection`.

Use *pattern* to extract information from *value*. It may make use of the
following keys:

    * *head* - Common leading part of the collection.
    * *tail* - Common trailing part of the collection.
    * *padding* - Padding value in ``%0d`` format.
    * *range* - Total range in the form ``start-end``.
    * *ranges* - Comma separated ranges of indexes.
    * *holes* - Comma separated ranges of missing indexes.

.. note::

    *holes* only makes sense if *range* or *ranges* is also present.
[ "Parse", "*", "value", "*", "into", "a", ":", "py", ":", "class", ":", "~clique", ".", "collection", ".", "Collection", "." ]
train
https://github.com/4degrees/clique/blob/af1d4fef1d60c30a870257199a4d98597d15417d/source/clique/__init__.py#L209-L293
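A short sketch of parse round-tripping a formatted string back into a collection (illustrative; assumes clique is importable and that iterating a Collection yields its formatted items):

import clique

collection = clique.parse('file.%04d.jpg [1-3, 5]')
print(list(collection))
# ['file.0001.jpg', 'file.0002.jpg', 'file.0003.jpg', 'file.0005.jpg']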
4degrees/clique
source/clique/sorted_set.py
SortedSet.add
def add(self, item):
        '''Add *item*.'''
        if not item in self:
            index = bisect.bisect_right(self._members, item)
            self._members.insert(index, item)
python
def add(self, item):
        '''Add *item*.'''
        if not item in self:
            index = bisect.bisect_right(self._members, item)
            self._members.insert(index, item)
[ "def", "add", "(", "self", ",", "item", ")", ":", "if", "not", "item", "in", "self", ":", "index", "=", "bisect", ".", "bisect_right", "(", "self", ".", "_members", ",", "item", ")", "self", ".", "_members", ".", "insert", "(", "index", ",", "item", ")" ]
Add *item*.
[ "Add", "*", "item", "*", "." ]
train
https://github.com/4degrees/clique/blob/af1d4fef1d60c30a870257199a4d98597d15417d/source/clique/sorted_set.py#L39-L43
4degrees/clique
source/clique/sorted_set.py
SortedSet.discard
def discard(self, item):
        '''Remove *item*.'''
        index = self._index(item)
        if index >= 0:
            del self._members[index]
python
def discard(self, item):
        '''Remove *item*.'''
        index = self._index(item)
        if index >= 0:
            del self._members[index]
[ "def", "discard", "(", "self", ",", "item", ")", ":", "index", "=", "self", ".", "_index", "(", "item", ")", "if", "index", ">=", "0", ":", "del", "self", ".", "_members", "[", "index", "]" ]
Remove *item*.
[ "Remove", "*", "item", "*", "." ]
train
https://github.com/4degrees/clique/blob/af1d4fef1d60c30a870257199a4d98597d15417d/source/clique/sorted_set.py#L45-L49
4degrees/clique
source/clique/sorted_set.py
SortedSet._index
def _index(self, item):
        '''Return index of *item* in member list or -1 if not present.'''
        index = bisect.bisect_left(self._members, item)
        if index != len(self) and self._members[index] == item:
            return index

        return -1
python
def _index(self, item):
        '''Return index of *item* in member list or -1 if not present.'''
        index = bisect.bisect_left(self._members, item)
        if index != len(self) and self._members[index] == item:
            return index

        return -1
[ "def", "_index", "(", "self", ",", "item", ")", ":", "index", "=", "bisect", ".", "bisect_left", "(", "self", ".", "_members", ",", "item", ")", "if", "index", "!=", "len", "(", "self", ")", "and", "self", ".", "_members", "[", "index", "]", "==", "item", ":", "return", "index", "return", "-", "1" ]
Return index of *item* in member list or -1 if not present.
[ "Return", "index", "of", "*", "item", "*", "in", "member", "list", "or", "-", "1", "if", "not", "present", "." ]
train
https://github.com/4degrees/clique/blob/af1d4fef1d60c30a870257199a4d98597d15417d/source/clique/sorted_set.py#L56-L62
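A quick sketch tying the three SortedSet methods above together (illustrative; assumes SortedSet is importable from clique.sorted_set and is constructible without arguments):

from clique.sorted_set import SortedSet

members = SortedSet()
members.add(5)
members.add(1)
members.add(5)        # duplicate: add() checks membership first, so ignored
members.discard(9)    # absent item: _index() returns -1, so discard is a no-op
print(list(members))  # [1, 5] - kept sorted via bisect insertion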
4degrees/clique
source/clique/collection.py
Collection._update_expression
def _update_expression(self):
        '''Update internal expression.'''
        self._expression = re.compile(
            '^{0}(?P<index>(?P<padding>0*)\d+?){1}$'
            .format(re.escape(self.head), re.escape(self.tail))
        )
python
def _update_expression(self):
        '''Update internal expression.'''
        self._expression = re.compile(
            '^{0}(?P<index>(?P<padding>0*)\d+?){1}$'
            .format(re.escape(self.head), re.escape(self.tail))
        )
[ "def", "_update_expression", "(", "self", ")", ":", "self", ".", "_expression", "=", "re", ".", "compile", "(", "'^{0}(?P<index>(?P<padding>0*)\\d+?){1}$'", ".", "format", "(", "re", ".", "escape", "(", "self", ".", "head", ")", ",", "re", ".", "escape", "(", "self", ".", "tail", ")", ")", ")" ]
Update internal expression.
[ "Update", "internal", "expression", "." ]
train
https://github.com/4degrees/clique/blob/af1d4fef1d60c30a870257199a4d98597d15417d/source/clique/collection.py#L77-L82
4degrees/clique
source/clique/collection.py
Collection.match
def match(self, item):
        '''Return whether *item* matches this collection expression.

        If a match is successful return data about the match otherwise return
        None.

        '''
        match = self._expression.match(item)
        if not match:
            return None

        index = match.group('index')
        padded = False
        if match.group('padding'):
            padded = True

        if self.padding == 0:
            if padded:
                return None

        elif len(index) != self.padding:
            return None

        return match
python
def match(self, item):
        '''Return whether *item* matches this collection expression.

        If a match is successful return data about the match otherwise return
        None.

        '''
        match = self._expression.match(item)
        if not match:
            return None

        index = match.group('index')
        padded = False
        if match.group('padding'):
            padded = True

        if self.padding == 0:
            if padded:
                return None

        elif len(index) != self.padding:
            return None

        return match
[ "def", "match", "(", "self", ",", "item", ")", ":", "match", "=", "self", ".", "_expression", ".", "match", "(", "item", ")", "if", "not", "match", ":", "return", "None", "index", "=", "match", ".", "group", "(", "'index'", ")", "padded", "=", "False", "if", "match", ".", "group", "(", "'padding'", ")", ":", "padded", "=", "True", "if", "self", ".", "padding", "==", "0", ":", "if", "padded", ":", "return", "None", "elif", "len", "(", "index", ")", "!=", "self", ".", "padding", ":", "return", "None", "return", "match" ]
Return whether *item* matches this collection expression. If a match is successful return data about the match otherwise return None.
[ "Return", "whether", "*", "item", "*", "matches", "this", "collection", "expression", "." ]
train
https://github.com/4degrees/clique/blob/af1d4fef1d60c30a870257199a4d98597d15417d/source/clique/collection.py#L170-L193
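A sketch of how padding affects match, using the public Collection class (illustrative values; assumes clique is importable):

import clique

padded = clique.Collection('file.', '.jpg', 4)
print(padded.match('file.0001.jpg') is not None)  # True: index width is 4
print(padded.match('file.1.jpg'))                 # None: wrong index width

unpadded = clique.Collection('file.', '.jpg', 0)
print(unpadded.match('file.0001.jpg'))            # None: leading zeros imply padding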
4degrees/clique
source/clique/collection.py
Collection.add
def add(self, item):
        '''Add *item* to collection.

        raise :py:class:`~clique.error.CollectionError` if *item* cannot be
        added to the collection.

        '''
        match = self.match(item)
        if match is None:
            raise clique.error.CollectionError(
                'Item does not match collection expression.'
            )

        self.indexes.add(int(match.group('index')))
python
def add(self, item):
        '''Add *item* to collection.

        raise :py:class:`~clique.error.CollectionError` if *item* cannot be
        added to the collection.

        '''
        match = self.match(item)
        if match is None:
            raise clique.error.CollectionError(
                'Item does not match collection expression.'
            )

        self.indexes.add(int(match.group('index')))
[ "def", "add", "(", "self", ",", "item", ")", ":", "match", "=", "self", ".", "match", "(", "item", ")", "if", "match", "is", "None", ":", "raise", "clique", ".", "error", ".", "CollectionError", "(", "'Item does not match collection expression.'", ")", "self", ".", "indexes", ".", "add", "(", "int", "(", "match", ".", "group", "(", "'index'", ")", ")", ")" ]
Add *item* to collection. raise :py:class:`~clique.error.CollectionError` if *item* cannot be added to the collection.
[ "Add", "*", "item", "*", "to", "collection", "." ]
train
https://github.com/4degrees/clique/blob/af1d4fef1d60c30a870257199a4d98597d15417d/source/clique/collection.py#L195-L208
4degrees/clique
source/clique/collection.py
Collection.remove
def remove(self, item):
        '''Remove *item* from collection.

        raise :py:class:`~clique.error.CollectionError` if *item* cannot be
        removed from the collection.

        '''
        match = self.match(item)
        if match is None:
            raise clique.error.CollectionError(
                'Item not present in collection.'
            )

        index = int(match.group('index'))
        try:
            self.indexes.remove(index)
        except KeyError:
            raise clique.error.CollectionError(
                'Item not present in collection.'
            )
python
def remove(self, item):
        '''Remove *item* from collection.

        raise :py:class:`~clique.error.CollectionError` if *item* cannot be
        removed from the collection.

        '''
        match = self.match(item)
        if match is None:
            raise clique.error.CollectionError(
                'Item not present in collection.'
            )

        index = int(match.group('index'))
        try:
            self.indexes.remove(index)
        except KeyError:
            raise clique.error.CollectionError(
                'Item not present in collection.'
            )
[ "def", "remove", "(", "self", ",", "item", ")", ":", "match", "=", "self", ".", "match", "(", "item", ")", "if", "match", "is", "None", ":", "raise", "clique", ".", "error", ".", "CollectionError", "(", "'Item not present in collection.'", ")", "index", "=", "int", "(", "match", ".", "group", "(", "'index'", ")", ")", "try", ":", "self", ".", "indexes", ".", "remove", "(", "index", ")", "except", "KeyError", ":", "raise", "clique", ".", "error", ".", "CollectionError", "(", "'Item not present in collection.'", ")" ]
Remove *item* from collection. raise :py:class:`~clique.error.CollectionError` if *item* cannot be removed from the collection.
[ "Remove", "*", "item", "*", "from", "collection", "." ]
train
https://github.com/4degrees/clique/blob/af1d4fef1d60c30a870257199a4d98597d15417d/source/clique/collection.py#L210-L229
4degrees/clique
source/clique/collection.py
Collection.format
def format(self, pattern='{head}{padding}{tail} [{ranges}]'):
        '''Return string representation as specified by *pattern*.

        Pattern can be any format accepted by Python's standard format
        function and will receive the following keyword arguments as context:

            * *head* - Common leading part of the collection.
            * *tail* - Common trailing part of the collection.
            * *padding* - Padding value in ``%0d`` format.
            * *range* - Total range in the form ``start-end``
            * *ranges* - Comma separated ranges of indexes.
            * *holes* - Comma separated ranges of missing indexes.

        '''
        data = {}
        data['head'] = self.head
        data['tail'] = self.tail

        if self.padding:
            data['padding'] = '%0{0}d'.format(self.padding)
        else:
            data['padding'] = '%d'

        if '{holes}' in pattern:
            data['holes'] = self.holes().format('{ranges}')

        if '{range}' in pattern or '{ranges}' in pattern:
            indexes = list(self.indexes)
            indexes_count = len(indexes)

            if indexes_count == 0:
                data['range'] = ''

            elif indexes_count == 1:
                data['range'] = '{0}'.format(indexes[0])

            else:
                data['range'] = '{0}-{1}'.format(
                    indexes[0], indexes[-1]
                )

        if '{ranges}' in pattern:
            separated = self.separate()
            if len(separated) > 1:
                ranges = [collection.format('{range}')
                          for collection in separated]

            else:
                ranges = [data['range']]

            data['ranges'] = ', '.join(ranges)

        return pattern.format(**data)
python
def format(self, pattern='{head}{padding}{tail} [{ranges}]'):
        '''Return string representation as specified by *pattern*.

        Pattern can be any format accepted by Python's standard format
        function and will receive the following keyword arguments as context:

            * *head* - Common leading part of the collection.
            * *tail* - Common trailing part of the collection.
            * *padding* - Padding value in ``%0d`` format.
            * *range* - Total range in the form ``start-end``
            * *ranges* - Comma separated ranges of indexes.
            * *holes* - Comma separated ranges of missing indexes.

        '''
        data = {}
        data['head'] = self.head
        data['tail'] = self.tail

        if self.padding:
            data['padding'] = '%0{0}d'.format(self.padding)
        else:
            data['padding'] = '%d'

        if '{holes}' in pattern:
            data['holes'] = self.holes().format('{ranges}')

        if '{range}' in pattern or '{ranges}' in pattern:
            indexes = list(self.indexes)
            indexes_count = len(indexes)

            if indexes_count == 0:
                data['range'] = ''

            elif indexes_count == 1:
                data['range'] = '{0}'.format(indexes[0])

            else:
                data['range'] = '{0}-{1}'.format(
                    indexes[0], indexes[-1]
                )

        if '{ranges}' in pattern:
            separated = self.separate()
            if len(separated) > 1:
                ranges = [collection.format('{range}')
                          for collection in separated]

            else:
                ranges = [data['range']]

            data['ranges'] = ', '.join(ranges)

        return pattern.format(**data)
[ "def", "format", "(", "self", ",", "pattern", "=", "'{head}{padding}{tail} [{ranges}]'", ")", ":", "data", "=", "{", "}", "data", "[", "'head'", "]", "=", "self", ".", "head", "data", "[", "'tail'", "]", "=", "self", ".", "tail", "if", "self", ".", "padding", ":", "data", "[", "'padding'", "]", "=", "'%0{0}d'", ".", "format", "(", "self", ".", "padding", ")", "else", ":", "data", "[", "'padding'", "]", "=", "'%d'", "if", "'{holes}'", "in", "pattern", ":", "data", "[", "'holes'", "]", "=", "self", ".", "holes", "(", ")", ".", "format", "(", "'{ranges}'", ")", "if", "'{range}'", "in", "pattern", "or", "'{ranges}'", "in", "pattern", ":", "indexes", "=", "list", "(", "self", ".", "indexes", ")", "indexes_count", "=", "len", "(", "indexes", ")", "if", "indexes_count", "==", "0", ":", "data", "[", "'range'", "]", "=", "''", "elif", "indexes_count", "==", "1", ":", "data", "[", "'range'", "]", "=", "'{0}'", ".", "format", "(", "indexes", "[", "0", "]", ")", "else", ":", "data", "[", "'range'", "]", "=", "'{0}-{1}'", ".", "format", "(", "indexes", "[", "0", "]", ",", "indexes", "[", "-", "1", "]", ")", "if", "'{ranges}'", "in", "pattern", ":", "separated", "=", "self", ".", "separate", "(", ")", "if", "len", "(", "separated", ")", ">", "1", ":", "ranges", "=", "[", "collection", ".", "format", "(", "'{range}'", ")", "for", "collection", "in", "separated", "]", "else", ":", "ranges", "=", "[", "data", "[", "'range'", "]", "]", "data", "[", "'ranges'", "]", "=", "', '", ".", "join", "(", "ranges", ")", "return", "pattern", ".", "format", "(", "*", "*", "data", ")" ]
Return string representation as specified by *pattern*.

Pattern can be any format accepted by Python's standard format function
and will receive the following keyword arguments as context:

    * *head* - Common leading part of the collection.
    * *tail* - Common trailing part of the collection.
    * *padding* - Padding value in ``%0d`` format.
    * *range* - Total range in the form ``start-end``
    * *ranges* - Comma separated ranges of indexes.
    * *holes* - Comma separated ranges of missing indexes.
[ "Return", "string", "representation", "as", "specified", "by", "*", "pattern", "*", "." ]
train
https://github.com/4degrees/clique/blob/af1d4fef1d60c30a870257199a4d98597d15417d/source/clique/collection.py#L231-L283
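A few illustrative format calls (assumes clique is importable):

import clique

collection = clique.Collection('file.', '.jpg', 4, indexes=set([1, 2, 5]))
print(collection.format())                         # 'file.%04d.jpg [1-2, 5]'
print(collection.format('{head}{padding}{tail}'))  # 'file.%04d.jpg'
print(collection.format('missing: {holes}'))       # 'missing: 3-4'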
4degrees/clique
source/clique/collection.py
Collection.is_contiguous
def is_contiguous(self):
        '''Return whether entire collection is contiguous.'''
        previous = None
        for index in self.indexes:
            if previous is None:
                previous = index
                continue

            if index != (previous + 1):
                return False

            previous = index

        return True
python
def is_contiguous(self):
        '''Return whether entire collection is contiguous.'''
        previous = None
        for index in self.indexes:
            if previous is None:
                previous = index
                continue

            if index != (previous + 1):
                return False

            previous = index

        return True
[ "def", "is_contiguous", "(", "self", ")", ":", "previous", "=", "None", "for", "index", "in", "self", ".", "indexes", ":", "if", "previous", "is", "None", ":", "previous", "=", "index", "continue", "if", "index", "!=", "(", "previous", "+", "1", ")", ":", "return", "False", "previous", "=", "index", "return", "True" ]
Return whether entire collection is contiguous.
[ "Return", "whether", "entire", "collection", "is", "contiguous", "." ]
train
https://github.com/4degrees/clique/blob/af1d4fef1d60c30a870257199a4d98597d15417d/source/clique/collection.py#L285-L298
4degrees/clique
source/clique/collection.py
Collection.holes
def holes(self):
        '''Return holes in collection.

        Return :py:class:`~clique.collection.Collection` of missing indexes.

        '''
        missing = set([])
        previous = None
        for index in self.indexes:
            if previous is None:
                previous = index
                continue

            if index != (previous + 1):
                missing.update(range(previous + 1, index))

            previous = index

        return Collection(self.head, self.tail, self.padding, indexes=missing)
python
def holes(self):
        '''Return holes in collection.

        Return :py:class:`~clique.collection.Collection` of missing indexes.

        '''
        missing = set([])
        previous = None
        for index in self.indexes:
            if previous is None:
                previous = index
                continue

            if index != (previous + 1):
                missing.update(range(previous + 1, index))

            previous = index

        return Collection(self.head, self.tail, self.padding, indexes=missing)
[ "def", "holes", "(", "self", ")", ":", "missing", "=", "set", "(", "[", "]", ")", "previous", "=", "None", "for", "index", "in", "self", ".", "indexes", ":", "if", "previous", "is", "None", ":", "previous", "=", "index", "continue", "if", "index", "!=", "(", "previous", "+", "1", ")", ":", "missing", ".", "update", "(", "range", "(", "previous", "+", "1", ",", "index", ")", ")", "previous", "=", "index", "return", "Collection", "(", "self", ".", "head", ",", "self", ".", "tail", ",", "self", ".", "padding", ",", "indexes", "=", "missing", ")" ]
Return holes in collection. Return :py:class:`~clique.collection.Collection` of missing indexes.
[ "Return", "holes", "in", "collection", "." ]
train
https://github.com/4degrees/clique/blob/af1d4fef1d60c30a870257199a4d98597d15417d/source/clique/collection.py#L300-L318
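A sketch showing is_contiguous and holes together (illustrative; assumes clique is importable):

import clique

collection = clique.Collection('v', '', 0, indexes=set([1, 2, 4, 5]))
print(collection.is_contiguous())             # False
print(collection.holes().format('{ranges}'))  # '3'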
4degrees/clique
source/clique/collection.py
Collection.is_compatible
def is_compatible(self, collection):
        '''Return whether *collection* is compatible with this collection.

        To be compatible *collection* must have the same head, tail and
        padding properties as this collection.

        '''
        return all([
            isinstance(collection, Collection),
            collection.head == self.head,
            collection.tail == self.tail,
            collection.padding == self.padding
        ])
python
def is_compatible(self, collection):
        '''Return whether *collection* is compatible with this collection.

        To be compatible *collection* must have the same head, tail and
        padding properties as this collection.

        '''
        return all([
            isinstance(collection, Collection),
            collection.head == self.head,
            collection.tail == self.tail,
            collection.padding == self.padding
        ])
[ "def", "is_compatible", "(", "self", ",", "collection", ")", ":", "return", "all", "(", "[", "isinstance", "(", "collection", ",", "Collection", ")", ",", "collection", ".", "head", "==", "self", ".", "head", ",", "collection", ".", "tail", "==", "self", ".", "tail", ",", "collection", ".", "padding", "==", "self", ".", "padding", "]", ")" ]
Return whether *collection* is compatible with this collection. To be compatible *collection* must have the same head, tail and padding properties as this collection.
[ "Return", "whether", "*", "collection", "*", "is", "compatible", "with", "this", "collection", "." ]
train
https://github.com/4degrees/clique/blob/af1d4fef1d60c30a870257199a4d98597d15417d/source/clique/collection.py#L320-L332
4degrees/clique
source/clique/collection.py
Collection.merge
def merge(self, collection):
        '''Merge *collection* into this collection.

        If the *collection* is compatible with this collection then update
        indexes with all indexes in *collection*.

        raise :py:class:`~clique.error.CollectionError` if *collection* is
        not compatible with this collection.

        '''
        if not self.is_compatible(collection):
            raise clique.error.CollectionError('Collection is not compatible '
                                               'with this collection.')

        self.indexes.update(collection.indexes)
python
def merge(self, collection):
        '''Merge *collection* into this collection.

        If the *collection* is compatible with this collection then update
        indexes with all indexes in *collection*.

        raise :py:class:`~clique.error.CollectionError` if *collection* is
        not compatible with this collection.

        '''
        if not self.is_compatible(collection):
            raise clique.error.CollectionError('Collection is not compatible '
                                               'with this collection.')

        self.indexes.update(collection.indexes)
[ "def", "merge", "(", "self", ",", "collection", ")", ":", "if", "not", "self", ".", "is_compatible", "(", "collection", ")", ":", "raise", "clique", ".", "error", ".", "CollectionError", "(", "'Collection is not compatible '", "'with this collection.'", ")", "self", ".", "indexes", ".", "update", "(", "collection", ".", "indexes", ")" ]
Merge *collection* into this collection. If the *collection* is compatible with this collection then update indexes with all indexes in *collection*. raise :py:class:`~clique.error.CollectionError` if *collection* is not compatible with this collection.
[ "Merge", "*", "collection", "*", "into", "this", "collection", "." ]
train
https://github.com/4degrees/clique/blob/af1d4fef1d60c30a870257199a4d98597d15417d/source/clique/collection.py#L334-L348
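A sketch of the is_compatible/merge pairing (illustrative; assumes clique is importable):

import clique

a = clique.Collection('file.', '.jpg', 4, indexes=set([1, 2]))
b = clique.Collection('file.', '.jpg', 4, indexes=set([3]))
if a.is_compatible(b):
    a.merge(b)
print(a.format('{ranges}'))  # '1-3'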
4degrees/clique
source/clique/collection.py
Collection.separate
def separate(self):
        '''Return contiguous parts of collection as separate collections.

        Return as list of :py:class:`~clique.collection.Collection`
        instances.

        '''
        collections = []
        start = None
        end = None

        for index in self.indexes:
            if start is None:
                start = index
                end = start
                continue

            if index != (end + 1):
                collections.append(
                    Collection(self.head, self.tail, self.padding,
                               indexes=set(range(start, end + 1)))
                )
                start = index

            end = index

        if start is None:
            collections.append(
                Collection(self.head, self.tail, self.padding)
            )
        else:
            collections.append(
                Collection(self.head, self.tail, self.padding,
                           indexes=range(start, end + 1))
            )

        return collections
python
def separate(self):
        '''Return contiguous parts of collection as separate collections.

        Return as list of :py:class:`~clique.collection.Collection`
        instances.

        '''
        collections = []
        start = None
        end = None

        for index in self.indexes:
            if start is None:
                start = index
                end = start
                continue

            if index != (end + 1):
                collections.append(
                    Collection(self.head, self.tail, self.padding,
                               indexes=set(range(start, end + 1)))
                )
                start = index

            end = index

        if start is None:
            collections.append(
                Collection(self.head, self.tail, self.padding)
            )
        else:
            collections.append(
                Collection(self.head, self.tail, self.padding,
                           indexes=range(start, end + 1))
            )

        return collections
[ "def", "separate", "(", "self", ")", ":", "collections", "=", "[", "]", "start", "=", "None", "end", "=", "None", "for", "index", "in", "self", ".", "indexes", ":", "if", "start", "is", "None", ":", "start", "=", "index", "end", "=", "start", "continue", "if", "index", "!=", "(", "end", "+", "1", ")", ":", "collections", ".", "append", "(", "Collection", "(", "self", ".", "head", ",", "self", ".", "tail", ",", "self", ".", "padding", ",", "indexes", "=", "set", "(", "range", "(", "start", ",", "end", "+", "1", ")", ")", ")", ")", "start", "=", "index", "end", "=", "index", "if", "start", "is", "None", ":", "collections", ".", "append", "(", "Collection", "(", "self", ".", "head", ",", "self", ".", "tail", ",", "self", ".", "padding", ")", ")", "else", ":", "collections", ".", "append", "(", "Collection", "(", "self", ".", "head", ",", "self", ".", "tail", ",", "self", ".", "padding", ",", "indexes", "=", "range", "(", "start", ",", "end", "+", "1", ")", ")", ")", "return", "collections" ]
Return contiguous parts of collection as separate collections. Return as list of :py:class:`~clique.collection.Collection` instances.
[ "Return", "contiguous", "parts", "of", "collection", "as", "separate", "collections", "." ]
train
https://github.com/4degrees/clique/blob/af1d4fef1d60c30a870257199a4d98597d15417d/source/clique/collection.py#L350-L385
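A sketch of separate splitting a gapped collection (illustrative; assumes clique is importable):

import clique

collection = clique.Collection('f.', '.dpx', 0, indexes=set([1, 2, 8, 9]))
for part in collection.separate():
    print(part.format('{range}'))  # '1-2', then '8-9'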
UDST/osmnet
osmnet/config.py
format_check
def format_check(settings):
    """
    Check the format of an osmnet_config object.

    Parameters
    ----------
    settings : dict
        osmnet_config as a dictionary

    Returns
    -------
    Nothing
    """
    valid_keys = ['logs_folder', 'log_file', 'log_console', 'log_name',
                  'log_filename', 'keep_osm_tags']

    for key in list(settings.keys()):
        assert key in valid_keys, \
            ('{} not found in list of valid configuration keys').format(key)
        assert isinstance(key, str), ('{} must be a string').format(key)
        if key == 'keep_osm_tags':
            assert isinstance(settings[key], list), \
                ('{} must be a list').format(key)
            for value in settings[key]:
                assert all(isinstance(element, str) for element in value), \
                    'all elements must be a string'
        if key == 'log_file' or key == 'log_console':
            assert isinstance(settings[key], bool), \
                ('{} must be boolean').format(key)
python
def format_check(settings):
    """
    Check the format of an osmnet_config object.

    Parameters
    ----------
    settings : dict
        osmnet_config as a dictionary

    Returns
    -------
    Nothing
    """
    valid_keys = ['logs_folder', 'log_file', 'log_console', 'log_name',
                  'log_filename', 'keep_osm_tags']

    for key in list(settings.keys()):
        assert key in valid_keys, \
            ('{} not found in list of valid configuration keys').format(key)
        assert isinstance(key, str), ('{} must be a string').format(key)
        if key == 'keep_osm_tags':
            assert isinstance(settings[key], list), \
                ('{} must be a list').format(key)
            for value in settings[key]:
                assert all(isinstance(element, str) for element in value), \
                    'all elements must be a string'
        if key == 'log_file' or key == 'log_console':
            assert isinstance(settings[key], bool), \
                ('{} must be boolean').format(key)
[ "def", "format_check", "(", "settings", ")", ":", "valid_keys", "=", "[", "'logs_folder'", ",", "'log_file'", ",", "'log_console'", ",", "'log_name'", ",", "'log_filename'", ",", "'keep_osm_tags'", "]", "for", "key", "in", "list", "(", "settings", ".", "keys", "(", ")", ")", ":", "assert", "key", "in", "valid_keys", ",", "(", "'{} not found in list of valid configuation keys'", ")", ".", "format", "(", "key", ")", "assert", "isinstance", "(", "key", ",", "str", ")", ",", "(", "'{} must be a string'", ")", ".", "format", "(", "key", ")", "if", "key", "==", "'keep_osm_tags'", ":", "assert", "isinstance", "(", "settings", "[", "key", "]", ",", "list", ")", ",", "(", "'{} must be a list'", ")", ".", "format", "(", "key", ")", "for", "value", "in", "settings", "[", "key", "]", ":", "assert", "all", "(", "isinstance", "(", "element", ",", "str", ")", "for", "element", "in", "value", ")", ",", "'all elements must be a string'", "if", "key", "==", "'log_file'", "or", "key", "==", "'log_console'", ":", "assert", "isinstance", "(", "settings", "[", "key", "]", ",", "bool", ")", ",", "(", "'{} must be boolean'", ")", ".", "format", "(", "key", ")" ]
Check the format of an osmnet_config object.

Parameters
----------
settings : dict
    osmnet_config as a dictionary

Returns
-------
Nothing
[ "Check", "the", "format", "of", "a", "osmnet_config", "object", "." ]
train
https://github.com/UDST/osmnet/blob/155110a8e38d3646b9dbc3ec729063930cab3d5f/osmnet/config.py#L2-L30
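A sketch of how format_check behaves (illustrative dictionaries; assumes the function is importable from osmnet.config):

from osmnet.config import format_check

format_check({'log_file': True, 'log_console': False})  # passes silently
format_check({'bogus': 1})
# AssertionError: bogus not found in list of valid configuration keys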
UDST/osmnet
osmnet/config.py
osmnet_config.to_dict
def to_dict(self):
        """
        Return a dict representation of an osmnet osmnet_config instance.
        """
        return {'logs_folder': self.logs_folder,
                'log_file': self.log_file,
                'log_console': self.log_console,
                'log_name': self.log_name,
                'log_filename': self.log_filename,
                'keep_osm_tags': self.keep_osm_tags
                }
python
def to_dict(self):
        """
        Return a dict representation of an osmnet osmnet_config instance.
        """
        return {'logs_folder': self.logs_folder,
                'log_file': self.log_file,
                'log_console': self.log_console,
                'log_name': self.log_name,
                'log_filename': self.log_filename,
                'keep_osm_tags': self.keep_osm_tags
                }
[ "def", "to_dict", "(", "self", ")", ":", "return", "{", "'logs_folder'", ":", "self", ".", "logs_folder", ",", "'log_file'", ":", "self", ".", "log_file", ",", "'log_console'", ":", "self", ".", "log_console", ",", "'log_name'", ":", "self", ".", "log_name", ",", "'log_filename'", ":", "self", ".", "log_filename", ",", "'keep_osm_tags'", ":", "self", ".", "keep_osm_tags", "}" ]
Return a dict representation of an osmnet osmnet_config instance.
[ "Return", "a", "dict", "representation", "of", "an", "osmnet", "osmnet_config", "instance", "." ]
train
https://github.com/UDST/osmnet/blob/155110a8e38d3646b9dbc3ec729063930cab3d5f/osmnet/config.py#L73-L83
UDST/osmnet
osmnet/utils.py
great_circle_dist
def great_circle_dist(lat1, lon1, lat2, lon2):
    """
    Get the distance (in meters) between two lat/lon points
    via the Haversine formula.

    Parameters
    ----------
    lat1, lon1, lat2, lon2 : float
        Latitude and longitude in degrees.

    Returns
    -------
    dist : float
        Distance in meters.
    """
    radius = 6372795  # meters

    lat1 = math.radians(lat1)
    lon1 = math.radians(lon1)
    lat2 = math.radians(lat2)
    lon2 = math.radians(lon2)

    dlat = lat2 - lat1
    dlon = lon2 - lon1

    # formula from:
    # http://en.wikipedia.org/wiki/Haversine_formula#The_haversine_formula
    a = math.pow(math.sin(dlat / 2), 2)
    b = math.cos(lat1) * math.cos(lat2) * math.pow(math.sin(dlon / 2), 2)
    d = 2 * radius * math.asin(math.sqrt(a + b))

    return d
python
def great_circle_dist(lat1, lon1, lat2, lon2):
    """
    Get the distance (in meters) between two lat/lon points
    via the Haversine formula.

    Parameters
    ----------
    lat1, lon1, lat2, lon2 : float
        Latitude and longitude in degrees.

    Returns
    -------
    dist : float
        Distance in meters.
    """
    radius = 6372795  # meters

    lat1 = math.radians(lat1)
    lon1 = math.radians(lon1)
    lat2 = math.radians(lat2)
    lon2 = math.radians(lon2)

    dlat = lat2 - lat1
    dlon = lon2 - lon1

    # formula from:
    # http://en.wikipedia.org/wiki/Haversine_formula#The_haversine_formula
    a = math.pow(math.sin(dlat / 2), 2)
    b = math.cos(lat1) * math.cos(lat2) * math.pow(math.sin(dlon / 2), 2)
    d = 2 * radius * math.asin(math.sqrt(a + b))

    return d
[ "def", "great_circle_dist", "(", "lat1", ",", "lon1", ",", "lat2", ",", "lon2", ")", ":", "radius", "=", "6372795", "# meters", "lat1", "=", "math", ".", "radians", "(", "lat1", ")", "lon1", "=", "math", ".", "radians", "(", "lon1", ")", "lat2", "=", "math", ".", "radians", "(", "lat2", ")", "lon2", "=", "math", ".", "radians", "(", "lon2", ")", "dlat", "=", "lat2", "-", "lat1", "dlon", "=", "lon2", "-", "lon1", "# formula from:", "# http://en.wikipedia.org/wiki/Haversine_formula#The_haversine_formula", "a", "=", "math", ".", "pow", "(", "math", ".", "sin", "(", "dlat", "/", "2", ")", ",", "2", ")", "b", "=", "math", ".", "cos", "(", "lat1", ")", "*", "math", ".", "cos", "(", "lat2", ")", "*", "math", ".", "pow", "(", "math", ".", "sin", "(", "dlon", "/", "2", ")", ",", "2", ")", "d", "=", "2", "*", "radius", "*", "math", ".", "asin", "(", "math", ".", "sqrt", "(", "a", "+", "b", ")", ")", "return", "d" ]
Get the distance (in meters) between two lat/lon points via the Haversine
formula.

Parameters
----------
lat1, lon1, lat2, lon2 : float
    Latitude and longitude in degrees.

Returns
-------
dist : float
    Distance in meters.
[ "Get", "the", "distance", "(", "in", "meters", ")", "between", "two", "lat", "/", "lon", "points", "via", "the", "Haversine", "formula", "." ]
train
https://github.com/UDST/osmnet/blob/155110a8e38d3646b9dbc3ec729063930cab3d5f/osmnet/utils.py#L17-L49
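A quick numeric check of the Haversine helper: one degree of latitude should come out at roughly 111 km, i.e. radius * pi / 180 (assumes the function is importable from osmnet.utils):

from osmnet.utils import great_circle_dist

d = great_circle_dist(0.0, 0.0, 1.0, 0.0)
print(round(d / 1000.0, 1))  # ~111.2 (km)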
UDST/osmnet
osmnet/load.py
osm_filter
def osm_filter(network_type):
    """
    Create a filter to query Overpass API for the specified OSM network type.

    Parameters
    ----------
    network_type : string, {'walk', 'drive'}
        denoting the type of street network to extract

    Returns
    -------
    osm_filter : string
    """
    filters = {}

    # drive: select only roads that are drivable by normal 2 wheel drive
    # passenger vehicles both private and public
    # roads. Filter out un-drivable roads and service roads tagged as parking,
    # driveway, or emergency-access
    filters['drive'] = ('["highway"!~"cycleway|footway|path|pedestrian|steps'
                        '|track|proposed|construction|bridleway|abandoned'
                        '|platform|raceway|service"]'
                        '["motor_vehicle"!~"no"]["motorcar"!~"no"]'
                        '["service"!~"parking|parking_aisle|driveway'
                        '|emergency_access"]')

    # walk: select only roads and pathways that allow pedestrian access both
    # private and public pathways and roads.
    # Filter out limited access roadways and allow service roads
    filters['walk'] = ('["highway"!~"motor|proposed|construction|abandoned'
                       '|platform|raceway"]["foot"!~"no"]'
                       '["pedestrians"!~"no"]')

    if network_type in filters:
        osm_filter = filters[network_type]
    else:
        raise ValueError('unknown network_type "{}"'.format(network_type))

    return osm_filter
python
def osm_filter(network_type):
    """
    Create a filter to query Overpass API for the specified OSM network type.

    Parameters
    ----------
    network_type : string, {'walk', 'drive'}
        denoting the type of street network to extract

    Returns
    -------
    osm_filter : string
    """
    filters = {}

    # drive: select only roads that are drivable by normal 2 wheel drive
    # passenger vehicles both private and public
    # roads. Filter out un-drivable roads and service roads tagged as parking,
    # driveway, or emergency-access
    filters['drive'] = ('["highway"!~"cycleway|footway|path|pedestrian|steps'
                        '|track|proposed|construction|bridleway|abandoned'
                        '|platform|raceway|service"]'
                        '["motor_vehicle"!~"no"]["motorcar"!~"no"]'
                        '["service"!~"parking|parking_aisle|driveway'
                        '|emergency_access"]')

    # walk: select only roads and pathways that allow pedestrian access both
    # private and public pathways and roads.
    # Filter out limited access roadways and allow service roads
    filters['walk'] = ('["highway"!~"motor|proposed|construction|abandoned'
                       '|platform|raceway"]["foot"!~"no"]'
                       '["pedestrians"!~"no"]')

    if network_type in filters:
        osm_filter = filters[network_type]
    else:
        raise ValueError('unknown network_type "{}"'.format(network_type))

    return osm_filter
[ "def", "osm_filter", "(", "network_type", ")", ":", "filters", "=", "{", "}", "# drive: select only roads that are drivable by normal 2 wheel drive", "# passenger vehicles both private and public", "# roads. Filter out un-drivable roads and service roads tagged as parking,", "# driveway, or emergency-access", "filters", "[", "'drive'", "]", "=", "(", "'[\"highway\"!~\"cycleway|footway|path|pedestrian|steps'", "'|track|proposed|construction|bridleway|abandoned'", "'|platform|raceway|service\"]'", "'[\"motor_vehicle\"!~\"no\"][\"motorcar\"!~\"no\"]'", "'[\"service\"!~\"parking|parking_aisle|driveway'", "'|emergency_access\"]'", ")", "# walk: select only roads and pathways that allow pedestrian access both", "# private and public pathways and roads.", "# Filter out limited access roadways and allow service roads", "filters", "[", "'walk'", "]", "=", "(", "'[\"highway\"!~\"motor|proposed|construction|abandoned'", "'|platform|raceway\"][\"foot\"!~\"no\"]'", "'[\"pedestrians\"!~\"no\"]'", ")", "if", "network_type", "in", "filters", ":", "osm_filter", "=", "filters", "[", "network_type", "]", "else", ":", "raise", "ValueError", "(", "'unknown network_type \"{}\"'", ".", "format", "(", "network_type", ")", ")", "return", "osm_filter" ]
Create a filter to query Overpass API for the specified OSM network type. Parameters ---------- network_type : string, {'walk', 'drive'} denoting the type of street network to extract Returns ------- osm_filter : string
[ "Create", "a", "filter", "to", "query", "Overpass", "API", "for", "the", "specified", "OSM", "network", "type", "." ]
train
https://github.com/UDST/osmnet/blob/155110a8e38d3646b9dbc3ec729063930cab3d5f/osmnet/load.py#L29-L67
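A minimal usage sketch for osm_filter above; nothing beyond the function itself is assumed:

# build the pedestrian filter and confirm unknown network types are rejected
walk_filter = osm_filter('walk')
print(walk_filter)
# '["highway"!~"motor|proposed|construction|abandoned|platform|raceway"]
# ["foot"!~"no"]["pedestrians"!~"no"]'
try:
    osm_filter('bike')
except ValueError as err:
    print(err)  # unknown network_type "bike"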
UDST/osmnet
osmnet/load.py
osm_net_download
def osm_net_download(lat_min=None, lng_min=None, lat_max=None, lng_max=None, network_type='walk', timeout=180, memory=None, max_query_area_size=50*1000*50*1000, custom_osm_filter=None): """ Download OSM ways and nodes within a bounding box from the Overpass API. Parameters ---------- lat_min : float southern latitude of bounding box lng_min : float eastern longitude of bounding box lat_max : float northern latitude of bounding box lng_max : float western longitude of bounding box network_type : string Specify the network type where value of 'walk' includes roadways where pedestrians are allowed and pedestrian pathways and 'drive' includes driveable roadways. timeout : int the timeout interval for requests and to pass to Overpass API memory : int server memory allocation size for the query, in bytes. If none, server will use its default allocation size max_query_area_size : float max area for any part of the geometry, in the units the geometry is in: any polygon bigger will get divided up for multiple queries to Overpass API (default is 50,000 * 50,000 units (ie, 50km x 50km in area, if units are meters)) custom_osm_filter : string, optional specify custom arguments for the way["highway"] query to OSM. Must follow Overpass API schema. For example to request highway ways that are service roads use: '["highway"="service"]' Returns ------- response_json : dict """ # create a filter to exclude certain kinds of ways based on the requested # network_type if custom_osm_filter is None: request_filter = osm_filter(network_type) else: request_filter = custom_osm_filter response_jsons_list = [] response_jsons = [] # server memory allocation in bytes formatted for Overpass API query if memory is None: maxsize = '' else: maxsize = '[maxsize:{}]'.format(memory) # define the Overpass API query # way["highway"] denotes ways with highway keys and {filters} returns # ways with the requested key/value. the '>' makes it recurse so we get # ways and way nodes. maxsize is in bytes. 
# turn bbox into a polygon and project to local UTM polygon = Polygon([(lng_max, lat_min), (lng_min, lat_min), (lng_min, lat_max), (lng_max, lat_max)]) geometry_proj, crs_proj = project_geometry(polygon, crs={'init': 'epsg:4326'}) # subdivide the bbox area poly if it exceeds the max area size # (in meters), then project back to WGS84 geometry_proj_consolidated_subdivided = consolidate_subdivide_geometry( geometry_proj, max_query_area_size=max_query_area_size) geometry, crs = project_geometry(geometry_proj_consolidated_subdivided, crs=crs_proj, to_latlong=True) log('Requesting network data within bounding box from Overpass API ' 'in {:,} request(s)'.format(len(geometry))) start_time = time.time() # loop through each polygon in the geometry for poly in geometry: # represent bbox as lng_max, lat_min, lng_min, lat_max and round # lat-longs to 8 decimal places to create # consistent URL strings lng_max, lat_min, lng_min, lat_max = poly.bounds query_template = '[out:json][timeout:{timeout}]{maxsize};' \ '(way["highway"]' \ '{filters}({lat_min:.8f},{lng_max:.8f},' \ '{lat_max:.8f},{lng_min:.8f});>;);out;' query_str = query_template.format(lat_max=lat_max, lat_min=lat_min, lng_min=lng_min, lng_max=lng_max, filters=request_filter, timeout=timeout, maxsize=maxsize) response_json = overpass_request(data={'data': query_str}, timeout=timeout) response_jsons_list.append(response_json) log('Downloaded OSM network data within bounding box from Overpass ' 'API in {:,} request(s) and' ' {:,.2f} seconds'.format(len(geometry), time.time()-start_time)) # stitch together individual json results for json in response_jsons_list: try: response_jsons.extend(json['elements']) except KeyError: pass # remove duplicate records resulting from the json stitching start_time = time.time() record_count = len(response_jsons) if record_count == 0: raise Exception('Query resulted in no data. Check your query ' 'parameters: {}'.format(query_str)) else: response_jsons_df = pd.DataFrame.from_records(response_jsons, index='id') nodes = response_jsons_df[response_jsons_df['type'] == 'node'] nodes = nodes[~nodes.index.duplicated(keep='first')] ways = response_jsons_df[response_jsons_df['type'] == 'way'] ways = ways[~ways.index.duplicated(keep='first')] response_jsons_df = pd.concat([nodes, ways], axis=0) response_jsons_df.reset_index(inplace=True) response_jsons = response_jsons_df.to_dict(orient='records') if record_count - len(response_jsons) > 0: log('{:,} duplicate records removed. Took {:,.2f} seconds'.format( record_count - len(response_jsons), time.time() - start_time)) return {'elements': response_jsons}
python
def osm_net_download(lat_min=None, lng_min=None, lat_max=None, lng_max=None, network_type='walk', timeout=180, memory=None, max_query_area_size=50*1000*50*1000, custom_osm_filter=None): """ Download OSM ways and nodes within a bounding box from the Overpass API. Parameters ---------- lat_min : float southern latitude of bounding box lng_min : float eastern longitude of bounding box lat_max : float northern latitude of bounding box lng_max : float western longitude of bounding box network_type : string Specify the network type where value of 'walk' includes roadways where pedestrians are allowed and pedestrian pathways and 'drive' includes driveable roadways. timeout : int the timeout interval for requests and to pass to Overpass API memory : int server memory allocation size for the query, in bytes. If none, server will use its default allocation size max_query_area_size : float max area for any part of the geometry, in the units the geometry is in: any polygon bigger will get divided up for multiple queries to Overpass API (default is 50,000 * 50,000 units (ie, 50km x 50km in area, if units are meters)) custom_osm_filter : string, optional specify custom arguments for the way["highway"] query to OSM. Must follow Overpass API schema. For example to request highway ways that are service roads use: '["highway"="service"]' Returns ------- response_json : dict """ # create a filter to exclude certain kinds of ways based on the requested # network_type if custom_osm_filter is None: request_filter = osm_filter(network_type) else: request_filter = custom_osm_filter response_jsons_list = [] response_jsons = [] # server memory allocation in bytes formatted for Overpass API query if memory is None: maxsize = '' else: maxsize = '[maxsize:{}]'.format(memory) # define the Overpass API query # way["highway"] denotes ways with highway keys and {filters} returns # ways with the requested key/value. the '>' makes it recurse so we get # ways and way nodes. maxsize is in bytes. 
# turn bbox into a polygon and project to local UTM polygon = Polygon([(lng_max, lat_min), (lng_min, lat_min), (lng_min, lat_max), (lng_max, lat_max)]) geometry_proj, crs_proj = project_geometry(polygon, crs={'init': 'epsg:4326'}) # subdivide the bbox area poly if it exceeds the max area size # (in meters), then project back to WGS84 geometry_proj_consolidated_subdivided = consolidate_subdivide_geometry( geometry_proj, max_query_area_size=max_query_area_size) geometry, crs = project_geometry(geometry_proj_consolidated_subdivided, crs=crs_proj, to_latlong=True) log('Requesting network data within bounding box from Overpass API ' 'in {:,} request(s)'.format(len(geometry))) start_time = time.time() # loop through each polygon in the geometry for poly in geometry: # represent bbox as lng_max, lat_min, lng_min, lat_max and round # lat-longs to 8 decimal places to create # consistent URL strings lng_max, lat_min, lng_min, lat_max = poly.bounds query_template = '[out:json][timeout:{timeout}]{maxsize};' \ '(way["highway"]' \ '{filters}({lat_min:.8f},{lng_max:.8f},' \ '{lat_max:.8f},{lng_min:.8f});>;);out;' query_str = query_template.format(lat_max=lat_max, lat_min=lat_min, lng_min=lng_min, lng_max=lng_max, filters=request_filter, timeout=timeout, maxsize=maxsize) response_json = overpass_request(data={'data': query_str}, timeout=timeout) response_jsons_list.append(response_json) log('Downloaded OSM network data within bounding box from Overpass ' 'API in {:,} request(s) and' ' {:,.2f} seconds'.format(len(geometry), time.time()-start_time)) # stitch together individual json results for json in response_jsons_list: try: response_jsons.extend(json['elements']) except KeyError: pass # remove duplicate records resulting from the json stitching start_time = time.time() record_count = len(response_jsons) if record_count == 0: raise Exception('Query resulted in no data. Check your query ' 'parameters: {}'.format(query_str)) else: response_jsons_df = pd.DataFrame.from_records(response_jsons, index='id') nodes = response_jsons_df[response_jsons_df['type'] == 'node'] nodes = nodes[~nodes.index.duplicated(keep='first')] ways = response_jsons_df[response_jsons_df['type'] == 'way'] ways = ways[~ways.index.duplicated(keep='first')] response_jsons_df = pd.concat([nodes, ways], axis=0) response_jsons_df.reset_index(inplace=True) response_jsons = response_jsons_df.to_dict(orient='records') if record_count - len(response_jsons) > 0: log('{:,} duplicate records removed. Took {:,.2f} seconds'.format( record_count - len(response_jsons), time.time() - start_time)) return {'elements': response_jsons}
[ "def", "osm_net_download", "(", "lat_min", "=", "None", ",", "lng_min", "=", "None", ",", "lat_max", "=", "None", ",", "lng_max", "=", "None", ",", "network_type", "=", "'walk'", ",", "timeout", "=", "180", ",", "memory", "=", "None", ",", "max_query_area_size", "=", "50", "*", "1000", "*", "50", "*", "1000", ",", "custom_osm_filter", "=", "None", ")", ":", "# create a filter to exclude certain kinds of ways based on the requested", "# network_type", "if", "custom_osm_filter", "is", "None", ":", "request_filter", "=", "osm_filter", "(", "network_type", ")", "else", ":", "request_filter", "=", "custom_osm_filter", "response_jsons_list", "=", "[", "]", "response_jsons", "=", "[", "]", "# server memory allocation in bytes formatted for Overpass API query", "if", "memory", "is", "None", ":", "maxsize", "=", "''", "else", ":", "maxsize", "=", "'[maxsize:{}]'", ".", "format", "(", "memory", ")", "# define the Overpass API query", "# way[\"highway\"] denotes ways with highway keys and {filters} returns", "# ways with the requested key/value. the '>' makes it recurse so we get", "# ways and way nodes. maxsize is in bytes.", "# turn bbox into a polygon and project to local UTM", "polygon", "=", "Polygon", "(", "[", "(", "lng_max", ",", "lat_min", ")", ",", "(", "lng_min", ",", "lat_min", ")", ",", "(", "lng_min", ",", "lat_max", ")", ",", "(", "lng_max", ",", "lat_max", ")", "]", ")", "geometry_proj", ",", "crs_proj", "=", "project_geometry", "(", "polygon", ",", "crs", "=", "{", "'init'", ":", "'epsg:4326'", "}", ")", "# subdivide the bbox area poly if it exceeds the max area size", "# (in meters), then project back to WGS84", "geometry_proj_consolidated_subdivided", "=", "consolidate_subdivide_geometry", "(", "geometry_proj", ",", "max_query_area_size", "=", "max_query_area_size", ")", "geometry", ",", "crs", "=", "project_geometry", "(", "geometry_proj_consolidated_subdivided", ",", "crs", "=", "crs_proj", ",", "to_latlong", "=", "True", ")", "log", "(", "'Requesting network data within bounding box from Overpass API '", "'in {:,} request(s)'", ".", "format", "(", "len", "(", "geometry", ")", ")", ")", "start_time", "=", "time", ".", "time", "(", ")", "# loop through each polygon in the geometry", "for", "poly", "in", "geometry", ":", "# represent bbox as lng_max, lat_min, lng_min, lat_max and round", "# lat-longs to 8 decimal places to create", "# consistent URL strings", "lng_max", ",", "lat_min", ",", "lng_min", ",", "lat_max", "=", "poly", ".", "bounds", "query_template", "=", "'[out:json][timeout:{timeout}]{maxsize};'", "'(way[\"highway\"]'", "'{filters}({lat_min:.8f},{lng_max:.8f},'", "'{lat_max:.8f},{lng_min:.8f});>;);out;'", "query_str", "=", "query_template", ".", "format", "(", "lat_max", "=", "lat_max", ",", "lat_min", "=", "lat_min", ",", "lng_min", "=", "lng_min", ",", "lng_max", "=", "lng_max", ",", "filters", "=", "request_filter", ",", "timeout", "=", "timeout", ",", "maxsize", "=", "maxsize", ")", "response_json", "=", "overpass_request", "(", "data", "=", "{", "'data'", ":", "query_str", "}", ",", "timeout", "=", "timeout", ")", "response_jsons_list", ".", "append", "(", "response_json", ")", "log", "(", "'Downloaded OSM network data within bounding box from Overpass '", "'API in {:,} request(s) and'", "' {:,.2f} seconds'", ".", "format", "(", "len", "(", "geometry", ")", ",", "time", ".", "time", "(", ")", "-", "start_time", ")", ")", "# stitch together individual json results", "for", "json", "in", "response_jsons_list", ":", "try", ":", "response_jsons", ".", "extend", 
"(", "json", "[", "'elements'", "]", ")", "except", "KeyError", ":", "pass", "# remove duplicate records resulting from the json stitching", "start_time", "=", "time", ".", "time", "(", ")", "record_count", "=", "len", "(", "response_jsons", ")", "if", "record_count", "==", "0", ":", "raise", "Exception", "(", "'Query resulted in no data. Check your query '", "'parameters: {}'", ".", "format", "(", "query_str", ")", ")", "else", ":", "response_jsons_df", "=", "pd", ".", "DataFrame", ".", "from_records", "(", "response_jsons", ",", "index", "=", "'id'", ")", "nodes", "=", "response_jsons_df", "[", "response_jsons_df", "[", "'type'", "]", "==", "'node'", "]", "nodes", "=", "nodes", "[", "~", "nodes", ".", "index", ".", "duplicated", "(", "keep", "=", "'first'", ")", "]", "ways", "=", "response_jsons_df", "[", "response_jsons_df", "[", "'type'", "]", "==", "'way'", "]", "ways", "=", "ways", "[", "~", "ways", ".", "index", ".", "duplicated", "(", "keep", "=", "'first'", ")", "]", "response_jsons_df", "=", "pd", ".", "concat", "(", "[", "nodes", ",", "ways", "]", ",", "axis", "=", "0", ")", "response_jsons_df", ".", "reset_index", "(", "inplace", "=", "True", ")", "response_jsons", "=", "response_jsons_df", ".", "to_dict", "(", "orient", "=", "'records'", ")", "if", "record_count", "-", "len", "(", "response_jsons", ")", ">", "0", ":", "log", "(", "'{:,} duplicate records removed. Took {:,.2f} seconds'", ".", "format", "(", "record_count", "-", "len", "(", "response_jsons", ")", ",", "time", ".", "time", "(", ")", "-", "start_time", ")", ")", "return", "{", "'elements'", ":", "response_jsons", "}" ]
Download OSM ways and nodes within a bounding box from the Overpass API. Parameters ---------- lat_min : float southern latitude of bounding box lng_min : float eastern longitude of bounding box lat_max : float northern latitude of bounding box lng_max : float western longitude of bounding box network_type : string Specify the network type where value of 'walk' includes roadways where pedestrians are allowed and pedestrian pathways and 'drive' includes driveable roadways. timeout : int the timeout interval for requests and to pass to Overpass API memory : int server memory allocation size for the query, in bytes. If none, server will use its default allocation size max_query_area_size : float max area for any part of the geometry, in the units the geometry is in: any polygon bigger will get divided up for multiple queries to Overpass API (default is 50,000 * 50,000 units (ie, 50km x 50km in area, if units are meters)) custom_osm_filter : string, optional specify custom arguments for the way["highway"] query to OSM. Must follow Overpass API schema. For example to request highway ways that are service roads use: '["highway"="service"]' Returns ------- response_json : dict
[ "Download", "OSM", "ways", "and", "nodes", "within", "a", "bounding", "box", "from", "the", "Overpass", "API", "." ]
train
https://github.com/UDST/osmnet/blob/155110a8e38d3646b9dbc3ec729063930cab3d5f/osmnet/load.py#L70-L200
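A minimal usage sketch for osm_net_download; the bounding-box coordinates below are illustrative, not taken from the source:

# download the pedestrian network for a small WGS84 bounding box
data = osm_net_download(lat_min=37.76, lng_min=-122.44,
                        lat_max=37.78, lng_max=-122.42,
                        network_type='walk')
print(len(data['elements']))  # count of de-duplicated node and way records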
UDST/osmnet
osmnet/load.py
overpass_request
def overpass_request(data, pause_duration=None, timeout=180,
                     error_pause_duration=None):
    """
    Send a request to the Overpass API via HTTP POST and return the
    JSON response

    Parameters
    ----------
    data : dict or OrderedDict
        key-value pairs of parameters to post to Overpass API
    pause_duration : int
        how long to pause in seconds before requests, if None, will query
        Overpass API status endpoint
        to find when next slot is available
    timeout : int
        the timeout interval for the requests library
    error_pause_duration : int
        how long to pause in seconds before re-trying requests if error

    Returns
    -------
    response_json : dict
    """

    # define the Overpass API URL, then send the query via HTTP POST
    url = 'http://www.overpass-api.de/api/interpreter'

    start_time = time.time()
    log('Posting to {} with timeout={}, "{}"'.format(url, timeout, data))
    response = requests.post(url, data=data, timeout=timeout)

    # get the response size and the domain, log result
    size_kb = len(response.content) / 1000.
    domain = re.findall(r'//(?s)(.*?)/', url)[0]
    log('Downloaded {:,.1f}KB from {} in {:,.2f} seconds'
        .format(size_kb, domain, time.time()-start_time))

    try:
        response_json = response.json()
        if 'remark' in response_json:
            log('Server remark: "{}"'.format(response_json['remark']),
                level=lg.WARNING)

    except Exception:
        # 429 = 'too many requests' and 504 = 'gateway timeout' from server
        # overload. handle these errors by recursively
        # calling overpass_request until a valid response is achieved
        if response.status_code in [429, 504]:
            # pause for error_pause_duration seconds before re-trying request
            if error_pause_duration is None:
                error_pause_duration = get_pause_duration()
            log('Server at {} returned status code {} and no JSON data. '
                'Re-trying request in {:.2f} seconds.'
                .format(domain, response.status_code, error_pause_duration),
                level=lg.WARNING)
            time.sleep(error_pause_duration)
            response_json = overpass_request(data=data,
                                             pause_duration=pause_duration,
                                             timeout=timeout)

        # else, this was an unhandled status_code, throw an exception
        else:
            log('Server at {} returned status code {} and no JSON data'
                .format(domain, response.status_code), level=lg.ERROR)
            raise Exception('Server returned no JSON data.\n{} {}\n{}'
                            .format(response, response.reason,
                                    response.text))

    return response_json
python
def overpass_request(data, pause_duration=None, timeout=180,
                     error_pause_duration=None):
    """
    Send a request to the Overpass API via HTTP POST and return the
    JSON response

    Parameters
    ----------
    data : dict or OrderedDict
        key-value pairs of parameters to post to Overpass API
    pause_duration : int
        how long to pause in seconds before requests, if None, will query
        Overpass API status endpoint
        to find when next slot is available
    timeout : int
        the timeout interval for the requests library
    error_pause_duration : int
        how long to pause in seconds before re-trying requests if error

    Returns
    -------
    response_json : dict
    """

    # define the Overpass API URL, then send the query via HTTP POST
    url = 'http://www.overpass-api.de/api/interpreter'

    start_time = time.time()
    log('Posting to {} with timeout={}, "{}"'.format(url, timeout, data))
    response = requests.post(url, data=data, timeout=timeout)

    # get the response size and the domain, log result
    size_kb = len(response.content) / 1000.
    domain = re.findall(r'//(?s)(.*?)/', url)[0]
    log('Downloaded {:,.1f}KB from {} in {:,.2f} seconds'
        .format(size_kb, domain, time.time()-start_time))

    try:
        response_json = response.json()
        if 'remark' in response_json:
            log('Server remark: "{}"'.format(response_json['remark']),
                level=lg.WARNING)

    except Exception:
        # 429 = 'too many requests' and 504 = 'gateway timeout' from server
        # overload. handle these errors by recursively
        # calling overpass_request until a valid response is achieved
        if response.status_code in [429, 504]:
            # pause for error_pause_duration seconds before re-trying request
            if error_pause_duration is None:
                error_pause_duration = get_pause_duration()
            log('Server at {} returned status code {} and no JSON data. '
                'Re-trying request in {:.2f} seconds.'
                .format(domain, response.status_code, error_pause_duration),
                level=lg.WARNING)
            time.sleep(error_pause_duration)
            response_json = overpass_request(data=data,
                                             pause_duration=pause_duration,
                                             timeout=timeout)

        # else, this was an unhandled status_code, throw an exception
        else:
            log('Server at {} returned status code {} and no JSON data'
                .format(domain, response.status_code), level=lg.ERROR)
            raise Exception('Server returned no JSON data.\n{} {}\n{}'
                            .format(response, response.reason,
                                    response.text))

    return response_json
[ "def", "overpass_request", "(", "data", ",", "pause_duration", "=", "None", ",", "timeout", "=", "180", ",", "error_pause_duration", "=", "None", ")", ":", "# define the Overpass API URL, then construct a GET-style URL", "url", "=", "'http://www.overpass-api.de/api/interpreter'", "start_time", "=", "time", ".", "time", "(", ")", "log", "(", "'Posting to {} with timeout={}, \"{}\"'", ".", "format", "(", "url", ",", "timeout", ",", "data", ")", ")", "response", "=", "requests", ".", "post", "(", "url", ",", "data", "=", "data", ",", "timeout", "=", "timeout", ")", "# get the response size and the domain, log result", "size_kb", "=", "len", "(", "response", ".", "content", ")", "/", "1000.", "domain", "=", "re", ".", "findall", "(", "r'//(?s)(.*?)/'", ",", "url", ")", "[", "0", "]", "log", "(", "'Downloaded {:,.1f}KB from {} in {:,.2f} seconds'", ".", "format", "(", "size_kb", ",", "domain", ",", "time", ".", "time", "(", ")", "-", "start_time", ")", ")", "try", ":", "response_json", "=", "response", ".", "json", "(", ")", "if", "'remark'", "in", "response_json", ":", "log", "(", "'Server remark: \"{}\"'", ".", "format", "(", "response_json", "[", "'remark'", "]", ",", "level", "=", "lg", ".", "WARNING", ")", ")", "except", "Exception", ":", "# 429 = 'too many requests' and 504 = 'gateway timeout' from server", "# overload. handle these errors by recursively", "# calling overpass_request until a valid response is achieved", "if", "response", ".", "status_code", "in", "[", "429", ",", "504", "]", ":", "# pause for error_pause_duration seconds before re-trying request", "if", "error_pause_duration", "is", "None", ":", "error_pause_duration", "=", "get_pause_duration", "(", ")", "log", "(", "'Server at {} returned status code {} and no JSON data. '", "'Re-trying request in {:.2f} seconds.'", ".", "format", "(", "domain", ",", "response", ".", "status_code", ",", "error_pause_duration", ")", ",", "level", "=", "lg", ".", "WARNING", ")", "time", ".", "sleep", "(", "error_pause_duration", ")", "response_json", "=", "overpass_request", "(", "data", "=", "data", ",", "pause_duration", "=", "pause_duration", ",", "timeout", "=", "timeout", ")", "# else, this was an unhandled status_code, throw an exception", "else", ":", "log", "(", "'Server at {} returned status code {} and no JSON data'", ".", "format", "(", "domain", ",", "response", ".", "status_code", ")", ",", "level", "=", "lg", ".", "ERROR", ")", "raise", "Exception", "(", "'Server returned no JSON data.\\n{} {}\\n{}'", ".", "format", "(", "response", ",", "response", ".", "reason", ",", "response", ".", "text", ")", ")", "return", "response_json" ]
Send a request to the Overpass API via HTTP POST and return the JSON response Parameters ---------- data : dict or OrderedDict key-value pairs of parameters to post to Overpass API pause_duration : int how long to pause in seconds before requests, if None, will query Overpass API status endpoint to find when next slot is available timeout : int the timeout interval for the requests library error_pause_duration : int how long to pause in seconds before re-trying requests if error Returns ------- response_json : dict
[ "Send", "a", "request", "to", "the", "Overpass", "API", "via", "HTTP", "POST", "and", "return", "the", "JSON", "response" ]
train
https://github.com/UDST/osmnet/blob/155110a8e38d3646b9dbc3ec729063930cab3d5f/osmnet/load.py#L203-L270
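A minimal sketch of calling overpass_request directly with a hand-written Overpass QL query; the query string and coordinates are illustrative:

# post a raw query; bbox order for Overpass is (south, west, north, east)
query = ('[out:json][timeout:180];'
         '(way["highway"](37.76,-122.44,37.78,-122.42);>;);out;')
response_json = overpass_request(data={'data': query}, timeout=180)
print(len(response_json.get('elements', [])))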
UDST/osmnet
osmnet/load.py
get_pause_duration
def get_pause_duration(recursive_delay=5, default_duration=10): """ Check the Overpass API status endpoint to determine how long to wait until next slot is available. Parameters ---------- recursive_delay : int how long to wait between recursive calls if server is currently running a query default_duration : int if fatal error, function falls back on returning this value Returns ------- pause_duration : int """ try: response = requests.get('http://overpass-api.de/api/status') status = response.text.split('\n')[3] status_first_token = status.split(' ')[0] except Exception: # if status endpoint cannot be reached or output parsed, log error # and return default duration log('Unable to query http://overpass-api.de/api/status', level=lg.ERROR) return default_duration try: # if first token is numeric, it indicates the number of slots # available - no wait required available_slots = int(status_first_token) pause_duration = 0 except Exception: # if first token is 'Slot', it tells you when your slot will be free if status_first_token == 'Slot': utc_time_str = status.split(' ')[3] utc_time = date_parser.parse(utc_time_str).replace(tzinfo=None) pause_duration = math.ceil( (utc_time - dt.datetime.utcnow()).total_seconds()) pause_duration = max(pause_duration, 1) # if first token is 'Currently', it is currently running a query so # check back in recursive_delay seconds elif status_first_token == 'Currently': time.sleep(recursive_delay) pause_duration = get_pause_duration() else: # any other status is unrecognized - log an error and return # default duration log('Unrecognized server status: "{}"'.format(status), level=lg.ERROR) return default_duration return pause_duration
python
def get_pause_duration(recursive_delay=5, default_duration=10): """ Check the Overpass API status endpoint to determine how long to wait until next slot is available. Parameters ---------- recursive_delay : int how long to wait between recursive calls if server is currently running a query default_duration : int if fatal error, function falls back on returning this value Returns ------- pause_duration : int """ try: response = requests.get('http://overpass-api.de/api/status') status = response.text.split('\n')[3] status_first_token = status.split(' ')[0] except Exception: # if status endpoint cannot be reached or output parsed, log error # and return default duration log('Unable to query http://overpass-api.de/api/status', level=lg.ERROR) return default_duration try: # if first token is numeric, it indicates the number of slots # available - no wait required available_slots = int(status_first_token) pause_duration = 0 except Exception: # if first token is 'Slot', it tells you when your slot will be free if status_first_token == 'Slot': utc_time_str = status.split(' ')[3] utc_time = date_parser.parse(utc_time_str).replace(tzinfo=None) pause_duration = math.ceil( (utc_time - dt.datetime.utcnow()).total_seconds()) pause_duration = max(pause_duration, 1) # if first token is 'Currently', it is currently running a query so # check back in recursive_delay seconds elif status_first_token == 'Currently': time.sleep(recursive_delay) pause_duration = get_pause_duration() else: # any other status is unrecognized - log an error and return # default duration log('Unrecognized server status: "{}"'.format(status), level=lg.ERROR) return default_duration return pause_duration
[ "def", "get_pause_duration", "(", "recursive_delay", "=", "5", ",", "default_duration", "=", "10", ")", ":", "try", ":", "response", "=", "requests", ".", "get", "(", "'http://overpass-api.de/api/status'", ")", "status", "=", "response", ".", "text", ".", "split", "(", "'\\n'", ")", "[", "3", "]", "status_first_token", "=", "status", ".", "split", "(", "' '", ")", "[", "0", "]", "except", "Exception", ":", "# if status endpoint cannot be reached or output parsed, log error", "# and return default duration", "log", "(", "'Unable to query http://overpass-api.de/api/status'", ",", "level", "=", "lg", ".", "ERROR", ")", "return", "default_duration", "try", ":", "# if first token is numeric, it indicates the number of slots", "# available - no wait required", "available_slots", "=", "int", "(", "status_first_token", ")", "pause_duration", "=", "0", "except", "Exception", ":", "# if first token is 'Slot', it tells you when your slot will be free", "if", "status_first_token", "==", "'Slot'", ":", "utc_time_str", "=", "status", ".", "split", "(", "' '", ")", "[", "3", "]", "utc_time", "=", "date_parser", ".", "parse", "(", "utc_time_str", ")", ".", "replace", "(", "tzinfo", "=", "None", ")", "pause_duration", "=", "math", ".", "ceil", "(", "(", "utc_time", "-", "dt", ".", "datetime", ".", "utcnow", "(", ")", ")", ".", "total_seconds", "(", ")", ")", "pause_duration", "=", "max", "(", "pause_duration", ",", "1", ")", "# if first token is 'Currently', it is currently running a query so", "# check back in recursive_delay seconds", "elif", "status_first_token", "==", "'Currently'", ":", "time", ".", "sleep", "(", "recursive_delay", ")", "pause_duration", "=", "get_pause_duration", "(", ")", "else", ":", "# any other status is unrecognized - log an error and return", "# default duration", "log", "(", "'Unrecognized server status: \"{}\"'", ".", "format", "(", "status", ")", ",", "level", "=", "lg", ".", "ERROR", ")", "return", "default_duration", "return", "pause_duration" ]
Check the Overpass API status endpoint to determine how long to wait until next slot is available. Parameters ---------- recursive_delay : int how long to wait between recursive calls if server is currently running a query default_duration : int if fatal error, function falls back on returning this value Returns ------- pause_duration : int
[ "Check", "the", "Overpass", "API", "status", "endpoint", "to", "determine", "how", "long", "to", "wait", "until", "next", "slot", "is", "available", "." ]
train
https://github.com/UDST/osmnet/blob/155110a8e38d3646b9dbc3ec729063930cab3d5f/osmnet/load.py#L273-L328
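A minimal sketch of the intended rate-limiting pattern (the import is shown only for self-containment; the module already uses time):

import time

pause = get_pause_duration()  # 0 if a request slot is free right now
if pause > 0:
    time.sleep(pause)  # wait until the Overpass API frees a slot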
UDST/osmnet
osmnet/load.py
consolidate_subdivide_geometry
def consolidate_subdivide_geometry(geometry, max_query_area_size):
    """
    Consolidate a geometry into a convex hull, then subdivide it into
    smaller sub-polygons if its area exceeds max size (in geometry's units).

    Parameters
    ----------
    geometry : shapely Polygon or MultiPolygon
        the geometry to consolidate and subdivide
    max_query_area_size : float
        max area for any part of the geometry, in the units the geometry
        is in: any polygon bigger will get divided up for multiple queries
        to the Overpass API (default is 50,000 * 50,000 units (ie, 50km x
        50km in area, if units are meters))

    Returns
    -------
    geometry : MultiPolygon
    """

    # let the linear length of the quadrats (with which to subdivide the
    # geometry) be the square root of max area size
    quadrat_width = math.sqrt(max_query_area_size)

    if not isinstance(geometry, (Polygon, MultiPolygon)):
        raise ValueError('Geometry must be a shapely Polygon or MultiPolygon')

    # if geometry is a MultiPolygon OR a single Polygon whose area exceeds
    # the max size, get the convex hull around the geometry
    if isinstance(geometry, MultiPolygon) or \
            (isinstance(geometry, Polygon) and
             geometry.area > max_query_area_size):
        geometry = geometry.convex_hull

    # if geometry area exceeds max size, subdivide it into smaller sub-polygons
    if geometry.area > max_query_area_size:
        geometry = quadrat_cut_geometry(geometry, quadrat_width=quadrat_width)

    if isinstance(geometry, Polygon):
        geometry = MultiPolygon([geometry])

    return geometry
python
def consolidate_subdivide_geometry(geometry, max_query_area_size):
    """
    Consolidate a geometry into a convex hull, then subdivide it into
    smaller sub-polygons if its area exceeds max size (in geometry's units).

    Parameters
    ----------
    geometry : shapely Polygon or MultiPolygon
        the geometry to consolidate and subdivide
    max_query_area_size : float
        max area for any part of the geometry, in the units the geometry
        is in: any polygon bigger will get divided up for multiple queries
        to the Overpass API (default is 50,000 * 50,000 units (ie, 50km x
        50km in area, if units are meters))

    Returns
    -------
    geometry : MultiPolygon
    """

    # let the linear length of the quadrats (with which to subdivide the
    # geometry) be the square root of max area size
    quadrat_width = math.sqrt(max_query_area_size)

    if not isinstance(geometry, (Polygon, MultiPolygon)):
        raise ValueError('Geometry must be a shapely Polygon or MultiPolygon')

    # if geometry is a MultiPolygon OR a single Polygon whose area exceeds
    # the max size, get the convex hull around the geometry
    if isinstance(geometry, MultiPolygon) or \
            (isinstance(geometry, Polygon) and
             geometry.area > max_query_area_size):
        geometry = geometry.convex_hull

    # if geometry area exceeds max size, subdivide it into smaller sub-polygons
    if geometry.area > max_query_area_size:
        geometry = quadrat_cut_geometry(geometry, quadrat_width=quadrat_width)

    if isinstance(geometry, Polygon):
        geometry = MultiPolygon([geometry])

    return geometry
[ "def", "consolidate_subdivide_geometry", "(", "geometry", ",", "max_query_area_size", ")", ":", "# let the linear length of the quadrats (with which to subdivide the", "# geometry) be the square root of max area size", "quadrat_width", "=", "math", ".", "sqrt", "(", "max_query_area_size", ")", "if", "not", "isinstance", "(", "geometry", ",", "(", "Polygon", ",", "MultiPolygon", ")", ")", ":", "raise", "ValueError", "(", "'Geometry must be a shapely Polygon or MultiPolygon'", ")", "# if geometry is a MultiPolygon OR a single Polygon whose area exceeds", "# the max size, get the convex hull around the geometry", "if", "isinstance", "(", "geometry", ",", "MultiPolygon", ")", "or", "(", "isinstance", "(", "geometry", ",", "Polygon", ")", "and", "geometry", ".", "area", ">", "max_query_area_size", ")", ":", "geometry", "=", "geometry", ".", "convex_hull", "# if geometry area exceeds max size, subdivide it into smaller sub-polygons", "if", "geometry", ".", "area", ">", "max_query_area_size", ":", "geometry", "=", "quadrat_cut_geometry", "(", "geometry", ",", "quadrat_width", "=", "quadrat_width", ")", "if", "isinstance", "(", "geometry", ",", "Polygon", ")", ":", "geometry", "=", "MultiPolygon", "(", "[", "geometry", "]", ")", "return", "geometry" ]
Consolidate a geometry into a convex hull, then subdivide it into smaller sub-polygons if its area exceeds max size (in geometry's units). Parameters ---------- geometry : shapely Polygon or MultiPolygon the geometry to consolidate and subdivide max_query_area_size : float max area for any part of the geometry, in the units the geometry is in: any polygon bigger will get divided up for multiple queries to the Overpass API (default is 50,000 * 50,000 units (ie, 50km x 50km in area, if units are meters)) Returns ------- geometry : MultiPolygon
[ "Consolidate", "a", "geometry", "into", "a", "convex", "hull", "then", "subdivide", "it", "into", "smaller", "sub", "-", "polygons", "if", "its", "area", "exceeds", "max", "size", "(", "in", "geometry", "s", "units", ")", "." ]
train
https://github.com/UDST/osmnet/blob/155110a8e38d3646b9dbc3ec729063930cab3d5f/osmnet/load.py#L331-L373
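A minimal sketch in projected (meter) units; the 100 km square below is illustrative and exceeds the default 50 km x 50 km limit, so it gets subdivided:

from shapely.geometry import Polygon

square = Polygon([(0, 0), (100000, 0), (100000, 100000), (0, 100000)])
pieces = consolidate_subdivide_geometry(square,
                                        max_query_area_size=50*1000*50*1000)
print(len(pieces.geoms))  # 4 sub-polygons of roughly 50km x 50km each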
UDST/osmnet
osmnet/load.py
quadrat_cut_geometry
def quadrat_cut_geometry(geometry, quadrat_width, min_num=3,
                         buffer_amount=1e-9):
    """
    Split a Polygon or MultiPolygon up into sub-polygons of a specified size,
    using quadrats.

    Parameters
    ----------
    geometry : shapely Polygon or MultiPolygon
        the geometry to split up into smaller sub-polygons
    quadrat_width : float
        the linear width of the quadrats with which to cut up the geometry
        (in the units the geometry is in)
    min_num : int
        the minimum number of linear quadrat lines (e.g., min_num=3 would
        produce a quadrat grid of 4 squares)
    buffer_amount : float
        buffer the quadrat grid lines by quadrat_width times buffer_amount

    Returns
    -------
    multipoly : shapely MultiPolygon
    """

    # create n evenly spaced points between the min and max x and y bounds
    lng_max, lat_min, lng_min, lat_max = geometry.bounds
    x_num = math.ceil((lng_min-lng_max) / quadrat_width) + 1
    y_num = math.ceil((lat_max-lat_min) / quadrat_width) + 1
    x_points = np.linspace(lng_max, lng_min, num=max(x_num, min_num))
    y_points = np.linspace(lat_min, lat_max, num=max(y_num, min_num))

    # create a quadrat grid of lines at each of the evenly spaced points
    vertical_lines = [LineString([(x, y_points[0]), (x, y_points[-1])])
                      for x in x_points]
    horizont_lines = [LineString([(x_points[0], y), (x_points[-1], y)])
                      for y in y_points]
    lines = vertical_lines + horizont_lines

    # buffer each line to distance of the quadrat width divided by 1 billion,
    # take their union, then cut geometry into pieces by these quadrats
    buffer_size = quadrat_width * buffer_amount
    lines_buffered = [line.buffer(buffer_size) for line in lines]
    quadrats = unary_union(lines_buffered)
    multipoly = geometry.difference(quadrats)

    return multipoly
python
def quadrat_cut_geometry(geometry, quadrat_width, min_num=3,
                         buffer_amount=1e-9):
    """
    Split a Polygon or MultiPolygon up into sub-polygons of a specified size,
    using quadrats.

    Parameters
    ----------
    geometry : shapely Polygon or MultiPolygon
        the geometry to split up into smaller sub-polygons
    quadrat_width : float
        the linear width of the quadrats with which to cut up the geometry
        (in the units the geometry is in)
    min_num : int
        the minimum number of linear quadrat lines (e.g., min_num=3 would
        produce a quadrat grid of 4 squares)
    buffer_amount : float
        buffer the quadrat grid lines by quadrat_width times buffer_amount

    Returns
    -------
    multipoly : shapely MultiPolygon
    """

    # create n evenly spaced points between the min and max x and y bounds
    lng_max, lat_min, lng_min, lat_max = geometry.bounds
    x_num = math.ceil((lng_min-lng_max) / quadrat_width) + 1
    y_num = math.ceil((lat_max-lat_min) / quadrat_width) + 1
    x_points = np.linspace(lng_max, lng_min, num=max(x_num, min_num))
    y_points = np.linspace(lat_min, lat_max, num=max(y_num, min_num))

    # create a quadrat grid of lines at each of the evenly spaced points
    vertical_lines = [LineString([(x, y_points[0]), (x, y_points[-1])])
                      for x in x_points]
    horizont_lines = [LineString([(x_points[0], y), (x_points[-1], y)])
                      for y in y_points]
    lines = vertical_lines + horizont_lines

    # buffer each line to distance of the quadrat width divided by 1 billion,
    # take their union, then cut geometry into pieces by these quadrats
    buffer_size = quadrat_width * buffer_amount
    lines_buffered = [line.buffer(buffer_size) for line in lines]
    quadrats = unary_union(lines_buffered)
    multipoly = geometry.difference(quadrats)

    return multipoly
[ "def", "quadrat_cut_geometry", "(", "geometry", ",", "quadrat_width", ",", "min_num", "=", "3", ",", "buffer_amount", "=", "1e-9", ")", ":", "# create n evenly spaced points between the min and max x and y bounds", "lng_max", ",", "lat_min", ",", "lng_min", ",", "lat_max", "=", "geometry", ".", "bounds", "x_num", "=", "math", ".", "ceil", "(", "(", "lng_min", "-", "lng_max", ")", "/", "quadrat_width", ")", "+", "1", "y_num", "=", "math", ".", "ceil", "(", "(", "lat_max", "-", "lat_min", ")", "/", "quadrat_width", ")", "+", "1", "x_points", "=", "np", ".", "linspace", "(", "lng_max", ",", "lng_min", ",", "num", "=", "max", "(", "x_num", ",", "min_num", ")", ")", "y_points", "=", "np", ".", "linspace", "(", "lat_min", ",", "lat_max", ",", "num", "=", "max", "(", "y_num", ",", "min_num", ")", ")", "# create a quadrat grid of lines at each of the evenly spaced points", "vertical_lines", "=", "[", "LineString", "(", "[", "(", "x", ",", "y_points", "[", "0", "]", ")", ",", "(", "x", ",", "y_points", "[", "-", "1", "]", ")", "]", ")", "for", "x", "in", "x_points", "]", "horizont_lines", "=", "[", "LineString", "(", "[", "(", "x_points", "[", "0", "]", ",", "y", ")", ",", "(", "x_points", "[", "-", "1", "]", ",", "y", ")", "]", ")", "for", "y", "in", "y_points", "]", "lines", "=", "vertical_lines", "+", "horizont_lines", "# buffer each line to distance of the quadrat width divided by 1 billion,", "# take their union, then cut geometry into pieces by these quadrats", "buffer_size", "=", "quadrat_width", "*", "buffer_amount", "lines_buffered", "=", "[", "line", ".", "buffer", "(", "buffer_size", ")", "for", "line", "in", "lines", "]", "quadrats", "=", "unary_union", "(", "lines_buffered", ")", "multipoly", "=", "geometry", ".", "difference", "(", "quadrats", ")", "return", "multipoly" ]
Split a Polygon or MultiPolygon up into sub-polygons of a specified size, using quadrats. Parameters ---------- geometry : shapely Polygon or MultiPolygon the geometry to split up into smaller sub-polygons quadrat_width : float the linear width of the quadrats with which to cut up the geometry (in the units the geometry is in) min_num : int the minimum number of linear quadrat lines (e.g., min_num=3 would produce a quadrat grid of 4 squares) buffer_amount : float buffer the quadrat grid lines by quadrat_width times buffer_amount Returns ------- multipoly : shapely MultiPolygon
[ "Split", "a", "Polygon", "or", "MultiPolygon", "up", "into", "sub", "-", "polygons", "of", "a", "specified", "size", "using", "quadrats", "." ]
train
https://github.com/UDST/osmnet/blob/155110a8e38d3646b9dbc3ec729063930cab3d5f/osmnet/load.py#L376-L421
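A minimal sketch of quadrat_cut_geometry on a unit square (illustrative input):

from shapely.geometry import Polygon

unit_square = Polygon([(0, 0), (1, 0), (1, 1), (0, 1)])
multipoly = quadrat_cut_geometry(unit_square, quadrat_width=0.5)
print(len(multipoly.geoms))  # 4: min_num=3 grid lines per axis -> 2x2 squares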
UDST/osmnet
osmnet/load.py
project_geometry
def project_geometry(geometry, crs, to_latlong=False):
    """
    Project a shapely Polygon or MultiPolygon from WGS84 to UTM, or
    vice-versa

    Parameters
    ----------
    geometry : shapely Polygon or MultiPolygon
        the geometry to project
    crs : dict or int
        the starting coordinate reference system of the passed-in geometry
    to_latlong : bool
        if True, project from crs to WGS84, if False, project
        from crs to local UTM zone

    Returns
    -------
    geometry_proj, crs : tuple
        (projected shapely geometry, crs of the projected geometry)
    """
    gdf = gpd.GeoDataFrame()
    gdf.crs = crs
    gdf.name = 'geometry to project'
    gdf['geometry'] = None
    gdf.loc[0, 'geometry'] = geometry
    gdf_proj = project_gdf(gdf, to_latlong=to_latlong)
    geometry_proj = gdf_proj['geometry'].iloc[0]
    return geometry_proj, gdf_proj.crs
python
def project_geometry(geometry, crs, to_latlong=False):
    """
    Project a shapely Polygon or MultiPolygon from WGS84 to UTM, or
    vice-versa

    Parameters
    ----------
    geometry : shapely Polygon or MultiPolygon
        the geometry to project
    crs : dict or int
        the starting coordinate reference system of the passed-in geometry
    to_latlong : bool
        if True, project from crs to WGS84, if False, project
        from crs to local UTM zone

    Returns
    -------
    geometry_proj, crs : tuple
        (projected shapely geometry, crs of the projected geometry)
    """
    gdf = gpd.GeoDataFrame()
    gdf.crs = crs
    gdf.name = 'geometry to project'
    gdf['geometry'] = None
    gdf.loc[0, 'geometry'] = geometry
    gdf_proj = project_gdf(gdf, to_latlong=to_latlong)
    geometry_proj = gdf_proj['geometry'].iloc[0]
    return geometry_proj, gdf_proj.crs
[ "def", "project_geometry", "(", "geometry", ",", "crs", ",", "to_latlong", "=", "False", ")", ":", "gdf", "=", "gpd", ".", "GeoDataFrame", "(", ")", "gdf", ".", "crs", "=", "crs", "gdf", ".", "name", "=", "'geometry to project'", "gdf", "[", "'geometry'", "]", "=", "None", "gdf", ".", "loc", "[", "0", ",", "'geometry'", "]", "=", "geometry", "gdf_proj", "=", "project_gdf", "(", "gdf", ",", "to_latlong", "=", "to_latlong", ")", "geometry_proj", "=", "gdf_proj", "[", "'geometry'", "]", ".", "iloc", "[", "0", "]", "return", "geometry_proj", ",", "gdf_proj", ".", "crs" ]
Project a shapely Polygon or MultiPolygon from WGS84 to UTM, or vice-versa Parameters ---------- geometry : shapely Polygon or MultiPolygon the geometry to project crs : dict or int the starting coordinate reference system of the passed-in geometry to_latlong : bool if True, project from crs to WGS84, if False, project from crs to local UTM zone Returns ------- geometry_proj, crs : tuple (projected shapely geometry, crs of the projected geometry)
[ "Project", "a", "shapely", "Polygon", "or", "MultiPolygon", "from", "WGS84", "to", "UTM", "or", "vice", "-", "versa" ]
train
https://github.com/UDST/osmnet/blob/155110a8e38d3646b9dbc3ec729063930cab3d5f/osmnet/load.py#L424-L450
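A minimal round-trip sketch for project_geometry; the bounding box is illustrative, and the crs dict mirrors the call made in osm_net_download above:

from shapely.geometry import Polygon

bbox = Polygon([(-122.44, 37.76), (-122.42, 37.76),
                (-122.42, 37.78), (-122.44, 37.78)])
bbox_utm, crs_utm = project_geometry(bbox, crs={'init': 'epsg:4326'})
bbox_wgs, crs_wgs = project_geometry(bbox_utm, crs=crs_utm, to_latlong=True)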