id
int32
0
252k
repo
stringlengths
7
55
path
stringlengths
4
127
func_name
stringlengths
1
88
original_string
stringlengths
75
19.8k
language
stringclasses
1 value
code
stringlengths
75
19.8k
code_tokens
list
docstring
stringlengths
3
17.3k
docstring_tokens
list
sha
stringlengths
40
40
url
stringlengths
87
242
245,500
pyokagan/pyglreg
glreg.py
Registry.text
def text(self): """Formatted API declarations. Equivalent to the concatenation of `text` attributes of types, enums and commands in this Registry. """ out = [] out.extend(x.text for x in self.types.values()) out.extend(x.text for x in self.enums.values()) out.extend('extern {0};'.format(x.text) for x in self.commands.values()) return '\n'.join(out)
python
def text(self):
    """Formatted API declarations.

    Equivalent to the concatenation of `text` attributes of types,
    enums and commands in this Registry.
    """
    declarations = [t.text for t in self.types.values()]
    declarations += [e.text for e in self.enums.values()]
    declarations += ['extern {0};'.format(c.text)
                     for c in self.commands.values()]
    return '\n'.join(declarations)
[ "def", "text", "(", "self", ")", ":", "out", "=", "[", "]", "out", ".", "extend", "(", "x", ".", "text", "for", "x", "in", "self", ".", "types", ".", "values", "(", ")", ")", "out", ".", "extend", "(", "x", ".", "text", "for", "x", "in", "self", ".", "enums", ".", "values", "(", ")", ")", "out", ".", "extend", "(", "'extern {0};'", ".", "format", "(", "x", ".", "text", ")", "for", "x", "in", "self", ".", "commands", ".", "values", "(", ")", ")", "return", "'\\n'", ".", "join", "(", "out", ")" ]
Formatted API declarations. Equivalent to the concatenation of `text` attributes of types, enums and commands in this Registry.
[ "Formatted", "API", "declarations", "." ]
68fa5a6c6cee8667879840fbbcc7d30f52852915
https://github.com/pyokagan/pyglreg/blob/68fa5a6c6cee8667879840fbbcc7d30f52852915/glreg.py#L486-L497
245,501
pyokagan/pyglreg
glreg.py
Registry.get_type
def get_type(self, name, api=None): """Returns Type `name`, with preference for the Type of `api`. :param str name: Type name :param str api: api name to prefer, of None to prefer types with no api name :return: Type object """ k = (name, api) if k in self.types: return self.types[k] else: return self.types[(name, None)]
python
def get_type(self, name, api=None):
    """Returns Type `name`, with preference for the Type of `api`.

    :param str name: Type name
    :param str api: api name to prefer, or None to prefer types with
                    no api name
    :return: Type object
    """
    # EAFP: try the api-specific key first, then fall back to the
    # api-less entry.
    try:
        return self.types[(name, api)]
    except KeyError:
        return self.types[(name, None)]
[ "def", "get_type", "(", "self", ",", "name", ",", "api", "=", "None", ")", ":", "k", "=", "(", "name", ",", "api", ")", "if", "k", "in", "self", ".", "types", ":", "return", "self", ".", "types", "[", "k", "]", "else", ":", "return", "self", ".", "types", "[", "(", "name", ",", "None", ")", "]" ]
Returns Type `name`, with preference for the Type of `api`. :param str name: Type name :param str api: api name to prefer, or None to prefer types with no api name :return: Type object
[ "Returns", "Type", "name", "with", "preference", "for", "the", "Type", "of", "api", "." ]
68fa5a6c6cee8667879840fbbcc7d30f52852915
https://github.com/pyokagan/pyglreg/blob/68fa5a6c6cee8667879840fbbcc7d30f52852915/glreg.py#L499-L511
245,502
pyokagan/pyglreg
glreg.py
Registry.get_features
def get_features(self, api=None): """Returns filtered list of features in this registry :param str api: Return only features with this api name, or None to return all features. :return: list of Feature objects """ return [x for x in self.features.values() if api and x.api == api or not api]
python
def get_features(self, api=None):
    """Returns filtered list of features in this registry

    :param str api: Return only features with this api name, or None
                    to return all features.
    :return: list of Feature objects
    """
    if not api:
        return list(self.features.values())
    return [feature for feature in self.features.values()
            if feature.api == api]
[ "def", "get_features", "(", "self", ",", "api", "=", "None", ")", ":", "return", "[", "x", "for", "x", "in", "self", ".", "features", ".", "values", "(", ")", "if", "api", "and", "x", ".", "api", "==", "api", "or", "not", "api", "]" ]
Returns filtered list of features in this registry :param str api: Return only features with this api name, or None to return all features. :return: list of Feature objects
[ "Returns", "filtered", "list", "of", "features", "in", "this", "registry" ]
68fa5a6c6cee8667879840fbbcc7d30f52852915
https://github.com/pyokagan/pyglreg/blob/68fa5a6c6cee8667879840fbbcc7d30f52852915/glreg.py#L513-L521
245,503
pyokagan/pyglreg
glreg.py
Registry.get_extensions
def get_extensions(self, support=None): """Returns filtered list of extensions in this registry :param support: Return only extensions with this extension support string, or None to return all extensions. :return: list of Extension objects """ return [x for x in self.extensions.values() if support and support in x.supported or not support]
python
def get_extensions(self, support=None):
    """Returns filtered list of extensions in this registry

    :param support: Return only extensions with this extension support
                    string, or None to return all extensions.
    :return: list of Extension objects
    """
    if not support:
        return list(self.extensions.values())
    # NOTE: substring containment test, matching the original semantics.
    return [ext for ext in self.extensions.values()
            if support in ext.supported]
[ "def", "get_extensions", "(", "self", ",", "support", "=", "None", ")", ":", "return", "[", "x", "for", "x", "in", "self", ".", "extensions", ".", "values", "(", ")", "if", "support", "and", "support", "in", "x", ".", "supported", "or", "not", "support", "]" ]
Returns filtered list of extensions in this registry :param support: Return only extensions with this extension support string, or None to return all extensions. :return: list of Extension objects
[ "Returns", "filtered", "list", "of", "extensions", "in", "this", "registry" ]
68fa5a6c6cee8667879840fbbcc7d30f52852915
https://github.com/pyokagan/pyglreg/blob/68fa5a6c6cee8667879840fbbcc7d30f52852915/glreg.py#L523-L531
245,504
pyokagan/pyglreg
glreg.py
Registry.get_requires
def get_requires(self, api=None, profile=None, support=None): """Returns filtered list of Require objects in this registry :param str api: Return Require objects with this api name or None to return all Require objects. :param str profile: Return Require objects with this profile or None to return all Require objects. :param str support: Return Require objects with this extension support string or None to return all Require objects. :return: list of Require objects """ out = [] for ft in self.get_features(api): out.extend(ft.get_requires(profile)) for ext in self.extensions.values(): # Filter extension support if support and support not in ext.supported: continue out.extend(ext.get_requires(api, profile)) return out
python
def get_requires(self, api=None, profile=None, support=None):
    """Returns filtered list of Require objects in this registry

    :param str api: Return Require objects with this api name or None
                    to return all Require objects.
    :param str profile: Return Require objects with this profile or
                        None to return all Require objects.
    :param str support: Return Require objects with this extension
                        support string or None to return all Require
                        objects.
    :return: list of Require objects
    """
    requires = []
    for feature in self.get_features(api):
        requires.extend(feature.get_requires(profile))
    for extension in self.extensions.values():
        # Filter extension support
        if support and support not in extension.supported:
            continue
        requires.extend(extension.get_requires(api, profile))
    return requires
[ "def", "get_requires", "(", "self", ",", "api", "=", "None", ",", "profile", "=", "None", ",", "support", "=", "None", ")", ":", "out", "=", "[", "]", "for", "ft", "in", "self", ".", "get_features", "(", "api", ")", ":", "out", ".", "extend", "(", "ft", ".", "get_requires", "(", "profile", ")", ")", "for", "ext", "in", "self", ".", "extensions", ".", "values", "(", ")", ":", "# Filter extension support", "if", "support", "and", "support", "not", "in", "ext", ".", "supported", ":", "continue", "out", ".", "extend", "(", "ext", ".", "get_requires", "(", "api", ",", "profile", ")", ")", "return", "out" ]
Returns filtered list of Require objects in this registry :param str api: Return Require objects with this api name or None to return all Require objects. :param str profile: Return Require objects with this profile or None to return all Require objects. :param str support: Return Require objects with this extension support string or None to return all Require objects. :return: list of Require objects
[ "Returns", "filtered", "list", "of", "Require", "objects", "in", "this", "registry" ]
68fa5a6c6cee8667879840fbbcc7d30f52852915
https://github.com/pyokagan/pyglreg/blob/68fa5a6c6cee8667879840fbbcc7d30f52852915/glreg.py#L533-L552
245,505
pyokagan/pyglreg
glreg.py
Registry.get_removes
def get_removes(self, api=None, profile=None): """Returns filtered list of Remove objects in this registry :param str api: Return Remove objects with this api name or None to return all Remove objects. :param str profile: Return Remove objects with this profile or None to return all Remove objects. :return: list of Remove objects """ out = [] for ft in self.get_features(api): out.extend(ft.get_removes(profile)) return out
python
def get_removes(self, api=None, profile=None):
    """Returns filtered list of Remove objects in this registry

    :param str api: Return Remove objects with this api name or None
                    to return all Remove objects.
    :param str profile: Return Remove objects with this profile or
                        None to return all Remove objects.
    :return: list of Remove objects
    """
    removes = []
    for feature in self.get_features(api):
        removes.extend(feature.get_removes(profile))
    return removes
[ "def", "get_removes", "(", "self", ",", "api", "=", "None", ",", "profile", "=", "None", ")", ":", "out", "=", "[", "]", "for", "ft", "in", "self", ".", "get_features", "(", "api", ")", ":", "out", ".", "extend", "(", "ft", ".", "get_removes", "(", "profile", ")", ")", "return", "out" ]
Returns filtered list of Remove objects in this registry :param str api: Return Remove objects with this api name or None to return all Remove objects. :param str profile: Return Remove objects with this profile or None to return all Remove objects. :return: list of Remove objects
[ "Returns", "filtered", "list", "of", "Remove", "objects", "in", "this", "registry" ]
68fa5a6c6cee8667879840fbbcc7d30f52852915
https://github.com/pyokagan/pyglreg/blob/68fa5a6c6cee8667879840fbbcc7d30f52852915/glreg.py#L554-L566
245,506
pyokagan/pyglreg
glreg.py
Registry.get_apis
def get_apis(self): """Returns set of api names referenced in this Registry :return: set of api name strings """ out = set(x.api for x in self.types.values() if x.api) for ft in self.features.values(): out.update(ft.get_apis()) for ext in self.extensions.values(): out.update(ext.get_apis()) return out
python
def get_apis(self):
    """Returns set of api names referenced in this Registry

    :return: set of api name strings
    """
    apis = {t.api for t in self.types.values() if t.api}
    for feature in self.features.values():
        apis.update(feature.get_apis())
    for extension in self.extensions.values():
        apis.update(extension.get_apis())
    return apis
[ "def", "get_apis", "(", "self", ")", ":", "out", "=", "set", "(", "x", ".", "api", "for", "x", "in", "self", ".", "types", ".", "values", "(", ")", "if", "x", ".", "api", ")", "for", "ft", "in", "self", ".", "features", ".", "values", "(", ")", ":", "out", ".", "update", "(", "ft", ".", "get_apis", "(", ")", ")", "for", "ext", "in", "self", ".", "extensions", ".", "values", "(", ")", ":", "out", ".", "update", "(", "ext", ".", "get_apis", "(", ")", ")", "return", "out" ]
Returns set of api names referenced in this Registry :return: set of api name strings
[ "Returns", "set", "of", "api", "names", "referenced", "in", "this", "Registry" ]
68fa5a6c6cee8667879840fbbcc7d30f52852915
https://github.com/pyokagan/pyglreg/blob/68fa5a6c6cee8667879840fbbcc7d30f52852915/glreg.py#L568-L578
245,507
pyokagan/pyglreg
glreg.py
Registry.get_profiles
def get_profiles(self): """Returns set of profile names referenced in this Registry :return: set of profile name strings """ out = set() for ft in self.features.values(): out.update(ft.get_profiles()) for ext in self.extensions.values(): out.update(ext.get_profiles()) return out
python
def get_profiles(self):
    """Returns set of profile names referenced in this Registry

    :return: set of profile name strings
    """
    profiles = set()
    # Both features and extensions contribute profile names.
    for collection in (self.features, self.extensions):
        for item in collection.values():
            profiles.update(item.get_profiles())
    return profiles
[ "def", "get_profiles", "(", "self", ")", ":", "out", "=", "set", "(", ")", "for", "ft", "in", "self", ".", "features", ".", "values", "(", ")", ":", "out", ".", "update", "(", "ft", ".", "get_profiles", "(", ")", ")", "for", "ext", "in", "self", ".", "extensions", ".", "values", "(", ")", ":", "out", ".", "update", "(", "ext", ".", "get_profiles", "(", ")", ")", "return", "out" ]
Returns set of profile names referenced in this Registry :return: set of profile name strings
[ "Returns", "set", "of", "profile", "names", "referenced", "in", "this", "Registry" ]
68fa5a6c6cee8667879840fbbcc7d30f52852915
https://github.com/pyokagan/pyglreg/blob/68fa5a6c6cee8667879840fbbcc7d30f52852915/glreg.py#L580-L590
245,508
pyokagan/pyglreg
glreg.py
Registry.get_supports
def get_supports(self): """Returns set of extension support strings referenced in this Registry :return: set of extension support strings """ out = set() for ext in self.extensions.values(): out.update(ext.get_supports()) return out
python
def get_supports(self):
    """Returns set of extension support strings referenced in this
    Registry

    :return: set of extension support strings
    """
    supports = set()
    for extension in self.extensions.values():
        supports.update(extension.get_supports())
    return supports
[ "def", "get_supports", "(", "self", ")", ":", "out", "=", "set", "(", ")", "for", "ext", "in", "self", ".", "extensions", ".", "values", "(", ")", ":", "out", ".", "update", "(", "ext", ".", "get_supports", "(", ")", ")", "return", "out" ]
Returns set of extension support strings referenced in this Registry :return: set of extension support strings
[ "Returns", "set", "of", "extension", "support", "strings", "referenced", "in", "this", "Registry" ]
68fa5a6c6cee8667879840fbbcc7d30f52852915
https://github.com/pyokagan/pyglreg/blob/68fa5a6c6cee8667879840fbbcc7d30f52852915/glreg.py#L592-L600
245,509
evfredericksen/pynacea
pynhost/pynhost/platforms/linux.py
get_open_window_names
def get_open_window_names(): ''' Return a dict with open program names and their corresponding decimal ids ''' raw_names = subprocess.check_output(['wmctrl', '-l']).decode('utf8').split('\n') split_names = [name.split() for name in raw_names if name] name_dict = {} for name in split_names: if not int(name[1]): name_dict[' '.join(name[3:]).lower()] = name[0] return name_dict
python
def get_open_window_names():
    '''
    Return a dict with open program names and their corresponding
    decimal ids
    '''
    output = subprocess.check_output(['wmctrl', '-l']).decode('utf8')
    name_dict = {}
    for line in output.split('\n'):
        if not line:
            continue
        fields = line.split()
        # field 1 is the desktop index; only windows on desktop 0 are kept
        # (nonzero — including -1 for sticky windows — is skipped)
        if not int(fields[1]):
            name_dict[' '.join(fields[3:]).lower()] = fields[0]
    return name_dict
[ "def", "get_open_window_names", "(", ")", ":", "raw_names", "=", "subprocess", ".", "check_output", "(", "[", "'wmctrl'", ",", "'-l'", "]", ")", ".", "decode", "(", "'utf8'", ")", ".", "split", "(", "'\\n'", ")", "split_names", "=", "[", "name", ".", "split", "(", ")", "for", "name", "in", "raw_names", "if", "name", "]", "name_dict", "=", "{", "}", "for", "name", "in", "split_names", ":", "if", "not", "int", "(", "name", "[", "1", "]", ")", ":", "name_dict", "[", "' '", ".", "join", "(", "name", "[", "3", ":", "]", ")", ".", "lower", "(", ")", "]", "=", "name", "[", "0", "]", "return", "name_dict" ]
Return a dict with open program names and their corresponding decimal ids
[ "Return", "a", "dict", "with", "open", "program", "names", "and", "their", "corresponding", "decimal", "ids" ]
63ee0e6695209048bf2571aa2c3770f502e29b0a
https://github.com/evfredericksen/pynacea/blob/63ee0e6695209048bf2571aa2c3770f502e29b0a/pynhost/pynhost/platforms/linux.py#L62-L72
245,510
pudo-attic/loadkit
loadkit/operators/table.py
resource_row_set
def resource_row_set(package, resource): """ Generate an iterator over all the rows in this resource's source data. """ # This is a work-around because messytables hangs on boto file # handles, so we're doing it via plain old HTTP. table_set = any_tableset(resource.fh(), extension=resource.meta.get('extension'), mimetype=resource.meta.get('mime_type')) tables = list(table_set.tables) if not len(tables): log.error("No tables were found in the source file.") return row_set = tables[0] offset, headers = headers_guess(row_set.sample) row_set.register_processor(headers_processor(headers)) row_set.register_processor(offset_processor(offset + 1)) types = type_guess(row_set.sample, strict=True) row_set.register_processor(types_processor(types)) return row_set
python
def resource_row_set(package, resource):
    """ Generate an iterator over all the rows in this resource's
    source data. """
    # This is a work-around because messytables hangs on boto file
    # handles, so we're doing it via plain old HTTP.
    table_set = any_tableset(resource.fh(),
                             extension=resource.meta.get('extension'),
                             mimetype=resource.meta.get('mime_type'))
    tables = list(table_set.tables)
    if not tables:
        log.error("No tables were found in the source file.")
        return
    row_set = tables[0]
    offset, headers = headers_guess(row_set.sample)
    # Register header, offset and type-cast processors in order.
    guessed_types = type_guess(row_set.sample, strict=True)
    for processor in (headers_processor(headers),
                      offset_processor(offset + 1),
                      types_processor(guessed_types)):
        row_set.register_processor(processor)
    return row_set
[ "def", "resource_row_set", "(", "package", ",", "resource", ")", ":", "# This is a work-around because messytables hangs on boto file", "# handles, so we're doing it via plain old HTTP.", "table_set", "=", "any_tableset", "(", "resource", ".", "fh", "(", ")", ",", "extension", "=", "resource", ".", "meta", ".", "get", "(", "'extension'", ")", ",", "mimetype", "=", "resource", ".", "meta", ".", "get", "(", "'mime_type'", ")", ")", "tables", "=", "list", "(", "table_set", ".", "tables", ")", "if", "not", "len", "(", "tables", ")", ":", "log", ".", "error", "(", "\"No tables were found in the source file.\"", ")", "return", "row_set", "=", "tables", "[", "0", "]", "offset", ",", "headers", "=", "headers_guess", "(", "row_set", ".", "sample", ")", "row_set", ".", "register_processor", "(", "headers_processor", "(", "headers", ")", ")", "row_set", ".", "register_processor", "(", "offset_processor", "(", "offset", "+", "1", ")", ")", "types", "=", "type_guess", "(", "row_set", ".", "sample", ",", "strict", "=", "True", ")", "row_set", ".", "register_processor", "(", "types_processor", "(", "types", ")", ")", "return", "row_set" ]
Generate an iterator over all the rows in this resource's source data.
[ "Generate", "an", "iterator", "over", "all", "the", "rows", "in", "this", "resource", "s", "source", "data", "." ]
1fb17e69e2ffaf3dac4f40b574c3b7afb2198b7c
https://github.com/pudo-attic/loadkit/blob/1fb17e69e2ffaf3dac4f40b574c3b7afb2198b7c/loadkit/operators/table.py#L18-L37
245,511
pudo-attic/loadkit
loadkit/operators/table.py
column_alias
def column_alias(cell, names): """ Generate a normalized version of the column name. """ column = slugify(cell.column or '', sep='_') column = column.strip('_') column = 'column' if not len(column) else column name, i = column, 2 # de-dupe: column, column_2, column_3, ... while name in names: name = '%s_%s' % (name, i) i += 1 return name
python
def column_alias(cell, names):
    """ Generate a normalized version of the column name.

    :param cell: cell whose ``column`` attribute holds the raw name
    :param names: collection of aliases already in use
    :return: a unique, slugified column name
    """
    column = slugify(cell.column or '', sep='_')
    column = column.strip('_')
    column = 'column' if not len(column) else column
    name, i = column, 2
    # de-dupe: column, column_2, column_3, ...
    while name in names:
        # Suffix the *base* name each round. The previous code suffixed
        # the already-suffixed candidate, yielding column_2_3 instead of
        # the intended column_3.
        name = '%s_%s' % (column, i)
        i += 1
    return name
[ "def", "column_alias", "(", "cell", ",", "names", ")", ":", "column", "=", "slugify", "(", "cell", ".", "column", "or", "''", ",", "sep", "=", "'_'", ")", "column", "=", "column", ".", "strip", "(", "'_'", ")", "column", "=", "'column'", "if", "not", "len", "(", "column", ")", "else", "column", "name", ",", "i", "=", "column", ",", "2", "# de-dupe: column, column_2, column_3, ...", "while", "name", "in", "names", ":", "name", "=", "'%s_%s'", "%", "(", "name", ",", "i", ")", "i", "+=", "1", "return", "name" ]
Generate a normalized version of the column name.
[ "Generate", "a", "normalized", "version", "of", "the", "column", "name", "." ]
1fb17e69e2ffaf3dac4f40b574c3b7afb2198b7c
https://github.com/pudo-attic/loadkit/blob/1fb17e69e2ffaf3dac4f40b574c3b7afb2198b7c/loadkit/operators/table.py#L40-L50
245,512
pudo-attic/loadkit
loadkit/operators/table.py
random_sample
def random_sample(value, field, row, num=10): """ Collect a random sample of the values in a particular field based on the reservoir sampling technique. """ # TODO: Could become a more general DQ piece. if value is None: field['has_nulls'] = True return if value in field['samples']: return if isinstance(value, basestring) and not len(value.strip()): field['has_empty'] = True return if len(field['samples']) < num: field['samples'].append(value) return j = random.randint(0, row) if j < (num - 1): field['samples'][j] = value
python
def random_sample(value, field, row, num=10):
    """ Collect a random sample of the values in a particular field
    based on the reservoir sampling technique.

    :param value: candidate value for the sample
    :param field: dict with a ``samples`` list plus ``has_nulls`` /
                  ``has_empty`` quality flags
    :param row: index of the current row (the reservoir counter)
    :param num: maximum reservoir size
    """
    # TODO: Could become a more general DQ piece.
    if value is None:
        field['has_nulls'] = True
        return
    if value in field['samples']:
        return
    # ``basestring`` exists only on Python 2 and raises NameError on
    # Python 3; ``str`` keeps the intent (skip empty/whitespace strings).
    if isinstance(value, str) and not len(value.strip()):
        field['has_empty'] = True
        return
    if len(field['samples']) < num:
        field['samples'].append(value)
        return
    # Reservoir step: replace a random slot with decreasing probability.
    j = random.randint(0, row)
    if j < (num - 1):
        field['samples'][j] = value
[ "def", "random_sample", "(", "value", ",", "field", ",", "row", ",", "num", "=", "10", ")", ":", "# TODO: Could become a more general DQ piece.", "if", "value", "is", "None", ":", "field", "[", "'has_nulls'", "]", "=", "True", "return", "if", "value", "in", "field", "[", "'samples'", "]", ":", "return", "if", "isinstance", "(", "value", ",", "basestring", ")", "and", "not", "len", "(", "value", ".", "strip", "(", ")", ")", ":", "field", "[", "'has_empty'", "]", "=", "True", "return", "if", "len", "(", "field", "[", "'samples'", "]", ")", "<", "num", ":", "field", "[", "'samples'", "]", ".", "append", "(", "value", ")", "return", "j", "=", "random", ".", "randint", "(", "0", ",", "row", ")", "if", "j", "<", "(", "num", "-", "1", ")", ":", "field", "[", "'samples'", "]", "[", "j", "]", "=", "value" ]
Collect a random sample of the values in a particular field based on the reservoir sampling technique.
[ "Collect", "a", "random", "sample", "of", "the", "values", "in", "a", "particular", "field", "based", "on", "the", "reservoir", "sampling", "technique", "." ]
1fb17e69e2ffaf3dac4f40b574c3b7afb2198b7c
https://github.com/pudo-attic/loadkit/blob/1fb17e69e2ffaf3dac4f40b574c3b7afb2198b7c/loadkit/operators/table.py#L75-L92
245,513
lqdc/pysimstr
pysimstr.py
make_unique_ngrams
def make_unique_ngrams(s, n): """Make a set of unique n-grams from a string.""" return set(s[i:i + n] for i in range(len(s) - n + 1))
python
def make_unique_ngrams(s, n):
    """Make a set of unique n-grams from a string."""
    return {s[start:start + n] for start in range(len(s) - n + 1)}
[ "def", "make_unique_ngrams", "(", "s", ",", "n", ")", ":", "return", "set", "(", "s", "[", "i", ":", "i", "+", "n", "]", "for", "i", "in", "range", "(", "len", "(", "s", ")", "-", "n", "+", "1", ")", ")" ]
Make a set of unique n-grams from a string.
[ "Make", "a", "set", "of", "unique", "n", "-", "grams", "from", "a", "string", "." ]
0475005402c8efc7c3eb4f56660639e505d8986d
https://github.com/lqdc/pysimstr/blob/0475005402c8efc7c3eb4f56660639e505d8986d/pysimstr.py#L15-L17
245,514
lqdc/pysimstr
pysimstr.py
SimStr.check
def check(self, s, instant_exact=True): """Check if a string is in the DB. :param s: str, string to check against the DB. :param instant_exact: bool, look up exact matches with a hash lookup. :return: True or False :rtype: bool """ all_sets = self._get_comparison_strings(s) if instant_exact and s in all_sets: # exact match return True for comparison_string in all_sets: if self.comparison_func(s, comparison_string) >= self.cutoff: return True return False
python
def check(self, s, instant_exact=True):
    """Check if a string is in the DB.

    :param s: str, string to check against the DB.
    :param instant_exact: bool, look up exact matches with a hash lookup.
    :return: True or False
    :rtype: bool
    """
    candidates = self._get_comparison_strings(s)
    if instant_exact and s in candidates:
        # exact match
        return True
    for candidate in candidates:
        if self.comparison_func(s, candidate) >= self.cutoff:
            return True
    return False
[ "def", "check", "(", "self", ",", "s", ",", "instant_exact", "=", "True", ")", ":", "all_sets", "=", "self", ".", "_get_comparison_strings", "(", "s", ")", "if", "instant_exact", "and", "s", "in", "all_sets", ":", "# exact match", "return", "True", "for", "comparison_string", "in", "all_sets", ":", "if", "self", ".", "comparison_func", "(", "s", ",", "comparison_string", ")", ">=", "self", ".", "cutoff", ":", "return", "True", "return", "False" ]
Check if a string is in the DB. :param s: str, string to check against the DB. :param instant_exact: bool, look up exact matches with a hash lookup. :return: True or False :rtype: bool
[ "Check", "if", "a", "string", "is", "in", "the", "DB", "." ]
0475005402c8efc7c3eb4f56660639e505d8986d
https://github.com/lqdc/pysimstr/blob/0475005402c8efc7c3eb4f56660639e505d8986d/pysimstr.py#L84-L100
245,515
lqdc/pysimstr
pysimstr.py
SimStr.insert
def insert(self, seq): """ Populates the DB from a sequence of strings, ERASING PREVIOUS STATE. :param seq: an iterable """ # erase previous elements and make defaultdict for easier insertion. self._els_idxed = defaultdict(lambda: defaultdict(set)) if type(seq) is str: raise ValueError('Provided argument should be a sequence of strings' ', but not a string itself.') for el in seq: if type(el) is not str: raise ValueError('Element %s is not a string' % (el,)) for gram in make_unique_ngrams(el, self.idx_size): self._els_idxed[gram][len(el)].add(el) # convert defaultdict to dict so as to not increase size when checking # for presence of an element self._finalize_db()
python
def insert(self, seq):
    """
    Populates the DB from a sequence of strings, ERASING PREVIOUS STATE.

    :param seq: an iterable
    """
    # erase previous elements and make defaultdict for easier insertion.
    self._els_idxed = defaultdict(lambda: defaultdict(set))
    if type(seq) is str:
        raise ValueError('Provided argument should be a sequence of strings'
                         ', but not a string itself.')
    for item in seq:
        if type(item) is not str:
            raise ValueError('Element %s is not a string' % (item,))
        item_len = len(item)
        for gram in make_unique_ngrams(item, self.idx_size):
            self._els_idxed[gram][item_len].add(item)
    # convert defaultdict to dict so as to not increase size when checking
    # for presence of an element
    self._finalize_db()
[ "def", "insert", "(", "self", ",", "seq", ")", ":", "# erase previous elements and make defaultdict for easier insertion.", "self", ".", "_els_idxed", "=", "defaultdict", "(", "lambda", ":", "defaultdict", "(", "set", ")", ")", "if", "type", "(", "seq", ")", "is", "str", ":", "raise", "ValueError", "(", "'Provided argument should be a sequence of strings'", "', but not a string itself.'", ")", "for", "el", "in", "seq", ":", "if", "type", "(", "el", ")", "is", "not", "str", ":", "raise", "ValueError", "(", "'Element %s is not a string'", "%", "(", "el", ",", ")", ")", "for", "gram", "in", "make_unique_ngrams", "(", "el", ",", "self", ".", "idx_size", ")", ":", "self", ".", "_els_idxed", "[", "gram", "]", "[", "len", "(", "el", ")", "]", ".", "add", "(", "el", ")", "# convert defaultdict to dict so as to not increase size when checking", "# for presence of an element", "self", ".", "_finalize_db", "(", ")" ]
Populates the DB from a sequence of strings, ERASING PREVIOUS STATE. :param seq: an iterable
[ "Populates", "the", "DB", "from", "a", "sequence", "of", "strings", "ERASING", "PREVIOUS", "STATE", "." ]
0475005402c8efc7c3eb4f56660639e505d8986d
https://github.com/lqdc/pysimstr/blob/0475005402c8efc7c3eb4f56660639e505d8986d/pysimstr.py#L138-L158
245,516
lqdc/pysimstr
pysimstr.py
SimStr._finalize_db
def _finalize_db(self): """Convert defaultdicts to regular dicts.""" for k, v in self._els_idxed.items(): self._els_idxed[k] = dict(v) self._els_idxed = dict(self._els_idxed)
python
def _finalize_db(self): """Convert defaultdicts to regular dicts.""" for k, v in self._els_idxed.items(): self._els_idxed[k] = dict(v) self._els_idxed = dict(self._els_idxed)
[ "def", "_finalize_db", "(", "self", ")", ":", "for", "k", ",", "v", "in", "self", ".", "_els_idxed", ".", "items", "(", ")", ":", "self", ".", "_els_idxed", "[", "k", "]", "=", "dict", "(", "v", ")", "self", ".", "_els_idxed", "=", "dict", "(", "self", ".", "_els_idxed", ")" ]
Convert defaultdicts to regular dicts.
[ "Convert", "defaultdicts", "to", "regular", "dicts", "." ]
0475005402c8efc7c3eb4f56660639e505d8986d
https://github.com/lqdc/pysimstr/blob/0475005402c8efc7c3eb4f56660639e505d8986d/pysimstr.py#L160-L164
245,517
lqdc/pysimstr
pysimstr.py
SimStr._get_comparison_strings
def _get_comparison_strings(self, s): """Find all similar strings""" str_len = len(s) comparison_idxs = make_unique_ngrams(s, self.idx_size) min_len = len(s) - self.plus_minus if min_len < 0: min_len = 0 if self._els_idxed is None: raise UnintitializedError('Database not created') all_sets = set() for idx in comparison_idxs: found_idx = self._els_idxed.get(idx, None) if found_idx is None: continue for i in range(min_len, str_len + self.plus_minus): found_len = found_idx.get(i, None) if found_len is not None: all_sets = all_sets.union(found_len) return all_sets
python
def _get_comparison_strings(self, s):
    """Find all similar strings"""
    target_len = len(s)
    grams = make_unique_ngrams(s, self.idx_size)
    # candidate lengths span target_len +/- plus_minus, floored at 0
    lower = max(target_len - self.plus_minus, 0)
    if self._els_idxed is None:
        raise UnintitializedError('Database not created')
    matches = set()
    for gram in grams:
        by_length = self._els_idxed.get(gram)
        if by_length is None:
            continue
        for length in range(lower, target_len + self.plus_minus):
            bucket = by_length.get(length)
            if bucket is not None:
                matches |= bucket
    return matches
[ "def", "_get_comparison_strings", "(", "self", ",", "s", ")", ":", "str_len", "=", "len", "(", "s", ")", "comparison_idxs", "=", "make_unique_ngrams", "(", "s", ",", "self", ".", "idx_size", ")", "min_len", "=", "len", "(", "s", ")", "-", "self", ".", "plus_minus", "if", "min_len", "<", "0", ":", "min_len", "=", "0", "if", "self", ".", "_els_idxed", "is", "None", ":", "raise", "UnintitializedError", "(", "'Database not created'", ")", "all_sets", "=", "set", "(", ")", "for", "idx", "in", "comparison_idxs", ":", "found_idx", "=", "self", ".", "_els_idxed", ".", "get", "(", "idx", ",", "None", ")", "if", "found_idx", "is", "None", ":", "continue", "for", "i", "in", "range", "(", "min_len", ",", "str_len", "+", "self", ".", "plus_minus", ")", ":", "found_len", "=", "found_idx", ".", "get", "(", "i", ",", "None", ")", "if", "found_len", "is", "not", "None", ":", "all_sets", "=", "all_sets", ".", "union", "(", "found_len", ")", "return", "all_sets" ]
Find all similar strings
[ "Find", "all", "similar", "strings" ]
0475005402c8efc7c3eb4f56660639e505d8986d
https://github.com/lqdc/pysimstr/blob/0475005402c8efc7c3eb4f56660639e505d8986d/pysimstr.py#L166-L187
245,518
rosenbrockc/acorn
acorn/logging/analysis.py
_load_methods
def _load_methods(package): """Loads the mappings from method call result to analysis. Args: package (str): name of the package to load for. """ global _methods _methods[package] = None from acorn.config import settings from acorn.logging.descriptors import _obj_getattr spack = settings(package) if spack is not None: if spack.has_section("analysis.methods"): _methods[package] = {} from importlib import import_module mappings = dict(spack.items("analysis.methods")) for fqdn, target in mappings.items(): rootname = target.split('.')[0] root = import_module(rootname) caller = _obj_getattr(root, target) _methods[package][fqdn] = caller
python
def _load_methods(package): """Loads the mappings from method call result to analysis. Args: package (str): name of the package to load for. """ global _methods _methods[package] = None from acorn.config import settings from acorn.logging.descriptors import _obj_getattr spack = settings(package) if spack is not None: if spack.has_section("analysis.methods"): _methods[package] = {} from importlib import import_module mappings = dict(spack.items("analysis.methods")) for fqdn, target in mappings.items(): rootname = target.split('.')[0] root = import_module(rootname) caller = _obj_getattr(root, target) _methods[package][fqdn] = caller
[ "def", "_load_methods", "(", "package", ")", ":", "global", "_methods", "_methods", "[", "package", "]", "=", "None", "from", "acorn", ".", "config", "import", "settings", "from", "acorn", ".", "logging", ".", "descriptors", "import", "_obj_getattr", "spack", "=", "settings", "(", "package", ")", "if", "spack", "is", "not", "None", ":", "if", "spack", ".", "has_section", "(", "\"analysis.methods\"", ")", ":", "_methods", "[", "package", "]", "=", "{", "}", "from", "importlib", "import", "import_module", "mappings", "=", "dict", "(", "spack", ".", "items", "(", "\"analysis.methods\"", ")", ")", "for", "fqdn", ",", "target", "in", "mappings", ".", "items", "(", ")", ":", "rootname", "=", "target", ".", "split", "(", "'.'", ")", "[", "0", "]", "root", "=", "import_module", "(", "rootname", ")", "caller", "=", "_obj_getattr", "(", "root", ",", "target", ")", "_methods", "[", "package", "]", "[", "fqdn", "]", "=", "caller" ]
Loads the mappings from method call result to analysis. Args: package (str): name of the package to load for.
[ "Loads", "the", "mappings", "from", "method", "call", "result", "to", "analysis", "." ]
9a44d1a1ad8bfc2c54a6b56d9efe54433a797820
https://github.com/rosenbrockc/acorn/blob/9a44d1a1ad8bfc2c54a6b56d9efe54433a797820/acorn/logging/analysis.py#L8-L30
245,519
rosenbrockc/acorn
acorn/logging/analysis.py
analyze
def analyze(fqdn, result, argl, argd): """Analyzes the result from calling the method with the specified FQDN. Args: fqdn (str): full-qualified name of the method that was called. result: result of calling the method with `fqdn`. argl (tuple): positional arguments passed to the method call. argd (dict): keyword arguments passed to the method call. """ package = fqdn.split('.')[0] if package not in _methods: _load_methods(package) if _methods[package] is not None and fqdn in _methods[package]: return _methods[package][fqdn](fqdn, result, *argl, **argd)
python
def analyze(fqdn, result, argl, argd): """Analyzes the result from calling the method with the specified FQDN. Args: fqdn (str): full-qualified name of the method that was called. result: result of calling the method with `fqdn`. argl (tuple): positional arguments passed to the method call. argd (dict): keyword arguments passed to the method call. """ package = fqdn.split('.')[0] if package not in _methods: _load_methods(package) if _methods[package] is not None and fqdn in _methods[package]: return _methods[package][fqdn](fqdn, result, *argl, **argd)
[ "def", "analyze", "(", "fqdn", ",", "result", ",", "argl", ",", "argd", ")", ":", "package", "=", "fqdn", ".", "split", "(", "'.'", ")", "[", "0", "]", "if", "package", "not", "in", "_methods", ":", "_load_methods", "(", "package", ")", "if", "_methods", "[", "package", "]", "is", "not", "None", "and", "fqdn", "in", "_methods", "[", "package", "]", ":", "return", "_methods", "[", "package", "]", "[", "fqdn", "]", "(", "fqdn", ",", "result", ",", "*", "argl", ",", "*", "*", "argd", ")" ]
Analyzes the result from calling the method with the specified FQDN. Args: fqdn (str): full-qualified name of the method that was called. result: result of calling the method with `fqdn`. argl (tuple): positional arguments passed to the method call. argd (dict): keyword arguments passed to the method call.
[ "Analyzes", "the", "result", "from", "calling", "the", "method", "with", "the", "specified", "FQDN", "." ]
9a44d1a1ad8bfc2c54a6b56d9efe54433a797820
https://github.com/rosenbrockc/acorn/blob/9a44d1a1ad8bfc2c54a6b56d9efe54433a797820/acorn/logging/analysis.py#L32-L46
245,520
etcher-be/emiz
emiz/avwx/taf.py
parse
def parse(station: str, txt: str) -> TafData: """ Returns TafData and Units dataclasses with parsed data and their associated units """ core.valid_station(station) while len(txt) > 3 and txt[:4] in ('TAF ', 'AMD ', 'COR '): txt = txt[4:] _, station, time = core.get_station_and_time(txt[:20].split(' ')) retwx = { 'end_time': None, 'raw': txt, 'remarks': None, 'start_time': None, 'station': station, 'time': core.make_timestamp(time) } txt = txt.replace(station, '') txt = txt.replace(time, '').strip() if core.uses_na_format(station): use_na = True units = Units(**NA_UNITS) # type: ignore else: use_na = False units = Units(**IN_UNITS) # type: ignore # Find and remove remarks txt, retwx['remarks'] = core.get_taf_remarks(txt) # Split and parse each line lines = core.split_taf(txt) parsed_lines = parse_lines(lines, units, use_na) # Perform additional info extract and corrections if parsed_lines: parsed_lines[-1]['other'], retwx['max_temp'], retwx['min_temp'] \ = core.get_temp_min_and_max(parsed_lines[-1]['other']) if not (retwx['max_temp'] or retwx['min_temp']): parsed_lines[0]['other'], retwx['max_temp'], retwx['min_temp'] \ = core.get_temp_min_and_max(parsed_lines[0]['other']) # Set start and end times based on the first line start, end = parsed_lines[0]['start_time'], parsed_lines[0]['end_time'] parsed_lines[0]['end_time'] = None retwx['start_time'], retwx['end_time'] = start, end parsed_lines = core.find_missing_taf_times(parsed_lines, start, end) parsed_lines = core.get_taf_flight_rules(parsed_lines) # Extract Oceania-specific data if retwx['station'][0] == 'A': # type: ignore parsed_lines[-1]['other'], retwx['alts'], retwx['temps'] \ = core.get_oceania_temp_and_alt(parsed_lines[-1]['other']) # Convert to dataclass retwx['forecast'] = [TafLineData(**line) for line in parsed_lines] # type: ignore return TafData(**retwx), units
python
def parse(station: str, txt: str) -> TafData: """ Returns TafData and Units dataclasses with parsed data and their associated units """ core.valid_station(station) while len(txt) > 3 and txt[:4] in ('TAF ', 'AMD ', 'COR '): txt = txt[4:] _, station, time = core.get_station_and_time(txt[:20].split(' ')) retwx = { 'end_time': None, 'raw': txt, 'remarks': None, 'start_time': None, 'station': station, 'time': core.make_timestamp(time) } txt = txt.replace(station, '') txt = txt.replace(time, '').strip() if core.uses_na_format(station): use_na = True units = Units(**NA_UNITS) # type: ignore else: use_na = False units = Units(**IN_UNITS) # type: ignore # Find and remove remarks txt, retwx['remarks'] = core.get_taf_remarks(txt) # Split and parse each line lines = core.split_taf(txt) parsed_lines = parse_lines(lines, units, use_na) # Perform additional info extract and corrections if parsed_lines: parsed_lines[-1]['other'], retwx['max_temp'], retwx['min_temp'] \ = core.get_temp_min_and_max(parsed_lines[-1]['other']) if not (retwx['max_temp'] or retwx['min_temp']): parsed_lines[0]['other'], retwx['max_temp'], retwx['min_temp'] \ = core.get_temp_min_and_max(parsed_lines[0]['other']) # Set start and end times based on the first line start, end = parsed_lines[0]['start_time'], parsed_lines[0]['end_time'] parsed_lines[0]['end_time'] = None retwx['start_time'], retwx['end_time'] = start, end parsed_lines = core.find_missing_taf_times(parsed_lines, start, end) parsed_lines = core.get_taf_flight_rules(parsed_lines) # Extract Oceania-specific data if retwx['station'][0] == 'A': # type: ignore parsed_lines[-1]['other'], retwx['alts'], retwx['temps'] \ = core.get_oceania_temp_and_alt(parsed_lines[-1]['other']) # Convert to dataclass retwx['forecast'] = [TafLineData(**line) for line in parsed_lines] # type: ignore return TafData(**retwx), units
[ "def", "parse", "(", "station", ":", "str", ",", "txt", ":", "str", ")", "->", "TafData", ":", "core", ".", "valid_station", "(", "station", ")", "while", "len", "(", "txt", ")", ">", "3", "and", "txt", "[", ":", "4", "]", "in", "(", "'TAF '", ",", "'AMD '", ",", "'COR '", ")", ":", "txt", "=", "txt", "[", "4", ":", "]", "_", ",", "station", ",", "time", "=", "core", ".", "get_station_and_time", "(", "txt", "[", ":", "20", "]", ".", "split", "(", "' '", ")", ")", "retwx", "=", "{", "'end_time'", ":", "None", ",", "'raw'", ":", "txt", ",", "'remarks'", ":", "None", ",", "'start_time'", ":", "None", ",", "'station'", ":", "station", ",", "'time'", ":", "core", ".", "make_timestamp", "(", "time", ")", "}", "txt", "=", "txt", ".", "replace", "(", "station", ",", "''", ")", "txt", "=", "txt", ".", "replace", "(", "time", ",", "''", ")", ".", "strip", "(", ")", "if", "core", ".", "uses_na_format", "(", "station", ")", ":", "use_na", "=", "True", "units", "=", "Units", "(", "*", "*", "NA_UNITS", ")", "# type: ignore", "else", ":", "use_na", "=", "False", "units", "=", "Units", "(", "*", "*", "IN_UNITS", ")", "# type: ignore", "# Find and remove remarks", "txt", ",", "retwx", "[", "'remarks'", "]", "=", "core", ".", "get_taf_remarks", "(", "txt", ")", "# Split and parse each line", "lines", "=", "core", ".", "split_taf", "(", "txt", ")", "parsed_lines", "=", "parse_lines", "(", "lines", ",", "units", ",", "use_na", ")", "# Perform additional info extract and corrections", "if", "parsed_lines", ":", "parsed_lines", "[", "-", "1", "]", "[", "'other'", "]", ",", "retwx", "[", "'max_temp'", "]", ",", "retwx", "[", "'min_temp'", "]", "=", "core", ".", "get_temp_min_and_max", "(", "parsed_lines", "[", "-", "1", "]", "[", "'other'", "]", ")", "if", "not", "(", "retwx", "[", "'max_temp'", "]", "or", "retwx", "[", "'min_temp'", "]", ")", ":", "parsed_lines", "[", "0", "]", "[", "'other'", "]", ",", "retwx", "[", "'max_temp'", "]", ",", "retwx", "[", "'min_temp'", "]", "=", 
"core", ".", "get_temp_min_and_max", "(", "parsed_lines", "[", "0", "]", "[", "'other'", "]", ")", "# Set start and end times based on the first line", "start", ",", "end", "=", "parsed_lines", "[", "0", "]", "[", "'start_time'", "]", ",", "parsed_lines", "[", "0", "]", "[", "'end_time'", "]", "parsed_lines", "[", "0", "]", "[", "'end_time'", "]", "=", "None", "retwx", "[", "'start_time'", "]", ",", "retwx", "[", "'end_time'", "]", "=", "start", ",", "end", "parsed_lines", "=", "core", ".", "find_missing_taf_times", "(", "parsed_lines", ",", "start", ",", "end", ")", "parsed_lines", "=", "core", ".", "get_taf_flight_rules", "(", "parsed_lines", ")", "# Extract Oceania-specific data", "if", "retwx", "[", "'station'", "]", "[", "0", "]", "==", "'A'", ":", "# type: ignore", "parsed_lines", "[", "-", "1", "]", "[", "'other'", "]", ",", "retwx", "[", "'alts'", "]", ",", "retwx", "[", "'temps'", "]", "=", "core", ".", "get_oceania_temp_and_alt", "(", "parsed_lines", "[", "-", "1", "]", "[", "'other'", "]", ")", "# Convert to dataclass", "retwx", "[", "'forecast'", "]", "=", "[", "TafLineData", "(", "*", "*", "line", ")", "for", "line", "in", "parsed_lines", "]", "# type: ignore", "return", "TafData", "(", "*", "*", "retwx", ")", ",", "units" ]
Returns TafData and Units dataclasses with parsed data and their associated units
[ "Returns", "TafData", "and", "Units", "dataclasses", "with", "parsed", "data", "and", "their", "associated", "units" ]
1c3e32711921d7e600e85558ffe5d337956372de
https://github.com/etcher-be/emiz/blob/1c3e32711921d7e600e85558ffe5d337956372de/emiz/avwx/taf.py#L22-L70
245,521
etcher-be/emiz
emiz/avwx/taf.py
parse_lines
def parse_lines(lines: [str], units: Units, use_na: bool = True) -> [dict]: # type: ignore """ Returns a list of parsed line dictionaries """ parsed_lines = [] prob = '' while lines: raw_line = lines[0].strip() line = core.sanitize_line(raw_line) # Remove prob from the beginning of a line if line.startswith('PROB'): # Add standalone prob to next line if len(line) == 6: prob = line line = '' # Add to current line elif len(line) > 6: prob = line[:6] line = line[6:].strip() if line: parsed_line = (parse_na_line if use_na else parse_in_line)(line, units) for key in ('start_time', 'end_time'): parsed_line[key] = core.make_timestamp(parsed_line[key]) # type: ignore parsed_line['probability'] = core.make_number(prob[4:]) # type: ignore parsed_line['raw'] = raw_line parsed_line['sanitized'] = prob + ' ' + line if prob else line prob = '' parsed_lines.append(parsed_line) lines.pop(0) return parsed_lines
python
def parse_lines(lines: [str], units: Units, use_na: bool = True) -> [dict]: # type: ignore """ Returns a list of parsed line dictionaries """ parsed_lines = [] prob = '' while lines: raw_line = lines[0].strip() line = core.sanitize_line(raw_line) # Remove prob from the beginning of a line if line.startswith('PROB'): # Add standalone prob to next line if len(line) == 6: prob = line line = '' # Add to current line elif len(line) > 6: prob = line[:6] line = line[6:].strip() if line: parsed_line = (parse_na_line if use_na else parse_in_line)(line, units) for key in ('start_time', 'end_time'): parsed_line[key] = core.make_timestamp(parsed_line[key]) # type: ignore parsed_line['probability'] = core.make_number(prob[4:]) # type: ignore parsed_line['raw'] = raw_line parsed_line['sanitized'] = prob + ' ' + line if prob else line prob = '' parsed_lines.append(parsed_line) lines.pop(0) return parsed_lines
[ "def", "parse_lines", "(", "lines", ":", "[", "str", "]", ",", "units", ":", "Units", ",", "use_na", ":", "bool", "=", "True", ")", "->", "[", "dict", "]", ":", "# type: ignore", "parsed_lines", "=", "[", "]", "prob", "=", "''", "while", "lines", ":", "raw_line", "=", "lines", "[", "0", "]", ".", "strip", "(", ")", "line", "=", "core", ".", "sanitize_line", "(", "raw_line", ")", "# Remove prob from the beginning of a line", "if", "line", ".", "startswith", "(", "'PROB'", ")", ":", "# Add standalone prob to next line", "if", "len", "(", "line", ")", "==", "6", ":", "prob", "=", "line", "line", "=", "''", "# Add to current line", "elif", "len", "(", "line", ")", ">", "6", ":", "prob", "=", "line", "[", ":", "6", "]", "line", "=", "line", "[", "6", ":", "]", ".", "strip", "(", ")", "if", "line", ":", "parsed_line", "=", "(", "parse_na_line", "if", "use_na", "else", "parse_in_line", ")", "(", "line", ",", "units", ")", "for", "key", "in", "(", "'start_time'", ",", "'end_time'", ")", ":", "parsed_line", "[", "key", "]", "=", "core", ".", "make_timestamp", "(", "parsed_line", "[", "key", "]", ")", "# type: ignore", "parsed_line", "[", "'probability'", "]", "=", "core", ".", "make_number", "(", "prob", "[", "4", ":", "]", ")", "# type: ignore", "parsed_line", "[", "'raw'", "]", "=", "raw_line", "parsed_line", "[", "'sanitized'", "]", "=", "prob", "+", "' '", "+", "line", "if", "prob", "else", "line", "prob", "=", "''", "parsed_lines", ".", "append", "(", "parsed_line", ")", "lines", ".", "pop", "(", "0", ")", "return", "parsed_lines" ]
Returns a list of parsed line dictionaries
[ "Returns", "a", "list", "of", "parsed", "line", "dictionaries" ]
1c3e32711921d7e600e85558ffe5d337956372de
https://github.com/etcher-be/emiz/blob/1c3e32711921d7e600e85558ffe5d337956372de/emiz/avwx/taf.py#L73-L102
245,522
etcher-be/emiz
emiz/avwx/taf.py
parse_na_line
def parse_na_line(txt: str, units: Units) -> typing.Dict[str, str]: """ Parser for the North American TAF forcast varient """ retwx = {} wxdata = txt.split(' ') wxdata, _, retwx['wind_shear'] = core.sanitize_report_list(wxdata) wxdata, retwx['type'], retwx['start_time'], retwx['end_time'] = core.get_type_and_times(wxdata) wxdata, retwx['wind_direction'], retwx['wind_speed'], \ retwx['wind_gust'], _ = core.get_wind(wxdata, units) wxdata, retwx['visibility'] = core.get_visibility(wxdata, units) wxdata, retwx['clouds'] = core.get_clouds(wxdata) retwx['other'], retwx['altimeter'], retwx['icing'], retwx['turbulance'] \ = core.get_taf_alt_ice_turb(wxdata) return retwx
python
def parse_na_line(txt: str, units: Units) -> typing.Dict[str, str]: """ Parser for the North American TAF forcast varient """ retwx = {} wxdata = txt.split(' ') wxdata, _, retwx['wind_shear'] = core.sanitize_report_list(wxdata) wxdata, retwx['type'], retwx['start_time'], retwx['end_time'] = core.get_type_and_times(wxdata) wxdata, retwx['wind_direction'], retwx['wind_speed'], \ retwx['wind_gust'], _ = core.get_wind(wxdata, units) wxdata, retwx['visibility'] = core.get_visibility(wxdata, units) wxdata, retwx['clouds'] = core.get_clouds(wxdata) retwx['other'], retwx['altimeter'], retwx['icing'], retwx['turbulance'] \ = core.get_taf_alt_ice_turb(wxdata) return retwx
[ "def", "parse_na_line", "(", "txt", ":", "str", ",", "units", ":", "Units", ")", "->", "typing", ".", "Dict", "[", "str", ",", "str", "]", ":", "retwx", "=", "{", "}", "wxdata", "=", "txt", ".", "split", "(", "' '", ")", "wxdata", ",", "_", ",", "retwx", "[", "'wind_shear'", "]", "=", "core", ".", "sanitize_report_list", "(", "wxdata", ")", "wxdata", ",", "retwx", "[", "'type'", "]", ",", "retwx", "[", "'start_time'", "]", ",", "retwx", "[", "'end_time'", "]", "=", "core", ".", "get_type_and_times", "(", "wxdata", ")", "wxdata", ",", "retwx", "[", "'wind_direction'", "]", ",", "retwx", "[", "'wind_speed'", "]", ",", "retwx", "[", "'wind_gust'", "]", ",", "_", "=", "core", ".", "get_wind", "(", "wxdata", ",", "units", ")", "wxdata", ",", "retwx", "[", "'visibility'", "]", "=", "core", ".", "get_visibility", "(", "wxdata", ",", "units", ")", "wxdata", ",", "retwx", "[", "'clouds'", "]", "=", "core", ".", "get_clouds", "(", "wxdata", ")", "retwx", "[", "'other'", "]", ",", "retwx", "[", "'altimeter'", "]", ",", "retwx", "[", "'icing'", "]", ",", "retwx", "[", "'turbulance'", "]", "=", "core", ".", "get_taf_alt_ice_turb", "(", "wxdata", ")", "return", "retwx" ]
Parser for the North American TAF forcast varient
[ "Parser", "for", "the", "North", "American", "TAF", "forcast", "varient" ]
1c3e32711921d7e600e85558ffe5d337956372de
https://github.com/etcher-be/emiz/blob/1c3e32711921d7e600e85558ffe5d337956372de/emiz/avwx/taf.py#L105-L119
245,523
etcher-be/emiz
emiz/avwx/taf.py
parse_in_line
def parse_in_line(txt: str, units: Units) -> typing.Dict[str, str]: """ Parser for the International TAF forcast varient """ retwx = {} wxdata = txt.split(' ') wxdata, _, retwx['wind_shear'] = core.sanitize_report_list(wxdata) wxdata, retwx['type'], retwx['start_time'], retwx['end_time'] = core.get_type_and_times(wxdata) wxdata, retwx['wind_direction'], retwx['wind_speed'], \ retwx['wind_gust'], _ = core.get_wind(wxdata, units) if 'CAVOK' in wxdata: retwx['visibility'] = core.make_number('CAVOK') retwx['clouds'] = [] wxdata.pop(wxdata.index('CAVOK')) else: wxdata, retwx['visibility'] = core.get_visibility(wxdata, units) wxdata, retwx['clouds'] = core.get_clouds(wxdata) retwx['other'], retwx['altimeter'], retwx['icing'], retwx['turbulance'] \ = core.get_taf_alt_ice_turb(wxdata) return retwx
python
def parse_in_line(txt: str, units: Units) -> typing.Dict[str, str]: """ Parser for the International TAF forcast varient """ retwx = {} wxdata = txt.split(' ') wxdata, _, retwx['wind_shear'] = core.sanitize_report_list(wxdata) wxdata, retwx['type'], retwx['start_time'], retwx['end_time'] = core.get_type_and_times(wxdata) wxdata, retwx['wind_direction'], retwx['wind_speed'], \ retwx['wind_gust'], _ = core.get_wind(wxdata, units) if 'CAVOK' in wxdata: retwx['visibility'] = core.make_number('CAVOK') retwx['clouds'] = [] wxdata.pop(wxdata.index('CAVOK')) else: wxdata, retwx['visibility'] = core.get_visibility(wxdata, units) wxdata, retwx['clouds'] = core.get_clouds(wxdata) retwx['other'], retwx['altimeter'], retwx['icing'], retwx['turbulance'] \ = core.get_taf_alt_ice_turb(wxdata) return retwx
[ "def", "parse_in_line", "(", "txt", ":", "str", ",", "units", ":", "Units", ")", "->", "typing", ".", "Dict", "[", "str", ",", "str", "]", ":", "retwx", "=", "{", "}", "wxdata", "=", "txt", ".", "split", "(", "' '", ")", "wxdata", ",", "_", ",", "retwx", "[", "'wind_shear'", "]", "=", "core", ".", "sanitize_report_list", "(", "wxdata", ")", "wxdata", ",", "retwx", "[", "'type'", "]", ",", "retwx", "[", "'start_time'", "]", ",", "retwx", "[", "'end_time'", "]", "=", "core", ".", "get_type_and_times", "(", "wxdata", ")", "wxdata", ",", "retwx", "[", "'wind_direction'", "]", ",", "retwx", "[", "'wind_speed'", "]", ",", "retwx", "[", "'wind_gust'", "]", ",", "_", "=", "core", ".", "get_wind", "(", "wxdata", ",", "units", ")", "if", "'CAVOK'", "in", "wxdata", ":", "retwx", "[", "'visibility'", "]", "=", "core", ".", "make_number", "(", "'CAVOK'", ")", "retwx", "[", "'clouds'", "]", "=", "[", "]", "wxdata", ".", "pop", "(", "wxdata", ".", "index", "(", "'CAVOK'", ")", ")", "else", ":", "wxdata", ",", "retwx", "[", "'visibility'", "]", "=", "core", ".", "get_visibility", "(", "wxdata", ",", "units", ")", "wxdata", ",", "retwx", "[", "'clouds'", "]", "=", "core", ".", "get_clouds", "(", "wxdata", ")", "retwx", "[", "'other'", "]", ",", "retwx", "[", "'altimeter'", "]", ",", "retwx", "[", "'icing'", "]", ",", "retwx", "[", "'turbulance'", "]", "=", "core", ".", "get_taf_alt_ice_turb", "(", "wxdata", ")", "return", "retwx" ]
Parser for the International TAF forcast varient
[ "Parser", "for", "the", "International", "TAF", "forcast", "varient" ]
1c3e32711921d7e600e85558ffe5d337956372de
https://github.com/etcher-be/emiz/blob/1c3e32711921d7e600e85558ffe5d337956372de/emiz/avwx/taf.py#L122-L141
245,524
PyMLGame/pymlgame
pymlgame/screen.py
Screen.reset
def reset(self): """ Fill the screen with black pixels """ surface = Surface(self.width, self.height) surface.fill(BLACK) self.matrix = surface.matrix
python
def reset(self): """ Fill the screen with black pixels """ surface = Surface(self.width, self.height) surface.fill(BLACK) self.matrix = surface.matrix
[ "def", "reset", "(", "self", ")", ":", "surface", "=", "Surface", "(", "self", ".", "width", ",", "self", ".", "height", ")", "surface", ".", "fill", "(", "BLACK", ")", "self", ".", "matrix", "=", "surface", ".", "matrix" ]
Fill the screen with black pixels
[ "Fill", "the", "screen", "with", "black", "pixels" ]
450fe77d35f9a26c107586d6954f69c3895bf504
https://github.com/PyMLGame/pymlgame/blob/450fe77d35f9a26c107586d6954f69c3895bf504/pymlgame/screen.py#L29-L35
245,525
PyMLGame/pymlgame
pymlgame/screen.py
Screen.update
def update(self): """ Sends the current screen contents to Mate Light """ display_data = [] for y in range(self.height): for x in range(self.width): for color in self.matrix[x][y]: display_data.append(int(color)) checksum = bytearray([0, 0, 0, 0]) data_as_bytes = bytearray(display_data) data = data_as_bytes + checksum sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) sock.sendto(data, (self.host, self.port))
python
def update(self): """ Sends the current screen contents to Mate Light """ display_data = [] for y in range(self.height): for x in range(self.width): for color in self.matrix[x][y]: display_data.append(int(color)) checksum = bytearray([0, 0, 0, 0]) data_as_bytes = bytearray(display_data) data = data_as_bytes + checksum sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) sock.sendto(data, (self.host, self.port))
[ "def", "update", "(", "self", ")", ":", "display_data", "=", "[", "]", "for", "y", "in", "range", "(", "self", ".", "height", ")", ":", "for", "x", "in", "range", "(", "self", ".", "width", ")", ":", "for", "color", "in", "self", ".", "matrix", "[", "x", "]", "[", "y", "]", ":", "display_data", ".", "append", "(", "int", "(", "color", ")", ")", "checksum", "=", "bytearray", "(", "[", "0", ",", "0", ",", "0", ",", "0", "]", ")", "data_as_bytes", "=", "bytearray", "(", "display_data", ")", "data", "=", "data_as_bytes", "+", "checksum", "sock", "=", "socket", ".", "socket", "(", "socket", ".", "AF_INET", ",", "socket", ".", "SOCK_DGRAM", ")", "sock", ".", "sendto", "(", "data", ",", "(", "self", ".", "host", ",", "self", ".", "port", ")", ")" ]
Sends the current screen contents to Mate Light
[ "Sends", "the", "current", "screen", "contents", "to", "Mate", "Light" ]
450fe77d35f9a26c107586d6954f69c3895bf504
https://github.com/PyMLGame/pymlgame/blob/450fe77d35f9a26c107586d6954f69c3895bf504/pymlgame/screen.py#L37-L51
245,526
PyMLGame/pymlgame
pymlgame/screen.py
Screen.blit
def blit(self, surface, pos=(0, 0)): """ Blits a surface on the screen at pos :param surface: Surface to blit :param pos: Top left corner to start blitting :type surface: Surface :type pos: tuple """ for x in range(surface.width): for y in range(surface.height): point = (x + pos[0], y + pos[1]) if self.point_on_screen(point): self.matrix[point[0]][point[1]] = surface.matrix[x][y]
python
def blit(self, surface, pos=(0, 0)): """ Blits a surface on the screen at pos :param surface: Surface to blit :param pos: Top left corner to start blitting :type surface: Surface :type pos: tuple """ for x in range(surface.width): for y in range(surface.height): point = (x + pos[0], y + pos[1]) if self.point_on_screen(point): self.matrix[point[0]][point[1]] = surface.matrix[x][y]
[ "def", "blit", "(", "self", ",", "surface", ",", "pos", "=", "(", "0", ",", "0", ")", ")", ":", "for", "x", "in", "range", "(", "surface", ".", "width", ")", ":", "for", "y", "in", "range", "(", "surface", ".", "height", ")", ":", "point", "=", "(", "x", "+", "pos", "[", "0", "]", ",", "y", "+", "pos", "[", "1", "]", ")", "if", "self", ".", "point_on_screen", "(", "point", ")", ":", "self", ".", "matrix", "[", "point", "[", "0", "]", "]", "[", "point", "[", "1", "]", "]", "=", "surface", ".", "matrix", "[", "x", "]", "[", "y", "]" ]
Blits a surface on the screen at pos :param surface: Surface to blit :param pos: Top left corner to start blitting :type surface: Surface :type pos: tuple
[ "Blits", "a", "surface", "on", "the", "screen", "at", "pos" ]
450fe77d35f9a26c107586d6954f69c3895bf504
https://github.com/PyMLGame/pymlgame/blob/450fe77d35f9a26c107586d6954f69c3895bf504/pymlgame/screen.py#L53-L66
245,527
PyMLGame/pymlgame
pymlgame/screen.py
Screen.point_on_screen
def point_on_screen(self, pos): """ Is the point still on the screen? :param pos: Point :type pos: tuple :return: Is it? :rtype: bool """ if 0 <= pos[0] < self.width and 0 <= pos[1] < self.height: return True else: return False
python
def point_on_screen(self, pos): """ Is the point still on the screen? :param pos: Point :type pos: tuple :return: Is it? :rtype: bool """ if 0 <= pos[0] < self.width and 0 <= pos[1] < self.height: return True else: return False
[ "def", "point_on_screen", "(", "self", ",", "pos", ")", ":", "if", "0", "<=", "pos", "[", "0", "]", "<", "self", ".", "width", "and", "0", "<=", "pos", "[", "1", "]", "<", "self", ".", "height", ":", "return", "True", "else", ":", "return", "False" ]
Is the point still on the screen? :param pos: Point :type pos: tuple :return: Is it? :rtype: bool
[ "Is", "the", "point", "still", "on", "the", "screen?" ]
450fe77d35f9a26c107586d6954f69c3895bf504
https://github.com/PyMLGame/pymlgame/blob/450fe77d35f9a26c107586d6954f69c3895bf504/pymlgame/screen.py#L68-L80
245,528
chrisvoncsefalvay/urbanpyctionary
urbanpyctionary/client.py
Client.get
def get(self, word): """ Obtains the definition of a word from Urban Dictionary. :param word: word to be searched for :type word: str :return: a result set with all definitions for the word """ url = "https://mashape-community-urban-dictionary.p.mashape.com/define?term=%s" % word try: res = requests.get(url, headers = {"X-Mashape-Key": self.api_key, "Accept": "text/plain"}) except requests.ConnectionError: raise errors.ConnectionError if res.status_code == 200: if res.json()["result_type"] == 'no_results': raise errors.NoResultsError else: return Result(res.json()) else: if res.status_code == 403: raise errors.APIUnauthorizedError
python
def get(self, word): """ Obtains the definition of a word from Urban Dictionary. :param word: word to be searched for :type word: str :return: a result set with all definitions for the word """ url = "https://mashape-community-urban-dictionary.p.mashape.com/define?term=%s" % word try: res = requests.get(url, headers = {"X-Mashape-Key": self.api_key, "Accept": "text/plain"}) except requests.ConnectionError: raise errors.ConnectionError if res.status_code == 200: if res.json()["result_type"] == 'no_results': raise errors.NoResultsError else: return Result(res.json()) else: if res.status_code == 403: raise errors.APIUnauthorizedError
[ "def", "get", "(", "self", ",", "word", ")", ":", "url", "=", "\"https://mashape-community-urban-dictionary.p.mashape.com/define?term=%s\"", "%", "word", "try", ":", "res", "=", "requests", ".", "get", "(", "url", ",", "headers", "=", "{", "\"X-Mashape-Key\"", ":", "self", ".", "api_key", ",", "\"Accept\"", ":", "\"text/plain\"", "}", ")", "except", "requests", ".", "ConnectionError", ":", "raise", "errors", ".", "ConnectionError", "if", "res", ".", "status_code", "==", "200", ":", "if", "res", ".", "json", "(", ")", "[", "\"result_type\"", "]", "==", "'no_results'", ":", "raise", "errors", ".", "NoResultsError", "else", ":", "return", "Result", "(", "res", ".", "json", "(", ")", ")", "else", ":", "if", "res", ".", "status_code", "==", "403", ":", "raise", "errors", ".", "APIUnauthorizedError" ]
Obtains the definition of a word from Urban Dictionary. :param word: word to be searched for :type word: str :return: a result set with all definitions for the word
[ "Obtains", "the", "definition", "of", "a", "word", "from", "Urban", "Dictionary", "." ]
77ce3262d25d16ae9179909a34197c102adb2f06
https://github.com/chrisvoncsefalvay/urbanpyctionary/blob/77ce3262d25d16ae9179909a34197c102adb2f06/urbanpyctionary/client.py#L115-L140
245,529
inveniosoftware-attic/invenio-upgrader
invenio_upgrader/engine.py
InvenioUpgrader.estimate
def estimate(self, upgrades): """Estimate the time needed to apply upgrades. If an upgrades does not specify and estimate it is assumed to be in the order of 1 second. :param upgrades: List of upgrades sorted in topological order. """ val = 0 for u in upgrades: val += u.estimate() return val
python
def estimate(self, upgrades): """Estimate the time needed to apply upgrades. If an upgrades does not specify and estimate it is assumed to be in the order of 1 second. :param upgrades: List of upgrades sorted in topological order. """ val = 0 for u in upgrades: val += u.estimate() return val
[ "def", "estimate", "(", "self", ",", "upgrades", ")", ":", "val", "=", "0", "for", "u", "in", "upgrades", ":", "val", "+=", "u", ".", "estimate", "(", ")", "return", "val" ]
Estimate the time needed to apply upgrades. If an upgrades does not specify and estimate it is assumed to be in the order of 1 second. :param upgrades: List of upgrades sorted in topological order.
[ "Estimate", "the", "time", "needed", "to", "apply", "upgrades", "." ]
cee4bcb118515463ecf6de1421642007f79a9fcd
https://github.com/inveniosoftware-attic/invenio-upgrader/blob/cee4bcb118515463ecf6de1421642007f79a9fcd/invenio_upgrader/engine.py#L81-L92
245,530
inveniosoftware-attic/invenio-upgrader
invenio_upgrader/engine.py
InvenioUpgrader.human_estimate
def human_estimate(self, upgrades): """Make a human readable estimated time to completion string. :param upgrades: List of upgrades sorted in topological order. """ val = self.estimate(upgrades) if val < 60: return "less than 1 minute" elif val < 300: return "less than 5 minutes" elif val < 600: return "less than 10 minutes" elif val < 1800: return "less than 30 minutes" elif val < 3600: return "less than 1 hour" elif val < 3 * 3600: return "less than 3 hours" elif val < 6 * 3600: return "less than 6 hours" elif val < 12 * 3600: return "less than 12 hours" elif val < 86400: return "less than 1 day" else: return "more than 1 day"
python
def human_estimate(self, upgrades): """Make a human readable estimated time to completion string. :param upgrades: List of upgrades sorted in topological order. """ val = self.estimate(upgrades) if val < 60: return "less than 1 minute" elif val < 300: return "less than 5 minutes" elif val < 600: return "less than 10 minutes" elif val < 1800: return "less than 30 minutes" elif val < 3600: return "less than 1 hour" elif val < 3 * 3600: return "less than 3 hours" elif val < 6 * 3600: return "less than 6 hours" elif val < 12 * 3600: return "less than 12 hours" elif val < 86400: return "less than 1 day" else: return "more than 1 day"
[ "def", "human_estimate", "(", "self", ",", "upgrades", ")", ":", "val", "=", "self", ".", "estimate", "(", "upgrades", ")", "if", "val", "<", "60", ":", "return", "\"less than 1 minute\"", "elif", "val", "<", "300", ":", "return", "\"less than 5 minutes\"", "elif", "val", "<", "600", ":", "return", "\"less than 10 minutes\"", "elif", "val", "<", "1800", ":", "return", "\"less than 30 minutes\"", "elif", "val", "<", "3600", ":", "return", "\"less than 1 hour\"", "elif", "val", "<", "3", "*", "3600", ":", "return", "\"less than 3 hours\"", "elif", "val", "<", "6", "*", "3600", ":", "return", "\"less than 6 hours\"", "elif", "val", "<", "12", "*", "3600", ":", "return", "\"less than 12 hours\"", "elif", "val", "<", "86400", ":", "return", "\"less than 1 day\"", "else", ":", "return", "\"more than 1 day\"" ]
Make a human readable estimated time to completion string. :param upgrades: List of upgrades sorted in topological order.
[ "Make", "a", "human", "readable", "estimated", "time", "to", "completion", "string", "." ]
cee4bcb118515463ecf6de1421642007f79a9fcd
https://github.com/inveniosoftware-attic/invenio-upgrader/blob/cee4bcb118515463ecf6de1421642007f79a9fcd/invenio_upgrader/engine.py#L94-L119
245,531
inveniosoftware-attic/invenio-upgrader
invenio_upgrader/engine.py
InvenioUpgrader._setup_log_prefix
def _setup_log_prefix(self, plugin_id=''): """Setup custom warning notification.""" self._logger_console_fmtter.prefix = '%s: ' % plugin_id self._logger_console_fmtter.plugin_id = plugin_id self._logger_file_fmtter.prefix = '*' self._logger_file_fmtter.plugin_id = '%s: ' % plugin_id
python
def _setup_log_prefix(self, plugin_id=''): """Setup custom warning notification.""" self._logger_console_fmtter.prefix = '%s: ' % plugin_id self._logger_console_fmtter.plugin_id = plugin_id self._logger_file_fmtter.prefix = '*' self._logger_file_fmtter.plugin_id = '%s: ' % plugin_id
[ "def", "_setup_log_prefix", "(", "self", ",", "plugin_id", "=", "''", ")", ":", "self", ".", "_logger_console_fmtter", ".", "prefix", "=", "'%s: '", "%", "plugin_id", "self", ".", "_logger_console_fmtter", ".", "plugin_id", "=", "plugin_id", "self", ".", "_logger_file_fmtter", ".", "prefix", "=", "'*'", "self", ".", "_logger_file_fmtter", ".", "plugin_id", "=", "'%s: '", "%", "plugin_id" ]
Setup custom warning notification.
[ "Setup", "custom", "warning", "notification", "." ]
cee4bcb118515463ecf6de1421642007f79a9fcd
https://github.com/inveniosoftware-attic/invenio-upgrader/blob/cee4bcb118515463ecf6de1421642007f79a9fcd/invenio_upgrader/engine.py#L121-L126
245,532
inveniosoftware-attic/invenio-upgrader
invenio_upgrader/engine.py
InvenioUpgrader._teardown_log_prefix
def _teardown_log_prefix(self): """Tear down custom warning notification.""" self._logger_console_fmtter.prefix = '' self._logger_console_fmtter.plugin_id = '' self._logger_file_fmtter.prefix = ' ' self._logger_file_fmtter.plugin_id = ''
python
def _teardown_log_prefix(self): """Tear down custom warning notification.""" self._logger_console_fmtter.prefix = '' self._logger_console_fmtter.plugin_id = '' self._logger_file_fmtter.prefix = ' ' self._logger_file_fmtter.plugin_id = ''
[ "def", "_teardown_log_prefix", "(", "self", ")", ":", "self", ".", "_logger_console_fmtter", ".", "prefix", "=", "''", "self", ".", "_logger_console_fmtter", ".", "plugin_id", "=", "''", "self", ".", "_logger_file_fmtter", ".", "prefix", "=", "' '", "self", ".", "_logger_file_fmtter", ".", "plugin_id", "=", "''" ]
Tear down custom warning notification.
[ "Tear", "down", "custom", "warning", "notification", "." ]
cee4bcb118515463ecf6de1421642007f79a9fcd
https://github.com/inveniosoftware-attic/invenio-upgrader/blob/cee4bcb118515463ecf6de1421642007f79a9fcd/invenio_upgrader/engine.py#L128-L133
245,533
inveniosoftware-attic/invenio-upgrader
invenio_upgrader/engine.py
InvenioUpgrader.pre_upgrade_checks
def pre_upgrade_checks(self, upgrades): """Run upgrade pre-checks prior to applying upgrades. Pre-checks should in general be fast to execute. Pre-checks may the use the wait_for_user function, to query the user for confirmation, but should respect the --yes-i-know option to run unattended. All pre-checks will be executed even if one fails, however if one pre- check fails, the upgrade process will be stopped and the user warned. :param upgrades: List of upgrades sorted in topological order. """ errors = [] for check in self.global_pre_upgrade: self._setup_log_prefix(plugin_id=check.__name__) try: check() except RuntimeError as e: errors.append((check.__name__, e.args)) for u in upgrades: self._setup_log_prefix(plugin_id=u.name) try: u.pre_upgrade() except RuntimeError as e: errors.append((u.name, e.args)) self._teardown_log_prefix() self._check_errors(errors, "Pre-upgrade check for %s failed with the" " following errors:")
python
def pre_upgrade_checks(self, upgrades): """Run upgrade pre-checks prior to applying upgrades. Pre-checks should in general be fast to execute. Pre-checks may the use the wait_for_user function, to query the user for confirmation, but should respect the --yes-i-know option to run unattended. All pre-checks will be executed even if one fails, however if one pre- check fails, the upgrade process will be stopped and the user warned. :param upgrades: List of upgrades sorted in topological order. """ errors = [] for check in self.global_pre_upgrade: self._setup_log_prefix(plugin_id=check.__name__) try: check() except RuntimeError as e: errors.append((check.__name__, e.args)) for u in upgrades: self._setup_log_prefix(plugin_id=u.name) try: u.pre_upgrade() except RuntimeError as e: errors.append((u.name, e.args)) self._teardown_log_prefix() self._check_errors(errors, "Pre-upgrade check for %s failed with the" " following errors:")
[ "def", "pre_upgrade_checks", "(", "self", ",", "upgrades", ")", ":", "errors", "=", "[", "]", "for", "check", "in", "self", ".", "global_pre_upgrade", ":", "self", ".", "_setup_log_prefix", "(", "plugin_id", "=", "check", ".", "__name__", ")", "try", ":", "check", "(", ")", "except", "RuntimeError", "as", "e", ":", "errors", ".", "append", "(", "(", "check", ".", "__name__", ",", "e", ".", "args", ")", ")", "for", "u", "in", "upgrades", ":", "self", ".", "_setup_log_prefix", "(", "plugin_id", "=", "u", ".", "name", ")", "try", ":", "u", ".", "pre_upgrade", "(", ")", "except", "RuntimeError", "as", "e", ":", "errors", ".", "append", "(", "(", "u", ".", "name", ",", "e", ".", "args", ")", ")", "self", ".", "_teardown_log_prefix", "(", ")", "self", ".", "_check_errors", "(", "errors", ",", "\"Pre-upgrade check for %s failed with the\"", "\" following errors:\"", ")" ]
Run upgrade pre-checks prior to applying upgrades. Pre-checks should in general be fast to execute. Pre-checks may the use the wait_for_user function, to query the user for confirmation, but should respect the --yes-i-know option to run unattended. All pre-checks will be executed even if one fails, however if one pre- check fails, the upgrade process will be stopped and the user warned. :param upgrades: List of upgrades sorted in topological order.
[ "Run", "upgrade", "pre", "-", "checks", "prior", "to", "applying", "upgrades", "." ]
cee4bcb118515463ecf6de1421642007f79a9fcd
https://github.com/inveniosoftware-attic/invenio-upgrader/blob/cee4bcb118515463ecf6de1421642007f79a9fcd/invenio_upgrader/engine.py#L177-L209
245,534
inveniosoftware-attic/invenio-upgrader
invenio_upgrader/engine.py
InvenioUpgrader._check_errors
def _check_errors(self, errors, prefix): """Check for errors and possible raise and format an error message. :param errors: List of error messages. :param prefix: str, Prefix message for error messages """ args = [] for uid, messages in errors: error_msg = [] error_msg.append(prefix % uid) for msg in messages: error_msg.append(" (-) %s" % msg) args.append("\n".join(error_msg)) if args: raise RuntimeError(*args)
python
def _check_errors(self, errors, prefix): """Check for errors and possible raise and format an error message. :param errors: List of error messages. :param prefix: str, Prefix message for error messages """ args = [] for uid, messages in errors: error_msg = [] error_msg.append(prefix % uid) for msg in messages: error_msg.append(" (-) %s" % msg) args.append("\n".join(error_msg)) if args: raise RuntimeError(*args)
[ "def", "_check_errors", "(", "self", ",", "errors", ",", "prefix", ")", ":", "args", "=", "[", "]", "for", "uid", ",", "messages", "in", "errors", ":", "error_msg", "=", "[", "]", "error_msg", ".", "append", "(", "prefix", "%", "uid", ")", "for", "msg", "in", "messages", ":", "error_msg", ".", "append", "(", "\" (-) %s\"", "%", "msg", ")", "args", ".", "append", "(", "\"\\n\"", ".", "join", "(", "error_msg", ")", ")", "if", "args", ":", "raise", "RuntimeError", "(", "*", "args", ")" ]
Check for errors and possible raise and format an error message. :param errors: List of error messages. :param prefix: str, Prefix message for error messages
[ "Check", "for", "errors", "and", "possible", "raise", "and", "format", "an", "error", "message", "." ]
cee4bcb118515463ecf6de1421642007f79a9fcd
https://github.com/inveniosoftware-attic/invenio-upgrader/blob/cee4bcb118515463ecf6de1421642007f79a9fcd/invenio_upgrader/engine.py#L211-L227
245,535
inveniosoftware-attic/invenio-upgrader
invenio_upgrader/engine.py
InvenioUpgrader.post_upgrade_checks
def post_upgrade_checks(self, upgrades): """Run post-upgrade checks after applying all pending upgrades. Post checks may be used to emit warnings encountered when applying an upgrade, but post-checks can also be used to advice the user to run re-indexing or similar long running processes. Post-checks may query for user-input, but should respect the --yes-i-know option to run in an unattended mode. All applied upgrades post-checks are executed. :param upgrades: List of upgrades sorted in topological order. """ errors = [] for u in upgrades: self._setup_log_prefix(plugin_id=u.name) try: u.post_upgrade() except RuntimeError as e: errors.append((u.name, e.args)) for check in self.global_post_upgrade: self._setup_log_prefix(plugin_id=check.__name__) try: check() except RuntimeError as e: errors.append((check.__name__, e.args)) self._teardown_log_prefix() self._check_errors(errors, "Post-upgrade check for %s failed with the " "following errors:")
python
def post_upgrade_checks(self, upgrades): """Run post-upgrade checks after applying all pending upgrades. Post checks may be used to emit warnings encountered when applying an upgrade, but post-checks can also be used to advice the user to run re-indexing or similar long running processes. Post-checks may query for user-input, but should respect the --yes-i-know option to run in an unattended mode. All applied upgrades post-checks are executed. :param upgrades: List of upgrades sorted in topological order. """ errors = [] for u in upgrades: self._setup_log_prefix(plugin_id=u.name) try: u.post_upgrade() except RuntimeError as e: errors.append((u.name, e.args)) for check in self.global_post_upgrade: self._setup_log_prefix(plugin_id=check.__name__) try: check() except RuntimeError as e: errors.append((check.__name__, e.args)) self._teardown_log_prefix() self._check_errors(errors, "Post-upgrade check for %s failed with the " "following errors:")
[ "def", "post_upgrade_checks", "(", "self", ",", "upgrades", ")", ":", "errors", "=", "[", "]", "for", "u", "in", "upgrades", ":", "self", ".", "_setup_log_prefix", "(", "plugin_id", "=", "u", ".", "name", ")", "try", ":", "u", ".", "post_upgrade", "(", ")", "except", "RuntimeError", "as", "e", ":", "errors", ".", "append", "(", "(", "u", ".", "name", ",", "e", ".", "args", ")", ")", "for", "check", "in", "self", ".", "global_post_upgrade", ":", "self", ".", "_setup_log_prefix", "(", "plugin_id", "=", "check", ".", "__name__", ")", "try", ":", "check", "(", ")", "except", "RuntimeError", "as", "e", ":", "errors", ".", "append", "(", "(", "check", ".", "__name__", ",", "e", ".", "args", ")", ")", "self", ".", "_teardown_log_prefix", "(", ")", "self", ".", "_check_errors", "(", "errors", ",", "\"Post-upgrade check for %s failed with the \"", "\"following errors:\"", ")" ]
Run post-upgrade checks after applying all pending upgrades. Post checks may be used to emit warnings encountered when applying an upgrade, but post-checks can also be used to advice the user to run re-indexing or similar long running processes. Post-checks may query for user-input, but should respect the --yes-i-know option to run in an unattended mode. All applied upgrades post-checks are executed. :param upgrades: List of upgrades sorted in topological order.
[ "Run", "post", "-", "upgrade", "checks", "after", "applying", "all", "pending", "upgrades", "." ]
cee4bcb118515463ecf6de1421642007f79a9fcd
https://github.com/inveniosoftware-attic/invenio-upgrader/blob/cee4bcb118515463ecf6de1421642007f79a9fcd/invenio_upgrader/engine.py#L229-L262
245,536
inveniosoftware-attic/invenio-upgrader
invenio_upgrader/engine.py
InvenioUpgrader.apply_upgrade
def apply_upgrade(self, upgrade): """Apply a upgrade and register that it was successful. A upgrade may throw a RuntimeError, if an unrecoverable error happens. :param upgrade: A single upgrade """ self._setup_log_prefix(plugin_id=upgrade.name) try: # Nested due to Python 2.4 try: upgrade.do_upgrade() self.register_success(upgrade) except RuntimeError as e: msg = ["Upgrade error(s):"] for m in e.args: msg.append(" (-) %s" % m) logger = self.get_logger() logger.error("\n".join(msg)) raise RuntimeError( "Upgrade '%s' failed. Your installation is in an" " inconsistent state. Please manually review the upgrade " "and resolve inconsistencies." % upgrade['id'] ) finally: self._teardown_log_prefix()
python
def apply_upgrade(self, upgrade): """Apply a upgrade and register that it was successful. A upgrade may throw a RuntimeError, if an unrecoverable error happens. :param upgrade: A single upgrade """ self._setup_log_prefix(plugin_id=upgrade.name) try: # Nested due to Python 2.4 try: upgrade.do_upgrade() self.register_success(upgrade) except RuntimeError as e: msg = ["Upgrade error(s):"] for m in e.args: msg.append(" (-) %s" % m) logger = self.get_logger() logger.error("\n".join(msg)) raise RuntimeError( "Upgrade '%s' failed. Your installation is in an" " inconsistent state. Please manually review the upgrade " "and resolve inconsistencies." % upgrade['id'] ) finally: self._teardown_log_prefix()
[ "def", "apply_upgrade", "(", "self", ",", "upgrade", ")", ":", "self", ".", "_setup_log_prefix", "(", "plugin_id", "=", "upgrade", ".", "name", ")", "try", ":", "# Nested due to Python 2.4", "try", ":", "upgrade", ".", "do_upgrade", "(", ")", "self", ".", "register_success", "(", "upgrade", ")", "except", "RuntimeError", "as", "e", ":", "msg", "=", "[", "\"Upgrade error(s):\"", "]", "for", "m", "in", "e", ".", "args", ":", "msg", ".", "append", "(", "\" (-) %s\"", "%", "m", ")", "logger", "=", "self", ".", "get_logger", "(", ")", "logger", ".", "error", "(", "\"\\n\"", ".", "join", "(", "msg", ")", ")", "raise", "RuntimeError", "(", "\"Upgrade '%s' failed. Your installation is in an\"", "\" inconsistent state. Please manually review the upgrade \"", "\"and resolve inconsistencies.\"", "%", "upgrade", "[", "'id'", "]", ")", "finally", ":", "self", ".", "_teardown_log_prefix", "(", ")" ]
Apply a upgrade and register that it was successful. A upgrade may throw a RuntimeError, if an unrecoverable error happens. :param upgrade: A single upgrade
[ "Apply", "a", "upgrade", "and", "register", "that", "it", "was", "successful", "." ]
cee4bcb118515463ecf6de1421642007f79a9fcd
https://github.com/inveniosoftware-attic/invenio-upgrader/blob/cee4bcb118515463ecf6de1421642007f79a9fcd/invenio_upgrader/engine.py#L264-L292
245,537
inveniosoftware-attic/invenio-upgrader
invenio_upgrader/engine.py
InvenioUpgrader.load_history
def load_history(self): """Load upgrade history from database table. If upgrade table does not exists, the history is assumed to be empty. """ if not self.history: query = Upgrade.query.order_by(desc(Upgrade.applied)) for u in query.all(): self.history[u.upgrade] = u.applied self.ordered_history.append(u.upgrade)
python
def load_history(self): """Load upgrade history from database table. If upgrade table does not exists, the history is assumed to be empty. """ if not self.history: query = Upgrade.query.order_by(desc(Upgrade.applied)) for u in query.all(): self.history[u.upgrade] = u.applied self.ordered_history.append(u.upgrade)
[ "def", "load_history", "(", "self", ")", ":", "if", "not", "self", ".", "history", ":", "query", "=", "Upgrade", ".", "query", ".", "order_by", "(", "desc", "(", "Upgrade", ".", "applied", ")", ")", "for", "u", "in", "query", ".", "all", "(", ")", ":", "self", ".", "history", "[", "u", ".", "upgrade", "]", "=", "u", ".", "applied", "self", ".", "ordered_history", ".", "append", "(", "u", ".", "upgrade", ")" ]
Load upgrade history from database table. If upgrade table does not exists, the history is assumed to be empty.
[ "Load", "upgrade", "history", "from", "database", "table", "." ]
cee4bcb118515463ecf6de1421642007f79a9fcd
https://github.com/inveniosoftware-attic/invenio-upgrader/blob/cee4bcb118515463ecf6de1421642007f79a9fcd/invenio_upgrader/engine.py#L294-L304
245,538
inveniosoftware-attic/invenio-upgrader
invenio_upgrader/engine.py
InvenioUpgrader.register_success
def register_success(self, upgrade): """Register a successful upgrade.""" u = Upgrade(upgrade=upgrade.name, applied=datetime.now()) db.session.add(u) db.session.commit()
python
def register_success(self, upgrade): """Register a successful upgrade.""" u = Upgrade(upgrade=upgrade.name, applied=datetime.now()) db.session.add(u) db.session.commit()
[ "def", "register_success", "(", "self", ",", "upgrade", ")", ":", "u", "=", "Upgrade", "(", "upgrade", "=", "upgrade", ".", "name", ",", "applied", "=", "datetime", ".", "now", "(", ")", ")", "db", ".", "session", ".", "add", "(", "u", ")", "db", ".", "session", ".", "commit", "(", ")" ]
Register a successful upgrade.
[ "Register", "a", "successful", "upgrade", "." ]
cee4bcb118515463ecf6de1421642007f79a9fcd
https://github.com/inveniosoftware-attic/invenio-upgrader/blob/cee4bcb118515463ecf6de1421642007f79a9fcd/invenio_upgrader/engine.py#L314-L318
245,539
inveniosoftware-attic/invenio-upgrader
invenio_upgrader/engine.py
InvenioUpgrader.get_history
def get_history(self): """Get history of applied upgrades.""" self.load_history() return map(lambda x: (x, self.history[x]), self.ordered_history)
python
def get_history(self): """Get history of applied upgrades.""" self.load_history() return map(lambda x: (x, self.history[x]), self.ordered_history)
[ "def", "get_history", "(", "self", ")", ":", "self", ".", "load_history", "(", ")", "return", "map", "(", "lambda", "x", ":", "(", "x", ",", "self", ".", "history", "[", "x", "]", ")", ",", "self", ".", "ordered_history", ")" ]
Get history of applied upgrades.
[ "Get", "history", "of", "applied", "upgrades", "." ]
cee4bcb118515463ecf6de1421642007f79a9fcd
https://github.com/inveniosoftware-attic/invenio-upgrader/blob/cee4bcb118515463ecf6de1421642007f79a9fcd/invenio_upgrader/engine.py#L320-L323
245,540
inveniosoftware-attic/invenio-upgrader
invenio_upgrader/engine.py
InvenioUpgrader._load_upgrades
def _load_upgrades(self, remove_applied=True): """Load upgrade modules. :param remove_applied: if True, already applied upgrades will not be included, if False the entire upgrade graph will be returned. """ if remove_applied: self.load_history() for entry_point in iter_entry_points('invenio_upgrader.upgrades'): upgrade = entry_point.load()() self.__class__._upgrades[upgrade.name] = upgrade return self.__class__._upgrades
python
def _load_upgrades(self, remove_applied=True): """Load upgrade modules. :param remove_applied: if True, already applied upgrades will not be included, if False the entire upgrade graph will be returned. """ if remove_applied: self.load_history() for entry_point in iter_entry_points('invenio_upgrader.upgrades'): upgrade = entry_point.load()() self.__class__._upgrades[upgrade.name] = upgrade return self.__class__._upgrades
[ "def", "_load_upgrades", "(", "self", ",", "remove_applied", "=", "True", ")", ":", "if", "remove_applied", ":", "self", ".", "load_history", "(", ")", "for", "entry_point", "in", "iter_entry_points", "(", "'invenio_upgrader.upgrades'", ")", ":", "upgrade", "=", "entry_point", ".", "load", "(", ")", "(", ")", "self", ".", "__class__", ".", "_upgrades", "[", "upgrade", ".", "name", "]", "=", "upgrade", "return", "self", ".", "__class__", ".", "_upgrades" ]
Load upgrade modules. :param remove_applied: if True, already applied upgrades will not be included, if False the entire upgrade graph will be returned.
[ "Load", "upgrade", "modules", "." ]
cee4bcb118515463ecf6de1421642007f79a9fcd
https://github.com/inveniosoftware-attic/invenio-upgrader/blob/cee4bcb118515463ecf6de1421642007f79a9fcd/invenio_upgrader/engine.py#L325-L339
245,541
inveniosoftware-attic/invenio-upgrader
invenio_upgrader/engine.py
InvenioUpgrader._create_graph
def _create_graph(self, upgrades, history=None): """Create dependency graph from upgrades. :param upgrades: Dict of upgrades :param history: Dict of applied upgrades """ history = history or {} graph_incoming = {} # nodes their incoming edges graph_outgoing = {} # nodes their outgoing edges # Create graph data structure for mod in six.itervalues(upgrades): # Remove all incoming edges from already applied upgrades graph_incoming[mod.name] = [x for x in mod.depends_on if x not in history] # Build graph_outgoing if mod.name not in graph_outgoing: graph_outgoing[mod.name] = [] for edge in graph_incoming[mod.name]: if edge not in graph_outgoing: graph_outgoing[edge] = [] graph_outgoing[edge].append(mod.name) return (graph_incoming, graph_outgoing)
python
def _create_graph(self, upgrades, history=None): """Create dependency graph from upgrades. :param upgrades: Dict of upgrades :param history: Dict of applied upgrades """ history = history or {} graph_incoming = {} # nodes their incoming edges graph_outgoing = {} # nodes their outgoing edges # Create graph data structure for mod in six.itervalues(upgrades): # Remove all incoming edges from already applied upgrades graph_incoming[mod.name] = [x for x in mod.depends_on if x not in history] # Build graph_outgoing if mod.name not in graph_outgoing: graph_outgoing[mod.name] = [] for edge in graph_incoming[mod.name]: if edge not in graph_outgoing: graph_outgoing[edge] = [] graph_outgoing[edge].append(mod.name) return (graph_incoming, graph_outgoing)
[ "def", "_create_graph", "(", "self", ",", "upgrades", ",", "history", "=", "None", ")", ":", "history", "=", "history", "or", "{", "}", "graph_incoming", "=", "{", "}", "# nodes their incoming edges", "graph_outgoing", "=", "{", "}", "# nodes their outgoing edges", "# Create graph data structure", "for", "mod", "in", "six", ".", "itervalues", "(", "upgrades", ")", ":", "# Remove all incoming edges from already applied upgrades", "graph_incoming", "[", "mod", ".", "name", "]", "=", "[", "x", "for", "x", "in", "mod", ".", "depends_on", "if", "x", "not", "in", "history", "]", "# Build graph_outgoing", "if", "mod", ".", "name", "not", "in", "graph_outgoing", ":", "graph_outgoing", "[", "mod", ".", "name", "]", "=", "[", "]", "for", "edge", "in", "graph_incoming", "[", "mod", ".", "name", "]", ":", "if", "edge", "not", "in", "graph_outgoing", ":", "graph_outgoing", "[", "edge", "]", "=", "[", "]", "graph_outgoing", "[", "edge", "]", ".", "append", "(", "mod", ".", "name", ")", "return", "(", "graph_incoming", ",", "graph_outgoing", ")" ]
Create dependency graph from upgrades. :param upgrades: Dict of upgrades :param history: Dict of applied upgrades
[ "Create", "dependency", "graph", "from", "upgrades", "." ]
cee4bcb118515463ecf6de1421642007f79a9fcd
https://github.com/inveniosoftware-attic/invenio-upgrader/blob/cee4bcb118515463ecf6de1421642007f79a9fcd/invenio_upgrader/engine.py#L355-L378
245,542
inveniosoftware-attic/invenio-upgrader
invenio_upgrader/engine.py
InvenioUpgrader.order_upgrades
def order_upgrades(self, upgrades, history=None): """Order upgrades according to their dependencies. (topological sort using Kahn's algorithm - http://en.wikipedia.org/wiki/Topological_sorting). :param upgrades: Dict of upgrades :param history: Dict of applied upgrades """ history = history or {} graph_incoming, graph_outgoing = self._create_graph(upgrades, history) # Removed already applied upgrades (assumes all dependencies prior to # this upgrade has been applied). for node_id in six.iterkeys(history): start_nodes = [node_id, ] while start_nodes: node = start_nodes.pop() # Remove from direct dependents try: for d in graph_outgoing[node]: graph_incoming[d] = [x for x in graph_incoming[d] if x != node] except KeyError: warnings.warn("Ghost upgrade %s detected" % node) # Remove all prior dependencies if node in graph_incoming: # Get dependencies, remove node, and recursively # remove all dependencies. depends_on = graph_incoming[node] # Add dependencies to check for d in depends_on: graph_outgoing[d] = [x for x in graph_outgoing[d] if x != node] start_nodes.append(d) del graph_incoming[node] # Check for missing dependencies for node_id, depends_on in six.iteritems(graph_incoming): for d in depends_on: if d not in graph_incoming: raise RuntimeError("Upgrade %s depends on an unknown" " upgrade %s" % (node_id, d)) # Nodes with no incoming edges start_nodes = [x for x in six.iterkeys(graph_incoming) if len(graph_incoming[x]) == 0] topo_order = [] while start_nodes: # Append node_n to list (it has no incoming edges) node_n = start_nodes.pop() topo_order.append(node_n) # For each node m with and edge from n to m for node_m in graph_outgoing[node_n]: # Remove the edge n to m graph_incoming[node_m] = [x for x in graph_incoming[node_m] if x != node_n] # If m has no incoming edges, add it to start_nodes. 
if not graph_incoming[node_m]: start_nodes.append(node_m) for node, edges in six.iteritems(graph_incoming): if edges: raise RuntimeError("The upgrades have at least one cyclic " "dependency involving %s." % node) return map(lambda x: upgrades[x], topo_order)
python
def order_upgrades(self, upgrades, history=None): """Order upgrades according to their dependencies. (topological sort using Kahn's algorithm - http://en.wikipedia.org/wiki/Topological_sorting). :param upgrades: Dict of upgrades :param history: Dict of applied upgrades """ history = history or {} graph_incoming, graph_outgoing = self._create_graph(upgrades, history) # Removed already applied upgrades (assumes all dependencies prior to # this upgrade has been applied). for node_id in six.iterkeys(history): start_nodes = [node_id, ] while start_nodes: node = start_nodes.pop() # Remove from direct dependents try: for d in graph_outgoing[node]: graph_incoming[d] = [x for x in graph_incoming[d] if x != node] except KeyError: warnings.warn("Ghost upgrade %s detected" % node) # Remove all prior dependencies if node in graph_incoming: # Get dependencies, remove node, and recursively # remove all dependencies. depends_on = graph_incoming[node] # Add dependencies to check for d in depends_on: graph_outgoing[d] = [x for x in graph_outgoing[d] if x != node] start_nodes.append(d) del graph_incoming[node] # Check for missing dependencies for node_id, depends_on in six.iteritems(graph_incoming): for d in depends_on: if d not in graph_incoming: raise RuntimeError("Upgrade %s depends on an unknown" " upgrade %s" % (node_id, d)) # Nodes with no incoming edges start_nodes = [x for x in six.iterkeys(graph_incoming) if len(graph_incoming[x]) == 0] topo_order = [] while start_nodes: # Append node_n to list (it has no incoming edges) node_n = start_nodes.pop() topo_order.append(node_n) # For each node m with and edge from n to m for node_m in graph_outgoing[node_n]: # Remove the edge n to m graph_incoming[node_m] = [x for x in graph_incoming[node_m] if x != node_n] # If m has no incoming edges, add it to start_nodes. 
if not graph_incoming[node_m]: start_nodes.append(node_m) for node, edges in six.iteritems(graph_incoming): if edges: raise RuntimeError("The upgrades have at least one cyclic " "dependency involving %s." % node) return map(lambda x: upgrades[x], topo_order)
[ "def", "order_upgrades", "(", "self", ",", "upgrades", ",", "history", "=", "None", ")", ":", "history", "=", "history", "or", "{", "}", "graph_incoming", ",", "graph_outgoing", "=", "self", ".", "_create_graph", "(", "upgrades", ",", "history", ")", "# Removed already applied upgrades (assumes all dependencies prior to", "# this upgrade has been applied).", "for", "node_id", "in", "six", ".", "iterkeys", "(", "history", ")", ":", "start_nodes", "=", "[", "node_id", ",", "]", "while", "start_nodes", ":", "node", "=", "start_nodes", ".", "pop", "(", ")", "# Remove from direct dependents", "try", ":", "for", "d", "in", "graph_outgoing", "[", "node", "]", ":", "graph_incoming", "[", "d", "]", "=", "[", "x", "for", "x", "in", "graph_incoming", "[", "d", "]", "if", "x", "!=", "node", "]", "except", "KeyError", ":", "warnings", ".", "warn", "(", "\"Ghost upgrade %s detected\"", "%", "node", ")", "# Remove all prior dependencies", "if", "node", "in", "graph_incoming", ":", "# Get dependencies, remove node, and recursively", "# remove all dependencies.", "depends_on", "=", "graph_incoming", "[", "node", "]", "# Add dependencies to check", "for", "d", "in", "depends_on", ":", "graph_outgoing", "[", "d", "]", "=", "[", "x", "for", "x", "in", "graph_outgoing", "[", "d", "]", "if", "x", "!=", "node", "]", "start_nodes", ".", "append", "(", "d", ")", "del", "graph_incoming", "[", "node", "]", "# Check for missing dependencies", "for", "node_id", ",", "depends_on", "in", "six", ".", "iteritems", "(", "graph_incoming", ")", ":", "for", "d", "in", "depends_on", ":", "if", "d", "not", "in", "graph_incoming", ":", "raise", "RuntimeError", "(", "\"Upgrade %s depends on an unknown\"", "\" upgrade %s\"", "%", "(", "node_id", ",", "d", ")", ")", "# Nodes with no incoming edges", "start_nodes", "=", "[", "x", "for", "x", "in", "six", ".", "iterkeys", "(", "graph_incoming", ")", "if", "len", "(", "graph_incoming", "[", "x", "]", ")", "==", "0", "]", "topo_order", "=", "[", "]", "while", 
"start_nodes", ":", "# Append node_n to list (it has no incoming edges)", "node_n", "=", "start_nodes", ".", "pop", "(", ")", "topo_order", ".", "append", "(", "node_n", ")", "# For each node m with and edge from n to m", "for", "node_m", "in", "graph_outgoing", "[", "node_n", "]", ":", "# Remove the edge n to m", "graph_incoming", "[", "node_m", "]", "=", "[", "x", "for", "x", "in", "graph_incoming", "[", "node_m", "]", "if", "x", "!=", "node_n", "]", "# If m has no incoming edges, add it to start_nodes.", "if", "not", "graph_incoming", "[", "node_m", "]", ":", "start_nodes", ".", "append", "(", "node_m", ")", "for", "node", ",", "edges", "in", "six", ".", "iteritems", "(", "graph_incoming", ")", ":", "if", "edges", ":", "raise", "RuntimeError", "(", "\"The upgrades have at least one cyclic \"", "\"dependency involving %s.\"", "%", "node", ")", "return", "map", "(", "lambda", "x", ":", "upgrades", "[", "x", "]", ",", "topo_order", ")" ]
Order upgrades according to their dependencies. (topological sort using Kahn's algorithm - http://en.wikipedia.org/wiki/Topological_sorting). :param upgrades: Dict of upgrades :param history: Dict of applied upgrades
[ "Order", "upgrades", "according", "to", "their", "dependencies", "." ]
cee4bcb118515463ecf6de1421642007f79a9fcd
https://github.com/inveniosoftware-attic/invenio-upgrader/blob/cee4bcb118515463ecf6de1421642007f79a9fcd/invenio_upgrader/engine.py#L396-L467
245,543
inveniosoftware-attic/invenio-upgrader
invenio_upgrader/engine.py
InvenioUpgrader._parse_plugin_id
def _parse_plugin_id(self, plugin_id): """Determine repository from plugin id.""" m = re.match("(.+)(_\d{4}_\d{2}_\d{2}_)(.+)", plugin_id) if m: return m.group(1) m = re.match("(.+)(_release_)(.+)", plugin_id) if m: return m.group(1) raise RuntimeError("Repository could not be determined from " "the upgrade identifier: %s." % plugin_id)
python
def _parse_plugin_id(self, plugin_id): """Determine repository from plugin id.""" m = re.match("(.+)(_\d{4}_\d{2}_\d{2}_)(.+)", plugin_id) if m: return m.group(1) m = re.match("(.+)(_release_)(.+)", plugin_id) if m: return m.group(1) raise RuntimeError("Repository could not be determined from " "the upgrade identifier: %s." % plugin_id)
[ "def", "_parse_plugin_id", "(", "self", ",", "plugin_id", ")", ":", "m", "=", "re", ".", "match", "(", "\"(.+)(_\\d{4}_\\d{2}_\\d{2}_)(.+)\"", ",", "plugin_id", ")", "if", "m", ":", "return", "m", ".", "group", "(", "1", ")", "m", "=", "re", ".", "match", "(", "\"(.+)(_release_)(.+)\"", ",", "plugin_id", ")", "if", "m", ":", "return", "m", ".", "group", "(", "1", ")", "raise", "RuntimeError", "(", "\"Repository could not be determined from \"", "\"the upgrade identifier: %s.\"", "%", "plugin_id", ")" ]
Determine repository from plugin id.
[ "Determine", "repository", "from", "plugin", "id", "." ]
cee4bcb118515463ecf6de1421642007f79a9fcd
https://github.com/inveniosoftware-attic/invenio-upgrader/blob/cee4bcb118515463ecf6de1421642007f79a9fcd/invenio_upgrader/engine.py#L469-L479
245,544
etcher-be/emiz
emiz/weather/mizfile/mizfile_get_metar.py
get_metar_from_mission
def get_metar_from_mission( mission_file: str, icao: str = 'XXXX', time: str = None, ) -> str: """ Builds a dummy METAR string from a mission file Args: mission_file: input mission file icao: dummy ICAO (defaults to XXXX) time: dummy time (defaults to now()) Returns: METAR str """ return _MetarFromMission( mission_file=mission_file, icao=icao, time=time, ).metar
python
def get_metar_from_mission( mission_file: str, icao: str = 'XXXX', time: str = None, ) -> str: """ Builds a dummy METAR string from a mission file Args: mission_file: input mission file icao: dummy ICAO (defaults to XXXX) time: dummy time (defaults to now()) Returns: METAR str """ return _MetarFromMission( mission_file=mission_file, icao=icao, time=time, ).metar
[ "def", "get_metar_from_mission", "(", "mission_file", ":", "str", ",", "icao", ":", "str", "=", "'XXXX'", ",", "time", ":", "str", "=", "None", ",", ")", "->", "str", ":", "return", "_MetarFromMission", "(", "mission_file", "=", "mission_file", ",", "icao", "=", "icao", ",", "time", "=", "time", ",", ")", ".", "metar" ]
Builds a dummy METAR string from a mission file Args: mission_file: input mission file icao: dummy ICAO (defaults to XXXX) time: dummy time (defaults to now()) Returns: METAR str
[ "Builds", "a", "dummy", "METAR", "string", "from", "a", "mission", "file" ]
1c3e32711921d7e600e85558ffe5d337956372de
https://github.com/etcher-be/emiz/blob/1c3e32711921d7e600e85558ffe5d337956372de/emiz/weather/mizfile/mizfile_get_metar.py#L138-L158
245,545
etcher-be/emiz
emiz/weather/mizfile/mizfile_get_metar.py
_MetarFromMission.metar
def metar(self) -> str: """ Builds a METAR string from a MIZ file A lots of information is inferred from what information we have available in DCS. There constraints in the way MIZ files are built, with precipitations for example. Returns: METAR string """ metar = f'{self._icao} ' \ f'{self._time} ' \ f'{self._wind} ' \ f'{self._visibility} ' \ f'{self._precipitations} ' \ f'{self._clouds} ' \ f'{self._temperature} ' \ f'{self._pressure} ' \ f'{self._qualifier}' return re.sub(' +', ' ', metar)
python
def metar(self) -> str: """ Builds a METAR string from a MIZ file A lots of information is inferred from what information we have available in DCS. There constraints in the way MIZ files are built, with precipitations for example. Returns: METAR string """ metar = f'{self._icao} ' \ f'{self._time} ' \ f'{self._wind} ' \ f'{self._visibility} ' \ f'{self._precipitations} ' \ f'{self._clouds} ' \ f'{self._temperature} ' \ f'{self._pressure} ' \ f'{self._qualifier}' return re.sub(' +', ' ', metar)
[ "def", "metar", "(", "self", ")", "->", "str", ":", "metar", "=", "f'{self._icao} '", "f'{self._time} '", "f'{self._wind} '", "f'{self._visibility} '", "f'{self._precipitations} '", "f'{self._clouds} '", "f'{self._temperature} '", "f'{self._pressure} '", "f'{self._qualifier}'", "return", "re", ".", "sub", "(", "' +'", ",", "' '", ",", "metar", ")" ]
Builds a METAR string from a MIZ file A lots of information is inferred from what information we have available in DCS. There constraints in the way MIZ files are built, with precipitations for example. Returns: METAR string
[ "Builds", "a", "METAR", "string", "from", "a", "MIZ", "file" ]
1c3e32711921d7e600e85558ffe5d337956372de
https://github.com/etcher-be/emiz/blob/1c3e32711921d7e600e85558ffe5d337956372de/emiz/weather/mizfile/mizfile_get_metar.py#L115-L133
245,546
dossier/dossier.models
dossier/models/subtopic.py
subtopics
def subtopics(store, folders, folder_id, subfolder_id, ann_id=None): '''Yields an unordered generator of subtopics in a subfolder. Each item of the generator is a 4-tuple of ``content_id``, ``subtopic_id``, ``subtopic_type`` and ``data``. Subtopic type is one of the following Unicode strings: ``text``, ``image`` or ``manual``. The type of ``data`` is dependent on the subtopic type. For ``image``, ``data`` is a ``(unicode, str)``, where the first element is the URL and the second element is the binary image data. For all other types, ``data`` is a ``unicode`` string. :param str folder_id: Folder id :param str subfolder_id: Subfolder id :param str ann_id: Username :rtype: generator of ``(content_id, subtopic_id, url, subtopic_type, data)`` ''' # This code will be changed soon. In essence, it implements the # convention established in SortingDesk for storing subtopic data. # Currently, subtopic data is stored in the FC that the data (i.e., # image or snippet) came from. This is bad because it causes pretty # severe race conditions. # # Our current plan is to put each subtopic datum in its own FC. It will # require this code to make more FC fetches, but we should be able to # do it with one `store.get_many` call. items = folders.grouped_items(folder_id, subfolder_id, ann_id=ann_id) fcs = dict([(cid, fc) for cid, fc in store.get_many(items.keys())]) for cid, subids in items.iteritems(): fc = fcs[cid] for subid in subids: try: data = typed_subtopic_data(fc, subid) except KeyError: # We have a dangling label folks! continue yield cid, subid, fc['meta_url'], subtopic_type(subid), data
python
def subtopics(store, folders, folder_id, subfolder_id, ann_id=None): '''Yields an unordered generator of subtopics in a subfolder. Each item of the generator is a 4-tuple of ``content_id``, ``subtopic_id``, ``subtopic_type`` and ``data``. Subtopic type is one of the following Unicode strings: ``text``, ``image`` or ``manual``. The type of ``data`` is dependent on the subtopic type. For ``image``, ``data`` is a ``(unicode, str)``, where the first element is the URL and the second element is the binary image data. For all other types, ``data`` is a ``unicode`` string. :param str folder_id: Folder id :param str subfolder_id: Subfolder id :param str ann_id: Username :rtype: generator of ``(content_id, subtopic_id, url, subtopic_type, data)`` ''' # This code will be changed soon. In essence, it implements the # convention established in SortingDesk for storing subtopic data. # Currently, subtopic data is stored in the FC that the data (i.e., # image or snippet) came from. This is bad because it causes pretty # severe race conditions. # # Our current plan is to put each subtopic datum in its own FC. It will # require this code to make more FC fetches, but we should be able to # do it with one `store.get_many` call. items = folders.grouped_items(folder_id, subfolder_id, ann_id=ann_id) fcs = dict([(cid, fc) for cid, fc in store.get_many(items.keys())]) for cid, subids in items.iteritems(): fc = fcs[cid] for subid in subids: try: data = typed_subtopic_data(fc, subid) except KeyError: # We have a dangling label folks! continue yield cid, subid, fc['meta_url'], subtopic_type(subid), data
[ "def", "subtopics", "(", "store", ",", "folders", ",", "folder_id", ",", "subfolder_id", ",", "ann_id", "=", "None", ")", ":", "# This code will be changed soon. In essence, it implements the", "# convention established in SortingDesk for storing subtopic data.", "# Currently, subtopic data is stored in the FC that the data (i.e.,", "# image or snippet) came from. This is bad because it causes pretty", "# severe race conditions.", "#", "# Our current plan is to put each subtopic datum in its own FC. It will", "# require this code to make more FC fetches, but we should be able to", "# do it with one `store.get_many` call.", "items", "=", "folders", ".", "grouped_items", "(", "folder_id", ",", "subfolder_id", ",", "ann_id", "=", "ann_id", ")", "fcs", "=", "dict", "(", "[", "(", "cid", ",", "fc", ")", "for", "cid", ",", "fc", "in", "store", ".", "get_many", "(", "items", ".", "keys", "(", ")", ")", "]", ")", "for", "cid", ",", "subids", "in", "items", ".", "iteritems", "(", ")", ":", "fc", "=", "fcs", "[", "cid", "]", "for", "subid", "in", "subids", ":", "try", ":", "data", "=", "typed_subtopic_data", "(", "fc", ",", "subid", ")", "except", "KeyError", ":", "# We have a dangling label folks!", "continue", "yield", "cid", ",", "subid", ",", "fc", "[", "'meta_url'", "]", ",", "subtopic_type", "(", "subid", ")", ",", "data" ]
Yields an unordered generator of subtopics in a subfolder. Each item of the generator is a 4-tuple of ``content_id``, ``subtopic_id``, ``subtopic_type`` and ``data``. Subtopic type is one of the following Unicode strings: ``text``, ``image`` or ``manual``. The type of ``data`` is dependent on the subtopic type. For ``image``, ``data`` is a ``(unicode, str)``, where the first element is the URL and the second element is the binary image data. For all other types, ``data`` is a ``unicode`` string. :param str folder_id: Folder id :param str subfolder_id: Subfolder id :param str ann_id: Username :rtype: generator of ``(content_id, subtopic_id, url, subtopic_type, data)``
[ "Yields", "an", "unordered", "generator", "of", "subtopics", "in", "a", "subfolder", "." ]
c9e282f690eab72963926329efe1600709e48b13
https://github.com/dossier/dossier.models/blob/c9e282f690eab72963926329efe1600709e48b13/dossier/models/subtopic.py#L9-L46
245,547
dossier/dossier.models
dossier/models/subtopic.py
typed_subtopic_data
def typed_subtopic_data(fc, subid): '''Returns typed subtopic data from an FC.''' # I don't think this code will change after we fix the data race bug. ---AG ty = subtopic_type(subid) data = get_unicode_feature(fc, subid) assert isinstance(data, unicode), \ 'data should be `unicode` but is %r' % type(data) if ty == 'image': img_data = get_unicode_feature(fc, subid + '|data') img = re.sub('^data:image/[a-zA-Z]+;base64,', '', img_data) img = base64.b64decode(img.encode('utf-8')) return data, img elif ty in ('text', 'manual'): return data raise ValueError('unrecognized subtopic type "%s"' % ty)
python
def typed_subtopic_data(fc, subid): '''Returns typed subtopic data from an FC.''' # I don't think this code will change after we fix the data race bug. ---AG ty = subtopic_type(subid) data = get_unicode_feature(fc, subid) assert isinstance(data, unicode), \ 'data should be `unicode` but is %r' % type(data) if ty == 'image': img_data = get_unicode_feature(fc, subid + '|data') img = re.sub('^data:image/[a-zA-Z]+;base64,', '', img_data) img = base64.b64decode(img.encode('utf-8')) return data, img elif ty in ('text', 'manual'): return data raise ValueError('unrecognized subtopic type "%s"' % ty)
[ "def", "typed_subtopic_data", "(", "fc", ",", "subid", ")", ":", "# I don't think this code will change after we fix the data race bug. ---AG", "ty", "=", "subtopic_type", "(", "subid", ")", "data", "=", "get_unicode_feature", "(", "fc", ",", "subid", ")", "assert", "isinstance", "(", "data", ",", "unicode", ")", ",", "'data should be `unicode` but is %r'", "%", "type", "(", "data", ")", "if", "ty", "==", "'image'", ":", "img_data", "=", "get_unicode_feature", "(", "fc", ",", "subid", "+", "'|data'", ")", "img", "=", "re", ".", "sub", "(", "'^data:image/[a-zA-Z]+;base64,'", ",", "''", ",", "img_data", ")", "img", "=", "base64", ".", "b64decode", "(", "img", ".", "encode", "(", "'utf-8'", ")", ")", "return", "data", ",", "img", "elif", "ty", "in", "(", "'text'", ",", "'manual'", ")", ":", "return", "data", "raise", "ValueError", "(", "'unrecognized subtopic type \"%s\"'", "%", "ty", ")" ]
Returns typed subtopic data from an FC.
[ "Returns", "typed", "subtopic", "data", "from", "an", "FC", "." ]
c9e282f690eab72963926329efe1600709e48b13
https://github.com/dossier/dossier.models/blob/c9e282f690eab72963926329efe1600709e48b13/dossier/models/subtopic.py#L49-L63
245,548
bfontaine/p7magma
magma/souputils.py
text
def text(el, strip=True): """ Return the text of a ``BeautifulSoup`` element """ if not el: return "" text = el.text if strip: text = text.strip() return text
python
def text(el, strip=True): """ Return the text of a ``BeautifulSoup`` element """ if not el: return "" text = el.text if strip: text = text.strip() return text
[ "def", "text", "(", "el", ",", "strip", "=", "True", ")", ":", "if", "not", "el", ":", "return", "\"\"", "text", "=", "el", ".", "text", "if", "strip", ":", "text", "=", "text", ".", "strip", "(", ")", "return", "text" ]
Return the text of a ``BeautifulSoup`` element
[ "Return", "the", "text", "of", "a", "BeautifulSoup", "element" ]
713647aa9e3187c93c2577ef812f33ec42ae5494
https://github.com/bfontaine/p7magma/blob/713647aa9e3187c93c2577ef812f33ec42ae5494/magma/souputils.py#L8-L18
245,549
bfontaine/p7magma
magma/souputils.py
parse
def parse(el, typ): """ Parse a ``BeautifulSoup`` element as the given type. """ if not el: return typ() txt = text(el) if not txt: return typ() return typ(txt)
python
def parse(el, typ): """ Parse a ``BeautifulSoup`` element as the given type. """ if not el: return typ() txt = text(el) if not txt: return typ() return typ(txt)
[ "def", "parse", "(", "el", ",", "typ", ")", ":", "if", "not", "el", ":", "return", "typ", "(", ")", "txt", "=", "text", "(", "el", ")", "if", "not", "txt", ":", "return", "typ", "(", ")", "return", "typ", "(", "txt", ")" ]
Parse a ``BeautifulSoup`` element as the given type.
[ "Parse", "a", "BeautifulSoup", "element", "as", "the", "given", "type", "." ]
713647aa9e3187c93c2577ef812f33ec42ae5494
https://github.com/bfontaine/p7magma/blob/713647aa9e3187c93c2577ef812f33ec42ae5494/magma/souputils.py#L30-L39
245,550
bfontaine/p7magma
magma/souputils.py
parsebool
def parsebool(el): """ Parse a ``BeautifulSoup`` element as a bool """ txt = text(el) up = txt.upper() if up == "OUI": return True if up == "NON": return False return bool(parseint(el))
python
def parsebool(el): """ Parse a ``BeautifulSoup`` element as a bool """ txt = text(el) up = txt.upper() if up == "OUI": return True if up == "NON": return False return bool(parseint(el))
[ "def", "parsebool", "(", "el", ")", ":", "txt", "=", "text", "(", "el", ")", "up", "=", "txt", ".", "upper", "(", ")", "if", "up", "==", "\"OUI\"", ":", "return", "True", "if", "up", "==", "\"NON\"", ":", "return", "False", "return", "bool", "(", "parseint", "(", "el", ")", ")" ]
Parse a ``BeautifulSoup`` element as a bool
[ "Parse", "a", "BeautifulSoup", "element", "as", "a", "bool" ]
713647aa9e3187c93c2577ef812f33ec42ae5494
https://github.com/bfontaine/p7magma/blob/713647aa9e3187c93c2577ef812f33ec42ae5494/magma/souputils.py#L63-L74
245,551
GemHQ/round-py
round/client.py
Client.authenticate_device
def authenticate_device(self, api_token, device_token, email=None, user_url=None, override=False, fetch=True): """Set credentials for Device authentication. Args: api_token (str): Token issued to your Application through the Gem Developer Console. device_token (str): Physical device identifier. You will receive this from a user.devices.create call or from users.create. email (str, optional): User's email address, required if user_url is not provided. user_url (str, optional): User's Gem url. override (boolean, optional): Replace existing Application credentials. fetch (boolean, optional): Return the authenticated User. Returns: An User object if `fetch` is True. """ if (self.context.has_auth_params('Gem-Device') and not override): raise OverrideError('Gem-Device') if (not api_token or not device_token or (not email and not user_url) or not self.context.authorize('Gem-Device', api_token=api_token, user_email=email, user_url=user_url, device_token=device_token)): raise AuthUsageError(self.context, 'Gem-Device') if fetch: user = self.user(email) if email else self.user() return user.refresh() else: return True
python
def authenticate_device(self, api_token, device_token, email=None, user_url=None, override=False, fetch=True): """Set credentials for Device authentication. Args: api_token (str): Token issued to your Application through the Gem Developer Console. device_token (str): Physical device identifier. You will receive this from a user.devices.create call or from users.create. email (str, optional): User's email address, required if user_url is not provided. user_url (str, optional): User's Gem url. override (boolean, optional): Replace existing Application credentials. fetch (boolean, optional): Return the authenticated User. Returns: An User object if `fetch` is True. """ if (self.context.has_auth_params('Gem-Device') and not override): raise OverrideError('Gem-Device') if (not api_token or not device_token or (not email and not user_url) or not self.context.authorize('Gem-Device', api_token=api_token, user_email=email, user_url=user_url, device_token=device_token)): raise AuthUsageError(self.context, 'Gem-Device') if fetch: user = self.user(email) if email else self.user() return user.refresh() else: return True
[ "def", "authenticate_device", "(", "self", ",", "api_token", ",", "device_token", ",", "email", "=", "None", ",", "user_url", "=", "None", ",", "override", "=", "False", ",", "fetch", "=", "True", ")", ":", "if", "(", "self", ".", "context", ".", "has_auth_params", "(", "'Gem-Device'", ")", "and", "not", "override", ")", ":", "raise", "OverrideError", "(", "'Gem-Device'", ")", "if", "(", "not", "api_token", "or", "not", "device_token", "or", "(", "not", "email", "and", "not", "user_url", ")", "or", "not", "self", ".", "context", ".", "authorize", "(", "'Gem-Device'", ",", "api_token", "=", "api_token", ",", "user_email", "=", "email", ",", "user_url", "=", "user_url", ",", "device_token", "=", "device_token", ")", ")", ":", "raise", "AuthUsageError", "(", "self", ".", "context", ",", "'Gem-Device'", ")", "if", "fetch", ":", "user", "=", "self", ".", "user", "(", "email", ")", "if", "email", "else", "self", ".", "user", "(", ")", "return", "user", ".", "refresh", "(", ")", "else", ":", "return", "True" ]
Set credentials for Device authentication. Args: api_token (str): Token issued to your Application through the Gem Developer Console. device_token (str): Physical device identifier. You will receive this from a user.devices.create call or from users.create. email (str, optional): User's email address, required if user_url is not provided. user_url (str, optional): User's Gem url. override (boolean, optional): Replace existing Application credentials. fetch (boolean, optional): Return the authenticated User. Returns: An User object if `fetch` is True.
[ "Set", "credentials", "for", "Device", "authentication", "." ]
d0838f849cd260b1eb5df67ed3c6f2fe56c91c21
https://github.com/GemHQ/round-py/blob/d0838f849cd260b1eb5df67ed3c6f2fe56c91c21/round/client.py#L71-L106
245,552
GemHQ/round-py
round/client.py
Client.authenticate_identify
def authenticate_identify(self, api_token, override=True): """Set credentials for Identify authentication. Args: api_token (str): Token issued to your Application through the Gem Developer Console. override (boolean): Replace existing Application credentials. """ if (self.context.has_auth_params('Gem-Identify') and not override): raise OverrideError('Gem-Identify') if (not api_token or not self.context.authorize('Gem-Identify', api_token=api_token)): raise AuthUsageError(self.context, 'Gem-Identify') return True
python
def authenticate_identify(self, api_token, override=True): """Set credentials for Identify authentication. Args: api_token (str): Token issued to your Application through the Gem Developer Console. override (boolean): Replace existing Application credentials. """ if (self.context.has_auth_params('Gem-Identify') and not override): raise OverrideError('Gem-Identify') if (not api_token or not self.context.authorize('Gem-Identify', api_token=api_token)): raise AuthUsageError(self.context, 'Gem-Identify') return True
[ "def", "authenticate_identify", "(", "self", ",", "api_token", ",", "override", "=", "True", ")", ":", "if", "(", "self", ".", "context", ".", "has_auth_params", "(", "'Gem-Identify'", ")", "and", "not", "override", ")", ":", "raise", "OverrideError", "(", "'Gem-Identify'", ")", "if", "(", "not", "api_token", "or", "not", "self", ".", "context", ".", "authorize", "(", "'Gem-Identify'", ",", "api_token", "=", "api_token", ")", ")", ":", "raise", "AuthUsageError", "(", "self", ".", "context", ",", "'Gem-Identify'", ")", "return", "True" ]
Set credentials for Identify authentication. Args: api_token (str): Token issued to your Application through the Gem Developer Console. override (boolean): Replace existing Application credentials.
[ "Set", "credentials", "for", "Identify", "authentication", "." ]
d0838f849cd260b1eb5df67ed3c6f2fe56c91c21
https://github.com/GemHQ/round-py/blob/d0838f849cd260b1eb5df67ed3c6f2fe56c91c21/round/client.py#L108-L123
245,553
nkmathew/yasi-sexp-indenter
yasi.py
parse_options
def parse_options(arguments=None): """ Reads command-line arguments >>> parse_options('--indent-comments') """ if arguments is None: arguments = sys.argv[1:] if isinstance(arguments, str): arguments = arguments.split() if isinstance(arguments, argparse.Namespace): return arguments parser = create_args_parser() args = parser.parse_args(arguments) # pprint(args.__dict__) args.dialect = args.dialect.lower() if args.dialect not in ['lisp', 'newlisp', 'clojure', 'scheme', 'all', '']: parser.error("`{0}' is not a recognized dialect".format(args.dialect)) args.backup_dir = os.path.expanduser(args.backup_dir) if not os.path.exists(args.backup_dir): parser.error("Directory `{0}' does not exist".format(args.backup_dir)) if len(args.files) > 1 and args.output_file: parser.error('Cannot use the -o flag when more than one file is specified') if not args.files: # Indentation from standard input if args.modify and not args.output_file: args.modify = False args.backup = False args.warning = False if args.output_diff: # If someone requests a diff we assume he/she doesn't want the file to be # modified args.modify = False return args
python
def parse_options(arguments=None): """ Reads command-line arguments >>> parse_options('--indent-comments') """ if arguments is None: arguments = sys.argv[1:] if isinstance(arguments, str): arguments = arguments.split() if isinstance(arguments, argparse.Namespace): return arguments parser = create_args_parser() args = parser.parse_args(arguments) # pprint(args.__dict__) args.dialect = args.dialect.lower() if args.dialect not in ['lisp', 'newlisp', 'clojure', 'scheme', 'all', '']: parser.error("`{0}' is not a recognized dialect".format(args.dialect)) args.backup_dir = os.path.expanduser(args.backup_dir) if not os.path.exists(args.backup_dir): parser.error("Directory `{0}' does not exist".format(args.backup_dir)) if len(args.files) > 1 and args.output_file: parser.error('Cannot use the -o flag when more than one file is specified') if not args.files: # Indentation from standard input if args.modify and not args.output_file: args.modify = False args.backup = False args.warning = False if args.output_diff: # If someone requests a diff we assume he/she doesn't want the file to be # modified args.modify = False return args
[ "def", "parse_options", "(", "arguments", "=", "None", ")", ":", "if", "arguments", "is", "None", ":", "arguments", "=", "sys", ".", "argv", "[", "1", ":", "]", "if", "isinstance", "(", "arguments", ",", "str", ")", ":", "arguments", "=", "arguments", ".", "split", "(", ")", "if", "isinstance", "(", "arguments", ",", "argparse", ".", "Namespace", ")", ":", "return", "arguments", "parser", "=", "create_args_parser", "(", ")", "args", "=", "parser", ".", "parse_args", "(", "arguments", ")", "# pprint(args.__dict__)", "args", ".", "dialect", "=", "args", ".", "dialect", ".", "lower", "(", ")", "if", "args", ".", "dialect", "not", "in", "[", "'lisp'", ",", "'newlisp'", ",", "'clojure'", ",", "'scheme'", ",", "'all'", ",", "''", "]", ":", "parser", ".", "error", "(", "\"`{0}' is not a recognized dialect\"", ".", "format", "(", "args", ".", "dialect", ")", ")", "args", ".", "backup_dir", "=", "os", ".", "path", ".", "expanduser", "(", "args", ".", "backup_dir", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "args", ".", "backup_dir", ")", ":", "parser", ".", "error", "(", "\"Directory `{0}' does not exist\"", ".", "format", "(", "args", ".", "backup_dir", ")", ")", "if", "len", "(", "args", ".", "files", ")", ">", "1", "and", "args", ".", "output_file", ":", "parser", ".", "error", "(", "'Cannot use the -o flag when more than one file is specified'", ")", "if", "not", "args", ".", "files", ":", "# Indentation from standard input", "if", "args", ".", "modify", "and", "not", "args", ".", "output_file", ":", "args", ".", "modify", "=", "False", "args", ".", "backup", "=", "False", "args", ".", "warning", "=", "False", "if", "args", ".", "output_diff", ":", "# If someone requests a diff we assume he/she doesn't want the file to be", "# modified", "args", ".", "modify", "=", "False", "return", "args" ]
Reads command-line arguments >>> parse_options('--indent-comments')
[ "Reads", "command", "-", "line", "arguments" ]
6ec2a4675e79606c555bcb67494a0ba994b05805
https://github.com/nkmathew/yasi-sexp-indenter/blob/6ec2a4675e79606c555bcb67494a0ba994b05805/yasi.py#L109-L147
245,554
nkmathew/yasi-sexp-indenter
yasi.py
assign_indent_numbers
def assign_indent_numbers(lst, inum, dic=collections.defaultdict(int)): """ Associate keywords with their respective indentation numbers """ for i in lst: dic[i] = inum return dic
python
def assign_indent_numbers(lst, inum, dic=collections.defaultdict(int)): """ Associate keywords with their respective indentation numbers """ for i in lst: dic[i] = inum return dic
[ "def", "assign_indent_numbers", "(", "lst", ",", "inum", ",", "dic", "=", "collections", ".", "defaultdict", "(", "int", ")", ")", ":", "for", "i", "in", "lst", ":", "dic", "[", "i", "]", "=", "inum", "return", "dic" ]
Associate keywords with their respective indentation numbers
[ "Associate", "keywords", "with", "their", "respective", "indentation", "numbers" ]
6ec2a4675e79606c555bcb67494a0ba994b05805
https://github.com/nkmathew/yasi-sexp-indenter/blob/6ec2a4675e79606c555bcb67494a0ba994b05805/yasi.py#L571-L576
245,555
datadesk/django-greeking
greeking/placeholdit.py
get_url
def get_url( width, height=None, background_color="cccccc", text_color="969696", text=None, random_background_color=False ): """ Craft the URL for a placeholder image. You can customize the background color, text color and text using the optional keyword arguments If you want to use a random color pass in random_background_color as True. """ if random_background_color: background_color = _get_random_color() # If height is not provided, presume it is will be a square if not height: height = width d = dict( width=width, height=height, bcolor=background_color, tcolor=text_color ) url = URL % d if text: text = text.replace(" ", "+") url = url + "?text=" + text return url
python
def get_url( width, height=None, background_color="cccccc", text_color="969696", text=None, random_background_color=False ): """ Craft the URL for a placeholder image. You can customize the background color, text color and text using the optional keyword arguments If you want to use a random color pass in random_background_color as True. """ if random_background_color: background_color = _get_random_color() # If height is not provided, presume it is will be a square if not height: height = width d = dict( width=width, height=height, bcolor=background_color, tcolor=text_color ) url = URL % d if text: text = text.replace(" ", "+") url = url + "?text=" + text return url
[ "def", "get_url", "(", "width", ",", "height", "=", "None", ",", "background_color", "=", "\"cccccc\"", ",", "text_color", "=", "\"969696\"", ",", "text", "=", "None", ",", "random_background_color", "=", "False", ")", ":", "if", "random_background_color", ":", "background_color", "=", "_get_random_color", "(", ")", "# If height is not provided, presume it is will be a square", "if", "not", "height", ":", "height", "=", "width", "d", "=", "dict", "(", "width", "=", "width", ",", "height", "=", "height", ",", "bcolor", "=", "background_color", ",", "tcolor", "=", "text_color", ")", "url", "=", "URL", "%", "d", "if", "text", ":", "text", "=", "text", ".", "replace", "(", "\" \"", ",", "\"+\"", ")", "url", "=", "url", "+", "\"?text=\"", "+", "text", "return", "url" ]
Craft the URL for a placeholder image. You can customize the background color, text color and text using the optional keyword arguments If you want to use a random color pass in random_background_color as True.
[ "Craft", "the", "URL", "for", "a", "placeholder", "image", "." ]
72509c94952279503bbe8d5a710c1fd344da0670
https://github.com/datadesk/django-greeking/blob/72509c94952279503bbe8d5a710c1fd344da0670/greeking/placeholdit.py#L20-L48
245,556
ronaldguillen/wave
wave/reverse.py
preserve_builtin_query_params
def preserve_builtin_query_params(url, request=None): """ Given an incoming request, and an outgoing URL representation, append the value of any built-in query parameters. """ if request is None: return url overrides = [ api_settings.URL_FORMAT_OVERRIDE, ] for param in overrides: if param and (param in request.GET): value = request.GET[param] url = replace_query_param(url, param, value) return url
python
def preserve_builtin_query_params(url, request=None): """ Given an incoming request, and an outgoing URL representation, append the value of any built-in query parameters. """ if request is None: return url overrides = [ api_settings.URL_FORMAT_OVERRIDE, ] for param in overrides: if param and (param in request.GET): value = request.GET[param] url = replace_query_param(url, param, value) return url
[ "def", "preserve_builtin_query_params", "(", "url", ",", "request", "=", "None", ")", ":", "if", "request", "is", "None", ":", "return", "url", "overrides", "=", "[", "api_settings", ".", "URL_FORMAT_OVERRIDE", ",", "]", "for", "param", "in", "overrides", ":", "if", "param", "and", "(", "param", "in", "request", ".", "GET", ")", ":", "value", "=", "request", ".", "GET", "[", "param", "]", "url", "=", "replace_query_param", "(", "url", ",", "param", ",", "value", ")", "return", "url" ]
Given an incoming request, and an outgoing URL representation, append the value of any built-in query parameters.
[ "Given", "an", "incoming", "request", "and", "an", "outgoing", "URL", "representation", "append", "the", "value", "of", "any", "built", "-", "in", "query", "parameters", "." ]
20bb979c917f7634d8257992e6d449dc751256a9
https://github.com/ronaldguillen/wave/blob/20bb979c917f7634d8257992e6d449dc751256a9/wave/reverse.py#L15-L32
245,557
ronaldguillen/wave
wave/reverse.py
reverse
def reverse(viewname, args=None, kwargs=None, request=None, format=None, **extra): """ If versioning is being used then we pass any `reverse` calls through to the versioning scheme instance, so that the resulting URL can be modified if needed. """ scheme = getattr(request, 'versioning_scheme', None) if scheme is not None: try: url = scheme.reverse(viewname, args, kwargs, request, format, **extra) except NoReverseMatch: # In case the versioning scheme reversal fails, fallback to the # default implementation url = _reverse(viewname, args, kwargs, request, format, **extra) else: url = _reverse(viewname, args, kwargs, request, format, **extra) return preserve_builtin_query_params(url, request)
python
def reverse(viewname, args=None, kwargs=None, request=None, format=None, **extra): """ If versioning is being used then we pass any `reverse` calls through to the versioning scheme instance, so that the resulting URL can be modified if needed. """ scheme = getattr(request, 'versioning_scheme', None) if scheme is not None: try: url = scheme.reverse(viewname, args, kwargs, request, format, **extra) except NoReverseMatch: # In case the versioning scheme reversal fails, fallback to the # default implementation url = _reverse(viewname, args, kwargs, request, format, **extra) else: url = _reverse(viewname, args, kwargs, request, format, **extra) return preserve_builtin_query_params(url, request)
[ "def", "reverse", "(", "viewname", ",", "args", "=", "None", ",", "kwargs", "=", "None", ",", "request", "=", "None", ",", "format", "=", "None", ",", "*", "*", "extra", ")", ":", "scheme", "=", "getattr", "(", "request", ",", "'versioning_scheme'", ",", "None", ")", "if", "scheme", "is", "not", "None", ":", "try", ":", "url", "=", "scheme", ".", "reverse", "(", "viewname", ",", "args", ",", "kwargs", ",", "request", ",", "format", ",", "*", "*", "extra", ")", "except", "NoReverseMatch", ":", "# In case the versioning scheme reversal fails, fallback to the", "# default implementation", "url", "=", "_reverse", "(", "viewname", ",", "args", ",", "kwargs", ",", "request", ",", "format", ",", "*", "*", "extra", ")", "else", ":", "url", "=", "_reverse", "(", "viewname", ",", "args", ",", "kwargs", ",", "request", ",", "format", ",", "*", "*", "extra", ")", "return", "preserve_builtin_query_params", "(", "url", ",", "request", ")" ]
If versioning is being used then we pass any `reverse` calls through to the versioning scheme instance, so that the resulting URL can be modified if needed.
[ "If", "versioning", "is", "being", "used", "then", "we", "pass", "any", "reverse", "calls", "through", "to", "the", "versioning", "scheme", "instance", "so", "that", "the", "resulting", "URL", "can", "be", "modified", "if", "needed", "." ]
20bb979c917f7634d8257992e6d449dc751256a9
https://github.com/ronaldguillen/wave/blob/20bb979c917f7634d8257992e6d449dc751256a9/wave/reverse.py#L35-L52
245,558
ronaldguillen/wave
wave/reverse.py
_reverse
def _reverse(viewname, args=None, kwargs=None, request=None, format=None, **extra): """ Same as `django.core.urlresolvers.reverse`, but optionally takes a request and returns a fully qualified URL, using the request to get the base URL. """ if format is not None: kwargs = kwargs or {} kwargs['format'] = format url = django_reverse(viewname, args=args, kwargs=kwargs, **extra) if request: return request.build_absolute_uri(url) return url
python
def _reverse(viewname, args=None, kwargs=None, request=None, format=None, **extra): """ Same as `django.core.urlresolvers.reverse`, but optionally takes a request and returns a fully qualified URL, using the request to get the base URL. """ if format is not None: kwargs = kwargs or {} kwargs['format'] = format url = django_reverse(viewname, args=args, kwargs=kwargs, **extra) if request: return request.build_absolute_uri(url) return url
[ "def", "_reverse", "(", "viewname", ",", "args", "=", "None", ",", "kwargs", "=", "None", ",", "request", "=", "None", ",", "format", "=", "None", ",", "*", "*", "extra", ")", ":", "if", "format", "is", "not", "None", ":", "kwargs", "=", "kwargs", "or", "{", "}", "kwargs", "[", "'format'", "]", "=", "format", "url", "=", "django_reverse", "(", "viewname", ",", "args", "=", "args", ",", "kwargs", "=", "kwargs", ",", "*", "*", "extra", ")", "if", "request", ":", "return", "request", ".", "build_absolute_uri", "(", "url", ")", "return", "url" ]
Same as `django.core.urlresolvers.reverse`, but optionally takes a request and returns a fully qualified URL, using the request to get the base URL.
[ "Same", "as", "django", ".", "core", ".", "urlresolvers", ".", "reverse", "but", "optionally", "takes", "a", "request", "and", "returns", "a", "fully", "qualified", "URL", "using", "the", "request", "to", "get", "the", "base", "URL", "." ]
20bb979c917f7634d8257992e6d449dc751256a9
https://github.com/ronaldguillen/wave/blob/20bb979c917f7634d8257992e6d449dc751256a9/wave/reverse.py#L55-L66
245,559
etcher-be/emiz
emiz/new_miz.py
NewMiz.decompose
def decompose(miz_file: Path, output_folder: Path): """ Decompose this Miz into json Args: output_folder: folder to output the json structure as a Path miz_file: MIZ file path as a Path """ mission_folder, assets_folder = NewMiz._get_subfolders(output_folder) NewMiz._wipe_folders(mission_folder, assets_folder) LOGGER.info('unzipping mission file') with Miz(miz_file) as miz: version = miz.mission.d['version'] LOGGER.debug(f'mission version: "%s"', version) LOGGER.info('copying assets to: "%s"', assets_folder) ignore = shutil.ignore_patterns('mission') shutil.copytree(str(miz.temp_dir), str(assets_folder), ignore=ignore) NewMiz._reorder_warehouses(assets_folder) LOGGER.info('decomposing mission table into: "%s" (this will take a while)', mission_folder) NewMiz._decompose_dict(miz.mission.d, 'base_info', mission_folder, version, miz)
python
def decompose(miz_file: Path, output_folder: Path): """ Decompose this Miz into json Args: output_folder: folder to output the json structure as a Path miz_file: MIZ file path as a Path """ mission_folder, assets_folder = NewMiz._get_subfolders(output_folder) NewMiz._wipe_folders(mission_folder, assets_folder) LOGGER.info('unzipping mission file') with Miz(miz_file) as miz: version = miz.mission.d['version'] LOGGER.debug(f'mission version: "%s"', version) LOGGER.info('copying assets to: "%s"', assets_folder) ignore = shutil.ignore_patterns('mission') shutil.copytree(str(miz.temp_dir), str(assets_folder), ignore=ignore) NewMiz._reorder_warehouses(assets_folder) LOGGER.info('decomposing mission table into: "%s" (this will take a while)', mission_folder) NewMiz._decompose_dict(miz.mission.d, 'base_info', mission_folder, version, miz)
[ "def", "decompose", "(", "miz_file", ":", "Path", ",", "output_folder", ":", "Path", ")", ":", "mission_folder", ",", "assets_folder", "=", "NewMiz", ".", "_get_subfolders", "(", "output_folder", ")", "NewMiz", ".", "_wipe_folders", "(", "mission_folder", ",", "assets_folder", ")", "LOGGER", ".", "info", "(", "'unzipping mission file'", ")", "with", "Miz", "(", "miz_file", ")", "as", "miz", ":", "version", "=", "miz", ".", "mission", ".", "d", "[", "'version'", "]", "LOGGER", ".", "debug", "(", "f'mission version: \"%s\"'", ",", "version", ")", "LOGGER", ".", "info", "(", "'copying assets to: \"%s\"'", ",", "assets_folder", ")", "ignore", "=", "shutil", ".", "ignore_patterns", "(", "'mission'", ")", "shutil", ".", "copytree", "(", "str", "(", "miz", ".", "temp_dir", ")", ",", "str", "(", "assets_folder", ")", ",", "ignore", "=", "ignore", ")", "NewMiz", ".", "_reorder_warehouses", "(", "assets_folder", ")", "LOGGER", ".", "info", "(", "'decomposing mission table into: \"%s\" (this will take a while)'", ",", "mission_folder", ")", "NewMiz", ".", "_decompose_dict", "(", "miz", ".", "mission", ".", "d", ",", "'base_info'", ",", "mission_folder", ",", "version", ",", "miz", ")" ]
Decompose this Miz into json Args: output_folder: folder to output the json structure as a Path miz_file: MIZ file path as a Path
[ "Decompose", "this", "Miz", "into", "json" ]
1c3e32711921d7e600e85558ffe5d337956372de
https://github.com/etcher-be/emiz/blob/1c3e32711921d7e600e85558ffe5d337956372de/emiz/new_miz.py#L259-L281
245,560
etcher-be/emiz
emiz/new_miz.py
NewMiz.recompose
def recompose(src: Path, target_file: Path): """ Recompose a Miz from json object Args: src: folder containing the json structure target_file: target Miz file """ mission_folder, assets_folder = NewMiz._get_subfolders(src) # pylint: disable=c-extension-no-member base_info = ujson.loads(Path(mission_folder, 'base_info.json').read_text(encoding=ENCODING)) version = base_info['__version__'] with Miz(target_file) as miz: LOGGER.info('re-composing mission table from folder: "%s"', mission_folder) miz.mission.d = NewMiz._recreate_dict_from_folder(mission_folder, version) for item in assets_folder.iterdir(): target = Path(miz.temp_dir, item.name).absolute() if item.is_dir(): if target.exists(): shutil.rmtree(target) shutil.copytree(item.absolute(), target) elif item.is_file(): shutil.copy(item.absolute(), target) miz.zip(target_file, encode=False)
python
def recompose(src: Path, target_file: Path): """ Recompose a Miz from json object Args: src: folder containing the json structure target_file: target Miz file """ mission_folder, assets_folder = NewMiz._get_subfolders(src) # pylint: disable=c-extension-no-member base_info = ujson.loads(Path(mission_folder, 'base_info.json').read_text(encoding=ENCODING)) version = base_info['__version__'] with Miz(target_file) as miz: LOGGER.info('re-composing mission table from folder: "%s"', mission_folder) miz.mission.d = NewMiz._recreate_dict_from_folder(mission_folder, version) for item in assets_folder.iterdir(): target = Path(miz.temp_dir, item.name).absolute() if item.is_dir(): if target.exists(): shutil.rmtree(target) shutil.copytree(item.absolute(), target) elif item.is_file(): shutil.copy(item.absolute(), target) miz.zip(target_file, encode=False)
[ "def", "recompose", "(", "src", ":", "Path", ",", "target_file", ":", "Path", ")", ":", "mission_folder", ",", "assets_folder", "=", "NewMiz", ".", "_get_subfolders", "(", "src", ")", "# pylint: disable=c-extension-no-member", "base_info", "=", "ujson", ".", "loads", "(", "Path", "(", "mission_folder", ",", "'base_info.json'", ")", ".", "read_text", "(", "encoding", "=", "ENCODING", ")", ")", "version", "=", "base_info", "[", "'__version__'", "]", "with", "Miz", "(", "target_file", ")", "as", "miz", ":", "LOGGER", ".", "info", "(", "'re-composing mission table from folder: \"%s\"'", ",", "mission_folder", ")", "miz", ".", "mission", ".", "d", "=", "NewMiz", ".", "_recreate_dict_from_folder", "(", "mission_folder", ",", "version", ")", "for", "item", "in", "assets_folder", ".", "iterdir", "(", ")", ":", "target", "=", "Path", "(", "miz", ".", "temp_dir", ",", "item", ".", "name", ")", ".", "absolute", "(", ")", "if", "item", ".", "is_dir", "(", ")", ":", "if", "target", ".", "exists", "(", ")", ":", "shutil", ".", "rmtree", "(", "target", ")", "shutil", ".", "copytree", "(", "item", ".", "absolute", "(", ")", ",", "target", ")", "elif", "item", ".", "is_file", "(", ")", ":", "shutil", ".", "copy", "(", "item", ".", "absolute", "(", ")", ",", "target", ")", "miz", ".", "zip", "(", "target_file", ",", "encode", "=", "False", ")" ]
Recompose a Miz from json object Args: src: folder containing the json structure target_file: target Miz file
[ "Recompose", "a", "Miz", "from", "json", "object" ]
1c3e32711921d7e600e85558ffe5d337956372de
https://github.com/etcher-be/emiz/blob/1c3e32711921d7e600e85558ffe5d337956372de/emiz/new_miz.py#L284-L307
245,561
sahilchinoy/django-irs-filings
irs/management/commands/__init__.py
IRSCommand.handle
def handle(self, *args, **options): """ Sets options common to all commands. Any command subclassing this object should implement its own handle method, as is standard in Django, and run this method via a super call to inherit its functionality. """ # Create a data directory self.data_dir = os.path.join( settings.BASE_DIR, 'data') if not os.path.exists(self.data_dir): os.makedirs(self.data_dir) # Start the clock self.start_datetime = datetime.now()
python
def handle(self, *args, **options): """ Sets options common to all commands. Any command subclassing this object should implement its own handle method, as is standard in Django, and run this method via a super call to inherit its functionality. """ # Create a data directory self.data_dir = os.path.join( settings.BASE_DIR, 'data') if not os.path.exists(self.data_dir): os.makedirs(self.data_dir) # Start the clock self.start_datetime = datetime.now()
[ "def", "handle", "(", "self", ",", "*", "args", ",", "*", "*", "options", ")", ":", "# Create a data directory", "self", ".", "data_dir", "=", "os", ".", "path", ".", "join", "(", "settings", ".", "BASE_DIR", ",", "'data'", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "self", ".", "data_dir", ")", ":", "os", ".", "makedirs", "(", "self", ".", "data_dir", ")", "# Start the clock", "self", ".", "start_datetime", "=", "datetime", ".", "now", "(", ")" ]
Sets options common to all commands. Any command subclassing this object should implement its own handle method, as is standard in Django, and run this method via a super call to inherit its functionality.
[ "Sets", "options", "common", "to", "all", "commands", ".", "Any", "command", "subclassing", "this", "object", "should", "implement", "its", "own", "handle", "method", "as", "is", "standard", "in", "Django", "and", "run", "this", "method", "via", "a", "super", "call", "to", "inherit", "its", "functionality", "." ]
efe80cc57ce1d9d8488f4e9496cf2347e29b6d8b
https://github.com/sahilchinoy/django-irs-filings/blob/efe80cc57ce1d9d8488f4e9496cf2347e29b6d8b/irs/management/commands/__init__.py#L14-L30
245,562
tlatsas/wigiki
wigiki/generator.py
SiteGenerator._save_file
def _save_file(self, filename, contents): """write the html file contents to disk""" with open(filename, 'w') as f: f.write(contents)
python
def _save_file(self, filename, contents): """write the html file contents to disk""" with open(filename, 'w') as f: f.write(contents)
[ "def", "_save_file", "(", "self", ",", "filename", ",", "contents", ")", ":", "with", "open", "(", "filename", ",", "'w'", ")", "as", "f", ":", "f", ".", "write", "(", "contents", ")" ]
write the html file contents to disk
[ "write", "the", "html", "file", "contents", "to", "disk" ]
baf9c5a6523a9b90db7572330d04e3e199391273
https://github.com/tlatsas/wigiki/blob/baf9c5a6523a9b90db7572330d04e3e199391273/wigiki/generator.py#L49-L52
245,563
Kjwon15/autotweet
autotweet/database.py
get_session
def get_session(url): """Get db session. :param url: URL for connect with DB :type url: :class:`str` :returns: A sqlalchemy db session :rtype: :class:`sqlalchemy.orm.Session` """ engine = create_engine(url) db_session = scoped_session(sessionmaker(engine)) Base.metadata.create_all(engine) return db_session
python
def get_session(url): """Get db session. :param url: URL for connect with DB :type url: :class:`str` :returns: A sqlalchemy db session :rtype: :class:`sqlalchemy.orm.Session` """ engine = create_engine(url) db_session = scoped_session(sessionmaker(engine)) Base.metadata.create_all(engine) return db_session
[ "def", "get_session", "(", "url", ")", ":", "engine", "=", "create_engine", "(", "url", ")", "db_session", "=", "scoped_session", "(", "sessionmaker", "(", "engine", ")", ")", "Base", ".", "metadata", ".", "create_all", "(", "engine", ")", "return", "db_session" ]
Get db session. :param url: URL for connect with DB :type url: :class:`str` :returns: A sqlalchemy db session :rtype: :class:`sqlalchemy.orm.Session`
[ "Get", "db", "session", "." ]
c35b68ee1814916fbe9e5a5bd6ea6e75b3cc596e
https://github.com/Kjwon15/autotweet/blob/c35b68ee1814916fbe9e5a5bd6ea6e75b3cc596e/autotweet/database.py#L24-L37
245,564
jasonfharris/sysexecute
sysexecute/execute.py
_AsynchronouslyGetProcessOutput
def _AsynchronouslyGetProcessOutput(formattedCmd, printStdOut, printStdErr, **kwargs): ''' Asynchronously read the process ''' opts = filterKWArgsForFunc(kwargs, subprocess.Popen) opts['stdout'] = subprocess.PIPE opts['stderr'] = subprocess.PIPE process = subprocess.Popen(formattedCmd, **opts) # Launch the asynchronous readers of the process' stdout and stderr. stdout_queue = Queue.Queue() stdout_reader = _AsynchronousFileReader(process.stdout, stdout_queue) stdout_reader.start() stderr_queue = Queue.Queue() stderr_reader = _AsynchronousFileReader(process.stderr, stderr_queue) stderr_reader.start() stdOutLines = [] stdErrLines = [] # Check the queues if we received some output (until there is nothing more to get). while not stdout_reader.eof() or not stderr_reader.eof(): # Show what we received from standard output. while not stdout_queue.empty(): line = stdout_queue.get() stdOutLines.append(line) if printStdOut: print line.rstrip() # Show what we received from standard error. while not stderr_queue.empty(): line = stderr_queue.get() stdErrLines.append(line) if printStdErr: print colored(line.rstrip(),'red') # Sleep a bit before asking the readers again. time.sleep(.05) # Let's be tidy and join the threads we've started. stdout_reader.join() stderr_reader.join() # Close subprocess' file descriptors. process.stdout.close() process.stderr.close() process.wait() stdOut = ''.join(stdOutLines) stdErr = ''.join(stdErrLines) return (process.returncode, stdOut, stdErr)
python
def _AsynchronouslyGetProcessOutput(formattedCmd, printStdOut, printStdErr, **kwargs): ''' Asynchronously read the process ''' opts = filterKWArgsForFunc(kwargs, subprocess.Popen) opts['stdout'] = subprocess.PIPE opts['stderr'] = subprocess.PIPE process = subprocess.Popen(formattedCmd, **opts) # Launch the asynchronous readers of the process' stdout and stderr. stdout_queue = Queue.Queue() stdout_reader = _AsynchronousFileReader(process.stdout, stdout_queue) stdout_reader.start() stderr_queue = Queue.Queue() stderr_reader = _AsynchronousFileReader(process.stderr, stderr_queue) stderr_reader.start() stdOutLines = [] stdErrLines = [] # Check the queues if we received some output (until there is nothing more to get). while not stdout_reader.eof() or not stderr_reader.eof(): # Show what we received from standard output. while not stdout_queue.empty(): line = stdout_queue.get() stdOutLines.append(line) if printStdOut: print line.rstrip() # Show what we received from standard error. while not stderr_queue.empty(): line = stderr_queue.get() stdErrLines.append(line) if printStdErr: print colored(line.rstrip(),'red') # Sleep a bit before asking the readers again. time.sleep(.05) # Let's be tidy and join the threads we've started. stdout_reader.join() stderr_reader.join() # Close subprocess' file descriptors. process.stdout.close() process.stderr.close() process.wait() stdOut = ''.join(stdOutLines) stdErr = ''.join(stdErrLines) return (process.returncode, stdOut, stdErr)
[ "def", "_AsynchronouslyGetProcessOutput", "(", "formattedCmd", ",", "printStdOut", ",", "printStdErr", ",", "*", "*", "kwargs", ")", ":", "opts", "=", "filterKWArgsForFunc", "(", "kwargs", ",", "subprocess", ".", "Popen", ")", "opts", "[", "'stdout'", "]", "=", "subprocess", ".", "PIPE", "opts", "[", "'stderr'", "]", "=", "subprocess", ".", "PIPE", "process", "=", "subprocess", ".", "Popen", "(", "formattedCmd", ",", "*", "*", "opts", ")", "# Launch the asynchronous readers of the process' stdout and stderr.", "stdout_queue", "=", "Queue", ".", "Queue", "(", ")", "stdout_reader", "=", "_AsynchronousFileReader", "(", "process", ".", "stdout", ",", "stdout_queue", ")", "stdout_reader", ".", "start", "(", ")", "stderr_queue", "=", "Queue", ".", "Queue", "(", ")", "stderr_reader", "=", "_AsynchronousFileReader", "(", "process", ".", "stderr", ",", "stderr_queue", ")", "stderr_reader", ".", "start", "(", ")", "stdOutLines", "=", "[", "]", "stdErrLines", "=", "[", "]", "# Check the queues if we received some output (until there is nothing more to get).", "while", "not", "stdout_reader", ".", "eof", "(", ")", "or", "not", "stderr_reader", ".", "eof", "(", ")", ":", "# Show what we received from standard output.", "while", "not", "stdout_queue", ".", "empty", "(", ")", ":", "line", "=", "stdout_queue", ".", "get", "(", ")", "stdOutLines", ".", "append", "(", "line", ")", "if", "printStdOut", ":", "print", "line", ".", "rstrip", "(", ")", "# Show what we received from standard error.", "while", "not", "stderr_queue", ".", "empty", "(", ")", ":", "line", "=", "stderr_queue", ".", "get", "(", ")", "stdErrLines", ".", "append", "(", "line", ")", "if", "printStdErr", ":", "print", "colored", "(", "line", ".", "rstrip", "(", ")", ",", "'red'", ")", "# Sleep a bit before asking the readers again.", "time", ".", "sleep", "(", ".05", ")", "# Let's be tidy and join the threads we've started.", "stdout_reader", ".", "join", "(", ")", "stderr_reader", ".", "join", "(", ")", "# Close 
subprocess' file descriptors.", "process", ".", "stdout", ".", "close", "(", ")", "process", ".", "stderr", ".", "close", "(", ")", "process", ".", "wait", "(", ")", "stdOut", "=", "''", ".", "join", "(", "stdOutLines", ")", "stdErr", "=", "''", ".", "join", "(", "stdErrLines", ")", "return", "(", "process", ".", "returncode", ",", "stdOut", ",", "stdErr", ")" ]
Asynchronously read the process
[ "Asynchronously", "read", "the", "process" ]
5fb0639364fa91452da93f99220bf622351d0b7a
https://github.com/jasonfharris/sysexecute/blob/5fb0639364fa91452da93f99220bf622351d0b7a/sysexecute/execute.py#L139-L186
245,565
jasonfharris/sysexecute
sysexecute/execute.py
execute
def execute(cmd, verbosityThreshold = 1, **kwargs): '''execute the passed in command in the shell''' global exectue_defaults opts = merge(exectue_defaults, kwargs) # the options computed from the default options together with the passed in options. subopts = filterKWArgsForFunc(opts, subprocess.Popen) formattedCmd = cmd.format(**getFormatBindings(cmd,1)) shouldPrint = opts['verbosity'] >= verbosityThreshold isDryrun = opts['dryrun'] if shouldPrint: msg = "would execute:" if isDryrun else "executing:" pre = "("+subopts['cwd']+")" if (subopts['cwd'] != exectue_defaults['cwd']) else "" print "{pre}{msg} {formattedCmd}".format(pre=pre, formattedCmd=formattedCmd, msg=msg) if isDryrun: return (0, None, None) printStdOut = shouldPrint and opts['permitShowingStdOut'] printStdErr = shouldPrint and opts['permitShowingStdErr'] returnCode = 0 if opts['captureStdOutStdErr']: (returnCode, stdOut, stdErr) = _AsynchronouslyGetProcessOutput(formattedCmd, printStdOut, printStdErr, **subopts) else: subopts['stdout'] = None if printStdOut else subprocess.PIPE subopts['stderr'] = None if printStdErr else subprocess.PIPE process = subprocess.Popen(formattedCmd, **subopts) res = process.communicate() returnCode = process.returncode stdOut = res[0] stdErr = res[1] if returnCode and not opts['ignoreErrors']: print colored('Command {} failed with return code {}!'.format(cmd, returnCode),'red') sys.exit(returnCode) # always print any errors return (returnCode, stdOut, stdErr)
python
def execute(cmd, verbosityThreshold = 1, **kwargs): '''execute the passed in command in the shell''' global exectue_defaults opts = merge(exectue_defaults, kwargs) # the options computed from the default options together with the passed in options. subopts = filterKWArgsForFunc(opts, subprocess.Popen) formattedCmd = cmd.format(**getFormatBindings(cmd,1)) shouldPrint = opts['verbosity'] >= verbosityThreshold isDryrun = opts['dryrun'] if shouldPrint: msg = "would execute:" if isDryrun else "executing:" pre = "("+subopts['cwd']+")" if (subopts['cwd'] != exectue_defaults['cwd']) else "" print "{pre}{msg} {formattedCmd}".format(pre=pre, formattedCmd=formattedCmd, msg=msg) if isDryrun: return (0, None, None) printStdOut = shouldPrint and opts['permitShowingStdOut'] printStdErr = shouldPrint and opts['permitShowingStdErr'] returnCode = 0 if opts['captureStdOutStdErr']: (returnCode, stdOut, stdErr) = _AsynchronouslyGetProcessOutput(formattedCmd, printStdOut, printStdErr, **subopts) else: subopts['stdout'] = None if printStdOut else subprocess.PIPE subopts['stderr'] = None if printStdErr else subprocess.PIPE process = subprocess.Popen(formattedCmd, **subopts) res = process.communicate() returnCode = process.returncode stdOut = res[0] stdErr = res[1] if returnCode and not opts['ignoreErrors']: print colored('Command {} failed with return code {}!'.format(cmd, returnCode),'red') sys.exit(returnCode) # always print any errors return (returnCode, stdOut, stdErr)
[ "def", "execute", "(", "cmd", ",", "verbosityThreshold", "=", "1", ",", "*", "*", "kwargs", ")", ":", "global", "exectue_defaults", "opts", "=", "merge", "(", "exectue_defaults", ",", "kwargs", ")", "# the options computed from the default options together with the passed in options.", "subopts", "=", "filterKWArgsForFunc", "(", "opts", ",", "subprocess", ".", "Popen", ")", "formattedCmd", "=", "cmd", ".", "format", "(", "*", "*", "getFormatBindings", "(", "cmd", ",", "1", ")", ")", "shouldPrint", "=", "opts", "[", "'verbosity'", "]", ">=", "verbosityThreshold", "isDryrun", "=", "opts", "[", "'dryrun'", "]", "if", "shouldPrint", ":", "msg", "=", "\"would execute:\"", "if", "isDryrun", "else", "\"executing:\"", "pre", "=", "\"(\"", "+", "subopts", "[", "'cwd'", "]", "+", "\")\"", "if", "(", "subopts", "[", "'cwd'", "]", "!=", "exectue_defaults", "[", "'cwd'", "]", ")", "else", "\"\"", "print", "\"{pre}{msg} {formattedCmd}\"", ".", "format", "(", "pre", "=", "pre", ",", "formattedCmd", "=", "formattedCmd", ",", "msg", "=", "msg", ")", "if", "isDryrun", ":", "return", "(", "0", ",", "None", ",", "None", ")", "printStdOut", "=", "shouldPrint", "and", "opts", "[", "'permitShowingStdOut'", "]", "printStdErr", "=", "shouldPrint", "and", "opts", "[", "'permitShowingStdErr'", "]", "returnCode", "=", "0", "if", "opts", "[", "'captureStdOutStdErr'", "]", ":", "(", "returnCode", ",", "stdOut", ",", "stdErr", ")", "=", "_AsynchronouslyGetProcessOutput", "(", "formattedCmd", ",", "printStdOut", ",", "printStdErr", ",", "*", "*", "subopts", ")", "else", ":", "subopts", "[", "'stdout'", "]", "=", "None", "if", "printStdOut", "else", "subprocess", ".", "PIPE", "subopts", "[", "'stderr'", "]", "=", "None", "if", "printStdErr", "else", "subprocess", ".", "PIPE", "process", "=", "subprocess", ".", "Popen", "(", "formattedCmd", ",", "*", "*", "subopts", ")", "res", "=", "process", ".", "communicate", "(", ")", "returnCode", "=", "process", ".", "returncode", "stdOut", "=", "res", 
"[", "0", "]", "stdErr", "=", "res", "[", "1", "]", "if", "returnCode", "and", "not", "opts", "[", "'ignoreErrors'", "]", ":", "print", "colored", "(", "'Command {} failed with return code {}!'", ".", "format", "(", "cmd", ",", "returnCode", ")", ",", "'red'", ")", "sys", ".", "exit", "(", "returnCode", ")", "# always print any errors", "return", "(", "returnCode", ",", "stdOut", ",", "stdErr", ")" ]
execute the passed in command in the shell
[ "execute", "the", "passed", "in", "command", "in", "the", "shell" ]
5fb0639364fa91452da93f99220bf622351d0b7a
https://github.com/jasonfharris/sysexecute/blob/5fb0639364fa91452da93f99220bf622351d0b7a/sysexecute/execute.py#L190-L226
245,566
collectiveacuity/labPack
labpack/databases/sql.py
sqlClient._extract_columns
def _extract_columns(self, table_name): ''' a method to extract the column properties of an existing table ''' import re from sqlalchemy import MetaData, VARCHAR, INTEGER, BLOB, BOOLEAN, FLOAT from sqlalchemy.dialects.postgresql import DOUBLE_PRECISION, BIT, BYTEA # retrieve list of tables metadata_object = MetaData() table_list = self.engine.table_names() # determine columns prior_columns = {} if table_name in table_list: metadata_object.reflect(self.engine) existing_table = metadata_object.tables[table_name] for column in existing_table.columns: column_type = None column_length = None if column.type.__class__ == FLOAT().__class__: column_type = 'float' elif column.type.__class__ == DOUBLE_PRECISION().__class__: # Postgres column_type = 'float' elif column.type.__class__ == INTEGER().__class__: column_type = 'integer' elif column.type.__class__ == VARCHAR().__class__: column_length = getattr(column.type, 'length', None) if column_length == 1: if column.primary_key: column_length = None column_type = 'string' elif column.type.__class__ == BLOB().__class__: column_type = 'list' elif column.type.__class__ in (BIT().__class__, BYTEA().__class__): column_type = 'list' elif column.type.__class__ == BOOLEAN().__class__: column_type = 'boolean' prior_columns[column.key] = (column.key, column_type, '', column_length) return prior_columns
python
def _extract_columns(self, table_name): ''' a method to extract the column properties of an existing table ''' import re from sqlalchemy import MetaData, VARCHAR, INTEGER, BLOB, BOOLEAN, FLOAT from sqlalchemy.dialects.postgresql import DOUBLE_PRECISION, BIT, BYTEA # retrieve list of tables metadata_object = MetaData() table_list = self.engine.table_names() # determine columns prior_columns = {} if table_name in table_list: metadata_object.reflect(self.engine) existing_table = metadata_object.tables[table_name] for column in existing_table.columns: column_type = None column_length = None if column.type.__class__ == FLOAT().__class__: column_type = 'float' elif column.type.__class__ == DOUBLE_PRECISION().__class__: # Postgres column_type = 'float' elif column.type.__class__ == INTEGER().__class__: column_type = 'integer' elif column.type.__class__ == VARCHAR().__class__: column_length = getattr(column.type, 'length', None) if column_length == 1: if column.primary_key: column_length = None column_type = 'string' elif column.type.__class__ == BLOB().__class__: column_type = 'list' elif column.type.__class__ in (BIT().__class__, BYTEA().__class__): column_type = 'list' elif column.type.__class__ == BOOLEAN().__class__: column_type = 'boolean' prior_columns[column.key] = (column.key, column_type, '', column_length) return prior_columns
[ "def", "_extract_columns", "(", "self", ",", "table_name", ")", ":", "import", "re", "from", "sqlalchemy", "import", "MetaData", ",", "VARCHAR", ",", "INTEGER", ",", "BLOB", ",", "BOOLEAN", ",", "FLOAT", "from", "sqlalchemy", ".", "dialects", ".", "postgresql", "import", "DOUBLE_PRECISION", ",", "BIT", ",", "BYTEA", "# retrieve list of tables", "metadata_object", "=", "MetaData", "(", ")", "table_list", "=", "self", ".", "engine", ".", "table_names", "(", ")", "# determine columns", "prior_columns", "=", "{", "}", "if", "table_name", "in", "table_list", ":", "metadata_object", ".", "reflect", "(", "self", ".", "engine", ")", "existing_table", "=", "metadata_object", ".", "tables", "[", "table_name", "]", "for", "column", "in", "existing_table", ".", "columns", ":", "column_type", "=", "None", "column_length", "=", "None", "if", "column", ".", "type", ".", "__class__", "==", "FLOAT", "(", ")", ".", "__class__", ":", "column_type", "=", "'float'", "elif", "column", ".", "type", ".", "__class__", "==", "DOUBLE_PRECISION", "(", ")", ".", "__class__", ":", "# Postgres", "column_type", "=", "'float'", "elif", "column", ".", "type", ".", "__class__", "==", "INTEGER", "(", ")", ".", "__class__", ":", "column_type", "=", "'integer'", "elif", "column", ".", "type", ".", "__class__", "==", "VARCHAR", "(", ")", ".", "__class__", ":", "column_length", "=", "getattr", "(", "column", ".", "type", ",", "'length'", ",", "None", ")", "if", "column_length", "==", "1", ":", "if", "column", ".", "primary_key", ":", "column_length", "=", "None", "column_type", "=", "'string'", "elif", "column", ".", "type", ".", "__class__", "==", "BLOB", "(", ")", ".", "__class__", ":", "column_type", "=", "'list'", "elif", "column", ".", "type", ".", "__class__", "in", "(", "BIT", "(", ")", ".", "__class__", ",", "BYTEA", "(", ")", ".", "__class__", ")", ":", "column_type", "=", "'list'", "elif", "column", ".", "type", ".", "__class__", "==", "BOOLEAN", "(", ")", ".", "__class__", ":", 
"column_type", "=", "'boolean'", "prior_columns", "[", "column", ".", "key", "]", "=", "(", "column", ".", "key", ",", "column_type", ",", "''", ",", "column_length", ")", "return", "prior_columns" ]
a method to extract the column properties of an existing table
[ "a", "method", "to", "extract", "the", "column", "properties", "of", "an", "existing", "table" ]
52949ece35e72e3cc308f54d9ffa6bfbd96805b8
https://github.com/collectiveacuity/labPack/blob/52949ece35e72e3cc308f54d9ffa6bfbd96805b8/labpack/databases/sql.py#L268-L308
245,567
collectiveacuity/labPack
labpack/databases/sql.py
sqlClient._parse_columns
def _parse_columns(self): ''' a helper method for parsing the column properties from the record schema ''' # construct column list column_map = {} for key, value in self.model.keyMap.items(): record_key = key[1:] if record_key: if self.item_key.findall(record_key): pass else: if value['value_datatype'] == 'map': continue datatype = value['value_datatype'] if value['value_datatype'] == 'number': datatype = 'float' if 'integer_data' in value.keys(): if value['integer_data']: datatype = 'integer' replace_key = '' if 'field_metadata' in value.keys(): if 'replace_key' in value['field_metadata'].keys(): if isinstance(value['field_metadata']['replace_key'], str): replace_key = value['field_metadata']['replace_key'] max_length = None if 'max_length' in value.keys(): max_length = value['max_length'] column_map[record_key] = (record_key, datatype, replace_key, max_length) return column_map
python
def _parse_columns(self): ''' a helper method for parsing the column properties from the record schema ''' # construct column list column_map = {} for key, value in self.model.keyMap.items(): record_key = key[1:] if record_key: if self.item_key.findall(record_key): pass else: if value['value_datatype'] == 'map': continue datatype = value['value_datatype'] if value['value_datatype'] == 'number': datatype = 'float' if 'integer_data' in value.keys(): if value['integer_data']: datatype = 'integer' replace_key = '' if 'field_metadata' in value.keys(): if 'replace_key' in value['field_metadata'].keys(): if isinstance(value['field_metadata']['replace_key'], str): replace_key = value['field_metadata']['replace_key'] max_length = None if 'max_length' in value.keys(): max_length = value['max_length'] column_map[record_key] = (record_key, datatype, replace_key, max_length) return column_map
[ "def", "_parse_columns", "(", "self", ")", ":", "# construct column list", "column_map", "=", "{", "}", "for", "key", ",", "value", "in", "self", ".", "model", ".", "keyMap", ".", "items", "(", ")", ":", "record_key", "=", "key", "[", "1", ":", "]", "if", "record_key", ":", "if", "self", ".", "item_key", ".", "findall", "(", "record_key", ")", ":", "pass", "else", ":", "if", "value", "[", "'value_datatype'", "]", "==", "'map'", ":", "continue", "datatype", "=", "value", "[", "'value_datatype'", "]", "if", "value", "[", "'value_datatype'", "]", "==", "'number'", ":", "datatype", "=", "'float'", "if", "'integer_data'", "in", "value", ".", "keys", "(", ")", ":", "if", "value", "[", "'integer_data'", "]", ":", "datatype", "=", "'integer'", "replace_key", "=", "''", "if", "'field_metadata'", "in", "value", ".", "keys", "(", ")", ":", "if", "'replace_key'", "in", "value", "[", "'field_metadata'", "]", ".", "keys", "(", ")", ":", "if", "isinstance", "(", "value", "[", "'field_metadata'", "]", "[", "'replace_key'", "]", ",", "str", ")", ":", "replace_key", "=", "value", "[", "'field_metadata'", "]", "[", "'replace_key'", "]", "max_length", "=", "None", "if", "'max_length'", "in", "value", ".", "keys", "(", ")", ":", "max_length", "=", "value", "[", "'max_length'", "]", "column_map", "[", "record_key", "]", "=", "(", "record_key", ",", "datatype", ",", "replace_key", ",", "max_length", ")", "return", "column_map" ]
a helper method for parsing the column properties from the record schema
[ "a", "helper", "method", "for", "parsing", "the", "column", "properties", "from", "the", "record", "schema" ]
52949ece35e72e3cc308f54d9ffa6bfbd96805b8
https://github.com/collectiveacuity/labPack/blob/52949ece35e72e3cc308f54d9ffa6bfbd96805b8/labpack/databases/sql.py#L310-L340
245,568
collectiveacuity/labPack
labpack/databases/sql.py
sqlClient._construct_columns
def _construct_columns(self, column_map): ''' a helper method for constructing the column objects for a table object ''' from sqlalchemy import Column, String, Boolean, Integer, Float, Binary column_args = [] for key, value in column_map.items(): record_key = value[0] datatype = value[1] max_length = value[2] if record_key == 'id': if datatype in ('string', 'float', 'integer'): if datatype == 'string': if max_length: column_args.insert(0, Column(record_key, String(max_length), primary_key=True)) else: column_args.insert(0, Column(record_key, String, primary_key=True)) elif datatype == 'float': column_args.insert(0, Column(record_key, Float, primary_key=True)) elif datatype == 'integer': column_args.insert(0, Column(record_key, Integer, primary_key=True)) else: raise ValueError('Field "id" in record_schema must be a string, float or integer.') else: if datatype == 'boolean': column_args.append(Column(record_key, Boolean)) elif datatype == 'string': if max_length: column_args.append(Column(record_key, String(max_length))) else: column_args.append(Column(record_key, String)) elif datatype == 'float': column_args.append(Column(record_key, Float)) elif datatype == 'integer': column_args.append(Column(record_key, Integer)) elif datatype == 'list': column_args.append(Column(record_key, Binary)) return column_args
python
def _construct_columns(self, column_map): ''' a helper method for constructing the column objects for a table object ''' from sqlalchemy import Column, String, Boolean, Integer, Float, Binary column_args = [] for key, value in column_map.items(): record_key = value[0] datatype = value[1] max_length = value[2] if record_key == 'id': if datatype in ('string', 'float', 'integer'): if datatype == 'string': if max_length: column_args.insert(0, Column(record_key, String(max_length), primary_key=True)) else: column_args.insert(0, Column(record_key, String, primary_key=True)) elif datatype == 'float': column_args.insert(0, Column(record_key, Float, primary_key=True)) elif datatype == 'integer': column_args.insert(0, Column(record_key, Integer, primary_key=True)) else: raise ValueError('Field "id" in record_schema must be a string, float or integer.') else: if datatype == 'boolean': column_args.append(Column(record_key, Boolean)) elif datatype == 'string': if max_length: column_args.append(Column(record_key, String(max_length))) else: column_args.append(Column(record_key, String)) elif datatype == 'float': column_args.append(Column(record_key, Float)) elif datatype == 'integer': column_args.append(Column(record_key, Integer)) elif datatype == 'list': column_args.append(Column(record_key, Binary)) return column_args
[ "def", "_construct_columns", "(", "self", ",", "column_map", ")", ":", "from", "sqlalchemy", "import", "Column", ",", "String", ",", "Boolean", ",", "Integer", ",", "Float", ",", "Binary", "column_args", "=", "[", "]", "for", "key", ",", "value", "in", "column_map", ".", "items", "(", ")", ":", "record_key", "=", "value", "[", "0", "]", "datatype", "=", "value", "[", "1", "]", "max_length", "=", "value", "[", "2", "]", "if", "record_key", "==", "'id'", ":", "if", "datatype", "in", "(", "'string'", ",", "'float'", ",", "'integer'", ")", ":", "if", "datatype", "==", "'string'", ":", "if", "max_length", ":", "column_args", ".", "insert", "(", "0", ",", "Column", "(", "record_key", ",", "String", "(", "max_length", ")", ",", "primary_key", "=", "True", ")", ")", "else", ":", "column_args", ".", "insert", "(", "0", ",", "Column", "(", "record_key", ",", "String", ",", "primary_key", "=", "True", ")", ")", "elif", "datatype", "==", "'float'", ":", "column_args", ".", "insert", "(", "0", ",", "Column", "(", "record_key", ",", "Float", ",", "primary_key", "=", "True", ")", ")", "elif", "datatype", "==", "'integer'", ":", "column_args", ".", "insert", "(", "0", ",", "Column", "(", "record_key", ",", "Integer", ",", "primary_key", "=", "True", ")", ")", "else", ":", "raise", "ValueError", "(", "'Field \"id\" in record_schema must be a string, float or integer.'", ")", "else", ":", "if", "datatype", "==", "'boolean'", ":", "column_args", ".", "append", "(", "Column", "(", "record_key", ",", "Boolean", ")", ")", "elif", "datatype", "==", "'string'", ":", "if", "max_length", ":", "column_args", ".", "append", "(", "Column", "(", "record_key", ",", "String", "(", "max_length", ")", ")", ")", "else", ":", "column_args", ".", "append", "(", "Column", "(", "record_key", ",", "String", ")", ")", "elif", "datatype", "==", "'float'", ":", "column_args", ".", "append", "(", "Column", "(", "record_key", ",", "Float", ")", ")", "elif", "datatype", "==", "'integer'", ":", "column_args", 
".", "append", "(", "Column", "(", "record_key", ",", "Integer", ")", ")", "elif", "datatype", "==", "'list'", ":", "column_args", ".", "append", "(", "Column", "(", "record_key", ",", "Binary", ")", ")", "return", "column_args" ]
a helper method for constructing the column objects for a table object
[ "a", "helper", "method", "for", "constructing", "the", "column", "objects", "for", "a", "table", "object" ]
52949ece35e72e3cc308f54d9ffa6bfbd96805b8
https://github.com/collectiveacuity/labPack/blob/52949ece35e72e3cc308f54d9ffa6bfbd96805b8/labpack/databases/sql.py#L342-L381
245,569
collectiveacuity/labPack
labpack/databases/sql.py
sqlClient._reconstruct_record
def _reconstruct_record(self, record_object): ''' a helper method for reconstructing record fields from record object ''' record_details = {} current_details = record_details for key, value in self.model.keyMap.items(): record_key = key[1:] if record_key: record_value = getattr(record_object, record_key, None) if record_value != None: record_segments = record_key.split('.') for i in range(len(record_segments)): segment = record_segments[i] if i + 1 < len(record_segments): if segment not in record_details.keys(): current_details[segment] = {} current_details = current_details[segment] else: if isinstance(record_value, bytes): current_details[segment] = pickle.loads(record_value) else: current_details[segment] = record_value current_details = record_details return record_details
python
def _reconstruct_record(self, record_object): ''' a helper method for reconstructing record fields from record object ''' record_details = {} current_details = record_details for key, value in self.model.keyMap.items(): record_key = key[1:] if record_key: record_value = getattr(record_object, record_key, None) if record_value != None: record_segments = record_key.split('.') for i in range(len(record_segments)): segment = record_segments[i] if i + 1 < len(record_segments): if segment not in record_details.keys(): current_details[segment] = {} current_details = current_details[segment] else: if isinstance(record_value, bytes): current_details[segment] = pickle.loads(record_value) else: current_details[segment] = record_value current_details = record_details return record_details
[ "def", "_reconstruct_record", "(", "self", ",", "record_object", ")", ":", "record_details", "=", "{", "}", "current_details", "=", "record_details", "for", "key", ",", "value", "in", "self", ".", "model", ".", "keyMap", ".", "items", "(", ")", ":", "record_key", "=", "key", "[", "1", ":", "]", "if", "record_key", ":", "record_value", "=", "getattr", "(", "record_object", ",", "record_key", ",", "None", ")", "if", "record_value", "!=", "None", ":", "record_segments", "=", "record_key", ".", "split", "(", "'.'", ")", "for", "i", "in", "range", "(", "len", "(", "record_segments", ")", ")", ":", "segment", "=", "record_segments", "[", "i", "]", "if", "i", "+", "1", "<", "len", "(", "record_segments", ")", ":", "if", "segment", "not", "in", "record_details", ".", "keys", "(", ")", ":", "current_details", "[", "segment", "]", "=", "{", "}", "current_details", "=", "current_details", "[", "segment", "]", "else", ":", "if", "isinstance", "(", "record_value", ",", "bytes", ")", ":", "current_details", "[", "segment", "]", "=", "pickle", ".", "loads", "(", "record_value", ")", "else", ":", "current_details", "[", "segment", "]", "=", "record_value", "current_details", "=", "record_details", "return", "record_details" ]
a helper method for reconstructing record fields from record object
[ "a", "helper", "method", "for", "reconstructing", "record", "fields", "from", "record", "object" ]
52949ece35e72e3cc308f54d9ffa6bfbd96805b8
https://github.com/collectiveacuity/labPack/blob/52949ece35e72e3cc308f54d9ffa6bfbd96805b8/labpack/databases/sql.py#L383-L408
245,570
collectiveacuity/labPack
labpack/databases/sql.py
sqlClient._compare_columns
def _compare_columns(self, new_columns, old_columns): ''' a helper method for generating differences between column properties ''' # print(new_columns) # print(old_columns) add_columns = {} remove_columns = {} rename_columns = {} retype_columns = {} resize_columns = {} for key, value in new_columns.items(): if key not in old_columns.keys(): add_columns[key] = True if value[2]: if value[2] in old_columns.keys(): rename_columns[key] = value[2] del add_columns[key] else: if value[1] != old_columns[key][1]: retype_columns[key] = value[1] if value[3] != old_columns[key][3]: resize_columns[key] = value[3] remove_keys = set(old_columns.keys()) - set(new_columns.keys()) if remove_keys: for key in list(remove_keys): remove_columns[key] = True return add_columns, remove_columns, rename_columns, retype_columns, resize_columns
python
def _compare_columns(self, new_columns, old_columns): ''' a helper method for generating differences between column properties ''' # print(new_columns) # print(old_columns) add_columns = {} remove_columns = {} rename_columns = {} retype_columns = {} resize_columns = {} for key, value in new_columns.items(): if key not in old_columns.keys(): add_columns[key] = True if value[2]: if value[2] in old_columns.keys(): rename_columns[key] = value[2] del add_columns[key] else: if value[1] != old_columns[key][1]: retype_columns[key] = value[1] if value[3] != old_columns[key][3]: resize_columns[key] = value[3] remove_keys = set(old_columns.keys()) - set(new_columns.keys()) if remove_keys: for key in list(remove_keys): remove_columns[key] = True return add_columns, remove_columns, rename_columns, retype_columns, resize_columns
[ "def", "_compare_columns", "(", "self", ",", "new_columns", ",", "old_columns", ")", ":", "# print(new_columns)", "# print(old_columns)", "add_columns", "=", "{", "}", "remove_columns", "=", "{", "}", "rename_columns", "=", "{", "}", "retype_columns", "=", "{", "}", "resize_columns", "=", "{", "}", "for", "key", ",", "value", "in", "new_columns", ".", "items", "(", ")", ":", "if", "key", "not", "in", "old_columns", ".", "keys", "(", ")", ":", "add_columns", "[", "key", "]", "=", "True", "if", "value", "[", "2", "]", ":", "if", "value", "[", "2", "]", "in", "old_columns", ".", "keys", "(", ")", ":", "rename_columns", "[", "key", "]", "=", "value", "[", "2", "]", "del", "add_columns", "[", "key", "]", "else", ":", "if", "value", "[", "1", "]", "!=", "old_columns", "[", "key", "]", "[", "1", "]", ":", "retype_columns", "[", "key", "]", "=", "value", "[", "1", "]", "if", "value", "[", "3", "]", "!=", "old_columns", "[", "key", "]", "[", "3", "]", ":", "resize_columns", "[", "key", "]", "=", "value", "[", "3", "]", "remove_keys", "=", "set", "(", "old_columns", ".", "keys", "(", ")", ")", "-", "set", "(", "new_columns", ".", "keys", "(", ")", ")", "if", "remove_keys", ":", "for", "key", "in", "list", "(", "remove_keys", ")", ":", "remove_columns", "[", "key", "]", "=", "True", "return", "add_columns", ",", "remove_columns", ",", "rename_columns", ",", "retype_columns", ",", "resize_columns" ]
a helper method for generating differences between column properties
[ "a", "helper", "method", "for", "generating", "differences", "between", "column", "properties" ]
52949ece35e72e3cc308f54d9ffa6bfbd96805b8
https://github.com/collectiveacuity/labPack/blob/52949ece35e72e3cc308f54d9ffa6bfbd96805b8/labpack/databases/sql.py#L410-L439
245,571
collectiveacuity/labPack
labpack/databases/sql.py
sqlClient._construct_inserts
def _construct_inserts(self, record, new_columns, rename_columns, retype_columns, resize_columns): ''' a helper method for constructing the insert kwargs for a record ''' insert_kwargs = {} for key, value in new_columns.items(): # retrieve value for key (or from old key name) if key in rename_columns.keys(): record_value = getattr(record, rename_columns[key], None) else: record_value = getattr(record, key, None) # attempt to convert datatype if record_value: if key in retype_columns.keys(): try: old_list = False if isinstance(record_value, bytes): record_value = pickle.loads(record_value) old_list = True if retype_columns[key] == 'boolean': record_value = bool(record_value) elif retype_columns[key] == 'string': if old_list: record_value = ','.join(record_value) else: record_value = str(record_value) elif retype_columns[key] == 'integer': if old_list: record_value = int(record_value[0]) else: record_value = int(record_value) elif retype_columns[key] == 'float': if old_list: record_value = int(record_value[0]) else: record_value = float(record_value) elif retype_columns[key] == 'list': if isinstance(record_value, str): record_value = pickle.dumps(record_value.split(',')) else: record_value = pickle.dumps([record_value]) except: record_value = None # attempt to resize string data if key in resize_columns.keys(): max_length = resize_columns[key] try: if len(record_value) > max_length: record_value = record_value[0:max_length] except: record_value = None insert_kwargs[key] = record_value return insert_kwargs
python
def _construct_inserts(self, record, new_columns, rename_columns, retype_columns, resize_columns): ''' a helper method for constructing the insert kwargs for a record ''' insert_kwargs = {} for key, value in new_columns.items(): # retrieve value for key (or from old key name) if key in rename_columns.keys(): record_value = getattr(record, rename_columns[key], None) else: record_value = getattr(record, key, None) # attempt to convert datatype if record_value: if key in retype_columns.keys(): try: old_list = False if isinstance(record_value, bytes): record_value = pickle.loads(record_value) old_list = True if retype_columns[key] == 'boolean': record_value = bool(record_value) elif retype_columns[key] == 'string': if old_list: record_value = ','.join(record_value) else: record_value = str(record_value) elif retype_columns[key] == 'integer': if old_list: record_value = int(record_value[0]) else: record_value = int(record_value) elif retype_columns[key] == 'float': if old_list: record_value = int(record_value[0]) else: record_value = float(record_value) elif retype_columns[key] == 'list': if isinstance(record_value, str): record_value = pickle.dumps(record_value.split(',')) else: record_value = pickle.dumps([record_value]) except: record_value = None # attempt to resize string data if key in resize_columns.keys(): max_length = resize_columns[key] try: if len(record_value) > max_length: record_value = record_value[0:max_length] except: record_value = None insert_kwargs[key] = record_value return insert_kwargs
[ "def", "_construct_inserts", "(", "self", ",", "record", ",", "new_columns", ",", "rename_columns", ",", "retype_columns", ",", "resize_columns", ")", ":", "insert_kwargs", "=", "{", "}", "for", "key", ",", "value", "in", "new_columns", ".", "items", "(", ")", ":", "# retrieve value for key (or from old key name)", "if", "key", "in", "rename_columns", ".", "keys", "(", ")", ":", "record_value", "=", "getattr", "(", "record", ",", "rename_columns", "[", "key", "]", ",", "None", ")", "else", ":", "record_value", "=", "getattr", "(", "record", ",", "key", ",", "None", ")", "# attempt to convert datatype", "if", "record_value", ":", "if", "key", "in", "retype_columns", ".", "keys", "(", ")", ":", "try", ":", "old_list", "=", "False", "if", "isinstance", "(", "record_value", ",", "bytes", ")", ":", "record_value", "=", "pickle", ".", "loads", "(", "record_value", ")", "old_list", "=", "True", "if", "retype_columns", "[", "key", "]", "==", "'boolean'", ":", "record_value", "=", "bool", "(", "record_value", ")", "elif", "retype_columns", "[", "key", "]", "==", "'string'", ":", "if", "old_list", ":", "record_value", "=", "','", ".", "join", "(", "record_value", ")", "else", ":", "record_value", "=", "str", "(", "record_value", ")", "elif", "retype_columns", "[", "key", "]", "==", "'integer'", ":", "if", "old_list", ":", "record_value", "=", "int", "(", "record_value", "[", "0", "]", ")", "else", ":", "record_value", "=", "int", "(", "record_value", ")", "elif", "retype_columns", "[", "key", "]", "==", "'float'", ":", "if", "old_list", ":", "record_value", "=", "int", "(", "record_value", "[", "0", "]", ")", "else", ":", "record_value", "=", "float", "(", "record_value", ")", "elif", "retype_columns", "[", "key", "]", "==", "'list'", ":", "if", "isinstance", "(", "record_value", ",", "str", ")", ":", "record_value", "=", "pickle", ".", "dumps", "(", "record_value", ".", "split", "(", "','", ")", ")", "else", ":", "record_value", "=", "pickle", ".", "dumps", "(", "[", 
"record_value", "]", ")", "except", ":", "record_value", "=", "None", "# attempt to resize string data ", "if", "key", "in", "resize_columns", ".", "keys", "(", ")", ":", "max_length", "=", "resize_columns", "[", "key", "]", "try", ":", "if", "len", "(", "record_value", ")", ">", "max_length", ":", "record_value", "=", "record_value", "[", "0", ":", "max_length", "]", "except", ":", "record_value", "=", "None", "insert_kwargs", "[", "key", "]", "=", "record_value", "return", "insert_kwargs" ]
a helper method for constructing the insert kwargs for a record
[ "a", "helper", "method", "for", "constructing", "the", "insert", "kwargs", "for", "a", "record" ]
52949ece35e72e3cc308f54d9ffa6bfbd96805b8
https://github.com/collectiveacuity/labPack/blob/52949ece35e72e3cc308f54d9ffa6bfbd96805b8/labpack/databases/sql.py#L441-L499
245,572
abusque/qng
qng/generator.py
QuebNameGenerator.generate
def generate(self, gender=None, part=None, snake_case=False, weighted=False): """Generate a Queb name. :param str gender: Gender of name to generate, one of 'male' or 'female'. If not specified, either gender can be generated. (optional) :param str part: Part of the name to generate, one of 'first' or 'last'. If not specified, full names are generated. (optional) :param bool snake_case: If True, generate a name in "snake_case" format, also stripping diacritics if any. (default: False) :param bool weighted: If True, generate names according to their relative popularity. (default: False) :return str: The generated name. """ if weighted: get_random_name = self._get_weighted_random_name else: get_random_name = self._get_random_name if gender == 'male': first_names = self._male_names elif gender == 'female': first_names = self._female_names else: first_names = self._names name = '' surname = '' if part == 'first': name = get_random_name(first_names) elif part == 'last': surname = get_random_name(self._surnames) else: name = get_random_name(first_names) surname = get_random_name(self._surnames) return self._format_name(name, surname, snake_case=snake_case)
python
def generate(self, gender=None, part=None, snake_case=False, weighted=False): """Generate a Queb name. :param str gender: Gender of name to generate, one of 'male' or 'female'. If not specified, either gender can be generated. (optional) :param str part: Part of the name to generate, one of 'first' or 'last'. If not specified, full names are generated. (optional) :param bool snake_case: If True, generate a name in "snake_case" format, also stripping diacritics if any. (default: False) :param bool weighted: If True, generate names according to their relative popularity. (default: False) :return str: The generated name. """ if weighted: get_random_name = self._get_weighted_random_name else: get_random_name = self._get_random_name if gender == 'male': first_names = self._male_names elif gender == 'female': first_names = self._female_names else: first_names = self._names name = '' surname = '' if part == 'first': name = get_random_name(first_names) elif part == 'last': surname = get_random_name(self._surnames) else: name = get_random_name(first_names) surname = get_random_name(self._surnames) return self._format_name(name, surname, snake_case=snake_case)
[ "def", "generate", "(", "self", ",", "gender", "=", "None", ",", "part", "=", "None", ",", "snake_case", "=", "False", ",", "weighted", "=", "False", ")", ":", "if", "weighted", ":", "get_random_name", "=", "self", ".", "_get_weighted_random_name", "else", ":", "get_random_name", "=", "self", ".", "_get_random_name", "if", "gender", "==", "'male'", ":", "first_names", "=", "self", ".", "_male_names", "elif", "gender", "==", "'female'", ":", "first_names", "=", "self", ".", "_female_names", "else", ":", "first_names", "=", "self", ".", "_names", "name", "=", "''", "surname", "=", "''", "if", "part", "==", "'first'", ":", "name", "=", "get_random_name", "(", "first_names", ")", "elif", "part", "==", "'last'", ":", "surname", "=", "get_random_name", "(", "self", ".", "_surnames", ")", "else", ":", "name", "=", "get_random_name", "(", "first_names", ")", "surname", "=", "get_random_name", "(", "self", ".", "_surnames", ")", "return", "self", ".", "_format_name", "(", "name", ",", "surname", ",", "snake_case", "=", "snake_case", ")" ]
Generate a Queb name. :param str gender: Gender of name to generate, one of 'male' or 'female'. If not specified, either gender can be generated. (optional) :param str part: Part of the name to generate, one of 'first' or 'last'. If not specified, full names are generated. (optional) :param bool snake_case: If True, generate a name in "snake_case" format, also stripping diacritics if any. (default: False) :param bool weighted: If True, generate names according to their relative popularity. (default: False) :return str: The generated name.
[ "Generate", "a", "Queb", "name", "." ]
93d2efd637b2a6bba7d3872fb9ff2bb3fc5c979d
https://github.com/abusque/qng/blob/93d2efd637b2a6bba7d3872fb9ff2bb3fc5c979d/qng/generator.py#L30-L69
245,573
abusque/qng
qng/generator.py
QuebNameGenerator._read_name_file
def _read_name_file(self, filename): """Read a name file from the data directory :param filename: Name of the file to read. :return: A list of name entries. """ file_path = os.path.join(self._DATA_DIR, filename) with open(file_path) as f: names = json.load(f) return names
python
def _read_name_file(self, filename): """Read a name file from the data directory :param filename: Name of the file to read. :return: A list of name entries. """ file_path = os.path.join(self._DATA_DIR, filename) with open(file_path) as f: names = json.load(f) return names
[ "def", "_read_name_file", "(", "self", ",", "filename", ")", ":", "file_path", "=", "os", ".", "path", ".", "join", "(", "self", ".", "_DATA_DIR", ",", "filename", ")", "with", "open", "(", "file_path", ")", "as", "f", ":", "names", "=", "json", ".", "load", "(", "f", ")", "return", "names" ]
Read a name file from the data directory :param filename: Name of the file to read. :return: A list of name entries.
[ "Read", "a", "name", "file", "from", "the", "data", "directory" ]
93d2efd637b2a6bba7d3872fb9ff2bb3fc5c979d
https://github.com/abusque/qng/blob/93d2efd637b2a6bba7d3872fb9ff2bb3fc5c979d/qng/generator.py#L71-L81
245,574
abusque/qng
qng/generator.py
QuebNameGenerator._get_names
def _get_names(self): """Get the list of first names. :return: A list of first name entries. """ names = self._read_name_file('names.json') names = self._compute_weights(names) return names
python
def _get_names(self): """Get the list of first names. :return: A list of first name entries. """ names = self._read_name_file('names.json') names = self._compute_weights(names) return names
[ "def", "_get_names", "(", "self", ")", ":", "names", "=", "self", ".", "_read_name_file", "(", "'names.json'", ")", "names", "=", "self", ".", "_compute_weights", "(", "names", ")", "return", "names" ]
Get the list of first names. :return: A list of first name entries.
[ "Get", "the", "list", "of", "first", "names", "." ]
93d2efd637b2a6bba7d3872fb9ff2bb3fc5c979d
https://github.com/abusque/qng/blob/93d2efd637b2a6bba7d3872fb9ff2bb3fc5c979d/qng/generator.py#L83-L91
245,575
abusque/qng
qng/generator.py
QuebNameGenerator._get_surnames
def _get_surnames(self): """Get the list of surnames. :return: A list of surname entries. """ names = self._read_name_file('surnames.json') names = self._compute_weights(names) return names
python
def _get_surnames(self): """Get the list of surnames. :return: A list of surname entries. """ names = self._read_name_file('surnames.json') names = self._compute_weights(names) return names
[ "def", "_get_surnames", "(", "self", ")", ":", "names", "=", "self", ".", "_read_name_file", "(", "'surnames.json'", ")", "names", "=", "self", ".", "_compute_weights", "(", "names", ")", "return", "names" ]
Get the list of surnames. :return: A list of surname entries.
[ "Get", "the", "list", "of", "surnames", "." ]
93d2efd637b2a6bba7d3872fb9ff2bb3fc5c979d
https://github.com/abusque/qng/blob/93d2efd637b2a6bba7d3872fb9ff2bb3fc5c979d/qng/generator.py#L111-L119
245,576
abusque/qng
qng/generator.py
QuebNameGenerator._get_random_name
def _get_random_name(name_list): """Get a random name from a given list. The choice of the name is fully random. :param name_list: The list of names from which to pick. :return str: A randomly chosen name. """ length = len(name_list) index = random.randrange(length) return name_list[index]['name']
python
def _get_random_name(name_list): """Get a random name from a given list. The choice of the name is fully random. :param name_list: The list of names from which to pick. :return str: A randomly chosen name. """ length = len(name_list) index = random.randrange(length) return name_list[index]['name']
[ "def", "_get_random_name", "(", "name_list", ")", ":", "length", "=", "len", "(", "name_list", ")", "index", "=", "random", ".", "randrange", "(", "length", ")", "return", "name_list", "[", "index", "]", "[", "'name'", "]" ]
Get a random name from a given list. The choice of the name is fully random. :param name_list: The list of names from which to pick. :return str: A randomly chosen name.
[ "Get", "a", "random", "name", "from", "a", "given", "list", "." ]
93d2efd637b2a6bba7d3872fb9ff2bb3fc5c979d
https://github.com/abusque/qng/blob/93d2efd637b2a6bba7d3872fb9ff2bb3fc5c979d/qng/generator.py#L134-L145
245,577
abusque/qng
qng/generator.py
QuebNameGenerator._get_weighted_random_name
def _get_weighted_random_name(name_list): """Get a random name from a given list, according to its frequency. The choice of the name is random, but weighted in proportion to the relative frequency or popularity of each name in the list. If one name is twice as popular as another one, then it is twice as likely to get chosen. :param name_list: The list of names from which to pick. :return str: A randomly chosen name. """ total_weight = name_list[-1]['weight_high'] random_weight = random.randrange(total_weight + 1) left = 0 right = len(name_list) - 1 while left <= right: index = (left + right) // 2 entry = name_list[index] if random_weight > entry['weight_high']: left = index + 1 elif random_weight < entry['weight_low']: right = index - 1 else: return entry['name']
python
def _get_weighted_random_name(name_list): """Get a random name from a given list, according to its frequency. The choice of the name is random, but weighted in proportion to the relative frequency or popularity of each name in the list. If one name is twice as popular as another one, then it is twice as likely to get chosen. :param name_list: The list of names from which to pick. :return str: A randomly chosen name. """ total_weight = name_list[-1]['weight_high'] random_weight = random.randrange(total_weight + 1) left = 0 right = len(name_list) - 1 while left <= right: index = (left + right) // 2 entry = name_list[index] if random_weight > entry['weight_high']: left = index + 1 elif random_weight < entry['weight_low']: right = index - 1 else: return entry['name']
[ "def", "_get_weighted_random_name", "(", "name_list", ")", ":", "total_weight", "=", "name_list", "[", "-", "1", "]", "[", "'weight_high'", "]", "random_weight", "=", "random", ".", "randrange", "(", "total_weight", "+", "1", ")", "left", "=", "0", "right", "=", "len", "(", "name_list", ")", "-", "1", "while", "left", "<=", "right", ":", "index", "=", "(", "left", "+", "right", ")", "//", "2", "entry", "=", "name_list", "[", "index", "]", "if", "random_weight", ">", "entry", "[", "'weight_high'", "]", ":", "left", "=", "index", "+", "1", "elif", "random_weight", "<", "entry", "[", "'weight_low'", "]", ":", "right", "=", "index", "-", "1", "else", ":", "return", "entry", "[", "'name'", "]" ]
Get a random name from a given list, according to its frequency. The choice of the name is random, but weighted in proportion to the relative frequency or popularity of each name in the list. If one name is twice as popular as another one, then it is twice as likely to get chosen. :param name_list: The list of names from which to pick. :return str: A randomly chosen name.
[ "Get", "a", "random", "name", "from", "a", "given", "list", "according", "to", "its", "frequency", "." ]
93d2efd637b2a6bba7d3872fb9ff2bb3fc5c979d
https://github.com/abusque/qng/blob/93d2efd637b2a6bba7d3872fb9ff2bb3fc5c979d/qng/generator.py#L148-L174
245,578
abusque/qng
qng/generator.py
QuebNameGenerator._format_name
def _format_name(self, name, surname, snake_case=False): """Format a first name and a surname into a cohesive string. Note that either name or surname can be empty strings, and formatting will still succeed. :param str name: A first name. :param str surname: A surname. :param bool snake_case: If True, format the name as "snake_case", also stripping diacritics if any. (default: False) :return str: The formatted name. """ if not name or not surname: sep = '' elif snake_case: sep = '_' else: sep = ' ' if snake_case: name = self._snakify_name(name) surname = self._snakify_name(surname) disp_name = '{}{}{}'.format(name, sep, surname) return disp_name
python
def _format_name(self, name, surname, snake_case=False): """Format a first name and a surname into a cohesive string. Note that either name or surname can be empty strings, and formatting will still succeed. :param str name: A first name. :param str surname: A surname. :param bool snake_case: If True, format the name as "snake_case", also stripping diacritics if any. (default: False) :return str: The formatted name. """ if not name or not surname: sep = '' elif snake_case: sep = '_' else: sep = ' ' if snake_case: name = self._snakify_name(name) surname = self._snakify_name(surname) disp_name = '{}{}{}'.format(name, sep, surname) return disp_name
[ "def", "_format_name", "(", "self", ",", "name", ",", "surname", ",", "snake_case", "=", "False", ")", ":", "if", "not", "name", "or", "not", "surname", ":", "sep", "=", "''", "elif", "snake_case", ":", "sep", "=", "'_'", "else", ":", "sep", "=", "' '", "if", "snake_case", ":", "name", "=", "self", ".", "_snakify_name", "(", "name", ")", "surname", "=", "self", ".", "_snakify_name", "(", "surname", ")", "disp_name", "=", "'{}{}{}'", ".", "format", "(", "name", ",", "sep", ",", "surname", ")", "return", "disp_name" ]
Format a first name and a surname into a cohesive string. Note that either name or surname can be empty strings, and formatting will still succeed. :param str name: A first name. :param str surname: A surname. :param bool snake_case: If True, format the name as "snake_case", also stripping diacritics if any. (default: False) :return str: The formatted name.
[ "Format", "a", "first", "name", "and", "a", "surname", "into", "a", "cohesive", "string", "." ]
93d2efd637b2a6bba7d3872fb9ff2bb3fc5c979d
https://github.com/abusque/qng/blob/93d2efd637b2a6bba7d3872fb9ff2bb3fc5c979d/qng/generator.py#L176-L202
245,579
abusque/qng
qng/generator.py
QuebNameGenerator._snakify_name
def _snakify_name(self, name): """Snakify a name string. In this context, "to snakify" means to strip a name of all diacritics, convert it to lower case, and replace any spaces inside the name with hyphens. This way the name is made "machine-friendly", and ready to be combined with a second name component into a full "snake_case" name. :param str name: A name to snakify. :return str: A snakified name. """ name = self._strip_diacritics(name) name = name.lower() name = name.replace(' ', '-') return name
python
def _snakify_name(self, name): """Snakify a name string. In this context, "to snakify" means to strip a name of all diacritics, convert it to lower case, and replace any spaces inside the name with hyphens. This way the name is made "machine-friendly", and ready to be combined with a second name component into a full "snake_case" name. :param str name: A name to snakify. :return str: A snakified name. """ name = self._strip_diacritics(name) name = name.lower() name = name.replace(' ', '-') return name
[ "def", "_snakify_name", "(", "self", ",", "name", ")", ":", "name", "=", "self", ".", "_strip_diacritics", "(", "name", ")", "name", "=", "name", ".", "lower", "(", ")", "name", "=", "name", ".", "replace", "(", "' '", ",", "'-'", ")", "return", "name" ]
Snakify a name string. In this context, "to snakify" means to strip a name of all diacritics, convert it to lower case, and replace any spaces inside the name with hyphens. This way the name is made "machine-friendly", and ready to be combined with a second name component into a full "snake_case" name. :param str name: A name to snakify. :return str: A snakified name.
[ "Snakify", "a", "name", "string", "." ]
93d2efd637b2a6bba7d3872fb9ff2bb3fc5c979d
https://github.com/abusque/qng/blob/93d2efd637b2a6bba7d3872fb9ff2bb3fc5c979d/qng/generator.py#L204-L222
245,580
sassoo/goldman
goldman/utils/str_helpers.py
cmp_val_salt_hash
def cmp_val_salt_hash(val, salt, str_hash): """ Given a string, salt, & hash validate the string The salt & val will be concatented as in gen_salt_and hash() & compared to the provided hash. This will only ever work with hashes derived from gen_salt_and_hash() :param val: clear-text string :param salt: string salt :param str_hash: existing hash to compare against :return: boolean """ computed_hash = hashlib.sha256(val + salt).hexdigest() return computed_hash == str_hash
python
def cmp_val_salt_hash(val, salt, str_hash): """ Given a string, salt, & hash validate the string The salt & val will be concatented as in gen_salt_and hash() & compared to the provided hash. This will only ever work with hashes derived from gen_salt_and_hash() :param val: clear-text string :param salt: string salt :param str_hash: existing hash to compare against :return: boolean """ computed_hash = hashlib.sha256(val + salt).hexdigest() return computed_hash == str_hash
[ "def", "cmp_val_salt_hash", "(", "val", ",", "salt", ",", "str_hash", ")", ":", "computed_hash", "=", "hashlib", ".", "sha256", "(", "val", "+", "salt", ")", ".", "hexdigest", "(", ")", "return", "computed_hash", "==", "str_hash" ]
Given a string, salt, & hash validate the string The salt & val will be concatented as in gen_salt_and hash() & compared to the provided hash. This will only ever work with hashes derived from gen_salt_and_hash() :param val: clear-text string :param salt: string salt :param str_hash: existing hash to compare against :return: boolean
[ "Given", "a", "string", "salt", "&", "hash", "validate", "the", "string" ]
b72540c9ad06b5c68aadb1b4fa8cb0b716260bf2
https://github.com/sassoo/goldman/blob/b72540c9ad06b5c68aadb1b4fa8cb0b716260bf2/goldman/utils/str_helpers.py#L14-L28
245,581
sassoo/goldman
goldman/utils/str_helpers.py
gen_salt_and_hash
def gen_salt_and_hash(val=None): """ Generate a salt & hash If no string is provided then a random string will be used to hash & referred to as `val`. The salt will always be randomly generated & the hash will be a sha256 hex value of the `val` & the salt as a concatenated string. It follows the guidance here: crackstation.net/hashing-security.htm#properhashing :param val: str :return: tuple of strings (salt, hash) """ if not val: val = random_str() str_salt = random_str() str_hash = hashlib.sha256(val + str_salt).hexdigest() return str_salt, str_hash
python
def gen_salt_and_hash(val=None): """ Generate a salt & hash If no string is provided then a random string will be used to hash & referred to as `val`. The salt will always be randomly generated & the hash will be a sha256 hex value of the `val` & the salt as a concatenated string. It follows the guidance here: crackstation.net/hashing-security.htm#properhashing :param val: str :return: tuple of strings (salt, hash) """ if not val: val = random_str() str_salt = random_str() str_hash = hashlib.sha256(val + str_salt).hexdigest() return str_salt, str_hash
[ "def", "gen_salt_and_hash", "(", "val", "=", "None", ")", ":", "if", "not", "val", ":", "val", "=", "random_str", "(", ")", "str_salt", "=", "random_str", "(", ")", "str_hash", "=", "hashlib", ".", "sha256", "(", "val", "+", "str_salt", ")", ".", "hexdigest", "(", ")", "return", "str_salt", ",", "str_hash" ]
Generate a salt & hash If no string is provided then a random string will be used to hash & referred to as `val`. The salt will always be randomly generated & the hash will be a sha256 hex value of the `val` & the salt as a concatenated string. It follows the guidance here: crackstation.net/hashing-security.htm#properhashing :param val: str :return: tuple of strings (salt, hash)
[ "Generate", "a", "salt", "&", "hash" ]
b72540c9ad06b5c68aadb1b4fa8cb0b716260bf2
https://github.com/sassoo/goldman/blob/b72540c9ad06b5c68aadb1b4fa8cb0b716260bf2/goldman/utils/str_helpers.py#L31-L52
245,582
sassoo/goldman
goldman/utils/str_helpers.py
str_to_bool
def str_to_bool(val): """ Return a boolean if the string value represents one :param val: str :return: bool :raise: ValueError """ if isinstance(val, bool): return val elif val.lower() == 'true': return True elif val.lower() == 'false': return False else: raise ValueError
python
def str_to_bool(val): """ Return a boolean if the string value represents one :param val: str :return: bool :raise: ValueError """ if isinstance(val, bool): return val elif val.lower() == 'true': return True elif val.lower() == 'false': return False else: raise ValueError
[ "def", "str_to_bool", "(", "val", ")", ":", "if", "isinstance", "(", "val", ",", "bool", ")", ":", "return", "val", "elif", "val", ".", "lower", "(", ")", "==", "'true'", ":", "return", "True", "elif", "val", ".", "lower", "(", ")", "==", "'false'", ":", "return", "False", "else", ":", "raise", "ValueError" ]
Return a boolean if the string value represents one :param val: str :return: bool :raise: ValueError
[ "Return", "a", "boolean", "if", "the", "string", "value", "represents", "one" ]
b72540c9ad06b5c68aadb1b4fa8cb0b716260bf2
https://github.com/sassoo/goldman/blob/b72540c9ad06b5c68aadb1b4fa8cb0b716260bf2/goldman/utils/str_helpers.py#L67-L82
245,583
sassoo/goldman
goldman/utils/str_helpers.py
str_to_dt
def str_to_dt(val): """ Return a datetime object if the string value represents one Epoch integer or an ISO 8601 compatible string is supported. :param val: str :return: datetime :raise: ValueError """ if isinstance(val, dt): return val try: if val.isdigit(): return dt.utcfromtimestamp(float(val)) else: return dt.strptime(val, '%Y-%m-%dT%H:%M:%S.%f') except (AttributeError, TypeError): raise ValueError
python
def str_to_dt(val): """ Return a datetime object if the string value represents one Epoch integer or an ISO 8601 compatible string is supported. :param val: str :return: datetime :raise: ValueError """ if isinstance(val, dt): return val try: if val.isdigit(): return dt.utcfromtimestamp(float(val)) else: return dt.strptime(val, '%Y-%m-%dT%H:%M:%S.%f') except (AttributeError, TypeError): raise ValueError
[ "def", "str_to_dt", "(", "val", ")", ":", "if", "isinstance", "(", "val", ",", "dt", ")", ":", "return", "val", "try", ":", "if", "val", ".", "isdigit", "(", ")", ":", "return", "dt", ".", "utcfromtimestamp", "(", "float", "(", "val", ")", ")", "else", ":", "return", "dt", ".", "strptime", "(", "val", ",", "'%Y-%m-%dT%H:%M:%S.%f'", ")", "except", "(", "AttributeError", ",", "TypeError", ")", ":", "raise", "ValueError" ]
Return a datetime object if the string value represents one Epoch integer or an ISO 8601 compatible string is supported. :param val: str :return: datetime :raise: ValueError
[ "Return", "a", "datetime", "object", "if", "the", "string", "value", "represents", "one" ]
b72540c9ad06b5c68aadb1b4fa8cb0b716260bf2
https://github.com/sassoo/goldman/blob/b72540c9ad06b5c68aadb1b4fa8cb0b716260bf2/goldman/utils/str_helpers.py#L85-L105
245,584
pip-services3-python/pip-services3-components-python
pip_services3_components/log/Logger.py
Logger._format_and_write
def _format_and_write(self, level, correlation_id, error, message, *args, **kwargs): """ Formats the log message and writes it to the logger destination. :param level: a log level. :param correlation_id: (optional) transaction id to trace execution through call chain. :param error: an error object associated with this message. :param message: a human-readable message to log. :param args: arguments to parameterize the message. :param kwargs: arguments to parameterize the message. """ if message != None and len(message) > 0 and len(kwargs) > 0: message = message.format(*args, **kwargs) self._write(level, correlation_id, error, message)
python
def _format_and_write(self, level, correlation_id, error, message, *args, **kwargs): """ Formats the log message and writes it to the logger destination. :param level: a log level. :param correlation_id: (optional) transaction id to trace execution through call chain. :param error: an error object associated with this message. :param message: a human-readable message to log. :param args: arguments to parameterize the message. :param kwargs: arguments to parameterize the message. """ if message != None and len(message) > 0 and len(kwargs) > 0: message = message.format(*args, **kwargs) self._write(level, correlation_id, error, message)
[ "def", "_format_and_write", "(", "self", ",", "level", ",", "correlation_id", ",", "error", ",", "message", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "message", "!=", "None", "and", "len", "(", "message", ")", ">", "0", "and", "len", "(", "kwargs", ")", ">", "0", ":", "message", "=", "message", ".", "format", "(", "*", "args", ",", "*", "*", "kwargs", ")", "self", ".", "_write", "(", "level", ",", "correlation_id", ",", "error", ",", "message", ")" ]
Formats the log message and writes it to the logger destination. :param level: a log level. :param correlation_id: (optional) transaction id to trace execution through call chain. :param error: an error object associated with this message. :param message: a human-readable message to log. :param args: arguments to parameterize the message. :param kwargs: arguments to parameterize the message.
[ "Formats", "the", "log", "message", "and", "writes", "it", "to", "the", "logger", "destination", "." ]
1de9c1bb544cf1891111e9a5f5d67653f62c9b52
https://github.com/pip-services3-python/pip-services3-components-python/blob/1de9c1bb544cf1891111e9a5f5d67653f62c9b52/pip_services3_components/log/Logger.py#L84-L102
245,585
pip-services3-python/pip-services3-components-python
pip_services3_components/log/Logger.py
Logger.log
def log(self, level, correlation_id, error, message, *args, **kwargs): """ Logs a message at specified log level. :param level: a log level. :param correlation_id: (optional) transaction id to trace execution through call chain. :param error: an error object associated with this message. :param message: a human-readable message to log. :param args: arguments to parameterize the message. :param kwargs: arguments to parameterize the message. """ self._format_and_write(level, correlation_id, error, message, args, kwargs)
python
def log(self, level, correlation_id, error, message, *args, **kwargs): """ Logs a message at specified log level. :param level: a log level. :param correlation_id: (optional) transaction id to trace execution through call chain. :param error: an error object associated with this message. :param message: a human-readable message to log. :param args: arguments to parameterize the message. :param kwargs: arguments to parameterize the message. """ self._format_and_write(level, correlation_id, error, message, args, kwargs)
[ "def", "log", "(", "self", ",", "level", ",", "correlation_id", ",", "error", ",", "message", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "self", ".", "_format_and_write", "(", "level", ",", "correlation_id", ",", "error", ",", "message", ",", "args", ",", "kwargs", ")" ]
Logs a message at specified log level. :param level: a log level. :param correlation_id: (optional) transaction id to trace execution through call chain. :param error: an error object associated with this message. :param message: a human-readable message to log. :param args: arguments to parameterize the message. :param kwargs: arguments to parameterize the message.
[ "Logs", "a", "message", "at", "specified", "log", "level", "." ]
1de9c1bb544cf1891111e9a5f5d67653f62c9b52
https://github.com/pip-services3-python/pip-services3-components-python/blob/1de9c1bb544cf1891111e9a5f5d67653f62c9b52/pip_services3_components/log/Logger.py#L104-L120
245,586
pip-services3-python/pip-services3-components-python
pip_services3_components/log/Logger.py
Logger.error
def error(self, correlation_id, error, message, *args, **kwargs): """ Logs recoverable application error. :param correlation_id: (optional) transaction id to trace execution through call chain. :param error: an error object associated with this message. :param message: a human-readable message to log. :param args: arguments to parameterize the message. :param kwargs: arguments to parameterize the message. """ self._format_and_write(LogLevel.Error, correlation_id, error, message, args, kwargs)
python
def error(self, correlation_id, error, message, *args, **kwargs): """ Logs recoverable application error. :param correlation_id: (optional) transaction id to trace execution through call chain. :param error: an error object associated with this message. :param message: a human-readable message to log. :param args: arguments to parameterize the message. :param kwargs: arguments to parameterize the message. """ self._format_and_write(LogLevel.Error, correlation_id, error, message, args, kwargs)
[ "def", "error", "(", "self", ",", "correlation_id", ",", "error", ",", "message", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "self", ".", "_format_and_write", "(", "LogLevel", ".", "Error", ",", "correlation_id", ",", "error", ",", "message", ",", "args", ",", "kwargs", ")" ]
Logs recoverable application error. :param correlation_id: (optional) transaction id to trace execution through call chain. :param error: an error object associated with this message. :param message: a human-readable message to log. :param args: arguments to parameterize the message. :param kwargs: arguments to parameterize the message.
[ "Logs", "recoverable", "application", "error", "." ]
1de9c1bb544cf1891111e9a5f5d67653f62c9b52
https://github.com/pip-services3-python/pip-services3-components-python/blob/1de9c1bb544cf1891111e9a5f5d67653f62c9b52/pip_services3_components/log/Logger.py#L138-L152
245,587
pip-services3-python/pip-services3-components-python
pip_services3_components/log/Logger.py
Logger.warn
def warn(self, correlation_id, message, *args, **kwargs): """ Logs a warning that may or may not have a negative impact. :param correlation_id: (optional) transaction id to trace execution through call chain. :param message: a human-readable message to log. :param args: arguments to parameterize the message. :param kwargs: arguments to parameterize the message. """ self._format_and_write(LogLevel.Warn, correlation_id, None, message, args, kwargs)
python
def warn(self, correlation_id, message, *args, **kwargs): """ Logs a warning that may or may not have a negative impact. :param correlation_id: (optional) transaction id to trace execution through call chain. :param message: a human-readable message to log. :param args: arguments to parameterize the message. :param kwargs: arguments to parameterize the message. """ self._format_and_write(LogLevel.Warn, correlation_id, None, message, args, kwargs)
[ "def", "warn", "(", "self", ",", "correlation_id", ",", "message", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "self", ".", "_format_and_write", "(", "LogLevel", ".", "Warn", ",", "correlation_id", ",", "None", ",", "message", ",", "args", ",", "kwargs", ")" ]
Logs a warning that may or may not have a negative impact. :param correlation_id: (optional) transaction id to trace execution through call chain. :param message: a human-readable message to log. :param args: arguments to parameterize the message. :param kwargs: arguments to parameterize the message.
[ "Logs", "a", "warning", "that", "may", "or", "may", "not", "have", "a", "negative", "impact", "." ]
1de9c1bb544cf1891111e9a5f5d67653f62c9b52
https://github.com/pip-services3-python/pip-services3-components-python/blob/1de9c1bb544cf1891111e9a5f5d67653f62c9b52/pip_services3_components/log/Logger.py#L154-L166
245,588
pip-services3-python/pip-services3-components-python
pip_services3_components/log/Logger.py
Logger.info
def info(self, correlation_id, message, *args, **kwargs): """ Logs an important information message :param correlation_id: (optional) transaction id to trace execution through call chain. :param message: a human-readable message to log. :param args: arguments to parameterize the message. :param kwargs: arguments to parameterize the message. """ self._format_and_write(LogLevel.Info, correlation_id, None, message, args, kwargs)
python
def info(self, correlation_id, message, *args, **kwargs): """ Logs an important information message :param correlation_id: (optional) transaction id to trace execution through call chain. :param message: a human-readable message to log. :param args: arguments to parameterize the message. :param kwargs: arguments to parameterize the message. """ self._format_and_write(LogLevel.Info, correlation_id, None, message, args, kwargs)
[ "def", "info", "(", "self", ",", "correlation_id", ",", "message", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "self", ".", "_format_and_write", "(", "LogLevel", ".", "Info", ",", "correlation_id", ",", "None", ",", "message", ",", "args", ",", "kwargs", ")" ]
Logs an important information message :param correlation_id: (optional) transaction id to trace execution through call chain. :param message: a human-readable message to log. :param args: arguments to parameterize the message. :param kwargs: arguments to parameterize the message.
[ "Logs", "an", "important", "information", "message" ]
1de9c1bb544cf1891111e9a5f5d67653f62c9b52
https://github.com/pip-services3-python/pip-services3-components-python/blob/1de9c1bb544cf1891111e9a5f5d67653f62c9b52/pip_services3_components/log/Logger.py#L168-L180
245,589
pip-services3-python/pip-services3-components-python
pip_services3_components/log/Logger.py
Logger.debug
def debug(self, correlation_id, message, *args, **kwargs): """ Logs a high-level debug information for troubleshooting. :param correlation_id: (optional) transaction id to trace execution through call chain. :param message: a human-readable message to log. :param args: arguments to parameterize the message. :param kwargs: arguments to parameterize the message. """ self._format_and_write(LogLevel.Debug, correlation_id, None, message, args, kwargs)
python
def debug(self, correlation_id, message, *args, **kwargs): """ Logs a high-level debug information for troubleshooting. :param correlation_id: (optional) transaction id to trace execution through call chain. :param message: a human-readable message to log. :param args: arguments to parameterize the message. :param kwargs: arguments to parameterize the message. """ self._format_and_write(LogLevel.Debug, correlation_id, None, message, args, kwargs)
[ "def", "debug", "(", "self", ",", "correlation_id", ",", "message", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "self", ".", "_format_and_write", "(", "LogLevel", ".", "Debug", ",", "correlation_id", ",", "None", ",", "message", ",", "args", ",", "kwargs", ")" ]
Logs a high-level debug information for troubleshooting. :param correlation_id: (optional) transaction id to trace execution through call chain. :param message: a human-readable message to log. :param args: arguments to parameterize the message. :param kwargs: arguments to parameterize the message.
[ "Logs", "a", "high", "-", "level", "debug", "information", "for", "troubleshooting", "." ]
1de9c1bb544cf1891111e9a5f5d67653f62c9b52
https://github.com/pip-services3-python/pip-services3-components-python/blob/1de9c1bb544cf1891111e9a5f5d67653f62c9b52/pip_services3_components/log/Logger.py#L182-L194
245,590
pip-services3-python/pip-services3-components-python
pip_services3_components/log/Logger.py
Logger.trace
def trace(self, correlation_id, message, *args, **kwargs): """ Logs a low-level debug information for troubleshooting. :param correlation_id: (optional) transaction id to trace execution through call chain. :param message: a human-readable message to log. :param args: arguments to parameterize the message. :param kwargs: arguments to parameterize the message. """ self._format_and_write(LogLevel.Trace, correlation_id, None, message, args, kwargs)
python
def trace(self, correlation_id, message, *args, **kwargs): """ Logs a low-level debug information for troubleshooting. :param correlation_id: (optional) transaction id to trace execution through call chain. :param message: a human-readable message to log. :param args: arguments to parameterize the message. :param kwargs: arguments to parameterize the message. """ self._format_and_write(LogLevel.Trace, correlation_id, None, message, args, kwargs)
[ "def", "trace", "(", "self", ",", "correlation_id", ",", "message", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "self", ".", "_format_and_write", "(", "LogLevel", ".", "Trace", ",", "correlation_id", ",", "None", ",", "message", ",", "args", ",", "kwargs", ")" ]
Logs a low-level debug information for troubleshooting. :param correlation_id: (optional) transaction id to trace execution through call chain. :param message: a human-readable message to log. :param args: arguments to parameterize the message. :param kwargs: arguments to parameterize the message.
[ "Logs", "a", "low", "-", "level", "debug", "information", "for", "troubleshooting", "." ]
1de9c1bb544cf1891111e9a5f5d67653f62c9b52
https://github.com/pip-services3-python/pip-services3-components-python/blob/1de9c1bb544cf1891111e9a5f5d67653f62c9b52/pip_services3_components/log/Logger.py#L196-L208
245,591
django-py/django-doberman
doberman/contrib/ipware/__init__.py
check_ipv6
def check_ipv6(ip_str): """ Return True if is a valid IP v6 """ try: socket.inet_pton(socket.AF_INET6, ip_str) except socket.error: return False return True
python
def check_ipv6(ip_str): """ Return True if is a valid IP v6 """ try: socket.inet_pton(socket.AF_INET6, ip_str) except socket.error: return False return True
[ "def", "check_ipv6", "(", "ip_str", ")", ":", "try", ":", "socket", ".", "inet_pton", "(", "socket", ".", "AF_INET6", ",", "ip_str", ")", "except", "socket", ".", "error", ":", "return", "False", "return", "True" ]
Return True if is a valid IP v6
[ "Return", "True", "if", "is", "a", "valid", "IP", "v6" ]
2e5959737a1b64234ed5a179c93f96a0de1c3e5c
https://github.com/django-py/django-doberman/blob/2e5959737a1b64234ed5a179c93f96a0de1c3e5c/doberman/contrib/ipware/__init__.py#L24-L33
245,592
django-py/django-doberman
doberman/contrib/ipware/__init__.py
check_ipv4
def check_ipv4(ip_str): """ Return True if is a valid IP v4 """ try: socket.inet_pton(socket.AF_INET, ip_str) except AttributeError: try: socket.inet_aton(ip_str) except socket.error: return False return ip_str.count('.') == 3 except socket.error: return False return True
python
def check_ipv4(ip_str): """ Return True if is a valid IP v4 """ try: socket.inet_pton(socket.AF_INET, ip_str) except AttributeError: try: socket.inet_aton(ip_str) except socket.error: return False return ip_str.count('.') == 3 except socket.error: return False return True
[ "def", "check_ipv4", "(", "ip_str", ")", ":", "try", ":", "socket", ".", "inet_pton", "(", "socket", ".", "AF_INET", ",", "ip_str", ")", "except", "AttributeError", ":", "try", ":", "socket", ".", "inet_aton", "(", "ip_str", ")", "except", "socket", ".", "error", ":", "return", "False", "return", "ip_str", ".", "count", "(", "'.'", ")", "==", "3", "except", "socket", ".", "error", ":", "return", "False", "return", "True" ]
Return True if is a valid IP v4
[ "Return", "True", "if", "is", "a", "valid", "IP", "v4" ]
2e5959737a1b64234ed5a179c93f96a0de1c3e5c
https://github.com/django-py/django-doberman/blob/2e5959737a1b64234ed5a179c93f96a0de1c3e5c/doberman/contrib/ipware/__init__.py#L36-L50
245,593
SkyLothar/requests-ucloud
ucloudauth/ufile.py
UFileAuth.gen_str_to_sign
def gen_str_to_sign(self, req): """Generate string to sign using giving prepared request""" url = urlsplit(req.url) bucket_name = url.netloc.split(".", 1)[0] logger.debug(req.headers.items()) ucloud_headers = [ (k, v.strip()) for k, v in sorted(req.headers.lower_items()) if k.startswith("x-ucloud-") ] canonicalized_headers = "\n".join([ "{0}:{1}".format(k, v) for k, v in ucloud_headers ]) canonicalized_resource = "/{0}{1}".format( bucket_name, unquote(url.path) ) str_to_sign = "\n".join([ req.method, req.headers.get("content-md5", ""), req.headers.get("content-type", ""), req.headers.get("date", self._expires), canonicalized_headers + canonicalized_resource ]) return str_to_sign
python
def gen_str_to_sign(self, req): """Generate string to sign using giving prepared request""" url = urlsplit(req.url) bucket_name = url.netloc.split(".", 1)[0] logger.debug(req.headers.items()) ucloud_headers = [ (k, v.strip()) for k, v in sorted(req.headers.lower_items()) if k.startswith("x-ucloud-") ] canonicalized_headers = "\n".join([ "{0}:{1}".format(k, v) for k, v in ucloud_headers ]) canonicalized_resource = "/{0}{1}".format( bucket_name, unquote(url.path) ) str_to_sign = "\n".join([ req.method, req.headers.get("content-md5", ""), req.headers.get("content-type", ""), req.headers.get("date", self._expires), canonicalized_headers + canonicalized_resource ]) return str_to_sign
[ "def", "gen_str_to_sign", "(", "self", ",", "req", ")", ":", "url", "=", "urlsplit", "(", "req", ".", "url", ")", "bucket_name", "=", "url", ".", "netloc", ".", "split", "(", "\".\"", ",", "1", ")", "[", "0", "]", "logger", ".", "debug", "(", "req", ".", "headers", ".", "items", "(", ")", ")", "ucloud_headers", "=", "[", "(", "k", ",", "v", ".", "strip", "(", ")", ")", "for", "k", ",", "v", "in", "sorted", "(", "req", ".", "headers", ".", "lower_items", "(", ")", ")", "if", "k", ".", "startswith", "(", "\"x-ucloud-\"", ")", "]", "canonicalized_headers", "=", "\"\\n\"", ".", "join", "(", "[", "\"{0}:{1}\"", ".", "format", "(", "k", ",", "v", ")", "for", "k", ",", "v", "in", "ucloud_headers", "]", ")", "canonicalized_resource", "=", "\"/{0}{1}\"", ".", "format", "(", "bucket_name", ",", "unquote", "(", "url", ".", "path", ")", ")", "str_to_sign", "=", "\"\\n\"", ".", "join", "(", "[", "req", ".", "method", ",", "req", ".", "headers", ".", "get", "(", "\"content-md5\"", ",", "\"\"", ")", ",", "req", ".", "headers", ".", "get", "(", "\"content-type\"", ",", "\"\"", ")", ",", "req", ".", "headers", ".", "get", "(", "\"date\"", ",", "self", ".", "_expires", ")", ",", "canonicalized_headers", "+", "canonicalized_resource", "]", ")", "return", "str_to_sign" ]
Generate string to sign using giving prepared request
[ "Generate", "string", "to", "sign", "using", "giving", "prepared", "request" ]
986a81d323add8e6708367c6a31280e859210d87
https://github.com/SkyLothar/requests-ucloud/blob/986a81d323add8e6708367c6a31280e859210d87/ucloudauth/ufile.py#L112-L139
245,594
SkyLothar/requests-ucloud
ucloudauth/ufile.py
UFileAuth.fill_all_headers
def fill_all_headers(self, req): """Set content-type, content-md5, date to the request.""" url = urlsplit(req.url) content_type, __ = mimetypes.guess_type(url.path) if content_type is None: content_type = self.DEFAULT_TYPE logger.warn("can not determine mime-type for {0}".format(url.path)) if self._expires is None: # sign with url, no content-type for url req.headers.setdefault("content-type", content_type) if ( req.body is not None and req.headers.get("content-md5") is None and self._allow_empty_md5 is False ): logger.debug("calculating content-md5") content, content_md5 = utils.cal_content_md5(req.body) req.body = content req.headers["content-md5"] = content_md5 logger.debug("new content-md5 is: {0}".format(content_md5)) else: logger.debug("skip content-md5 calculation") if self._expires is None: req.headers.setdefault( "date", time.strftime(self.DATE_FMT, time.gmtime()) ) return req
python
def fill_all_headers(self, req): """Set content-type, content-md5, date to the request.""" url = urlsplit(req.url) content_type, __ = mimetypes.guess_type(url.path) if content_type is None: content_type = self.DEFAULT_TYPE logger.warn("can not determine mime-type for {0}".format(url.path)) if self._expires is None: # sign with url, no content-type for url req.headers.setdefault("content-type", content_type) if ( req.body is not None and req.headers.get("content-md5") is None and self._allow_empty_md5 is False ): logger.debug("calculating content-md5") content, content_md5 = utils.cal_content_md5(req.body) req.body = content req.headers["content-md5"] = content_md5 logger.debug("new content-md5 is: {0}".format(content_md5)) else: logger.debug("skip content-md5 calculation") if self._expires is None: req.headers.setdefault( "date", time.strftime(self.DATE_FMT, time.gmtime()) ) return req
[ "def", "fill_all_headers", "(", "self", ",", "req", ")", ":", "url", "=", "urlsplit", "(", "req", ".", "url", ")", "content_type", ",", "__", "=", "mimetypes", ".", "guess_type", "(", "url", ".", "path", ")", "if", "content_type", "is", "None", ":", "content_type", "=", "self", ".", "DEFAULT_TYPE", "logger", ".", "warn", "(", "\"can not determine mime-type for {0}\"", ".", "format", "(", "url", ".", "path", ")", ")", "if", "self", ".", "_expires", "is", "None", ":", "# sign with url, no content-type for url", "req", ".", "headers", ".", "setdefault", "(", "\"content-type\"", ",", "content_type", ")", "if", "(", "req", ".", "body", "is", "not", "None", "and", "req", ".", "headers", ".", "get", "(", "\"content-md5\"", ")", "is", "None", "and", "self", ".", "_allow_empty_md5", "is", "False", ")", ":", "logger", ".", "debug", "(", "\"calculating content-md5\"", ")", "content", ",", "content_md5", "=", "utils", ".", "cal_content_md5", "(", "req", ".", "body", ")", "req", ".", "body", "=", "content", "req", ".", "headers", "[", "\"content-md5\"", "]", "=", "content_md5", "logger", ".", "debug", "(", "\"new content-md5 is: {0}\"", ".", "format", "(", "content_md5", ")", ")", "else", ":", "logger", ".", "debug", "(", "\"skip content-md5 calculation\"", ")", "if", "self", ".", "_expires", "is", "None", ":", "req", ".", "headers", ".", "setdefault", "(", "\"date\"", ",", "time", ".", "strftime", "(", "self", ".", "DATE_FMT", ",", "time", ".", "gmtime", "(", ")", ")", ")", "return", "req" ]
Set content-type, content-md5, date to the request.
[ "Set", "content", "-", "type", "content", "-", "md5", "date", "to", "the", "request", "." ]
986a81d323add8e6708367c6a31280e859210d87
https://github.com/SkyLothar/requests-ucloud/blob/986a81d323add8e6708367c6a31280e859210d87/ucloudauth/ufile.py#L141-L171
245,595
caioariede/docker-run-build
docker_rb/utils.py
get_old_options
def get_old_options(cli, image): """ Returns Dockerfile values for CMD and Entrypoint """ return { 'cmd': dockerapi.inspect_config(cli, image, 'Cmd'), 'entrypoint': dockerapi.inspect_config(cli, image, 'Entrypoint'), }
python
def get_old_options(cli, image): """ Returns Dockerfile values for CMD and Entrypoint """ return { 'cmd': dockerapi.inspect_config(cli, image, 'Cmd'), 'entrypoint': dockerapi.inspect_config(cli, image, 'Entrypoint'), }
[ "def", "get_old_options", "(", "cli", ",", "image", ")", ":", "return", "{", "'cmd'", ":", "dockerapi", ".", "inspect_config", "(", "cli", ",", "image", ",", "'Cmd'", ")", ",", "'entrypoint'", ":", "dockerapi", ".", "inspect_config", "(", "cli", ",", "image", ",", "'Entrypoint'", ")", ",", "}" ]
Returns Dockerfile values for CMD and Entrypoint
[ "Returns", "Dockerfile", "values", "for", "CMD", "and", "Entrypoint" ]
76ca4802018a63d6778374ebdba082d6750816b2
https://github.com/caioariede/docker-run-build/blob/76ca4802018a63d6778374ebdba082d6750816b2/docker_rb/utils.py#L13-L19
245,596
caioariede/docker-run-build
docker_rb/utils.py
restore_image_options
def restore_image_options(cli, image, options): """ Restores CMD and ENTRYPOINT values of the image This is needed because we force the overwrite of ENTRYPOINT and CMD in the `run_code_in_container` function, to be able to run the code in the container, through /bin/bash. """ dockerfile = io.StringIO() dockerfile.write(u'FROM {image}\nCMD {cmd}'.format( image=image, cmd=json.dumps(options['cmd']))) if options['entrypoint']: dockerfile.write( '\nENTRYPOINT {}'.format(json.dumps(options['entrypoint']))) cli.build(tag=image, fileobj=dockerfile)
python
def restore_image_options(cli, image, options): """ Restores CMD and ENTRYPOINT values of the image This is needed because we force the overwrite of ENTRYPOINT and CMD in the `run_code_in_container` function, to be able to run the code in the container, through /bin/bash. """ dockerfile = io.StringIO() dockerfile.write(u'FROM {image}\nCMD {cmd}'.format( image=image, cmd=json.dumps(options['cmd']))) if options['entrypoint']: dockerfile.write( '\nENTRYPOINT {}'.format(json.dumps(options['entrypoint']))) cli.build(tag=image, fileobj=dockerfile)
[ "def", "restore_image_options", "(", "cli", ",", "image", ",", "options", ")", ":", "dockerfile", "=", "io", ".", "StringIO", "(", ")", "dockerfile", ".", "write", "(", "u'FROM {image}\\nCMD {cmd}'", ".", "format", "(", "image", "=", "image", ",", "cmd", "=", "json", ".", "dumps", "(", "options", "[", "'cmd'", "]", ")", ")", ")", "if", "options", "[", "'entrypoint'", "]", ":", "dockerfile", ".", "write", "(", "'\\nENTRYPOINT {}'", ".", "format", "(", "json", ".", "dumps", "(", "options", "[", "'entrypoint'", "]", ")", ")", ")", "cli", ".", "build", "(", "tag", "=", "image", ",", "fileobj", "=", "dockerfile", ")" ]
Restores CMD and ENTRYPOINT values of the image This is needed because we force the overwrite of ENTRYPOINT and CMD in the `run_code_in_container` function, to be able to run the code in the container, through /bin/bash.
[ "Restores", "CMD", "and", "ENTRYPOINT", "values", "of", "the", "image" ]
76ca4802018a63d6778374ebdba082d6750816b2
https://github.com/caioariede/docker-run-build/blob/76ca4802018a63d6778374ebdba082d6750816b2/docker_rb/utils.py#L31-L47
245,597
caioariede/docker-run-build
docker_rb/utils.py
run_code_in_container
def run_code_in_container(cli, image, code, mount, entrypoint): """ Run `code` in a container, returning its ID """ kwargs = { 'image': image, } if entrypoint: kwargs['entrypoint'] = '/bin/bash' kwargs['command'] = '-c {}'.format(quote(code)) else: kwargs['command'] = '/bin/bash -c {}'.format(quote(code)) if mount: binds = [] volumes = [] for m in mount: part = m.split(':') if len(part) == 3: pass elif len(part) == 2: part.append('rw') else: raise src, target, mode = part src = os.path.abspath(os.path.expanduser(src)) binds.append('{}:{}:{}'.format(src, target, mode)) volumes.append(target) kwargs['host_config'] = cli.create_host_config(binds=binds) kwargs['volumes'] = volumes container = cli.create_container(**kwargs) container_id = container['Id'] cli.start(container=container_id) return container_id
python
def run_code_in_container(cli, image, code, mount, entrypoint): """ Run `code` in a container, returning its ID """ kwargs = { 'image': image, } if entrypoint: kwargs['entrypoint'] = '/bin/bash' kwargs['command'] = '-c {}'.format(quote(code)) else: kwargs['command'] = '/bin/bash -c {}'.format(quote(code)) if mount: binds = [] volumes = [] for m in mount: part = m.split(':') if len(part) == 3: pass elif len(part) == 2: part.append('rw') else: raise src, target, mode = part src = os.path.abspath(os.path.expanduser(src)) binds.append('{}:{}:{}'.format(src, target, mode)) volumes.append(target) kwargs['host_config'] = cli.create_host_config(binds=binds) kwargs['volumes'] = volumes container = cli.create_container(**kwargs) container_id = container['Id'] cli.start(container=container_id) return container_id
[ "def", "run_code_in_container", "(", "cli", ",", "image", ",", "code", ",", "mount", ",", "entrypoint", ")", ":", "kwargs", "=", "{", "'image'", ":", "image", ",", "}", "if", "entrypoint", ":", "kwargs", "[", "'entrypoint'", "]", "=", "'/bin/bash'", "kwargs", "[", "'command'", "]", "=", "'-c {}'", ".", "format", "(", "quote", "(", "code", ")", ")", "else", ":", "kwargs", "[", "'command'", "]", "=", "'/bin/bash -c {}'", ".", "format", "(", "quote", "(", "code", ")", ")", "if", "mount", ":", "binds", "=", "[", "]", "volumes", "=", "[", "]", "for", "m", "in", "mount", ":", "part", "=", "m", ".", "split", "(", "':'", ")", "if", "len", "(", "part", ")", "==", "3", ":", "pass", "elif", "len", "(", "part", ")", "==", "2", ":", "part", ".", "append", "(", "'rw'", ")", "else", ":", "raise", "src", ",", "target", ",", "mode", "=", "part", "src", "=", "os", ".", "path", ".", "abspath", "(", "os", ".", "path", ".", "expanduser", "(", "src", ")", ")", "binds", ".", "append", "(", "'{}:{}:{}'", ".", "format", "(", "src", ",", "target", ",", "mode", ")", ")", "volumes", ".", "append", "(", "target", ")", "kwargs", "[", "'host_config'", "]", "=", "cli", ".", "create_host_config", "(", "binds", "=", "binds", ")", "kwargs", "[", "'volumes'", "]", "=", "volumes", "container", "=", "cli", ".", "create_container", "(", "*", "*", "kwargs", ")", "container_id", "=", "container", "[", "'Id'", "]", "cli", ".", "start", "(", "container", "=", "container_id", ")", "return", "container_id" ]
Run `code` in a container, returning its ID
[ "Run", "code", "in", "a", "container", "returning", "its", "ID" ]
76ca4802018a63d6778374ebdba082d6750816b2
https://github.com/caioariede/docker-run-build/blob/76ca4802018a63d6778374ebdba082d6750816b2/docker_rb/utils.py#L50-L91
245,598
mk-fg/unified2
unified2/parser.py
read
def read(src): 'Event generator from u2 stream.' parser, buff_agg = Parser(), '' while True: buff = parser.read(src) if not buff: break # EOF buff_agg += buff while True: buff_agg, ev = parser.process(buff_agg) if ev is None: break yield ev
python
def read(src): 'Event generator from u2 stream.' parser, buff_agg = Parser(), '' while True: buff = parser.read(src) if not buff: break # EOF buff_agg += buff while True: buff_agg, ev = parser.process(buff_agg) if ev is None: break yield ev
[ "def", "read", "(", "src", ")", ":", "parser", ",", "buff_agg", "=", "Parser", "(", ")", ",", "''", "while", "True", ":", "buff", "=", "parser", ".", "read", "(", "src", ")", "if", "not", "buff", ":", "break", "# EOF", "buff_agg", "+=", "buff", "while", "True", ":", "buff_agg", ",", "ev", "=", "parser", ".", "process", "(", "buff_agg", ")", "if", "ev", "is", "None", ":", "break", "yield", "ev" ]
Event generator from u2 stream.
[ "Event", "generator", "from", "u2", "stream", "." ]
bf6f761a805c2971a61b5339adfb0842186b39d6
https://github.com/mk-fg/unified2/blob/bf6f761a805c2971a61b5339adfb0842186b39d6/unified2/parser.py#L67-L77
245,599
PSU-OIT-ARC/django-local-settings
local_settings/__init__.py
load_and_check_settings
def load_and_check_settings(base_settings, file_name=None, section=None, base_path=None, strategy_type=INIJSONStrategy, disable=None, prompt=None, quiet=None): """Merge local settings from file with base settings, then check. Returns a new dict containing the base settings and the loaded settings. Includes: - base settings - settings from extended file(s), if any - settings from file Settings loaded from the specified file will override base settings, then the settings will be checked to ensure that all required local settings have been set. If a file name is passed: if the file exists, local settings will be loaded from it and any missing settings will be appended to it; if the file does not exist, it will be created and all settings will be added to it. If a file name isn't passed: if the ``LOCAL_SETTINGS_FILE_NAME`` environment variable is set, the specified file will be used; otherwise ``{base_path}/local.cfg`` will be used. ``base_path`` is used when ``file_name`` is relative; if it's not passed, it will be set to the current working directory. When ``prompt`` is ``True``, the user will be prompted for missing local settings. By default, the user is prompted only when running on TTY. The ``LOCAL_SETTINGS_CONFIG_PROMPT`` environment variable can be used to set ``prompt``. When ``quiet`` is ``True``, informational messages will not be printed. The ``LOCAL_SETTINGS_CONFIG_QUIET`` environment variable can be used to set ``quiet``. .. note:: When setting flags via environment variables, use a JSON value like 'true', '1', 'false', or '0'. See :meth:`.Loader.load` and :meth:`.Checker.check` for more info. 
""" environ_config = get_config_from_environ() disable = environ_config['disable'] if disable is None else disable prompt = environ_config['prompt'] if prompt is None else prompt quiet = environ_config['quiet'] if quiet is None else quiet if disable: return {} if file_name is None: file_name = get_file_name() if ':' in file_name: package, path = file_name.split(':', 1) file_name = pkg_resources.resource_filename(package, path) if not os.path.isabs(file_name): base_path = base_path or os.getcwd() file_name = os.path.normpath(os.path.join(base_path, file_name)) try: loader = Loader(file_name, section, strategy_type=strategy_type) settings, success = loader.load_and_check(base_settings, prompt) except KeyboardInterrupt: # Loading/checking of local settings was aborted with Ctrl-C. # This isn't an error, but we don't want to continue. if not quiet: printer.print_warning('\nAborted loading/checking of local settings') sys.exit(0) if loader.section: file_name = '{loader.file_name}#{loader.section}'.format(loader=loader) else: file_name = loader.file_name if not success: raise SettingsFileDidNotPassCheck(file_name) if not quiet: printer.print_success('Settings loaded successfully from {0}'.format(file_name)) return settings
python
def load_and_check_settings(base_settings, file_name=None, section=None, base_path=None, strategy_type=INIJSONStrategy, disable=None, prompt=None, quiet=None): """Merge local settings from file with base settings, then check. Returns a new dict containing the base settings and the loaded settings. Includes: - base settings - settings from extended file(s), if any - settings from file Settings loaded from the specified file will override base settings, then the settings will be checked to ensure that all required local settings have been set. If a file name is passed: if the file exists, local settings will be loaded from it and any missing settings will be appended to it; if the file does not exist, it will be created and all settings will be added to it. If a file name isn't passed: if the ``LOCAL_SETTINGS_FILE_NAME`` environment variable is set, the specified file will be used; otherwise ``{base_path}/local.cfg`` will be used. ``base_path`` is used when ``file_name`` is relative; if it's not passed, it will be set to the current working directory. When ``prompt`` is ``True``, the user will be prompted for missing local settings. By default, the user is prompted only when running on TTY. The ``LOCAL_SETTINGS_CONFIG_PROMPT`` environment variable can be used to set ``prompt``. When ``quiet`` is ``True``, informational messages will not be printed. The ``LOCAL_SETTINGS_CONFIG_QUIET`` environment variable can be used to set ``quiet``. .. note:: When setting flags via environment variables, use a JSON value like 'true', '1', 'false', or '0'. See :meth:`.Loader.load` and :meth:`.Checker.check` for more info. 
""" environ_config = get_config_from_environ() disable = environ_config['disable'] if disable is None else disable prompt = environ_config['prompt'] if prompt is None else prompt quiet = environ_config['quiet'] if quiet is None else quiet if disable: return {} if file_name is None: file_name = get_file_name() if ':' in file_name: package, path = file_name.split(':', 1) file_name = pkg_resources.resource_filename(package, path) if not os.path.isabs(file_name): base_path = base_path or os.getcwd() file_name = os.path.normpath(os.path.join(base_path, file_name)) try: loader = Loader(file_name, section, strategy_type=strategy_type) settings, success = loader.load_and_check(base_settings, prompt) except KeyboardInterrupt: # Loading/checking of local settings was aborted with Ctrl-C. # This isn't an error, but we don't want to continue. if not quiet: printer.print_warning('\nAborted loading/checking of local settings') sys.exit(0) if loader.section: file_name = '{loader.file_name}#{loader.section}'.format(loader=loader) else: file_name = loader.file_name if not success: raise SettingsFileDidNotPassCheck(file_name) if not quiet: printer.print_success('Settings loaded successfully from {0}'.format(file_name)) return settings
[ "def", "load_and_check_settings", "(", "base_settings", ",", "file_name", "=", "None", ",", "section", "=", "None", ",", "base_path", "=", "None", ",", "strategy_type", "=", "INIJSONStrategy", ",", "disable", "=", "None", ",", "prompt", "=", "None", ",", "quiet", "=", "None", ")", ":", "environ_config", "=", "get_config_from_environ", "(", ")", "disable", "=", "environ_config", "[", "'disable'", "]", "if", "disable", "is", "None", "else", "disable", "prompt", "=", "environ_config", "[", "'prompt'", "]", "if", "prompt", "is", "None", "else", "prompt", "quiet", "=", "environ_config", "[", "'quiet'", "]", "if", "quiet", "is", "None", "else", "quiet", "if", "disable", ":", "return", "{", "}", "if", "file_name", "is", "None", ":", "file_name", "=", "get_file_name", "(", ")", "if", "':'", "in", "file_name", ":", "package", ",", "path", "=", "file_name", ".", "split", "(", "':'", ",", "1", ")", "file_name", "=", "pkg_resources", ".", "resource_filename", "(", "package", ",", "path", ")", "if", "not", "os", ".", "path", ".", "isabs", "(", "file_name", ")", ":", "base_path", "=", "base_path", "or", "os", ".", "getcwd", "(", ")", "file_name", "=", "os", ".", "path", ".", "normpath", "(", "os", ".", "path", ".", "join", "(", "base_path", ",", "file_name", ")", ")", "try", ":", "loader", "=", "Loader", "(", "file_name", ",", "section", ",", "strategy_type", "=", "strategy_type", ")", "settings", ",", "success", "=", "loader", ".", "load_and_check", "(", "base_settings", ",", "prompt", ")", "except", "KeyboardInterrupt", ":", "# Loading/checking of local settings was aborted with Ctrl-C.", "# This isn't an error, but we don't want to continue.", "if", "not", "quiet", ":", "printer", ".", "print_warning", "(", "'\\nAborted loading/checking of local settings'", ")", "sys", ".", "exit", "(", "0", ")", "if", "loader", ".", "section", ":", "file_name", "=", "'{loader.file_name}#{loader.section}'", ".", "format", "(", "loader", "=", "loader", ")", "else", ":", "file_name", "=", 
"loader", ".", "file_name", "if", "not", "success", ":", "raise", "SettingsFileDidNotPassCheck", "(", "file_name", ")", "if", "not", "quiet", ":", "printer", ".", "print_success", "(", "'Settings loaded successfully from {0}'", ".", "format", "(", "file_name", ")", ")", "return", "settings" ]
Merge local settings from file with base settings, then check. Returns a new dict containing the base settings and the loaded settings. Includes: - base settings - settings from extended file(s), if any - settings from file Settings loaded from the specified file will override base settings, then the settings will be checked to ensure that all required local settings have been set. If a file name is passed: if the file exists, local settings will be loaded from it and any missing settings will be appended to it; if the file does not exist, it will be created and all settings will be added to it. If a file name isn't passed: if the ``LOCAL_SETTINGS_FILE_NAME`` environment variable is set, the specified file will be used; otherwise ``{base_path}/local.cfg`` will be used. ``base_path`` is used when ``file_name`` is relative; if it's not passed, it will be set to the current working directory. When ``prompt`` is ``True``, the user will be prompted for missing local settings. By default, the user is prompted only when running on TTY. The ``LOCAL_SETTINGS_CONFIG_PROMPT`` environment variable can be used to set ``prompt``. When ``quiet`` is ``True``, informational messages will not be printed. The ``LOCAL_SETTINGS_CONFIG_QUIET`` environment variable can be used to set ``quiet``. .. note:: When setting flags via environment variables, use a JSON value like 'true', '1', 'false', or '0'. See :meth:`.Loader.load` and :meth:`.Checker.check` for more info.
[ "Merge", "local", "settings", "from", "file", "with", "base", "settings", "then", "check", "." ]
758810fbd9411c2046a187afcac6532155cac694
https://github.com/PSU-OIT-ARC/django-local-settings/blob/758810fbd9411c2046a187afcac6532155cac694/local_settings/__init__.py#L21-L95