| column | type | notes |
|---|---|---|
| id | int32 | 0 to 252k |
| repo | string | length 7 to 55 |
| path | string | length 4 to 127 |
| func_name | string | length 1 to 88 |
| original_string | string | length 75 to 19.8k |
| language | string (1 class) | single value: `python` |
| code | string | length 75 to 19.8k; identical to `original_string` |
| code_tokens | list | tokenized form of `code` |
| docstring | string | length 3 to 17.3k |
| docstring_tokens | list | tokenized form of `docstring` |
| sha | string | length 40 |
| url | string | length 87 to 242 |

---

id: 240,100 | repo: zacernst/timed_dict | path: timed_dict/timed_dict.py | func_name: TimedDict.set_expiration | language: python

```python
def set_expiration(self, key, ignore_missing=False,
                   additional_seconds=None, seconds=None):
    '''
    Alters the expiration time for a key. If the key is not
    present, then raise an Exception unless `ignore_missing`
    is set to `True`.
    Args:
        key: The key whose expiration we are changing.
        ignore_missing (bool): If set, then return silently
            if the key does not exist. Default is `False`.
        additional_seconds (int): Add this many seconds to the
            current expiration time.
        seconds (int): Expire the key this many seconds from now.
    '''
    if key not in self.time_dict and ignore_missing:
        return
    elif key not in self.time_dict and not ignore_missing:
        raise Exception('Key missing from `TimedDict` and '
                        '`ignore_missing` is False.')
    if additional_seconds is not None:
        self.time_dict[key] += additional_seconds
    elif seconds is not None:
        self.time_dict[key] = time.time() + seconds
```
sha: 01a1d145d246832e63c2e92e11d91cac5c3f2d1e | url: https://github.com/zacernst/timed_dict/blob/01a1d145d246832e63c2e92e11d91cac5c3f2d1e/timed_dict/timed_dict.py#L198-L221
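
A minimal standalone sketch (stdlib only; `time_dict` here is an ordinary dict standing in for the `TimedDict` internals) of the bookkeeping that `set_expiration` performs: deadlines are absolute timestamps, so `additional_seconds` is plain addition and `seconds` reschedules relative to now.

```python
import time

# Expirations are stored as absolute timestamps.
time_dict = {'session': time.time() + 30}    # 'session' expires 30s from now

# additional_seconds=15: push the existing deadline back by 15s
time_dict['session'] += 15

# seconds=60: reschedule relative to "now", discarding the old deadline
time_dict['session'] = time.time() + 60

print(time_dict['session'] - time.time())    # roughly 60 seconds remaining
```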

---

id: 240,101 | repo: zacernst/timed_dict | path: timed_dict/timed_dict.py | func_name: TimedDict.expire_key | language: python

```python
def expire_key(self, key):
    '''
    Expire the key, delete the value, and call the callback function
    if one is specified.
    Args:
        key: The ``TimedDict`` key
    '''
    value = self.base_dict[key]
    del self[key]
    if self.callback is not None:
        self.callback(
            key, value, *self.callback_args, **self.callback_kwargs)
```
sha: 01a1d145d246832e63c2e92e11d91cac5c3f2d1e | url: https://github.com/zacernst/timed_dict/blob/01a1d145d246832e63c2e92e11d91cac5c3f2d1e/timed_dict/timed_dict.py#L318-L330

---

id: 240,102 | repo: pavelsof/ipalint | path: ipalint/read.py | func_name: Reader._open | language: python

```python
def _open(self, file_path=None):
    """
    Opens the file specified by the given path. Raises ValueError if there
    is a problem with opening or reading the file.
    """
    if file_path is None:
        file_path = self.file_path
    if not os.path.exists(file_path):
        raise ValueError('Could not find file: {}'.format(file_path))
    try:
        f = open(file_path, encoding='utf-8', newline='')
    except OSError as err:
        self.log.error(str(err))
        raise ValueError('Could not open file: {}'.format(file_path))
    return f
```
sha: 763e5979ede6980cbfc746b06fd007b379762eeb | url: https://github.com/pavelsof/ipalint/blob/763e5979ede6980cbfc746b06fd007b379762eeb/ipalint/read.py#L116-L133

---

id: 240,103 | repo: pavelsof/ipalint | path: ipalint/read.py | func_name: Reader.get_dialect | language: python

```python
def get_dialect(self):
    """
    Returns a Dialect named tuple or None if the dataset file comprises a
    single column of data. If the dialect is not already known, then tries
    to determine it. Raises ValueError if it fails in the latter case.
    """
    if self.is_single_col:
        return None
    if self.delimiter and self.quotechar:
        return Dialect(self.delimiter, self.quotechar,
                       True if self.escapechar is None else False,
                       self.escapechar)
    ext = os.path.basename(self.file_path).rsplit('.', maxsplit=1)
    ext = ext[1].lower() if len(ext) > 1 else None
    if ext in TSV_EXTENSIONS:
        self.delimiter = '\t'
        self.quotechar = '"'
    else:
        f = self._open()
        lines = f.read().splitlines()
        f.close()
        if lines:
            dialect = self._determine_dialect(lines)
        else:
            dialect = None
        if dialect is None:
            self.is_single_col = True
        else:
            self.delimiter = dialect.delimiter
            self.quotechar = dialect.quotechar
            self.escapechar = dialect.escapechar
    return self.get_dialect()
```
sha: 763e5979ede6980cbfc746b06fd007b379762eeb | url: https://github.com/pavelsof/ipalint/blob/763e5979ede6980cbfc746b06fd007b379762eeb/ipalint/read.py#L136-L174
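
`_determine_dialect` is not shown in this row; for intuition, the standard library's `csv.Sniffer` solves the same delimiter/quotechar detection problem. This is a stdlib sketch, not the project's actual detection code:

```python
import csv

sample = 'name,ipa\n"doe, jane",xyz\n'
dialect = csv.Sniffer().sniff(sample)   # inspects a text sample
print(dialect.delimiter)                # ','
print(dialect.quotechar)                # '"'
```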

---

id: 240,104 | repo: pavelsof/ipalint | path: ipalint/read.py | func_name: Reader._get_csv_reader | language: python

```python
def _get_csv_reader(self, f, dialect):
    """
    Returns a csv.reader for the given file handler and csv Dialect named
    tuple. If the file has a header, it already will be gone through.
    Also, if self.ipa_col is not set, an attempt will be made to infer
    which the IPA column is. ValueError would be raised otherwise.
    """
    reader = csv.reader(f,
        delimiter = dialect.delimiter,
        quotechar = dialect.quotechar,
        doublequote = dialect.doublequote,
        escapechar = dialect.escapechar)
    if self.has_header:
        header = next(reader)
        if not isinstance(self.ipa_col, int):
            self.ipa_col = self._infer_ipa_col(header)
    else:
        if not isinstance(self.ipa_col, int):
            if not self.ipa_col:
                raise ValueError('Cannot infer IPA column without header')
            try:
                self.ipa_col = int(self.ipa_col)
            except ValueError:
                raise ValueError('Cannot find column: {}'.format(self.ipa_col))
    return reader
```
sha: 763e5979ede6980cbfc746b06fd007b379762eeb | url: https://github.com/pavelsof/ipalint/blob/763e5979ede6980cbfc746b06fd007b379762eeb/ipalint/read.py#L221-L250
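
The keyword arguments forwarded to `csv.reader` above map directly onto the stdlib API; a self-contained illustration with an in-memory file:

```python
import csv
import io

f = io.StringIO('word,ipa\r\n"a, b",c\r\n')
reader = csv.reader(f, delimiter=',', quotechar='"',
                    doublequote=True, escapechar=None)
header = next(reader)   # consume the header row, as _get_csv_reader does
for row in reader:
    print(row)          # ['a, b', 'c']
```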

---

id: 240,105 | repo: pavelsof/ipalint | path: ipalint/read.py | func_name: Reader.gen_ipa_data | language: python

```python
def gen_ipa_data(self):
    """
    Generator for iterating over the IPA strings found in the dataset file.
    Yields the IPA data string paired with the respective line number.
    """
    dialect = self.get_dialect()
    f = self._open()
    try:
        if dialect:
            for res in self._gen_csv_data(f, dialect):
                yield res
        else:
            for res in self._gen_txt_data(f):
                yield res
    finally:
        f.close()
```
sha: 763e5979ede6980cbfc746b06fd007b379762eeb | url: https://github.com/pavelsof/ipalint/blob/763e5979ede6980cbfc746b06fd007b379762eeb/ipalint/read.py#L289-L306
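
The try/finally wrapped around the yield loops guarantees the file is closed even if the consumer abandons the generator early, since a generator runs its finally block when closed or garbage-collected. A minimal standalone sketch of the same pattern:

```python
import os
import tempfile

def gen_lines(path):
    f = open(path)
    try:
        for num, line in enumerate(f, start=1):
            yield num, line.rstrip('\n')
    finally:
        f.close()   # runs on exhaustion, on gen.close(), and at GC

with tempfile.NamedTemporaryFile('w', suffix='.txt', delete=False) as tmp:
    tmp.write('a\nb\n')

g = gen_lines(tmp.name)
print(next(g))   # (1, 'a')
g.close()        # abandoning the generator still triggers the finally block
os.unlink(tmp.name)
```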

---

id: 240,106 | repo: fdb/aufmachen | path: aufmachen/BeautifulSoup.py | func_name: PageElement._findAll | language: python

```python
def _findAll(self, name, attrs, text, limit, generator, **kwargs):
    "Iterates over a generator looking for things that match."
    if isinstance(name, SoupStrainer):
        strainer = name
    # (Possibly) special case some findAll*(...) searches
    elif text is None and not limit and not attrs and not kwargs:
        # findAll*(True)
        if name is True:
            return [element for element in generator()
                    if isinstance(element, Tag)]
        # findAll*('tag-name')
        elif isinstance(name, basestring):
            return [element for element in generator()
                    if isinstance(element, Tag) and
                    element.name == name]
        else:
            strainer = SoupStrainer(name, attrs, text, **kwargs)
    # Build a SoupStrainer
    else:
        strainer = SoupStrainer(name, attrs, text, **kwargs)
    results = ResultSet(strainer)
    g = generator()
    while True:
        try:
            i = g.next()
        except StopIteration:
            break
        if i:
            found = strainer.search(i)
            if found:
                results.append(found)
                if limit and len(results) >= limit:
                    break
    return results
```
sha: f2986a0cf087ac53969f82b84d872e3f1c6986f4 | url: https://github.com/fdb/aufmachen/blob/f2986a0cf087ac53969f82b84d872e3f1c6986f4/aufmachen/BeautifulSoup.py#L333-L367

---

id: 240,107 | repo: fdb/aufmachen | path: aufmachen/BeautifulSoup.py | func_name: Tag.decompose | language: python

```python
def decompose(self):
    """Recursively destroys the contents of this tree."""
    self.extract()
    if len(self.contents) == 0:
        return
    current = self.contents[0]
    while current is not None:
        next = current.next
        if isinstance(current, Tag):
            del current.contents[:]
        current.parent = None
        current.previous = None
        current.previousSibling = None
        current.next = None
        current.nextSibling = None
        current = next
```
sha: f2986a0cf087ac53969f82b84d872e3f1c6986f4 | url: https://github.com/fdb/aufmachen/blob/f2986a0cf087ac53969f82b84d872e3f1c6986f4/aufmachen/BeautifulSoup.py#L778-L793

---

id: 240,108 | repo: fdb/aufmachen | path: aufmachen/BeautifulSoup.py | func_name: BeautifulStoneSoup._smartPop | language: python

```python
def _smartPop(self, name):
    """We need to pop up to the previous tag of this type, unless
    one of this tag's nesting reset triggers comes between this
    tag and the previous tag of this type, OR unless this tag is a
    generic nesting trigger and another generic nesting trigger
    comes between this tag and the previous tag of this type.
    Examples:
     <p>Foo<b>Bar *<p>* should pop to 'p', not 'b'.
     <p>Foo<table>Bar *<p>* should pop to 'table', not 'p'.
     <p>Foo<table><tr>Bar *<p>* should pop to 'tr', not 'p'.
     <li><ul><li> *<li>* should pop to 'ul', not the first 'li'.
     <tr><table><tr> *<tr>* should pop to 'table', not the first 'tr'
     <td><tr><td> *<td>* should pop to 'tr', not the first 'td'
    """
    nestingResetTriggers = self.NESTABLE_TAGS.get(name)
    isNestable = nestingResetTriggers != None
    isResetNesting = self.RESET_NESTING_TAGS.has_key(name)
    popTo = None
    inclusive = True
    for i in range(len(self.tagStack)-1, 0, -1):
        p = self.tagStack[i]
        if (not p or p.name == name) and not isNestable:
            #Non-nestable tags get popped to the top or to their
            #last occurance.
            popTo = name
            break
        if (nestingResetTriggers is not None
            and p.name in nestingResetTriggers) \
            or (nestingResetTriggers is None and isResetNesting
                and self.RESET_NESTING_TAGS.has_key(p.name)):
            #If we encounter one of the nesting reset triggers
            #peculiar to this tag, or we encounter another tag
            #that causes nesting to reset, pop up to but not
            #including that tag.
            popTo = p.name
            inclusive = False
            break
        p = p.parent
    if popTo:
        self._popToTag(popTo, inclusive)
```
sha: f2986a0cf087ac53969f82b84d872e3f1c6986f4 | url: https://github.com/fdb/aufmachen/blob/f2986a0cf087ac53969f82b84d872e3f1c6986f4/aufmachen/BeautifulSoup.py#L1281-L1325

---

id: 240,109 | repo: fdb/aufmachen | path: aufmachen/BeautifulSoup.py | func_name: UnicodeDammit._subMSChar | language: python

```python
def _subMSChar(self, orig):
    """Changes a MS smart quote character to an XML or HTML
    entity."""
    sub = self.MS_CHARS.get(orig)
    if isinstance(sub, tuple):
        if self.smartQuotesTo == 'xml':
            sub = '&#x%s;' % sub[1]
        else:
            sub = '&%s;' % sub[0]
    return sub
```
sha: f2986a0cf087ac53969f82b84d872e3f1c6986f4 | url: https://github.com/fdb/aufmachen/blob/f2986a0cf087ac53969f82b84d872e3f1c6986f4/aufmachen/BeautifulSoup.py#L1800-L1809

---

id: 240,110 | repo: mkurnikov/typed-env | path: typed_env/read_dot_env.py | func_name: read_file_values | language: python

```python
def read_file_values(env_file, fail_silently=True):
    """
    Borrowed from Honcho.
    """
    env_data = {}
    try:
        with open(env_file) as f:
            content = f.read()
    except IOError:
        if fail_silently:
            logging.error("Could not read file '{0}'".format(env_file))
            return env_data
        raise
    for line in content.splitlines():
        m1 = re.match(r'\A([A-Za-z_0-9]+)=(.*)\Z', line)
        if m1:
            key, val = m1.group(1), m1.group(2)
            m2 = re.match(r"\A'(.*)'\Z", val)
            if m2:
                val = m2.group(1)
            m3 = re.match(r'\A"(.*)"\Z', val)
            if m3:
                val = re.sub(r'\\(.)', r'\1', m3.group(1))
            env_data[key] = val
    return env_data
```
sha: d0f3e947f0d5561c9fd1679d6faa9830de24d870 | url: https://github.com/mkurnikov/typed-env/blob/d0f3e947f0d5561c9fd1679d6faa9830de24d870/typed_env/read_dot_env.py#L8-L37
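
The three regexes above implement the quoting rules: single quotes are stripped but not unescaped, while double quotes are stripped and each backslash escape is collapsed to the escaped character. Applied to representative lines:

```python
import re

for line in ['PLAIN=value', "SINGLE='kept \\n literally'", 'DOUBLE="say \\"hi\\""']:
    m1 = re.match(r'\A([A-Za-z_0-9]+)=(.*)\Z', line)
    key, val = m1.group(1), m1.group(2)
    m2 = re.match(r"\A'(.*)'\Z", val)
    if m2:
        val = m2.group(1)                            # strip single quotes only
    m3 = re.match(r'\A"(.*)"\Z', val)
    if m3:
        val = re.sub(r'\\(.)', r'\1', m3.group(1))   # strip quotes and unescape
    print(key, '->', val)
```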

---

id: 240,111 | repo: edeposit/edeposit.amqp.ltp | path: src/edeposit/amqp/ltp/fn_composers.py | func_name: _get_suffix | language: python

```python
def _get_suffix(path):
    """
    Return suffix from `path`.
    ``/home/xex/somefile.txt`` --> ``txt``.
    Args:
        path (str): Full file path.
    Returns:
        str: Suffix.
    Raises:
        UserWarning: When ``/`` is detected in suffix.
    """
    suffix = os.path.basename(path).split(".")[-1]
    if "/" in suffix:
        raise UserWarning("Filename can't contain '/' in suffix (%s)!" % path)
    return suffix
```
sha: df9ac7ec6cbdbeaaeed438ca66df75ea967b6d8e | url: https://github.com/edeposit/edeposit.amqp.ltp/blob/df9ac7ec6cbdbeaaeed438ca66df75ea967b6d8e/src/edeposit/amqp/ltp/fn_composers.py#L14-L34
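
Note the split-based suffix differs from `os.path.splitext` on dotless names: a basename without a dot comes back whole. A quick standalone check (the function body above, without the UserWarning branch):

```python
import os

def get_suffix(path):
    # same logic as _get_suffix above, minus the error branch
    return os.path.basename(path).split(".")[-1]

print(get_suffix('/home/xex/somefile.txt'))    # txt
print(get_suffix('/home/xex/archive.tar.gz'))  # gz (last dot wins)
print(get_suffix('/home/xex/Makefile'))        # Makefile (no dot: whole name)
print(os.path.splitext('/home/xex/Makefile'))  # ('/home/xex/Makefile', '')
```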

---

id: 240,112 | repo: chamrc/to | path: to/layers/sru.py | func_name: SRU_Compute_CPU | language: python

```python
def SRU_Compute_CPU(activation_type, d, bidirectional=False, scale_x=1):
    """CPU version of the core SRU computation.
    Has the same interface as SRU_Compute_GPU() but is a regular Python function
    instead of a torch.autograd.Function because we don't implement backward()
    explicitly.
    """
    def sru_compute_cpu(u, x, bias, init=None, mask_h=None):
        bidir = 2 if bidirectional else 1
        length = x.size(0) if x.dim() == 3 else 1
        batch = x.size(-2)
        k = u.size(-1) // d // bidir
        if mask_h is None:
            mask_h = 1
        u = u.view(length, batch, bidir, d, k)
        x_tilde = u[..., 0]
        forget_bias, reset_bias = bias.view(2, bidir, d)
        forget = (u[..., 1] + forget_bias).sigmoid()
        reset = (u[..., 2] + reset_bias).sigmoid()
        if k == 3:
            x_prime = x.view(length, batch, bidir, d)
            x_prime = x_prime * scale_x if scale_x != 1 else x_prime
        else:
            x_prime = u[..., 3]
        h = Variable(x.data.new(length, batch, bidir, d))
        if init is None:
            c_init = Variable(x.data.new(batch, bidir, d).zero_())
        else:
            c_init = init.view(batch, bidir, d)
        c_final = []
        for di in range(bidir):
            if di == 0:
                time_seq = range(length)
            else:
                time_seq = range(length - 1, -1, -1)
            c_prev = c_init[:, di, :]
            for t in time_seq:
                c_t = (c_prev - x_tilde[t, :, di, :]) * forget[t, :, di, :] + x_tilde[t, :, di, :]
                c_prev = c_t
                if activation_type == 0:
                    g_c_t = c_t
                elif activation_type == 1:
                    g_c_t = c_t.tanh()
                elif activation_type == 2:
                    g_c_t = nn.functional.relu(c_t)
                else:
                    assert False, 'Activation type must be 0, 1, or 2, not {}'.format(activation_type)
                h[t, :, di, :] = (g_c_t * mask_h - x_prime[t, :, di, :]) * reset[t, :, di, :] + x_prime[t, :, di, :]
            c_final.append(c_t)
        return h.view(length, batch, -1), torch.stack(c_final, dim=1).view(batch, -1)
    return sru_compute_cpu
```
sha: ea1122bef08615b6c19904dadf2608e10c20c960 | url: https://github.com/chamrc/to/blob/ea1122bef08615b6c19904dadf2608e10c20c960/to/layers/sru.py#L502-L567
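
Stripped of tensor shapes, the inner loop is a first-order recurrence per channel, c_t = f_t * c_{t-1} + (1 - f_t) * x̃_t, followed by a reset-gated highway output. A scalar, dependency-free sketch of one direction (here x' is taken equal to x̃ for brevity, which the tensor code above only does when k == 3 and scale_x == 1):

```python
import math

def sigmoid(z):
    return 1.0 / (1.0 + math.exp(-z))

xs    = [0.5, -1.0, 2.0]   # x̃_t values for one channel
f_pre = [0.1,  0.3, -0.2]  # forget-gate pre-activations (u[..., 1] + bias)
r_pre = [0.0,  0.0,  0.0]  # reset-gate pre-activations  (u[..., 2] + bias)

c = 0.0                    # zero initial state, as when init is None
for x_t, fp, rp in zip(xs, f_pre, r_pre):
    f = sigmoid(fp)
    r = sigmoid(rp)
    c = (c - x_t) * f + x_t             # c_t = f*c_prev + (1 - f)*x̃_t
    h = (math.tanh(c) - x_t) * r + x_t  # activation_type == 1 (tanh)
    print(round(c, 4), round(h, 4))
```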

---

id: 240,113 | repo: brbsix/subnuker | path: subnuker.py | func_name: get_encoding | language: python

```python
def get_encoding(binary):
    """Return the encoding type."""
    try:
        from chardet import detect
    except ImportError:
        LOGGER.error("Please install the 'chardet' module")
        sys.exit(1)
    encoding = detect(binary).get('encoding')
    return 'iso-8859-1' if encoding == 'CP949' else encoding
```
sha: a94260a6e84b790a9e39e0b1793443ffd4e1f496 | url: https://github.com/brbsix/subnuker/blob/a94260a6e84b790a9e39e0b1793443ffd4e1f496/subnuker.py#L334-L345
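
`chardet.detect` takes raw bytes and returns a dict whose 'encoding' key is all `get_encoding` relies on. A small demo, assuming chardet is installed (detection is a statistical guess, so longer samples give better answers):

```python
from chardet import detect

data = ('žluťoučký kůň pěl ďábelské ódy ' * 20).encode('utf-8')
result = detect(data)
print(result)   # e.g. {'encoding': 'utf-8', 'confidence': 0.99, 'language': ''}
```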

---

id: 240,114 | repo: brbsix/subnuker | path: subnuker.py | func_name: getch | language: python

```python
def getch():
    """Request a single character input from the user."""
    if sys.platform in ['darwin', 'linux']:
        import termios
        import tty
        file_descriptor = sys.stdin.fileno()
        settings = termios.tcgetattr(file_descriptor)
        try:
            tty.setraw(file_descriptor)
            return sys.stdin.read(1)
        finally:
            termios.tcsetattr(file_descriptor, termios.TCSADRAIN, settings)
    elif sys.platform in ['cygwin', 'win32']:
        import msvcrt
        return msvcrt.getwch()
```
sha: a94260a6e84b790a9e39e0b1793443ffd4e1f496 | url: https://github.com/brbsix/subnuker/blob/a94260a6e84b790a9e39e0b1793443ffd4e1f496/subnuker.py#L348-L363

---

id: 240,115 | repo: brbsix/subnuker | path: subnuker.py | func_name: ismatch | language: python

```python
def ismatch(text, pattern):
    """Test whether text contains string or matches regex."""
    if hasattr(pattern, 'search'):
        return pattern.search(text) is not None
    else:
        return pattern in text if Config.options.case_sensitive \
            else pattern.lower() in text.lower()
```
sha: a94260a6e84b790a9e39e0b1793443ffd4e1f496 | url: https://github.com/brbsix/subnuker/blob/a94260a6e84b790a9e39e0b1793443ffd4e1f496/subnuker.py#L366-L373
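
`ismatch` duck-types its pattern: anything with a `.search` method is treated as a compiled regex, everything else as a plain substring. A standalone sketch with the `Config.options.case_sensitive` dependency replaced by a parameter:

```python
import re

def ismatch(text, pattern, case_sensitive=False):
    # duck typing: compiled regexes have .search, plain strings do not
    if hasattr(pattern, 'search'):
        return pattern.search(text) is not None
    return pattern in text if case_sensitive \
        else pattern.lower() in text.lower()

print(ismatch('Subtitles by XYZ', 'subtitles'))            # True (case folded)
print(ismatch('Subtitles by XYZ', 'subtitles', True))      # False
print(ismatch('Subtitles by XYZ', re.compile(r'by \w+')))  # True
```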

---

id: 240,116 | repo: brbsix/subnuker | path: subnuker.py | func_name: logger | language: python

```python
def logger():
    """Configure program logger."""
    scriptlogger = logging.getLogger(__program__)
    # ensure logger is not reconfigured
    if not scriptlogger.hasHandlers():
        # set log level
        scriptlogger.setLevel(logging.INFO)
        fmt = '%(name)s:%(levelname)s: %(message)s'
        # configure terminal log
        streamhandler = logging.StreamHandler()
        streamhandler.setFormatter(logging.Formatter(fmt))
        scriptlogger.addHandler(streamhandler)
```
sha: a94260a6e84b790a9e39e0b1793443ffd4e1f496 | url: https://github.com/brbsix/subnuker/blob/a94260a6e84b790a9e39e0b1793443ffd4e1f496/subnuker.py#L376-L392

---

id: 240,117 | repo: brbsix/subnuker | path: subnuker.py | func_name: pattern_logic_aeidon | language: python

```python
def pattern_logic_aeidon():
    """Return patterns to be used for searching subtitles via aeidon."""
    if Config.options.pattern_files:
        return prep_patterns(Config.options.pattern_files)
    elif Config.options.regex:
        return Config.REGEX
    else:
        return Config.TERMS
```
sha: a94260a6e84b790a9e39e0b1793443ffd4e1f496 | url: https://github.com/brbsix/subnuker/blob/a94260a6e84b790a9e39e0b1793443ffd4e1f496/subnuker.py#L491-L499

---

id: 240,118 | repo: brbsix/subnuker | path: subnuker.py | func_name: pattern_logic_srt | language: python

```python
def pattern_logic_srt():
    """Return patterns to be used for searching srt subtitles."""
    if Config.options.pattern_files and Config.options.regex:
        return prep_regex(prep_patterns(Config.options.pattern_files))
    elif Config.options.pattern_files:
        return prep_patterns(Config.options.pattern_files)
    elif Config.options.regex:
        return prep_regex(Config.REGEX)
    else:
        return Config.TERMS
```
sha: a94260a6e84b790a9e39e0b1793443ffd4e1f496 | url: https://github.com/brbsix/subnuker/blob/a94260a6e84b790a9e39e0b1793443ffd4e1f496/subnuker.py#L502-L512

---

id: 240,119 | repo: brbsix/subnuker | path: subnuker.py | func_name: prep_patterns | language: python

```python
def prep_patterns(filenames):
    """Load pattern files passed via options and return list of patterns."""
    patterns = []
    for filename in filenames:
        try:
            with open(filename) as file:
                patterns += [l.rstrip('\n') for l in file]
        except: # pylint: disable=W0702
            LOGGER.error("Unable to load pattern file '%s'" % filename)
            sys.exit(1)
    if patterns:
        # return a set to eliminate duplicates
        return set(patterns)
    else:
        LOGGER.error('No terms were loaded')
        sys.exit(1)
```
sha: a94260a6e84b790a9e39e0b1793443ffd4e1f496 | url: https://github.com/brbsix/subnuker/blob/a94260a6e84b790a9e39e0b1793443ffd4e1f496/subnuker.py#L530-L548

---

id: 240,120 | repo: brbsix/subnuker | path: subnuker.py | func_name: prep_regex | language: python

```python
def prep_regex(patterns):
    """Compile regex patterns."""
    flags = 0 if Config.options.case_sensitive else re.I
    return [re.compile(pattern, flags) for pattern in patterns]
```
sha: a94260a6e84b790a9e39e0b1793443ffd4e1f496 | url: https://github.com/brbsix/subnuker/blob/a94260a6e84b790a9e39e0b1793443ffd4e1f496/subnuker.py#L551-L556
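
The flags value is the only difference between the case-sensitive and case-insensitive runs; `re.I` is baked in at compile time, so callers never pass flags again:

```python
import re

patterns = ['opensubtitles', r'www\.\S+']
compiled = [re.compile(p, re.I) for p in patterns]   # flags would be 0 if case-sensitive
print(bool(compiled[0].search('Downloaded from OpenSubtitles')))  # True
print(bool(compiled[1].search('visit WWW.example.COM today')))    # True
```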

---

id: 240,121 | repo: brbsix/subnuker | path: subnuker.py | func_name: prerequisites | language: python

```python
def prerequisites():
    """Display information about obtaining the aeidon module."""
    url = "http://home.gna.org/gaupol/download.html"
    debian = "sudo apt-get install python3-aeidon"
    other = "python3 setup.py --user --without-gaupol clean install"
    LOGGER.error(
        "The aeidon module is missing!\n\n"
        "Try '{0}' or the appropriate command for your package manager.\n\n"
        "You can also download the tarball for gaupol (which includes "
        "aeidon) at {1}. After downloading, unpack and run '{2}'."
        .format(debian, url, other))
```
sha: a94260a6e84b790a9e39e0b1793443ffd4e1f496 | url: https://github.com/brbsix/subnuker/blob/a94260a6e84b790a9e39e0b1793443ffd4e1f496/subnuker.py#L559-L571

---

id: 240,122 | repo: brbsix/subnuker | path: subnuker.py | func_name: remove_elements | language: python

```python
def remove_elements(target, indices):
    """Remove multiple elements from a list and return result.
    This implementation is faster than the alternative below.
    Also note the creation of a new list to avoid altering the
    original. We don't have any current use for the original
    intact list, but may in the future..."""
    copied = list(target)
    for index in reversed(indices):
        del copied[index]
    return copied
```
sha: a94260a6e84b790a9e39e0b1793443ffd4e1f496 | url: https://github.com/brbsix/subnuker/blob/a94260a6e84b790a9e39e0b1793443ffd4e1f496/subnuker.py#L574-L585
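
Deleting in `reversed(indices)` keeps the earlier indices valid while later elements are removed; note this assumes `indices` is sorted ascending, or positions would shift mid-loop. A quick check:

```python
def remove_elements(target, indices):
    copied = list(target)
    for index in reversed(indices):   # delete back-to-front so indices stay valid
        del copied[index]
    return copied

letters = ['a', 'b', 'c', 'd', 'e']
print(remove_elements(letters, [0, 2, 4]))   # ['b', 'd']
print(letters)                               # original list is untouched
```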

---

id: 240,123 | repo: brbsix/subnuker | path: subnuker.py | func_name: start_aeidon | language: python

```python
def start_aeidon():
    """Prepare filenames and patterns then process subtitles with aeidon."""
    extensions = ['ass', 'srt', 'ssa', 'sub']
    Config.filenames = prep_files(Config.args, extensions)
    Config.patterns = pattern_logic_aeidon()
    for filename in Config.filenames:
        AeidonProject(filename)
```
a94260a6e84b790a9e39e0b1793443ffd4e1f496
|
https://github.com/brbsix/subnuker/blob/a94260a6e84b790a9e39e0b1793443ffd4e1f496/subnuker.py#L593-L602
|
240,124
|
brbsix/subnuker
|
subnuker.py
|
start_srt
|
def start_srt():
"""Prepare filenames and patterns then process srt subtitles."""
extensions = ['srt']
Config.filenames = prep_files(Config.args, extensions)
Config.patterns = pattern_logic_srt()
for filename in Config.filenames:
SrtProject(filename)
|
python
|
def start_srt():
"""Prepare filenames and patterns then process srt subtitles."""
extensions = ['srt']
Config.filenames = prep_files(Config.args, extensions)
Config.patterns = pattern_logic_srt()
for filename in Config.filenames:
SrtProject(filename)
|
[
"def",
"start_srt",
"(",
")",
":",
"extensions",
"=",
"[",
"'srt'",
"]",
"Config",
".",
"filenames",
"=",
"prep_files",
"(",
"Config",
".",
"args",
",",
"extensions",
")",
"Config",
".",
"patterns",
"=",
"pattern_logic_srt",
"(",
")",
"for",
"filename",
"in",
"Config",
".",
"filenames",
":",
"SrtProject",
"(",
"filename",
")"
] |
Prepare filenames and patterns then process srt subtitles.
|
[
"Prepare",
"filenames",
"and",
"patterns",
"then",
"process",
"srt",
"subtitles",
"."
] |
a94260a6e84b790a9e39e0b1793443ffd4e1f496
|
https://github.com/brbsix/subnuker/blob/a94260a6e84b790a9e39e0b1793443ffd4e1f496/subnuker.py#L605-L614
|
240,125
|
brbsix/subnuker
|
subnuker.py
|
AeidonProject.fixchars
|
def fixchars(self):
"""Replace characters or strings within subtitle file."""
for key in Config.CHARFIXES:
self.project.set_search_string(key)
self.project.set_search_replacement(Config.CHARFIXES[key])
self.project.replace_all()
|
python
|
def fixchars(self):
"""Replace characters or strings within subtitle file."""
for key in Config.CHARFIXES:
self.project.set_search_string(key)
self.project.set_search_replacement(Config.CHARFIXES[key])
self.project.replace_all()
|
[
"def",
"fixchars",
"(",
"self",
")",
":",
"for",
"key",
"in",
"Config",
".",
"CHARFIXES",
":",
"self",
".",
"project",
".",
"set_search_string",
"(",
"key",
")",
"self",
".",
"project",
".",
"set_search_replacement",
"(",
"Config",
".",
"CHARFIXES",
"[",
"key",
"]",
")",
"self",
".",
"project",
".",
"replace_all",
"(",
")"
] |
Replace characters or strings within subtitle file.
|
[
"Replace",
"characters",
"or",
"strings",
"within",
"subtitle",
"file",
"."
] |
a94260a6e84b790a9e39e0b1793443ffd4e1f496
|
https://github.com/brbsix/subnuker/blob/a94260a6e84b790a9e39e0b1793443ffd4e1f496/subnuker.py#L87-L92
|
240,126
|
brbsix/subnuker
|
subnuker.py
|
AeidonProject.open
|
def open(self):
"""Open the subtitle file into an Aeidon project."""
try:
self.project.open_main(self.filename)
except UnicodeDecodeError:
with open(self.filename, 'rb') as openfile:
encoding = get_encoding(openfile.read())
try:
self.project.open_main(self.filename, encoding)
except UnicodeDecodeError:
LOGGER.error("'%s' encountered a fatal encoding error",
self.filename)
sys.exit(1)
except: # pylint: disable=W0702
open_error(self.filename)
except: # pylint: disable=W0702
open_error(self.filename)
|
python
|
def open(self):
"""Open the subtitle file into an Aeidon project."""
try:
self.project.open_main(self.filename)
except UnicodeDecodeError:
with open(self.filename, 'rb') as openfile:
encoding = get_encoding(openfile.read())
try:
self.project.open_main(self.filename, encoding)
except UnicodeDecodeError:
LOGGER.error("'%s' encountered a fatal encoding error",
self.filename)
sys.exit(1)
except: # pylint: disable=W0702
open_error(self.filename)
except: # pylint: disable=W0702
open_error(self.filename)
|
[
"def",
"open",
"(",
"self",
")",
":",
"try",
":",
"self",
".",
"project",
".",
"open_main",
"(",
"self",
".",
"filename",
")",
"except",
"UnicodeDecodeError",
":",
"with",
"open",
"(",
"self",
".",
"filename",
",",
"'rb'",
")",
"as",
"openfile",
":",
"encoding",
"=",
"get_encoding",
"(",
"openfile",
".",
"read",
"(",
")",
")",
"try",
":",
"self",
".",
"project",
".",
"open_main",
"(",
"self",
".",
"filename",
",",
"encoding",
")",
"except",
"UnicodeDecodeError",
":",
"LOGGER",
".",
"error",
"(",
"\"'%s' encountered a fatal encoding error\"",
",",
"self",
".",
"filename",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"except",
":",
"# pylint: disable=W0702",
"open_error",
"(",
"self",
".",
"filename",
")",
"except",
":",
"# pylint: disable=W0702",
"open_error",
"(",
"self",
".",
"filename",
")"
] |
Open the subtitle file into an Aeidon project.
|
[
"Open",
"the",
"subtitle",
"file",
"into",
"an",
"Aeidon",
"project",
"."
] |
a94260a6e84b790a9e39e0b1793443ffd4e1f496
|
https://github.com/brbsix/subnuker/blob/a94260a6e84b790a9e39e0b1793443ffd4e1f496/subnuker.py#L99-L117
|
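The open() above is a layered fallback: default encoding first, a detected encoding second, and a bare except as the last resort. A minimal stand-alone sketch of the same pattern; detect_encoding is a hypothetical stand-in for the module's get_encoding helper:

def read_with_fallback(path, detect_encoding):
    # First attempt: assume UTF-8 decodes cleanly.
    try:
        with open(path, encoding='utf-8') as f:
            return f.read()
    except UnicodeDecodeError:
        # Second attempt: sniff the encoding from the raw bytes and retry.
        with open(path, 'rb') as f:
            encoding = detect_encoding(f.read())
        with open(path, encoding=encoding) as f:
            return f.read()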
240,127
|
brbsix/subnuker
|
subnuker.py
|
AeidonProject.save
|
def save(self):
"""Save subtitle file."""
try:
# ensure file is encoded properly while saving
self.project.main_file.encoding = 'utf_8'
self.project.save_main()
if self.fix:
LOGGER.info("Saved changes to '%s'", self.filename)
except: # pylint: disable=W0702
LOGGER.error("Unable to save '%s'", self.filename)
sys.exit(1)
|
python
|
def save(self):
"""Save subtitle file."""
try:
# ensure file is encoded properly while saving
self.project.main_file.encoding = 'utf_8'
self.project.save_main()
if self.fix:
LOGGER.info("Saved changes to '%s'", self.filename)
except: # pylint: disable=W0702
LOGGER.error("Unable to save '%s'", self.filename)
sys.exit(1)
|
[
"def",
"save",
"(",
"self",
")",
":",
"try",
":",
"# ensure file is encoded properly while saving",
"self",
".",
"project",
".",
"main_file",
".",
"encoding",
"=",
"'utf_8'",
"self",
".",
"project",
".",
"save_main",
"(",
")",
"if",
"self",
".",
"fix",
":",
"LOGGER",
".",
"info",
"(",
"\"Saved changes to '%s'\"",
",",
"self",
".",
"filename",
")",
"except",
":",
"# pylint: disable=W0702",
"LOGGER",
".",
"error",
"(",
"\"Unable to save '%s'\"",
",",
"self",
".",
"filename",
")",
"sys",
".",
"exit",
"(",
"1",
")"
] |
Save subtitle file.
|
[
"Save",
"subtitle",
"file",
"."
] |
a94260a6e84b790a9e39e0b1793443ffd4e1f496
|
https://github.com/brbsix/subnuker/blob/a94260a6e84b790a9e39e0b1793443ffd4e1f496/subnuker.py#L146-L156
|
240,128
|
brbsix/subnuker
|
subnuker.py
|
AeidonProject.search
|
def search(self):
"""Search srt in project for cells matching list of terms."""
matches = []
for pattern in Config.patterns:
matches += self.termfinder(pattern)
return sorted(set(matches), key=int)
|
python
|
def search(self):
"""Search srt in project for cells matching list of terms."""
matches = []
for pattern in Config.patterns:
matches += self.termfinder(pattern)
return sorted(set(matches), key=int)
|
[
"def",
"search",
"(",
"self",
")",
":",
"matches",
"=",
"[",
"]",
"for",
"pattern",
"in",
"Config",
".",
"patterns",
":",
"matches",
"+=",
"self",
".",
"termfinder",
"(",
"pattern",
")",
"return",
"sorted",
"(",
"set",
"(",
"matches",
")",
",",
"key",
"=",
"int",
")"
] |
Search srt in project for cells matching list of terms.
|
[
"Search",
"srt",
"in",
"project",
"for",
"cells",
"matching",
"list",
"of",
"terms",
"."
] |
a94260a6e84b790a9e39e0b1793443ffd4e1f496
|
https://github.com/brbsix/subnuker/blob/a94260a6e84b790a9e39e0b1793443ffd4e1f496/subnuker.py#L158-L165
|
240,129
|
brbsix/subnuker
|
subnuker.py
|
AeidonProject.termfinder
|
def termfinder(self, pattern):
"""Search srt in project for cells matching term."""
if Config.options.regex:
flags = re.M | re.S | \
(0 if Config.options.case_sensitive else re.I)
self.project.set_search_regex(
pattern, flags=flags)
else:
self.project.set_search_string(
pattern, ignore_case=not Config.options.case_sensitive)
matches = []
while True:
try:
if matches:
last = matches[-1]
new = self.project.find_next(last + 1)[0]
if new != last and new > last:
matches.append(new)
else:
break
else:
matches.append(self.project.find_next()[0])
except StopIteration:
break
return matches
|
python
|
def termfinder(self, pattern):
"""Search srt in project for cells matching term."""
if Config.options.regex:
flags = re.M | re.S | \
(0 if Config.options.case_sensitive else re.I)
self.project.set_search_regex(
pattern, flags=flags)
else:
self.project.set_search_string(
pattern, ignore_case=not Config.options.case_sensitive)
matches = []
while True:
try:
if matches:
last = matches[-1]
new = self.project.find_next(last + 1)[0]
if new != last and new > last:
matches.append(new)
else:
break
else:
matches.append(self.project.find_next()[0])
except StopIteration:
break
return matches
|
[
"def",
"termfinder",
"(",
"self",
",",
"pattern",
")",
":",
"if",
"Config",
".",
"options",
".",
"regex",
":",
"flags",
"=",
"re",
".",
"M",
"|",
"re",
".",
"S",
"|",
"(",
"0",
"if",
"Config",
".",
"options",
".",
"case_sensitive",
"else",
"re",
".",
"I",
")",
"self",
".",
"project",
".",
"set_search_regex",
"(",
"pattern",
",",
"flags",
"=",
"flags",
")",
"else",
":",
"self",
".",
"project",
".",
"set_search_string",
"(",
"pattern",
",",
"ignore_case",
"=",
"not",
"Config",
".",
"options",
".",
"case_sensitive",
")",
"matches",
"=",
"[",
"]",
"while",
"True",
":",
"try",
":",
"if",
"matches",
":",
"last",
"=",
"matches",
"[",
"-",
"1",
"]",
"new",
"=",
"self",
".",
"project",
".",
"find_next",
"(",
"last",
"+",
"1",
")",
"[",
"0",
"]",
"if",
"new",
"!=",
"last",
"and",
"new",
">",
"last",
":",
"matches",
".",
"append",
"(",
"new",
")",
"else",
":",
"break",
"else",
":",
"matches",
".",
"append",
"(",
"self",
".",
"project",
".",
"find_next",
"(",
")",
"[",
"0",
"]",
")",
"except",
"StopIteration",
":",
"break",
"return",
"matches"
] |
Search srt in project for cells matching term.
|
[
"Search",
"srt",
"in",
"project",
"for",
"cells",
"matching",
"term",
"."
] |
a94260a6e84b790a9e39e0b1793443ffd4e1f496
|
https://github.com/brbsix/subnuker/blob/a94260a6e84b790a9e39e0b1793443ffd4e1f496/subnuker.py#L167-L195
|
240,130
|
brbsix/subnuker
|
subnuker.py
|
SrtProject.fixchars
|
def fixchars(self, text):
"""Find and replace problematic characters."""
keys = ''.join(Config.CHARFIXES.keys())
values = ''.join(Config.CHARFIXES.values())
fixed = text.translate(str.maketrans(keys, values))
if fixed != text:
self.modified = True
return fixed
|
python
|
def fixchars(self, text):
"""Find and replace problematic characters."""
keys = ''.join(Config.CHARFIXES.keys())
values = ''.join(Config.CHARFIXES.values())
fixed = text.translate(str.maketrans(keys, values))
if fixed != text:
self.modified = True
return fixed
|
[
"def",
"fixchars",
"(",
"self",
",",
"text",
")",
":",
"keys",
"=",
"''",
".",
"join",
"(",
"Config",
".",
"CHARFIXES",
".",
"keys",
"(",
")",
")",
"values",
"=",
"''",
".",
"join",
"(",
"Config",
".",
"CHARFIXES",
".",
"values",
"(",
")",
")",
"fixed",
"=",
"text",
".",
"translate",
"(",
"str",
".",
"maketrans",
"(",
"keys",
",",
"values",
")",
")",
"if",
"fixed",
"!=",
"text",
":",
"self",
".",
"modified",
"=",
"True",
"return",
"fixed"
] |
Find and replace problematic characters.
|
[
"Find",
"and",
"replace",
"problematic",
"characters",
"."
] |
a94260a6e84b790a9e39e0b1793443ffd4e1f496
|
https://github.com/brbsix/subnuker/blob/a94260a6e84b790a9e39e0b1793443ffd4e1f496/subnuker.py#L229-L236
|
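fixchars above relies on str.translate with a one-to-one table, so the CHARFIXES keys and values must pair up character for character. A small sketch with an invented mapping:

charfixes = {'`': "'", '|': 'l'}  # hypothetical CHARFIXES-style dict
table = str.maketrans(''.join(charfixes.keys()), ''.join(charfixes.values()))
print('it`s a fi|e'.translate(table))  # it's a file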
240,131
|
brbsix/subnuker
|
subnuker.py
|
SrtProject.prompt
|
def prompt(self, matches):
"""Prompt user to remove cells from subtitle file."""
if Config.options.autoyes:
return matches
deletions = []
for match in matches:
os.system('clear')
print(self.cells[match])
print('----------------------------------------')
print("Delete cell %s of '%s'?" % (str(match + 1), self.filename))
response = getch().lower()
if response == 'y':
os.system('clear')
deletions.append(match)
elif response == 'n':
os.system('clear')
else:
if deletions or self.modified:
LOGGER.warning("Not saving changes made to '%s'",
self.filename)
sys.exit(0)
return deletions
|
python
|
def prompt(self, matches):
"""Prompt user to remove cells from subtitle file."""
if Config.options.autoyes:
return matches
deletions = []
for match in matches:
os.system('clear')
print(self.cells[match])
print('----------------------------------------')
print("Delete cell %s of '%s'?" % (str(match + 1), self.filename))
response = getch().lower()
if response == 'y':
os.system('clear')
deletions.append(match)
elif response == 'n':
os.system('clear')
else:
if deletions or self.modified:
LOGGER.warning("Not saving changes made to '%s'",
self.filename)
sys.exit(0)
return deletions
|
[
"def",
"prompt",
"(",
"self",
",",
"matches",
")",
":",
"if",
"Config",
".",
"options",
".",
"autoyes",
":",
"return",
"matches",
"deletions",
"=",
"[",
"]",
"for",
"match",
"in",
"matches",
":",
"os",
".",
"system",
"(",
"'clear'",
")",
"print",
"(",
"self",
".",
"cells",
"[",
"match",
"]",
")",
"print",
"(",
"'----------------------------------------'",
")",
"print",
"(",
"\"Delete cell %s of '%s'?\"",
"%",
"(",
"str",
"(",
"match",
"+",
"1",
")",
",",
"self",
".",
"filename",
")",
")",
"response",
"=",
"getch",
"(",
")",
".",
"lower",
"(",
")",
"if",
"response",
"==",
"'y'",
":",
"os",
".",
"system",
"(",
"'clear'",
")",
"deletions",
".",
"append",
"(",
"match",
")",
"elif",
"response",
"==",
"'n'",
":",
"os",
".",
"system",
"(",
"'clear'",
")",
"else",
":",
"if",
"deletions",
"or",
"self",
".",
"modified",
":",
"LOGGER",
".",
"warning",
"(",
"\"Not saving changes made to '%s'\"",
",",
"self",
".",
"filename",
")",
"sys",
".",
"exit",
"(",
"0",
")",
"return",
"deletions"
] |
Prompt user to remove cells from subtitle file.
|
[
"Prompt",
"user",
"to",
"remove",
"cells",
"from",
"subtitle",
"file",
"."
] |
a94260a6e84b790a9e39e0b1793443ffd4e1f496
|
https://github.com/brbsix/subnuker/blob/a94260a6e84b790a9e39e0b1793443ffd4e1f496/subnuker.py#L256-L281
|
240,132
|
brbsix/subnuker
|
subnuker.py
|
SrtProject.renumber
|
def renumber(self):
"""Re-number cells."""
num = 0
for cell in self.cells:
cell_split = cell.splitlines()
if len(cell_split) >= 2:
num += 1
cell_split[0] = str(num)
yield '\n'.join(cell_split)
|
python
|
def renumber(self):
"""Re-number cells."""
num = 0
for cell in self.cells:
cell_split = cell.splitlines()
if len(cell_split) >= 2:
num += 1
cell_split[0] = str(num)
yield '\n'.join(cell_split)
|
[
"def",
"renumber",
"(",
"self",
")",
":",
"num",
"=",
"0",
"for",
"cell",
"in",
"self",
".",
"cells",
":",
"cell_split",
"=",
"cell",
".",
"splitlines",
"(",
")",
"if",
"len",
"(",
"cell_split",
")",
">=",
"2",
":",
"num",
"+=",
"1",
"cell_split",
"[",
"0",
"]",
"=",
"str",
"(",
"num",
")",
"yield",
"'\\n'",
".",
"join",
"(",
"cell_split",
")"
] |
Re-number cells.
|
[
"Re",
"-",
"number",
"cells",
"."
] |
a94260a6e84b790a9e39e0b1793443ffd4e1f496
|
https://github.com/brbsix/subnuker/blob/a94260a6e84b790a9e39e0b1793443ffd4e1f496/subnuker.py#L283-L292
|
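Note that the yield in renumber() sits outside the if, so cells with fewer than two lines pass through unchanged rather than being dropped. A self-contained sketch with fabricated cells:

def renumber(cells):
    # Cells with at least two lines get a fresh index; others pass through.
    num = 0
    for cell in cells:
        cell_split = cell.splitlines()
        if len(cell_split) >= 2:
            num += 1
            cell_split[0] = str(num)
        yield '\n'.join(cell_split)

cells = ['7\n00:00:01 --> 00:00:02\nHello', '9\n00:00:03 --> 00:00:04\nWorld']
print([c.splitlines()[0] for c in renumber(cells)])  # ['1', '2']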
240,133
|
brbsix/subnuker
|
subnuker.py
|
SrtProject.save
|
def save(self):
"""Format and save cells."""
# re-number cells
self.cells = list(self.renumber())
# add a newline to the last line if necessary
if not self.cells[-1].endswith('\n'):
self.cells[-1] += '\n'
    # save the rejoined list of cells
with open(self.filename, 'w') as file_open:
file_open.write('\n\n'.join(self.cells))
|
python
|
def save(self):
"""Format and save cells."""
# re-number cells
self.cells = list(self.renumber())
# add a newline to the last line if necessary
if not self.cells[-1].endswith('\n'):
self.cells[-1] += '\n'
    # save the rejoined list of cells
with open(self.filename, 'w') as file_open:
file_open.write('\n\n'.join(self.cells))
|
[
"def",
"save",
"(",
"self",
")",
":",
"# re-number cells",
"self",
".",
"cells",
"=",
"list",
"(",
"self",
".",
"renumber",
"(",
")",
")",
"# add a newline to the last line if necessary",
"if",
"not",
"self",
".",
"cells",
"[",
"-",
"1",
"]",
".",
"endswith",
"(",
"'\\n'",
")",
":",
"self",
".",
"cells",
"[",
"-",
"1",
"]",
"+=",
"'\\n'",
"# save the rejoined the list of cells",
"with",
"open",
"(",
"self",
".",
"filename",
",",
"'w'",
")",
"as",
"file_open",
":",
"file_open",
".",
"write",
"(",
"'\\n\\n'",
".",
"join",
"(",
"self",
".",
"cells",
")",
")"
] |
Format and save cells.
|
[
"Format",
"and",
"save",
"cells",
"."
] |
a94260a6e84b790a9e39e0b1793443ffd4e1f496
|
https://github.com/brbsix/subnuker/blob/a94260a6e84b790a9e39e0b1793443ffd4e1f496/subnuker.py#L294-L306
|
240,134
|
brbsix/subnuker
|
subnuker.py
|
SrtProject.search
|
def search(self):
"""Return list of cells to be removed."""
matches = []
for index, cell in enumerate(self.cells):
for pattern in Config.patterns:
if ismatch(cell, pattern):
matches.append(index)
break
return matches
|
python
|
def search(self):
"""Return list of cells to be removed."""
matches = []
for index, cell in enumerate(self.cells):
for pattern in Config.patterns:
if ismatch(cell, pattern):
matches.append(index)
break
return matches
|
[
"def",
"search",
"(",
"self",
")",
":",
"matches",
"=",
"[",
"]",
"for",
"index",
",",
"cell",
"in",
"enumerate",
"(",
"self",
".",
"cells",
")",
":",
"for",
"pattern",
"in",
"Config",
".",
"patterns",
":",
"if",
"ismatch",
"(",
"cell",
",",
"pattern",
")",
":",
"matches",
".",
"append",
"(",
"index",
")",
"break",
"return",
"matches"
] |
Return list of cells to be removed.
|
[
"Return",
"list",
"of",
"cells",
"to",
"be",
"removed",
"."
] |
a94260a6e84b790a9e39e0b1793443ffd4e1f496
|
https://github.com/brbsix/subnuker/blob/a94260a6e84b790a9e39e0b1793443ffd4e1f496/subnuker.py#L308-L318
|
240,135
|
brbsix/subnuker
|
subnuker.py
|
SrtProject.split
|
def split(self, text):
"""Split text into a list of cells."""
import re
if re.search('\n\n', text):
return text.split('\n\n')
elif re.search('\r\n\r\n', text):
return text.split('\r\n\r\n')
else:
LOGGER.error("'%s' does not appear to be a 'srt' subtitle file",
self.filename)
sys.exit(1)
|
python
|
def split(self, text):
"""Split text into a list of cells."""
import re
if re.search('\n\n', text):
return text.split('\n\n')
elif re.search('\r\n\r\n', text):
return text.split('\r\n\r\n')
else:
LOGGER.error("'%s' does not appear to be a 'srt' subtitle file",
self.filename)
sys.exit(1)
|
[
"def",
"split",
"(",
"self",
",",
"text",
")",
":",
"import",
"re",
"if",
"re",
".",
"search",
"(",
"'\\n\\n'",
",",
"text",
")",
":",
"return",
"text",
".",
"split",
"(",
"'\\n\\n'",
")",
"elif",
"re",
".",
"search",
"(",
"'\\r\\n\\r\\n'",
",",
"text",
")",
":",
"return",
"text",
".",
"split",
"(",
"'\\r\\n\\r\\n'",
")",
"else",
":",
"LOGGER",
".",
"error",
"(",
"\"'%s' does not appear to be a 'srt' subtitle file\"",
",",
"self",
".",
"filename",
")",
"sys",
".",
"exit",
"(",
"1",
")"
] |
Split text into a list of cells.
|
[
"Split",
"text",
"into",
"a",
"list",
"of",
"cells",
"."
] |
a94260a6e84b790a9e39e0b1793443ffd4e1f496
|
https://github.com/brbsix/subnuker/blob/a94260a6e84b790a9e39e0b1793443ffd4e1f496/subnuker.py#L320-L331
|
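Both separators in split() are literal strings, so the re.search calls could equally be plain substring tests. The same dispatch without the regex module, reduced to an exception on failure:

def split_cells(text):
    # Literal checks; '\r\n\r\n' contains no '\n\n', so the order is safe.
    if '\n\n' in text:
        return text.split('\n\n')
    if '\r\n\r\n' in text:
        return text.split('\r\n\r\n')
    raise ValueError('no blank-line cell separator found')

print(len(split_cells('1\na\n\n2\nb')))  # 2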
240,136
|
sunlightlabs/django-locksmith
|
locksmith/hub/dataviews.py
|
keys
|
def keys(request):
"""Lists API keys. Compatible with jQuery DataTables."""
iDisplayStart = parse_int_param(request, 'iDisplayStart')
iDisplayLength = parse_int_param(request, 'iDisplayLength')
sEcho = parse_int_param(request, 'sEcho')
iSortCol_0 = parse_int_param(request, 'iSortCol_0')
sSortDir_0 = request.GET.get('sSortDir_0', 'asc')
sSearch = request.GET.get('sSearch')
columns = ['key', 'email', 'calls', 'latest_call', 'issued_on']
qry = Key.objects
if sSearch not in (None, ''):
qry = qry.filter(Q(key__icontains=sSearch)
| Q(email__icontains=sSearch)
| Q(name__icontains=sSearch)
| Q(org_name__icontains=sSearch)
| Q(org_url__icontains=sSearch))
qry = qry.values('key', 'email', 'issued_on').annotate(calls=Sum('reports__calls'),
latest_call=Max('reports__date'))
qry = qry.filter(calls__isnull=False)
qry = exclude_internal_keys(qry)
# TODO: Add multi-column sorting
if iSortCol_0 not in (None, ''):
sort_col_field = columns[iSortCol_0]
sort_spec = '{dir}{col}'.format(dir='-' if sSortDir_0 == 'desc' else '',
col=sort_col_field)
qry = qry.order_by(sort_spec)
result = {
'iTotalRecords': Key.objects.count(),
'iTotalDisplayRecords': qry.count(),
'sEcho': sEcho,
'aaData': [[k['key'],
'<a href="{0}">{1}</a>'.format(reverse('key_analytics', args=(k['key'], )), k['email']),
k['calls'],
k['latest_call'].isoformat(),
k['issued_on'].date().isoformat()]
for k in qry[iDisplayStart:iDisplayStart+iDisplayLength]]
}
return HttpResponse(content=json.dumps(result), status=200, content_type='application/json')
|
python
|
def keys(request):
"""Lists API keys. Compatible with jQuery DataTables."""
iDisplayStart = parse_int_param(request, 'iDisplayStart')
iDisplayLength = parse_int_param(request, 'iDisplayLength')
sEcho = parse_int_param(request, 'sEcho')
iSortCol_0 = parse_int_param(request, 'iSortCol_0')
sSortDir_0 = request.GET.get('sSortDir_0', 'asc')
sSearch = request.GET.get('sSearch')
columns = ['key', 'email', 'calls', 'latest_call', 'issued_on']
qry = Key.objects
if sSearch not in (None, ''):
qry = qry.filter(Q(key__icontains=sSearch)
| Q(email__icontains=sSearch)
| Q(name__icontains=sSearch)
| Q(org_name__icontains=sSearch)
| Q(org_url__icontains=sSearch))
qry = qry.values('key', 'email', 'issued_on').annotate(calls=Sum('reports__calls'),
latest_call=Max('reports__date'))
qry = qry.filter(calls__isnull=False)
qry = exclude_internal_keys(qry)
# TODO: Add multi-column sorting
if iSortCol_0 not in (None, ''):
sort_col_field = columns[iSortCol_0]
sort_spec = '{dir}{col}'.format(dir='-' if sSortDir_0 == 'desc' else '',
col=sort_col_field)
qry = qry.order_by(sort_spec)
result = {
'iTotalRecords': Key.objects.count(),
'iTotalDisplayRecords': qry.count(),
'sEcho': sEcho,
'aaData': [[k['key'],
'<a href="{0}">{1}</a>'.format(reverse('key_analytics', args=(k['key'], )), k['email']),
k['calls'],
k['latest_call'].isoformat(),
k['issued_on'].date().isoformat()]
for k in qry[iDisplayStart:iDisplayStart+iDisplayLength]]
}
return HttpResponse(content=json.dumps(result), status=200, content_type='application/json')
|
[
"def",
"keys",
"(",
"request",
")",
":",
"iDisplayStart",
"=",
"parse_int_param",
"(",
"request",
",",
"'iDisplayStart'",
")",
"iDisplayLength",
"=",
"parse_int_param",
"(",
"request",
",",
"'iDisplayLength'",
")",
"sEcho",
"=",
"parse_int_param",
"(",
"request",
",",
"'sEcho'",
")",
"iSortCol_0",
"=",
"parse_int_param",
"(",
"request",
",",
"'iSortCol_0'",
")",
"sSortDir_0",
"=",
"request",
".",
"GET",
".",
"get",
"(",
"'sSortDir_0'",
",",
"'asc'",
")",
"sSearch",
"=",
"request",
".",
"GET",
".",
"get",
"(",
"'sSearch'",
")",
"columns",
"=",
"[",
"'key'",
",",
"'email'",
",",
"'calls'",
",",
"'latest_call'",
",",
"'issued_on'",
"]",
"qry",
"=",
"Key",
".",
"objects",
"if",
"sSearch",
"not",
"in",
"(",
"None",
",",
"''",
")",
":",
"qry",
"=",
"qry",
".",
"filter",
"(",
"Q",
"(",
"key__icontains",
"=",
"sSearch",
")",
"|",
"Q",
"(",
"email__icontains",
"=",
"sSearch",
")",
"|",
"Q",
"(",
"name__icontains",
"=",
"sSearch",
")",
"|",
"Q",
"(",
"org_name__icontains",
"=",
"sSearch",
")",
"|",
"Q",
"(",
"org_url__icontains",
"=",
"sSearch",
")",
")",
"qry",
"=",
"qry",
".",
"values",
"(",
"'key'",
",",
"'email'",
",",
"'issued_on'",
")",
".",
"annotate",
"(",
"calls",
"=",
"Sum",
"(",
"'reports__calls'",
")",
",",
"latest_call",
"=",
"Max",
"(",
"'reports__date'",
")",
")",
"qry",
"=",
"qry",
".",
"filter",
"(",
"calls__isnull",
"=",
"False",
")",
"qry",
"=",
"exclude_internal_keys",
"(",
"qry",
")",
"# TODO: Add multi-column sorting",
"if",
"iSortCol_0",
"not",
"in",
"(",
"None",
",",
"''",
")",
":",
"sort_col_field",
"=",
"columns",
"[",
"iSortCol_0",
"]",
"sort_spec",
"=",
"'{dir}{col}'",
".",
"format",
"(",
"dir",
"=",
"'-'",
"if",
"sSortDir_0",
"==",
"'desc'",
"else",
"''",
",",
"col",
"=",
"sort_col_field",
")",
"qry",
"=",
"qry",
".",
"order_by",
"(",
"sort_spec",
")",
"result",
"=",
"{",
"'iTotalRecords'",
":",
"Key",
".",
"objects",
".",
"count",
"(",
")",
",",
"'iTotalDisplayRecords'",
":",
"qry",
".",
"count",
"(",
")",
",",
"'sEcho'",
":",
"sEcho",
",",
"'aaData'",
":",
"[",
"[",
"k",
"[",
"'key'",
"]",
",",
"'<a href=\"{0}\">{1}</a>'",
".",
"format",
"(",
"reverse",
"(",
"'key_analytics'",
",",
"args",
"=",
"(",
"k",
"[",
"'key'",
"]",
",",
")",
")",
",",
"k",
"[",
"'email'",
"]",
")",
",",
"k",
"[",
"'calls'",
"]",
",",
"k",
"[",
"'latest_call'",
"]",
".",
"isoformat",
"(",
")",
",",
"k",
"[",
"'issued_on'",
"]",
".",
"date",
"(",
")",
".",
"isoformat",
"(",
")",
"]",
"for",
"k",
"in",
"qry",
"[",
"iDisplayStart",
":",
"iDisplayStart",
"+",
"iDisplayLength",
"]",
"]",
"}",
"return",
"HttpResponse",
"(",
"content",
"=",
"json",
".",
"dumps",
"(",
"result",
")",
",",
"status",
"=",
"200",
",",
"content_type",
"=",
"'application/json'",
")"
] |
Lists API keys. Compatible with jQuery DataTables.
|
[
"Lists",
"API",
"keys",
".",
"Compatible",
"with",
"jQuery",
"DataTables",
"."
] |
eef5b7c25404560aaad50b6e622594f89239b74b
|
https://github.com/sunlightlabs/django-locksmith/blob/eef5b7c25404560aaad50b6e622594f89239b74b/locksmith/hub/dataviews.py#L468-L507
|
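The view above maps DataTables' legacy server-side parameters onto the ORM: iSortCol_0 indexes into columns, sSortDir_0 becomes a leading '-' in the order_by spec, and iDisplayStart/iDisplayLength become a slice. The spec and paging plumbing in isolation, with a plain list standing in for the queryset:

columns = ['key', 'email', 'calls', 'latest_call', 'issued_on']
iSortCol_0, sSortDir_0 = 2, 'desc'
sort_spec = '{dir}{col}'.format(dir='-' if sSortDir_0 == 'desc' else '',
                                col=columns[iSortCol_0])
print(sort_spec)  # -calls

iDisplayStart, iDisplayLength = 10, 5
rows = list(range(100))
print(rows[iDisplayStart:iDisplayStart + iDisplayLength])  # [10, 11, 12, 13, 14]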
240,137
|
MacHu-GWU/angora-project
|
angora/algorithm/iterable.py
|
grouper_dict
|
def grouper_dict(d, n):
    """Evenly divide a dictionary into fixed-length chunks; the last chunk
    is not padded if fewer than n items remain.
    Usage::
    >>> list(grouper_dict({1: 'A', 2: 'B', 3: 'C', 4: 'D', 5: 'E',
    6: 'F', 7: 'G', 8: 'H', 9: 'I', 10: 'J'}, 3))
[{1: 'A', 2: 'B', 3: 'C'}, {4: 'D', 5: 'E', 6: 'F'},
{7: 'G', 8: 'H', 9: 'I'}, {10: 'J'}]
"""
chunk = dict()
counter = 0
for k, v in d.items():
counter += 1
chunk[k] = v
if counter == n:
yield chunk
chunk = dict()
counter = 0
if len(chunk) > 0:
yield chunk
|
python
|
def grouper_dict(d, n):
    """Evenly divide a dictionary into fixed-length chunks; the last chunk
    is not padded if fewer than n items remain.
    Usage::
    >>> list(grouper_dict({1: 'A', 2: 'B', 3: 'C', 4: 'D', 5: 'E',
    6: 'F', 7: 'G', 8: 'H', 9: 'I', 10: 'J'}, 3))
[{1: 'A', 2: 'B', 3: 'C'}, {4: 'D', 5: 'E', 6: 'F'},
{7: 'G', 8: 'H', 9: 'I'}, {10: 'J'}]
"""
chunk = dict()
counter = 0
for k, v in d.items():
counter += 1
chunk[k] = v
if counter == n:
yield chunk
chunk = dict()
counter = 0
if len(chunk) > 0:
yield chunk
|
[
"def",
"grouper_dict",
"(",
"d",
",",
"n",
")",
":",
"chunk",
"=",
"dict",
"(",
")",
"counter",
"=",
"0",
"for",
"k",
",",
"v",
"in",
"d",
".",
"items",
"(",
")",
":",
"counter",
"+=",
"1",
"chunk",
"[",
"k",
"]",
"=",
"v",
"print",
"(",
"counter",
",",
"chunk",
")",
"if",
"counter",
"==",
"n",
":",
"yield",
"chunk",
"chunk",
"=",
"dict",
"(",
")",
"counter",
"=",
"0",
"if",
"len",
"(",
"chunk",
")",
">",
"0",
":",
"yield",
"chunk"
] |
Evenly divide a dictionary into fixed-length chunks; the last chunk
is not padded if fewer than n items remain.
Usage::
>>> list(grouper_dict({1: 'A', 2: 'B', 3: 'C', 4: 'D', 5: 'E',
6: 'F', 7: 'G', 8: 'H', 9: 'I', 10: 'J'}, 3))
[{1: 'A', 2: 'B', 3: 'C'}, {4: 'D', 5: 'E', 6: 'F'},
{7: 'G', 8: 'H', 9: 'I'}, {10: 'J'}]
|
[
"Evenly",
"divide",
"dictionary",
"into",
"fixed",
"-",
"length",
"piece",
"no",
"filled",
"value",
"if",
"chunk",
"size",
"smaller",
"than",
"fixed",
"-",
"length",
"."
] |
689a60da51cd88680ddbe26e28dbe81e6b01d275
|
https://github.com/MacHu-GWU/angora-project/blob/689a60da51cd88680ddbe26e28dbe81e6b01d275/angora/algorithm/iterable.py#L169-L191
|
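An equivalent sketch of grouper_dict using itertools.islice, which drops the manual counter; same contract, and the last chunk may be short:

import itertools

def grouper_dict_islice(d, n):
    it = iter(d.items())
    while True:
        chunk = dict(itertools.islice(it, n))
        if not chunk:
            return
        yield chunk

d = {i: chr(64 + i) for i in range(1, 11)}  # {1: 'A', ..., 10: 'J'}
print(list(grouper_dict_islice(d, 3))[-1])  # {10: 'J'}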
240,138
|
MacHu-GWU/angora-project
|
angora/algorithm/iterable.py
|
running_windows
|
def running_windows(iterable, size):
"""Generate n-size running windows.
Usage::
>>> for i in running_windows([1, 2, 3, 4, 5], size=3):
... print(i)
[1, 2, 3]
[2, 3, 4]
[3, 4, 5]
"""
fifo = collections.deque(maxlen=size)
for i in iterable:
fifo.append(i)
if len(fifo) == size:
yield list(fifo)
|
python
|
def running_windows(iterable, size):
"""Generate n-size running windows.
Usage::
>>> for i in running_windows([1, 2, 3, 4, 5], size=3):
... print(i)
[1, 2, 3]
[2, 3, 4]
[3, 4, 5]
"""
fifo = collections.deque(maxlen=size)
for i in iterable:
fifo.append(i)
if len(fifo) == size:
yield list(fifo)
|
[
"def",
"running_windows",
"(",
"iterable",
",",
"size",
")",
":",
"fifo",
"=",
"collections",
".",
"deque",
"(",
"maxlen",
"=",
"size",
")",
"for",
"i",
"in",
"iterable",
":",
"fifo",
".",
"append",
"(",
"i",
")",
"if",
"len",
"(",
"fifo",
")",
"==",
"size",
":",
"yield",
"list",
"(",
"fifo",
")"
] |
Generate n-size running windows.
Usage::
>>> for i in running_windows([1, 2, 3, 4, 5], size=3):
... print(i)
[1, 2, 3]
[2, 3, 4]
[3, 4, 5]
|
[
"Generate",
"n",
"-",
"size",
"running",
"windows",
"."
] |
689a60da51cd88680ddbe26e28dbe81e6b01d275
|
https://github.com/MacHu-GWU/angora-project/blob/689a60da51cd88680ddbe26e28dbe81e6b01d275/angora/algorithm/iterable.py#L194-L209
|
240,139
|
MacHu-GWU/angora-project
|
angora/algorithm/iterable.py
|
shift_to_the_left
|
def shift_to_the_left(array, dist, pad=True, trim=True):
"""Shift array to the left.
:param array: An iterable object.
:type array: iterable object
:param dist: how far you want to shift
    :type dist: int
:param pad: pad array[-1] to the right.
:type pad: boolean (default True)
:param trim: trim the first ``#dist`` items.
:type trim: boolean (default True)
Usage::
>>> array = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
    >>> shift_to_the_left(array, 1, pad=True, trim=True)
    [1, 2, 3, 4, 5, 6, 7, 8, 9, 9]
    >>> shift_to_the_left(array, 1, pad=True, trim=False)
    [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 9]
    >>> shift_to_the_left(array, 1, pad=False, trim=True)
    [1, 2, 3, 4, 5, 6, 7, 8, 9]
    >>> shift_to_the_left(array, 1, pad=False, trim=False)
    Warning, with pad=False and trim=False, no change applied.
    [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
    """
    if dist < 0:
        raise ValueError("Shift distance has to be greater than or equal to 0.")
if pad:
if trim:
new_array = array[dist:] + [array[-1]] * dist
else:
new_array = array + [array[-1]] * dist
else:
if trim:
new_array = array[dist:]
else:
print("Warning, with pad=False and trim=False, no change applied.")
new_array = list(array)
return new_array
|
python
|
def shift_to_the_left(array, dist, pad=True, trim=True):
"""Shift array to the left.
:param array: An iterable object.
:type array: iterable object
:param dist: how far you want to shift
    :type dist: int
:param pad: pad array[-1] to the right.
:type pad: boolean (default True)
:param trim: trim the first ``#dist`` items.
:type trim: boolean (default True)
Usage::
>>> array = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
    >>> shift_to_the_left(array, 1, pad=True, trim=True)
    [1, 2, 3, 4, 5, 6, 7, 8, 9, 9]
    >>> shift_to_the_left(array, 1, pad=True, trim=False)
    [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 9]
    >>> shift_to_the_left(array, 1, pad=False, trim=True)
    [1, 2, 3, 4, 5, 6, 7, 8, 9]
    >>> shift_to_the_left(array, 1, pad=False, trim=False)
    Warning, with pad=False and trim=False, no change applied.
    [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
    """
    if dist < 0:
        raise ValueError("Shift distance has to be greater than or equal to 0.")
if pad:
if trim:
new_array = array[dist:] + [array[-1]] * dist
else:
new_array = array + [array[-1]] * dist
else:
if trim:
new_array = array[dist:]
else:
print("Warning, with pad=False and trim=False, no change applied.")
new_array = list(array)
return new_array
|
[
"def",
"shift_to_the_left",
"(",
"array",
",",
"dist",
",",
"pad",
"=",
"True",
",",
"trim",
"=",
"True",
")",
":",
"if",
"dist",
"<",
"0",
":",
"raise",
"ValueError",
"(",
"\"Shift distance has to greater or equal than 0.\"",
")",
"if",
"pad",
":",
"if",
"trim",
":",
"new_array",
"=",
"array",
"[",
"dist",
":",
"]",
"+",
"[",
"array",
"[",
"-",
"1",
"]",
"]",
"*",
"dist",
"else",
":",
"new_array",
"=",
"array",
"+",
"[",
"array",
"[",
"-",
"1",
"]",
"]",
"*",
"dist",
"else",
":",
"if",
"trim",
":",
"new_array",
"=",
"array",
"[",
"dist",
":",
"]",
"else",
":",
"print",
"(",
"\"Warning, with pad=False and trim=False, no change applied.\"",
")",
"new_array",
"=",
"list",
"(",
"array",
")",
"return",
"new_array"
] |
Shift array to the left.
:param array: An iterable object.
:type array: iterable object
:param dist: how far you want to shift
:type dist: int
:param pad: pad array[-1] to the right.
:type pad: boolean (default True)
:param trim: trim the first ``#dist`` items.
:type trim: boolean (default True)
Usage::
>>> array = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
>>> shift_to_the_left(array, 1, pad=True, trim=True)
[1, 2, 3, 4, 5, 6, 7, 8, 9, 9]
>>> shift_to_the_left(array, 1, pad=True, trim=False)
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 9]
>>> shift_to_the_left(array, 1, pad=False, trim=True)
[1, 2, 3, 4, 5, 6, 7, 8, 9]
>>> shift_to_the_left(array, 1, pad=False, trim=False)
Warning, with pad=False and trim=False, no change applied.
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
|
[
"Shift",
"array",
"to",
"the",
"left",
"."
] |
689a60da51cd88680ddbe26e28dbe81e6b01d275
|
https://github.com/MacHu-GWU/angora-project/blob/689a60da51cd88680ddbe26e28dbe81e6b01d275/angora/algorithm/iterable.py#L274-L316
|
240,140
|
MacHu-GWU/angora-project
|
angora/algorithm/iterable.py
|
count_generator
|
def count_generator(generator, memory_efficient=True):
    """Count the number of items in a generator.
    memory_efficient=True: about 3 times slower, but memory efficient.
    memory_efficient=False: faster, but costs more memory.
"""
if memory_efficient:
counter = 0
for _ in generator:
counter += 1
return counter
else:
return len(list(generator))
|
python
|
def count_generator(generator, memory_efficient=True):
    """Count the number of items in a generator.
    memory_efficient=True: about 3 times slower, but memory efficient.
    memory_efficient=False: faster, but costs more memory.
"""
if memory_efficient:
counter = 0
for _ in generator:
counter += 1
return counter
else:
return len(list(generator))
|
[
"def",
"count_generator",
"(",
"generator",
",",
"memory_efficient",
"=",
"True",
")",
":",
"if",
"memory_efficient",
":",
"counter",
"=",
"0",
"for",
"_",
"in",
"generator",
":",
"counter",
"+=",
"1",
"return",
"counter",
"else",
":",
"return",
"len",
"(",
"list",
"(",
"generator",
")",
")"
] |
Count the number of items in a generator.
memory_efficient=True: about 3 times slower, but memory efficient.
memory_efficient=False: faster, but costs more memory.
|
[
"Count",
"number",
"of",
"item",
"in",
"generator",
"."
] |
689a60da51cd88680ddbe26e28dbe81e6b01d275
|
https://github.com/MacHu-GWU/angora-project/blob/689a60da51cd88680ddbe26e28dbe81e6b01d275/angora/algorithm/iterable.py#L364-L376
|
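A usage sketch for count_generator; the memory-efficient branch is equivalent to sum(1 for _ in generator), and either branch consumes the generator:

def count_generator(generator, memory_efficient=True):
    if memory_efficient:
        counter = 0
        for _ in generator:
            counter += 1
        return counter
    return len(list(generator))

gen = (i * i for i in range(1000))
print(count_generator(gen))  # 1000
print(count_generator(gen))  # 0 -- the generator is now exhausted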
240,141
|
MacHu-GWU/pyknackhq-project
|
pyknackhq/schema.py
|
Object.get_field_key
|
def get_field_key(self, key, using_name=True):
    """Given a field key or name, return its field key.
"""
try:
if using_name:
return self.f_name[key].key
else:
return self.f[key].key
except KeyError:
        raise ValueError("'%s' was not found!" % key)
|
python
|
def get_field_key(self, key, using_name=True):
    """Given a field key or name, return its field key.
"""
try:
if using_name:
return self.f_name[key].key
else:
return self.f[key].key
except KeyError:
        raise ValueError("'%s' was not found!" % key)
|
[
"def",
"get_field_key",
"(",
"self",
",",
"key",
",",
"using_name",
"=",
"True",
")",
":",
"try",
":",
"if",
"using_name",
":",
"return",
"self",
".",
"f_name",
"[",
"key",
"]",
".",
"key",
"else",
":",
"return",
"self",
".",
"f",
"[",
"key",
"]",
".",
"key",
"except",
"KeyError",
":",
"raise",
"ValueError",
"(",
"\"'%s' are not found!\"",
"%",
"key",
")"
] |
Given a field key or name, return its field key.
|
[
"Given",
"a",
"field",
"key",
"or",
"name",
"return",
"it",
"s",
"field",
"key",
"."
] |
dd937f24d7b0a351ba3818eb746c31b29a8cc341
|
https://github.com/MacHu-GWU/pyknackhq-project/blob/dd937f24d7b0a351ba3818eb746c31b29a8cc341/pyknackhq/schema.py#L80-L89
|
240,142
|
MacHu-GWU/pyknackhq-project
|
pyknackhq/schema.py
|
Application.get_object_key
|
def get_object_key(self, key, using_name=True):
    """Given an object key or name, return its object key.
"""
try:
if using_name:
return self.o_name[key].key
else:
return self.o[key].key
except KeyError:
        raise ValueError("'%s' was not found!" % key)
|
python
|
def get_object_key(self, key, using_name=True):
    """Given an object key or name, return its object key.
"""
try:
if using_name:
return self.o_name[key].key
else:
return self.o[key].key
except KeyError:
        raise ValueError("'%s' was not found!" % key)
|
[
"def",
"get_object_key",
"(",
"self",
",",
"key",
",",
"using_name",
"=",
"True",
")",
":",
"try",
":",
"if",
"using_name",
":",
"return",
"self",
".",
"o_name",
"[",
"key",
"]",
".",
"key",
"else",
":",
"return",
"self",
".",
"o",
"[",
"key",
"]",
".",
"key",
"except",
"KeyError",
":",
"raise",
"ValueError",
"(",
"\"'%s' are not found!\"",
"%",
"key",
")"
] |
Given an object key or name, return its object key.
|
[
"Given",
"a",
"object",
"key",
"or",
"name",
"return",
"it",
"s",
"object",
"key",
"."
] |
dd937f24d7b0a351ba3818eb746c31b29a8cc341
|
https://github.com/MacHu-GWU/pyknackhq-project/blob/dd937f24d7b0a351ba3818eb746c31b29a8cc341/pyknackhq/schema.py#L149-L158
|
240,143
|
MacHu-GWU/angora-project
|
angora/filesystem/winzip.py
|
zip_a_folder
|
def zip_a_folder(src, dst):
"""Add a folder and everything inside to zip archive.
Example::
|---paper
|--- algorithm.pdf
|--- images
|--- 1.jpg
zip_a_folder("paper", "paper.zip")
paper.zip
|---paper
|--- algorithm.pdf
|--- images
|--- 1.jpg
    Adds the entire folder to the archive, including the root directory itself.
"""
src, dst = os.path.abspath(src), os.path.abspath(dst)
cwd = os.getcwd()
todo = list()
dirname, basename = os.path.split(src)
os.chdir(dirname)
for dirname, _, fnamelist in os.walk(basename):
for fname in fnamelist:
newname = os.path.join(dirname, fname)
todo.append(newname)
with ZipFile(dst, "w") as f:
for newname in todo:
f.write(newname)
os.chdir(cwd)
|
python
|
def zip_a_folder(src, dst):
"""Add a folder and everything inside to zip archive.
Example::
|---paper
|--- algorithm.pdf
|--- images
|--- 1.jpg
zip_a_folder("paper", "paper.zip")
paper.zip
|---paper
|--- algorithm.pdf
|--- images
|--- 1.jpg
    Adds the entire folder to the archive, including the root directory itself.
"""
src, dst = os.path.abspath(src), os.path.abspath(dst)
cwd = os.getcwd()
todo = list()
dirname, basename = os.path.split(src)
os.chdir(dirname)
for dirname, _, fnamelist in os.walk(basename):
for fname in fnamelist:
newname = os.path.join(dirname, fname)
todo.append(newname)
with ZipFile(dst, "w") as f:
for newname in todo:
f.write(newname)
os.chdir(cwd)
|
[
"def",
"zip_a_folder",
"(",
"src",
",",
"dst",
")",
":",
"src",
",",
"dst",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"src",
")",
",",
"os",
".",
"path",
".",
"abspath",
"(",
"dst",
")",
"cwd",
"=",
"os",
".",
"getcwd",
"(",
")",
"todo",
"=",
"list",
"(",
")",
"dirname",
",",
"basename",
"=",
"os",
".",
"path",
".",
"split",
"(",
"src",
")",
"os",
".",
"chdir",
"(",
"dirname",
")",
"for",
"dirname",
",",
"_",
",",
"fnamelist",
"in",
"os",
".",
"walk",
"(",
"basename",
")",
":",
"for",
"fname",
"in",
"fnamelist",
":",
"newname",
"=",
"os",
".",
"path",
".",
"join",
"(",
"dirname",
",",
"fname",
")",
"todo",
".",
"append",
"(",
"newname",
")",
"with",
"ZipFile",
"(",
"dst",
",",
"\"w\"",
")",
"as",
"f",
":",
"for",
"newname",
"in",
"todo",
":",
"f",
".",
"write",
"(",
"newname",
")",
"os",
".",
"chdir",
"(",
"cwd",
")"
] |
Add a folder and everything inside to zip archive.
Example::
|---paper
|--- algorithm.pdf
|--- images
|--- 1.jpg
zip_a_folder("paper", "paper.zip")
paper.zip
|---paper
|--- algorithm.pdf
|--- images
|--- 1.jpg
Adds the entire folder to the archive, including the root directory itself.
|
[
"Add",
"a",
"folder",
"and",
"everything",
"inside",
"to",
"zip",
"archive",
"."
] |
689a60da51cd88680ddbe26e28dbe81e6b01d275
|
https://github.com/MacHu-GWU/angora-project/blob/689a60da51cd88680ddbe26e28dbe81e6b01d275/angora/filesystem/winzip.py#L67-L104
|
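Because zip_a_folder chdirs into the parent of src to get archive-relative names, it is not safe to call from multiple threads. A hedged chdir-free variant that computes arcname explicitly instead (the commented call uses hypothetical paths):

import os
from zipfile import ZipFile

def zip_a_folder_no_chdir(src, dst):
    # Same archive layout, but the working directory never moves.
    src = os.path.abspath(src)
    root = os.path.dirname(src)
    with ZipFile(dst, 'w') as f:
        for dirname, _, fnamelist in os.walk(src):
            for fname in fnamelist:
                full = os.path.join(dirname, fname)
                f.write(full, arcname=os.path.relpath(full, root))

# zip_a_folder_no_chdir('paper', 'paper.zip')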
240,144
|
MacHu-GWU/angora-project
|
angora/filesystem/winzip.py
|
zip_many_files
|
def zip_many_files(list_of_abspath, dst):
"""Add many files to a zip archive.
    Compresses a list of files into a single archive; if file names repeat,
    every copy is kept in the zip.
"""
base_dir = os.getcwd()
with ZipFile(dst, "w") as f:
for abspath in list_of_abspath:
dirname, basename = os.path.split(abspath)
os.chdir(dirname)
f.write(basename)
os.chdir(base_dir)
|
python
|
def zip_many_files(list_of_abspath, dst):
"""Add many files to a zip archive.
    Compresses a list of files into a single archive; if file names repeat,
    every copy is kept in the zip.
"""
base_dir = os.getcwd()
with ZipFile(dst, "w") as f:
for abspath in list_of_abspath:
dirname, basename = os.path.split(abspath)
os.chdir(dirname)
f.write(basename)
os.chdir(base_dir)
|
[
"def",
"zip_many_files",
"(",
"list_of_abspath",
",",
"dst",
")",
":",
"base_dir",
"=",
"os",
".",
"getcwd",
"(",
")",
"with",
"ZipFile",
"(",
"dst",
",",
"\"w\"",
")",
"as",
"f",
":",
"for",
"abspath",
"in",
"list_of_abspath",
":",
"dirname",
",",
"basename",
"=",
"os",
".",
"path",
".",
"split",
"(",
"abspath",
")",
"os",
".",
"chdir",
"(",
"dirname",
")",
"f",
".",
"write",
"(",
"basename",
")",
"os",
".",
"chdir",
"(",
"base_dir",
")"
] |
Add many files to a zip archive.
Compresses a list of files into a single archive; if file names repeat,
every copy is kept in the zip.
|
[
"Add",
"many",
"files",
"to",
"a",
"zip",
"archive",
"."
] |
689a60da51cd88680ddbe26e28dbe81e6b01d275
|
https://github.com/MacHu-GWU/angora-project/blob/689a60da51cd88680ddbe26e28dbe81e6b01d275/angora/filesystem/winzip.py#L145-L160
|
240,145
|
MacHu-GWU/angora-project
|
angora/filesystem/winzip.py
|
write_gzip
|
def write_gzip(content, abspath):
"""Write binary content to gzip file.
    Compresses the binary content and writes it to a gzip file.
"""
with gzip.open(abspath, "wb") as f:
f.write(content)
|
python
|
def write_gzip(content, abspath):
"""Write binary content to gzip file.
    Compresses the binary content and writes it to a gzip file.
"""
with gzip.open(abspath, "wb") as f:
f.write(content)
|
[
"def",
"write_gzip",
"(",
"content",
",",
"abspath",
")",
":",
"with",
"gzip",
".",
"open",
"(",
"abspath",
",",
"\"wb\"",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"content",
")"
] |
Write binary content to gzip file.
Compresses the binary content and writes it to a gzip file.
|
[
"Write",
"binary",
"content",
"to",
"gzip",
"file",
"."
] |
689a60da51cd88680ddbe26e28dbe81e6b01d275
|
https://github.com/MacHu-GWU/angora-project/blob/689a60da51cd88680ddbe26e28dbe81e6b01d275/angora/filesystem/winzip.py#L163-L171
|
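The read counterpart is symmetric; read_gzip below is a hypothetical helper for illustration, not part of the module shown:

import gzip

def read_gzip(abspath):
    # Decompress the gzip file and return the raw bytes.
    with gzip.open(abspath, 'rb') as f:
        return f.read()

# write_gzip(b'hello', 'demo.gz'); read_gzip('demo.gz') == b'hello'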
240,146
|
klmitch/metatools
|
metatools.py
|
MetaMeta.iter_bases
|
def iter_bases(bases):
"""
Performs MRO linearization of a set of base classes. Yields
each base class in turn.
"""
sequences = ([list(inspect.getmro(base)) for base in bases] +
[list(bases)])
# Loop over sequences
while True:
sequences = [seq for seq in sequences if seq]
if not sequences:
return
# Select a good head
for seq in sequences:
head = seq[0]
tails = [seq for seq in sequences if head in seq[1:]]
if not tails:
break
else:
raise TypeError('Cannot create a consistent method '
'resolution order (MRO) for bases %s' %
', '.join([base.__name__ for base in bases]))
# Yield this base class
yield head
# Remove base class from all the other sequences
for seq in sequences:
if seq[0] == head:
del seq[0]
|
python
|
def iter_bases(bases):
"""
Performs MRO linearization of a set of base classes. Yields
each base class in turn.
"""
sequences = ([list(inspect.getmro(base)) for base in bases] +
[list(bases)])
# Loop over sequences
while True:
sequences = [seq for seq in sequences if seq]
if not sequences:
return
# Select a good head
for seq in sequences:
head = seq[0]
tails = [seq for seq in sequences if head in seq[1:]]
if not tails:
break
else:
raise TypeError('Cannot create a consistent method '
'resolution order (MRO) for bases %s' %
', '.join([base.__name__ for base in bases]))
# Yield this base class
yield head
# Remove base class from all the other sequences
for seq in sequences:
if seq[0] == head:
del seq[0]
|
[
"def",
"iter_bases",
"(",
"bases",
")",
":",
"sequences",
"=",
"(",
"[",
"list",
"(",
"inspect",
".",
"getmro",
"(",
"base",
")",
")",
"for",
"base",
"in",
"bases",
"]",
"+",
"[",
"list",
"(",
"bases",
")",
"]",
")",
"# Loop over sequences",
"while",
"True",
":",
"sequences",
"=",
"[",
"seq",
"for",
"seq",
"in",
"sequences",
"if",
"seq",
"]",
"if",
"not",
"sequences",
":",
"return",
"# Select a good head",
"for",
"seq",
"in",
"sequences",
":",
"head",
"=",
"seq",
"[",
"0",
"]",
"tails",
"=",
"[",
"seq",
"for",
"seq",
"in",
"sequences",
"if",
"head",
"in",
"seq",
"[",
"1",
":",
"]",
"]",
"if",
"not",
"tails",
":",
"break",
"else",
":",
"raise",
"TypeError",
"(",
"'Cannot create a consistent method '",
"'resolution order (MRO) for bases %s'",
"%",
"', '",
".",
"join",
"(",
"[",
"base",
".",
"__name__",
"for",
"base",
"in",
"bases",
"]",
")",
")",
"# Yield this base class",
"yield",
"head",
"# Remove base class from all the other sequences",
"for",
"seq",
"in",
"sequences",
":",
"if",
"seq",
"[",
"0",
"]",
"==",
"head",
":",
"del",
"seq",
"[",
"0",
"]"
] |
Performs MRO linearization of a set of base classes. Yields
each base class in turn.
|
[
"Performs",
"MRO",
"linearization",
"of",
"a",
"set",
"of",
"base",
"classes",
".",
"Yields",
"each",
"base",
"class",
"in",
"turn",
"."
] |
7161cf22ef2b194cfd4406e85b81e39a49104d9d
|
https://github.com/klmitch/metatools/blob/7161cf22ef2b194cfd4406e85b81e39a49104d9d/metatools.py#L53-L86
|
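The head-selection loop in iter_bases is C3 linearization: a candidate head is good only when it appears in the tail of no other sequence. For a diamond hierarchy the generator should agree with Python's own MRO, which this sketch prints via inspect.getmro as a reference:

import inspect

class A: pass
class B(A): pass
class C(A): pass
class D(B, C): pass

# iter_bases((B, C)) should yield B, C, A, object in this relative order.
print([cls.__name__ for cls in inspect.getmro(D)])
# ['D', 'B', 'C', 'A', 'object']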
240,147
|
klmitch/metatools
|
metatools.py
|
MetaMeta.inherit_dict
|
def inherit_dict(base, namespace, attr_name,
inherit=lambda k, v: True):
"""
Perform inheritance of dictionaries. Returns a list of key
and value pairs for values that were inherited, for
post-processing.
:param base: The base class being considered; see
``iter_bases()``.
:param namespace: The dictionary of the new class being built.
:param attr_name: The name of the attribute containing the
dictionary to be inherited.
:param inherit: Filtering function to determine if a given
item should be inherited. If ``False`` or
``None``, item will not be added, but will be
included in the returned items. If a
function, the function will be called with the
key and value, and the item will be added and
included in the items list only if the
function returns ``True``. By default, all
items are added and included in the items
list.
"""
items = []
# Get the dicts to compare
base_dict = getattr(base, attr_name, {})
new_dict = namespace.setdefault(attr_name, {})
for key, value in base_dict.items():
# Skip keys that have been overridden or that we shouldn't
# inherit
if key in new_dict or (inherit and not inherit(key, value)):
continue
# Inherit the key
if inherit:
new_dict[key] = value
# Save the item for post-processing
items.append((key, value))
return items
|
python
|
def inherit_dict(base, namespace, attr_name,
inherit=lambda k, v: True):
"""
Perform inheritance of dictionaries. Returns a list of key
and value pairs for values that were inherited, for
post-processing.
:param base: The base class being considered; see
``iter_bases()``.
:param namespace: The dictionary of the new class being built.
:param attr_name: The name of the attribute containing the
dictionary to be inherited.
:param inherit: Filtering function to determine if a given
item should be inherited. If ``False`` or
``None``, item will not be added, but will be
included in the returned items. If a
function, the function will be called with the
key and value, and the item will be added and
included in the items list only if the
function returns ``True``. By default, all
items are added and included in the items
list.
"""
items = []
# Get the dicts to compare
base_dict = getattr(base, attr_name, {})
new_dict = namespace.setdefault(attr_name, {})
for key, value in base_dict.items():
# Skip keys that have been overridden or that we shouldn't
# inherit
if key in new_dict or (inherit and not inherit(key, value)):
continue
# Inherit the key
if inherit:
new_dict[key] = value
# Save the item for post-processing
items.append((key, value))
return items
|
[
"def",
"inherit_dict",
"(",
"base",
",",
"namespace",
",",
"attr_name",
",",
"inherit",
"=",
"lambda",
"k",
",",
"v",
":",
"True",
")",
":",
"items",
"=",
"[",
"]",
"# Get the dicts to compare",
"base_dict",
"=",
"getattr",
"(",
"base",
",",
"attr_name",
",",
"{",
"}",
")",
"new_dict",
"=",
"namespace",
".",
"setdefault",
"(",
"attr_name",
",",
"{",
"}",
")",
"for",
"key",
",",
"value",
"in",
"base_dict",
".",
"items",
"(",
")",
":",
"# Skip keys that have been overridden or that we shouldn't",
"# inherit",
"if",
"key",
"in",
"new_dict",
"or",
"(",
"inherit",
"and",
"not",
"inherit",
"(",
"key",
",",
"value",
")",
")",
":",
"continue",
"# Inherit the key",
"if",
"inherit",
":",
"new_dict",
"[",
"key",
"]",
"=",
"value",
"# Save the item for post-processing",
"items",
".",
"append",
"(",
"(",
"key",
",",
"value",
")",
")",
"return",
"items"
] |
Perform inheritance of dictionaries. Returns a list of key
and value pairs for values that were inherited, for
post-processing.
:param base: The base class being considered; see
``iter_bases()``.
:param namespace: The dictionary of the new class being built.
:param attr_name: The name of the attribute containing the
dictionary to be inherited.
:param inherit: Filtering function to determine if a given
item should be inherited. If ``False`` or
``None``, item will not be added, but will be
included in the returned items. If a
function, the function will be called with the
key and value, and the item will be added and
included in the items list only if the
function returns ``True``. By default, all
items are added and included in the items
list.
|
[
"Perform",
"inheritance",
"of",
"dictionaries",
".",
"Returns",
"a",
"list",
"of",
"key",
"and",
"value",
"pairs",
"for",
"values",
"that",
"were",
"inherited",
"for",
"post",
"-",
"processing",
"."
] |
7161cf22ef2b194cfd4406e85b81e39a49104d9d
|
https://github.com/klmitch/metatools/blob/7161cf22ef2b194cfd4406e85b81e39a49104d9d/metatools.py#L89-L131
|
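The merge semantics of inherit_dict in miniature: keys already present in the new namespace win, everything else is copied over and reported back. A self-contained reduction using a hypothetical 'registry' attribute:

def merge_inherited(base_dict, new_dict):
    # Core of inherit_dict's loop: copy only non-overridden keys.
    items = []
    for key, value in base_dict.items():
        if key in new_dict:
            continue
        new_dict[key] = value
        items.append((key, value))
    return items

namespace = {'registry': {'b': 20}}
inherited = merge_inherited({'a': 1, 'b': 2}, namespace['registry'])
print(namespace['registry'], inherited)  # {'b': 20, 'a': 1} [('a', 1)]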
240,148
|
klmitch/metatools
|
metatools.py
|
MetaMeta.inherit_set
|
def inherit_set(base, namespace, attr_name,
inherit=lambda i: True):
"""
Perform inheritance of sets. Returns a list of items that
were inherited, for post-processing.
:param base: The base class being considered; see
``iter_bases()``.
:param namespace: The dictionary of the new class being built.
:param attr_name: The name of the attribute containing the set
to be inherited.
:param inherit: Filtering function to determine if a given
item should be inherited. If ``False`` or
``None``, item will not be added, but will be
included in the returned items. If a
function, the function will be called with the
item, and the item will be added and included
in the items list only if the function returns
``True``. By default, all items are added and
included in the items list.
"""
items = []
# Get the sets to compare
base_set = getattr(base, attr_name, set())
new_set = namespace.setdefault(attr_name, set())
for item in base_set:
# Skip items that have been overridden or that we
# shouldn't inherit
if item in new_set or (inherit and not inherit(item)):
continue
# Inherit the item
if inherit:
new_set.add(item)
items.append(item)
return items
|
python
|
def inherit_set(base, namespace, attr_name,
inherit=lambda i: True):
"""
Perform inheritance of sets. Returns a list of items that
were inherited, for post-processing.
:param base: The base class being considered; see
``iter_bases()``.
:param namespace: The dictionary of the new class being built.
:param attr_name: The name of the attribute containing the set
to be inherited.
:param inherit: Filtering function to determine if a given
item should be inherited. If ``False`` or
``None``, item will not be added, but will be
included in the returned items. If a
function, the function will be called with the
item, and the item will be added and included
in the items list only if the function returns
``True``. By default, all items are added and
included in the items list.
"""
items = []
# Get the sets to compare
base_set = getattr(base, attr_name, set())
new_set = namespace.setdefault(attr_name, set())
for item in base_set:
# Skip items that have been overridden or that we
# shouldn't inherit
if item in new_set or (inherit and not inherit(item)):
continue
# Inherit the item
if inherit:
new_set.add(item)
items.append(item)
return items
|
[
"def",
"inherit_set",
"(",
"base",
",",
"namespace",
",",
"attr_name",
",",
"inherit",
"=",
"lambda",
"i",
":",
"True",
")",
":",
"items",
"=",
"[",
"]",
"# Get the sets to compare",
"base_set",
"=",
"getattr",
"(",
"base",
",",
"attr_name",
",",
"set",
"(",
")",
")",
"new_set",
"=",
"namespace",
".",
"setdefault",
"(",
"attr_name",
",",
"set",
"(",
")",
")",
"for",
"item",
"in",
"base_set",
":",
"# Skip items that have been overridden or that we",
"# shouldn't inherit",
"if",
"item",
"in",
"new_set",
"or",
"(",
"inherit",
"and",
"not",
"inherit",
"(",
"item",
")",
")",
":",
"continue",
"# Inherit the item",
"if",
"inherit",
":",
"new_set",
".",
"add",
"(",
"item",
")",
"items",
".",
"append",
"(",
"item",
")",
"return",
"items"
] |
Perform inheritance of sets. Returns a list of items that
were inherited, for post-processing.
:param base: The base class being considered; see
``iter_bases()``.
:param namespace: The dictionary of the new class being built.
:param attr_name: The name of the attribute containing the set
to be inherited.
:param inherit: Filtering function to determine if a given
item should be inherited. If ``False`` or
``None``, item will not be added, but will be
included in the returned items. If a
function, the function will be called with the
item, and the item will be added and included
in the items list only if the function returns
``True``. By default, all items are added and
included in the items list.
|
[
"Perform",
"inheritance",
"of",
"sets",
".",
"Returns",
"a",
"list",
"of",
"items",
"that",
"were",
"inherited",
"for",
"post",
"-",
"processing",
"."
] |
7161cf22ef2b194cfd4406e85b81e39a49104d9d
|
https://github.com/klmitch/metatools/blob/7161cf22ef2b194cfd4406e85b81e39a49104d9d/metatools.py#L134-L173
|
240,149
|
EnigmaBridge/client.py
|
ebclient/process_data.py
|
ProcessData.decrypt_result
|
def decrypt_result(self, *args, **kwargs):
"""
Decrypts ProcessData result with comm keys
:param args:
:param kwargs:
:return:
"""
if self.response is None:
raise ValueError('Empty response')
if self.response.response is None \
or 'result' not in self.response.response \
or self.response.response['result'] is None:
raise ValueError('No result data')
res_hex = self.response.response['result']
# Strip out the plaintext part
plain_length = bytes_to_long(from_hex(res_hex[0:4]))
if plain_length > 0:
res_hex = res_hex[4 + plain_length:]
else:
res_hex = res_hex[4:]
# Optionally strip trailing _... string
idx_trail = res_hex.find('_')
if idx_trail != -1:
res_hex = res_hex[0:idx_trail]
# Decode hex coding
res_bytes = from_hex(res_hex)
# Crypto stuff - check the length & padding
if len(res_bytes) < 16:
raise InvalidResponse('Result too short')
mac_given = res_bytes[-16:]
res_bytes = res_bytes[:-16]
# Check the MAC
mac_computed = cbc_mac(self.uo.mac_key, res_bytes)
if not str_equals(mac_given, mac_computed):
raise CryptoError('MAC invalid')
# Decrypt
decrypted = aes_dec(self.uo.enc_key, res_bytes)
if len(decrypted) < 1 + 4 + 8 or decrypted[0:1] != bchr(0xf1):
raise InvalidResponse('Invalid format')
self.resp_object_id = bytes_to_long(decrypted[1:5])
self.resp_nonce = EBUtils.demangle_nonce(decrypted[5:5 + EBConsts.FRESHNESS_NONCE_LEN])
self.decrypted = decrypted[5 + EBConsts.FRESHNESS_NONCE_LEN:]
self.decrypted = PKCS7.unpad(self.decrypted)
return self.response
|
python
|
def decrypt_result(self, *args, **kwargs):
"""
Decrypts ProcessData result with comm keys
:param args:
:param kwargs:
:return:
"""
if self.response is None:
raise ValueError('Empty response')
if self.response.response is None \
or 'result' not in self.response.response \
or self.response.response['result'] is None:
raise ValueError('No result data')
res_hex = self.response.response['result']
# Strip out the plaintext part
plain_length = bytes_to_long(from_hex(res_hex[0:4]))
if plain_length > 0:
res_hex = res_hex[4 + plain_length:]
else:
res_hex = res_hex[4:]
# Optionally strip trailing _... string
idx_trail = res_hex.find('_')
if idx_trail != -1:
res_hex = res_hex[0:idx_trail]
# Decode hex coding
res_bytes = from_hex(res_hex)
# Crypto stuff - check the length & padding
if len(res_bytes) < 16:
raise InvalidResponse('Result too short')
mac_given = res_bytes[-16:]
res_bytes = res_bytes[:-16]
# Check the MAC
mac_computed = cbc_mac(self.uo.mac_key, res_bytes)
if not str_equals(mac_given, mac_computed):
raise CryptoError('MAC invalid')
# Decrypt
decrypted = aes_dec(self.uo.enc_key, res_bytes)
if len(decrypted) < 1 + 4 + 8 or decrypted[0:1] != bchr(0xf1):
raise InvalidResponse('Invalid format')
self.resp_object_id = bytes_to_long(decrypted[1:5])
self.resp_nonce = EBUtils.demangle_nonce(decrypted[5:5 + EBConsts.FRESHNESS_NONCE_LEN])
self.decrypted = decrypted[5 + EBConsts.FRESHNESS_NONCE_LEN:]
self.decrypted = PKCS7.unpad(self.decrypted)
return self.response
|
[
"def",
"decrypt_result",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"self",
".",
"response",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"'Empty response'",
")",
"if",
"self",
".",
"response",
".",
"response",
"is",
"None",
"or",
"'result'",
"not",
"in",
"self",
".",
"response",
".",
"response",
"or",
"self",
".",
"response",
".",
"response",
"[",
"'result'",
"]",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"'No result data'",
")",
"res_hex",
"=",
"self",
".",
"response",
".",
"response",
"[",
"'result'",
"]",
"# Strip out the plaintext part",
"plain_length",
"=",
"bytes_to_long",
"(",
"from_hex",
"(",
"res_hex",
"[",
"0",
":",
"4",
"]",
")",
")",
"if",
"plain_length",
">",
"0",
":",
"res_hex",
"=",
"res_hex",
"[",
"4",
"+",
"plain_length",
":",
"]",
"else",
":",
"res_hex",
"=",
"res_hex",
"[",
"4",
":",
"]",
"# Optionally strip trailing _... string",
"idx_trail",
"=",
"res_hex",
".",
"find",
"(",
"'_'",
")",
"if",
"idx_trail",
"!=",
"-",
"1",
":",
"res_hex",
"=",
"res_hex",
"[",
"0",
":",
"idx_trail",
"]",
"# Decode hex coding",
"res_bytes",
"=",
"from_hex",
"(",
"res_hex",
")",
"# Crypto stuff - check the length & padding",
"if",
"len",
"(",
"res_bytes",
")",
"<",
"16",
":",
"raise",
"InvalidResponse",
"(",
"'Result too short'",
")",
"mac_given",
"=",
"res_bytes",
"[",
"-",
"16",
":",
"]",
"res_bytes",
"=",
"res_bytes",
"[",
":",
"-",
"16",
"]",
"# Check the MAC",
"mac_computed",
"=",
"cbc_mac",
"(",
"self",
".",
"uo",
".",
"mac_key",
",",
"res_bytes",
")",
"if",
"not",
"str_equals",
"(",
"mac_given",
",",
"mac_computed",
")",
":",
"raise",
"CryptoError",
"(",
"'MAC invalid'",
")",
"# Decrypt",
"decrypted",
"=",
"aes_dec",
"(",
"self",
".",
"uo",
".",
"enc_key",
",",
"res_bytes",
")",
"if",
"len",
"(",
"decrypted",
")",
"<",
"1",
"+",
"4",
"+",
"8",
"or",
"decrypted",
"[",
"0",
":",
"1",
"]",
"!=",
"bchr",
"(",
"0xf1",
")",
":",
"raise",
"InvalidResponse",
"(",
"'Invalid format'",
")",
"self",
".",
"resp_object_id",
"=",
"bytes_to_long",
"(",
"decrypted",
"[",
"1",
":",
"5",
"]",
")",
"self",
".",
"resp_nonce",
"=",
"EBUtils",
".",
"demangle_nonce",
"(",
"decrypted",
"[",
"5",
":",
"5",
"+",
"EBConsts",
".",
"FRESHNESS_NONCE_LEN",
"]",
")",
"self",
".",
"decrypted",
"=",
"decrypted",
"[",
"5",
"+",
"EBConsts",
".",
"FRESHNESS_NONCE_LEN",
":",
"]",
"self",
".",
"decrypted",
"=",
"PKCS7",
".",
"unpad",
"(",
"self",
".",
"decrypted",
")",
"return",
"self",
".",
"response"
] |
Decrypts ProcessData result with comm keys
:param args:
:param kwargs:
:return:
|
[
"Decrypts",
"ProcessData",
"result",
"with",
"comm",
"keys"
] |
0fafe3902da394da88e9f960751d695ca65bbabd
|
https://github.com/EnigmaBridge/client.py/blob/0fafe3902da394da88e9f960751d695ca65bbabd/ebclient/process_data.py#L94-L147
|
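A minimal sketch of the envelope handling that `decrypt_result` performs, using only the standard library. The 4-hex-digit length prefix, the optional trailing '_' suffix, and the 16-byte MAC tag are taken from the code above; `hmac` with MD5 is only a stand-in for the real `cbc_mac` helper (which is not HMAC), and the AES decryption step is omitted, so this illustrates the parsing and the constant-time tag comparison, not the actual cryptography.

import binascii
import hashlib
import hmac

def split_envelope(res_hex, mac_key):
    # First 4 hex digits encode the length of a plaintext prefix to skip
    plain_length = int(res_hex[0:4], 16)
    res_hex = res_hex[4 + plain_length:] if plain_length > 0 else res_hex[4:]
    # Optionally strip a trailing '_...' suffix
    idx_trail = res_hex.find('_')
    if idx_trail != -1:
        res_hex = res_hex[:idx_trail]
    res_bytes = binascii.unhexlify(res_hex)
    if len(res_bytes) < 16:
        raise ValueError('Result too short')
    body, tag = res_bytes[:-16], res_bytes[-16:]
    # Stand-in integrity check; compare_digest avoids timing leaks,
    # which is presumably what str_equals is for in the real code
    expected = hmac.new(mac_key, body, hashlib.md5).digest()
    if not hmac.compare_digest(tag, expected):
        raise ValueError('MAC invalid')
    return body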
240,150
|
langloisjp/tstore
|
tstore/tstore.py
|
TStore.get
|
def get(self, cls, rid):
"""Return record of given type with key `rid`
>>> s = teststore()
>>> s.create('tstoretest', {'id': '1', 'name': 'Toto'})
>>> r = s.get('tstoretest', '1')
>>> r['name']
'Toto'
>>> s.get('badcls', '1')
Traceback (most recent call last):
...
ValueError: Unsupported record type "badcls"
>>> s.get('tstoretest', '2')
Traceback (most recent call last):
...
KeyError: 'No tstoretest record with id 2'
"""
self.validate_record_type(cls)
rows = self.db.select(cls, where={ID: rid}, limit=1)
if not rows:
raise KeyError('No {} record with id {}'.format(cls, rid))
return rows[0]
|
python
|
def get(self, cls, rid):
"""Return record of given type with key `rid`
>>> s = teststore()
>>> s.create('tstoretest', {'id': '1', 'name': 'Toto'})
>>> r = s.get('tstoretest', '1')
>>> r['name']
'Toto'
>>> s.get('badcls', '1')
Traceback (most recent call last):
...
ValueError: Unsupported record type "badcls"
>>> s.get('tstoretest', '2')
Traceback (most recent call last):
...
KeyError: 'No tstoretest record with id 2'
"""
self.validate_record_type(cls)
rows = self.db.select(cls, where={ID: rid}, limit=1)
if not rows:
raise KeyError('No {} record with id {}'.format(cls, rid))
return rows[0]
|
[
"def",
"get",
"(",
"self",
",",
"cls",
",",
"rid",
")",
":",
"self",
".",
"validate_record_type",
"(",
"cls",
")",
"rows",
"=",
"self",
".",
"db",
".",
"select",
"(",
"cls",
",",
"where",
"=",
"{",
"ID",
":",
"rid",
"}",
",",
"limit",
"=",
"1",
")",
"if",
"not",
"rows",
":",
"raise",
"KeyError",
"(",
"'No {} record with id {}'",
".",
"format",
"(",
"cls",
",",
"rid",
")",
")",
"return",
"rows",
"[",
"0",
"]"
] |
Return record of given type with key `rid`
>>> s = teststore()
>>> s.create('tstoretest', {'id': '1', 'name': 'Toto'})
>>> r = s.get('tstoretest', '1')
>>> r['name']
'Toto'
>>> s.get('badcls', '1')
Traceback (most recent call last):
...
ValueError: Unsupported record type "badcls"
>>> s.get('tstoretest', '2')
Traceback (most recent call last):
...
KeyError: 'No tstoretest record with id 2'
|
[
"Return",
"record",
"of",
"given",
"type",
"with",
"key",
"rid"
] |
b438f8aaf09117bf6f922ba06ae5cf46b7b97a57
|
https://github.com/langloisjp/tstore/blob/b438f8aaf09117bf6f922ba06ae5cf46b7b97a57/tstore/tstore.py#L118-L139
|
240,151
|
langloisjp/tstore
|
tstore/tstore.py
|
TStore.create
|
def create(self, cls, record, user='undefined'):
"""Persist new record
>>> s = teststore()
>>> s.create('tstoretest', {'id': '1', 'name': 'Toto'})
>>> s.create('tstoretest', {'id': '2', 'name': 'Tata'}, user='jane')
>>> r = s.get('tstoretest', '2')
>>> r[CREATOR]
'jane'
>>> s.create('badcls', {'id': '1', 'name': 'Toto'})
Traceback (most recent call last):
...
ValueError: Unsupported record type "badcls"
>>> s.create('tstoretest', {'id': '1', 'name': 'Joe'})
Traceback (most recent call last):
...
KeyError: 'There is already a record for tstoretest/1'
>>> s.create('tstoretest', {'id': '2', 'badfield': 'Joe'})
Traceback (most recent call last):
...
ValueError: Undefined field
>>> s.create('tstoretest', {'id': '2', 'age': 'bad'})
Traceback (most recent call last):
...
ValueError: Bad record (INVALID_TEXT_REPRESENTATION)
"""
self.validate_record(cls, record)
record[CREATION_DATE] = record[UPDATE_DATE] = self.nowstr()
record[CREATOR] = record[UPDATER] = user
try:
return self.db.insert(cls, record)
except (psycopg2.IntegrityError, psycopg2.ProgrammingError,
psycopg2.DataError) as error:
logging.warning("{} {}: {}".format(
error.__class__.__name__,
psycopg2.errorcodes.lookup(error.pgcode), error.pgerror))
if error.pgcode == psycopg2.errorcodes.UNIQUE_VIOLATION:
raise KeyError('There is already a record for {}/{}'.format(
cls, record[ID]))
elif error.pgcode == psycopg2.errorcodes.UNDEFINED_COLUMN:
raise ValueError('Undefined field')
else:
raise ValueError('Bad record ({})'.format(
psycopg2.errorcodes.lookup(error.pgcode)))
|
python
|
def create(self, cls, record, user='undefined'):
"""Persist new record
>>> s = teststore()
>>> s.create('tstoretest', {'id': '1', 'name': 'Toto'})
>>> s.create('tstoretest', {'id': '2', 'name': 'Tata'}, user='jane')
>>> r = s.get('tstoretest', '2')
>>> r[CREATOR]
'jane'
>>> s.create('badcls', {'id': '1', 'name': 'Toto'})
Traceback (most recent call last):
...
ValueError: Unsupported record type "badcls"
>>> s.create('tstoretest', {'id': '1', 'name': 'Joe'})
Traceback (most recent call last):
...
KeyError: 'There is already a record for tstoretest/1'
>>> s.create('tstoretest', {'id': '2', 'badfield': 'Joe'})
Traceback (most recent call last):
...
ValueError: Undefined field
>>> s.create('tstoretest', {'id': '2', 'age': 'bad'})
Traceback (most recent call last):
...
ValueError: Bad record (INVALID_TEXT_REPRESENTATION)
"""
self.validate_record(cls, record)
record[CREATION_DATE] = record[UPDATE_DATE] = self.nowstr()
record[CREATOR] = record[UPDATER] = user
try:
return self.db.insert(cls, record)
except (psycopg2.IntegrityError, psycopg2.ProgrammingError,
psycopg2.DataError) as error:
logging.warning("{} {}: {}".format(
error.__class__.__name__,
psycopg2.errorcodes.lookup(error.pgcode), error.pgerror))
if error.pgcode == psycopg2.errorcodes.UNIQUE_VIOLATION:
raise KeyError('There is already a record for {}/{}'.format(
cls, record[ID]))
elif error.pgcode == psycopg2.errorcodes.UNDEFINED_COLUMN:
raise ValueError('Undefined field')
else:
raise ValueError('Bad record ({})'.format(
psycopg2.errorcodes.lookup(error.pgcode)))
|
[
"def",
"create",
"(",
"self",
",",
"cls",
",",
"record",
",",
"user",
"=",
"'undefined'",
")",
":",
"self",
".",
"validate_record",
"(",
"cls",
",",
"record",
")",
"record",
"[",
"CREATION_DATE",
"]",
"=",
"record",
"[",
"UPDATE_DATE",
"]",
"=",
"self",
".",
"nowstr",
"(",
")",
"record",
"[",
"CREATOR",
"]",
"=",
"record",
"[",
"UPDATER",
"]",
"=",
"user",
"try",
":",
"return",
"self",
".",
"db",
".",
"insert",
"(",
"cls",
",",
"record",
")",
"except",
"(",
"psycopg2",
".",
"IntegrityError",
",",
"psycopg2",
".",
"ProgrammingError",
",",
"psycopg2",
".",
"DataError",
")",
"as",
"error",
":",
"logging",
".",
"warning",
"(",
"\"{} {}: {}\"",
".",
"format",
"(",
"error",
".",
"__class__",
".",
"__name__",
",",
"psycopg2",
".",
"errorcodes",
".",
"lookup",
"(",
"error",
".",
"pgcode",
")",
",",
"error",
".",
"pgerror",
")",
")",
"if",
"error",
".",
"pgcode",
"==",
"psycopg2",
".",
"errorcodes",
".",
"UNIQUE_VIOLATION",
":",
"raise",
"KeyError",
"(",
"'There is already a record for {}/{}'",
".",
"format",
"(",
"cls",
",",
"record",
"[",
"ID",
"]",
")",
")",
"elif",
"error",
".",
"pgcode",
"==",
"psycopg2",
".",
"errorcodes",
".",
"UNDEFINED_COLUMN",
":",
"raise",
"ValueError",
"(",
"'Undefined field'",
")",
"else",
":",
"raise",
"ValueError",
"(",
"'Bad record ({})'",
".",
"format",
"(",
"psycopg2",
".",
"errorcodes",
".",
"lookup",
"(",
"error",
".",
"pgcode",
")",
")",
")"
] |
Persist new record
>>> s = teststore()
>>> s.create('tstoretest', {'id': '1', 'name': 'Toto'})
>>> s.create('tstoretest', {'id': '2', 'name': 'Tata'}, user='jane')
>>> r = s.get('tstoretest', '2')
>>> r[CREATOR]
'jane'
>>> s.create('badcls', {'id': '1', 'name': 'Toto'})
Traceback (most recent call last):
...
ValueError: Unsupported record type "badcls"
>>> s.create('tstoretest', {'id': '1', 'name': 'Joe'})
Traceback (most recent call last):
...
KeyError: 'There is already a record for tstoretest/1'
>>> s.create('tstoretest', {'id': '2', 'badfield': 'Joe'})
Traceback (most recent call last):
...
ValueError: Undefined field
>>> s.create('tstoretest', {'id': '2', 'age': 'bad'})
Traceback (most recent call last):
...
ValueError: Bad record (INVALID_TEXT_REPRESENTATION)
|
[
"Persist",
"new",
"record"
] |
b438f8aaf09117bf6f922ba06ae5cf46b7b97a57
|
https://github.com/langloisjp/tstore/blob/b438f8aaf09117bf6f922ba06ae5cf46b7b97a57/tstore/tstore.py#L141-L184
|
240,152
|
langloisjp/tstore
|
tstore/tstore.py
|
TStore.update
|
def update(self, cls, rid, partialrecord, user='undefined'):
"""Update existing record
>>> s = teststore()
>>> s.create('tstoretest', {'id': '1', 'name': 'Toto'})
>>> r = s.get('tstoretest', '1')
>>> r['age']
>>> s.update('tstoretest', '1', {'age': 25})
>>> r = s.get('tstoretest', '1')
>>> r['age']
25
>>> s.update('tstoretest', '1', {'age': 30}, user='jane')
>>> r = s.get('tstoretest', '1')
>>> r[UPDATER]
'jane'
>>> s.update('tstoretest', '2', {'age': 25})
Traceback (most recent call last):
...
KeyError: 'No such record'
>>> s.create('tstoretest', {'id': '2', 'name': 'Joe'})
>>> s.update('tstoretest', '2', {'id': '1'})
Traceback (most recent call last):
...
KeyError: 'There is already a record for tstoretest/1'
>>> s.update('tstoretest', '2', {'badcol': '1'})
Traceback (most recent call last):
...
ValueError: Undefined field
>>> s.update('tstoretest', '2', {'age': 'hello'})
Traceback (most recent call last):
...
ValueError: Bad update (INVALID_TEXT_REPRESENTATION)
"""
self.validate_partial_record(cls, partialrecord)
partialrecord[UPDATE_DATE] = self.nowstr()
partialrecord[UPDATER] = user
try:
updatecount = self.db.update(cls, partialrecord, where={ID: rid})
if updatecount < 1:
raise KeyError('No such record')
except (psycopg2.IntegrityError, psycopg2.ProgrammingError,
psycopg2.DataError) as error:
if error.pgcode == psycopg2.errorcodes.UNIQUE_VIOLATION:
raise KeyError('There is already a record for {}/{}'.format(
cls, partialrecord[ID]))
elif error.pgcode == psycopg2.errorcodes.UNDEFINED_COLUMN:
raise ValueError('Undefined field')
else:
raise ValueError('Bad update ({})'.format(
psycopg2.errorcodes.lookup(error.pgcode)))
|
python
|
def update(self, cls, rid, partialrecord, user='undefined'):
"""Update existing record
>>> s = teststore()
>>> s.create('tstoretest', {'id': '1', 'name': 'Toto'})
>>> r = s.get('tstoretest', '1')
>>> r['age']
>>> s.update('tstoretest', '1', {'age': 25})
>>> r = s.get('tstoretest', '1')
>>> r['age']
25
>>> s.update('tstoretest', '1', {'age': 30}, user='jane')
>>> r = s.get('tstoretest', '1')
>>> r[UPDATER]
'jane'
>>> s.update('tstoretest', '2', {'age': 25})
Traceback (most recent call last):
...
KeyError: 'No such record'
>>> s.create('tstoretest', {'id': '2', 'name': 'Joe'})
>>> s.update('tstoretest', '2', {'id': '1'})
Traceback (most recent call last):
...
KeyError: 'There is already a record for tstoretest/1'
>>> s.update('tstoretest', '2', {'badcol': '1'})
Traceback (most recent call last):
...
ValueError: Undefined field
>>> s.update('tstoretest', '2', {'age': 'hello'})
Traceback (most recent call last):
...
ValueError: Bad update (INVALID_TEXT_REPRESENTATION)
"""
self.validate_partial_record(cls, partialrecord)
partialrecord[UPDATE_DATE] = self.nowstr()
partialrecord[UPDATER] = user
try:
updatecount = self.db.update(cls, partialrecord, where={ID: rid})
if updatecount < 1:
raise KeyError('No such record')
except (psycopg2.IntegrityError, psycopg2.ProgrammingError,
psycopg2.DataError) as error:
if error.pgcode == psycopg2.errorcodes.UNIQUE_VIOLATION:
raise KeyError('There is already a record for {}/{}'.format(
cls, partialrecord[ID]))
elif error.pgcode == psycopg2.errorcodes.UNDEFINED_COLUMN:
raise ValueError('Undefined field')
else:
raise ValueError('Bad update ({})'.format(
psycopg2.errorcodes.lookup(error.pgcode)))
|
[
"def",
"update",
"(",
"self",
",",
"cls",
",",
"rid",
",",
"partialrecord",
",",
"user",
"=",
"'undefined'",
")",
":",
"self",
".",
"validate_partial_record",
"(",
"cls",
",",
"partialrecord",
")",
"partialrecord",
"[",
"UPDATE_DATE",
"]",
"=",
"self",
".",
"nowstr",
"(",
")",
"partialrecord",
"[",
"UPDATER",
"]",
"=",
"user",
"try",
":",
"updatecount",
"=",
"self",
".",
"db",
".",
"update",
"(",
"cls",
",",
"partialrecord",
",",
"where",
"=",
"{",
"ID",
":",
"rid",
"}",
")",
"if",
"updatecount",
"<",
"1",
":",
"raise",
"KeyError",
"(",
"'No such record'",
")",
"except",
"(",
"psycopg2",
".",
"IntegrityError",
",",
"psycopg2",
".",
"ProgrammingError",
",",
"psycopg2",
".",
"DataError",
")",
"as",
"error",
":",
"if",
"error",
".",
"pgcode",
"==",
"psycopg2",
".",
"errorcodes",
".",
"UNIQUE_VIOLATION",
":",
"raise",
"KeyError",
"(",
"'There is already a record for {}/{}'",
".",
"format",
"(",
"cls",
",",
"partialrecord",
"[",
"ID",
"]",
")",
")",
"elif",
"error",
".",
"pgcode",
"==",
"psycopg2",
".",
"errorcodes",
".",
"UNDEFINED_COLUMN",
":",
"raise",
"ValueError",
"(",
"'Undefined field'",
")",
"else",
":",
"raise",
"ValueError",
"(",
"'Bad update ({})'",
".",
"format",
"(",
"psycopg2",
".",
"errorcodes",
".",
"lookup",
"(",
"error",
".",
"pgcode",
")",
")",
")"
] |
Update existing record
>>> s = teststore()
>>> s.create('tstoretest', {'id': '1', 'name': 'Toto'})
>>> r = s.get('tstoretest', '1')
>>> r['age']
>>> s.update('tstoretest', '1', {'age': 25})
>>> r = s.get('tstoretest', '1')
>>> r['age']
25
>>> s.update('tstoretest', '1', {'age': 30}, user='jane')
>>> r = s.get('tstoretest', '1')
>>> r[UPDATER]
'jane'
>>> s.update('tstoretest', '2', {'age': 25})
Traceback (most recent call last):
...
KeyError: 'No such record'
>>> s.create('tstoretest', {'id': '2', 'name': 'Joe'})
>>> s.update('tstoretest', '2', {'id': '1'})
Traceback (most recent call last):
...
KeyError: 'There is already a record for tstoretest/1'
>>> s.update('tstoretest', '2', {'badcol': '1'})
Traceback (most recent call last):
...
ValueError: Undefined field
>>> s.update('tstoretest', '2', {'age': 'hello'})
Traceback (most recent call last):
...
ValueError: Bad update (INVALID_TEXT_REPRESENTATION)
|
[
"Update",
"existing",
"record"
] |
b438f8aaf09117bf6f922ba06ae5cf46b7b97a57
|
https://github.com/langloisjp/tstore/blob/b438f8aaf09117bf6f922ba06ae5cf46b7b97a57/tstore/tstore.py#L186-L235
|
240,153
|
langloisjp/tstore
|
tstore/tstore.py
|
TStore.delete
|
def delete(self, cls, rid, user='undefined'):
"""
Delete a record by id.
`user` currently unused. Would be used with soft deletes.
>>> s = teststore()
>>> s.create('tstoretest', {'id': '1', 'name': 'Toto'})
>>> len(s.list('tstoretest'))
1
>>> s.delete('tstoretest', '1')
>>> len(s.list('tstoretest'))
0
>>> s.delete('tstoretest', '1')
Traceback (most recent call last):
...
KeyError: 'No record tstoretest/1'
"""
self.validate_record_type(cls)
deletedcount = self.db.delete(cls, {ID: rid})
if deletedcount < 1:
raise KeyError('No record {}/{}'.format(cls, rid))
|
python
|
def delete(self, cls, rid, user='undefined'):
"""
Delete a record by id.
`user` currently unused. Would be used with soft deletes.
>>> s = teststore()
>>> s.create('tstoretest', {'id': '1', 'name': 'Toto'})
>>> len(s.list('tstoretest'))
1
>>> s.delete('tstoretest', '1')
>>> len(s.list('tstoretest'))
0
>>> s.delete('tstoretest', '1')
Traceback (most recent call last):
...
KeyError: 'No record tstoretest/1'
"""
self.validate_record_type(cls)
deletedcount = self.db.delete(cls, {ID: rid})
if deletedcount < 1:
raise KeyError('No record {}/{}'.format(cls, rid))
|
[
"def",
"delete",
"(",
"self",
",",
"cls",
",",
"rid",
",",
"user",
"=",
"'undefined'",
")",
":",
"self",
".",
"validate_record_type",
"(",
"cls",
")",
"deletedcount",
"=",
"self",
".",
"db",
".",
"delete",
"(",
"cls",
",",
"{",
"ID",
":",
"rid",
"}",
")",
"if",
"deletedcount",
"<",
"1",
":",
"raise",
"KeyError",
"(",
"'No record {}/{}'",
".",
"format",
"(",
"cls",
",",
"rid",
")",
")"
] |
Delete a record by id.
`user` currently unused. Would be used with soft deletes.
>>> s = teststore()
>>> s.create('tstoretest', {'id': '1', 'name': 'Toto'})
>>> len(s.list('tstoretest'))
1
>>> s.delete('tstoretest', '1')
>>> len(s.list('tstoretest'))
0
>>> s.delete('tstoretest', '1')
Traceback (most recent call last):
...
KeyError: 'No record tstoretest/1'
|
[
"Delete",
"a",
"record",
"by",
"id",
"."
] |
b438f8aaf09117bf6f922ba06ae5cf46b7b97a57
|
https://github.com/langloisjp/tstore/blob/b438f8aaf09117bf6f922ba06ae5cf46b7b97a57/tstore/tstore.py#L248-L269
|
240,154
|
langloisjp/tstore
|
tstore/tstore.py
|
TStore.validate_record_type
|
def validate_record_type(self, cls):
"""
Validate given record is acceptable.
>>> s = teststore()
>>> s.validate_record_type('tstoretest')
>>> s.validate_record_type('bad')
Traceback (most recent call last):
...
ValueError: Unsupported record type "bad"
"""
if self.record_types and cls not in self.record_types:
raise ValueError('Unsupported record type "' + cls + '"')
|
python
|
def validate_record_type(self, cls):
"""
Validate given record is acceptable.
>>> s = teststore()
>>> s.validate_record_type('tstoretest')
>>> s.validate_record_type('bad')
Traceback (most recent call last):
...
ValueError: Unsupported record type "bad"
"""
if self.record_types and cls not in self.record_types:
raise ValueError('Unsupported record type "' + cls + '"')
|
[
"def",
"validate_record_type",
"(",
"self",
",",
"cls",
")",
":",
"if",
"self",
".",
"record_types",
"and",
"cls",
"not",
"in",
"self",
".",
"record_types",
":",
"raise",
"ValueError",
"(",
"'Unsupported record type \"'",
"+",
"cls",
"+",
"'\"'",
")"
] |
Validate given record is acceptable.
>>> s = teststore()
>>> s.validate_record_type('tstoretest')
>>> s.validate_record_type('bad')
Traceback (most recent call last):
...
ValueError: Unsupported record type "bad"
|
[
"Validate",
"given",
"record",
"is",
"acceptable",
"."
] |
b438f8aaf09117bf6f922ba06ae5cf46b7b97a57
|
https://github.com/langloisjp/tstore/blob/b438f8aaf09117bf6f922ba06ae5cf46b7b97a57/tstore/tstore.py#L271-L283
|
240,155
|
langloisjp/tstore
|
tstore/tstore.py
|
TStore.as_record
|
def as_record(self, cls, content_type, strdata):
"""
Returns a record from serialized string representation.
>>> s = teststore()
>>> s.as_record('tstoretest', 'application/json',
... '{"id": "1", "name": "Toto"}')
{u'id': u'1', u'name': u'Toto'}
"""
self.validate_record_type(cls)
parsedrecord = self.deserialize(content_type, strdata)
return self.post_process_record(cls, parsedrecord)
|
python
|
def as_record(self, cls, content_type, strdata):
"""
Returns a record from serialized string representation.
>>> s = teststore()
>>> s.as_record('tstoretest', 'application/json',
... '{"id": "1", "name": "Toto"}')
{u'id': u'1', u'name': u'Toto'}
"""
self.validate_record_type(cls)
parsedrecord = self.deserialize(content_type, strdata)
return self.post_process_record(cls, parsedrecord)
|
[
"def",
"as_record",
"(",
"self",
",",
"cls",
",",
"content_type",
",",
"strdata",
")",
":",
"self",
".",
"validate_record_type",
"(",
"cls",
")",
"parsedrecord",
"=",
"self",
".",
"deserialize",
"(",
"content_type",
",",
"strdata",
")",
"return",
"self",
".",
"post_process_record",
"(",
"cls",
",",
"parsedrecord",
")"
] |
Returns a record from serialized string representation.
>>> s = teststore()
>>> s.as_record('tstoretest', 'application/json',
... '{"id": "1", "name": "Toto"}')
{u'id': u'1', u'name': u'Toto'}
|
[
"Returns",
"a",
"record",
"from",
"serialized",
"string",
"representation",
"."
] |
b438f8aaf09117bf6f922ba06ae5cf46b7b97a57
|
https://github.com/langloisjp/tstore/blob/b438f8aaf09117bf6f922ba06ae5cf46b7b97a57/tstore/tstore.py#L285-L296
|
240,156
|
langloisjp/tstore
|
tstore/tstore.py
|
TStore.serialize
|
def serialize(self, cls, record):
"""
Serialize the record to JSON. cls unused in this implementation.
>>> s = teststore()
>>> s.serialize('tstoretest', {'id': '1', 'name': 'Toto'})
'{"id": "1", "name": "Toto"}'
"""
return json.dumps(record, cls=self.encoder)
|
python
|
def serialize(self, cls, record):
"""
Serialize the record to JSON. cls unused in this implementation.
>>> s = teststore()
>>> s.serialize('tstoretest', {'id': '1', 'name': 'Toto'})
'{"id": "1", "name": "Toto"}'
"""
return json.dumps(record, cls=self.encoder)
|
[
"def",
"serialize",
"(",
"self",
",",
"cls",
",",
"record",
")",
":",
"return",
"json",
".",
"dumps",
"(",
"record",
",",
"cls",
"=",
"self",
".",
"encoder",
")"
] |
Serialize the record to JSON. cls unused in this implementation.
>>> s = teststore()
>>> s.serialize('tstoretest', {'id': '1', 'name': 'Toto'})
'{"id": "1", "name": "Toto"}'
|
[
"Serialize",
"the",
"record",
"to",
"JSON",
".",
"cls",
"unused",
"in",
"this",
"implementation",
"."
] |
b438f8aaf09117bf6f922ba06ae5cf46b7b97a57
|
https://github.com/langloisjp/tstore/blob/b438f8aaf09117bf6f922ba06ae5cf46b7b97a57/tstore/tstore.py#L298-L306
|
240,157
|
langloisjp/tstore
|
tstore/tstore.py
|
TStore.deserialize
|
def deserialize(self, content_type, strdata):
"""Deserialize string of given content type.
`self` unused in this implementation.
>>> s = teststore()
>>> s.deserialize('application/json', '{"id": "1", "name": "Toto"}')
{u'id': u'1', u'name': u'Toto'}
>>> s.deserialize('text/plain', 'id: 1, name: Toto')
Traceback (most recent call last):
...
ValueError: Unsupported content type "text/plain"
"""
if content_type != 'application/json':
raise ValueError('Unsupported content type "' + content_type + '"')
return json.loads(strdata)
|
python
|
def deserialize(self, content_type, strdata):
"""Deserialize string of given content type.
`self` unused in this implementation.
>>> s = teststore()
>>> s.deserialize('application/json', '{"id": "1", "name": "Toto"}')
{u'id': u'1', u'name': u'Toto'}
>>> s.deserialize('text/plain', 'id: 1, name: Toto')
Traceback (most recent call last):
...
ValueError: Unsupported content type "text/plain"
"""
if content_type != 'application/json':
raise ValueError('Unsupported content type "' + content_type + '"')
return json.loads(strdata)
|
[
"def",
"deserialize",
"(",
"self",
",",
"content_type",
",",
"strdata",
")",
":",
"if",
"content_type",
"!=",
"'application/json'",
":",
"raise",
"ValueError",
"(",
"'Unsupported content type \"'",
"+",
"content_type",
"+",
"'\"'",
")",
"return",
"json",
".",
"loads",
"(",
"strdata",
")"
] |
Deserialize string of given content type.
`self` unused in this implementation.
>>> s = teststore()
>>> s.deserialize('application/json', '{"id": "1", "name": "Toto"}')
{u'id': u'1', u'name': u'Toto'}
>>> s.deserialize('text/plain', 'id: 1, name: Toto')
Traceback (most recent call last):
...
ValueError: Unsupported content type "text/plain"
|
[
"Deserialize",
"string",
"of",
"given",
"content",
"type",
"."
] |
b438f8aaf09117bf6f922ba06ae5cf46b7b97a57
|
https://github.com/langloisjp/tstore/blob/b438f8aaf09117bf6f922ba06ae5cf46b7b97a57/tstore/tstore.py#L308-L323
|
240,158
|
klmitch/tendril
|
tendril/framers.py
|
IdentityFramer.frameify
|
def frameify(self, state, data):
"""Yield the data as a single frame."""
try:
yield state.recv_buf + data
except FrameSwitch:
pass
finally:
state.recv_buf = ''
|
python
|
def frameify(self, state, data):
"""Yield the data as a single frame."""
try:
yield state.recv_buf + data
except FrameSwitch:
pass
finally:
state.recv_buf = ''
|
[
"def",
"frameify",
"(",
"self",
",",
"state",
",",
"data",
")",
":",
"try",
":",
"yield",
"state",
".",
"recv_buf",
"+",
"data",
"except",
"FrameSwitch",
":",
"pass",
"finally",
":",
"state",
".",
"recv_buf",
"=",
"''"
] |
Yield the data as a single frame.
|
[
"Yield",
"the",
"data",
"as",
"a",
"single",
"frame",
"."
] |
207102c83e88f8f1fa7ba605ef0aab2ae9078b36
|
https://github.com/klmitch/tendril/blob/207102c83e88f8f1fa7ba605ef0aab2ae9078b36/tendril/framers.py#L167-L175
|
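The framers above are generators driven with a per-connection state object carrying a `recv_buf` attribute. A hedged sketch of that driving loop, with a stand-in state class (the real tendril state type is not shown in this file) and assuming `IdentityFramer` constructs with no arguments:

class _State(object):
    # Stand-in for tendril's per-connection framer state
    recv_buf = ''

state = _State()
framer = IdentityFramer()
print(list(framer.frameify(state, 'hello')))  # ['hello']
print(repr(state.recv_buf))                   # '' -- buffer cleared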
240,159
|
klmitch/tendril
|
tendril/framers.py
|
ChunkFramer.frameify
|
def frameify(self, state, data):
"""Yield chunk data as a single frame, and buffer the rest."""
# If we've pulled in all the chunk data, buffer the data
if state.chunk_remaining <= 0:
state.recv_buf += data
return
# Pull in any partially-processed data
data = state.recv_buf + data
# Determine how much belongs to the chunk
if len(data) <= state.chunk_remaining:
chunk = data
data = ''
else:
# Pull out only what's part of the chunk
chunk = data[:state.chunk_remaining]
data = data[state.chunk_remaining:]
# Update the state
state.recv_buf = data
state.chunk_remaining -= len(chunk)
# Yield the chunk
try:
yield chunk
except FrameSwitch:
pass
|
python
|
def frameify(self, state, data):
"""Yield chunk data as a single frame, and buffer the rest."""
# If we've pulled in all the chunk data, buffer the data
if state.chunk_remaining <= 0:
state.recv_buf += data
return
# Pull in any partially-processed data
data = state.recv_buf + data
# Determine how much belongs to the chunk
if len(data) <= state.chunk_remaining:
chunk = data
data = ''
else:
# Pull out only what's part of the chunk
chunk = data[:state.chunk_remaining]
data = data[state.chunk_remaining:]
# Update the state
state.recv_buf = data
state.chunk_remaining -= len(chunk)
# Yield the chunk
try:
yield chunk
except FrameSwitch:
pass
|
[
"def",
"frameify",
"(",
"self",
",",
"state",
",",
"data",
")",
":",
"# If we've pulled in all the chunk data, buffer the data",
"if",
"state",
".",
"chunk_remaining",
"<=",
"0",
":",
"state",
".",
"recv_buf",
"+=",
"data",
"return",
"# Pull in any partially-processed data",
"data",
"=",
"state",
".",
"recv_buf",
"+",
"data",
"# Determine how much belongs to the chunk",
"if",
"len",
"(",
"data",
")",
"<=",
"state",
".",
"chunk_remaining",
":",
"chunk",
"=",
"data",
"data",
"=",
"''",
"else",
":",
"# Pull out only what's part of the chunk",
"chunk",
"=",
"data",
"[",
":",
"state",
".",
"chunk_remaining",
"]",
"data",
"=",
"data",
"[",
"state",
".",
"chunk_remaining",
":",
"]",
"# Update the state",
"state",
".",
"recv_buf",
"=",
"data",
"state",
".",
"chunk_remaining",
"-=",
"len",
"(",
"chunk",
")",
"# Yield the chunk",
"try",
":",
"yield",
"chunk",
"except",
"FrameSwitch",
":",
"pass"
] |
Yield chunk data as a single frame, and buffer the rest.
|
[
"Yield",
"chunk",
"data",
"as",
"a",
"single",
"frame",
"and",
"buffer",
"the",
"rest",
"."
] |
207102c83e88f8f1fa7ba605ef0aab2ae9078b36
|
https://github.com/klmitch/tendril/blob/207102c83e88f8f1fa7ba605ef0aab2ae9078b36/tendril/framers.py#L207-L235
|
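Same driving pattern for the chunk framer: `chunk_remaining` counts how many bytes still belong to the current chunk, and anything beyond it is buffered. A hedged sketch with the stand-in state from the previous sketch, again assuming a no-argument constructor:

class _State(object):
    recv_buf = ''
    chunk_remaining = 5

state = _State()
framer = ChunkFramer()
print(list(framer.frameify(state, 'abcdefgh')))     # ['abcde']
print(repr(state.recv_buf), state.chunk_remaining)  # 'fgh' 0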
240,160
|
klmitch/tendril
|
tendril/framers.py
|
LineFramer.frameify
|
def frameify(self, state, data):
"""Split data into a sequence of lines."""
# Pull in any partially-processed data
data = state.recv_buf + data
# Loop over the data
while data:
line, sep, rest = data.partition('\n')
# Did we have a whole line?
if sep != '\n':
break
# OK, update the data...
data = rest
# Now, strip off carriage return, if there is one
if self.carriage_return and line.endswith('\r'):
line = line[:-1]
# Yield the line
try:
yield line
except FrameSwitch:
break
# Put any remaining data back into the buffer
state.recv_buf = data
|
python
|
def frameify(self, state, data):
"""Split data into a sequence of lines."""
# Pull in any partially-processed data
data = state.recv_buf + data
# Loop over the data
while data:
line, sep, rest = data.partition('\n')
# Did we have a whole line?
if sep != '\n':
break
# OK, update the data...
data = rest
# Now, strip off carriage return, if there is one
if self.carriage_return and line.endswith('\r'):
line = line[:-1]
# Yield the line
try:
yield line
except FrameSwitch:
break
# Put any remaining data back into the buffer
state.recv_buf = data
|
[
"def",
"frameify",
"(",
"self",
",",
"state",
",",
"data",
")",
":",
"# Pull in any partially-processed data",
"data",
"=",
"state",
".",
"recv_buf",
"+",
"data",
"# Loop over the data",
"while",
"data",
":",
"line",
",",
"sep",
",",
"rest",
"=",
"data",
".",
"partition",
"(",
"'\\n'",
")",
"# Did we have a whole line?",
"if",
"sep",
"!=",
"'\\n'",
":",
"break",
"# OK, update the data...",
"data",
"=",
"rest",
"# Now, strip off carriage return, if there is one",
"if",
"self",
".",
"carriage_return",
"and",
"line",
"[",
"-",
"1",
"]",
"==",
"'\\r'",
":",
"line",
"=",
"line",
"[",
":",
"-",
"1",
"]",
"# Yield the line",
"try",
":",
"yield",
"line",
"except",
"FrameSwitch",
":",
"break",
"# Put any remaining data back into the buffer",
"state",
".",
"recv_buf",
"=",
"data"
] |
Split data into a sequence of lines.
|
[
"Split",
"data",
"into",
"a",
"sequence",
"of",
"lines",
"."
] |
207102c83e88f8f1fa7ba605ef0aab2ae9078b36
|
https://github.com/klmitch/tendril/blob/207102c83e88f8f1fa7ba605ef0aab2ae9078b36/tendril/framers.py#L261-L289
|
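Feeding the line framer in two pieces shows the buffering: complete lines come out as frames, and the partial tail stays in `recv_buf` to be prepended to the next read. A hedged sketch assuming a no-argument constructor with carriage-return stripping enabled:

class _State(object):
    recv_buf = ''

state = _State()
framer = LineFramer()
print(list(framer.frameify(state, 'one\r\ntwo\nthr')))  # ['one', 'two']
print(repr(state.recv_buf))                             # 'thr'
print(list(framer.frameify(state, 'ee\n')))             # ['three']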
240,161
|
klmitch/tendril
|
tendril/framers.py
|
StuffingFramer.streamify
|
def streamify(self, state, frame):
"""Prepare frame for output as a byte-stuffed stream."""
# Split the frame apart for stuffing...
pieces = frame.split(self.prefix)
return '%s%s%s%s%s' % (self.prefix, self.begin,
(self.prefix + self.nop).join(pieces),
self.prefix, self.end)
|
python
|
def streamify(self, state, frame):
"""Prepare frame for output as a byte-stuffed stream."""
# Split the frame apart for stuffing...
pieces = frame.split(self.prefix)
return '%s%s%s%s%s' % (self.prefix, self.begin,
(self.prefix + self.nop).join(pieces),
self.prefix, self.end)
|
[
"def",
"streamify",
"(",
"self",
",",
"state",
",",
"frame",
")",
":",
"# Split the frame apart for stuffing...",
"pieces",
"=",
"frame",
".",
"split",
"(",
"self",
".",
"prefix",
")",
"return",
"'%s%s%s%s%s'",
"%",
"(",
"self",
".",
"prefix",
",",
"self",
".",
"begin",
",",
"(",
"self",
".",
"prefix",
"+",
"self",
".",
"nop",
")",
".",
"join",
"(",
"pieces",
")",
",",
"self",
".",
"prefix",
",",
"self",
".",
"end",
")"
] |
Prepare frame for output as a byte-stuffed stream.
|
[
"Prepare",
"frame",
"for",
"output",
"as",
"a",
"byte",
"-",
"stuffed",
"stream",
"."
] |
207102c83e88f8f1fa7ba605ef0aab2ae9078b36
|
https://github.com/klmitch/tendril/blob/207102c83e88f8f1fa7ba605ef0aab2ae9078b36/tendril/framers.py#L481-L489
|
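The stuffed stream is just the frame with every literal `prefix` byte followed by a `nop`, wrapped in `prefix+begin` / `prefix+end` markers. Re-running the expression above with hypothetical control bytes (the real values live on the framer class and are not shown here) makes the layout concrete:

prefix, begin, nop, end = '\xff', '\x01', '\x00', '\x02'  # hypothetical values
frame = 'ab\xffcd'
pieces = frame.split(prefix)
stream = '%s%s%s%s%s' % (prefix, begin, (prefix + nop).join(pieces), prefix, end)
print(repr(stream))  # '\xff\x01ab\xff\x00cd\xff\x02'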
240,162
|
klmitch/tendril
|
tendril/framers.py
|
COBSFramer._get_tab
|
def _get_tab(cls):
"""Generate and return the COBS table."""
if not cls._tabs['dec_cobs']:
# Compute the COBS table for decoding
cls._tabs['dec_cobs']['\xff'] = (255, '')
cls._tabs['dec_cobs'].update(dict((chr(l), (l, '\0'))
for l in range(1, 255)))
# Compute the COBS table for encoding
cls._tabs['enc_cobs'] = [(255, '\xff'),
dict((l, chr(l))
for l in range(1, 255)),
]
return cls._tabs['dec_cobs'], cls._tabs['enc_cobs']
|
python
|
def _get_tab(cls):
"""Generate and return the COBS table."""
if not cls._tabs['dec_cobs']:
# Compute the COBS table for decoding
cls._tabs['dec_cobs']['\xff'] = (255, '')
cls._tabs['dec_cobs'].update(dict((chr(l), (l, '\0'))
for l in range(1, 255)))
# Compute the COBS table for encoding
cls._tabs['enc_cobs'] = [(255, '\xff'),
dict((l, chr(l))
for l in range(1, 255)),
]
return cls._tabs['dec_cobs'], cls._tabs['enc_cobs']
|
[
"def",
"_get_tab",
"(",
"cls",
")",
":",
"if",
"not",
"cls",
".",
"_tabs",
"[",
"'dec_cobs'",
"]",
":",
"# Compute the COBS table for decoding",
"cls",
".",
"_tabs",
"[",
"'dec_cobs'",
"]",
"[",
"'\\xff'",
"]",
"=",
"(",
"255",
",",
"''",
")",
"cls",
".",
"_tabs",
"[",
"'dec_cobs'",
"]",
".",
"update",
"(",
"dict",
"(",
"(",
"chr",
"(",
"l",
")",
",",
"(",
"l",
",",
"'\\0'",
")",
")",
"for",
"l",
"in",
"range",
"(",
"1",
",",
"255",
")",
")",
")",
"# Compute the COBS table for encoding",
"cls",
".",
"_tabs",
"[",
"'enc_cobs'",
"]",
"=",
"[",
"(",
"255",
",",
"'\\xff'",
")",
",",
"dict",
"(",
"(",
"l",
",",
"chr",
"(",
"l",
")",
")",
"for",
"l",
"in",
"range",
"(",
"1",
",",
"255",
")",
")",
",",
"]",
"return",
"cls",
".",
"_tabs",
"[",
"'dec_cobs'",
"]",
",",
"cls",
".",
"_tabs",
"[",
"'enc_cobs'",
"]"
] |
Generate and return the COBS table.
|
[
"Generate",
"and",
"return",
"the",
"COBS",
"table",
"."
] |
207102c83e88f8f1fa7ba605ef0aab2ae9078b36
|
https://github.com/klmitch/tendril/blob/207102c83e88f8f1fa7ba605ef0aab2ae9078b36/tendril/framers.py#L507-L522
|
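The decoding table maps a COBS code byte to a pair of (block length, implied suffix): code chr(n) for n < 255 means "n - 1 data bytes follow, then an implicit zero", while chr(255) means a full 254-byte block with no implied zero. A tiny sketch of the same construction:

dec = {'\xff': (255, '')}
dec.update((chr(n), (n, '\0')) for n in range(1, 255))
assert dec['\x03'] == (3, '\0')   # two data bytes, then an implicit zero
assert dec['\xff'] == (255, '')   # 254 data bytes, no implicit zero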
240,163
|
klmitch/tendril
|
tendril/framers.py
|
COBSFramer._get_tab_zpe
|
def _get_tab_zpe(cls):
"""Generate and return the COBS ZPE table."""
if not cls._tabs['dec_cobs_zpe']:
# Compute the COBS ZPE table for decoding
cls._tabs['dec_cobs_zpe']['\xe0'] = (224, '')
cls._tabs['dec_cobs_zpe'].update(dict((chr(l), (l, '\0'))
for l in range(1, 224)))
cls._tabs['dec_cobs_zpe'].update(dict((chr(l), (l - 224, '\0\0'))
for l in range(225, 256)))
# Compute the COBS ZPE table for encoding
cls._tabs['enc_cobs_zpe'] = [(224, '\xe0'),
dict((l, chr(l))
for l in range(1, 224)),
dict((l - 224, chr(l))
for l in range(225, 256))
]
return cls._tabs['dec_cobs_zpe'], cls._tabs['enc_cobs_zpe']
|
python
|
def _get_tab_zpe(cls):
"""Generate and return the COBS ZPE table."""
if not cls._tabs['dec_cobs_zpe']:
# Compute the COBS ZPE table for decoding
cls._tabs['dec_cobs_zpe']['\xe0'] = (224, '')
cls._tabs['dec_cobs_zpe'].update(dict((chr(l), (l, '\0'))
for l in range(1, 224)))
cls._tabs['dec_cobs_zpe'].update(dict((chr(l), (l - 224, '\0\0'))
for l in range(225, 256)))
# Compute the COBS ZPE table for encoding
cls._tabs['enc_cobs_zpe'] = [(224, '\xe0'),
dict((l, chr(l))
for l in range(1, 224)),
dict((l - 224, chr(l))
for l in range(225, 256))
]
return cls._tabs['dec_cobs_zpe'], cls._tabs['enc_cobs_zpe']
|
[
"def",
"_get_tab_zpe",
"(",
"cls",
")",
":",
"if",
"not",
"cls",
".",
"_tabs",
"[",
"'dec_cobs_zpe'",
"]",
":",
"# Compute the COBS ZPE table for decoding",
"cls",
".",
"_tabs",
"[",
"'dec_cobs_zpe'",
"]",
"[",
"'\\xe0'",
"]",
"=",
"(",
"224",
",",
"''",
")",
"cls",
".",
"_tabs",
"[",
"'dec_cobs_zpe'",
"]",
".",
"update",
"(",
"dict",
"(",
"(",
"chr",
"(",
"l",
")",
",",
"(",
"l",
",",
"'\\0'",
")",
")",
"for",
"l",
"in",
"range",
"(",
"1",
",",
"224",
")",
")",
")",
"cls",
".",
"_tabs",
"[",
"'dec_cobs_zpe'",
"]",
".",
"update",
"(",
"dict",
"(",
"(",
"chr",
"(",
"l",
")",
",",
"(",
"l",
"-",
"224",
",",
"'\\0\\0'",
")",
")",
"for",
"l",
"in",
"range",
"(",
"225",
",",
"256",
")",
")",
")",
"# Compute the COBS ZPE table for encoding",
"cls",
".",
"_tabs",
"[",
"'enc_cobs_zpe'",
"]",
"=",
"[",
"(",
"224",
",",
"'\\xe0'",
")",
",",
"dict",
"(",
"(",
"l",
",",
"chr",
"(",
"l",
")",
")",
"for",
"l",
"in",
"range",
"(",
"1",
",",
"224",
")",
")",
",",
"dict",
"(",
"(",
"l",
"-",
"224",
",",
"chr",
"(",
"l",
")",
")",
"for",
"l",
"in",
"range",
"(",
"225",
",",
"256",
")",
")",
"]",
"return",
"cls",
".",
"_tabs",
"[",
"'dec_cobs_zpe'",
"]",
",",
"cls",
".",
"_tabs",
"[",
"'enc_cobs_zpe'",
"]"
] |
Generate and return the COBS ZPE table.
|
[
"Generate",
"and",
"return",
"the",
"COBS",
"ZPE",
"table",
"."
] |
207102c83e88f8f1fa7ba605ef0aab2ae9078b36
|
https://github.com/klmitch/tendril/blob/207102c83e88f8f1fa7ba605ef0aab2ae9078b36/tendril/framers.py#L525-L544
|
240,164
|
klmitch/tendril
|
tendril/framers.py
|
COBSFramer._decode
|
def _decode(frame, tab):
"""Decode a frame with the help of the table."""
blocks = []
# Decode each block
while frame:
length, endseq = tab[frame[0]]
blocks.extend([frame[1:length], endseq])
frame = frame[length:]
# Remove one (and only one) trailing '\0' as necessary
if blocks and len(blocks[-1]) > 0:
blocks[-1] = blocks[-1][:-1]
# Return the decoded plaintext
return ''.join(blocks)
|
python
|
def _decode(frame, tab):
"""Decode a frame with the help of the table."""
blocks = []
# Decode each block
while frame:
length, endseq = tab[frame[0]]
blocks.extend([frame[1:length], endseq])
frame = frame[length:]
# Remove one (and only one) trailing '\0' as necessary
if blocks and len(blocks[-1]) > 0:
blocks[-1] = blocks[-1][:-1]
# Return the decoded plaintext
return ''.join(blocks)
|
[
"def",
"_decode",
"(",
"frame",
",",
"tab",
")",
":",
"blocks",
"=",
"[",
"]",
"# Decode each block",
"while",
"frame",
":",
"length",
",",
"endseq",
"=",
"tab",
"[",
"frame",
"[",
"0",
"]",
"]",
"blocks",
".",
"extend",
"(",
"[",
"frame",
"[",
"1",
":",
"length",
"]",
",",
"endseq",
"]",
")",
"frame",
"=",
"frame",
"[",
"length",
":",
"]",
"# Remove one (and only one) trailing '\\0' as necessary",
"if",
"blocks",
"and",
"len",
"(",
"blocks",
"[",
"-",
"1",
"]",
")",
">",
"0",
":",
"blocks",
"[",
"-",
"1",
"]",
"=",
"blocks",
"[",
"-",
"1",
"]",
"[",
":",
"-",
"1",
"]",
"# Return the decoded plaintext",
"return",
"''",
".",
"join",
"(",
"blocks",
")"
] |
Decode a frame with the help of the table.
|
[
"Decode",
"a",
"frame",
"with",
"the",
"help",
"of",
"the",
"table",
"."
] |
207102c83e88f8f1fa7ba605ef0aab2ae9078b36
|
https://github.com/klmitch/tendril/blob/207102c83e88f8f1fa7ba605ef0aab2ae9078b36/tendril/framers.py#L547-L563
|
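A quick check of `_decode` against the table sketch above: the frame '\x03ab\x01' is one two-byte block ('ab') plus its implicit zero, followed by an empty block whose implicit zero is the single trailing zero that gets stripped, so the plaintext is 'ab\0' minus that last zero handling, i.e. 'ab\0' decodes from '\x03ab\x02\x00'-style framing. The one-liner below, using the `dec` table from the previous sketch, is a hedged illustration rather than the class method:

frame = '\x03ab\x01'
blocks = []
while frame:
    length, endseq = dec[frame[0]]
    blocks.extend([frame[1:length], endseq])
    frame = frame[length:]
if blocks and len(blocks[-1]) > 0:
    blocks[-1] = blocks[-1][:-1]
print(repr(''.join(blocks)))  # 'ab\x00'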
240,165
|
klmitch/tendril
|
tendril/framers.py
|
COBSFramer.streamify
|
def streamify(self, state, frame):
"""Prepare frame for output as a COBS-encoded stream."""
# Get the encoding table
enc_tab = self._tables[1][:]
# Need the special un-trailed block length and code
untrail_len, untrail_code = enc_tab.pop(0)
# Set up a repository to receive the encoded blocks
result = []
# Break the frame into blocks
blocks = frame.split('\0')
# Now, walk the block list; done carefully because we need
# look-ahead in some cases
skip = False
for i in range(len(blocks)):
# Skip handled blocks
if skip:
skip = False
continue
blk = blocks[i]
# Encode un-trailed blocks
while len(blk) >= untrail_len - 1:
result.append(untrail_code + blk[:untrail_len - 1])
blk = blk[untrail_len - 1:]
# Do we care about look-ahead?
if (len(enc_tab) > 1 and i + 1 < len(blocks) and
blocks[i + 1] == '' and len(blk) <= 30):
# Use the second encoder table
tab = enc_tab[1]
# Skip the following empty block
skip = True
else:
# Use the regular encoder table
tab = enc_tab[0]
# Encode the block
result.append(tab[len(blk) + 1] + blk)
# Stitch together the result blocks
return ''.join(result) + '\0'
|
python
|
def streamify(self, state, frame):
"""Prepare frame for output as a COBS-encoded stream."""
# Get the encoding table
enc_tab = self._tables[1][:]
# Need the special un-trailed block length and code
untrail_len, untrail_code = enc_tab.pop(0)
# Set up a repository to receive the encoded blocks
result = []
# Break the frame into blocks
blocks = frame.split('\0')
# Now, walk the block list; done carefully because we need
# look-ahead in some cases
skip = False
for i in range(len(blocks)):
# Skip handled blocks
if skip:
skip = False
continue
blk = blocks[i]
# Encode un-trailed blocks
while len(blk) >= untrail_len - 1:
result.append(untrail_code + blk[:untrail_len - 1])
blk = blk[untrail_len - 1:]
# Do we care about look-ahead?
if (len(enc_tab) > 1 and i + 1 < len(blocks) and
blocks[i + 1] == '' and len(blk) <= 30):
# Use the second encoder table
tab = enc_tab[1]
# Skip the following empty block
skip = True
else:
# Use the regular encoder table
tab = enc_tab[0]
# Encode the block
result.append(tab[len(blk) + 1] + blk)
# Stitch together the result blocks
return ''.join(result) + '\0'
|
[
"def",
"streamify",
"(",
"self",
",",
"state",
",",
"frame",
")",
":",
"# Get the encoding table",
"enc_tab",
"=",
"self",
".",
"_tables",
"[",
"1",
"]",
"[",
":",
"]",
"# Need the special un-trailed block length and code",
"untrail_len",
",",
"untrail_code",
"=",
"enc_tab",
".",
"pop",
"(",
"0",
")",
"# Set up a repository to receive the encoded blocks",
"result",
"=",
"[",
"]",
"# Break the frame into blocks",
"blocks",
"=",
"frame",
".",
"split",
"(",
"'\\0'",
")",
"# Now, walk the block list; done carefully because we need",
"# look-ahead in some cases",
"skip",
"=",
"False",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"blocks",
")",
")",
":",
"# Skip handled blocks",
"if",
"skip",
":",
"skip",
"=",
"False",
"continue",
"blk",
"=",
"blocks",
"[",
"i",
"]",
"# Encode un-trailed blocks",
"while",
"len",
"(",
"blk",
")",
">=",
"untrail_len",
"-",
"1",
":",
"result",
".",
"append",
"(",
"untrail_code",
"+",
"blk",
"[",
":",
"untrail_len",
"-",
"1",
"]",
")",
"blk",
"=",
"blk",
"[",
"untrail_len",
"-",
"1",
":",
"]",
"# Do we care about look-ahead?",
"if",
"(",
"len",
"(",
"enc_tab",
")",
">",
"1",
"and",
"i",
"+",
"1",
"<",
"len",
"(",
"blocks",
")",
"and",
"blocks",
"[",
"i",
"+",
"1",
"]",
"==",
"''",
"and",
"len",
"(",
"blk",
")",
"<=",
"30",
")",
":",
"# Use the second encoder table",
"tab",
"=",
"enc_tab",
"[",
"1",
"]",
"# Skip the following empty block",
"skip",
"=",
"True",
"else",
":",
"# Use the regular encoder table",
"tab",
"=",
"enc_tab",
"[",
"0",
"]",
"# Encode the block",
"result",
".",
"append",
"(",
"tab",
"[",
"len",
"(",
"blk",
")",
"+",
"1",
"]",
"+",
"blk",
")",
"# Stitch together the result blocks",
"return",
"''",
".",
"join",
"(",
"result",
")",
"+",
"'\\0'"
] |
Prepare frame for output as a COBS-encoded stream.
|
[
"Prepare",
"frame",
"for",
"output",
"as",
"a",
"COBS",
"-",
"encoded",
"stream",
"."
] |
207102c83e88f8f1fa7ba605ef0aab2ae9078b36
|
https://github.com/klmitch/tendril/blob/207102c83e88f8f1fa7ba605ef0aab2ae9078b36/tendril/framers.py#L603-L650
|
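Stripped of the class machinery and the ZPE look-ahead, the plain COBS variant that `streamify` and `_decode` implement round-trips like this. A hedged sketch of textbook COBS mirroring the block logic above (254-byte un-trailed blocks under code '\xff', otherwise code = block length + 1), not the framer's exact API:

def cobs_encode(data):
    out = []
    for block in data.split('\0'):
        # Runs of 254+ zero-free bytes get the special un-trailed code
        while len(block) >= 254:
            out.append('\xff' + block[:254])
            block = block[254:]
        out.append(chr(len(block) + 1) + block)
    return ''.join(out)

def cobs_decode(frame):
    out, i = [], 0
    while i < len(frame):
        code = ord(frame[i])
        out.append(frame[i + 1:i + code])
        if code < 255:
            out.append('\0')   # implied zero after a trailed block
        i += code
    return ''.join(out)[:-1]   # drop the one extra trailing zero

data = 'ab\0c' + '\0' * 2
assert cobs_decode(cobs_encode(data)) == data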
240,166
|
rmed/flask-waffleconf
|
flask_waffleconf/core.py
|
_WaffleState.parse_conf
|
def parse_conf(self, keys=[]):
"""Parse configuration values from the database.
The extension must have been previously initialized.
If a key is not found in the database, it will be created with the
default value specified.
Arguments:
keys (list[str]): list of keys to parse. If the list is empty, then
all the keys known to the application will be used.
Returns:
dict of the parsed config values.
"""
confs = self.app.config.get('WAFFLE_CONFS', {})
if not keys:
keys = confs.keys()
result = {}
for key in keys:
# Some things cannot be changed...
if key.startswith('WAFFLE_'):
continue
# No arbitrary keys
if key not in confs.keys():
continue
stored_conf = self.configstore.get(key)
if not stored_conf:
# Store new record in database
value = confs[key].get('default', '')
stored_conf = self.configstore.put(key, util.serialize(value))
self.configstore.commit()
else:
# Get stored value
value = util.deserialize(stored_conf.get_value())
result[stored_conf.get_key()] = value
return result
|
python
|
def parse_conf(self, keys=[]):
"""Parse configuration values from the database.
The extension must have been previously initialized.
If a key is not found in the database, it will be created with the
default value specified.
Arguments:
keys (list[str]): list of keys to parse. If the list is empty, then
all the keys known to the application will be used.
Returns:
dict of the parsed config values.
"""
confs = self.app.config.get('WAFFLE_CONFS', {})
if not keys:
keys = confs.keys()
result = {}
for key in keys:
# Some things cannot be changed...
if key.startswith('WAFFLE_'):
continue
# No arbitrary keys
if key not in confs.keys():
continue
stored_conf = self.configstore.get(key)
if not stored_conf:
# Store new record in database
value = confs[key].get('default', '')
stored_conf = self.configstore.put(key, util.serialize(value))
self.configstore.commit()
else:
# Get stored value
value = util.deserialize(stored_conf.get_value())
result[stored_conf.get_key()] = value
return result
|
[
"def",
"parse_conf",
"(",
"self",
",",
"keys",
"=",
"[",
"]",
")",
":",
"confs",
"=",
"self",
".",
"app",
".",
"config",
".",
"get",
"(",
"'WAFFLE_CONFS'",
",",
"{",
"}",
")",
"if",
"not",
"keys",
":",
"keys",
"=",
"confs",
".",
"keys",
"(",
")",
"result",
"=",
"{",
"}",
"for",
"key",
"in",
"keys",
":",
"# Some things cannot be changed...",
"if",
"key",
".",
"startswith",
"(",
"'WAFFLE_'",
")",
":",
"continue",
"# No arbitrary keys",
"if",
"key",
"not",
"in",
"confs",
".",
"keys",
"(",
")",
":",
"continue",
"stored_conf",
"=",
"self",
".",
"configstore",
".",
"get",
"(",
"key",
")",
"if",
"not",
"stored_conf",
":",
"# Store new record in database",
"value",
"=",
"confs",
"[",
"key",
"]",
".",
"get",
"(",
"'default'",
",",
"''",
")",
"stored_conf",
"=",
"self",
".",
"configstore",
".",
"put",
"(",
"key",
",",
"util",
".",
"serialize",
"(",
"value",
")",
")",
"self",
".",
"configstore",
".",
"commit",
"(",
")",
"else",
":",
"# Get stored value",
"value",
"=",
"util",
".",
"deserialize",
"(",
"stored_conf",
".",
"get_value",
"(",
")",
")",
"result",
"[",
"stored_conf",
".",
"get_key",
"(",
")",
"]",
"=",
"value",
"return",
"result"
] |
Parse configuration values from the database.
The extension must have been previously initialized.
If a key is not found in the database, it will be created with the
default value specified.
Arguments:
keys (list[str]): list of keys to parse. If the list is empty, then
all the keys known to the application will be used.
Returns:
dict of the parsed config values.
|
[
"Parse",
"configuration",
"values",
"from",
"the",
"database",
"."
] |
a75ed69101796c9f3f42eff9f91e91dc6dd13869
|
https://github.com/rmed/flask-waffleconf/blob/a75ed69101796c9f3f42eff9f91e91dc6dd13869/flask_waffleconf/core.py#L58-L102
|
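`parse_conf` only touches the store through `get`, `put`, and `commit`, and expects the returned records to expose `get_key()` and `get_value()`. A hypothetical in-memory store honoring that contract, inferred from the calls above rather than from the real WaffleStore API, is enough to exercise it:

class _MemRecord(object):
    def __init__(self, key, value):
        self._key, self._value = key, value
    def get_key(self):
        return self._key
    def get_value(self):
        return self._value

class _MemStore(object):
    # Hypothetical stand-in for a WaffleStore database backend
    def __init__(self):
        self._data = {}
    def get(self, key):
        return self._data.get(key)
    def put(self, key, value):
        rec = _MemRecord(key, value)
        self._data[key] = rec
        return rec
    def commit(self):
        pass  # nothing to flush in memory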
240,167
|
rmed/flask-waffleconf
|
flask_waffleconf/core.py
|
_WaffleState.update_db
|
def update_db(self, new_values):
"""Update database values and application configuration.
The provided keys must be defined in the ``WAFFLE_CONFS`` setting.
Arguments:
new_values (dict): dict of configuration variables and their values
The dict has the following structure:
{
'MY_CONFIG_VAR' : <CONFIG_VAL>,
'MY_CONFIG_VAR1' : <CONFIG_VAL1>
}
"""
confs = self.app.config.get('WAFFLE_CONFS', {})
to_update = {}
for key in new_values.keys():
# Some things cannot be changed...
if key.startswith('WAFFLE_'):
continue
# No arbitrary keys
if key not in confs.keys():
continue
value = new_values[key]
self.configstore.put(key, util.serialize(value))
self.configstore.commit()
to_update[key] = value
# Update config
if not to_update:
return
self.app.config.update(to_update)
# Notify other processes
if self.app.config.get('WAFFLE_MULTIPROC', False):
self.notify(self)
|
python
|
def update_db(self, new_values):
"""Update database values and application configuration.
The provided keys must be defined in the ``WAFFLE_CONFS`` setting.
Arguments:
new_values (dict): dict of configuration variables and their values
The dict has the following structure:
{
'MY_CONFIG_VAR' : <CONFIG_VAL>,
'MY_CONFIG_VAR1' : <CONFIG_VAL1>
}
"""
confs = self.app.config.get('WAFFLE_CONFS', {})
to_update = {}
for key in new_values.keys():
# Some things cannot be changed...
if key.startswith('WAFFLE_'):
continue
# No arbitrary keys
if key not in confs.keys():
continue
value = new_values[key]
self.configstore.put(key, util.serialize(value))
self.configstore.commit()
to_update[key] = value
# Update config
if not to_update:
return
self.app.config.update(to_update)
# Notify other processes
if self.app.config.get('WAFFLE_MULTIPROC', False):
self.notify(self)
|
[
"def",
"update_db",
"(",
"self",
",",
"new_values",
")",
":",
"confs",
"=",
"self",
".",
"app",
".",
"config",
".",
"get",
"(",
"'WAFFLE_CONFS'",
",",
"{",
"}",
")",
"to_update",
"=",
"{",
"}",
"for",
"key",
"in",
"new_values",
".",
"keys",
"(",
")",
":",
"# Some things cannot be changed...",
"if",
"key",
".",
"startswith",
"(",
"'WAFFLE_'",
")",
":",
"continue",
"# No arbitrary keys",
"if",
"key",
"not",
"in",
"confs",
".",
"keys",
"(",
")",
":",
"continue",
"value",
"=",
"new_values",
"[",
"key",
"]",
"self",
".",
"configstore",
".",
"put",
"(",
"key",
",",
"util",
".",
"serialize",
"(",
"value",
")",
")",
"self",
".",
"configstore",
".",
"commit",
"(",
")",
"to_update",
"[",
"key",
"]",
"=",
"value",
"# Update config",
"if",
"not",
"to_update",
":",
"return",
"self",
".",
"app",
".",
"config",
".",
"update",
"(",
"to_update",
")",
"# Notify other processes",
"if",
"self",
".",
"app",
".",
"config",
".",
"get",
"(",
"'WAFFLE_MULTIPROC'",
",",
"False",
")",
":",
"self",
".",
"notify",
"(",
"self",
")"
] |
Update database values and application configuration.
The provided keys must be defined in the ``WAFFLE_CONFS`` setting.
Arguments:
new_values (dict): dict of configuration variables and their values
The dict has the following structure:
{
'MY_CONFIG_VAR' : <CONFIG_VAL>,
'MY_CONFIG_VAR1' : <CONFIG_VAL1>
}
|
[
"Update",
"database",
"values",
"and",
"application",
"configuration",
"."
] |
a75ed69101796c9f3f42eff9f91e91dc6dd13869
|
https://github.com/rmed/flask-waffleconf/blob/a75ed69101796c9f3f42eff9f91e91dc6dd13869/flask_waffleconf/core.py#L104-L144
|
240,168
|
rmed/flask-waffleconf
|
flask_waffleconf/core.py
|
_WaffleState.update_conf
|
def update_conf(self):
"""Update configuration values from database.
This method should be called when there is an update notification.
"""
parsed = self.parse_conf()
if not parsed:
return None
# Update app config
self.app.config.update(parsed)
|
python
|
def update_conf(self):
"""Update configuration values from database.
This method should be called when there is an update notification.
"""
parsed = self.parse_conf()
if not parsed:
return None
# Update app config
self.app.config.update(parsed)
|
[
"def",
"update_conf",
"(",
"self",
")",
":",
"parsed",
"=",
"self",
".",
"parse_conf",
"(",
")",
"if",
"not",
"parsed",
":",
"return",
"None",
"# Update app config",
"self",
".",
"app",
".",
"config",
".",
"update",
"(",
"parsed",
")"
] |
Update configuration values from database.
This method should be called when there is an update notification.
|
[
"Update",
"configuration",
"values",
"from",
"database",
"."
] |
a75ed69101796c9f3f42eff9f91e91dc6dd13869
|
https://github.com/rmed/flask-waffleconf/blob/a75ed69101796c9f3f42eff9f91e91dc6dd13869/flask_waffleconf/core.py#L146-L157
|
240,169
|
rmed/flask-waffleconf
|
flask_waffleconf/core.py
|
WaffleConf.init_app
|
def init_app(self, app, configstore):
"""Initialize the extension for the given application and store.
Parse the configuration values stored in the database obtained from
the ``WAFFLE_CONFS`` value of the configuration.
Arguments:
app: Flask application instance
configstore (WaffleStore): database store.
"""
if not hasattr(app, 'extensions'):
app.extensions = {}
self.state = _WaffleState(app, configstore)
app.extensions['waffleconf'] = self.state
|
python
|
def init_app(self, app, configstore):
"""Initialize the extension for the given application and store.
Parse the configuration values stored in the database obtained from
the ``WAFFLE_CONFS`` value of the configuration.
Arguments:
app: Flask application instance
configstore (WaffleStore): database store.
"""
if not hasattr(app, 'extensions'):
app.extensions = {}
self.state = _WaffleState(app, configstore)
app.extensions['waffleconf'] = self.state
|
[
"def",
"init_app",
"(",
"self",
",",
"app",
",",
"configstore",
")",
":",
"if",
"not",
"hasattr",
"(",
"app",
",",
"'extensions'",
")",
":",
"app",
".",
"extensions",
"=",
"{",
"}",
"self",
".",
"state",
"=",
"_WaffleState",
"(",
"app",
",",
"configstore",
")",
"app",
".",
"extensions",
"[",
"'waffleconf'",
"]",
"=",
"self",
".",
"state"
] |
Initialize the extension for the given application and store.
Parse the configuration values stored in the database obtained from
the ``WAFFLE_CONFS`` value of the configuration.
Arguments:
app: Flask application instance
configstore (WaffleStore): database store.
|
[
"Initialize",
"the",
"extension",
"for",
"the",
"given",
"application",
"and",
"store",
"."
] |
a75ed69101796c9f3f42eff9f91e91dc6dd13869
|
https://github.com/rmed/flask-waffleconf/blob/a75ed69101796c9f3f42eff9f91e91dc6dd13869/flask_waffleconf/core.py#L177-L192
|
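Wiring it together: a hedged usage sketch with the in-memory store from the earlier sketch, assuming `WaffleConf()` can be constructed without arguments (the common Flask extension pattern) and that each `WAFFLE_CONFS` entry carries a 'default':

from flask import Flask

app = Flask(__name__)
app.config['WAFFLE_CONFS'] = {
    'SITE_NAME': {'default': 'example'},
}

waffle = WaffleConf()
waffle.init_app(app, _MemStore())
print(app.extensions['waffleconf'].parse_conf())  # {'SITE_NAME': 'example'}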
240,170
|
smbapps/isolcss
|
isolcss/main.py
|
isolcss
|
def isolcss(prefix, css):
"""
Returns `css` with all selectors prefixed by `prefix`, or replacing "&"
as SASS and LESS both do.
Tries to parse strictly then falls back, with a warning, to forgiving
parse if necessary.
"""
try:
# Attempt full strict parse, raise exception on failure.
all(True for m in matchiter(selrule_or_atom_re, css))
except ValueError as e:
logger.warning("Strict parse failed at char {}".format(e.args[0]))
splits = matchiter(selrule_or_any_re, css)
else:
splits = matchiter(selrule_or_atom_re, css)
css = []
for m in splits:
if not m.groupdict()['sels']:
css.extend(m.group(0))
continue
sels = matchall(sel_re, m.group('sels'))
# This should never happen because sel_re is a subpattern
# of the original match.
assert sels, "Failed to split selectors: {!r}".format(m.group('sels'))
for sel in sels:
atoms = matchall(atom_re, sel)
if '&' in atoms:
sel = ''.join((prefix if a == '&' else a) for a in atoms)
else:
sel = '%s %s' % (prefix, sel)
css.append(sel)
css.append(m.group('ruleset'))
return ''.join(css)
|
python
|
def isolcss(prefix, css):
"""
Returns `css` with all selectors prefixed by `prefix`, or replacing "&"
as SASS and LESS both do.
Tries to parse strictly then falls back, with a warning, to forgiving
parse if necessary.
"""
try:
# Attempt full strict parse, raise exception on failure.
all(True for m in matchiter(selrule_or_atom_re, css))
except ValueError as e:
logger.warning("Strict parse failed at char {}".format(e.args[0]))
splits = matchiter(selrule_or_any_re, css)
else:
splits = matchiter(selrule_or_atom_re, css)
css = []
for m in splits:
if not m.groupdict()['sels']:
css.extend(m.group(0))
continue
sels = matchall(sel_re, m.group('sels'))
# This should never happen because sel_re is a subpattern
# of the original match.
assert sels, "Failed to split selectors: {!r}".format(m.group('sels'))
for sel in sels:
atoms = matchall(atom_re, sel)
if '&' in atoms:
sel = ''.join((prefix if a == '&' else a) for a in atoms)
else:
sel = '%s %s' % (prefix, sel)
css.append(sel)
css.append(m.group('ruleset'))
return ''.join(css)
|
[
"def",
"isolcss",
"(",
"prefix",
",",
"css",
")",
":",
"try",
":",
"# Attempt full strict parse, raise exception on failure.",
"all",
"(",
"True",
"for",
"m",
"in",
"matchiter",
"(",
"selrule_or_atom_re",
",",
"css",
")",
")",
"except",
"ValueError",
"as",
"e",
":",
"logger",
".",
"warning",
"(",
"\"Strict parse failed at char {}\"",
".",
"format",
"(",
"e",
".",
"args",
"[",
"0",
"]",
")",
")",
"splits",
"=",
"matchiter",
"(",
"selrule_or_any_re",
",",
"css",
")",
"else",
":",
"splits",
"=",
"matchiter",
"(",
"selrule_or_atom_re",
",",
"css",
")",
"css",
"=",
"[",
"]",
"for",
"m",
"in",
"splits",
":",
"if",
"not",
"m",
".",
"groupdict",
"(",
")",
"[",
"'sels'",
"]",
":",
"css",
".",
"extend",
"(",
"m",
".",
"group",
"(",
"0",
")",
")",
"continue",
"sels",
"=",
"matchall",
"(",
"sel_re",
",",
"m",
".",
"group",
"(",
"'sels'",
")",
")",
"# This should never happen because sel_re is a subpattern",
"# of the original match.",
"assert",
"sels",
",",
"\"Failed to split selectors: {!r}\"",
".",
"format",
"(",
"m",
".",
"group",
"(",
"'sels'",
")",
")",
"for",
"sel",
"in",
"sels",
":",
"atoms",
"=",
"matchall",
"(",
"atom_re",
",",
"sel",
")",
"if",
"'&'",
"in",
"atoms",
":",
"sel",
"=",
"''",
".",
"join",
"(",
"(",
"prefix",
"if",
"a",
"==",
"'&'",
"else",
"a",
")",
"for",
"a",
"in",
"atoms",
")",
"else",
":",
"sel",
"=",
"'%s %s'",
"%",
"(",
"prefix",
",",
"sel",
")",
"css",
".",
"append",
"(",
"sel",
")",
"css",
".",
"append",
"(",
"m",
".",
"group",
"(",
"'ruleset'",
")",
")",
"return",
"''",
".",
"join",
"(",
"css",
")"
] |
Returns `css` with all selectors prefixed by `prefix`, or replacing "&"
as SASS and LESS both do.
Tries to parse strictly, then falls back, with a warning, to a forgiving
parse if necessary.
|
[
"Returns",
"css",
"with",
"all",
"selectors",
"prefixed",
"by",
"prefix",
"or",
"replacing",
"&",
"as",
"SASS",
"and",
"LESS",
"both",
"do",
"."
] |
1613dfd297f64292af1216855b6d096f2bed82fe
|
https://github.com/smbapps/isolcss/blob/1613dfd297f64292af1216855b6d096f2bed82fe/isolcss/main.py#L11-L50
|
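A minimal usage sketch for the `isolcss` record above; the import path, input CSS, and the expected-output comment are illustrative assumptions, not taken from the repo:

from isolcss.main import isolcss  # assumed import path

css = ".btn { color: red } &.active .btn { color: blue }"
# Plain selectors gain the prefix; "&" is replaced by it.
print(isolcss("#widget", css))
# Expected shape: "#widget .btn { color: red } #widget.active .btn { color: blue }"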
240,171
|
lewisjared/credkeep
|
credkeep/decrypt.py
|
decrypt_file
|
def decrypt_file(filename, set_env=True, override_env=False):
"""
Decrypts a JSON file containing encrypted secrets. This file should contain an object mapping the key names to
encrypted secrets. This encrypted file can be created using `credkeep.encrypt_file` or the commandline utility.
:param filename: filename of the JSON file
:param set_env: If True, an environment variable representing the key is created.
:param override_env: If True, an existing environment variable with the same key name will be overridden with the
new decrypted value. If False, the environment variable will not be set.
:return: Dict containing the decrypted keys
"""
data = json.load(open(filename))
results = {}
for key, v in data.iteritems():
v_decrypt = decrypt_secret(v)
results[key] = v_decrypt
if set_env:
if key in os.environ and not override_env:
                continue
os.environ[str(key)] = v_decrypt
return results
|
python
|
def decrypt_file(filename, set_env=True, override_env=False):
"""
Decrypts a JSON file containing encrypted secrets. This file should contain an object mapping the key names to
encrypted secrets. This encrypted file can be created using `credkeep.encrypt_file` or the commandline utility.
:param filename: filename of the JSON file
:param set_env: If True, an environment variable representing the key is created.
:param override_env: If True, an existing environment variable with the same key name will be overridden with the
new decrypted value. If False, the environment variable will not be set.
:return: Dict containing the decrypted keys
"""
data = json.load(open(filename))
results = {}
for key, v in data.iteritems():
v_decrypt = decrypt_secret(v)
results[key] = v_decrypt
if set_env:
if key in os.environ and not override_env:
                continue
os.environ[str(key)] = v_decrypt
return results
|
[
"def",
"decrypt_file",
"(",
"filename",
",",
"set_env",
"=",
"True",
",",
"override_env",
"=",
"False",
")",
":",
"data",
"=",
"json",
".",
"load",
"(",
"open",
"(",
"filename",
")",
")",
"results",
"=",
"{",
"}",
"for",
"key",
",",
"v",
"in",
"data",
".",
"iteritems",
"(",
")",
":",
"v_decrypt",
"=",
"decrypt_secret",
"(",
"v",
")",
"results",
"[",
"key",
"]",
"=",
"v_decrypt",
"if",
"set_env",
":",
"if",
"key",
"in",
"os",
".",
"environ",
"and",
"not",
"override_env",
":",
"break",
"os",
".",
"environ",
"[",
"str",
"(",
"key",
")",
"]",
"=",
"v_decrypt",
"return",
"results"
] |
Decrypts a JSON file containing encrypted secrets. This file should contain an object mapping the key names to
encrypted secrets. This encrypted file can be created using `credkeep.encrypt_file` or the commandline utility.
:param filename: filename of the JSON file
:param set_env: If True, an environment variable representing the key is created.
:param override_env: If True, an existing environment variable with the same key name will be overridden with the
new decrypted value. If False, the environment variable will not be set.
:return: Dict containing the decrypted keys
|
[
"Decrypts",
"a",
"JSON",
"file",
"containing",
"encrypted",
"secrets",
".",
"This",
"file",
"should",
"contain",
"an",
"object",
"mapping",
"the",
"key",
"names",
"to",
"encrypted",
"secrets",
".",
"This",
"encrypted",
"file",
"can",
"be",
"created",
"using",
"credkeep",
".",
"encrypt_file",
"or",
"the",
"commandline",
"utility",
"."
] |
63638ced094992552a28109b91839bcbbbe9230a
|
https://github.com/lewisjared/credkeep/blob/63638ced094992552a28109b91839bcbbbe9230a/credkeep/decrypt.py#L13-L37
|
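A hedged call-site sketch for `decrypt_file`; the filename and key names are hypothetical, and it assumes `decrypt_secret` has a working KMS client behind it:

import os
from credkeep import decrypt_file  # assumed import path

# secrets.enc.json might look like: {"DB_PASSWORD": "<ciphertext>", ...}
secrets = decrypt_file('secrets.enc.json', set_env=True)
print(secrets['DB_PASSWORD'])      # decrypted value
print(os.environ['DB_PASSWORD'])   # also exported, unless it already existed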
240,172
|
lewisjared/credkeep
|
credkeep/decrypt.py
|
decrypt_or_cache
|
def decrypt_or_cache(filename, **kwargs):
"""
Attempts to load a local version of decrypted secrets before making external api calls.
    This is useful as it allows credkeep secrets to be used offline. Options for decrypt_file can be passed to this
function.
:param filename: filename of encrypted JSON file
:return: Dict containing decrypted keys
"""
clear_fname = enc_to_clear_filename(filename)
if clear_fname:
return json.load(open(clear_fname))
return decrypt_file(filename, **kwargs)
|
python
|
def decrypt_or_cache(filename, **kwargs):
"""
Attempts to load a local version of decrypted secrets before making external api calls.
    This is useful as it allows credkeep secrets to be used offline. Options for decrypt_file can be passed to this
function.
:param filename: filename of encrypted JSON file
:return: Dict containing decrypted keys
"""
clear_fname = enc_to_clear_filename(filename)
if clear_fname:
return json.load(open(clear_fname))
return decrypt_file(filename, **kwargs)
|
[
"def",
"decrypt_or_cache",
"(",
"filename",
",",
"*",
"*",
"kwargs",
")",
":",
"clear_fname",
"=",
"enc_to_clear_filename",
"(",
"filename",
")",
"if",
"clear_fname",
":",
"return",
"json",
".",
"load",
"(",
"open",
"(",
"clear_fname",
")",
")",
"return",
"decrypt_file",
"(",
"filename",
",",
"*",
"*",
"kwargs",
")"
] |
Attempts to load a local version of decrypted secrets before making external api calls.
This is useful as it allows credkeep secrets to be used offline. Options for decrypt_file can be passed to this
function.
:param filename: filename of encrypted JSON file
:return: Dict containing decrypted keys
|
[
"Attempts",
"to",
"load",
"a",
"local",
"version",
"of",
"decrypted",
"secrets",
"before",
"making",
"external",
"api",
"calls",
"."
] |
63638ced094992552a28109b91839bcbbbe9230a
|
https://github.com/lewisjared/credkeep/blob/63638ced094992552a28109b91839bcbbbe9230a/credkeep/decrypt.py#L40-L53
|
240,173
|
datakortet/dkfileutils
|
dkfileutils/path.py
|
Path.list
|
def list(self, filterfn=lambda x: True):
        """Return all direct descendants of directory `self` for which
`filterfn` returns True.
"""
return [self / p for p in self.listdir() if filterfn(self / p)]
|
python
|
def list(self, filterfn=lambda x: True):
        """Return all direct descendants of directory `self` for which
`filterfn` returns True.
"""
return [self / p for p in self.listdir() if filterfn(self / p)]
|
[
"def",
"list",
"(",
"self",
",",
"filterfn",
"=",
"lambda",
"x",
":",
"True",
")",
":",
"return",
"[",
"self",
"/",
"p",
"for",
"p",
"in",
"self",
".",
"listdir",
"(",
")",
"if",
"filterfn",
"(",
"self",
"/",
"p",
")",
"]"
] |
Return all direct descendants of directory `self` for which
`filterfn` returns True.
|
[
"Return",
"all",
"direct",
"descendands",
"of",
"directory",
"self",
"for",
"which",
"filterfn",
"returns",
"True",
"."
] |
924098d6e2edf88ad9b3ffdec9c74530f80a7d77
|
https://github.com/datakortet/dkfileutils/blob/924098d6e2edf88ad9b3ffdec9c74530f80a7d77/dkfileutils/path.py#L329-L333
|
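A small usage sketch for `Path.list`; it assumes `Path` also exposes an `isdir()` predicate, which this record does not show:

from dkfileutils.path import Path

p = Path('.')
subdirs = p.list(filterfn=lambda child: child.isdir())  # direct subdirectories only
everything = p.list()                                   # default filter accepts every entry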
240,174
|
datakortet/dkfileutils
|
dkfileutils/path.py
|
Path.rm
|
def rm(self, fname=None):
"""Remove a file, don't raise exception if file does not exist.
"""
if fname is not None:
return (self / fname).rm()
try:
self.remove()
except OSError:
pass
|
python
|
def rm(self, fname=None):
"""Remove a file, don't raise exception if file does not exist.
"""
if fname is not None:
return (self / fname).rm()
try:
self.remove()
except OSError:
pass
|
[
"def",
"rm",
"(",
"self",
",",
"fname",
"=",
"None",
")",
":",
"if",
"fname",
"is",
"not",
"None",
":",
"return",
"(",
"self",
"/",
"fname",
")",
".",
"rm",
"(",
")",
"try",
":",
"self",
".",
"remove",
"(",
")",
"except",
"OSError",
":",
"pass"
] |
Remove a file, don't raise exception if file does not exist.
|
[
"Remove",
"a",
"file",
"don",
"t",
"raise",
"exception",
"if",
"file",
"does",
"not",
"exist",
"."
] |
924098d6e2edf88ad9b3ffdec9c74530f80a7d77
|
https://github.com/datakortet/dkfileutils/blob/924098d6e2edf88ad9b3ffdec9c74530f80a7d77/dkfileutils/path.py#L372-L380
|
240,175
|
oczkers/pyllegro
|
pyllegro/core.py
|
chunked
|
def chunked(l, n):
    """Chunk one big list into several small lists."""
return [l[i:i + n] for i in range(0, len(l), n)]
|
python
|
def chunked(l, n):
    """Chunk one big list into several small lists."""
return [l[i:i + n] for i in range(0, len(l), n)]
|
[
"def",
"chunked",
"(",
"l",
",",
"n",
")",
":",
"return",
"[",
"l",
"[",
"i",
":",
"i",
"+",
"n",
"]",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"l",
")",
",",
"n",
")",
"]"
] |
Chunk one big list into several small lists.
|
[
"Chunk",
"one",
"big",
"list",
"into",
"few",
"small",
"lists",
"."
] |
c6d7090560cb9e579f7f769a9eec131a3db2c258
|
https://github.com/oczkers/pyllegro/blob/c6d7090560cb9e579f7f769a9eec131a3db2c258/pyllegro/core.py#L40-L42
|
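`chunked` is fully self-contained, so its contract is easy to pin down with a quick check; note the last chunk may be shorter than `n`:

from pyllegro.core import chunked  # assumed import path

batches = chunked(list(range(7)), 3)
assert batches == [[0, 1, 2], [3, 4, 5], [6]]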
240,176
|
oczkers/pyllegro
|
pyllegro/core.py
|
Allegro.getBids
|
def getBids(self, auction_id):
"""Retrieve all bids in given auction."""
bids = {}
rc = self.__ask__('doGetBidItem2', itemId=auction_id)
if rc:
for i in rc:
i = i['bidsArray']
bids[long(i['item'][1])] = {
'price': Decimal(i['item'][6]),
'quantity': int(i['item'][5]),
'date_buy': i['item'][7]
}
return bids
|
python
|
def getBids(self, auction_id):
"""Retrieve all bids in given auction."""
bids = {}
rc = self.__ask__('doGetBidItem2', itemId=auction_id)
if rc:
for i in rc:
i = i['bidsArray']
bids[long(i['item'][1])] = {
'price': Decimal(i['item'][6]),
'quantity': int(i['item'][5]),
'date_buy': i['item'][7]
}
return bids
|
[
"def",
"getBids",
"(",
"self",
",",
"auction_id",
")",
":",
"bids",
"=",
"{",
"}",
"rc",
"=",
"self",
".",
"__ask__",
"(",
"'doGetBidItem2'",
",",
"itemId",
"=",
"auction_id",
")",
"if",
"rc",
":",
"for",
"i",
"in",
"rc",
":",
"i",
"=",
"i",
"[",
"'bidsArray'",
"]",
"bids",
"[",
"long",
"(",
"i",
"[",
"'item'",
"]",
"[",
"1",
"]",
")",
"]",
"=",
"{",
"'price'",
":",
"Decimal",
"(",
"i",
"[",
"'item'",
"]",
"[",
"6",
"]",
")",
",",
"'quantity'",
":",
"int",
"(",
"i",
"[",
"'item'",
"]",
"[",
"5",
"]",
")",
",",
"'date_buy'",
":",
"i",
"[",
"'item'",
"]",
"[",
"7",
"]",
"}",
"return",
"bids"
] |
Retrieve all bids in given auction.
|
[
"Retrieve",
"all",
"bids",
"in",
"given",
"auction",
"."
] |
c6d7090560cb9e579f7f769a9eec131a3db2c258
|
https://github.com/oczkers/pyllegro/blob/c6d7090560cb9e579f7f769a9eec131a3db2c258/pyllegro/core.py#L150-L162
|
240,177
|
oczkers/pyllegro
|
pyllegro/core.py
|
Allegro.getBuyerInfo
|
def getBuyerInfo(self, auction_id, buyer_id):
"""Return buyer info."""
# TODO: add price from getBids
rc = self.__ask__('doGetPostBuyData', itemsArray=self.ArrayOfLong([auction_id]), buyerFilterArray=self.ArrayOfLong([buyer_id]))
rc = rc[0]['usersPostBuyData']['item'][0]['userData']
return {'allegro_aid': auction_id,
'allegro_uid': rc['userId'],
'allegro_login': magicDecode(rc['userLogin']),
'name': magicDecode(rc['userFirstName']),
'surname': magicDecode(rc['userLastName']),
'company': magicDecode(rc['userCompany']),
'postcode': magicDecode(rc['userPostcode']),
'city': magicDecode(rc['userCity']),
'address': magicDecode(rc['userAddress']),
'email': magicDecode(rc['userEmail']),
'phone': rc['userPhone']}
|
python
|
def getBuyerInfo(self, auction_id, buyer_id):
"""Return buyer info."""
# TODO: add price from getBids
rc = self.__ask__('doGetPostBuyData', itemsArray=self.ArrayOfLong([auction_id]), buyerFilterArray=self.ArrayOfLong([buyer_id]))
rc = rc[0]['usersPostBuyData']['item'][0]['userData']
return {'allegro_aid': auction_id,
'allegro_uid': rc['userId'],
'allegro_login': magicDecode(rc['userLogin']),
'name': magicDecode(rc['userFirstName']),
'surname': magicDecode(rc['userLastName']),
'company': magicDecode(rc['userCompany']),
'postcode': magicDecode(rc['userPostcode']),
'city': magicDecode(rc['userCity']),
'address': magicDecode(rc['userAddress']),
'email': magicDecode(rc['userEmail']),
'phone': rc['userPhone']}
|
[
"def",
"getBuyerInfo",
"(",
"self",
",",
"auction_id",
",",
"buyer_id",
")",
":",
"# TODO: add price from getBids",
"rc",
"=",
"self",
".",
"__ask__",
"(",
"'doGetPostBuyData'",
",",
"itemsArray",
"=",
"self",
".",
"ArrayOfLong",
"(",
"[",
"auction_id",
"]",
")",
",",
"buyerFilterArray",
"=",
"self",
".",
"ArrayOfLong",
"(",
"[",
"buyer_id",
"]",
")",
")",
"rc",
"=",
"rc",
"[",
"0",
"]",
"[",
"'usersPostBuyData'",
"]",
"[",
"'item'",
"]",
"[",
"0",
"]",
"[",
"'userData'",
"]",
"return",
"{",
"'allegro_aid'",
":",
"auction_id",
",",
"'allegro_uid'",
":",
"rc",
"[",
"'userId'",
"]",
",",
"'allegro_login'",
":",
"magicDecode",
"(",
"rc",
"[",
"'userLogin'",
"]",
")",
",",
"'name'",
":",
"magicDecode",
"(",
"rc",
"[",
"'userFirstName'",
"]",
")",
",",
"'surname'",
":",
"magicDecode",
"(",
"rc",
"[",
"'userLastName'",
"]",
")",
",",
"'company'",
":",
"magicDecode",
"(",
"rc",
"[",
"'userCompany'",
"]",
")",
",",
"'postcode'",
":",
"magicDecode",
"(",
"rc",
"[",
"'userPostcode'",
"]",
")",
",",
"'city'",
":",
"magicDecode",
"(",
"rc",
"[",
"'userCity'",
"]",
")",
",",
"'address'",
":",
"magicDecode",
"(",
"rc",
"[",
"'userAddress'",
"]",
")",
",",
"'email'",
":",
"magicDecode",
"(",
"rc",
"[",
"'userEmail'",
"]",
")",
",",
"'phone'",
":",
"rc",
"[",
"'userPhone'",
"]",
"}"
] |
Return buyer info.
|
[
"Return",
"buyer",
"info",
"."
] |
c6d7090560cb9e579f7f769a9eec131a3db2c258
|
https://github.com/oczkers/pyllegro/blob/c6d7090560cb9e579f7f769a9eec131a3db2c258/pyllegro/core.py#L164-L179
|
240,178
|
oczkers/pyllegro
|
pyllegro/core.py
|
Allegro.getOrders
|
def getOrders(self, auction_ids):
"""Return orders details."""
orders = {}
# chunk list (only 25 auction_ids per request)
for chunk in chunked(auction_ids, 25):
# auctions = [{'item': auction_id} for auction_id in chunk] # TODO?: is it needed?
auctions = self.ArrayOfLong(chunk)
rc = self.__ask__('doGetPostBuyData', itemsArray=auctions)
for auction in rc:
orders_auction = []
bids = self.getBids(auction['itemId'])
# get orders details
# for i in auction.get('usersPostBuyData', ()):
if not auction['usersPostBuyData']: # empty
continue
for i in auction['usersPostBuyData']['item']:
i = i['userData']
if i['userId'] not in bids: # temporary(?) webapi bug fix
continue
orders_auction.append({
'allegro_aid': auction['itemId'],
'allegro_uid': i['userId'],
'allegro_login': magicDecode(i['userLogin']),
'name': magicDecode(i['userFirstName']),
'surname': magicDecode(i['userLastName']),
'company': magicDecode(i['userCompany']),
'postcode': magicDecode(i['userPostcode']),
'city': magicDecode(i['userCity']),
'address': magicDecode(i['userAddress']),
'email': magicDecode(i['userEmail']),
'phone': i['userPhone'],
'price': bids[i['userId']]['price'],
'quantity': bids[i['userId']]['quantity'],
'date_buy': bids[i['userId']]['date_buy']
})
orders[auction['itemId']] = orders_auction
return orders
|
python
|
def getOrders(self, auction_ids):
"""Return orders details."""
orders = {}
# chunk list (only 25 auction_ids per request)
for chunk in chunked(auction_ids, 25):
# auctions = [{'item': auction_id} for auction_id in chunk] # TODO?: is it needed?
auctions = self.ArrayOfLong(chunk)
rc = self.__ask__('doGetPostBuyData', itemsArray=auctions)
for auction in rc:
orders_auction = []
bids = self.getBids(auction['itemId'])
# get orders details
# for i in auction.get('usersPostBuyData', ()):
if not auction['usersPostBuyData']: # empty
continue
for i in auction['usersPostBuyData']['item']:
i = i['userData']
if i['userId'] not in bids: # temporary(?) webapi bug fix
continue
orders_auction.append({
'allegro_aid': auction['itemId'],
'allegro_uid': i['userId'],
'allegro_login': magicDecode(i['userLogin']),
'name': magicDecode(i['userFirstName']),
'surname': magicDecode(i['userLastName']),
'company': magicDecode(i['userCompany']),
'postcode': magicDecode(i['userPostcode']),
'city': magicDecode(i['userCity']),
'address': magicDecode(i['userAddress']),
'email': magicDecode(i['userEmail']),
'phone': i['userPhone'],
'price': bids[i['userId']]['price'],
'quantity': bids[i['userId']]['quantity'],
'date_buy': bids[i['userId']]['date_buy']
})
orders[auction['itemId']] = orders_auction
return orders
|
[
"def",
"getOrders",
"(",
"self",
",",
"auction_ids",
")",
":",
"orders",
"=",
"{",
"}",
"# chunk list (only 25 auction_ids per request)",
"for",
"chunk",
"in",
"chunked",
"(",
"auction_ids",
",",
"25",
")",
":",
"# auctions = [{'item': auction_id} for auction_id in chunk] # TODO?: is it needed?",
"auctions",
"=",
"self",
".",
"ArrayOfLong",
"(",
"chunk",
")",
"rc",
"=",
"self",
".",
"__ask__",
"(",
"'doGetPostBuyData'",
",",
"itemsArray",
"=",
"auctions",
")",
"for",
"auction",
"in",
"rc",
":",
"orders_auction",
"=",
"[",
"]",
"bids",
"=",
"self",
".",
"getBids",
"(",
"auction",
"[",
"'itemId'",
"]",
")",
"# get orders details",
"# for i in auction.get('usersPostBuyData', ()):",
"if",
"not",
"auction",
"[",
"'usersPostBuyData'",
"]",
":",
"# empty",
"continue",
"for",
"i",
"in",
"auction",
"[",
"'usersPostBuyData'",
"]",
"[",
"'item'",
"]",
":",
"i",
"=",
"i",
"[",
"'userData'",
"]",
"if",
"i",
"[",
"'userId'",
"]",
"not",
"in",
"bids",
":",
"# temporary(?) webapi bug fix",
"continue",
"orders_auction",
".",
"append",
"(",
"{",
"'allegro_aid'",
":",
"auction",
"[",
"'itemId'",
"]",
",",
"'allegro_uid'",
":",
"i",
"[",
"'userId'",
"]",
",",
"'allegro_login'",
":",
"magicDecode",
"(",
"i",
"[",
"'userLogin'",
"]",
")",
",",
"'name'",
":",
"magicDecode",
"(",
"i",
"[",
"'userFirstName'",
"]",
")",
",",
"'surname'",
":",
"magicDecode",
"(",
"i",
"[",
"'userLastName'",
"]",
")",
",",
"'company'",
":",
"magicDecode",
"(",
"i",
"[",
"'userCompany'",
"]",
")",
",",
"'postcode'",
":",
"magicDecode",
"(",
"i",
"[",
"'userPostcode'",
"]",
")",
",",
"'city'",
":",
"magicDecode",
"(",
"i",
"[",
"'userCity'",
"]",
")",
",",
"'address'",
":",
"magicDecode",
"(",
"i",
"[",
"'userAddress'",
"]",
")",
",",
"'email'",
":",
"magicDecode",
"(",
"i",
"[",
"'userEmail'",
"]",
")",
",",
"'phone'",
":",
"i",
"[",
"'userPhone'",
"]",
",",
"'price'",
":",
"bids",
"[",
"i",
"[",
"'userId'",
"]",
"]",
"[",
"'price'",
"]",
",",
"'quantity'",
":",
"bids",
"[",
"i",
"[",
"'userId'",
"]",
"]",
"[",
"'quantity'",
"]",
",",
"'date_buy'",
":",
"bids",
"[",
"i",
"[",
"'userId'",
"]",
"]",
"[",
"'date_buy'",
"]",
"}",
")",
"orders",
"[",
"auction",
"[",
"'itemId'",
"]",
"]",
"=",
"orders_auction",
"return",
"orders"
] |
Return orders details.
|
[
"Return",
"orders",
"details",
"."
] |
c6d7090560cb9e579f7f769a9eec131a3db2c258
|
https://github.com/oczkers/pyllegro/blob/c6d7090560cb9e579f7f769a9eec131a3db2c258/pyllegro/core.py#L181-L217
|
240,179
|
oczkers/pyllegro
|
pyllegro/core.py
|
Allegro.getJournalDeals
|
def getJournalDeals(self, start=None):
"""Return all journal events from start."""
        # 1 - purchase deal created, 2 - post-purchase form (payment card) created, 3 - post-purchase form (payment card) cancelled, 4 - transaction completed (paid) via PzA
if start is not None:
self.last_event_id = start
events = []
while self.getJournalDealsInfo(self.last_event_id) > 0:
rc = self.__ask__('doGetSiteJournalDeals', journalStart=self.last_event_id)
for i in rc:
events.append({
'allegro_did': i['dealId'],
'deal_status': i['dealEventType'],
'transaction_id': i['dealTransactionId'],
'time': i['dealEventTime'],
'event_id': i['dealEventId'],
'allegro_aid': i['dealItemId'],
'allegro_uid': i['dealBuyerId'],
# 'seller_id': i['dealSellerId '],
'quantity': i['dealQuantity']
})
self.last_event_id = rc[-1]['dealEventId']
return events
|
python
|
def getJournalDeals(self, start=None):
"""Return all journal events from start."""
        # 1 - purchase deal created, 2 - post-purchase form (payment card) created, 3 - post-purchase form (payment card) cancelled, 4 - transaction completed (paid) via PzA
if start is not None:
self.last_event_id = start
events = []
while self.getJournalDealsInfo(self.last_event_id) > 0:
rc = self.__ask__('doGetSiteJournalDeals', journalStart=self.last_event_id)
for i in rc:
events.append({
'allegro_did': i['dealId'],
'deal_status': i['dealEventType'],
'transaction_id': i['dealTransactionId'],
'time': i['dealEventTime'],
'event_id': i['dealEventId'],
'allegro_aid': i['dealItemId'],
'allegro_uid': i['dealBuyerId'],
# 'seller_id': i['dealSellerId '],
'quantity': i['dealQuantity']
})
self.last_event_id = rc[-1]['dealEventId']
return events
|
[
"def",
"getJournalDeals",
"(",
"self",
",",
"start",
"=",
"None",
")",
":",
"# 1 - utworzenie aktu zakupowego (deala), 2 - utworzenie formularza pozakupowego (karta platnosci), 3 - anulowanie formularza pozakupowego (karta platnosci), 4 - zakończenie (opłacenie) transakcji przez PzA",
"if",
"start",
"is",
"not",
"None",
":",
"self",
".",
"last_event_id",
"=",
"start",
"events",
"=",
"[",
"]",
"while",
"self",
".",
"getJournalDealsInfo",
"(",
"self",
".",
"last_event_id",
")",
">",
"0",
":",
"rc",
"=",
"self",
".",
"__ask__",
"(",
"'doGetSiteJournalDeals'",
",",
"journalStart",
"=",
"self",
".",
"last_event_id",
")",
"for",
"i",
"in",
"rc",
":",
"events",
".",
"append",
"(",
"{",
"'allegro_did'",
":",
"i",
"[",
"'dealId'",
"]",
",",
"'deal_status'",
":",
"i",
"[",
"'dealEventType'",
"]",
",",
"'transaction_id'",
":",
"i",
"[",
"'dealTransactionId'",
"]",
",",
"'time'",
":",
"i",
"[",
"'dealEventTime'",
"]",
",",
"'event_id'",
":",
"i",
"[",
"'dealEventId'",
"]",
",",
"'allegro_aid'",
":",
"i",
"[",
"'dealItemId'",
"]",
",",
"'allegro_uid'",
":",
"i",
"[",
"'dealBuyerId'",
"]",
",",
"# 'seller_id': i['dealSellerId '],",
"'quantity'",
":",
"i",
"[",
"'dealQuantity'",
"]",
"}",
")",
"self",
".",
"last_event_id",
"=",
"rc",
"[",
"-",
"1",
"]",
"[",
"'dealEventId'",
"]",
"return",
"events"
] |
Return all journal events from start.
|
[
"Return",
"all",
"journal",
"events",
"from",
"start",
"."
] |
c6d7090560cb9e579f7f769a9eec131a3db2c258
|
https://github.com/oczkers/pyllegro/blob/c6d7090560cb9e579f7f769a9eec131a3db2c258/pyllegro/core.py#L254-L275
|
240,180
|
oczkers/pyllegro
|
pyllegro/core.py
|
Allegro.getWaitingFeedbacks
|
def getWaitingFeedbacks(self):
"""Return all waiting feedbacks from buyers."""
# TODO: return sorted dictionary (negative/positive/neutral)
feedbacks = []
offset = 0
amount = self.__ask__('doGetWaitingFeedbacksCount')
while amount > 0:
rc = self.__ask__('doGetWaitingFeedbacks',
offset=offset, packageSize=200)
feedbacks.extend(rc['feWaitList'])
amount -= 200
offset += 1
return feedbacks
|
python
|
def getWaitingFeedbacks(self):
"""Return all waiting feedbacks from buyers."""
# TODO: return sorted dictionary (negative/positive/neutral)
feedbacks = []
offset = 0
amount = self.__ask__('doGetWaitingFeedbacksCount')
while amount > 0:
rc = self.__ask__('doGetWaitingFeedbacks',
offset=offset, packageSize=200)
feedbacks.extend(rc['feWaitList'])
amount -= 200
offset += 1
return feedbacks
|
[
"def",
"getWaitingFeedbacks",
"(",
"self",
")",
":",
"# TODO: return sorted dictionary (negative/positive/neutral)",
"feedbacks",
"=",
"[",
"]",
"offset",
"=",
"0",
"amount",
"=",
"self",
".",
"__ask__",
"(",
"'doGetWaitingFeedbacksCount'",
")",
"while",
"amount",
">",
"0",
":",
"rc",
"=",
"self",
".",
"__ask__",
"(",
"'doGetWaitingFeedbacks'",
",",
"offset",
"=",
"offset",
",",
"packageSize",
"=",
"200",
")",
"feedbacks",
".",
"extend",
"(",
"rc",
"[",
"'feWaitList'",
"]",
")",
"amount",
"-=",
"200",
"offset",
"+=",
"1",
"return",
"feedbacks"
] |
Return all waiting feedbacks from buyers.
|
[
"Return",
"all",
"waiting",
"feedbacks",
"from",
"buyers",
"."
] |
c6d7090560cb9e579f7f769a9eec131a3db2c258
|
https://github.com/oczkers/pyllegro/blob/c6d7090560cb9e579f7f769a9eec131a3db2c258/pyllegro/core.py#L278-L290
|
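The count-then-page loop in `getWaitingFeedbacks` generalizes: fetch a total, then pull fixed-size pages until the count is exhausted (here `offset` appears to be a page index, not an item index). A standalone sketch of the same pattern with hypothetical callables:

def fetch_all(count_fn, page_fn, page_size=200):
    # Mirrors getWaitingFeedbacks: ask for the total first, then page through it.
    items = []
    remaining = count_fn()
    page = 0
    while remaining > 0:
        items.extend(page_fn(offset=page, limit=page_size))
        remaining -= page_size
        page += 1
    return items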
240,181
|
jeffrimko/Auxly
|
lib/auxly/shell.py
|
silent
|
def silent(cmd, **kwargs):
"""Calls the given shell command. Output will not be displayed. Returns the
status code.
**Examples**:
::
auxly.shell.silent("ls")
"""
return call(cmd, shell=True, stdout=NULL, stderr=NULL, **kwargs)
|
python
|
def silent(cmd, **kwargs):
"""Calls the given shell command. Output will not be displayed. Returns the
status code.
**Examples**:
::
auxly.shell.silent("ls")
"""
return call(cmd, shell=True, stdout=NULL, stderr=NULL, **kwargs)
|
[
"def",
"silent",
"(",
"cmd",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"call",
"(",
"cmd",
",",
"shell",
"=",
"True",
",",
"stdout",
"=",
"NULL",
",",
"stderr",
"=",
"NULL",
",",
"*",
"*",
"kwargs",
")"
] |
Calls the given shell command. Output will not be displayed. Returns the
status code.
**Examples**:
::
auxly.shell.silent("ls")
|
[
"Calls",
"the",
"given",
"shell",
"command",
".",
"Output",
"will",
"not",
"be",
"displayed",
".",
"Returns",
"the",
"status",
"code",
"."
] |
5aae876bcb6ca117c81d904f9455764cdc78cd48
|
https://github.com/jeffrimko/Auxly/blob/5aae876bcb6ca117c81d904f9455764cdc78cd48/lib/auxly/shell.py#L32-L40
|
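A usage sketch for `silent`; the command is illustrative and its flags are platform-dependent:

import auxly.shell

# Only the exit status comes back; stdout/stderr go to the null device.
if auxly.shell.silent("ping -c 1 example.com") == 0:
    print("host reachable")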
240,182
|
jeffrimko/Auxly
|
lib/auxly/shell.py
|
has
|
def has(cmd):
    """Returns true if the given shell command is available.
**Examples**:
::
auxly.shell.has("ls") # True
"""
helps = ["--help", "-h", "--version"]
if "nt" == os.name:
helps.insert(0, "/?")
fakecmd = "fakecmd"
cmderr = strerr(fakecmd).replace(fakecmd, cmd)
for h in helps:
hcmd = "%s %s" % (cmd, h)
if 0 == silent(hcmd):
return True
if len(listout(hcmd)) > 0:
return True
if strerr(hcmd) != cmderr:
return True
return False
|
python
|
def has(cmd):
    """Returns true if the given shell command is available.
**Examples**:
::
auxly.shell.has("ls") # True
"""
helps = ["--help", "-h", "--version"]
if "nt" == os.name:
helps.insert(0, "/?")
fakecmd = "fakecmd"
cmderr = strerr(fakecmd).replace(fakecmd, cmd)
for h in helps:
hcmd = "%s %s" % (cmd, h)
if 0 == silent(hcmd):
return True
if len(listout(hcmd)) > 0:
return True
if strerr(hcmd) != cmderr:
return True
return False
|
[
"def",
"has",
"(",
"cmd",
")",
":",
"helps",
"=",
"[",
"\"--help\"",
",",
"\"-h\"",
",",
"\"--version\"",
"]",
"if",
"\"nt\"",
"==",
"os",
".",
"name",
":",
"helps",
".",
"insert",
"(",
"0",
",",
"\"/?\"",
")",
"fakecmd",
"=",
"\"fakecmd\"",
"cmderr",
"=",
"strerr",
"(",
"fakecmd",
")",
".",
"replace",
"(",
"fakecmd",
",",
"cmd",
")",
"for",
"h",
"in",
"helps",
":",
"hcmd",
"=",
"\"%s %s\"",
"%",
"(",
"cmd",
",",
"h",
")",
"if",
"0",
"==",
"silent",
"(",
"hcmd",
")",
":",
"return",
"True",
"if",
"len",
"(",
"listout",
"(",
"hcmd",
")",
")",
">",
"0",
":",
"return",
"True",
"if",
"strerr",
"(",
"hcmd",
")",
"!=",
"cmderr",
":",
"return",
"True",
"return",
"False"
] |
Returns true if the given shell command is available.
**Examples**:
::
auxly.shell.has("ls") # True
|
[
"Returns",
"true",
"if",
"the",
"give",
"shell",
"command",
"is",
"available",
"."
] |
5aae876bcb6ca117c81d904f9455764cdc78cd48
|
https://github.com/jeffrimko/Auxly/blob/5aae876bcb6ca117c81d904f9455764cdc78cd48/lib/auxly/shell.py#L42-L62
|
240,183
|
mayfield/shellish
|
shellish/tools/csvpretty.py
|
csvpretty
|
def csvpretty(csvfile: csvfile=sys.stdin):
""" Pretty print a CSV file. """
shellish.tabulate(csv.reader(csvfile))
|
python
|
def csvpretty(csvfile: csvfile=sys.stdin):
""" Pretty print a CSV file. """
shellish.tabulate(csv.reader(csvfile))
|
[
"def",
"csvpretty",
"(",
"csvfile",
":",
"csvfile",
"=",
"sys",
".",
"stdin",
")",
":",
"shellish",
".",
"tabulate",
"(",
"csv",
".",
"reader",
"(",
"csvfile",
")",
")"
] |
Pretty print a CSV file.
|
[
"Pretty",
"print",
"a",
"CSV",
"file",
"."
] |
df0f0e4612d138c34d8cb99b66ab5b8e47f1414a
|
https://github.com/mayfield/shellish/blob/df0f0e4612d138c34d8cb99b66ab5b8e47f1414a/shellish/tools/csvpretty.py#L10-L12
|
240,184
|
jpscaletti/moar
|
moar/storage.py
|
Storage.get_key
|
def get_key(self, path, geometry, filters, options):
        """Generates the thumbnail's key from its arguments.
        If the arguments don't change, the key will not change.
"""
seed = u' '.join([
str(path),
str(geometry),
str(filters),
str(options),
]).encode('utf8')
return md5(seed).hexdigest()
|
python
|
def get_key(self, path, geometry, filters, options):
        """Generates the thumbnail's key from its arguments.
        If the arguments don't change, the key will not change.
"""
seed = u' '.join([
str(path),
str(geometry),
str(filters),
str(options),
]).encode('utf8')
return md5(seed).hexdigest()
|
[
"def",
"get_key",
"(",
"self",
",",
"path",
",",
"geometry",
",",
"filters",
",",
"options",
")",
":",
"seed",
"=",
"u' '",
".",
"join",
"(",
"[",
"str",
"(",
"path",
")",
",",
"str",
"(",
"geometry",
")",
",",
"str",
"(",
"filters",
")",
",",
"str",
"(",
"options",
")",
",",
"]",
")",
".",
"encode",
"(",
"'utf8'",
")",
"return",
"md5",
"(",
"seed",
")",
".",
"hexdigest",
"(",
")"
] |
Generates the thumbnail's key from its arguments.
If the arguments don't change, the key will not change.
|
[
"Generates",
"the",
"thumbnail",
"s",
"key",
"from",
"it",
"s",
"arguments",
".",
"If",
"the",
"arguments",
"doesn",
"t",
"change",
"the",
"key",
"will",
"not",
"change"
] |
22694e5671b6adaccc4c9c87db7bdd701d20e734
|
https://github.com/jpscaletti/moar/blob/22694e5671b6adaccc4c9c87db7bdd701d20e734/moar/storage.py#L33-L43
|
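The derivation in `Storage.get_key` is deterministic; a standalone re-creation of the seed recipe (the function name here is illustrative, not the moar API) shows that identical arguments always produce the same digest:

from hashlib import md5

def thumb_key(path, geometry, filters, options):
    # Same recipe as Storage.get_key: a stable text seed, hashed with md5.
    seed = u' '.join([str(path), str(geometry), str(filters), str(options)]).encode('utf8')
    return md5(seed).hexdigest()

assert thumb_key('a.jpg', '100x100', ['crop'], {}) == thumb_key('a.jpg', '100x100', ['crop'], {})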
240,185
|
jpscaletti/moar
|
moar/storage.py
|
Storage.get_source
|
def get_source(self, path_or_url):
"""Returns the source image file descriptor.
path_or_url:
Path to the source image as an absolute path, a path relative
to `self.base_path` or a URL beginning with `http[s]`
"""
if path_or_url.startswith(('http://', 'https://')):
try:
return urlopen(path_or_url)
except IOError:
return None
fullpath = path_or_url
if not os.path.isabs(path_or_url):
fullpath = os.path.join(self.base_path, path_or_url)
try:
return io.open(fullpath, 'rb')
except IOError:
return None
|
python
|
def get_source(self, path_or_url):
"""Returns the source image file descriptor.
path_or_url:
Path to the source image as an absolute path, a path relative
to `self.base_path` or a URL beginning with `http[s]`
"""
if path_or_url.startswith(('http://', 'https://')):
try:
return urlopen(path_or_url)
except IOError:
return None
fullpath = path_or_url
if not os.path.isabs(path_or_url):
fullpath = os.path.join(self.base_path, path_or_url)
try:
return io.open(fullpath, 'rb')
except IOError:
return None
|
[
"def",
"get_source",
"(",
"self",
",",
"path_or_url",
")",
":",
"if",
"path_or_url",
".",
"startswith",
"(",
"(",
"'http://'",
",",
"'https://'",
")",
")",
":",
"try",
":",
"return",
"urlopen",
"(",
"path_or_url",
")",
"except",
"IOError",
":",
"return",
"None",
"fullpath",
"=",
"path_or_url",
"if",
"not",
"os",
".",
"path",
".",
"isabs",
"(",
"path_or_url",
")",
":",
"fullpath",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"base_path",
",",
"path_or_url",
")",
"try",
":",
"return",
"io",
".",
"open",
"(",
"fullpath",
",",
"'rb'",
")",
"except",
"IOError",
":",
"return",
"None"
] |
Returns the source image file descriptor.
path_or_url:
Path to the source image as an absolute path, a path relative
to `self.base_path` or a URL beginning with `http[s]`
|
[
"Returns",
"the",
"source",
"image",
"file",
"descriptor",
"."
] |
22694e5671b6adaccc4c9c87db7bdd701d20e734
|
https://github.com/jpscaletti/moar/blob/22694e5671b6adaccc4c9c87db7bdd701d20e734/moar/storage.py#L45-L65
|
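`get_source` treats http(s) URLs and filesystem paths uniformly and converts IOError into None, so callers must check for it; a hypothetical call site, assuming `storage` is an already-configured Storage instance:

fd = storage.get_source('photos/cat.jpg')   # resolved against storage.base_path
if fd is None:
    raise ValueError('source image missing or unreadable')
data = fd.read()
fd.close()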
240,186
|
jpscaletti/moar
|
moar/storage.py
|
Storage.get_thumb
|
def get_thumb(self, path, key, format):
        """Get the stored thumbnail if it exists.
path:
path of the source image
key:
key of the thumbnail
format:
thumbnail's file extension
"""
thumbpath = self.get_thumbpath(path, key, format)
fullpath = os.path.join(self.out_path, thumbpath)
if os.path.isfile(fullpath):
url = self.get_url(thumbpath)
return Thumb(url, key)
return Thumb()
|
python
|
def get_thumb(self, path, key, format):
        """Get the stored thumbnail if it exists.
path:
path of the source image
key:
key of the thumbnail
format:
thumbnail's file extension
"""
thumbpath = self.get_thumbpath(path, key, format)
fullpath = os.path.join(self.out_path, thumbpath)
if os.path.isfile(fullpath):
url = self.get_url(thumbpath)
return Thumb(url, key)
return Thumb()
|
[
"def",
"get_thumb",
"(",
"self",
",",
"path",
",",
"key",
",",
"format",
")",
":",
"thumbpath",
"=",
"self",
".",
"get_thumbpath",
"(",
"path",
",",
"key",
",",
"format",
")",
"fullpath",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"out_path",
",",
"thumbpath",
")",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"fullpath",
")",
":",
"url",
"=",
"self",
".",
"get_url",
"(",
"thumbpath",
")",
"return",
"Thumb",
"(",
"url",
",",
"key",
")",
"return",
"Thumb",
"(",
")"
] |
Get the stored thumbnail if it exists.
path:
path of the source image
key:
key of the thumbnail
format:
thumbnail's file extension
|
[
"Get",
"the",
"stored",
"thumbnail",
"if",
"exists",
"."
] |
22694e5671b6adaccc4c9c87db7bdd701d20e734
|
https://github.com/jpscaletti/moar/blob/22694e5671b6adaccc4c9c87db7bdd701d20e734/moar/storage.py#L67-L82
|
240,187
|
jpscaletti/moar
|
moar/storage.py
|
Storage.get_thumbpath
|
def get_thumbpath(self, path, key, format):
"""Return the relative path of the thumbnail.
path:
path of the source image
key:
key of the thumbnail
format:
thumbnail file extension
"""
relpath = os.path.dirname(path)
thumbsdir = self.get_thumbsdir(path)
name, _ = os.path.splitext(os.path.basename(path))
name = '{}.{}.{}'.format(name, key, format.lower())
return os.path.join(relpath, thumbsdir, name)
|
python
|
def get_thumbpath(self, path, key, format):
"""Return the relative path of the thumbnail.
path:
path of the source image
key:
key of the thumbnail
format:
thumbnail file extension
"""
relpath = os.path.dirname(path)
thumbsdir = self.get_thumbsdir(path)
name, _ = os.path.splitext(os.path.basename(path))
name = '{}.{}.{}'.format(name, key, format.lower())
return os.path.join(relpath, thumbsdir, name)
|
[
"def",
"get_thumbpath",
"(",
"self",
",",
"path",
",",
"key",
",",
"format",
")",
":",
"relpath",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"path",
")",
"thumbsdir",
"=",
"self",
".",
"get_thumbsdir",
"(",
"path",
")",
"name",
",",
"_",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"os",
".",
"path",
".",
"basename",
"(",
"path",
")",
")",
"name",
"=",
"'{}.{}.{}'",
".",
"format",
"(",
"name",
",",
"key",
",",
"format",
".",
"lower",
"(",
")",
")",
"return",
"os",
".",
"path",
".",
"join",
"(",
"relpath",
",",
"thumbsdir",
",",
"name",
")"
] |
Return the relative path of the thumbnail.
path:
path of the source image
key:
key of the thumbnail
format:
thumbnail file extension
|
[
"Return",
"the",
"relative",
"path",
"of",
"the",
"thumbnail",
"."
] |
22694e5671b6adaccc4c9c87db7bdd701d20e734
|
https://github.com/jpscaletti/moar/blob/22694e5671b6adaccc4c9c87db7bdd701d20e734/moar/storage.py#L84-L98
|
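A worked example of the naming recipe in `get_thumbpath`, re-created standalone; the thumbs directory is hard-coded here because `get_thumbsdir` is not shown in this record:

import os

def thumbpath(path, key, format, thumbsdir='t'):  # 't' is an assumed directory name
    relpath = os.path.dirname(path)
    name, _ = os.path.splitext(os.path.basename(path))
    name = '{}.{}.{}'.format(name, key, format.lower())
    return os.path.join(relpath, thumbsdir, name)

print(thumbpath('photos/cat.jpg', 'd41d8cd9', 'JPEG'))  # photos/t/cat.d41d8cd9.jpeg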
240,188
|
jpscaletti/moar
|
moar/storage.py
|
Storage.save
|
def save(self, path, key, format, data):
"""Save a newly generated thumbnail.
path:
path of the source image
key:
key of the thumbnail
format:
thumbnail's file extension
data:
thumbnail's binary data
"""
thumbpath = self.get_thumbpath(path, key, format)
fullpath = os.path.join(self.out_path, thumbpath)
self.save_thumb(fullpath, data)
url = self.get_url(thumbpath)
thumb = Thumb(url, key, fullpath)
return thumb
|
python
|
def save(self, path, key, format, data):
"""Save a newly generated thumbnail.
path:
path of the source image
key:
key of the thumbnail
format:
thumbnail's file extension
data:
thumbnail's binary data
"""
thumbpath = self.get_thumbpath(path, key, format)
fullpath = os.path.join(self.out_path, thumbpath)
self.save_thumb(fullpath, data)
url = self.get_url(thumbpath)
thumb = Thumb(url, key, fullpath)
return thumb
|
[
"def",
"save",
"(",
"self",
",",
"path",
",",
"key",
",",
"format",
",",
"data",
")",
":",
"thumbpath",
"=",
"self",
".",
"get_thumbpath",
"(",
"path",
",",
"key",
",",
"format",
")",
"fullpath",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"out_path",
",",
"thumbpath",
")",
"self",
".",
"save_thumb",
"(",
"fullpath",
",",
"data",
")",
"url",
"=",
"self",
".",
"get_url",
"(",
"thumbpath",
")",
"thumb",
"=",
"Thumb",
"(",
"url",
",",
"key",
",",
"fullpath",
")",
"return",
"thumb"
] |
Save a newly generated thumbnail.
path:
path of the source image
key:
key of the thumbnail
format:
thumbnail's file extension
data:
thumbnail's binary data
|
[
"Save",
"a",
"newly",
"generated",
"thumbnail",
"."
] |
22694e5671b6adaccc4c9c87db7bdd701d20e734
|
https://github.com/jpscaletti/moar/blob/22694e5671b6adaccc4c9c87db7bdd701d20e734/moar/storage.py#L112-L129
|
240,189
|
Vito2015/pyextend
|
pyextend/core/log.py
|
add_handler
|
def add_handler(cls, level, fmt, colorful, **kwargs):
"""Add a configured handler to the global logger."""
global g_logger
if isinstance(level, str):
level = getattr(logging, level.upper(), logging.DEBUG)
handler = cls(**kwargs)
handler.setLevel(level)
if colorful:
formatter = ColoredFormatter(fmt, datefmt='%Y-%m-%d %H:%M:%S')
else:
formatter = logging.Formatter(fmt, datefmt='%Y-%m-%d %H:%M:%S')
handler.setFormatter(formatter)
g_logger.addHandler(handler)
return handler
|
python
|
def add_handler(cls, level, fmt, colorful, **kwargs):
"""Add a configured handler to the global logger."""
global g_logger
if isinstance(level, str):
level = getattr(logging, level.upper(), logging.DEBUG)
handler = cls(**kwargs)
handler.setLevel(level)
if colorful:
formatter = ColoredFormatter(fmt, datefmt='%Y-%m-%d %H:%M:%S')
else:
formatter = logging.Formatter(fmt, datefmt='%Y-%m-%d %H:%M:%S')
handler.setFormatter(formatter)
g_logger.addHandler(handler)
return handler
|
[
"def",
"add_handler",
"(",
"cls",
",",
"level",
",",
"fmt",
",",
"colorful",
",",
"*",
"*",
"kwargs",
")",
":",
"global",
"g_logger",
"if",
"isinstance",
"(",
"level",
",",
"str",
")",
":",
"level",
"=",
"getattr",
"(",
"logging",
",",
"level",
".",
"upper",
"(",
")",
",",
"logging",
".",
"DEBUG",
")",
"handler",
"=",
"cls",
"(",
"*",
"*",
"kwargs",
")",
"handler",
".",
"setLevel",
"(",
"level",
")",
"if",
"colorful",
":",
"formatter",
"=",
"ColoredFormatter",
"(",
"fmt",
",",
"datefmt",
"=",
"'%Y-%m-%d %H:%M:%S'",
")",
"else",
":",
"formatter",
"=",
"logging",
".",
"Formatter",
"(",
"fmt",
",",
"datefmt",
"=",
"'%Y-%m-%d %H:%M:%S'",
")",
"handler",
".",
"setFormatter",
"(",
"formatter",
")",
"g_logger",
".",
"addHandler",
"(",
"handler",
")",
"return",
"handler"
] |
Add a configured handler to the global logger.
|
[
"Add",
"a",
"configured",
"handler",
"to",
"the",
"global",
"logger",
"."
] |
36861dfe1087e437ffe9b5a1da9345c85b4fa4a1
|
https://github.com/Vito2015/pyextend/blob/36861dfe1087e437ffe9b5a1da9345c85b4fa4a1/pyextend/core/log.py#L115-L133
|
240,190
|
Vito2015/pyextend
|
pyextend/core/log.py
|
add_filehandler
|
def add_filehandler(level, fmt, filename, mode, backup_count, limit, when):
"""Add a file handler to the global logger."""
kwargs = {}
# If the filename is not set, use the default filename
if filename is None:
filename = getattr(sys.modules['__main__'], '__file__', 'log.py')
filename = os.path.basename(filename.replace('.py', '.log'))
filename = os.path.join('/tmp', filename)
if not os.path.exists(os.path.dirname(filename)):
os.mkdir(os.path.dirname(filename))
kwargs['filename'] = filename
# Choose the filehandler based on the passed arguments
if backup_count == 0: # Use FileHandler
cls = logging.FileHandler
kwargs['mode'] = mode
elif when is None: # Use RotatingFileHandler
cls = logging.handlers.RotatingFileHandler
kwargs['maxBytes'] = limit
kwargs['backupCount'] = backup_count
kwargs['mode'] = mode
else: # Use TimedRotatingFileHandler
cls = logging.handlers.TimedRotatingFileHandler
kwargs['when'] = when
kwargs['interval'] = limit
kwargs['backupCount'] = backup_count
return add_handler(cls, level, fmt, False, **kwargs)
|
python
|
def add_filehandler(level, fmt, filename, mode, backup_count, limit, when):
"""Add a file handler to the global logger."""
kwargs = {}
# If the filename is not set, use the default filename
if filename is None:
filename = getattr(sys.modules['__main__'], '__file__', 'log.py')
filename = os.path.basename(filename.replace('.py', '.log'))
filename = os.path.join('/tmp', filename)
if not os.path.exists(os.path.dirname(filename)):
os.mkdir(os.path.dirname(filename))
kwargs['filename'] = filename
# Choose the filehandler based on the passed arguments
if backup_count == 0: # Use FileHandler
cls = logging.FileHandler
kwargs['mode'] = mode
elif when is None: # Use RotatingFileHandler
cls = logging.handlers.RotatingFileHandler
kwargs['maxBytes'] = limit
kwargs['backupCount'] = backup_count
kwargs['mode'] = mode
else: # Use TimedRotatingFileHandler
cls = logging.handlers.TimedRotatingFileHandler
kwargs['when'] = when
kwargs['interval'] = limit
kwargs['backupCount'] = backup_count
return add_handler(cls, level, fmt, False, **kwargs)
|
[
"def",
"add_filehandler",
"(",
"level",
",",
"fmt",
",",
"filename",
",",
"mode",
",",
"backup_count",
",",
"limit",
",",
"when",
")",
":",
"kwargs",
"=",
"{",
"}",
"# If the filename is not set, use the default filename",
"if",
"filename",
"is",
"None",
":",
"filename",
"=",
"getattr",
"(",
"sys",
".",
"modules",
"[",
"'__main__'",
"]",
",",
"'__file__'",
",",
"'log.py'",
")",
"filename",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"filename",
".",
"replace",
"(",
"'.py'",
",",
"'.log'",
")",
")",
"filename",
"=",
"os",
".",
"path",
".",
"join",
"(",
"'/tmp'",
",",
"filename",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"filename",
")",
")",
":",
"os",
".",
"mkdir",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"filename",
")",
")",
"kwargs",
"[",
"'filename'",
"]",
"=",
"filename",
"# Choose the filehandler based on the passed arguments",
"if",
"backup_count",
"==",
"0",
":",
"# Use FileHandler",
"cls",
"=",
"logging",
".",
"FileHandler",
"kwargs",
"[",
"'mode'",
"]",
"=",
"mode",
"elif",
"when",
"is",
"None",
":",
"# Use RotatingFileHandler",
"cls",
"=",
"logging",
".",
"handlers",
".",
"RotatingFileHandler",
"kwargs",
"[",
"'maxBytes'",
"]",
"=",
"limit",
"kwargs",
"[",
"'backupCount'",
"]",
"=",
"backup_count",
"kwargs",
"[",
"'mode'",
"]",
"=",
"mode",
"else",
":",
"# Use TimedRotatingFileHandler",
"cls",
"=",
"logging",
".",
"handlers",
".",
"TimedRotatingFileHandler",
"kwargs",
"[",
"'when'",
"]",
"=",
"when",
"kwargs",
"[",
"'interval'",
"]",
"=",
"limit",
"kwargs",
"[",
"'backupCount'",
"]",
"=",
"backup_count",
"return",
"add_handler",
"(",
"cls",
",",
"level",
",",
"fmt",
",",
"False",
",",
"*",
"*",
"kwargs",
")"
] |
Add a file handler to the global logger.
|
[
"Add",
"a",
"file",
"handler",
"to",
"the",
"global",
"logger",
"."
] |
36861dfe1087e437ffe9b5a1da9345c85b4fa4a1
|
https://github.com/Vito2015/pyextend/blob/36861dfe1087e437ffe9b5a1da9345c85b4fa4a1/pyextend/core/log.py#L141-L171
|
240,191
|
Vito2015/pyextend
|
pyextend/core/log.py
|
init_logger
|
def init_logger(name=None):
"""Reload the global logger."""
global g_logger
if g_logger is None:
g_logger = logging.getLogger(name=name)
else:
logging.shutdown()
g_logger.handlers = []
g_logger.setLevel(logging.DEBUG)
|
python
|
def init_logger(name=None):
"""Reload the global logger."""
global g_logger
if g_logger is None:
g_logger = logging.getLogger(name=name)
else:
logging.shutdown()
g_logger.handlers = []
g_logger.setLevel(logging.DEBUG)
|
[
"def",
"init_logger",
"(",
"name",
"=",
"None",
")",
":",
"global",
"g_logger",
"if",
"g_logger",
"is",
"None",
":",
"g_logger",
"=",
"logging",
".",
"getLogger",
"(",
"name",
"=",
"name",
")",
"else",
":",
"logging",
".",
"shutdown",
"(",
")",
"g_logger",
".",
"handlers",
"=",
"[",
"]",
"g_logger",
".",
"setLevel",
"(",
"logging",
".",
"DEBUG",
")"
] |
Reload the global logger.
|
[
"Reload",
"the",
"global",
"logger",
"."
] |
36861dfe1087e437ffe9b5a1da9345c85b4fa4a1
|
https://github.com/Vito2015/pyextend/blob/36861dfe1087e437ffe9b5a1da9345c85b4fa4a1/pyextend/core/log.py#L174-L184
|
240,192
|
Vito2015/pyextend
|
pyextend/core/log.py
|
set_logger
|
def set_logger(name=None, filename=None, mode='a', level='NOTSET:NOTSET',
fmt=
'%(asctime)s %(filename)s:%(lineno)d [PID:%(process)-5d THD:%(thread)-5d %(levelname)-7s] %(message)s',
# fmt='[%(levelname)s] %(asctime)s %(message)s',
backup_count=5, limit=20480, when=None, with_filehandler=True):
"""Configure the global logger."""
level = level.split(':')
if len(level) == 1: # Both set to the same level
s_level = f_level = level[0]
else:
s_level = level[0] # StreamHandler log level
f_level = level[1] # FileHandler log level
init_logger(name=name)
add_streamhandler(s_level, fmt)
if with_filehandler:
add_filehandler(f_level, fmt, filename, mode, backup_count, limit, when)
    # Import the common log functions for convenience
import_log_funcs()
|
python
|
def set_logger(name=None, filename=None, mode='a', level='NOTSET:NOTSET',
fmt=
'%(asctime)s %(filename)s:%(lineno)d [PID:%(process)-5d THD:%(thread)-5d %(levelname)-7s] %(message)s',
# fmt='[%(levelname)s] %(asctime)s %(message)s',
backup_count=5, limit=20480, when=None, with_filehandler=True):
"""Configure the global logger."""
level = level.split(':')
if len(level) == 1: # Both set to the same level
s_level = f_level = level[0]
else:
s_level = level[0] # StreamHandler log level
f_level = level[1] # FileHandler log level
init_logger(name=name)
add_streamhandler(s_level, fmt)
if with_filehandler:
add_filehandler(f_level, fmt, filename, mode, backup_count, limit, when)
    # Import the common log functions for convenience
import_log_funcs()
|
[
"def",
"set_logger",
"(",
"name",
"=",
"None",
",",
"filename",
"=",
"None",
",",
"mode",
"=",
"'a'",
",",
"level",
"=",
"'NOTSET:NOTSET'",
",",
"fmt",
"=",
"'%(asctime)s %(filename)s:%(lineno)d [PID:%(process)-5d THD:%(thread)-5d %(levelname)-7s] %(message)s'",
",",
"# fmt='[%(levelname)s] %(asctime)s %(message)s',",
"backup_count",
"=",
"5",
",",
"limit",
"=",
"20480",
",",
"when",
"=",
"None",
",",
"with_filehandler",
"=",
"True",
")",
":",
"level",
"=",
"level",
".",
"split",
"(",
"':'",
")",
"if",
"len",
"(",
"level",
")",
"==",
"1",
":",
"# Both set to the same level",
"s_level",
"=",
"f_level",
"=",
"level",
"[",
"0",
"]",
"else",
":",
"s_level",
"=",
"level",
"[",
"0",
"]",
"# StreamHandler log level",
"f_level",
"=",
"level",
"[",
"1",
"]",
"# FileHandler log level",
"init_logger",
"(",
"name",
"=",
"name",
")",
"add_streamhandler",
"(",
"s_level",
",",
"fmt",
")",
"if",
"with_filehandler",
":",
"add_filehandler",
"(",
"f_level",
",",
"fmt",
",",
"filename",
",",
"mode",
",",
"backup_count",
",",
"limit",
",",
"when",
")",
"# Import the common log functions for convenient",
"import_log_funcs",
"(",
")"
] |
Configure the global logger.
|
[
"Configure",
"the",
"global",
"logger",
"."
] |
36861dfe1087e437ffe9b5a1da9345c85b4fa4a1
|
https://github.com/Vito2015/pyextend/blob/36861dfe1087e437ffe9b5a1da9345c85b4fa4a1/pyextend/core/log.py#L192-L212
|
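A usage sketch for `set_logger`; it assumes `info` and `warning` are among the names injected by `import_log_funcs`, and the filename is illustrative:

from pyextend.core import log

# Stream handler at INFO, file handler at WARNING (the level string splits on ':').
log.set_logger(name='myapp', filename='/tmp/myapp.log', level='INFO:WARNING')
log.info('service started')
log.warning('disk usage high')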
240,193
|
Vito2015/pyextend
|
pyextend/core/log.py
|
import_log_funcs
|
def import_log_funcs():
"""Import the common log functions from the global logger to the module."""
global g_logger
curr_mod = sys.modules[__name__]
for func_name in _logging_funcs:
func = getattr(g_logger, func_name)
setattr(curr_mod, func_name, func)
|
python
|
def import_log_funcs():
"""Import the common log functions from the global logger to the module."""
global g_logger
curr_mod = sys.modules[__name__]
for func_name in _logging_funcs:
func = getattr(g_logger, func_name)
setattr(curr_mod, func_name, func)
|
[
"def",
"import_log_funcs",
"(",
")",
":",
"global",
"g_logger",
"curr_mod",
"=",
"sys",
".",
"modules",
"[",
"__name__",
"]",
"for",
"func_name",
"in",
"_logging_funcs",
":",
"func",
"=",
"getattr",
"(",
"g_logger",
",",
"func_name",
")",
"setattr",
"(",
"curr_mod",
",",
"func_name",
",",
"func",
")"
] |
Import the common log functions from the global logger to the module.
|
[
"Import",
"the",
"common",
"log",
"functions",
"from",
"the",
"global",
"logger",
"to",
"the",
"module",
"."
] |
36861dfe1087e437ffe9b5a1da9345c85b4fa4a1
|
https://github.com/Vito2015/pyextend/blob/36861dfe1087e437ffe9b5a1da9345c85b4fa4a1/pyextend/core/log.py#L215-L223
|
240,194
|
lvh/maxims
|
maxims/indirection.py
|
powerupIndirector
|
def powerupIndirector(interface):
"""
A decorator for a powerup indirector from a single interface to a single
in-memory implementation.
The in-memory implementation that is being indirected to must be created
in the ``activate`` callback, and then assigned to ``self.indirected``,
which is an ``inmemory`` attribute.
"""
def decorator(cls):
zi.implementer(iaxiom.IPowerupIndirector)(cls)
cls.powerupInterfaces = [interface]
cls.indirect = _indirect
return cls
return decorator
|
python
|
def powerupIndirector(interface):
"""
A decorator for a powerup indirector from a single interface to a single
in-memory implementation.
The in-memory implementation that is being indirected to must be created
in the ``activate`` callback, and then assigned to ``self.indirected``,
which is an ``inmemory`` attribute.
"""
def decorator(cls):
zi.implementer(iaxiom.IPowerupIndirector)(cls)
cls.powerupInterfaces = [interface]
cls.indirect = _indirect
return cls
return decorator
|
[
"def",
"powerupIndirector",
"(",
"interface",
")",
":",
"def",
"decorator",
"(",
"cls",
")",
":",
"zi",
".",
"implementer",
"(",
"iaxiom",
".",
"IPowerupIndirector",
")",
"(",
"cls",
")",
"cls",
".",
"powerupInterfaces",
"=",
"[",
"interface",
"]",
"cls",
".",
"indirect",
"=",
"_indirect",
"return",
"cls",
"return",
"decorator"
] |
A decorator for a powerup indirector from a single interface to a single
in-memory implementation.
The in-memory implementation that is being indirected to must be created
in the ``activate`` callback, and then assigned to ``self.indirected``,
which is an ``inmemory`` attribute.
|
[
"A",
"decorator",
"for",
"a",
"powerup",
"indirector",
"from",
"a",
"single",
"interface",
"to",
"a",
"single",
"in",
"-",
"memory",
"implementation",
"."
] |
5c53b25d2cc4ccecbfe90193ade9ce0dbfbe4623
|
https://github.com/lvh/maxims/blob/5c53b25d2cc4ccecbfe90193ade9ce0dbfbe4623/maxims/indirection.py#L8-L23
|
240,195
|
pavelsof/ipalint
|
ipalint/core.py
|
Core.lint
|
def lint(self, dataset=None, col=None, no_header=False,
ignore_nfd=False, ignore_ws=False, linewise=False, no_lines=False):
"""
Returns a string containing all the issues found in the dataset
defined by the given file path.
"""
reader = Reader(dataset, has_header=not no_header, ipa_col=col)
recog = Recogniser()
norm = Normaliser(nfc_chars=recog.get_nfc_chars())
for ipa_string, line_num in reader.gen_ipa_data():
ipa_string = norm.normalise(ipa_string, line_num)
recog.recognise(ipa_string, line_num)
rep = Reporter()
norm.report(rep, ignore_nfd, ignore_ws)
recog.report(rep)
return rep.get_report(linewise, no_lines)
|
python
|
def lint(self, dataset=None, col=None, no_header=False,
ignore_nfd=False, ignore_ws=False, linewise=False, no_lines=False):
"""
Returns a string containing all the issues found in the dataset
defined by the given file path.
"""
reader = Reader(dataset, has_header=not no_header, ipa_col=col)
recog = Recogniser()
norm = Normaliser(nfc_chars=recog.get_nfc_chars())
for ipa_string, line_num in reader.gen_ipa_data():
ipa_string = norm.normalise(ipa_string, line_num)
recog.recognise(ipa_string, line_num)
rep = Reporter()
norm.report(rep, ignore_nfd, ignore_ws)
recog.report(rep)
return rep.get_report(linewise, no_lines)
|
[
"def",
"lint",
"(",
"self",
",",
"dataset",
"=",
"None",
",",
"col",
"=",
"None",
",",
"no_header",
"=",
"False",
",",
"ignore_nfd",
"=",
"False",
",",
"ignore_ws",
"=",
"False",
",",
"linewise",
"=",
"False",
",",
"no_lines",
"=",
"False",
")",
":",
"reader",
"=",
"Reader",
"(",
"dataset",
",",
"has_header",
"=",
"not",
"no_header",
",",
"ipa_col",
"=",
"col",
")",
"recog",
"=",
"Recogniser",
"(",
")",
"norm",
"=",
"Normaliser",
"(",
"nfc_chars",
"=",
"recog",
".",
"get_nfc_chars",
"(",
")",
")",
"for",
"ipa_string",
",",
"line_num",
"in",
"reader",
".",
"gen_ipa_data",
"(",
")",
":",
"ipa_string",
"=",
"norm",
".",
"normalise",
"(",
"ipa_string",
",",
"line_num",
")",
"recog",
".",
"recognise",
"(",
"ipa_string",
",",
"line_num",
")",
"rep",
"=",
"Reporter",
"(",
")",
"norm",
".",
"report",
"(",
"rep",
",",
"ignore_nfd",
",",
"ignore_ws",
")",
"recog",
".",
"report",
"(",
"rep",
")",
"return",
"rep",
".",
"get_report",
"(",
"linewise",
",",
"no_lines",
")"
] |
Returns a string containing all the issues found in the dataset
defined by the given file path.
|
[
"Returns",
"a",
"string",
"containing",
"all",
"the",
"issues",
"found",
"in",
"the",
"dataset",
"defined",
"by",
"the",
"given",
"file",
"path",
"."
] |
763e5979ede6980cbfc746b06fd007b379762eeb
|
https://github.com/pavelsof/ipalint/blob/763e5979ede6980cbfc746b06fd007b379762eeb/ipalint/core.py#L58-L77
|
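A hedged usage sketch for `Core.lint`; the dataset filename and column index are hypothetical, and it assumes `Core()` needs no constructor arguments:

from ipalint.core import Core

report = Core().lint(dataset='transcriptions.tsv', col=2, linewise=True)
print(report)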
240,196
|
Naught0/lolrune
|
lolrune/utils.py
|
parse_rune_links
|
def parse_rune_links(html: str) -> dict:
"""A function which parses the main Runeforge website into dict format.
Parameters
----------
html : str
The string representation of the html obtained via a GET request.
Returns
-------
dict
        The nested rune_links dict of champ rune page links from Runeforge.
"""
soup = BeautifulSoup(html, 'lxml')
# Champs with only a single runepage
single_page_raw = soup.find_all('li', class_='champion')
single_page = {re.split('\W+', x.a.div.div['style'])[-3].lower():
[x.a['href']] for x in single_page_raw if x.a is not None}
# Champs with two (or more) runepages
double_page_raw = soup.find_all('div', class_='champion-modal-open')
# This is JSON data which just needs to be decoded
double_page_decode = [json.loads(x['data-loadouts']) for x in double_page_raw]
# This lowers the champ name in the structure,
# and pulls out the champ links, after it's been decoded
double_page = {re.sub('[^A-Za-z0-9]+', '', x[0]['champion'].lower()):
[x[0]['link'], x[1]['link']] for x in double_page_decode}
# Combine the two dicts
champs_combined = {**single_page, **double_page}
return champs_combined
|
python
|
def parse_rune_links(html: str) -> dict:
"""A function which parses the main Runeforge website into dict format.
Parameters
----------
html : str
The string representation of the html obtained via a GET request.
Returns
-------
dict
        The nested rune_links dict of champ rune page links from Runeforge.
"""
soup = BeautifulSoup(html, 'lxml')
# Champs with only a single runepage
single_page_raw = soup.find_all('li', class_='champion')
single_page = {re.split('\W+', x.a.div.div['style'])[-3].lower():
[x.a['href']] for x in single_page_raw if x.a is not None}
# Champs with two (or more) runepages
double_page_raw = soup.find_all('div', class_='champion-modal-open')
# This is JSON data which just needs to be decoded
double_page_decode = [json.loads(x['data-loadouts']) for x in double_page_raw]
# This lowers the champ name in the structure,
# and pulls out the champ links, after it's been decoded
double_page = {re.sub('[^A-Za-z0-9]+', '', x[0]['champion'].lower()):
[x[0]['link'], x[1]['link']] for x in double_page_decode}
# Combine the two dicts
champs_combined = {**single_page, **double_page}
return champs_combined
|
[
"def",
"parse_rune_links",
"(",
"html",
":",
"str",
")",
"->",
"dict",
":",
"soup",
"=",
"BeautifulSoup",
"(",
"html",
",",
"'lxml'",
")",
"# Champs with only a single runepage",
"single_page_raw",
"=",
"soup",
".",
"find_all",
"(",
"'li'",
",",
"class_",
"=",
"'champion'",
")",
"single_page",
"=",
"{",
"re",
".",
"split",
"(",
"'\\W+'",
",",
"x",
".",
"a",
".",
"div",
".",
"div",
"[",
"'style'",
"]",
")",
"[",
"-",
"3",
"]",
".",
"lower",
"(",
")",
":",
"[",
"x",
".",
"a",
"[",
"'href'",
"]",
"]",
"for",
"x",
"in",
"single_page_raw",
"if",
"x",
".",
"a",
"is",
"not",
"None",
"}",
"# Champs with two (or more) runepages",
"double_page_raw",
"=",
"soup",
".",
"find_all",
"(",
"'div'",
",",
"class_",
"=",
"'champion-modal-open'",
")",
"# This is JSON data which just needs to be decoded",
"double_page_decode",
"=",
"[",
"json",
".",
"loads",
"(",
"x",
"[",
"'data-loadouts'",
"]",
")",
"for",
"x",
"in",
"double_page_raw",
"]",
"# This lowers the champ name in the structure, ",
"# and pulls out the champ links, after it's been decoded",
"double_page",
"=",
"{",
"re",
".",
"sub",
"(",
"'[^A-Za-z0-9]+'",
",",
"''",
",",
"x",
"[",
"0",
"]",
"[",
"'champion'",
"]",
".",
"lower",
"(",
")",
")",
":",
"[",
"x",
"[",
"0",
"]",
"[",
"'link'",
"]",
",",
"x",
"[",
"1",
"]",
"[",
"'link'",
"]",
"]",
"for",
"x",
"in",
"double_page_decode",
"}",
"# Combine the two dicts",
"champs_combined",
"=",
"{",
"*",
"*",
"single_page",
",",
"*",
"*",
"double_page",
"}",
"return",
"champs_combined"
] |
A function which parses the main Runeforge website into dict format.
Parameters
----------
html : str
The string representation of the html obtained via a GET request.
Returns
-------
dict
The nested rune_links dict of champ rune page links from Runeforge.
|
[
"A",
"function",
"which",
"parses",
"the",
"main",
"Runeforge",
"website",
"into",
"dict",
"format",
"."
] |
99f67b9137e42a78198ba369ceb371e473759f11
|
https://github.com/Naught0/lolrune/blob/99f67b9137e42a78198ba369ceb371e473759f11/lolrune/utils.py#L7-L39
|
240,197
|
fdb/aufmachen
|
aufmachen/websites/immoweb.py
|
find_string
|
def find_string(regex, s):
"""Find a string using a given regular expression.
If the string cannot be found, returns None.
The regex should contain one matching group,
as only the result of the first group is returned.
regex - A string containing the regular expression.
s - The string to search.
Returns a unicode string or None.
"""
m = re.search(regex, s)
if m is None:
return None
return m.groups()[0]
|
python
|
def find_string(regex, s):
"""Find a string using a given regular expression.
If the string cannot be found, returns None.
The regex should contain one matching group,
as only the result of the first group is returned.
regex - A string containing the regular expression.
s - The string to search.
Returns a unicode string or None.
"""
m = re.search(regex, s)
if m is None:
return None
return m.groups()[0]
|
[
"def",
"find_string",
"(",
"regex",
",",
"s",
")",
":",
"m",
"=",
"re",
".",
"search",
"(",
"regex",
",",
"s",
")",
"if",
"m",
"is",
"None",
":",
"return",
"None",
"return",
"m",
".",
"groups",
"(",
")",
"[",
"0",
"]"
] |
Find a string using a given regular expression.
If the string cannot be found, returns None.
The regex should contain one matching group,
as only the result of the first group is returned.
regex - A string containing the regular expression.
s - The string to search.
Returns a unicode string or None.
|
[
"Find",
"a",
"string",
"using",
"a",
"given",
"regular",
"expression",
".",
"If",
"the",
"string",
"cannot",
"be",
"found",
"returns",
"None",
".",
"The",
"regex",
"should",
"contain",
"one",
"matching",
"group",
"as",
"only",
"the",
"result",
"of",
"the",
"first",
"group",
"is",
"returned",
"."
] |
f2986a0cf087ac53969f82b84d872e3f1c6986f4
|
https://github.com/fdb/aufmachen/blob/f2986a0cf087ac53969f82b84d872e3f1c6986f4/aufmachen/websites/immoweb.py#L99-L113
|
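A short usage sketch for find_string above; the pattern and sample string are invented for illustration, and the surrounding module must import re.

# One capturing group: the matched group's text is returned.
price = find_string(r'Price:\s*(\d+)', 'Price: 250000 EUR')
assert price == '250000'
# No match: the function returns None rather than raising.
assert find_string(r'Rooms:\s*(\d+)', 'Price: 250000 EUR') is None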
240,198
|
fdb/aufmachen
|
aufmachen/websites/immoweb.py
|
clean_text
|
def clean_text(s):
"""Removes all cruft from the text."""
SPACES_RE = re.compile(r'\s+')
SPECIAL_CHARS_RE = re.compile(r'[^\w\s\.\-\(\)]')
s = SPACES_RE.sub(' ', s)
s = s.strip()
s = SPECIAL_CHARS_RE.sub('', s)
return s
|
python
|
def clean_text(s):
"""Removes all cruft from the text."""
SPACES_RE = re.compile(r'\s+')
SPECIAL_CHARS_RE = re.compile(r'[^\w\s\.\-\(\)]')
s = SPACES_RE.sub(' ', s)
s = s.strip()
s = SPECIAL_CHARS_RE.sub('', s)
return s
|
[
"def",
"clean_text",
"(",
"s",
")",
":",
"SPACES_RE",
"=",
"re",
".",
"compile",
"(",
"r'\\s+'",
")",
"SPECIAL_CHARS_RE",
"=",
"re",
".",
"compile",
"(",
"r'[^\\w\\s\\.\\-\\(\\)]'",
")",
"s",
"=",
"SPACES_RE",
".",
"sub",
"(",
"' '",
",",
"s",
")",
"s",
"=",
"s",
".",
"strip",
"(",
")",
"s",
"=",
"SPECIAL_CHARS_RE",
".",
"sub",
"(",
"''",
",",
"s",
")",
"return",
"s"
] |
Removes all cruft from the text.
|
[
"Removes",
"all",
"cruft",
"from",
"the",
"text",
"."
] |
f2986a0cf087ac53969f82b84d872e3f1c6986f4
|
https://github.com/fdb/aufmachen/blob/f2986a0cf087ac53969f82b84d872e3f1c6986f4/aufmachen/websites/immoweb.py#L154-L161
|
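An illustrative call to clean_text above; the address string is invented. Whitespace runs collapse to single spaces, the result is stripped, and any character outside \w, \s, ., -, ( and ) is dropped.

messy = '  Rue de la \t Loi 16,  1000  Bruxelles!  '
# The comma and exclamation mark count as cruft under SPECIAL_CHARS_RE.
assert clean_text(messy) == 'Rue de la Loi 16 1000 Bruxelles'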
240,199
|
fdb/aufmachen
|
aufmachen/websites/immoweb.py
|
parse_immoweb_link
|
def parse_immoweb_link(url):
"""Parses an Immoweb estate detail URL and returns the Immoweb estate id.
Returns a string with the Immoweb estate id.
"""
IMMOWEB_ID_RE = re.compile(r'.*?IdBien=([0-9]+).*?')
return IMMOWEB_ID_RE.match(url).groups()[0]
|
python
|
def parse_immoweb_link(url):
"""Parses an Immoweb estate detail URL and returns the Immoweb estate id.
Returns a string with the Immoweb estate id.
"""
IMMOWEB_ID_RE = re.compile(r'.*?IdBien=([0-9]+).*?')
return IMMOWEB_ID_RE.match(url).groups()[0]
|
[
"def",
"parse_immoweb_link",
"(",
"url",
")",
":",
"IMMOWEB_ID_RE",
"=",
"re",
".",
"compile",
"(",
"r'.*?IdBien=([0-9]+).*?'",
")",
"return",
"IMMOWEB_ID_RE",
".",
"match",
"(",
"url",
")",
".",
"groups",
"(",
")",
"[",
"0",
"]"
] |
Parses an Immoweb estate detail URL and returns the Immoweb estate id.
Returns a string with the Immoweb estate id.
|
[
"Parses",
"an",
"Immoweb",
"estate",
"detail",
"URL",
"and",
"returns",
"the",
"Immoweb",
"estate",
"id",
"."
] |
f2986a0cf087ac53969f82b84d872e3f1c6986f4
|
https://github.com/fdb/aufmachen/blob/f2986a0cf087ac53969f82b84d872e3f1c6986f4/aufmachen/websites/immoweb.py#L163-L169
|
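A usage sketch for parse_immoweb_link above; the URL shape is guessed from the regex, not taken from Immoweb. Note the function assumes IdBien is present: otherwise .match() returns None and the .groups() call raises AttributeError.

url = 'http://www.immoweb.be/en/Detail.cfm?IdBien=1234567&xyz=1'  # invented URL
assert parse_immoweb_link(url) == '1234567'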