Dataset columns:

| column | type | notes |
|---|---|---|
| id | int32 | 0 to 252k |
| repo | string | lengths 7 to 55 |
| path | string | lengths 4 to 127 |
| func_name | string | lengths 1 to 88 |
| original_string | string | lengths 75 to 19.8k |
| language | string | 1 class (python) |
| code | string | lengths 75 to 19.8k |
| code_tokens | list | |
| docstring | string | lengths 3 to 17.3k |
| docstring_tokens | list | |
| sha | string | length 40 |
| url | string | lengths 87 to 242 |

241,700 | makinacorpus/tif2geojson | tif2geojson.py | _deep_value | python

def _deep_value(*args, **kwargs):
""" Drills down into tree using the keys
"""
node, keys = args[0], args[1:]
for key in keys:
node = node.get(key, {})
default = kwargs.get('default', {})
if node in ({}, [], None):
node = default
return node
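
A minimal usage sketch (not part of the dataset row; the nested dict is made up): `_deep_value` walks the tree one key at a time and returns the `default` keyword value when the branch is missing or empty.

```python
tree = {"properties": {"contact": {"email": "info@example.org"}}}  # hypothetical data

# Existing path: returns the leaf value.
_deep_value(tree, "properties", "contact", "email")       # 'info@example.org'

# Missing path: returns the keyword default instead of an empty dict.
_deep_value(tree, "properties", "phone", default="n/a")   # 'n/a'
```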

sha: 071b26cea6e23a3ec87a5cd1f73cc800612021b9 | url: https://github.com/makinacorpus/tif2geojson/blob/071b26cea6e23a3ec87a5cd1f73cc800612021b9/tif2geojson.py#L162-L171

241,701 | MacHu-GWU/angora-project | angora/dataIO/pk.py | load_pk | python

def load_pk(abspath, compress=False, enable_verbose=True):
"""Load Python Object from Pickle file.
:param abspath: File path. Use absolute path as much as you can. File
extension has to be ``.pickle`` or ``.gz``. (for compressed Pickle)
:type abspath: string
:param compress: (default False) Load from a gzip compressed Pickle file.
Check :func:`dump_pk()<dump_pk>` function for more information.
:type compress: boolean
:param enable_verbose: (default True) Trigger for message.
:type enable_verbose: boolean
Usage::
>>> from weatherlab.lib.dataIO.pk import load_pk
>>> load_pk("test.pickle") # if you have a Pickle file
Loading from test.pickle...
Complete! Elapse 0.000272 sec.
{'a': 1, 'b': 2}
**中文文档**
从Pickle文件中读取数据
参数列表
:param abspath: 文件路径, 扩展名需为 ``.pickle`` 或 ``.gz``
:type abspath: ``字符串``
:param compress: (默认 False) 是否从一个gzip压缩过的Pickle文件中读取数据。 请
参考 :func:`dump_pk()<dump_pk>` 获得更多信息.
:type compress: ``布尔值``
:param enable_verbose: (默认 True) 是否打开信息提示开关, 批处理时建议关闭.
:type enable_verbose: ``布尔值``
"""
abspath = str(abspath) # try stringlize
msg = Messenger(enable_verbose=enable_verbose)
if compress: # check extension name
if os.path.splitext(abspath)[1] != ".gz":
raise Exception("compressed pickle has to use extension '.gz'!")
else:
if os.path.splitext(abspath)[1] != ".pickle":
raise Exception("file extension are not '.pickle'!")
msg.show("\nLoading from %s..." % abspath)
st = time.clock()
if compress:
with gzip.open(abspath, "rb") as f:
obj = pickle.loads(f.read())
else:
with open(abspath, "rb") as f:
obj = pickle.load(f)
if enable_verbose:
msg.show(" Complete! Elapse %.6f sec" % (time.clock() - st))
return obj

sha: 689a60da51cd88680ddbe26e28dbe81e6b01d275 | url: https://github.com/MacHu-GWU/angora-project/blob/689a60da51cd88680ddbe26e28dbe81e6b01d275/angora/dataIO/pk.py#L73-L135

241,702 | MacHu-GWU/angora-project | angora/dataIO/pk.py | dump_pk | python

def dump_pk(obj, abspath,
pk_protocol=pk_protocol, replace=False, compress=False,
enable_verbose=True):
"""Dump Picklable Python Object to file.
Provides multiple choice to customize the behavior.
:param obj: Picklable Python Object.
:param abspath: ``save as`` path, file extension has to be ``.pickle`` or
``.gz`` (for compressed Pickle).
:type abspath: string
:param pk_protocol: (default your python version) use 2, to make a
py2.x/3.x compatible pickle file. But 3 is faster.
:type pk_protocol: int
:param replace: (default False) If ``True``, when you dump Pickle to a
existing path, it silently overwrite it. If False, an exception will be
raised. Default False setting is to prevent overwrite file by mistake.
:type replace: boolean
:param compress: (default False) If ``True``, use GNU program gzip to
compress the Pickle file. Disk usage can be greatly reduced. But you
have to use :func:`load_pk(abspath, compress=True)<load_pk>` in loading.
:type compress: boolean
:param enable_verbose: (default True) Trigger for message.
:type enable_verbose: boolean
Usage::
>>> from weatherlab.lib.dataIO.pk import dump_pk
>>> pk = {"a": 1, "b": 2}
>>> dump_pk(pk, "test.pickle", replace=True)
Dumping to test.pickle...
Complete! Elapse 0.001763 sec
**中文文档**
将Python对象以Pickle的方式序列化, 保存至本地文件。(有些自定义类无法被序列化)
参数列表
:param obj: 可Pickle化的Python对象
:param abspath: 写入文件的路径。扩展名必须为 ``.pickle`` 或 ``.gz``, 其中gz用于被压
缩的Pickle
:type abspath: ``字符串``
:param pk_protocol: (默认 等于你Python的大版本号) 使用2可以使得保存的文件能被
py2.x/3.x都能读取。但是协议3的速度更快, 体积更小, 性能更高。
:type pk_protocol: ``整数``
:param replace: (默认 False) 当为``True``时, 如果写入路径已经存在, 则会自动覆盖
原文件。而为``False``时, 则会抛出异常。防止误操作覆盖源文件。
:type replace: ``布尔值``
:param compress: (默认 False) 当为``True``时, 使用开源压缩标准gzip压缩Pickle文件。
通常能让文件大小缩小10-20倍不等。如要读取文件, 则需要使用函数
:func:`load_pk(abspath, compress=True)<load_pk>`.
:type compress: ``布尔值``
:param enable_verbose: (默认 True) 是否打开信息提示开关, 批处理时建议关闭.
:type enable_verbose: ``布尔值``
"""
abspath = str(abspath) # try stringlize
msg = Messenger(enable_verbose=enable_verbose)
if compress: # check extension name
root, ext = os.path.splitext(abspath)
if ext != ".gz":
if ext != ".tmp":
raise Exception(
"compressed pickle has to use extension '.gz'!")
else:
_, ext = os.path.splitext(root)
if ext != ".gz":
raise Exception(
"compressed pickle has to use extension '.gz'!")
else:
root, ext = os.path.splitext(abspath)
if ext != ".pickle":
if ext != ".tmp":
raise Exception("file extension are not '.pickle'!")
else:
_, ext = os.path.splitext(root)
if ext != ".pickle":
raise Exception("file extension are not '.pickle'!")
msg.show("\nDumping to %s..." % abspath)
st = time.clock()
if os.path.exists(abspath): # if exists, check replace option
if replace: # replace existing file
if compress:
with gzip.open(abspath, "wb") as f:
f.write(pickle.dumps(obj, protocol=pk_protocol))
else:
with open(abspath, "wb") as f:
pickle.dump(obj, f, protocol=pk_protocol)
else: # stop, print error message
raise Exception("\tCANNOT WRITE to %s, "
"it's already exists" % abspath)
else: # if not exists, just write to it
if compress:
with gzip.open(abspath, "wb") as f:
f.write(pickle.dumps(obj, protocol=pk_protocol))
else:
with open(abspath, "wb") as f:
pickle.dump(obj, f, protocol=pk_protocol)
msg.show(" Complete! Elapse %.6f sec" % (time.clock() - st))

sha: 689a60da51cd88680ddbe26e28dbe81e6b01d275 | url: https://github.com/MacHu-GWU/angora-project/blob/689a60da51cd88680ddbe26e28dbe81e6b01d275/angora/dataIO/pk.py#L138-L250

241,703 | MacHu-GWU/angora-project | angora/dataIO/pk.py | safe_dump_pk | python

def safe_dump_pk(obj, abspath, pk_protocol=pk_protocol, compress=False,
enable_verbose=True):
"""A stable version of dump_pk, silently overwrite existing file.
When your program been interrupted, you lose nothing. Typically if your
program is interrupted by any reason, it only leaves a incomplete file.
If you use replace=True, then you also lose your old file.
So a bettr way is to:
1. dump pickle to a temp file.
2. when it's done, rename it to #abspath, overwrite the old one.
This way guarantee atomic write.
:param obj: Picklable Python Object.
:param abspath: ``save as`` path, file extension has to be ``.pickle`` or
``.gz`` (for compressed Pickle).
:type abspath: string
:param pk_protocol: (default your python version) use 2, to make a
py2.x/3.x compatible pickle file. But 3 is faster.
:type pk_protocol: int
:param compress: (default False) If ``True``, use GNU program gzip to
compress the Pickle file. Disk usage can be greatly reduced. But you
have to use :func:`load_pk(abspath, compress=True)<load_pk>` in loading.
:type compress: boolean
:param enable_verbose: (default True) Trigger for message.
:type enable_verbose: boolean
Usage::
>>> from weatherlab.lib.dataIO.pk import safe_dump_pk
>>> pk = {"a": 1, "b": 2}
>>> safe_dump_pk(pk, "test.pickle")
Dumping to test.pickle...
Complete! Elapse 0.001763 sec
**中文文档**
在对文件进行写入时, 如果程序中断, 则会留下一个不完整的文件。如果你使用了覆盖式
写入, 则你同时也丢失了原文件。所以为了保证写操作的原子性(要么全部完成, 要么全部
都不完成), 更好的方法是: 首先将文件写入一个临时文件中, 完成后再讲文件重命名,
覆盖旧文件。这样即使中途程序被中断, 也仅仅是留下了一个未完成的临时文件而已, 不会
影响原文件。
参数列表
:param obj: 可Pickle化的Python对象
:param abspath: 写入文件的路径。扩展名必须为 ``.pickle`` 或 ``.gz`` , 其中gz用于被压
缩的Pickle
:type abspath: ``字符串``
:param pk_protocol: (默认 等于你Python的大版本号) 使用2可以使得保存的文件能被
py2.x/3.x都能读取。但是协议3的速度更快, 体积更小, 性能更高。
:type pk_protocol: ``整数``
:param compress: (默认 False) 当为 ``True`` 时, 使用开源压缩标准gzip压缩Pickle文件。
通常能让文件大小缩小10-20倍不等。如要读取文件, 则需要使用函数
:func:`load_pk(abspath, compress=True)<load_pk>`.
:type compress: ``布尔值``
:param enable_verbose: (默认 True) 是否打开信息提示开关, 批处理时建议关闭.
:type enable_verbose: ``布尔值``
"""
abspath = str(abspath) # try stringlize
temp_abspath = "%s.tmp" % abspath
dump_pk(obj, temp_abspath, pk_protocol=pk_protocol,
replace=True, compress=compress, enable_verbose=enable_verbose)
shutil.move(temp_abspath, abspath)
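
A round-trip sketch of the atomic-write behaviour described in the docstring (the import path is assumed from the repository layout `angora/dataIO/pk.py`; the docstrings show an older `weatherlab` path, and the module uses `time.clock()`, which requires Python 3.7 or earlier):

```python
from angora.dataIO.pk import safe_dump_pk, load_pk  # assumed import path

data = {"a": 1, "b": 2}            # made-up payload
safe_dump_pk(data, "test.pickle")  # writes test.pickle.tmp first, then renames it
assert load_pk("test.pickle") == data
```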

sha: 689a60da51cd88680ddbe26e28dbe81e6b01d275 | url: https://github.com/MacHu-GWU/angora-project/blob/689a60da51cd88680ddbe26e28dbe81e6b01d275/angora/dataIO/pk.py#L253-L326

241,704 | MacHu-GWU/angora-project | angora/dataIO/pk.py | obj2str | python

def obj2str(obj, pk_protocol=pk_protocol):
"""Convert arbitrary object to utf-8 string, using
base64encode algorithm.
Usage::
>>> from weatherlab.lib.dataIO.pk import obj2str
>>> data = {"a": 1, "b": 2}
>>> obj2str(data, pk_protocol=2)
'gAJ9cQAoWAEAAABhcQFLAVgBAAAAYnECSwJ1Lg=='
**中文文档**
将可Pickle化的Python对象转化为utf-8编码的"字符串"
"""
return base64.b64encode(pickle.dumps(
obj, protocol=pk_protocol)).decode("utf-8")
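
The row only covers the encode direction; as a sketch (not a documented API of this package), the resulting string can be decoded again with the standard library alone:

```python
import base64
import pickle


def str_to_obj(text):
    # Hypothetical inverse of obj2str: base64-decode, then unpickle.
    return pickle.loads(base64.b64decode(text.encode("utf-8")))


# Round trip using the value from the docstring example.
assert str_to_obj(obj2str({"a": 1, "b": 2}, pk_protocol=2)) == {"a": 1, "b": 2}
```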

sha: 689a60da51cd88680ddbe26e28dbe81e6b01d275 | url: https://github.com/MacHu-GWU/angora-project/blob/689a60da51cd88680ddbe26e28dbe81e6b01d275/angora/dataIO/pk.py#L363-L379

241,705 | MacHu-GWU/angora-project | angora/gadget/codestats.py | CodeStats.run | python

def run(self):
"""Run analysis.
The basic idea is to recursively find all script files in specific
programming language, and analyze each file then sum it up.
"""
n_target_file, n_other_file = 0, 0
code, comment, docstr, purecode = 0, 0, 0, 0
fc = FileCollection.from_path_except(self.workspace, self.ignore)
fc_yes, fc_no = fc.select(self.filter, keepboth=True)
n_other_file += len(fc_no)
for abspath in fc_yes:
try:
with open(abspath, "rb") as f:
code_text = f.read().decode("utf-8")
code_, comment_, docstr_, purecode_ = self.analyzer(
code_text)
code += code_
comment += comment_
docstr += docstr_
purecode += purecode_
n_target_file += 1
except Exception as e:
n_other_file += 1
lines = list()
lines.append("Code statistic result for '%s'" % self.workspace)
lines.append(" %r %r files, %r other files." %
(n_target_file, self.language, n_other_file))
lines.append(" code line: %s" % code)
lines.append(" comment line: %s" % comment)
lines.append(" docstr line: %s" % docstr)
lines.append(" purecode line: %s" % purecode)
message = "\n".join(lines)
print(message)
return message

sha: 689a60da51cd88680ddbe26e28dbe81e6b01d275 | url: https://github.com/MacHu-GWU/angora-project/blob/689a60da51cd88680ddbe26e28dbe81e6b01d275/angora/gadget/codestats.py#L97-L137

241,706 | MacHu-GWU/angora-project | angora/gadget/codestats.py | CodeStats.analyzePython | python

def analyzePython(code_text):
"""Count how many line of code, comment, dosstr, purecode in one
Python script file.
"""
code, comment, docstr = 0, 0, 0
p1 = r"""(?<=%s)[\s\S]*?(?=%s)""" % ('"""', '"""')
p2 = r"""(?<=%s)[\s\S]*?(?=%s)""" % ("'''", "'''")
# count docstr
for pattern in [p1, p2]:
for res in re.findall(pattern, code_text)[::2]:
lines = [i.strip() for i in res.split("\n") if i.strip()]
docstr += len(lines)
# count comment line and code
lines = [i.strip() for i in code_text.split("\n") if i.strip()]
for line in lines:
if line.startswith("#"):
comment += 1
else:
code += 1
purecode = code - docstr # pure code = code - docstr
return code, comment, docstr, purecode
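
A small illustration with a made-up snippet (assuming `analyzePython` is exposed as a static method, as the qualified name `CodeStats.analyzePython` suggests): triple-quoted blocks count as docstring lines, `#` lines as comments, every other non-blank line as code, and `purecode = code - docstr`.

```python
sample = "\n".join([
    "def f():",
    '    """one-line docstring"""',
    "    # a comment",
    "    return 1",
])
# Expected under the rules above: code=3, comment=1, docstr=1, purecode=2.
print(CodeStats.analyzePython(sample))
```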

sha: 689a60da51cd88680ddbe26e28dbe81e6b01d275 | url: https://github.com/MacHu-GWU/angora-project/blob/689a60da51cd88680ddbe26e28dbe81e6b01d275/angora/gadget/codestats.py#L147-L170

241,707 | JNRowe/jnrbase | jnrbase/cmdline.py | get_default | python

def get_default(__func: Callable, __arg: str) -> str:
"""Fetch default value for a function argument
Args:
__func: Function to inspect
__arg: Argument to extract default value for
"""
return signature(__func).parameters[__arg].default
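
A quick illustration (the sample function is made up): `get_default` is a thin wrapper around `inspect.signature` that returns the default bound to the named parameter.

```python
def fetch(url, timeout=30):    # hypothetical sample function
    ...


assert get_default(fetch, "timeout") == 30
```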

sha: ae505ef69a9feb739b5f4e62c5a8e6533104d3ea | url: https://github.com/JNRowe/jnrbase/blob/ae505ef69a9feb739b5f4e62c5a8e6533104d3ea/jnrbase/cmdline.py#L38-L45

241,708 | JNRowe/jnrbase | jnrbase/cmdline.py | config_ | python

def config_(name: str, local: bool, package: str, section: str,
key: Optional[str]):
"""Extract or list values from config."""
cfg = config.read_configs(package, name, local=local)
if key:
with suppress(NoOptionError, NoSectionError):
echo(cfg.get(section, key))
else:
with suppress(NoSectionError):
for opt in cfg.options(section):
colourise.pinfo(opt)
echo(' {}'.format(cfg.get(section, opt)))

sha: ae505ef69a9feb739b5f4e62c5a8e6533104d3ea | url: https://github.com/JNRowe/jnrbase/blob/ae505ef69a9feb739b5f4e62c5a8e6533104d3ea/jnrbase/cmdline.py#L97-L108

241,709 | JNRowe/jnrbase | jnrbase/cmdline.py | find_tag | python

def find_tag(match: str, strict: bool, directory: str):
"""Find tag for git repository."""
with suppress(CalledProcessError):
echo(git.find_tag(match, strict=strict, git_dir=directory))

sha: ae505ef69a9feb739b5f4e62c5a8e6533104d3ea | url: https://github.com/JNRowe/jnrbase/blob/ae505ef69a9feb739b5f4e62c5a8e6533104d3ea/jnrbase/cmdline.py#L117-L120

241,710 | JNRowe/jnrbase | jnrbase/cmdline.py | pretty_time | python

def pretty_time(timestamp: str):
"""Format timestamp for human consumption."""
try:
parsed = iso_8601.parse_datetime(timestamp)
except ValueError:
now = datetime.utcnow().replace(tzinfo=timezone.utc)
try:
delta = iso_8601.parse_delta(timestamp)
except ValueError:
delta = human_time.parse_timedelta(timestamp)
parsed = now - delta
echo(human_time.human_timestamp(parsed))

sha: ae505ef69a9feb739b5f4e62c5a8e6533104d3ea | url: https://github.com/JNRowe/jnrbase/blob/ae505ef69a9feb739b5f4e62c5a8e6533104d3ea/jnrbase/cmdline.py#L131-L143

241,711 | JNRowe/jnrbase | jnrbase/cmdline.py | gen_text | python

def gen_text(env: TextIOBase, package: str, tmpl: str):
"""Create output from Jinja template."""
if env:
env_args = json_datetime.load(env)
else:
env_args = {}
jinja_env = template.setup(package)
echo(jinja_env.get_template(tmpl).render(**env_args))

sha: ae505ef69a9feb739b5f4e62c5a8e6533104d3ea | url: https://github.com/JNRowe/jnrbase/blob/ae505ef69a9feb739b5f4e62c5a8e6533104d3ea/jnrbase/cmdline.py#L160-L167

241,712 | JNRowe/jnrbase | jnrbase/cmdline.py | time | python

def time(ctx: Context, command: str):
"""Time the output of a command."""
with timer.Timing(verbose=True):
proc = run(command, shell=True)
ctx.exit(proc.returncode)

sha: ae505ef69a9feb739b5f4e62c5a8e6533104d3ea | url: https://github.com/JNRowe/jnrbase/blob/ae505ef69a9feb739b5f4e62c5a8e6533104d3ea/jnrbase/cmdline.py#L173-L177

241,713 | zerok/zs.bibtex | src/zs/bibtex/structures.py | TypeRegistry.register | python

def register(cls, name, type_):
"""
Register a new type for an entry-type. The 2nd argument has to be a
subclass of structures.Entry.
"""
if not issubclass(type_, Entry):
raise exceptions.InvalidEntryType("%s is not a subclass of Entry" % str(type_))
cls._registry[name.lower()] = type_

sha: ac9594b36b9d4ec9a3593c0dc7c1a35e5aea04d9 | url: https://github.com/zerok/zs.bibtex/blob/ac9594b36b9d4ec9a3593c0dc7c1a35e5aea04d9/src/zs/bibtex/structures.py#L51-L59

241,714 | zerok/zs.bibtex | src/zs/bibtex/structures.py | Bibliography.validate | python

def validate(self, **kwargs):
"""
Validates each entry (passing the provided arguments down to them and
also tries to resolve all cross-references between the entries.
"""
self.check_crossrefs()
for value in self.values():
value.validate(**kwargs)

sha: ac9594b36b9d4ec9a3593c0dc7c1a35e5aea04d9 | url: https://github.com/zerok/zs.bibtex/blob/ac9594b36b9d4ec9a3593c0dc7c1a35e5aea04d9/src/zs/bibtex/structures.py#L79-L86

241,715 | zerok/zs.bibtex | src/zs/bibtex/structures.py | Entry.validate | python

def validate(self, raise_unsupported=False):
"""
Checks if the Entry instance includes all the required fields of its
type. If ``raise_unsupported`` is set to ``True`` it will also check
for potentially unsupported types.
If a problem is found, an InvalidStructure exception is raised.
"""
fields = set(self.keys())
flattened_required_fields = set()
required_errors = []
for field in self.required_fields:
found = False
if isinstance(field, (list, tuple)):
# Check all alternatives
for real_f in field:
if real_f in fields:
flattened_required_fields.add(real_f)
found = True
else:
flattened_required_fields.add(field)
if field in fields:
found = True
if not found:
required_errors.append(field)
unsupported_fields = fields - flattened_required_fields \
- set(self.optional_fields)
if len(required_errors) or (raise_unsupported
and len(unsupported_fields)):
raise exceptions.InvalidStructure("Missing or unsupported fields found",
required_fields=required_errors,
unsupported_fields=unsupported_fields)

sha: ac9594b36b9d4ec9a3593c0dc7c1a35e5aea04d9 | url: https://github.com/zerok/zs.bibtex/blob/ac9594b36b9d4ec9a3593c0dc7c1a35e5aea04d9/src/zs/bibtex/structures.py#L125-L156

241,716 | nsavch/python-xonotic-db | xon_db/cli.py | get | python

def get(file_name, key):
"""
Print a value for the specified key. If key is not found xon_db exists with code 1.
"""
db = XonoticDB.load_path(file_name)
value = db.get(key)
if value is None:
sys.exit(1)
else:
click.echo(value)

sha: 339fe4c2c74880fd66712ae32789d7e9ae3e8f02 | url: https://github.com/nsavch/python-xonotic-db/blob/339fe4c2c74880fd66712ae32789d7e9ae3e8f02/xon_db/cli.py#L31-L40

241,717 | nsavch/python-xonotic-db | xon_db/cli.py | set | python

def set(file_name, key, value, new):
"""
Set a new value for the specified key.
"""
db = XonoticDB.load_path(file_name)
if key not in db and not new:
click.echo('Key %s is not found in the database' % key, file=sys.stderr)
sys.exit(1)
else:
db[key] = value
db.save(file_name)

sha: 339fe4c2c74880fd66712ae32789d7e9ae3e8f02 | url: https://github.com/nsavch/python-xonotic-db/blob/339fe4c2c74880fd66712ae32789d7e9ae3e8f02/xon_db/cli.py#L48-L58

241,718 | nsavch/python-xonotic-db | xon_db/cli.py | remove_cts_record | python

def remove_cts_record(file_name, map, position):
"""
Remove cts record on MAP and POSITION
"""
db = XonoticDB.load_path(file_name)
db.remove_cts_record(map, position)
db.save(file_name)

sha: 339fe4c2c74880fd66712ae32789d7e9ae3e8f02 | url: https://github.com/nsavch/python-xonotic-db/blob/339fe4c2c74880fd66712ae32789d7e9ae3e8f02/xon_db/cli.py#L65-L71

241,719 | nsavch/python-xonotic-db | xon_db/cli.py | remove_all_cts_records_by | python

def remove_all_cts_records_by(file_name, crypto_idfp):
"""
Remove all cts records set by player with CRYPTO_IDFP
"""
db = XonoticDB.load_path(file_name)
db.remove_all_cts_records_by(crypto_idfp)
db.save(file_name)

sha: 339fe4c2c74880fd66712ae32789d7e9ae3e8f02 | url: https://github.com/nsavch/python-xonotic-db/blob/339fe4c2c74880fd66712ae32789d7e9ae3e8f02/xon_db/cli.py#L77-L83

241,720 | nsavch/python-xonotic-db | xon_db/cli.py | merge_cts_records | python

def merge_cts_records(file_name, crypto_idfp, crypto_idfps):
"""
Merge cts records made by CRYPTO_IDFPS to CRYPTO_IDFP
"""
db = XonoticDB.load_path(file_name)
db.merge_cts_records(crypto_idfp, crypto_idfps)
db.save(file_name)

sha: 339fe4c2c74880fd66712ae32789d7e9ae3e8f02 | url: https://github.com/nsavch/python-xonotic-db/blob/339fe4c2c74880fd66712ae32789d7e9ae3e8f02/xon_db/cli.py#L90-L96

241,721 | Arvedui/picuplib | picuplib/checks.py | check_rotation | python

def check_rotation(rotation):
"""checks rotation parameter if illegal value raises exception"""
if rotation not in ALLOWED_ROTATION:
allowed_rotation = ', '.join(ALLOWED_ROTATION)
raise UnsupportedRotation('Rotation %s is not allwoed. Allowed are %s'
% (rotation, allowed_rotation))

sha: c8a5d1542dbd421e84afd5ee81fe76efec89fb95 | url: https://github.com/Arvedui/picuplib/blob/c8a5d1542dbd421e84afd5ee81fe76efec89fb95/picuplib/checks.py#L31-L37

241,722 | Arvedui/picuplib | picuplib/checks.py | check_resize | python

def check_resize(resize):
"""checks resize parameter if illegal value raises exception"""
if resize is None:
return
resize = resize.lower().strip()
if 'x' in resize:
tmp = resize.lower().split('x')
tmp = [x.strip() for x in resize.split('x')]
if len(tmp) == 2 and tmp[0].isdigit() and tmp[1].isdigit():
return
elif '%' in resize:
tmp = resize.split('%')[0]
if tmp.isnumeric():
tmp = int(tmp)
if 1 <= tmp <= 1000:
return
else:
raise PercentageOutOfRange("percentage must be between 1 and 1000")
raise MallformedResize('Resize value "%s" is mallformed. '
'Desired format is: {width}x{height} or {percentage}%%' % resize)
|
python
|
def check_resize(resize):
"""checks resize parameter if illegal value raises exception"""
if resize is None:
return
resize = resize.lower().strip()
if 'x' in resize:
tmp = resize.lower().split('x')
tmp = [x.strip() for x in resize.split('x')]
if len(tmp) == 2 and tmp[0].isdigit() and tmp[1].isdigit():
return
elif '%' in resize:
tmp = resize.split('%')[0]
if tmp.isnumeric():
tmp = int(tmp)
if 1 <= tmp <= 1000:
return
else:
raise PercentageOutOfRange("percentage must be between 1 and 1000")
raise MallformedResize('Resize value "%s" is mallformed. '
'Desired format is: {width}x{height} or {percentage}%%' % resize)
|
[
"def",
"check_resize",
"(",
"resize",
")",
":",
"if",
"resize",
"is",
"None",
":",
"return",
"resize",
"=",
"resize",
".",
"lower",
"(",
")",
".",
"strip",
"(",
")",
"if",
"'x'",
"in",
"resize",
":",
"tmp",
"=",
"resize",
".",
"lower",
"(",
")",
".",
"split",
"(",
"'x'",
")",
"tmp",
"=",
"[",
"x",
".",
"strip",
"(",
")",
"for",
"x",
"in",
"resize",
".",
"split",
"(",
"'x'",
")",
"]",
"if",
"len",
"(",
"tmp",
")",
"==",
"2",
"and",
"tmp",
"[",
"0",
"]",
".",
"isdigit",
"(",
")",
"and",
"tmp",
"[",
"1",
"]",
".",
"isdigit",
"(",
")",
":",
"return",
"elif",
"'%'",
"in",
"resize",
":",
"tmp",
"=",
"resize",
".",
"split",
"(",
"'%'",
")",
"[",
"0",
"]",
"if",
"tmp",
".",
"isnumeric",
"(",
")",
":",
"tmp",
"=",
"int",
"(",
"tmp",
")",
"if",
"1",
"<=",
"tmp",
"<=",
"1000",
":",
"return",
"else",
":",
"raise",
"PercentageOutOfRange",
"(",
"\"percentage must be between 1 and 1000\"",
")",
"raise",
"MallformedResize",
"(",
"'Resize value \"%s\" is mallformed. '",
"'Desired format is: {width}x{height} or {percentage}%%'",
"%",
"resize",
")"
] |
checks resize parameter if illegal value raises exception
|
[
"checks",
"resize",
"parameter",
"if",
"illegal",
"value",
"raises",
"exception"
] |
c8a5d1542dbd421e84afd5ee81fe76efec89fb95
|
https://github.com/Arvedui/picuplib/blob/c8a5d1542dbd421e84afd5ee81fe76efec89fb95/picuplib/checks.py#L40-L62
|
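The resize string accepted by check_resize is either '{width}x{height}' or '{percentage}%'. A small standalone parser sketch of that format follows; the function name and return shape are illustrative, not picuplib's API.

def parse_resize(resize):
    """Return ('pixels', w, h) or ('percent', p), or raise ValueError. Illustrative only."""
    resize = resize.lower().strip()
    if 'x' in resize:
        parts = [p.strip() for p in resize.split('x')]
        if len(parts) == 2 and all(p.isdigit() for p in parts):
            return ('pixels', int(parts[0]), int(parts[1]))
    elif resize.endswith('%') and resize[:-1].isdigit():
        percent = int(resize[:-1])
        if 1 <= percent <= 1000:
            return ('percent', percent)
    raise ValueError('malformed resize value: %r' % resize)

print(parse_resize('800x600'))   # ('pixels', 800, 600)
print(parse_resize('50%'))       # ('percent', 50)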
241,723
|
Arvedui/picuplib
|
picuplib/checks.py
|
check_response
|
def check_response(response):
"""
checks the response if the server returned an error raises an exception.
"""
if response.status_code < 200 or response.status_code > 300:
raise ServerError('API requests returned with error: %s'
% response.status_code)
try:
response_text = loads(response.text)
except ValueError:
raise ServerError('The API did not returned a JSON string.')
if not response_text:
raise EmptyResponse()
if 'failure' in response_text:
if response_text['failure'] == 'Falscher Dateityp':
raise UnsupportedFormat('Please look at picflash.org '
'witch formats are supported')
else:
raise UnkownError(response_text['failure'])
|
python
|
def check_response(response):
"""
checks the response if the server returned an error raises an exception.
"""
if response.status_code < 200 or response.status_code > 300:
raise ServerError('API requests returned with error: %s'
% response.status_code)
try:
response_text = loads(response.text)
except ValueError:
raise ServerError('The API did not returned a JSON string.')
if not response_text:
raise EmptyResponse()
if 'failure' in response_text:
if response_text['failure'] == 'Falscher Dateityp':
raise UnsupportedFormat('Please look at picflash.org '
'witch formats are supported')
else:
raise UnkownError(response_text['failure'])
|
[
"def",
"check_response",
"(",
"response",
")",
":",
"if",
"response",
".",
"status_code",
"<",
"200",
"or",
"response",
".",
"status_code",
">",
"300",
":",
"raise",
"ServerError",
"(",
"'API requests returned with error: %s'",
"%",
"response",
".",
"status_code",
")",
"try",
":",
"response_text",
"=",
"loads",
"(",
"response",
".",
"text",
")",
"except",
"ValueError",
":",
"raise",
"ServerError",
"(",
"'The API did not returned a JSON string.'",
")",
"if",
"not",
"response_text",
":",
"raise",
"EmptyResponse",
"(",
")",
"if",
"'failure'",
"in",
"response_text",
":",
"if",
"response_text",
"[",
"'failure'",
"]",
"==",
"'Falscher Dateityp'",
":",
"raise",
"UnsupportedFormat",
"(",
"'Please look at picflash.org '",
"'witch formats are supported'",
")",
"else",
":",
"raise",
"UnkownError",
"(",
"response_text",
"[",
"'failure'",
"]",
")"
] |
checks the response if the server returned an error raises an exception.
|
[
"checks",
"the",
"response",
"if",
"the",
"server",
"returned",
"an",
"error",
"raises",
"an",
"exception",
"."
] |
c8a5d1542dbd421e84afd5ee81fe76efec89fb95
|
https://github.com/Arvedui/picuplib/blob/c8a5d1542dbd421e84afd5ee81fe76efec89fb95/picuplib/checks.py#L77-L98
|
241,724
|
Arvedui/picuplib
|
picuplib/checks.py
|
check_if_redirect
|
def check_if_redirect(url):
"""
checks if server redirects url
"""
response = head(url, headers={'User-Agent': USER_AGENT})
if response.status_code >= 300 and response.status_code < 400:
return response.headers['location']
return None
|
python
|
def check_if_redirect(url):
"""
checks if server redirects url
"""
response = head(url, headers={'User-Agent': USER_AGENT})
if response.status_code >= 300 and response.status_code < 400:
return response.headers['location']
return None
|
[
"def",
"check_if_redirect",
"(",
"url",
")",
":",
"response",
"=",
"head",
"(",
"url",
",",
"headers",
"=",
"{",
"'User-Agent'",
":",
"USER_AGENT",
"}",
")",
"if",
"response",
".",
"status_code",
">=",
"300",
"and",
"response",
".",
"status_code",
"<",
"400",
":",
"return",
"response",
".",
"headers",
"[",
"'location'",
"]",
"return",
"None"
] |
checks if server redirects url
|
[
"checks",
"if",
"server",
"redirects",
"url"
] |
c8a5d1542dbd421e84afd5ee81fe76efec89fb95
|
https://github.com/Arvedui/picuplib/blob/c8a5d1542dbd421e84afd5ee81fe76efec89fb95/picuplib/checks.py#L101-L109
|
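The same redirect probe can be written directly against the requests package; a hedged standalone sketch, assuming requests is installed (the User-Agent string and URL are placeholders).

import requests

def first_redirect_target(url, user_agent='example-agent/1.0'):
    """Return the Location header of a 3xx response, or None. Illustrative only."""
    response = requests.head(url, headers={'User-Agent': user_agent},
                             allow_redirects=False)
    if 300 <= response.status_code < 400:
        return response.headers.get('location')
    return None

# print(first_redirect_target('http://example.com/short-link'))  # hypothetical URL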
241,725
|
deviantony/valigator
|
valigator/scheduler.py
|
validate_backup
|
def validate_backup(configuration, backup_data):
"""Celery task.
It will extract the backup archive into a unique folder
in the temporary directory specified in the configuration.
Once extracted, a Docker container will be started and will
start a restoration procedure. The worker will wait for the
container to exit and retrieve its return code.
A notification is sent if the return code is != 0.
If the return code == 0, the container will be removed.
Lastly, it will remove the temporary workdir.
"""
extract_archive(backup_data['archive_path'],
backup_data['workdir'])
docker_client = Client(configuration['docker']['url'])
container = run_container(docker_client, backup_data)
return_code = docker_client.wait(container)
print('Container return code: {}'.format(return_code))
if return_code != 0:
notifier = MailNotifier(configuration['mail'])
report = {'archive': backup_data['archive_path'],
'image': backup_data['image'],
'container_id': container.get('Id')}
notifier.send_report(report)
else:
docker_client.remove_container(container)
remove_file(backup_data['workdir'])
|
python
|
def validate_backup(configuration, backup_data):
"""Celery task.
It will extract the backup archive into a unique folder
in the temporary directory specified in the configuration.
Once extracted, a Docker container will be started and will
start a restoration procedure. The worker will wait for the
container to exit and retrieve its return code.
A notification is sent if the return code is != 0.
If the return code == 0, the container will be removed.
Lastly, it will remove the temporary workdir.
"""
extract_archive(backup_data['archive_path'],
backup_data['workdir'])
docker_client = Client(configuration['docker']['url'])
container = run_container(docker_client, backup_data)
return_code = docker_client.wait(container)
print('Container return code: {}'.format(return_code))
if return_code != 0:
notifier = MailNotifier(configuration['mail'])
report = {'archive': backup_data['archive_path'],
'image': backup_data['image'],
'container_id': container.get('Id')}
notifier.send_report(report)
else:
docker_client.remove_container(container)
remove_file(backup_data['workdir'])
|
[
"def",
"validate_backup",
"(",
"configuration",
",",
"backup_data",
")",
":",
"extract_archive",
"(",
"backup_data",
"[",
"'archive_path'",
"]",
",",
"backup_data",
"[",
"'workdir'",
"]",
")",
"docker_client",
"=",
"Client",
"(",
"configuration",
"[",
"'docker'",
"]",
"[",
"'url'",
"]",
")",
"container",
"=",
"run_container",
"(",
"docker_client",
",",
"backup_data",
")",
"return_code",
"=",
"docker_client",
".",
"wait",
"(",
"container",
")",
"print",
"(",
"'Container return code: {}'",
".",
"format",
"(",
"return_code",
")",
")",
"if",
"return_code",
"!=",
"0",
":",
"notifier",
"=",
"MailNotifier",
"(",
"configuration",
"[",
"'mail'",
"]",
")",
"report",
"=",
"{",
"'archive'",
":",
"backup_data",
"[",
"'archive_path'",
"]",
",",
"'image'",
":",
"backup_data",
"[",
"'image'",
"]",
",",
"'container_id'",
":",
"container",
".",
"get",
"(",
"'Id'",
")",
"}",
"notifier",
".",
"send_report",
"(",
"report",
")",
"else",
":",
"docker_client",
".",
"remove_container",
"(",
"container",
")",
"remove_file",
"(",
"backup_data",
"[",
"'workdir'",
"]",
")"
] |
Celery task.
It will extract the backup archive into a unique folder
in the temporary directory specified in the configuration.
Once extracted, a Docker container will be started and will
start a restoration procedure. The worker will wait for the
container to exit and retrieve its return code.
A notification is sent if the return code is != 0.
If the return code == 0, the container will be removed.
Lastly, it will remove the temporary workdir.
|
[
"Celery",
"task",
".",
"It",
"will",
"extract",
"the",
"backup",
"archive",
"into",
"a",
"unique",
"folder",
"in",
"the",
"temporary",
"directory",
"specified",
"in",
"the",
"configuration",
"."
] |
0557029bc58ea1270e358c14ca382d3807ed5b6f
|
https://github.com/deviantony/valigator/blob/0557029bc58ea1270e358c14ca382d3807ed5b6f/valigator/scheduler.py#L11-L38
|
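The extraction step described in the validate_backup docstring can be approximated with the standard library alone; a hedged sketch that does not reproduce valigator's Docker handling, with hypothetical paths.

import shutil
import tempfile

def extract_backup(archive_path):
    """Unpack a backup archive into a fresh temporary workdir and return its path."""
    workdir = tempfile.mkdtemp(prefix='valigator-')
    shutil.unpack_archive(archive_path, workdir)
    return workdir

# workdir = extract_backup('/backups/db-2024-01-01.tar.gz')  # hypothetical path
# ... run the restoration container against workdir, then clean up:
# shutil.rmtree(workdir)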
241,726
|
codecobblers/modified
|
modified.py
|
module_files
|
def module_files(module, dependencies_dict=None):
"""
Scan a module and its entire dependency tree to create a dict of all files
and their modified time.
@param module: A <module> object
@param dependencies_dict: Pass an existing dict to add only unscanned
files or None to create a new file dict
@return: A dict containing filenames as keys with their modified time
as value
"""
if dependencies_dict is None:
dependencies_dict = dict()
if hasattr(module, '__file__'):
filename = module.__file__
if filename not in dependencies_dict:
realname, modified_time = _get_filename_and_modified(filename)
if realname and realname not in dependencies_dict:
dependencies_dict[realname] = modified_time
for name in dir(module):
try:
item = getattr(module, name)
if hasattr(item, '__file__'):
module_files(item, dependencies_dict)
elif hasattr(item, '__module__'):
item = sys.modules[getattr(item, '__module__')]
if hasattr(item, '__file__'):
module_files(item, dependencies_dict)
except (AttributeError, KeyError):
pass
return dependencies_dict
|
python
|
def module_files(module, dependencies_dict=None):
"""
Scan a module and its entire dependency tree to create a dict of all files
and their modified time.
@param module: A <module> object
@param dependencies_dict: Pass an existing dict to add only unscanned
files or None to create a new file dict
@return: A dict containing filenames as keys with their modified time
as value
"""
if dependencies_dict is None:
dependencies_dict = dict()
if hasattr(module, '__file__'):
filename = module.__file__
if filename not in dependencies_dict:
realname, modified_time = _get_filename_and_modified(filename)
if realname and realname not in dependencies_dict:
dependencies_dict[realname] = modified_time
for name in dir(module):
try:
item = getattr(module, name)
if hasattr(item, '__file__'):
module_files(item, dependencies_dict)
elif hasattr(item, '__module__'):
item = sys.modules[getattr(item, '__module__')]
if hasattr(item, '__file__'):
module_files(item, dependencies_dict)
except (AttributeError, KeyError):
pass
return dependencies_dict
|
[
"def",
"module_files",
"(",
"module",
",",
"dependencies_dict",
"=",
"None",
")",
":",
"if",
"dependencies_dict",
"is",
"None",
":",
"dependencies_dict",
"=",
"dict",
"(",
")",
"if",
"hasattr",
"(",
"module",
",",
"'__file__'",
")",
":",
"filename",
"=",
"module",
".",
"__file__",
"if",
"filename",
"not",
"in",
"dependencies_dict",
":",
"realname",
",",
"modified_time",
"=",
"_get_filename_and_modified",
"(",
"filename",
")",
"if",
"realname",
"and",
"realname",
"not",
"in",
"dependencies_dict",
":",
"dependencies_dict",
"[",
"realname",
"]",
"=",
"modified_time",
"for",
"name",
"in",
"dir",
"(",
"module",
")",
":",
"try",
":",
"item",
"=",
"getattr",
"(",
"module",
",",
"name",
")",
"if",
"hasattr",
"(",
"item",
",",
"'__file__'",
")",
":",
"module_files",
"(",
"item",
",",
"dependencies_dict",
")",
"elif",
"hasattr",
"(",
"item",
",",
"'__module__'",
")",
":",
"item",
"=",
"sys",
".",
"modules",
"[",
"getattr",
"(",
"item",
",",
"'__module__'",
")",
"]",
"if",
"hasattr",
"(",
"item",
",",
"'__file__'",
")",
":",
"module_files",
"(",
"item",
",",
"dependencies_dict",
")",
"except",
"(",
"AttributeError",
",",
"KeyError",
")",
":",
"pass",
"return",
"dependencies_dict"
] |
Scan a module and its entire dependency tree to create a dict of all files
and their modified time.
@param module: A <module> object
@param dependencies_dict: Pass an existing dict to add only unscanned
files or None to create a new file dict
@return: A dict containing filenames as keys with their modified time
as value
|
[
"Scan",
"a",
"module",
"and",
"its",
"entire",
"dependency",
"tree",
"to",
"create",
"a",
"dict",
"of",
"all",
"files",
"and",
"their",
"modified",
"time",
"."
] |
1cca9337d4de44fa660d1601ed43b71e00d8b6f5
|
https://github.com/codecobblers/modified/blob/1cca9337d4de44fa660d1601ed43b71e00d8b6f5/modified.py#L82-L112
|
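A condensed standalone sketch of the same idea as module_files: collect modification times for every already-imported module file via sys.modules, without the recursive attribute walk above.

import os
import sys

def loaded_module_mtimes():
    """Map each loaded module's source file to its current mtime. Illustrative only."""
    mtimes = {}
    for module in list(sys.modules.values()):
        filename = getattr(module, '__file__', None)
        if filename and os.path.isfile(filename):
            mtimes[filename] = os.path.getmtime(filename)
    return mtimes

# snapshot = loaded_module_mtimes()
# ... later, compare a fresh snapshot against `snapshot` to spot edited files.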
241,727
|
codecobblers/modified
|
modified.py
|
files
|
def files():
"""
Scan all modules in the currently running app to create a dict of all
files and their modified time.
@note The scan only occurs the first time this function is called.
Subsequent calls simply return the global dict.
@return: A dict containing filenames as keys with their modified time
as value
"""
if not _scanned:
if not module_files(sys.modules['__main__'], _process_files):
for module in sys.modules.values():
if hasattr(module, '__file__'):
filename = module.__file__
if filename not in _process_files:
realname, modified_time = _get_filename_and_modified(filename)
if realname and realname not in _process_files:
_process_files[realname] = modified_time
return _process_files
|
python
|
def files():
"""
Scan all modules in the currently running app to create a dict of all
files and their modified time.
@note The scan only occurs the first time this function is called.
Subsequent calls simply return the global dict.
@return: A dict containing filenames as keys with their modified time
as value
"""
if not _scanned:
if not module_files(sys.modules['__main__'], _process_files):
for module in sys.modules.values():
if hasattr(module, '__file__'):
filename = module.__file__
if filename not in _process_files:
realname, modified_time = _get_filename_and_modified(filename)
if realname and realname not in _process_files:
_process_files[realname] = modified_time
return _process_files
|
[
"def",
"files",
"(",
")",
":",
"if",
"not",
"_scanned",
":",
"if",
"not",
"module_files",
"(",
"sys",
".",
"modules",
"[",
"'__main__'",
"]",
",",
"_process_files",
")",
":",
"for",
"module",
"in",
"sys",
".",
"modules",
".",
"values",
"(",
")",
":",
"if",
"hasattr",
"(",
"module",
",",
"'__file__'",
")",
":",
"filename",
"=",
"module",
".",
"__file__",
"if",
"filename",
"not",
"in",
"_process_files",
":",
"realname",
",",
"modified_time",
"=",
"_get_filename_and_modified",
"(",
"filename",
")",
"if",
"realname",
"and",
"realname",
"not",
"in",
"_process_files",
":",
"_process_files",
"[",
"realname",
"]",
"=",
"modified_time",
"return",
"_process_files"
] |
Scan all modules in the currently running app to create a dict of all
files and their modified time.
@note The scan only occurs the first time this function is called.
Subsequent calls simply return the global dict.
@return: A dict containing filenames as keys with their modified time
as value
|
[
"Scan",
"all",
"modules",
"in",
"the",
"currently",
"running",
"app",
"to",
"create",
"a",
"dict",
"of",
"all",
"files",
"and",
"their",
"modified",
"time",
"."
] |
1cca9337d4de44fa660d1601ed43b71e00d8b6f5
|
https://github.com/codecobblers/modified/blob/1cca9337d4de44fa660d1601ed43b71e00d8b6f5/modified.py#L115-L135
|
241,728
|
codecobblers/modified
|
modified.py
|
hup_hook
|
def hup_hook(signal_or_callable=signal.SIGTERM, verbose=False):
"""
Register a signal handler for `signal.SIGHUP` that checks for modified
files and only acts if at least one modified file is found.
@type signal_or_callable: str, int or callable
@param signal_or_callable: You can pass either a signal or a callable.
The signal can be specified by name or number. If specifying by name,
the 'SIG' portion is optional. For example, valid values for SIGINT
include 'INT', 'SIGINT' and `signal.SIGINT`.
Alternatively, you can pass a callable that will be called with the list
of changed files. So the call signature should be `func(list)`. The return
value of the callable is ignored.
@type verbose: bool or callable
@param verbose: Defaults to False. True indicates that a message should be
printed. You can also pass a callable such as log.info.
"""
#noinspection PyUnusedLocal
def handle_hup(signum, frame):
changed = modified()
if changed:
if callable(signal_or_callable):
func = signal_or_callable
args = (changed,)
op = 'Calling'
try:
name = signal_or_callable.__name__
except Exception:
name = str(signal_or_callable)
else:
if isinstance(signal_or_callable, int):
name = str(signal_or_callable)
signum = signal_or_callable
if verbose:
for item in dir(signal):
if item.startswith('SIG') and getattr(signal, item) == signal_or_callable:
name = item
break
else:
name = signal_or_callable if signal_or_callable.startswith('SIG') else 'SIG' + signal_or_callable
signum = getattr(signal, name)
func = os.kill
args = (os.getpid(), signum)
op = 'Sending'
if verbose:
more = ' and {0} other files'.format(len(changed)) if len(changed) > 1 else ''
message = '{0} {1} because {2}{3} changed'.format(op, name, changed[0], more)
if callable(verbose):
#noinspection PyCallingNonCallable
verbose(message)
else:
print(message)
func(*args)
files()
signal.signal(signal.SIGHUP, handle_hup)
signal.siginterrupt(signal.SIGHUP, False)
|
python
|
def hup_hook(signal_or_callable=signal.SIGTERM, verbose=False):
"""
Register a signal handler for `signal.SIGHUP` that checks for modified
files and only acts if at least one modified file is found.
@type signal_or_callable: str, int or callable
@param signal_or_callable: You can pass either a signal or a callable.
The signal can be specified by name or number. If specifying by name,
the 'SIG' portion is optional. For example, valid values for SIGINT
include 'INT', 'SIGINT' and `signal.SIGINT`.
Alternatively, you can pass a callable that will be called with the list
of changed files. So the call signature should be `func(list)`. The return
value of the callable is ignored.
@type verbose: bool or callable
@param verbose: Defaults to False. True indicates that a message should be
printed. You can also pass a callable such as log.info.
"""
#noinspection PyUnusedLocal
def handle_hup(signum, frame):
changed = modified()
if changed:
if callable(signal_or_callable):
func = signal_or_callable
args = (changed,)
op = 'Calling'
try:
name = signal_or_callable.__name__
except Exception:
name = str(signal_or_callable)
else:
if isinstance(signal_or_callable, int):
name = str(signal_or_callable)
signum = signal_or_callable
if verbose:
for item in dir(signal):
if item.startswith('SIG') and getattr(signal, item) == signal_or_callable:
name = item
break
else:
name = signal_or_callable if signal_or_callable.startswith('SIG') else 'SIG' + signal_or_callable
signum = getattr(signal, name)
func = os.kill
args = (os.getpid(), signum)
op = 'Sending'
if verbose:
more = ' and {0} other files'.format(len(changed)) if len(changed) > 1 else ''
message = '{0} {1} because {2}{3} changed'.format(op, name, changed[0], more)
if callable(verbose):
#noinspection PyCallingNonCallable
verbose(message)
else:
print(message)
func(*args)
files()
signal.signal(signal.SIGHUP, handle_hup)
signal.siginterrupt(signal.SIGHUP, False)
|
[
"def",
"hup_hook",
"(",
"signal_or_callable",
"=",
"signal",
".",
"SIGTERM",
",",
"verbose",
"=",
"False",
")",
":",
"#noinspection PyUnusedLocal",
"def",
"handle_hup",
"(",
"signum",
",",
"frame",
")",
":",
"changed",
"=",
"modified",
"(",
")",
"if",
"changed",
":",
"if",
"callable",
"(",
"signal_or_callable",
")",
":",
"func",
"=",
"signal_or_callable",
"args",
"=",
"(",
"changed",
",",
")",
"op",
"=",
"'Calling'",
"try",
":",
"name",
"=",
"signal_or_callable",
".",
"__name__",
"except",
"Exception",
":",
"name",
"=",
"str",
"(",
"signal_or_callable",
")",
"else",
":",
"if",
"isinstance",
"(",
"signal_or_callable",
",",
"int",
")",
":",
"name",
"=",
"str",
"(",
"signal_or_callable",
")",
"signum",
"=",
"signal_or_callable",
"if",
"verbose",
":",
"for",
"item",
"in",
"dir",
"(",
"signal",
")",
":",
"if",
"item",
".",
"startswith",
"(",
"'SIG'",
")",
"and",
"getattr",
"(",
"signal",
",",
"item",
")",
"==",
"signal_or_callable",
":",
"name",
"=",
"item",
"break",
"else",
":",
"name",
"=",
"signal_or_callable",
"if",
"signal_or_callable",
".",
"startswith",
"(",
"'SIG'",
")",
"else",
"'SIG'",
"+",
"signal_or_callable",
"signum",
"=",
"getattr",
"(",
"signal",
",",
"name",
")",
"func",
"=",
"os",
".",
"kill",
"args",
"=",
"(",
"os",
".",
"getpid",
"(",
")",
",",
"signum",
")",
"op",
"=",
"'Sending'",
"if",
"verbose",
":",
"more",
"=",
"' and {0} other files'",
".",
"format",
"(",
"len",
"(",
"changed",
")",
")",
"if",
"len",
"(",
"changed",
")",
">",
"1",
"else",
"''",
"message",
"=",
"'{0} {1} because {2}{3} changed'",
".",
"format",
"(",
"op",
",",
"name",
",",
"changed",
"[",
"0",
"]",
",",
"more",
")",
"if",
"callable",
"(",
"verbose",
")",
":",
"#noinspection PyCallingNonCallable",
"verbose",
"(",
"message",
")",
"else",
":",
"print",
"(",
"message",
")",
"func",
"(",
"*",
"args",
")",
"files",
"(",
")",
"signal",
".",
"signal",
"(",
"signal",
".",
"SIGHUP",
",",
"handle_hup",
")",
"signal",
".",
"siginterrupt",
"(",
"signal",
".",
"SIGHUP",
",",
"False",
")"
] |
Register a signal handler for `signal.SIGHUP` that checks for modified
files and only acts if at least one modified file is found.
@type signal_or_callable: str, int or callable
@param signal_or_callable: You can pass either a signal or a callable.
The signal can be specified by name or number. If specifying by name,
the 'SIG' portion is optional. For example, valid values for SIGINT
include 'INT', 'SIGINT' and `signal.SIGINT`.
Alternatively, you can pass a callable that will be called with the list
of changed files. So the call signature should be `func(list)`. The return
value of the callable is ignored.
@type verbose: bool or callable
@param verbose: Defaults to False. True indicates that a message should be
printed. You can also pass a callable such as log.info.
|
[
"Register",
"a",
"signal",
"handler",
"for",
"signal",
".",
"SIGHUP",
"that",
"checks",
"for",
"modified",
"files",
"and",
"only",
"acts",
"if",
"at",
"least",
"one",
"modified",
"file",
"is",
"found",
"."
] |
1cca9337d4de44fa660d1601ed43b71e00d8b6f5
|
https://github.com/codecobblers/modified/blob/1cca9337d4de44fa660d1601ed43b71e00d8b6f5/modified.py#L167-L225
|
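A stripped-down sketch of the mechanism behind hup_hook: install a SIGHUP handler that signals the process only when a watched file has changed. The _changed_files helper is a stand-in for the library's modified() bookkeeping, and the example watches only the running script itself.

import os
import signal

WATCHED = {__file__: os.path.getmtime(__file__)}

def _changed_files():
    # Stand-in for modified(): report watched files whose mtime moved.
    return [path for path, mtime in WATCHED.items()
            if os.path.getmtime(path) != mtime]

def _handle_hup(signum, frame):
    changed = _changed_files()
    if changed:
        print('Files changed, sending SIGTERM to self:', changed)
        os.kill(os.getpid(), signal.SIGTERM)

signal.signal(signal.SIGHUP, _handle_hup)   # POSIX only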
241,729
|
salimm/msgpack-pystream
|
msgpackstream/backend/python/stream.py
|
StreamUnpacker.handle_segment_ended
|
def handle_segment_ended(self):
'''
process end of the segment based on template
'''
if self._state[1].value.endevent is not None:
self.events.append((self._state[1].value.endevent, self._state[0], None))
if self._state[1].value.multiplier is 2:
self.parentismap = 0
self.waitingforprop = 0
if(len(self._stack) is 0):
self._scstate = ScannerState.IDLE
return
if self._state[1].value.valuetype is not ValueType.RAW:
self._stack[-1][3] = self._stack[-1][3] - 1 # #???
if self._stack[-1][3] is 0:
self._scstate = ScannerState.SEGMENT_ENDED
self._state = self._stack.pop() # pop last state from stack
if self._state[1].value.multiplier is 2:
self.parentismap = 1
self.waitingforprop = 1
self.handle_segment_ended()
else:
if self._stack[-1][1].value.multiplier is 2:
self.parentismap = 1
self.waitingforprop = 1
self._scstate = ScannerState.WAITING_FOR_HEADER
|
python
|
def handle_segment_ended(self):
'''
process end of the segment based on template
'''
if self._state[1].value.endevent is not None:
self.events.append((self._state[1].value.endevent, self._state[0], None))
if self._state[1].value.multiplier is 2:
self.parentismap = 0
self.waitingforprop = 0
if(len(self._stack) is 0):
self._scstate = ScannerState.IDLE
return
if self._state[1].value.valuetype is not ValueType.RAW:
self._stack[-1][3] = self._stack[-1][3] - 1 # #???
if self._stack[-1][3] is 0:
self._scstate = ScannerState.SEGMENT_ENDED
self._state = self._stack.pop() # pop last state from stack
if self._state[1].value.multiplier is 2:
self.parentismap = 1
self.waitingforprop = 1
self.handle_segment_ended()
else:
if self._stack[-1][1].value.multiplier is 2:
self.parentismap = 1
self.waitingforprop = 1
self._scstate = ScannerState.WAITING_FOR_HEADER
|
[
"def",
"handle_segment_ended",
"(",
"self",
")",
":",
"if",
"self",
".",
"_state",
"[",
"1",
"]",
".",
"value",
".",
"endevent",
"is",
"not",
"None",
":",
"self",
".",
"events",
".",
"append",
"(",
"(",
"self",
".",
"_state",
"[",
"1",
"]",
".",
"value",
".",
"endevent",
",",
"self",
".",
"_state",
"[",
"0",
"]",
",",
"None",
")",
")",
"if",
"self",
".",
"_state",
"[",
"1",
"]",
".",
"value",
".",
"multiplier",
"is",
"2",
":",
"self",
".",
"parentismap",
"=",
"0",
"self",
".",
"waitingforprop",
"=",
"0",
"if",
"(",
"len",
"(",
"self",
".",
"_stack",
")",
"is",
"0",
")",
":",
"self",
".",
"_scstate",
"=",
"ScannerState",
".",
"IDLE",
"return",
"if",
"self",
".",
"_state",
"[",
"1",
"]",
".",
"value",
".",
"valuetype",
"is",
"not",
"ValueType",
".",
"RAW",
":",
"self",
".",
"_stack",
"[",
"-",
"1",
"]",
"[",
"3",
"]",
"=",
"self",
".",
"_stack",
"[",
"-",
"1",
"]",
"[",
"3",
"]",
"-",
"1",
"# #???",
"if",
"self",
".",
"_stack",
"[",
"-",
"1",
"]",
"[",
"3",
"]",
"is",
"0",
":",
"self",
".",
"_scstate",
"=",
"ScannerState",
".",
"SEGMENT_ENDED",
"self",
".",
"_state",
"=",
"self",
".",
"_stack",
".",
"pop",
"(",
")",
"# pop last state from stack",
"if",
"self",
".",
"_state",
"[",
"1",
"]",
".",
"value",
".",
"multiplier",
"is",
"2",
":",
"self",
".",
"parentismap",
"=",
"1",
"self",
".",
"waitingforprop",
"=",
"1",
"self",
".",
"handle_segment_ended",
"(",
")",
"else",
":",
"if",
"self",
".",
"_stack",
"[",
"-",
"1",
"]",
"[",
"1",
"]",
".",
"value",
".",
"multiplier",
"is",
"2",
":",
"self",
".",
"parentismap",
"=",
"1",
"self",
".",
"waitingforprop",
"=",
"1",
"self",
".",
"_scstate",
"=",
"ScannerState",
".",
"WAITING_FOR_HEADER"
] |
process end of the segment based on template
|
[
"process",
"end",
"of",
"the",
"segment",
"based",
"on",
"template"
] |
676158a5a8dd8ff56dca080d597943f67fc4325e
|
https://github.com/salimm/msgpack-pystream/blob/676158a5a8dd8ff56dca080d597943f67fc4325e/msgpackstream/backend/python/stream.py#L267-L292
|
241,730
|
davidmiller/letter
|
letter/__main__.py
|
main
|
def main():
"""
Do the things!
Return: 0
Exceptions:
"""
description = 'Letter - a commandline interface'
parser = argparse.ArgumentParser(description=description)
parser.add_argument('--gmail', action='store_true', help='Send via Gmail', )
args = parser.parse_args()
to = raw_input('To address > ')
subject = raw_input('Subject > ')
body = raw_input('Your Message > ')
if args.gmail:
user = fromaddr = raw_input('Gmail Address > ')
pw = getpass.getpass()
postie = letter.GmailPostman(user=user, pw=pw)
else:
postie = letter.Postman() # Unauthorized SMTP, localhost:25
fromaddr = raw_input('From address > ')
class Message(letter.Letter):
Postie = postie
From = fromaddr
To = to
Subject = subject
Body = body
return 0
|
python
|
def main():
"""
Do the things!
Return: 0
Exceptions:
"""
description = 'Letter - a commandline interface'
parser = argparse.ArgumentParser(description=description)
parser.add_argument('--gmail', action='store_true', help='Send via Gmail', )
args = parser.parse_args()
to = raw_input('To address > ')
subject = raw_input('Subject > ')
body = raw_input('Your Message > ')
if args.gmail:
user = fromaddr = raw_input('Gmail Address > ')
pw = getpass.getpass()
postie = letter.GmailPostman(user=user, pw=pw)
else:
postie = letter.Postman() # Unauthorized SMTP, localhost:25
fromaddr = raw_input('From address > ')
class Message(letter.Letter):
Postie = postie
From = fromaddr
To = to
Subject = subject
Body = body
return 0
|
[
"def",
"main",
"(",
")",
":",
"description",
"=",
"'Letter - a commandline interface'",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
"description",
"=",
"description",
")",
"parser",
".",
"add_argument",
"(",
"'--gmail'",
",",
"action",
"=",
"'store_true'",
",",
"help",
"=",
"'Send via Gmail'",
",",
")",
"args",
"=",
"parser",
".",
"parse_args",
"(",
")",
"to",
"=",
"raw_input",
"(",
"'To address > '",
")",
"subject",
"=",
"raw_input",
"(",
"'Subject > '",
")",
"body",
"=",
"raw_input",
"(",
"'Your Message > '",
")",
"if",
"args",
".",
"gmail",
":",
"user",
"=",
"fromaddr",
"=",
"raw_input",
"(",
"'Gmail Address > '",
")",
"pw",
"=",
"getpass",
".",
"getpass",
"(",
")",
"postie",
"=",
"letter",
".",
"GmailPostman",
"(",
"user",
"=",
"user",
",",
"pw",
"=",
"pw",
")",
"else",
":",
"postie",
"=",
"letter",
".",
"Postman",
"(",
")",
"# Unauthorized SMTP, localhost:25",
"fromaddr",
"=",
"raw_input",
"(",
"'From address > '",
")",
"class",
"Message",
"(",
"letter",
".",
"Letter",
")",
":",
"Postie",
"=",
"postie",
"From",
"=",
"fromaddr",
"To",
"=",
"to",
"Subject",
"=",
"subject",
"Body",
"=",
"body",
"return",
"0"
] |
Do the things!
Return: 0
Exceptions:
|
[
"Do",
"the",
"things!"
] |
c0c66ae2c6a792106e9a8374a01421817c8a8ae0
|
https://github.com/davidmiller/letter/blob/c0c66ae2c6a792106e9a8374a01421817c8a8ae0/letter/__main__.py#L10-L44
|
241,731
|
langloisjp/pysvclog
|
servicelog.py
|
UDPLogger.send
|
def send(self, jsonstr):
"""
Send jsonstr to the UDP collector
>>> logger = UDPLogger()
>>> logger.send('{"key": "value"}')
"""
udp_sock = socket(AF_INET, SOCK_DGRAM)
udp_sock.sendto(jsonstr.encode('utf-8'), self.addr)
|
python
|
def send(self, jsonstr):
"""
Send jsonstr to the UDP collector
>>> logger = UDPLogger()
>>> logger.send('{"key": "value"}')
"""
udp_sock = socket(AF_INET, SOCK_DGRAM)
udp_sock.sendto(jsonstr.encode('utf-8'), self.addr)
|
[
"def",
"send",
"(",
"self",
",",
"jsonstr",
")",
":",
"udp_sock",
"=",
"socket",
"(",
"AF_INET",
",",
"SOCK_DGRAM",
")",
"udp_sock",
".",
"sendto",
"(",
"jsonstr",
".",
"encode",
"(",
"'utf-8'",
")",
",",
"self",
".",
"addr",
")"
] |
Send jsonstr to the UDP collector
>>> logger = UDPLogger()
>>> logger.send('{"key": "value"}')
|
[
"Send",
"jsonstr",
"to",
"the",
"UDP",
"collector"
] |
ab429bb12e13dca63ffce082e633d8879b6e3854
|
https://github.com/langloisjp/pysvclog/blob/ab429bb12e13dca63ffce082e633d8879b6e3854/servicelog.py#L66-L74
|
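The send path above is a plain fire-and-forget UDP datagram; a self-contained sketch with a hypothetical collector address.

import json
from socket import socket, AF_INET, SOCK_DGRAM

collector_addr = ('127.0.0.1', 5005)          # hypothetical collector
payload = json.dumps({'key': 'value'})

udp_sock = socket(AF_INET, SOCK_DGRAM)
udp_sock.sendto(payload.encode('utf-8'), collector_addr)
udp_sock.close()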
241,732
|
NegativeMjark/mockingmirror
|
setup.py
|
read_file
|
def read_file(path):
"""Read a UTF-8 file from the package. Takes a list of strings to join to
make the path"""
file_path = os.path.join(here, *path)
with open(file_path, encoding="utf-8") as f:
return f.read()
|
python
|
def read_file(path):
"""Read a UTF-8 file from the package. Takes a list of strings to join to
make the path"""
file_path = os.path.join(here, *path)
with open(file_path, encoding="utf-8") as f:
return f.read()
|
[
"def",
"read_file",
"(",
"path",
")",
":",
"file_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"here",
",",
"*",
"path",
")",
"with",
"open",
"(",
"file_path",
",",
"encoding",
"=",
"\"utf-8\"",
")",
"as",
"f",
":",
"return",
"f",
".",
"read",
"(",
")"
] |
Read a UTF-8 file from the package. Takes a list of strings to join to
make the path
|
[
"Read",
"a",
"UTF",
"-",
"8",
"file",
"from",
"the",
"package",
".",
"Takes",
"a",
"list",
"of",
"strings",
"to",
"join",
"to",
"make",
"the",
"path"
] |
75cf7d56ab18922394db89725ae9b37f1d4b3711
|
https://github.com/NegativeMjark/mockingmirror/blob/75cf7d56ab18922394db89725ae9b37f1d4b3711/setup.py#L8-L13
|
241,733
|
NegativeMjark/mockingmirror
|
setup.py
|
exec_file
|
def exec_file(path, name):
"""Extract a constant from a python file by looking for a line defining
the constant and executing it."""
result = {}
code = read_file(path)
lines = [line for line in code.split('\n') if line.startswith(name)]
exec("\n".join(lines), result)
return result[name]
|
python
|
def exec_file(path, name):
"""Extract a constant from a python file by looking for a line defining
the constant and executing it."""
result = {}
code = read_file(path)
lines = [line for line in code.split('\n') if line.startswith(name)]
exec("\n".join(lines), result)
return result[name]
|
[
"def",
"exec_file",
"(",
"path",
",",
"name",
")",
":",
"result",
"=",
"{",
"}",
"code",
"=",
"read_file",
"(",
"path",
")",
"lines",
"=",
"[",
"line",
"for",
"line",
"in",
"code",
".",
"split",
"(",
"'\\n'",
")",
"if",
"line",
".",
"startswith",
"(",
"name",
")",
"]",
"exec",
"(",
"\"\\n\"",
".",
"join",
"(",
"lines",
")",
",",
"result",
")",
"return",
"result",
"[",
"name",
"]"
] |
Extract a constant from a python file by looking for a line defining
the constant and executing it.
|
[
"Extract",
"a",
"constant",
"from",
"a",
"python",
"file",
"by",
"looking",
"for",
"a",
"line",
"defining",
"the",
"constant",
"and",
"executing",
"it",
"."
] |
75cf7d56ab18922394db89725ae9b37f1d4b3711
|
https://github.com/NegativeMjark/mockingmirror/blob/75cf7d56ab18922394db89725ae9b37f1d4b3711/setup.py#L16-L23
|
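exec_file is a common way for a setup.py to read a constant such as __version__ without importing the package; a standalone sketch of the same trick, with an illustrative file path and constant name.

def read_constant(file_path, name):
    """Execute only the lines that define `name` and return its value. Illustrative only."""
    namespace = {}
    with open(file_path, encoding='utf-8') as f:
        lines = [line for line in f.read().split('\n') if line.startswith(name)]
    exec('\n'.join(lines), namespace)
    return namespace[name]

# version = read_constant('mypackage/__init__.py', '__version__')  # hypothetical path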
241,734
|
GreenBankObservatory/django-resetdb
|
django_resetdb/dbops.py
|
pg_dump
|
def pg_dump(db_name, backup_path):
"""Dump db_name to backup_path"""
logger.info("Dumping %s to %s", repr(db_name), repr(backup_path))
return shell(
'pg_dump "{db_name}" -U "{USER}" -h "{HOST}" '
"--schema=public --file={backup_path}".format(
db_name=db_name, backup_path=backup_path, **DB
)
)
|
python
|
def pg_dump(db_name, backup_path):
"""Dump db_name to backup_path"""
logger.info("Dumping %s to %s", repr(db_name), repr(backup_path))
return shell(
'pg_dump "{db_name}" -U "{USER}" -h "{HOST}" '
"--schema=public --file={backup_path}".format(
db_name=db_name, backup_path=backup_path, **DB
)
)
|
[
"def",
"pg_dump",
"(",
"db_name",
",",
"backup_path",
")",
":",
"logger",
".",
"info",
"(",
"\"Dumping %s to %s\"",
",",
"repr",
"(",
"db_name",
")",
",",
"repr",
"(",
"backup_path",
")",
")",
"return",
"shell",
"(",
"'pg_dump \"{db_name}\" -U \"{USER}\" -h \"{HOST}\" '",
"\"--schema=public --file={backup_path}\"",
".",
"format",
"(",
"db_name",
"=",
"db_name",
",",
"backup_path",
"=",
"backup_path",
",",
"*",
"*",
"DB",
")",
")"
] |
Dump db_name to backup_path
|
[
"Dump",
"db_name",
"to",
"backup_path"
] |
767bddacb53823bb003e2abebfe8139a14b843f7
|
https://github.com/GreenBankObservatory/django-resetdb/blob/767bddacb53823bb003e2abebfe8139a14b843f7/django_resetdb/dbops.py#L50-L59
|
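The same dump can be driven through subprocess with an argument list rather than a shell string; a hedged sketch where the connection details and paths are placeholders.

import subprocess

def pg_dump_schema(db_name, backup_path, user='postgres', host='localhost'):
    """Dump the public schema of db_name to backup_path via the pg_dump CLI."""
    subprocess.run(
        ['pg_dump', db_name, '-U', user, '-h', host,
         '--schema=public', '--file=%s' % backup_path],
        check=True,
    )

# pg_dump_schema('mydb', '/tmp/mydb.sql')  # hypothetical database and path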
241,735
|
mrstephenneal/dirutility
|
dirutility/backup.py
|
ZipBackup._resolve_file_name
|
def _resolve_file_name(source, destination):
"""Create a filename for the destination zip file."""
number = 1
if os.path.exists(os.path.join(destination, os.path.basename(source) + '.zip')):
while True:
zip_filename = os.path.join(destination, os.path.basename(source) + '_' + str(number) + '.zip')
if not os.path.exists(zip_filename):
break
number = number + 1
else:
zip_filename = os.path.join(destination, os.path.basename(source) + '.zip')
return zip_filename
|
python
|
def _resolve_file_name(source, destination):
"""Create a filename for the destination zip file."""
number = 1
if os.path.exists(os.path.join(destination, os.path.basename(source) + '.zip')):
while True:
zip_filename = os.path.join(destination, os.path.basename(source) + '_' + str(number) + '.zip')
if not os.path.exists(zip_filename):
break
number = number + 1
else:
zip_filename = os.path.join(destination, os.path.basename(source) + '.zip')
return zip_filename
|
[
"def",
"_resolve_file_name",
"(",
"source",
",",
"destination",
")",
":",
"number",
"=",
"1",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"os",
".",
"path",
".",
"join",
"(",
"destination",
",",
"os",
".",
"path",
".",
"basename",
"(",
"source",
")",
"+",
"'.zip'",
")",
")",
":",
"while",
"True",
":",
"zip_filename",
"=",
"os",
".",
"path",
".",
"join",
"(",
"destination",
",",
"os",
".",
"path",
".",
"basename",
"(",
"source",
")",
"+",
"'_'",
"+",
"str",
"(",
"number",
")",
"+",
"'.zip'",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"zip_filename",
")",
":",
"break",
"number",
"=",
"number",
"+",
"1",
"else",
":",
"zip_filename",
"=",
"os",
".",
"path",
".",
"join",
"(",
"destination",
",",
"os",
".",
"path",
".",
"basename",
"(",
"source",
")",
"+",
"'.zip'",
")",
"return",
"zip_filename"
] |
Create a filename for the destination zip file.
|
[
"Create",
"a",
"filename",
"for",
"the",
"destination",
"zip",
"file",
"."
] |
339378659e2d7e09c53acfc51c5df745bb0cd517
|
https://github.com/mrstephenneal/dirutility/blob/339378659e2d7e09c53acfc51c5df745bb0cd517/dirutility/backup.py#L38-L49
|
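A slightly more compact way to write the same collision-avoidance loop as _resolve_file_name, using itertools.count; the behaviour is intended to match the method above, and the function name is local to this sketch.

import itertools
import os

def unique_zip_name(source, destination):
    """Return destination/<basename>.zip, suffixing _1, _2, ... if it already exists."""
    base = os.path.join(destination, os.path.basename(source))
    candidate = base + '.zip'
    if not os.path.exists(candidate):
        return candidate
    for number in itertools.count(1):
        candidate = '%s_%d.zip' % (base, number)
        if not os.path.exists(candidate):
            return candidate

# print(unique_zip_name('/data/project', '/backups'))  # hypothetical paths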
241,736
|
mrstephenneal/dirutility
|
dirutility/backup.py
|
ZipBackup._backup_compresslevel
|
def _backup_compresslevel(self, dirs):
"""Create a backup file with a compresslevel parameter."""
# Only supported in Python 3.7+
with ZipFile(self.zip_filename, 'w', compresslevel=self.compress_level) as backup_zip:
for path in tqdm(dirs, desc='Writing Zip Files', total=len(dirs)):
backup_zip.write(path, path[len(self.source):len(path)])
|
python
|
def _backup_compresslevel(self, dirs):
"""Create a backup file with a compresslevel parameter."""
# Only supported in Python 3.7+
with ZipFile(self.zip_filename, 'w', compresslevel=self.compress_level) as backup_zip:
for path in tqdm(dirs, desc='Writing Zip Files', total=len(dirs)):
backup_zip.write(path, path[len(self.source):len(path)])
|
[
"def",
"_backup_compresslevel",
"(",
"self",
",",
"dirs",
")",
":",
"# Only supported in Python 3.7+",
"with",
"ZipFile",
"(",
"self",
".",
"zip_filename",
",",
"'w'",
",",
"compresslevel",
"=",
"self",
".",
"compress_level",
")",
"as",
"backup_zip",
":",
"for",
"path",
"in",
"tqdm",
"(",
"dirs",
",",
"desc",
"=",
"'Writing Zip Files'",
",",
"total",
"=",
"len",
"(",
"dirs",
")",
")",
":",
"backup_zip",
".",
"write",
"(",
"path",
",",
"path",
"[",
"len",
"(",
"self",
".",
"source",
")",
":",
"len",
"(",
"path",
")",
"]",
")"
] |
Create a backup file with a compresslevel parameter.
|
[
"Create",
"a",
"backup",
"file",
"with",
"a",
"compresslevel",
"parameter",
"."
] |
339378659e2d7e09c53acfc51c5df745bb0cd517
|
https://github.com/mrstephenneal/dirutility/blob/339378659e2d7e09c53acfc51c5df745bb0cd517/dirutility/backup.py#L72-L77
|
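compresslevel (Python 3.7+) only takes effect together with a compressing algorithm such as ZIP_DEFLATED; a standalone sketch that pairs the two and stores paths relative to the source directory, with hypothetical paths.

import os
from zipfile import ZipFile, ZIP_DEFLATED

def zip_dir(source, zip_path, level=6):
    """Write every file under source into zip_path with deflate compression."""
    with ZipFile(zip_path, 'w', compression=ZIP_DEFLATED, compresslevel=level) as backup_zip:
        for root, _, files in os.walk(source):
            for name in files:
                full = os.path.join(root, name)
                backup_zip.write(full, os.path.relpath(full, source))

# zip_dir('/data/project', '/backups/project.zip')  # hypothetical paths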
241,737
|
mrstephenneal/dirutility
|
dirutility/backup.py
|
ZipBackup._backup_pb_gui
|
def _backup_pb_gui(self, dirs):
"""Create a zip backup with a GUI progress bar."""
import PySimpleGUI as sg
# Legacy support
with ZipFile(self.zip_filename, 'w') as backup_zip:
for count, path in enumerate(dirs):
backup_zip.write(path, path[len(self.source):len(path)])
if not sg.OneLineProgressMeter('Writing Zip Files', count + 1, len(dirs) - 1, 'Files'):
break
|
python
|
def _backup_pb_gui(self, dirs):
"""Create a zip backup with a GUI progress bar."""
import PySimpleGUI as sg
# Legacy support
with ZipFile(self.zip_filename, 'w') as backup_zip:
for count, path in enumerate(dirs):
backup_zip.write(path, path[len(self.source):len(path)])
if not sg.OneLineProgressMeter('Writing Zip Files', count + 1, len(dirs) - 1, 'Files'):
break
|
[
"def",
"_backup_pb_gui",
"(",
"self",
",",
"dirs",
")",
":",
"import",
"PySimpleGUI",
"as",
"sg",
"# Legacy support",
"with",
"ZipFile",
"(",
"self",
".",
"zip_filename",
",",
"'w'",
")",
"as",
"backup_zip",
":",
"for",
"count",
",",
"path",
"in",
"enumerate",
"(",
"dirs",
")",
":",
"backup_zip",
".",
"write",
"(",
"path",
",",
"path",
"[",
"len",
"(",
"self",
".",
"source",
")",
":",
"len",
"(",
"path",
")",
"]",
")",
"if",
"not",
"sg",
".",
"OneLineProgressMeter",
"(",
"'Writing Zip Files'",
",",
"count",
"+",
"1",
",",
"len",
"(",
"dirs",
")",
"-",
"1",
",",
"'Files'",
")",
":",
"break"
] |
Create a zip backup with a GUI progress bar.
|
[
"Create",
"a",
"zip",
"backup",
"with",
"a",
"GUI",
"progress",
"bar",
"."
] |
339378659e2d7e09c53acfc51c5df745bb0cd517
|
https://github.com/mrstephenneal/dirutility/blob/339378659e2d7e09c53acfc51c5df745bb0cd517/dirutility/backup.py#L79-L87
|
241,738
|
mrstephenneal/dirutility
|
dirutility/backup.py
|
ZipBackup._backup_pb_tqdm
|
def _backup_pb_tqdm(self, dirs):
"""Create a backup with a tqdm progress bar."""
with ZipFile(self.zip_filename, 'w') as backup_zip:
for path in tqdm(dirs, desc='Writing Zip Files', total=len(dirs)):
backup_zip.write(path, path[len(self.source):len(path)])
|
python
|
def _backup_pb_tqdm(self, dirs):
"""Create a backup with a tqdm progress bar."""
with ZipFile(self.zip_filename, 'w') as backup_zip:
for path in tqdm(dirs, desc='Writing Zip Files', total=len(dirs)):
backup_zip.write(path, path[len(self.source):len(path)])
|
[
"def",
"_backup_pb_tqdm",
"(",
"self",
",",
"dirs",
")",
":",
"with",
"ZipFile",
"(",
"self",
".",
"zip_filename",
",",
"'w'",
")",
"as",
"backup_zip",
":",
"for",
"path",
"in",
"tqdm",
"(",
"dirs",
",",
"desc",
"=",
"'Writing Zip Files'",
",",
"total",
"=",
"len",
"(",
"dirs",
")",
")",
":",
"backup_zip",
".",
"write",
"(",
"path",
",",
"path",
"[",
"len",
"(",
"self",
".",
"source",
")",
":",
"len",
"(",
"path",
")",
"]",
")"
] |
Create a backup with a tqdm progress bar.
|
[
"Create",
"a",
"backup",
"with",
"a",
"tqdm",
"progress",
"bar",
"."
] |
339378659e2d7e09c53acfc51c5df745bb0cd517
|
https://github.com/mrstephenneal/dirutility/blob/339378659e2d7e09c53acfc51c5df745bb0cd517/dirutility/backup.py#L89-L93
|
241,739
|
mrstephenneal/dirutility
|
dirutility/backup.py
|
ZipBackup.backup
|
def backup(self, paths=None):
"""Backup method driver."""
if not paths:
paths = self._get_paths()
try:
self._backup_compresslevel(paths)
except TypeError:
try:
self._backup_pb_gui(paths)
except ImportError:
self._backup_pb_tqdm(paths)
# Delete source if specified
if self.delete_source:
shutil.rmtree(self.source)
return self.zip_filename
|
python
|
def backup(self, paths=None):
"""Backup method driver."""
if not paths:
paths = self._get_paths()
try:
self._backup_compresslevel(paths)
except TypeError:
try:
self._backup_pb_gui(paths)
except ImportError:
self._backup_pb_tqdm(paths)
# Delete source if specified
if self.delete_source:
shutil.rmtree(self.source)
return self.zip_filename
|
[
"def",
"backup",
"(",
"self",
",",
"paths",
"=",
"None",
")",
":",
"if",
"not",
"paths",
":",
"paths",
"=",
"self",
".",
"_get_paths",
"(",
")",
"try",
":",
"self",
".",
"_backup_compresslevel",
"(",
"paths",
")",
"except",
"TypeError",
":",
"try",
":",
"self",
".",
"_backup_pb_gui",
"(",
"paths",
")",
"except",
"ImportError",
":",
"self",
".",
"_backup_pb_tqdm",
"(",
"paths",
")",
"# Delete source if specified",
"if",
"self",
".",
"delete_source",
":",
"shutil",
".",
"rmtree",
"(",
"self",
".",
"source",
")",
"return",
"self",
".",
"zip_filename"
] |
Backup method driver.
|
[
"Backup",
"method",
"driver",
"."
] |
339378659e2d7e09c53acfc51c5df745bb0cd517
|
https://github.com/mrstephenneal/dirutility/blob/339378659e2d7e09c53acfc51c5df745bb0cd517/dirutility/backup.py#L95-L111
|
241,740
|
50onRed/smr
|
smr/uri.py
|
get_uris
|
def get_uris(config):
""" returns a tuple of total file size in bytes, and the list of files """
file_names = []
if config.INPUT_DATA is None:
sys.stderr.write("you need to provide INPUT_DATA in config\n")
sys.exit(1)
if isinstance(config.INPUT_DATA, basestring):
config.INPUT_DATA = [config.INPUT_DATA]
file_size = 0
for uri in config.INPUT_DATA:
for regex, uri_method, _, _ in URI_REGEXES:
m = regex.match(uri)
if m is not None:
file_size += uri_method(m, file_names, config)
break
print("going to process {} files...".format(len(file_names)))
return file_size, file_names
|
python
|
def get_uris(config):
""" returns a tuple of total file size in bytes, and the list of files """
file_names = []
if config.INPUT_DATA is None:
sys.stderr.write("you need to provide INPUT_DATA in config\n")
sys.exit(1)
if isinstance(config.INPUT_DATA, basestring):
config.INPUT_DATA = [config.INPUT_DATA]
file_size = 0
for uri in config.INPUT_DATA:
for regex, uri_method, _, _ in URI_REGEXES:
m = regex.match(uri)
if m is not None:
file_size += uri_method(m, file_names, config)
break
print("going to process {} files...".format(len(file_names)))
return file_size, file_names
|
[
"def",
"get_uris",
"(",
"config",
")",
":",
"file_names",
"=",
"[",
"]",
"if",
"config",
".",
"INPUT_DATA",
"is",
"None",
":",
"sys",
".",
"stderr",
".",
"write",
"(",
"\"you need to provide INPUT_DATA in config\\n\"",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"if",
"isinstance",
"(",
"config",
".",
"INPUT_DATA",
",",
"basestring",
")",
":",
"config",
".",
"INPUT_DATA",
"=",
"[",
"config",
".",
"INPUT_DATA",
"]",
"file_size",
"=",
"0",
"for",
"uri",
"in",
"config",
".",
"INPUT_DATA",
":",
"for",
"regex",
",",
"uri_method",
",",
"_",
",",
"_",
"in",
"URI_REGEXES",
":",
"m",
"=",
"regex",
".",
"match",
"(",
"uri",
")",
"if",
"m",
"is",
"not",
"None",
":",
"file_size",
"+=",
"uri_method",
"(",
"m",
",",
"file_names",
",",
"config",
")",
"break",
"print",
"(",
"\"going to process {} files...\"",
".",
"format",
"(",
"len",
"(",
"file_names",
")",
")",
")",
"return",
"file_size",
",",
"file_names"
] |
returns a tuple of total file size in bytes, and the list of files
|
[
"returns",
"a",
"tuple",
"of",
"total",
"file",
"size",
"in",
"bytes",
"and",
"the",
"list",
"of",
"files"
] |
999b33d86b6a900d7c4aadf03cf4a661acba9f1b
|
https://github.com/50onRed/smr/blob/999b33d86b6a900d7c4aadf03cf4a661acba9f1b/smr/uri.py#L84-L100
|
241,741
|
b3j0f/schema
|
b3j0f/schema/lang/python.py
|
buildschema
|
def buildschema(_cls=None, **kwargs):
"""Class decorator used to build a schema from the decorate class.
:param type _cls: class to decorate.
:param kwargs: schema attributes to set.
:rtype: type
:return: schema class.
"""
if _cls is None:
return lambda _cls: buildschema(_cls=_cls, **kwargs)
result = build(_cls, **kwargs)
return result
|
python
|
def buildschema(_cls=None, **kwargs):
"""Class decorator used to build a schema from the decorate class.
:param type _cls: class to decorate.
:param kwargs: schema attributes to set.
:rtype: type
:return: schema class.
"""
if _cls is None:
return lambda _cls: buildschema(_cls=_cls, **kwargs)
result = build(_cls, **kwargs)
return result
|
[
"def",
"buildschema",
"(",
"_cls",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"_cls",
"is",
"None",
":",
"return",
"lambda",
"_cls",
":",
"buildschema",
"(",
"_cls",
"=",
"_cls",
",",
"*",
"*",
"kwargs",
")",
"result",
"=",
"build",
"(",
"_cls",
",",
"*",
"*",
"kwargs",
")",
"return",
"result"
] |
Class decorator used to build a schema from the decorate class.
:param type _cls: class to decorate.
:param kwargs: schema attributes to set.
:rtype: type
:return: schema class.
|
[
"Class",
"decorator",
"used",
"to",
"build",
"a",
"schema",
"from",
"the",
"decorate",
"class",
"."
] |
1c88c23337f5fef50254e65bd407112c43396dd9
|
https://github.com/b3j0f/schema/blob/1c88c23337f5fef50254e65bd407112c43396dd9/b3j0f/schema/lang/python.py#L108-L121
|
241,742
|
b3j0f/schema
|
b3j0f/schema/lang/python.py
|
funcschema
|
def funcschema(default=None, *args, **kwargs):
"""Decorator to use in order to transform a function into a schema."""
if default is None:
return lambda default: funcschema(default=default, *args, **kwargs)
return FunctionSchema(default=default, *args, **kwargs)
|
python
|
def funcschema(default=None, *args, **kwargs):
"""Decorator to use in order to transform a function into a schema."""
if default is None:
return lambda default: funcschema(default=default, *args, **kwargs)
return FunctionSchema(default=default, *args, **kwargs)
|
[
"def",
"funcschema",
"(",
"default",
"=",
"None",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"default",
"is",
"None",
":",
"return",
"lambda",
"default",
":",
"funcschema",
"(",
"default",
"=",
"default",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"FunctionSchema",
"(",
"default",
"=",
"default",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] |
Decorator to use in order to transform a function into a schema.
|
[
"Decorator",
"to",
"use",
"in",
"order",
"to",
"transform",
"a",
"function",
"into",
"a",
"schema",
"."
] |
1c88c23337f5fef50254e65bd407112c43396dd9
|
https://github.com/b3j0f/schema/blob/1c88c23337f5fef50254e65bd407112c43396dd9/b3j0f/schema/lang/python.py#L488-L493
|
241,743
|
hobson/pug-dj
|
pug/dj/crawler/models.py
|
import_wiki_json
|
def import_wiki_json(path='wikipedia_crawler_data.json', model=WikiItem, batch_len=100, db_alias='default', verbosity=2):
"""Read json file and create the appropriate records according to the given database model."""
return djdb.import_json(path=path, model=model, batch_len=batch_len, db_alias=db_alias, verbosity=verbosity)
|
python
|
def import_wiki_json(path='wikipedia_crawler_data.json', model=WikiItem, batch_len=100, db_alias='default', verbosity=2):
"""Read json file and create the appropriate records according to the given database model."""
return djdb.import_json(path=path, model=model, batch_len=batch_len, db_alias=db_alias, verbosity=verbosity)
|
[
"def",
"import_wiki_json",
"(",
"path",
"=",
"'wikipedia_crawler_data.json'",
",",
"model",
"=",
"WikiItem",
",",
"batch_len",
"=",
"100",
",",
"db_alias",
"=",
"'default'",
",",
"verbosity",
"=",
"2",
")",
":",
"return",
"djdb",
".",
"import_json",
"(",
"path",
"=",
"path",
",",
"model",
"=",
"model",
",",
"batch_len",
"=",
"batch_len",
",",
"db_alias",
"=",
"db_alias",
",",
"verbosity",
"=",
"verbosity",
")"
] |
Read json file and create the appropriate records according to the given database model.
|
[
"Read",
"json",
"file",
"and",
"create",
"the",
"appropriate",
"records",
"according",
"to",
"the",
"given",
"database",
"model",
"."
] |
55678b08755a55366ce18e7d3b8ea8fa4491ab04
|
https://github.com/hobson/pug-dj/blob/55678b08755a55366ce18e7d3b8ea8fa4491ab04/pug/dj/crawler/models.py#L81-L83
|
241,744
|
hobson/pug-dj
|
pug/dj/crawler/models.py
|
WikiItem.import_item
|
def import_item(self, item, crawler='wiki', truncate_strings=True, verbosity=0):
"""Import a single record from a Scrapy Item dict
>> WikiItem().import_item({'url': 'http://test.com', 'modified': '13 January 2014 00:15', 'crawler': 'more than thirty characters in this silly name'}) # doctest: +ELLIPSIS
<WikiItem: WikiItem('more than thirty characters in', u'http://test.com', '', datetime.datetime(2014, 1, 13, 0, 15), '')>
"""
item = dict(item)
self.crawler = str(crawler)
for k, v in self._item_mapping.iteritems():
if verbosity > 2:
print('%r: %r' % (k, v))
value = item.get(k, v['default'])
if value is None:
continue
try:
value = v['type'](value)
except:
pass
field = self.__class__._meta.get_field_by_name(v['name'])[0]
if isinstance(value, basestring):
max_length = getattr(field, 'max_length', None)
if max_length and len(value) > max_length:
if truncate_strings:
value = value[:max_length]
else:
raise RuntimeError('String loaded from json is length %s and destination field max_length is %s.' % (len(value), max_length))
if isinstance(field, (models.DateTimeField, models.DateField)):
value = util.clean_wiki_datetime(value)
setattr(self, v['name'], value)
return self
|
python
|
def import_item(self, item, crawler='wiki', truncate_strings=True, verbosity=0):
"""Import a single record from a Scrapy Item dict
>> WikiItem().import_item({'url': 'http://test.com', 'modified': '13 January 2014 00:15', 'crawler': 'more than thirty characters in this silly name'}) # doctest: +ELLIPSIS
<WikiItem: WikiItem('more than thirty characters in', u'http://test.com', '', datetime.datetime(2014, 1, 13, 0, 15), '')>
"""
item = dict(item)
self.crawler = str(crawler)
for k, v in self._item_mapping.iteritems():
if verbosity > 2:
print('%r: %r' % (k, v))
value = item.get(k, v['default'])
if value is None:
continue
try:
value = v['type'](value)
except:
pass
field = self.__class__._meta.get_field_by_name(v['name'])[0]
if isinstance(value, basestring):
max_length = getattr(field, 'max_length', None)
if max_length and len(value) > max_length:
if truncate_strings:
value = value[:max_length]
else:
raise RuntimeError('String loaded from json is length %s and destination field max_length is %s.' % (len(value), max_length))
if isinstance(field, (models.DateTimeField, models.DateField)):
value = util.clean_wiki_datetime(value)
setattr(self, v['name'], value)
return self
|
[
"def",
"import_item",
"(",
"self",
",",
"item",
",",
"crawler",
"=",
"'wiki'",
",",
"truncate_strings",
"=",
"True",
",",
"verbosity",
"=",
"0",
")",
":",
"item",
"=",
"dict",
"(",
"item",
")",
"self",
".",
"crawler",
"=",
"str",
"(",
"crawler",
")",
"for",
"k",
",",
"v",
"in",
"self",
".",
"_item_mapping",
".",
"iteritems",
"(",
")",
":",
"if",
"verbosity",
">",
"2",
":",
"print",
"(",
"'%r: %r'",
"%",
"(",
"k",
",",
"v",
")",
")",
"value",
"=",
"item",
".",
"get",
"(",
"k",
",",
"v",
"[",
"'default'",
"]",
")",
"if",
"value",
"is",
"None",
":",
"continue",
"try",
":",
"value",
"=",
"v",
"[",
"'type'",
"]",
"(",
"value",
")",
"except",
":",
"pass",
"field",
"=",
"self",
".",
"__class__",
".",
"_meta",
".",
"get_field_by_name",
"(",
"v",
"[",
"'name'",
"]",
")",
"[",
"0",
"]",
"if",
"isinstance",
"(",
"value",
",",
"basestring",
")",
":",
"max_length",
"=",
"getattr",
"(",
"field",
",",
"'max_length'",
",",
"None",
")",
"if",
"max_length",
"and",
"len",
"(",
"value",
")",
">",
"max_length",
":",
"if",
"truncate_strings",
":",
"value",
"=",
"value",
"[",
":",
"max_length",
"]",
"else",
":",
"raise",
"RuntimeError",
"(",
"'String loaded from json is length %s and destination field max_length is %s.'",
"%",
"(",
"len",
"(",
"value",
")",
",",
"max_length",
")",
")",
"if",
"isinstance",
"(",
"field",
",",
"(",
"models",
".",
"DateTimeField",
",",
"models",
".",
"DateField",
")",
")",
":",
"value",
"=",
"util",
".",
"clean_wiki_datetime",
"(",
"value",
")",
"setattr",
"(",
"self",
",",
"v",
"[",
"'name'",
"]",
",",
"value",
")",
"return",
"self"
] |
Import a single record from a Scrapy Item dict
>> WikiItem().import_item({'url': 'http://test.com', 'modified': '13 January 2014 00:15', 'crawler': 'more than thirty characters in this silly name'}) # doctest: +ELLIPSIS
<WikiItem: WikiItem('more than thirty characters in', u'http://test.com', '', datetime.datetime(2014, 1, 13, 0, 15), '')>
|
[
"Import",
"a",
"single",
"record",
"from",
"a",
"Scrapy",
"Item",
"dict"
] |
55678b08755a55366ce18e7d3b8ea8fa4491ab04
|
https://github.com/hobson/pug-dj/blob/55678b08755a55366ce18e7d3b8ea8fa4491ab04/pug/dj/crawler/models.py#L41-L71
|
241,745
|
ministryofjustice/django-form-error-reporting
|
form_error_reporting.py
|
OrderedQueryDict.urlencode
|
def urlencode(self):
"""
Convert dictionary into a query string; keys are
assumed to always be str
"""
output = ('%s=%s' % (k, quote(v)) for k, v in self.items())
return '&'.join(output)
|
python
|
def urlencode(self):
"""
Convert dictionary into a query string; keys are
assumed to always be str
"""
output = ('%s=%s' % (k, quote(v)) for k, v in self.items())
return '&'.join(output)
|
[
"def",
"urlencode",
"(",
"self",
")",
":",
"output",
"=",
"(",
"'%s=%s'",
"%",
"(",
"k",
",",
"quote",
"(",
"v",
")",
")",
"for",
"k",
",",
"v",
"in",
"self",
".",
"items",
"(",
")",
")",
"return",
"'&'",
".",
"join",
"(",
"output",
")"
] |
Convert dictionary into a query string; keys are
assumed to always be str
|
[
"Convert",
"dictionary",
"into",
"a",
"query",
"string",
";",
"keys",
"are",
"assumed",
"to",
"always",
"be",
"str"
] |
2d08dd5cc4321e1abf49241c515ccd7050d9f828
|
https://github.com/ministryofjustice/django-form-error-reporting/blob/2d08dd5cc4321e1abf49241c515ccd7050d9f828/form_error_reporting.py#L24-L30
|
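A standalone sketch of the same key=value joining, assuming Python 3's urllib.parse.quote stands in for the quote used above; the sample pairs are placeholders.

from urllib.parse import quote

def urlencode_pairs(pairs):
    """Join (key, value) pairs into a query string, percent-quoting the values."""
    return '&'.join('%s=%s' % (k, quote(v)) for k, v in pairs)

print(urlencode_pairs([('ec', 'myapp.forms.MyForm'),
                       ('el', 'This field is required.')]))
# ec=myapp.forms.MyForm&el=This%20field%20is%20required.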
241,746
|
ministryofjustice/django-form-error-reporting
|
form_error_reporting.py
|
GAErrorReportingMixin.is_valid
|
def is_valid(self):
"""
Error reporting is triggered when a form is checked for validity
"""
is_valid = super(GAErrorReportingMixin, self).is_valid()
if self.is_bound and not is_valid:
try:
self.report_errors_to_ga(self.errors)
except: # noqa: E722
logger.exception('Failed to report form errors to Google Analytics')
return is_valid
|
python
|
def is_valid(self):
"""
Error reporting is triggered when a form is checked for validity
"""
is_valid = super(GAErrorReportingMixin, self).is_valid()
if self.is_bound and not is_valid:
try:
self.report_errors_to_ga(self.errors)
except: # noqa: E722
logger.exception('Failed to report form errors to Google Analytics')
return is_valid
|
[
"def",
"is_valid",
"(",
"self",
")",
":",
"is_valid",
"=",
"super",
"(",
"GAErrorReportingMixin",
",",
"self",
")",
".",
"is_valid",
"(",
")",
"if",
"self",
".",
"is_bound",
"and",
"not",
"is_valid",
":",
"try",
":",
"self",
".",
"report_errors_to_ga",
"(",
"self",
".",
"errors",
")",
"except",
":",
"# noqa: E722",
"logger",
".",
"exception",
"(",
"'Failed to report form errors to Google Analytics'",
")",
"return",
"is_valid"
] |
Error reporting is triggered when a form is checked for validity
|
[
"Error",
"reporting",
"is",
"triggered",
"when",
"a",
"form",
"is",
"checked",
"for",
"validity"
] |
2d08dd5cc4321e1abf49241c515ccd7050d9f828
|
https://github.com/ministryofjustice/django-form-error-reporting/blob/2d08dd5cc4321e1abf49241c515ccd7050d9f828/form_error_reporting.py#L44-L54
|
241,747
|
ministryofjustice/django-form-error-reporting
|
form_error_reporting.py
|
GAErrorReportingMixin.get_ga_event_category
|
def get_ga_event_category(self):
"""
Event category, defaults to form class name
"""
return self.ga_event_category or '%s.%s' % (self.__class__.__module__, self.__class__.__name__)
|
python
|
def get_ga_event_category(self):
"""
Event category, defaults to form class name
"""
return self.ga_event_category or '%s.%s' % (self.__class__.__module__, self.__class__.__name__)
|
[
"def",
"get_ga_event_category",
"(",
"self",
")",
":",
"return",
"self",
".",
"ga_event_category",
"or",
"'%s.%s'",
"%",
"(",
"self",
".",
"__class__",
".",
"__module__",
",",
"self",
".",
"__class__",
".",
"__name__",
")"
] |
Event category, defaults to form class name
|
[
"Event",
"category",
"defaults",
"to",
"form",
"class",
"name"
] |
2d08dd5cc4321e1abf49241c515ccd7050d9f828
|
https://github.com/ministryofjustice/django-form-error-reporting/blob/2d08dd5cc4321e1abf49241c515ccd7050d9f828/form_error_reporting.py#L80-L84
|
241,748
|
ministryofjustice/django-form-error-reporting
|
form_error_reporting.py
|
GAErrorReportingMixin.format_ga_hit
|
def format_ga_hit(self, field_name, error_message):
"""
Format a single hit
"""
tracking_id = self.get_ga_tracking_id()
if not tracking_id:
warnings.warn('Google Analytics tracking ID is not set')
return None
query_dict = self.get_ga_query_dict()
query_dict['tid'] = tracking_id
query_dict['cid'] = self.get_ga_client_id()
query_dict['ec'] = self.get_ga_event_category()
query_dict['ea'] = field_name
query_dict['el'] = error_message
return query_dict.urlencode()
|
python
|
def format_ga_hit(self, field_name, error_message):
"""
Format a single hit
"""
tracking_id = self.get_ga_tracking_id()
if not tracking_id:
warnings.warn('Google Analytics tracking ID is not set')
return None
query_dict = self.get_ga_query_dict()
query_dict['tid'] = tracking_id
query_dict['cid'] = self.get_ga_client_id()
query_dict['ec'] = self.get_ga_event_category()
query_dict['ea'] = field_name
query_dict['el'] = error_message
return query_dict.urlencode()
|
[
"def",
"format_ga_hit",
"(",
"self",
",",
"field_name",
",",
"error_message",
")",
":",
"tracking_id",
"=",
"self",
".",
"get_ga_tracking_id",
"(",
")",
"if",
"not",
"tracking_id",
":",
"warnings",
".",
"warn",
"(",
"'Google Analytics tracking ID is not set'",
")",
"return",
"None",
"query_dict",
"=",
"self",
".",
"get_ga_query_dict",
"(",
")",
"query_dict",
"[",
"'tid'",
"]",
"=",
"tracking_id",
"query_dict",
"[",
"'cid'",
"]",
"=",
"self",
".",
"get_ga_client_id",
"(",
")",
"query_dict",
"[",
"'ec'",
"]",
"=",
"self",
".",
"get_ga_event_category",
"(",
")",
"query_dict",
"[",
"'ea'",
"]",
"=",
"field_name",
"query_dict",
"[",
"'el'",
"]",
"=",
"error_message",
"return",
"query_dict",
".",
"urlencode",
"(",
")"
] |
Format a single hit
|
[
"Format",
"a",
"single",
"hit"
] |
2d08dd5cc4321e1abf49241c515ccd7050d9f828
|
https://github.com/ministryofjustice/django-form-error-reporting/blob/2d08dd5cc4321e1abf49241c515ccd7050d9f828/form_error_reporting.py#L100-L114
|
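A self-contained sketch that assembles the same event parameters shown above (tid, cid, ec, ea, el) into a hit payload; the helper name and sample values are placeholders, and urllib.parse.urlencode is used here instead of the OrderedQueryDict from this module.

from urllib.parse import urlencode

def build_event_hit(tracking_id, client_id, category, action, label):
    """Assemble the event parameters used in format_ga_hit into a query string."""
    params = [
        ('tid', tracking_id),   # tracking ID
        ('cid', client_id),     # client ID
        ('ec', category),       # event category
        ('ea', action),         # event action (the field name above)
        ('el', label),          # event label (the error message above)
    ]
    return urlencode(params)

print(build_event_hit('UA-12345-6', 'client-1', 'myapp.forms.MyForm',
                      'email', 'Enter a valid email address.'))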
241,749
|
ministryofjustice/django-form-error-reporting
|
form_error_reporting.py
|
GARequestErrorReportingMixin.get_ga_tracking_id
|
def get_ga_tracking_id(self):
"""
Retrieve tracking ID from settings
"""
if hasattr(settings, self.ga_tracking_id_settings_key):
return getattr(settings, self.ga_tracking_id_settings_key)
return super(GARequestErrorReportingMixin, self).get_ga_tracking_id()
|
python
|
def get_ga_tracking_id(self):
"""
Retrieve tracking ID from settings
"""
if hasattr(settings, self.ga_tracking_id_settings_key):
return getattr(settings, self.ga_tracking_id_settings_key)
return super(GARequestErrorReportingMixin, self).get_ga_tracking_id()
|
[
"def",
"get_ga_tracking_id",
"(",
"self",
")",
":",
"if",
"hasattr",
"(",
"settings",
",",
"self",
".",
"ga_tracking_id_settings_key",
")",
":",
"return",
"getattr",
"(",
"settings",
",",
"self",
".",
"ga_tracking_id_settings_key",
")",
"return",
"super",
"(",
"GARequestErrorReportingMixin",
",",
"self",
")",
".",
"get_ga_tracking_id",
"(",
")"
] |
Retrieve tracking ID from settings
|
[
"Retrieve",
"tracking",
"ID",
"from",
"settings"
] |
2d08dd5cc4321e1abf49241c515ccd7050d9f828
|
https://github.com/ministryofjustice/django-form-error-reporting/blob/2d08dd5cc4321e1abf49241c515ccd7050d9f828/form_error_reporting.py#L150-L156
|
241,750
|
ministryofjustice/django-form-error-reporting
|
form_error_reporting.py
|
GARequestErrorReportingMixin.get_ga_client_id
|
def get_ga_client_id(self):
"""
Retrieve the client ID from the Google Analytics cookie, if available,
and save in the current session
"""
request = self.get_ga_request()
if not request or not hasattr(request, 'session'):
return super(GARequestErrorReportingMixin, self).get_ga_client_id()
if 'ga_client_id' not in request.session:
client_id = self.ga_cookie_re.match(request.COOKIES.get('_ga', ''))
client_id = client_id and client_id.group('cid') or str(uuid.uuid4())
request.session['ga_client_id'] = client_id
return request.session['ga_client_id']
|
python
|
def get_ga_client_id(self):
"""
Retrieve the client ID from the Google Analytics cookie, if available,
and save in the current session
"""
request = self.get_ga_request()
if not request or not hasattr(request, 'session'):
return super(GARequestErrorReportingMixin, self).get_ga_client_id()
if 'ga_client_id' not in request.session:
client_id = self.ga_cookie_re.match(request.COOKIES.get('_ga', ''))
client_id = client_id and client_id.group('cid') or str(uuid.uuid4())
request.session['ga_client_id'] = client_id
return request.session['ga_client_id']
|
[
"def",
"get_ga_client_id",
"(",
"self",
")",
":",
"request",
"=",
"self",
".",
"get_ga_request",
"(",
")",
"if",
"not",
"request",
"or",
"not",
"hasattr",
"(",
"request",
",",
"'session'",
")",
":",
"return",
"super",
"(",
"GARequestErrorReportingMixin",
",",
"self",
")",
".",
"get_ga_client_id",
"(",
")",
"if",
"'ga_client_id'",
"not",
"in",
"request",
".",
"session",
":",
"client_id",
"=",
"self",
".",
"ga_cookie_re",
".",
"match",
"(",
"request",
".",
"COOKIES",
".",
"get",
"(",
"'_ga'",
",",
"''",
")",
")",
"client_id",
"=",
"client_id",
"and",
"client_id",
".",
"group",
"(",
"'cid'",
")",
"or",
"str",
"(",
"uuid",
".",
"uuid4",
"(",
")",
")",
"request",
".",
"session",
"[",
"'ga_client_id'",
"]",
"=",
"client_id",
"return",
"request",
".",
"session",
"[",
"'ga_client_id'",
"]"
] |
Retrieve the client ID from the Google Analytics cookie, if available,
and save in the current session
|
[
"Retrieve",
"the",
"client",
"ID",
"from",
"the",
"Google",
"Analytics",
"cookie",
"if",
"available",
"and",
"save",
"in",
"the",
"current",
"session"
] |
2d08dd5cc4321e1abf49241c515ccd7050d9f828
|
https://github.com/ministryofjustice/django-form-error-reporting/blob/2d08dd5cc4321e1abf49241c515ccd7050d9f828/form_error_reporting.py#L165-L177
|
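A sketch of pulling a client id out of a _ga cookie with a fallback to a fresh UUID; the cookie layout assumed by the regular expression here ('GA1.<n>.<x>.<y>') is an assumption, since the library's ga_cookie_re is not shown in this record.

import re
import uuid

# Assumed cookie layout: 'GA1.<digit>.<random>.<timestamp>'; the client id is the
# last two dot-separated fields. This pattern is illustrative, not the library's.
GA_COOKIE_RE = re.compile(r'^GA1\.\d+\.(?P<cid>\d+\.\d+)$')

def client_id_from_cookie(cookie_value):
    """Return the client id embedded in a _ga cookie, or a fresh UUID."""
    match = GA_COOKIE_RE.match(cookie_value or '')
    return match.group('cid') if match else str(uuid.uuid4())

print(client_id_from_cookie('GA1.2.1550380028.1470165610'))  # 1550380028.1470165610
print(len(client_id_from_cookie('')) > 0)                    # falls back to uuid4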
241,751
|
ministryofjustice/django-form-error-reporting
|
form_error_reporting.py
|
GARequestErrorReportingMixin.get_ga_query_dict
|
def get_ga_query_dict(self):
"""
Adds user agent and IP to the default hit parameters
"""
query_dict = super(GARequestErrorReportingMixin, self).get_ga_query_dict()
request = self.get_ga_request()
if not request:
return query_dict
user_ip = request.META.get('HTTP_X_FORWARDED_FOR', request.META.get('REMOTE_ADDR', ''))
user_ip = user_ip.split(',')[0].strip()
user_agent = request.META.get('HTTP_USER_AGENT')
user_language = request.META.get('HTTP_ACCEPT_LANGUAGE')
if user_ip:
query_dict['uip'] = user_ip
if user_agent:
query_dict['ua'] = user_agent
if user_language:
query_dict['ul'] = user_language
return query_dict
|
python
|
def get_ga_query_dict(self):
"""
Adds user agent and IP to the default hit parameters
"""
query_dict = super(GARequestErrorReportingMixin, self).get_ga_query_dict()
request = self.get_ga_request()
if not request:
return query_dict
user_ip = request.META.get('HTTP_X_FORWARDED_FOR', request.META.get('REMOTE_ADDR', ''))
user_ip = user_ip.split(',')[0].strip()
user_agent = request.META.get('HTTP_USER_AGENT')
user_language = request.META.get('HTTP_ACCEPT_LANGUAGE')
if user_ip:
query_dict['uip'] = user_ip
if user_agent:
query_dict['ua'] = user_agent
if user_language:
query_dict['ul'] = user_language
return query_dict
|
[
"def",
"get_ga_query_dict",
"(",
"self",
")",
":",
"query_dict",
"=",
"super",
"(",
"GARequestErrorReportingMixin",
",",
"self",
")",
".",
"get_ga_query_dict",
"(",
")",
"request",
"=",
"self",
".",
"get_ga_request",
"(",
")",
"if",
"not",
"request",
":",
"return",
"query_dict",
"user_ip",
"=",
"request",
".",
"META",
".",
"get",
"(",
"'HTTP_X_FORWARDED_FOR'",
",",
"request",
".",
"META",
".",
"get",
"(",
"'REMOTE_ADDR'",
",",
"''",
")",
")",
"user_ip",
"=",
"user_ip",
".",
"split",
"(",
"','",
")",
"[",
"0",
"]",
".",
"strip",
"(",
")",
"user_agent",
"=",
"request",
".",
"META",
".",
"get",
"(",
"'HTTP_USER_AGENT'",
")",
"user_language",
"=",
"request",
".",
"META",
".",
"get",
"(",
"'HTTP_ACCEPT_LANGUAGE'",
")",
"if",
"user_ip",
":",
"query_dict",
"[",
"'uip'",
"]",
"=",
"user_ip",
"if",
"user_agent",
":",
"query_dict",
"[",
"'ua'",
"]",
"=",
"user_agent",
"if",
"user_language",
":",
"query_dict",
"[",
"'ul'",
"]",
"=",
"user_language",
"return",
"query_dict"
] |
Adds user agent and IP to the default hit parameters
|
[
"Adds",
"user",
"agent",
"and",
"IP",
"to",
"the",
"default",
"hit",
"parameters"
] |
2d08dd5cc4321e1abf49241c515ccd7050d9f828
|
https://github.com/ministryofjustice/django-form-error-reporting/blob/2d08dd5cc4321e1abf49241c515ccd7050d9f828/form_error_reporting.py#L179-L197
|
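A minimal sketch of the client-IP selection shown above: take the first X-Forwarded-For entry if present, otherwise REMOTE_ADDR; the sample addresses are documentation-range placeholders.

def client_ip(meta):
    """Pick the originating IP: first X-Forwarded-For entry, else REMOTE_ADDR."""
    forwarded = meta.get('HTTP_X_FORWARDED_FOR', meta.get('REMOTE_ADDR', ''))
    return forwarded.split(',')[0].strip()

print(client_ip({'HTTP_X_FORWARDED_FOR': '203.0.113.7, 10.0.0.1'}))  # 203.0.113.7
print(client_ip({'REMOTE_ADDR': '192.0.2.10'}))                      # 192.0.2.10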
241,752
|
datakortet/dkfileutils
|
tasks.py
|
build_js
|
def build_js(ctx, force=False):
"""Build all javascript files.
"""
for fname in JSX_FILENAMES:
jstools.babel(
ctx,
'{pkg.source_js}/' + fname,
'{pkg.django_static}/{pkg.name}/js/' + fname + '.js',
force=force
)
|
python
|
def build_js(ctx, force=False):
"""Build all javascript files.
"""
for fname in JSX_FILENAMES:
jstools.babel(
ctx,
'{pkg.source_js}/' + fname,
'{pkg.django_static}/{pkg.name}/js/' + fname + '.js',
force=force
)
|
[
"def",
"build_js",
"(",
"ctx",
",",
"force",
"=",
"False",
")",
":",
"for",
"fname",
"in",
"JSX_FILENAMES",
":",
"jstools",
".",
"babel",
"(",
"ctx",
",",
"'{pkg.source_js}/'",
"+",
"fname",
",",
"'{pkg.django_static}/{pkg.name}/js/'",
"+",
"fname",
"+",
"'.js'",
",",
"force",
"=",
"force",
")"
] |
Build all javascript files.
|
[
"Build",
"all",
"javascript",
"files",
"."
] |
924098d6e2edf88ad9b3ffdec9c74530f80a7d77
|
https://github.com/datakortet/dkfileutils/blob/924098d6e2edf88ad9b3ffdec9c74530f80a7d77/tasks.py#L79-L88
|
241,753
|
datakortet/dkfileutils
|
tasks.py
|
build
|
def build(ctx, less=False, docs=False, js=False, force=False):
"""Build everything and collectstatic.
"""
specified = any([less, docs, js])
buildall = not specified
if buildall or less:
less_fname = ctx.pkg.source_less / ctx.pkg.name + '.less'
if less_fname.exists():
lessc.LessRule(
ctx,
src='{pkg.source_less}/{pkg.name}.less',
dst='{pkg.django_static}/{pkg.name}/css/{pkg.name}-{version}.min.css',
force=force
)
elif less:
print("WARNING: build --less specified, but no file at:", less_fname)
if buildall or docs:
if WARN_ABOUT_SETTINGS:
warnings.warn(
"autodoc might need a dummy settings file in the root of "
"your package. Since it runs in a separate process you cannot"
"use settings.configure()"
)
doctools.build(ctx, force=force)
if buildall or js:
build_js(ctx, force)
if HAVE_SETTINGS and (force or changed(ctx.pkg.django_static)):
collectstatic(ctx, DJANGO_SETTINGS_MODULE)
|
python
|
def build(ctx, less=False, docs=False, js=False, force=False):
"""Build everything and collectstatic.
"""
specified = any([less, docs, js])
buildall = not specified
if buildall or less:
less_fname = ctx.pkg.source_less / ctx.pkg.name + '.less'
if less_fname.exists():
lessc.LessRule(
ctx,
src='{pkg.source_less}/{pkg.name}.less',
dst='{pkg.django_static}/{pkg.name}/css/{pkg.name}-{version}.min.css',
force=force
)
elif less:
print("WARNING: build --less specified, but no file at:", less_fname)
if buildall or docs:
if WARN_ABOUT_SETTINGS:
warnings.warn(
"autodoc might need a dummy settings file in the root of "
"your package. Since it runs in a separate process you cannot"
"use settings.configure()"
)
doctools.build(ctx, force=force)
if buildall or js:
build_js(ctx, force)
if HAVE_SETTINGS and (force or changed(ctx.pkg.django_static)):
collectstatic(ctx, DJANGO_SETTINGS_MODULE)
|
[
"def",
"build",
"(",
"ctx",
",",
"less",
"=",
"False",
",",
"docs",
"=",
"False",
",",
"js",
"=",
"False",
",",
"force",
"=",
"False",
")",
":",
"specified",
"=",
"any",
"(",
"[",
"less",
",",
"docs",
",",
"js",
"]",
")",
"buildall",
"=",
"not",
"specified",
"if",
"buildall",
"or",
"less",
":",
"less_fname",
"=",
"ctx",
".",
"pkg",
".",
"source_less",
"/",
"ctx",
".",
"pkg",
".",
"name",
"+",
"'.less'",
"if",
"less_fname",
".",
"exists",
"(",
")",
":",
"lessc",
".",
"LessRule",
"(",
"ctx",
",",
"src",
"=",
"'{pkg.source_less}/{pkg.name}.less'",
",",
"dst",
"=",
"'{pkg.django_static}/{pkg.name}/css/{pkg.name}-{version}.min.css'",
",",
"force",
"=",
"force",
")",
"elif",
"less",
":",
"print",
"(",
"\"WARNING: build --less specified, but no file at:\"",
",",
"less_fname",
")",
"if",
"buildall",
"or",
"docs",
":",
"if",
"WARN_ABOUT_SETTINGS",
":",
"warnings",
".",
"warn",
"(",
"\"autodoc might need a dummy settings file in the root of \"",
"\"your package. Since it runs in a separate process you cannot\"",
"\"use settings.configure()\"",
")",
"doctools",
".",
"build",
"(",
"ctx",
",",
"force",
"=",
"force",
")",
"if",
"buildall",
"or",
"js",
":",
"build_js",
"(",
"ctx",
",",
"force",
")",
"if",
"HAVE_SETTINGS",
"and",
"(",
"force",
"or",
"changed",
"(",
"ctx",
".",
"pkg",
".",
"django_static",
")",
")",
":",
"collectstatic",
"(",
"ctx",
",",
"DJANGO_SETTINGS_MODULE",
")"
] |
Build everything and collectstatic.
|
[
"Build",
"everything",
"and",
"collectstatic",
"."
] |
924098d6e2edf88ad9b3ffdec9c74530f80a7d77
|
https://github.com/datakortet/dkfileutils/blob/924098d6e2edf88ad9b3ffdec9c74530f80a7d77/tasks.py#L92-L123
|
241,754
|
datakortet/dkfileutils
|
tasks.py
|
watch
|
def watch(ctx):
"""Automatically run build whenever a relevant file changes.
"""
watcher = Watcher(ctx)
watcher.watch_directory(
path='{pkg.source_less}', ext='.less',
action=lambda e: build(ctx, less=True)
)
watcher.watch_directory(
path='{pkg.source_js}', ext='.jsx',
action=lambda e: build(ctx, js=True)
)
watcher.watch_directory(
path='{pkg.docs}', ext='.rst',
action=lambda e: build(ctx, docs=True)
)
watcher.start()
|
python
|
def watch(ctx):
"""Automatically run build whenever a relevant file changes.
"""
watcher = Watcher(ctx)
watcher.watch_directory(
path='{pkg.source_less}', ext='.less',
action=lambda e: build(ctx, less=True)
)
watcher.watch_directory(
path='{pkg.source_js}', ext='.jsx',
action=lambda e: build(ctx, js=True)
)
watcher.watch_directory(
path='{pkg.docs}', ext='.rst',
action=lambda e: build(ctx, docs=True)
)
watcher.start()
|
[
"def",
"watch",
"(",
"ctx",
")",
":",
"watcher",
"=",
"Watcher",
"(",
"ctx",
")",
"watcher",
".",
"watch_directory",
"(",
"path",
"=",
"'{pkg.source_less}'",
",",
"ext",
"=",
"'.less'",
",",
"action",
"=",
"lambda",
"e",
":",
"build",
"(",
"ctx",
",",
"less",
"=",
"True",
")",
")",
"watcher",
".",
"watch_directory",
"(",
"path",
"=",
"'{pkg.source_js}'",
",",
"ext",
"=",
"'.jsx'",
",",
"action",
"=",
"lambda",
"e",
":",
"build",
"(",
"ctx",
",",
"js",
"=",
"True",
")",
")",
"watcher",
".",
"watch_directory",
"(",
"path",
"=",
"'{pkg.docs}'",
",",
"ext",
"=",
"'.rst'",
",",
"action",
"=",
"lambda",
"e",
":",
"build",
"(",
"ctx",
",",
"docs",
"=",
"True",
")",
")",
"watcher",
".",
"start",
"(",
")"
] |
Automatically run build whenever a relevant file changes.
|
[
"Automatically",
"run",
"build",
"whenever",
"a",
"relevant",
"file",
"changes",
"."
] |
924098d6e2edf88ad9b3ffdec9c74530f80a7d77
|
https://github.com/datakortet/dkfileutils/blob/924098d6e2edf88ad9b3ffdec9c74530f80a7d77/tasks.py#L127-L143
|
241,755
|
Metatab/tableintuit
|
tableintuit/stats.py
|
_force_float
|
def _force_float(v):
""" Converts given argument to float. On fail logs warning and returns 0.0.
Args:
v (any): value to convert to float
Returns:
float: converted v or 0.0 if conversion failed.
"""
try:
return float(v)
except Exception as exc:
return float('nan')
logger.warning('Failed to convert {} to float with {} error. Using 0 instead.'.format(v, exc))
|
python
|
def _force_float(v):
""" Converts given argument to float. On fail logs warning and returns 0.0.
Args:
v (any): value to convert to float
Returns:
float: converted v or 0.0 if conversion failed.
"""
try:
return float(v)
except Exception as exc:
return float('nan')
logger.warning('Failed to convert {} to float with {} error. Using 0 instead.'.format(v, exc))
|
[
"def",
"_force_float",
"(",
"v",
")",
":",
"try",
":",
"return",
"float",
"(",
"v",
")",
"except",
"Exception",
"as",
"exc",
":",
"return",
"float",
"(",
"'nan'",
")",
"logger",
".",
"warning",
"(",
"'Failed to convert {} to float with {} error. Using 0 instead.'",
".",
"format",
"(",
"v",
",",
"exc",
")",
")"
] |
Converts given argument to float. On fail logs warning and returns 0.0.
Args:
v (any): value to convert to float
Returns:
float: converted v or 0.0 if conversion failed.
|
[
"Converts",
"given",
"argument",
"to",
"float",
".",
"On",
"fail",
"logs",
"warning",
"and",
"returns",
"0",
".",
"0",
"."
] |
9a3d500d5d90e44e6637dd17ca4c8dae474d6d4c
|
https://github.com/Metatab/tableintuit/blob/9a3d500d5d90e44e6637dd17ca4c8dae474d6d4c/tableintuit/stats.py#L509-L523
|
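In the record above the logger.warning call sits after the return statement and is never reached, and the docstring promises 0.0 while float('nan') is returned. A hedged sketch of one consistent ordering, with the warning emitted before the fallback value; the function and message wording here are illustrative.

import logging

logger = logging.getLogger(__name__)

def force_float(v):
    """Convert v to float; on failure, log a warning and return NaN."""
    try:
        return float(v)
    except (TypeError, ValueError) as exc:
        # Warn first, then return, so the message is actually emitted.
        logger.warning('Failed to convert %r to float (%s). Using NaN instead.', v, exc)
        return float('nan')

print(force_float('3.5'))   # 3.5
print(force_float('abc'))   # nan (and a warning is logged)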
241,756
|
Metatab/tableintuit
|
tableintuit/stats.py
|
StatSet.dict
|
def dict(self):
"""Return a dict that can be passed into the ColumnStats constructor"""
try:
skewness = self.skewness
kurtosis = self.kurtosis
except ZeroDivisionError:
skewness = kurtosis = float('nan')
base_cols = [
('name', self.column_name),
('flags', self.flags),
('type', self.type.__name__ ),
('lom', self.lom),
('count', self.n),
('nuniques', self.nuniques),
('width', self.size),
]
descriptive_cols = [
('mean', self.mean),
('std', self.stddev),
('min', self.min),
('p25', self.p25),
('p50', self.p50),
('p75', self.p75),
('max', self.max)
]
distribution_cols = [
('skewness', skewness),
('kurtosis', kurtosis),
('hist', self.bins),
('text_hist', text_hist(self.bins)),
]
sample_values_cols = [
('uvalues', self.uvalues)
]
return OrderedDict(
base_cols +
(descriptive_cols if self.descriptive else []) +
(distribution_cols if self.distribution else []) +
(sample_values_cols if self.sample_values else [])
)
|
python
|
def dict(self):
"""Return a dict that can be passed into the ColumnStats constructor"""
try:
skewness = self.skewness
kurtosis = self.kurtosis
except ZeroDivisionError:
skewness = kurtosis = float('nan')
base_cols = [
('name', self.column_name),
('flags', self.flags),
('type', self.type.__name__ ),
('lom', self.lom),
('count', self.n),
('nuniques', self.nuniques),
('width', self.size),
]
descriptive_cols = [
('mean', self.mean),
('std', self.stddev),
('min', self.min),
('p25', self.p25),
('p50', self.p50),
('p75', self.p75),
('max', self.max)
]
distribution_cols = [
('skewness', skewness),
('kurtosis', kurtosis),
('hist', self.bins),
('text_hist', text_hist(self.bins)),
]
sample_values_cols = [
('uvalues', self.uvalues)
]
return OrderedDict(
base_cols +
(descriptive_cols if self.descriptive else []) +
(distribution_cols if self.distribution else []) +
(sample_values_cols if self.sample_values else [])
)
|
[
"def",
"dict",
"(",
"self",
")",
":",
"try",
":",
"skewness",
"=",
"self",
".",
"skewness",
"kurtosis",
"=",
"self",
".",
"kurtosis",
"except",
"ZeroDivisionError",
":",
"skewness",
"=",
"kurtosis",
"=",
"float",
"(",
"'nan'",
")",
"base_cols",
"=",
"[",
"(",
"'name'",
",",
"self",
".",
"column_name",
")",
",",
"(",
"'flags'",
",",
"self",
".",
"flags",
")",
",",
"(",
"'type'",
",",
"self",
".",
"type",
".",
"__name__",
")",
",",
"(",
"'lom'",
",",
"self",
".",
"lom",
")",
",",
"(",
"'count'",
",",
"self",
".",
"n",
")",
",",
"(",
"'nuniques'",
",",
"self",
".",
"nuniques",
")",
",",
"(",
"'width'",
",",
"self",
".",
"size",
")",
",",
"]",
"descriptive_cols",
"=",
"[",
"(",
"'mean'",
",",
"self",
".",
"mean",
")",
",",
"(",
"'std'",
",",
"self",
".",
"stddev",
")",
",",
"(",
"'min'",
",",
"self",
".",
"min",
")",
",",
"(",
"'p25'",
",",
"self",
".",
"p25",
")",
",",
"(",
"'p50'",
",",
"self",
".",
"p50",
")",
",",
"(",
"'p75'",
",",
"self",
".",
"p75",
")",
",",
"(",
"'max'",
",",
"self",
".",
"max",
")",
"]",
"distribution_cols",
"=",
"[",
"(",
"'skewness'",
",",
"skewness",
")",
",",
"(",
"'kurtosis'",
",",
"kurtosis",
")",
",",
"(",
"'hist'",
",",
"self",
".",
"bins",
")",
",",
"(",
"'text_hist'",
",",
"text_hist",
"(",
"self",
".",
"bins",
")",
")",
",",
"]",
"sample_values_cols",
"=",
"[",
"(",
"'uvalues'",
",",
"self",
".",
"uvalues",
")",
"]",
"return",
"OrderedDict",
"(",
"base_cols",
"+",
"(",
"descriptive_cols",
"if",
"self",
".",
"descriptive",
"else",
"[",
"]",
")",
"+",
"(",
"distribution_cols",
"if",
"self",
".",
"distribution",
"else",
"[",
"]",
")",
"+",
"(",
"sample_values_cols",
"if",
"self",
".",
"sample_values",
"else",
"[",
"]",
")",
")"
] |
Return a dict that can be passed into the ColumnStats constructor
|
[
"Return",
"a",
"dict",
"that",
"can",
"be",
"passed",
"into",
"the",
"ColumnStats",
"constructor"
] |
9a3d500d5d90e44e6637dd17ca4c8dae474d6d4c
|
https://github.com/Metatab/tableintuit/blob/9a3d500d5d90e44e6637dd17ca4c8dae474d6d4c/tableintuit/stats.py#L306-L351
|
241,757
|
Metatab/tableintuit
|
tableintuit/stats.py
|
Stats.run
|
def run(self):
""" Run the stats. The source must yield Row proxies.
"""
self._func, self._func_code = self.build()
def process_row(row):
try:
self._func(self._stats, row)
except TypeError as e:
raise TypeError("Failed for '{}'; {}".format(self._func_code, e))
except KeyError:
raise KeyError(
'Failed to find key in row. headers = "{}", code = "{}" '
.format(list(row.keys()), self._func_code))
except Exception as e:
raise type(e)(
'General exception in stats. headers = "{}", code = "{}": {} '
.format(list(row.keys()), self._func_code, e))
# Use all of the rows in the source
if self._sample_size is None:
for i, row in enumerate(self._source):
process_row(row)
    # Use a sample of rows, evenly distributed through the source
else:
skip_rate = self._sample_size / self._n_rows
i = 0
skip = skip_rate
for j, row in enumerate(self._source):
skip += skip_rate
if skip >= 1:
skip -= 1
i += 1
process_row(row)
if i < 5000: # Since the hist bins aren't built until 5K row
for k, v in self._stats.items():
v._build_hist_bins()
return self
|
python
|
def run(self):
""" Run the stats. The source must yield Row proxies.
"""
self._func, self._func_code = self.build()
def process_row(row):
try:
self._func(self._stats, row)
except TypeError as e:
raise TypeError("Failed for '{}'; {}".format(self._func_code, e))
except KeyError:
raise KeyError(
'Failed to find key in row. headers = "{}", code = "{}" '
.format(list(row.keys()), self._func_code))
except Exception as e:
raise type(e)(
'General exception in stats. headers = "{}", code = "{}": {} '
.format(list(row.keys()), self._func_code, e))
# Use all of the rows in the source
if self._sample_size is None:
for i, row in enumerate(self._source):
process_row(row)
    # Use a sample of rows, evenly distributed through the source
else:
skip_rate = self._sample_size / self._n_rows
i = 0
skip = skip_rate
for j, row in enumerate(self._source):
skip += skip_rate
if skip >= 1:
skip -= 1
i += 1
process_row(row)
if i < 5000: # Since the hist bins aren't built until 5K row
for k, v in self._stats.items():
v._build_hist_bins()
return self
|
[
"def",
"run",
"(",
"self",
")",
":",
"self",
".",
"_func",
",",
"self",
".",
"_func_code",
"=",
"self",
".",
"build",
"(",
")",
"def",
"process_row",
"(",
"row",
")",
":",
"try",
":",
"self",
".",
"_func",
"(",
"self",
".",
"_stats",
",",
"row",
")",
"except",
"TypeError",
"as",
"e",
":",
"raise",
"TypeError",
"(",
"\"Failed for '{}'; {}\"",
".",
"format",
"(",
"self",
".",
"_func_code",
",",
"e",
")",
")",
"except",
"KeyError",
":",
"raise",
"KeyError",
"(",
"'Failed to find key in row. headers = \"{}\", code = \"{}\" '",
".",
"format",
"(",
"list",
"(",
"row",
".",
"keys",
"(",
")",
")",
",",
"self",
".",
"_func_code",
")",
")",
"except",
"Exception",
"as",
"e",
":",
"raise",
"type",
"(",
"e",
")",
"(",
"'General exception in stats. headers = \"{}\", code = \"{}\": {} '",
".",
"format",
"(",
"list",
"(",
"row",
".",
"keys",
"(",
")",
")",
",",
"self",
".",
"_func_code",
",",
"e",
")",
")",
"# Use all of the rows in the source",
"if",
"self",
".",
"_sample_size",
"is",
"None",
":",
"for",
"i",
",",
"row",
"in",
"enumerate",
"(",
"self",
".",
"_source",
")",
":",
"process_row",
"(",
"row",
")",
"# Use a sample of rows, evenly distributed though the source",
"else",
":",
"skip_rate",
"=",
"self",
".",
"_sample_size",
"/",
"self",
".",
"_n_rows",
"i",
"=",
"0",
"skip",
"=",
"skip_rate",
"for",
"j",
",",
"row",
"in",
"enumerate",
"(",
"self",
".",
"_source",
")",
":",
"skip",
"+=",
"skip_rate",
"if",
"skip",
">=",
"1",
":",
"skip",
"-=",
"1",
"i",
"+=",
"1",
"process_row",
"(",
"row",
")",
"if",
"i",
"<",
"5000",
":",
"# Since the hist bins aren't built until 5K row",
"for",
"k",
",",
"v",
"in",
"self",
".",
"_stats",
".",
"items",
"(",
")",
":",
"v",
".",
"_build_hist_bins",
"(",
")",
"return",
"self"
] |
Run the stats. The source must yield Row proxies.
|
[
"Run",
"the",
"stats",
".",
"The",
"source",
"must",
"yield",
"Row",
"proxies",
"."
] |
9a3d500d5d90e44e6637dd17ca4c8dae474d6d4c
|
https://github.com/Metatab/tableintuit/blob/9a3d500d5d90e44e6637dd17ca4c8dae474d6d4c/tableintuit/stats.py#L427-L470
|
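A standalone sketch of the evenly-spaced sampling loop used in run() above: a fractional credit accumulates per row and a row is taken each time it reaches 1. The names and the range(100) input are illustrative.

def sample_evenly(rows, sample_size, n_rows):
    """Yield roughly sample_size rows spread evenly across n_rows input rows."""
    rate = sample_size / n_rows   # fraction of rows to keep
    credit = rate
    for row in rows:
        credit += rate
        if credit >= 1:
            credit -= 1
            yield row

picked = list(sample_evenly(range(100), sample_size=10, n_rows=100))
print(len(picked), picked[:5])  # 10 [8, 18, 28, 38, 48]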
241,758
|
openp2pdesign/makerlabs
|
makerlabs/hackaday_io.py
|
get_labs
|
def get_labs(format):
"""Gets Hackerspaces data from hackaday.io."""
hackerspaces_json = data_from_hackaday_io(hackaday_io_labs_map_url)
hackerspaces = {}
# Load all the Hackerspaces
for i in hackerspaces_json:
current_lab = Hackerspace()
current_lab.id = i["id"]
current_lab.url = "https://hackaday.io/hackerspace/" + current_lab.id
current_lab.name = i["name"]
if len(i["description"]) != 0:
current_lab.description = i["description"]
elif len(i["summary"]) != 0:
current_lab.description = i["summary"]
current_lab.created_at = i["moments"]["exact"]
# Check if there are coordinates
if i["latlon"] is not None:
latlon = json.loads(i["latlon"])
current_lab.latitude = latlon["lat"]
current_lab.longitude = latlon["lng"]
# Get country, county and city from them
country = geolocator.reverse(
[latlon["lat"], latlon["lng"]])
current_lab.country = country.raw[
"address"]["country"]
current_lab.address = country.raw["display_name"]
current_lab.address_1 = country.raw["display_name"]
current_lab.country_code = country.raw[
"address"]["country_code"]
current_lab.county = country.raw[
"address"]["state_district"]
current_lab.city = country.raw[
"address"]["city"]
current_lab.postal_code = country.raw[
"address"]["postcode"]
else:
# For labs without a location or coordinates
# add 0,0 as coordinates
current_lab.latitude = 0.0
current_lab.longitude = 0.0
# Add the lab
hackerspaces[i["name"]] = current_lab
# Return a dictiornary / json
if format.lower() == "dict" or format.lower() == "json":
output = {}
for j in hackerspaces:
output[j] = hackerspaces[j].__dict__
# Return a geojson
elif format.lower() == "geojson" or format.lower() == "geo":
labs_list = []
for l in hackerspaces:
single = hackerspaces[l].__dict__
single_lab = Feature(
type="Feature",
geometry=Point((single["latitude"], single["longitude"])),
properties=single)
labs_list.append(single_lab)
output = dumps(FeatureCollection(labs_list))
# Return a Pandas DataFrame
elif format.lower() == "pandas" or format.lower() == "dataframe":
output = {}
for j in hackerspaces:
output[j] = hackerspaces[j].__dict__
# Transform the dict into a Pandas DataFrame
output = pd.DataFrame.from_dict(output)
output = output.transpose()
# Return an object
elif format.lower() == "object" or format.lower() == "obj":
output = hackerspaces
    # Default: return an object
else:
output = hackerspaces
# Return a proper json
if format.lower() == "json":
output = json.dumps(output)
return output
|
python
|
def get_labs(format):
"""Gets Hackerspaces data from hackaday.io."""
hackerspaces_json = data_from_hackaday_io(hackaday_io_labs_map_url)
hackerspaces = {}
# Load all the Hackerspaces
for i in hackerspaces_json:
current_lab = Hackerspace()
current_lab.id = i["id"]
current_lab.url = "https://hackaday.io/hackerspace/" + current_lab.id
current_lab.name = i["name"]
if len(i["description"]) != 0:
current_lab.description = i["description"]
elif len(i["summary"]) != 0:
current_lab.description = i["summary"]
current_lab.created_at = i["moments"]["exact"]
# Check if there are coordinates
if i["latlon"] is not None:
latlon = json.loads(i["latlon"])
current_lab.latitude = latlon["lat"]
current_lab.longitude = latlon["lng"]
# Get country, county and city from them
country = geolocator.reverse(
[latlon["lat"], latlon["lng"]])
current_lab.country = country.raw[
"address"]["country"]
current_lab.address = country.raw["display_name"]
current_lab.address_1 = country.raw["display_name"]
current_lab.country_code = country.raw[
"address"]["country_code"]
current_lab.county = country.raw[
"address"]["state_district"]
current_lab.city = country.raw[
"address"]["city"]
current_lab.postal_code = country.raw[
"address"]["postcode"]
else:
# For labs without a location or coordinates
# add 0,0 as coordinates
current_lab.latitude = 0.0
current_lab.longitude = 0.0
# Add the lab
hackerspaces[i["name"]] = current_lab
    # Return a dictionary / json
if format.lower() == "dict" or format.lower() == "json":
output = {}
for j in hackerspaces:
output[j] = hackerspaces[j].__dict__
# Return a geojson
elif format.lower() == "geojson" or format.lower() == "geo":
labs_list = []
for l in hackerspaces:
single = hackerspaces[l].__dict__
single_lab = Feature(
type="Feature",
geometry=Point((single["latitude"], single["longitude"])),
properties=single)
labs_list.append(single_lab)
output = dumps(FeatureCollection(labs_list))
# Return a Pandas DataFrame
elif format.lower() == "pandas" or format.lower() == "dataframe":
output = {}
for j in hackerspaces:
output[j] = hackerspaces[j].__dict__
# Transform the dict into a Pandas DataFrame
output = pd.DataFrame.from_dict(output)
output = output.transpose()
# Return an object
elif format.lower() == "object" or format.lower() == "obj":
output = hackerspaces
    # Default: return an object
else:
output = hackerspaces
# Return a proper json
if format.lower() == "json":
output = json.dumps(output)
return output
|
[
"def",
"get_labs",
"(",
"format",
")",
":",
"hackerspaces_json",
"=",
"data_from_hackaday_io",
"(",
"hackaday_io_labs_map_url",
")",
"hackerspaces",
"=",
"{",
"}",
"# Load all the Hackerspaces",
"for",
"i",
"in",
"hackerspaces_json",
":",
"current_lab",
"=",
"Hackerspace",
"(",
")",
"current_lab",
".",
"id",
"=",
"i",
"[",
"\"id\"",
"]",
"current_lab",
".",
"url",
"=",
"\"https://hackaday.io/hackerspace/\"",
"+",
"current_lab",
".",
"id",
"current_lab",
".",
"name",
"=",
"i",
"[",
"\"name\"",
"]",
"if",
"len",
"(",
"i",
"[",
"\"description\"",
"]",
")",
"!=",
"0",
":",
"current_lab",
".",
"description",
"=",
"i",
"[",
"\"description\"",
"]",
"elif",
"len",
"(",
"i",
"[",
"\"summary\"",
"]",
")",
"!=",
"0",
":",
"current_lab",
".",
"description",
"=",
"i",
"[",
"\"summary\"",
"]",
"current_lab",
".",
"created_at",
"=",
"i",
"[",
"\"moments\"",
"]",
"[",
"\"exact\"",
"]",
"# Check if there are coordinates",
"if",
"i",
"[",
"\"latlon\"",
"]",
"is",
"not",
"None",
":",
"latlon",
"=",
"json",
".",
"loads",
"(",
"i",
"[",
"\"latlon\"",
"]",
")",
"current_lab",
".",
"latitude",
"=",
"latlon",
"[",
"\"lat\"",
"]",
"current_lab",
".",
"longitude",
"=",
"latlon",
"[",
"\"lng\"",
"]",
"# Get country, county and city from them",
"country",
"=",
"geolocator",
".",
"reverse",
"(",
"[",
"latlon",
"[",
"\"lat\"",
"]",
",",
"latlon",
"[",
"\"lng\"",
"]",
"]",
")",
"current_lab",
".",
"country",
"=",
"country",
".",
"raw",
"[",
"\"address\"",
"]",
"[",
"\"country\"",
"]",
"current_lab",
".",
"address",
"=",
"country",
".",
"raw",
"[",
"\"display_name\"",
"]",
"current_lab",
".",
"address_1",
"=",
"country",
".",
"raw",
"[",
"\"display_name\"",
"]",
"current_lab",
".",
"country_code",
"=",
"country",
".",
"raw",
"[",
"\"address\"",
"]",
"[",
"\"country_code\"",
"]",
"current_lab",
".",
"county",
"=",
"country",
".",
"raw",
"[",
"\"address\"",
"]",
"[",
"\"state_district\"",
"]",
"current_lab",
".",
"city",
"=",
"country",
".",
"raw",
"[",
"\"address\"",
"]",
"[",
"\"city\"",
"]",
"current_lab",
".",
"postal_code",
"=",
"country",
".",
"raw",
"[",
"\"address\"",
"]",
"[",
"\"postcode\"",
"]",
"else",
":",
"# For labs without a location or coordinates",
"# add 0,0 as coordinates",
"current_lab",
".",
"latitude",
"=",
"0.0",
"current_lab",
".",
"longitude",
"=",
"0.0",
"# Add the lab",
"hackerspaces",
"[",
"i",
"[",
"\"name\"",
"]",
"]",
"=",
"current_lab",
"# Return a dictiornary / json",
"if",
"format",
".",
"lower",
"(",
")",
"==",
"\"dict\"",
"or",
"format",
".",
"lower",
"(",
")",
"==",
"\"json\"",
":",
"output",
"=",
"{",
"}",
"for",
"j",
"in",
"hackerspaces",
":",
"output",
"[",
"j",
"]",
"=",
"hackerspaces",
"[",
"j",
"]",
".",
"__dict__",
"# Return a geojson",
"elif",
"format",
".",
"lower",
"(",
")",
"==",
"\"geojson\"",
"or",
"format",
".",
"lower",
"(",
")",
"==",
"\"geo\"",
":",
"labs_list",
"=",
"[",
"]",
"for",
"l",
"in",
"hackerspaces",
":",
"single",
"=",
"hackerspaces",
"[",
"l",
"]",
".",
"__dict__",
"single_lab",
"=",
"Feature",
"(",
"type",
"=",
"\"Feature\"",
",",
"geometry",
"=",
"Point",
"(",
"(",
"single",
"[",
"\"latitude\"",
"]",
",",
"single",
"[",
"\"longitude\"",
"]",
")",
")",
",",
"properties",
"=",
"single",
")",
"labs_list",
".",
"append",
"(",
"single_lab",
")",
"output",
"=",
"dumps",
"(",
"FeatureCollection",
"(",
"labs_list",
")",
")",
"# Return a Pandas DataFrame",
"elif",
"format",
".",
"lower",
"(",
")",
"==",
"\"pandas\"",
"or",
"format",
".",
"lower",
"(",
")",
"==",
"\"dataframe\"",
":",
"output",
"=",
"{",
"}",
"for",
"j",
"in",
"hackerspaces",
":",
"output",
"[",
"j",
"]",
"=",
"hackerspaces",
"[",
"j",
"]",
".",
"__dict__",
"# Transform the dict into a Pandas DataFrame",
"output",
"=",
"pd",
".",
"DataFrame",
".",
"from_dict",
"(",
"output",
")",
"output",
"=",
"output",
".",
"transpose",
"(",
")",
"# Return an object",
"elif",
"format",
".",
"lower",
"(",
")",
"==",
"\"object\"",
"or",
"format",
".",
"lower",
"(",
")",
"==",
"\"obj\"",
":",
"output",
"=",
"hackerspaces",
"# Default: return an oject",
"else",
":",
"output",
"=",
"hackerspaces",
"# Return a proper json",
"if",
"format",
".",
"lower",
"(",
")",
"==",
"\"json\"",
":",
"output",
"=",
"json",
".",
"dumps",
"(",
"output",
")",
"return",
"output"
] |
Gets Hackerspaces data from hackaday.io.
|
[
"Gets",
"Hackerspaces",
"data",
"from",
"hackaday",
".",
"io",
"."
] |
b5838440174f10d370abb671358db9a99d7739fd
|
https://github.com/openp2pdesign/makerlabs/blob/b5838440174f10d370abb671358db9a99d7739fd/makerlabs/hackaday_io.py#L57-L137
|
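A small sketch of the GeoJSON branch, assuming the geojson package (Feature, Point, FeatureCollection, dumps) that the record appears to rely on; the lab data is a placeholder. GeoJSON positions are (longitude, latitude) per RFC 7946, so the sketch passes them in that order.

from geojson import Feature, FeatureCollection, Point, dumps

labs = {
    'Example Lab': {'name': 'Example Lab', 'latitude': 45.07, 'longitude': 7.69},
}

features = []
for lab in labs.values():
    # GeoJSON positions are (longitude, latitude) order.
    features.append(Feature(geometry=Point((lab['longitude'], lab['latitude'])),
                            properties=lab))

print(dumps(FeatureCollection(features)))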
241,759
|
shreyaspotnis/rampage
|
rampage/server.py
|
check_ramp_for_errors
|
def check_ramp_for_errors(ramp_data):
"""Checks ramp for errors. This is experiment specific checklist."""
error_list = []
keyframe_list = ramps.KeyFrameList(ramp_data['keyframes'])
sorted_key_list = keyframe_list.sorted_key_list()
channel_list = [ramps.Channel(ch_name, ramp_data['channels'][ch_name],
keyframe_list)
for ch_name in ramp_data['channels']]
sorted_absolute_times = [keyframe_list.get_absolute_time(sk) for sk
in sorted_key_list]
ramp_properties = ramp_data['properties']
jump_resolution = ramp_properties['jump_resolution']
for key_name, abs_time in zip(sorted_key_list, sorted_absolute_times):
# check if all times are +ve
if abs_time < 0.0:
error_fmt = "keyframe \'{0}\' has negative absolute time {1}"
error_str = error_fmt.format(key_name, abs_time)
error_list.append(error_str)
# check if all times are a multiple of minimum resolution
steps_float = abs_time / jump_resolution
steps_residue = steps_float - round(steps_float)
if steps_residue > 0.0001:
error_fmt = ("keyframe \'{0}\' has absolute time {1} which is not"
" a multiple of jump_resolution {2}")
error_str = error_fmt.format(key_name, abs_time, jump_resolution)
error_list.append(error_str)
# find missing channels
ch_ids = digital_channel_ids()
ch_ids += dev2_analog_ids()
# ignore p31, since we used that for Dev1 timing
for ch_id in ch_ids:
n_found = 0
for ch in channel_list:
if ch.dct['id'] == ch_id:
n_found += 1
if n_found != 1:
error_fmt = '{0} copies of {1} found. There should only be 1'
error_str = error_fmt.format(n_found, ch_id)
error_list.append(error_str)
# check for timing overlap in keyframelist
error_keyname = keyframe_list.do_keyframes_overlap()
if error_keyname is not None:
error_fmt = '{0} overlaps with the next keyframe'
error_str = error_fmt.format(error_keyname)
error_list.append(error_str)
return error_list
|
python
|
def check_ramp_for_errors(ramp_data):
"""Checks ramp for errors. This is experiment specific checklist."""
error_list = []
keyframe_list = ramps.KeyFrameList(ramp_data['keyframes'])
sorted_key_list = keyframe_list.sorted_key_list()
channel_list = [ramps.Channel(ch_name, ramp_data['channels'][ch_name],
keyframe_list)
for ch_name in ramp_data['channels']]
sorted_absolute_times = [keyframe_list.get_absolute_time(sk) for sk
in sorted_key_list]
ramp_properties = ramp_data['properties']
jump_resolution = ramp_properties['jump_resolution']
for key_name, abs_time in zip(sorted_key_list, sorted_absolute_times):
# check if all times are +ve
if abs_time < 0.0:
error_fmt = "keyframe \'{0}\' has negative absolute time {1}"
error_str = error_fmt.format(key_name, abs_time)
error_list.append(error_str)
# check if all times are a multiple of minimum resolution
steps_float = abs_time / jump_resolution
steps_residue = steps_float - round(steps_float)
if steps_residue > 0.0001:
error_fmt = ("keyframe \'{0}\' has absolute time {1} which is not"
" a multiple of jump_resolution {2}")
error_str = error_fmt.format(key_name, abs_time, jump_resolution)
error_list.append(error_str)
# find missing channels
ch_ids = digital_channel_ids()
ch_ids += dev2_analog_ids()
# ignore p31, since we used that for Dev1 timing
for ch_id in ch_ids:
n_found = 0
for ch in channel_list:
if ch.dct['id'] == ch_id:
n_found += 1
if n_found != 1:
error_fmt = '{0} copies of {1} found. There should only be 1'
error_str = error_fmt.format(n_found, ch_id)
error_list.append(error_str)
# check for timing overlap in keyframelist
error_keyname = keyframe_list.do_keyframes_overlap()
if error_keyname is not None:
error_fmt = '{0} overlaps with the next keyframe'
error_str = error_fmt.format(error_keyname)
error_list.append(error_str)
return error_list
|
[
"def",
"check_ramp_for_errors",
"(",
"ramp_data",
")",
":",
"error_list",
"=",
"[",
"]",
"keyframe_list",
"=",
"ramps",
".",
"KeyFrameList",
"(",
"ramp_data",
"[",
"'keyframes'",
"]",
")",
"sorted_key_list",
"=",
"keyframe_list",
".",
"sorted_key_list",
"(",
")",
"channel_list",
"=",
"[",
"ramps",
".",
"Channel",
"(",
"ch_name",
",",
"ramp_data",
"[",
"'channels'",
"]",
"[",
"ch_name",
"]",
",",
"keyframe_list",
")",
"for",
"ch_name",
"in",
"ramp_data",
"[",
"'channels'",
"]",
"]",
"sorted_absolute_times",
"=",
"[",
"keyframe_list",
".",
"get_absolute_time",
"(",
"sk",
")",
"for",
"sk",
"in",
"sorted_key_list",
"]",
"ramp_properties",
"=",
"ramp_data",
"[",
"'properties'",
"]",
"jump_resolution",
"=",
"ramp_properties",
"[",
"'jump_resolution'",
"]",
"for",
"key_name",
",",
"abs_time",
"in",
"zip",
"(",
"sorted_key_list",
",",
"sorted_absolute_times",
")",
":",
"# check if all times are +ve",
"if",
"abs_time",
"<",
"0.0",
":",
"error_fmt",
"=",
"\"keyframe \\'{0}\\' has negative absolute time {1}\"",
"error_str",
"=",
"error_fmt",
".",
"format",
"(",
"key_name",
",",
"abs_time",
")",
"error_list",
".",
"append",
"(",
"error_str",
")",
"# check if all times are a multiple of minimum resolution",
"steps_float",
"=",
"abs_time",
"/",
"jump_resolution",
"steps_residue",
"=",
"steps_float",
"-",
"round",
"(",
"steps_float",
")",
"if",
"steps_residue",
">",
"0.0001",
":",
"error_fmt",
"=",
"(",
"\"keyframe \\'{0}\\' has absolute time {1} which is not\"",
"\" a multiple of jump_resolution {2}\"",
")",
"error_str",
"=",
"error_fmt",
".",
"format",
"(",
"key_name",
",",
"abs_time",
",",
"jump_resolution",
")",
"error_list",
".",
"append",
"(",
"error_str",
")",
"# find missing channels",
"ch_ids",
"=",
"digital_channel_ids",
"(",
")",
"ch_ids",
"+=",
"dev2_analog_ids",
"(",
")",
"# ignore p31, since we used that for Dev1 timing",
"for",
"ch_id",
"in",
"ch_ids",
":",
"n_found",
"=",
"0",
"for",
"ch",
"in",
"channel_list",
":",
"if",
"ch",
".",
"dct",
"[",
"'id'",
"]",
"==",
"ch_id",
":",
"n_found",
"+=",
"1",
"if",
"n_found",
"!=",
"1",
":",
"error_fmt",
"=",
"'{0} copies of {1} found. There should only be 1'",
"error_str",
"=",
"error_fmt",
".",
"format",
"(",
"n_found",
",",
"ch_id",
")",
"error_list",
".",
"append",
"(",
"error_str",
")",
"# check for timing overlap in keyframelist",
"error_keyname",
"=",
"keyframe_list",
".",
"do_keyframes_overlap",
"(",
")",
"if",
"error_keyname",
"is",
"not",
"None",
":",
"error_fmt",
"=",
"'{0} overlaps with the next keyframe'",
"error_str",
"=",
"error_fmt",
".",
"format",
"(",
"error_keyname",
")",
"error_list",
".",
"append",
"(",
"error_str",
")",
"return",
"error_list"
] |
Checks ramp for errors. This is experiment specific checklist.
|
[
"Checks",
"ramp",
"for",
"errors",
".",
"This",
"is",
"experiment",
"specific",
"checklist",
"."
] |
e2565aef7ee16ee06523de975e8aa41aca14e3b2
|
https://github.com/shreyaspotnis/rampage/blob/e2565aef7ee16ee06523de975e8aa41aca14e3b2/rampage/server.py#L713-L763
|
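A minimal sketch of the "multiple of jump_resolution" check above: divide, round, and compare the residue against a small tolerance. The helper name and tolerance default are illustrative.

def is_multiple_of(value, resolution, tolerance=0.0001):
    """True if value is a whole-number multiple of resolution, within tolerance."""
    steps = value / resolution
    return abs(steps - round(steps)) <= tolerance

print(is_multiple_of(0.30, 0.01))    # True
print(is_multiple_of(0.305, 0.01))   # False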
241,760
|
JNRowe/jnrbase
|
jnrbase/colourise.py
|
_colourise
|
def _colourise(text: str, colour: str) -> str:
"""Colour text, if possible.
Args:
text: Text to colourise
colour: Colour to display text in
Returns:
Colourised text, if possible
"""
if COLOUR:
text = style(text, fg=colour, bold=True)
return text
|
python
|
def _colourise(text: str, colour: str) -> str:
"""Colour text, if possible.
Args:
text: Text to colourise
colour: Colour to display text in
Returns:
Colourised text, if possible
"""
if COLOUR:
text = style(text, fg=colour, bold=True)
return text
|
[
"def",
"_colourise",
"(",
"text",
":",
"str",
",",
"colour",
":",
"str",
")",
"->",
"str",
":",
"if",
"COLOUR",
":",
"text",
"=",
"style",
"(",
"text",
",",
"fg",
"=",
"colour",
",",
"bold",
"=",
"True",
")",
"return",
"text"
] |
Colour text, if possible.
Args:
text: Text to colourise
colour: Colour to display text in
Returns:
Colourised text, if possible
|
[
"Colour",
"text",
"if",
"possible",
"."
] |
ae505ef69a9feb739b5f4e62c5a8e6533104d3ea
|
https://github.com/JNRowe/jnrbase/blob/ae505ef69a9feb739b5f4e62c5a8e6533104d3ea/jnrbase/colourise.py#L31-L42
|
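A runnable sketch assuming the style call above comes from click, which accepts fg and bold keywords; the COLOUR toggle and the colour name are placeholders.

import click

COLOUR = True  # mirrors the module-level toggle used above

def colourise(text, colour):
    """Wrap text in ANSI colour codes when colour output is enabled."""
    if COLOUR:
        text = click.style(text, fg=colour, bold=True)
    return text

print(colourise('success', 'green'))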
241,761
|
HazardDede/dictmentor
|
dictmentor/extensions.py
|
ExternalResource._apply
|
def _apply(self, ctx: ExtensionContext) -> AugmentedDict:
"""
Performs the actual loading of an external resource into the current model.
Args:
ctx: The processing context.
Returns:
Returns a dictionary that gets incorporated into the actual model.
"""
def process(pattern: Pattern[str], _str: str) -> Any:
_match = pattern.match(_str)
if _match is None:
return _str # pragma: no cover
# We got a match
# Group 0: Whole match; Group 1: Our placeholder;
# Group 2: file path to external resource
placeholder, external_path = _match.group(1), _match.group(2)
with open(self.locator(
external_path,
cast(str, ctx.document) if Validator.is_file(document=ctx.document) else None
)) as fhandle:
# Json does not support line breaks. We will have to mask them
content = fhandle.read()
return _str.replace(placeholder, content)
node_key, node_value = ctx.node
_pattern = re.compile(self.__pattern__)
return {node_key: process(_pattern, node_value)}
|
python
|
def _apply(self, ctx: ExtensionContext) -> AugmentedDict:
"""
Performs the actual loading of an external resource into the current model.
Args:
ctx: The processing context.
Returns:
Returns a dictionary that gets incorporated into the actual model.
"""
def process(pattern: Pattern[str], _str: str) -> Any:
_match = pattern.match(_str)
if _match is None:
return _str # pragma: no cover
# We got a match
# Group 0: Whole match; Group 1: Our placeholder;
# Group 2: file path to external resource
placeholder, external_path = _match.group(1), _match.group(2)
with open(self.locator(
external_path,
cast(str, ctx.document) if Validator.is_file(document=ctx.document) else None
)) as fhandle:
# Json does not support line breaks. We will have to mask them
content = fhandle.read()
return _str.replace(placeholder, content)
node_key, node_value = ctx.node
_pattern = re.compile(self.__pattern__)
return {node_key: process(_pattern, node_value)}
|
[
"def",
"_apply",
"(",
"self",
",",
"ctx",
":",
"ExtensionContext",
")",
"->",
"AugmentedDict",
":",
"def",
"process",
"(",
"pattern",
":",
"Pattern",
"[",
"str",
"]",
",",
"_str",
":",
"str",
")",
"->",
"Any",
":",
"_match",
"=",
"pattern",
".",
"match",
"(",
"_str",
")",
"if",
"_match",
"is",
"None",
":",
"return",
"_str",
"# pragma: no cover",
"# We got a match",
"# Group 0: Whole match; Group 1: Our placeholder;",
"# Group 2: file path to external resource",
"placeholder",
",",
"external_path",
"=",
"_match",
".",
"group",
"(",
"1",
")",
",",
"_match",
".",
"group",
"(",
"2",
")",
"with",
"open",
"(",
"self",
".",
"locator",
"(",
"external_path",
",",
"cast",
"(",
"str",
",",
"ctx",
".",
"document",
")",
"if",
"Validator",
".",
"is_file",
"(",
"document",
"=",
"ctx",
".",
"document",
")",
"else",
"None",
")",
")",
"as",
"fhandle",
":",
"# Json does not support line breaks. We will have to mask them",
"content",
"=",
"fhandle",
".",
"read",
"(",
")",
"return",
"_str",
".",
"replace",
"(",
"placeholder",
",",
"content",
")",
"node_key",
",",
"node_value",
"=",
"ctx",
".",
"node",
"_pattern",
"=",
"re",
".",
"compile",
"(",
"self",
".",
"__pattern__",
")",
"return",
"{",
"node_key",
":",
"process",
"(",
"_pattern",
",",
"node_value",
")",
"}"
] |
Performs the actual loading of an external resource into the current model.
Args:
ctx: The processing context.
Returns:
Returns a dictionary that gets incorporated into the actual model.
|
[
"Performs",
"the",
"actual",
"loading",
"of",
"an",
"external",
"resource",
"into",
"the",
"current",
"model",
"."
] |
f50ca26ed04f7a924cde6e4d464c4f6ccba4e320
|
https://github.com/HazardDede/dictmentor/blob/f50ca26ed04f7a924cde6e4d464c4f6ccba4e320/dictmentor/extensions.py#L144-L172
|
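A self-contained sketch of the placeholder-inlining idea: match a marker, look up the external content, and splice it back into the string. The '{{ external = path }}' syntax and the RESOURCES dict are hypothetical stand-ins for the extension's real __pattern__ and file locator.

import re

# Hypothetical placeholder syntax; the real extension defines its own __pattern__.
PATTERN = re.compile(r'^(\{\{\s*external\s*=\s*(.*?)\s*\}\})$')

RESOURCES = {'banner.txt': 'Hello from an external file'}  # stands in for open()

def inline_external(value):
    """Replace a '{{ external = path }}' placeholder with the resource content."""
    match = PATTERN.match(value)
    if match is None:
        return value
    placeholder, path = match.group(1), match.group(2)
    return value.replace(placeholder, RESOURCES[path])

print(inline_external('{{ external = banner.txt }}'))  # Hello from an external file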
241,762
|
HazardDede/dictmentor
|
dictmentor/extensions.py
|
ExternalYamlResource._apply
|
def _apply(self, ctx: ExtensionContext) -> Any:
"""
Loads a yaml fragment from an external file.
Args:
ctx: The processing context.
Returns:
The external resource as a python dictionary. The fragment is already send through
the processor as well.
"""
_, external_path = ctx.node
return ctx.mentor.load_yaml(self.locator(
external_path,
cast(str, ctx.document) if Validator.is_file(document=ctx.document) else None
))
|
python
|
def _apply(self, ctx: ExtensionContext) -> Any:
"""
Loads a yaml fragment from an external file.
Args:
ctx: The processing context.
Returns:
The external resource as a python dictionary. The fragment is already send through
the processor as well.
"""
_, external_path = ctx.node
return ctx.mentor.load_yaml(self.locator(
external_path,
cast(str, ctx.document) if Validator.is_file(document=ctx.document) else None
))
|
[
"def",
"_apply",
"(",
"self",
",",
"ctx",
":",
"ExtensionContext",
")",
"->",
"Any",
":",
"_",
",",
"external_path",
"=",
"ctx",
".",
"node",
"return",
"ctx",
".",
"mentor",
".",
"load_yaml",
"(",
"self",
".",
"locator",
"(",
"external_path",
",",
"cast",
"(",
"str",
",",
"ctx",
".",
"document",
")",
"if",
"Validator",
".",
"is_file",
"(",
"document",
"=",
"ctx",
".",
"document",
")",
"else",
"None",
")",
")"
] |
Loads a yaml fragment from an external file.
Args:
ctx: The processing context.
Returns:
The external resource as a python dictionary. The fragment is already send through
the processor as well.
|
[
"Loads",
"a",
"yaml",
"fragment",
"from",
"an",
"external",
"file",
"."
] |
f50ca26ed04f7a924cde6e4d464c4f6ccba4e320
|
https://github.com/HazardDede/dictmentor/blob/f50ca26ed04f7a924cde6e4d464c4f6ccba4e320/dictmentor/extensions.py#L227-L242
|
241,763
|
OpenGov/carpenter
|
carpenter/blocks/tableanalyzer.py
|
TableAnalyzer.preprocess
|
def preprocess(self):
'''
Performs initial cell conversions to standard types. This will strip units, scale numbers,
and identify numeric data where it's convertible.
'''
self.processed_tables = []
self.flags_by_table = []
self.units_by_table = []
for worksheet, rtable in enumerate(self.raw_tables):
ptable, flags, units = self.preprocess_worksheet(rtable, worksheet)
self.processed_tables.append(ptable)
self.flags_by_table.append(flags)
self.units_by_table.append(units)
return self.processed_tables
|
python
|
def preprocess(self):
'''
Performs initial cell conversions to standard types. This will strip units, scale numbers,
and identify numeric data where it's convertible.
'''
self.processed_tables = []
self.flags_by_table = []
self.units_by_table = []
for worksheet, rtable in enumerate(self.raw_tables):
ptable, flags, units = self.preprocess_worksheet(rtable, worksheet)
self.processed_tables.append(ptable)
self.flags_by_table.append(flags)
self.units_by_table.append(units)
return self.processed_tables
|
[
"def",
"preprocess",
"(",
"self",
")",
":",
"self",
".",
"processed_tables",
"=",
"[",
"]",
"self",
".",
"flags_by_table",
"=",
"[",
"]",
"self",
".",
"units_by_table",
"=",
"[",
"]",
"for",
"worksheet",
",",
"rtable",
"in",
"enumerate",
"(",
"self",
".",
"raw_tables",
")",
":",
"ptable",
",",
"flags",
",",
"units",
"=",
"self",
".",
"preprocess_worksheet",
"(",
"rtable",
",",
"worksheet",
")",
"self",
".",
"processed_tables",
".",
"append",
"(",
"ptable",
")",
"self",
".",
"flags_by_table",
".",
"append",
"(",
"flags",
")",
"self",
".",
"units_by_table",
".",
"append",
"(",
"units",
")",
"return",
"self",
".",
"processed_tables"
] |
Performs initial cell conversions to standard types. This will strip units, scale numbers,
and identify numeric data where it's convertible.
|
[
"Performs",
"initial",
"cell",
"conversions",
"to",
"standard",
"types",
".",
"This",
"will",
"strip",
"units",
"scale",
"numbers",
"and",
"identify",
"numeric",
"data",
"where",
"it",
"s",
"convertible",
"."
] |
0ab3c54c05133b9b0468c63e834a7ce3a6fb575b
|
https://github.com/OpenGov/carpenter/blob/0ab3c54c05133b9b0468c63e834a7ce3a6fb575b/carpenter/blocks/tableanalyzer.py#L45-L59
|
241,764
|
OpenGov/carpenter
|
carpenter/blocks/tableanalyzer.py
|
TableAnalyzer.generate_blocks
|
def generate_blocks(self, assume_complete_blocks=None):
'''
Identifies and extracts all blocks from the input tables. These blocks are logical
identifiers for where related information resides in the original table. Any block can be
converted into a row-titled table which can then be stitched together with other tables from
other blocks to form a fully converted data set.
Args:
assume_complete_blocks: Optimizes block loopups by not allowing titles to be extended.
Blocks should be perfectly dense to be found when active. Optional, defaults to
constructor value.
'''
# Store this value to restore object settings later
_track_assume_blocks = self.assume_complete_blocks
try:
if assume_complete_blocks != None:
self.assume_complete_blocks = assume_complete_blocks
if self.processed_tables == None:
self.preprocess()
self.processed_blocks = []
for worksheet in range(len(self.processed_tables)):
ptable = self.processed_tables[worksheet]
flags = self.flags_by_table[worksheet]
units = self.units_by_table[worksheet]
if not self.assume_complete_blocks:
self.fill_in_table(ptable, worksheet, flags)
self.processed_blocks.extend(self._find_blocks(ptable, worksheet, flags, units,
{ 'worksheet': worksheet }))
return self.processed_blocks
finally:
# After execution, reset assume_complete_blocks back
self.assume_complete_blocks = _track_assume_blocks
|
python
|
def generate_blocks(self, assume_complete_blocks=None):
'''
Identifies and extracts all blocks from the input tables. These blocks are logical
identifiers for where related information resides in the original table. Any block can be
converted into a row-titled table which can then be stitched together with other tables from
other blocks to form a fully converted data set.
Args:
assume_complete_blocks: Optimizes block loopups by not allowing titles to be extended.
Blocks should be perfectly dense to be found when active. Optional, defaults to
constructor value.
'''
# Store this value to restore object settings later
_track_assume_blocks = self.assume_complete_blocks
try:
if assume_complete_blocks != None:
self.assume_complete_blocks = assume_complete_blocks
if self.processed_tables == None:
self.preprocess()
self.processed_blocks = []
for worksheet in range(len(self.processed_tables)):
ptable = self.processed_tables[worksheet]
flags = self.flags_by_table[worksheet]
units = self.units_by_table[worksheet]
if not self.assume_complete_blocks:
self.fill_in_table(ptable, worksheet, flags)
self.processed_blocks.extend(self._find_blocks(ptable, worksheet, flags, units,
{ 'worksheet': worksheet }))
return self.processed_blocks
finally:
# After execution, reset assume_complete_blocks back
self.assume_complete_blocks = _track_assume_blocks
|
[
"def",
"generate_blocks",
"(",
"self",
",",
"assume_complete_blocks",
"=",
"None",
")",
":",
"# Store this value to restore object settings later",
"_track_assume_blocks",
"=",
"self",
".",
"assume_complete_blocks",
"try",
":",
"if",
"assume_complete_blocks",
"!=",
"None",
":",
"self",
".",
"assume_complete_blocks",
"=",
"assume_complete_blocks",
"if",
"self",
".",
"processed_tables",
"==",
"None",
":",
"self",
".",
"preprocess",
"(",
")",
"self",
".",
"processed_blocks",
"=",
"[",
"]",
"for",
"worksheet",
"in",
"range",
"(",
"len",
"(",
"self",
".",
"processed_tables",
")",
")",
":",
"ptable",
"=",
"self",
".",
"processed_tables",
"[",
"worksheet",
"]",
"flags",
"=",
"self",
".",
"flags_by_table",
"[",
"worksheet",
"]",
"units",
"=",
"self",
".",
"units_by_table",
"[",
"worksheet",
"]",
"if",
"not",
"self",
".",
"assume_complete_blocks",
":",
"self",
".",
"fill_in_table",
"(",
"ptable",
",",
"worksheet",
",",
"flags",
")",
"self",
".",
"processed_blocks",
".",
"extend",
"(",
"self",
".",
"_find_blocks",
"(",
"ptable",
",",
"worksheet",
",",
"flags",
",",
"units",
",",
"{",
"'worksheet'",
":",
"worksheet",
"}",
")",
")",
"return",
"self",
".",
"processed_blocks",
"finally",
":",
"# After execution, reset assume_complete_blocks back",
"self",
".",
"assume_complete_blocks",
"=",
"_track_assume_blocks"
] |
Identifies and extracts all blocks from the input tables. These blocks are logical
identifiers for where related information resides in the original table. Any block can be
converted into a row-titled table which can then be stitched together with other tables from
other blocks to form a fully converted data set.
Args:
            assume_complete_blocks: Optimizes block lookups by not allowing titles to be extended.
Blocks should be perfectly dense to be found when active. Optional, defaults to
constructor value.
|
[
"Identifies",
"and",
"extracts",
"all",
"blocks",
"from",
"the",
"input",
"tables",
".",
"These",
"blocks",
"are",
"logical",
"identifiers",
"for",
"where",
"related",
"information",
"resides",
"in",
"the",
"original",
"table",
".",
"Any",
"block",
"can",
"be",
"converted",
"into",
"a",
"row",
"-",
"titled",
"table",
"which",
"can",
"then",
"be",
"stitched",
"together",
"with",
"other",
"tables",
"from",
"other",
"blocks",
"to",
"form",
"a",
"fully",
"converted",
"data",
"set",
"."
] |
0ab3c54c05133b9b0468c63e834a7ce3a6fb575b
|
https://github.com/OpenGov/carpenter/blob/0ab3c54c05133b9b0468c63e834a7ce3a6fb575b/carpenter/blocks/tableanalyzer.py#L61-L96
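A minimal usage sketch for the record above (not part of the source data). Only generate_blocks itself appears in this record, so the constructor call below is an assumption about the carpenter API, not something taken from the source.

# Hedged usage sketch for TableAnalyzer.generate_blocks; the constructor
# arguments are an assumption and may differ from the real carpenter API.
from carpenter.blocks.tableanalyzer import TableAnalyzer

raw_tables = [[
    ['Department', 'FY2014', 'FY2015'],
    ['Police', 100, 110],
    ['Fire', 90, 95],
]]
analyzer = TableAnalyzer(raw_tables)              # hypothetical construction
blocks = analyzer.generate_blocks(assume_complete_blocks=True)
for block in blocks:
    print(block)                                  # one TableBlock per dense region found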
|
241,765
|
OpenGov/carpenter
|
carpenter/blocks/tableanalyzer.py
|
TableAnalyzer.preprocess_worksheet
|
def preprocess_worksheet(self, table, worksheet):
'''
Performs a preprocess pass of the table to attempt naive conversions of data and to record
the initial types of each cell.
'''
table_conversion = []
flags = {}
units = {}
for rind, row in enumerate(table):
conversion_row = []
table_conversion.append(conversion_row)
if self.skippable_rows and worksheet in self.skippable_rows and rind in self.skippable_rows[worksheet]:
self.flag_change(flags, 'interpreted', (rind, None), worksheet, self.FLAGS['skipped-row'])
continue
for cind, cell in enumerate(row):
position = (rind, cind)
if self.skippable_columns and worksheet in self.skippable_columns and cind in self.skippable_columns[worksheet]:
conversion = None
self.flag_change(flags, 'interpreted', position, worksheet, self.FLAGS['skipped-column'])
else:
# Do the heavy lifting in pre_process_cell
conversion = auto_convert_cell(self, cell, position, worksheet, flags, units,
parens_as_neg=self.parens_as_neg)
conversion_row.append(conversion)
# Give back our conversions, type labeling, and conversion flags
return table_conversion, flags, units
|
python
|
def preprocess_worksheet(self, table, worksheet):
'''
Performs a preprocess pass of the table to attempt naive conversions of data and to record
the initial types of each cell.
'''
table_conversion = []
flags = {}
units = {}
for rind, row in enumerate(table):
conversion_row = []
table_conversion.append(conversion_row)
if self.skippable_rows and worksheet in self.skippable_rows and rind in self.skippable_rows[worksheet]:
self.flag_change(flags, 'interpreted', (rind, None), worksheet, self.FLAGS['skipped-row'])
continue
for cind, cell in enumerate(row):
position = (rind, cind)
if self.skippable_columns and worksheet in self.skippable_columns and cind in self.skippable_columns[worksheet]:
conversion = None
self.flag_change(flags, 'interpreted', position, worksheet, self.FLAGS['skipped-column'])
else:
# Do the heavy lifting in pre_process_cell
conversion = auto_convert_cell(self, cell, position, worksheet, flags, units,
parens_as_neg=self.parens_as_neg)
conversion_row.append(conversion)
# Give back our conversions, type labeling, and conversion flags
return table_conversion, flags, units
|
[
"def",
"preprocess_worksheet",
"(",
"self",
",",
"table",
",",
"worksheet",
")",
":",
"table_conversion",
"=",
"[",
"]",
"flags",
"=",
"{",
"}",
"units",
"=",
"{",
"}",
"for",
"rind",
",",
"row",
"in",
"enumerate",
"(",
"table",
")",
":",
"conversion_row",
"=",
"[",
"]",
"table_conversion",
".",
"append",
"(",
"conversion_row",
")",
"if",
"self",
".",
"skippable_rows",
"and",
"worksheet",
"in",
"self",
".",
"skippable_rows",
"and",
"rind",
"in",
"self",
".",
"skippable_rows",
"[",
"worksheet",
"]",
":",
"self",
".",
"flag_change",
"(",
"flags",
",",
"'interpreted'",
",",
"(",
"rind",
",",
"None",
")",
",",
"worksheet",
",",
"self",
".",
"FLAGS",
"[",
"'skipped-row'",
"]",
")",
"continue",
"for",
"cind",
",",
"cell",
"in",
"enumerate",
"(",
"row",
")",
":",
"position",
"=",
"(",
"rind",
",",
"cind",
")",
"if",
"self",
".",
"skippable_columns",
"and",
"worksheet",
"in",
"self",
".",
"skippable_columns",
"and",
"cind",
"in",
"self",
".",
"skippable_columns",
"[",
"worksheet",
"]",
":",
"conversion",
"=",
"None",
"self",
".",
"flag_change",
"(",
"flags",
",",
"'interpreted'",
",",
"position",
",",
"worksheet",
",",
"self",
".",
"FLAGS",
"[",
"'skipped-column'",
"]",
")",
"else",
":",
"# Do the heavy lifting in pre_process_cell",
"conversion",
"=",
"auto_convert_cell",
"(",
"self",
",",
"cell",
",",
"position",
",",
"worksheet",
",",
"flags",
",",
"units",
",",
"parens_as_neg",
"=",
"self",
".",
"parens_as_neg",
")",
"conversion_row",
".",
"append",
"(",
"conversion",
")",
"# Give back our conversions, type labeling, and conversion flags",
"return",
"table_conversion",
",",
"flags",
",",
"units"
] |
Performs a preprocess pass of the table to attempt naive conversions of data and to record
the initial types of each cell.
|
[
"Performs",
"a",
"preprocess",
"pass",
"of",
"the",
"table",
"to",
"attempt",
"naive",
"conversions",
"of",
"data",
"and",
"to",
"record",
"the",
"initial",
"types",
"of",
"each",
"cell",
"."
] |
0ab3c54c05133b9b0468c63e834a7ce3a6fb575b
|
https://github.com/OpenGov/carpenter/blob/0ab3c54c05133b9b0468c63e834a7ce3a6fb575b/carpenter/blocks/tableanalyzer.py#L98-L123
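To make the "naive conversion" idea concrete, here is a standalone sketch of the kind of per-cell coercion this pass performs. The real auto_convert_cell also records flags, units and parentheses-as-negative handling, which this toy version omits.

# Standalone sketch of naive per-cell conversion (assumption: a much-simplified
# stand-in for auto_convert_cell from the record).
def naive_convert(cell):
    if cell is None or cell == '':
        return None
    for caster in (int, float):
        try:
            return caster(cell)
        except (TypeError, ValueError):
            pass
    return cell  # leave text cells as-is

print([naive_convert(c) for c in ['Police', '100', '3.5', '', 'n/a']])
# ['Police', 100, 3.5, None, 'n/a']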
|
241,766
|
OpenGov/carpenter
|
carpenter/blocks/tableanalyzer.py
|
TableAnalyzer.fill_in_table
|
def fill_in_table(self, table, worksheet, flags):
'''
Fills in any rows with missing right hand side data with empty cells.
'''
max_row = 0
min_row = sys.maxint
for row in table:
if len(row) > max_row:
max_row = len(row)
if len(row) < min_row:
min_row = len(row)
if max_row != min_row:
for row in table:
if len(row) < max_row:
row.extend([None]*(max_row-len(row)))
|
python
|
def fill_in_table(self, table, worksheet, flags):
'''
Fills in any rows with missing right hand side data with empty cells.
'''
max_row = 0
min_row = sys.maxint
for row in table:
if len(row) > max_row:
max_row = len(row)
if len(row) < min_row:
min_row = len(row)
if max_row != min_row:
for row in table:
if len(row) < max_row:
row.extend([None]*(max_row-len(row)))
|
[
"def",
"fill_in_table",
"(",
"self",
",",
"table",
",",
"worksheet",
",",
"flags",
")",
":",
"max_row",
"=",
"0",
"min_row",
"=",
"sys",
".",
"maxint",
"for",
"row",
"in",
"table",
":",
"if",
"len",
"(",
"row",
")",
">",
"max_row",
":",
"max_row",
"=",
"len",
"(",
"row",
")",
"if",
"len",
"(",
"row",
")",
"<",
"min_row",
":",
"min_row",
"=",
"len",
"(",
"row",
")",
"if",
"max_row",
"!=",
"min_row",
":",
"for",
"row",
"in",
"table",
":",
"if",
"len",
"(",
"row",
")",
"<",
"max_row",
":",
"row",
".",
"extend",
"(",
"[",
"None",
"]",
"*",
"(",
"max_row",
"-",
"len",
"(",
"row",
")",
")",
")"
] |
Fills in any rows with missing right hand side data with empty cells.
|
[
"Fills",
"in",
"any",
"rows",
"with",
"missing",
"right",
"hand",
"side",
"data",
"with",
"empty",
"cells",
"."
] |
0ab3c54c05133b9b0468c63e834a7ce3a6fb575b
|
https://github.com/OpenGov/carpenter/blob/0ab3c54c05133b9b0468c63e834a7ce3a6fb575b/carpenter/blocks/tableanalyzer.py#L125-L139
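The padding behaviour is simple enough to show standalone. Note that the record's code uses sys.maxint, which only exists on Python 2; the Python 3 sketch below just takes the maximum row length directly.

# Python 3 illustration of the same padding: extend every short row with None
# cells until the table is rectangular.
table = [['a', 1, 2], ['b', 3], ['c']]
width = max(len(row) for row in table)
for row in table:
    row.extend([None] * (width - len(row)))
print(table)  # [['a', 1, 2], ['b', 3, None], ['c', None, None]]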
|
241,767
|
OpenGov/carpenter
|
carpenter/blocks/tableanalyzer.py
|
TableAnalyzer._find_valid_block
|
def _find_valid_block(self, table, worksheet, flags, units, used_cells, start_pos, end_pos):
'''
Searches for the next location where a valid block could reside and constructs the block
object representing that location.
'''
for row_index in range(len(table)):
if row_index < start_pos[0] or row_index > end_pos[0]:
continue
convRow = table[row_index]
used_row = used_cells[row_index]
for column_index, conv in enumerate(convRow):
if (column_index < start_pos[1] or column_index > end_pos[1] or used_row[column_index]):
continue
# Is non empty cell?
if not is_empty_cell(conv):
block_start, block_end = self._find_block_bounds(table, used_cells,
(row_index, column_index), start_pos, end_pos)
if (block_end[0] > block_start[0] and
block_end[1] > block_start[1]):
try:
return TableBlock(table, used_cells, block_start, block_end, worksheet,
flags, units, self.assume_complete_blocks, self.max_title_rows)
except InvalidBlockError:
pass
# Prevent infinite loops if something goes wrong
used_cells[row_index][column_index] = True
|
python
|
def _find_valid_block(self, table, worksheet, flags, units, used_cells, start_pos, end_pos):
'''
Searches for the next location where a valid block could reside and constructs the block
object representing that location.
'''
for row_index in range(len(table)):
if row_index < start_pos[0] or row_index > end_pos[0]:
continue
convRow = table[row_index]
used_row = used_cells[row_index]
for column_index, conv in enumerate(convRow):
if (column_index < start_pos[1] or column_index > end_pos[1] or used_row[column_index]):
continue
# Is non empty cell?
if not is_empty_cell(conv):
block_start, block_end = self._find_block_bounds(table, used_cells,
(row_index, column_index), start_pos, end_pos)
if (block_end[0] > block_start[0] and
block_end[1] > block_start[1]):
try:
return TableBlock(table, used_cells, block_start, block_end, worksheet,
flags, units, self.assume_complete_blocks, self.max_title_rows)
except InvalidBlockError:
pass
# Prevent infinite loops if something goes wrong
used_cells[row_index][column_index] = True
|
[
"def",
"_find_valid_block",
"(",
"self",
",",
"table",
",",
"worksheet",
",",
"flags",
",",
"units",
",",
"used_cells",
",",
"start_pos",
",",
"end_pos",
")",
":",
"for",
"row_index",
"in",
"range",
"(",
"len",
"(",
"table",
")",
")",
":",
"if",
"row_index",
"<",
"start_pos",
"[",
"0",
"]",
"or",
"row_index",
">",
"end_pos",
"[",
"0",
"]",
":",
"continue",
"convRow",
"=",
"table",
"[",
"row_index",
"]",
"used_row",
"=",
"used_cells",
"[",
"row_index",
"]",
"for",
"column_index",
",",
"conv",
"in",
"enumerate",
"(",
"convRow",
")",
":",
"if",
"(",
"column_index",
"<",
"start_pos",
"[",
"1",
"]",
"or",
"column_index",
">",
"end_pos",
"[",
"1",
"]",
"or",
"used_row",
"[",
"column_index",
"]",
")",
":",
"continue",
"# Is non empty cell?",
"if",
"not",
"is_empty_cell",
"(",
"conv",
")",
":",
"block_start",
",",
"block_end",
"=",
"self",
".",
"_find_block_bounds",
"(",
"table",
",",
"used_cells",
",",
"(",
"row_index",
",",
"column_index",
")",
",",
"start_pos",
",",
"end_pos",
")",
"if",
"(",
"block_end",
"[",
"0",
"]",
">",
"block_start",
"[",
"0",
"]",
"and",
"block_end",
"[",
"1",
"]",
">",
"block_start",
"[",
"1",
"]",
")",
":",
"try",
":",
"return",
"TableBlock",
"(",
"table",
",",
"used_cells",
",",
"block_start",
",",
"block_end",
",",
"worksheet",
",",
"flags",
",",
"units",
",",
"self",
".",
"assume_complete_blocks",
",",
"self",
".",
"max_title_rows",
")",
"except",
"InvalidBlockError",
":",
"pass",
"# Prevent infinite loops if something goes wrong",
"used_cells",
"[",
"row_index",
"]",
"[",
"column_index",
"]",
"=",
"True"
] |
Searches for the next location where a valid block could reside and constructs the block
object representing that location.
|
[
"Searches",
"for",
"the",
"next",
"location",
"where",
"a",
"valid",
"block",
"could",
"reside",
"and",
"constructs",
"the",
"block",
"object",
"representing",
"that",
"location",
"."
] |
0ab3c54c05133b9b0468c63e834a7ce3a6fb575b
|
https://github.com/OpenGov/carpenter/blob/0ab3c54c05133b9b0468c63e834a7ce3a6fb575b/carpenter/blocks/tableanalyzer.py#L198-L223
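The scanning idea, i.e. find the first non-empty cell that no earlier block has claimed, can be sketched on its own. The real method then grows that seed into a TableBlock; this toy version stops at the seed cell.

# Simplified seed-cell scan (assumption: bounds growth and TableBlock
# construction from the record are intentionally left out).
def first_unused_nonempty(table, used):
    for r, row in enumerate(table):
        for c, cell in enumerate(row):
            if cell not in (None, '') and not used[r][c]:
                return (r, c)
    return None

table = [[None, None], [None, 'Totals']]
used = [[False, False], [False, False]]
print(first_unused_nonempty(table, used))  # (1, 1)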
|
241,768
|
OpenGov/carpenter
|
carpenter/blocks/tableanalyzer.py
|
TableAnalyzer._find_block_bounds
|
def _find_block_bounds(self, table, used_cells, possible_block_start, start_pos, end_pos):
'''
First walk the rows, checking for the farthest left column belonging to the block and the
bottom most row belonging to the block. If a blank cell is hit and the column started with a
blank cell or has length < self.blank_repeat_threshold, then restart one row to the right.
Alternatively, if assume_complete_blocks has been set to true, any blank cell stops block
detection.
Then walk the columns until a column is reached which has blank cells down to the row which
        was marked as the row end in the prior iteration.
'''
# If we're only looking for complete blocks, then just walk
# until we hit a blank cell
if self.assume_complete_blocks:
block_start, block_end = self._find_complete_block_bounds(
table, used_cells, possible_block_start,
start_pos, end_pos)
# Otherwise do a smart, multi-pass approach to finding blocks
# with potential missing fields
else:
block_start, block_end = self._find_block_start(
table, used_cells, possible_block_start,
start_pos, end_pos)
block_start, block_end = self._find_block_end(
table, used_cells, block_start, block_end,
start_pos, end_pos)
return block_start, block_end
|
python
|
def _find_block_bounds(self, table, used_cells, possible_block_start, start_pos, end_pos):
'''
First walk the rows, checking for the farthest left column belonging to the block and the
bottom most row belonging to the block. If a blank cell is hit and the column started with a
blank cell or has length < self.blank_repeat_threshold, then restart one row to the right.
Alternatively, if assume_complete_blocks has been set to true, any blank cell stops block
detection.
Then walk the columns until a column is reached which has blank cells down to the row which
        was marked as the row end in the prior iteration.
'''
# If we're only looking for complete blocks, then just walk
# until we hit a blank cell
if self.assume_complete_blocks:
block_start, block_end = self._find_complete_block_bounds(
table, used_cells, possible_block_start,
start_pos, end_pos)
# Otherwise do a smart, multi-pass approach to finding blocks
# with potential missing fields
else:
block_start, block_end = self._find_block_start(
table, used_cells, possible_block_start,
start_pos, end_pos)
block_start, block_end = self._find_block_end(
table, used_cells, block_start, block_end,
start_pos, end_pos)
return block_start, block_end
|
[
"def",
"_find_block_bounds",
"(",
"self",
",",
"table",
",",
"used_cells",
",",
"possible_block_start",
",",
"start_pos",
",",
"end_pos",
")",
":",
"# If we're only looking for complete blocks, then just walk",
"# until we hit a blank cell",
"if",
"self",
".",
"assume_complete_blocks",
":",
"block_start",
",",
"block_end",
"=",
"self",
".",
"_find_complete_block_bounds",
"(",
"table",
",",
"used_cells",
",",
"possible_block_start",
",",
"start_pos",
",",
"end_pos",
")",
"# Otherwise do a smart, multi-pass approach to finding blocks",
"# with potential missing fields",
"else",
":",
"block_start",
",",
"block_end",
"=",
"self",
".",
"_find_block_start",
"(",
"table",
",",
"used_cells",
",",
"possible_block_start",
",",
"start_pos",
",",
"end_pos",
")",
"block_start",
",",
"block_end",
"=",
"self",
".",
"_find_block_end",
"(",
"table",
",",
"used_cells",
",",
"block_start",
",",
"block_end",
",",
"start_pos",
",",
"end_pos",
")",
"return",
"block_start",
",",
"block_end"
] |
First walk the rows, checking for the farthest left column belonging to the block and the
bottom most row belonging to the block. If a blank cell is hit and the column started with a
blank cell or has length < self.blank_repeat_threshold, then restart one row to the right.
Alternatively, if assume_complete_blocks has been set to true, any blank cell stops block
detection.
Then walk the columns until a column is reached which has blank cells down to the row which
        was marked as the row end in the prior iteration.
|
[
"First",
"walk",
"the",
"rows",
"checking",
"for",
"the",
"farthest",
"left",
"column",
"belonging",
"to",
"the",
"block",
"and",
"the",
"bottom",
"most",
"row",
"belonging",
"to",
"the",
"block",
".",
"If",
"a",
"blank",
"cell",
"is",
"hit",
"and",
"the",
"column",
"started",
"with",
"a",
"blank",
"cell",
"or",
"has",
"length",
"<",
"self",
".",
"blank_repeat_threshold",
"then",
"restart",
"one",
"row",
"to",
"the",
"right",
".",
"Alternatively",
"if",
"assume_complete_blocks",
"has",
"been",
"set",
"to",
"true",
"any",
"blank",
"cell",
"stops",
"block",
"detection",
"."
] |
0ab3c54c05133b9b0468c63e834a7ce3a6fb575b
|
https://github.com/OpenGov/carpenter/blob/0ab3c54c05133b9b0468c63e834a7ce3a6fb575b/carpenter/blocks/tableanalyzer.py#L225-L252
|
241,769
|
OpenGov/carpenter
|
carpenter/blocks/tableanalyzer.py
|
TableAnalyzer._single_length_title
|
def _single_length_title(self, table, row_index, current_col):
'''
Returns true if the row is a single length title element with no other row titles. Useful
for tracking pre-data titles that belong in their own block.
'''
if len(table[row_index]) - current_col <= 0:
return False
return (is_text_cell(table[row_index][current_col]) and
all(not is_text_cell(table[row_index][next_column])
for next_column in xrange(current_col + 1, len(table[row_index]))))
|
python
|
def _single_length_title(self, table, row_index, current_col):
'''
Returns true if the row is a single length title element with no other row titles. Useful
for tracking pre-data titles that belong in their own block.
'''
if len(table[row_index]) - current_col <= 0:
return False
return (is_text_cell(table[row_index][current_col]) and
all(not is_text_cell(table[row_index][next_column])
for next_column in xrange(current_col + 1, len(table[row_index]))))
|
[
"def",
"_single_length_title",
"(",
"self",
",",
"table",
",",
"row_index",
",",
"current_col",
")",
":",
"if",
"len",
"(",
"table",
"[",
"row_index",
"]",
")",
"-",
"current_col",
"<=",
"0",
":",
"return",
"False",
"return",
"(",
"is_text_cell",
"(",
"table",
"[",
"row_index",
"]",
"[",
"current_col",
"]",
")",
"and",
"all",
"(",
"not",
"is_text_cell",
"(",
"table",
"[",
"row_index",
"]",
"[",
"next_column",
"]",
")",
"for",
"next_column",
"in",
"xrange",
"(",
"current_col",
"+",
"1",
",",
"len",
"(",
"table",
"[",
"row_index",
"]",
")",
")",
")",
")"
] |
Returns true if the row is a single length title element with no other row titles. Useful
for tracking pre-data titles that belong in their own block.
|
[
"Returns",
"true",
"if",
"the",
"row",
"is",
"a",
"single",
"length",
"title",
"element",
"with",
"no",
"other",
"row",
"titles",
".",
"Useful",
"for",
"tracking",
"pre",
"-",
"data",
"titles",
"that",
"belong",
"in",
"their",
"own",
"block",
"."
] |
0ab3c54c05133b9b0468c63e834a7ce3a6fb575b
|
https://github.com/OpenGov/carpenter/blob/0ab3c54c05133b9b0468c63e834a7ce3a6fb575b/carpenter/blocks/tableanalyzer.py#L287-L296
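A self-contained version of the same check, approximating the record's is_text_cell helper with a plain isinstance test:

# Standalone equivalent of _single_length_title for one row (is_text_cell is
# approximated here with an isinstance check, which is an assumption).
def single_length_title(row, col):
    if len(row) - col <= 0:
        return False
    is_text = lambda c: isinstance(c, str) and c != ''
    return is_text(row[col]) and all(not is_text(c) for c in row[col + 1:])

print(single_length_title(['General Fund', None, None], 0))  # True
print(single_length_title(['Dept', 'FY2014', 'FY2015'], 0))  # False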
|
241,770
|
OpenGov/carpenter
|
carpenter/blocks/tableanalyzer.py
|
TableAnalyzer._find_block_start
|
def _find_block_start(self, table, used_cells, possible_block_start, start_pos, end_pos):
'''
Finds the start of a block from a suggested start location. This location can be at a lower
column but not a lower row. The function traverses columns until it finds a stopping
condition or a repeat condition that restarts on the next column.
Note this also finds the lowest row of block_end.
'''
current_col = possible_block_start[1]
block_start = list(possible_block_start)
block_end = list(possible_block_start)
repeat = True
checked_all = False
# Repeat until we've met satisfactory conditions for catching all edge cases or we've
# checked all valid block locations
while not checked_all and repeat:
block_end[0] = max(block_end[0], possible_block_start[0])
block_end[1] = max(block_end[1], current_col)
single_titled_block = True
table_column = TableTranspose(table)[current_col]
used_column = TableTranspose(used_cells)[current_col]
# We need to find a non empty cell before we can stop
blank_start = is_empty_cell(table_column[possible_block_start[0]])
blank_exited = not blank_start
# Unless we have assume_complete_blocks set to True
if blank_start and self.assume_complete_blocks:
# Found a blank? We're done
repeat = False
break
#TODO refactor code below into new function for easier reading
# Analyze the beginning columns
for row_index in xrange(possible_block_start[0], end_pos[0] + 1):
# Ensure we catch the edge case of the data reaching the edge of the table --
# block_end should then equal end_pos
if blank_exited:
block_end[0] = max(block_end[0], row_index)
if row_index == end_pos[0] or used_column[row_index]:
# We've gone through the whole range
checked_all = True
repeat = False
break
if not blank_exited:
blank_exited = not is_empty_cell(table_column[row_index])
if single_titled_block and not self._single_length_title(table, row_index, current_col):
single_titled_block = False
# If we saw single length titles for several more than threshold rows, then we
# have a unique block before an actual content block
if self._above_blank_repeat_threshold(possible_block_start[0], row_index):
repeat = False
break
if is_empty_cell(table_column[row_index]) and len(table[row_index]) > current_col + 1:
current_col += 1
break
# Go find the left most column that's still valid
table_row = table[row_index]
used_row = used_cells[row_index]
for column_index in range(current_col, start_pos[1] - 1, -1):
if is_empty_cell(table_row[column_index]) or used_row[column_index]:
break
else:
block_start[1] = min(block_start[1], column_index)
# Check if we've seen few enough cells to guess that we have a repeating title
repeat = blank_start or self._below_blank_repeat_threshold(possible_block_start[0], row_index)
return block_start, block_end
|
python
|
def _find_block_start(self, table, used_cells, possible_block_start, start_pos, end_pos):
'''
Finds the start of a block from a suggested start location. This location can be at a lower
column but not a lower row. The function traverses columns until it finds a stopping
condition or a repeat condition that restarts on the next column.
Note this also finds the lowest row of block_end.
'''
current_col = possible_block_start[1]
block_start = list(possible_block_start)
block_end = list(possible_block_start)
repeat = True
checked_all = False
# Repeat until we've met satisfactory conditions for catching all edge cases or we've
# checked all valid block locations
while not checked_all and repeat:
block_end[0] = max(block_end[0], possible_block_start[0])
block_end[1] = max(block_end[1], current_col)
single_titled_block = True
table_column = TableTranspose(table)[current_col]
used_column = TableTranspose(used_cells)[current_col]
# We need to find a non empty cell before we can stop
blank_start = is_empty_cell(table_column[possible_block_start[0]])
blank_exited = not blank_start
# Unless we have assume_complete_blocks set to True
if blank_start and self.assume_complete_blocks:
# Found a blank? We're done
repeat = False
break
#TODO refactor code below into new function for easier reading
# Analyze the beginning columns
for row_index in xrange(possible_block_start[0], end_pos[0] + 1):
# Ensure we catch the edge case of the data reaching the edge of the table --
# block_end should then equal end_pos
if blank_exited:
block_end[0] = max(block_end[0], row_index)
if row_index == end_pos[0] or used_column[row_index]:
# We've gone through the whole range
checked_all = True
repeat = False
break
if not blank_exited:
blank_exited = not is_empty_cell(table_column[row_index])
if single_titled_block and not self._single_length_title(table, row_index, current_col):
single_titled_block = False
# If we saw single length titles for several more than threshold rows, then we
# have a unique block before an actual content block
if self._above_blank_repeat_threshold(possible_block_start[0], row_index):
repeat = False
break
if is_empty_cell(table_column[row_index]) and len(table[row_index]) > current_col + 1:
current_col += 1
break
# Go find the left most column that's still valid
table_row = table[row_index]
used_row = used_cells[row_index]
for column_index in range(current_col, start_pos[1] - 1, -1):
if is_empty_cell(table_row[column_index]) or used_row[column_index]:
break
else:
block_start[1] = min(block_start[1], column_index)
# Check if we've seen few enough cells to guess that we have a repeating title
repeat = blank_start or self._below_blank_repeat_threshold(possible_block_start[0], row_index)
return block_start, block_end
|
[
"def",
"_find_block_start",
"(",
"self",
",",
"table",
",",
"used_cells",
",",
"possible_block_start",
",",
"start_pos",
",",
"end_pos",
")",
":",
"current_col",
"=",
"possible_block_start",
"[",
"1",
"]",
"block_start",
"=",
"list",
"(",
"possible_block_start",
")",
"block_end",
"=",
"list",
"(",
"possible_block_start",
")",
"repeat",
"=",
"True",
"checked_all",
"=",
"False",
"# Repeat until we've met satisfactory conditions for catching all edge cases or we've",
"# checked all valid block locations",
"while",
"not",
"checked_all",
"and",
"repeat",
":",
"block_end",
"[",
"0",
"]",
"=",
"max",
"(",
"block_end",
"[",
"0",
"]",
",",
"possible_block_start",
"[",
"0",
"]",
")",
"block_end",
"[",
"1",
"]",
"=",
"max",
"(",
"block_end",
"[",
"1",
"]",
",",
"current_col",
")",
"single_titled_block",
"=",
"True",
"table_column",
"=",
"TableTranspose",
"(",
"table",
")",
"[",
"current_col",
"]",
"used_column",
"=",
"TableTranspose",
"(",
"used_cells",
")",
"[",
"current_col",
"]",
"# We need to find a non empty cell before we can stop",
"blank_start",
"=",
"is_empty_cell",
"(",
"table_column",
"[",
"possible_block_start",
"[",
"0",
"]",
"]",
")",
"blank_exited",
"=",
"not",
"blank_start",
"# Unless we have assume_complete_blocks set to True",
"if",
"blank_start",
"and",
"self",
".",
"assume_complete_blocks",
":",
"# Found a blank? We're done",
"repeat",
"=",
"False",
"break",
"#TODO refactor code below into new function for easier reading",
"# Analyze the beginning columns",
"for",
"row_index",
"in",
"xrange",
"(",
"possible_block_start",
"[",
"0",
"]",
",",
"end_pos",
"[",
"0",
"]",
"+",
"1",
")",
":",
"# Ensure we catch the edge case of the data reaching the edge of the table --",
"# block_end should then equal end_pos",
"if",
"blank_exited",
":",
"block_end",
"[",
"0",
"]",
"=",
"max",
"(",
"block_end",
"[",
"0",
"]",
",",
"row_index",
")",
"if",
"row_index",
"==",
"end_pos",
"[",
"0",
"]",
"or",
"used_column",
"[",
"row_index",
"]",
":",
"# We've gone through the whole range",
"checked_all",
"=",
"True",
"repeat",
"=",
"False",
"break",
"if",
"not",
"blank_exited",
":",
"blank_exited",
"=",
"not",
"is_empty_cell",
"(",
"table_column",
"[",
"row_index",
"]",
")",
"if",
"single_titled_block",
"and",
"not",
"self",
".",
"_single_length_title",
"(",
"table",
",",
"row_index",
",",
"current_col",
")",
":",
"single_titled_block",
"=",
"False",
"# If we saw single length titles for several more than threshold rows, then we",
"# have a unique block before an actual content block",
"if",
"self",
".",
"_above_blank_repeat_threshold",
"(",
"possible_block_start",
"[",
"0",
"]",
",",
"row_index",
")",
":",
"repeat",
"=",
"False",
"break",
"if",
"is_empty_cell",
"(",
"table_column",
"[",
"row_index",
"]",
")",
"and",
"len",
"(",
"table",
"[",
"row_index",
"]",
")",
">",
"current_col",
"+",
"1",
":",
"current_col",
"+=",
"1",
"break",
"# Go find the left most column that's still valid",
"table_row",
"=",
"table",
"[",
"row_index",
"]",
"used_row",
"=",
"used_cells",
"[",
"row_index",
"]",
"for",
"column_index",
"in",
"range",
"(",
"current_col",
",",
"start_pos",
"[",
"1",
"]",
"-",
"1",
",",
"-",
"1",
")",
":",
"if",
"is_empty_cell",
"(",
"table_row",
"[",
"column_index",
"]",
")",
"or",
"used_row",
"[",
"column_index",
"]",
":",
"break",
"else",
":",
"block_start",
"[",
"1",
"]",
"=",
"min",
"(",
"block_start",
"[",
"1",
"]",
",",
"column_index",
")",
"# Check if we've seen few enough cells to guess that we have a repeating title",
"repeat",
"=",
"blank_start",
"or",
"self",
".",
"_below_blank_repeat_threshold",
"(",
"possible_block_start",
"[",
"0",
"]",
",",
"row_index",
")",
"return",
"block_start",
",",
"block_end"
] |
Finds the start of a block from a suggested start location. This location can be at a lower
column but not a lower row. The function traverses columns until it finds a stopping
condition or a repeat condition that restarts on the next column.
Note this also finds the lowest row of block_end.
|
[
"Finds",
"the",
"start",
"of",
"a",
"block",
"from",
"a",
"suggested",
"start",
"location",
".",
"This",
"location",
"can",
"be",
"at",
"a",
"lower",
"column",
"but",
"not",
"a",
"lower",
"row",
".",
"The",
"function",
"traverses",
"columns",
"until",
"it",
"finds",
"a",
"stopping",
"condition",
"or",
"a",
"repeat",
"condition",
"that",
"restarts",
"on",
"the",
"next",
"column",
"."
] |
0ab3c54c05133b9b0468c63e834a7ce3a6fb575b
|
https://github.com/OpenGov/carpenter/blob/0ab3c54c05133b9b0468c63e834a7ce3a6fb575b/carpenter/blocks/tableanalyzer.py#L310-L377
|
241,771
|
veeti/decent
|
decent/error.py
|
Error.as_dict
|
def as_dict(self, join='.'):
"""
Returns the error as a path to message dictionary. Paths are joined
with the ``join`` string.
"""
if self.path:
path = [str(node) for node in self.path]
else:
path = ''
return { join.join(path): self.message }
|
python
|
def as_dict(self, join='.'):
"""
Returns the error as a path to message dictionary. Paths are joined
with the ``join`` string.
"""
if self.path:
path = [str(node) for node in self.path]
else:
path = ''
return { join.join(path): self.message }
|
[
"def",
"as_dict",
"(",
"self",
",",
"join",
"=",
"'.'",
")",
":",
"if",
"self",
".",
"path",
":",
"path",
"=",
"[",
"str",
"(",
"node",
")",
"for",
"node",
"in",
"self",
".",
"path",
"]",
"else",
":",
"path",
"=",
"''",
"return",
"{",
"join",
".",
"join",
"(",
"path",
")",
":",
"self",
".",
"message",
"}"
] |
Returns the error as a path to message dictionary. Paths are joined
with the ``join`` string.
|
[
"Returns",
"the",
"error",
"as",
"a",
"path",
"to",
"message",
"dictionary",
".",
"Paths",
"are",
"joined",
"with",
"the",
"join",
"string",
"."
] |
07b11536953b9cf4402c65f241706ab717b90bff
|
https://github.com/veeti/decent/blob/07b11536953b9cf4402c65f241706ab717b90bff/decent/error.py#L28-L37
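Since this record does not show the Error constructor, the behaviour is easiest to illustrate with a standalone function that mirrors the same joining logic:

# Standalone sketch of the path-joining behaviour shown above.
def error_as_dict(path, message, join='.'):
    key = join.join(str(node) for node in path) if path else ''
    return {key: message}

print(error_as_dict(['user', 'email'], 'Required field'))   # {'user.email': 'Required field'}
print(error_as_dict([], 'Top-level problem'))                # {'': 'Top-level problem'}
print(error_as_dict(['items', 0, 'price'], 'Must be positive', join='/'))
# {'items/0/price': 'Must be positive'}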
|
241,772
|
veeti/decent
|
decent/error.py
|
Invalid.as_dict
|
def as_dict(self, join='.'):
"""
Returns all the errors in this collection as a path to message
dictionary. Paths are joined with the ``join`` string.
"""
result = {}
for e in self.errors:
result.update(e.as_dict(join))
return result
|
python
|
def as_dict(self, join='.'):
"""
Returns all the errors in this collection as a path to message
dictionary. Paths are joined with the ``join`` string.
"""
result = {}
for e in self.errors:
result.update(e.as_dict(join))
return result
|
[
"def",
"as_dict",
"(",
"self",
",",
"join",
"=",
"'.'",
")",
":",
"result",
"=",
"{",
"}",
"for",
"e",
"in",
"self",
".",
"errors",
":",
"result",
".",
"update",
"(",
"e",
".",
"as_dict",
"(",
"join",
")",
")",
"return",
"result"
] |
Returns all the errors in this collection as a path to message
dictionary. Paths are joined with the ``join`` string.
|
[
"Returns",
"all",
"the",
"errors",
"in",
"this",
"collection",
"as",
"a",
"path",
"to",
"message",
"dictionary",
".",
"Paths",
"are",
"joined",
"with",
"the",
"join",
"string",
"."
] |
07b11536953b9cf4402c65f241706ab717b90bff
|
https://github.com/veeti/decent/blob/07b11536953b9cf4402c65f241706ab717b90bff/decent/error.py#L64-L72
|
241,773
|
cdeboever3/cdpybio
|
cdpybio/gencode.py
|
load_gffutils_db
|
def load_gffutils_db(f):
"""
Load database for gffutils.
Parameters
----------
f : str
Path to database.
Returns
-------
db : gffutils.FeatureDB
gffutils feature database.
"""
import gffutils
db = gffutils.FeatureDB(f, keep_order=True)
return db
|
python
|
def load_gffutils_db(f):
"""
Load database for gffutils.
Parameters
----------
f : str
Path to database.
Returns
-------
db : gffutils.FeatureDB
gffutils feature database.
"""
import gffutils
db = gffutils.FeatureDB(f, keep_order=True)
return db
|
[
"def",
"load_gffutils_db",
"(",
"f",
")",
":",
"import",
"gffutils",
"db",
"=",
"gffutils",
".",
"FeatureDB",
"(",
"f",
",",
"keep_order",
"=",
"True",
")",
"return",
"db"
] |
Load database for gffutils.
Parameters
----------
f : str
Path to database.
Returns
-------
db : gffutils.FeatureDB
gffutils feature database.
|
[
"Load",
"database",
"for",
"gffutils",
"."
] |
38efdf0e11d01bc00a135921cb91a19c03db5d5c
|
https://github.com/cdeboever3/cdpybio/blob/38efdf0e11d01bc00a135921cb91a19c03db5d5c/cdpybio/gencode.py#L18-L35
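A hedged usage sketch: the database filename is a placeholder and the example assumes the database was created beforehand (for instance with make_gffutils_db from the same module).

# Usage sketch: open an existing gffutils database and inspect one gene feature.
# 'gencode.v19.db' is a placeholder path, not taken from the source.
import gffutils

db = gffutils.FeatureDB('gencode.v19.db', keep_order=True)
for gene in db.features_of_type('gene'):
    print(gene.id, gene.seqid, gene.start, gene.end)
    break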
|
241,774
|
cdeboever3/cdpybio
|
cdpybio/gencode.py
|
make_gffutils_db
|
def make_gffutils_db(gtf, db):
"""
Make database for gffutils.
Parameters
----------
gtf : str
Path to Gencode gtf file.
db : str
Path to save database to.
Returns
-------
out_db : gffutils.FeatureDB
gffutils feature database.
"""
import gffutils
out_db = gffutils.create_db(gtf,
db,
keep_order=True,
infer_gene_extent=False)
return out_db
|
python
|
def make_gffutils_db(gtf, db):
"""
Make database for gffutils.
Parameters
----------
gtf : str
Path to Gencode gtf file.
db : str
Path to save database to.
Returns
-------
out_db : gffutils.FeatureDB
gffutils feature database.
"""
import gffutils
out_db = gffutils.create_db(gtf,
db,
keep_order=True,
infer_gene_extent=False)
return out_db
|
[
"def",
"make_gffutils_db",
"(",
"gtf",
",",
"db",
")",
":",
"import",
"gffutils",
"out_db",
"=",
"gffutils",
".",
"create_db",
"(",
"gtf",
",",
"db",
",",
"keep_order",
"=",
"True",
",",
"infer_gene_extent",
"=",
"False",
")",
"return",
"out_db"
] |
Make database for gffutils.
Parameters
----------
gtf : str
Path to Gencode gtf file.
db : str
Path to save database to.
Returns
-------
out_db : gffutils.FeatureDB
gffutils feature database.
|
[
"Make",
"database",
"for",
"gffutils",
"."
] |
38efdf0e11d01bc00a135921cb91a19c03db5d5c
|
https://github.com/cdeboever3/cdpybio/blob/38efdf0e11d01bc00a135921cb91a19c03db5d5c/cdpybio/gencode.py#L37-L60
|
241,775
|
cdeboever3/cdpybio
|
cdpybio/gencode.py
|
merge_bed_by_name
|
def merge_bed_by_name(bt):
"""
Merge intervals in a bed file when the intervals have the same name.
Intervals with the same name must be adjacent in the bed file.
"""
name_lines = dict()
for r in bt:
name = r.name
name_lines[name] = name_lines.get(name, []) + [[r.chrom, r.start,
r.end, r.name,
r.strand]]
new_lines = []
for name in name_lines.keys():
new_lines += _merge_interval_list(name_lines[name])
new_lines = ['\t'.join(map(str, x)) for x in new_lines]
return pbt.BedTool('\n'.join(new_lines) + '\n', from_string=True)
|
python
|
def merge_bed_by_name(bt):
"""
Merge intervals in a bed file when the intervals have the same name.
Intervals with the same name must be adjacent in the bed file.
"""
name_lines = dict()
for r in bt:
name = r.name
name_lines[name] = name_lines.get(name, []) + [[r.chrom, r.start,
r.end, r.name,
r.strand]]
new_lines = []
for name in name_lines.keys():
new_lines += _merge_interval_list(name_lines[name])
new_lines = ['\t'.join(map(str, x)) for x in new_lines]
return pbt.BedTool('\n'.join(new_lines) + '\n', from_string=True)
|
[
"def",
"merge_bed_by_name",
"(",
"bt",
")",
":",
"name_lines",
"=",
"dict",
"(",
")",
"for",
"r",
"in",
"bt",
":",
"name",
"=",
"r",
".",
"name",
"name_lines",
"[",
"name",
"]",
"=",
"name_lines",
".",
"get",
"(",
"name",
",",
"[",
"]",
")",
"+",
"[",
"[",
"r",
".",
"chrom",
",",
"r",
".",
"start",
",",
"r",
".",
"end",
",",
"r",
".",
"name",
",",
"r",
".",
"strand",
"]",
"]",
"new_lines",
"=",
"[",
"]",
"for",
"name",
"in",
"name_lines",
".",
"keys",
"(",
")",
":",
"new_lines",
"+=",
"_merge_interval_list",
"(",
"name_lines",
"[",
"name",
"]",
")",
"new_lines",
"=",
"[",
"'\\t'",
".",
"join",
"(",
"map",
"(",
"str",
",",
"x",
")",
")",
"for",
"x",
"in",
"new_lines",
"]",
"return",
"pbt",
".",
"BedTool",
"(",
"'\\n'",
".",
"join",
"(",
"new_lines",
")",
"+",
"'\\n'",
",",
"from_string",
"=",
"True",
")"
] |
Merge intervals in a bed file when the intervals have the same name.
Intervals with the same name must be adjacent in the bed file.
|
[
"Merge",
"intervals",
"in",
"a",
"bed",
"file",
"when",
"the",
"intervals",
"have",
"the",
"same",
"name",
".",
"Intervals",
"with",
"the",
"same",
"name",
"must",
"be",
"adjacent",
"in",
"the",
"bed",
"file",
"."
] |
38efdf0e11d01bc00a135921cb91a19c03db5d5c
|
https://github.com/cdeboever3/cdpybio/blob/38efdf0e11d01bc00a135921cb91a19c03db5d5c/cdpybio/gencode.py#L207-L223
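The record relies on a private _merge_interval_list helper that is not shown, so the simplest interpretation of the grouping step is sketched standalone below; the real helper may treat gaps and strands differently.

# Standalone sketch: group intervals by name, then collapse each group to its
# min start / max end. This only illustrates the grouping idea.
intervals = [
    ('chr1', 100, 200, 'geneA', '+'),
    ('chr1', 150, 300, 'geneA', '+'),
    ('chr2', 50, 80, 'geneB', '-'),
]
by_name = {}
for chrom, start, end, name, strand in intervals:
    by_name.setdefault(name, []).append((chrom, start, end, strand))
for name, ivs in by_name.items():
    chrom, strand = ivs[0][0], ivs[0][3]
    start = min(iv[1] for iv in ivs)
    end = max(iv[2] for iv in ivs)
    print('\t'.join(map(str, (chrom, start, end, name, strand))))
# chr1  100  300  geneA  +
# chr2  50   80   geneB  -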
|
241,776
|
cdeboever3/cdpybio
|
cdpybio/gencode.py
|
make_feature_bed
|
def make_feature_bed(gtf, feature, out=None):
"""
Make a bed file with the start and stop coordinates for all of a particular
feature in Gencode. Valid features are the features present in the third
column of the Gencode GTF file.
Parameters
----------
gtf : str
Filename of the Gencode gtf file.
feature : str
Feature from third column of Gencode GTF file. As of v19, these include
CDS, exon, gene, Selenocysteine, start_codon, stop_codon, transcript,
and UTR.
out : str
If provided, the bed file will be written to a file with this name.
Returns
-------
bed : pybedtools.BedTool
A sorted pybedtools BedTool object.
"""
bed_lines = []
with open(gtf) as f:
line = f.readline().strip()
while line != '':
if line[0] != '#':
line = line.split('\t')
if line[2] == feature:
chrom = line[0]
start = str(int(line[3]) - 1)
end = line[4]
if feature == 'gene':
name = line[8].split(';')[0].split(' ')[1].strip('"')
else:
# TODO: I may want to have some smarter naming for
# things that aren't genes or transcripts.
name = line[8].split(';')[1].split(' ')[2].strip('"')
strand = line[6]
bed_lines.append('\t'.join([chrom, start, end, name, '.',
strand]) + '\n')
line = f.readline().strip()
bt = pbt.BedTool(''.join(bed_lines), from_string=True)
# We'll sort so bedtools operations can be done faster.
bt = bt.sort()
if out:
bt.saveas(out)
return bt
|
python
|
def make_feature_bed(gtf, feature, out=None):
"""
Make a bed file with the start and stop coordinates for all of a particular
feature in Gencode. Valid features are the features present in the third
column of the Gencode GTF file.
Parameters
----------
gtf : str
Filename of the Gencode gtf file.
feature : str
Feature from third column of Gencode GTF file. As of v19, these include
CDS, exon, gene, Selenocysteine, start_codon, stop_codon, transcript,
and UTR.
out : str
If provided, the bed file will be written to a file with this name.
Returns
-------
bed : pybedtools.BedTool
A sorted pybedtools BedTool object.
"""
bed_lines = []
with open(gtf) as f:
line = f.readline().strip()
while line != '':
if line[0] != '#':
line = line.split('\t')
if line[2] == feature:
chrom = line[0]
start = str(int(line[3]) - 1)
end = line[4]
if feature == 'gene':
name = line[8].split(';')[0].split(' ')[1].strip('"')
else:
# TODO: I may want to have some smarter naming for
# things that aren't genes or transcripts.
name = line[8].split(';')[1].split(' ')[2].strip('"')
strand = line[6]
bed_lines.append('\t'.join([chrom, start, end, name, '.',
strand]) + '\n')
line = f.readline().strip()
bt = pbt.BedTool(''.join(bed_lines), from_string=True)
# We'll sort so bedtools operations can be done faster.
bt = bt.sort()
if out:
bt.saveas(out)
return bt
|
[
"def",
"make_feature_bed",
"(",
"gtf",
",",
"feature",
",",
"out",
"=",
"None",
")",
":",
"bed_lines",
"=",
"[",
"]",
"with",
"open",
"(",
"gtf",
")",
"as",
"f",
":",
"line",
"=",
"f",
".",
"readline",
"(",
")",
".",
"strip",
"(",
")",
"while",
"line",
"!=",
"''",
":",
"if",
"line",
"[",
"0",
"]",
"!=",
"'#'",
":",
"line",
"=",
"line",
".",
"split",
"(",
"'\\t'",
")",
"if",
"line",
"[",
"2",
"]",
"==",
"feature",
":",
"chrom",
"=",
"line",
"[",
"0",
"]",
"start",
"=",
"str",
"(",
"int",
"(",
"line",
"[",
"3",
"]",
")",
"-",
"1",
")",
"end",
"=",
"line",
"[",
"4",
"]",
"if",
"feature",
"==",
"'gene'",
":",
"name",
"=",
"line",
"[",
"8",
"]",
".",
"split",
"(",
"';'",
")",
"[",
"0",
"]",
".",
"split",
"(",
"' '",
")",
"[",
"1",
"]",
".",
"strip",
"(",
"'\"'",
")",
"else",
":",
"# TODO: I may want to have some smarter naming for",
"# things that aren't genes or transcripts.",
"name",
"=",
"line",
"[",
"8",
"]",
".",
"split",
"(",
"';'",
")",
"[",
"1",
"]",
".",
"split",
"(",
"' '",
")",
"[",
"2",
"]",
".",
"strip",
"(",
"'\"'",
")",
"strand",
"=",
"line",
"[",
"6",
"]",
"bed_lines",
".",
"append",
"(",
"'\\t'",
".",
"join",
"(",
"[",
"chrom",
",",
"start",
",",
"end",
",",
"name",
",",
"'.'",
",",
"strand",
"]",
")",
"+",
"'\\n'",
")",
"line",
"=",
"f",
".",
"readline",
"(",
")",
".",
"strip",
"(",
")",
"bt",
"=",
"pbt",
".",
"BedTool",
"(",
"''",
".",
"join",
"(",
"bed_lines",
")",
",",
"from_string",
"=",
"True",
")",
"# We'll sort so bedtools operations can be done faster.",
"bt",
"=",
"bt",
".",
"sort",
"(",
")",
"if",
"out",
":",
"bt",
".",
"saveas",
"(",
"out",
")",
"return",
"bt"
] |
Make a bed file with the start and stop coordinates for all of a particular
feature in Gencode. Valid features are the features present in the third
column of the Gencode GTF file.
Parameters
----------
gtf : str
Filename of the Gencode gtf file.
feature : str
Feature from third column of Gencode GTF file. As of v19, these include
CDS, exon, gene, Selenocysteine, start_codon, stop_codon, transcript,
and UTR.
out : str
If provided, the bed file will be written to a file with this name.
Returns
-------
bed : pybedtools.BedTool
A sorted pybedtools BedTool object.
|
[
"Make",
"a",
"bed",
"file",
"with",
"the",
"start",
"and",
"stop",
"coordinates",
"for",
"all",
"of",
"a",
"particular",
"feature",
"in",
"Gencode",
".",
"Valid",
"features",
"are",
"the",
"features",
"present",
"in",
"the",
"third",
"column",
"of",
"the",
"Gencode",
"GTF",
"file",
"."
] |
38efdf0e11d01bc00a135921cb91a19c03db5d5c
|
https://github.com/cdeboever3/cdpybio/blob/38efdf0e11d01bc00a135921cb91a19c03db5d5c/cdpybio/gencode.py#L225-L275
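A worked example of the coordinate shift and attribute parsing used above, on a single illustrative GTF gene line: GTF is 1-based and end-inclusive while BED is 0-based and half-open, so only the start is decremented.

# Worked example of GTF -> BED conversion for one gene line (the fields below
# are illustrative sample data, not read from a file).
fields = ['chr1', 'HAVANA', 'gene', '11869', '14409', '.', '+', '.',
          'gene_id "ENSG00000223972.4"; gene_type "pseudogene";']
chrom, start, end = fields[0], str(int(fields[3]) - 1), fields[4]
name = fields[8].split(';')[0].split(' ')[1].strip('"')
print('\t'.join([chrom, start, end, name, '.', fields[6]]))
# chr1    11868   14409   ENSG00000223972.4       .       +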
|
241,777
|
cdeboever3/cdpybio
|
cdpybio/gencode.py
|
make_transcript_gene_se
|
def make_transcript_gene_se(fn):
"""
Make a Pandas Series with transcript ID's as the index and values as the
gene ID containing that transcript.
Parameters
----------
fn : str
Filename of the Gencode gtf file.
Returns
-------
se : pandas.Series
Make a Pandas Series with transcript ID's as the index and values as the
gene ID containing that transcript.
"""
import itertools as it
import HTSeq
gtf = it.islice(HTSeq.GFF_Reader(fn), None)
transcripts = []
genes = []
line = gtf.next()
while line != '':
if line.type == 'transcript':
transcripts.append(line.attr['transcript_id'])
genes.append(line.attr['gene_id'])
try:
line = gtf.next()
except StopIteration:
line = ''
return pd.Series(genes, index=transcripts)
|
python
|
def make_transcript_gene_se(fn):
"""
Make a Pandas Series with transcript ID's as the index and values as the
gene ID containing that transcript.
Parameters
----------
fn : str
Filename of the Gencode gtf file.
Returns
-------
se : pandas.Series
Make a Pandas Series with transcript ID's as the index and values as the
gene ID containing that transcript.
"""
import itertools as it
import HTSeq
gtf = it.islice(HTSeq.GFF_Reader(fn), None)
transcripts = []
genes = []
line = gtf.next()
while line != '':
if line.type == 'transcript':
transcripts.append(line.attr['transcript_id'])
genes.append(line.attr['gene_id'])
try:
line = gtf.next()
except StopIteration:
line = ''
return pd.Series(genes, index=transcripts)
|
[
"def",
"make_transcript_gene_se",
"(",
"fn",
")",
":",
"import",
"itertools",
"as",
"it",
"import",
"HTSeq",
"gtf",
"=",
"it",
".",
"islice",
"(",
"HTSeq",
".",
"GFF_Reader",
"(",
"fn",
")",
",",
"None",
")",
"transcripts",
"=",
"[",
"]",
"genes",
"=",
"[",
"]",
"line",
"=",
"gtf",
".",
"next",
"(",
")",
"while",
"line",
"!=",
"''",
":",
"if",
"line",
".",
"type",
"==",
"'transcript'",
":",
"transcripts",
".",
"append",
"(",
"line",
".",
"attr",
"[",
"'transcript_id'",
"]",
")",
"genes",
".",
"append",
"(",
"line",
".",
"attr",
"[",
"'gene_id'",
"]",
")",
"try",
":",
"line",
"=",
"gtf",
".",
"next",
"(",
")",
"except",
"StopIteration",
":",
"line",
"=",
"''",
"return",
"pd",
".",
"Series",
"(",
"genes",
",",
"index",
"=",
"transcripts",
")"
] |
Make a Pandas Series with transcript ID's as the index and values as the
gene ID containing that transcript.
Parameters
----------
fn : str
Filename of the Gencode gtf file.
Returns
-------
se : pandas.Series
Make a Pandas Series with transcript ID's as the index and values as the
gene ID containing that transcript.
|
[
"Make",
"a",
"Pandas",
"Series",
"with",
"transcript",
"ID",
"s",
"as",
"the",
"index",
"and",
"values",
"as",
"the",
"gene",
"ID",
"containing",
"that",
"transcript",
"."
] |
38efdf0e11d01bc00a135921cb91a19c03db5d5c
|
https://github.com/cdeboever3/cdpybio/blob/38efdf0e11d01bc00a135921cb91a19c03db5d5c/cdpybio/gencode.py#L320-L354
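The shape of the result is easy to show with toy identifiers; note that the record's gtf.next() calls are Python 2 style and would be next(gtf) on Python 3.

# Toy version of the returned Series: transcript ids as the index, containing
# gene ids as values (identifiers below are made up).
import pandas as pd

se = pd.Series(['ENSG_A', 'ENSG_A', 'ENSG_B'],
               index=['ENST_1', 'ENST_2', 'ENST_3'])
print(se['ENST_2'])        # ENSG_A
print(se.value_counts())   # number of transcripts per gene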
|
241,778
|
cdeboever3/cdpybio
|
cdpybio/gencode.py
|
make_gene_info_df
|
def make_gene_info_df(fn):
"""
Make a Pandas dataframe with gene information
Parameters
----------
fn : str of filename
Filename of the Gencode gtf file
Returns
-------
df : pandas.DataFrame
Pandas dataframe indexed by gene id with the following columns:
gene_type, gene_status, gene_name.
"""
import itertools as it
import HTSeq
gff_iter = it.islice(HTSeq.GFF_Reader(fn), None)
convD = dict()
eof = False
while not eof:
try:
entry = gff_iter.next()
if entry.type == 'gene':
convD[entry.attr['gene_id']] = [entry.attr['gene_name'],
entry.attr['gene_type'],
entry.iv.chrom,
entry.iv.start,
entry.iv.end,
entry.iv.strand,
entry.attr['gene_status'],
entry.source,
entry.attr['level']]
except StopIteration:
eof = True
ind = ['gene_name', 'gene_type', 'chrom', 'start', 'end', 'strand',
'gene_status', 'source', 'level']
df = pd.DataFrame(convD, index=ind).T
df.index.name = 'gene_id'
return df
|
python
|
def make_gene_info_df(fn):
"""
Make a Pandas dataframe with gene information
Parameters
----------
fn : str of filename
Filename of the Gencode gtf file
Returns
-------
df : pandas.DataFrame
Pandas dataframe indexed by gene id with the following columns:
gene_type, gene_status, gene_name.
"""
import itertools as it
import HTSeq
gff_iter = it.islice(HTSeq.GFF_Reader(fn), None)
convD = dict()
eof = False
while not eof:
try:
entry = gff_iter.next()
if entry.type == 'gene':
convD[entry.attr['gene_id']] = [entry.attr['gene_name'],
entry.attr['gene_type'],
entry.iv.chrom,
entry.iv.start,
entry.iv.end,
entry.iv.strand,
entry.attr['gene_status'],
entry.source,
entry.attr['level']]
except StopIteration:
eof = True
ind = ['gene_name', 'gene_type', 'chrom', 'start', 'end', 'strand',
'gene_status', 'source', 'level']
df = pd.DataFrame(convD, index=ind).T
df.index.name = 'gene_id'
return df
|
[
"def",
"make_gene_info_df",
"(",
"fn",
")",
":",
"import",
"itertools",
"as",
"it",
"import",
"HTSeq",
"gff_iter",
"=",
"it",
".",
"islice",
"(",
"HTSeq",
".",
"GFF_Reader",
"(",
"fn",
")",
",",
"None",
")",
"convD",
"=",
"dict",
"(",
")",
"eof",
"=",
"False",
"while",
"not",
"eof",
":",
"try",
":",
"entry",
"=",
"gff_iter",
".",
"next",
"(",
")",
"if",
"entry",
".",
"type",
"==",
"'gene'",
":",
"convD",
"[",
"entry",
".",
"attr",
"[",
"'gene_id'",
"]",
"]",
"=",
"[",
"entry",
".",
"attr",
"[",
"'gene_name'",
"]",
",",
"entry",
".",
"attr",
"[",
"'gene_type'",
"]",
",",
"entry",
".",
"iv",
".",
"chrom",
",",
"entry",
".",
"iv",
".",
"start",
",",
"entry",
".",
"iv",
".",
"end",
",",
"entry",
".",
"iv",
".",
"strand",
",",
"entry",
".",
"attr",
"[",
"'gene_status'",
"]",
",",
"entry",
".",
"source",
",",
"entry",
".",
"attr",
"[",
"'level'",
"]",
"]",
"except",
"StopIteration",
":",
"eof",
"=",
"True",
"ind",
"=",
"[",
"'gene_name'",
",",
"'gene_type'",
",",
"'chrom'",
",",
"'start'",
",",
"'end'",
",",
"'strand'",
",",
"'gene_status'",
",",
"'source'",
",",
"'level'",
"]",
"df",
"=",
"pd",
".",
"DataFrame",
"(",
"convD",
",",
"index",
"=",
"ind",
")",
".",
"T",
"df",
".",
"index",
".",
"name",
"=",
"'gene_id'",
"return",
"df"
] |
Make a Pandas dataframe with gene information
Parameters
----------
fn : str of filename
Filename of the Gencode gtf file
Returns
-------
df : pandas.DataFrame
Pandas dataframe indexed by gene id with the following columns:
gene_type, gene_status, gene_name.
|
[
"Make",
"a",
"Pandas",
"dataframe",
"with",
"gene",
"information"
] |
38efdf0e11d01bc00a135921cb91a19c03db5d5c
|
https://github.com/cdeboever3/cdpybio/blob/38efdf0e11d01bc00a135921cb91a19c03db5d5c/cdpybio/gencode.py#L356-L400
|
241,779
|
cdeboever3/cdpybio
|
cdpybio/gencode.py
|
make_splice_junction_df
|
def make_splice_junction_df(fn, type='gene'):
"""Read the Gencode gtf file and make a pandas dataframe describing the
splice junctions
Parameters
----------
filename : str of filename
Filename of the Gencode gtf file
Returns
-------
df : pandas.DataFrame
Dataframe of splice junctions with the following columns 'gene',
'chrom', 'start', 'end', 'strand', 'chrom:start', 'chrom:end', 'donor',
'acceptor', 'intron'
"""
import itertools as it
import HTSeq
import numpy as np
# GFF_Reader has an option for end_included. However, I think it is
# backwards. So if your gtf is end-inclusive, you want the default
# (end_included=False). With this, one will NOT be subtracted from the end
# coordinate.
gffI = it.islice(HTSeq.GFF_Reader(fn), None)
juncL = []
eof = False
entry = gffI.next()
count = 1
last_count = 1
while not eof:
if entry.type == 'transcript':
exonL = []
entry = gffI.next()
count += 1
gene = entry.attr['gene_id']
strand = entry.iv.strand
while not eof and entry.type != 'transcript':
if entry.type == 'exon':
exonL.append(entry)
try:
entry = gffI.next()
count += 1
except StopIteration:
eof = True
# The gencode gtf file has one based, end inclusive coordinates for
# exons. HTSeq represents intervals as zero based, end exclusive.
# We need one-based, end inclusive to compare with STAR output.
if len(exonL) > 1:
chrom = exonL[0].iv.chrom
# On the minus strand, order of exons in gtf file is reversed.
if strand == '-':
exonL.reverse()
# We take the exclusive end of the exon intervals and add one to
# make the one-based start of the intron.
startL = [ x.iv.end + 1 for x in exonL[:-1] ]
# The zero-based inclusive start of the exon is the one-based
# inclusive end of the intron.
endL = [ x.iv.start for x in exonL[1:] ]
for i in range(len(startL)):
start = startL[i]
end = endL[i]
jxn = '{0}:{1}-{2}:{3}'.format(chrom, start, end, strand)
chrstart = '{}:{}'.format(chrom, start)
chrend = '{}:{}'.format(chrom, end)
donor = _gencode_donor(chrom, start, end, strand)
acceptor = _gencode_acceptor(chrom, start, end, strand)
intron = '{}:{}-{}'.format(chrom, start, end)
juncL.append([jxn, gene, chrom, str(start), str(end),
strand, chrstart, chrend, donor, acceptor,
intron])
else:
try:
entry = gffI.next()
count += 1
except StopIteration:
eof = True
last_count += 1
header = ['gene', 'chrom', 'start', 'end', 'strand', 'chrom:start',
'chrom:end', 'donor', 'acceptor', 'intron']
juncA = np.array(juncL)
df = pd.DataFrame(juncA[:,1:], index=juncA[:,0],
columns=header).drop_duplicates()
df['start'] = df.start.astype(int)
df['end'] = df.end.astype(int)
return df
|
python
|
def make_splice_junction_df(fn, type='gene'):
"""Read the Gencode gtf file and make a pandas dataframe describing the
splice junctions
Parameters
----------
filename : str of filename
Filename of the Gencode gtf file
Returns
-------
df : pandas.DataFrame
Dataframe of splice junctions with the following columns 'gene',
'chrom', 'start', 'end', 'strand', 'chrom:start', 'chrom:end', 'donor',
'acceptor', 'intron'
"""
import itertools as it
import HTSeq
import numpy as np
# GFF_Reader has an option for end_included. However, I think it is
# backwards. So if your gtf is end-inclusive, you want the default
# (end_included=False). With this, one will NOT be subtracted from the end
# coordinate.
gffI = it.islice(HTSeq.GFF_Reader(fn), None)
juncL = []
eof = False
entry = gffI.next()
count = 1
last_count = 1
while not eof:
if entry.type == 'transcript':
exonL = []
entry = gffI.next()
count += 1
gene = entry.attr['gene_id']
strand = entry.iv.strand
while not eof and entry.type != 'transcript':
if entry.type == 'exon':
exonL.append(entry)
try:
entry = gffI.next()
count += 1
except StopIteration:
eof = True
# The gencode gtf file has one based, end inclusive coordinates for
# exons. HTSeq represents intervals as zero based, end exclusive.
# We need one-based, end inclusive to compare with STAR output.
if len(exonL) > 1:
chrom = exonL[0].iv.chrom
# On the minus strand, order of exons in gtf file is reversed.
if strand == '-':
exonL.reverse()
# We take the exclusive end of the exon intervals and add one to
# make the one-based start of the intron.
startL = [ x.iv.end + 1 for x in exonL[:-1] ]
# The zero-based inclusive start of the exon is the one-based
# inclusive end of the intron.
endL = [ x.iv.start for x in exonL[1:] ]
for i in range(len(startL)):
start = startL[i]
end = endL[i]
jxn = '{0}:{1}-{2}:{3}'.format(chrom, start, end, strand)
chrstart = '{}:{}'.format(chrom, start)
chrend = '{}:{}'.format(chrom, end)
donor = _gencode_donor(chrom, start, end, strand)
acceptor = _gencode_acceptor(chrom, start, end, strand)
intron = '{}:{}-{}'.format(chrom, start, end)
juncL.append([jxn, gene, chrom, str(start), str(end),
strand, chrstart, chrend, donor, acceptor,
intron])
else:
try:
entry = gffI.next()
count += 1
except StopIteration:
eof = True
last_count += 1
header = ['gene', 'chrom', 'start', 'end', 'strand', 'chrom:start',
'chrom:end', 'donor', 'acceptor', 'intron']
juncA = np.array(juncL)
df = pd.DataFrame(juncA[:,1:], index=juncA[:,0],
columns=header).drop_duplicates()
df['start'] = df.start.astype(int)
df['end'] = df.end.astype(int)
return df
|
[
"def",
"make_splice_junction_df",
"(",
"fn",
",",
"type",
"=",
"'gene'",
")",
":",
"import",
"itertools",
"as",
"it",
"import",
"HTSeq",
"import",
"numpy",
"as",
"np",
"# GFF_Reader has an option for end_included. However, I think it is",
"# backwards. So if your gtf is end-inclusive, you want the default",
"# (end_included=False). With this, one will NOT be subtracted from the end",
"# coordinate.",
"gffI",
"=",
"it",
".",
"islice",
"(",
"HTSeq",
".",
"GFF_Reader",
"(",
"fn",
")",
",",
"None",
")",
"juncL",
"=",
"[",
"]",
"eof",
"=",
"False",
"entry",
"=",
"gffI",
".",
"next",
"(",
")",
"count",
"=",
"1",
"last_count",
"=",
"1",
"while",
"not",
"eof",
":",
"if",
"entry",
".",
"type",
"==",
"'transcript'",
":",
"exonL",
"=",
"[",
"]",
"entry",
"=",
"gffI",
".",
"next",
"(",
")",
"count",
"+=",
"1",
"gene",
"=",
"entry",
".",
"attr",
"[",
"'gene_id'",
"]",
"strand",
"=",
"entry",
".",
"iv",
".",
"strand",
"while",
"not",
"eof",
"and",
"entry",
".",
"type",
"!=",
"'transcript'",
":",
"if",
"entry",
".",
"type",
"==",
"'exon'",
":",
"exonL",
".",
"append",
"(",
"entry",
")",
"try",
":",
"entry",
"=",
"gffI",
".",
"next",
"(",
")",
"count",
"+=",
"1",
"except",
"StopIteration",
":",
"eof",
"=",
"True",
"# The gencode gtf file has one based, end inclusive coordinates for",
"# exons. HTSeq represents intervals as zero based, end exclusive.",
"# We need one-based, end inclusive to compare with STAR output.",
"if",
"len",
"(",
"exonL",
")",
">",
"1",
":",
"chrom",
"=",
"exonL",
"[",
"0",
"]",
".",
"iv",
".",
"chrom",
"# On the minus strand, order of exons in gtf file is reversed.",
"if",
"strand",
"==",
"'-'",
":",
"exonL",
".",
"reverse",
"(",
")",
"# We take the exclusive end of the exon intervals and add one to",
"# make the one-based start of the intron.",
"startL",
"=",
"[",
"x",
".",
"iv",
".",
"end",
"+",
"1",
"for",
"x",
"in",
"exonL",
"[",
":",
"-",
"1",
"]",
"]",
"# The zero-based inclusive start of the exon is the one-based",
"# inclusive end of the intron.",
"endL",
"=",
"[",
"x",
".",
"iv",
".",
"start",
"for",
"x",
"in",
"exonL",
"[",
"1",
":",
"]",
"]",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"startL",
")",
")",
":",
"start",
"=",
"startL",
"[",
"i",
"]",
"end",
"=",
"endL",
"[",
"i",
"]",
"jxn",
"=",
"'{0}:{1}-{2}:{3}'",
".",
"format",
"(",
"chrom",
",",
"start",
",",
"end",
",",
"strand",
")",
"chrstart",
"=",
"'{}:{}'",
".",
"format",
"(",
"chrom",
",",
"start",
")",
"chrend",
"=",
"'{}:{}'",
".",
"format",
"(",
"chrom",
",",
"end",
")",
"donor",
"=",
"_gencode_donor",
"(",
"chrom",
",",
"start",
",",
"end",
",",
"strand",
")",
"acceptor",
"=",
"_gencode_acceptor",
"(",
"chrom",
",",
"start",
",",
"end",
",",
"strand",
")",
"intron",
"=",
"'{}:{}-{}'",
".",
"format",
"(",
"chrom",
",",
"start",
",",
"end",
")",
"juncL",
".",
"append",
"(",
"[",
"jxn",
",",
"gene",
",",
"chrom",
",",
"str",
"(",
"start",
")",
",",
"str",
"(",
"end",
")",
",",
"strand",
",",
"chrstart",
",",
"chrend",
",",
"donor",
",",
"acceptor",
",",
"intron",
"]",
")",
"else",
":",
"try",
":",
"entry",
"=",
"gffI",
".",
"next",
"(",
")",
"count",
"+=",
"1",
"except",
"StopIteration",
":",
"eof",
"=",
"True",
"last_count",
"+=",
"1",
"header",
"=",
"[",
"'gene'",
",",
"'chrom'",
",",
"'start'",
",",
"'end'",
",",
"'strand'",
",",
"'chrom:start'",
",",
"'chrom:end'",
",",
"'donor'",
",",
"'acceptor'",
",",
"'intron'",
"]",
"juncA",
"=",
"np",
".",
"array",
"(",
"juncL",
")",
"df",
"=",
"pd",
".",
"DataFrame",
"(",
"juncA",
"[",
":",
",",
"1",
":",
"]",
",",
"index",
"=",
"juncA",
"[",
":",
",",
"0",
"]",
",",
"columns",
"=",
"header",
")",
".",
"drop_duplicates",
"(",
")",
"df",
"[",
"'start'",
"]",
"=",
"df",
".",
"start",
".",
"astype",
"(",
"int",
")",
"df",
"[",
"'end'",
"]",
"=",
"df",
".",
"end",
".",
"astype",
"(",
"int",
")",
"return",
"df"
] |
Read the Gencode gtf file and make a pandas dataframe describing the
splice junctions
Parameters
----------
filename : str of filename
Filename of the Gencode gtf file
Returns
-------
df : pandas.DataFrame
Dataframe of splice junctions with the following columns 'gene',
'chrom', 'start', 'end', 'strand', 'chrom:start', 'chrom:end', 'donor',
'acceptor', 'intron'
|
[
"Read",
"the",
"Gencode",
"gtf",
"file",
"and",
"make",
"a",
"pandas",
"dataframe",
"describing",
"the",
"splice",
"junctions"
] |
38efdf0e11d01bc00a135921cb91a19c03db5d5c
|
https://github.com/cdeboever3/cdpybio/blob/38efdf0e11d01bc00a135921cb91a19c03db5d5c/cdpybio/gencode.py#L402-L489
|
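A short sketch of working with the splice-junction table described in the record above. Here `df` stands for the DataFrame the function returns; only the column names ('gene', 'chrom', 'start', 'end', 'strand', 'donor', 'acceptor', ...) are taken from its docstring, and the two helper functions are illustrative, not part of cdpybio.

import pandas as pd

def junctions_per_gene(df: pd.DataFrame) -> pd.Series:
    """Count annotated splice junctions per gene, largest first."""
    return df.groupby('gene').size().sort_values(ascending=False)

def junctions_on(df: pd.DataFrame, chrom: str, strand: str) -> pd.DataFrame:
    """Select junctions on one chromosome and strand, ordered by intron start."""
    mask = (df['chrom'] == chrom) & (df['strand'] == strand)
    return df.loc[mask].sort_values('start')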
241,780
|
rsalmaso/django-fluo
|
fluo/views/decorators.py
|
login_required
|
def login_required(function=None, required=False, redirect_field_name=REDIRECT_FIELD_NAME):
"""
Decorator for views that, if required, checks that the user is logged in and redirect
to the log-in page if necessary.
"""
if required:
if django.VERSION < (1, 11):
actual_decorator = user_passes_test(
lambda u: u.is_authenticated(),
redirect_field_name=redirect_field_name
)
else:
actual_decorator = user_passes_test(
lambda u: u.is_authenticated,
redirect_field_name=redirect_field_name
)
if function:
return actual_decorator(function)
return actual_decorator
# login not required
def decorator(view_func):
def _wrapper(request, *args, **kwargs):
return function(request, *args, **kwargs)
return wraps(function)(_wrapper)
return method_decorator(decorator)
|
python
|
def login_required(function=None, required=False, redirect_field_name=REDIRECT_FIELD_NAME):
"""
Decorator for views that, if required, checks that the user is logged in and redirect
to the log-in page if necessary.
"""
if required:
if django.VERSION < (1, 11):
actual_decorator = user_passes_test(
lambda u: u.is_authenticated(),
redirect_field_name=redirect_field_name
)
else:
actual_decorator = user_passes_test(
lambda u: u.is_authenticated,
redirect_field_name=redirect_field_name
)
if function:
return actual_decorator(function)
return actual_decorator
# login not required
def decorator(view_func):
def _wrapper(request, *args, **kwargs):
return function(request, *args, **kwargs)
return wraps(function)(_wrapper)
return method_decorator(decorator)
|
[
"def",
"login_required",
"(",
"function",
"=",
"None",
",",
"required",
"=",
"False",
",",
"redirect_field_name",
"=",
"REDIRECT_FIELD_NAME",
")",
":",
"if",
"required",
":",
"if",
"django",
".",
"VERSION",
"<",
"(",
"1",
",",
"11",
")",
":",
"actual_decorator",
"=",
"user_passes_test",
"(",
"lambda",
"u",
":",
"u",
".",
"is_authenticated",
"(",
")",
",",
"redirect_field_name",
"=",
"redirect_field_name",
")",
"else",
":",
"actual_decorator",
"=",
"user_passes_test",
"(",
"lambda",
"u",
":",
"u",
".",
"is_authenticated",
",",
"redirect_field_name",
"=",
"redirect_field_name",
")",
"if",
"function",
":",
"return",
"actual_decorator",
"(",
"function",
")",
"return",
"actual_decorator",
"# login not required",
"def",
"decorator",
"(",
"view_func",
")",
":",
"def",
"_wrapper",
"(",
"request",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"function",
"(",
"request",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"wraps",
"(",
"function",
")",
"(",
"_wrapper",
")",
"return",
"method_decorator",
"(",
"decorator",
")"
] |
Decorator for views that, if required, checks that the user is logged in and redirect
to the log-in page if necessary.
|
[
"Decorator",
"for",
"views",
"that",
"if",
"required",
"checks",
"that",
"the",
"user",
"is",
"logged",
"in",
"and",
"redirect",
"to",
"the",
"log",
"-",
"in",
"page",
"if",
"necessary",
"."
] |
1321c1e7d6a912108f79be02a9e7f2108c57f89f
|
https://github.com/rsalmaso/django-fluo/blob/1321c1e7d6a912108f79be02a9e7f2108c57f89f/fluo/views/decorators.py#L54-L78
|
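A minimal sketch of applying the decorator in the record above to a Django view. The import path is inferred from the file path fluo/views/decorators.py and the view body is illustrative. With required=True the call returns a plain user_passes_test wrapper, so anonymous users are redirected to the login page; when required is falsy the wrapper simply calls the view through.

from django.http import HttpResponse
from fluo.views.decorators import login_required  # import path inferred from the record

@login_required(required=True)   # redirect anonymous users to the login page
def dashboard(request):
    return HttpResponse("members only")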
241,781
|
firstprayer/monsql
|
monsql/db.py
|
Database.is_table_existed
|
def is_table_existed(self, tablename):
"""
Check whether the given table name exists in this database. Return boolean.
"""
all_tablenames = self.list_tables()
tablename = tablename.lower()
if tablename in all_tablenames:
return True
else:
return False
|
python
|
def is_table_existed(self, tablename):
"""
Check whether the given table name exists in this database. Return boolean.
"""
all_tablenames = self.list_tables()
tablename = tablename.lower()
if tablename in all_tablenames:
return True
else:
return False
|
[
"def",
"is_table_existed",
"(",
"self",
",",
"tablename",
")",
":",
"all_tablenames",
"=",
"self",
".",
"list_tables",
"(",
")",
"tablename",
"=",
"tablename",
".",
"lower",
"(",
")",
"if",
"tablename",
"in",
"all_tablenames",
":",
"return",
"True",
"else",
":",
"return",
"False"
] |
Check whether the given table name exists in this database. Return boolean.
|
[
"Check",
"whether",
"the",
"given",
"table",
"name",
"exists",
"in",
"this",
"database",
".",
"Return",
"boolean",
"."
] |
6285c15b574c8664046eae2edfeb548c7b173efd
|
https://github.com/firstprayer/monsql/blob/6285c15b574c8664046eae2edfeb548c7b173efd/monsql/db.py#L97-L107
|
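A small sketch around the record above. How the monsql Database object is constructed is not shown here, so `db` stands for an already-initialised instance; note that the method lowercases the requested name before comparing it against list_tables(), so the check is case-insensitive with respect to its input.

def first_existing_table(db, candidates):
    """Return the first candidate table name that exists in `db`, or None."""
    for name in candidates:
        if db.is_table_existed(name):
            return name
    return None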
241,782
|
firstprayer/monsql
|
monsql/db.py
|
Database.drop_table
|
def drop_table(self, tablename, silent=False):
"""
Drop a table
:Parameters:
- tablename: string
- slient: boolean. If false and the table doesn't exists an exception will be raised;
Otherwise it will be ignored
:Return: Nothing
"""
if not silent and not self.is_table_existed(tablename):
raise MonSQLException('TABLE %s DOES NOT EXIST' %tablename)
self.__cursor.execute('DROP TABLE IF EXISTS %s' %(tablename))
self.__db.commit()
|
python
|
def drop_table(self, tablename, silent=False):
"""
Drop a table
:Parameters:
- tablename: string
- slient: boolean. If false and the table doesn't exists an exception will be raised;
Otherwise it will be ignored
:Return: Nothing
"""
if not silent and not self.is_table_existed(tablename):
raise MonSQLException('TABLE %s DOES NOT EXIST' %tablename)
self.__cursor.execute('DROP TABLE IF EXISTS %s' %(tablename))
self.__db.commit()
|
[
"def",
"drop_table",
"(",
"self",
",",
"tablename",
",",
"silent",
"=",
"False",
")",
":",
"if",
"not",
"silent",
"and",
"not",
"self",
".",
"is_table_existed",
"(",
"tablename",
")",
":",
"raise",
"MonSQLException",
"(",
"'TABLE %s DOES NOT EXIST'",
"%",
"tablename",
")",
"self",
".",
"__cursor",
".",
"execute",
"(",
"'DROP TABLE IF EXISTS %s'",
"%",
"(",
"tablename",
")",
")",
"self",
".",
"__db",
".",
"commit",
"(",
")"
] |
Drop a table
:Parameters:
- tablename: string
- slient: boolean. If false and the table doesn't exists an exception will be raised;
Otherwise it will be ignored
:Return: Nothing
|
[
"Drop",
"a",
"table"
] |
6285c15b574c8664046eae2edfeb548c7b173efd
|
https://github.com/firstprayer/monsql/blob/6285c15b574c8664046eae2edfeb548c7b173efd/monsql/db.py#L138-L155
|
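A sketch of the two ways to call drop_table from the record above; `db` again stands for an initialised monsql Database. With silent=True the existence check is skipped (the generated SQL already uses DROP TABLE IF EXISTS), while the default raises MonSQLException for a table that is not there.

def reset_table(db, tablename):
    """Drop a table, tolerating the case where it never existed."""
    db.drop_table(tablename, silent=True)

def strict_drop(db, tablename):
    """Drop a table, surfacing MonSQLException when it does not exist."""
    db.drop_table(tablename)   # silent defaults to False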
241,783
|
pyvec/pyvodb
|
pyvodb/cli/calendar.py
|
calendar
|
def calendar(ctx, date, agenda, year):
"""Show a 3-month calendar of meetups.
\b
date: The date around which the calendar is centered. May be:
- YYYY-MM-DD, YY-MM-DD, YYYY-MM or YY-MM (e.g. 2015-08)
- MM (e.g. 08): the given month in the current year
- pN (e.g. p1): N-th last month
- +N (e.g. +2): N-th next month
- Omitted: today
- YYYY: Show the entire year, as with -y
"""
do_full_year = year
today = ctx.obj['now'].date()
db = ctx.obj['db']
term = ctx.obj['term']
date_info = cliutil.parse_date(date)
if 'relative' in date_info:
year = today.year
month = today.month + date_info['relative']
elif 'date_based' in date_info:
year = date_info.get('year', today.year)
month = date_info.get('month', today.month)
if 'month' not in date_info and 'day' not in date_info:
do_full_year = True
else:
raise click.UsageError('Unknown date format')
if agenda is None:
agenda = not do_full_year
if do_full_year:
first_month = 1
num_months = 12
else:
first_month = month - 1
num_months = 3
calendar = get_calendar(db, year, first_month, num_months)
cliutil.handle_raw_output(ctx, list(calendar.values()))
render_calendar(term, calendar, today, agenda)
|
python
|
def calendar(ctx, date, agenda, year):
"""Show a 3-month calendar of meetups.
\b
date: The date around which the calendar is centered. May be:
- YYYY-MM-DD, YY-MM-DD, YYYY-MM or YY-MM (e.g. 2015-08)
- MM (e.g. 08): the given month in the current year
- pN (e.g. p1): N-th last month
- +N (e.g. +2): N-th next month
- Omitted: today
- YYYY: Show the entire year, as with -y
"""
do_full_year = year
today = ctx.obj['now'].date()
db = ctx.obj['db']
term = ctx.obj['term']
date_info = cliutil.parse_date(date)
if 'relative' in date_info:
year = today.year
month = today.month + date_info['relative']
elif 'date_based' in date_info:
year = date_info.get('year', today.year)
month = date_info.get('month', today.month)
if 'month' not in date_info and 'day' not in date_info:
do_full_year = True
else:
raise click.UsageError('Unknown date format')
if agenda is None:
agenda = not do_full_year
if do_full_year:
first_month = 1
num_months = 12
else:
first_month = month - 1
num_months = 3
calendar = get_calendar(db, year, first_month, num_months)
cliutil.handle_raw_output(ctx, list(calendar.values()))
render_calendar(term, calendar, today, agenda)
|
[
"def",
"calendar",
"(",
"ctx",
",",
"date",
",",
"agenda",
",",
"year",
")",
":",
"do_full_year",
"=",
"year",
"today",
"=",
"ctx",
".",
"obj",
"[",
"'now'",
"]",
".",
"date",
"(",
")",
"db",
"=",
"ctx",
".",
"obj",
"[",
"'db'",
"]",
"term",
"=",
"ctx",
".",
"obj",
"[",
"'term'",
"]",
"date_info",
"=",
"cliutil",
".",
"parse_date",
"(",
"date",
")",
"if",
"'relative'",
"in",
"date_info",
":",
"year",
"=",
"today",
".",
"year",
"month",
"=",
"today",
".",
"month",
"+",
"date_info",
"[",
"'relative'",
"]",
"elif",
"'date_based'",
"in",
"date_info",
":",
"year",
"=",
"date_info",
".",
"get",
"(",
"'year'",
",",
"today",
".",
"year",
")",
"month",
"=",
"date_info",
".",
"get",
"(",
"'month'",
",",
"today",
".",
"month",
")",
"if",
"'month'",
"not",
"in",
"date_info",
"and",
"'day'",
"not",
"in",
"date_info",
":",
"do_full_year",
"=",
"True",
"else",
":",
"raise",
"click",
".",
"UsageError",
"(",
"'Unknown date format'",
")",
"if",
"agenda",
"is",
"None",
":",
"agenda",
"=",
"not",
"do_full_year",
"if",
"do_full_year",
":",
"first_month",
"=",
"1",
"num_months",
"=",
"12",
"else",
":",
"first_month",
"=",
"month",
"-",
"1",
"num_months",
"=",
"3",
"calendar",
"=",
"get_calendar",
"(",
"db",
",",
"year",
",",
"first_month",
",",
"num_months",
")",
"cliutil",
".",
"handle_raw_output",
"(",
"ctx",
",",
"list",
"(",
"calendar",
".",
"values",
"(",
")",
")",
")",
"render_calendar",
"(",
"term",
",",
"calendar",
",",
"today",
",",
"agenda",
")"
] |
Show a 3-month calendar of meetups.
\b
date: The date around which the calendar is centered. May be:
- YYYY-MM-DD, YY-MM-DD, YYYY-MM or YY-MM (e.g. 2015-08)
- MM (e.g. 08): the given month in the current year
- pN (e.g. p1): N-th last month
- +N (e.g. +2): N-th next month
- Omitted: today
- YYYY: Show the entire year, as with -y
|
[
"Show",
"a",
"3",
"-",
"month",
"calendar",
"of",
"meetups",
"."
] |
07183333df26eb12c5c2b98802cde3fb3a6c1339
|
https://github.com/pyvec/pyvodb/blob/07183333df26eb12c5c2b98802cde3fb3a6c1339/pyvodb/cli/calendar.py#L15-L57
|
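A standalone illustration of the window arithmetic used by the command in the record above: a full year starts at month 1 and spans 12 months, otherwise the window starts one month before the requested month and spans 3 months. The helper name below is mine, not part of pyvodb, so this runs without the click context or a database.

def calendar_window(month, full_year):
    """Return (first_month, num_months) the way the command above computes them."""
    if full_year:
        return 1, 12
    return month - 1, 3

assert calendar_window(8, full_year=False) == (7, 3)   # 3-month window around August
assert calendar_window(3, full_year=True) == (1, 12)   # whole year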
241,784
|
Noneus/slojsonrpc
|
slojsonrpc/__init__.py
|
SLOJSONRPC.register
|
def register(self, obj):
'''
register all methods for of an object as json rpc methods
obj - object with methods
'''
for method in dir(obj):
#ignore private methods
if not method.startswith('_'):
fct = getattr(obj, method)
#only handle functions
try:
getattr(fct, '__call__')
except AttributeError:
pass
else:
logging.debug('JSONRPC: Found Method: "%s"' % method)
self._methods[method] = {
'argspec': inspect.getargspec(fct),
'fct': fct
}
|
python
|
def register(self, obj):
'''
register all methods for of an object as json rpc methods
obj - object with methods
'''
for method in dir(obj):
#ignore private methods
if not method.startswith('_'):
fct = getattr(obj, method)
#only handle functions
try:
getattr(fct, '__call__')
except AttributeError:
pass
else:
logging.debug('JSONRPC: Found Method: "%s"' % method)
self._methods[method] = {
'argspec': inspect.getargspec(fct),
'fct': fct
}
|
[
"def",
"register",
"(",
"self",
",",
"obj",
")",
":",
"for",
"method",
"in",
"dir",
"(",
"obj",
")",
":",
"#ignore private methods",
"if",
"not",
"method",
".",
"startswith",
"(",
"'_'",
")",
":",
"fct",
"=",
"getattr",
"(",
"obj",
",",
"method",
")",
"#only handle functions",
"try",
":",
"getattr",
"(",
"fct",
",",
"'__call__'",
")",
"except",
"AttributeError",
":",
"pass",
"else",
":",
"logging",
".",
"debug",
"(",
"'JSONRPC: Found Method: \"%s\"'",
"%",
"method",
")",
"self",
".",
"_methods",
"[",
"method",
"]",
"=",
"{",
"'argspec'",
":",
"inspect",
".",
"getargspec",
"(",
"fct",
")",
",",
"'fct'",
":",
"fct",
"}"
] |
register all methods for of an object as json rpc methods
obj - object with methods
|
[
"register",
"all",
"methods",
"for",
"of",
"an",
"object",
"as",
"json",
"rpc",
"methods"
] |
33b5d79486e1562fe4fc7b73b8f3014535168b03
|
https://github.com/Noneus/slojsonrpc/blob/33b5d79486e1562fe4fc7b73b8f3014535168b03/slojsonrpc/__init__.py#L139-L159
|
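A registration sketch for the record above. The SLOJSONRPC constructor arguments are not shown in this record, so passing an SQLAlchemy sessionmaker is an assumption; what the code above does show is that every public (non-underscore) callable on the handler is registered, and the handle_request record further down injects a database session as the first argument of each call.

import sqlalchemy
from sqlalchemy.orm import sessionmaker
from slojsonrpc import SLOJSONRPC

class PingHandler(object):
    def ping(self, session):
        return 'pong'

    def echo(self, session, text):
        return text

    def _internal(self):          # leading underscore, so never registered
        pass

Session = sessionmaker(bind=sqlalchemy.create_engine('sqlite://'))
rpc = SLOJSONRPC(Session)         # assumption: the constructor takes a sessionmaker
rpc.register(PingHandler())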
241,785
|
Noneus/slojsonrpc
|
slojsonrpc/__init__.py
|
SLOJSONRPC._validate_format
|
def _validate_format(req):
'''
Validate jsonrpc compliance of a jsonrpc-dict.
req - the request as a jsonrpc-dict
raises SLOJSONRPCError on validation error
'''
#check for all required keys
for key in SLOJSONRPC._min_keys:
if not key in req:
logging.debug('JSONRPC: Fmt Error: Need key "%s"' % key)
raise SLOJSONRPCError(-32600)
#check all keys if allowed
for key in req.keys():
if not key in SLOJSONRPC._allowed_keys:
logging.debug('JSONRPC: Fmt Error: Not allowed key "%s"' % key)
raise SLOJSONRPCError(-32600)
#needs to be jsonrpc 2.0
if req['jsonrpc'] != '2.0':
logging.debug('JSONRPC: Fmt Error: "jsonrpc" needs to be "2.0"')
raise SLOJSONRPCError(-32600)
|
python
|
def _validate_format(req):
'''
Validate jsonrpc compliance of a jsonrpc-dict.
req - the request as a jsonrpc-dict
raises SLOJSONRPCError on validation error
'''
#check for all required keys
for key in SLOJSONRPC._min_keys:
if not key in req:
logging.debug('JSONRPC: Fmt Error: Need key "%s"' % key)
raise SLOJSONRPCError(-32600)
#check all keys if allowed
for key in req.keys():
if not key in SLOJSONRPC._allowed_keys:
logging.debug('JSONRPC: Fmt Error: Not allowed key "%s"' % key)
raise SLOJSONRPCError(-32600)
#needs to be jsonrpc 2.0
if req['jsonrpc'] != '2.0':
logging.debug('JSONRPC: Fmt Error: "jsonrpc" needs to be "2.0"')
raise SLOJSONRPCError(-32600)
|
[
"def",
"_validate_format",
"(",
"req",
")",
":",
"#check for all required keys",
"for",
"key",
"in",
"SLOJSONRPC",
".",
"_min_keys",
":",
"if",
"not",
"key",
"in",
"req",
":",
"logging",
".",
"debug",
"(",
"'JSONRPC: Fmt Error: Need key \"%s\"'",
"%",
"key",
")",
"raise",
"SLOJSONRPCError",
"(",
"-",
"32600",
")",
"#check all keys if allowed",
"for",
"key",
"in",
"req",
".",
"keys",
"(",
")",
":",
"if",
"not",
"key",
"in",
"SLOJSONRPC",
".",
"_allowed_keys",
":",
"logging",
".",
"debug",
"(",
"'JSONRPC: Fmt Error: Not allowed key \"%s\"'",
"%",
"key",
")",
"raise",
"SLOJSONRPCError",
"(",
"-",
"32600",
")",
"#needs to be jsonrpc 2.0",
"if",
"req",
"[",
"'jsonrpc'",
"]",
"!=",
"'2.0'",
":",
"logging",
".",
"debug",
"(",
"'JSONRPC: Fmt Error: \"jsonrpc\" needs to be \"2.0\"'",
")",
"raise",
"SLOJSONRPCError",
"(",
"-",
"32600",
")"
] |
Validate jsonrpc compliance of a jsonrpc-dict.
req - the request as a jsonrpc-dict
raises SLOJSONRPCError on validation error
|
[
"Validate",
"jsonrpc",
"compliance",
"of",
"a",
"jsonrpc",
"-",
"dict",
"."
] |
33b5d79486e1562fe4fc7b73b8f3014535168b03
|
https://github.com/Noneus/slojsonrpc/blob/33b5d79486e1562fe4fc7b73b8f3014535168b03/slojsonrpc/__init__.py#L169-L192
|
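For orientation, requests shaped like the first dictionary below satisfy the checks in the record above (a standard JSON-RPC 2.0 envelope), while the other two are rejected with error code -32600. The exact _min_keys and _allowed_keys sets are defined elsewhere in the module, so this mirrors the JSON-RPC 2.0 spec rather than quoting those constants.

ok_request = {
    'jsonrpc': '2.0',
    'method': 'echo',
    'params': {'text': 'hi'},
    'id': 1,
}

bad_version = {'jsonrpc': '1.0', 'method': 'echo', 'id': 1}            # not "2.0"
unknown_key = {'jsonrpc': '2.0', 'method': 'echo', 'id': 1, 'x': True}  # extra key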
241,786
|
Noneus/slojsonrpc
|
slojsonrpc/__init__.py
|
SLOJSONRPC._validate_params
|
def _validate_params(self, req):
'''
Validate parameters of a jsonrpc-request.
req - request as a jsonrpc-dict
raises SLOJSONRPCError on validation error
'''
#does the method exist?
method = req['method']
if not method in self._methods:
raise SLOJSONRPCError(-32601)
fct = self._methods[method]['fct']
#'id' is only needed for none SLOJSONRPCNotification's
try:
getattr(fct, '__SLOJSONRPCNotification__')
if 'id' in req:
logging.debug('JSONRPC: Fmt Error: no id for SLOJSONRPCNotifications')
raise SLOJSONRPCError(-32602)
except AttributeError:
if not 'id' in req:
logging.debug('JSONRPC: Fmt Error: Need an id for non SLOJSONRPCNotifications')
raise SLOJSONRPCError(-32602)
#get arguments and defaults for the python-function representing
# the method
argspec = self._methods[method]['argspec']
args, defaults = list(argspec.args), \
list(argspec.defaults if argspec.defaults else [])
#ignore self and session
if 'self' in args:
args.remove('self')
args.remove('session')
#create required arguments. delete the ones with defaults
required = list(args)
if defaults:
for default in defaults:
required.pop()
#check if we need paremeters and there are none, then error
if len(required) > 0 and 'params' not in req:
logging.debug('JSONRPC: Parameter Error: More than zero params required')
raise SLOJSONRPCError(-32602)
if 'params' in req:
#parameters must be a dict if there is more then one
if not isinstance(req['params'], dict) and len(required) > 1:
logging.debug('JSONRPC: Parameter Error: "params" must be a dictionary')
raise SLOJSONRPCError(-32602)
if isinstance(req['params'], dict):
#check if required parameters are there
for key in required:
if not key in req['params']:
logging.debug('JSONRPC: Parameter Error: Required key "%s" is missing' % key)
raise SLOJSONRPCError(-32602)
#check if parameters are given that do not exist in the method
for key in req['params']:
if not key in required:
logging.debug('JSONRPC: Parameter Error: Key is not allowed "%s"' % key)
raise SLOJSONRPCError(-32602)
|
python
|
def _validate_params(self, req):
'''
Validate parameters of a jsonrpc-request.
req - request as a jsonrpc-dict
raises SLOJSONRPCError on validation error
'''
#does the method exist?
method = req['method']
if not method in self._methods:
raise SLOJSONRPCError(-32601)
fct = self._methods[method]['fct']
#'id' is only needed for none SLOJSONRPCNotification's
try:
getattr(fct, '__SLOJSONRPCNotification__')
if 'id' in req:
logging.debug('JSONRPC: Fmt Error: no id for SLOJSONRPCNotifications')
raise SLOJSONRPCError(-32602)
except AttributeError:
if not 'id' in req:
logging.debug('JSONRPC: Fmt Error: Need an id for non SLOJSONRPCNotifications')
raise SLOJSONRPCError(-32602)
#get arguments and defaults for the python-function representing
# the method
argspec = self._methods[method]['argspec']
args, defaults = list(argspec.args), \
list(argspec.defaults if argspec.defaults else [])
#ignore self and session
if 'self' in args:
args.remove('self')
args.remove('session')
#create required arguments. delete the ones with defaults
required = list(args)
if defaults:
for default in defaults:
required.pop()
#check if we need paremeters and there are none, then error
if len(required) > 0 and 'params' not in req:
logging.debug('JSONRPC: Parameter Error: More than zero params required')
raise SLOJSONRPCError(-32602)
if 'params' in req:
#parameters must be a dict if there is more then one
if not isinstance(req['params'], dict) and len(required) > 1:
logging.debug('JSONRPC: Parameter Error: "params" must be a dictionary')
raise SLOJSONRPCError(-32602)
if isinstance(req['params'], dict):
#check if required parameters are there
for key in required:
if not key in req['params']:
logging.debug('JSONRPC: Parameter Error: Required key "%s" is missing' % key)
raise SLOJSONRPCError(-32602)
#check if parameters are given that do not exist in the method
for key in req['params']:
if not key in required:
logging.debug('JSONRPC: Parameter Error: Key is not allowed "%s"' % key)
raise SLOJSONRPCError(-32602)
|
[
"def",
"_validate_params",
"(",
"self",
",",
"req",
")",
":",
"#does the method exist?",
"method",
"=",
"req",
"[",
"'method'",
"]",
"if",
"not",
"method",
"in",
"self",
".",
"_methods",
":",
"raise",
"SLOJSONRPCError",
"(",
"-",
"32601",
")",
"fct",
"=",
"self",
".",
"_methods",
"[",
"method",
"]",
"[",
"'fct'",
"]",
"#'id' is only needed for none SLOJSONRPCNotification's",
"try",
":",
"getattr",
"(",
"fct",
",",
"'__SLOJSONRPCNotification__'",
")",
"if",
"'id'",
"in",
"req",
":",
"logging",
".",
"debug",
"(",
"'JSONRPC: Fmt Error: no id for SLOJSONRPCNotifications'",
")",
"raise",
"SLOJSONRPCError",
"(",
"-",
"32602",
")",
"except",
"AttributeError",
":",
"if",
"not",
"'id'",
"in",
"req",
":",
"logging",
".",
"debug",
"(",
"'JSONRPC: Fmt Error: Need an id for non SLOJSONRPCNotifications'",
")",
"raise",
"SLOJSONRPCError",
"(",
"-",
"32602",
")",
"#get arguments and defaults for the python-function representing",
"# the method",
"argspec",
"=",
"self",
".",
"_methods",
"[",
"method",
"]",
"[",
"'argspec'",
"]",
"args",
",",
"defaults",
"=",
"list",
"(",
"argspec",
".",
"args",
")",
",",
"list",
"(",
"argspec",
".",
"defaults",
"if",
"argspec",
".",
"defaults",
"else",
"[",
"]",
")",
"#ignore self and session",
"if",
"'self'",
"in",
"args",
":",
"args",
".",
"remove",
"(",
"'self'",
")",
"args",
".",
"remove",
"(",
"'session'",
")",
"#create required arguments. delete the ones with defaults",
"required",
"=",
"list",
"(",
"args",
")",
"if",
"defaults",
":",
"for",
"default",
"in",
"defaults",
":",
"required",
".",
"pop",
"(",
")",
"#check if we need paremeters and there are none, then error",
"if",
"len",
"(",
"required",
")",
">",
"0",
"and",
"'params'",
"not",
"in",
"req",
":",
"logging",
".",
"debug",
"(",
"'JSONRPC: Parameter Error: More than zero params required'",
")",
"raise",
"SLOJSONRPCError",
"(",
"-",
"32602",
")",
"if",
"'params'",
"in",
"req",
":",
"#parameters must be a dict if there is more then one",
"if",
"not",
"isinstance",
"(",
"req",
"[",
"'params'",
"]",
",",
"dict",
")",
"and",
"len",
"(",
"required",
")",
">",
"1",
":",
"logging",
".",
"debug",
"(",
"'JSONRPC: Parameter Error: \"params\" must be a dictionary'",
")",
"raise",
"SLOJSONRPCError",
"(",
"-",
"32602",
")",
"if",
"isinstance",
"(",
"req",
"[",
"'params'",
"]",
",",
"dict",
")",
":",
"#check if required parameters are there",
"for",
"key",
"in",
"required",
":",
"if",
"not",
"key",
"in",
"req",
"[",
"'params'",
"]",
":",
"logging",
".",
"debug",
"(",
"'JSONRPC: Parameter Error: Required key \"%s\" is missing'",
"%",
"key",
")",
"raise",
"SLOJSONRPCError",
"(",
"-",
"32602",
")",
"#check if parameters are given that do not exist in the method",
"for",
"key",
"in",
"req",
"[",
"'params'",
"]",
":",
"if",
"not",
"key",
"in",
"required",
":",
"logging",
".",
"debug",
"(",
"'JSONRPC: Parameter Error: Key is not allowed \"%s\"'",
"%",
"key",
")",
"raise",
"SLOJSONRPCError",
"(",
"-",
"32602",
")"
] |
Validate parameters of a jsonrpc-request.
req - request as a jsonrpc-dict
raises SLOJSONRPCError on validation error
|
[
"Validate",
"parameters",
"of",
"a",
"jsonrpc",
"-",
"request",
"."
] |
33b5d79486e1562fe4fc7b73b8f3014535168b03
|
https://github.com/Noneus/slojsonrpc/blob/33b5d79486e1562fe4fc7b73b8f3014535168b03/slojsonrpc/__init__.py#L194-L259
|
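A standalone illustration of how the record above derives the required parameter list: take the method's positional arguments, drop self and session, then drop as many trailing names as there are defaults. The original uses inspect.getargspec (Python 2 era, removed in Python 3.11); the same computation with inspect.getfullargspec:

import inspect

def example_method(self, session, user_id, limit=10, offset=0):
    pass

spec = inspect.getfullargspec(example_method)
args = [a for a in spec.args if a not in ('self', 'session')]
n_defaults = len(spec.defaults or ())
required = args[:len(args) - n_defaults]

print(required)   # ['user_id'] -- 'limit' and 'offset' have defaults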
241,787
|
Noneus/slojsonrpc
|
slojsonrpc/__init__.py
|
SLOJSONRPC.handle_request
|
def handle_request(self, req, validate=True):
'''
handle a jsonrpc request
req - request as jsonrpc-dict
validate - validate the request? (default: True)
returns jsonrpc-dict with result or error
'''
#result that will be filled and returned
res = {'jsonrpc': '2.0', 'id': -1, 'result': None}
logging.debug('')
logging.debug('--------------------REQUEST' +
'--------------------\n' +
json.dumps(req,
sort_keys=True,
indent=4,
separators=(',', ': ')))
logging.debug('-----------------------------------------------')
notification = False
if self._sessionmaker:
session = self._sessionmaker()
try:
#validate request
if validate:
self._validate_format(req)
self._validate_params(req)
method = req['method']
#check if request is a notification
try:
getattr(self._methods[method]['fct'], '__SLOJSONRPCNotification__')
notification = True
except AttributeError:
notification = False
#call the python function
if 'params' in req:
fct = self._methods[method]['fct']
if isinstance(req['params'], dict):
req['params']['session'] = session
res['result'] = fct(**req['params'])
else:
res['result'] = fct(session, req['params'])
else:
res['result'] = self._methods[method]['fct'](session)
except SLOJSONRPCError as e:
res = e.to_json(req.get('id', None))
except:
logging.debug('Uncaught Exception:')
logging.debug('-------------------\n' + traceback.format_exc())
res = SLOJSONRPCError(-32603).to_json(req.get('id', None))
session.close()
logging.debug('--------------------RESULT' +
'--------------------\n' +
json.dumps(res,
sort_keys=True,
indent=4,
separators=(',', ': ')))
logging.debug('----------------------------------------------')
#return None if a notification
if notification:
return None
elif not 'error' in res:
res['id'] = req['id']
return res
|
python
|
def handle_request(self, req, validate=True):
'''
handle a jsonrpc request
req - request as jsonrpc-dict
validate - validate the request? (default: True)
returns jsonrpc-dict with result or error
'''
#result that will be filled and returned
res = {'jsonrpc': '2.0', 'id': -1, 'result': None}
logging.debug('')
logging.debug('--------------------REQUEST' +
'--------------------\n' +
json.dumps(req,
sort_keys=True,
indent=4,
separators=(',', ': ')))
logging.debug('-----------------------------------------------')
notification = False
if self._sessionmaker:
session = self._sessionmaker()
try:
#validate request
if validate:
self._validate_format(req)
self._validate_params(req)
method = req['method']
#check if request is a notification
try:
getattr(self._methods[method]['fct'], '__SLOJSONRPCNotification__')
notification = True
except AttributeError:
notification = False
#call the python function
if 'params' in req:
fct = self._methods[method]['fct']
if isinstance(req['params'], dict):
req['params']['session'] = session
res['result'] = fct(**req['params'])
else:
res['result'] = fct(session, req['params'])
else:
res['result'] = self._methods[method]['fct'](session)
except SLOJSONRPCError as e:
res = e.to_json(req.get('id', None))
except:
logging.debug('Uncaught Exception:')
logging.debug('-------------------\n' + traceback.format_exc())
res = SLOJSONRPCError(-32603).to_json(req.get('id', None))
session.close()
logging.debug('--------------------RESULT' +
'--------------------\n' +
json.dumps(res,
sort_keys=True,
indent=4,
separators=(',', ': ')))
logging.debug('----------------------------------------------')
#return None if a notification
if notification:
return None
elif not 'error' in res:
res['id'] = req['id']
return res
|
[
"def",
"handle_request",
"(",
"self",
",",
"req",
",",
"validate",
"=",
"True",
")",
":",
"#result that will be filled and returned",
"res",
"=",
"{",
"'jsonrpc'",
":",
"'2.0'",
",",
"'id'",
":",
"-",
"1",
",",
"'result'",
":",
"None",
"}",
"logging",
".",
"debug",
"(",
"''",
")",
"logging",
".",
"debug",
"(",
"'--------------------REQUEST'",
"+",
"'--------------------\\n'",
"+",
"json",
".",
"dumps",
"(",
"req",
",",
"sort_keys",
"=",
"True",
",",
"indent",
"=",
"4",
",",
"separators",
"=",
"(",
"','",
",",
"': '",
")",
")",
")",
"logging",
".",
"debug",
"(",
"'-----------------------------------------------'",
")",
"notification",
"=",
"False",
"if",
"self",
".",
"_sessionmaker",
":",
"session",
"=",
"self",
".",
"_sessionmaker",
"(",
")",
"try",
":",
"#validate request",
"if",
"validate",
":",
"self",
".",
"_validate_format",
"(",
"req",
")",
"self",
".",
"_validate_params",
"(",
"req",
")",
"method",
"=",
"req",
"[",
"'method'",
"]",
"#check if request is a notification",
"try",
":",
"getattr",
"(",
"self",
".",
"_methods",
"[",
"method",
"]",
"[",
"'fct'",
"]",
",",
"'__SLOJSONRPCNotification__'",
")",
"notification",
"=",
"True",
"except",
"AttributeError",
":",
"notification",
"=",
"False",
"#call the python function",
"if",
"'params'",
"in",
"req",
":",
"fct",
"=",
"self",
".",
"_methods",
"[",
"method",
"]",
"[",
"'fct'",
"]",
"if",
"isinstance",
"(",
"req",
"[",
"'params'",
"]",
",",
"dict",
")",
":",
"req",
"[",
"'params'",
"]",
"[",
"'session'",
"]",
"=",
"session",
"res",
"[",
"'result'",
"]",
"=",
"fct",
"(",
"*",
"*",
"req",
"[",
"'params'",
"]",
")",
"else",
":",
"res",
"[",
"'result'",
"]",
"=",
"fct",
"(",
"session",
",",
"req",
"[",
"'params'",
"]",
")",
"else",
":",
"res",
"[",
"'result'",
"]",
"=",
"self",
".",
"_methods",
"[",
"method",
"]",
"[",
"'fct'",
"]",
"(",
"session",
")",
"except",
"SLOJSONRPCError",
"as",
"e",
":",
"res",
"=",
"e",
".",
"to_json",
"(",
"req",
".",
"get",
"(",
"'id'",
",",
"None",
")",
")",
"except",
":",
"logging",
".",
"debug",
"(",
"'Uncaught Exception:'",
")",
"logging",
".",
"debug",
"(",
"'-------------------\\n'",
"+",
"traceback",
".",
"format_exc",
"(",
")",
")",
"res",
"=",
"SLOJSONRPCError",
"(",
"-",
"32603",
")",
".",
"to_json",
"(",
"req",
".",
"get",
"(",
"'id'",
",",
"None",
")",
")",
"session",
".",
"close",
"(",
")",
"logging",
".",
"debug",
"(",
"'--------------------RESULT'",
"+",
"'--------------------\\n'",
"+",
"json",
".",
"dumps",
"(",
"res",
",",
"sort_keys",
"=",
"True",
",",
"indent",
"=",
"4",
",",
"separators",
"=",
"(",
"','",
",",
"': '",
")",
")",
")",
"logging",
".",
"debug",
"(",
"'----------------------------------------------'",
")",
"#return None if a notification",
"if",
"notification",
":",
"return",
"None",
"elif",
"not",
"'error'",
"in",
"res",
":",
"res",
"[",
"'id'",
"]",
"=",
"req",
"[",
"'id'",
"]",
"return",
"res"
] |
handle a jsonrpc request
req - request as jsonrpc-dict
validate - validate the request? (default: True)
returns jsonrpc-dict with result or error
|
[
"handle",
"a",
"jsonrpc",
"request"
] |
33b5d79486e1562fe4fc7b73b8f3014535168b03
|
https://github.com/Noneus/slojsonrpc/blob/33b5d79486e1562fe4fc7b73b8f3014535168b03/slojsonrpc/__init__.py#L261-L334
|
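The call shape for the record above, continuing the register() sketch earlier (`rpc` and the echo handler are those assumptions): a parsed request dictionary goes in, and a response dictionary comes back with 'result' on success, the SLOJSONRPCError.to_json() document on failure, or None when the target method is a notification.

response = rpc.handle_request({
    'jsonrpc': '2.0',
    'method': 'echo',
    'params': {'text': 'hi'},
    'id': 7,
})
# success -> {'jsonrpc': '2.0', 'id': 7, 'result': 'hi'}
# failure -> the dict built by SLOJSONRPCError.to_json(), carrying an 'error' key
# notification methods -> None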
241,788
|
Noneus/slojsonrpc
|
slojsonrpc/__init__.py
|
SLOJSONRPC.handle_string
|
def handle_string(self, strreq):
'''
Handle a string representing a jsonrpc-request
strreq - jsonrpc-request as a string
returns jsonrpc-response as a string
'''
#convert to jsonrpc-dict
req = None
try:
req = json.loads(strreq)
except:
logging.debug('JSONRPC: Format Exception:')
logging.debug('-----------------\n' + traceback.format_exc())
return json.dumps(SLOJSONRPCError(-32700).to_json())
#handle single request
if isinstance(req, dict):
return json.dumps(self.handle_request(req))
#handle multiple requests
elif isinstance(req, list):
for r in req:
if not isinstance(r, dict):
logging.debug('JSONRPC: Fmt Error: Item ' +
'"%s" in request is no dictionary.' % str(r))
return json.dumps(SLOJSONRPCError(-32700).to_json())
try:
self._validate_format(r)
self._validate_params(r)
except SLOJSONRPCError as e:
return json.dumps(e.to_json(r.get('id', None)))
res = []
for r in req:
res.append(self.handle_request(r, validate=False))
return json.dumps(res)
#invalid request
else:
return json.dumps(SLOJSONRPCError(-32700).to_json())
|
python
|
def handle_string(self, strreq):
'''
Handle a string representing a jsonrpc-request
strreq - jsonrpc-request as a string
returns jsonrpc-response as a string
'''
#convert to jsonrpc-dict
req = None
try:
req = json.loads(strreq)
except:
logging.debug('JSONRPC: Format Exception:')
logging.debug('-----------------\n' + traceback.format_exc())
return json.dumps(SLOJSONRPCError(-32700).to_json())
#handle single request
if isinstance(req, dict):
return json.dumps(self.handle_request(req))
#handle multiple requests
elif isinstance(req, list):
for r in req:
if not isinstance(r, dict):
logging.debug('JSONRPC: Fmt Error: Item ' +
'"%s" in request is no dictionary.' % str(r))
return json.dumps(SLOJSONRPCError(-32700).to_json())
try:
self._validate_format(r)
self._validate_params(r)
except SLOJSONRPCError as e:
return json.dumps(e.to_json(r.get('id', None)))
res = []
for r in req:
res.append(self.handle_request(r, validate=False))
return json.dumps(res)
#invalid request
else:
return json.dumps(SLOJSONRPCError(-32700).to_json())
|
[
"def",
"handle_string",
"(",
"self",
",",
"strreq",
")",
":",
"#convert to jsonrpc-dict",
"req",
"=",
"None",
"try",
":",
"req",
"=",
"json",
".",
"loads",
"(",
"strreq",
")",
"except",
":",
"logging",
".",
"debug",
"(",
"'JSONRPC: Format Exception:'",
")",
"logging",
".",
"debug",
"(",
"'-----------------\\n'",
"+",
"traceback",
".",
"format_exc",
"(",
")",
")",
"return",
"json",
".",
"dumps",
"(",
"SLOJSONRPCError",
"(",
"-",
"32700",
")",
".",
"to_json",
"(",
")",
")",
"#handle single request",
"if",
"isinstance",
"(",
"req",
",",
"dict",
")",
":",
"return",
"json",
".",
"dumps",
"(",
"self",
".",
"handle_request",
"(",
"req",
")",
")",
"#handle multiple requests",
"elif",
"isinstance",
"(",
"req",
",",
"list",
")",
":",
"for",
"r",
"in",
"req",
":",
"if",
"not",
"isinstance",
"(",
"r",
",",
"dict",
")",
":",
"logging",
".",
"debug",
"(",
"'JSONRPC: Fmt Error: Item '",
"+",
"'\"%s\" in request is no dictionary.'",
"%",
"str",
"(",
"r",
")",
")",
"return",
"json",
".",
"dumps",
"(",
"SLOJSONRPCError",
"(",
"-",
"32700",
")",
".",
"to_json",
"(",
")",
")",
"try",
":",
"self",
".",
"_validate_format",
"(",
"r",
")",
"self",
".",
"_validate_params",
"(",
"r",
")",
"except",
"SLOJSONRPCError",
"as",
"e",
":",
"return",
"json",
".",
"dumps",
"(",
"e",
".",
"to_json",
"(",
"r",
".",
"get",
"(",
"'id'",
",",
"None",
")",
")",
")",
"res",
"=",
"[",
"]",
"for",
"r",
"in",
"req",
":",
"res",
".",
"append",
"(",
"self",
".",
"handle_request",
"(",
"r",
",",
"validate",
"=",
"False",
")",
")",
"return",
"json",
".",
"dumps",
"(",
"res",
")",
"#invalid request",
"else",
":",
"return",
"json",
".",
"dumps",
"(",
"SLOJSONRPCError",
"(",
"-",
"32700",
")",
".",
"to_json",
"(",
")",
")"
] |
Handle a string representing a jsonrpc-request
strreq - jsonrpc-request as a string
returns jsonrpc-response as a string
|
[
"Handle",
"a",
"string",
"representing",
"a",
"jsonrpc",
"-",
"request"
] |
33b5d79486e1562fe4fc7b73b8f3014535168b03
|
https://github.com/Noneus/slojsonrpc/blob/33b5d79486e1562fe4fc7b73b8f3014535168b03/slojsonrpc/__init__.py#L336-L376
|
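An end-to-end sketch for the record above, again reusing the `rpc` instance assumed in the register() sketch: handle_string is the JSON-in/JSON-out entry point, a list is validated up front and then dispatched as a batch, and unparseable input yields a -32700 (parse error) response document.

import json

print(rpc.handle_string(json.dumps(
    {'jsonrpc': '2.0', 'method': 'ping', 'id': 1})))

batch = [
    {'jsonrpc': '2.0', 'method': 'ping', 'id': 2},
    {'jsonrpc': '2.0', 'method': 'echo', 'params': {'text': 'hi'}, 'id': 3},
]
print(rpc.handle_string(json.dumps(batch)))

print(rpc.handle_string('not json'))   # -> -32700 parse error document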
241,789
|
sunlightlabs/django-locksmith
|
locksmith/hub/views.py
|
report_calls
|
def report_calls(request):
'''
POST endpoint for APIs to report their statistics
requires parameters: api, key, calls, date, endpoint & signature
if 'api' or 'key' parameter is invalid returns a 404
if signature is bad returns a 400
returns a 200 with content 'OK' if call succeeds
'''
api_obj = get_object_or_404(Api, name=request.POST['api'])
# check the signature
if get_signature(request.POST, api_obj.signing_key) != request.POST['signature']:
return HttpResponseBadRequest('bad signature')
key_obj = get_object_or_404(Key, key=request.POST['key'])
calls = int(request.POST['calls'])
try:
# use get_or_create to update unique #calls for (date,api,key,endpoint)
report,c = Report.objects.get_or_create(date=request.POST['date'],
api=api_obj,
key=key_obj,
endpoint=request.POST['endpoint'],
defaults={'calls':calls})
if not c:
report.calls = calls
report.save()
except Exception:
raise
return HttpResponse('OK')
|
python
|
def report_calls(request):
'''
POST endpoint for APIs to report their statistics
requires parameters: api, key, calls, date, endpoint & signature
if 'api' or 'key' parameter is invalid returns a 404
if signature is bad returns a 400
returns a 200 with content 'OK' if call succeeds
'''
api_obj = get_object_or_404(Api, name=request.POST['api'])
# check the signature
if get_signature(request.POST, api_obj.signing_key) != request.POST['signature']:
return HttpResponseBadRequest('bad signature')
key_obj = get_object_or_404(Key, key=request.POST['key'])
calls = int(request.POST['calls'])
try:
# use get_or_create to update unique #calls for (date,api,key,endpoint)
report,c = Report.objects.get_or_create(date=request.POST['date'],
api=api_obj,
key=key_obj,
endpoint=request.POST['endpoint'],
defaults={'calls':calls})
if not c:
report.calls = calls
report.save()
except Exception:
raise
return HttpResponse('OK')
|
[
"def",
"report_calls",
"(",
"request",
")",
":",
"api_obj",
"=",
"get_object_or_404",
"(",
"Api",
",",
"name",
"=",
"request",
".",
"POST",
"[",
"'api'",
"]",
")",
"# check the signature",
"if",
"get_signature",
"(",
"request",
".",
"POST",
",",
"api_obj",
".",
"signing_key",
")",
"!=",
"request",
".",
"POST",
"[",
"'signature'",
"]",
":",
"return",
"HttpResponseBadRequest",
"(",
"'bad signature'",
")",
"key_obj",
"=",
"get_object_or_404",
"(",
"Key",
",",
"key",
"=",
"request",
".",
"POST",
"[",
"'key'",
"]",
")",
"calls",
"=",
"int",
"(",
"request",
".",
"POST",
"[",
"'calls'",
"]",
")",
"try",
":",
"# use get_or_create to update unique #calls for (date,api,key,endpoint)",
"report",
",",
"c",
"=",
"Report",
".",
"objects",
".",
"get_or_create",
"(",
"date",
"=",
"request",
".",
"POST",
"[",
"'date'",
"]",
",",
"api",
"=",
"api_obj",
",",
"key",
"=",
"key_obj",
",",
"endpoint",
"=",
"request",
".",
"POST",
"[",
"'endpoint'",
"]",
",",
"defaults",
"=",
"{",
"'calls'",
":",
"calls",
"}",
")",
"if",
"not",
"c",
":",
"report",
".",
"calls",
"=",
"calls",
"report",
".",
"save",
"(",
")",
"except",
"Exception",
":",
"raise",
"return",
"HttpResponse",
"(",
"'OK'",
")"
] |
POST endpoint for APIs to report their statistics
requires parameters: api, key, calls, date, endpoint & signature
if 'api' or 'key' parameter is invalid returns a 404
if signature is bad returns a 400
returns a 200 with content 'OK' if call succeeds
|
[
"POST",
"endpoint",
"for",
"APIs",
"to",
"report",
"their",
"statistics"
] |
eef5b7c25404560aaad50b6e622594f89239b74b
|
https://github.com/sunlightlabs/django-locksmith/blob/eef5b7c25404560aaad50b6e622594f89239b74b/locksmith/hub/views.py#L23-L55
|
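A client-side sketch for the record above. The endpoint URL and the import location of get_signature are assumptions; the field names and the signing scheme (sign the POST body with the API's shared signing key) are taken from the view.

import requests
from locksmith.common import get_signature   # assumed location of the helper

SIGNING_KEY = 'shared-secret'                # the API's signing key on the hub

params = {
    'api': 'myapi',
    'key': 'abcdef0123456789abcdef0123456789',
    'calls': '42',
    'date': '2013-01-01',
    'endpoint': 'objects.list',
}
params['signature'] = get_signature(params, SIGNING_KEY)
resp = requests.post('https://hub.example.com/report_calls/', data=params)
assert resp.status_code == 200 and resp.text == 'OK'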
241,790
|
sunlightlabs/django-locksmith
|
locksmith/hub/views.py
|
check_key
|
def check_key(request):
'''
POST endpoint determining whether or not a key exists and is valid
'''
api_objs = list(Api.objects.filter(name=request.POST['api']))
if not api_objs:
return HttpResponseBadRequest('Must specify valid API')
# check the signature
if get_signature(request.POST, api_objs[0].signing_key) != request.POST['signature']:
return HttpResponseBadRequest('bad signature')
get_object_or_404(Key, key=request.POST['key'], status='A')
return HttpResponse('OK')
|
python
|
def check_key(request):
'''
POST endpoint determining whether or not a key exists and is valid
'''
api_objs = list(Api.objects.filter(name=request.POST['api']))
if not api_objs:
return HttpResponseBadRequest('Must specify valid API')
# check the signature
if get_signature(request.POST, api_objs[0].signing_key) != request.POST['signature']:
return HttpResponseBadRequest('bad signature')
get_object_or_404(Key, key=request.POST['key'], status='A')
return HttpResponse('OK')
|
[
"def",
"check_key",
"(",
"request",
")",
":",
"api_objs",
"=",
"list",
"(",
"Api",
".",
"objects",
".",
"filter",
"(",
"name",
"=",
"request",
".",
"POST",
"[",
"'api'",
"]",
")",
")",
"if",
"not",
"api_objs",
":",
"return",
"HttpResponseBadRequest",
"(",
"'Must specify valid API'",
")",
"# check the signature",
"if",
"get_signature",
"(",
"request",
".",
"POST",
",",
"api_objs",
"[",
"0",
"]",
".",
"signing_key",
")",
"!=",
"request",
".",
"POST",
"[",
"'signature'",
"]",
":",
"return",
"HttpResponseBadRequest",
"(",
"'bad signature'",
")",
"get_object_or_404",
"(",
"Key",
",",
"key",
"=",
"request",
".",
"POST",
"[",
"'key'",
"]",
",",
"status",
"=",
"'A'",
")",
"return",
"HttpResponse",
"(",
"'OK'",
")"
] |
POST endpoint determining whether or not a key exists and is valid
|
[
"POST",
"endpoint",
"determining",
"whether",
"or",
"not",
"a",
"key",
"exists",
"and",
"is",
"valid"
] |
eef5b7c25404560aaad50b6e622594f89239b74b
|
https://github.com/sunlightlabs/django-locksmith/blob/eef5b7c25404560aaad50b6e622594f89239b74b/locksmith/hub/views.py#L81-L95
|
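The same signing scheme covers the record above; only api, key and signature are sent, and a 200 'OK' means the key exists with status 'A'. Continuing the previous sketch (same imports and assumed URL scheme):

params = {'api': 'myapi', 'key': 'abcdef0123456789abcdef0123456789'}
params['signature'] = get_signature(params, SIGNING_KEY)
resp = requests.post('https://hub.example.com/check_key/', data=params)
key_is_valid = resp.status_code == 200 and resp.text == 'OK'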
241,791
|
sunlightlabs/django-locksmith
|
locksmith/hub/views.py
|
register
|
def register(request,
email_template='locksmith/registration_email.txt',
registration_template=getattr(settings, 'LOCKSMITH_REGISTER_TEMPLATE', 'locksmith/register.html'),
registered_template=getattr(settings, 'LOCKSMITH_REGISTERED_TEMPLATE', 'locksmith/registered.html'),
):
'''
API registration view
displays/validates form and sends email on successful submission
'''
if request.method == 'POST':
form = KeyForm(request.POST)
if form.is_valid():
newkey = form.save(commit=False)
newkey.key = uuid.uuid4().hex
newkey.status = 'U'
newkey.save()
send_key_email(newkey, email_template)
return render_to_response(registered_template, {'key': newkey, 'LOCKSMITH_BASE_TEMPLATE': settings.LOCKSMITH_BASE_TEMPLATE }, context_instance=RequestContext(request))
else:
form = KeyForm()
return render_to_response(registration_template, {'form':form, 'LOCKSMITH_BASE_TEMPLATE': settings.LOCKSMITH_BASE_TEMPLATE}, context_instance=RequestContext(request))
|
python
|
def register(request,
email_template='locksmith/registration_email.txt',
registration_template=getattr(settings, 'LOCKSMITH_REGISTER_TEMPLATE', 'locksmith/register.html'),
registered_template=getattr(settings, 'LOCKSMITH_REGISTERED_TEMPLATE', 'locksmith/registered.html'),
):
'''
API registration view
displays/validates form and sends email on successful submission
'''
if request.method == 'POST':
form = KeyForm(request.POST)
if form.is_valid():
newkey = form.save(commit=False)
newkey.key = uuid.uuid4().hex
newkey.status = 'U'
newkey.save()
send_key_email(newkey, email_template)
return render_to_response(registered_template, {'key': newkey, 'LOCKSMITH_BASE_TEMPLATE': settings.LOCKSMITH_BASE_TEMPLATE }, context_instance=RequestContext(request))
else:
form = KeyForm()
return render_to_response(registration_template, {'form':form, 'LOCKSMITH_BASE_TEMPLATE': settings.LOCKSMITH_BASE_TEMPLATE}, context_instance=RequestContext(request))
|
[
"def",
"register",
"(",
"request",
",",
"email_template",
"=",
"'locksmith/registration_email.txt'",
",",
"registration_template",
"=",
"getattr",
"(",
"settings",
",",
"'LOCKSMITH_REGISTER_TEMPLATE'",
",",
"'locksmith/register.html'",
")",
",",
"registered_template",
"=",
"getattr",
"(",
"settings",
",",
"'LOCKSMITH_REGISTERED_TEMPLATE'",
",",
"'locksmith/registered.html'",
")",
",",
")",
":",
"if",
"request",
".",
"method",
"==",
"'POST'",
":",
"form",
"=",
"KeyForm",
"(",
"request",
".",
"POST",
")",
"if",
"form",
".",
"is_valid",
"(",
")",
":",
"newkey",
"=",
"form",
".",
"save",
"(",
"commit",
"=",
"False",
")",
"newkey",
".",
"key",
"=",
"uuid",
".",
"uuid4",
"(",
")",
".",
"hex",
"newkey",
".",
"status",
"=",
"'U'",
"newkey",
".",
"save",
"(",
")",
"send_key_email",
"(",
"newkey",
",",
"email_template",
")",
"return",
"render_to_response",
"(",
"registered_template",
",",
"{",
"'key'",
":",
"newkey",
",",
"'LOCKSMITH_BASE_TEMPLATE'",
":",
"settings",
".",
"LOCKSMITH_BASE_TEMPLATE",
"}",
",",
"context_instance",
"=",
"RequestContext",
"(",
"request",
")",
")",
"else",
":",
"form",
"=",
"KeyForm",
"(",
")",
"return",
"render_to_response",
"(",
"registration_template",
",",
"{",
"'form'",
":",
"form",
",",
"'LOCKSMITH_BASE_TEMPLATE'",
":",
"settings",
".",
"LOCKSMITH_BASE_TEMPLATE",
"}",
",",
"context_instance",
"=",
"RequestContext",
"(",
"request",
")",
")"
] |
API registration view
displays/validates form and sends email on successful submission
|
[
"API",
"registration",
"view"
] |
eef5b7c25404560aaad50b6e622594f89239b74b
|
https://github.com/sunlightlabs/django-locksmith/blob/eef5b7c25404560aaad50b6e622594f89239b74b/locksmith/hub/views.py#L98-L120
|
241,792
|
sunlightlabs/django-locksmith
|
locksmith/hub/views.py
|
confirm_registration
|
def confirm_registration(request, key, template="locksmith/confirmed.html"):
'''
API key confirmation
visiting this URL marks a Key as ready for use
'''
context = {'LOCKSMITH_BASE_TEMPLATE': settings.LOCKSMITH_BASE_TEMPLATE}
try:
context['key'] = key_obj = Key.objects.get(key=key)
if key_obj.status != 'U':
context['error'] = 'Key Already Activated'
else:
key_obj.status = 'A'
key_obj.save()
key_obj.mark_for_update()
except Key.DoesNotExist:
context['error'] = 'Invalid Key'
return render_to_response(template, context,
context_instance=RequestContext(request))
|
python
|
def confirm_registration(request, key, template="locksmith/confirmed.html"):
'''
API key confirmation
visiting this URL marks a Key as ready for use
'''
context = {'LOCKSMITH_BASE_TEMPLATE': settings.LOCKSMITH_BASE_TEMPLATE}
try:
context['key'] = key_obj = Key.objects.get(key=key)
if key_obj.status != 'U':
context['error'] = 'Key Already Activated'
else:
key_obj.status = 'A'
key_obj.save()
key_obj.mark_for_update()
except Key.DoesNotExist:
context['error'] = 'Invalid Key'
return render_to_response(template, context,
context_instance=RequestContext(request))
|
[
"def",
"confirm_registration",
"(",
"request",
",",
"key",
",",
"template",
"=",
"\"locksmith/confirmed.html\"",
")",
":",
"context",
"=",
"{",
"'LOCKSMITH_BASE_TEMPLATE'",
":",
"settings",
".",
"LOCKSMITH_BASE_TEMPLATE",
"}",
"try",
":",
"context",
"[",
"'key'",
"]",
"=",
"key_obj",
"=",
"Key",
".",
"objects",
".",
"get",
"(",
"key",
"=",
"key",
")",
"if",
"key_obj",
".",
"status",
"!=",
"'U'",
":",
"context",
"[",
"'error'",
"]",
"=",
"'Key Already Activated'",
"else",
":",
"key_obj",
".",
"status",
"=",
"'A'",
"key_obj",
".",
"save",
"(",
")",
"key_obj",
".",
"mark_for_update",
"(",
")",
"except",
"Key",
".",
"DoesNotExist",
":",
"context",
"[",
"'error'",
"]",
"=",
"'Invalid Key'",
"return",
"render_to_response",
"(",
"template",
",",
"context",
",",
"context_instance",
"=",
"RequestContext",
"(",
"request",
")",
")"
] |
API key confirmation
visiting this URL marks a Key as ready for use
|
[
"API",
"key",
"confirmation"
] |
eef5b7c25404560aaad50b6e622594f89239b74b
|
https://github.com/sunlightlabs/django-locksmith/blob/eef5b7c25404560aaad50b6e622594f89239b74b/locksmith/hub/views.py#L158-L176
|
241,793
|
sunlightlabs/django-locksmith
|
locksmith/hub/views.py
|
profile
|
def profile(request):
'''
Viewing of signup details and editing of password
'''
context = {}
if request.method == 'POST':
form = PasswordChangeForm(request.user, request.POST)
if form.is_valid():
form.save()
messages.info(request, 'Password Changed.')
else:
form = PasswordChangeForm(request.user)
key = Key.objects.get(email=request.user.email)
#analytics
endpoint_q = key.reports.values('api__name', 'endpoint').annotate(calls=Sum('calls')).order_by('-calls')
endpoints = [{'endpoint':'.'.join((d['api__name'], d['endpoint'])),
'calls': d['calls']} for d in endpoint_q]
date_q = key.reports.values('date').annotate(calls=Sum('calls')).order_by('date')
context['endpoints'], context['endpoint_calls'] = _dictlist_to_lists(endpoints, 'endpoint', 'calls')
context['timeline'] = date_q
context['form'] = form
context['key'] = key
context['password_is_key'] = request.user.check_password(key.key)
return render_to_response('locksmith/profile.html', context,
context_instance=RequestContext(request))
|
python
|
def profile(request):
'''
Viewing of signup details and editing of password
'''
context = {}
if request.method == 'POST':
form = PasswordChangeForm(request.user, request.POST)
if form.is_valid():
form.save()
messages.info(request, 'Password Changed.')
else:
form = PasswordChangeForm(request.user)
key = Key.objects.get(email=request.user.email)
#analytics
endpoint_q = key.reports.values('api__name', 'endpoint').annotate(calls=Sum('calls')).order_by('-calls')
endpoints = [{'endpoint':'.'.join((d['api__name'], d['endpoint'])),
'calls': d['calls']} for d in endpoint_q]
date_q = key.reports.values('date').annotate(calls=Sum('calls')).order_by('date')
context['endpoints'], context['endpoint_calls'] = _dictlist_to_lists(endpoints, 'endpoint', 'calls')
context['timeline'] = date_q
context['form'] = form
context['key'] = key
context['password_is_key'] = request.user.check_password(key.key)
return render_to_response('locksmith/profile.html', context,
context_instance=RequestContext(request))
|
[
"def",
"profile",
"(",
"request",
")",
":",
"context",
"=",
"{",
"}",
"if",
"request",
".",
"method",
"==",
"'POST'",
":",
"form",
"=",
"PasswordChangeForm",
"(",
"request",
".",
"user",
",",
"request",
".",
"POST",
")",
"if",
"form",
".",
"is_valid",
"(",
")",
":",
"form",
".",
"save",
"(",
")",
"messages",
".",
"info",
"(",
"request",
",",
"'Password Changed.'",
")",
"else",
":",
"form",
"=",
"PasswordChangeForm",
"(",
"request",
".",
"user",
")",
"key",
"=",
"Key",
".",
"objects",
".",
"get",
"(",
"email",
"=",
"request",
".",
"user",
".",
"email",
")",
"#analytics",
"endpoint_q",
"=",
"key",
".",
"reports",
".",
"values",
"(",
"'api__name'",
",",
"'endpoint'",
")",
".",
"annotate",
"(",
"calls",
"=",
"Sum",
"(",
"'calls'",
")",
")",
".",
"order_by",
"(",
"'-calls'",
")",
"endpoints",
"=",
"[",
"{",
"'endpoint'",
":",
"'.'",
".",
"join",
"(",
"(",
"d",
"[",
"'api__name'",
"]",
",",
"d",
"[",
"'endpoint'",
"]",
")",
")",
",",
"'calls'",
":",
"d",
"[",
"'calls'",
"]",
"}",
"for",
"d",
"in",
"endpoint_q",
"]",
"date_q",
"=",
"key",
".",
"reports",
".",
"values",
"(",
"'date'",
")",
".",
"annotate",
"(",
"calls",
"=",
"Sum",
"(",
"'calls'",
")",
")",
".",
"order_by",
"(",
"'date'",
")",
"context",
"[",
"'endpoints'",
"]",
",",
"context",
"[",
"'endpoint_calls'",
"]",
"=",
"_dictlist_to_lists",
"(",
"endpoints",
",",
"'endpoint'",
",",
"'calls'",
")",
"context",
"[",
"'timeline'",
"]",
"=",
"date_q",
"context",
"[",
"'form'",
"]",
"=",
"form",
"context",
"[",
"'key'",
"]",
"=",
"key",
"context",
"[",
"'password_is_key'",
"]",
"=",
"request",
".",
"user",
".",
"check_password",
"(",
"key",
".",
"key",
")",
"return",
"render_to_response",
"(",
"'locksmith/profile.html'",
",",
"context",
",",
"context_instance",
"=",
"RequestContext",
"(",
"request",
")",
")"
] |
Viewing of signup details and editing of password
|
[
"Viewing",
"of",
"signup",
"details",
"and",
"editing",
"of",
"password"
] |
eef5b7c25404560aaad50b6e622594f89239b74b
|
https://github.com/sunlightlabs/django-locksmith/blob/eef5b7c25404560aaad50b6e622594f89239b74b/locksmith/hub/views.py#L180-L208
|
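The three account-facing views above (register, confirm_registration, profile) are ordinary Django views from the render_to_response era; below is a sketch of wiring them into a project urlconf. The pattern strings and names are assumptions; only the view signatures (register accepts template overrides, confirm_registration takes the key from the URL, and keys are uuid4().hex strings) come from the records.

from django.conf.urls import url
from locksmith.hub.views import register, confirm_registration, profile

urlpatterns = [
    url(r'^register/$', register, name='locksmith_register'),
    url(r'^confirm/(?P<key>[0-9a-f]{32})/$', confirm_registration,
        name='locksmith_confirm'),
    url(r'^profile/$', profile, name='locksmith_profile'),
]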
241,794
|
sunlightlabs/django-locksmith
|
locksmith/hub/views.py
|
_dictlist_to_lists
|
def _dictlist_to_lists(dl, *keys):
''' convert a list of dictionaries to a dictionary of lists
>>> dl = [{'a': 'test', 'b': 3}, {'a': 'zaz', 'b': 444},
{'a': 'wow', 'b': 300}]
>>> _dictlist_to_lists(dl)
(['test', 'zaz', 'wow'], [3, 444, 300])
'''
lists = []
for k in keys:
lists.append([])
for item in dl:
for i, key in enumerate(keys):
x = item[key]
if isinstance(x, unicode):
x = str(x)
lists[i].append(x)
return lists
|
python
|
def _dictlist_to_lists(dl, *keys):
''' convert a list of dictionaries to a dictionary of lists
>>> dl = [{'a': 'test', 'b': 3}, {'a': 'zaz', 'b': 444},
{'a': 'wow', 'b': 300}]
>>> _dictlist_to_lists(dl)
(['test', 'zaz', 'wow'], [3, 444, 300])
'''
lists = []
for k in keys:
lists.append([])
for item in dl:
for i, key in enumerate(keys):
x = item[key]
if isinstance(x, unicode):
x = str(x)
lists[i].append(x)
return lists
|
[
"def",
"_dictlist_to_lists",
"(",
"dl",
",",
"*",
"keys",
")",
":",
"lists",
"=",
"[",
"]",
"for",
"k",
"in",
"keys",
":",
"lists",
".",
"append",
"(",
"[",
"]",
")",
"for",
"item",
"in",
"dl",
":",
"for",
"i",
",",
"key",
"in",
"enumerate",
"(",
"keys",
")",
":",
"x",
"=",
"item",
"[",
"key",
"]",
"if",
"isinstance",
"(",
"x",
",",
"unicode",
")",
":",
"x",
"=",
"str",
"(",
"x",
")",
"lists",
"[",
"i",
"]",
".",
"append",
"(",
"x",
")",
"return",
"lists"
] |
convert a list of dictionaries to a dictionary of lists
>>> dl = [{'a': 'test', 'b': 3}, {'a': 'zaz', 'b': 444},
{'a': 'wow', 'b': 300}]
>>> _dictlist_to_lists(dl)
(['test', 'zaz', 'wow'], [3, 444, 300])
|
[
"convert",
"a",
"list",
"of",
"dictionaries",
"to",
"a",
"dictionary",
"of",
"lists"
] |
eef5b7c25404560aaad50b6e622594f89239b74b
|
https://github.com/sunlightlabs/django-locksmith/blob/eef5b7c25404560aaad50b6e622594f89239b74b/locksmith/hub/views.py#L212-L229
|
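A corrected usage sketch for the record above: the keys have to be passed explicitly and the function returns a list of lists, whereas the doctest in its docstring omits the key arguments and shows a tuple. The isinstance(x, unicode) check also marks this as Python 2 code.

dl = [{'a': 'test', 'b': 3}, {'a': 'zaz', 'b': 444}, {'a': 'wow', 'b': 300}]
labels, values = _dictlist_to_lists(dl, 'a', 'b')
print(labels)   # ['test', 'zaz', 'wow']
print(values)   # [3, 444, 300]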
241,795
|
sunlightlabs/django-locksmith
|
locksmith/hub/views.py
|
_cumulative_by_date
|
def _cumulative_by_date(model, datefield):
'''
Given a model and date field, generate monthly cumulative totals.
'''
monthly_counts = defaultdict(int)
for obj in model.objects.all().order_by(datefield):
datevalue = getattr(obj, datefield)
monthkey = (datevalue.year, datevalue.month)
monthly_counts[monthkey] += 1
if len(monthly_counts) == 0:
return []
earliest_month = min(monthly_counts.iterkeys())
latest_month = max(monthly_counts.iterkeys())
accumulator = 0
cumulative_counts = []
for (year, month) in cycle_generator(cycle=(1, 12), begin=earliest_month, end=latest_month):
mcount = monthly_counts.get((year, month), 0)
accumulator += mcount
cumulative_counts.append([datetime.date(year, month, 1), accumulator])
return cumulative_counts
|
python
|
def _cumulative_by_date(model, datefield):
'''
Given a model and date field, generate monthly cumulative totals.
'''
monthly_counts = defaultdict(int)
for obj in model.objects.all().order_by(datefield):
datevalue = getattr(obj, datefield)
monthkey = (datevalue.year, datevalue.month)
monthly_counts[monthkey] += 1
if len(monthly_counts) == 0:
return []
earliest_month = min(monthly_counts.iterkeys())
latest_month = max(monthly_counts.iterkeys())
accumulator = 0
cumulative_counts = []
for (year, month) in cycle_generator(cycle=(1, 12), begin=earliest_month, end=latest_month):
mcount = monthly_counts.get((year, month), 0)
accumulator += mcount
cumulative_counts.append([datetime.date(year, month, 1), accumulator])
return cumulative_counts
|
[
"def",
"_cumulative_by_date",
"(",
"model",
",",
"datefield",
")",
":",
"monthly_counts",
"=",
"defaultdict",
"(",
"int",
")",
"for",
"obj",
"in",
"model",
".",
"objects",
".",
"all",
"(",
")",
".",
"order_by",
"(",
"datefield",
")",
":",
"datevalue",
"=",
"getattr",
"(",
"obj",
",",
"datefield",
")",
"monthkey",
"=",
"(",
"datevalue",
".",
"year",
",",
"datevalue",
".",
"month",
")",
"monthly_counts",
"[",
"monthkey",
"]",
"+=",
"1",
"if",
"len",
"(",
"monthly_counts",
")",
"==",
"0",
":",
"return",
"[",
"]",
"earliest_month",
"=",
"min",
"(",
"monthly_counts",
".",
"iterkeys",
"(",
")",
")",
"latest_month",
"=",
"max",
"(",
"monthly_counts",
".",
"iterkeys",
"(",
")",
")",
"accumulator",
"=",
"0",
"cumulative_counts",
"=",
"[",
"]",
"for",
"(",
"year",
",",
"month",
")",
"in",
"cycle_generator",
"(",
"cycle",
"=",
"(",
"1",
",",
"12",
")",
",",
"begin",
"=",
"earliest_month",
",",
"end",
"=",
"latest_month",
")",
":",
"mcount",
"=",
"monthly_counts",
".",
"get",
"(",
"(",
"year",
",",
"month",
")",
",",
"0",
")",
"accumulator",
"+=",
"mcount",
"cumulative_counts",
".",
"append",
"(",
"[",
"datetime",
".",
"date",
"(",
"year",
",",
"month",
",",
"1",
")",
",",
"accumulator",
"]",
")",
"return",
"cumulative_counts"
] |
Given a model and date field, generate monthly cumulative totals.
|
[
"Given",
"a",
"model",
"and",
"date",
"field",
"generate",
"monthly",
"cumulative",
"totals",
"."
] |
eef5b7c25404560aaad50b6e622594f89239b74b
|
https://github.com/sunlightlabs/django-locksmith/blob/eef5b7c25404560aaad50b6e622594f89239b74b/locksmith/hub/views.py#L231-L254
|
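A standalone illustration of the accumulation performed in the record above: bucket objects by (year, month), then walk every month from the earliest bucket to the latest, carrying a running total. It is written against a plain list of dates so it runs without a Django model or the cycle_generator helper.

import datetime
from collections import defaultdict

dates = [datetime.date(2013, 1, 5), datetime.date(2013, 1, 20),
         datetime.date(2013, 3, 2)]

monthly = defaultdict(int)
for d in dates:
    monthly[(d.year, d.month)] += 1

first, last = min(monthly), max(monthly)
total, cumulative = 0, []
year, month = first
while (year, month) <= last:
    total += monthly.get((year, month), 0)
    cumulative.append([datetime.date(year, month, 1), total])
    year, month = (year, month + 1) if month < 12 else (year + 1, 1)

print(cumulative)
# [[datetime.date(2013, 1, 1), 2], [datetime.date(2013, 2, 1), 2],
#  [datetime.date(2013, 3, 1), 3]]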
241,796
|
luismasuelli/python-cantrips
|
cantrips/types/exception.py
|
factory
|
def factory(codes, base=_Exception):
"""
Creates a custom exception class with arbitrary error codes and arguments.
"""
if not issubclass(base, _Exception):
raise FactoryException("Invalid class passed as parent: Must be a subclass of an Exception class created with this function",
FactoryException.INVALID_EXCEPTION_CLASS, intended_parent=base)
class Error(base):
pass
if isinstance(codes, (list, set, tuple, frozenset)):
codes = {e: e for e in codes}
if not isinstance(codes, dict):
raise FactoryException("Factory codes must be a dict str -> object",
FactoryException.INVALID_CODES_LIST, intended_codes=codes)
for code, value in codes.items():
try:
setattr(Error, code, value)
except TypeError:
raise FactoryException("Cannot set class attribute: (%r) -> (%r)" % (code, value),
FactoryException.INVALID_CODE_VALUE, attribute=code, value=value)
return Error
|
python
|
def factory(codes, base=_Exception):
"""
Creates a custom exception class with arbitrary error codes and arguments.
"""
if not issubclass(base, _Exception):
raise FactoryException("Invalid class passed as parent: Must be a subclass of an Exception class created with this function",
FactoryException.INVALID_EXCEPTION_CLASS, intended_parent=base)
class Error(base):
pass
if isinstance(codes, (list, set, tuple, frozenset)):
codes = {e: e for e in codes}
if not isinstance(codes, dict):
raise FactoryException("Factory codes must be a dict str -> object",
FactoryException.INVALID_CODES_LIST, intended_codes=codes)
for code, value in codes.items():
try:
setattr(Error, code, value)
except TypeError:
raise FactoryException("Cannot set class attribute: (%r) -> (%r)" % (code, value),
FactoryException.INVALID_CODE_VALUE, attribute=code, value=value)
return Error
|
[
"def",
"factory",
"(",
"codes",
",",
"base",
"=",
"_Exception",
")",
":",
"if",
"not",
"issubclass",
"(",
"base",
",",
"_Exception",
")",
":",
"raise",
"FactoryException",
"(",
"\"Invalid class passed as parent: Must be a subclass of an Exception class created with this function\"",
",",
"FactoryException",
".",
"INVALID_EXCEPTION_CLASS",
",",
"intended_parent",
"=",
"base",
")",
"class",
"Error",
"(",
"base",
")",
":",
"pass",
"if",
"isinstance",
"(",
"codes",
",",
"(",
"list",
",",
"set",
",",
"tuple",
",",
"frozenset",
")",
")",
":",
"codes",
"=",
"{",
"e",
":",
"e",
"for",
"e",
"in",
"codes",
"}",
"if",
"not",
"isinstance",
"(",
"codes",
",",
"dict",
")",
":",
"raise",
"FactoryException",
"(",
"\"Factory codes must be a dict str -> object\"",
",",
"FactoryException",
".",
"INVALID_CODES_LIST",
",",
"intended_codes",
"=",
"codes",
")",
"for",
"code",
",",
"value",
"in",
"codes",
".",
"items",
"(",
")",
":",
"try",
":",
"setattr",
"(",
"Error",
",",
"code",
",",
"value",
")",
"except",
"TypeError",
":",
"raise",
"FactoryException",
"(",
"\"Cannot set class attribute: (%r) -> (%r)\"",
"%",
"(",
"code",
",",
"value",
")",
",",
"FactoryException",
".",
"INVALID_CODE_VALUE",
",",
"attribute",
"=",
"code",
",",
"value",
"=",
"value",
")",
"return",
"Error"
] |
Creates a custom exception class with arbitrary error codes and arguments.
|
[
"Creates",
"a",
"custom",
"exception",
"class",
"with",
"arbitrary",
"error",
"codes",
"and",
"arguments",
"."
] |
dba2742c1d1a60863bb65f4a291464f6e68eb2ee
|
https://github.com/luismasuelli/python-cantrips/blob/dba2742c1d1a60863bb65f4a291464f6e68eb2ee/cantrips/types/exception.py#L11-L37
|
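A short usage sketch for the factory shown in the record above. It assumes python-cantrips is installed and importable from the path given in the record; the StorageError and HttpError names are invented for illustration. Only the class-attribute behaviour visible in the code is exercised:

# Hypothetical usage of cantrips' factory(); error-class names are made up.
from cantrips.types.exception import factory

# A list of codes maps each code name to itself as a class attribute.
StorageError = factory(['NOT_FOUND', 'CONFLICT'])
print(StorageError.NOT_FOUND)   # -> 'NOT_FOUND'

# A dict maps code names to explicit values; the previous class can be the base.
HttpError = factory({'NOT_FOUND': 404, 'CONFLICT': 409}, base=StorageError)
print(HttpError.CONFLICT)       # -> 409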
241,797
|
hactar-is/frink
|
frink/orm.py
|
InstanceLayerMixin.save
|
def save(self):
"""
Save the current instance to the DB
"""
with rconnect() as conn:
try:
self.validate()
except ValidationError as e:
log.warn(e.messages)
raise
except ModelValidationError as e:
log.warn(e.messages)
raise
except ModelConversionError as e:
log.warn(e.messages)
raise
except ValueError as e:
log.warn(e)
raise
except FrinkError as e:
log.warn(e.messages)
raise
except Exception as e:
log.warn(e)
raise
else:
# If this is a new unsaved object, it'll likely have an
# id of None, which RethinkDB won't like. So if it's None,
# generate a UUID for it. If the save fails, we should re-set
# it to None.
if self.id is None:
self.id = str(uuid.uuid4())
log.debug(self.id)
try:
query = r.db(self._db).table(self._table).insert(
self.to_primitive(),
conflict="replace"
)
log.debug(query)
rv = query.run(conn)
# Returns something like this:
# {
# u'errors': 0,
# u'deleted': 0,
# u'generated_keys': [u'dd8ad1bc-8609-4484-b6c4-ed96c72c03f2'],
# u'unchanged': 0,
# u'skipped': 0,
# u'replaced': 0,
# u'inserted': 1
# }
log.debug(rv)
except Exception as e:
log.warn(e)
self.id = None
raise
else:
return self
|
python
|
def save(self):
"""
Save the current instance to the DB
"""
with rconnect() as conn:
try:
self.validate()
except ValidationError as e:
log.warn(e.messages)
raise
except ModelValidationError as e:
log.warn(e.messages)
raise
except ModelConversionError as e:
log.warn(e.messages)
raise
except ValueError as e:
log.warn(e)
raise
except FrinkError as e:
log.warn(e.messages)
raise
except Exception as e:
log.warn(e)
raise
else:
# If this is a new unsaved object, it'll likely have an
# id of None, which RethinkDB won't like. So if it's None,
# generate a UUID for it. If the save fails, we should re-set
# it to None.
if self.id is None:
self.id = str(uuid.uuid4())
log.debug(self.id)
try:
query = r.db(self._db).table(self._table).insert(
self.to_primitive(),
conflict="replace"
)
log.debug(query)
rv = query.run(conn)
# Returns something like this:
# {
# u'errors': 0,
# u'deleted': 0,
# u'generated_keys': [u'dd8ad1bc-8609-4484-b6c4-ed96c72c03f2'],
# u'unchanged': 0,
# u'skipped': 0,
# u'replaced': 0,
# u'inserted': 1
# }
log.debug(rv)
except Exception as e:
log.warn(e)
self.id = None
raise
else:
return self
|
[
"def",
"save",
"(",
"self",
")",
":",
"with",
"rconnect",
"(",
")",
"as",
"conn",
":",
"try",
":",
"self",
".",
"validate",
"(",
")",
"except",
"ValidationError",
"as",
"e",
":",
"log",
".",
"warn",
"(",
"e",
".",
"messages",
")",
"raise",
"except",
"ModelValidationError",
"as",
"e",
":",
"log",
".",
"warn",
"(",
"e",
".",
"messages",
")",
"raise",
"except",
"ModelConversionError",
"as",
"e",
":",
"log",
".",
"warn",
"(",
"e",
".",
"messages",
")",
"raise",
"except",
"ValueError",
"as",
"e",
":",
"log",
".",
"warn",
"(",
"e",
")",
"raise",
"except",
"FrinkError",
"as",
"e",
":",
"log",
".",
"warn",
"(",
"e",
".",
"messages",
")",
"raise",
"except",
"Exception",
"as",
"e",
":",
"log",
".",
"warn",
"(",
"e",
")",
"raise",
"else",
":",
"# If this is a new unsaved object, it'll likely have an",
"# id of None, which RethinkDB won't like. So if it's None,",
"# generate a UUID for it. If the save fails, we should re-set",
"# it to None.",
"if",
"self",
".",
"id",
"is",
"None",
":",
"self",
".",
"id",
"=",
"str",
"(",
"uuid",
".",
"uuid4",
"(",
")",
")",
"log",
".",
"debug",
"(",
"self",
".",
"id",
")",
"try",
":",
"query",
"=",
"r",
".",
"db",
"(",
"self",
".",
"_db",
")",
".",
"table",
"(",
"self",
".",
"_table",
")",
".",
"insert",
"(",
"self",
".",
"to_primitive",
"(",
")",
",",
"conflict",
"=",
"\"replace\"",
")",
"log",
".",
"debug",
"(",
"query",
")",
"rv",
"=",
"query",
".",
"run",
"(",
"conn",
")",
"# Returns something like this:",
"# {",
"# u'errors': 0,",
"# u'deleted': 0,",
"# u'generated_keys': [u'dd8ad1bc-8609-4484-b6c4-ed96c72c03f2'],",
"# u'unchanged': 0,",
"# u'skipped': 0,",
"# u'replaced': 0,",
"# u'inserted': 1",
"# }",
"log",
".",
"debug",
"(",
"rv",
")",
"except",
"Exception",
"as",
"e",
":",
"log",
".",
"warn",
"(",
"e",
")",
"self",
".",
"id",
"=",
"None",
"raise",
"else",
":",
"return",
"self"
] |
Save the current instance to the DB
|
[
"Save",
"the",
"current",
"instance",
"to",
"the",
"DB"
] |
0d2c11daca8ef6d4365e98914bdc0bc65478ae72
|
https://github.com/hactar-is/frink/blob/0d2c11daca8ef6d4365e98914bdc0bc65478ae72/frink/orm.py#L33-L90
|
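save() in the record above validates the instance, assigns a UUID primary key when none exists, and inserts with conflict="replace" so re-saving overwrites the stored row. The sketch below shows how it might be called; the User model, its schematics-style fields, and a reachable RethinkDB server are assumptions (only InstanceLayerMixin, _db, and _table come from the record, and frink's actual model wiring may differ):

# Hypothetical wiring; the model definition and running RethinkDB are assumed.
from frink.orm import InstanceLayerMixin        # defined in frink/orm.py above
from schematics.models import Model             # validate()/to_primitive() suggest schematics
from schematics.types import StringType

class User(InstanceLayerMixin, Model):
    _db = 'appdb'                               # database used by save()/delete()
    _table = 'users'
    id = StringType()                           # save() fills this with a UUID string
    name = StringType(required=True)

user = User({'name': 'Ada'})
user.save()                                     # validates, generates user.id, inserts
print(user.id)
user.name = 'Ada Lovelace'
user.save()                                     # conflict="replace" overwrites the same row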
241,798
|
hactar-is/frink
|
frink/orm.py
|
InstanceLayerMixin.delete
|
def delete(self):
"""
Delete the current instance from the DB.
"""
with rconnect() as conn:
# Can't delete an object without an ID.
if self.id is None:
raise FrinkError("You can't delete an object with no ID")
else:
if isinstance(self.id, uuid.UUID):
self.id = str(self.id)
try:
query = r.db(
self._db
).table(
self._table
).get(
self.id
).delete()
log.debug(query)
rv = query.run(conn)
except Exception as e:
log.warn(e)
raise
else:
return True
|
python
|
def delete(self):
"""
Delete the current instance from the DB.
"""
with rconnect() as conn:
# Can't delete an object without an ID.
if self.id is None:
raise FrinkError("You can't delete an object with no ID")
else:
if isinstance(self.id, uuid.UUID):
self.id = str(self.id)
try:
query = r.db(
self._db
).table(
self._table
).get(
self.id
).delete()
log.debug(query)
rv = query.run(conn)
except Exception as e:
log.warn(e)
raise
else:
return True
|
[
"def",
"delete",
"(",
"self",
")",
":",
"with",
"rconnect",
"(",
")",
"as",
"conn",
":",
"# Can't delete an object without an ID.",
"if",
"self",
".",
"id",
"is",
"None",
":",
"raise",
"FrinkError",
"(",
"\"You can't delete an object with no ID\"",
")",
"else",
":",
"if",
"isinstance",
"(",
"self",
".",
"id",
",",
"uuid",
".",
"UUID",
")",
":",
"self",
".",
"id",
"=",
"str",
"(",
"self",
".",
"id",
")",
"try",
":",
"query",
"=",
"r",
".",
"db",
"(",
"self",
".",
"_db",
")",
".",
"table",
"(",
"self",
".",
"_table",
")",
".",
"get",
"(",
"self",
".",
"id",
")",
".",
"delete",
"(",
")",
"log",
".",
"debug",
"(",
"query",
")",
"rv",
"=",
"query",
".",
"run",
"(",
"conn",
")",
"except",
"Exception",
"as",
"e",
":",
"log",
".",
"warn",
"(",
"e",
")",
"raise",
"else",
":",
"return",
"True"
] |
Delete the current instance from the DB.
|
[
"Delete",
"the",
"current",
"instance",
"from",
"the",
"DB",
"."
] |
0d2c11daca8ef6d4365e98914bdc0bc65478ae72
|
https://github.com/hactar-is/frink/blob/0d2c11daca8ef6d4365e98914bdc0bc65478ae72/frink/orm.py#L92-L118
|
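delete() above refuses to run without a primary key and stringifies a uuid.UUID id before querying. A tiny standalone sketch of that guard, with the RethinkDB round-trip replaced by a print so it can be run anywhere:

# Standalone illustration of delete()'s id handling; the query is stubbed out.
import uuid

class FrinkError(Exception):
    pass

def delete_row(row_id):
    if row_id is None:
        raise FrinkError("You can't delete an object with no ID")
    if isinstance(row_id, uuid.UUID):
        row_id = str(row_id)                    # keys are stored as strings
    print("would run: r.db(db).table(table).get(%r).delete()" % row_id)
    return True

delete_row(uuid.uuid4())                        # UUID is converted to its string form
try:
    delete_row(None)
except FrinkError as exc:
    print(exc)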
241,799
|
hactar-is/frink
|
frink/orm.py
|
ORMLayer.get
|
def get(self, id):
"""
Get a single instance by pk id.
:param id: The UUID of the instance you want to retrieve.
"""
with rconnect() as conn:
if id is None:
raise ValueError
if isinstance(id, uuid.UUID):
id = str(id)
if type(id) != str and type(id) != unicode:
raise ValueError
try:
query = self._base().get(id)
log.debug(query)
rv = query.run(conn)
except ReqlOpFailedError as e:
log.warn(e)
raise
except Exception as e:
log.warn(e)
raise
if rv is not None:
return self._model(rv)
return None
|
python
|
def get(self, id):
"""
Get a single instance by pk id.
:param id: The UUID of the instance you want to retrieve.
"""
with rconnect() as conn:
if id is None:
raise ValueError
if isinstance(id, uuid.UUID):
id = str(id)
if type(id) != str and type(id) != unicode:
raise ValueError
try:
query = self._base().get(id)
log.debug(query)
rv = query.run(conn)
except ReqlOpFailedError as e:
log.warn(e)
raise
except Exception as e:
log.warn(e)
raise
if rv is not None:
return self._model(rv)
return None
|
[
"def",
"get",
"(",
"self",
",",
"id",
")",
":",
"with",
"rconnect",
"(",
")",
"as",
"conn",
":",
"if",
"id",
"is",
"None",
":",
"raise",
"ValueError",
"if",
"isinstance",
"(",
"id",
",",
"uuid",
".",
"UUID",
")",
":",
"id",
"=",
"str",
"(",
"id",
")",
"if",
"type",
"(",
"id",
")",
"!=",
"str",
"and",
"type",
"(",
"id",
")",
"!=",
"unicode",
":",
"raise",
"ValueError",
"try",
":",
"query",
"=",
"self",
".",
"_base",
"(",
")",
".",
"get",
"(",
"id",
")",
"log",
".",
"debug",
"(",
"query",
")",
"rv",
"=",
"query",
".",
"run",
"(",
"conn",
")",
"except",
"ReqlOpFailedError",
"as",
"e",
":",
"log",
".",
"warn",
"(",
"e",
")",
"raise",
"except",
"Exception",
"as",
"e",
":",
"log",
".",
"warn",
"(",
"e",
")",
"raise",
"if",
"rv",
"is",
"not",
"None",
":",
"return",
"self",
".",
"_model",
"(",
"rv",
")",
"return",
"None"
] |
Get a single instance by pk id.
:param id: The UUID of the instance you want to retrieve.
|
[
"Get",
"a",
"single",
"instance",
"by",
"pk",
"id",
"."
] |
0d2c11daca8ef6d4365e98914bdc0bc65478ae72
|
https://github.com/hactar-is/frink/blob/0d2c11daca8ef6d4365e98914bdc0bc65478ae72/frink/orm.py#L148-L178
|
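get() applies the same normalization before querying: a None id is rejected, a uuid.UUID is stringified, and anything that is not a string raises ValueError (the unicode comparison marks the record as Python 2 code). A Python 3 sketch of that validation step on its own, with the query itself omitted:

# Python 3 rendering of get()'s id validation; no database call is made.
import uuid

def normalize_id(raw_id):
    if raw_id is None:
        raise ValueError("an id is required")
    if isinstance(raw_id, uuid.UUID):
        raw_id = str(raw_id)
    if not isinstance(raw_id, str):             # replaces the Python 2 str/unicode check
        raise ValueError("id must be a string or UUID")
    return raw_id

print(normalize_id(uuid.uuid4()))               # e.g. 'dd8ad1bc-8609-4484-b6c4-ed96c72c03f2'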