id
int32 0
252k
| repo
stringlengths 7
55
| path
stringlengths 4
127
| func_name
stringlengths 1
88
| original_string
stringlengths 75
19.8k
| language
stringclasses 1
value | code
stringlengths 75
19.8k
| code_tokens
list | docstring
stringlengths 3
17.3k
| docstring_tokens
list | sha
stringlengths 40
40
| url
stringlengths 87
242
|
|---|---|---|---|---|---|---|---|---|---|---|---|
242,000
|
scivision/histutils
|
histutils/__init__.py
|
imgwriteincr
|
def imgwriteincr(fn: Path, imgs, imgslice):
"""
writes HDF5 huge image files in increments
"""
if isinstance(imgslice, int):
if imgslice and not (imgslice % 2000):
print(f'appending images {imgslice} to {fn}')
if isinstance(fn, Path):
# avoid accidental overwriting of source file due to misspecified command line
assert fn.suffix == '.h5', 'Expecting to write .h5 file'
with h5py.File(fn, 'r+') as f:
f['/rawimg'][imgslice, :, :] = imgs
elif isinstance(fn, h5py.File):
f['/rawimg'][imgslice, :, :] = imgs
else:
raise TypeError(
f'{fn} must be Path or h5py.File instead of {type(fn)}')
|
python
|
def imgwriteincr(fn: Path, imgs, imgslice):
"""
writes HDF5 huge image files in increments
"""
if isinstance(imgslice, int):
if imgslice and not (imgslice % 2000):
print(f'appending images {imgslice} to {fn}')
if isinstance(fn, Path):
# avoid accidental overwriting of source file due to misspecified command line
assert fn.suffix == '.h5', 'Expecting to write .h5 file'
with h5py.File(fn, 'r+') as f:
f['/rawimg'][imgslice, :, :] = imgs
elif isinstance(fn, h5py.File):
f['/rawimg'][imgslice, :, :] = imgs
else:
raise TypeError(
f'{fn} must be Path or h5py.File instead of {type(fn)}')
|
[
"def",
"imgwriteincr",
"(",
"fn",
":",
"Path",
",",
"imgs",
",",
"imgslice",
")",
":",
"if",
"isinstance",
"(",
"imgslice",
",",
"int",
")",
":",
"if",
"imgslice",
"and",
"not",
"(",
"imgslice",
"%",
"2000",
")",
":",
"print",
"(",
"f'appending images {imgslice} to {fn}'",
")",
"if",
"isinstance",
"(",
"fn",
",",
"Path",
")",
":",
"# avoid accidental overwriting of source file due to misspecified command line",
"assert",
"fn",
".",
"suffix",
"==",
"'.h5'",
",",
"'Expecting to write .h5 file'",
"with",
"h5py",
".",
"File",
"(",
"fn",
",",
"'r+'",
")",
"as",
"f",
":",
"f",
"[",
"'/rawimg'",
"]",
"[",
"imgslice",
",",
":",
",",
":",
"]",
"=",
"imgs",
"elif",
"isinstance",
"(",
"fn",
",",
"h5py",
".",
"File",
")",
":",
"f",
"[",
"'/rawimg'",
"]",
"[",
"imgslice",
",",
":",
",",
":",
"]",
"=",
"imgs",
"else",
":",
"raise",
"TypeError",
"(",
"f'{fn} must be Path or h5py.File instead of {type(fn)}'",
")"
] |
writes HDF5 huge image files in increments
|
[
"writes",
"HDF5",
"huge",
"image",
"files",
"in",
"increments"
] |
859a91d3894cb57faed34881c6ea16130b90571e
|
https://github.com/scivision/histutils/blob/859a91d3894cb57faed34881c6ea16130b90571e/histutils/__init__.py#L381-L399
|
242,001
|
MacHu-GWU/pyknackhq-project
|
pyknackhq/js.py
|
safe_dump_js
|
def safe_dump_js(js, abspath,
fastmode=False, compress=False, enable_verbose=True):
"""A stable version of dump_js, silently overwrite existing file.
When your program has been interrupted, you lose nothing. Typically if your
program is interrupted for any reason, it only leaves an incomplete file.
If you use replace=True, then you also lose your old file.
So a better way is to:
1. dump json to a temp file.
2. when it's done, rename it to #abspath, overwriting the old one.
This way guarantees an atomic write.
:param js: Serializable python object.
:type js: dict or list
:param abspath: ``save as`` path, file extension has to be ``.json`` or ``.gz``
(for compressed json).
:type abspath: string
:param fastmode: (default False) If ``True``, then dumping json without
sorted keys and pretty indent, and it's faster and smaller in size.
:type fastmode: boolean
:param compress: (default False) If ``True``, use GNU program gzip to
compress the json file. Disk usage can be greatly reduced. But you have
to use :func:`load_js(abspath, compress=True)<load_js>` in loading.
:type compress: boolean
:param enable_verbose: (default True) Trigger for message.
:type enable_verbose: boolean
Usage::
>>> from weatherlab.lib.dataIO.js import dump_js
>>> js = {"a": 1, "b": 2}
>>> safe_dump_js(js, "test.json")
Dumping to test.json...
Complete! Elapse 0.002432 sec
**中文文档**
在对文件进行写入时, 如果程序中断, 则会留下一个不完整的文件。如果你使用了覆盖式
写入, 则你同时也丢失了原文件。所以为了保证写操作的原子性(要么全部完成, 要么全部
都不完成), 更好的方法是: 首先将文件写入一个临时文件中, 完成后再讲文件重命名,
覆盖旧文件。这样即使中途程序被中断, 也仅仅是留下了一个未完成的临时文件而已, 不会
影响原文件。
参数列表
:param js: 可Json化的Python对象
:type js: ``字典`` 或 ``列表``
:param abspath: 写入文件的路径。扩展名必须为 ``.json`` 或 ``.gz``, 其中gz用于被压
缩的Json
:type abspath: ``字符串``
:param replace: (默认 False) 当为``True``时, 如果写入路径已经存在, 则会自动覆盖
原文件。而为``False``时, 则会抛出异常。防止误操作覆盖源文件。
:type replace: ``布尔值``
:param compress: (默认 False) 当为``True``时, 使用开源压缩标准gzip压缩Json文件。
通常能让文件大小缩小10-20倍不等。如要读取文件, 则需要使用函数
:func:`load_js(abspath, compress=True)<load_js>`.
:type compress: ``布尔值``
:param enable_verbose: (默认 True) 是否打开信息提示开关, 批处理时建议关闭.
:type enable_verbose: ``布尔值``
"""
abspath = str(abspath) # try stringlize
temp_abspath = "%s.tmp" % abspath
dump_js(js, temp_abspath, fastmode=fastmode,
replace=True, compress=compress, enable_verbose=enable_verbose)
shutil.move(temp_abspath, abspath)
|
python
|
def safe_dump_js(js, abspath,
fastmode=False, compress=False, enable_verbose=True):
"""A stable version of dump_js, silently overwrite existing file.
When your program has been interrupted, you lose nothing. Typically if your
program is interrupted for any reason, it only leaves an incomplete file.
If you use replace=True, then you also lose your old file.
So a better way is to:
1. dump json to a temp file.
2. when it's done, rename it to #abspath, overwriting the old one.
This way guarantees an atomic write.
:param js: Serializable python object.
:type js: dict or list
:param abspath: ``save as`` path, file extension has to be ``.json`` or ``.gz``
(for compressed json).
:type abspath: string
:param fastmode: (default False) If ``True``, then dumping json without
sorted keys and pretty indent, and it's faster and smaller in size.
:type fastmode: boolean
:param compress: (default False) If ``True``, use GNU program gzip to
compress the json file. Disk usage can be greatly reduced. But you have
to use :func:`load_js(abspath, compress=True)<load_js>` in loading.
:type compress: boolean
:param enable_verbose: (default True) Trigger for message.
:type enable_verbose: boolean
Usage::
>>> from weatherlab.lib.dataIO.js import dump_js
>>> js = {"a": 1, "b": 2}
>>> safe_dump_js(js, "test.json")
Dumping to test.json...
Complete! Elapse 0.002432 sec
**中文文档**
在对文件进行写入时, 如果程序中断, 则会留下一个不完整的文件。如果你使用了覆盖式
写入, 则你同时也丢失了原文件。所以为了保证写操作的原子性(要么全部完成, 要么全部
都不完成), 更好的方法是: 首先将文件写入一个临时文件中, 完成后再讲文件重命名,
覆盖旧文件。这样即使中途程序被中断, 也仅仅是留下了一个未完成的临时文件而已, 不会
影响原文件。
参数列表
:param js: 可Json化的Python对象
:type js: ``字典`` 或 ``列表``
:param abspath: 写入文件的路径。扩展名必须为 ``.json`` 或 ``.gz``, 其中gz用于被压
缩的Json
:type abspath: ``字符串``
:param replace: (默认 False) 当为``True``时, 如果写入路径已经存在, 则会自动覆盖
原文件。而为``False``时, 则会抛出异常。防止误操作覆盖源文件。
:type replace: ``布尔值``
:param compress: (默认 False) 当为``True``时, 使用开源压缩标准gzip压缩Json文件。
通常能让文件大小缩小10-20倍不等。如要读取文件, 则需要使用函数
:func:`load_js(abspath, compress=True)<load_js>`.
:type compress: ``布尔值``
:param enable_verbose: (默认 True) 是否打开信息提示开关, 批处理时建议关闭.
:type enable_verbose: ``布尔值``
"""
abspath = str(abspath) # try stringlize
temp_abspath = "%s.tmp" % abspath
dump_js(js, temp_abspath, fastmode=fastmode,
replace=True, compress=compress, enable_verbose=enable_verbose)
shutil.move(temp_abspath, abspath)
|
[
"def",
"safe_dump_js",
"(",
"js",
",",
"abspath",
",",
"fastmode",
"=",
"False",
",",
"compress",
"=",
"False",
",",
"enable_verbose",
"=",
"True",
")",
":",
"abspath",
"=",
"str",
"(",
"abspath",
")",
"# try stringlize",
"temp_abspath",
"=",
"\"%s.tmp\"",
"%",
"abspath",
"dump_js",
"(",
"js",
",",
"temp_abspath",
",",
"fastmode",
"=",
"fastmode",
",",
"replace",
"=",
"True",
",",
"compress",
"=",
"compress",
",",
"enable_verbose",
"=",
"enable_verbose",
")",
"shutil",
".",
"move",
"(",
"temp_abspath",
",",
"abspath",
")"
] |
A stable version of dump_js, silently overwrite existing file.
When your program has been interrupted, you lose nothing. Typically if your
program is interrupted for any reason, it only leaves an incomplete file.
If you use replace=True, then you also lose your old file.
So a better way is to:
1. dump json to a temp file.
2. when it's done, rename it to #abspath, overwriting the old one.
This way guarantees an atomic write.
:param js: Serializable python object.
:type js: dict or list
:param abspath: ``save as`` path, file extension has to be ``.json`` or ``.gz``
(for compressed json).
:type abspath: string
:param fastmode: (default False) If ``True``, then dumping json without
sorted keys and pretty indent, and it's faster and smaller in size.
:type fastmode: boolean
:param compress: (default False) If ``True``, use GNU program gzip to
compress the json file. Disk usage can be greatly reduced. But you have
to use :func:`load_js(abspath, compress=True)<load_js>` in loading.
:type compress: boolean
:param enable_verbose: (default True) Trigger for message.
:type enable_verbose: boolean
Usage::
>>> from weatherlab.lib.dataIO.js import dump_js
>>> js = {"a": 1, "b": 2}
>>> safe_dump_js(js, "test.json")
Dumping to test.json...
Complete! Elapse 0.002432 sec
**中文文档**
在对文件进行写入时, 如果程序中断, 则会留下一个不完整的文件。如果你使用了覆盖式
写入, 则你同时也丢失了原文件。所以为了保证写操作的原子性(要么全部完成, 要么全部
都不完成), 更好的方法是: 首先将文件写入一个临时文件中, 完成后再讲文件重命名,
覆盖旧文件。这样即使中途程序被中断, 也仅仅是留下了一个未完成的临时文件而已, 不会
影响原文件。
参数列表
:param js: 可Json化的Python对象
:type js: ``字典`` 或 ``列表``
:param abspath: 写入文件的路径。扩展名必须为 ``.json`` 或 ``.gz``, 其中gz用于被压
缩的Json
:type abspath: ``字符串``
:param replace: (默认 False) 当为``True``时, 如果写入路径已经存在, 则会自动覆盖
原文件。而为``False``时, 则会抛出异常。防止误操作覆盖源文件。
:type replace: ``布尔值``
:param compress: (默认 False) 当为``True``时, 使用开源压缩标准gzip压缩Json文件。
通常能让文件大小缩小10-20倍不等。如要读取文件, 则需要使用函数
:func:`load_js(abspath, compress=True)<load_js>`.
:type compress: ``布尔值``
:param enable_verbose: (默认 True) 是否打开信息提示开关, 批处理时建议关闭.
:type enable_verbose: ``布尔值``
|
[
"A",
"stable",
"version",
"of",
"dump_js",
"silently",
"overwrite",
"existing",
"file",
"."
] |
dd937f24d7b0a351ba3818eb746c31b29a8cc341
|
https://github.com/MacHu-GWU/pyknackhq-project/blob/dd937f24d7b0a351ba3818eb746c31b29a8cc341/pyknackhq/js.py#L260-L335
|
242,002
|
political-memory/django-representatives
|
representatives/migrations/0017_auto_20160623_2201.py
|
migrate_constituencies
|
def migrate_constituencies(apps, schema_editor):
"""
Re-save constituencies to recompute fingerprints
"""
Constituency = apps.get_model("representatives", "Constituency")
for c in Constituency.objects.all():
c.save()
|
python
|
def migrate_constituencies(apps, schema_editor):
"""
Re-save constituencies to recompute fingerprints
"""
Constituency = apps.get_model("representatives", "Constituency")
for c in Constituency.objects.all():
c.save()
|
[
"def",
"migrate_constituencies",
"(",
"apps",
",",
"schema_editor",
")",
":",
"Constituency",
"=",
"apps",
".",
"get_model",
"(",
"\"representatives\"",
",",
"\"Constituency\"",
")",
"for",
"c",
"in",
"Constituency",
".",
"objects",
".",
"all",
"(",
")",
":",
"c",
".",
"save",
"(",
")"
] |
Re-save constituencies to recompute fingerprints
|
[
"Re",
"-",
"save",
"constituencies",
"to",
"recompute",
"fingerprints"
] |
811c90d0250149e913e6196f0ab11c97d396be39
|
https://github.com/political-memory/django-representatives/blob/811c90d0250149e913e6196f0ab11c97d396be39/representatives/migrations/0017_auto_20160623_2201.py#L7-L13
|
242,003
|
mayfield/shellish
|
shellish/layout/tree.py
|
treeprint
|
def treeprint(data, render_only=False, file=None, **options):
""" Render a tree structure based on generic python containers. The keys
should be titles and the values are children of the node or None if it's
an empty leaf node; Primitives are valid leaf node labels too. E.g.
sample = {
"Leaf 1": None,
"Leaf 2": "I have a label on me",
"Branch A": {
"Sub Leaf 1 with float label": 3.14,
"Sub Branch": {
"Deep Leaf": None
}
},
"Branch B": {
"Sub Leaf 2": None
}
}
"""
def getiter(obj):
if isinstance(obj, collections.abc.Mapping):
return obj.items()
elif (isinstance(obj, collections.abc.Iterable) and
not isinstance(obj, str)):
return enumerate(obj)
def cycle_check(item, seen=set()):
item_id = id(item)
if item_id in seen:
raise ValueError('Cycle detected for: %s' % repr(item))
else:
seen.add(item_id)
def crawl(obj, cc=cycle_check):
cc(obj)
objiter = getiter(obj)
if objiter is None:
yield TreeNode(obj)
else:
for key, item in objiter:
if isinstance(item, collections.abc.Iterable) and \
not isinstance(item, str):
yield TreeNode(key, children=crawl(item))
elif item is None:
yield TreeNode(key)
else:
yield TreeNode(key, label=item)
t = Tree(**options)
render_gen = t.render(crawl(data))
if render_only:
return render_gen
else:
file = sys.stdout if file is None else file
conv = (lambda x: x.plain()) if not file.isatty() else (lambda x: x)
for x in render_gen:
print(conv(x), file=file)
|
python
|
def treeprint(data, render_only=False, file=None, **options):
""" Render a tree structure based on generic python containers. The keys
should be titles and the values are children of the node or None if it's
an empty leaf node; Primitives are valid leaf node labels too. E.g.
sample = {
"Leaf 1": None,
"Leaf 2": "I have a label on me",
"Branch A": {
"Sub Leaf 1 with float label": 3.14,
"Sub Branch": {
"Deep Leaf": None
}
},
"Branch B": {
"Sub Leaf 2": None
}
}
"""
def getiter(obj):
if isinstance(obj, collections.abc.Mapping):
return obj.items()
elif (isinstance(obj, collections.abc.Iterable) and
not isinstance(obj, str)):
return enumerate(obj)
def cycle_check(item, seen=set()):
item_id = id(item)
if item_id in seen:
raise ValueError('Cycle detected for: %s' % repr(item))
else:
seen.add(item_id)
def crawl(obj, cc=cycle_check):
cc(obj)
objiter = getiter(obj)
if objiter is None:
yield TreeNode(obj)
else:
for key, item in objiter:
if isinstance(item, collections.abc.Iterable) and \
not isinstance(item, str):
yield TreeNode(key, children=crawl(item))
elif item is None:
yield TreeNode(key)
else:
yield TreeNode(key, label=item)
t = Tree(**options)
render_gen = t.render(crawl(data))
if render_only:
return render_gen
else:
file = sys.stdout if file is None else file
conv = (lambda x: x.plain()) if not file.isatty() else (lambda x: x)
for x in render_gen:
print(conv(x), file=file)
|
[
"def",
"treeprint",
"(",
"data",
",",
"render_only",
"=",
"False",
",",
"file",
"=",
"None",
",",
"*",
"*",
"options",
")",
":",
"def",
"getiter",
"(",
"obj",
")",
":",
"if",
"isinstance",
"(",
"obj",
",",
"collections",
".",
"abc",
".",
"Mapping",
")",
":",
"return",
"obj",
".",
"items",
"(",
")",
"elif",
"(",
"isinstance",
"(",
"obj",
",",
"collections",
".",
"abc",
".",
"Iterable",
")",
"and",
"not",
"isinstance",
"(",
"obj",
",",
"str",
")",
")",
":",
"return",
"enumerate",
"(",
"obj",
")",
"def",
"cycle_check",
"(",
"item",
",",
"seen",
"=",
"set",
"(",
")",
")",
":",
"item_id",
"=",
"id",
"(",
"item",
")",
"if",
"item_id",
"in",
"seen",
":",
"raise",
"ValueError",
"(",
"'Cycle detected for: %s'",
"%",
"repr",
"(",
"item",
")",
")",
"else",
":",
"seen",
".",
"add",
"(",
"item_id",
")",
"def",
"crawl",
"(",
"obj",
",",
"cc",
"=",
"cycle_check",
")",
":",
"cc",
"(",
"obj",
")",
"objiter",
"=",
"getiter",
"(",
"obj",
")",
"if",
"objiter",
"is",
"None",
":",
"yield",
"TreeNode",
"(",
"obj",
")",
"else",
":",
"for",
"key",
",",
"item",
"in",
"objiter",
":",
"if",
"isinstance",
"(",
"item",
",",
"collections",
".",
"abc",
".",
"Iterable",
")",
"and",
"not",
"isinstance",
"(",
"item",
",",
"str",
")",
":",
"yield",
"TreeNode",
"(",
"key",
",",
"children",
"=",
"crawl",
"(",
"item",
")",
")",
"elif",
"item",
"is",
"None",
":",
"yield",
"TreeNode",
"(",
"key",
")",
"else",
":",
"yield",
"TreeNode",
"(",
"key",
",",
"label",
"=",
"item",
")",
"t",
"=",
"Tree",
"(",
"*",
"*",
"options",
")",
"render_gen",
"=",
"t",
".",
"render",
"(",
"crawl",
"(",
"data",
")",
")",
"if",
"render_only",
":",
"return",
"render_gen",
"else",
":",
"file",
"=",
"sys",
".",
"stdout",
"if",
"file",
"is",
"None",
"else",
"file",
"conv",
"=",
"(",
"lambda",
"x",
":",
"x",
".",
"plain",
"(",
")",
")",
"if",
"not",
"file",
".",
"isatty",
"(",
")",
"else",
"(",
"lambda",
"x",
":",
"x",
")",
"for",
"x",
"in",
"render_gen",
":",
"print",
"(",
"conv",
"(",
"x",
")",
",",
"file",
"=",
"file",
")"
] |
Render a tree structure based on generic python containers. The keys
should be titles and the values are children of the node or None if it's
an empty leaf node; Primitives are valid leaf node labels too. E.g.
sample = {
"Leaf 1": None,
"Leaf 2": "I have a label on me",
"Branch A": {
"Sub Leaf 1 with float label": 3.14,
"Sub Branch": {
"Deep Leaf": None
}
},
"Branch B": {
"Sub Leaf 2": None
}
}
|
[
"Render",
"a",
"tree",
"structure",
"based",
"on",
"generic",
"python",
"containers",
".",
"The",
"keys",
"should",
"be",
"titles",
"and",
"the",
"values",
"are",
"children",
"of",
"the",
"node",
"or",
"None",
"if",
"it",
"s",
"an",
"empty",
"leaf",
"node",
";",
"Primitives",
"are",
"valid",
"leaf",
"node",
"labels",
"too",
".",
"E",
".",
"g",
"."
] |
df0f0e4612d138c34d8cb99b66ab5b8e47f1414a
|
https://github.com/mayfield/shellish/blob/df0f0e4612d138c34d8cb99b66ab5b8e47f1414a/shellish/layout/tree.py#L60-L116
|
242,004
|
marcosgabarda/django-belt
|
belt/commands.py
|
ProgressBarCommand.terminal_size
|
def terminal_size(self):
"""Gets the terminal columns size."""
try:
_, columns = os.popen('stty size', 'r').read().split()
return min(int(columns) - 10, 100)
except ValueError:
return self.default_terminal_size
|
python
|
def terminal_size(self):
"""Gets the terminal columns size."""
try:
_, columns = os.popen('stty size', 'r').read().split()
return min(int(columns) - 10, 100)
except ValueError:
return self.default_terminal_size
|
[
"def",
"terminal_size",
"(",
"self",
")",
":",
"try",
":",
"_",
",",
"columns",
"=",
"os",
".",
"popen",
"(",
"'stty size'",
",",
"'r'",
")",
".",
"read",
"(",
")",
".",
"split",
"(",
")",
"return",
"min",
"(",
"int",
"(",
"columns",
")",
"-",
"10",
",",
"100",
")",
"except",
"ValueError",
":",
"return",
"self",
".",
"default_terminal_size"
] |
Gets the terminal columns size.
|
[
"Gets",
"the",
"terminal",
"columns",
"size",
"."
] |
81404604c4dff664b1520b01e1f638c9c6bab41b
|
https://github.com/marcosgabarda/django-belt/blob/81404604c4dff664b1520b01e1f638c9c6bab41b/belt/commands.py#L13-L19
|
242,005
|
marcosgabarda/django-belt
|
belt/commands.py
|
ProgressBarCommand.bar
|
def bar(self, progress):
"""Shows on the stdout the progress bar for the given progress."""
if not hasattr(self, "_limit") or not self._limit:
self._limit = self.terminal_size()
graph_progress = int(progress * self._limit)
self.stdout.write('\r', ending='')
progress_format = "[%-{}s] %d%%".format(self._limit)
self.stdout.write(
self.style.SUCCESS(progress_format % (self.progress_symbol * graph_progress, int(progress * 100))),
ending=''
)
self.stdout.flush()
|
python
|
def bar(self, progress):
"""Shows on the stdout the progress bar for the given progress."""
if not hasattr(self, "_limit") or not self._limit:
self._limit = self.terminal_size()
graph_progress = int(progress * self._limit)
self.stdout.write('\r', ending='')
progress_format = "[%-{}s] %d%%".format(self._limit)
self.stdout.write(
self.style.SUCCESS(progress_format % (self.progress_symbol * graph_progress, int(progress * 100))),
ending=''
)
self.stdout.flush()
|
[
"def",
"bar",
"(",
"self",
",",
"progress",
")",
":",
"if",
"not",
"hasattr",
"(",
"self",
",",
"\"_limit\"",
")",
"or",
"not",
"self",
".",
"_limit",
":",
"self",
".",
"_limit",
"=",
"self",
".",
"terminal_size",
"(",
")",
"graph_progress",
"=",
"int",
"(",
"progress",
"*",
"self",
".",
"_limit",
")",
"self",
".",
"stdout",
".",
"write",
"(",
"'\\r'",
",",
"ending",
"=",
"''",
")",
"progress_format",
"=",
"\"[%-{}s] %d%%\"",
".",
"format",
"(",
"self",
".",
"_limit",
")",
"self",
".",
"stdout",
".",
"write",
"(",
"self",
".",
"style",
".",
"SUCCESS",
"(",
"progress_format",
"%",
"(",
"self",
".",
"progress_symbol",
"*",
"graph_progress",
",",
"int",
"(",
"progress",
"*",
"100",
")",
")",
")",
",",
"ending",
"=",
"''",
")",
"self",
".",
"stdout",
".",
"flush",
"(",
")"
] |
Shows on the stdout the progress bar for the given progress.
|
[
"Shows",
"on",
"the",
"stdout",
"the",
"progress",
"bar",
"for",
"the",
"given",
"progress",
"."
] |
81404604c4dff664b1520b01e1f638c9c6bab41b
|
https://github.com/marcosgabarda/django-belt/blob/81404604c4dff664b1520b01e1f638c9c6bab41b/belt/commands.py#L21-L32
|
242,006
|
sahid/warm
|
warm/components/__init__.py
|
SecurityGroup._Execute
|
def _Execute(self, options):
"""Handles security groups operations."""
whitelist = dict(
name=options["name"],
description=options.get("description", "<empty>"))
return self._agent.client.compute.security_groups.create(**whitelist)
|
python
|
def _Execute(self, options):
"""Handles security groups operations."""
whitelist = dict(
name=options["name"],
description=options.get("description", "<empty>"))
return self._agent.client.compute.security_groups.create(**whitelist)
|
[
"def",
"_Execute",
"(",
"self",
",",
"options",
")",
":",
"whitelist",
"=",
"dict",
"(",
"name",
"=",
"options",
"[",
"\"name\"",
"]",
",",
"description",
"=",
"options",
".",
"get",
"(",
"\"description\"",
",",
"\"<empty>\"",
")",
")",
"return",
"self",
".",
"_agent",
".",
"client",
".",
"compute",
".",
"security_groups",
".",
"create",
"(",
"*",
"*",
"whitelist",
")"
] |
Handles security groups operations.
|
[
"Handles",
"security",
"groups",
"operations",
"."
] |
baf1cb73c6769a556756b9078e60c96d4b1de2bd
|
https://github.com/sahid/warm/blob/baf1cb73c6769a556756b9078e60c96d4b1de2bd/warm/components/__init__.py#L167-L172
|
242,007
|
pwyliu/clancy
|
clancy/config.py
|
load_args
|
def load_args(args):
"""
Load a config file. Merges CLI args and validates.
"""
config = kaptan.Kaptan(handler='yaml')
conf_parent = os.path.expanduser('~')
conf_app = '.clancy'
conf_filename = 'config.yaml'
conf_dir = os.path.join(conf_parent, conf_app)
for loc in [os.curdir, conf_dir]:
configpath = os.path.join(loc, conf_filename)
try:
if os.path.isfile(configpath):
config.import_config(configpath)
break
except (ValueError, ParserError, ScannerError):
warn("Ignoring invalid valid yaml file {}".format(configpath))
config = (config.configuration_data
if config.configuration_data is not None else {})
# Prepend '--' to conf file keys so it matches docopt. This is the dumbest
# thing in the world.
for key, val in config.items():
config['--'+key] = val
del config[key]
return validate(merge(args, config))
|
python
|
def load_args(args):
"""
Load a config file. Merges CLI args and validates.
"""
config = kaptan.Kaptan(handler='yaml')
conf_parent = os.path.expanduser('~')
conf_app = '.clancy'
conf_filename = 'config.yaml'
conf_dir = os.path.join(conf_parent, conf_app)
for loc in [os.curdir, conf_dir]:
configpath = os.path.join(loc, conf_filename)
try:
if os.path.isfile(configpath):
config.import_config(configpath)
break
except (ValueError, ParserError, ScannerError):
warn("Ignoring invalid valid yaml file {}".format(configpath))
config = (config.configuration_data
if config.configuration_data is not None else {})
# Prepend '--' to conf file keys so it matches docopt. This is the dumbest
# thing in the world.
for key, val in config.items():
config['--'+key] = val
del config[key]
return validate(merge(args, config))
|
[
"def",
"load_args",
"(",
"args",
")",
":",
"config",
"=",
"kaptan",
".",
"Kaptan",
"(",
"handler",
"=",
"'yaml'",
")",
"conf_parent",
"=",
"os",
".",
"path",
".",
"expanduser",
"(",
"'~'",
")",
"conf_app",
"=",
"'.clancy'",
"conf_filename",
"=",
"'config.yaml'",
"conf_dir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"conf_parent",
",",
"conf_app",
")",
"for",
"loc",
"in",
"[",
"os",
".",
"curdir",
",",
"conf_dir",
"]",
":",
"configpath",
"=",
"os",
".",
"path",
".",
"join",
"(",
"loc",
",",
"conf_filename",
")",
"try",
":",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"configpath",
")",
":",
"config",
".",
"import_config",
"(",
"configpath",
")",
"break",
"except",
"(",
"ValueError",
",",
"ParserError",
",",
"ScannerError",
")",
":",
"warn",
"(",
"\"Ignoring invalid valid yaml file {}\"",
".",
"format",
"(",
"configpath",
")",
")",
"config",
"=",
"(",
"config",
".",
"configuration_data",
"if",
"config",
".",
"configuration_data",
"is",
"not",
"None",
"else",
"{",
"}",
")",
"# Prepend '--' to conf file keys so it matches docopt. This is the dumbest",
"# thing in the world.",
"for",
"key",
",",
"val",
"in",
"config",
".",
"items",
"(",
")",
":",
"config",
"[",
"'--'",
"+",
"key",
"]",
"=",
"val",
"del",
"config",
"[",
"key",
"]",
"return",
"validate",
"(",
"merge",
"(",
"args",
",",
"config",
")",
")"
] |
Load a config file. Merges CLI args and validates.
|
[
"Load",
"a",
"config",
"file",
".",
"Merges",
"CLI",
"args",
"and",
"validates",
"."
] |
cb15a5e2bb735ffce7a84b8413b04faa78c5039c
|
https://github.com/pwyliu/clancy/blob/cb15a5e2bb735ffce7a84b8413b04faa78c5039c/clancy/config.py#L59-L87
|
242,008
|
rsalmaso/django-fluo
|
fluo/middleware/locale.py
|
get_default_language
|
def get_default_language(language_code=None):
"""
Returns default language depending on settings.LANGUAGE_CODE merged with
best match from settings.LANGUAGES
Returns: language_code
Raises ImproperlyConfigured if no match found
"""
if not language_code:
language_code = settings.LANGUAGE_CODE
languages = dict(settings.LANGUAGES).keys()
# first try if there is an exact language
if language_code in languages:
return language_code
# otherwise split the language code if possible, so iso3
language_code = language_code.split("-")[0]
if language_code not in languages:
raise ImproperlyConfigured("No match in LANGUAGES for LANGUAGE_CODE %s" % settings.LANGUAGE_CODE)
return language_code
|
python
|
def get_default_language(language_code=None):
"""
Returns default language depending on settings.LANGUAGE_CODE merged with
best match from settings.LANGUAGES
Returns: language_code
Raises ImproperlyConfigured if no match found
"""
if not language_code:
language_code = settings.LANGUAGE_CODE
languages = dict(settings.LANGUAGES).keys()
# first try if there is an exact language
if language_code in languages:
return language_code
# otherwise split the language code if possible, so iso3
language_code = language_code.split("-")[0]
if language_code not in languages:
raise ImproperlyConfigured("No match in LANGUAGES for LANGUAGE_CODE %s" % settings.LANGUAGE_CODE)
return language_code
|
[
"def",
"get_default_language",
"(",
"language_code",
"=",
"None",
")",
":",
"if",
"not",
"language_code",
":",
"language_code",
"=",
"settings",
".",
"LANGUAGE_CODE",
"languages",
"=",
"dict",
"(",
"settings",
".",
"LANGUAGES",
")",
".",
"keys",
"(",
")",
"# first try if there is an exact language",
"if",
"language_code",
"in",
"languages",
":",
"return",
"language_code",
"# otherwise split the language code if possible, so iso3",
"language_code",
"=",
"language_code",
".",
"split",
"(",
"\"-\"",
")",
"[",
"0",
"]",
"if",
"language_code",
"not",
"in",
"languages",
":",
"raise",
"ImproperlyConfigured",
"(",
"\"No match in LANGUAGES for LANGUAGE_CODE %s\"",
"%",
"settings",
".",
"LANGUAGE_CODE",
")",
"return",
"language_code"
] |
Returns default language depending on settings.LANGUAGE_CODE merged with
best match from settings.LANGUAGES
Returns: language_code
Raises ImproperlyConfigured if no match found
|
[
"Returns",
"default",
"language",
"depending",
"on",
"settings",
".",
"LANGUAGE_CODE",
"merged",
"with",
"best",
"match",
"from",
"settings",
".",
"LANGUAGES"
] |
1321c1e7d6a912108f79be02a9e7f2108c57f89f
|
https://github.com/rsalmaso/django-fluo/blob/1321c1e7d6a912108f79be02a9e7f2108c57f89f/fluo/middleware/locale.py#L68-L93
|
242,009
|
luismasuelli/python-cantrips
|
cantrips/patterns/actions.py
|
Action.as_method
|
def as_method(self, docstring=""):
"""
Converts this action to a function or method.
An optional docstring may be passed.
"""
method = lambda obj, *args, **kwargs: self(obj, *args, **kwargs)
if docstring:
method.__doc__ = docstring
return method
|
python
|
def as_method(self, docstring=""):
"""
Converts this action to a function or method.
An optional docstring may be passed.
"""
method = lambda obj, *args, **kwargs: self(obj, *args, **kwargs)
if docstring:
method.__doc__ = docstring
return method
|
[
"def",
"as_method",
"(",
"self",
",",
"docstring",
"=",
"\"\"",
")",
":",
"method",
"=",
"lambda",
"obj",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
":",
"self",
"(",
"obj",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"if",
"docstring",
":",
"method",
".",
"__doc__",
"=",
"docstring",
"return",
"method"
] |
Converts this action to a function or method.
An optional docstring may be passed.
|
[
"Converts",
"this",
"action",
"to",
"a",
"function",
"or",
"method",
".",
"An",
"optional",
"docstring",
"may",
"be",
"passed",
"."
] |
dba2742c1d1a60863bb65f4a291464f6e68eb2ee
|
https://github.com/luismasuelli/python-cantrips/blob/dba2742c1d1a60863bb65f4a291464f6e68eb2ee/cantrips/patterns/actions.py#L12-L20
|
242,010
|
jpablo128/simplystatic
|
simplystatic/s2page.py
|
Page._create
|
def _create(self, rawtitle):
"""Create a page with this title, if it doesn't exist.
This method first checks whether a page with the same slug
(sanitized name) exists_on_disk. If it does, it doesn't do anything.
Otherwise, the relevant attributes are created.
Nothing is written to disc (to the source file). You must call
the write_page method to do that. Doing it this way, after
creation you can call a method to add random text, for example,
before committing the page to disk.
"""
slug = util.make_slug(rawtitle)
if self.site.page_exists_on_disk(slug):
raise ValueError
#print "Attempted to create a page which already exists."
#return False
self._title = unicode(rawtitle,"UTF-8")
self._slug = slug
self._dirs['source_dir'] = os.path.join(self.site.dirs['source'], slug)
self._dirs['source_filename'] = os.path.join(self._dirs['source_dir'],
slug + '.md')
self._dirs['www_dir'] = os.path.join(self.site.dirs['www'], slug)
self._dirs['www_filename'] = os.path.join(self._dirs['www_dir'], \
'index.html')
self._config = self._create_config()
return True
|
python
|
def _create(self, rawtitle):
"""Create a page with this title, if it doesn't exist.
This method first checks whether a page with the same slug
(sanitized name) exists_on_disk. If it does, it doesn't do anything.
Otherwise, the relevant attributes are created.
Nothing is written to disc (to the source file). You must call
the write_page method to do that. Doing it this way, after
creation you can call a method to add random text, for example,
before committing the page to disk.
"""
slug = util.make_slug(rawtitle)
if self.site.page_exists_on_disk(slug):
raise ValueError
#print "Attempted to create a page which already exists."
#return False
self._title = unicode(rawtitle,"UTF-8")
self._slug = slug
self._dirs['source_dir'] = os.path.join(self.site.dirs['source'], slug)
self._dirs['source_filename'] = os.path.join(self._dirs['source_dir'],
slug + '.md')
self._dirs['www_dir'] = os.path.join(self.site.dirs['www'], slug)
self._dirs['www_filename'] = os.path.join(self._dirs['www_dir'], \
'index.html')
self._config = self._create_config()
return True
|
[
"def",
"_create",
"(",
"self",
",",
"rawtitle",
")",
":",
"slug",
"=",
"util",
".",
"make_slug",
"(",
"rawtitle",
")",
"if",
"self",
".",
"site",
".",
"page_exists_on_disk",
"(",
"slug",
")",
":",
"raise",
"ValueError",
"#print \"Attempted to create a page which already exists.\"",
"#return False",
"self",
".",
"_title",
"=",
"unicode",
"(",
"rawtitle",
",",
"\"UTF-8\"",
")",
"self",
".",
"_slug",
"=",
"slug",
"self",
".",
"_dirs",
"[",
"'source_dir'",
"]",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"site",
".",
"dirs",
"[",
"'source'",
"]",
",",
"slug",
")",
"self",
".",
"_dirs",
"[",
"'source_filename'",
"]",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"_dirs",
"[",
"'source_dir'",
"]",
",",
"slug",
"+",
"'.md'",
")",
"self",
".",
"_dirs",
"[",
"'www_dir'",
"]",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"site",
".",
"dirs",
"[",
"'www'",
"]",
",",
"slug",
")",
"self",
".",
"_dirs",
"[",
"'www_filename'",
"]",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"_dirs",
"[",
"'www_dir'",
"]",
",",
"'index.html'",
")",
"self",
".",
"_config",
"=",
"self",
".",
"_create_config",
"(",
")",
"return",
"True"
] |
Create a page with this title, if it doesn't exist.
This method first checks whether a page with the same slug
(sanitized name) exists_on_disk. If it does, it doesn't do anything.
Otherwise, the relevant attributes are created.
Nothing is written to disc (to the source file). You must call
the write_page method to do that. Doing it this way, after
creation you can call a method to add random text, for example,
before committing the page to disk.
|
[
"Create",
"a",
"page",
"with",
"this",
"title",
"if",
"it",
"doesn",
"t",
"exist",
"."
] |
91ac579c8f34fa240bef9b87adb0116c6b40b24d
|
https://github.com/jpablo128/simplystatic/blob/91ac579c8f34fa240bef9b87adb0116c6b40b24d/simplystatic/s2page.py#L100-L129
|
242,011
|
jpablo128/simplystatic
|
simplystatic/s2page.py
|
Page.write
|
def write(self):
"""Write the s2 page to the corresponding source file.
It always writes the (serialized) config first, and then the
content (normally markdown). The destination file is in the
source_dir of the site.
"""
if not os.path.isdir(self._dirs['source_dir']):
os.mkdir(self._dirs['source_dir'])
fout = codecs.open(self._dirs['source_filename'], 'w', encoding="utf-8", errors="xmlcharrefreplace")
fout.write(self._config_to_text())
if self._content:
fout.write('\n')
fout.write(self._content)
fout.write('\n')
fout.close()
|
python
|
def write(self):
"""Write the s2 page to the corresponding source file.
It always writes the (serialized) config first, and then the
content (normally markdown). The destination file is in the
source_dir of the site.
"""
if not os.path.isdir(self._dirs['source_dir']):
os.mkdir(self._dirs['source_dir'])
fout = codecs.open(self._dirs['source_filename'], 'w', encoding="utf-8", errors="xmlcharrefreplace")
fout.write(self._config_to_text())
if self._content:
fout.write('\n')
fout.write(self._content)
fout.write('\n')
fout.close()
|
[
"def",
"write",
"(",
"self",
")",
":",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"self",
".",
"_dirs",
"[",
"'source_dir'",
"]",
")",
":",
"os",
".",
"mkdir",
"(",
"self",
".",
"_dirs",
"[",
"'source_dir'",
"]",
")",
"fout",
"=",
"codecs",
".",
"open",
"(",
"self",
".",
"_dirs",
"[",
"'source_filename'",
"]",
",",
"'w'",
",",
"encoding",
"=",
"\"utf-8\"",
",",
"errors",
"=",
"\"xmlcharrefreplace\"",
")",
"fout",
".",
"write",
"(",
"self",
".",
"_config_to_text",
"(",
")",
")",
"if",
"self",
".",
"_content",
":",
"fout",
".",
"write",
"(",
"'\\n'",
")",
"fout",
".",
"write",
"(",
"self",
".",
"_content",
")",
"fout",
".",
"write",
"(",
"'\\n'",
")",
"fout",
".",
"close",
"(",
")"
] |
Write the s2 page to the corresponding source file.
It always writes the (serialized) config first, and then the
content (normally markdown). The destination file is in the
source_dir of the site.
|
[
"Write",
"the",
"s2",
"page",
"to",
"the",
"corresponding",
"source",
"file",
"."
] |
91ac579c8f34fa240bef9b87adb0116c6b40b24d
|
https://github.com/jpablo128/simplystatic/blob/91ac579c8f34fa240bef9b87adb0116c6b40b24d/simplystatic/s2page.py#L131-L148
|
242,012
|
jpablo128/simplystatic
|
simplystatic/s2page.py
|
Page.rename
|
def rename(self, new_title):
"""Rename an existing s2 page.
For an existing s2 page, updates the directory and file name,
as well as the internal configuration information (since it
contains the title and the slug)
"""
if not isinstance(new_title, str) and \
not isinstance(new_title, unicode):
raise TypeError
# print "Cannot rename page. New title must be string or unicode."
new_slug = util.make_slug(new_title)
if self.site.page_exists_on_disk(new_slug):
raise ValueError
# print "Cannot rename page. A page with the same \
# title/slug already exists."
#wipe the source directory for this page
shutil.rmtree(self._dirs['source_dir'])
#just change dirinfo, config, and write
self._title = new_title
self._slug = new_slug
self._config['title'] = [self._title]
self._config['slug'] = [self._slug]
self._dirs['source_dir'] = os.path.join(self.site.dirs['source'],
new_slug)
self._dirs['source_filename'] = os.path.join(self._dirs['source_dir'],
new_slug + '.md')
self._dirs['www_dir'] = os.path.join(self.site.dirs['www'], new_slug)
#self._dirs['www_filename'] = os.path.join(self._dirs['www_dir'], \
# new_slug + '.html')
self.write()
|
python
|
def rename(self, new_title):
"""Rename an existing s2 page.
For an existing s2 page, updates the directory and file name,
as well as the internal configuration information (since it
contains the title and the slug)
"""
if not isinstance(new_title, str) and \
not isinstance(new_title, unicode):
raise TypeError
# print "Cannot rename page. New title must be string or unicode."
new_slug = util.make_slug(new_title)
if self.site.page_exists_on_disk(new_slug):
raise ValueError
# print "Cannot rename page. A page with the same \
# title/slug already exists."
#wipe the source directory for this page
shutil.rmtree(self._dirs['source_dir'])
#just change dirinfo, config, and write
self._title = new_title
self._slug = new_slug
self._config['title'] = [self._title]
self._config['slug'] = [self._slug]
self._dirs['source_dir'] = os.path.join(self.site.dirs['source'],
new_slug)
self._dirs['source_filename'] = os.path.join(self._dirs['source_dir'],
new_slug + '.md')
self._dirs['www_dir'] = os.path.join(self.site.dirs['www'], new_slug)
#self._dirs['www_filename'] = os.path.join(self._dirs['www_dir'], \
# new_slug + '.html')
self.write()
|
[
"def",
"rename",
"(",
"self",
",",
"new_title",
")",
":",
"if",
"not",
"isinstance",
"(",
"new_title",
",",
"str",
")",
"and",
"not",
"isinstance",
"(",
"new_title",
",",
"unicode",
")",
":",
"raise",
"TypeError",
"# print \"Cannot rename page. New title must be string or unicode.\"",
"new_slug",
"=",
"util",
".",
"make_slug",
"(",
"new_title",
")",
"if",
"self",
".",
"site",
".",
"page_exists_on_disk",
"(",
"new_slug",
")",
":",
"raise",
"ValueError",
"# print \"Cannot rename page. A page with the same \\",
"# title/slug already exists.\"",
"#wipe the source directory for this page",
"shutil",
".",
"rmtree",
"(",
"self",
".",
"_dirs",
"[",
"'source_dir'",
"]",
")",
"#just change dirinfo, config, and write",
"self",
".",
"_title",
"=",
"new_title",
"self",
".",
"_slug",
"=",
"new_slug",
"self",
".",
"_config",
"[",
"'title'",
"]",
"=",
"[",
"self",
".",
"_title",
"]",
"self",
".",
"_config",
"[",
"'slug'",
"]",
"=",
"[",
"self",
".",
"_slug",
"]",
"self",
".",
"_dirs",
"[",
"'source_dir'",
"]",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"site",
".",
"dirs",
"[",
"'source'",
"]",
",",
"new_slug",
")",
"self",
".",
"_dirs",
"[",
"'source_filename'",
"]",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"_dirs",
"[",
"'source_dir'",
"]",
",",
"new_slug",
"+",
"'.md'",
")",
"self",
".",
"_dirs",
"[",
"'www_dir'",
"]",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"site",
".",
"dirs",
"[",
"'www'",
"]",
",",
"new_slug",
")",
"#self._dirs['www_filename'] = os.path.join(self._dirs['www_dir'], \\",
"# new_slug + '.html')",
"self",
".",
"write",
"(",
")"
] |
Rename an existing s2 page.
For an existing s2 page, updates the directory and file name,
as well as the internal configuration information (since it
contains the title and the slug)
|
[
"Rename",
"an",
"existing",
"s2",
"page",
"."
] |
91ac579c8f34fa240bef9b87adb0116c6b40b24d
|
https://github.com/jpablo128/simplystatic/blob/91ac579c8f34fa240bef9b87adb0116c6b40b24d/simplystatic/s2page.py#L150-L186
|
242,013
|
jpablo128/simplystatic
|
simplystatic/s2page.py
|
Page.render
|
def render(self):
"""Render this page and return the rendition.
Converts the markdown content to html, and then renders the
(mako) template specified in the config, using that html.
The task of writing of the rendition to a real file is
responsibility of the generate method.
"""
(pthemedir, ptemplatefname) = self._theme_and_template_fp()
mylookup = TemplateLookup(directories=[self.site.dirs['s2'], pthemedir], input_encoding='utf-8', output_encoding='utf-8')
makotemplate = Template(filename=ptemplatefname, lookup=mylookup,
module_directory=self.site._makodir)
# I don't really need to use the meta extension here, because I render self._content (has no metadata)
#page_html = markdown.markdown(self._content)
md = markdown.Markdown(extensions=['meta','fenced_code', 'codehilite'],output_format="html5")
page_html = md.convert(self._content) # need to trigger the conversion to obtain md.Meta
# We assume that the page is always in a dir one level below www
themepath = "../themes/" + os.path.split(pthemedir)[1] + '/'
commonpath = "../common/"
# HERE I'll pass the config variable to the mako template, so I can use the title etc.
#buf = StringIO()
#ctx = Context(buf, dict(pageContent=page_html, isFrontPage=False, themePath=themepath, pageTitle='pedo',
# commonPath=commonpath))
#makotemplate.render_context(ctx)
#rendition = buf.getvalue()
# IS THERE `PIWIK CODE?
# IS THERE DISQUS CODE?
# READ from s2 if there's disqus_code.html.tpl and piwik_code.html.tpl
# if there's piwik, just define the variable piwik_code with its contents
# if there's disqus... nested render?
# HERE I NEED TO DIRECTLY INCLUDE A TEMPLATE IN ANOTHER TEMPLATE!!! MAKO!
#d_sn = self.site.site_config['disqus_shortname']
#if d_sn: # the site uses disqus
piwik_code = None
disqus_code, disqus_shortname, disqus_identifier, disqus_title, disqus_url= None, None, None, None, None
piwik_code_tpl = os.path.join(self.site.dirs['s2'],'piwik_code.html.tpl')
if os.path.isfile(piwik_code_tpl):
piwik_code = '/piwik_code.html.tpl'
disqus_code_tpl = os.path.join(self.site.dirs['s2'],'disqus_code.html.tpl')
if os.path.isfile(disqus_code_tpl):
disqus_code = '/disqus_code.html.tpl'
disqus_shortname = self.site.site_config['disqus_shortname']
disqus_identifier = self._config['page_id'][0]
disqus_title = self.title
disqus_url = os.path.join(self.site.site_config['site_url'],self._slug)
rendition = makotemplate.render(pageContent=page_html,isFrontPage=False,
themePath=themepath,
commonPath=commonpath,
pageTitle=self.title,
piwik_code=piwik_code,
disqus_code=disqus_code,
disqus_shortname = disqus_shortname,
disqus_identifier = disqus_identifier,
disqus_url = disqus_url,
disqus_title= disqus_title)
return rendition
|
python
|
def render(self):
"""Render this page and return the rendition.
Converts the markdown content to html, and then renders the
(mako) template specified in the config, using that html.
The task of writing of the rendition to a real file is
responsibility of the generate method.
"""
(pthemedir, ptemplatefname) = self._theme_and_template_fp()
mylookup = TemplateLookup(directories=[self.site.dirs['s2'], pthemedir], input_encoding='utf-8', output_encoding='utf-8')
makotemplate = Template(filename=ptemplatefname, lookup=mylookup,
module_directory=self.site._makodir)
# I don't really need to use the meta extension here, because I render self._content (has no metadata)
#page_html = markdown.markdown(self._content)
md = markdown.Markdown(extensions=['meta','fenced_code', 'codehilite'],output_format="html5")
page_html = md.convert(self._content) # need to trigger the conversion to obtain md.Meta
# We assume that the page is always in a dir one level below www
themepath = "../themes/" + os.path.split(pthemedir)[1] + '/'
commonpath = "../common/"
# HERE I'll pass the config variable to the mako template, so I can use the title etc.
#buf = StringIO()
#ctx = Context(buf, dict(pageContent=page_html, isFrontPage=False, themePath=themepath, pageTitle='pedo',
# commonPath=commonpath))
#makotemplate.render_context(ctx)
#rendition = buf.getvalue()
# IS THERE `PIWIK CODE?
# IS THERE DISQUS CODE?
# READ from s2 if there's disqus_code.html.tpl and piwik_code.html.tpl
# if there's piwik, just define the variable piwik_code with its contents
# if there's disqus... nested render?
# HERE I NEED TO DIRECTLY INCLUDE A TEMPLATE IN ANOTHER TEMPLATE!!! MAKO!
#d_sn = self.site.site_config['disqus_shortname']
#if d_sn: # the site uses disqus
piwik_code = None
disqus_code, disqus_shortname, disqus_identifier, disqus_title, disqus_url= None, None, None, None, None
piwik_code_tpl = os.path.join(self.site.dirs['s2'],'piwik_code.html.tpl')
if os.path.isfile(piwik_code_tpl):
piwik_code = '/piwik_code.html.tpl'
disqus_code_tpl = os.path.join(self.site.dirs['s2'],'disqus_code.html.tpl')
if os.path.isfile(disqus_code_tpl):
disqus_code = '/disqus_code.html.tpl'
disqus_shortname = self.site.site_config['disqus_shortname']
disqus_identifier = self._config['page_id'][0]
disqus_title = self.title
disqus_url = os.path.join(self.site.site_config['site_url'],self._slug)
rendition = makotemplate.render(pageContent=page_html,isFrontPage=False,
themePath=themepath,
commonPath=commonpath,
pageTitle=self.title,
piwik_code=piwik_code,
disqus_code=disqus_code,
disqus_shortname = disqus_shortname,
disqus_identifier = disqus_identifier,
disqus_url = disqus_url,
disqus_title= disqus_title)
return rendition
|
[
"def",
"render",
"(",
"self",
")",
":",
"(",
"pthemedir",
",",
"ptemplatefname",
")",
"=",
"self",
".",
"_theme_and_template_fp",
"(",
")",
"mylookup",
"=",
"TemplateLookup",
"(",
"directories",
"=",
"[",
"self",
".",
"site",
".",
"dirs",
"[",
"'s2'",
"]",
",",
"pthemedir",
"]",
",",
"input_encoding",
"=",
"'utf-8'",
",",
"output_encoding",
"=",
"'utf-8'",
")",
"makotemplate",
"=",
"Template",
"(",
"filename",
"=",
"ptemplatefname",
",",
"lookup",
"=",
"mylookup",
",",
"module_directory",
"=",
"self",
".",
"site",
".",
"_makodir",
")",
"# I don't really need to use the meta extension here, because I render self._content (has no metadata)",
"#page_html = markdown.markdown(self._content)",
"md",
"=",
"markdown",
".",
"Markdown",
"(",
"extensions",
"=",
"[",
"'meta'",
",",
"'fenced_code'",
",",
"'codehilite'",
"]",
",",
"output_format",
"=",
"\"html5\"",
")",
"page_html",
"=",
"md",
".",
"convert",
"(",
"self",
".",
"_content",
")",
"# need to trigger the conversion to obtain md.Meta",
"# We assume that the page is always in a dir one level below www",
"themepath",
"=",
"\"../themes/\"",
"+",
"os",
".",
"path",
".",
"split",
"(",
"pthemedir",
")",
"[",
"1",
"]",
"+",
"'/'",
"commonpath",
"=",
"\"../common/\"",
"# HERE I'll pass the config variable to the mako template, so I can use the title etc.",
"#buf = StringIO()",
"#ctx = Context(buf, dict(pageContent=page_html, isFrontPage=False, themePath=themepath, pageTitle='pedo',",
"# commonPath=commonpath))",
"#makotemplate.render_context(ctx)",
"#rendition = buf.getvalue()",
"# IS THERE `PIWIK CODE?",
"# IS THERE DISQUS CODE?",
"# READ from s2 if there's disqus_code.html.tpl and piwik_code.html.tpl",
"# if there's piwik, just define the variable piwik_code with its contents",
"# if there's disqus... nested render?",
"# HERE I NEED TO DIRECTLY INCLUDE A TEMPLATE IN ANOTHER TEMPLATE!!! MAKO!",
"#d_sn = self.site.site_config['disqus_shortname']",
"#if d_sn: # the site uses disqus",
"piwik_code",
"=",
"None",
"disqus_code",
",",
"disqus_shortname",
",",
"disqus_identifier",
",",
"disqus_title",
",",
"disqus_url",
"=",
"None",
",",
"None",
",",
"None",
",",
"None",
",",
"None",
"piwik_code_tpl",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"site",
".",
"dirs",
"[",
"'s2'",
"]",
",",
"'piwik_code.html.tpl'",
")",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"piwik_code_tpl",
")",
":",
"piwik_code",
"=",
"'/piwik_code.html.tpl'",
"disqus_code_tpl",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"site",
".",
"dirs",
"[",
"'s2'",
"]",
",",
"'disqus_code.html.tpl'",
")",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"disqus_code_tpl",
")",
":",
"disqus_code",
"=",
"'/disqus_code.html.tpl'",
"disqus_shortname",
"=",
"self",
".",
"site",
".",
"site_config",
"[",
"'disqus_shortname'",
"]",
"disqus_identifier",
"=",
"self",
".",
"_config",
"[",
"'page_id'",
"]",
"[",
"0",
"]",
"disqus_title",
"=",
"self",
".",
"title",
"disqus_url",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"site",
".",
"site_config",
"[",
"'site_url'",
"]",
",",
"self",
".",
"_slug",
")",
"rendition",
"=",
"makotemplate",
".",
"render",
"(",
"pageContent",
"=",
"page_html",
",",
"isFrontPage",
"=",
"False",
",",
"themePath",
"=",
"themepath",
",",
"commonPath",
"=",
"commonpath",
",",
"pageTitle",
"=",
"self",
".",
"title",
",",
"piwik_code",
"=",
"piwik_code",
",",
"disqus_code",
"=",
"disqus_code",
",",
"disqus_shortname",
"=",
"disqus_shortname",
",",
"disqus_identifier",
"=",
"disqus_identifier",
",",
"disqus_url",
"=",
"disqus_url",
",",
"disqus_title",
"=",
"disqus_title",
")",
"return",
"rendition"
] |
Render this page and return the rendition.
Converts the markdown content to html, and then renders the
(mako) template specified in the config, using that html.
The task of writing of the rendition to a real file is
responsibility of the generate method.
|
[
"Render",
"this",
"page",
"and",
"return",
"the",
"rendition",
"."
] |
91ac579c8f34fa240bef9b87adb0116c6b40b24d
|
https://github.com/jpablo128/simplystatic/blob/91ac579c8f34fa240bef9b87adb0116c6b40b24d/simplystatic/s2page.py#L189-L257
|
242,014
|
jpablo128/simplystatic
|
simplystatic/s2page.py
|
Page.generate
|
def generate(self):
"""Generate the page html file.
Just open the destination file for writing and write the result
of rendering this page.
"""
generated_content = ''
if 'published' in (self._config['status'][0]).lower():
if os.path.isdir(self.dirs['www_dir']):
shutil.rmtree(self.dirs['www_dir'])
os.mkdir(self.dirs['www_dir'])
# copy the whole source directory of the page,
# excluding 'nowww' and *s2md
sfl = glob.glob(os.path.join(self.dirs['source_dir'], "*"))
dirlist = [f for f in sfl if os.path.isdir(f)]
filelist = [f for f in sfl if os.path.isfile(f)]
for f in filelist:
if not '.md' in os.path.split(f)[1]:
shutil.copy(f, self.dirs['www_dir'])
for d in dirlist:
rfn = os.path.split(d)[1]
if rfn != 'nowww':
shutil.copytree(d, os.path.join(self.dirs['www_dir'], rfn))
#write the rendered "page" to file
#fout = open(self.dirs['www_filename'], 'w')
#fout.write(self.render())
#fout.close()
generated_content = self.render()
fout = codecs.open(self.dirs['www_filename'], "w", encoding="utf-8", errors="xmlcharrefreplace")
fout.write(generated_content)
fout.close()
return generated_content
|
python
|
def generate(self):
"""Generate the page html file.
Just open the destination file for writing and write the result
of rendering this page.
"""
generated_content = ''
if 'published' in (self._config['status'][0]).lower():
if os.path.isdir(self.dirs['www_dir']):
shutil.rmtree(self.dirs['www_dir'])
os.mkdir(self.dirs['www_dir'])
# copy the whole source directory of the page,
# excluding 'nowww' and *s2md
sfl = glob.glob(os.path.join(self.dirs['source_dir'], "*"))
dirlist = [f for f in sfl if os.path.isdir(f)]
filelist = [f for f in sfl if os.path.isfile(f)]
for f in filelist:
if not '.md' in os.path.split(f)[1]:
shutil.copy(f, self.dirs['www_dir'])
for d in dirlist:
rfn = os.path.split(d)[1]
if rfn != 'nowww':
shutil.copytree(d, os.path.join(self.dirs['www_dir'], rfn))
#write the rendered "page" to file
#fout = open(self.dirs['www_filename'], 'w')
#fout.write(self.render())
#fout.close()
generated_content = self.render()
fout = codecs.open(self.dirs['www_filename'], "w", encoding="utf-8", errors="xmlcharrefreplace")
fout.write(generated_content)
fout.close()
return generated_content
|
[
"def",
"generate",
"(",
"self",
")",
":",
"generated_content",
"=",
"''",
"if",
"'published'",
"in",
"(",
"self",
".",
"_config",
"[",
"'status'",
"]",
"[",
"0",
"]",
")",
".",
"lower",
"(",
")",
":",
"if",
"os",
".",
"path",
".",
"isdir",
"(",
"self",
".",
"dirs",
"[",
"'www_dir'",
"]",
")",
":",
"shutil",
".",
"rmtree",
"(",
"self",
".",
"dirs",
"[",
"'www_dir'",
"]",
")",
"os",
".",
"mkdir",
"(",
"self",
".",
"dirs",
"[",
"'www_dir'",
"]",
")",
"# copy the whole source directory of the page,",
"# excluding 'nowww' and *s2md",
"sfl",
"=",
"glob",
".",
"glob",
"(",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"dirs",
"[",
"'source_dir'",
"]",
",",
"\"*\"",
")",
")",
"dirlist",
"=",
"[",
"f",
"for",
"f",
"in",
"sfl",
"if",
"os",
".",
"path",
".",
"isdir",
"(",
"f",
")",
"]",
"filelist",
"=",
"[",
"f",
"for",
"f",
"in",
"sfl",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"f",
")",
"]",
"for",
"f",
"in",
"filelist",
":",
"if",
"not",
"'.md'",
"in",
"os",
".",
"path",
".",
"split",
"(",
"f",
")",
"[",
"1",
"]",
":",
"shutil",
".",
"copy",
"(",
"f",
",",
"self",
".",
"dirs",
"[",
"'www_dir'",
"]",
")",
"for",
"d",
"in",
"dirlist",
":",
"rfn",
"=",
"os",
".",
"path",
".",
"split",
"(",
"d",
")",
"[",
"1",
"]",
"if",
"rfn",
"!=",
"'nowww'",
":",
"shutil",
".",
"copytree",
"(",
"d",
",",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"dirs",
"[",
"'www_dir'",
"]",
",",
"rfn",
")",
")",
"#write the rendered \"page\" to file",
"#fout = open(self.dirs['www_filename'], 'w')",
"#fout.write(self.render())",
"#fout.close()",
"generated_content",
"=",
"self",
".",
"render",
"(",
")",
"fout",
"=",
"codecs",
".",
"open",
"(",
"self",
".",
"dirs",
"[",
"'www_filename'",
"]",
",",
"\"w\"",
",",
"encoding",
"=",
"\"utf-8\"",
",",
"errors",
"=",
"\"xmlcharrefreplace\"",
")",
"fout",
".",
"write",
"(",
"generated_content",
")",
"fout",
".",
"close",
"(",
")",
"return",
"generated_content"
] |
Generate the page html file.
Just open the destination file for writing and write the result
of rendering this page.
|
[
"Generate",
"the",
"page",
"html",
"file",
"."
] |
91ac579c8f34fa240bef9b87adb0116c6b40b24d
|
https://github.com/jpablo128/simplystatic/blob/91ac579c8f34fa240bef9b87adb0116c6b40b24d/simplystatic/s2page.py#L261-L296
|
242,015
|
jpablo128/simplystatic
|
simplystatic/s2page.py
|
Page._create_config
|
def _create_config(self):
"""Create the default configuration dictionary for this page."""
configinfo = {'creation_date': [ datetime.datetime.now().date().isoformat()],
'author': [self.site.site_config['default_author']],
'status': [u'draft'],
'lang': [u''],
'tags': [u''],
'title': [self._title],
'slug': [self._slug],
'theme': [u''],
'template': [u''],
'page_id': [uuid.uuid4().hex]
} # when theme and template are empty, the generator uses the defaults. Thus, initially
# they should be empty, to allow for global changes just by changing the site config files.
return configinfo
|
python
|
def _create_config(self):
"""Create the default configuration dictionary for this page."""
configinfo = {'creation_date': [ datetime.datetime.now().date().isoformat()],
'author': [self.site.site_config['default_author']],
'status': [u'draft'],
'lang': [u''],
'tags': [u''],
'title': [self._title],
'slug': [self._slug],
'theme': [u''],
'template': [u''],
'page_id': [uuid.uuid4().hex]
} # when theme and template are empty, the generator uses the defaults. Thus, initially
# they should be empty, to allow for global changes just by changing the site config files.
return configinfo
|
[
"def",
"_create_config",
"(",
"self",
")",
":",
"configinfo",
"=",
"{",
"'creation_date'",
":",
"[",
"datetime",
".",
"datetime",
".",
"now",
"(",
")",
".",
"date",
"(",
")",
".",
"isoformat",
"(",
")",
"]",
",",
"'author'",
":",
"[",
"self",
".",
"site",
".",
"site_config",
"[",
"'default_author'",
"]",
"]",
",",
"'status'",
":",
"[",
"u'draft'",
"]",
",",
"'lang'",
":",
"[",
"u''",
"]",
",",
"'tags'",
":",
"[",
"u''",
"]",
",",
"'title'",
":",
"[",
"self",
".",
"_title",
"]",
",",
"'slug'",
":",
"[",
"self",
".",
"_slug",
"]",
",",
"'theme'",
":",
"[",
"u''",
"]",
",",
"'template'",
":",
"[",
"u''",
"]",
",",
"'page_id'",
":",
"[",
"uuid",
".",
"uuid4",
"(",
")",
".",
"hex",
"]",
"}",
"# when theme and template are empty, the generator uses the defaults. Thus, initially",
"# they should be empty, to allow for global changes just by changing the site config files.",
"return",
"configinfo"
] |
Create the default configuration dictionary for this page.
|
[
"Create",
"the",
"default",
"configuration",
"dictionary",
"for",
"this",
"page",
"."
] |
91ac579c8f34fa240bef9b87adb0116c6b40b24d
|
https://github.com/jpablo128/simplystatic/blob/91ac579c8f34fa240bef9b87adb0116c6b40b24d/simplystatic/s2page.py#L302-L316
|
242,016
|
jpablo128/simplystatic
|
simplystatic/s2page.py
|
Page._load
|
def _load(self, slug):
"""Load the page. The _file_name param is known, because this
method is only called after having checked that the page exists.
"""
#here we know that the slug exists
self._slug = slug
page_dir = os.path.join(self.site.dirs['source'], self._slug)
page_file_name = os.path.join(page_dir, self._slug + '.md')
self._dirs['source_dir'] = page_dir
self._dirs['source_filename'] = page_file_name
self._dirs['www_dir'] = os.path.join(self.site.dirs['www'], slug)
self._dirs['www_filename'] = os.path.join(self._dirs['www_dir'], 'index.html')
#pf = open(self._dirs['source_filename'], 'r')
#page_text = pf.read()
#pf.close()
# need to decode!
pf = codecs.open(self._dirs['source_filename'], mode="r", encoding="utf-8")
page_text = pf.read()
pf.close()
self._parse_text(page_text)
if not self._check_config():
raise ValueError
#sys.exit()
self._title = self._config['title'][0]
|
python
|
def _load(self, slug):
"""Load the page. The _file_name param is known, because this
method is only called after having checked that the page exists.
"""
#here we know that the slug exists
self._slug = slug
page_dir = os.path.join(self.site.dirs['source'], self._slug)
page_file_name = os.path.join(page_dir, self._slug + '.md')
self._dirs['source_dir'] = page_dir
self._dirs['source_filename'] = page_file_name
self._dirs['www_dir'] = os.path.join(self.site.dirs['www'], slug)
self._dirs['www_filename'] = os.path.join(self._dirs['www_dir'], 'index.html')
#pf = open(self._dirs['source_filename'], 'r')
#page_text = pf.read()
#pf.close()
# need to decode!
pf = codecs.open(self._dirs['source_filename'], mode="r", encoding="utf-8")
page_text = pf.read()
pf.close()
self._parse_text(page_text)
if not self._check_config():
raise ValueError
#sys.exit()
self._title = self._config['title'][0]
|
[
"def",
"_load",
"(",
"self",
",",
"slug",
")",
":",
"#here we know that the slug exists",
"self",
".",
"_slug",
"=",
"slug",
"page_dir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"site",
".",
"dirs",
"[",
"'source'",
"]",
",",
"self",
".",
"_slug",
")",
"page_file_name",
"=",
"os",
".",
"path",
".",
"join",
"(",
"page_dir",
",",
"self",
".",
"_slug",
"+",
"'.md'",
")",
"self",
".",
"_dirs",
"[",
"'source_dir'",
"]",
"=",
"page_dir",
"self",
".",
"_dirs",
"[",
"'source_filename'",
"]",
"=",
"page_file_name",
"self",
".",
"_dirs",
"[",
"'www_dir'",
"]",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"site",
".",
"dirs",
"[",
"'www'",
"]",
",",
"slug",
")",
"self",
".",
"_dirs",
"[",
"'www_filename'",
"]",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"_dirs",
"[",
"'www_dir'",
"]",
",",
"'index.html'",
")",
"#pf = open(self._dirs['source_filename'], 'r')",
"#page_text = pf.read()",
"#pf.close()",
"# need to decode!",
"pf",
"=",
"codecs",
".",
"open",
"(",
"self",
".",
"_dirs",
"[",
"'source_filename'",
"]",
",",
"mode",
"=",
"\"r\"",
",",
"encoding",
"=",
"\"utf-8\"",
")",
"page_text",
"=",
"pf",
".",
"read",
"(",
")",
"pf",
".",
"close",
"(",
")",
"self",
".",
"_parse_text",
"(",
"page_text",
")",
"if",
"not",
"self",
".",
"_check_config",
"(",
")",
":",
"raise",
"ValueError",
"#sys.exit()",
"self",
".",
"_title",
"=",
"self",
".",
"_config",
"[",
"'title'",
"]",
"[",
"0",
"]"
] |
Load the page. The _file_name param is known, because this
method is only called after having checked that the page exists.
|
[
"Load",
"the",
"page",
".",
"The",
"_file_name",
"param",
"is",
"known",
"because",
"this",
"method",
"is",
"only",
"called",
"after",
"having",
"checked",
"that",
"the",
"page",
"exists",
"."
] |
91ac579c8f34fa240bef9b87adb0116c6b40b24d
|
https://github.com/jpablo128/simplystatic/blob/91ac579c8f34fa240bef9b87adb0116c6b40b24d/simplystatic/s2page.py#L318-L344
|
242,017
|
jpablo128/simplystatic
|
simplystatic/s2page.py
|
Page._check_config
|
def _check_config(self):
"""Verify that the configuration is correct."""
required_data = ['creation_date',
'author',
'status',
'lang',
'tags',
'title',
'slug',
'theme',
'template',
'page_id']
isok = True
# exclude some statements from coverage analysis. I would need to
# refactor how the config is loaded/handled etc. It's not worth
# to do that now. Maybe when I change yaml for the markdwn extension.
for e in self._config.keys():
if not e in required_data: # pragma: no cover
print "The configuration in page '" + \
self._slug + "' is corrupt."
isok = False
# check that the theme and template exist, even if they're default.
(pthemedir, ptemplatefname) = self._theme_and_template_fp()
if not os.path.isdir(pthemedir): # pragma: no cover
print "Theme " + self._config['theme'][0] + \
" specified in page '" + \
self._slug + "' does not exist."
isok = False
if not os.path.isfile(ptemplatefname): # pragma: no cover
print "Template " + self._config['template'][0] + \
" specified in page '" + self._slug + \
"' does not exist."
isok = False
return isok
|
python
|
def _check_config(self):
"""Verify that the configuration is correct."""
required_data = ['creation_date',
'author',
'status',
'lang',
'tags',
'title',
'slug',
'theme',
'template',
'page_id']
isok = True
# exclude some statements from coverage analysis. I would need to
# refactor how the config is loaded/handled etc. It's not worth
# to do that now. Maybe when I change yaml for the markdwn extension.
for e in self._config.keys():
if not e in required_data: # pragma: no cover
print "The configuration in page '" + \
self._slug + "' is corrupt."
isok = False
# check that the theme and template exist, even if they're default.
(pthemedir, ptemplatefname) = self._theme_and_template_fp()
if not os.path.isdir(pthemedir): # pragma: no cover
print "Theme " + self._config['theme'][0] + \
" specified in page '" + \
self._slug + "' does not exist."
isok = False
if not os.path.isfile(ptemplatefname): # pragma: no cover
print "Template " + self._config['template'][0] + \
" specified in page '" + self._slug + \
"' does not exist."
isok = False
return isok
|
[
"def",
"_check_config",
"(",
"self",
")",
":",
"required_data",
"=",
"[",
"'creation_date'",
",",
"'author'",
",",
"'status'",
",",
"'lang'",
",",
"'tags'",
",",
"'title'",
",",
"'slug'",
",",
"'theme'",
",",
"'template'",
",",
"'page_id'",
"]",
"isok",
"=",
"True",
"# exclude some statements from coverage analysis. I would need to",
"# refactor how the config is loaded/handled etc. It's not worth",
"# to do that now. Maybe when I change yaml for the markdwn extension.",
"for",
"e",
"in",
"self",
".",
"_config",
".",
"keys",
"(",
")",
":",
"if",
"not",
"e",
"in",
"required_data",
":",
"# pragma: no cover",
"print",
"\"The configuration in page '\"",
"+",
"self",
".",
"_slug",
"+",
"\"' is corrupt.\"",
"isok",
"=",
"False",
"# check that the theme and template exist, even if they're default.",
"(",
"pthemedir",
",",
"ptemplatefname",
")",
"=",
"self",
".",
"_theme_and_template_fp",
"(",
")",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"pthemedir",
")",
":",
"# pragma: no cover",
"print",
"\"Theme \"",
"+",
"self",
".",
"_config",
"[",
"'theme'",
"]",
"[",
"0",
"]",
"+",
"\" specified in page '\"",
"+",
"self",
".",
"_slug",
"+",
"\"' does not exist.\"",
"isok",
"=",
"False",
"if",
"not",
"os",
".",
"path",
".",
"isfile",
"(",
"ptemplatefname",
")",
":",
"# pragma: no cover",
"print",
"\"Template \"",
"+",
"self",
".",
"_config",
"[",
"'template'",
"]",
"[",
"0",
"]",
"+",
"\" specified in page '\"",
"+",
"self",
".",
"_slug",
"+",
"\"' does not exist.\"",
"isok",
"=",
"False",
"return",
"isok"
] |
Verify that the configuration is correct.
|
[
"Verify",
"that",
"the",
"configuration",
"is",
"correct",
"."
] |
91ac579c8f34fa240bef9b87adb0116c6b40b24d
|
https://github.com/jpablo128/simplystatic/blob/91ac579c8f34fa240bef9b87adb0116c6b40b24d/simplystatic/s2page.py#L346-L379
|
242,018
|
jpablo128/simplystatic
|
simplystatic/s2page.py
|
Page._theme_and_template_fp
|
def _theme_and_template_fp(self):
"""Return the full paths for theme and template in this page"""
ptheme = self._config['theme'][0]
if ptheme == "":
ptheme = self.site.site_config['default_theme']
pthemedir = os.path.join(self.site.dirs['themes'], ptheme)
ptemplate = self._config['template'][0]
if ptemplate == "":
ptemplate = self.site.site_config['default_template']
ptemplatefname = os.path.join(pthemedir, ptemplate)
return (pthemedir, ptemplatefname)
|
python
|
def _theme_and_template_fp(self):
"""Return the full paths for theme and template in this page"""
ptheme = self._config['theme'][0]
if ptheme == "":
ptheme = self.site.site_config['default_theme']
pthemedir = os.path.join(self.site.dirs['themes'], ptheme)
ptemplate = self._config['template'][0]
if ptemplate == "":
ptemplate = self.site.site_config['default_template']
ptemplatefname = os.path.join(pthemedir, ptemplate)
return (pthemedir, ptemplatefname)
|
[
"def",
"_theme_and_template_fp",
"(",
"self",
")",
":",
"ptheme",
"=",
"self",
".",
"_config",
"[",
"'theme'",
"]",
"[",
"0",
"]",
"if",
"ptheme",
"==",
"\"\"",
":",
"ptheme",
"=",
"self",
".",
"site",
".",
"site_config",
"[",
"'default_theme'",
"]",
"pthemedir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"site",
".",
"dirs",
"[",
"'themes'",
"]",
",",
"ptheme",
")",
"ptemplate",
"=",
"self",
".",
"_config",
"[",
"'template'",
"]",
"[",
"0",
"]",
"if",
"ptemplate",
"==",
"\"\"",
":",
"ptemplate",
"=",
"self",
".",
"site",
".",
"site_config",
"[",
"'default_template'",
"]",
"ptemplatefname",
"=",
"os",
".",
"path",
".",
"join",
"(",
"pthemedir",
",",
"ptemplate",
")",
"return",
"(",
"pthemedir",
",",
"ptemplatefname",
")"
] |
Return the full paths for theme and template in this page
|
[
"Return",
"the",
"full",
"paths",
"for",
"theme",
"and",
"template",
"in",
"this",
"page"
] |
91ac579c8f34fa240bef9b87adb0116c6b40b24d
|
https://github.com/jpablo128/simplystatic/blob/91ac579c8f34fa240bef9b87adb0116c6b40b24d/simplystatic/s2page.py#L381-L391
|
242,019
|
jpablo128/simplystatic
|
simplystatic/s2page.py
|
Page._parse_text
|
def _parse_text(self, page_text):
"""Extract the s2config and the content from the raw page text."""
# 1 sanitize: remove leading blank lines
# 2 separate "config text" from content, store content
# 3 convert config text + \n to obtain Meta, this is the config.
lines = page_text.split('\n')
i = 0
while lines[i].strip() == '':
i += 1
if i > 0: # i points to the first non-blank line. Else, i is 0, there are no leading blank lines
lines = lines[i:] #remove leading blank lines
i = 0
while lines[i].strip() != '':
i += 1
# i points to the first blank line
cfg_lines = '\n'.join(lines[0:i + 1]) #config lines, plus the empty line
md = markdown.Markdown(extensions=['meta','fenced_code', 'codehilite'],output_format="html5")
md.convert(cfg_lines) # need to trigger the conversion to obtain md.Meta
self._config = md.Meta
self._content = '\n'.join(lines[i+1:])
|
python
|
def _parse_text(self, page_text):
"""Extract the s2config and the content from the raw page text."""
# 1 sanitize: remove leading blank lines
# 2 separate "config text" from content, store content
# 3 convert config text + \n to obtain Meta, this is the config.
lines = page_text.split('\n')
i = 0
while lines[i].strip() == '':
i += 1
if i > 0: # i points to the first non-blank line. Else, i is 0, there are no leading blank lines
lines = lines[i:] #remove leading blank lines
i = 0
while lines[i].strip() != '':
i += 1
# i points to the first blank line
cfg_lines = '\n'.join(lines[0:i + 1]) #config lines, plus the empty line
md = markdown.Markdown(extensions=['meta','fenced_code', 'codehilite'],output_format="html5")
md.convert(cfg_lines) # need to trigger the conversion to obtain md.Meta
self._config = md.Meta
self._content = '\n'.join(lines[i+1:])
|
[
"def",
"_parse_text",
"(",
"self",
",",
"page_text",
")",
":",
"# 1 sanitize: remove leading blank lines",
"# 2 separate \"config text\" from content, store content",
"# 3 convert config text + \\n to obtain Meta, this is the config.",
"lines",
"=",
"page_text",
".",
"split",
"(",
"'\\n'",
")",
"i",
"=",
"0",
"while",
"lines",
"[",
"i",
"]",
".",
"strip",
"(",
")",
"==",
"''",
":",
"i",
"+=",
"1",
"if",
"i",
">",
"0",
":",
"# i points to the first non-blank line. Else, i is 0, there are no leading blank lines",
"lines",
"=",
"lines",
"[",
"i",
":",
"]",
"#remove leading blank lines",
"i",
"=",
"0",
"while",
"lines",
"[",
"i",
"]",
".",
"strip",
"(",
")",
"!=",
"''",
":",
"i",
"+=",
"1",
"# i points to the first blank line",
"cfg_lines",
"=",
"'\\n'",
".",
"join",
"(",
"lines",
"[",
"0",
":",
"i",
"+",
"1",
"]",
")",
"#config lines, plus the empty line",
"md",
"=",
"markdown",
".",
"Markdown",
"(",
"extensions",
"=",
"[",
"'meta'",
",",
"'fenced_code'",
",",
"'codehilite'",
"]",
",",
"output_format",
"=",
"\"html5\"",
")",
"md",
".",
"convert",
"(",
"cfg_lines",
")",
"# need to trigger the conversion to obtain md.Meta",
"self",
".",
"_config",
"=",
"md",
".",
"Meta",
"self",
".",
"_content",
"=",
"'\\n'",
".",
"join",
"(",
"lines",
"[",
"i",
"+",
"1",
":",
"]",
")"
] |
Extract the s2config and the content from the raw page text.
|
[
"Extract",
"the",
"s2config",
"and",
"the",
"content",
"from",
"the",
"raw",
"page",
"text",
"."
] |
91ac579c8f34fa240bef9b87adb0116c6b40b24d
|
https://github.com/jpablo128/simplystatic/blob/91ac579c8f34fa240bef9b87adb0116c6b40b24d/simplystatic/s2page.py#L393-L417
|
242,020
|
jpablo128/simplystatic
|
simplystatic/s2page.py
|
Page._config_to_text
|
def _config_to_text(self):
"""Render the configuration as text."""
r = u'' # unicode('',"UTF-8")
for k in self._config:
# if k == 'creation_date':
# r += k + ": " + self._config[k][0] + '\n'
# else:
#uk = unicode(k,"UTF-8")
cosa = '\n '.join(self._config[k]) + '\n'
r += k + ": " + cosa
#r += k + ": " + '\n '.join(self._config[k]) + '\n'
r += '\n'
return r
|
python
|
def _config_to_text(self):
"""Render the configuration as text."""
r = u'' # unicode('',"UTF-8")
for k in self._config:
# if k == 'creation_date':
# r += k + ": " + self._config[k][0] + '\n'
# else:
#uk = unicode(k,"UTF-8")
cosa = '\n '.join(self._config[k]) + '\n'
r += k + ": " + cosa
#r += k + ": " + '\n '.join(self._config[k]) + '\n'
r += '\n'
return r
|
[
"def",
"_config_to_text",
"(",
"self",
")",
":",
"r",
"=",
"u''",
"# unicode('',\"UTF-8\")",
"for",
"k",
"in",
"self",
".",
"_config",
":",
"# if k == 'creation_date':",
"# r += k + \": \" + self._config[k][0] + '\\n'",
"# else:",
"#uk = unicode(k,\"UTF-8\")",
"cosa",
"=",
"'\\n '",
".",
"join",
"(",
"self",
".",
"_config",
"[",
"k",
"]",
")",
"+",
"'\\n'",
"r",
"+=",
"k",
"+",
"\": \"",
"+",
"cosa",
"#r += k + \": \" + '\\n '.join(self._config[k]) + '\\n'",
"r",
"+=",
"'\\n'",
"return",
"r"
] |
Render the configuration as text.
|
[
"Render",
"the",
"configuration",
"as",
"text",
"."
] |
91ac579c8f34fa240bef9b87adb0116c6b40b24d
|
https://github.com/jpablo128/simplystatic/blob/91ac579c8f34fa240bef9b87adb0116c6b40b24d/simplystatic/s2page.py#L420-L432
|
242,021
|
jpablo128/simplystatic
|
simplystatic/s2page.py
|
Page.author
|
def author(self):
"""Return the full path of the theme used by this page."""
r = self.site.site_config['default_author']
if 'author' in self._config:
r = self._config['author']
return r
|
python
|
def author(self):
"""Return the full path of the theme used by this page."""
r = self.site.site_config['default_author']
if 'author' in self._config:
r = self._config['author']
return r
|
[
"def",
"author",
"(",
"self",
")",
":",
"r",
"=",
"self",
".",
"site",
".",
"site_config",
"[",
"'default_author'",
"]",
"if",
"'author'",
"in",
"self",
".",
"_config",
":",
"r",
"=",
"self",
".",
"_config",
"[",
"'author'",
"]",
"return",
"r"
] |
Return the full path of the theme used by this page.
|
[
"Return",
"the",
"full",
"path",
"of",
"the",
"theme",
"used",
"by",
"this",
"page",
"."
] |
91ac579c8f34fa240bef9b87adb0116c6b40b24d
|
https://github.com/jpablo128/simplystatic/blob/91ac579c8f34fa240bef9b87adb0116c6b40b24d/simplystatic/s2page.py#L499-L504
|
242,022
|
jalanb/pysyte
|
pysyte/decorators.py
|
memoize
|
def memoize(method):
"""A new method which acts like the given method but memoizes arguments
See https://en.wikipedia.org/wiki/Memoization for the general idea
>>> @memoize
... def test(arg):
... print('called')
... return arg + 1
>>> test(1)
called
2
>>> test(2)
called
3
>>> test(1)
2
The returned method also has an attached method "invalidate"
which removes given values from the cache
Or empties the cache if no values are given
>>> test.invalidate(2)
>>> test(1)
2
>>> test(2)
called
3
"""
method.cache = {}
def invalidate(*arguments, **keyword_arguments):
key = _represent_arguments(*arguments, **keyword_arguments)
if not key:
method.cache = {}
elif key in method.cache:
del method.cache[key]
else:
raise KeyError(
'Not prevously cached: %s(%s)' % (method.__name__, key))
def new_method(*arguments, **keyword_arguments):
"""Cache the arguments and return values of the call
The key cached is the repr() of arguments
This allows more types of values to be used as keys to the cache
Such as lists and tuples
"""
key = _represent_arguments(*arguments, **keyword_arguments)
if key not in method.cache:
method.cache[key] = method(*arguments, **keyword_arguments)
return method.cache[key]
new_method.invalidate = invalidate
new_method.__doc__ = method.__doc__
new_method.__name__ = 'memoize(%s)' % method.__name__
return new_method
|
python
|
def memoize(method):
"""A new method which acts like the given method but memoizes arguments
See https://en.wikipedia.org/wiki/Memoization for the general idea
>>> @memoize
... def test(arg):
... print('called')
... return arg + 1
>>> test(1)
called
2
>>> test(2)
called
3
>>> test(1)
2
The returned method also has an attached method "invalidate"
which removes given values from the cache
Or empties the cache if no values are given
>>> test.invalidate(2)
>>> test(1)
2
>>> test(2)
called
3
"""
method.cache = {}
def invalidate(*arguments, **keyword_arguments):
key = _represent_arguments(*arguments, **keyword_arguments)
if not key:
method.cache = {}
elif key in method.cache:
del method.cache[key]
else:
raise KeyError(
'Not prevously cached: %s(%s)' % (method.__name__, key))
def new_method(*arguments, **keyword_arguments):
"""Cache the arguments and return values of the call
The key cached is the repr() of arguments
This allows more types of values to be used as keys to the cache
Such as lists and tuples
"""
key = _represent_arguments(*arguments, **keyword_arguments)
if key not in method.cache:
method.cache[key] = method(*arguments, **keyword_arguments)
return method.cache[key]
new_method.invalidate = invalidate
new_method.__doc__ = method.__doc__
new_method.__name__ = 'memoize(%s)' % method.__name__
return new_method
|
[
"def",
"memoize",
"(",
"method",
")",
":",
"method",
".",
"cache",
"=",
"{",
"}",
"def",
"invalidate",
"(",
"*",
"arguments",
",",
"*",
"*",
"keyword_arguments",
")",
":",
"key",
"=",
"_represent_arguments",
"(",
"*",
"arguments",
",",
"*",
"*",
"keyword_arguments",
")",
"if",
"not",
"key",
":",
"method",
".",
"cache",
"=",
"{",
"}",
"elif",
"key",
"in",
"method",
".",
"cache",
":",
"del",
"method",
".",
"cache",
"[",
"key",
"]",
"else",
":",
"raise",
"KeyError",
"(",
"'Not prevously cached: %s(%s)'",
"%",
"(",
"method",
".",
"__name__",
",",
"key",
")",
")",
"def",
"new_method",
"(",
"*",
"arguments",
",",
"*",
"*",
"keyword_arguments",
")",
":",
"\"\"\"Cache the arguments and return values of the call\n\n The key cached is the repr() of arguments\n This allows more types of values to be used as keys to the cache\n Such as lists and tuples\n \"\"\"",
"key",
"=",
"_represent_arguments",
"(",
"*",
"arguments",
",",
"*",
"*",
"keyword_arguments",
")",
"if",
"key",
"not",
"in",
"method",
".",
"cache",
":",
"method",
".",
"cache",
"[",
"key",
"]",
"=",
"method",
"(",
"*",
"arguments",
",",
"*",
"*",
"keyword_arguments",
")",
"return",
"method",
".",
"cache",
"[",
"key",
"]",
"new_method",
".",
"invalidate",
"=",
"invalidate",
"new_method",
".",
"__doc__",
"=",
"method",
".",
"__doc__",
"new_method",
".",
"__name__",
"=",
"'memoize(%s)'",
"%",
"method",
".",
"__name__",
"return",
"new_method"
] |
A new method which acts like the given method but memoizes arguments
See https://en.wikipedia.org/wiki/Memoization for the general idea
>>> @memoize
... def test(arg):
... print('called')
... return arg + 1
>>> test(1)
called
2
>>> test(2)
called
3
>>> test(1)
2
The returned method also has an attached method "invalidate"
which removes given values from the cache
Or empties the cache if no values are given
>>> test.invalidate(2)
>>> test(1)
2
>>> test(2)
called
3
|
[
"A",
"new",
"method",
"which",
"acts",
"like",
"the",
"given",
"method",
"but",
"memoizes",
"arguments"
] |
4e278101943d1ceb1a6bcaf6ddc72052ecf13114
|
https://github.com/jalanb/pysyte/blob/4e278101943d1ceb1a6bcaf6ddc72052ecf13114/pysyte/decorators.py#L19-L72
|
242,023
|
jalanb/pysyte
|
pysyte/decorators.py
|
debug
|
def debug(method):
"""Decorator to debug the given method"""
def new_method(*args, **kwargs):
import pdb
try:
import pudb
except ImportError:
pudb = pdb
try:
pudb.runcall(method, *args, **kwargs)
except pdb.bdb.BdbQuit:
sys.exit('Normal quit from debugger')
new_method.__doc__ = method.__doc__
new_method.__name__ = 'debug(%s)' % method.__name__
return new_method
|
python
|
def debug(method):
"""Decorator to debug the given method"""
def new_method(*args, **kwargs):
import pdb
try:
import pudb
except ImportError:
pudb = pdb
try:
pudb.runcall(method, *args, **kwargs)
except pdb.bdb.BdbQuit:
sys.exit('Normal quit from debugger')
new_method.__doc__ = method.__doc__
new_method.__name__ = 'debug(%s)' % method.__name__
return new_method
|
[
"def",
"debug",
"(",
"method",
")",
":",
"def",
"new_method",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"import",
"pdb",
"try",
":",
"import",
"pudb",
"except",
"ImportError",
":",
"pudb",
"=",
"pdb",
"try",
":",
"pudb",
".",
"runcall",
"(",
"method",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"except",
"pdb",
".",
"bdb",
".",
"BdbQuit",
":",
"sys",
".",
"exit",
"(",
"'Normal quit from debugger'",
")",
"new_method",
".",
"__doc__",
"=",
"method",
".",
"__doc__",
"new_method",
".",
"__name__",
"=",
"'debug(%s)'",
"%",
"method",
".",
"__name__",
"return",
"new_method"
] |
Decorator to debug the given method
|
[
"Decorator",
"to",
"debug",
"the",
"given",
"method"
] |
4e278101943d1ceb1a6bcaf6ddc72052ecf13114
|
https://github.com/jalanb/pysyte/blob/4e278101943d1ceb1a6bcaf6ddc72052ecf13114/pysyte/decorators.py#L75-L89
|
242,024
|
jalanb/pysyte
|
pysyte/decorators.py
|
globber
|
def globber(main_method, globs):
"""Recognise globs in args"""
import os
from glob import glob
def main(arguments):
lists_of_paths = [_ for _ in arguments if glob(pathname, recursive=True)]
return main_method(arguments, lists_of_paths)
return main
|
python
|
def globber(main_method, globs):
"""Recognise globs in args"""
import os
from glob import glob
def main(arguments):
lists_of_paths = [_ for _ in arguments if glob(pathname, recursive=True)]
return main_method(arguments, lists_of_paths)
return main
|
[
"def",
"globber",
"(",
"main_method",
",",
"globs",
")",
":",
"import",
"os",
"from",
"glob",
"import",
"glob",
"def",
"main",
"(",
"arguments",
")",
":",
"lists_of_paths",
"=",
"[",
"_",
"for",
"_",
"in",
"arguments",
"if",
"glob",
"(",
"pathname",
",",
"recursive",
"=",
"True",
")",
"]",
"return",
"main_method",
"(",
"arguments",
",",
"lists_of_paths",
")",
"return",
"main"
] |
Recognise globs in args
|
[
"Recognise",
"globs",
"in",
"args"
] |
4e278101943d1ceb1a6bcaf6ddc72052ecf13114
|
https://github.com/jalanb/pysyte/blob/4e278101943d1ceb1a6bcaf6ddc72052ecf13114/pysyte/decorators.py#L135-L143
|
242,025
|
mdeous/fatbotslim
|
fatbotslim/handlers.py
|
CommandHandler._dispatch_trigger
|
def _dispatch_trigger(self, msg):
"""
Dispatches the message to the corresponding method.
"""
if not msg.args[0].startswith(self.trigger_char):
return
split_args = msg.args[0].split()
trigger = split_args[0].lstrip(self.trigger_char)
if trigger in self.triggers:
method = getattr(self, trigger)
if msg.command == PRIVMSG:
if msg.dst == self.irc.nick:
if EVT_PRIVATE in self.triggers[trigger]:
msg.event = EVT_PRIVATE
method(msg)
else:
if EVT_PUBLIC in self.triggers[trigger]:
msg.event = EVT_PUBLIC
method(msg)
elif (msg.command == NOTICE) and (EVT_NOTICE in self.triggers[trigger]):
msg.event = EVT_NOTICE
method(msg)
|
python
|
def _dispatch_trigger(self, msg):
"""
Dispatches the message to the corresponding method.
"""
if not msg.args[0].startswith(self.trigger_char):
return
split_args = msg.args[0].split()
trigger = split_args[0].lstrip(self.trigger_char)
if trigger in self.triggers:
method = getattr(self, trigger)
if msg.command == PRIVMSG:
if msg.dst == self.irc.nick:
if EVT_PRIVATE in self.triggers[trigger]:
msg.event = EVT_PRIVATE
method(msg)
else:
if EVT_PUBLIC in self.triggers[trigger]:
msg.event = EVT_PUBLIC
method(msg)
elif (msg.command == NOTICE) and (EVT_NOTICE in self.triggers[trigger]):
msg.event = EVT_NOTICE
method(msg)
|
[
"def",
"_dispatch_trigger",
"(",
"self",
",",
"msg",
")",
":",
"if",
"not",
"msg",
".",
"args",
"[",
"0",
"]",
".",
"startswith",
"(",
"self",
".",
"trigger_char",
")",
":",
"return",
"split_args",
"=",
"msg",
".",
"args",
"[",
"0",
"]",
".",
"split",
"(",
")",
"trigger",
"=",
"split_args",
"[",
"0",
"]",
".",
"lstrip",
"(",
"self",
".",
"trigger_char",
")",
"if",
"trigger",
"in",
"self",
".",
"triggers",
":",
"method",
"=",
"getattr",
"(",
"self",
",",
"trigger",
")",
"if",
"msg",
".",
"command",
"==",
"PRIVMSG",
":",
"if",
"msg",
".",
"dst",
"==",
"self",
".",
"irc",
".",
"nick",
":",
"if",
"EVT_PRIVATE",
"in",
"self",
".",
"triggers",
"[",
"trigger",
"]",
":",
"msg",
".",
"event",
"=",
"EVT_PRIVATE",
"method",
"(",
"msg",
")",
"else",
":",
"if",
"EVT_PUBLIC",
"in",
"self",
".",
"triggers",
"[",
"trigger",
"]",
":",
"msg",
".",
"event",
"=",
"EVT_PUBLIC",
"method",
"(",
"msg",
")",
"elif",
"(",
"msg",
".",
"command",
"==",
"NOTICE",
")",
"and",
"(",
"EVT_NOTICE",
"in",
"self",
".",
"triggers",
"[",
"trigger",
"]",
")",
":",
"msg",
".",
"event",
"=",
"EVT_NOTICE",
"method",
"(",
"msg",
")"
] |
Dispatches the message to the corresponding method.
|
[
"Dispatches",
"the",
"message",
"to",
"the",
"corresponding",
"method",
"."
] |
341595d24454a79caee23750eac271f9d0626c88
|
https://github.com/mdeous/fatbotslim/blob/341595d24454a79caee23750eac271f9d0626c88/fatbotslim/handlers.py#L178-L199
|
242,026
|
mdeous/fatbotslim
|
fatbotslim/handlers.py
|
RightsHandler.set_restriction
|
def set_restriction(self, command, user, event_types):
"""
Adds restriction for given `command`.
:param command: command on which the restriction should be set.
:type command: str
:param user: username for which the restriction applies.
:type user: str
:param event_types: types of events for which the command is allowed.
:type event_types: list
"""
self.commands_rights[command][user.lower()] = event_types
if command not in self.triggers:
self.triggers[command] = [EVT_PUBLIC, EVT_PRIVATE, EVT_NOTICE]
if not hasattr(self, command):
setattr(self, command, lambda msg: self.handle_rights(msg))
|
python
|
def set_restriction(self, command, user, event_types):
"""
Adds restriction for given `command`.
:param command: command on which the restriction should be set.
:type command: str
:param user: username for which the restriction applies.
:type user: str
:param event_types: types of events for which the command is allowed.
:type event_types: list
"""
self.commands_rights[command][user.lower()] = event_types
if command not in self.triggers:
self.triggers[command] = [EVT_PUBLIC, EVT_PRIVATE, EVT_NOTICE]
if not hasattr(self, command):
setattr(self, command, lambda msg: self.handle_rights(msg))
|
[
"def",
"set_restriction",
"(",
"self",
",",
"command",
",",
"user",
",",
"event_types",
")",
":",
"self",
".",
"commands_rights",
"[",
"command",
"]",
"[",
"user",
".",
"lower",
"(",
")",
"]",
"=",
"event_types",
"if",
"command",
"not",
"in",
"self",
".",
"triggers",
":",
"self",
".",
"triggers",
"[",
"command",
"]",
"=",
"[",
"EVT_PUBLIC",
",",
"EVT_PRIVATE",
",",
"EVT_NOTICE",
"]",
"if",
"not",
"hasattr",
"(",
"self",
",",
"command",
")",
":",
"setattr",
"(",
"self",
",",
"command",
",",
"lambda",
"msg",
":",
"self",
".",
"handle_rights",
"(",
"msg",
")",
")"
] |
Adds restriction for given `command`.
:param command: command on which the restriction should be set.
:type command: str
:param user: username for which the restriction applies.
:type user: str
:param event_types: types of events for which the command is allowed.
:type event_types: list
|
[
"Adds",
"restriction",
"for",
"given",
"command",
"."
] |
341595d24454a79caee23750eac271f9d0626c88
|
https://github.com/mdeous/fatbotslim/blob/341595d24454a79caee23750eac271f9d0626c88/fatbotslim/handlers.py#L247-L262
|
242,027
|
mdeous/fatbotslim
|
fatbotslim/handlers.py
|
RightsHandler.del_restriction
|
def del_restriction(self, command, user, event_types):
"""
Removes restriction for given `command`.
:param command: command on which the restriction should be removed.
:type command: str
:param user: username for which restriction should be removed.
:type user: str
:param event_types: types of events that should be removed from restriction.
:type event_types: list
"""
if user.lower() in self.commands_rights[command]:
for event_type in event_types:
try:
self.commands_rights[command][user.lower()].remove(event_type)
except ValueError:
pass
if not self.commands_rights[command][user.lower()]:
self.commands_rights[command].pop(user.lower())
|
python
|
def del_restriction(self, command, user, event_types):
"""
Removes restriction for given `command`.
:param command: command on which the restriction should be removed.
:type command: str
:param user: username for which restriction should be removed.
:type user: str
:param event_types: types of events that should be removed from restriction.
:type event_types: list
"""
if user.lower() in self.commands_rights[command]:
for event_type in event_types:
try:
self.commands_rights[command][user.lower()].remove(event_type)
except ValueError:
pass
if not self.commands_rights[command][user.lower()]:
self.commands_rights[command].pop(user.lower())
|
[
"def",
"del_restriction",
"(",
"self",
",",
"command",
",",
"user",
",",
"event_types",
")",
":",
"if",
"user",
".",
"lower",
"(",
")",
"in",
"self",
".",
"commands_rights",
"[",
"command",
"]",
":",
"for",
"event_type",
"in",
"event_types",
":",
"try",
":",
"self",
".",
"commands_rights",
"[",
"command",
"]",
"[",
"user",
".",
"lower",
"(",
")",
"]",
".",
"remove",
"(",
"event_type",
")",
"except",
"ValueError",
":",
"pass",
"if",
"not",
"self",
".",
"commands_rights",
"[",
"command",
"]",
"[",
"user",
".",
"lower",
"(",
")",
"]",
":",
"self",
".",
"commands_rights",
"[",
"command",
"]",
".",
"pop",
"(",
"user",
".",
"lower",
"(",
")",
")"
] |
Removes restriction for given `command`.
:param command: command on which the restriction should be removed.
:type command: str
:param user: username for which restriction should be removed.
:type user: str
:param event_types: types of events that should be removed from restriction.
:type event_types: list
|
[
"Removes",
"restriction",
"for",
"given",
"command",
"."
] |
341595d24454a79caee23750eac271f9d0626c88
|
https://github.com/mdeous/fatbotslim/blob/341595d24454a79caee23750eac271f9d0626c88/fatbotslim/handlers.py#L264-L282
|
242,028
|
mdeous/fatbotslim
|
fatbotslim/handlers.py
|
RightsHandler.handle_rights
|
def handle_rights(self, msg):
"""
Catch-all command that is called whenever a restricted command is triggered.
:param msg: message that triggered the command.
:type msg: :class:`fatbotslim.irc.Message`
"""
command = msg.args[0][1:]
if command in self.commands_rights:
if msg.src.name.lower() in self.commands_rights[command]:
if msg.event not in self.commands_rights[command][msg.src.name.lower()]:
msg.propagate = False
elif '*' in self.commands_rights[command]:
if msg.event not in self.commands_rights[command]['*']:
msg.propagate = False
if (not msg.propagate) and self.notify:
message = "You're not allowed to use the '%s' command" % command
if msg.event == EVT_PUBLIC:
self.irc.msg(msg.dst, message)
elif msg.event == EVT_PRIVATE:
self.irc.msg(msg.src.name, message)
elif msg.event == EVT_NOTICE:
self.irc.notice(msg.src.name, message)
|
python
|
def handle_rights(self, msg):
"""
Catch-all command that is called whenever a restricted command is triggered.
:param msg: message that triggered the command.
:type msg: :class:`fatbotslim.irc.Message`
"""
command = msg.args[0][1:]
if command in self.commands_rights:
if msg.src.name.lower() in self.commands_rights[command]:
if msg.event not in self.commands_rights[command][msg.src.name.lower()]:
msg.propagate = False
elif '*' in self.commands_rights[command]:
if msg.event not in self.commands_rights[command]['*']:
msg.propagate = False
if (not msg.propagate) and self.notify:
message = "You're not allowed to use the '%s' command" % command
if msg.event == EVT_PUBLIC:
self.irc.msg(msg.dst, message)
elif msg.event == EVT_PRIVATE:
self.irc.msg(msg.src.name, message)
elif msg.event == EVT_NOTICE:
self.irc.notice(msg.src.name, message)
|
[
"def",
"handle_rights",
"(",
"self",
",",
"msg",
")",
":",
"command",
"=",
"msg",
".",
"args",
"[",
"0",
"]",
"[",
"1",
":",
"]",
"if",
"command",
"in",
"self",
".",
"commands_rights",
":",
"if",
"msg",
".",
"src",
".",
"name",
".",
"lower",
"(",
")",
"in",
"self",
".",
"commands_rights",
"[",
"command",
"]",
":",
"if",
"msg",
".",
"event",
"not",
"in",
"self",
".",
"commands_rights",
"[",
"command",
"]",
"[",
"msg",
".",
"src",
".",
"name",
".",
"lower",
"(",
")",
"]",
":",
"msg",
".",
"propagate",
"=",
"False",
"elif",
"'*'",
"in",
"self",
".",
"commands_rights",
"[",
"command",
"]",
":",
"if",
"msg",
".",
"event",
"not",
"in",
"self",
".",
"commands_rights",
"[",
"command",
"]",
"[",
"'*'",
"]",
":",
"msg",
".",
"propagate",
"=",
"False",
"if",
"(",
"not",
"msg",
".",
"propagate",
")",
"and",
"self",
".",
"notify",
":",
"message",
"=",
"\"You're not allowed to use the '%s' command\"",
"%",
"command",
"if",
"msg",
".",
"event",
"==",
"EVT_PUBLIC",
":",
"self",
".",
"irc",
".",
"msg",
"(",
"msg",
".",
"dst",
",",
"message",
")",
"elif",
"msg",
".",
"event",
"==",
"EVT_PRIVATE",
":",
"self",
".",
"irc",
".",
"msg",
"(",
"msg",
".",
"src",
".",
"name",
",",
"message",
")",
"elif",
"msg",
".",
"event",
"==",
"EVT_NOTICE",
":",
"self",
".",
"irc",
".",
"notice",
"(",
"msg",
".",
"src",
".",
"name",
",",
"message",
")"
] |
Catch-all command that is called whenever a restricted command is triggered.
:param msg: message that triggered the command.
:type msg: :class:`fatbotslim.irc.Message`
|
[
"Catch",
"-",
"all",
"command",
"that",
"is",
"called",
"whenever",
"a",
"restricted",
"command",
"is",
"triggered",
"."
] |
341595d24454a79caee23750eac271f9d0626c88
|
https://github.com/mdeous/fatbotslim/blob/341595d24454a79caee23750eac271f9d0626c88/fatbotslim/handlers.py#L284-L306
|
242,029
|
edwards-lab/libGWAS
|
libgwas/data_parser.py
|
check_inclusions
|
def check_inclusions(item, included=[], excluded=[]):
"""Everything passes if both are empty, otherwise, we have to check if \
empty or is present."""
if (len(included) == 0):
if len(excluded) == 0 or item not in excluded:
return True
else:
return False
else:
if item in included:
return True
return False
|
python
|
def check_inclusions(item, included=[], excluded=[]):
"""Everything passes if both are empty, otherwise, we have to check if \
empty or is present."""
if (len(included) == 0):
if len(excluded) == 0 or item not in excluded:
return True
else:
return False
else:
if item in included:
return True
return False
|
[
"def",
"check_inclusions",
"(",
"item",
",",
"included",
"=",
"[",
"]",
",",
"excluded",
"=",
"[",
"]",
")",
":",
"if",
"(",
"len",
"(",
"included",
")",
"==",
"0",
")",
":",
"if",
"len",
"(",
"excluded",
")",
"==",
"0",
"or",
"item",
"not",
"in",
"excluded",
":",
"return",
"True",
"else",
":",
"return",
"False",
"else",
":",
"if",
"item",
"in",
"included",
":",
"return",
"True",
"return",
"False"
] |
Everything passes if both are empty, otherwise, we have to check if \
empty or is present.
|
[
"Everything",
"passes",
"if",
"both",
"are",
"empty",
"otherwise",
"we",
"have",
"to",
"check",
"if",
"\\",
"empty",
"or",
"is",
"present",
"."
] |
d68c9a083d443dfa5d7c5112de29010909cfe23f
|
https://github.com/edwards-lab/libGWAS/blob/d68c9a083d443dfa5d7c5112de29010909cfe23f/libgwas/data_parser.py#L27-L38
|
242,030
|
francois-vincent/clingon
|
examples/zipcomment.py
|
toto
|
def toto(arch_name, comment='', clear=False, read_comment=False, list_members=False, time_show=False):
""" Small utility for changing comment in a zip file
without changing the file modification datetime.
"""
if comment and clear:
clingon.RunnerError("You cannot specify --comment and --clear together")
z = None
# if archive does not exist, create it with up to 3 files from current directory
if not os.path.isfile(arch_name):
print "Creating archive", arch_name
z = zipfile.ZipFile(arch_name, 'w')
for f in [x for x in glob.iglob('*.*') if not x.endswith('.zip')][:3]:
print " Add file %s to %s" % (f, arch_name)
z.write(f)
if comment:
mtime = os.path.getmtime(arch_name)
if not z:
z = zipfile.ZipFile(arch_name, 'a')
z.comment = comment
if z:
z.close()
if comment:
os.utime(arch_name, (time.time(), mtime))
if read_comment:
z = zipfile.ZipFile(arch_name, 'r')
print "Comment:", z.comment, len(z.comment)
if list_members:
z = zipfile.ZipFile(arch_name, 'r')
print "Members:", z.namelist()
if time_show:
print "Access time:", time.ctime(os.path.getatime(arch_name))
print "Modif time:", time.ctime(os.path.getmtime(arch_name))
|
python
|
def toto(arch_name, comment='', clear=False, read_comment=False, list_members=False, time_show=False):
""" Small utility for changing comment in a zip file
without changing the file modification datetime.
"""
if comment and clear:
clingon.RunnerError("You cannot specify --comment and --clear together")
z = None
# if archive does not exist, create it with up to 3 files from current directory
if not os.path.isfile(arch_name):
print "Creating archive", arch_name
z = zipfile.ZipFile(arch_name, 'w')
for f in [x for x in glob.iglob('*.*') if not x.endswith('.zip')][:3]:
print " Add file %s to %s" % (f, arch_name)
z.write(f)
if comment:
mtime = os.path.getmtime(arch_name)
if not z:
z = zipfile.ZipFile(arch_name, 'a')
z.comment = comment
if z:
z.close()
if comment:
os.utime(arch_name, (time.time(), mtime))
if read_comment:
z = zipfile.ZipFile(arch_name, 'r')
print "Comment:", z.comment, len(z.comment)
if list_members:
z = zipfile.ZipFile(arch_name, 'r')
print "Members:", z.namelist()
if time_show:
print "Access time:", time.ctime(os.path.getatime(arch_name))
print "Modif time:", time.ctime(os.path.getmtime(arch_name))
|
[
"def",
"toto",
"(",
"arch_name",
",",
"comment",
"=",
"''",
",",
"clear",
"=",
"False",
",",
"read_comment",
"=",
"False",
",",
"list_members",
"=",
"False",
",",
"time_show",
"=",
"False",
")",
":",
"if",
"comment",
"and",
"clear",
":",
"clingon",
".",
"RunnerError",
"(",
"\"You cannot specify --comment and --clear together\"",
")",
"z",
"=",
"None",
"# if archive does not exist, create it with up to 3 files from current directory",
"if",
"not",
"os",
".",
"path",
".",
"isfile",
"(",
"arch_name",
")",
":",
"print",
"\"Creating archive\"",
",",
"arch_name",
"z",
"=",
"zipfile",
".",
"ZipFile",
"(",
"arch_name",
",",
"'w'",
")",
"for",
"f",
"in",
"[",
"x",
"for",
"x",
"in",
"glob",
".",
"iglob",
"(",
"'*.*'",
")",
"if",
"not",
"x",
".",
"endswith",
"(",
"'.zip'",
")",
"]",
"[",
":",
"3",
"]",
":",
"print",
"\" Add file %s to %s\"",
"%",
"(",
"f",
",",
"arch_name",
")",
"z",
".",
"write",
"(",
"f",
")",
"if",
"comment",
":",
"mtime",
"=",
"os",
".",
"path",
".",
"getmtime",
"(",
"arch_name",
")",
"if",
"not",
"z",
":",
"z",
"=",
"zipfile",
".",
"ZipFile",
"(",
"arch_name",
",",
"'a'",
")",
"z",
".",
"comment",
"=",
"comment",
"if",
"z",
":",
"z",
".",
"close",
"(",
")",
"if",
"comment",
":",
"os",
".",
"utime",
"(",
"arch_name",
",",
"(",
"time",
".",
"time",
"(",
")",
",",
"mtime",
")",
")",
"if",
"read_comment",
":",
"z",
"=",
"zipfile",
".",
"ZipFile",
"(",
"arch_name",
",",
"'r'",
")",
"print",
"\"Comment:\"",
",",
"z",
".",
"comment",
",",
"len",
"(",
"z",
".",
"comment",
")",
"if",
"list_members",
":",
"z",
"=",
"zipfile",
".",
"ZipFile",
"(",
"arch_name",
",",
"'r'",
")",
"print",
"\"Members:\"",
",",
"z",
".",
"namelist",
"(",
")",
"if",
"time_show",
":",
"print",
"\"Access time:\"",
",",
"time",
".",
"ctime",
"(",
"os",
".",
"path",
".",
"getatime",
"(",
"arch_name",
")",
")",
"print",
"\"Modif time:\"",
",",
"time",
".",
"ctime",
"(",
"os",
".",
"path",
".",
"getmtime",
"(",
"arch_name",
")",
")"
] |
Small utility for changing comment in a zip file
without changing the file modification datetime.
|
[
"Small",
"utility",
"for",
"changing",
"comment",
"in",
"a",
"zip",
"file",
"without",
"changing",
"the",
"file",
"modification",
"datetime",
"."
] |
afc9db073dbc72b2562ce3e444152986a555dcbf
|
https://github.com/francois-vincent/clingon/blob/afc9db073dbc72b2562ce3e444152986a555dcbf/examples/zipcomment.py#L11-L42
|
242,031
|
pyvec/pyvodb
|
pyvodb/cli/top.py
|
cli
|
def cli(ctx, data, verbose, color, format, editor):
"""Query a meetup database.
"""
ctx.obj['verbose'] = verbose
if verbose:
logging.basicConfig(level=logging.INFO)
logging.getLogger('sqlalchemy.engine').setLevel(logging.INFO)
ctx.obj['datadir'] = os.path.abspath(data)
if 'db' not in ctx.obj:
ctx.obj['db'] = get_db(data)
if color is None:
ctx.obj['term'] = blessings.Terminal()
elif color is True:
ctx.obj['term'] = blessings.Terminal(force_styling=True)
elif color is False:
ctx.obj['term'] = blessings.Terminal(force_styling=None)
if 'PYVO_TEST_NOW' in os.environ:
# Fake the current date for testing
ctx.obj['now'] = datetime.datetime.strptime(
os.environ['PYVO_TEST_NOW'], '%Y-%m-%d %H:%M:%S')
else:
ctx.obj['now'] = datetime.datetime.now()
ctx.obj['format'] = format
ctx.obj['editor'] = shlex.split(editor)
|
python
|
def cli(ctx, data, verbose, color, format, editor):
"""Query a meetup database.
"""
ctx.obj['verbose'] = verbose
if verbose:
logging.basicConfig(level=logging.INFO)
logging.getLogger('sqlalchemy.engine').setLevel(logging.INFO)
ctx.obj['datadir'] = os.path.abspath(data)
if 'db' not in ctx.obj:
ctx.obj['db'] = get_db(data)
if color is None:
ctx.obj['term'] = blessings.Terminal()
elif color is True:
ctx.obj['term'] = blessings.Terminal(force_styling=True)
elif color is False:
ctx.obj['term'] = blessings.Terminal(force_styling=None)
if 'PYVO_TEST_NOW' in os.environ:
# Fake the current date for testing
ctx.obj['now'] = datetime.datetime.strptime(
os.environ['PYVO_TEST_NOW'], '%Y-%m-%d %H:%M:%S')
else:
ctx.obj['now'] = datetime.datetime.now()
ctx.obj['format'] = format
ctx.obj['editor'] = shlex.split(editor)
|
[
"def",
"cli",
"(",
"ctx",
",",
"data",
",",
"verbose",
",",
"color",
",",
"format",
",",
"editor",
")",
":",
"ctx",
".",
"obj",
"[",
"'verbose'",
"]",
"=",
"verbose",
"if",
"verbose",
":",
"logging",
".",
"basicConfig",
"(",
"level",
"=",
"logging",
".",
"INFO",
")",
"logging",
".",
"getLogger",
"(",
"'sqlalchemy.engine'",
")",
".",
"setLevel",
"(",
"logging",
".",
"INFO",
")",
"ctx",
".",
"obj",
"[",
"'datadir'",
"]",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"data",
")",
"if",
"'db'",
"not",
"in",
"ctx",
".",
"obj",
":",
"ctx",
".",
"obj",
"[",
"'db'",
"]",
"=",
"get_db",
"(",
"data",
")",
"if",
"color",
"is",
"None",
":",
"ctx",
".",
"obj",
"[",
"'term'",
"]",
"=",
"blessings",
".",
"Terminal",
"(",
")",
"elif",
"color",
"is",
"True",
":",
"ctx",
".",
"obj",
"[",
"'term'",
"]",
"=",
"blessings",
".",
"Terminal",
"(",
"force_styling",
"=",
"True",
")",
"elif",
"color",
"is",
"False",
":",
"ctx",
".",
"obj",
"[",
"'term'",
"]",
"=",
"blessings",
".",
"Terminal",
"(",
"force_styling",
"=",
"None",
")",
"if",
"'PYVO_TEST_NOW'",
"in",
"os",
".",
"environ",
":",
"# Fake the current date for testing",
"ctx",
".",
"obj",
"[",
"'now'",
"]",
"=",
"datetime",
".",
"datetime",
".",
"strptime",
"(",
"os",
".",
"environ",
"[",
"'PYVO_TEST_NOW'",
"]",
",",
"'%Y-%m-%d %H:%M:%S'",
")",
"else",
":",
"ctx",
".",
"obj",
"[",
"'now'",
"]",
"=",
"datetime",
".",
"datetime",
".",
"now",
"(",
")",
"ctx",
".",
"obj",
"[",
"'format'",
"]",
"=",
"format",
"ctx",
".",
"obj",
"[",
"'editor'",
"]",
"=",
"shlex",
".",
"split",
"(",
"editor",
")"
] |
Query a meetup database.
|
[
"Query",
"a",
"meetup",
"database",
"."
] |
07183333df26eb12c5c2b98802cde3fb3a6c1339
|
https://github.com/pyvec/pyvodb/blob/07183333df26eb12c5c2b98802cde3fb3a6c1339/pyvodb/cli/top.py#L57-L80
|
242,032
|
Bystroushaak/BalancedDiscStorage
|
src/BalancedDiscStorage/balanced_disc_storage.py
|
BalancedDiscStorage._get_file_iterator
|
def _get_file_iterator(self, file_obj):
"""
For given `file_obj` return iterator, which will read the file in
`self.read_bs` chunks.
Args:
file_obj (file): File-like object.
Return:
iterator: Iterator reading the file-like object in chunks.
"""
file_obj.seek(0)
return iter(lambda: file_obj.read(self.read_bs), '')
|
python
|
def _get_file_iterator(self, file_obj):
"""
For given `file_obj` return iterator, which will read the file in
`self.read_bs` chunks.
Args:
file_obj (file): File-like object.
Return:
iterator: Iterator reading the file-like object in chunks.
"""
file_obj.seek(0)
return iter(lambda: file_obj.read(self.read_bs), '')
|
[
"def",
"_get_file_iterator",
"(",
"self",
",",
"file_obj",
")",
":",
"file_obj",
".",
"seek",
"(",
"0",
")",
"return",
"iter",
"(",
"lambda",
":",
"file_obj",
".",
"read",
"(",
"self",
".",
"read_bs",
")",
",",
"''",
")"
] |
For given `file_obj` return iterator, which will read the file in
`self.read_bs` chunks.
Args:
file_obj (file): File-like object.
Return:
iterator: Iterator reading the file-like object in chunks.
|
[
"For",
"given",
"file_obj",
"return",
"iterator",
"which",
"will",
"read",
"the",
"file",
"in",
"self",
".",
"read_bs",
"chunks",
"."
] |
d96854e2afdd70c814b16d177ff6308841b34b24
|
https://github.com/Bystroushaak/BalancedDiscStorage/blob/d96854e2afdd70c814b16d177ff6308841b34b24/src/BalancedDiscStorage/balanced_disc_storage.py#L50-L63
|
242,033
|
Bystroushaak/BalancedDiscStorage
|
src/BalancedDiscStorage/balanced_disc_storage.py
|
BalancedDiscStorage._get_hash
|
def _get_hash(self, file_obj):
"""
Compute hash for the `file_obj`.
Attr:
file_obj (obj): File-like object with ``.write()`` and ``.seek()``.
Returns:
str: Hexdigest of the hash.
"""
size = 0
hash_buider = self.hash_builder()
for piece in self._get_file_iterator(file_obj):
hash_buider.update(piece)
size += len(piece)
file_obj.seek(0)
return "%s_%x" % (hash_buider.hexdigest(), size)
|
python
|
def _get_hash(self, file_obj):
"""
Compute hash for the `file_obj`.
Attr:
file_obj (obj): File-like object with ``.write()`` and ``.seek()``.
Returns:
str: Hexdigest of the hash.
"""
size = 0
hash_buider = self.hash_builder()
for piece in self._get_file_iterator(file_obj):
hash_buider.update(piece)
size += len(piece)
file_obj.seek(0)
return "%s_%x" % (hash_buider.hexdigest(), size)
|
[
"def",
"_get_hash",
"(",
"self",
",",
"file_obj",
")",
":",
"size",
"=",
"0",
"hash_buider",
"=",
"self",
".",
"hash_builder",
"(",
")",
"for",
"piece",
"in",
"self",
".",
"_get_file_iterator",
"(",
"file_obj",
")",
":",
"hash_buider",
".",
"update",
"(",
"piece",
")",
"size",
"+=",
"len",
"(",
"piece",
")",
"file_obj",
".",
"seek",
"(",
"0",
")",
"return",
"\"%s_%x\"",
"%",
"(",
"hash_buider",
".",
"hexdigest",
"(",
")",
",",
"size",
")"
] |
Compute hash for the `file_obj`.
Attr:
file_obj (obj): File-like object with ``.write()`` and ``.seek()``.
Returns:
str: Hexdigest of the hash.
|
[
"Compute",
"hash",
"for",
"the",
"file_obj",
"."
] |
d96854e2afdd70c814b16d177ff6308841b34b24
|
https://github.com/Bystroushaak/BalancedDiscStorage/blob/d96854e2afdd70c814b16d177ff6308841b34b24/src/BalancedDiscStorage/balanced_disc_storage.py#L65-L83
|
242,034
|
Bystroushaak/BalancedDiscStorage
|
src/BalancedDiscStorage/balanced_disc_storage.py
|
BalancedDiscStorage._create_dir_path
|
def _create_dir_path(self, file_hash, path=None, hash_list=None):
"""
Create proper filesystem paths for given `file_hash`.
Args:
file_hash (str): Hash of the file for which the path should be
created.
path (str, default None): Recursion argument, don't set this.
hash_list (list, default None): Recursion argument, don't set this.
Returns:
str: Created path.
"""
# first, non-recursive call - parse `file_hash`
if hash_list is None:
hash_list = list(file_hash)
if not hash_list:
raise IOError("Directory structure is too full!")
# first, non-recursive call - look for subpath of `self.path`
if not path:
path = os.path.join(
self.path,
hash_list.pop(0)
)
# if the path not yet exists, create it and work on it
if not os.path.exists(path):
os.mkdir(path)
return self._create_dir_path(
file_hash=file_hash,
path=path,
hash_list=hash_list
)
files = os.listdir(path)
# file is already in storage
if file_hash in files:
return path
# if the directory is not yet full, use it
if len(files) < self.dir_limit:
return path
# in full directories create new sub-directories
return self._create_dir_path(
file_hash=file_hash,
path=os.path.join(path, hash_list.pop(0)),
hash_list=hash_list
)
|
python
|
def _create_dir_path(self, file_hash, path=None, hash_list=None):
"""
Create proper filesystem paths for given `file_hash`.
Args:
file_hash (str): Hash of the file for which the path should be
created.
path (str, default None): Recursion argument, don't set this.
hash_list (list, default None): Recursion argument, don't set this.
Returns:
str: Created path.
"""
# first, non-recursive call - parse `file_hash`
if hash_list is None:
hash_list = list(file_hash)
if not hash_list:
raise IOError("Directory structure is too full!")
# first, non-recursive call - look for subpath of `self.path`
if not path:
path = os.path.join(
self.path,
hash_list.pop(0)
)
# if the path not yet exists, create it and work on it
if not os.path.exists(path):
os.mkdir(path)
return self._create_dir_path(
file_hash=file_hash,
path=path,
hash_list=hash_list
)
files = os.listdir(path)
# file is already in storage
if file_hash in files:
return path
# if the directory is not yet full, use it
if len(files) < self.dir_limit:
return path
# in full directories create new sub-directories
return self._create_dir_path(
file_hash=file_hash,
path=os.path.join(path, hash_list.pop(0)),
hash_list=hash_list
)
|
[
"def",
"_create_dir_path",
"(",
"self",
",",
"file_hash",
",",
"path",
"=",
"None",
",",
"hash_list",
"=",
"None",
")",
":",
"# first, non-recursive call - parse `file_hash`",
"if",
"hash_list",
"is",
"None",
":",
"hash_list",
"=",
"list",
"(",
"file_hash",
")",
"if",
"not",
"hash_list",
":",
"raise",
"IOError",
"(",
"\"Directory structure is too full!\"",
")",
"# first, non-recursive call - look for subpath of `self.path`",
"if",
"not",
"path",
":",
"path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"path",
",",
"hash_list",
".",
"pop",
"(",
"0",
")",
")",
"# if the path not yet exists, create it and work on it",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"path",
")",
":",
"os",
".",
"mkdir",
"(",
"path",
")",
"return",
"self",
".",
"_create_dir_path",
"(",
"file_hash",
"=",
"file_hash",
",",
"path",
"=",
"path",
",",
"hash_list",
"=",
"hash_list",
")",
"files",
"=",
"os",
".",
"listdir",
"(",
"path",
")",
"# file is already in storage",
"if",
"file_hash",
"in",
"files",
":",
"return",
"path",
"# if the directory is not yet full, use it",
"if",
"len",
"(",
"files",
")",
"<",
"self",
".",
"dir_limit",
":",
"return",
"path",
"# in full directories create new sub-directories",
"return",
"self",
".",
"_create_dir_path",
"(",
"file_hash",
"=",
"file_hash",
",",
"path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"path",
",",
"hash_list",
".",
"pop",
"(",
"0",
")",
")",
",",
"hash_list",
"=",
"hash_list",
")"
] |
Create proper filesystem paths for given `file_hash`.
Args:
file_hash (str): Hash of the file for which the path should be
created.
path (str, default None): Recursion argument, don't set this.
hash_list (list, default None): Recursion argument, don't set this.
Returns:
str: Created path.
|
[
"Create",
"proper",
"filesystem",
"paths",
"for",
"given",
"file_hash",
"."
] |
d96854e2afdd70c814b16d177ff6308841b34b24
|
https://github.com/Bystroushaak/BalancedDiscStorage/blob/d96854e2afdd70c814b16d177ff6308841b34b24/src/BalancedDiscStorage/balanced_disc_storage.py#L100-L151
|
242,035
|
Bystroushaak/BalancedDiscStorage
|
src/BalancedDiscStorage/balanced_disc_storage.py
|
BalancedDiscStorage.file_path_from_hash
|
def file_path_from_hash(self, file_hash, path=None, hash_list=None):
"""
For given `file_hash`, return path on filesystem.
Args:
file_hash (str): Hash of the file, for which you wish to know the
path.
path (str, default None): Recursion argument, don't set this.
hash_list (list, default None): Recursion argument, don't set this.
Returns:
str: Path for given `file_hash` contained in :class:`.PathAndHash`\
object.
Raises:
IOError: If the file with corresponding `file_hash` is not in \
storage.
"""
# first, non-recursive call - parse `file_hash`
if hash_list is None:
hash_list = list(file_hash)
if not hash_list:
raise IOError("Directory structure is too full!")
# first, non-recursive call - look for subpath of `self.path`
if not path:
path = os.path.join(
self.path,
hash_list.pop(0)
)
files = os.listdir(path)
# is the file/unpacked archive in this `path`?
if file_hash in files:
full_path = os.path.join(path, file_hash)
if os.path.isfile(full_path):
return PathAndHash(path=full_path, hash=file_hash)
return PathAndHash(path=full_path + "/", hash=file_hash)
# end of recursion, if there are no more directories to look into
next_path = os.path.join(path, hash_list.pop(0))
if not os.path.exists(next_path):
raise IOError("File not found in the structure.")
# look into subdirectory
return self.file_path_from_hash(
file_hash=file_hash,
path=next_path,
hash_list=hash_list
)
|
python
|
def file_path_from_hash(self, file_hash, path=None, hash_list=None):
"""
For given `file_hash`, return path on filesystem.
Args:
file_hash (str): Hash of the file, for which you wish to know the
path.
path (str, default None): Recursion argument, don't set this.
hash_list (list, default None): Recursion argument, don't set this.
Returns:
str: Path for given `file_hash` contained in :class:`.PathAndHash`\
object.
Raises:
IOError: If the file with corresponding `file_hash` is not in \
storage.
"""
# first, non-recursive call - parse `file_hash`
if hash_list is None:
hash_list = list(file_hash)
if not hash_list:
raise IOError("Directory structure is too full!")
# first, non-recursive call - look for subpath of `self.path`
if not path:
path = os.path.join(
self.path,
hash_list.pop(0)
)
files = os.listdir(path)
# is the file/unpacked archive in this `path`?
if file_hash in files:
full_path = os.path.join(path, file_hash)
if os.path.isfile(full_path):
return PathAndHash(path=full_path, hash=file_hash)
return PathAndHash(path=full_path + "/", hash=file_hash)
# end of recursion, if there are no more directories to look into
next_path = os.path.join(path, hash_list.pop(0))
if not os.path.exists(next_path):
raise IOError("File not found in the structure.")
# look into subdirectory
return self.file_path_from_hash(
file_hash=file_hash,
path=next_path,
hash_list=hash_list
)
|
[
"def",
"file_path_from_hash",
"(",
"self",
",",
"file_hash",
",",
"path",
"=",
"None",
",",
"hash_list",
"=",
"None",
")",
":",
"# first, non-recursive call - parse `file_hash`",
"if",
"hash_list",
"is",
"None",
":",
"hash_list",
"=",
"list",
"(",
"file_hash",
")",
"if",
"not",
"hash_list",
":",
"raise",
"IOError",
"(",
"\"Directory structure is too full!\"",
")",
"# first, non-recursive call - look for subpath of `self.path`",
"if",
"not",
"path",
":",
"path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"path",
",",
"hash_list",
".",
"pop",
"(",
"0",
")",
")",
"files",
"=",
"os",
".",
"listdir",
"(",
"path",
")",
"# is the file/unpacked archive in this `path`?",
"if",
"file_hash",
"in",
"files",
":",
"full_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"path",
",",
"file_hash",
")",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"full_path",
")",
":",
"return",
"PathAndHash",
"(",
"path",
"=",
"full_path",
",",
"hash",
"=",
"file_hash",
")",
"return",
"PathAndHash",
"(",
"path",
"=",
"full_path",
"+",
"\"/\"",
",",
"hash",
"=",
"file_hash",
")",
"# end of recursion, if there are no more directories to look into",
"next_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"path",
",",
"hash_list",
".",
"pop",
"(",
"0",
")",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"next_path",
")",
":",
"raise",
"IOError",
"(",
"\"File not found in the structure.\"",
")",
"# look into subdirectory",
"return",
"self",
".",
"file_path_from_hash",
"(",
"file_hash",
"=",
"file_hash",
",",
"path",
"=",
"next_path",
",",
"hash_list",
"=",
"hash_list",
")"
] |
For given `file_hash`, return path on filesystem.
Args:
file_hash (str): Hash of the file, for which you wish to know the
path.
path (str, default None): Recursion argument, don't set this.
hash_list (list, default None): Recursion argument, don't set this.
Returns:
str: Path for given `file_hash` contained in :class:`.PathAndHash`\
object.
Raises:
IOError: If the file with corresponding `file_hash` is not in \
storage.
|
[
"For",
"given",
"file_hash",
"return",
"path",
"on",
"filesystem",
"."
] |
d96854e2afdd70c814b16d177ff6308841b34b24
|
https://github.com/Bystroushaak/BalancedDiscStorage/blob/d96854e2afdd70c814b16d177ff6308841b34b24/src/BalancedDiscStorage/balanced_disc_storage.py#L153-L206
|
242,036
|
Bystroushaak/BalancedDiscStorage
|
src/BalancedDiscStorage/balanced_disc_storage.py
|
BalancedDiscStorage.add_file
|
def add_file(self, file_obj):
"""
Add new file into the storage.
Args:
file_obj (file): Opened file-like object.
Returns:
obj: Path where the file-like object is stored contained with hash\
in :class:`.PathAndHash` object.
Raises:
AssertionError: If the `file_obj` is not file-like object.
IOError: If the file couldn't be added to storage.
"""
BalancedDiscStorage._check_interface(file_obj)
file_hash = self._get_hash(file_obj)
dir_path = self._create_dir_path(file_hash)
final_path = os.path.join(dir_path, file_hash)
def copy_to_file(from_file, to_path):
with open(to_path, "wb") as out_file:
for part in self._get_file_iterator(from_file):
out_file.write(part)
try:
copy_to_file(from_file=file_obj, to_path=final_path)
except Exception:
os.unlink(final_path)
raise
return PathAndHash(path=final_path, hash=file_hash)
|
python
|
def add_file(self, file_obj):
"""
Add new file into the storage.
Args:
file_obj (file): Opened file-like object.
Returns:
obj: Path where the file-like object is stored contained with hash\
in :class:`.PathAndHash` object.
Raises:
AssertionError: If the `file_obj` is not file-like object.
IOError: If the file couldn't be added to storage.
"""
BalancedDiscStorage._check_interface(file_obj)
file_hash = self._get_hash(file_obj)
dir_path = self._create_dir_path(file_hash)
final_path = os.path.join(dir_path, file_hash)
def copy_to_file(from_file, to_path):
with open(to_path, "wb") as out_file:
for part in self._get_file_iterator(from_file):
out_file.write(part)
try:
copy_to_file(from_file=file_obj, to_path=final_path)
except Exception:
os.unlink(final_path)
raise
return PathAndHash(path=final_path, hash=file_hash)
|
[
"def",
"add_file",
"(",
"self",
",",
"file_obj",
")",
":",
"BalancedDiscStorage",
".",
"_check_interface",
"(",
"file_obj",
")",
"file_hash",
"=",
"self",
".",
"_get_hash",
"(",
"file_obj",
")",
"dir_path",
"=",
"self",
".",
"_create_dir_path",
"(",
"file_hash",
")",
"final_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"dir_path",
",",
"file_hash",
")",
"def",
"copy_to_file",
"(",
"from_file",
",",
"to_path",
")",
":",
"with",
"open",
"(",
"to_path",
",",
"\"wb\"",
")",
"as",
"out_file",
":",
"for",
"part",
"in",
"self",
".",
"_get_file_iterator",
"(",
"from_file",
")",
":",
"out_file",
".",
"write",
"(",
"part",
")",
"try",
":",
"copy_to_file",
"(",
"from_file",
"=",
"file_obj",
",",
"to_path",
"=",
"final_path",
")",
"except",
"Exception",
":",
"os",
".",
"unlink",
"(",
"final_path",
")",
"raise",
"return",
"PathAndHash",
"(",
"path",
"=",
"final_path",
",",
"hash",
"=",
"file_hash",
")"
] |
Add new file into the storage.
Args:
file_obj (file): Opened file-like object.
Returns:
obj: Path where the file-like object is stored contained with hash\
in :class:`.PathAndHash` object.
Raises:
AssertionError: If the `file_obj` is not file-like object.
IOError: If the file couldn't be added to storage.
|
[
"Add",
"new",
"file",
"into",
"the",
"storage",
"."
] |
d96854e2afdd70c814b16d177ff6308841b34b24
|
https://github.com/Bystroushaak/BalancedDiscStorage/blob/d96854e2afdd70c814b16d177ff6308841b34b24/src/BalancedDiscStorage/balanced_disc_storage.py#L208-L241
|
242,037
|
Bystroushaak/BalancedDiscStorage
|
src/BalancedDiscStorage/balanced_disc_storage.py
|
BalancedDiscStorage._recursive_remove_blank_dirs
|
def _recursive_remove_blank_dirs(self, path):
"""
Make sure, that blank directories are removed from the storage.
Args:
path (str): Path which you suspect that is blank.
"""
path = os.path.abspath(path)
# never delete root of the storage or smaller paths
if path == self.path or len(path) <= len(self.path):
return
# if the path doesn't exists, go one level upper
if not os.path.exists(path):
return self._recursive_remove_blank_dirs(
os.path.dirname(path)
)
# if the directory contains files, end yourself
if os.listdir(path):
return
# blank directories can be removed
shutil.rmtree(path)
# go one level up, check whether the directory is blank too
return self._recursive_remove_blank_dirs(
os.path.dirname(path)
)
|
python
|
def _recursive_remove_blank_dirs(self, path):
"""
Make sure, that blank directories are removed from the storage.
Args:
path (str): Path which you suspect that is blank.
"""
path = os.path.abspath(path)
# never delete root of the storage or smaller paths
if path == self.path or len(path) <= len(self.path):
return
# if the path doesn't exists, go one level upper
if not os.path.exists(path):
return self._recursive_remove_blank_dirs(
os.path.dirname(path)
)
# if the directory contains files, end yourself
if os.listdir(path):
return
# blank directories can be removed
shutil.rmtree(path)
# go one level up, check whether the directory is blank too
return self._recursive_remove_blank_dirs(
os.path.dirname(path)
)
|
[
"def",
"_recursive_remove_blank_dirs",
"(",
"self",
",",
"path",
")",
":",
"path",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"path",
")",
"# never delete root of the storage or smaller paths",
"if",
"path",
"==",
"self",
".",
"path",
"or",
"len",
"(",
"path",
")",
"<=",
"len",
"(",
"self",
".",
"path",
")",
":",
"return",
"# if the path doesn't exists, go one level upper",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"path",
")",
":",
"return",
"self",
".",
"_recursive_remove_blank_dirs",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"path",
")",
")",
"# if the directory contains files, end yourself",
"if",
"os",
".",
"listdir",
"(",
"path",
")",
":",
"return",
"# blank directories can be removed",
"shutil",
".",
"rmtree",
"(",
"path",
")",
"# go one level up, check whether the directory is blank too",
"return",
"self",
".",
"_recursive_remove_blank_dirs",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"path",
")",
")"
] |
Make sure, that blank directories are removed from the storage.
Args:
path (str): Path which you suspect that is blank.
|
[
"Make",
"sure",
"that",
"blank",
"directories",
"are",
"removed",
"from",
"the",
"storage",
"."
] |
d96854e2afdd70c814b16d177ff6308841b34b24
|
https://github.com/Bystroushaak/BalancedDiscStorage/blob/d96854e2afdd70c814b16d177ff6308841b34b24/src/BalancedDiscStorage/balanced_disc_storage.py#L276-L305
|
242,038
|
leonidessaguisagjr/pseudol10nutil
|
pseudol10nutil/pseudol10nutil.py
|
PseudoL10nUtil.pseudolocalize
|
def pseudolocalize(self, s):
"""
Performs pseudo-localization on a string. The specific transforms to be
applied to the string is defined in the transforms field of the object.
:param s: String to pseudo-localize.
:returns: Copy of the string s with the transforms applied. If the input
string is an empty string or None, an empty string is returned.
"""
if not s: # If the string is empty or None
return u""
if not isinstance(s, six.text_type):
raise TypeError("String to pseudo-localize must be of type '{0}'.".format(six.text_type.__name__))
# If no transforms are defined, return the string as-is.
if not self.transforms:
return s
fmt_spec = re.compile(
r"""(
{.*?} # https://docs.python.org/3/library/string.html#formatstrings
|
%(?:\(\w+?\))?.*?[acdeEfFgGiorsuxX%] # https://docs.python.org/3/library/stdtypes.html#printf-style-string-formatting
)""", re.VERBOSE)
# If we don't find any format specifiers in the input string, just munge the entire string at once.
if not fmt_spec.search(s):
result = s
for munge in self.transforms:
result = munge(result)
# If there are format specifiers, we do transliterations on the sections of the string that are not format
# specifiers, then do any other munging (padding the length, adding brackets) on the entire string.
else:
substrings = fmt_spec.split(s)
for munge in self.transforms:
if munge in transforms._transliterations:
for idx in range(len(substrings)):
if not fmt_spec.match(substrings[idx]):
substrings[idx] = munge(substrings[idx])
else:
continue
else:
continue
result = u"".join(substrings)
for munge in self.transforms:
if munge not in transforms._transliterations:
result = munge(result)
return result
|
python
|
def pseudolocalize(self, s):
"""
Performs pseudo-localization on a string. The specific transforms to be
applied to the string is defined in the transforms field of the object.
:param s: String to pseudo-localize.
:returns: Copy of the string s with the transforms applied. If the input
string is an empty string or None, an empty string is returned.
"""
if not s: # If the string is empty or None
return u""
if not isinstance(s, six.text_type):
raise TypeError("String to pseudo-localize must be of type '{0}'.".format(six.text_type.__name__))
# If no transforms are defined, return the string as-is.
if not self.transforms:
return s
fmt_spec = re.compile(
r"""(
{.*?} # https://docs.python.org/3/library/string.html#formatstrings
|
%(?:\(\w+?\))?.*?[acdeEfFgGiorsuxX%] # https://docs.python.org/3/library/stdtypes.html#printf-style-string-formatting
)""", re.VERBOSE)
# If we don't find any format specifiers in the input string, just munge the entire string at once.
if not fmt_spec.search(s):
result = s
for munge in self.transforms:
result = munge(result)
# If there are format specifiers, we do transliterations on the sections of the string that are not format
# specifiers, then do any other munging (padding the length, adding brackets) on the entire string.
else:
substrings = fmt_spec.split(s)
for munge in self.transforms:
if munge in transforms._transliterations:
for idx in range(len(substrings)):
if not fmt_spec.match(substrings[idx]):
substrings[idx] = munge(substrings[idx])
else:
continue
else:
continue
result = u"".join(substrings)
for munge in self.transforms:
if munge not in transforms._transliterations:
result = munge(result)
return result
|
[
"def",
"pseudolocalize",
"(",
"self",
",",
"s",
")",
":",
"if",
"not",
"s",
":",
"# If the string is empty or None",
"return",
"u\"\"",
"if",
"not",
"isinstance",
"(",
"s",
",",
"six",
".",
"text_type",
")",
":",
"raise",
"TypeError",
"(",
"\"String to pseudo-localize must be of type '{0}'.\"",
".",
"format",
"(",
"six",
".",
"text_type",
".",
"__name__",
")",
")",
"# If no transforms are defined, return the string as-is.",
"if",
"not",
"self",
".",
"transforms",
":",
"return",
"s",
"fmt_spec",
"=",
"re",
".",
"compile",
"(",
"r\"\"\"(\n {.*?} # https://docs.python.org/3/library/string.html#formatstrings\n |\n %(?:\\(\\w+?\\))?.*?[acdeEfFgGiorsuxX%] # https://docs.python.org/3/library/stdtypes.html#printf-style-string-formatting\n )\"\"\"",
",",
"re",
".",
"VERBOSE",
")",
"# If we don't find any format specifiers in the input string, just munge the entire string at once.",
"if",
"not",
"fmt_spec",
".",
"search",
"(",
"s",
")",
":",
"result",
"=",
"s",
"for",
"munge",
"in",
"self",
".",
"transforms",
":",
"result",
"=",
"munge",
"(",
"result",
")",
"# If there are format specifiers, we do transliterations on the sections of the string that are not format",
"# specifiers, then do any other munging (padding the length, adding brackets) on the entire string.",
"else",
":",
"substrings",
"=",
"fmt_spec",
".",
"split",
"(",
"s",
")",
"for",
"munge",
"in",
"self",
".",
"transforms",
":",
"if",
"munge",
"in",
"transforms",
".",
"_transliterations",
":",
"for",
"idx",
"in",
"range",
"(",
"len",
"(",
"substrings",
")",
")",
":",
"if",
"not",
"fmt_spec",
".",
"match",
"(",
"substrings",
"[",
"idx",
"]",
")",
":",
"substrings",
"[",
"idx",
"]",
"=",
"munge",
"(",
"substrings",
"[",
"idx",
"]",
")",
"else",
":",
"continue",
"else",
":",
"continue",
"result",
"=",
"u\"\"",
".",
"join",
"(",
"substrings",
")",
"for",
"munge",
"in",
"self",
".",
"transforms",
":",
"if",
"munge",
"not",
"in",
"transforms",
".",
"_transliterations",
":",
"result",
"=",
"munge",
"(",
"result",
")",
"return",
"result"
] |
Performs pseudo-localization on a string. The specific transforms to be
applied to the string is defined in the transforms field of the object.
:param s: String to pseudo-localize.
:returns: Copy of the string s with the transforms applied. If the input
string is an empty string or None, an empty string is returned.
|
[
"Performs",
"pseudo",
"-",
"localization",
"on",
"a",
"string",
".",
"The",
"specific",
"transforms",
"to",
"be",
"applied",
"to",
"the",
"string",
"is",
"defined",
"in",
"the",
"transforms",
"field",
"of",
"the",
"object",
"."
] |
39cb0ae8cc5c1df5690816a18472e0366a49ab8d
|
https://github.com/leonidessaguisagjr/pseudol10nutil/blob/39cb0ae8cc5c1df5690816a18472e0366a49ab8d/pseudol10nutil/pseudol10nutil.py#L33-L77
|
242,039
|
leonidessaguisagjr/pseudol10nutil
|
pseudol10nutil/pseudol10nutil.py
|
POFileUtil.pseudolocalizefile
|
def pseudolocalizefile(self, input_filename, output_filename, input_encoding='UTF-8', output_encoding='UTF-8',
overwrite_existing=True):
"""
Method for pseudo-localizing the message catalog file.
:param input_filename: Filename of the source (input) message catalog file.
:param output_filename: Filename of the target (output) message catalog file.
:param input_encoding: String indicating the encoding of the input file. Optional, defaults to 'UTF-8'.
:param output_encoding: String indicating the encoding of the output file. Optional, defaults to 'UTF-8'.
:param overwrite_existing: Boolean indicating if an existing output message catalog file should be overwritten.
True by default. If False, an IOError will be raised.
"""
leading_trailing_double_quotes = re.compile(r'^"|"$')
if not os.path.isfile(input_filename):
raise IOError("Input message catalog not found: {0}".format(os.path.abspath(input_filename)))
if os.path.isfile(output_filename) and not overwrite_existing:
raise IOError("Error, output message catalog already exists: {0}".format(os.path.abspath(output_filename)))
with codecs.open(input_filename, mode="r", encoding=input_encoding) as in_fileobj:
with codecs.open(output_filename, mode="w", encoding=output_encoding) as out_fileobj:
for current_line in in_fileobj:
out_fileobj.write(current_line)
if current_line.startswith("msgid"):
msgid = current_line.split(None, 1)[1].strip()
msgid = leading_trailing_double_quotes.sub('', msgid)
msgstr = self.l10nutil.pseudolocalize(msgid)
out_fileobj.write(u"msgstr \"{0}\"\n".format(msgstr))
next(in_fileobj)
|
python
|
def pseudolocalizefile(self, input_filename, output_filename, input_encoding='UTF-8', output_encoding='UTF-8',
overwrite_existing=True):
"""
Method for pseudo-localizing the message catalog file.
:param input_filename: Filename of the source (input) message catalog file.
:param output_filename: Filename of the target (output) message catalog file.
:param input_encoding: String indicating the encoding of the input file. Optional, defaults to 'UTF-8'.
:param output_encoding: String indicating the encoding of the output file. Optional, defaults to 'UTF-8'.
:param overwrite_existing: Boolean indicating if an existing output message catalog file should be overwritten.
True by default. If False, an IOError will be raised.
"""
leading_trailing_double_quotes = re.compile(r'^"|"$')
if not os.path.isfile(input_filename):
raise IOError("Input message catalog not found: {0}".format(os.path.abspath(input_filename)))
if os.path.isfile(output_filename) and not overwrite_existing:
raise IOError("Error, output message catalog already exists: {0}".format(os.path.abspath(output_filename)))
with codecs.open(input_filename, mode="r", encoding=input_encoding) as in_fileobj:
with codecs.open(output_filename, mode="w", encoding=output_encoding) as out_fileobj:
for current_line in in_fileobj:
out_fileobj.write(current_line)
if current_line.startswith("msgid"):
msgid = current_line.split(None, 1)[1].strip()
msgid = leading_trailing_double_quotes.sub('', msgid)
msgstr = self.l10nutil.pseudolocalize(msgid)
out_fileobj.write(u"msgstr \"{0}\"\n".format(msgstr))
next(in_fileobj)
|
[
"def",
"pseudolocalizefile",
"(",
"self",
",",
"input_filename",
",",
"output_filename",
",",
"input_encoding",
"=",
"'UTF-8'",
",",
"output_encoding",
"=",
"'UTF-8'",
",",
"overwrite_existing",
"=",
"True",
")",
":",
"leading_trailing_double_quotes",
"=",
"re",
".",
"compile",
"(",
"r'^\"|\"$'",
")",
"if",
"not",
"os",
".",
"path",
".",
"isfile",
"(",
"input_filename",
")",
":",
"raise",
"IOError",
"(",
"\"Input message catalog not found: {0}\"",
".",
"format",
"(",
"os",
".",
"path",
".",
"abspath",
"(",
"input_filename",
")",
")",
")",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"output_filename",
")",
"and",
"not",
"overwrite_existing",
":",
"raise",
"IOError",
"(",
"\"Error, output message catalog already exists: {0}\"",
".",
"format",
"(",
"os",
".",
"path",
".",
"abspath",
"(",
"output_filename",
")",
")",
")",
"with",
"codecs",
".",
"open",
"(",
"input_filename",
",",
"mode",
"=",
"\"r\"",
",",
"encoding",
"=",
"input_encoding",
")",
"as",
"in_fileobj",
":",
"with",
"codecs",
".",
"open",
"(",
"output_filename",
",",
"mode",
"=",
"\"w\"",
",",
"encoding",
"=",
"output_encoding",
")",
"as",
"out_fileobj",
":",
"for",
"current_line",
"in",
"in_fileobj",
":",
"out_fileobj",
".",
"write",
"(",
"current_line",
")",
"if",
"current_line",
".",
"startswith",
"(",
"\"msgid\"",
")",
":",
"msgid",
"=",
"current_line",
".",
"split",
"(",
"None",
",",
"1",
")",
"[",
"1",
"]",
".",
"strip",
"(",
")",
"msgid",
"=",
"leading_trailing_double_quotes",
".",
"sub",
"(",
"''",
",",
"msgid",
")",
"msgstr",
"=",
"self",
".",
"l10nutil",
".",
"pseudolocalize",
"(",
"msgid",
")",
"out_fileobj",
".",
"write",
"(",
"u\"msgstr \\\"{0}\\\"\\n\"",
".",
"format",
"(",
"msgstr",
")",
")",
"next",
"(",
"in_fileobj",
")"
] |
Method for pseudo-localizing the message catalog file.
:param input_filename: Filename of the source (input) message catalog file.
:param output_filename: Filename of the target (output) message catalog file.
:param input_encoding: String indicating the encoding of the input file. Optional, defaults to 'UTF-8'.
:param output_encoding: String indicating the encoding of the output file. Optional, defaults to 'UTF-8'.
:param overwrite_existing: Boolean indicating if an existing output message catalog file should be overwritten.
True by default. If False, an IOError will be raised.
|
[
"Method",
"for",
"pseudo",
"-",
"localizing",
"the",
"message",
"catalog",
"file",
"."
] |
39cb0ae8cc5c1df5690816a18472e0366a49ab8d
|
https://github.com/leonidessaguisagjr/pseudol10nutil/blob/39cb0ae8cc5c1df5690816a18472e0366a49ab8d/pseudol10nutil/pseudol10nutil.py#L98-L124
|
242,040
|
noobermin/lspreader
|
lspreader/pext.py
|
add_quantities
|
def add_quantities(d, coords=None, massE=0.511e6):
'''
Add physically interesting quantities to
pext data.
Parameters:
-----------
d : pext array
Keywords:
---------
coords : sequence of positions for angle calculation. None
or by default, calculate no angles.
For 2D, takes the angle depending on the order passed,
so this can be used for left-handed coordinate systems
like LSP's xz.
massE : rest mass energy of particles.
Returns a copy with the quantities appended.
'''
keys,items = zip(*calc_quantities(d,coords=coords,massE=massE).items());
return rfn.rec_append_fields(
d, keys, items);
|
python
|
def add_quantities(d, coords=None, massE=0.511e6):
'''
Add physically interesting quantities to
pext data.
Parameters:
-----------
d : pext array
Keywords:
---------
coords : sequence of positions for angle calculation. None
or by default, calculate no angles.
For 2D, takes the angle depending on the order passed,
so this can be used for left-handed coordinate systems
like LSP's xz.
massE : rest mass energy of particles.
Returns a copy with the quantities appended.
'''
keys,items = zip(*calc_quantities(d,coords=coords,massE=massE).items());
return rfn.rec_append_fields(
d, keys, items);
|
[
"def",
"add_quantities",
"(",
"d",
",",
"coords",
"=",
"None",
",",
"massE",
"=",
"0.511e6",
")",
":",
"keys",
",",
"items",
"=",
"zip",
"(",
"*",
"calc_quantities",
"(",
"d",
",",
"coords",
"=",
"coords",
",",
"massE",
"=",
"massE",
")",
".",
"items",
"(",
")",
")",
"return",
"rfn",
".",
"rec_append_fields",
"(",
"d",
",",
"keys",
",",
"items",
")"
] |
Add physically interesting quantities to
pext data.
Parameters:
-----------
d : pext array
Keywords:
---------
coords : sequence of positions for angle calculation. None
or by default, calculate no angles.
For 2D, takes the angle depending on the order passed,
so this can be used for left-handed coordinate systems
like LSP's xz.
massE : rest mass energy of particles.
Returns a copy with the quantities appended.
|
[
"Add",
"physically",
"interesting",
"quantities",
"to",
"pext",
"data",
"."
] |
903b9d6427513b07986ffacf76cbca54e18d8be6
|
https://github.com/noobermin/lspreader/blob/903b9d6427513b07986ffacf76cbca54e18d8be6/lspreader/pext.py#L8-L29
|
242,041
|
noobermin/lspreader
|
lspreader/pext.py
|
calc_quantities
|
def calc_quantities(d, coords=None, massE=0.511e6):
'''
Calculate physically interesting quantities from pext
Parameters:
-----------
d : pext array
Keywords:
---------
coords : sequence of positions for angle calculation. None
or by default, calculate no angles.
For 2D, takes the angle depending on the order passed,
so this can be used for left-handed coordinate systems
like LSP's xz.
massE : rest mass energy of particles.
Returns a dictionary of physical quantities.
'''
quants = dict();
quants['u_norm'] = np.sqrt(d['ux']**2+d['uy']**2+d['uz']**2)
quants['KE'] =(np.sqrt(quants['u_norm']**2+1)-1)*massE;
coords[:] = ['u'+coord for coord in coords];
if not coords:
pass;
elif len(coords) == 3:
quants['theta'] = np.arccos(d[coords[2]]/quants['u_norm']);
quants['phi'] = np.arctan2(d[coords[1]],d[coords[0]]);
quants['phi_n'] = np.arctan2(d[coords[2]],d[coords[0]]);
elif len(coords) == 2:
quants['phi'] = np.arctan2(d[coords[1]],d[coords[0]]);
return quants;
|
python
|
def calc_quantities(d, coords=None, massE=0.511e6):
'''
Calculate physically interesting quantities from pext
Parameters:
-----------
d : pext array
Keywords:
---------
coords : sequence of positions for angle calculation. None
or by default, calculate no angles.
For 2D, takes the angle depending on the order passed,
so this can be used for left-handed coordinate systems
like LSP's xz.
massE : rest mass energy of particles.
Returns a dictionary of physical quantities.
'''
quants = dict();
quants['u_norm'] = np.sqrt(d['ux']**2+d['uy']**2+d['uz']**2)
quants['KE'] =(np.sqrt(quants['u_norm']**2+1)-1)*massE;
coords[:] = ['u'+coord for coord in coords];
if not coords:
pass;
elif len(coords) == 3:
quants['theta'] = np.arccos(d[coords[2]]/quants['u_norm']);
quants['phi'] = np.arctan2(d[coords[1]],d[coords[0]]);
quants['phi_n'] = np.arctan2(d[coords[2]],d[coords[0]]);
elif len(coords) == 2:
quants['phi'] = np.arctan2(d[coords[1]],d[coords[0]]);
return quants;
|
[
"def",
"calc_quantities",
"(",
"d",
",",
"coords",
"=",
"None",
",",
"massE",
"=",
"0.511e6",
")",
":",
"quants",
"=",
"dict",
"(",
")",
"quants",
"[",
"'u_norm'",
"]",
"=",
"np",
".",
"sqrt",
"(",
"d",
"[",
"'ux'",
"]",
"**",
"2",
"+",
"d",
"[",
"'uy'",
"]",
"**",
"2",
"+",
"d",
"[",
"'uz'",
"]",
"**",
"2",
")",
"quants",
"[",
"'KE'",
"]",
"=",
"(",
"np",
".",
"sqrt",
"(",
"quants",
"[",
"'u_norm'",
"]",
"**",
"2",
"+",
"1",
")",
"-",
"1",
")",
"*",
"massE",
"coords",
"[",
":",
"]",
"=",
"[",
"'u'",
"+",
"coord",
"for",
"coord",
"in",
"coords",
"]",
"if",
"not",
"coords",
":",
"pass",
"elif",
"len",
"(",
"coords",
")",
"==",
"3",
":",
"quants",
"[",
"'theta'",
"]",
"=",
"np",
".",
"arccos",
"(",
"d",
"[",
"coords",
"[",
"2",
"]",
"]",
"/",
"quants",
"[",
"'u_norm'",
"]",
")",
"quants",
"[",
"'phi'",
"]",
"=",
"np",
".",
"arctan2",
"(",
"d",
"[",
"coords",
"[",
"1",
"]",
"]",
",",
"d",
"[",
"coords",
"[",
"0",
"]",
"]",
")",
"quants",
"[",
"'phi_n'",
"]",
"=",
"np",
".",
"arctan2",
"(",
"d",
"[",
"coords",
"[",
"2",
"]",
"]",
",",
"d",
"[",
"coords",
"[",
"0",
"]",
"]",
")",
"elif",
"len",
"(",
"coords",
")",
"==",
"2",
":",
"quants",
"[",
"'phi'",
"]",
"=",
"np",
".",
"arctan2",
"(",
"d",
"[",
"coords",
"[",
"1",
"]",
"]",
",",
"d",
"[",
"coords",
"[",
"0",
"]",
"]",
")",
"return",
"quants"
] |
Calculate physically interesting quantities from pext
Parameters:
-----------
d : pext array
Keywords:
---------
coords : sequence of positions for angle calculation. None
or by default, calculate no angles.
For 2D, takes the angle depending on the order passed,
so this can be used for left-handed coordinate systems
like LSP's xz.
massE : rest mass energy of particles.
Returns a dictionary of physical quantities.
|
[
"Calculate",
"physically",
"interesting",
"quantities",
"from",
"pext"
] |
903b9d6427513b07986ffacf76cbca54e18d8be6
|
https://github.com/noobermin/lspreader/blob/903b9d6427513b07986ffacf76cbca54e18d8be6/lspreader/pext.py#L31-L61
|
242,042
|
mattupstate/cubric
|
cubric/tasks.py
|
create_server
|
def create_server(initialize=True):
"""Create a server"""
with provider() as p:
host_string = p.create_server()
if initialize:
env.host_string = host_string
initialize_server()
|
python
|
def create_server(initialize=True):
"""Create a server"""
with provider() as p:
host_string = p.create_server()
if initialize:
env.host_string = host_string
initialize_server()
|
[
"def",
"create_server",
"(",
"initialize",
"=",
"True",
")",
":",
"with",
"provider",
"(",
")",
"as",
"p",
":",
"host_string",
"=",
"p",
".",
"create_server",
"(",
")",
"if",
"initialize",
":",
"env",
".",
"host_string",
"=",
"host_string",
"initialize_server",
"(",
")"
] |
Create a server
|
[
"Create",
"a",
"server"
] |
a648ce00e4467cd14d71e754240ef6c1f87a34b5
|
https://github.com/mattupstate/cubric/blob/a648ce00e4467cd14d71e754240ef6c1f87a34b5/cubric/tasks.py#L11-L17
|
242,043
|
thomasbiddle/Kippt-for-Python
|
kippt/users.py
|
User.list
|
def list(self, list_id):
""" Retrieve the list given for the user.
"""
r = requests.get(
"https://kippt.com/api/users/%s/lists/%s" % (self.id, list_id),
headers=self.kippt.header
)
return (r.json())
|
python
|
def list(self, list_id):
""" Retrieve the list given for the user.
"""
r = requests.get(
"https://kippt.com/api/users/%s/lists/%s" % (self.id, list_id),
headers=self.kippt.header
)
return (r.json())
|
[
"def",
"list",
"(",
"self",
",",
"list_id",
")",
":",
"r",
"=",
"requests",
".",
"get",
"(",
"\"https://kippt.com/api/users/%s/lists/%s\"",
"%",
"(",
"self",
".",
"id",
",",
"list_id",
")",
",",
"headers",
"=",
"self",
".",
"kippt",
".",
"header",
")",
"return",
"(",
"r",
".",
"json",
"(",
")",
")"
] |
Retrieve the list given for the user.
|
[
"Retrieve",
"the",
"list",
"given",
"for",
"the",
"user",
"."
] |
dddd0ff84d70ccf2d84e50e3cff7aad89f9c1267
|
https://github.com/thomasbiddle/Kippt-for-Python/blob/dddd0ff84d70ccf2d84e50e3cff7aad89f9c1267/kippt/users.py#L148-L157
|
242,044
|
thomasbiddle/Kippt-for-Python
|
kippt/users.py
|
User.relationship
|
def relationship(self):
""" Retrieve what the relationship between the user and
then authenticated user is.
"""
r = requests.get(
"https://kippt.com/api/users/%s/relationship" % (self.id),
headers=self.kippt.header
)
return (r.json())
|
python
|
def relationship(self):
""" Retrieve what the relationship between the user and
then authenticated user is.
"""
r = requests.get(
"https://kippt.com/api/users/%s/relationship" % (self.id),
headers=self.kippt.header
)
return (r.json())
|
[
"def",
"relationship",
"(",
"self",
")",
":",
"r",
"=",
"requests",
".",
"get",
"(",
"\"https://kippt.com/api/users/%s/relationship\"",
"%",
"(",
"self",
".",
"id",
")",
",",
"headers",
"=",
"self",
".",
"kippt",
".",
"header",
")",
"return",
"(",
"r",
".",
"json",
"(",
")",
")"
] |
Retrieve what the relationship between the user and
then authenticated user is.
|
[
"Retrieve",
"what",
"the",
"relationship",
"between",
"the",
"user",
"and",
"then",
"authenticated",
"user",
"is",
"."
] |
dddd0ff84d70ccf2d84e50e3cff7aad89f9c1267
|
https://github.com/thomasbiddle/Kippt-for-Python/blob/dddd0ff84d70ccf2d84e50e3cff7aad89f9c1267/kippt/users.py#L159-L168
|
242,045
|
OpenGov/carpenter
|
carpenter/blocks/block.py
|
TableBlock._find_titles
|
def _find_titles(self, row_index, column_index):
'''
Helper method to find all titles for a particular cell.
'''
titles = []
for column_search in range(self.start[1], column_index):
cell = self.table[row_index][column_search]
if cell == None or (isinstance(cell, basestring) and not cell):
continue
elif isinstance(cell, basestring):
titles.append(cell)
else:
break
for row_search in range(self.start[0], row_index):
cell = self.table[row_search][column_index]
if cell == None or (isinstance(cell, basestring) and not cell):
continue
elif isinstance(cell, basestring):
titles.append(cell)
else:
break
return titles
|
python
|
def _find_titles(self, row_index, column_index):
'''
Helper method to find all titles for a particular cell.
'''
titles = []
for column_search in range(self.start[1], column_index):
cell = self.table[row_index][column_search]
if cell == None or (isinstance(cell, basestring) and not cell):
continue
elif isinstance(cell, basestring):
titles.append(cell)
else:
break
for row_search in range(self.start[0], row_index):
cell = self.table[row_search][column_index]
if cell == None or (isinstance(cell, basestring) and not cell):
continue
elif isinstance(cell, basestring):
titles.append(cell)
else:
break
return titles
|
[
"def",
"_find_titles",
"(",
"self",
",",
"row_index",
",",
"column_index",
")",
":",
"titles",
"=",
"[",
"]",
"for",
"column_search",
"in",
"range",
"(",
"self",
".",
"start",
"[",
"1",
"]",
",",
"column_index",
")",
":",
"cell",
"=",
"self",
".",
"table",
"[",
"row_index",
"]",
"[",
"column_search",
"]",
"if",
"cell",
"==",
"None",
"or",
"(",
"isinstance",
"(",
"cell",
",",
"basestring",
")",
"and",
"not",
"cell",
")",
":",
"continue",
"elif",
"isinstance",
"(",
"cell",
",",
"basestring",
")",
":",
"titles",
".",
"append",
"(",
"cell",
")",
"else",
":",
"break",
"for",
"row_search",
"in",
"range",
"(",
"self",
".",
"start",
"[",
"0",
"]",
",",
"row_index",
")",
":",
"cell",
"=",
"self",
".",
"table",
"[",
"row_search",
"]",
"[",
"column_index",
"]",
"if",
"cell",
"==",
"None",
"or",
"(",
"isinstance",
"(",
"cell",
",",
"basestring",
")",
"and",
"not",
"cell",
")",
":",
"continue",
"elif",
"isinstance",
"(",
"cell",
",",
"basestring",
")",
":",
"titles",
".",
"append",
"(",
"cell",
")",
"else",
":",
"break",
"return",
"titles"
] |
Helper method to find all titles for a particular cell.
|
[
"Helper",
"method",
"to",
"find",
"all",
"titles",
"for",
"a",
"particular",
"cell",
"."
] |
0ab3c54c05133b9b0468c63e834a7ce3a6fb575b
|
https://github.com/OpenGov/carpenter/blob/0ab3c54c05133b9b0468c63e834a7ce3a6fb575b/carpenter/blocks/block.py#L52-L76
|
242,046
|
OpenGov/carpenter
|
carpenter/blocks/block.py
|
TableBlock.copy_raw_block
|
def copy_raw_block(self):
'''
Copies the block as it was originally specified by start and end into a new table.
Returns:
A copy of the block with no block transformations.
'''
ctable = []
r, c = 0, 0
try:
for row_index in range(self.start[0], self.end[0]):
r = row_index
row = []
ctable.append(row)
for column_index in range(self.start[1], self.end[1]):
c = column_index
row.append(self.table[row_index][column_index])
except IndexError:
raise InvalidBlockError('Missing table element at [%d, %d]' % (r, c))
return ctable
|
python
|
def copy_raw_block(self):
'''
Copies the block as it was originally specified by start and end into a new table.
Returns:
A copy of the block with no block transformations.
'''
ctable = []
r, c = 0, 0
try:
for row_index in range(self.start[0], self.end[0]):
r = row_index
row = []
ctable.append(row)
for column_index in range(self.start[1], self.end[1]):
c = column_index
row.append(self.table[row_index][column_index])
except IndexError:
raise InvalidBlockError('Missing table element at [%d, %d]' % (r, c))
return ctable
|
[
"def",
"copy_raw_block",
"(",
"self",
")",
":",
"ctable",
"=",
"[",
"]",
"r",
",",
"c",
"=",
"0",
",",
"0",
"try",
":",
"for",
"row_index",
"in",
"range",
"(",
"self",
".",
"start",
"[",
"0",
"]",
",",
"self",
".",
"end",
"[",
"0",
"]",
")",
":",
"r",
"=",
"row_index",
"row",
"=",
"[",
"]",
"ctable",
".",
"append",
"(",
"row",
")",
"for",
"column_index",
"in",
"range",
"(",
"self",
".",
"start",
"[",
"1",
"]",
",",
"self",
".",
"end",
"[",
"1",
"]",
")",
":",
"c",
"=",
"column_index",
"row",
".",
"append",
"(",
"self",
".",
"table",
"[",
"row_index",
"]",
"[",
"column_index",
"]",
")",
"except",
"IndexError",
":",
"raise",
"InvalidBlockError",
"(",
"'Missing table element at [%d, %d]'",
"%",
"(",
"r",
",",
"c",
")",
")",
"return",
"ctable"
] |
Copies the block as it was originally specified by start and end into a new table.
Returns:
A copy of the block with no block transformations.
|
[
"Copies",
"the",
"block",
"as",
"it",
"was",
"originally",
"specified",
"by",
"start",
"and",
"end",
"into",
"a",
"new",
"table",
"."
] |
0ab3c54c05133b9b0468c63e834a7ce3a6fb575b
|
https://github.com/OpenGov/carpenter/blob/0ab3c54c05133b9b0468c63e834a7ce3a6fb575b/carpenter/blocks/block.py#L78-L97
|
242,047
|
OpenGov/carpenter
|
carpenter/blocks/block.py
|
TableBlock.copy_numbered_block
|
def copy_numbered_block(self):
'''
Copies the block as it was originally specified by start and end into a new table.
Additionally inserts the original table indices in the first row of the block.
Returns:
A copy of the block with no block transformations.
'''
raw_block = self.copy_raw_block()
# Inserts the column number in row 0
raw_block.insert(0, range(self.start[1], self.end[1]))
return raw_block
|
python
|
def copy_numbered_block(self):
'''
Copies the block as it was originally specified by start and end into a new table.
Additionally inserts the original table indices in the first row of the block.
Returns:
A copy of the block with no block transformations.
'''
raw_block = self.copy_raw_block()
# Inserts the column number in row 0
raw_block.insert(0, range(self.start[1], self.end[1]))
return raw_block
|
[
"def",
"copy_numbered_block",
"(",
"self",
")",
":",
"raw_block",
"=",
"self",
".",
"copy_raw_block",
"(",
")",
"# Inserts the column number in row 0",
"raw_block",
".",
"insert",
"(",
"0",
",",
"range",
"(",
"self",
".",
"start",
"[",
"1",
"]",
",",
"self",
".",
"end",
"[",
"1",
"]",
")",
")",
"return",
"raw_block"
] |
Copies the block as it was originally specified by start and end into a new table.
Additionally inserts the original table indices in the first row of the block.
Returns:
A copy of the block with no block transformations.
|
[
"Copies",
"the",
"block",
"as",
"it",
"was",
"originally",
"specified",
"by",
"start",
"and",
"end",
"into",
"a",
"new",
"table",
".",
"Additionally",
"inserts",
"the",
"original",
"table",
"indices",
"in",
"the",
"first",
"row",
"of",
"the",
"block",
"."
] |
0ab3c54c05133b9b0468c63e834a7ce3a6fb575b
|
https://github.com/OpenGov/carpenter/blob/0ab3c54c05133b9b0468c63e834a7ce3a6fb575b/carpenter/blocks/block.py#L99-L110
|
242,048
|
OpenGov/carpenter
|
carpenter/blocks/block.py
|
TableBlock.convert_to_row_table
|
def convert_to_row_table(self, add_units=True):
'''
Converts the block into row titled elements. These elements are copied into the return
table, which can be much longer than the original block.
Args:
add_units: Indicates if units should be appened to each row item.
Returns:
A row-titled table representing the data in the block.
'''
rtable = []
if add_units:
relavent_units = self.get_relavent_units()
# Create a row for each data element
for row_index in range(self.start[0], self.end[0]):
for column_index in range(self.start[1], self.end[1]):
cell = self.table[row_index][column_index]
if cell != None and isinstance(cell, (int, float, long)):
titles = self._find_titles(row_index, column_index)
titles.append(cell)
if add_units:
titles.append(relavent_units.get((row_index, column_index)))
rtable.append(titles)
# If we had all 'titles', just return the original block
if not rtable:
for row_index in range(self.start[0], self.end[0]):
row = []
rtable.append(row)
for column_index in range(self.start[1], self.end[1]):
row.append(self.table[row_index][column_index])
if add_units:
row.append(relavent_units.get((row_index, column_index)))
return rtable
|
python
|
def convert_to_row_table(self, add_units=True):
'''
Converts the block into row titled elements. These elements are copied into the return
table, which can be much longer than the original block.
Args:
add_units: Indicates if units should be appened to each row item.
Returns:
A row-titled table representing the data in the block.
'''
rtable = []
if add_units:
relavent_units = self.get_relavent_units()
# Create a row for each data element
for row_index in range(self.start[0], self.end[0]):
for column_index in range(self.start[1], self.end[1]):
cell = self.table[row_index][column_index]
if cell != None and isinstance(cell, (int, float, long)):
titles = self._find_titles(row_index, column_index)
titles.append(cell)
if add_units:
titles.append(relavent_units.get((row_index, column_index)))
rtable.append(titles)
# If we had all 'titles', just return the original block
if not rtable:
for row_index in range(self.start[0], self.end[0]):
row = []
rtable.append(row)
for column_index in range(self.start[1], self.end[1]):
row.append(self.table[row_index][column_index])
if add_units:
row.append(relavent_units.get((row_index, column_index)))
return rtable
|
[
"def",
"convert_to_row_table",
"(",
"self",
",",
"add_units",
"=",
"True",
")",
":",
"rtable",
"=",
"[",
"]",
"if",
"add_units",
":",
"relavent_units",
"=",
"self",
".",
"get_relavent_units",
"(",
")",
"# Create a row for each data element",
"for",
"row_index",
"in",
"range",
"(",
"self",
".",
"start",
"[",
"0",
"]",
",",
"self",
".",
"end",
"[",
"0",
"]",
")",
":",
"for",
"column_index",
"in",
"range",
"(",
"self",
".",
"start",
"[",
"1",
"]",
",",
"self",
".",
"end",
"[",
"1",
"]",
")",
":",
"cell",
"=",
"self",
".",
"table",
"[",
"row_index",
"]",
"[",
"column_index",
"]",
"if",
"cell",
"!=",
"None",
"and",
"isinstance",
"(",
"cell",
",",
"(",
"int",
",",
"float",
",",
"long",
")",
")",
":",
"titles",
"=",
"self",
".",
"_find_titles",
"(",
"row_index",
",",
"column_index",
")",
"titles",
".",
"append",
"(",
"cell",
")",
"if",
"add_units",
":",
"titles",
".",
"append",
"(",
"relavent_units",
".",
"get",
"(",
"(",
"row_index",
",",
"column_index",
")",
")",
")",
"rtable",
".",
"append",
"(",
"titles",
")",
"# If we had all 'titles', just return the original block",
"if",
"not",
"rtable",
":",
"for",
"row_index",
"in",
"range",
"(",
"self",
".",
"start",
"[",
"0",
"]",
",",
"self",
".",
"end",
"[",
"0",
"]",
")",
":",
"row",
"=",
"[",
"]",
"rtable",
".",
"append",
"(",
"row",
")",
"for",
"column_index",
"in",
"range",
"(",
"self",
".",
"start",
"[",
"1",
"]",
",",
"self",
".",
"end",
"[",
"1",
"]",
")",
":",
"row",
".",
"append",
"(",
"self",
".",
"table",
"[",
"row_index",
"]",
"[",
"column_index",
"]",
")",
"if",
"add_units",
":",
"row",
".",
"append",
"(",
"relavent_units",
".",
"get",
"(",
"(",
"row_index",
",",
"column_index",
")",
")",
")",
"return",
"rtable"
] |
Converts the block into row titled elements. These elements are copied into the return
table, which can be much longer than the original block.
Args:
add_units: Indicates if units should be appened to each row item.
Returns:
A row-titled table representing the data in the block.
|
[
"Converts",
"the",
"block",
"into",
"row",
"titled",
"elements",
".",
"These",
"elements",
"are",
"copied",
"into",
"the",
"return",
"table",
"which",
"can",
"be",
"much",
"longer",
"than",
"the",
"original",
"block",
"."
] |
0ab3c54c05133b9b0468c63e834a7ce3a6fb575b
|
https://github.com/OpenGov/carpenter/blob/0ab3c54c05133b9b0468c63e834a7ce3a6fb575b/carpenter/blocks/block.py#L112-L148
|
242,049
|
OpenGov/carpenter
|
carpenter/blocks/block.py
|
TableBlock.flag_is_related
|
def flag_is_related(self, flag):
'''
Checks for relationship between a flag and this block.
Returns:
True if the flag is related to this block.
'''
same_worksheet = flag.worksheet == self.worksheet
if isinstance(flag.location, (tuple, list)):
return (flag.location[0] >= self.start[0] and flag.location[0] < self.end[0] and
flag.location[1] >= self.start[1] and flag.location[1] < self.end[1] and
same_worksheet)
else:
return same_worksheet
|
python
|
def flag_is_related(self, flag):
'''
Checks for relationship between a flag and this block.
Returns:
True if the flag is related to this block.
'''
same_worksheet = flag.worksheet == self.worksheet
if isinstance(flag.location, (tuple, list)):
return (flag.location[0] >= self.start[0] and flag.location[0] < self.end[0] and
flag.location[1] >= self.start[1] and flag.location[1] < self.end[1] and
same_worksheet)
else:
return same_worksheet
|
[
"def",
"flag_is_related",
"(",
"self",
",",
"flag",
")",
":",
"same_worksheet",
"=",
"flag",
".",
"worksheet",
"==",
"self",
".",
"worksheet",
"if",
"isinstance",
"(",
"flag",
".",
"location",
",",
"(",
"tuple",
",",
"list",
")",
")",
":",
"return",
"(",
"flag",
".",
"location",
"[",
"0",
"]",
">=",
"self",
".",
"start",
"[",
"0",
"]",
"and",
"flag",
".",
"location",
"[",
"0",
"]",
"<",
"self",
".",
"end",
"[",
"0",
"]",
"and",
"flag",
".",
"location",
"[",
"1",
"]",
">=",
"self",
".",
"start",
"[",
"1",
"]",
"and",
"flag",
".",
"location",
"[",
"1",
"]",
"<",
"self",
".",
"end",
"[",
"1",
"]",
"and",
"same_worksheet",
")",
"else",
":",
"return",
"same_worksheet"
] |
Checks for relationship between a flag and this block.
Returns:
True if the flag is related to this block.
|
[
"Checks",
"for",
"relationship",
"between",
"a",
"flag",
"and",
"this",
"block",
"."
] |
0ab3c54c05133b9b0468c63e834a7ce3a6fb575b
|
https://github.com/OpenGov/carpenter/blob/0ab3c54c05133b9b0468c63e834a7ce3a6fb575b/carpenter/blocks/block.py#L150-L163
|
242,050
|
OpenGov/carpenter
|
carpenter/blocks/block.py
|
TableBlock.unit_is_related
|
def unit_is_related(self, location, worksheet):
'''
Checks for relationship between a unit location and this block.
Returns:
True if the location is related to this block.
'''
same_worksheet = worksheet == self.worksheet
if isinstance(location, (tuple, list)):
return (location[0] >= self.start[0] and location[0] < self.end[0] and
location[1] >= self.start[1] and location[1] < self.end[1] and
same_worksheet)
else:
return same_worksheet
|
python
|
def unit_is_related(self, location, worksheet):
'''
Checks for relationship between a unit location and this block.
Returns:
True if the location is related to this block.
'''
same_worksheet = worksheet == self.worksheet
if isinstance(location, (tuple, list)):
return (location[0] >= self.start[0] and location[0] < self.end[0] and
location[1] >= self.start[1] and location[1] < self.end[1] and
same_worksheet)
else:
return same_worksheet
|
[
"def",
"unit_is_related",
"(",
"self",
",",
"location",
",",
"worksheet",
")",
":",
"same_worksheet",
"=",
"worksheet",
"==",
"self",
".",
"worksheet",
"if",
"isinstance",
"(",
"location",
",",
"(",
"tuple",
",",
"list",
")",
")",
":",
"return",
"(",
"location",
"[",
"0",
"]",
">=",
"self",
".",
"start",
"[",
"0",
"]",
"and",
"location",
"[",
"0",
"]",
"<",
"self",
".",
"end",
"[",
"0",
"]",
"and",
"location",
"[",
"1",
"]",
">=",
"self",
".",
"start",
"[",
"1",
"]",
"and",
"location",
"[",
"1",
"]",
"<",
"self",
".",
"end",
"[",
"1",
"]",
"and",
"same_worksheet",
")",
"else",
":",
"return",
"same_worksheet"
] |
Checks for relationship between a unit location and this block.
Returns:
True if the location is related to this block.
|
[
"Checks",
"for",
"relationship",
"between",
"a",
"unit",
"location",
"and",
"this",
"block",
"."
] |
0ab3c54c05133b9b0468c63e834a7ce3a6fb575b
|
https://github.com/OpenGov/carpenter/blob/0ab3c54c05133b9b0468c63e834a7ce3a6fb575b/carpenter/blocks/block.py#L165-L178
|
242,051
|
OpenGov/carpenter
|
carpenter/blocks/block.py
|
TableBlock.get_relavent_flags
|
def get_relavent_flags(self):
'''
Retrieves the relevant flags for this data block.
Returns:
All flags related to this block.
'''
relavent_flags = {}
for code, flags_list in self.flags.items():
relavent_flags[code] = []
for flag in flags_list:
if self.flag_is_related(flag):
relavent_flags[code].append(flag)
# Remove that flag level if no error exists
if not relavent_flags[code]:
del relavent_flags[code]
return relavent_flags
|
python
|
def get_relavent_flags(self):
'''
Retrieves the relevant flags for this data block.
Returns:
All flags related to this block.
'''
relavent_flags = {}
for code, flags_list in self.flags.items():
relavent_flags[code] = []
for flag in flags_list:
if self.flag_is_related(flag):
relavent_flags[code].append(flag)
# Remove that flag level if no error exists
if not relavent_flags[code]:
del relavent_flags[code]
return relavent_flags
|
[
"def",
"get_relavent_flags",
"(",
"self",
")",
":",
"relavent_flags",
"=",
"{",
"}",
"for",
"code",
",",
"flags_list",
"in",
"self",
".",
"flags",
".",
"items",
"(",
")",
":",
"relavent_flags",
"[",
"code",
"]",
"=",
"[",
"]",
"for",
"flag",
"in",
"flags_list",
":",
"if",
"self",
".",
"flag_is_related",
"(",
"flag",
")",
":",
"relavent_flags",
"[",
"code",
"]",
".",
"append",
"(",
"flag",
")",
"# Remove that flag level if no error exists",
"if",
"not",
"relavent_flags",
"[",
"code",
"]",
":",
"del",
"relavent_flags",
"[",
"code",
"]",
"return",
"relavent_flags"
] |
Retrieves the relevant flags for this data block.
Returns:
All flags related to this block.
|
[
"Retrieves",
"the",
"relevant",
"flags",
"for",
"this",
"data",
"block",
"."
] |
0ab3c54c05133b9b0468c63e834a7ce3a6fb575b
|
https://github.com/OpenGov/carpenter/blob/0ab3c54c05133b9b0468c63e834a7ce3a6fb575b/carpenter/blocks/block.py#L180-L198
|
242,052
|
OpenGov/carpenter
|
carpenter/blocks/block.py
|
TableBlock.get_relavent_units
|
def get_relavent_units(self):
'''
Retrieves the relevant units for this data block.
Returns:
All flags related to this block.
'''
relavent_units = {}
for location,unit in self.units.items():
if self.unit_is_related(location, self.worksheet):
relavent_units[location] = unit
return relavent_units
|
python
|
def get_relavent_units(self):
'''
Retrieves the relevant units for this data block.
Returns:
All flags related to this block.
'''
relavent_units = {}
for location,unit in self.units.items():
if self.unit_is_related(location, self.worksheet):
relavent_units[location] = unit
return relavent_units
|
[
"def",
"get_relavent_units",
"(",
"self",
")",
":",
"relavent_units",
"=",
"{",
"}",
"for",
"location",
",",
"unit",
"in",
"self",
".",
"units",
".",
"items",
"(",
")",
":",
"if",
"self",
".",
"unit_is_related",
"(",
"location",
",",
"self",
".",
"worksheet",
")",
":",
"relavent_units",
"[",
"location",
"]",
"=",
"unit",
"return",
"relavent_units"
] |
Retrieves the relevant units for this data block.
Returns:
All flags related to this block.
|
[
"Retrieves",
"the",
"relevant",
"units",
"for",
"this",
"data",
"block",
"."
] |
0ab3c54c05133b9b0468c63e834a7ce3a6fb575b
|
https://github.com/OpenGov/carpenter/blob/0ab3c54c05133b9b0468c63e834a7ce3a6fb575b/carpenter/blocks/block.py#L200-L213
|
242,053
|
OpenGov/carpenter
|
carpenter/blocks/block.py
|
BlockValidator.validate_block
|
def validate_block(self):
'''
This method is a multi-stage process which repairs row titles, then repairs column titles,
then checks for invalid rows, and finally for invalid columns.
This maybe should have been written via state machines... Also suggested as being possibly
written with code-injection.
'''
# Don't allow for 0 width or 0 height blocks
if self._check_zero_size():
return False
# Single width or height blocks should be considered
self._check_one_size()
# Repair any obvious block structure issues
self._repair_row()
self._repair_column()
# Fill any remaining empty titles if we're not a complete block
if not self.complete_block:
self._fill_row_holes()
self._fill_column_holes()
# Check for invalid data after repairs
self._validate_rows()
self._validate_columns()
# We're valid enough to be used -- though error flags may have
# been thrown into flags.
return True
|
python
|
def validate_block(self):
'''
This method is a multi-stage process which repairs row titles, then repairs column titles,
then checks for invalid rows, and finally for invalid columns.
This maybe should have been written via state machines... Also suggested as being possibly
written with code-injection.
'''
# Don't allow for 0 width or 0 height blocks
if self._check_zero_size():
return False
# Single width or height blocks should be considered
self._check_one_size()
# Repair any obvious block structure issues
self._repair_row()
self._repair_column()
# Fill any remaining empty titles if we're not a complete block
if not self.complete_block:
self._fill_row_holes()
self._fill_column_holes()
# Check for invalid data after repairs
self._validate_rows()
self._validate_columns()
# We're valid enough to be used -- though error flags may have
# been thrown into flags.
return True
|
[
"def",
"validate_block",
"(",
"self",
")",
":",
"# Don't allow for 0 width or 0 height blocks",
"if",
"self",
".",
"_check_zero_size",
"(",
")",
":",
"return",
"False",
"# Single width or height blocks should be considered",
"self",
".",
"_check_one_size",
"(",
")",
"# Repair any obvious block structure issues",
"self",
".",
"_repair_row",
"(",
")",
"self",
".",
"_repair_column",
"(",
")",
"# Fill any remaining empty titles if we're not a complete block",
"if",
"not",
"self",
".",
"complete_block",
":",
"self",
".",
"_fill_row_holes",
"(",
")",
"self",
".",
"_fill_column_holes",
"(",
")",
"# Check for invalid data after repairs",
"self",
".",
"_validate_rows",
"(",
")",
"self",
".",
"_validate_columns",
"(",
")",
"# We're valid enough to be used -- though error flags may have",
"# been thrown into flags.",
"return",
"True"
] |
This method is a multi-stage process which repairs row titles, then repairs column titles,
then checks for invalid rows, and finally for invalid columns.
This maybe should have been written via state machines... Also suggested as being possibly
written with code-injection.
|
[
"This",
"method",
"is",
"a",
"multi",
"-",
"stage",
"process",
"which",
"repairs",
"row",
"titles",
"then",
"repairs",
"column",
"titles",
"then",
"checks",
"for",
"invalid",
"rows",
"and",
"finally",
"for",
"invalid",
"columns",
"."
] |
0ab3c54c05133b9b0468c63e834a7ce3a6fb575b
|
https://github.com/OpenGov/carpenter/blob/0ab3c54c05133b9b0468c63e834a7ce3a6fb575b/carpenter/blocks/block.py#L246-L275
|
242,054
|
OpenGov/carpenter
|
carpenter/blocks/block.py
|
BlockValidator._check_zero_size
|
def _check_zero_size(self):
'''
Checks for zero height or zero width blocks and flags the occurrence.
Returns:
True if the block is size 0.
'''
block_zero = self.end[0] <= self.start[0] or self.end[1] <= self.start[1]
if block_zero:
self.flag_change(self.flags, 'fatal', worksheet=self.worksheet,
message=self.FLAGS['0-size'])
return block_zero
|
python
|
def _check_zero_size(self):
'''
Checks for zero height or zero width blocks and flags the occurrence.
Returns:
True if the block is size 0.
'''
block_zero = self.end[0] <= self.start[0] or self.end[1] <= self.start[1]
if block_zero:
self.flag_change(self.flags, 'fatal', worksheet=self.worksheet,
message=self.FLAGS['0-size'])
return block_zero
|
[
"def",
"_check_zero_size",
"(",
"self",
")",
":",
"block_zero",
"=",
"self",
".",
"end",
"[",
"0",
"]",
"<=",
"self",
".",
"start",
"[",
"0",
"]",
"or",
"self",
".",
"end",
"[",
"1",
"]",
"<=",
"self",
".",
"start",
"[",
"1",
"]",
"if",
"block_zero",
":",
"self",
".",
"flag_change",
"(",
"self",
".",
"flags",
",",
"'fatal'",
",",
"worksheet",
"=",
"self",
".",
"worksheet",
",",
"message",
"=",
"self",
".",
"FLAGS",
"[",
"'0-size'",
"]",
")",
"return",
"block_zero"
] |
Checks for zero height or zero width blocks and flags the occurrence.
Returns:
True if the block is size 0.
|
[
"Checks",
"for",
"zero",
"height",
"or",
"zero",
"width",
"blocks",
"and",
"flags",
"the",
"occurrence",
"."
] |
0ab3c54c05133b9b0468c63e834a7ce3a6fb575b
|
https://github.com/OpenGov/carpenter/blob/0ab3c54c05133b9b0468c63e834a7ce3a6fb575b/carpenter/blocks/block.py#L277-L288
|
242,055
|
OpenGov/carpenter
|
carpenter/blocks/block.py
|
BlockValidator._check_one_size
|
def _check_one_size(self):
'''
Checks for single height or single width blocks and flags the occurrence.
Returns:
True if the block is size 1.
'''
block_one = self.end[0] == self.start[0]+1 or self.end[1] == self.start[1]+1
if block_one:
self.flag_change(self.flags, 'error', self.start, self.worksheet,
message=self.FLAGS['1-size'])
return block_one
|
python
|
def _check_one_size(self):
'''
Checks for single height or single width blocks and flags the occurrence.
Returns:
True if the block is size 1.
'''
block_one = self.end[0] == self.start[0]+1 or self.end[1] == self.start[1]+1
if block_one:
self.flag_change(self.flags, 'error', self.start, self.worksheet,
message=self.FLAGS['1-size'])
return block_one
|
[
"def",
"_check_one_size",
"(",
"self",
")",
":",
"block_one",
"=",
"self",
".",
"end",
"[",
"0",
"]",
"==",
"self",
".",
"start",
"[",
"0",
"]",
"+",
"1",
"or",
"self",
".",
"end",
"[",
"1",
"]",
"==",
"self",
".",
"start",
"[",
"1",
"]",
"+",
"1",
"if",
"block_one",
":",
"self",
".",
"flag_change",
"(",
"self",
".",
"flags",
",",
"'error'",
",",
"self",
".",
"start",
",",
"self",
".",
"worksheet",
",",
"message",
"=",
"self",
".",
"FLAGS",
"[",
"'1-size'",
"]",
")",
"return",
"block_one"
] |
Checks for single height or single width blocks and flags the occurrence.
Returns:
True if the block is size 1.
|
[
"Checks",
"for",
"single",
"height",
"or",
"single",
"width",
"blocks",
"and",
"flags",
"the",
"occurrence",
"."
] |
0ab3c54c05133b9b0468c63e834a7ce3a6fb575b
|
https://github.com/OpenGov/carpenter/blob/0ab3c54c05133b9b0468c63e834a7ce3a6fb575b/carpenter/blocks/block.py#L290-L301
|
242,056
|
OpenGov/carpenter
|
carpenter/blocks/block.py
|
BlockValidator._repair_row
|
def _repair_row(self):
'''
Searches for missing titles that can be inferred from the surrounding data and automatically
repairs those titles.
'''
# Repair any title rows
check_for_title = True
for row_index in range(self.start[0], self.end[0]):
table_row = self.table[row_index]
row_start = table_row[self.start[1]]
# Look for empty cells leading titles
if check_for_title and is_empty_cell(row_start):
self._stringify_row(row_index)
# Check for year titles in column or row
elif (isinstance(row_start, basestring) and
re.search(allregex.year_regex, row_start)):
self._check_stringify_year_row(row_index)
else:
check_for_title = False
|
python
|
def _repair_row(self):
'''
Searches for missing titles that can be inferred from the surrounding data and automatically
repairs those titles.
'''
# Repair any title rows
check_for_title = True
for row_index in range(self.start[0], self.end[0]):
table_row = self.table[row_index]
row_start = table_row[self.start[1]]
# Look for empty cells leading titles
if check_for_title and is_empty_cell(row_start):
self._stringify_row(row_index)
# Check for year titles in column or row
elif (isinstance(row_start, basestring) and
re.search(allregex.year_regex, row_start)):
self._check_stringify_year_row(row_index)
else:
check_for_title = False
|
[
"def",
"_repair_row",
"(",
"self",
")",
":",
"# Repair any title rows",
"check_for_title",
"=",
"True",
"for",
"row_index",
"in",
"range",
"(",
"self",
".",
"start",
"[",
"0",
"]",
",",
"self",
".",
"end",
"[",
"0",
"]",
")",
":",
"table_row",
"=",
"self",
".",
"table",
"[",
"row_index",
"]",
"row_start",
"=",
"table_row",
"[",
"self",
".",
"start",
"[",
"1",
"]",
"]",
"# Look for empty cells leading titles",
"if",
"check_for_title",
"and",
"is_empty_cell",
"(",
"row_start",
")",
":",
"self",
".",
"_stringify_row",
"(",
"row_index",
")",
"# Check for year titles in column or row",
"elif",
"(",
"isinstance",
"(",
"row_start",
",",
"basestring",
")",
"and",
"re",
".",
"search",
"(",
"allregex",
".",
"year_regex",
",",
"row_start",
")",
")",
":",
"self",
".",
"_check_stringify_year_row",
"(",
"row_index",
")",
"else",
":",
"check_for_title",
"=",
"False"
] |
Searches for missing titles that can be inferred from the surrounding data and automatically
repairs those titles.
|
[
"Searches",
"for",
"missing",
"titles",
"that",
"can",
"be",
"inferred",
"from",
"the",
"surrounding",
"data",
"and",
"automatically",
"repairs",
"those",
"titles",
"."
] |
0ab3c54c05133b9b0468c63e834a7ce3a6fb575b
|
https://github.com/OpenGov/carpenter/blob/0ab3c54c05133b9b0468c63e834a7ce3a6fb575b/carpenter/blocks/block.py#L303-L322
|
242,057
|
OpenGov/carpenter
|
carpenter/blocks/block.py
|
BlockValidator._repair_column
|
def _repair_column(self):
'''
Same as _repair_row but for columns.
'''
# Repair any title columns
check_for_title = True
for column_index in range(self.start[1], self.end[1]):
table_column = TableTranspose(self.table)[column_index]
column_start = table_column[self.start[0]]
# Only iterate through columns starting with a blank cell
if check_for_title and is_empty_cell(column_start):
self._stringify_column(column_index)
# Check for year titles in column or row
elif (isinstance(column_start, basestring) and
re.search(allregex.year_regex, column_start)):
self._check_stringify_year_column(column_index)
else:
check_for_title = False
|
python
|
def _repair_column(self):
'''
Same as _repair_row but for columns.
'''
# Repair any title columns
check_for_title = True
for column_index in range(self.start[1], self.end[1]):
table_column = TableTranspose(self.table)[column_index]
column_start = table_column[self.start[0]]
# Only iterate through columns starting with a blank cell
if check_for_title and is_empty_cell(column_start):
self._stringify_column(column_index)
# Check for year titles in column or row
elif (isinstance(column_start, basestring) and
re.search(allregex.year_regex, column_start)):
self._check_stringify_year_column(column_index)
else:
check_for_title = False
|
[
"def",
"_repair_column",
"(",
"self",
")",
":",
"# Repair any title columns",
"check_for_title",
"=",
"True",
"for",
"column_index",
"in",
"range",
"(",
"self",
".",
"start",
"[",
"1",
"]",
",",
"self",
".",
"end",
"[",
"1",
"]",
")",
":",
"table_column",
"=",
"TableTranspose",
"(",
"self",
".",
"table",
")",
"[",
"column_index",
"]",
"column_start",
"=",
"table_column",
"[",
"self",
".",
"start",
"[",
"0",
"]",
"]",
"# Only iterate through columns starting with a blank cell",
"if",
"check_for_title",
"and",
"is_empty_cell",
"(",
"column_start",
")",
":",
"self",
".",
"_stringify_column",
"(",
"column_index",
")",
"# Check for year titles in column or row",
"elif",
"(",
"isinstance",
"(",
"column_start",
",",
"basestring",
")",
"and",
"re",
".",
"search",
"(",
"allregex",
".",
"year_regex",
",",
"column_start",
")",
")",
":",
"self",
".",
"_check_stringify_year_column",
"(",
"column_index",
")",
"else",
":",
"check_for_title",
"=",
"False"
] |
Same as _repair_row but for columns.
|
[
"Same",
"as",
"_repair_row",
"but",
"for",
"columns",
"."
] |
0ab3c54c05133b9b0468c63e834a7ce3a6fb575b
|
https://github.com/OpenGov/carpenter/blob/0ab3c54c05133b9b0468c63e834a7ce3a6fb575b/carpenter/blocks/block.py#L324-L342
|
242,058
|
OpenGov/carpenter
|
carpenter/blocks/block.py
|
BlockValidator._fill_row_holes
|
def _fill_row_holes(self):
'''
Fill any remaining row title cells that are empty. This must be done after the other passes
to avoid preemptively filling in empty cells reserved for other operations.
'''
for row_index in range(self.start[0], self.max_title_row):
table_row = self.table[row_index]
row_start = table_row[self.start[1]]
if is_text_cell(row_start):
self._check_fill_title_row(row_index)
|
python
|
def _fill_row_holes(self):
'''
Fill any remaining row title cells that are empty. This must be done after the other passes
to avoid preemptively filling in empty cells reserved for other operations.
'''
for row_index in range(self.start[0], self.max_title_row):
table_row = self.table[row_index]
row_start = table_row[self.start[1]]
if is_text_cell(row_start):
self._check_fill_title_row(row_index)
|
[
"def",
"_fill_row_holes",
"(",
"self",
")",
":",
"for",
"row_index",
"in",
"range",
"(",
"self",
".",
"start",
"[",
"0",
"]",
",",
"self",
".",
"max_title_row",
")",
":",
"table_row",
"=",
"self",
".",
"table",
"[",
"row_index",
"]",
"row_start",
"=",
"table_row",
"[",
"self",
".",
"start",
"[",
"1",
"]",
"]",
"if",
"is_text_cell",
"(",
"row_start",
")",
":",
"self",
".",
"_check_fill_title_row",
"(",
"row_index",
")"
] |
Fill any remaining row title cells that are empty. This must be done after the other passes
to avoid preemptively filling in empty cells reserved for other operations.
|
[
"Fill",
"any",
"remaining",
"row",
"title",
"cells",
"that",
"are",
"empty",
".",
"This",
"must",
"be",
"done",
"after",
"the",
"other",
"passes",
"to",
"avoid",
"preemptively",
"filling",
"in",
"empty",
"cells",
"reserved",
"for",
"other",
"operations",
"."
] |
0ab3c54c05133b9b0468c63e834a7ce3a6fb575b
|
https://github.com/OpenGov/carpenter/blob/0ab3c54c05133b9b0468c63e834a7ce3a6fb575b/carpenter/blocks/block.py#L344-L353
|
242,059
|
OpenGov/carpenter
|
carpenter/blocks/block.py
|
BlockValidator._fill_column_holes
|
def _fill_column_holes(self):
'''
Same as _fill_row_holes but for columns.
'''
for column_index in range(self.start[1], self.end[1]):
table_column = TableTranspose(self.table)[column_index]
column_start = table_column[self.start[0]]
if is_text_cell(column_start):
self._check_fill_title_column(column_index)
|
python
|
def _fill_column_holes(self):
'''
Same as _fill_row_holes but for columns.
'''
for column_index in range(self.start[1], self.end[1]):
table_column = TableTranspose(self.table)[column_index]
column_start = table_column[self.start[0]]
if is_text_cell(column_start):
self._check_fill_title_column(column_index)
|
[
"def",
"_fill_column_holes",
"(",
"self",
")",
":",
"for",
"column_index",
"in",
"range",
"(",
"self",
".",
"start",
"[",
"1",
"]",
",",
"self",
".",
"end",
"[",
"1",
"]",
")",
":",
"table_column",
"=",
"TableTranspose",
"(",
"self",
".",
"table",
")",
"[",
"column_index",
"]",
"column_start",
"=",
"table_column",
"[",
"self",
".",
"start",
"[",
"0",
"]",
"]",
"if",
"is_text_cell",
"(",
"column_start",
")",
":",
"self",
".",
"_check_fill_title_column",
"(",
"column_index",
")"
] |
Same as _fill_row_holes but for columns.
|
[
"Same",
"as",
"_fill_row_holes",
"but",
"for",
"columns",
"."
] |
0ab3c54c05133b9b0468c63e834a7ce3a6fb575b
|
https://github.com/OpenGov/carpenter/blob/0ab3c54c05133b9b0468c63e834a7ce3a6fb575b/carpenter/blocks/block.py#L355-L363
|
242,060
|
OpenGov/carpenter
|
carpenter/blocks/block.py
|
BlockValidator._validate_rows
|
def _validate_rows(self):
'''
Checks for any missing data row by row. It also checks for changes in cell type and flags
multiple switches as an error.
'''
for row_index in range(self.start[0], self.end[0]):
table_row = self.table[row_index]
used_row = self.used_cells[row_index]
row_type = None
if self.end[1] > self.start[1]:
row_type = get_cell_type(table_row[self.start[1]])
num_type_changes = 0
for column_index in range(self.start[1], self.end[1]):
if used_row[column_index]:
self.flag_change(self.flags, 'error', (row_index, column_index),
self.worksheet, self.FLAGS['used'])
if not check_cell_type(table_row[column_index], row_type):
row_type = get_cell_type(table_row[column_index])
num_type_changes += 1
if num_type_changes > 1:
self.flag_change(self.flags, 'warning', (row_index, column_index-1),
self.worksheet, self.FLAGS['unexpected-change'])
# Decrement this to catch other cells which change again
num_type_changes -= 1
# Mark this cell as used
used_row[column_index] = True
|
python
|
def _validate_rows(self):
'''
Checks for any missing data row by row. It also checks for changes in cell type and flags
multiple switches as an error.
'''
for row_index in range(self.start[0], self.end[0]):
table_row = self.table[row_index]
used_row = self.used_cells[row_index]
row_type = None
if self.end[1] > self.start[1]:
row_type = get_cell_type(table_row[self.start[1]])
num_type_changes = 0
for column_index in range(self.start[1], self.end[1]):
if used_row[column_index]:
self.flag_change(self.flags, 'error', (row_index, column_index),
self.worksheet, self.FLAGS['used'])
if not check_cell_type(table_row[column_index], row_type):
row_type = get_cell_type(table_row[column_index])
num_type_changes += 1
if num_type_changes > 1:
self.flag_change(self.flags, 'warning', (row_index, column_index-1),
self.worksheet, self.FLAGS['unexpected-change'])
# Decrement this to catch other cells which change again
num_type_changes -= 1
# Mark this cell as used
used_row[column_index] = True
|
[
"def",
"_validate_rows",
"(",
"self",
")",
":",
"for",
"row_index",
"in",
"range",
"(",
"self",
".",
"start",
"[",
"0",
"]",
",",
"self",
".",
"end",
"[",
"0",
"]",
")",
":",
"table_row",
"=",
"self",
".",
"table",
"[",
"row_index",
"]",
"used_row",
"=",
"self",
".",
"used_cells",
"[",
"row_index",
"]",
"row_type",
"=",
"None",
"if",
"self",
".",
"end",
"[",
"1",
"]",
">",
"self",
".",
"start",
"[",
"1",
"]",
":",
"row_type",
"=",
"get_cell_type",
"(",
"table_row",
"[",
"self",
".",
"start",
"[",
"1",
"]",
"]",
")",
"num_type_changes",
"=",
"0",
"for",
"column_index",
"in",
"range",
"(",
"self",
".",
"start",
"[",
"1",
"]",
",",
"self",
".",
"end",
"[",
"1",
"]",
")",
":",
"if",
"used_row",
"[",
"column_index",
"]",
":",
"self",
".",
"flag_change",
"(",
"self",
".",
"flags",
",",
"'error'",
",",
"(",
"row_index",
",",
"column_index",
")",
",",
"self",
".",
"worksheet",
",",
"self",
".",
"FLAGS",
"[",
"'used'",
"]",
")",
"if",
"not",
"check_cell_type",
"(",
"table_row",
"[",
"column_index",
"]",
",",
"row_type",
")",
":",
"row_type",
"=",
"get_cell_type",
"(",
"table_row",
"[",
"column_index",
"]",
")",
"num_type_changes",
"+=",
"1",
"if",
"num_type_changes",
">",
"1",
":",
"self",
".",
"flag_change",
"(",
"self",
".",
"flags",
",",
"'warning'",
",",
"(",
"row_index",
",",
"column_index",
"-",
"1",
")",
",",
"self",
".",
"worksheet",
",",
"self",
".",
"FLAGS",
"[",
"'unexpected-change'",
"]",
")",
"# Decrement this to catch other cells which change again",
"num_type_changes",
"-=",
"1",
"# Mark this cell as used",
"used_row",
"[",
"column_index",
"]",
"=",
"True"
] |
Checks for any missing data row by row. It also checks for changes in cell type and flags
multiple switches as an error.
|
[
"Checks",
"for",
"any",
"missing",
"data",
"row",
"by",
"row",
".",
"It",
"also",
"checks",
"for",
"changes",
"in",
"cell",
"type",
"and",
"flags",
"multiple",
"switches",
"as",
"an",
"error",
"."
] |
0ab3c54c05133b9b0468c63e834a7ce3a6fb575b
|
https://github.com/OpenGov/carpenter/blob/0ab3c54c05133b9b0468c63e834a7ce3a6fb575b/carpenter/blocks/block.py#L365-L392
|
242,061
|
OpenGov/carpenter
|
carpenter/blocks/block.py
|
BlockValidator._validate_columns
|
def _validate_columns(self):
'''
Same as _validate_rows but for columns. Also ignore used_cells as _validate_rows should
update used_cells.
'''
for column_index in range(self.start[1], self.end[1]):
table_column = TableTranspose(self.table)[column_index]
column_type = None
if self.end[0] > self.start[0]:
column_type = get_cell_type(table_column[self.start[0]])
num_type_changes = 0
for row_index in range(self.start[0], self.end[0]):
if not check_cell_type(table_column[row_index], column_type):
column_type = get_cell_type(table_column[row_index])
num_type_changes += 1
if num_type_changes > 1:
self.flag_change(self.flags, 'warning', (row_index-1, column_index),
self.worksheet, self.FLAGS['unexpected-change'])
# Decrement this to catch other cells which change again
num_type_changes -= 1
|
python
|
def _validate_columns(self):
'''
Same as _validate_rows but for columns. Also ignore used_cells as _validate_rows should
update used_cells.
'''
for column_index in range(self.start[1], self.end[1]):
table_column = TableTranspose(self.table)[column_index]
column_type = None
if self.end[0] > self.start[0]:
column_type = get_cell_type(table_column[self.start[0]])
num_type_changes = 0
for row_index in range(self.start[0], self.end[0]):
if not check_cell_type(table_column[row_index], column_type):
column_type = get_cell_type(table_column[row_index])
num_type_changes += 1
if num_type_changes > 1:
self.flag_change(self.flags, 'warning', (row_index-1, column_index),
self.worksheet, self.FLAGS['unexpected-change'])
# Decrement this to catch other cells which change again
num_type_changes -= 1
|
[
"def",
"_validate_columns",
"(",
"self",
")",
":",
"for",
"column_index",
"in",
"range",
"(",
"self",
".",
"start",
"[",
"1",
"]",
",",
"self",
".",
"end",
"[",
"1",
"]",
")",
":",
"table_column",
"=",
"TableTranspose",
"(",
"self",
".",
"table",
")",
"[",
"column_index",
"]",
"column_type",
"=",
"None",
"if",
"self",
".",
"end",
"[",
"0",
"]",
">",
"self",
".",
"start",
"[",
"0",
"]",
":",
"column_type",
"=",
"get_cell_type",
"(",
"table_column",
"[",
"self",
".",
"start",
"[",
"0",
"]",
"]",
")",
"num_type_changes",
"=",
"0",
"for",
"row_index",
"in",
"range",
"(",
"self",
".",
"start",
"[",
"0",
"]",
",",
"self",
".",
"end",
"[",
"0",
"]",
")",
":",
"if",
"not",
"check_cell_type",
"(",
"table_column",
"[",
"row_index",
"]",
",",
"column_type",
")",
":",
"column_type",
"=",
"get_cell_type",
"(",
"table_column",
"[",
"row_index",
"]",
")",
"num_type_changes",
"+=",
"1",
"if",
"num_type_changes",
">",
"1",
":",
"self",
".",
"flag_change",
"(",
"self",
".",
"flags",
",",
"'warning'",
",",
"(",
"row_index",
"-",
"1",
",",
"column_index",
")",
",",
"self",
".",
"worksheet",
",",
"self",
".",
"FLAGS",
"[",
"'unexpected-change'",
"]",
")",
"# Decrement this to catch other cells which change again",
"num_type_changes",
"-=",
"1"
] |
Same as _validate_rows but for columns. Also ignore used_cells as _validate_rows should
update used_cells.
|
[
"Same",
"as",
"_validate_rows",
"but",
"for",
"columns",
".",
"Also",
"ignore",
"used_cells",
"as",
"_validate_rows",
"should",
"update",
"used_cells",
"."
] |
0ab3c54c05133b9b0468c63e834a7ce3a6fb575b
|
https://github.com/OpenGov/carpenter/blob/0ab3c54c05133b9b0468c63e834a7ce3a6fb575b/carpenter/blocks/block.py#L394-L415
|
242,062
|
OpenGov/carpenter
|
carpenter/blocks/block.py
|
BlockValidator._stringify_row
|
def _stringify_row(self, row_index):
'''
Stringifies an entire row, filling in blanks with prior titles as they are found.
'''
table_row = self.table[row_index]
prior_cell = None
for column_index in range(self.start[1], self.end[1]):
cell, changed = self._check_interpret_cell(table_row[column_index], prior_cell, row_index, column_index)
if changed:
table_row[column_index] = cell
prior_cell = cell
|
python
|
def _stringify_row(self, row_index):
'''
Stringifies an entire row, filling in blanks with prior titles as they are found.
'''
table_row = self.table[row_index]
prior_cell = None
for column_index in range(self.start[1], self.end[1]):
cell, changed = self._check_interpret_cell(table_row[column_index], prior_cell, row_index, column_index)
if changed:
table_row[column_index] = cell
prior_cell = cell
|
[
"def",
"_stringify_row",
"(",
"self",
",",
"row_index",
")",
":",
"table_row",
"=",
"self",
".",
"table",
"[",
"row_index",
"]",
"prior_cell",
"=",
"None",
"for",
"column_index",
"in",
"range",
"(",
"self",
".",
"start",
"[",
"1",
"]",
",",
"self",
".",
"end",
"[",
"1",
"]",
")",
":",
"cell",
",",
"changed",
"=",
"self",
".",
"_check_interpret_cell",
"(",
"table_row",
"[",
"column_index",
"]",
",",
"prior_cell",
",",
"row_index",
",",
"column_index",
")",
"if",
"changed",
":",
"table_row",
"[",
"column_index",
"]",
"=",
"cell",
"prior_cell",
"=",
"cell"
] |
Stringifies an entire row, filling in blanks with prior titles as they are found.
|
[
"Stringifies",
"an",
"entire",
"row",
"filling",
"in",
"blanks",
"with",
"prior",
"titles",
"as",
"they",
"are",
"found",
"."
] |
0ab3c54c05133b9b0468c63e834a7ce3a6fb575b
|
https://github.com/OpenGov/carpenter/blob/0ab3c54c05133b9b0468c63e834a7ce3a6fb575b/carpenter/blocks/block.py#L417-L427
|
242,063
|
OpenGov/carpenter
|
carpenter/blocks/block.py
|
BlockValidator._stringify_column
|
def _stringify_column(self, column_index):
'''
Same as _stringify_row but for columns.
'''
table_column = TableTranspose(self.table)[column_index]
prior_cell = None
for row_index in range(self.start[0], self.end[0]):
cell, changed = self._check_interpret_cell(table_column[row_index], prior_cell, row_index, column_index)
if changed:
table_column[row_index] = cell
prior_cell = cell
|
python
|
def _stringify_column(self, column_index):
'''
Same as _stringify_row but for columns.
'''
table_column = TableTranspose(self.table)[column_index]
prior_cell = None
for row_index in range(self.start[0], self.end[0]):
cell, changed = self._check_interpret_cell(table_column[row_index], prior_cell, row_index, column_index)
if changed:
table_column[row_index] = cell
prior_cell = cell
|
[
"def",
"_stringify_column",
"(",
"self",
",",
"column_index",
")",
":",
"table_column",
"=",
"TableTranspose",
"(",
"self",
".",
"table",
")",
"[",
"column_index",
"]",
"prior_cell",
"=",
"None",
"for",
"row_index",
"in",
"range",
"(",
"self",
".",
"start",
"[",
"0",
"]",
",",
"self",
".",
"end",
"[",
"0",
"]",
")",
":",
"cell",
",",
"changed",
"=",
"self",
".",
"_check_interpret_cell",
"(",
"table_column",
"[",
"row_index",
"]",
",",
"prior_cell",
",",
"row_index",
",",
"column_index",
")",
"if",
"changed",
":",
"table_column",
"[",
"row_index",
"]",
"=",
"cell",
"prior_cell",
"=",
"cell"
] |
Same as _stringify_row but for columns.
|
[
"Same",
"as",
"_stringify_row",
"but",
"for",
"columns",
"."
] |
0ab3c54c05133b9b0468c63e834a7ce3a6fb575b
|
https://github.com/OpenGov/carpenter/blob/0ab3c54c05133b9b0468c63e834a7ce3a6fb575b/carpenter/blocks/block.py#L429-L439
|
242,064
|
OpenGov/carpenter
|
carpenter/blocks/block.py
|
BlockValidator._check_interpret_cell
|
def _check_interpret_cell(self, cell, prior_cell, row_index, column_index):
'''
Helper function which checks cell type and performs cell translation to strings where
necessary.
Returns:
A tuple of the form '(cell, changed)' where 'changed' indicates if 'cell' differs from
input.
'''
changed = False
if (not is_empty_cell(cell) and
not is_text_cell(cell)):
self.flag_change(self.flags, 'interpreted', (row_index, column_index),
self.worksheet, self.FLAGS['converted-to-string'])
cell = str(cell)
changed = True
# If we find a blank cell, propagate the prior title
elif is_empty_cell(cell):
self.flag_change(self.flags, 'interpreted', (row_index, column_index),
self.worksheet, self.FLAGS['copied-title'])
cell = prior_cell
changed = True
return cell, changed
|
python
|
def _check_interpret_cell(self, cell, prior_cell, row_index, column_index):
'''
Helper function which checks cell type and performs cell translation to strings where
necessary.
Returns:
A tuple of the form '(cell, changed)' where 'changed' indicates if 'cell' differs from
input.
'''
changed = False
if (not is_empty_cell(cell) and
not is_text_cell(cell)):
self.flag_change(self.flags, 'interpreted', (row_index, column_index),
self.worksheet, self.FLAGS['converted-to-string'])
cell = str(cell)
changed = True
# If we find a blank cell, propagate the prior title
elif is_empty_cell(cell):
self.flag_change(self.flags, 'interpreted', (row_index, column_index),
self.worksheet, self.FLAGS['copied-title'])
cell = prior_cell
changed = True
return cell, changed
|
[
"def",
"_check_interpret_cell",
"(",
"self",
",",
"cell",
",",
"prior_cell",
",",
"row_index",
",",
"column_index",
")",
":",
"changed",
"=",
"False",
"if",
"(",
"not",
"is_empty_cell",
"(",
"cell",
")",
"and",
"not",
"is_text_cell",
"(",
"cell",
")",
")",
":",
"self",
".",
"flag_change",
"(",
"self",
".",
"flags",
",",
"'interpreted'",
",",
"(",
"row_index",
",",
"column_index",
")",
",",
"self",
".",
"worksheet",
",",
"self",
".",
"FLAGS",
"[",
"'converted-to-string'",
"]",
")",
"cell",
"=",
"str",
"(",
"cell",
")",
"changed",
"=",
"True",
"# If we find a blank cell, propagate the prior title",
"elif",
"is_empty_cell",
"(",
"cell",
")",
":",
"self",
".",
"flag_change",
"(",
"self",
".",
"flags",
",",
"'interpreted'",
",",
"(",
"row_index",
",",
"column_index",
")",
",",
"self",
".",
"worksheet",
",",
"self",
".",
"FLAGS",
"[",
"'copied-title'",
"]",
")",
"cell",
"=",
"prior_cell",
"changed",
"=",
"True",
"return",
"cell",
",",
"changed"
] |
Helper function which checks cell type and performs cell translation to strings where
necessary.
Returns:
A tuple of the form '(cell, changed)' where 'changed' indicates if 'cell' differs from
input.
|
[
"Helper",
"function",
"which",
"checks",
"cell",
"type",
"and",
"performs",
"cell",
"translation",
"to",
"strings",
"where",
"necessary",
"."
] |
0ab3c54c05133b9b0468c63e834a7ce3a6fb575b
|
https://github.com/OpenGov/carpenter/blob/0ab3c54c05133b9b0468c63e834a7ce3a6fb575b/carpenter/blocks/block.py#L441-L463
|
242,065
|
OpenGov/carpenter
|
carpenter/blocks/block.py
|
BlockValidator._check_fill_title_row
|
def _check_fill_title_row(self, row_index):
'''
Checks the given row to see if it is all titles and fills any blanks cells if that is the
case.
'''
table_row = self.table[row_index]
# Determine if the whole row is titles
prior_row = self.table[row_index-1] if row_index > 0 else table_row
for column_index in range(self.start[1], self.end[1]):
if is_num_cell(table_row[column_index]) or is_num_cell(prior_row[column_index]):
return
# Since we're a title row, stringify the row
self._stringify_row(row_index)
|
python
|
def _check_fill_title_row(self, row_index):
'''
Checks the given row to see if it is all titles and fills any blanks cells if that is the
case.
'''
table_row = self.table[row_index]
# Determine if the whole row is titles
prior_row = self.table[row_index-1] if row_index > 0 else table_row
for column_index in range(self.start[1], self.end[1]):
if is_num_cell(table_row[column_index]) or is_num_cell(prior_row[column_index]):
return
# Since we're a title row, stringify the row
self._stringify_row(row_index)
|
[
"def",
"_check_fill_title_row",
"(",
"self",
",",
"row_index",
")",
":",
"table_row",
"=",
"self",
".",
"table",
"[",
"row_index",
"]",
"# Determine if the whole row is titles",
"prior_row",
"=",
"self",
".",
"table",
"[",
"row_index",
"-",
"1",
"]",
"if",
"row_index",
">",
"0",
"else",
"table_row",
"for",
"column_index",
"in",
"range",
"(",
"self",
".",
"start",
"[",
"1",
"]",
",",
"self",
".",
"end",
"[",
"1",
"]",
")",
":",
"if",
"is_num_cell",
"(",
"table_row",
"[",
"column_index",
"]",
")",
"or",
"is_num_cell",
"(",
"prior_row",
"[",
"column_index",
"]",
")",
":",
"return",
"# Since we're a title row, stringify the row",
"self",
".",
"_stringify_row",
"(",
"row_index",
")"
] |
Checks the given row to see if it is all titles and fills any blanks cells if that is the
case.
|
[
"Checks",
"the",
"given",
"row",
"to",
"see",
"if",
"it",
"is",
"all",
"titles",
"and",
"fills",
"any",
"blanks",
"cells",
"if",
"that",
"is",
"the",
"case",
"."
] |
0ab3c54c05133b9b0468c63e834a7ce3a6fb575b
|
https://github.com/OpenGov/carpenter/blob/0ab3c54c05133b9b0468c63e834a7ce3a6fb575b/carpenter/blocks/block.py#L465-L477
|
242,066
|
OpenGov/carpenter
|
carpenter/blocks/block.py
|
BlockValidator._check_fill_title_column
|
def _check_fill_title_column(self, column_index):
'''
Same as _check_fill_title_row but for columns.
'''
# Determine if the whole column is titles
table_column = TableTranspose(self.table)[column_index]
prior_column = TableTranspose(self.table)[column_index-1] if column_index > 0 else table_column
for row_index in range(self.start[0], self.end[0]):
if is_num_cell(table_column[row_index]) or is_num_cell(prior_column[row_index]):
return
# Since we're a title row, stringify the column
self._stringify_column(column_index)
|
python
|
def _check_fill_title_column(self, column_index):
'''
Same as _check_fill_title_row but for columns.
'''
# Determine if the whole column is titles
table_column = TableTranspose(self.table)[column_index]
prior_column = TableTranspose(self.table)[column_index-1] if column_index > 0 else table_column
for row_index in range(self.start[0], self.end[0]):
if is_num_cell(table_column[row_index]) or is_num_cell(prior_column[row_index]):
return
# Since we're a title row, stringify the column
self._stringify_column(column_index)
|
[
"def",
"_check_fill_title_column",
"(",
"self",
",",
"column_index",
")",
":",
"# Determine if the whole column is titles",
"table_column",
"=",
"TableTranspose",
"(",
"self",
".",
"table",
")",
"[",
"column_index",
"]",
"prior_column",
"=",
"TableTranspose",
"(",
"self",
".",
"table",
")",
"[",
"column_index",
"-",
"1",
"]",
"if",
"column_index",
">",
"0",
"else",
"table_column",
"for",
"row_index",
"in",
"range",
"(",
"self",
".",
"start",
"[",
"0",
"]",
",",
"self",
".",
"end",
"[",
"0",
"]",
")",
":",
"if",
"is_num_cell",
"(",
"table_column",
"[",
"row_index",
"]",
")",
"or",
"is_num_cell",
"(",
"prior_column",
"[",
"row_index",
"]",
")",
":",
"return",
"# Since we're a title row, stringify the column",
"self",
".",
"_stringify_column",
"(",
"column_index",
")"
] |
Same as _check_fill_title_row but for columns.
|
[
"Same",
"as",
"_check_fill_title_row",
"but",
"for",
"columns",
"."
] |
0ab3c54c05133b9b0468c63e834a7ce3a6fb575b
|
https://github.com/OpenGov/carpenter/blob/0ab3c54c05133b9b0468c63e834a7ce3a6fb575b/carpenter/blocks/block.py#L479-L490
|
242,067
|
OpenGov/carpenter
|
carpenter/blocks/block.py
|
BlockValidator._check_stringify_year_row
|
def _check_stringify_year_row(self, row_index):
'''
Checks the given row to see if it is labeled year data and fills any blank years within that
data.
'''
table_row = self.table[row_index]
# State trackers
prior_year = None
for column_index in range(self.start[1]+1, self.end[1]):
current_year = table_row[column_index]
# Quit if we see
if not self._check_years(current_year, prior_year):
return
# Only copy when we see a non-empty entry
if current_year:
prior_year = current_year
# If we have a title of years, convert them to strings
self._stringify_row(row_index)
|
python
|
def _check_stringify_year_row(self, row_index):
'''
Checks the given row to see if it is labeled year data and fills any blank years within that
data.
'''
table_row = self.table[row_index]
# State trackers
prior_year = None
for column_index in range(self.start[1]+1, self.end[1]):
current_year = table_row[column_index]
# Quit if we see
if not self._check_years(current_year, prior_year):
return
# Only copy when we see a non-empty entry
if current_year:
prior_year = current_year
# If we have a title of years, convert them to strings
self._stringify_row(row_index)
|
[
"def",
"_check_stringify_year_row",
"(",
"self",
",",
"row_index",
")",
":",
"table_row",
"=",
"self",
".",
"table",
"[",
"row_index",
"]",
"# State trackers",
"prior_year",
"=",
"None",
"for",
"column_index",
"in",
"range",
"(",
"self",
".",
"start",
"[",
"1",
"]",
"+",
"1",
",",
"self",
".",
"end",
"[",
"1",
"]",
")",
":",
"current_year",
"=",
"table_row",
"[",
"column_index",
"]",
"# Quit if we see",
"if",
"not",
"self",
".",
"_check_years",
"(",
"current_year",
",",
"prior_year",
")",
":",
"return",
"# Only copy when we see a non-empty entry",
"if",
"current_year",
":",
"prior_year",
"=",
"current_year",
"# If we have a title of years, convert them to strings",
"self",
".",
"_stringify_row",
"(",
"row_index",
")"
] |
Checks the given row to see if it is labeled year data and fills any blank years within that
data.
|
[
"Checks",
"the",
"given",
"row",
"to",
"see",
"if",
"it",
"is",
"labeled",
"year",
"data",
"and",
"fills",
"any",
"blank",
"years",
"within",
"that",
"data",
"."
] |
0ab3c54c05133b9b0468c63e834a7ce3a6fb575b
|
https://github.com/OpenGov/carpenter/blob/0ab3c54c05133b9b0468c63e834a7ce3a6fb575b/carpenter/blocks/block.py#L492-L510
|
242,068
|
OpenGov/carpenter
|
carpenter/blocks/block.py
|
BlockValidator._check_stringify_year_column
|
def _check_stringify_year_column(self, column_index):
'''
Same as _check_stringify_year_row but for columns.
'''
table_column = TableTranspose(self.table)[column_index]
# State trackers
prior_year = None
for row_index in range(self.start[0]+1, self.end[0]):
current_year = table_column[row_index]
if not self._check_years(current_year, prior_year):
return
# Only copy when we see a non-empty entry
if current_year:
prior_year = current_year
# If we have a title of years, convert them to strings
self._stringify_column(column_index)
|
python
|
def _check_stringify_year_column(self, column_index):
'''
Same as _check_stringify_year_row but for columns.
'''
table_column = TableTranspose(self.table)[column_index]
# State trackers
prior_year = None
for row_index in range(self.start[0]+1, self.end[0]):
current_year = table_column[row_index]
if not self._check_years(current_year, prior_year):
return
# Only copy when we see a non-empty entry
if current_year:
prior_year = current_year
# If we have a title of years, convert them to strings
self._stringify_column(column_index)
|
[
"def",
"_check_stringify_year_column",
"(",
"self",
",",
"column_index",
")",
":",
"table_column",
"=",
"TableTranspose",
"(",
"self",
".",
"table",
")",
"[",
"column_index",
"]",
"# State trackers",
"prior_year",
"=",
"None",
"for",
"row_index",
"in",
"range",
"(",
"self",
".",
"start",
"[",
"0",
"]",
"+",
"1",
",",
"self",
".",
"end",
"[",
"0",
"]",
")",
":",
"current_year",
"=",
"table_column",
"[",
"row_index",
"]",
"if",
"not",
"self",
".",
"_check_years",
"(",
"current_year",
",",
"prior_year",
")",
":",
"return",
"# Only copy when we see a non-empty entry",
"if",
"current_year",
":",
"prior_year",
"=",
"current_year",
"# If we have a title of years, convert them to strings",
"self",
".",
"_stringify_column",
"(",
"column_index",
")"
] |
Same as _check_stringify_year_row but for columns.
|
[
"Same",
"as",
"_check_stringify_year_row",
"but",
"for",
"columns",
"."
] |
0ab3c54c05133b9b0468c63e834a7ce3a6fb575b
|
https://github.com/OpenGov/carpenter/blob/0ab3c54c05133b9b0468c63e834a7ce3a6fb575b/carpenter/blocks/block.py#L512-L528
|
242,069
|
OpenGov/carpenter
|
carpenter/blocks/block.py
|
BlockValidator._check_years
|
def _check_years(self, cell, prior_year):
'''
Helper method which defines the rules for checking for existence of a year indicator. If the
cell is blank then prior_year is used to determine validity.
'''
# Anything outside these values shouldn't auto
# categorize to strings
min_year = 1900
max_year = 2100
# Empty cells could represent the prior cell's title,
# but an empty cell before we find a year is not a title
if is_empty_cell(cell):
return bool(prior_year)
# Check if we have a numbered cell between min and max years
return is_num_cell(cell) and cell > min_year and cell < max_year
|
python
|
def _check_years(self, cell, prior_year):
'''
Helper method which defines the rules for checking for existence of a year indicator. If the
cell is blank then prior_year is used to determine validity.
'''
# Anything outside these values shouldn't auto
# categorize to strings
min_year = 1900
max_year = 2100
# Empty cells could represent the prior cell's title,
# but an empty cell before we find a year is not a title
if is_empty_cell(cell):
return bool(prior_year)
# Check if we have a numbered cell between min and max years
return is_num_cell(cell) and cell > min_year and cell < max_year
|
[
"def",
"_check_years",
"(",
"self",
",",
"cell",
",",
"prior_year",
")",
":",
"# Anything outside these values shouldn't auto",
"# categorize to strings",
"min_year",
"=",
"1900",
"max_year",
"=",
"2100",
"# Empty cells could represent the prior cell's title,",
"# but an empty cell before we find a year is not a title",
"if",
"is_empty_cell",
"(",
"cell",
")",
":",
"return",
"bool",
"(",
"prior_year",
")",
"# Check if we have a numbered cell between min and max years",
"return",
"is_num_cell",
"(",
"cell",
")",
"and",
"cell",
">",
"min_year",
"and",
"cell",
"<",
"max_year"
] |
Helper method which defines the rules for checking for existence of a year indicator. If the
cell is blank then prior_year is used to determine validity.
|
[
"Helper",
"method",
"which",
"defines",
"the",
"rules",
"for",
"checking",
"for",
"existence",
"of",
"a",
"year",
"indicator",
".",
"If",
"the",
"cell",
"is",
"blank",
"then",
"prior_year",
"is",
"used",
"to",
"determine",
"validity",
"."
] |
0ab3c54c05133b9b0468c63e834a7ce3a6fb575b
|
https://github.com/OpenGov/carpenter/blob/0ab3c54c05133b9b0468c63e834a7ce3a6fb575b/carpenter/blocks/block.py#L530-L545
|
242,070
|
FlorianLudwig/rueckenwind
|
rw/static.py
|
file_hash
|
def file_hash(content):
"""Generate hash for file or string and avoid strings starting with "ad"
to workaround ad blocks being over aggressiv.
The current implementation is based on sha256.
:param str|FileIO content: The content to hash, either as string or as file-like object
"""
h = hashlib.sha256()
if isinstance(content, bytes_type):
h.update(content)
else:
data = True
while data:
data = content.read(1024 * 1024)
h.update(data)
h_digest = h.digest()
# base64url
# | char | substitute |
# | + | - |
# | / | _ |
#
result = base64.b64encode(h_digest, altchars=b'-_')
# ensure this is a str object in 3.x
result = result.decode('ascii')
result = result.rstrip('=')
if result[:2].lower() == 'ad':
# workaround adblockers blocking everything starting with "ad"
# by replacing the "d" with another charackter
if result[1] == 'd':
result = result[0] + '~' + result[2:]
else:
# upper case D
result = result[0] + '.' + result[2:]
return result
|
python
|
def file_hash(content):
"""Generate hash for file or string and avoid strings starting with "ad"
to workaround ad blocks being over aggressiv.
The current implementation is based on sha256.
:param str|FileIO content: The content to hash, either as string or as file-like object
"""
h = hashlib.sha256()
if isinstance(content, bytes_type):
h.update(content)
else:
data = True
while data:
data = content.read(1024 * 1024)
h.update(data)
h_digest = h.digest()
# base64url
# | char | substitute |
# | + | - |
# | / | _ |
#
result = base64.b64encode(h_digest, altchars=b'-_')
# ensure this is a str object in 3.x
result = result.decode('ascii')
result = result.rstrip('=')
if result[:2].lower() == 'ad':
# workaround adblockers blocking everything starting with "ad"
# by replacing the "d" with another charackter
if result[1] == 'd':
result = result[0] + '~' + result[2:]
else:
# upper case D
result = result[0] + '.' + result[2:]
return result
|
[
"def",
"file_hash",
"(",
"content",
")",
":",
"h",
"=",
"hashlib",
".",
"sha256",
"(",
")",
"if",
"isinstance",
"(",
"content",
",",
"bytes_type",
")",
":",
"h",
".",
"update",
"(",
"content",
")",
"else",
":",
"data",
"=",
"True",
"while",
"data",
":",
"data",
"=",
"content",
".",
"read",
"(",
"1024",
"*",
"1024",
")",
"h",
".",
"update",
"(",
"data",
")",
"h_digest",
"=",
"h",
".",
"digest",
"(",
")",
"# base64url",
"# | char | substitute |",
"# | + | - |",
"# | / | _ |",
"#",
"result",
"=",
"base64",
".",
"b64encode",
"(",
"h_digest",
",",
"altchars",
"=",
"b'-_'",
")",
"# ensure this is a str object in 3.x",
"result",
"=",
"result",
".",
"decode",
"(",
"'ascii'",
")",
"result",
"=",
"result",
".",
"rstrip",
"(",
"'='",
")",
"if",
"result",
"[",
":",
"2",
"]",
".",
"lower",
"(",
")",
"==",
"'ad'",
":",
"# workaround adblockers blocking everything starting with \"ad\"",
"# by replacing the \"d\" with another charackter",
"if",
"result",
"[",
"1",
"]",
"==",
"'d'",
":",
"result",
"=",
"result",
"[",
"0",
"]",
"+",
"'~'",
"+",
"result",
"[",
"2",
":",
"]",
"else",
":",
"# upper case D",
"result",
"=",
"result",
"[",
"0",
"]",
"+",
"'.'",
"+",
"result",
"[",
"2",
":",
"]",
"return",
"result"
] |
Generate hash for file or string and avoid strings starting with "ad"
to workaround ad blocks being over aggressiv.
The current implementation is based on sha256.
:param str|FileIO content: The content to hash, either as string or as file-like object
|
[
"Generate",
"hash",
"for",
"file",
"or",
"string",
"and",
"avoid",
"strings",
"starting",
"with",
"ad",
"to",
"workaround",
"ad",
"blocks",
"being",
"over",
"aggressiv",
"."
] |
47fec7af05ea10b3cf6d59b9f7bf4d12c02dddea
|
https://github.com/FlorianLudwig/rueckenwind/blob/47fec7af05ea10b3cf6d59b9f7bf4d12c02dddea/rw/static.py#L68-L104
|
242,071
|
FlorianLudwig/rueckenwind
|
rw/static.py
|
init
|
def init(scope, app, settings):
"""Plugin for serving static files in development mode"""
cfg = settings.get('rw.static', {})
static = Static()
scope['static'] = static
scope['template_env'].globals['static'] = static
for base_uri, sources in cfg.items():
full_paths = []
for source in sources:
if isinstance(source, dict):
full_path = source['path']
full_paths.append(full_path.format(**os.environ))
continue
elif ',' in source:
module_name, path = [part.strip()
for part in source.split(',')]
else:
module_name = source
path = 'static'
full_path = pkg_resources.resource_filename(module_name, path)
full_paths.append(full_path)
app.root.mount('/' + base_uri + '/<h>/<path:path>',
StaticHandler, {'path': full_paths},
name='static_' + base_uri.replace('.', '_'))
static.handlers.append((base_uri, StaticHandler, full_paths))
static.setup()
|
python
|
def init(scope, app, settings):
"""Plugin for serving static files in development mode"""
cfg = settings.get('rw.static', {})
static = Static()
scope['static'] = static
scope['template_env'].globals['static'] = static
for base_uri, sources in cfg.items():
full_paths = []
for source in sources:
if isinstance(source, dict):
full_path = source['path']
full_paths.append(full_path.format(**os.environ))
continue
elif ',' in source:
module_name, path = [part.strip()
for part in source.split(',')]
else:
module_name = source
path = 'static'
full_path = pkg_resources.resource_filename(module_name, path)
full_paths.append(full_path)
app.root.mount('/' + base_uri + '/<h>/<path:path>',
StaticHandler, {'path': full_paths},
name='static_' + base_uri.replace('.', '_'))
static.handlers.append((base_uri, StaticHandler, full_paths))
static.setup()
|
[
"def",
"init",
"(",
"scope",
",",
"app",
",",
"settings",
")",
":",
"cfg",
"=",
"settings",
".",
"get",
"(",
"'rw.static'",
",",
"{",
"}",
")",
"static",
"=",
"Static",
"(",
")",
"scope",
"[",
"'static'",
"]",
"=",
"static",
"scope",
"[",
"'template_env'",
"]",
".",
"globals",
"[",
"'static'",
"]",
"=",
"static",
"for",
"base_uri",
",",
"sources",
"in",
"cfg",
".",
"items",
"(",
")",
":",
"full_paths",
"=",
"[",
"]",
"for",
"source",
"in",
"sources",
":",
"if",
"isinstance",
"(",
"source",
",",
"dict",
")",
":",
"full_path",
"=",
"source",
"[",
"'path'",
"]",
"full_paths",
".",
"append",
"(",
"full_path",
".",
"format",
"(",
"*",
"*",
"os",
".",
"environ",
")",
")",
"continue",
"elif",
"','",
"in",
"source",
":",
"module_name",
",",
"path",
"=",
"[",
"part",
".",
"strip",
"(",
")",
"for",
"part",
"in",
"source",
".",
"split",
"(",
"','",
")",
"]",
"else",
":",
"module_name",
"=",
"source",
"path",
"=",
"'static'",
"full_path",
"=",
"pkg_resources",
".",
"resource_filename",
"(",
"module_name",
",",
"path",
")",
"full_paths",
".",
"append",
"(",
"full_path",
")",
"app",
".",
"root",
".",
"mount",
"(",
"'/'",
"+",
"base_uri",
"+",
"'/<h>/<path:path>'",
",",
"StaticHandler",
",",
"{",
"'path'",
":",
"full_paths",
"}",
",",
"name",
"=",
"'static_'",
"+",
"base_uri",
".",
"replace",
"(",
"'.'",
",",
"'_'",
")",
")",
"static",
".",
"handlers",
".",
"append",
"(",
"(",
"base_uri",
",",
"StaticHandler",
",",
"full_paths",
")",
")",
"static",
".",
"setup",
"(",
")"
] |
Plugin for serving static files in development mode
|
[
"Plugin",
"for",
"serving",
"static",
"files",
"in",
"development",
"mode"
] |
47fec7af05ea10b3cf6d59b9f7bf4d12c02dddea
|
https://github.com/FlorianLudwig/rueckenwind/blob/47fec7af05ea10b3cf6d59b9f7bf4d12c02dddea/rw/static.py#L151-L178
|
242,072
|
FlorianLudwig/rueckenwind
|
rw/static.py
|
StaticHandler.get_absolute_path
|
def get_absolute_path(cls, roots, path):
"""Returns the absolute location of ``path`` relative to one of
the ``roots``.
``roots`` is the path configured for this `StaticFileHandler`
(in most cases the ``static_path`` `Application` setting).
"""
for root in roots:
abspath = os.path.abspath(os.path.join(root, path))
if abspath.startswith(root) and os.path.exists(abspath):
return abspath
# XXX TODO
return 'file-not-found'
|
python
|
def get_absolute_path(cls, roots, path):
"""Returns the absolute location of ``path`` relative to one of
the ``roots``.
``roots`` is the path configured for this `StaticFileHandler`
(in most cases the ``static_path`` `Application` setting).
"""
for root in roots:
abspath = os.path.abspath(os.path.join(root, path))
if abspath.startswith(root) and os.path.exists(abspath):
return abspath
# XXX TODO
return 'file-not-found'
|
[
"def",
"get_absolute_path",
"(",
"cls",
",",
"roots",
",",
"path",
")",
":",
"for",
"root",
"in",
"roots",
":",
"abspath",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"os",
".",
"path",
".",
"join",
"(",
"root",
",",
"path",
")",
")",
"if",
"abspath",
".",
"startswith",
"(",
"root",
")",
"and",
"os",
".",
"path",
".",
"exists",
"(",
"abspath",
")",
":",
"return",
"abspath",
"# XXX TODO",
"return",
"'file-not-found'"
] |
Returns the absolute location of ``path`` relative to one of
the ``roots``.
``roots`` is the path configured for this `StaticFileHandler`
(in most cases the ``static_path`` `Application` setting).
|
[
"Returns",
"the",
"absolute",
"location",
"of",
"path",
"relative",
"to",
"one",
"of",
"the",
"roots",
"."
] |
47fec7af05ea10b3cf6d59b9f7bf4d12c02dddea
|
https://github.com/FlorianLudwig/rueckenwind/blob/47fec7af05ea10b3cf6d59b9f7bf4d12c02dddea/rw/static.py#L25-L37
|
242,073
|
BlueHack-Core/blueforge
|
blueforge/apis/s3.py
|
S3.upload_file_to_bucket
|
def upload_file_to_bucket(self, bucket, file_path, key, is_public=False):
""" Upload files to S3 Bucket """
with open(file_path, 'rb') as data:
self.__s3.upload_fileobj(data, bucket, key)
if is_public:
self.__s3.put_object_acl(ACL='public-read', Bucket=bucket, Key=key)
bucket_location = self.__s3.get_bucket_location(Bucket=bucket)
file_url = "https://s3-{0}.amazonaws.com/{1}/{2}".format(
bucket_location['LocationConstraint'],
bucket,
key)
return file_url
|
python
|
def upload_file_to_bucket(self, bucket, file_path, key, is_public=False):
""" Upload files to S3 Bucket """
with open(file_path, 'rb') as data:
self.__s3.upload_fileobj(data, bucket, key)
if is_public:
self.__s3.put_object_acl(ACL='public-read', Bucket=bucket, Key=key)
bucket_location = self.__s3.get_bucket_location(Bucket=bucket)
file_url = "https://s3-{0}.amazonaws.com/{1}/{2}".format(
bucket_location['LocationConstraint'],
bucket,
key)
return file_url
|
[
"def",
"upload_file_to_bucket",
"(",
"self",
",",
"bucket",
",",
"file_path",
",",
"key",
",",
"is_public",
"=",
"False",
")",
":",
"with",
"open",
"(",
"file_path",
",",
"'rb'",
")",
"as",
"data",
":",
"self",
".",
"__s3",
".",
"upload_fileobj",
"(",
"data",
",",
"bucket",
",",
"key",
")",
"if",
"is_public",
":",
"self",
".",
"__s3",
".",
"put_object_acl",
"(",
"ACL",
"=",
"'public-read'",
",",
"Bucket",
"=",
"bucket",
",",
"Key",
"=",
"key",
")",
"bucket_location",
"=",
"self",
".",
"__s3",
".",
"get_bucket_location",
"(",
"Bucket",
"=",
"bucket",
")",
"file_url",
"=",
"\"https://s3-{0}.amazonaws.com/{1}/{2}\"",
".",
"format",
"(",
"bucket_location",
"[",
"'LocationConstraint'",
"]",
",",
"bucket",
",",
"key",
")",
"return",
"file_url"
] |
Upload files to S3 Bucket
|
[
"Upload",
"files",
"to",
"S3",
"Bucket"
] |
ac40a888ee9c388638a8f312c51f7500b8891b6c
|
https://github.com/BlueHack-Core/blueforge/blob/ac40a888ee9c388638a8f312c51f7500b8891b6c/blueforge/apis/s3.py#L8-L23
|
242,074
|
BlueHack-Core/blueforge
|
blueforge/apis/s3.py
|
S3.download_file_from_bucket
|
def download_file_from_bucket(self, bucket, file_path, key):
""" Download file from S3 Bucket """
with open(file_path, 'wb') as data:
self.__s3.download_fileobj(bucket, key, data)
return file_path
|
python
|
def download_file_from_bucket(self, bucket, file_path, key):
""" Download file from S3 Bucket """
with open(file_path, 'wb') as data:
self.__s3.download_fileobj(bucket, key, data)
return file_path
|
[
"def",
"download_file_from_bucket",
"(",
"self",
",",
"bucket",
",",
"file_path",
",",
"key",
")",
":",
"with",
"open",
"(",
"file_path",
",",
"'wb'",
")",
"as",
"data",
":",
"self",
".",
"__s3",
".",
"download_fileobj",
"(",
"bucket",
",",
"key",
",",
"data",
")",
"return",
"file_path"
] |
Download file from S3 Bucket
|
[
"Download",
"file",
"from",
"S3",
"Bucket"
] |
ac40a888ee9c388638a8f312c51f7500b8891b6c
|
https://github.com/BlueHack-Core/blueforge/blob/ac40a888ee9c388638a8f312c51f7500b8891b6c/blueforge/apis/s3.py#L25-L29
|
242,075
|
edwards-lab/libGWAS
|
libgwas/parsed_locus.py
|
ParsedLocus.next
|
def next(self):
"""Move to the next valid locus.
Will only return valid loci or exit via StopIteration exception
"""
while True:
self.cur_idx += 1
if self.__datasource.populate_iteration(self):
return self
raise StopIteration
|
python
|
def next(self):
"""Move to the next valid locus.
Will only return valid loci or exit via StopIteration exception
"""
while True:
self.cur_idx += 1
if self.__datasource.populate_iteration(self):
return self
raise StopIteration
|
[
"def",
"next",
"(",
"self",
")",
":",
"while",
"True",
":",
"self",
".",
"cur_idx",
"+=",
"1",
"if",
"self",
".",
"__datasource",
".",
"populate_iteration",
"(",
"self",
")",
":",
"return",
"self",
"raise",
"StopIteration"
] |
Move to the next valid locus.
Will only return valid loci or exit via StopIteration exception
|
[
"Move",
"to",
"the",
"next",
"valid",
"locus",
"."
] |
d68c9a083d443dfa5d7c5112de29010909cfe23f
|
https://github.com/edwards-lab/libGWAS/blob/d68c9a083d443dfa5d7c5112de29010909cfe23f/libgwas/parsed_locus.py#L38-L49
|
242,076
|
shkarupa-alex/tfunicode
|
tfunicode/python/ops/__init__.py
|
_combine_sparse_successor
|
def _combine_sparse_successor(parent_indices, parent_shape, child_indices, child_values, child_shape, name=None):
"""Combines two string `SparseTensor`s, where second `SparseTensor` is the result of expanding
first `SparseTensor`'s values.
Args:
parent_indices: 2D int64 `Tensor` with parent `SparseTensor` indices
parent_shape: 1D int64 `Tensor` with parent `SparseTensor` dense_shape
child_indices: 2D int64 `Tensor` with child `SparseTensor` indices
child_values: 1D int64 `Tensor` with child `SparseTensor` values
child_shape: 1D int64 `Tensor` with child `SparseTensor` dense_shape
name: A name for the operation (optional).
Returns:
`SparseTensor` with an additional dimension of size 1 added.
"""
with ops.name_scope(name, "CombineSparseSuccessor",
[parent_indices, parent_shape, child_indices, child_values, child_shape]):
indices, values, shape = ops_module.combine_sparse_successor(
parent_indices,
parent_shape,
child_indices,
child_values,
child_shape
)
return tf.SparseTensor(indices=indices, values=values, dense_shape=shape)
|
python
|
def _combine_sparse_successor(parent_indices, parent_shape, child_indices, child_values, child_shape, name=None):
"""Combines two string `SparseTensor`s, where second `SparseTensor` is the result of expanding
first `SparseTensor`'s values.
Args:
parent_indices: 2D int64 `Tensor` with parent `SparseTensor` indices
parent_shape: 1D int64 `Tensor` with parent `SparseTensor` dense_shape
child_indices: 2D int64 `Tensor` with child `SparseTensor` indices
child_values: 1D int64 `Tensor` with child `SparseTensor` values
child_shape: 1D int64 `Tensor` with child `SparseTensor` dense_shape
name: A name for the operation (optional).
Returns:
`SparseTensor` with an additional dimension of size 1 added.
"""
with ops.name_scope(name, "CombineSparseSuccessor",
[parent_indices, parent_shape, child_indices, child_values, child_shape]):
indices, values, shape = ops_module.combine_sparse_successor(
parent_indices,
parent_shape,
child_indices,
child_values,
child_shape
)
return tf.SparseTensor(indices=indices, values=values, dense_shape=shape)
|
[
"def",
"_combine_sparse_successor",
"(",
"parent_indices",
",",
"parent_shape",
",",
"child_indices",
",",
"child_values",
",",
"child_shape",
",",
"name",
"=",
"None",
")",
":",
"with",
"ops",
".",
"name_scope",
"(",
"name",
",",
"\"CombineSparseSuccessor\"",
",",
"[",
"parent_indices",
",",
"parent_shape",
",",
"child_indices",
",",
"child_values",
",",
"child_shape",
"]",
")",
":",
"indices",
",",
"values",
",",
"shape",
"=",
"ops_module",
".",
"combine_sparse_successor",
"(",
"parent_indices",
",",
"parent_shape",
",",
"child_indices",
",",
"child_values",
",",
"child_shape",
")",
"return",
"tf",
".",
"SparseTensor",
"(",
"indices",
"=",
"indices",
",",
"values",
"=",
"values",
",",
"dense_shape",
"=",
"shape",
")"
] |
Combines two string `SparseTensor`s, where second `SparseTensor` is the result of expanding
first `SparseTensor`'s values.
Args:
parent_indices: 2D int64 `Tensor` with parent `SparseTensor` indices
parent_shape: 1D int64 `Tensor` with parent `SparseTensor` dense_shape
child_indices: 2D int64 `Tensor` with child `SparseTensor` indices
child_values: 1D int64 `Tensor` with child `SparseTensor` values
child_shape: 1D int64 `Tensor` with child `SparseTensor` dense_shape
name: A name for the operation (optional).
Returns:
`SparseTensor` with an additional dimension of size 1 added.
|
[
"Combines",
"two",
"string",
"SparseTensor",
"s",
"where",
"second",
"SparseTensor",
"is",
"the",
"result",
"of",
"expanding",
"first",
"SparseTensor",
"s",
"values",
"."
] |
72ee2f484b6202394dcda3db47245bc78ae2267d
|
https://github.com/shkarupa-alex/tfunicode/blob/72ee2f484b6202394dcda3db47245bc78ae2267d/tfunicode/python/ops/__init__.py#L26-L51
|
242,077
|
shkarupa-alex/tfunicode
|
tfunicode/python/ops/__init__.py
|
expand_char_ngrams
|
def expand_char_ngrams(source, minn, maxn, itself='ASIS', name=None):
"""Split unicode strings into char ngrams.
Ngrams size configures with minn and max
Args:
source: `Tensor` or `SparseTensor` of any shape, strings to split
minn: Minimum length of char ngram
minn: Maximum length of char ngram
itself: Scalar value, strategy for source word preserving.
One of `"ASIS"`, `"NEVER"`, `"ALWAYS"`, `"ALONE"`.
name: A name for the operation (optional).
Returns:
`SparseTensor` with an additional dimension of size 1 added.
"""
with ops.name_scope(name, "ExpandCharNgrams", [source]):
source = convert_to_tensor_or_sparse_tensor(source, dtype=tf.string)
if isinstance(source, tf.SparseTensor):
child_indices, child_values, child_shape = ops_module.expand_char_ngrams(source.values, minn, maxn, itself)
result = _combine_sparse_successor(source.indices, source.dense_shape, child_indices, child_values,
child_shape)
else:
indices, values, shape = ops_module.expand_char_ngrams(source, minn, maxn, itself)
result = tf.SparseTensor(indices=indices, values=values, dense_shape=shape)
return result
|
python
|
def expand_char_ngrams(source, minn, maxn, itself='ASIS', name=None):
"""Split unicode strings into char ngrams.
Ngrams size configures with minn and max
Args:
source: `Tensor` or `SparseTensor` of any shape, strings to split
minn: Minimum length of char ngram
minn: Maximum length of char ngram
itself: Scalar value, strategy for source word preserving.
One of `"ASIS"`, `"NEVER"`, `"ALWAYS"`, `"ALONE"`.
name: A name for the operation (optional).
Returns:
`SparseTensor` with an additional dimension of size 1 added.
"""
with ops.name_scope(name, "ExpandCharNgrams", [source]):
source = convert_to_tensor_or_sparse_tensor(source, dtype=tf.string)
if isinstance(source, tf.SparseTensor):
child_indices, child_values, child_shape = ops_module.expand_char_ngrams(source.values, minn, maxn, itself)
result = _combine_sparse_successor(source.indices, source.dense_shape, child_indices, child_values,
child_shape)
else:
indices, values, shape = ops_module.expand_char_ngrams(source, minn, maxn, itself)
result = tf.SparseTensor(indices=indices, values=values, dense_shape=shape)
return result
|
[
"def",
"expand_char_ngrams",
"(",
"source",
",",
"minn",
",",
"maxn",
",",
"itself",
"=",
"'ASIS'",
",",
"name",
"=",
"None",
")",
":",
"with",
"ops",
".",
"name_scope",
"(",
"name",
",",
"\"ExpandCharNgrams\"",
",",
"[",
"source",
"]",
")",
":",
"source",
"=",
"convert_to_tensor_or_sparse_tensor",
"(",
"source",
",",
"dtype",
"=",
"tf",
".",
"string",
")",
"if",
"isinstance",
"(",
"source",
",",
"tf",
".",
"SparseTensor",
")",
":",
"child_indices",
",",
"child_values",
",",
"child_shape",
"=",
"ops_module",
".",
"expand_char_ngrams",
"(",
"source",
".",
"values",
",",
"minn",
",",
"maxn",
",",
"itself",
")",
"result",
"=",
"_combine_sparse_successor",
"(",
"source",
".",
"indices",
",",
"source",
".",
"dense_shape",
",",
"child_indices",
",",
"child_values",
",",
"child_shape",
")",
"else",
":",
"indices",
",",
"values",
",",
"shape",
"=",
"ops_module",
".",
"expand_char_ngrams",
"(",
"source",
",",
"minn",
",",
"maxn",
",",
"itself",
")",
"result",
"=",
"tf",
".",
"SparseTensor",
"(",
"indices",
"=",
"indices",
",",
"values",
"=",
"values",
",",
"dense_shape",
"=",
"shape",
")",
"return",
"result"
] |
Split unicode strings into char ngrams.
Ngrams size configures with minn and max
Args:
source: `Tensor` or `SparseTensor` of any shape, strings to split
minn: Minimum length of char ngram
minn: Maximum length of char ngram
itself: Scalar value, strategy for source word preserving.
One of `"ASIS"`, `"NEVER"`, `"ALWAYS"`, `"ALONE"`.
name: A name for the operation (optional).
Returns:
`SparseTensor` with an additional dimension of size 1 added.
|
[
"Split",
"unicode",
"strings",
"into",
"char",
"ngrams",
".",
"Ngrams",
"size",
"configures",
"with",
"minn",
"and",
"max"
] |
72ee2f484b6202394dcda3db47245bc78ae2267d
|
https://github.com/shkarupa-alex/tfunicode/blob/72ee2f484b6202394dcda3db47245bc78ae2267d/tfunicode/python/ops/__init__.py#L54-L79
|
242,078
|
shkarupa-alex/tfunicode
|
tfunicode/python/ops/__init__.py
|
transform_normalize_unicode
|
def transform_normalize_unicode(source, form, name=None):
"""Normalize unicode strings tensor.
Args:
source: `Tensor` or `SparseTensor` of any shape, strings to normalize.
form: Scalar value, name of normalization algorithm.
One of `"NFD"`, `"NFC"`, `"NFKD"`, `"NFKC"`.
name: A name for the operation (optional).
Returns:
`Tensor` or `SparseTensor` of same shape and size as input.
"""
with ops.name_scope(name, "TransformNormalizeUnicode", [source]):
source = convert_to_tensor_or_sparse_tensor(source, dtype=tf.string)
if isinstance(source, tf.SparseTensor):
result = tf.SparseTensor(
indices=source.indices,
values=ops_module.transform_normalize_unicode(source.values, form),
dense_shape=source.dense_shape
)
else:
result = ops_module.transform_normalize_unicode(source, form)
return result
|
python
|
def transform_normalize_unicode(source, form, name=None):
"""Normalize unicode strings tensor.
Args:
source: `Tensor` or `SparseTensor` of any shape, strings to normalize.
form: Scalar value, name of normalization algorithm.
One of `"NFD"`, `"NFC"`, `"NFKD"`, `"NFKC"`.
name: A name for the operation (optional).
Returns:
`Tensor` or `SparseTensor` of same shape and size as input.
"""
with ops.name_scope(name, "TransformNormalizeUnicode", [source]):
source = convert_to_tensor_or_sparse_tensor(source, dtype=tf.string)
if isinstance(source, tf.SparseTensor):
result = tf.SparseTensor(
indices=source.indices,
values=ops_module.transform_normalize_unicode(source.values, form),
dense_shape=source.dense_shape
)
else:
result = ops_module.transform_normalize_unicode(source, form)
return result
|
[
"def",
"transform_normalize_unicode",
"(",
"source",
",",
"form",
",",
"name",
"=",
"None",
")",
":",
"with",
"ops",
".",
"name_scope",
"(",
"name",
",",
"\"TransformNormalizeUnicode\"",
",",
"[",
"source",
"]",
")",
":",
"source",
"=",
"convert_to_tensor_or_sparse_tensor",
"(",
"source",
",",
"dtype",
"=",
"tf",
".",
"string",
")",
"if",
"isinstance",
"(",
"source",
",",
"tf",
".",
"SparseTensor",
")",
":",
"result",
"=",
"tf",
".",
"SparseTensor",
"(",
"indices",
"=",
"source",
".",
"indices",
",",
"values",
"=",
"ops_module",
".",
"transform_normalize_unicode",
"(",
"source",
".",
"values",
",",
"form",
")",
",",
"dense_shape",
"=",
"source",
".",
"dense_shape",
")",
"else",
":",
"result",
"=",
"ops_module",
".",
"transform_normalize_unicode",
"(",
"source",
",",
"form",
")",
"return",
"result"
] |
Normalize unicode strings tensor.
Args:
source: `Tensor` or `SparseTensor` of any shape, strings to normalize.
form: Scalar value, name of normalization algorithm.
One of `"NFD"`, `"NFC"`, `"NFKD"`, `"NFKC"`.
name: A name for the operation (optional).
Returns:
`Tensor` or `SparseTensor` of same shape and size as input.
|
[
"Normalize",
"unicode",
"strings",
"tensor",
"."
] |
72ee2f484b6202394dcda3db47245bc78ae2267d
|
https://github.com/shkarupa-alex/tfunicode/blob/72ee2f484b6202394dcda3db47245bc78ae2267d/tfunicode/python/ops/__init__.py#L131-L154
|
242,079
|
shkarupa-alex/tfunicode
|
tfunicode/python/ops/__init__.py
|
transform_wrap_with
|
def transform_wrap_with(source, left, right, name=None):
"""Wrap source strings with "left" and "right" strings
Args:
source: `Tensor` or `SparseTensor` of any shape, strings to replace digits.
left: Scalar string to add in the beginning
right: Scalar string to add in the ending
name: A name for the operation (optional).
Returns:
`SparseTensor` of same shape and size as input.
"""
with ops.name_scope(name, "TransformWrapWith", [source]):
source = convert_to_tensor_or_sparse_tensor(source, dtype=tf.string)
if isinstance(source, tf.SparseTensor):
result = tf.SparseTensor(
indices=source.indices,
values=ops_module.transform_wrap_with(source.values, left, right),
dense_shape=source.dense_shape
)
else:
result = ops_module.transform_wrap_with(source, left, right)
return result
|
python
|
def transform_wrap_with(source, left, right, name=None):
"""Wrap source strings with "left" and "right" strings
Args:
source: `Tensor` or `SparseTensor` of any shape, strings to replace digits.
left: Scalar string to add in the beginning
right: Scalar string to add in the ending
name: A name for the operation (optional).
Returns:
`SparseTensor` of same shape and size as input.
"""
with ops.name_scope(name, "TransformWrapWith", [source]):
source = convert_to_tensor_or_sparse_tensor(source, dtype=tf.string)
if isinstance(source, tf.SparseTensor):
result = tf.SparseTensor(
indices=source.indices,
values=ops_module.transform_wrap_with(source.values, left, right),
dense_shape=source.dense_shape
)
else:
result = ops_module.transform_wrap_with(source, left, right)
return result
|
[
"def",
"transform_wrap_with",
"(",
"source",
",",
"left",
",",
"right",
",",
"name",
"=",
"None",
")",
":",
"with",
"ops",
".",
"name_scope",
"(",
"name",
",",
"\"TransformWrapWith\"",
",",
"[",
"source",
"]",
")",
":",
"source",
"=",
"convert_to_tensor_or_sparse_tensor",
"(",
"source",
",",
"dtype",
"=",
"tf",
".",
"string",
")",
"if",
"isinstance",
"(",
"source",
",",
"tf",
".",
"SparseTensor",
")",
":",
"result",
"=",
"tf",
".",
"SparseTensor",
"(",
"indices",
"=",
"source",
".",
"indices",
",",
"values",
"=",
"ops_module",
".",
"transform_wrap_with",
"(",
"source",
".",
"values",
",",
"left",
",",
"right",
")",
",",
"dense_shape",
"=",
"source",
".",
"dense_shape",
")",
"else",
":",
"result",
"=",
"ops_module",
".",
"transform_wrap_with",
"(",
"source",
",",
"left",
",",
"right",
")",
"return",
"result"
] |
Wrap source strings with "left" and "right" strings
Args:
source: `Tensor` or `SparseTensor` of any shape, strings to replace digits.
left: Scalar string to add in the beginning
right: Scalar string to add in the ending
name: A name for the operation (optional).
Returns:
`SparseTensor` of same shape and size as input.
|
[
"Wrap",
"source",
"strings",
"with",
"left",
"and",
"right",
"strings"
] |
72ee2f484b6202394dcda3db47245bc78ae2267d
|
https://github.com/shkarupa-alex/tfunicode/blob/72ee2f484b6202394dcda3db47245bc78ae2267d/tfunicode/python/ops/__init__.py#L305-L328
|
242,080
|
Th3Gam3rz/UsefulUtils
|
src/math.py
|
specialRound
|
def specialRound(number, rounding):
"""A method used to round a number in the way that UsefulUtils rounds."""
temp = 0
if rounding == 0:
temp = number
else:
temp = round(number, rounding)
if temp % 1 == 0:
return int(temp)
else:
return float(temp)
|
python
|
def specialRound(number, rounding):
"""A method used to round a number in the way that UsefulUtils rounds."""
temp = 0
if rounding == 0:
temp = number
else:
temp = round(number, rounding)
if temp % 1 == 0:
return int(temp)
else:
return float(temp)
|
[
"def",
"specialRound",
"(",
"number",
",",
"rounding",
")",
":",
"temp",
"=",
"0",
"if",
"rounding",
"==",
"0",
":",
"temp",
"=",
"number",
"else",
":",
"temp",
"=",
"round",
"(",
"number",
",",
"rounding",
")",
"if",
"temp",
"%",
"1",
"==",
"0",
":",
"return",
"int",
"(",
"temp",
")",
"else",
":",
"return",
"float",
"(",
"temp",
")"
] |
A method used to round a number in the way that UsefulUtils rounds.
|
[
"A",
"method",
"used",
"to",
"round",
"a",
"number",
"in",
"the",
"way",
"that",
"UsefulUtils",
"rounds",
"."
] |
6811af3daa88b42d76c4db372b58e2739d1f5595
|
https://github.com/Th3Gam3rz/UsefulUtils/blob/6811af3daa88b42d76c4db372b58e2739d1f5595/src/math.py#L1-L11
|
242,081
|
Nekroze/librarian
|
librarian/library.py
|
Where_filter_gen
|
def Where_filter_gen(*data):
"""
Generate an sqlite "LIKE" filter generator based on the given data.
This functions arguments should be a N length series of field and data
tuples.
"""
where = []
def Fwhere(field, pattern):
"""Add where filter for the given field with the given pattern."""
where.append("WHERE {0} LIKE '{1}'".format(field, pattern))
def Fstring(field, string):
"""Add a where filter based on a string."""
Fwhere(field, "%{0}%".format(string if not isinstance(string, str)
else str(string)))
def Fdict(field, data):
"""Add where filters to search for dict keys and values."""
for key, value in data.items():
if value == '*':
Fstring(field, key)
else:
Fstring(field, "{0}:%{1}".format(key, value if not
isinstance(value, str)
else str(value)))
def Flist(field, data):
"""Add where filters to search for elements of a list."""
for elem in data:
Fstring(field, elem if not isinstance(elem, str) else
str(elem))
for field, data in data:
if isinstance(data, str):
Fstring(field, data)
elif isinstance(data, dict):
Fdict(field, data)
elif isinstance(data, list):
Flist(field, data)
return ' AND '.join(where)
|
python
|
def Where_filter_gen(*data):
"""
Generate an sqlite "LIKE" filter generator based on the given data.
This functions arguments should be a N length series of field and data
tuples.
"""
where = []
def Fwhere(field, pattern):
"""Add where filter for the given field with the given pattern."""
where.append("WHERE {0} LIKE '{1}'".format(field, pattern))
def Fstring(field, string):
"""Add a where filter based on a string."""
Fwhere(field, "%{0}%".format(string if not isinstance(string, str)
else str(string)))
def Fdict(field, data):
"""Add where filters to search for dict keys and values."""
for key, value in data.items():
if value == '*':
Fstring(field, key)
else:
Fstring(field, "{0}:%{1}".format(key, value if not
isinstance(value, str)
else str(value)))
def Flist(field, data):
"""Add where filters to search for elements of a list."""
for elem in data:
Fstring(field, elem if not isinstance(elem, str) else
str(elem))
for field, data in data:
if isinstance(data, str):
Fstring(field, data)
elif isinstance(data, dict):
Fdict(field, data)
elif isinstance(data, list):
Flist(field, data)
return ' AND '.join(where)
|
[
"def",
"Where_filter_gen",
"(",
"*",
"data",
")",
":",
"where",
"=",
"[",
"]",
"def",
"Fwhere",
"(",
"field",
",",
"pattern",
")",
":",
"\"\"\"Add where filter for the given field with the given pattern.\"\"\"",
"where",
".",
"append",
"(",
"\"WHERE {0} LIKE '{1}'\"",
".",
"format",
"(",
"field",
",",
"pattern",
")",
")",
"def",
"Fstring",
"(",
"field",
",",
"string",
")",
":",
"\"\"\"Add a where filter based on a string.\"\"\"",
"Fwhere",
"(",
"field",
",",
"\"%{0}%\"",
".",
"format",
"(",
"string",
"if",
"not",
"isinstance",
"(",
"string",
",",
"str",
")",
"else",
"str",
"(",
"string",
")",
")",
")",
"def",
"Fdict",
"(",
"field",
",",
"data",
")",
":",
"\"\"\"Add where filters to search for dict keys and values.\"\"\"",
"for",
"key",
",",
"value",
"in",
"data",
".",
"items",
"(",
")",
":",
"if",
"value",
"==",
"'*'",
":",
"Fstring",
"(",
"field",
",",
"key",
")",
"else",
":",
"Fstring",
"(",
"field",
",",
"\"{0}:%{1}\"",
".",
"format",
"(",
"key",
",",
"value",
"if",
"not",
"isinstance",
"(",
"value",
",",
"str",
")",
"else",
"str",
"(",
"value",
")",
")",
")",
"def",
"Flist",
"(",
"field",
",",
"data",
")",
":",
"\"\"\"Add where filters to search for elements of a list.\"\"\"",
"for",
"elem",
"in",
"data",
":",
"Fstring",
"(",
"field",
",",
"elem",
"if",
"not",
"isinstance",
"(",
"elem",
",",
"str",
")",
"else",
"str",
"(",
"elem",
")",
")",
"for",
"field",
",",
"data",
"in",
"data",
":",
"if",
"isinstance",
"(",
"data",
",",
"str",
")",
":",
"Fstring",
"(",
"field",
",",
"data",
")",
"elif",
"isinstance",
"(",
"data",
",",
"dict",
")",
":",
"Fdict",
"(",
"field",
",",
"data",
")",
"elif",
"isinstance",
"(",
"data",
",",
"list",
")",
":",
"Flist",
"(",
"field",
",",
"data",
")",
"return",
"' AND '",
".",
"join",
"(",
"where",
")"
] |
Generate an sqlite "LIKE" filter generator based on the given data.
This functions arguments should be a N length series of field and data
tuples.
|
[
"Generate",
"an",
"sqlite",
"LIKE",
"filter",
"generator",
"based",
"on",
"the",
"given",
"data",
".",
"This",
"functions",
"arguments",
"should",
"be",
"a",
"N",
"length",
"series",
"of",
"field",
"and",
"data",
"tuples",
"."
] |
5d3da2980d91a637f80ad7164fbf204a2dd2bd58
|
https://github.com/Nekroze/librarian/blob/5d3da2980d91a637f80ad7164fbf204a2dd2bd58/librarian/library.py#L11-L52
|
242,082
|
Nekroze/librarian
|
librarian/library.py
|
Library.cache_card
|
def cache_card(self, card):
"""
Cache the card for faster future lookups. Removes the oldest card
when the card cache stores more cards then this libraries cache limit.
"""
code = card.code
self.card_cache[code] = card
if code in self.card_cache_list:
self.card_cache_list.remove(code)
self.card_cache_list.append(code)
if len(self.card_cache_list) > self.cachelimit:
del self.card_cache[self.card_cache_list.pop(0)]
|
python
|
def cache_card(self, card):
"""
Cache the card for faster future lookups. Removes the oldest card
when the card cache stores more cards then this libraries cache limit.
"""
code = card.code
self.card_cache[code] = card
if code in self.card_cache_list:
self.card_cache_list.remove(code)
self.card_cache_list.append(code)
if len(self.card_cache_list) > self.cachelimit:
del self.card_cache[self.card_cache_list.pop(0)]
|
[
"def",
"cache_card",
"(",
"self",
",",
"card",
")",
":",
"code",
"=",
"card",
".",
"code",
"self",
".",
"card_cache",
"[",
"code",
"]",
"=",
"card",
"if",
"code",
"in",
"self",
".",
"card_cache_list",
":",
"self",
".",
"card_cache_list",
".",
"remove",
"(",
"code",
")",
"self",
".",
"card_cache_list",
".",
"append",
"(",
"code",
")",
"if",
"len",
"(",
"self",
".",
"card_cache_list",
")",
">",
"self",
".",
"cachelimit",
":",
"del",
"self",
".",
"card_cache",
"[",
"self",
".",
"card_cache_list",
".",
"pop",
"(",
"0",
")",
"]"
] |
Cache the card for faster future lookups. Removes the oldest card
when the card cache stores more cards then this libraries cache limit.
|
[
"Cache",
"the",
"card",
"for",
"faster",
"future",
"lookups",
".",
"Removes",
"the",
"oldest",
"card",
"when",
"the",
"card",
"cache",
"stores",
"more",
"cards",
"then",
"this",
"libraries",
"cache",
"limit",
"."
] |
5d3da2980d91a637f80ad7164fbf204a2dd2bd58
|
https://github.com/Nekroze/librarian/blob/5d3da2980d91a637f80ad7164fbf204a2dd2bd58/librarian/library.py#L82-L94
|
242,083
|
Nekroze/librarian
|
librarian/library.py
|
Library.load_card
|
def load_card(self, code, cache=True):
"""
Load a card with the given code from the database. This calls each
save event hook on the save string before commiting it to the database.
Will cache each resulting card for faster future lookups with this
method while respecting the libraries cache limit. However only if the
cache argument is True.
Will return None if the card could not be loaded.
"""
card = self.card_cache.get(code, None)
if card is None:
code = code if isinstance(code, str) else str(code)
with sqlite3.connect(self.dbname) as carddb:
result = carddb.execute(
"SELECT * FROM CARDS WHERE code = ?", (code,))
loadrow = result.fetchone()
if not loadrow:
return None
loaddict = dict(zip(FIELDS, loadrow))
card = self.cardclass(loaddict=loaddict)
if cache:
self.cache_card(card)
return card
|
python
|
def load_card(self, code, cache=True):
"""
Load a card with the given code from the database. This calls each
save event hook on the save string before commiting it to the database.
Will cache each resulting card for faster future lookups with this
method while respecting the libraries cache limit. However only if the
cache argument is True.
Will return None if the card could not be loaded.
"""
card = self.card_cache.get(code, None)
if card is None:
code = code if isinstance(code, str) else str(code)
with sqlite3.connect(self.dbname) as carddb:
result = carddb.execute(
"SELECT * FROM CARDS WHERE code = ?", (code,))
loadrow = result.fetchone()
if not loadrow:
return None
loaddict = dict(zip(FIELDS, loadrow))
card = self.cardclass(loaddict=loaddict)
if cache:
self.cache_card(card)
return card
|
[
"def",
"load_card",
"(",
"self",
",",
"code",
",",
"cache",
"=",
"True",
")",
":",
"card",
"=",
"self",
".",
"card_cache",
".",
"get",
"(",
"code",
",",
"None",
")",
"if",
"card",
"is",
"None",
":",
"code",
"=",
"code",
"if",
"isinstance",
"(",
"code",
",",
"str",
")",
"else",
"str",
"(",
"code",
")",
"with",
"sqlite3",
".",
"connect",
"(",
"self",
".",
"dbname",
")",
"as",
"carddb",
":",
"result",
"=",
"carddb",
".",
"execute",
"(",
"\"SELECT * FROM CARDS WHERE code = ?\"",
",",
"(",
"code",
",",
")",
")",
"loadrow",
"=",
"result",
".",
"fetchone",
"(",
")",
"if",
"not",
"loadrow",
":",
"return",
"None",
"loaddict",
"=",
"dict",
"(",
"zip",
"(",
"FIELDS",
",",
"loadrow",
")",
")",
"card",
"=",
"self",
".",
"cardclass",
"(",
"loaddict",
"=",
"loaddict",
")",
"if",
"cache",
":",
"self",
".",
"cache_card",
"(",
"card",
")",
"return",
"card"
] |
Load a card with the given code from the database. This calls each
save event hook on the save string before commiting it to the database.
Will cache each resulting card for faster future lookups with this
method while respecting the libraries cache limit. However only if the
cache argument is True.
Will return None if the card could not be loaded.
|
[
"Load",
"a",
"card",
"with",
"the",
"given",
"code",
"from",
"the",
"database",
".",
"This",
"calls",
"each",
"save",
"event",
"hook",
"on",
"the",
"save",
"string",
"before",
"commiting",
"it",
"to",
"the",
"database",
"."
] |
5d3da2980d91a637f80ad7164fbf204a2dd2bd58
|
https://github.com/Nekroze/librarian/blob/5d3da2980d91a637f80ad7164fbf204a2dd2bd58/librarian/library.py#L102-L126
|
242,084
|
Nekroze/librarian
|
librarian/library.py
|
Library.save_card
|
def save_card(self, card, cache=False):
"""
Save the given card to the database. This calls each save event hook
on the save string before commiting it to the database.
"""
if cache:
self.cache_card(card)
carddict = card.save()
with sqlite3.connect(self.dbname) as carddb:
carddb.execute("DELETE from CARDS where code = ?",
(carddict["code"],))
carddb.execute("INSERT INTO CARDS VALUES(?, ?, ?, ?, ?)",
[carddict[key] if isinstance(carddict[key], str)
else str(carddict[key]) for key in FIELDS])
|
python
|
def save_card(self, card, cache=False):
"""
Save the given card to the database. This calls each save event hook
on the save string before commiting it to the database.
"""
if cache:
self.cache_card(card)
carddict = card.save()
with sqlite3.connect(self.dbname) as carddb:
carddb.execute("DELETE from CARDS where code = ?",
(carddict["code"],))
carddb.execute("INSERT INTO CARDS VALUES(?, ?, ?, ?, ?)",
[carddict[key] if isinstance(carddict[key], str)
else str(carddict[key]) for key in FIELDS])
|
[
"def",
"save_card",
"(",
"self",
",",
"card",
",",
"cache",
"=",
"False",
")",
":",
"if",
"cache",
":",
"self",
".",
"cache_card",
"(",
"card",
")",
"carddict",
"=",
"card",
".",
"save",
"(",
")",
"with",
"sqlite3",
".",
"connect",
"(",
"self",
".",
"dbname",
")",
"as",
"carddb",
":",
"carddb",
".",
"execute",
"(",
"\"DELETE from CARDS where code = ?\"",
",",
"(",
"carddict",
"[",
"\"code\"",
"]",
",",
")",
")",
"carddb",
".",
"execute",
"(",
"\"INSERT INTO CARDS VALUES(?, ?, ?, ?, ?)\"",
",",
"[",
"carddict",
"[",
"key",
"]",
"if",
"isinstance",
"(",
"carddict",
"[",
"key",
"]",
",",
"str",
")",
"else",
"str",
"(",
"carddict",
"[",
"key",
"]",
")",
"for",
"key",
"in",
"FIELDS",
"]",
")"
] |
Save the given card to the database. This calls each save event hook
on the save string before commiting it to the database.
|
[
"Save",
"the",
"given",
"card",
"to",
"the",
"database",
".",
"This",
"calls",
"each",
"save",
"event",
"hook",
"on",
"the",
"save",
"string",
"before",
"commiting",
"it",
"to",
"the",
"database",
"."
] |
5d3da2980d91a637f80ad7164fbf204a2dd2bd58
|
https://github.com/Nekroze/librarian/blob/5d3da2980d91a637f80ad7164fbf204a2dd2bd58/librarian/library.py#L128-L141
|
242,085
|
Nekroze/librarian
|
librarian/library.py
|
Library.retrieve_all
|
def retrieve_all(self):
"""
A generator that iterates over each card in the library database.
This is best used in for loops as it will only load a card from the
library as needed rather then all at once.
"""
with sqlite3.connect(self.dbname) as carddb:
for row in carddb.execute("SELECT code FROM CARDS"):
yield self.load_card(row[0])
|
python
|
def retrieve_all(self):
"""
A generator that iterates over each card in the library database.
This is best used in for loops as it will only load a card from the
library as needed rather then all at once.
"""
with sqlite3.connect(self.dbname) as carddb:
for row in carddb.execute("SELECT code FROM CARDS"):
yield self.load_card(row[0])
|
[
"def",
"retrieve_all",
"(",
"self",
")",
":",
"with",
"sqlite3",
".",
"connect",
"(",
"self",
".",
"dbname",
")",
"as",
"carddb",
":",
"for",
"row",
"in",
"carddb",
".",
"execute",
"(",
"\"SELECT code FROM CARDS\"",
")",
":",
"yield",
"self",
".",
"load_card",
"(",
"row",
"[",
"0",
"]",
")"
] |
A generator that iterates over each card in the library database.
This is best used in for loops as it will only load a card from the
library as needed rather then all at once.
|
[
"A",
"generator",
"that",
"iterates",
"over",
"each",
"card",
"in",
"the",
"library",
"database",
"."
] |
5d3da2980d91a637f80ad7164fbf204a2dd2bd58
|
https://github.com/Nekroze/librarian/blob/5d3da2980d91a637f80ad7164fbf204a2dd2bd58/librarian/library.py#L143-L152
|
242,086
|
Nekroze/librarian
|
librarian/library.py
|
Library.filter_search
|
def filter_search(self, code=None, name=None, abilities=None,
attributes=None, info=None):
"""
Return a list of codes and names pertaining to cards that have the
given information values stored.
Can take a code integer, name string, abilities dict {phase: ability
list/"*"}, attributes list, info dict {key, value list/"*"}.
In the above argument examples "*" is a string that may be passed
instead of a list as the dict value to match anything that stores that
key.
"""
command = "SELECT code, name FROM CARDS "
command += Where_filter_gen(("code", code), ("name", name),
("abilities", abilities),
("attributes", attributes),
("info", info))
with sqlite3.connect(self.dbname) as carddb:
return carddb.execute(command).fetchall()
|
python
|
def filter_search(self, code=None, name=None, abilities=None,
attributes=None, info=None):
"""
Return a list of codes and names pertaining to cards that have the
given information values stored.
Can take a code integer, name string, abilities dict {phase: ability
list/"*"}, attributes list, info dict {key, value list/"*"}.
In the above argument examples "*" is a string that may be passed
instead of a list as the dict value to match anything that stores that
key.
"""
command = "SELECT code, name FROM CARDS "
command += Where_filter_gen(("code", code), ("name", name),
("abilities", abilities),
("attributes", attributes),
("info", info))
with sqlite3.connect(self.dbname) as carddb:
return carddb.execute(command).fetchall()
|
[
"def",
"filter_search",
"(",
"self",
",",
"code",
"=",
"None",
",",
"name",
"=",
"None",
",",
"abilities",
"=",
"None",
",",
"attributes",
"=",
"None",
",",
"info",
"=",
"None",
")",
":",
"command",
"=",
"\"SELECT code, name FROM CARDS \"",
"command",
"+=",
"Where_filter_gen",
"(",
"(",
"\"code\"",
",",
"code",
")",
",",
"(",
"\"name\"",
",",
"name",
")",
",",
"(",
"\"abilities\"",
",",
"abilities",
")",
",",
"(",
"\"attributes\"",
",",
"attributes",
")",
",",
"(",
"\"info\"",
",",
"info",
")",
")",
"with",
"sqlite3",
".",
"connect",
"(",
"self",
".",
"dbname",
")",
"as",
"carddb",
":",
"return",
"carddb",
".",
"execute",
"(",
"command",
")",
".",
"fetchall",
"(",
")"
] |
Return a list of codes and names pertaining to cards that have the
given information values stored.
Can take a code integer, name string, abilities dict {phase: ability
list/"*"}, attributes list, info dict {key, value list/"*"}.
In the above argument examples "*" is a string that may be passed
instead of a list as the dict value to match anything that stores that
key.
|
[
"Return",
"a",
"list",
"of",
"codes",
"and",
"names",
"pertaining",
"to",
"cards",
"that",
"have",
"the",
"given",
"information",
"values",
"stored",
"."
] |
5d3da2980d91a637f80ad7164fbf204a2dd2bd58
|
https://github.com/Nekroze/librarian/blob/5d3da2980d91a637f80ad7164fbf204a2dd2bd58/librarian/library.py#L154-L174
|
242,087
|
BlackEarth/bf
|
bf/css.py
|
CSS.to_unit
|
def to_unit(C, val, unit=None):
"""convert a string measurement to a Unum"""
md = re.match(r'^(?P<num>[\d\.]+)(?P<unit>.*)$', val)
if md is not None:
un = float(md.group('num')) * CSS.units[md.group('unit')]
if unit is not None:
return un.asUnit(unit)
else:
return un
|
python
|
def to_unit(C, val, unit=None):
"""convert a string measurement to a Unum"""
md = re.match(r'^(?P<num>[\d\.]+)(?P<unit>.*)$', val)
if md is not None:
un = float(md.group('num')) * CSS.units[md.group('unit')]
if unit is not None:
return un.asUnit(unit)
else:
return un
|
[
"def",
"to_unit",
"(",
"C",
",",
"val",
",",
"unit",
"=",
"None",
")",
":",
"md",
"=",
"re",
".",
"match",
"(",
"r'^(?P<num>[\\d\\.]+)(?P<unit>.*)$'",
",",
"val",
")",
"if",
"md",
"is",
"not",
"None",
":",
"un",
"=",
"float",
"(",
"md",
".",
"group",
"(",
"'num'",
")",
")",
"*",
"CSS",
".",
"units",
"[",
"md",
".",
"group",
"(",
"'unit'",
")",
"]",
"if",
"unit",
"is",
"not",
"None",
":",
"return",
"un",
".",
"asUnit",
"(",
"unit",
")",
"else",
":",
"return",
"un"
] |
convert a string measurement to a Unum
|
[
"convert",
"a",
"string",
"measurement",
"to",
"a",
"Unum"
] |
376041168874bbd6dee5ccfeece4a9e553223316
|
https://github.com/BlackEarth/bf/blob/376041168874bbd6dee5ccfeece4a9e553223316/bf/css.py#L48-L56
|
242,088
|
BlackEarth/bf
|
bf/css.py
|
CSS.merge_stylesheets
|
def merge_stylesheets(Class, fn, *cssfns):
"""merge the given CSS files, in order, into a single stylesheet. First listed takes priority.
"""
stylesheet = Class(fn=fn)
for cssfn in cssfns:
css = Class(fn=cssfn)
for sel in sorted(css.styles.keys()):
if sel not in stylesheet.styles:
stylesheet.styles[sel] = css.styles[sel]
else:
for prop in [prop for prop in css.styles[sel] if prop not in stylesheet.styles[sel]]:
stylesheet.styles[sel][prop] = css.styles[sel][prop]
return stylesheet
|
python
|
def merge_stylesheets(Class, fn, *cssfns):
"""merge the given CSS files, in order, into a single stylesheet. First listed takes priority.
"""
stylesheet = Class(fn=fn)
for cssfn in cssfns:
css = Class(fn=cssfn)
for sel in sorted(css.styles.keys()):
if sel not in stylesheet.styles:
stylesheet.styles[sel] = css.styles[sel]
else:
for prop in [prop for prop in css.styles[sel] if prop not in stylesheet.styles[sel]]:
stylesheet.styles[sel][prop] = css.styles[sel][prop]
return stylesheet
|
[
"def",
"merge_stylesheets",
"(",
"Class",
",",
"fn",
",",
"*",
"cssfns",
")",
":",
"stylesheet",
"=",
"Class",
"(",
"fn",
"=",
"fn",
")",
"for",
"cssfn",
"in",
"cssfns",
":",
"css",
"=",
"Class",
"(",
"fn",
"=",
"cssfn",
")",
"for",
"sel",
"in",
"sorted",
"(",
"css",
".",
"styles",
".",
"keys",
"(",
")",
")",
":",
"if",
"sel",
"not",
"in",
"stylesheet",
".",
"styles",
":",
"stylesheet",
".",
"styles",
"[",
"sel",
"]",
"=",
"css",
".",
"styles",
"[",
"sel",
"]",
"else",
":",
"for",
"prop",
"in",
"[",
"prop",
"for",
"prop",
"in",
"css",
".",
"styles",
"[",
"sel",
"]",
"if",
"prop",
"not",
"in",
"stylesheet",
".",
"styles",
"[",
"sel",
"]",
"]",
":",
"stylesheet",
".",
"styles",
"[",
"sel",
"]",
"[",
"prop",
"]",
"=",
"css",
".",
"styles",
"[",
"sel",
"]",
"[",
"prop",
"]",
"return",
"stylesheet"
] |
merge the given CSS files, in order, into a single stylesheet. First listed takes priority.
|
[
"merge",
"the",
"given",
"CSS",
"files",
"in",
"order",
"into",
"a",
"single",
"stylesheet",
".",
"First",
"listed",
"takes",
"priority",
"."
] |
376041168874bbd6dee5ccfeece4a9e553223316
|
https://github.com/BlackEarth/bf/blob/376041168874bbd6dee5ccfeece4a9e553223316/bf/css.py#L76-L88
|
242,089
|
BlackEarth/bf
|
bf/css.py
|
CSS.all_selectors
|
def all_selectors(Class, fn):
"""return a sorted list of selectors that occur in the stylesheet"""
selectors = []
cssparser = cssutils.CSSParser(validate=False)
css = cssparser.parseFile(fn)
for rule in [r for r in css.cssRules if type(r)==cssutils.css.CSSStyleRule]:
selectors += [sel.selectorText for sel in rule.selectorList]
selectors = sorted(list(set(selectors)))
return selectors
|
python
|
def all_selectors(Class, fn):
"""return a sorted list of selectors that occur in the stylesheet"""
selectors = []
cssparser = cssutils.CSSParser(validate=False)
css = cssparser.parseFile(fn)
for rule in [r for r in css.cssRules if type(r)==cssutils.css.CSSStyleRule]:
selectors += [sel.selectorText for sel in rule.selectorList]
selectors = sorted(list(set(selectors)))
return selectors
|
[
"def",
"all_selectors",
"(",
"Class",
",",
"fn",
")",
":",
"selectors",
"=",
"[",
"]",
"cssparser",
"=",
"cssutils",
".",
"CSSParser",
"(",
"validate",
"=",
"False",
")",
"css",
"=",
"cssparser",
".",
"parseFile",
"(",
"fn",
")",
"for",
"rule",
"in",
"[",
"r",
"for",
"r",
"in",
"css",
".",
"cssRules",
"if",
"type",
"(",
"r",
")",
"==",
"cssutils",
".",
"css",
".",
"CSSStyleRule",
"]",
":",
"selectors",
"+=",
"[",
"sel",
".",
"selectorText",
"for",
"sel",
"in",
"rule",
".",
"selectorList",
"]",
"selectors",
"=",
"sorted",
"(",
"list",
"(",
"set",
"(",
"selectors",
")",
")",
")",
"return",
"selectors"
] |
return a sorted list of selectors that occur in the stylesheet
|
[
"return",
"a",
"sorted",
"list",
"of",
"selectors",
"that",
"occur",
"in",
"the",
"stylesheet"
] |
376041168874bbd6dee5ccfeece4a9e553223316
|
https://github.com/BlackEarth/bf/blob/376041168874bbd6dee5ccfeece4a9e553223316/bf/css.py#L91-L99
|
242,090
|
BlackEarth/bf
|
bf/css.py
|
CSS.selector_to_xpath
|
def selector_to_xpath(cls, selector, xmlns=None):
"""convert a css selector into an xpath expression.
xmlns is option single-item dict with namespace prefix and href
"""
selector = selector.replace(' .', ' *.')
if selector[0] == '.':
selector = '*' + selector
log.debug(selector)
if '#' in selector:
selector = selector.replace('#', '*#')
log.debug(selector)
if xmlns is not None:
prefix = list(xmlns.keys())[0]
href = xmlns[prefix]
selector = ' '.join([
(n.strip() != '>' and prefix + '|' + n.strip() or n.strip())
for n in selector.split(' ')
])
log.debug(selector)
path = cssselect.GenericTranslator().css_to_xpath(selector)
path = path.replace("descendant-or-self::", "")
path = path.replace("/descendant::", "//")
path = path.replace('/*/', '//')
log.debug(' ==> %s' % path)
return path
|
python
|
def selector_to_xpath(cls, selector, xmlns=None):
"""convert a css selector into an xpath expression.
xmlns is option single-item dict with namespace prefix and href
"""
selector = selector.replace(' .', ' *.')
if selector[0] == '.':
selector = '*' + selector
log.debug(selector)
if '#' in selector:
selector = selector.replace('#', '*#')
log.debug(selector)
if xmlns is not None:
prefix = list(xmlns.keys())[0]
href = xmlns[prefix]
selector = ' '.join([
(n.strip() != '>' and prefix + '|' + n.strip() or n.strip())
for n in selector.split(' ')
])
log.debug(selector)
path = cssselect.GenericTranslator().css_to_xpath(selector)
path = path.replace("descendant-or-self::", "")
path = path.replace("/descendant::", "//")
path = path.replace('/*/', '//')
log.debug(' ==> %s' % path)
return path
|
[
"def",
"selector_to_xpath",
"(",
"cls",
",",
"selector",
",",
"xmlns",
"=",
"None",
")",
":",
"selector",
"=",
"selector",
".",
"replace",
"(",
"' .'",
",",
"' *.'",
")",
"if",
"selector",
"[",
"0",
"]",
"==",
"'.'",
":",
"selector",
"=",
"'*'",
"+",
"selector",
"log",
".",
"debug",
"(",
"selector",
")",
"if",
"'#'",
"in",
"selector",
":",
"selector",
"=",
"selector",
".",
"replace",
"(",
"'#'",
",",
"'*#'",
")",
"log",
".",
"debug",
"(",
"selector",
")",
"if",
"xmlns",
"is",
"not",
"None",
":",
"prefix",
"=",
"list",
"(",
"xmlns",
".",
"keys",
"(",
")",
")",
"[",
"0",
"]",
"href",
"=",
"xmlns",
"[",
"prefix",
"]",
"selector",
"=",
"' '",
".",
"join",
"(",
"[",
"(",
"n",
".",
"strip",
"(",
")",
"!=",
"'>'",
"and",
"prefix",
"+",
"'|'",
"+",
"n",
".",
"strip",
"(",
")",
"or",
"n",
".",
"strip",
"(",
")",
")",
"for",
"n",
"in",
"selector",
".",
"split",
"(",
"' '",
")",
"]",
")",
"log",
".",
"debug",
"(",
"selector",
")",
"path",
"=",
"cssselect",
".",
"GenericTranslator",
"(",
")",
".",
"css_to_xpath",
"(",
"selector",
")",
"path",
"=",
"path",
".",
"replace",
"(",
"\"descendant-or-self::\"",
",",
"\"\"",
")",
"path",
"=",
"path",
".",
"replace",
"(",
"\"/descendant::\"",
",",
"\"//\"",
")",
"path",
"=",
"path",
".",
"replace",
"(",
"'/*/'",
",",
"'//'",
")",
"log",
".",
"debug",
"(",
"' ==> %s'",
"%",
"path",
")",
"return",
"path"
] |
convert a css selector into an xpath expression.
xmlns is option single-item dict with namespace prefix and href
|
[
"convert",
"a",
"css",
"selector",
"into",
"an",
"xpath",
"expression",
".",
"xmlns",
"is",
"option",
"single",
"-",
"item",
"dict",
"with",
"namespace",
"prefix",
"and",
"href"
] |
376041168874bbd6dee5ccfeece4a9e553223316
|
https://github.com/BlackEarth/bf/blob/376041168874bbd6dee5ccfeece4a9e553223316/bf/css.py#L102-L131
|
242,091
|
mikerhodes/actionqueues
|
actionqueues/aqstatemachine.py
|
AQStateMachine.transition_to_add
|
def transition_to_add(self):
"""Transition to add"""
assert self.state in [AQStateMachineStates.init, AQStateMachineStates.add]
self.state = AQStateMachineStates.add
|
python
|
def transition_to_add(self):
"""Transition to add"""
assert self.state in [AQStateMachineStates.init, AQStateMachineStates.add]
self.state = AQStateMachineStates.add
|
[
"def",
"transition_to_add",
"(",
"self",
")",
":",
"assert",
"self",
".",
"state",
"in",
"[",
"AQStateMachineStates",
".",
"init",
",",
"AQStateMachineStates",
".",
"add",
"]",
"self",
".",
"state",
"=",
"AQStateMachineStates",
".",
"add"
] |
Transition to add
|
[
"Transition",
"to",
"add"
] |
a7a78ab116abe88af95b5315dc9f34d40ce81eb2
|
https://github.com/mikerhodes/actionqueues/blob/a7a78ab116abe88af95b5315dc9f34d40ce81eb2/actionqueues/aqstatemachine.py#L44-L47
|
242,092
|
mikerhodes/actionqueues
|
actionqueues/aqstatemachine.py
|
AQStateMachine.transition_to_execute
|
def transition_to_execute(self):
"""Transition to execute"""
assert self.state in [AQStateMachineStates.add]
self.state = AQStateMachineStates.execute
|
python
|
def transition_to_execute(self):
"""Transition to execute"""
assert self.state in [AQStateMachineStates.add]
self.state = AQStateMachineStates.execute
|
[
"def",
"transition_to_execute",
"(",
"self",
")",
":",
"assert",
"self",
".",
"state",
"in",
"[",
"AQStateMachineStates",
".",
"add",
"]",
"self",
".",
"state",
"=",
"AQStateMachineStates",
".",
"execute"
] |
Transition to execute
|
[
"Transition",
"to",
"execute"
] |
a7a78ab116abe88af95b5315dc9f34d40ce81eb2
|
https://github.com/mikerhodes/actionqueues/blob/a7a78ab116abe88af95b5315dc9f34d40ce81eb2/actionqueues/aqstatemachine.py#L49-L52
|
242,093
|
mikerhodes/actionqueues
|
actionqueues/aqstatemachine.py
|
AQStateMachine.transition_to_rollback
|
def transition_to_rollback(self):
"""Transition to rollback"""
assert self.state in [AQStateMachineStates.execute, AQStateMachineStates.execute_complete]
self.state = AQStateMachineStates.rollback
|
python
|
def transition_to_rollback(self):
"""Transition to rollback"""
assert self.state in [AQStateMachineStates.execute, AQStateMachineStates.execute_complete]
self.state = AQStateMachineStates.rollback
|
[
"def",
"transition_to_rollback",
"(",
"self",
")",
":",
"assert",
"self",
".",
"state",
"in",
"[",
"AQStateMachineStates",
".",
"execute",
",",
"AQStateMachineStates",
".",
"execute_complete",
"]",
"self",
".",
"state",
"=",
"AQStateMachineStates",
".",
"rollback"
] |
Transition to rollback
|
[
"Transition",
"to",
"rollback"
] |
a7a78ab116abe88af95b5315dc9f34d40ce81eb2
|
https://github.com/mikerhodes/actionqueues/blob/a7a78ab116abe88af95b5315dc9f34d40ce81eb2/actionqueues/aqstatemachine.py#L54-L57
|
242,094
|
mikerhodes/actionqueues
|
actionqueues/aqstatemachine.py
|
AQStateMachine.transition_to_execute_complete
|
def transition_to_execute_complete(self):
"""Transition to execute complate"""
assert self.state in [AQStateMachineStates.execute]
self.state = AQStateMachineStates.execute_complete
|
python
|
def transition_to_execute_complete(self):
"""Transition to execute complate"""
assert self.state in [AQStateMachineStates.execute]
self.state = AQStateMachineStates.execute_complete
|
[
"def",
"transition_to_execute_complete",
"(",
"self",
")",
":",
"assert",
"self",
".",
"state",
"in",
"[",
"AQStateMachineStates",
".",
"execute",
"]",
"self",
".",
"state",
"=",
"AQStateMachineStates",
".",
"execute_complete"
] |
Transition to execute complate
|
[
"Transition",
"to",
"execute",
"complate"
] |
a7a78ab116abe88af95b5315dc9f34d40ce81eb2
|
https://github.com/mikerhodes/actionqueues/blob/a7a78ab116abe88af95b5315dc9f34d40ce81eb2/actionqueues/aqstatemachine.py#L59-L62
|
242,095
|
mikerhodes/actionqueues
|
actionqueues/aqstatemachine.py
|
AQStateMachine.transition_to_rollback_complete
|
def transition_to_rollback_complete(self):
"""Transition to rollback complete"""
assert self.state in [AQStateMachineStates.rollback]
self.state = AQStateMachineStates.rollback_complate
|
python
|
def transition_to_rollback_complete(self):
"""Transition to rollback complete"""
assert self.state in [AQStateMachineStates.rollback]
self.state = AQStateMachineStates.rollback_complate
|
[
"def",
"transition_to_rollback_complete",
"(",
"self",
")",
":",
"assert",
"self",
".",
"state",
"in",
"[",
"AQStateMachineStates",
".",
"rollback",
"]",
"self",
".",
"state",
"=",
"AQStateMachineStates",
".",
"rollback_complate"
] |
Transition to rollback complete
|
[
"Transition",
"to",
"rollback",
"complete"
] |
a7a78ab116abe88af95b5315dc9f34d40ce81eb2
|
https://github.com/mikerhodes/actionqueues/blob/a7a78ab116abe88af95b5315dc9f34d40ce81eb2/actionqueues/aqstatemachine.py#L64-L67
|
242,096
|
romaryd/python-jsonrepo
|
jsonrepo/backends/memory.py
|
DictBackend.get
|
def get(self, key, sort_key):
""" Get an element in dictionary """
key = self.prefixed('{}:{}'.format(key, sort_key))
self.logger.debug('Storage - get {}'.format(key))
if key not in self.cache.keys():
return None
return self.cache[key]
|
python
|
def get(self, key, sort_key):
""" Get an element in dictionary """
key = self.prefixed('{}:{}'.format(key, sort_key))
self.logger.debug('Storage - get {}'.format(key))
if key not in self.cache.keys():
return None
return self.cache[key]
|
[
"def",
"get",
"(",
"self",
",",
"key",
",",
"sort_key",
")",
":",
"key",
"=",
"self",
".",
"prefixed",
"(",
"'{}:{}'",
".",
"format",
"(",
"key",
",",
"sort_key",
")",
")",
"self",
".",
"logger",
".",
"debug",
"(",
"'Storage - get {}'",
".",
"format",
"(",
"key",
")",
")",
"if",
"key",
"not",
"in",
"self",
".",
"cache",
".",
"keys",
"(",
")",
":",
"return",
"None",
"return",
"self",
".",
"cache",
"[",
"key",
"]"
] |
Get an element in dictionary
|
[
"Get",
"an",
"element",
"in",
"dictionary"
] |
08a9c039a5bd21e93e9a6d1bce77d43e6e10b57d
|
https://github.com/romaryd/python-jsonrepo/blob/08a9c039a5bd21e93e9a6d1bce77d43e6e10b57d/jsonrepo/backends/memory.py#L24-L30
|
242,097
|
romaryd/python-jsonrepo
|
jsonrepo/backends/memory.py
|
DictBackend.delete
|
def delete(self, key, sort_key):
primary_key = key
key = self.prefixed('{}:{}'.format(key, sort_key))
""" Delete an element in dictionary """
self.logger.debug('Storage - delete {}'.format(key))
if sort_key is not None:
self.cache[self.prefixed(primary_key)].remove(sort_key)
for index in self._secondary_indexes:
obj = json.loads(self.cache[key])
if index in obj.keys():
self.cache['secondary_indexes'][index][obj[index]].remove(
key)
del(self.cache[key])
return True
|
python
|
def delete(self, key, sort_key):
primary_key = key
key = self.prefixed('{}:{}'.format(key, sort_key))
""" Delete an element in dictionary """
self.logger.debug('Storage - delete {}'.format(key))
if sort_key is not None:
self.cache[self.prefixed(primary_key)].remove(sort_key)
for index in self._secondary_indexes:
obj = json.loads(self.cache[key])
if index in obj.keys():
self.cache['secondary_indexes'][index][obj[index]].remove(
key)
del(self.cache[key])
return True
|
[
"def",
"delete",
"(",
"self",
",",
"key",
",",
"sort_key",
")",
":",
"primary_key",
"=",
"key",
"key",
"=",
"self",
".",
"prefixed",
"(",
"'{}:{}'",
".",
"format",
"(",
"key",
",",
"sort_key",
")",
")",
"self",
".",
"logger",
".",
"debug",
"(",
"'Storage - delete {}'",
".",
"format",
"(",
"key",
")",
")",
"if",
"sort_key",
"is",
"not",
"None",
":",
"self",
".",
"cache",
"[",
"self",
".",
"prefixed",
"(",
"primary_key",
")",
"]",
".",
"remove",
"(",
"sort_key",
")",
"for",
"index",
"in",
"self",
".",
"_secondary_indexes",
":",
"obj",
"=",
"json",
".",
"loads",
"(",
"self",
".",
"cache",
"[",
"key",
"]",
")",
"if",
"index",
"in",
"obj",
".",
"keys",
"(",
")",
":",
"self",
".",
"cache",
"[",
"'secondary_indexes'",
"]",
"[",
"index",
"]",
"[",
"obj",
"[",
"index",
"]",
"]",
".",
"remove",
"(",
"key",
")",
"del",
"(",
"self",
".",
"cache",
"[",
"key",
"]",
")",
"return",
"True"
] |
Delete an element in dictionary
|
[
"Delete",
"an",
"element",
"in",
"dictionary"
] |
08a9c039a5bd21e93e9a6d1bce77d43e6e10b57d
|
https://github.com/romaryd/python-jsonrepo/blob/08a9c039a5bd21e93e9a6d1bce77d43e6e10b57d/jsonrepo/backends/memory.py#L73-L86
|
242,098
|
lsst-sqre/sqre-apikit
|
apikit/convenience.py
|
set_flask_metadata
|
def set_flask_metadata(app, version, repository, description,
api_version="1.0", name=None, auth=None,
route=None):
"""
Sets metadata on the application to be returned via metadata routes.
Parameters
----------
app : :class:`flask.Flask` instance
Flask application for the microservice you're adding metadata to.
version: `str`
Version of your microservice.
repository: `str`
URL of the repository containing your microservice's source code.
description: `str`
Description of the microservice.
api_version: `str`, optional
Version of the SQuaRE service API framework. Defaults to '1.0'.
name : `str`, optional
Microservice name. Defaults to the Flask app name. If set, changes
the Flask app name to match.
auth : `dict`, `str`, or `None`
The 'auth' parameter must be None, the empty string, the string
'none', or a dict containing a 'type' key, which must be 'none',
'basic', or 'bitly-proxy'. If the type is not 'none', there must
also be a 'data' key containing a dict which holds authentication
information appropriate to the authentication type. The legal
non-dict 'auth' values are equivalent to a 'type' key of 'none'.
route : `None`, `str`, or list of `str`, optional
The 'route' parameter must be None, a string, or a list of strings.
If supplied, each string will be prepended to the metadata route.
Raises
------
TypeError
If arguments are not of the appropriate type.
ValueError
If arguments are the right type but have illegal values.
Returns
-------
Nothing, but sets `app` metadata and decorates it with `/metadata`
and `/v{app_version}/metadata` routes.
"""
errstr = set_flask_metadata.__doc__
if not isinstance(app, Flask):
raise TypeError(errstr)
if name is None:
name = app.name
app.config["NAME"] = name
if app.name != name:
app.name = name
app.config["VERSION"] = version
app.config["REPOSITORY"] = repository
app.config["DESCRIPTION"] = description
app.config["API_VERSION"] = api_version
if not (isinstance(name, str) and isinstance(description, str) and
isinstance(repository, str) and isinstance(version, str) and
isinstance(api_version, str)):
raise TypeError(errstr)
if not (name and description and repository and version and api_version):
raise ValueError(errstr)
if auth is None or (isinstance(auth, str) and ((auth == "none") or
(auth == ""))):
auth = {"type": "none",
"data": None}
if not isinstance(auth, dict):
raise TypeError(errstr)
if "type" not in auth:
raise ValueError(errstr)
atp = auth["type"]
if atp == "none":
app.config["AUTH"] = {"type": "none",
"data": None}
else:
if atp not in ["basic", "bitly-proxy"] or "data" not in auth:
raise ValueError(errstr)
app.config["AUTH"] = auth
add_metadata_route(app, route)
|
python
|
def set_flask_metadata(app, version, repository, description,
api_version="1.0", name=None, auth=None,
route=None):
"""
Sets metadata on the application to be returned via metadata routes.
Parameters
----------
app : :class:`flask.Flask` instance
Flask application for the microservice you're adding metadata to.
version: `str`
Version of your microservice.
repository: `str`
URL of the repository containing your microservice's source code.
description: `str`
Description of the microservice.
api_version: `str`, optional
Version of the SQuaRE service API framework. Defaults to '1.0'.
name : `str`, optional
Microservice name. Defaults to the Flask app name. If set, changes
the Flask app name to match.
auth : `dict`, `str`, or `None`
The 'auth' parameter must be None, the empty string, the string
'none', or a dict containing a 'type' key, which must be 'none',
'basic', or 'bitly-proxy'. If the type is not 'none', there must
also be a 'data' key containing a dict which holds authentication
information appropriate to the authentication type. The legal
non-dict 'auth' values are equivalent to a 'type' key of 'none'.
route : `None`, `str`, or list of `str`, optional
The 'route' parameter must be None, a string, or a list of strings.
If supplied, each string will be prepended to the metadata route.
Raises
------
TypeError
If arguments are not of the appropriate type.
ValueError
If arguments are the right type but have illegal values.
Returns
-------
Nothing, but sets `app` metadata and decorates it with `/metadata`
and `/v{app_version}/metadata` routes.
"""
errstr = set_flask_metadata.__doc__
if not isinstance(app, Flask):
raise TypeError(errstr)
if name is None:
name = app.name
app.config["NAME"] = name
if app.name != name:
app.name = name
app.config["VERSION"] = version
app.config["REPOSITORY"] = repository
app.config["DESCRIPTION"] = description
app.config["API_VERSION"] = api_version
if not (isinstance(name, str) and isinstance(description, str) and
isinstance(repository, str) and isinstance(version, str) and
isinstance(api_version, str)):
raise TypeError(errstr)
if not (name and description and repository and version and api_version):
raise ValueError(errstr)
if auth is None or (isinstance(auth, str) and ((auth == "none") or
(auth == ""))):
auth = {"type": "none",
"data": None}
if not isinstance(auth, dict):
raise TypeError(errstr)
if "type" not in auth:
raise ValueError(errstr)
atp = auth["type"]
if atp == "none":
app.config["AUTH"] = {"type": "none",
"data": None}
else:
if atp not in ["basic", "bitly-proxy"] or "data" not in auth:
raise ValueError(errstr)
app.config["AUTH"] = auth
add_metadata_route(app, route)
|
[
"def",
"set_flask_metadata",
"(",
"app",
",",
"version",
",",
"repository",
",",
"description",
",",
"api_version",
"=",
"\"1.0\"",
",",
"name",
"=",
"None",
",",
"auth",
"=",
"None",
",",
"route",
"=",
"None",
")",
":",
"errstr",
"=",
"set_flask_metadata",
".",
"__doc__",
"if",
"not",
"isinstance",
"(",
"app",
",",
"Flask",
")",
":",
"raise",
"TypeError",
"(",
"errstr",
")",
"if",
"name",
"is",
"None",
":",
"name",
"=",
"app",
".",
"name",
"app",
".",
"config",
"[",
"\"NAME\"",
"]",
"=",
"name",
"if",
"app",
".",
"name",
"!=",
"name",
":",
"app",
".",
"name",
"=",
"name",
"app",
".",
"config",
"[",
"\"VERSION\"",
"]",
"=",
"version",
"app",
".",
"config",
"[",
"\"REPOSITORY\"",
"]",
"=",
"repository",
"app",
".",
"config",
"[",
"\"DESCRIPTION\"",
"]",
"=",
"description",
"app",
".",
"config",
"[",
"\"API_VERSION\"",
"]",
"=",
"api_version",
"if",
"not",
"(",
"isinstance",
"(",
"name",
",",
"str",
")",
"and",
"isinstance",
"(",
"description",
",",
"str",
")",
"and",
"isinstance",
"(",
"repository",
",",
"str",
")",
"and",
"isinstance",
"(",
"version",
",",
"str",
")",
"and",
"isinstance",
"(",
"api_version",
",",
"str",
")",
")",
":",
"raise",
"TypeError",
"(",
"errstr",
")",
"if",
"not",
"(",
"name",
"and",
"description",
"and",
"repository",
"and",
"version",
"and",
"api_version",
")",
":",
"raise",
"ValueError",
"(",
"errstr",
")",
"if",
"auth",
"is",
"None",
"or",
"(",
"isinstance",
"(",
"auth",
",",
"str",
")",
"and",
"(",
"(",
"auth",
"==",
"\"none\"",
")",
"or",
"(",
"auth",
"==",
"\"\"",
")",
")",
")",
":",
"auth",
"=",
"{",
"\"type\"",
":",
"\"none\"",
",",
"\"data\"",
":",
"None",
"}",
"if",
"not",
"isinstance",
"(",
"auth",
",",
"dict",
")",
":",
"raise",
"TypeError",
"(",
"errstr",
")",
"if",
"\"type\"",
"not",
"in",
"auth",
":",
"raise",
"ValueError",
"(",
"errstr",
")",
"atp",
"=",
"auth",
"[",
"\"type\"",
"]",
"if",
"atp",
"==",
"\"none\"",
":",
"app",
".",
"config",
"[",
"\"AUTH\"",
"]",
"=",
"{",
"\"type\"",
":",
"\"none\"",
",",
"\"data\"",
":",
"None",
"}",
"else",
":",
"if",
"atp",
"not",
"in",
"[",
"\"basic\"",
",",
"\"bitly-proxy\"",
"]",
"or",
"\"data\"",
"not",
"in",
"auth",
":",
"raise",
"ValueError",
"(",
"errstr",
")",
"app",
".",
"config",
"[",
"\"AUTH\"",
"]",
"=",
"auth",
"add_metadata_route",
"(",
"app",
",",
"route",
")"
] |
Sets metadata on the application to be returned via metadata routes.
Parameters
----------
app : :class:`flask.Flask` instance
Flask application for the microservice you're adding metadata to.
version: `str`
Version of your microservice.
repository: `str`
URL of the repository containing your microservice's source code.
description: `str`
Description of the microservice.
api_version: `str`, optional
Version of the SQuaRE service API framework. Defaults to '1.0'.
name : `str`, optional
Microservice name. Defaults to the Flask app name. If set, changes
the Flask app name to match.
auth : `dict`, `str`, or `None`
The 'auth' parameter must be None, the empty string, the string
'none', or a dict containing a 'type' key, which must be 'none',
'basic', or 'bitly-proxy'. If the type is not 'none', there must
also be a 'data' key containing a dict which holds authentication
information appropriate to the authentication type. The legal
non-dict 'auth' values are equivalent to a 'type' key of 'none'.
route : `None`, `str`, or list of `str`, optional
The 'route' parameter must be None, a string, or a list of strings.
If supplied, each string will be prepended to the metadata route.
Raises
------
TypeError
If arguments are not of the appropriate type.
ValueError
If arguments are the right type but have illegal values.
Returns
-------
Nothing, but sets `app` metadata and decorates it with `/metadata`
and `/v{app_version}/metadata` routes.
|
[
"Sets",
"metadata",
"on",
"the",
"application",
"to",
"be",
"returned",
"via",
"metadata",
"routes",
"."
] |
ff505b63d2e29303ff7f05f2bd5eabd0f6d7026e
|
https://github.com/lsst-sqre/sqre-apikit/blob/ff505b63d2e29303ff7f05f2bd5eabd0f6d7026e/apikit/convenience.py#L15-L101
|
242,099
|
lsst-sqre/sqre-apikit
|
apikit/convenience.py
|
raise_ise
|
def raise_ise(text):
"""Turn a failed request response into a BackendError that represents
an Internal Server Error. Handy for reflecting HTTP errors from farther
back in the call chain as failures of your service.
Parameters
----------
text: `str`
Error text.
Raises
------
:class:`apikit.BackendError`
The `status_code` will be `500`, and the reason `Internal Server
Error`. Its `content` will be the text you passed.
"""
if isinstance(text, Exception):
# Just in case we are exuberantly passed the entire Exception and
# not its textual representation.
text = str(text)
raise BackendError(status_code=500,
reason="Internal Server Error",
content=text)
|
python
|
def raise_ise(text):
"""Turn a failed request response into a BackendError that represents
an Internal Server Error. Handy for reflecting HTTP errors from farther
back in the call chain as failures of your service.
Parameters
----------
text: `str`
Error text.
Raises
------
:class:`apikit.BackendError`
The `status_code` will be `500`, and the reason `Internal Server
Error`. Its `content` will be the text you passed.
"""
if isinstance(text, Exception):
# Just in case we are exuberantly passed the entire Exception and
# not its textual representation.
text = str(text)
raise BackendError(status_code=500,
reason="Internal Server Error",
content=text)
|
[
"def",
"raise_ise",
"(",
"text",
")",
":",
"if",
"isinstance",
"(",
"text",
",",
"Exception",
")",
":",
"# Just in case we are exuberantly passed the entire Exception and",
"# not its textual representation.",
"text",
"=",
"str",
"(",
"text",
")",
"raise",
"BackendError",
"(",
"status_code",
"=",
"500",
",",
"reason",
"=",
"\"Internal Server Error\"",
",",
"content",
"=",
"text",
")"
] |
Turn a failed request response into a BackendError that represents
an Internal Server Error. Handy for reflecting HTTP errors from farther
back in the call chain as failures of your service.
Parameters
----------
text: `str`
Error text.
Raises
------
:class:`apikit.BackendError`
The `status_code` will be `500`, and the reason `Internal Server
Error`. Its `content` will be the text you passed.
|
[
"Turn",
"a",
"failed",
"request",
"response",
"into",
"a",
"BackendError",
"that",
"represents",
"an",
"Internal",
"Server",
"Error",
".",
"Handy",
"for",
"reflecting",
"HTTP",
"errors",
"from",
"farther",
"back",
"in",
"the",
"call",
"chain",
"as",
"failures",
"of",
"your",
"service",
"."
] |
ff505b63d2e29303ff7f05f2bd5eabd0f6d7026e
|
https://github.com/lsst-sqre/sqre-apikit/blob/ff505b63d2e29303ff7f05f2bd5eabd0f6d7026e/apikit/convenience.py#L237-L259
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.