| id (int32: 0–252k) | repo (string: 7–55 chars) | path (string: 4–127 chars) | func_name (string: 1–88 chars) | original_string (string: 75–19.8k chars) | language (string: 1 class) | code (string: 75–19.8k chars) | code_tokens (list) | docstring (string: 3–17.3k chars) | docstring_tokens (list) | sha (string: 40 chars) | url (string: 87–242 chars) |
|---|---|---|---|---|---|---|---|---|---|---|---|
13,200
|
ihucos/plash
|
opt/plash/lib/py/plash/macros/common.py
|
write_file
|
def write_file(fname, *lines):
'write lines to a file'
yield 'touch {}'.format(fname)
for line in lines:
yield "echo {} >> {}".format(line, fname)
|
python
|
def write_file(fname, *lines):
'write lines to a file'
yield 'touch {}'.format(fname)
for line in lines:
yield "echo {} >> {}".format(line, fname)
|
[
"def",
"write_file",
"(",
"fname",
",",
"*",
"lines",
")",
":",
"yield",
"'touch {}'",
".",
"format",
"(",
"fname",
")",
"for",
"line",
"in",
"lines",
":",
"yield",
"\"echo {} >> {}\"",
".",
"format",
"(",
"line",
",",
"fname",
")"
] |
write lines to a file
|
[
"write",
"lines",
"to",
"a",
"file"
] |
2ab2bc956e309d5aa6414c80983bfbf29b0ce572
|
https://github.com/ihucos/plash/blob/2ab2bc956e309d5aa6414c80983bfbf29b0ce572/opt/plash/lib/py/plash/macros/common.py#L62-L66
|
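Since `write_file` is a generator of shell snippets rather than a function that writes anything itself, the caller is expected to drain it. A minimal sketch of a hypothetical consumer (the joining step is our illustration, not part of plash):

```python
# Hypothetical consumer: collect the shell commands the macro yields.
commands = list(write_file('/etc/motd', 'hello', 'world'))
# commands == ['touch /etc/motd',
#              'echo hello >> /etc/motd',
#              'echo world >> /etc/motd']
print('\n'.join(commands))
```

Note that `lines` are interpolated unquoted, so values containing shell metacharacters would need quoting (e.g. `shlex.quote`) before the commands reach a real shell.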
13,201
|
ihucos/plash
|
opt/plash/lib/py/plash/macros/common.py
|
eval_file
|
def eval_file(file):
'evaluate file content as expressions'
fname = os.path.realpath(os.path.expanduser(file))
with open(fname) as f:
inscript = f.read()
sh = run_write_read(['plash', 'eval'], inscript.encode()).decode()
# remove a possibly existing trailing newline
# because otherwise this macro would add one
if sh.endswith('\n'):
return sh[:-1]
return sh
|
python
|
def eval_file(file):
'evaluate file content as expressions'
fname = os.path.realpath(os.path.expanduser(file))
with open(fname) as f:
inscript = f.read()
sh = run_write_read(['plash', 'eval'], inscript.encode()).decode()
# remove a possibly existing trailing newline
# because otherwise this macro would add one
if sh.endswith('\n'):
return sh[:-1]
return sh
|
[
"def",
"eval_file",
"(",
"file",
")",
":",
"fname",
"=",
"os",
".",
"path",
".",
"realpath",
"(",
"os",
".",
"path",
".",
"expanduser",
"(",
"file",
")",
")",
"with",
"open",
"(",
"fname",
")",
"as",
"f",
":",
"inscript",
"=",
"f",
".",
"read",
"(",
")",
"sh",
"=",
"run_write_read",
"(",
"[",
"'plash'",
",",
"'eval'",
"]",
",",
"inscript",
".",
"encode",
"(",
")",
")",
".",
"decode",
"(",
")",
"# we remove an possibly existing newline",
"# because else this macros would add one",
"if",
"sh",
".",
"endswith",
"(",
"'\\n'",
")",
":",
"return",
"sh",
"[",
":",
"-",
"1",
"]",
"return",
"sh"
] |
evaluate file content as expressions
|
[
"evaluate",
"file",
"content",
"as",
"expressions"
] |
2ab2bc956e309d5aa6414c80983bfbf29b0ce572
|
https://github.com/ihucos/plash/blob/2ab2bc956e309d5aa6414c80983bfbf29b0ce572/opt/plash/lib/py/plash/macros/common.py#L78-L92
|
13,202
|
ihucos/plash
|
opt/plash/lib/py/plash/macros/common.py
|
eval_string
|
def eval_string(stri):
'evaluate expressions passed as string'
tokens = shlex.split(stri)
return run_write_read(['plash', 'eval'], '\n'.join(tokens).encode()).decode()
|
python
|
def eval_string(stri):
'evaluate expressions passed as string'
tokens = shlex.split(stri)
return run_write_read(['plash', 'eval'], '\n'.join(tokens).encode()).decode()
|
[
"def",
"eval_string",
"(",
"stri",
")",
":",
"tokens",
"=",
"shlex",
".",
"split",
"(",
"stri",
")",
"return",
"run_write_read",
"(",
"[",
"'plash'",
",",
"'eval'",
"]",
",",
"'\\n'",
".",
"join",
"(",
"tokens",
")",
".",
"encode",
"(",
")",
")",
".",
"decode",
"(",
")"
] |
evaluate expressions passed as string
|
[
"evaluate",
"expressions",
"passed",
"as",
"string"
] |
2ab2bc956e309d5aa6414c80983bfbf29b0ce572
|
https://github.com/ihucos/plash/blob/2ab2bc956e309d5aa6414c80983bfbf29b0ce572/opt/plash/lib/py/plash/macros/common.py#L96-L100
|
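The interesting step is `shlex.split`, which tokenizes the quoted command line before the tokens are re-joined one per line for `plash eval`. A standard-library-only illustration of that tokenization:

```python
import shlex

stri = '--from alpine --run "apk add python3"'
tokens = shlex.split(stri)
# tokens == ['--from', 'alpine', '--run', 'apk add python3']
payload = '\n'.join(tokens).encode()   # what eval_string pipes into `plash eval`
print(payload.decode())
```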
13,203
|
ihucos/plash
|
opt/plash/lib/py/plash/macros/common.py
|
eval_stdin
|
def eval_stdin():
'evaluate expressions read from stdin'
cmd = ['plash', 'eval']
p = subprocess.Popen(cmd, stdin=sys.stdin, stdout=sys.stdout)
exit = p.wait()
if exit:
raise subprocess.CalledProcessError(exit, cmd)
|
python
|
def eval_stdin():
'evaluate expressions read from stdin'
cmd = ['plash', 'eval']
p = subprocess.Popen(cmd, stdin=sys.stdin, stdout=sys.stdout)
exit = p.wait()
if exit:
raise subprocess.CalledProcessError(exit, cmd)
|
[
"def",
"eval_stdin",
"(",
")",
":",
"cmd",
"=",
"[",
"'plash'",
",",
"'eval'",
"]",
"p",
"=",
"subprocess",
".",
"Popen",
"(",
"cmd",
",",
"stdin",
"=",
"sys",
".",
"stdin",
",",
"stdout",
"=",
"sys",
".",
"stdout",
")",
"exit",
"=",
"p",
".",
"wait",
"(",
")",
"if",
"exit",
":",
"raise",
"subprocess",
".",
"CalledProcessError",
"(",
"exit",
",",
"cmd",
")"
] |
evaluate expressions read from stdin
|
[
"evaluate",
"expressions",
"read",
"from",
"stdin"
] |
2ab2bc956e309d5aa6414c80983bfbf29b0ce572
|
https://github.com/ihucos/plash/blob/2ab2bc956e309d5aa6414c80983bfbf29b0ce572/opt/plash/lib/py/plash/macros/common.py#L104-L110
|
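The Popen/wait/raise sequence above is what `subprocess.run(..., check=True)` does in a single call. A standalone sketch of the equivalent, using `true`/`false` as stand-in commands (POSIX only):

```python
import subprocess

# `true` exits 0, so no exception is raised.
subprocess.run(['true'], check=True)

try:
    subprocess.run(['false'], check=True)   # non-zero exit status
except subprocess.CalledProcessError as exc:
    print('child failed with exit code', exc.returncode)
```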
13,204
|
ihucos/plash
|
opt/plash/lib/py/plash/macros/froms.py
|
from_map
|
def from_map(map_key):
'use resolved map as image'
image_id = subprocess.check_output(['plash', 'map',
map_key]).decode().strip('\n')
if not image_id:
raise MapDoesNotExist('map {} not found'.format(repr(map_key)))
return hint('image', image_id)
|
python
|
def from_map(map_key):
'use resolved map as image'
image_id = subprocess.check_output(['plash', 'map',
map_key]).decode().strip('\n')
if not image_id:
raise MapDoesNotExist('map {} not found'.format(repr(map_key)))
return hint('image', image_id)
|
[
"def",
"from_map",
"(",
"map_key",
")",
":",
"image_id",
"=",
"subprocess",
".",
"check_output",
"(",
"[",
"'plash'",
",",
"'map'",
",",
"map_key",
"]",
")",
".",
"decode",
"(",
")",
".",
"strip",
"(",
"'\\n'",
")",
"if",
"not",
"image_id",
":",
"raise",
"MapDoesNotExist",
"(",
"'map {} not found'",
".",
"format",
"(",
"repr",
"(",
"map_key",
")",
")",
")",
"return",
"hint",
"(",
"'image'",
",",
"image_id",
")"
] |
use resolved map as image
|
[
"use",
"resolved",
"map",
"as",
"image"
] |
2ab2bc956e309d5aa6414c80983bfbf29b0ce572
|
https://github.com/ihucos/plash/blob/2ab2bc956e309d5aa6414c80983bfbf29b0ce572/opt/plash/lib/py/plash/macros/froms.py#L61-L67
|
13,205
|
dbrgn/drf-dynamic-fields
|
drf_dynamic_fields/__init__.py
|
DynamicFieldsMixin.fields
|
def fields(self):
"""
Filters the fields according to the `fields` query parameter.
A blank `fields` parameter (?fields) will remove all fields. Not passing
`fields` will pass all fields. Individual fields are comma-separated
(?fields=id,name,url,email).
"""
fields = super(DynamicFieldsMixin, self).fields
if not hasattr(self, '_context'):
# We are being called before a request cycle
return fields
# Only filter if this is the root serializer, or if the parent is the
# root serializer with many=True
is_root = self.root == self
parent_is_list_root = self.parent == self.root and getattr(self.parent, 'many', False)
if not (is_root or parent_is_list_root):
return fields
try:
request = self.context['request']
except KeyError:
conf = getattr(settings, 'DRF_DYNAMIC_FIELDS', {})
if conf.get('SUPPRESS_CONTEXT_WARNING', False) is not True:
warnings.warn('Context does not have access to request. '
'See README for more information.')
return fields
# NOTE: drf test framework builds a request object where the query
# parameters are found under the GET attribute.
params = getattr(
request, 'query_params', getattr(request, 'GET', None)
)
if params is None:
warnings.warn('Request object does not contain query parameters')
try:
filter_fields = params.get('fields', None).split(',')
except AttributeError:
filter_fields = None
try:
omit_fields = params.get('omit', None).split(',')
except AttributeError:
omit_fields = []
# Drop any fields that are not specified in the `fields` argument.
existing = set(fields.keys())
if filter_fields is None:
# no fields param given, don't filter.
allowed = existing
else:
allowed = set(filter(None, filter_fields))
# omit fields in the `omit` argument.
omitted = set(filter(None, omit_fields))
for field in existing:
if field not in allowed:
fields.pop(field, None)
if field in omitted:
fields.pop(field, None)
return fields
|
python
|
def fields(self):
"""
Filters the fields according to the `fields` query parameter.
A blank `fields` parameter (?fields) will remove all fields. Not passing
`fields` will pass all fields. Individual fields are comma-separated
(?fields=id,name,url,email).
"""
fields = super(DynamicFieldsMixin, self).fields
if not hasattr(self, '_context'):
# We are being called before a request cycle
return fields
# Only filter if this is the root serializer, or if the parent is the
# root serializer with many=True
is_root = self.root == self
parent_is_list_root = self.parent == self.root and getattr(self.parent, 'many', False)
if not (is_root or parent_is_list_root):
return fields
try:
request = self.context['request']
except KeyError:
conf = getattr(settings, 'DRF_DYNAMIC_FIELDS', {})
if conf.get('SUPPRESS_CONTEXT_WARNING', False) is not True:
warnings.warn('Context does not have access to request. '
'See README for more information.')
return fields
# NOTE: drf test framework builds a request object where the query
# parameters are found under the GET attribute.
params = getattr(
request, 'query_params', getattr(request, 'GET', None)
)
if params is None:
warnings.warn('Request object does not contain query parameters')
try:
filter_fields = params.get('fields', None).split(',')
except AttributeError:
filter_fields = None
try:
omit_fields = params.get('omit', None).split(',')
except AttributeError:
omit_fields = []
# Drop any fields that are not specified in the `fields` argument.
existing = set(fields.keys())
if filter_fields is None:
# no fields param given, don't filter.
allowed = existing
else:
allowed = set(filter(None, filter_fields))
# omit fields in the `omit` argument.
omitted = set(filter(None, omit_fields))
for field in existing:
if field not in allowed:
fields.pop(field, None)
if field in omitted:
fields.pop(field, None)
return fields
|
[
"def",
"fields",
"(",
"self",
")",
":",
"fields",
"=",
"super",
"(",
"DynamicFieldsMixin",
",",
"self",
")",
".",
"fields",
"if",
"not",
"hasattr",
"(",
"self",
",",
"'_context'",
")",
":",
"# We are being called before a request cycle",
"return",
"fields",
"# Only filter if this is the root serializer, or if the parent is the",
"# root serializer with many=True",
"is_root",
"=",
"self",
".",
"root",
"==",
"self",
"parent_is_list_root",
"=",
"self",
".",
"parent",
"==",
"self",
".",
"root",
"and",
"getattr",
"(",
"self",
".",
"parent",
",",
"'many'",
",",
"False",
")",
"if",
"not",
"(",
"is_root",
"or",
"parent_is_list_root",
")",
":",
"return",
"fields",
"try",
":",
"request",
"=",
"self",
".",
"context",
"[",
"'request'",
"]",
"except",
"KeyError",
":",
"conf",
"=",
"getattr",
"(",
"settings",
",",
"'DRF_DYNAMIC_FIELDS'",
",",
"{",
"}",
")",
"if",
"not",
"conf",
".",
"get",
"(",
"'SUPPRESS_CONTEXT_WARNING'",
",",
"False",
")",
"is",
"True",
":",
"warnings",
".",
"warn",
"(",
"'Context does not have access to request. '",
"'See README for more information.'",
")",
"return",
"fields",
"# NOTE: drf test framework builds a request object where the query",
"# parameters are found under the GET attribute.",
"params",
"=",
"getattr",
"(",
"request",
",",
"'query_params'",
",",
"getattr",
"(",
"request",
",",
"'GET'",
",",
"None",
")",
")",
"if",
"params",
"is",
"None",
":",
"warnings",
".",
"warn",
"(",
"'Request object does not contain query paramters'",
")",
"try",
":",
"filter_fields",
"=",
"params",
".",
"get",
"(",
"'fields'",
",",
"None",
")",
".",
"split",
"(",
"','",
")",
"except",
"AttributeError",
":",
"filter_fields",
"=",
"None",
"try",
":",
"omit_fields",
"=",
"params",
".",
"get",
"(",
"'omit'",
",",
"None",
")",
".",
"split",
"(",
"','",
")",
"except",
"AttributeError",
":",
"omit_fields",
"=",
"[",
"]",
"# Drop any fields that are not specified in the `fields` argument.",
"existing",
"=",
"set",
"(",
"fields",
".",
"keys",
"(",
")",
")",
"if",
"filter_fields",
"is",
"None",
":",
"# no fields param given, don't filter.",
"allowed",
"=",
"existing",
"else",
":",
"allowed",
"=",
"set",
"(",
"filter",
"(",
"None",
",",
"filter_fields",
")",
")",
"# omit fields in the `omit` argument.",
"omitted",
"=",
"set",
"(",
"filter",
"(",
"None",
",",
"omit_fields",
")",
")",
"for",
"field",
"in",
"existing",
":",
"if",
"field",
"not",
"in",
"allowed",
":",
"fields",
".",
"pop",
"(",
"field",
",",
"None",
")",
"if",
"field",
"in",
"omitted",
":",
"fields",
".",
"pop",
"(",
"field",
",",
"None",
")",
"return",
"fields"
] |
Filters the fields according to the `fields` query parameter.
A blank `fields` parameter (?fields) will remove all fields. Not passing
`fields` will pass all fields. Individual fields are comma-separated
(?fields=id,name,url,email).
|
[
"Filters",
"the",
"fields",
"according",
"to",
"the",
"fields",
"query",
"parameter",
"."
] |
d24da8bc321462ea6231821d6c3d210b76d4785b
|
https://github.com/dbrgn/drf-dynamic-fields/blob/d24da8bc321462ea6231821d6c3d210b76d4785b/drf_dynamic_fields/__init__.py#L16-L84
|
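Stripped of the DRF plumbing, the filtering boils down to two sets built from the `fields` and `omit` parameters. A runnable distillation (the helper name is ours, not the library's):

```python
def filter_by_params(existing, fields_param, omit_param):
    """Mirrors the allowed/omitted set logic in DynamicFieldsMixin.fields."""
    filter_fields = fields_param.split(',') if fields_param is not None else None
    omit_fields = omit_param.split(',') if omit_param is not None else []
    # No ?fields at all -> allow everything; ?fields= (blank) -> allow nothing.
    allowed = set(existing) if filter_fields is None else set(filter(None, filter_fields))
    omitted = set(filter(None, omit_fields))
    return [f for f in existing if f in allowed and f not in omitted]

print(filter_by_params(['id', 'name', 'email'], 'id,name', None))  # ['id', 'name']
print(filter_by_params(['id', 'name', 'email'], None, 'email'))    # ['id', 'name']
print(filter_by_params(['id', 'name', 'email'], '', None))         # []
```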
13,206
|
aio-libs/aiohttp_admin
|
aiohttp_admin/admin.py
|
setup_admin_on_rest_handlers
|
def setup_admin_on_rest_handlers(admin, admin_handler):
"""
Initialize routes.
"""
add_route = admin.router.add_route
add_static = admin.router.add_static
static_folder = str(PROJ_ROOT / 'static')
a = admin_handler
add_route('GET', '', a.index_page, name='admin.index')
add_route('POST', '/token', a.token, name='admin.token')
add_static('/static', path=static_folder, name='admin.static')
add_route('DELETE', '/logout', a.logout, name='admin.logout')
|
python
|
def setup_admin_on_rest_handlers(admin, admin_handler):
"""
Initialize routes.
"""
add_route = admin.router.add_route
add_static = admin.router.add_static
static_folder = str(PROJ_ROOT / 'static')
a = admin_handler
add_route('GET', '', a.index_page, name='admin.index')
add_route('POST', '/token', a.token, name='admin.token')
add_static('/static', path=static_folder, name='admin.static')
add_route('DELETE', '/logout', a.logout, name='admin.logout')
|
[
"def",
"setup_admin_on_rest_handlers",
"(",
"admin",
",",
"admin_handler",
")",
":",
"add_route",
"=",
"admin",
".",
"router",
".",
"add_route",
"add_static",
"=",
"admin",
".",
"router",
".",
"add_static",
"static_folder",
"=",
"str",
"(",
"PROJ_ROOT",
"/",
"'static'",
")",
"a",
"=",
"admin_handler",
"add_route",
"(",
"'GET'",
",",
"''",
",",
"a",
".",
"index_page",
",",
"name",
"=",
"'admin.index'",
")",
"add_route",
"(",
"'POST'",
",",
"'/token'",
",",
"a",
".",
"token",
",",
"name",
"=",
"'admin.token'",
")",
"add_static",
"(",
"'/static'",
",",
"path",
"=",
"static_folder",
",",
"name",
"=",
"'admin.static'",
")",
"add_route",
"(",
"'DELETE'",
",",
"'/logout'",
",",
"a",
".",
"logout",
",",
"name",
"=",
"'admin.logout'",
")"
] |
Initialize routes.
|
[
"Initialize",
"routes",
"."
] |
82e5032ef14ae8cc3c594fdd45d6c977aab1baad
|
https://github.com/aio-libs/aiohttp_admin/blob/82e5032ef14ae8cc3c594fdd45d6c977aab1baad/aiohttp_admin/admin.py#L148-L160
|
13,207
|
aio-libs/aiohttp_admin
|
aiohttp_admin/admin.py
|
AdminOnRestHandler.index_page
|
async def index_page(self, request):
"""
Return index page with initial state for admin
"""
context = {"initial_state": self.schema.to_json()}
return render_template(
self.template,
request,
context,
app_key=TEMPLATE_APP_KEY,
)
|
python
|
async def index_page(self, request):
"""
Return index page with initial state for admin
"""
context = {"initial_state": self.schema.to_json()}
return render_template(
self.template,
request,
context,
app_key=TEMPLATE_APP_KEY,
)
|
[
"async",
"def",
"index_page",
"(",
"self",
",",
"request",
")",
":",
"context",
"=",
"{",
"\"initial_state\"",
":",
"self",
".",
"schema",
".",
"to_json",
"(",
")",
"}",
"return",
"render_template",
"(",
"self",
".",
"template",
",",
"request",
",",
"context",
",",
"app_key",
"=",
"TEMPLATE_APP_KEY",
",",
")"
] |
Return index page with initial state for admin
|
[
"Return",
"index",
"page",
"with",
"initial",
"state",
"for",
"admin"
] |
82e5032ef14ae8cc3c594fdd45d6c977aab1baad
|
https://github.com/aio-libs/aiohttp_admin/blob/82e5032ef14ae8cc3c594fdd45d6c977aab1baad/aiohttp_admin/admin.py#L105-L116
|
13,208
|
aio-libs/aiohttp_admin
|
aiohttp_admin/admin.py
|
AdminOnRestHandler.logout
|
async def logout(self, request):
"""
Simple handler for logout
"""
if "Authorization" not in request.headers:
msg = "Auth header is not present, can not destroy token"
raise JsonValidaitonError(msg)
response = json_response()
await forget(request, response)
return response
|
python
|
async def logout(self, request):
"""
Simple handler for logout
"""
if "Authorization" not in request.headers:
msg = "Auth header is not present, can not destroy token"
raise JsonValidaitonError(msg)
response = json_response()
await forget(request, response)
return response
|
[
"async",
"def",
"logout",
"(",
"self",
",",
"request",
")",
":",
"if",
"\"Authorization\"",
"not",
"in",
"request",
".",
"headers",
":",
"msg",
"=",
"\"Auth header is not present, can not destroy token\"",
"raise",
"JsonValidaitonError",
"(",
"msg",
")",
"response",
"=",
"json_response",
"(",
")",
"await",
"forget",
"(",
"request",
",",
"response",
")",
"return",
"response"
] |
Simple handler for logout
|
[
"Simple",
"handler",
"for",
"logout"
] |
82e5032ef14ae8cc3c594fdd45d6c977aab1baad
|
https://github.com/aio-libs/aiohttp_admin/blob/82e5032ef14ae8cc3c594fdd45d6c977aab1baad/aiohttp_admin/admin.py#L134-L145
|
13,209
|
aio-libs/aiohttp_admin
|
aiohttp_admin/utils.py
|
validate_query_structure
|
def validate_query_structure(query):
"""Validate query arguments in list request.
:param query: mapping with pagination and filtering information
"""
query_dict = dict(query)
filters = query_dict.pop('_filters', None)
if filters:
try:
f = json.loads(filters)
except ValueError:
msg = '_filters field can not be serialized'
raise JsonValidaitonError(msg)
else:
query_dict['_filters'] = f
try:
q = ListQuery(query_dict)
except t.DataError as exc:
msg = '_filters query invalid'
raise JsonValidaitonError(msg, **as_dict(exc))
return q
|
python
|
def validate_query_structure(query):
"""Validate query arguments in list request.
:param query: mapping with pagination and filtering information
"""
query_dict = dict(query)
filters = query_dict.pop('_filters', None)
if filters:
try:
f = json.loads(filters)
except ValueError:
msg = '_filters field can not be serialized'
raise JsonValidaitonError(msg)
else:
query_dict['_filters'] = f
try:
q = ListQuery(query_dict)
except t.DataError as exc:
msg = '_filters query invalid'
raise JsonValidaitonError(msg, **as_dict(exc))
return q
|
[
"def",
"validate_query_structure",
"(",
"query",
")",
":",
"query_dict",
"=",
"dict",
"(",
"query",
")",
"filters",
"=",
"query_dict",
".",
"pop",
"(",
"'_filters'",
",",
"None",
")",
"if",
"filters",
":",
"try",
":",
"f",
"=",
"json",
".",
"loads",
"(",
"filters",
")",
"except",
"ValueError",
":",
"msg",
"=",
"'_filters field can not be serialized'",
"raise",
"JsonValidaitonError",
"(",
"msg",
")",
"else",
":",
"query_dict",
"[",
"'_filters'",
"]",
"=",
"f",
"try",
":",
"q",
"=",
"ListQuery",
"(",
"query_dict",
")",
"except",
"t",
".",
"DataError",
"as",
"exc",
":",
"msg",
"=",
"'_filters query invalid'",
"raise",
"JsonValidaitonError",
"(",
"msg",
",",
"*",
"*",
"as_dict",
"(",
"exc",
")",
")",
"return",
"q"
] |
Validate query arguments in list request.
:param query: mapping with pagination and filtering information
|
[
"Validate",
"query",
"arguments",
"in",
"list",
"request",
"."
] |
82e5032ef14ae8cc3c594fdd45d6c977aab1baad
|
https://github.com/aio-libs/aiohttp_admin/blob/82e5032ef14ae8cc3c594fdd45d6c977aab1baad/aiohttp_admin/utils.py#L82-L103
|
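The core move is popping `_filters` out of the flat query mapping and JSON-decoding it back into a nested structure. A minimal sketch with the trafaret `ListQuery` validation omitted (query keys are illustrative):

```python
import json

# Illustrative aiohttp-style query: a flat mapping of strings, with
# `_filters` arriving JSON-encoded inside the query string.
query = {'_page': '1', '_perPage': '25', '_filters': '{"name": "bob"}'}

query_dict = dict(query)
filters = query_dict.pop('_filters', None)
if filters:
    query_dict['_filters'] = json.loads(filters)   # ValueError on bad JSON
print(query_dict)
# {'_page': '1', '_perPage': '25', '_filters': {'name': 'bob'}}
```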
13,210
|
aio-libs/aiohttp_admin
|
aiohttp_admin/contrib/admin.py
|
Schema.to_json
|
def to_json(self):
"""
Prepare data for the initial state of the admin-on-rest
"""
endpoints = []
for endpoint in self.endpoints:
list_fields = endpoint.fields
resource_type = endpoint.Meta.resource_type
table = endpoint.Meta.table
data = endpoint.to_dict()
data['fields'] = resource_type.get_type_of_fields(
list_fields,
table,
)
endpoints.append(data)
data = {
'title': self.title,
'endpoints': sorted(endpoints, key=lambda x: x['name']),
}
return json.dumps(data)
|
python
|
def to_json(self):
"""
Prepare data for the initial state of the admin-on-rest
"""
endpoints = []
for endpoint in self.endpoints:
list_fields = endpoint.fields
resource_type = endpoint.Meta.resource_type
table = endpoint.Meta.table
data = endpoint.to_dict()
data['fields'] = resource_type.get_type_of_fields(
list_fields,
table,
)
endpoints.append(data)
data = {
'title': self.title,
'endpoints': sorted(endpoints, key=lambda x: x['name']),
}
return json.dumps(data)
|
[
"def",
"to_json",
"(",
"self",
")",
":",
"endpoints",
"=",
"[",
"]",
"for",
"endpoint",
"in",
"self",
".",
"endpoints",
":",
"list_fields",
"=",
"endpoint",
".",
"fields",
"resource_type",
"=",
"endpoint",
".",
"Meta",
".",
"resource_type",
"table",
"=",
"endpoint",
".",
"Meta",
".",
"table",
"data",
"=",
"endpoint",
".",
"to_dict",
"(",
")",
"data",
"[",
"'fields'",
"]",
"=",
"resource_type",
".",
"get_type_of_fields",
"(",
"list_fields",
",",
"table",
",",
")",
"endpoints",
".",
"append",
"(",
"data",
")",
"data",
"=",
"{",
"'title'",
":",
"self",
".",
"title",
",",
"'endpoints'",
":",
"sorted",
"(",
"endpoints",
",",
"key",
"=",
"lambda",
"x",
":",
"x",
"[",
"'name'",
"]",
")",
",",
"}",
"return",
"json",
".",
"dumps",
"(",
"data",
")"
] |
Prepare data for the initial state of the admin-on-rest
|
[
"Prepare",
"data",
"for",
"the",
"initial",
"state",
"of",
"the",
"admin",
"-",
"on",
"-",
"rest"
] |
82e5032ef14ae8cc3c594fdd45d6c977aab1baad
|
https://github.com/aio-libs/aiohttp_admin/blob/82e5032ef14ae8cc3c594fdd45d6c977aab1baad/aiohttp_admin/contrib/admin.py#L30-L52
|
13,211
|
aio-libs/aiohttp_admin
|
aiohttp_admin/contrib/admin.py
|
Schema.resources
|
def resources(self):
"""
Return list of all registered resources.
"""
resources = []
for endpoint in self.endpoints:
resource_type = endpoint.Meta.resource_type
table = endpoint.Meta.table
url = endpoint.name
resources.append((resource_type, {'table': table, 'url': url}))
return resources
|
python
|
def resources(self):
"""
Return list of all registered resources.
"""
resources = []
for endpoint in self.endpoints:
resource_type = endpoint.Meta.resource_type
table = endpoint.Meta.table
url = endpoint.name
resources.append((resource_type, {'table': table, 'url': url}))
return resources
|
[
"def",
"resources",
"(",
"self",
")",
":",
"resources",
"=",
"[",
"]",
"for",
"endpoint",
"in",
"self",
".",
"endpoints",
":",
"resource_type",
"=",
"endpoint",
".",
"Meta",
".",
"resource_type",
"table",
"=",
"endpoint",
".",
"Meta",
".",
"table",
"url",
"=",
"endpoint",
".",
"name",
"resources",
".",
"append",
"(",
"(",
"resource_type",
",",
"{",
"'table'",
":",
"table",
",",
"'url'",
":",
"url",
"}",
")",
")",
"return",
"resources"
] |
Return list of all registered resources.
|
[
"Return",
"list",
"of",
"all",
"registered",
"resources",
"."
] |
82e5032ef14ae8cc3c594fdd45d6c977aab1baad
|
https://github.com/aio-libs/aiohttp_admin/blob/82e5032ef14ae8cc3c594fdd45d6c977aab1baad/aiohttp_admin/contrib/admin.py#L55-L68
|
13,212
|
aio-libs/aiohttp_admin
|
aiohttp_admin/backends/sa.py
|
PGResource.get_type_of_fields
|
def get_type_of_fields(fields, table):
"""
Return data types of `fields` that are in `table`. If `fields` is
empty, fall back to the table's primary key.
:param fields: list - list of fields that need to be returned
:param table: sa.Table - the current table
:return: dict - mapping `field_name` to `field_type`
"""
if not fields:
fields = table.primary_key
actual_fields = [
field for field in table.c.items() if field[0] in fields
]
data_type_fields = {
name: FIELD_TYPES.get(type(field_type.type), rc.TEXT_FIELD.value)
for name, field_type in actual_fields
}
return data_type_fields
|
python
|
def get_type_of_fields(fields, table):
"""
Return data types of `fields` that are in `table`. If `fields` is
empty, fall back to the table's primary key.
:param fields: list - list of fields that need to be returned
:param table: sa.Table - the current table
:return: dict - mapping `field_name` to `field_type`
"""
if not fields:
fields = table.primary_key
actual_fields = [
field for field in table.c.items() if field[0] in fields
]
data_type_fields = {
name: FIELD_TYPES.get(type(field_type.type), rc.TEXT_FIELD.value)
for name, field_type in actual_fields
}
return data_type_fields
|
[
"def",
"get_type_of_fields",
"(",
"fields",
",",
"table",
")",
":",
"if",
"not",
"fields",
":",
"fields",
"=",
"table",
".",
"primary_key",
"actual_fields",
"=",
"[",
"field",
"for",
"field",
"in",
"table",
".",
"c",
".",
"items",
"(",
")",
"if",
"field",
"[",
"0",
"]",
"in",
"fields",
"]",
"data_type_fields",
"=",
"{",
"name",
":",
"FIELD_TYPES",
".",
"get",
"(",
"type",
"(",
"field_type",
".",
"type",
")",
",",
"rc",
".",
"TEXT_FIELD",
".",
"value",
")",
"for",
"name",
",",
"field_type",
"in",
"actual_fields",
"}",
"return",
"data_type_fields"
] |
Return data types of `fields` that are in `table`. If `fields` is
empty, fall back to the table's primary key.
:param fields: list - list of fields that need to be returned
:param table: sa.Table - the current table
:return: dict - mapping `field_name` to `field_type`
|
[
"Return",
"data",
"types",
"of",
"fields",
"that",
"are",
"in",
"table",
".",
"If",
"a",
"given",
"parameter",
"is",
"empty",
"return",
"primary",
"key",
"."
] |
82e5032ef14ae8cc3c594fdd45d6c977aab1baad
|
https://github.com/aio-libs/aiohttp_admin/blob/82e5032ef14ae8cc3c594fdd45d6c977aab1baad/aiohttp_admin/backends/sa.py#L58-L80
|
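A hedged sketch of the `table.c.items()` iteration with SQLAlchemy Core; `FIELD_TYPES` and `rc` belong to the aiohttp_admin backend and are not reproduced here:

```python
import sqlalchemy as sa

metadata = sa.MetaData()
post = sa.Table(
    'post', metadata,
    sa.Column('id', sa.Integer, primary_key=True),
    sa.Column('title', sa.String(200)),
    sa.Column('views', sa.Integer),
)

# table.c.items() yields (name, Column) pairs; get_type_of_fields maps each
# column's type class through FIELD_TYPES, defaulting to a text field.
for name, column in post.c.items():
    print(name, type(column.type).__name__)
# id Integer / title String / views Integer
```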
13,213
|
aio-libs/aiohttp_admin
|
aiohttp_admin/backends/sa.py
|
PGResource.get_type_for_inputs
|
def get_type_for_inputs(table):
"""
Return information about the table's fields as a list of dictionaries.
:param table: sa.Table - the current table
:return: list - list of dictionaries, one per column
"""
return [
dict(
type=INPUT_TYPES.get(
type(field_type.type), rc.TEXT_INPUT.value
),
name=name,
isPrimaryKey=(name in table.primary_key),
props=None,
) for name, field_type in table.c.items()
]
|
python
|
def get_type_for_inputs(table):
"""
Return information about the table's fields as a list of dictionaries.
:param table: sa.Table - the current table
:return: list - list of dictionaries, one per column
"""
return [
dict(
type=INPUT_TYPES.get(
type(field_type.type), rc.TEXT_INPUT.value
),
name=name,
isPrimaryKey=(name in table.primary_key),
props=None,
) for name, field_type in table.c.items()
]
|
[
"def",
"get_type_for_inputs",
"(",
"table",
")",
":",
"return",
"[",
"dict",
"(",
"type",
"=",
"INPUT_TYPES",
".",
"get",
"(",
"type",
"(",
"field_type",
".",
"type",
")",
",",
"rc",
".",
"TEXT_INPUT",
".",
"value",
")",
",",
"name",
"=",
"name",
",",
"isPrimaryKey",
"=",
"(",
"name",
"in",
"table",
".",
"primary_key",
")",
",",
"props",
"=",
"None",
",",
")",
"for",
"name",
",",
"field_type",
"in",
"table",
".",
"c",
".",
"items",
"(",
")",
"]"
] |
Return information about the table's fields as a list of dictionaries.
:param table: sa.Table - the current table
:return: list - list of dictionaries, one per column
|
[
"Return",
"information",
"about",
"table",
"s",
"fields",
"in",
"dictionary",
"type",
"."
] |
82e5032ef14ae8cc3c594fdd45d6c977aab1baad
|
https://github.com/aio-libs/aiohttp_admin/blob/82e5032ef14ae8cc3c594fdd45d6c977aab1baad/aiohttp_admin/backends/sa.py#L83-L99
|
13,214
|
aio-libs/aiohttp_admin
|
aiohttp_admin/__init__.py
|
_setup
|
def _setup(app, *, schema, title=None, app_key=APP_KEY, db=None):
"""Initialize the admin-on-rest admin"""
admin = web.Application(loop=app.loop)
app[app_key] = admin
loader = jinja2.FileSystemLoader([TEMPLATES_ROOT, ])
aiohttp_jinja2.setup(admin, loader=loader, app_key=TEMPLATE_APP_KEY)
if title:
schema.title = title
resources = [
init(db, info['table'], url=info['url'])
for init, info in schema.resources
]
admin_handler = AdminOnRestHandler(
admin,
resources=resources,
loop=app.loop,
schema=schema,
)
admin['admin_handler'] = admin_handler
setup_admin_on_rest_handlers(admin, admin_handler)
return admin
|
python
|
def _setup(app, *, schema, title=None, app_key=APP_KEY, db=None):
"""Initialize the admin-on-rest admin"""
admin = web.Application(loop=app.loop)
app[app_key] = admin
loader = jinja2.FileSystemLoader([TEMPLATES_ROOT, ])
aiohttp_jinja2.setup(admin, loader=loader, app_key=TEMPLATE_APP_KEY)
if title:
schema.title = title
resources = [
init(db, info['table'], url=info['url'])
for init, info in schema.resources
]
admin_handler = AdminOnRestHandler(
admin,
resources=resources,
loop=app.loop,
schema=schema,
)
admin['admin_handler'] = admin_handler
setup_admin_on_rest_handlers(admin, admin_handler)
return admin
|
[
"def",
"_setup",
"(",
"app",
",",
"*",
",",
"schema",
",",
"title",
"=",
"None",
",",
"app_key",
"=",
"APP_KEY",
",",
"db",
"=",
"None",
")",
":",
"admin",
"=",
"web",
".",
"Application",
"(",
"loop",
"=",
"app",
".",
"loop",
")",
"app",
"[",
"app_key",
"]",
"=",
"admin",
"loader",
"=",
"jinja2",
".",
"FileSystemLoader",
"(",
"[",
"TEMPLATES_ROOT",
",",
"]",
")",
"aiohttp_jinja2",
".",
"setup",
"(",
"admin",
",",
"loader",
"=",
"loader",
",",
"app_key",
"=",
"TEMPLATE_APP_KEY",
")",
"if",
"title",
":",
"schema",
".",
"title",
"=",
"title",
"resources",
"=",
"[",
"init",
"(",
"db",
",",
"info",
"[",
"'table'",
"]",
",",
"url",
"=",
"info",
"[",
"'url'",
"]",
")",
"for",
"init",
",",
"info",
"in",
"schema",
".",
"resources",
"]",
"admin_handler",
"=",
"AdminOnRestHandler",
"(",
"admin",
",",
"resources",
"=",
"resources",
",",
"loop",
"=",
"app",
".",
"loop",
",",
"schema",
"=",
"schema",
",",
")",
"admin",
"[",
"'admin_handler'",
"]",
"=",
"admin_handler",
"setup_admin_on_rest_handlers",
"(",
"admin",
",",
"admin_handler",
")",
"return",
"admin"
] |
Initialize the admin-on-rest admin
|
[
"Initialize",
"the",
"admin",
"-",
"on",
"-",
"rest",
"admin"
] |
82e5032ef14ae8cc3c594fdd45d6c977aab1baad
|
https://github.com/aio-libs/aiohttp_admin/blob/82e5032ef14ae8cc3c594fdd45d6c977aab1baad/aiohttp_admin/__init__.py#L44-L70
|
13,215
|
aio-libs/aiohttp_admin
|
aiohttp_admin/contrib/models.py
|
ModelAdmin.to_dict
|
def to_dict(self):
"""
Return dict with the all base information about the instance.
"""
data = {
"name": self.name,
"canEdit": self.can_edit,
"canCreate": self.can_create,
"canDelete": self.can_delete,
"perPage": self.per_page,
"showPage": self.generate_data_for_show_page(),
"editPage": self.generate_data_for_edit_page(),
"createPage": self.generate_data_for_create_page(),
}
return data
|
python
|
def to_dict(self):
"""
Return dict with the all base information about the instance.
"""
data = {
"name": self.name,
"canEdit": self.can_edit,
"canCreate": self.can_create,
"canDelete": self.can_delete,
"perPage": self.per_page,
"showPage": self.generate_data_for_show_page(),
"editPage": self.generate_data_for_edit_page(),
"createPage": self.generate_data_for_create_page(),
}
return data
|
[
"def",
"to_dict",
"(",
"self",
")",
":",
"data",
"=",
"{",
"\"name\"",
":",
"self",
".",
"name",
",",
"\"canEdit\"",
":",
"self",
".",
"can_edit",
",",
"\"canCreate\"",
":",
"self",
".",
"can_create",
",",
"\"canDelete\"",
":",
"self",
".",
"can_delete",
",",
"\"perPage\"",
":",
"self",
".",
"per_page",
",",
"\"showPage\"",
":",
"self",
".",
"generate_data_for_show_page",
"(",
")",
",",
"\"editPage\"",
":",
"self",
".",
"generate_data_for_edit_page",
"(",
")",
",",
"\"createPage\"",
":",
"self",
".",
"generate_data_for_create_page",
"(",
")",
",",
"}",
"return",
"data"
] |
Return dict with the all base information about the instance.
|
[
"Return",
"dict",
"with",
"the",
"all",
"base",
"information",
"about",
"the",
"instance",
"."
] |
82e5032ef14ae8cc3c594fdd45d6c977aab1baad
|
https://github.com/aio-libs/aiohttp_admin/blob/82e5032ef14ae8cc3c594fdd45d6c977aab1baad/aiohttp_admin/contrib/models.py#L30-L45
|
13,216
|
aio-libs/aiohttp_admin
|
aiohttp_admin/contrib/models.py
|
ModelAdmin.generate_data_for_edit_page
|
def generate_data_for_edit_page(self):
"""
Generate a custom dictionary representation of the table's fields
if an edit form exists, else use the default representation.
:return: dict
"""
if not self.can_edit:
return {}
if self.edit_form:
return self.edit_form.to_dict()
return self.generate_simple_data_page()
|
python
|
def generate_data_for_edit_page(self):
"""
Generate a custom dictionary representation of the table's fields
if an edit form exists, else use the default representation.
:return: dict
"""
if not self.can_edit:
return {}
if self.edit_form:
return self.edit_form.to_dict()
return self.generate_simple_data_page()
|
[
"def",
"generate_data_for_edit_page",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"can_edit",
":",
"return",
"{",
"}",
"if",
"self",
".",
"edit_form",
":",
"return",
"self",
".",
"edit_form",
".",
"to_dict",
"(",
")",
"return",
"self",
".",
"generate_simple_data_page",
"(",
")"
] |
Generate a custom dictionary representation of the table's fields
if an edit form exists, else use the default representation.
:return: dict
|
[
"Generate",
"a",
"custom",
"representation",
"of",
"table",
"s",
"fields",
"in",
"dictionary",
"type",
"if",
"exist",
"edit",
"form",
"else",
"use",
"default",
"representation",
"."
] |
82e5032ef14ae8cc3c594fdd45d6c977aab1baad
|
https://github.com/aio-libs/aiohttp_admin/blob/82e5032ef14ae8cc3c594fdd45d6c977aab1baad/aiohttp_admin/contrib/models.py#L55-L69
|
13,217
|
aio-libs/aiohttp_admin
|
aiohttp_admin/contrib/models.py
|
ModelAdmin.generate_data_for_create_page
|
def generate_data_for_create_page(self):
"""
Generate a custom dictionary representation of the table's fields
if a create form exists, else use the default representation.
:return: dict
"""
if not self.can_create:
return {}
if self.create_form:
return self.create_form.to_dict()
return self.generate_simple_data_page()
|
python
|
def generate_data_for_create_page(self):
"""
Generate a custom dictionary representation of the table's fields
if a create form exists, else use the default representation.
:return: dict
"""
if not self.can_create:
return {}
if self.create_form:
return self.create_form.to_dict()
return self.generate_simple_data_page()
|
[
"def",
"generate_data_for_create_page",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"can_create",
":",
"return",
"{",
"}",
"if",
"self",
".",
"create_form",
":",
"return",
"self",
".",
"create_form",
".",
"to_dict",
"(",
")",
"return",
"self",
".",
"generate_simple_data_page",
"(",
")"
] |
Generate a custom dictionary representation of the table's fields
if a create form exists, else use the default representation.
:return: dict
|
[
"Generate",
"a",
"custom",
"representation",
"of",
"table",
"s",
"fields",
"in",
"dictionary",
"type",
"if",
"exist",
"create",
"form",
"else",
"use",
"default",
"representation",
"."
] |
82e5032ef14ae8cc3c594fdd45d6c977aab1baad
|
https://github.com/aio-libs/aiohttp_admin/blob/82e5032ef14ae8cc3c594fdd45d6c977aab1baad/aiohttp_admin/contrib/models.py#L83-L96
|
13,218
|
aio-libs/aiohttp_admin
|
demos/motortwit/motortwit/views.py
|
SiteHandler.register
|
async def register(self, request):
"""Registers the user."""
session = await get_session(request)
user_id = session.get('user_id')
if user_id:
return redirect(request, 'timeline')
error = None
form = None
if request.method == 'POST':
form = await request.post()
user_id = await db.get_user_id(self.mongo.user, form['username'])
if not form['username']:
error = 'You have to enter a username'
elif not form['email'] or '@' not in form['email']:
error = 'You have to enter a valid email address'
elif not form['password']:
error = 'You have to enter a password'
elif form['password'] != form['password2']:
error = 'The two passwords do not match'
elif user_id is not None:
error = 'The username is already taken'
else:
await self.mongo.user.insert(
{'username': form['username'],
'email': form['email'],
'pw_hash': generate_password_hash(form['password'])})
return redirect(request, 'login')
return {"error": error, "form": form}
|
python
|
async def register(self, request):
"""Registers the user."""
session = await get_session(request)
user_id = session.get('user_id')
if user_id:
return redirect(request, 'timeline')
error = None
form = None
if request.method == 'POST':
form = await request.post()
user_id = await db.get_user_id(self.mongo.user, form['username'])
if not form['username']:
error = 'You have to enter a username'
elif not form['email'] or '@' not in form['email']:
error = 'You have to enter a valid email address'
elif not form['password']:
error = 'You have to enter a password'
elif form['password'] != form['password2']:
error = 'The two passwords do not match'
elif user_id is not None:
error = 'The username is already taken'
else:
await self.mongo.user.insert(
{'username': form['username'],
'email': form['email'],
'pw_hash': generate_password_hash(form['password'])})
return redirect(request, 'login')
return {"error": error, "form": form}
|
[
"async",
"def",
"register",
"(",
"self",
",",
"request",
")",
":",
"session",
"=",
"await",
"get_session",
"(",
"request",
")",
"user_id",
"=",
"session",
".",
"get",
"(",
"'user_id'",
")",
"if",
"user_id",
":",
"return",
"redirect",
"(",
"request",
",",
"'timeline'",
")",
"error",
"=",
"None",
"form",
"=",
"None",
"if",
"request",
".",
"method",
"==",
"'POST'",
":",
"form",
"=",
"await",
"request",
".",
"post",
"(",
")",
"user_id",
"=",
"await",
"db",
".",
"get_user_id",
"(",
"self",
".",
"mongo",
".",
"user",
",",
"form",
"[",
"'username'",
"]",
")",
"if",
"not",
"form",
"[",
"'username'",
"]",
":",
"error",
"=",
"'You have to enter a username'",
"elif",
"not",
"form",
"[",
"'email'",
"]",
"or",
"'@'",
"not",
"in",
"form",
"[",
"'email'",
"]",
":",
"error",
"=",
"'You have to enter a valid email address'",
"elif",
"not",
"form",
"[",
"'password'",
"]",
":",
"error",
"=",
"'You have to enter a password'",
"elif",
"form",
"[",
"'password'",
"]",
"!=",
"form",
"[",
"'password2'",
"]",
":",
"error",
"=",
"'The two passwords do not match'",
"elif",
"user_id",
"is",
"not",
"None",
":",
"error",
"=",
"'The username is already taken'",
"else",
":",
"await",
"self",
".",
"mongo",
".",
"user",
".",
"insert",
"(",
"{",
"'username'",
":",
"form",
"[",
"'username'",
"]",
",",
"'email'",
":",
"form",
"[",
"'email'",
"]",
",",
"'pw_hash'",
":",
"generate_password_hash",
"(",
"form",
"[",
"'password'",
"]",
")",
"}",
")",
"return",
"redirect",
"(",
"request",
",",
"'login'",
")",
"return",
"{",
"\"error\"",
":",
"error",
",",
"\"form\"",
":",
"form",
"}"
] |
Registers the user.
|
[
"Registers",
"the",
"user",
"."
] |
82e5032ef14ae8cc3c594fdd45d6c977aab1baad
|
https://github.com/aio-libs/aiohttp_admin/blob/82e5032ef14ae8cc3c594fdd45d6c977aab1baad/demos/motortwit/motortwit/views.py#L116-L145
|
13,219
|
aio-libs/aiohttp_admin
|
demos/motortwit/motortwit/views.py
|
SiteHandler.follow_user
|
async def follow_user(self, request):
"""Adds the current user as follower of the given user."""
username = request.match_info['username']
session = await get_session(request)
user_id = session.get('user_id')
if not user_id:
raise web.HTTPNotAuthorized()
whom_id = await db.get_user_id(self.mongo.user, username)
if whom_id is None:
raise web.HTTPFound()
await self.mongo.follower.update(
{'who_id': ObjectId(user_id)},
{'$push': {'whom_id': whom_id}}, upsert=True)
return redirect(request, 'user_timeline', parts={"username": username})
|
python
|
async def follow_user(self, request):
"""Adds the current user as follower of the given user."""
username = request.match_info['username']
session = await get_session(request)
user_id = session.get('user_id')
if not user_id:
raise web.HTTPNotAuthorized()
whom_id = await db.get_user_id(self.mongo.user, username)
if whom_id is None:
raise web.HTTPFound()
await self.mongo.follower.update(
{'who_id': ObjectId(user_id)},
{'$push': {'whom_id': whom_id}}, upsert=True)
return redirect(request, 'user_timeline', parts={"username": username})
|
[
"async",
"def",
"follow_user",
"(",
"self",
",",
"request",
")",
":",
"username",
"=",
"request",
".",
"match_info",
"[",
"'username'",
"]",
"session",
"=",
"await",
"get_session",
"(",
"request",
")",
"user_id",
"=",
"session",
".",
"get",
"(",
"'user_id'",
")",
"if",
"not",
"user_id",
":",
"raise",
"web",
".",
"HTTPNotAuthorized",
"(",
")",
"whom_id",
"=",
"await",
"db",
".",
"get_user_id",
"(",
"self",
".",
"mongo",
".",
"user",
",",
"username",
")",
"if",
"whom_id",
"is",
"None",
":",
"raise",
"web",
".",
"HTTPFound",
"(",
")",
"await",
"self",
".",
"mongo",
".",
"follower",
".",
"update",
"(",
"{",
"'who_id'",
":",
"ObjectId",
"(",
"user_id",
")",
"}",
",",
"{",
"'$push'",
":",
"{",
"'whom_id'",
":",
"whom_id",
"}",
"}",
",",
"upsert",
"=",
"True",
")",
"return",
"redirect",
"(",
"request",
",",
"'user_timeline'",
",",
"parts",
"=",
"{",
"\"username\"",
":",
"username",
"}",
")"
] |
Adds the current user as follower of the given user.
|
[
"Adds",
"the",
"current",
"user",
"as",
"follower",
"of",
"the",
"given",
"user",
"."
] |
82e5032ef14ae8cc3c594fdd45d6c977aab1baad
|
https://github.com/aio-libs/aiohttp_admin/blob/82e5032ef14ae8cc3c594fdd45d6c977aab1baad/demos/motortwit/motortwit/views.py#L147-L165
|
13,220
|
aio-libs/aiohttp_admin
|
demos/motortwit/motortwit/views.py
|
SiteHandler.add_message
|
async def add_message(self, request):
"""Registers a new message for the user."""
session = await get_session(request)
user_id = session.get('user_id')
if not user_id:
raise web.HTTPNotAuthorized()
form = await request.post()
if form.get('text'):
user = await self.mongo.user.find_one(
{'_id': ObjectId(session['user_id'])},
{'email': 1, 'username': 1})
await self.mongo.message.insert(
{'author_id': ObjectId(user_id),
'email': user['email'],
'username': user['username'],
'text': form['text'],
'pub_date': datetime.datetime.utcnow()})
return redirect(request, 'timeline')
|
python
|
async def add_message(self, request):
"""Registers a new message for the user."""
session = await get_session(request)
user_id = session.get('user_id')
if not user_id:
raise web.HTTPNotAuthorized()
form = await request.post()
if form.get('text'):
user = await self.mongo.user.find_one(
{'_id': ObjectId(session['user_id'])},
{'email': 1, 'username': 1})
await self.mongo.message.insert(
{'author_id': ObjectId(user_id),
'email': user['email'],
'username': user['username'],
'text': form['text'],
'pub_date': datetime.datetime.utcnow()})
return redirect(request, 'timeline')
|
[
"async",
"def",
"add_message",
"(",
"self",
",",
"request",
")",
":",
"session",
"=",
"await",
"get_session",
"(",
"request",
")",
"user_id",
"=",
"session",
".",
"get",
"(",
"'user_id'",
")",
"if",
"not",
"user_id",
":",
"raise",
"web",
".",
"HTTPNotAuthorized",
"(",
")",
"form",
"=",
"await",
"request",
".",
"post",
"(",
")",
"if",
"form",
".",
"get",
"(",
"'text'",
")",
":",
"user",
"=",
"await",
"self",
".",
"mongo",
".",
"user",
".",
"find_one",
"(",
"{",
"'_id'",
":",
"ObjectId",
"(",
"session",
"[",
"'user_id'",
"]",
")",
"}",
",",
"{",
"'email'",
":",
"1",
",",
"'username'",
":",
"1",
"}",
")",
"await",
"self",
".",
"mongo",
".",
"message",
".",
"insert",
"(",
"{",
"'author_id'",
":",
"ObjectId",
"(",
"user_id",
")",
",",
"'email'",
":",
"user",
"[",
"'email'",
"]",
",",
"'username'",
":",
"user",
"[",
"'username'",
"]",
",",
"'text'",
":",
"form",
"[",
"'text'",
"]",
",",
"'pub_date'",
":",
"datetime",
".",
"datetime",
".",
"utcnow",
"(",
")",
"}",
")",
"return",
"redirect",
"(",
"request",
",",
"'timeline'",
")"
] |
Registers a new message for the user.
|
[
"Registers",
"a",
"new",
"message",
"for",
"the",
"user",
"."
] |
82e5032ef14ae8cc3c594fdd45d6c977aab1baad
|
https://github.com/aio-libs/aiohttp_admin/blob/82e5032ef14ae8cc3c594fdd45d6c977aab1baad/demos/motortwit/motortwit/views.py#L184-L203
|
13,221
|
aio-libs/aiohttp_admin
|
demos/motortwit/motortwit/utils.py
|
robo_avatar_url
|
def robo_avatar_url(user_data, size=80):
"""Return the gravatar image for the given email address."""
hash = md5(str(user_data).strip().lower().encode('utf-8')).hexdigest()
url = "https://robohash.org/{hash}.png?size={size}x{size}".format(
hash=hash, size=size)
return url
|
python
|
def robo_avatar_url(user_data, size=80):
"""Return the gravatar image for the given email address."""
hash = md5(str(user_data).strip().lower().encode('utf-8')).hexdigest()
url = "https://robohash.org/{hash}.png?size={size}x{size}".format(
hash=hash, size=size)
return url
|
[
"def",
"robo_avatar_url",
"(",
"user_data",
",",
"size",
"=",
"80",
")",
":",
"hash",
"=",
"md5",
"(",
"str",
"(",
"user_data",
")",
".",
"strip",
"(",
")",
".",
"lower",
"(",
")",
".",
"encode",
"(",
"'utf-8'",
")",
")",
".",
"hexdigest",
"(",
")",
"url",
"=",
"\"https://robohash.org/{hash}.png?size={size}x{size}\"",
".",
"format",
"(",
"hash",
"=",
"hash",
",",
"size",
"=",
"size",
")",
"return",
"url"
] |
Return the gravatar image for the given email address.
|
[
"Return",
"the",
"gravatar",
"image",
"for",
"the",
"given",
"email",
"address",
"."
] |
82e5032ef14ae8cc3c594fdd45d6c977aab1baad
|
https://github.com/aio-libs/aiohttp_admin/blob/82e5032ef14ae8cc3c594fdd45d6c977aab1baad/demos/motortwit/motortwit/utils.py#L27-L32
|
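Despite the docstring's gravatar wording, the URL points at robohash.org; the strip/lower normalization makes the hash stable across case and whitespace variants of the same address:

```python
# Both calls hash the same normalized string and print the same URL.
print(robo_avatar_url('Bob@Example.org '))
print(robo_avatar_url('bob@example.org'))
# https://robohash.org/<same md5 hex>.png?size=80x80
```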
13,222
|
ponty/PyVirtualDisplay
|
pyvirtualdisplay/smartdisplay.py
|
SmartDisplay.waitgrab
|
def waitgrab(self, timeout=60, autocrop=True, cb_imgcheck=None):
'''start process and create screenshot.
Repeat screenshot until it is not empty and
cb_imgcheck callback function returns True
for current screenshot.
:param autocrop: True -> crop screenshot
:param timeout: int
:param cb_imgcheck: None or callback for testing img,
True = accept img,
False = reject img
'''
t = 0
sleep_time = 0.3 # for fast windows
repeat_time = 1
while 1:
log.debug('sleeping %s secs' % str(sleep_time))
time.sleep(sleep_time)
t += sleep_time
img = self.grab(autocrop=autocrop)
if img:
if not cb_imgcheck:
break
if cb_imgcheck(img):
break
sleep_time = repeat_time
repeat_time += 1 # progressive
if t > timeout:
msg = 'Timeout! elapsed time:%s timeout:%s ' % (t, timeout)
raise DisplayTimeoutError(msg)
break
log.debug('screenshot is empty, next try..')
assert img
# if not img:
# log.debug('screenshot is empty!')
return img
|
python
|
def waitgrab(self, timeout=60, autocrop=True, cb_imgcheck=None):
'''start process and create screenshot.
Repeat screenshot until it is not empty and
cb_imgcheck callback function returns True
for current screenshot.
:param autocrop: True -> crop screenshot
:param timeout: int
:param cb_imgcheck: None or callback for testing img,
True = accept img,
False = reject img
'''
t = 0
sleep_time = 0.3 # for fast windows
repeat_time = 1
while 1:
log.debug('sleeping %s secs' % str(sleep_time))
time.sleep(sleep_time)
t += sleep_time
img = self.grab(autocrop=autocrop)
if img:
if not cb_imgcheck:
break
if cb_imgcheck(img):
break
sleep_time = repeat_time
repeat_time += 1 # progressive
if t > timeout:
msg = 'Timeout! elapsed time:%s timeout:%s ' % (t, timeout)
raise DisplayTimeoutError(msg)
break
log.debug('screenshot is empty, next try..')
assert img
# if not img:
# log.debug('screenshot is empty!')
return img
|
[
"def",
"waitgrab",
"(",
"self",
",",
"timeout",
"=",
"60",
",",
"autocrop",
"=",
"True",
",",
"cb_imgcheck",
"=",
"None",
")",
":",
"t",
"=",
"0",
"sleep_time",
"=",
"0.3",
"# for fast windows",
"repeat_time",
"=",
"1",
"while",
"1",
":",
"log",
".",
"debug",
"(",
"'sleeping %s secs'",
"%",
"str",
"(",
"sleep_time",
")",
")",
"time",
".",
"sleep",
"(",
"sleep_time",
")",
"t",
"+=",
"sleep_time",
"img",
"=",
"self",
".",
"grab",
"(",
"autocrop",
"=",
"autocrop",
")",
"if",
"img",
":",
"if",
"not",
"cb_imgcheck",
":",
"break",
"if",
"cb_imgcheck",
"(",
"img",
")",
":",
"break",
"sleep_time",
"=",
"repeat_time",
"repeat_time",
"+=",
"1",
"# progressive",
"if",
"t",
">",
"timeout",
":",
"msg",
"=",
"'Timeout! elapsed time:%s timeout:%s '",
"%",
"(",
"t",
",",
"timeout",
")",
"raise",
"DisplayTimeoutError",
"(",
"msg",
")",
"break",
"log",
".",
"debug",
"(",
"'screenshot is empty, next try..'",
")",
"assert",
"img",
"# if not img:",
"# log.debug('screenshot is empty!')",
"return",
"img"
] |
start process and create screenshot.
Repeat screenshot until it is not empty and
cb_imgcheck callback function returns True
for current screenshot.
:param autocrop: True -> crop screenshot
:param timeout: int
:param cb_imgcheck: None or callback for testing img,
True = accept img,
False = reject img
|
[
"start",
"process",
"and",
"create",
"screenshot",
".",
"Repeat",
"screenshot",
"until",
"it",
"is",
"not",
"empty",
"and",
"cb_imgcheck",
"callback",
"function",
"returns",
"True",
"for",
"current",
"screenshot",
"."
] |
903841f5ef13bf162be6fdd22daa5c349af45d67
|
https://github.com/ponty/PyVirtualDisplay/blob/903841f5ef13bf162be6fdd22daa5c349af45d67/pyvirtualdisplay/smartdisplay.py#L54-L90
|
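A hedged usage sketch along the lines of the project README of that era; it assumes Xvfb, `xmessage`, and the pyvirtualdisplay screenshot extras are installed:

```python
from easyprocess import EasyProcess
from pyvirtualdisplay.smartdisplay import SmartDisplay

disp = SmartDisplay(visible=0, size=(800, 600)).start()
proc = EasyProcess('xmessage hello').start()
try:
    # Polls with growing sleeps (0.3 s, then 1 s, 2 s, ...) until a
    # non-empty screenshot appears, or raises DisplayTimeoutError.
    img = disp.waitgrab(timeout=30)
    img.save('xmessage.png')
finally:
    proc.stop()
    disp.stop()
```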
13,223
|
ponty/PyVirtualDisplay
|
pyvirtualdisplay/abstractdisplay.py
|
AbstractDisplay._setup_xauth
|
def _setup_xauth(self):
'''
Set up the Xauthority file and the XAUTHORITY environment variable.
'''
handle, filename = tempfile.mkstemp(prefix='PyVirtualDisplay.',
suffix='.Xauthority')
self._xauth_filename = filename
os.close(handle)
# Save old environment
self._old_xauth = {}
self._old_xauth['AUTHFILE'] = os.getenv('AUTHFILE')
self._old_xauth['XAUTHORITY'] = os.getenv('XAUTHORITY')
os.environ['AUTHFILE'] = os.environ['XAUTHORITY'] = filename
cookie = xauth.generate_mcookie()
xauth.call('add', self.new_display_var, '.', cookie)
|
python
|
def _setup_xauth(self):
'''
Set up the Xauthority file and the XAUTHORITY environment variable.
'''
handle, filename = tempfile.mkstemp(prefix='PyVirtualDisplay.',
suffix='.Xauthority')
self._xauth_filename = filename
os.close(handle)
# Save old environment
self._old_xauth = {}
self._old_xauth['AUTHFILE'] = os.getenv('AUTHFILE')
self._old_xauth['XAUTHORITY'] = os.getenv('XAUTHORITY')
os.environ['AUTHFILE'] = os.environ['XAUTHORITY'] = filename
cookie = xauth.generate_mcookie()
xauth.call('add', self.new_display_var, '.', cookie)
|
[
"def",
"_setup_xauth",
"(",
"self",
")",
":",
"handle",
",",
"filename",
"=",
"tempfile",
".",
"mkstemp",
"(",
"prefix",
"=",
"'PyVirtualDisplay.'",
",",
"suffix",
"=",
"'.Xauthority'",
")",
"self",
".",
"_xauth_filename",
"=",
"filename",
"os",
".",
"close",
"(",
"handle",
")",
"# Save old environment",
"self",
".",
"_old_xauth",
"=",
"{",
"}",
"self",
".",
"_old_xauth",
"[",
"'AUTHFILE'",
"]",
"=",
"os",
".",
"getenv",
"(",
"'AUTHFILE'",
")",
"self",
".",
"_old_xauth",
"[",
"'XAUTHORITY'",
"]",
"=",
"os",
".",
"getenv",
"(",
"'XAUTHORITY'",
")",
"os",
".",
"environ",
"[",
"'AUTHFILE'",
"]",
"=",
"os",
".",
"environ",
"[",
"'XAUTHORITY'",
"]",
"=",
"filename",
"cookie",
"=",
"xauth",
".",
"generate_mcookie",
"(",
")",
"xauth",
".",
"call",
"(",
"'add'",
",",
"self",
".",
"new_display_var",
",",
"'.'",
",",
"cookie",
")"
] |
Set up the Xauthority file and the XAUTHORITY environment variable.
|
[
"Set",
"up",
"the",
"Xauthority",
"file",
"and",
"the",
"XAUTHORITY",
"environment",
"variable",
"."
] |
903841f5ef13bf162be6fdd22daa5c349af45d67
|
https://github.com/ponty/PyVirtualDisplay/blob/903841f5ef13bf162be6fdd22daa5c349af45d67/pyvirtualdisplay/abstractdisplay.py#L154-L169
|
13,224
|
ponty/PyVirtualDisplay
|
pyvirtualdisplay/abstractdisplay.py
|
AbstractDisplay._clear_xauth
|
def _clear_xauth(self):
'''
Clear the Xauthority file and restore the environment variables.
'''
os.remove(self._xauth_filename)
for varname in ['AUTHFILE', 'XAUTHORITY']:
if self._old_xauth[varname] is None:
del os.environ[varname]
else:
os.environ[varname] = self._old_xauth[varname]
self._old_xauth = None
|
python
|
def _clear_xauth(self):
'''
Clear the Xauthority file and restore the environment variables.
'''
os.remove(self._xauth_filename)
for varname in ['AUTHFILE', 'XAUTHORITY']:
if self._old_xauth[varname] is None:
del os.environ[varname]
else:
os.environ[varname] = self._old_xauth[varname]
self._old_xauth = None
|
[
"def",
"_clear_xauth",
"(",
"self",
")",
":",
"os",
".",
"remove",
"(",
"self",
".",
"_xauth_filename",
")",
"for",
"varname",
"in",
"[",
"'AUTHFILE'",
",",
"'XAUTHORITY'",
"]",
":",
"if",
"self",
".",
"_old_xauth",
"[",
"varname",
"]",
"is",
"None",
":",
"del",
"os",
".",
"environ",
"[",
"varname",
"]",
"else",
":",
"os",
".",
"environ",
"[",
"varname",
"]",
"=",
"self",
".",
"_old_xauth",
"[",
"varname",
"]",
"self",
".",
"_old_xauth",
"=",
"None"
] |
Clear the Xauthority file and restore the environment variables.
|
[
"Clear",
"the",
"Xauthority",
"file",
"and",
"restore",
"the",
"environment",
"variables",
"."
] |
903841f5ef13bf162be6fdd22daa5c349af45d67
|
https://github.com/ponty/PyVirtualDisplay/blob/903841f5ef13bf162be6fdd22daa5c349af45d67/pyvirtualdisplay/abstractdisplay.py#L171-L181
|
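`_setup_xauth` and `_clear_xauth` together implement a save-mutate-restore cycle on environment variables, with `None` standing in for "was unset". The same pattern in isolation (helper names are ours):

```python
import os

def set_env(name, value):
    """Set `name`, returning its previous value (None if it was unset)."""
    old = os.getenv(name)
    os.environ[name] = value
    return old

def restore_env(name, old):
    """Undo set_env: remove the variable if it did not exist before."""
    if old is None:
        del os.environ[name]
    else:
        os.environ[name] = old

old = set_env('XAUTHORITY', '/tmp/demo.Xauthority')
restore_env('XAUTHORITY', old)
```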
13,225
|
jasonrollins/shareplum
|
shareplum/shareplum.py
|
Office365.GetCookies
|
def GetCookies(self):
"""
Grabs the cookies from your Office Sharepoint site
and uses them as Authentication for the rest of the calls
"""
sectoken = self.GetSecurityToken(self.Username, self.Password)
url = self.share_point_site+ '/_forms/default.aspx?wa=wsignin1.0'
response = requests.post(url, data=sectoken)
return response.cookies
|
python
|
def GetCookies(self):
"""
Grabs the cookies from your Office Sharepoint site
and uses them as Authentication for the rest of the calls
"""
sectoken = self.GetSecurityToken(self.Username, self.Password)
url = self.share_point_site+ '/_forms/default.aspx?wa=wsignin1.0'
response = requests.post(url, data=sectoken)
return response.cookies
|
[
"def",
"GetCookies",
"(",
"self",
")",
":",
"sectoken",
"=",
"self",
".",
"GetSecurityToken",
"(",
"self",
".",
"Username",
",",
"self",
".",
"Password",
")",
"url",
"=",
"self",
".",
"share_point_site",
"+",
"'/_forms/default.aspx?wa=wsignin1.0'",
"response",
"=",
"requests",
".",
"post",
"(",
"url",
",",
"data",
"=",
"sectoken",
")",
"return",
"response",
".",
"cookies"
] |
Grabs the cookies from your Office Sharepoint site
and uses them as Authentication for the rest of the calls
|
[
"Grabs",
"the",
"cookies",
"form",
"your",
"Office",
"Sharepoint",
"site",
"and",
"uses",
"it",
"as",
"Authentication",
"for",
"the",
"rest",
"of",
"the",
"calls"
] |
404f320808912619920e2d787f2c4387225a14e0
|
https://github.com/jasonrollins/shareplum/blob/404f320808912619920e2d787f2c4387225a14e0/shareplum/shareplum.py#L70-L78
|
13,226
|
jasonrollins/shareplum
|
shareplum/shareplum.py
|
Site.DeleteList
|
def DeleteList(self, listName):
"""Delete a List with given name"""
# Build Request
soap_request = soap('DeleteList')
soap_request.add_parameter('listName', listName)
self.last_request = str(soap_request)
# Send Request
response = self._session.post(url=self._url('Lists'),
headers=self._headers('DeleteList'),
data=str(soap_request),
verify=self._verify_ssl,
timeout=self.timeout)
# Parse Response
if response.status_code == 200:
return response.text
else:
return response
|
python
|
def DeleteList(self, listName):
"""Delete a List with given name"""
# Build Request
soap_request = soap('DeleteList')
soap_request.add_parameter('listName', listName)
self.last_request = str(soap_request)
# Send Request
response = self._session.post(url=self._url('Lists'),
headers=self._headers('DeleteList'),
data=str(soap_request),
verify=self._verify_ssl,
timeout=self.timeout)
# Parse Response
if response.status_code == 200:
return response.text
else:
return response
|
[
"def",
"DeleteList",
"(",
"self",
",",
"listName",
")",
":",
"# Build Request",
"soap_request",
"=",
"soap",
"(",
"'DeleteList'",
")",
"soap_request",
".",
"add_parameter",
"(",
"'listName'",
",",
"listName",
")",
"self",
".",
"last_request",
"=",
"str",
"(",
"soap_request",
")",
"# Send Request",
"response",
"=",
"self",
".",
"_session",
".",
"post",
"(",
"url",
"=",
"self",
".",
"_url",
"(",
"'Lists'",
")",
",",
"headers",
"=",
"self",
".",
"_headers",
"(",
"'DeleteList'",
")",
",",
"data",
"=",
"str",
"(",
"soap_request",
")",
",",
"verify",
"=",
"self",
".",
"_verify_ssl",
",",
"timeout",
"=",
"self",
".",
"timeout",
")",
"# Parse Request",
"if",
"response",
"==",
"200",
":",
"return",
"response",
".",
"text",
"else",
":",
"return",
"response"
] |
Delete a List with given name
|
[
"Delete",
"a",
"List",
"with",
"given",
"name"
] |
404f320808912619920e2d787f2c4387225a14e0
|
https://github.com/jasonrollins/shareplum/blob/404f320808912619920e2d787f2c4387225a14e0/shareplum/shareplum.py#L207-L226
|
13,227
|
jasonrollins/shareplum
|
shareplum/shareplum.py
|
Site.GetListCollection
|
def GetListCollection(self):
"""Returns List information for current Site"""
# Build Request
soap_request = soap('GetListCollection')
self.last_request = str(soap_request)
# Send Request
response = self._session.post(url=self._url('SiteData'),
headers=self._headers('GetListCollection'),
data=str(soap_request),
verify=self._verify_ssl,
timeout=self.timeout)
# Parse Response
if response.status_code == 200:
envelope = etree.fromstring(response.text.encode('utf-8'), parser=etree.XMLParser(huge_tree=self.huge_tree))
result = envelope[0][0][0].text
lists = envelope[0][0][1]
data = []
for _list in lists:
_list_data = {}
for item in _list:
key = item.tag.replace('{http://schemas.microsoft.com/sharepoint/soap/}', '')
value = item.text
_list_data[key] = value
data.append(_list_data)
return data
else:
return response
|
python
|
def GetListCollection(self):
"""Returns List information for current Site"""
# Build Request
soap_request = soap('GetListCollection')
self.last_request = str(soap_request)
# Send Request
response = self._session.post(url=self._url('SiteData'),
headers=self._headers('GetListCollection'),
data=str(soap_request),
verify=self._verify_ssl,
timeout=self.timeout)
# Parse Response
if response.status_code == 200:
envelope = etree.fromstring(response.text.encode('utf-8'), parser=etree.XMLParser(huge_tree=self.huge_tree))
result = envelope[0][0][0].text
lists = envelope[0][0][1]
data = []
for _list in lists:
_list_data = {}
for item in _list:
key = item.tag.replace('{http://schemas.microsoft.com/sharepoint/soap/}', '')
value = item.text
_list_data[key] = value
data.append(_list_data)
return data
else:
return response
|
[
"def",
"GetListCollection",
"(",
"self",
")",
":",
"# Build Request",
"soap_request",
"=",
"soap",
"(",
"'GetListCollection'",
")",
"self",
".",
"last_request",
"=",
"str",
"(",
"soap_request",
")",
"# Send Request",
"response",
"=",
"self",
".",
"_session",
".",
"post",
"(",
"url",
"=",
"self",
".",
"_url",
"(",
"'SiteData'",
")",
",",
"headers",
"=",
"self",
".",
"_headers",
"(",
"'GetListCollection'",
")",
",",
"data",
"=",
"str",
"(",
"soap_request",
")",
",",
"verify",
"=",
"self",
".",
"_verify_ssl",
",",
"timeout",
"=",
"self",
".",
"timeout",
")",
"# Parse Response",
"if",
"response",
".",
"status_code",
"==",
"200",
":",
"envelope",
"=",
"etree",
".",
"fromstring",
"(",
"response",
".",
"text",
".",
"encode",
"(",
"'utf-8'",
")",
",",
"parser",
"=",
"etree",
".",
"XMLParser",
"(",
"huge_tree",
"=",
"self",
".",
"huge_tree",
")",
")",
"result",
"=",
"envelope",
"[",
"0",
"]",
"[",
"0",
"]",
"[",
"0",
"]",
".",
"text",
"lists",
"=",
"envelope",
"[",
"0",
"]",
"[",
"0",
"]",
"[",
"1",
"]",
"data",
"=",
"[",
"]",
"for",
"_list",
"in",
"lists",
":",
"_list_data",
"=",
"{",
"}",
"for",
"item",
"in",
"_list",
":",
"key",
"=",
"item",
".",
"tag",
".",
"replace",
"(",
"'{http://schemas.microsoft.com/sharepoint/soap/}'",
",",
"''",
")",
"value",
"=",
"item",
".",
"text",
"_list_data",
"[",
"key",
"]",
"=",
"value",
"data",
".",
"append",
"(",
"_list_data",
")",
"return",
"data",
"else",
":",
"return",
"response"
] |
Returns List information for current Site
|
[
"Returns",
"List",
"information",
"for",
"current",
"Site"
] |
404f320808912619920e2d787f2c4387225a14e0
|
https://github.com/jasonrollins/shareplum/blob/404f320808912619920e2d787f2c4387225a14e0/shareplum/shareplum.py#L228-L257
|
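For context, a hedged sketch of the shape GetListCollection returns on success, continuing the hypothetical site object from the sketch above; the key names shown depend on the actual SharePoint response:

# Each entry is a plain dict built from the SOAP child elements of one list.
for list_info in site.GetListCollection():
    print(list_info.get('Title'), list_info.get('InternalName'))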
13,228
|
jasonrollins/shareplum
|
shareplum/shareplum.py
|
_List._convert_to_internal
|
def _convert_to_internal(self, data):
"""From 'Column Title' to 'Column_x0020_Title'"""
for _dict in data:
keys = list(_dict.keys())[:]
for key in keys:
if key not in self._disp_cols:
raise Exception(key + ' not a column in current List.')
_dict[self._disp_cols[key]['name']] = self._sp_type(key, _dict.pop(key))
|
python
|
def _convert_to_internal(self, data):
"""From 'Column Title' to 'Column_x0020_Title'"""
for _dict in data:
keys = list(_dict.keys())[:]
for key in keys:
if key not in self._disp_cols:
raise Exception(key + ' not a column in current List.')
_dict[self._disp_cols[key]['name']] = self._sp_type(key, _dict.pop(key))
|
[
"def",
"_convert_to_internal",
"(",
"self",
",",
"data",
")",
":",
"for",
"_dict",
"in",
"data",
":",
"keys",
"=",
"list",
"(",
"_dict",
".",
"keys",
"(",
")",
")",
"[",
":",
"]",
"for",
"key",
"in",
"keys",
":",
"if",
"key",
"not",
"in",
"self",
".",
"_disp_cols",
":",
"raise",
"Exception",
"(",
"key",
"+",
"' not a column in current List.'",
")",
"_dict",
"[",
"self",
".",
"_disp_cols",
"[",
"key",
"]",
"[",
"'name'",
"]",
"]",
"=",
"self",
".",
"_sp_type",
"(",
"key",
",",
"_dict",
".",
"pop",
"(",
"key",
")",
")"
] |
From 'Column Title' to 'Column_x0020_Title'
|
[
"From",
"Column",
"Title",
"to",
"Column_x0020_Title"
] |
404f320808912619920e2d787f2c4387225a14e0
|
https://github.com/jasonrollins/shareplum/blob/404f320808912619920e2d787f2c4387225a14e0/shareplum/shareplum.py#L358-L365
|
13,229
|
jasonrollins/shareplum
|
shareplum/shareplum.py
|
_List._convert_to_display
|
def _convert_to_display(self, data):
"""From 'Column_x0020_Title' to 'Column Title'"""
for _dict in data:
keys = list(_dict.keys())[:]
for key in keys:
if key not in self._sp_cols:
raise Exception(key + ' not a column in current List.')
_dict[self._sp_cols[key]['name']] = self._python_type(key, _dict.pop(key))
|
python
|
def _convert_to_display(self, data):
"""From 'Column_x0020_Title' to 'Column Title'"""
for _dict in data:
keys = list(_dict.keys())[:]
for key in keys:
if key not in self._sp_cols:
raise Exception(key + ' not a column in current List.')
_dict[self._sp_cols[key]['name']] = self._python_type(key, _dict.pop(key))
|
[
"def",
"_convert_to_display",
"(",
"self",
",",
"data",
")",
":",
"for",
"_dict",
"in",
"data",
":",
"keys",
"=",
"list",
"(",
"_dict",
".",
"keys",
"(",
")",
")",
"[",
":",
"]",
"for",
"key",
"in",
"keys",
":",
"if",
"key",
"not",
"in",
"self",
".",
"_sp_cols",
":",
"raise",
"Exception",
"(",
"key",
"+",
"' not a column in current List.'",
")",
"_dict",
"[",
"self",
".",
"_sp_cols",
"[",
"key",
"]",
"[",
"'name'",
"]",
"]",
"=",
"self",
".",
"_python_type",
"(",
"key",
",",
"_dict",
".",
"pop",
"(",
"key",
")",
")"
] |
From 'Column_x0020_Title' to 'Column Title'
|
[
"From",
"Column_x0020_Title",
"to",
"Column",
"Title"
] |
404f320808912619920e2d787f2c4387225a14e0
|
https://github.com/jasonrollins/shareplum/blob/404f320808912619920e2d787f2c4387225a14e0/shareplum/shareplum.py#L367-L374
|
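A small worked example of the two conversion helpers above: SharePoint encodes a space in a column title as _x0020_, and the pair rewrites keys in place between display and internal names. The column metadata below is fabricated and the type-coercion step is omitted:

_disp_cols = {'Column Title': {'name': 'Column_x0020_Title', 'type': 'Text'}}

data = [{'Column Title': 'hello'}]
for _dict in data:
    for key in list(_dict.keys()):
        _dict[_disp_cols[key]['name']] = _dict.pop(key)
print(data)  # [{'Column_x0020_Title': 'hello'}]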
13,230
|
jasonrollins/shareplum
|
shareplum/shareplum.py
|
_List.GetView
|
def GetView(self, viewname):
"""Get Info on View Name
"""
# Build Request
soap_request = soap('GetView')
soap_request.add_parameter('listName', self.listName)
    if viewname is None:
views = self.GetViewCollection()
for view in views:
if 'DefaultView' in view:
if views[view]['DefaultView'] == 'TRUE':
viewname = view
break
if self.listName not in ['UserInfo', 'User Information List']:
soap_request.add_parameter('viewName', self.views[viewname]['Name'][1:-1])
else:
soap_request.add_parameter('viewName', viewname)
self.last_request = str(soap_request)
# Send Request
response = self._session.post(url=self._url('Views'),
headers=self._headers('GetView'),
data=str(soap_request),
verify=self._verify_ssl,
timeout=self.timeout)
# Parse Response
if response.status_code == 200:
envelope = etree.fromstring(response.text.encode('utf-8'), parser=etree.XMLParser(huge_tree=self.huge_tree))
view = envelope[0][0][0][0]
info = {key: value for (key, value) in view.items()}
fields = [x.items()[0][1] for x in view[1]]
return {'info': info, 'fields': fields}
else:
raise Exception("ERROR:", response.status_code, response.text)
|
python
|
def GetView(self, viewname):
"""Get Info on View Name
"""
# Build Request
soap_request = soap('GetView')
soap_request.add_parameter('listName', self.listName)
    if viewname is None:
views = self.GetViewCollection()
for view in views:
if 'DefaultView' in view:
if views[view]['DefaultView'] == 'TRUE':
viewname = view
break
if self.listName not in ['UserInfo', 'User Information List']:
soap_request.add_parameter('viewName', self.views[viewname]['Name'][1:-1])
else:
soap_request.add_parameter('viewName', viewname)
self.last_request = str(soap_request)
# Send Request
response = self._session.post(url=self._url('Views'),
headers=self._headers('GetView'),
data=str(soap_request),
verify=self._verify_ssl,
timeout=self.timeout)
# Parse Response
if response.status_code == 200:
envelope = etree.fromstring(response.text.encode('utf-8'), parser=etree.XMLParser(huge_tree=self.huge_tree))
view = envelope[0][0][0][0]
info = {key: value for (key, value) in view.items()}
fields = [x.items()[0][1] for x in view[1]]
return {'info': info, 'fields': fields}
else:
raise Exception("ERROR:", response.status_code, response.text)
|
[
"def",
"GetView",
"(",
"self",
",",
"viewname",
")",
":",
"# Build Request",
"soap_request",
"=",
"soap",
"(",
"'GetView'",
")",
"soap_request",
".",
"add_parameter",
"(",
"'listName'",
",",
"self",
".",
"listName",
")",
"if",
"viewname",
"==",
"None",
":",
"views",
"=",
"self",
".",
"GetViewCollection",
"(",
")",
"for",
"view",
"in",
"views",
":",
"if",
"'DefaultView'",
"in",
"view",
":",
"if",
"views",
"[",
"view",
"]",
"[",
"'DefaultView'",
"]",
"==",
"'TRUE'",
":",
"viewname",
"=",
"view",
"break",
"if",
"self",
".",
"listName",
"not",
"in",
"[",
"'UserInfo'",
",",
"'User Information List'",
"]",
":",
"soap_request",
".",
"add_parameter",
"(",
"'viewName'",
",",
"self",
".",
"views",
"[",
"viewname",
"]",
"[",
"'Name'",
"]",
"[",
"1",
":",
"-",
"1",
"]",
")",
"else",
":",
"soap_request",
".",
"add_parameter",
"(",
"'viewName'",
",",
"viewname",
")",
"self",
".",
"last_request",
"=",
"str",
"(",
"soap_request",
")",
"# Send Request",
"response",
"=",
"self",
".",
"_session",
".",
"post",
"(",
"url",
"=",
"self",
".",
"_url",
"(",
"'Views'",
")",
",",
"headers",
"=",
"self",
".",
"_headers",
"(",
"'GetView'",
")",
",",
"data",
"=",
"str",
"(",
"soap_request",
")",
",",
"verify",
"=",
"self",
".",
"_verify_ssl",
",",
"timeout",
"=",
"self",
".",
"timeout",
")",
"# Parse Response",
"if",
"response",
".",
"status_code",
"==",
"200",
":",
"envelope",
"=",
"etree",
".",
"fromstring",
"(",
"response",
".",
"text",
".",
"encode",
"(",
"'utf-8'",
")",
",",
"parser",
"=",
"etree",
".",
"XMLParser",
"(",
"huge_tree",
"=",
"self",
".",
"huge_tree",
")",
")",
"view",
"=",
"envelope",
"[",
"0",
"]",
"[",
"0",
"]",
"[",
"0",
"]",
"[",
"0",
"]",
"info",
"=",
"{",
"key",
":",
"value",
"for",
"(",
"key",
",",
"value",
")",
"in",
"view",
".",
"items",
"(",
")",
"}",
"fields",
"=",
"[",
"x",
".",
"items",
"(",
")",
"[",
"0",
"]",
"[",
"1",
"]",
"for",
"x",
"in",
"view",
"[",
"1",
"]",
"]",
"return",
"{",
"'info'",
":",
"info",
",",
"'fields'",
":",
"fields",
"}",
"else",
":",
"raise",
"Exception",
"(",
"\"ERROR:\"",
",",
"response",
".",
"status_code",
",",
"response",
".",
"text",
")"
] |
Get Info on View Name
|
[
"Get",
"Info",
"on",
"View",
"Name"
] |
404f320808912619920e2d787f2c4387225a14e0
|
https://github.com/jasonrollins/shareplum/blob/404f320808912619920e2d787f2c4387225a14e0/shareplum/shareplum.py#L562-L600
|
13,231
|
jasonrollins/shareplum
|
shareplum/shareplum.py
|
_List.UpdateListItems
|
def UpdateListItems(self, data, kind):
"""Update List Items
kind = 'New', 'Update', or 'Delete'
New:
Provide data like so:
data = [{'Title': 'New Title', 'Col1': 'New Value'}]
Update:
Provide data like so:
data = [{'ID': 23, 'Title': 'Updated Title'},
{'ID': 28, 'Col1': 'Updated Value'}]
Delete:
        Just provide a list of IDs
data = [23, 28]
"""
if type(data) != list:
raise Exception('data must be a list of dictionaries')
# Build Request
soap_request = soap('UpdateListItems')
soap_request.add_parameter('listName', self.listName)
if kind != 'Delete':
self._convert_to_internal(data)
soap_request.add_actions(data, kind)
self.last_request = str(soap_request)
# Send Request
response = self._session.post(url=self._url('Lists'),
headers=self._headers('UpdateListItems'),
data=str(soap_request),
verify=self._verify_ssl,
timeout=self.timeout)
# Parse Response
if response.status_code == 200:
envelope = etree.fromstring(response.text.encode('utf-8'), parser=etree.XMLParser(huge_tree=self.huge_tree))
results = envelope[0][0][0][0]
data = {}
for result in results:
if result.text != '0x00000000' and result[0].text != '0x00000000':
data[result.attrib['ID']] = (result[0].text, result[1].text)
else:
data[result.attrib['ID']] = result[0].text
return data
else:
return response
|
python
|
def UpdateListItems(self, data, kind):
"""Update List Items
kind = 'New', 'Update', or 'Delete'
New:
Provide data like so:
data = [{'Title': 'New Title', 'Col1': 'New Value'}]
Update:
Provide data like so:
data = [{'ID': 23, 'Title': 'Updated Title'},
{'ID': 28, 'Col1': 'Updated Value'}]
Delete:
        Just provide a list of IDs
data = [23, 28]
"""
if type(data) != list:
raise Exception('data must be a list of dictionaries')
# Build Request
soap_request = soap('UpdateListItems')
soap_request.add_parameter('listName', self.listName)
if kind != 'Delete':
self._convert_to_internal(data)
soap_request.add_actions(data, kind)
self.last_request = str(soap_request)
# Send Request
response = self._session.post(url=self._url('Lists'),
headers=self._headers('UpdateListItems'),
data=str(soap_request),
verify=self._verify_ssl,
timeout=self.timeout)
# Parse Response
if response.status_code == 200:
envelope = etree.fromstring(response.text.encode('utf-8'), parser=etree.XMLParser(huge_tree=self.huge_tree))
results = envelope[0][0][0][0]
data = {}
for result in results:
if result.text != '0x00000000' and result[0].text != '0x00000000':
data[result.attrib['ID']] = (result[0].text, result[1].text)
else:
data[result.attrib['ID']] = result[0].text
return data
else:
return response
|
[
"def",
"UpdateListItems",
"(",
"self",
",",
"data",
",",
"kind",
")",
":",
"if",
"type",
"(",
"data",
")",
"!=",
"list",
":",
"raise",
"Exception",
"(",
"'data must be a list of dictionaries'",
")",
"# Build Request",
"soap_request",
"=",
"soap",
"(",
"'UpdateListItems'",
")",
"soap_request",
".",
"add_parameter",
"(",
"'listName'",
",",
"self",
".",
"listName",
")",
"if",
"kind",
"!=",
"'Delete'",
":",
"self",
".",
"_convert_to_internal",
"(",
"data",
")",
"soap_request",
".",
"add_actions",
"(",
"data",
",",
"kind",
")",
"self",
".",
"last_request",
"=",
"str",
"(",
"soap_request",
")",
"# Send Request",
"response",
"=",
"self",
".",
"_session",
".",
"post",
"(",
"url",
"=",
"self",
".",
"_url",
"(",
"'Lists'",
")",
",",
"headers",
"=",
"self",
".",
"_headers",
"(",
"'UpdateListItems'",
")",
",",
"data",
"=",
"str",
"(",
"soap_request",
")",
",",
"verify",
"=",
"self",
".",
"_verify_ssl",
",",
"timeout",
"=",
"self",
".",
"timeout",
")",
"# Parse Response",
"if",
"response",
".",
"status_code",
"==",
"200",
":",
"envelope",
"=",
"etree",
".",
"fromstring",
"(",
"response",
".",
"text",
".",
"encode",
"(",
"'utf-8'",
")",
",",
"parser",
"=",
"etree",
".",
"XMLParser",
"(",
"huge_tree",
"=",
"self",
".",
"huge_tree",
")",
")",
"results",
"=",
"envelope",
"[",
"0",
"]",
"[",
"0",
"]",
"[",
"0",
"]",
"[",
"0",
"]",
"data",
"=",
"{",
"}",
"for",
"result",
"in",
"results",
":",
"if",
"result",
".",
"text",
"!=",
"'0x00000000'",
"and",
"result",
"[",
"0",
"]",
".",
"text",
"!=",
"'0x00000000'",
":",
"data",
"[",
"result",
".",
"attrib",
"[",
"'ID'",
"]",
"]",
"=",
"(",
"result",
"[",
"0",
"]",
".",
"text",
",",
"result",
"[",
"1",
"]",
".",
"text",
")",
"else",
":",
"data",
"[",
"result",
".",
"attrib",
"[",
"'ID'",
"]",
"]",
"=",
"result",
"[",
"0",
"]",
".",
"text",
"return",
"data",
"else",
":",
"return",
"response"
] |
Update List Items
kind = 'New', 'Update', or 'Delete'
New:
Provide data like so:
data = [{'Title': 'New Title', 'Col1': 'New Value'}]
Update:
Provide data like so:
data = [{'ID': 23, 'Title': 'Updated Title'},
{'ID': 28, 'Col1': 'Updated Value'}]
Delete:
    Just provide a list of IDs
data = [23, 28]
|
[
"Update",
"List",
"Items",
"kind",
"=",
"New",
"Update",
"or",
"Delete"
] |
404f320808912619920e2d787f2c4387225a14e0
|
https://github.com/jasonrollins/shareplum/blob/404f320808912619920e2d787f2c4387225a14e0/shareplum/shareplum.py#L658-L704
|
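A hedged sketch of the three payload kinds the UpdateListItems docstring describes, assuming a shareplum list object obtained via Site.List; the list and column names are placeholders:

sp_list = site.List('Tasks')
sp_list.UpdateListItems([{'Title': 'New Title', 'Col1': 'New Value'}], 'New')
sp_list.UpdateListItems([{'ID': 23, 'Title': 'Updated Title'}], 'Update')
sp_list.UpdateListItems([23, 28], 'Delete')  # Delete takes bare IDs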
13,232
|
jasonrollins/shareplum
|
shareplum/shareplum.py
|
_List.GetAttachmentCollection
|
def GetAttachmentCollection(self, _id):
"""Get Attachments for given List Item ID"""
# Build Request
soap_request = soap('GetAttachmentCollection')
soap_request.add_parameter('listName', self.listName)
soap_request.add_parameter('listItemID', _id)
self.last_request = str(soap_request)
# Send Request
response = self._session.post(url=self._url('Lists'),
headers=self._headers('GetAttachmentCollection'),
data=str(soap_request),
                                  verify=self._verify_ssl,
timeout=self.timeout)
# Parse Request
if response.status_code == 200:
envelope = etree.fromstring(response.text.encode('utf-8'), parser=etree.XMLParser(huge_tree=self.huge_tree))
attaches = envelope[0][0][0][0]
attachments = []
for attachment in attaches.getchildren():
attachments.append(attachment.text)
return attachments
else:
return response
|
python
|
def GetAttachmentCollection(self, _id):
"""Get Attachments for given List Item ID"""
# Build Request
soap_request = soap('GetAttachmentCollection')
soap_request.add_parameter('listName', self.listName)
soap_request.add_parameter('listItemID', _id)
self.last_request = str(soap_request)
# Send Request
response = self._session.post(url=self._url('Lists'),
headers=self._headers('GetAttachmentCollection'),
data=str(soap_request),
                                  verify=self._verify_ssl,
timeout=self.timeout)
# Parse Request
if response.status_code == 200:
envelope = etree.fromstring(response.text.encode('utf-8'), parser=etree.XMLParser(huge_tree=self.huge_tree))
attaches = envelope[0][0][0][0]
attachments = []
for attachment in attaches.getchildren():
attachments.append(attachment.text)
return attachments
else:
return response
|
[
"def",
"GetAttachmentCollection",
"(",
"self",
",",
"_id",
")",
":",
"# Build Request",
"soap_request",
"=",
"soap",
"(",
"'GetAttachmentCollection'",
")",
"soap_request",
".",
"add_parameter",
"(",
"'listName'",
",",
"self",
".",
"listName",
")",
"soap_request",
".",
"add_parameter",
"(",
"'listItemID'",
",",
"_id",
")",
"self",
".",
"last_request",
"=",
"str",
"(",
"soap_request",
")",
"# Send Request",
"response",
"=",
"self",
".",
"_session",
".",
"post",
"(",
"url",
"=",
"self",
".",
"_url",
"(",
"'Lists'",
")",
",",
"headers",
"=",
"self",
".",
"_headers",
"(",
"'GetAttachmentCollection'",
")",
",",
"data",
"=",
"str",
"(",
"soap_request",
")",
",",
"verify",
"=",
"False",
",",
"timeout",
"=",
"self",
".",
"timeout",
")",
"# Parse Request",
"if",
"response",
".",
"status_code",
"==",
"200",
":",
"envelope",
"=",
"etree",
".",
"fromstring",
"(",
"response",
".",
"text",
".",
"encode",
"(",
"'utf-8'",
")",
",",
"parser",
"=",
"etree",
".",
"XMLParser",
"(",
"huge_tree",
"=",
"self",
".",
"huge_tree",
")",
")",
"attaches",
"=",
"envelope",
"[",
"0",
"]",
"[",
"0",
"]",
"[",
"0",
"]",
"[",
"0",
"]",
"attachments",
"=",
"[",
"]",
"for",
"attachment",
"in",
"attaches",
".",
"getchildren",
"(",
")",
":",
"attachments",
".",
"append",
"(",
"attachment",
".",
"text",
")",
"return",
"attachments",
"else",
":",
"return",
"response"
] |
Get Attachments for given List Item ID
|
[
"Get",
"Attachments",
"for",
"given",
"List",
"Item",
"ID"
] |
404f320808912619920e2d787f2c4387225a14e0
|
https://github.com/jasonrollins/shareplum/blob/404f320808912619920e2d787f2c4387225a14e0/shareplum/shareplum.py#L706-L731
|
13,233
|
jasonrollins/shareplum
|
shareplum/ListDict.py
|
changes
|
def changes(new_cmp_dict, old_cmp_dict, id_column, columns):
"""Return a list dict of the changes of the
rows that exist in both dictionaries
User must provide an ID column for old_cmp_dict
"""
update_ldict = []
same_keys = set(new_cmp_dict).intersection(set(old_cmp_dict))
for same_key in same_keys:
        # Get the intersection of the sets of keys
# for both dictionaries to account
# for missing keys
old_dict = old_cmp_dict[same_key]
new_dict = new_cmp_dict[same_key]
dict_keys = set(old_dict).intersection(set(new_dict))
update_dict = {}
for dict_key in columns:
old_val = old_dict.get(dict_key, 'NaN')
new_val = new_dict.get(dict_key, 'NaN')
if old_val != new_val and new_val != 'NaN':
                if id_column is not None:
try:
update_dict[id_column] = old_dict[id_column]
except KeyError:
print("Input Dictionary 'old_cmp_dict' must have ID column")
update_dict[dict_key] = new_val
if update_dict:
update_ldict.append(update_dict)
return update_ldict
|
python
|
def changes(new_cmp_dict, old_cmp_dict, id_column, columns):
"""Return a list dict of the changes of the
rows that exist in both dictionaries
User must provide an ID column for old_cmp_dict
"""
update_ldict = []
same_keys = set(new_cmp_dict).intersection(set(old_cmp_dict))
for same_key in same_keys:
        # Get the intersection of the sets of keys
# for both dictionaries to account
# for missing keys
old_dict = old_cmp_dict[same_key]
new_dict = new_cmp_dict[same_key]
dict_keys = set(old_dict).intersection(set(new_dict))
update_dict = {}
for dict_key in columns:
old_val = old_dict.get(dict_key, 'NaN')
new_val = new_dict.get(dict_key, 'NaN')
if old_val != new_val and new_val != 'NaN':
                if id_column is not None:
try:
update_dict[id_column] = old_dict[id_column]
except KeyError:
print("Input Dictionary 'old_cmp_dict' must have ID column")
update_dict[dict_key] = new_val
if update_dict:
update_ldict.append(update_dict)
return update_ldict
|
[
"def",
"changes",
"(",
"new_cmp_dict",
",",
"old_cmp_dict",
",",
"id_column",
",",
"columns",
")",
":",
"update_ldict",
"=",
"[",
"]",
"same_keys",
"=",
"set",
"(",
"new_cmp_dict",
")",
".",
"intersection",
"(",
"set",
"(",
"old_cmp_dict",
")",
")",
"for",
"same_key",
"in",
"same_keys",
":",
"# Get the Union of the set of keys",
"# for both dictionaries to account",
"# for missing keys",
"old_dict",
"=",
"old_cmp_dict",
"[",
"same_key",
"]",
"new_dict",
"=",
"new_cmp_dict",
"[",
"same_key",
"]",
"dict_keys",
"=",
"set",
"(",
"old_dict",
")",
".",
"intersection",
"(",
"set",
"(",
"new_dict",
")",
")",
"update_dict",
"=",
"{",
"}",
"for",
"dict_key",
"in",
"columns",
":",
"old_val",
"=",
"old_dict",
".",
"get",
"(",
"dict_key",
",",
"'NaN'",
")",
"new_val",
"=",
"new_dict",
".",
"get",
"(",
"dict_key",
",",
"'NaN'",
")",
"if",
"old_val",
"!=",
"new_val",
"and",
"new_val",
"!=",
"'NaN'",
":",
"if",
"id_column",
"!=",
"None",
":",
"try",
":",
"update_dict",
"[",
"id_column",
"]",
"=",
"old_dict",
"[",
"id_column",
"]",
"except",
"KeyError",
":",
"print",
"(",
"\"Input Dictionary 'old_cmp_dict' must have ID column\"",
")",
"update_dict",
"[",
"dict_key",
"]",
"=",
"new_val",
"if",
"update_dict",
":",
"update_ldict",
".",
"append",
"(",
"update_dict",
")",
"return",
"update_ldict"
] |
Return a list dict of the changes of the
rows that exist in both dictionaries
User must provide an ID column for old_cmp_dict
|
[
"Return",
"a",
"list",
"dict",
"of",
"the",
"changes",
"of",
"the",
"rows",
"that",
"exist",
"in",
"both",
"dictionaries",
"User",
"must",
"provide",
"an",
"ID",
"column",
"for",
"old_cmp_dict"
] |
404f320808912619920e2d787f2c4387225a14e0
|
https://github.com/jasonrollins/shareplum/blob/404f320808912619920e2d787f2c4387225a14e0/shareplum/ListDict.py#L4-L33
|
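A worked example for changes(): only rows present in both dicts are compared, and each change is emitted together with the ID column. Here row 'a' changed Col1, row 'b' only exists in the old dict, and row 'c' only in the new one:

old = {'a': {'ID': 1, 'Col1': 'x'}, 'b': {'ID': 2, 'Col1': 'y'}}
new = {'a': {'ID': 1, 'Col1': 'z'}, 'c': {'ID': 3, 'Col1': 'q'}}
print(changes(new, old, 'ID', ['Col1']))  # -> [{'ID': 1, 'Col1': 'z'}]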
13,234
|
jasonrollins/shareplum
|
shareplum/ListDict.py
|
unique
|
def unique(new_cmp_dict, old_cmp_dict):
"""Return a list dict of
the unique keys in new_cmp_dict
"""
newkeys = set(new_cmp_dict)
oldkeys = set(old_cmp_dict)
unique = newkeys - oldkeys
unique_ldict = []
for key in unique:
unique_ldict.append(new_cmp_dict[key])
return unique_ldict
|
python
|
def unique(new_cmp_dict, old_cmp_dict):
"""Return a list dict of
the unique keys in new_cmp_dict
"""
newkeys = set(new_cmp_dict)
oldkeys = set(old_cmp_dict)
unique = newkeys - oldkeys
unique_ldict = []
for key in unique:
unique_ldict.append(new_cmp_dict[key])
return unique_ldict
|
[
"def",
"unique",
"(",
"new_cmp_dict",
",",
"old_cmp_dict",
")",
":",
"newkeys",
"=",
"set",
"(",
"new_cmp_dict",
")",
"oldkeys",
"=",
"set",
"(",
"old_cmp_dict",
")",
"unique",
"=",
"newkeys",
"-",
"oldkeys",
"unique_ldict",
"=",
"[",
"]",
"for",
"key",
"in",
"unique",
":",
"unique_ldict",
".",
"append",
"(",
"new_cmp_dict",
"[",
"key",
"]",
")",
"return",
"unique_ldict"
] |
Return a list dict of
the unique keys in new_cmp_dict
|
[
"Return",
"a",
"list",
"dict",
"of",
"the",
"unique",
"keys",
"in",
"new_cmp_dict"
] |
404f320808912619920e2d787f2c4387225a14e0
|
https://github.com/jasonrollins/shareplum/blob/404f320808912619920e2d787f2c4387225a14e0/shareplum/ListDict.py#L35-L45
|
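And the matching worked example for unique(), which returns the rows whose keys exist only in new_cmp_dict:

old = {'a': {'ID': 1}, 'b': {'ID': 2}}
new = {'a': {'ID': 1}, 'c': {'ID': 3}}
print(unique(new, old))  # -> [{'ID': 3}]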
13,235
|
improbable-research/keanu
|
keanu-python/keanu/plots/traceplot.py
|
traceplot
|
def traceplot(trace: sample_types, labels: List[Union[str, Tuple[str, str]]] = None, ax: Any = None,
x0: int = 0) -> Any:
"""
    Plot sample values.
:param trace: result of MCMC run
:param labels: labels of vertices to be plotted. if None, all vertices are plotted.
:param ax: Matplotlib axes
:param x0: index of first data point, used for sample stream plots
"""
if labels is None:
labels = list(trace.keys())
if ax is None:
_, ax = plt.subplots(len(labels), 1, squeeze=False)
for index, label in enumerate(labels):
data = [sample for sample in trace[label]]
ax[index][0].set_title(label)
ax[index][0].plot(__integer_xaxis(ax[index][0], x0, len(data)), data)
__pause_for_crude_animation()
return ax
|
python
|
def traceplot(trace: sample_types, labels: List[Union[str, Tuple[str, str]]] = None, ax: Any = None,
x0: int = 0) -> Any:
"""
    Plot sample values.
:param trace: result of MCMC run
:param labels: labels of vertices to be plotted. if None, all vertices are plotted.
:param ax: Matplotlib axes
:param x0: index of first data point, used for sample stream plots
"""
if labels is None:
labels = list(trace.keys())
if ax is None:
_, ax = plt.subplots(len(labels), 1, squeeze=False)
for index, label in enumerate(labels):
data = [sample for sample in trace[label]]
ax[index][0].set_title(label)
ax[index][0].plot(__integer_xaxis(ax[index][0], x0, len(data)), data)
__pause_for_crude_animation()
return ax
|
[
"def",
"traceplot",
"(",
"trace",
":",
"sample_types",
",",
"labels",
":",
"List",
"[",
"Union",
"[",
"str",
",",
"Tuple",
"[",
"str",
",",
"str",
"]",
"]",
"]",
"=",
"None",
",",
"ax",
":",
"Any",
"=",
"None",
",",
"x0",
":",
"int",
"=",
"0",
")",
"->",
"Any",
":",
"if",
"labels",
"is",
"None",
":",
"labels",
"=",
"list",
"(",
"trace",
".",
"keys",
"(",
")",
")",
"if",
"ax",
"is",
"None",
":",
"_",
",",
"ax",
"=",
"plt",
".",
"subplots",
"(",
"len",
"(",
"labels",
")",
",",
"1",
",",
"squeeze",
"=",
"False",
")",
"for",
"index",
",",
"label",
"in",
"enumerate",
"(",
"labels",
")",
":",
"data",
"=",
"[",
"sample",
"for",
"sample",
"in",
"trace",
"[",
"label",
"]",
"]",
"ax",
"[",
"index",
"]",
"[",
"0",
"]",
".",
"set_title",
"(",
"label",
")",
"ax",
"[",
"index",
"]",
"[",
"0",
"]",
".",
"plot",
"(",
"__integer_xaxis",
"(",
"ax",
"[",
"index",
"]",
"[",
"0",
"]",
",",
"x0",
",",
"len",
"(",
"data",
")",
")",
",",
"data",
")",
"__pause_for_crude_animation",
"(",
")",
"return",
"ax"
] |
Plot sample values.
:param trace: result of MCMC run
:param labels: labels of vertices to be plotted. if None, all vertices are plotted.
:param ax: Matplotlib axes
:param x0: index of first data point, used for sample stream plots
|
[
"Plot",
"samples",
"values",
"."
] |
73189a8f569078e156168e795f82c7366c59574b
|
https://github.com/improbable-research/keanu/blob/73189a8f569078e156168e795f82c7366c59574b/keanu-python/keanu/plots/traceplot.py#L12-L37
|
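A minimal sketch of calling traceplot; the import path and the fake trace dict below are assumptions standing in for a real keanu sampling result:

import matplotlib.pyplot as plt
from keanu.plots import traceplot

trace = {'theta': [0.1, 0.4, 0.3, 0.5], 'sigma': [1.0, 0.9, 1.1, 1.0]}
traceplot(trace)  # one stacked subplot per label, integer x axis from x0
plt.show()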
13,236
|
improbable-research/keanu
|
docs/bin/snippet_writer.py
|
read_file_snippets
|
def read_file_snippets(file, snippet_store):
"""Parse a file and add all snippets to the snippet_store dictionary"""
start_reg = re.compile("(.*%%SNIPPET_START%% )([a-zA-Z0-9]+)")
end_reg = re.compile("(.*%%SNIPPET_END%% )([a-zA-Z0-9]+)")
open_snippets = {}
with open(file, encoding="utf-8") as w:
lines = w.readlines()
for line in lines:
printd("Got Line: {}".format(line))
# Check whether we're entering or leaving a snippet
m = start_reg.match(line)
if m:
printd("Opened Snippet {}".format(m.group(2)))
if m.group(2) in snippet_store:
record_error("Repeat definition of Snippet {}".format(m.group(2)))
elif m.group(2) in open_snippets:
record_error("Snippet already opened {}".format(m.group(2)))
else:
printd("Added {} to open snippets list".format(m.group(2)))
open_snippets[m.group(2)] = []
continue
m = end_reg.match(line)
if m:
printd("Found end of Snippet {}".format(m.group(2)))
if m.group(2) not in open_snippets:
record_error("Reached Snippet End but no start")
elif m.group(2) in snippet_store:
record_error("Repeat definition of Snippet {}".format(m.group(2)))
else:
snippet_store[m.group(2)] = open_snippets[m.group(2)]
del open_snippets[m.group(2)]
continue
# If we've got this far, then we're just a normal line, so we can add this to all open snippets
for snippet in open_snippets.values():
printd("Adding Line to snippet")
snippet.append(line)
# Now, warn about any unclosed snippets
for opened in open_snippets:
record_error("Snippet {} left open - ignoring".format(opened))
|
python
|
def read_file_snippets(file, snippet_store):
"""Parse a file and add all snippets to the snippet_store dictionary"""
start_reg = re.compile("(.*%%SNIPPET_START%% )([a-zA-Z0-9]+)")
end_reg = re.compile("(.*%%SNIPPET_END%% )([a-zA-Z0-9]+)")
open_snippets = {}
with open(file, encoding="utf-8") as w:
lines = w.readlines()
for line in lines:
printd("Got Line: {}".format(line))
# Check whether we're entering or leaving a snippet
m = start_reg.match(line)
if m:
printd("Opened Snippet {}".format(m.group(2)))
if m.group(2) in snippet_store:
record_error("Repeat definition of Snippet {}".format(m.group(2)))
elif m.group(2) in open_snippets:
record_error("Snippet already opened {}".format(m.group(2)))
else:
printd("Added {} to open snippets list".format(m.group(2)))
open_snippets[m.group(2)] = []
continue
m = end_reg.match(line)
if m:
printd("Found end of Snippet {}".format(m.group(2)))
if m.group(2) not in open_snippets:
record_error("Reached Snippet End but no start")
elif m.group(2) in snippet_store:
record_error("Repeat definition of Snippet {}".format(m.group(2)))
else:
snippet_store[m.group(2)] = open_snippets[m.group(2)]
del open_snippets[m.group(2)]
continue
# If we've got this far, then we're just a normal line, so we can add this to all open snippets
for snippet in open_snippets.values():
printd("Adding Line to snippet")
snippet.append(line)
# Now, warn about any unclosed snippets
for opened in open_snippets:
record_error("Snippet {} left open - ignoring".format(opened))
|
[
"def",
"read_file_snippets",
"(",
"file",
",",
"snippet_store",
")",
":",
"start_reg",
"=",
"re",
".",
"compile",
"(",
"\"(.*%%SNIPPET_START%% )([a-zA-Z0-9]+)\"",
")",
"end_reg",
"=",
"re",
".",
"compile",
"(",
"\"(.*%%SNIPPET_END%% )([a-zA-Z0-9]+)\"",
")",
"open_snippets",
"=",
"{",
"}",
"with",
"open",
"(",
"file",
",",
"encoding",
"=",
"\"utf-8\"",
")",
"as",
"w",
":",
"lines",
"=",
"w",
".",
"readlines",
"(",
")",
"for",
"line",
"in",
"lines",
":",
"printd",
"(",
"\"Got Line: {}\"",
".",
"format",
"(",
"line",
")",
")",
"# Check whether we're entering or leaving a snippet",
"m",
"=",
"start_reg",
".",
"match",
"(",
"line",
")",
"if",
"m",
":",
"printd",
"(",
"\"Opened Snippet {}\"",
".",
"format",
"(",
"m",
".",
"group",
"(",
"2",
")",
")",
")",
"if",
"m",
".",
"group",
"(",
"2",
")",
"in",
"snippet_store",
":",
"record_error",
"(",
"\"Repeat definition of Snippet {}\"",
".",
"format",
"(",
"m",
".",
"group",
"(",
"2",
")",
")",
")",
"elif",
"m",
".",
"group",
"(",
"2",
")",
"in",
"open_snippets",
":",
"record_error",
"(",
"\"Snippet already opened {}\"",
".",
"format",
"(",
"m",
".",
"group",
"(",
"2",
")",
")",
")",
"else",
":",
"printd",
"(",
"\"Added {} to open snippets list\"",
".",
"format",
"(",
"m",
".",
"group",
"(",
"2",
")",
")",
")",
"open_snippets",
"[",
"m",
".",
"group",
"(",
"2",
")",
"]",
"=",
"[",
"]",
"continue",
"m",
"=",
"end_reg",
".",
"match",
"(",
"line",
")",
"if",
"m",
":",
"printd",
"(",
"\"Found end of Snippet {}\"",
".",
"format",
"(",
"m",
".",
"group",
"(",
"2",
")",
")",
")",
"if",
"m",
".",
"group",
"(",
"2",
")",
"not",
"in",
"open_snippets",
":",
"record_error",
"(",
"\"Reached Snippet End but no start\"",
")",
"elif",
"m",
".",
"group",
"(",
"2",
")",
"in",
"snippet_store",
":",
"record_error",
"(",
"\"Repeat definition of Snippet {}\"",
".",
"format",
"(",
"m",
".",
"group",
"(",
"2",
")",
")",
")",
"else",
":",
"snippet_store",
"[",
"m",
".",
"group",
"(",
"2",
")",
"]",
"=",
"open_snippets",
"[",
"m",
".",
"group",
"(",
"2",
")",
"]",
"del",
"open_snippets",
"[",
"m",
".",
"group",
"(",
"2",
")",
"]",
"continue",
"# If we've got this far, then we're just a normal line, so we can add this to all open snippets",
"for",
"snippet",
"in",
"open_snippets",
".",
"values",
"(",
")",
":",
"printd",
"(",
"\"Adding Line to snippet\"",
")",
"snippet",
".",
"append",
"(",
"line",
")",
"# Now, warn about any unclosed snippets",
"for",
"opened",
"in",
"open_snippets",
":",
"record_error",
"(",
"\"Snippet {} left open - ignoring\"",
".",
"format",
"(",
"opened",
")",
")"
] |
Parse a file and add all snippets to the snippet_store dictionary
|
[
"Parse",
"a",
"file",
"and",
"add",
"all",
"snippets",
"to",
"the",
"snippet_store",
"dictionary"
] |
73189a8f569078e156168e795f82c7366c59574b
|
https://github.com/improbable-research/keanu/blob/73189a8f569078e156168e795f82c7366c59574b/docs/bin/snippet_writer.py#L31-L73
|
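For illustration, an input file the two marker regexes above would match (snippet names must be alphanumeric), followed by the call; the file name is a placeholder:

# Contents of Example.java:
#   // %%SNIPPET_START%% HelloWorld
#   System.out.println("Hello");
#   // %%SNIPPET_END%% HelloWorld
snippet_store = {}
read_file_snippets('Example.java', snippet_store)
print(snippet_store['HelloWorld'])  # ['System.out.println("Hello");\n']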
13,237
|
improbable-research/keanu
|
docs/bin/snippet_writer.py
|
strip_block_whitespace
|
def strip_block_whitespace(string_list):
"""Treats a list of strings as a code block and strips
whitespace so that the min whitespace line sits at char 0 of line."""
min_ws = min([(len(x) - len(x.lstrip())) for x in string_list if x != '\n'])
return [x[min_ws:] if x != '\n' else x for x in string_list]
|
python
|
def strip_block_whitespace(string_list):
"""Treats a list of strings as a code block and strips
whitespace so that the min whitespace line sits at char 0 of line."""
min_ws = min([(len(x) - len(x.lstrip())) for x in string_list if x != '\n'])
return [x[min_ws:] if x != '\n' else x for x in string_list]
|
[
"def",
"strip_block_whitespace",
"(",
"string_list",
")",
":",
"min_ws",
"=",
"min",
"(",
"[",
"(",
"len",
"(",
"x",
")",
"-",
"len",
"(",
"x",
".",
"lstrip",
"(",
")",
")",
")",
"for",
"x",
"in",
"string_list",
"if",
"x",
"!=",
"'\\n'",
"]",
")",
"return",
"[",
"x",
"[",
"min_ws",
":",
"]",
"if",
"x",
"!=",
"'\\n'",
"else",
"x",
"for",
"x",
"in",
"string_list",
"]"
] |
Treats a list of strings as a code block and strips
whitespace so that the min whitespace line sits at char 0 of line.
|
[
"Treats",
"a",
"list",
"of",
"strings",
"as",
"a",
"code",
"block",
"and",
"strips",
"whitespace",
"so",
"that",
"the",
"min",
"whitespace",
"line",
"sits",
"at",
"char",
"0",
"of",
"line",
"."
] |
73189a8f569078e156168e795f82c7366c59574b
|
https://github.com/improbable-research/keanu/blob/73189a8f569078e156168e795f82c7366c59574b/docs/bin/snippet_writer.py#L126-L130
|
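A worked example for strip_block_whitespace: the smallest indent among non-blank lines (four spaces here) is removed from every line, and bare newlines pass through untouched:

block = ['    def f():\n', '        return 1\n', '\n']
print(strip_block_whitespace(block))
# -> ['def f():\n', '    return 1\n', '\n']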
13,238
|
aio-libs/aiohttp-sse
|
aiohttp_sse/__init__.py
|
EventSourceResponse.prepare
|
async def prepare(self, request):
"""Prepare for streaming and send HTTP headers.
:param request: regular aiohttp.web.Request.
"""
if request.method != 'GET':
raise HTTPMethodNotAllowed(request.method, ['GET'])
if not self.prepared:
writer = await super().prepare(request)
self._loop = request.app.loop
self._ping_task = self._loop.create_task(self._ping())
# explicitly enabling chunked encoding, since content length
# usually not known beforehand.
self.enable_chunked_encoding()
return writer
else:
# hackish way to check if connection alive
# should be updated once we have proper API in aiohttp
# https://github.com/aio-libs/aiohttp/issues/3105
if request.protocol.transport is None:
# request disconnected
raise asyncio.CancelledError()
|
python
|
async def prepare(self, request):
"""Prepare for streaming and send HTTP headers.
:param request: regular aiohttp.web.Request.
"""
if request.method != 'GET':
raise HTTPMethodNotAllowed(request.method, ['GET'])
if not self.prepared:
writer = await super().prepare(request)
self._loop = request.app.loop
self._ping_task = self._loop.create_task(self._ping())
# explicitly enabling chunked encoding, since content length
# usually not known beforehand.
self.enable_chunked_encoding()
return writer
else:
# hackish way to check if connection alive
# should be updated once we have proper API in aiohttp
# https://github.com/aio-libs/aiohttp/issues/3105
if request.protocol.transport is None:
# request disconnected
raise asyncio.CancelledError()
|
[
"async",
"def",
"prepare",
"(",
"self",
",",
"request",
")",
":",
"if",
"request",
".",
"method",
"!=",
"'GET'",
":",
"raise",
"HTTPMethodNotAllowed",
"(",
"request",
".",
"method",
",",
"[",
"'GET'",
"]",
")",
"if",
"not",
"self",
".",
"prepared",
":",
"writer",
"=",
"await",
"super",
"(",
")",
".",
"prepare",
"(",
"request",
")",
"self",
".",
"_loop",
"=",
"request",
".",
"app",
".",
"loop",
"self",
".",
"_ping_task",
"=",
"self",
".",
"_loop",
".",
"create_task",
"(",
"self",
".",
"_ping",
"(",
")",
")",
"# explicitly enabling chunked encoding, since content length",
"# usually not known beforehand.",
"self",
".",
"enable_chunked_encoding",
"(",
")",
"return",
"writer",
"else",
":",
"# hackish way to check if connection alive",
"# should be updated once we have proper API in aiohttp",
"# https://github.com/aio-libs/aiohttp/issues/3105",
"if",
"request",
".",
"protocol",
".",
"transport",
"is",
"None",
":",
"# request disconnected",
"raise",
"asyncio",
".",
"CancelledError",
"(",
")"
] |
Prepare for streaming and send HTTP headers.
:param request: regular aiohttp.web.Request.
|
[
"Prepare",
"for",
"streaming",
"and",
"send",
"HTTP",
"headers",
"."
] |
5148d087f9df75ecea61f574d3c768506680e5dc
|
https://github.com/aio-libs/aiohttp-sse/blob/5148d087f9df75ecea61f574d3c768506680e5dc/aiohttp_sse/__init__.py#L52-L74
|
13,239
|
aio-libs/aiohttp-sse
|
aiohttp_sse/__init__.py
|
EventSourceResponse.send
|
async def send(self, data, id=None, event=None, retry=None):
"""Send data using EventSource protocol
:param str data: The data field for the message.
:param str id: The event ID to set the EventSource object's last
event ID value to.
:param str event: The event's type. If this is specified, an event will
be dispatched on the browser to the listener for the specified
event name; the web site would use addEventListener() to listen
for named events. The default event type is "message".
:param int retry: The reconnection time to use when attempting to send
the event. [What code handles this?] This must be an integer,
specifying the reconnection time in milliseconds. If a non-integer
value is specified, the field is ignored.
"""
buffer = io.StringIO()
if id is not None:
buffer.write(self.LINE_SEP_EXPR.sub('', 'id: {}'.format(id)))
buffer.write(self._sep)
if event is not None:
buffer.write(self.LINE_SEP_EXPR.sub('', 'event: {}'.format(event)))
buffer.write(self._sep)
for chunk in self.LINE_SEP_EXPR.split(data):
buffer.write('data: {}'.format(chunk))
buffer.write(self._sep)
if retry is not None:
if not isinstance(retry, int):
raise TypeError('retry argument must be int')
buffer.write('retry: {}'.format(retry))
buffer.write(self._sep)
buffer.write(self._sep)
await self.write(buffer.getvalue().encode('utf-8'))
|
python
|
async def send(self, data, id=None, event=None, retry=None):
"""Send data using EventSource protocol
:param str data: The data field for the message.
:param str id: The event ID to set the EventSource object's last
event ID value to.
:param str event: The event's type. If this is specified, an event will
be dispatched on the browser to the listener for the specified
event name; the web site would use addEventListener() to listen
for named events. The default event type is "message".
:param int retry: The reconnection time to use when attempting to send
the event. [What code handles this?] This must be an integer,
specifying the reconnection time in milliseconds. If a non-integer
value is specified, the field is ignored.
"""
buffer = io.StringIO()
if id is not None:
buffer.write(self.LINE_SEP_EXPR.sub('', 'id: {}'.format(id)))
buffer.write(self._sep)
if event is not None:
buffer.write(self.LINE_SEP_EXPR.sub('', 'event: {}'.format(event)))
buffer.write(self._sep)
for chunk in self.LINE_SEP_EXPR.split(data):
buffer.write('data: {}'.format(chunk))
buffer.write(self._sep)
if retry is not None:
if not isinstance(retry, int):
raise TypeError('retry argument must be int')
buffer.write('retry: {}'.format(retry))
buffer.write(self._sep)
buffer.write(self._sep)
await self.write(buffer.getvalue().encode('utf-8'))
|
[
"async",
"def",
"send",
"(",
"self",
",",
"data",
",",
"id",
"=",
"None",
",",
"event",
"=",
"None",
",",
"retry",
"=",
"None",
")",
":",
"buffer",
"=",
"io",
".",
"StringIO",
"(",
")",
"if",
"id",
"is",
"not",
"None",
":",
"buffer",
".",
"write",
"(",
"self",
".",
"LINE_SEP_EXPR",
".",
"sub",
"(",
"''",
",",
"'id: {}'",
".",
"format",
"(",
"id",
")",
")",
")",
"buffer",
".",
"write",
"(",
"self",
".",
"_sep",
")",
"if",
"event",
"is",
"not",
"None",
":",
"buffer",
".",
"write",
"(",
"self",
".",
"LINE_SEP_EXPR",
".",
"sub",
"(",
"''",
",",
"'event: {}'",
".",
"format",
"(",
"event",
")",
")",
")",
"buffer",
".",
"write",
"(",
"self",
".",
"_sep",
")",
"for",
"chunk",
"in",
"self",
".",
"LINE_SEP_EXPR",
".",
"split",
"(",
"data",
")",
":",
"buffer",
".",
"write",
"(",
"'data: {}'",
".",
"format",
"(",
"chunk",
")",
")",
"buffer",
".",
"write",
"(",
"self",
".",
"_sep",
")",
"if",
"retry",
"is",
"not",
"None",
":",
"if",
"not",
"isinstance",
"(",
"retry",
",",
"int",
")",
":",
"raise",
"TypeError",
"(",
"'retry argument must be int'",
")",
"buffer",
".",
"write",
"(",
"'retry: {}'",
".",
"format",
"(",
"retry",
")",
")",
"buffer",
".",
"write",
"(",
"self",
".",
"_sep",
")",
"buffer",
".",
"write",
"(",
"self",
".",
"_sep",
")",
"await",
"self",
".",
"write",
"(",
"buffer",
".",
"getvalue",
"(",
")",
".",
"encode",
"(",
"'utf-8'",
")",
")"
] |
Send data using EventSource protocol
:param str data: The data field for the message.
:param str id: The event ID to set the EventSource object's last
event ID value to.
:param str event: The event's type. If this is specified, an event will
be dispatched on the browser to the listener for the specified
event name; the web site would use addEventListener() to listen
for named events. The default event type is "message".
:param int retry: The reconnection time to use when attempting to send
the event. [What code handles this?] This must be an integer,
specifying the reconnection time in milliseconds. If a non-integer
value is specified, the field is ignored.
|
[
"Send",
"data",
"using",
"EventSource",
"protocol"
] |
5148d087f9df75ecea61f574d3c768506680e5dc
|
https://github.com/aio-libs/aiohttp-sse/blob/5148d087f9df75ecea61f574d3c768506680e5dc/aiohttp_sse/__init__.py#L76-L111
|
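For reference, the wire format a send() call produces, assuming the default '\r\n' separator; multi-line data is split into one 'data:' field per line:

# await resp.send('hello\nworld', id='1', event='greeting', retry=15000)
# writes these UTF-8 bytes:
payload = ('id: 1\r\n'
           'event: greeting\r\n'
           'data: hello\r\n'
           'data: world\r\n'
           'retry: 15000\r\n'
           '\r\n')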
13,240
|
aio-libs/aiohttp-sse
|
aiohttp_sse/__init__.py
|
EventSourceResponse.wait
|
async def wait(self):
"""EventSourceResponse object is used for streaming data to the client,
this method returns future, so we can wain until connection will
be closed or other task explicitly call ``stop_streaming`` method.
"""
if self._ping_task is None:
raise RuntimeError('Response is not started')
with contextlib.suppress(asyncio.CancelledError):
await self._ping_task
|
python
|
async def wait(self):
"""EventSourceResponse object is used for streaming data to the client,
this method returns future, so we can wain until connection will
be closed or other task explicitly call ``stop_streaming`` method.
"""
if self._ping_task is None:
raise RuntimeError('Response is not started')
with contextlib.suppress(asyncio.CancelledError):
await self._ping_task
|
[
"async",
"def",
"wait",
"(",
"self",
")",
":",
"if",
"self",
".",
"_ping_task",
"is",
"None",
":",
"raise",
"RuntimeError",
"(",
"'Response is not started'",
")",
"with",
"contextlib",
".",
"suppress",
"(",
"asyncio",
".",
"CancelledError",
")",
":",
"await",
"self",
".",
"_ping_task"
] |
EventSourceResponse object is used for streaming data to the client;
this method returns a future, so we can wait until the connection is
closed or another task explicitly calls the ``stop_streaming`` method.
|
[
"EventSourceResponse",
"object",
"is",
"used",
"for",
"streaming",
"data",
"to",
"the",
"client",
"this",
"method",
"returns",
"future",
"so",
"we",
"can",
"wain",
"until",
"connection",
"will",
"be",
"closed",
"or",
"other",
"task",
"explicitly",
"call",
"stop_streaming",
"method",
"."
] |
5148d087f9df75ecea61f574d3c768506680e5dc
|
https://github.com/aio-libs/aiohttp-sse/blob/5148d087f9df75ecea61f574d3c768506680e5dc/aiohttp_sse/__init__.py#L113-L121
|
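A hedged handler sketch tying prepare(), send() and wait() together in a plain aiohttp app; stop_streaming() cancels the ping task, which is what lets wait() return:

from aiohttp import web
from aiohttp_sse import EventSourceResponse

async def events(request):
    resp = EventSourceResponse()
    await resp.prepare(request)           # GET only; starts the ping task
    await resp.send('working', event='status')
    resp.stop_streaming()
    await resp.wait()
    return resp

app = web.Application()
app.router.add_get('/events', events)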
13,241
|
aio-libs/aiohttp-sse
|
aiohttp_sse/__init__.py
|
EventSourceResponse.ping_interval
|
def ping_interval(self, value):
"""Setter for ping_interval property.
:param int value: interval in sec between two ping values.
"""
if not isinstance(value, int):
raise TypeError("ping interval must be int")
if value < 0:
raise ValueError("ping interval must be greater then 0")
self._ping_interval = value
|
python
|
def ping_interval(self, value):
"""Setter for ping_interval property.
:param int value: interval in sec between two ping values.
"""
if not isinstance(value, int):
raise TypeError("ping interval must be int")
if value < 0:
raise ValueError("ping interval must be greater then 0")
self._ping_interval = value
|
[
"def",
"ping_interval",
"(",
"self",
",",
"value",
")",
":",
"if",
"not",
"isinstance",
"(",
"value",
",",
"int",
")",
":",
"raise",
"TypeError",
"(",
"\"ping interval must be int\"",
")",
"if",
"value",
"<",
"0",
":",
"raise",
"ValueError",
"(",
"\"ping interval must be greater then 0\"",
")",
"self",
".",
"_ping_interval",
"=",
"value"
] |
Setter for ping_interval property.
:param int value: interval in sec between two ping values.
|
[
"Setter",
"for",
"ping_interval",
"property",
"."
] |
5148d087f9df75ecea61f574d3c768506680e5dc
|
https://github.com/aio-libs/aiohttp-sse/blob/5148d087f9df75ecea61f574d3c768506680e5dc/aiohttp_sse/__init__.py#L140-L151
|
13,242
|
google/budou
|
budou/parser.py
|
get_parser
|
def get_parser(segmenter, **options):
"""Gets a parser.
Args:
segmenter (str): Segmenter to use.
options (:obj:`dict`, optional): Optional settings.
Returns:
Parser (:obj:`budou.parser.Parser`)
Raises:
ValueError: If unsupported segmenter is specified.
"""
if segmenter == 'nlapi':
return NLAPIParser(**options)
elif segmenter == 'mecab':
return MecabParser()
elif segmenter == 'tinysegmenter':
return TinysegmenterParser()
else:
raise ValueError('Segmenter {} is not supported.'.format(segmenter))
|
python
|
def get_parser(segmenter, **options):
"""Gets a parser.
Args:
segmenter (str): Segmenter to use.
options (:obj:`dict`, optional): Optional settings.
Returns:
Parser (:obj:`budou.parser.Parser`)
Raises:
ValueError: If unsupported segmenter is specified.
"""
if segmenter == 'nlapi':
return NLAPIParser(**options)
elif segmenter == 'mecab':
return MecabParser()
elif segmenter == 'tinysegmenter':
return TinysegmenterParser()
else:
raise ValueError('Segmenter {} is not supported.'.format(segmenter))
|
[
"def",
"get_parser",
"(",
"segmenter",
",",
"*",
"*",
"options",
")",
":",
"if",
"segmenter",
"==",
"'nlapi'",
":",
"return",
"NLAPIParser",
"(",
"*",
"*",
"options",
")",
"elif",
"segmenter",
"==",
"'mecab'",
":",
"return",
"MecabParser",
"(",
")",
"elif",
"segmenter",
"==",
"'tinysegmenter'",
":",
"return",
"TinysegmenterParser",
"(",
")",
"else",
":",
"raise",
"ValueError",
"(",
"'Segmenter {} is not supported.'",
".",
"format",
"(",
"segmenter",
")",
")"
] |
Gets a parser.
Args:
segmenter (str): Segmenter to use.
options (:obj:`dict`, optional): Optional settings.
Returns:
Parser (:obj:`budou.parser.Parser`)
Raises:
ValueError: If unsupported segmenter is specified.
|
[
"Gets",
"a",
"parser",
"."
] |
101224e6523186851f38ee57a6b2e7bdbd826de2
|
https://github.com/google/budou/blob/101224e6523186851f38ee57a6b2e7bdbd826de2/budou/parser.py#L129-L149
|
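A usage sketch for get_parser, assuming it is exported at the package level; the dependency-free 'tinysegmenter' backend is used here since 'nlapi' needs Google Cloud credentials:

import budou

parser = budou.get_parser('tinysegmenter')
result = parser.parse(u'今日も良い天気ですね', language='ja')
print(result['html_code'])  # chunks wrapped in SPAN tags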
13,243
|
google/budou
|
budou/parser.py
|
preprocess
|
def preprocess(source):
"""Removes unnecessary break lines and white spaces.
Args:
source (str): Input sentence.
Returns:
Preprocessed sentence. (str)
"""
doc = html5lib.parseFragment(source)
source = ET.tostring(doc, encoding='utf-8', method='text').decode('utf-8')
source = source.replace(u'\n', u'').strip()
source = re.sub(r'\s\s+', u' ', source)
return source
|
python
|
def preprocess(source):
"""Removes unnecessary break lines and white spaces.
Args:
source (str): Input sentence.
Returns:
Preprocessed sentence. (str)
"""
doc = html5lib.parseFragment(source)
source = ET.tostring(doc, encoding='utf-8', method='text').decode('utf-8')
source = source.replace(u'\n', u'').strip()
source = re.sub(r'\s\s+', u' ', source)
return source
|
[
"def",
"preprocess",
"(",
"source",
")",
":",
"doc",
"=",
"html5lib",
".",
"parseFragment",
"(",
"source",
")",
"source",
"=",
"ET",
".",
"tostring",
"(",
"doc",
",",
"encoding",
"=",
"'utf-8'",
",",
"method",
"=",
"'text'",
")",
".",
"decode",
"(",
"'utf-8'",
")",
"source",
"=",
"source",
".",
"replace",
"(",
"u'\\n'",
",",
"u''",
")",
".",
"strip",
"(",
")",
"source",
"=",
"re",
".",
"sub",
"(",
"r'\\s\\s+'",
",",
"u' '",
",",
"source",
")",
"return",
"source"
] |
Removes unnecessary break lines and white spaces.
Args:
source (str): Input sentence.
Returns:
Preprocessed sentence. (str)
|
[
"Removes",
"unnecessary",
"break",
"lines",
"and",
"white",
"spaces",
"."
] |
101224e6523186851f38ee57a6b2e7bdbd826de2
|
https://github.com/google/budou/blob/101224e6523186851f38ee57a6b2e7bdbd826de2/budou/parser.py#L169-L182
|
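A worked example for preprocess: markup is stripped via html5lib, newlines removed, and runs of whitespace collapsed to single spaces:

print(preprocess(u'<p>Hello,\n  world </p>'))  # -> 'Hello, world'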
13,244
|
google/budou
|
budou/budou.py
|
main
|
def main():
"""Budou main method for the command line tool.
"""
args = docopt(__doc__)
if args['--version']:
print(__version__)
sys.exit()
result = parse(
args['<source>'],
segmenter=args['--segmenter'],
language=args['--language'],
classname=args['--classname'])
print(result['html_code'])
sys.exit()
|
python
|
def main():
"""Budou main method for the command line tool.
"""
args = docopt(__doc__)
if args['--version']:
print(__version__)
sys.exit()
result = parse(
args['<source>'],
segmenter=args['--segmenter'],
language=args['--language'],
classname=args['--classname'])
print(result['html_code'])
sys.exit()
|
[
"def",
"main",
"(",
")",
":",
"args",
"=",
"docopt",
"(",
"__doc__",
")",
"if",
"args",
"[",
"'--version'",
"]",
":",
"print",
"(",
"__version__",
")",
"sys",
".",
"exit",
"(",
")",
"result",
"=",
"parse",
"(",
"args",
"[",
"'<source>'",
"]",
",",
"segmenter",
"=",
"args",
"[",
"'--segmenter'",
"]",
",",
"language",
"=",
"args",
"[",
"'--language'",
"]",
",",
"classname",
"=",
"args",
"[",
"'--classname'",
"]",
")",
"print",
"(",
"result",
"[",
"'html_code'",
"]",
")",
"sys",
".",
"exit",
"(",
")"
] |
Budou main method for the command line tool.
|
[
"Budou",
"main",
"method",
"for",
"the",
"command",
"line",
"tool",
"."
] |
101224e6523186851f38ee57a6b2e7bdbd826de2
|
https://github.com/google/budou/blob/101224e6523186851f38ee57a6b2e7bdbd826de2/budou/budou.py#L48-L62
|
13,245
|
google/budou
|
budou/budou.py
|
parse
|
def parse(source, segmenter='nlapi', language=None, max_length=None,
classname=None, attributes=None, **kwargs):
"""Parses input source.
Args:
source (str): Input source to process.
segmenter (:obj:`str`, optional): Segmenter to use [default: nlapi].
language (:obj:`str`, optional): Language code.
max_length (:obj:`int`, optional): Maximum length of a chunk.
classname (:obj:`str`, optional): Class name of output SPAN tags.
attributes (:obj:`dict`, optional): Attributes for output SPAN tags.
Returns:
Results in a dict. :code:`chunks` holds a list of chunks
(:obj:`budou.chunk.ChunkList`) and :code:`html_code` holds the output HTML
code.
"""
parser = get_parser(segmenter, **kwargs)
return parser.parse(
source, language=language, max_length=max_length, classname=classname,
attributes=attributes)
|
python
|
def parse(source, segmenter='nlapi', language=None, max_length=None,
classname=None, attributes=None, **kwargs):
"""Parses input source.
Args:
source (str): Input source to process.
segmenter (:obj:`str`, optional): Segmenter to use [default: nlapi].
language (:obj:`str`, optional): Language code.
max_length (:obj:`int`, optional): Maximum length of a chunk.
classname (:obj:`str`, optional): Class name of output SPAN tags.
attributes (:obj:`dict`, optional): Attributes for output SPAN tags.
Returns:
Results in a dict. :code:`chunks` holds a list of chunks
(:obj:`budou.chunk.ChunkList`) and :code:`html_code` holds the output HTML
code.
"""
parser = get_parser(segmenter, **kwargs)
return parser.parse(
source, language=language, max_length=max_length, classname=classname,
attributes=attributes)
|
[
"def",
"parse",
"(",
"source",
",",
"segmenter",
"=",
"'nlapi'",
",",
"language",
"=",
"None",
",",
"max_length",
"=",
"None",
",",
"classname",
"=",
"None",
",",
"attributes",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"parser",
"=",
"get_parser",
"(",
"segmenter",
",",
"*",
"*",
"kwargs",
")",
"return",
"parser",
".",
"parse",
"(",
"source",
",",
"language",
"=",
"language",
",",
"max_length",
"=",
"max_length",
",",
"classname",
"=",
"classname",
",",
"attributes",
"=",
"attributes",
")"
] |
Parses input source.
Args:
source (str): Input source to process.
segmenter (:obj:`str`, optional): Segmenter to use [default: nlapi].
language (:obj:`str`, optional): Language code.
max_length (:obj:`int`, optional): Maximum length of a chunk.
classname (:obj:`str`, optional): Class name of output SPAN tags.
attributes (:obj:`dict`, optional): Attributes for output SPAN tags.
Returns:
Results in a dict. :code:`chunks` holds a list of chunks
(:obj:`budou.chunk.ChunkList`) and :code:`html_code` holds the output HTML
code.
|
[
"Parses",
"input",
"source",
"."
] |
101224e6523186851f38ee57a6b2e7bdbd826de2
|
https://github.com/google/budou/blob/101224e6523186851f38ee57a6b2e7bdbd826de2/budou/budou.py#L64-L84
|
13,246
|
google/budou
|
budou/budou.py
|
authenticate
|
def authenticate(json_path=None):
"""Gets a Natural Language API parser by authenticating the API.
**This method is deprecated.** Please use :obj:`budou.get_parser` to obtain a
parser instead.
Args:
json_path (:obj:`str`, optional): The file path to the service account's
credentials.
Returns:
Parser. (:obj:`budou.parser.NLAPIParser`)
"""
  msg = ('budou.authenticate() is deprecated. '
'Please use budou.get_parser() to obtain a parser instead.')
warnings.warn(msg, DeprecationWarning)
parser = get_parser('nlapi', credentials_path=json_path)
return parser
|
python
|
def authenticate(json_path=None):
"""Gets a Natural Language API parser by authenticating the API.
**This method is deprecated.** Please use :obj:`budou.get_parser` to obtain a
parser instead.
Args:
json_path (:obj:`str`, optional): The file path to the service account's
credentials.
Returns:
Parser. (:obj:`budou.parser.NLAPIParser`)
"""
  msg = ('budou.authenticate() is deprecated. '
'Please use budou.get_parser() to obtain a parser instead.')
warnings.warn(msg, DeprecationWarning)
parser = get_parser('nlapi', credentials_path=json_path)
return parser
|
[
"def",
"authenticate",
"(",
"json_path",
"=",
"None",
")",
":",
"msg",
"=",
"(",
"'budou.authentication() is deprecated. '",
"'Please use budou.get_parser() to obtain a parser instead.'",
")",
"warnings",
".",
"warn",
"(",
"msg",
",",
"DeprecationWarning",
")",
"parser",
"=",
"get_parser",
"(",
"'nlapi'",
",",
"credentials_path",
"=",
"json_path",
")",
"return",
"parser"
] |
Gets a Natural Language API parser by authenticating the API.
**This method is deprecated.** Please use :obj:`budou.get_parser` to obtain a
parser instead.
Args:
json_path (:obj:`str`, optional): The file path to the service account's
credentials.
Returns:
Parser. (:obj:`budou.parser.NLAPIParser`)
|
[
"Gets",
"a",
"Natural",
"Language",
"API",
"parser",
"by",
"authenticating",
"the",
"API",
"."
] |
101224e6523186851f38ee57a6b2e7bdbd826de2
|
https://github.com/google/budou/blob/101224e6523186851f38ee57a6b2e7bdbd826de2/budou/budou.py#L86-L104
|
13,247
|
google/budou
|
budou/nlapisegmenter.py
|
_memorize
|
def _memorize(func):
"""Decorator to cache the given function's output.
"""
def _wrapper(self, *args, **kwargs):
"""Wrapper to cache the function's output.
"""
if self.use_cache:
cache = load_cache(self.cache_filename)
original_key = ':'.join([
self.__class__.__name__,
func.__name__,
'_'.join([str(a) for a in args]),
'_'.join([str(w) for w in kwargs.values()])])
cache_key = hashlib.md5(original_key.encode('utf-8')).hexdigest()
cached_val = cache.get(cache_key)
if cached_val:
return cached_val
val = func(self, *args, **kwargs)
if self.use_cache:
cache.set(cache_key, val)
return val
return _wrapper
|
python
|
def _memorize(func):
"""Decorator to cache the given function's output.
"""
def _wrapper(self, *args, **kwargs):
"""Wrapper to cache the function's output.
"""
if self.use_cache:
cache = load_cache(self.cache_filename)
original_key = ':'.join([
self.__class__.__name__,
func.__name__,
'_'.join([str(a) for a in args]),
'_'.join([str(w) for w in kwargs.values()])])
cache_key = hashlib.md5(original_key.encode('utf-8')).hexdigest()
cached_val = cache.get(cache_key)
if cached_val:
return cached_val
val = func(self, *args, **kwargs)
if self.use_cache:
cache.set(cache_key, val)
return val
return _wrapper
|
[
"def",
"_memorize",
"(",
"func",
")",
":",
"def",
"_wrapper",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"\"\"\"Wrapper to cache the function's output.\n \"\"\"",
"if",
"self",
".",
"use_cache",
":",
"cache",
"=",
"load_cache",
"(",
"self",
".",
"cache_filename",
")",
"original_key",
"=",
"':'",
".",
"join",
"(",
"[",
"self",
".",
"__class__",
".",
"__name__",
",",
"func",
".",
"__name__",
",",
"'_'",
".",
"join",
"(",
"[",
"str",
"(",
"a",
")",
"for",
"a",
"in",
"args",
"]",
")",
",",
"'_'",
".",
"join",
"(",
"[",
"str",
"(",
"w",
")",
"for",
"w",
"in",
"kwargs",
".",
"values",
"(",
")",
"]",
")",
"]",
")",
"cache_key",
"=",
"hashlib",
".",
"md5",
"(",
"original_key",
".",
"encode",
"(",
"'utf-8'",
")",
")",
".",
"hexdigest",
"(",
")",
"cached_val",
"=",
"cache",
".",
"get",
"(",
"cache_key",
")",
"if",
"cached_val",
":",
"return",
"cached_val",
"val",
"=",
"func",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"if",
"self",
".",
"use_cache",
":",
"cache",
".",
"set",
"(",
"cache_key",
",",
"val",
")",
"return",
"val",
"return",
"_wrapper"
] |
Decorator to cache the given function's output.
|
[
"Decorator",
"to",
"cache",
"the",
"given",
"function",
"s",
"output",
"."
] |
101224e6523186851f38ee57a6b2e7bdbd826de2
|
https://github.com/google/budou/blob/101224e6523186851f38ee57a6b2e7bdbd826de2/budou/nlapisegmenter.py#L62-L84
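[Annotation] A self-contained sketch of the memoization idea in the record above: build a stable key from class name, method name, and arguments, hash it, and consult a cache before calling through. The dict-backed cache stands in for budou's file-backed load_cache; the use_cache flag mirrors the toggle in the decorator.

import hashlib

_CACHE = {}  # stand-in for budou's persistent cache backend

def memoize(func):
    def wrapper(self, *args, **kwargs):
        if self.use_cache:
            raw = ':'.join([self.__class__.__name__, func.__name__,
                            '_'.join(str(a) for a in args),
                            '_'.join(str(v) for v in kwargs.values())])
            key = hashlib.md5(raw.encode('utf-8')).hexdigest()
            if key in _CACHE:
                return _CACHE[key]
        val = func(self, *args, **kwargs)
        if self.use_cache:
            _CACHE[key] = val
        return val
    return wrapper

class Segmenter:
    use_cache = True
    calls = 0

    @memoize
    def segment(self, text):
        Segmenter.calls += 1
        return text.split()

if __name__ == '__main__':
    seg = Segmenter()
    assert seg.segment('a b') == seg.segment('a b')
    assert Segmenter.calls == 1  # second call was served from the cache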
|
13,248
|
google/budou
|
budou/nlapisegmenter.py
|
NLAPISegmenter._get_source_chunks
|
def _get_source_chunks(self, input_text, language=None):
"""Returns a chunk list retrieved from Syntax Analysis results.
Args:
input_text (str): Text to annotate.
language (:obj:`str`, optional): Language of the text.
Returns:
A chunk list. (:obj:`budou.chunk.ChunkList`)
"""
chunks = ChunkList()
seek = 0
result = self._get_annotations(input_text, language=language)
tokens = result['tokens']
language = result['language']
for i, token in enumerate(tokens):
word = token['text']['content']
begin_offset = token['text']['beginOffset']
label = token['dependencyEdge']['label']
pos = token['partOfSpeech']['tag']
if begin_offset > seek:
chunks.append(Chunk.space())
seek = begin_offset
chunk = Chunk(word, pos, label)
if chunk.label in _DEPENDENT_LABEL:
# Determining concatenating direction based on syntax dependency.
chunk.dependency = i < token['dependencyEdge']['headTokenIndex']
if chunk.is_punct():
chunk.dependency = chunk.is_open_punct()
chunks.append(chunk)
seek += len(word)
return chunks, language
|
python
|
def _get_source_chunks(self, input_text, language=None):
"""Returns a chunk list retrieved from Syntax Analysis results.
Args:
input_text (str): Text to annotate.
language (:obj:`str`, optional): Language of the text.
Returns:
A chunk list. (:obj:`budou.chunk.ChunkList`)
"""
chunks = ChunkList()
seek = 0
result = self._get_annotations(input_text, language=language)
tokens = result['tokens']
language = result['language']
for i, token in enumerate(tokens):
word = token['text']['content']
begin_offset = token['text']['beginOffset']
label = token['dependencyEdge']['label']
pos = token['partOfSpeech']['tag']
if begin_offset > seek:
chunks.append(Chunk.space())
seek = begin_offset
chunk = Chunk(word, pos, label)
if chunk.label in _DEPENDENT_LABEL:
# Determining concatenating direction based on syntax dependency.
chunk.dependency = i < token['dependencyEdge']['headTokenIndex']
if chunk.is_punct():
chunk.dependency = chunk.is_open_punct()
chunks.append(chunk)
seek += len(word)
return chunks, language
|
[
"def",
"_get_source_chunks",
"(",
"self",
",",
"input_text",
",",
"language",
"=",
"None",
")",
":",
"chunks",
"=",
"ChunkList",
"(",
")",
"seek",
"=",
"0",
"result",
"=",
"self",
".",
"_get_annotations",
"(",
"input_text",
",",
"language",
"=",
"language",
")",
"tokens",
"=",
"result",
"[",
"'tokens'",
"]",
"language",
"=",
"result",
"[",
"'language'",
"]",
"for",
"i",
",",
"token",
"in",
"enumerate",
"(",
"tokens",
")",
":",
"word",
"=",
"token",
"[",
"'text'",
"]",
"[",
"'content'",
"]",
"begin_offset",
"=",
"token",
"[",
"'text'",
"]",
"[",
"'beginOffset'",
"]",
"label",
"=",
"token",
"[",
"'dependencyEdge'",
"]",
"[",
"'label'",
"]",
"pos",
"=",
"token",
"[",
"'partOfSpeech'",
"]",
"[",
"'tag'",
"]",
"if",
"begin_offset",
">",
"seek",
":",
"chunks",
".",
"append",
"(",
"Chunk",
".",
"space",
"(",
")",
")",
"seek",
"=",
"begin_offset",
"chunk",
"=",
"Chunk",
"(",
"word",
",",
"pos",
",",
"label",
")",
"if",
"chunk",
".",
"label",
"in",
"_DEPENDENT_LABEL",
":",
"# Determining concatenating direction based on syntax dependency.",
"chunk",
".",
"dependency",
"=",
"i",
"<",
"token",
"[",
"'dependencyEdge'",
"]",
"[",
"'headTokenIndex'",
"]",
"if",
"chunk",
".",
"is_punct",
"(",
")",
":",
"chunk",
".",
"dependency",
"=",
"chunk",
".",
"is_open_punct",
"(",
")",
"chunks",
".",
"append",
"(",
"chunk",
")",
"seek",
"+=",
"len",
"(",
"word",
")",
"return",
"chunks",
",",
"language"
] |
Returns a chunk list retrieved from Syntax Analysis results.
Args:
input_text (str): Text to annotate.
language (:obj:`str`, optional): Language of the text.
Returns:
A chunk list. (:obj:`budou.chunk.ChunkList`)
|
[
"Returns",
"a",
"chunk",
"list",
"retrieved",
"from",
"Syntax",
"Analysis",
"results",
"."
] |
101224e6523186851f38ee57a6b2e7bdbd826de2
|
https://github.com/google/budou/blob/101224e6523186851f38ee57a6b2e7bdbd826de2/budou/nlapisegmenter.py#L170-L201
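[Annotation] The loop in the record above consumes token dicts from the Natural Language API's annotateText response. A hedged sketch of that shape (field names taken from the lookups in the code; the values are made up) shows how beginOffset gaps become explicit space chunks:

# Hypothetical annotateText tokens for the text "I am" (offsets 0 and 2).
tokens = [
    {'text': {'content': 'I', 'beginOffset': 0},
     'dependencyEdge': {'label': 'NSUBJ', 'headTokenIndex': 1},
     'partOfSpeech': {'tag': 'PRON'}},
    {'text': {'content': 'am', 'beginOffset': 2},
     'dependencyEdge': {'label': 'ROOT', 'headTokenIndex': 1},
     'partOfSpeech': {'tag': 'VERB'}},
]

seek = 0
words = []
for token in tokens:
    begin = token['text']['beginOffset']
    if begin > seek:        # gap between tokens -> an explicit space chunk
        words.append(' ')
        seek = begin
    words.append(token['text']['content'])
    seek += len(token['text']['content'])

assert ''.join(words) == 'I am'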
|
13,249
|
google/budou
|
budou/nlapisegmenter.py
|
NLAPISegmenter._group_chunks_by_entities
|
def _group_chunks_by_entities(self, chunks, entities):
"""Groups chunks by entities retrieved from NL API Entity Analysis.
Args:
chunks (:obj:`budou.chunk.ChunkList`): List of chunks to be processed.
entities (:obj:`list` of :obj:`dict`): List of entities.
Returns:
A chunk list. (:obj:`budou.chunk.ChunkList`)
"""
for entity in entities:
chunks_to_concat = chunks.get_overlaps(
entity['beginOffset'], len(entity['content']))
if not chunks_to_concat:
continue
new_chunk_word = u''.join([chunk.word for chunk in chunks_to_concat])
new_chunk = Chunk(new_chunk_word)
chunks.swap(chunks_to_concat, new_chunk)
return chunks
|
python
|
def _group_chunks_by_entities(self, chunks, entities):
"""Groups chunks by entities retrieved from NL API Entity Analysis.
Args:
chunks (:obj:`budou.chunk.ChunkList`): List of chunks to be processed.
entities (:obj:`list` of :obj:`dict`): List of entities.
Returns:
A chunk list. (:obj:`budou.chunk.ChunkList`)
"""
for entity in entities:
chunks_to_concat = chunks.get_overlaps(
entity['beginOffset'], len(entity['content']))
if not chunks_to_concat:
continue
new_chunk_word = u''.join([chunk.word for chunk in chunks_to_concat])
new_chunk = Chunk(new_chunk_word)
chunks.swap(chunks_to_concat, new_chunk)
return chunks
|
[
"def",
"_group_chunks_by_entities",
"(",
"self",
",",
"chunks",
",",
"entities",
")",
":",
"for",
"entity",
"in",
"entities",
":",
"chunks_to_concat",
"=",
"chunks",
".",
"get_overlaps",
"(",
"entity",
"[",
"'beginOffset'",
"]",
",",
"len",
"(",
"entity",
"[",
"'content'",
"]",
")",
")",
"if",
"not",
"chunks_to_concat",
":",
"continue",
"new_chunk_word",
"=",
"u''",
".",
"join",
"(",
"[",
"chunk",
".",
"word",
"for",
"chunk",
"in",
"chunks_to_concat",
"]",
")",
"new_chunk",
"=",
"Chunk",
"(",
"new_chunk_word",
")",
"chunks",
".",
"swap",
"(",
"chunks_to_concat",
",",
"new_chunk",
")",
"return",
"chunks"
] |
Groups chunks by entities retrieved from NL API Entity Analysis.
Args:
chunks (:obj:`budou.chunk.ChunkList`): List of chunks to be processed.
entities (:obj:`list` of :obj:`dict`): List of entities.
Returns:
A chunk list. (:obj:`budou.chunk.ChunkList`)
|
[
"Groups",
"chunks",
"by",
"entities",
"retrieved",
"from",
"NL",
"API",
"Entity",
"Analysis",
"."
] |
101224e6523186851f38ee57a6b2e7bdbd826de2
|
https://github.com/google/budou/blob/101224e6523186851f38ee57a6b2e7bdbd826de2/budou/nlapisegmenter.py#L203-L221
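[Annotation] To see why entity grouping matters, this sketch reproduces the merge step on plain strings: chunks overlapping an entity's character range are joined so a recognized name is never split across line breaks. A simplified stand-in for ChunkList.get_overlaps and swap (both appear later in this dump):

chunks = ['Tokyo', ' ', 'Tower', ' ', 'opened']
entity = {'content': 'Tokyo Tower', 'beginOffset': 0}

# Find chunks whose character span overlaps the entity's range.
start, length = entity['beginOffset'], len(entity['content'])
index, overlaps = 0, []
for i, word in enumerate(chunks):
    if start < index + len(word) and index < start + length:
        overlaps.append(i)
    index += len(word)

# Replace the overlapping run with a single merged chunk.
merged = ''.join(chunks[overlaps[0]:overlaps[-1] + 1])
chunks[overlaps[0]:overlaps[-1] + 1] = [merged]
assert chunks == ['Tokyo Tower', ' ', 'opened']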
|
13,250
|
google/budou
|
budou/nlapisegmenter.py
|
NLAPISegmenter._get_annotations
|
def _get_annotations(self, text, language=''):
"""Returns the list of annotations retrieved from the given text.
Args:
text (str): Input text.
language (:obj:`str`, optional): Language code.
Returns:
Results in a dictionary. :code:`tokens` contains the list of annotations
and :code:`language` contains the inferred language from the input.
"""
body = {
'document': {
'type': 'PLAIN_TEXT',
'content': text,
},
'features': {
'extract_syntax': True,
},
'encodingType': 'UTF32',
}
if language:
body['document']['language'] = language
request = self.service.documents().annotateText(body=body)
response = request.execute()
tokens = response.get('tokens', [])
language = response.get('language')
return {'tokens': tokens, 'language': language}
|
python
|
def _get_annotations(self, text, language=''):
"""Returns the list of annotations retrieved from the given text.
Args:
text (str): Input text.
language (:obj:`str`, optional): Language code.
Returns:
Results in a dictionary. :code:`tokens` contains the list of annotations
and :code:`language` contains the inferred language from the input.
"""
body = {
'document': {
'type': 'PLAIN_TEXT',
'content': text,
},
'features': {
'extract_syntax': True,
},
'encodingType': 'UTF32',
}
if language:
body['document']['language'] = language
request = self.service.documents().annotateText(body=body)
response = request.execute()
tokens = response.get('tokens', [])
language = response.get('language')
return {'tokens': tokens, 'language': language}
|
[
"def",
"_get_annotations",
"(",
"self",
",",
"text",
",",
"language",
"=",
"''",
")",
":",
"body",
"=",
"{",
"'document'",
":",
"{",
"'type'",
":",
"'PLAIN_TEXT'",
",",
"'content'",
":",
"text",
",",
"}",
",",
"'features'",
":",
"{",
"'extract_syntax'",
":",
"True",
",",
"}",
",",
"'encodingType'",
":",
"'UTF32'",
",",
"}",
"if",
"language",
":",
"body",
"[",
"'document'",
"]",
"[",
"'language'",
"]",
"=",
"language",
"request",
"=",
"self",
".",
"service",
".",
"documents",
"(",
")",
".",
"annotateText",
"(",
"body",
"=",
"body",
")",
"response",
"=",
"request",
".",
"execute",
"(",
")",
"tokens",
"=",
"response",
".",
"get",
"(",
"'tokens'",
",",
"[",
"]",
")",
"language",
"=",
"response",
".",
"get",
"(",
"'language'",
")",
"return",
"{",
"'tokens'",
":",
"tokens",
",",
"'language'",
":",
"language",
"}"
] |
Returns the list of annotations retrieved from the given text.
Args:
text (str): Input text.
language (:obj:`str`, optional): Language code.
Returns:
Results in a dictionary. :code:`tokens` contains the list of annotations
and :code:`language` contains the inferred language from the input.
|
[
"Returns",
"the",
"list",
"of",
"annotations",
"retrieved",
"from",
"the",
"given",
"text",
"."
] |
101224e6523186851f38ee57a6b2e7bdbd826de2
|
https://github.com/google/budou/blob/101224e6523186851f38ee57a6b2e7bdbd826de2/budou/nlapisegmenter.py#L224-L253
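[Annotation] For reference, a sketch of the request body the method above builds for documents().annotateText (field names copied from the code; the service client itself comes from google-api-python-client and is not constructed here):

def build_annotate_body(text, language=''):
    # Mirrors the body constructed in _get_annotations above.
    body = {
        'document': {'type': 'PLAIN_TEXT', 'content': text},
        'features': {'extract_syntax': True},
        'encodingType': 'UTF32',
    }
    if language:
        body['document']['language'] = language
    return body

if __name__ == '__main__':
    body = build_annotate_body('こんにちは', language='ja')
    assert body['document']['language'] == 'ja'
    assert body['encodingType'] == 'UTF32'  # offsets counted in code points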
|
13,251
|
google/budou
|
budou/nlapisegmenter.py
|
NLAPISegmenter._get_entities
|
def _get_entities(self, text, language=''):
"""Returns the list of entities retrieved from the given text.
Args:
text (str): Input text.
language (:obj:`str`, optional): Language code.
Returns:
List of entities.
"""
body = {
'document': {
'type': 'PLAIN_TEXT',
'content': text,
},
'encodingType': 'UTF32',
}
if language:
body['document']['language'] = language
request = self.service.documents().analyzeEntities(body=body)
response = request.execute()
result = []
for entity in response.get('entities', []):
mentions = entity.get('mentions', [])
if not mentions:
continue
entity_text = mentions[0]['text']
offset = entity_text['beginOffset']
for word in entity_text['content'].split():
result.append({'content': word, 'beginOffset': offset})
offset += len(word)
return result
|
python
|
def _get_entities(self, text, language=''):
"""Returns the list of entities retrieved from the given text.
Args:
text (str): Input text.
language (:obj:`str`, optional): Language code.
Returns:
List of entities.
"""
body = {
'document': {
'type': 'PLAIN_TEXT',
'content': text,
},
'encodingType': 'UTF32',
}
if language:
body['document']['language'] = language
request = self.service.documents().analyzeEntities(body=body)
response = request.execute()
result = []
for entity in response.get('entities', []):
mentions = entity.get('mentions', [])
if not mentions:
continue
entity_text = mentions[0]['text']
offset = entity_text['beginOffset']
for word in entity_text['content'].split():
result.append({'content': word, 'beginOffset': offset})
offset += len(word)
return result
|
[
"def",
"_get_entities",
"(",
"self",
",",
"text",
",",
"language",
"=",
"''",
")",
":",
"body",
"=",
"{",
"'document'",
":",
"{",
"'type'",
":",
"'PLAIN_TEXT'",
",",
"'content'",
":",
"text",
",",
"}",
",",
"'encodingType'",
":",
"'UTF32'",
",",
"}",
"if",
"language",
":",
"body",
"[",
"'document'",
"]",
"[",
"'language'",
"]",
"=",
"language",
"request",
"=",
"self",
".",
"service",
".",
"documents",
"(",
")",
".",
"analyzeEntities",
"(",
"body",
"=",
"body",
")",
"response",
"=",
"request",
".",
"execute",
"(",
")",
"result",
"=",
"[",
"]",
"for",
"entity",
"in",
"response",
".",
"get",
"(",
"'entities'",
",",
"[",
"]",
")",
":",
"mentions",
"=",
"entity",
".",
"get",
"(",
"'mentions'",
",",
"[",
"]",
")",
"if",
"not",
"mentions",
":",
"continue",
"entity_text",
"=",
"mentions",
"[",
"0",
"]",
"[",
"'text'",
"]",
"offset",
"=",
"entity_text",
"[",
"'beginOffset'",
"]",
"for",
"word",
"in",
"entity_text",
"[",
"'content'",
"]",
".",
"split",
"(",
")",
":",
"result",
".",
"append",
"(",
"{",
"'content'",
":",
"word",
",",
"'beginOffset'",
":",
"offset",
"}",
")",
"offset",
"+=",
"len",
"(",
"word",
")",
"return",
"result"
] |
Returns the list of entities retrieved from the given text.
Args:
text (str): Input text.
language (:obj:`str`, optional): Language code.
Returns:
List of entities.
|
[
"Returns",
"the",
"list",
"of",
"entities",
"retrieved",
"from",
"the",
"given",
"text",
"."
] |
101224e6523186851f38ee57a6b2e7bdbd826de2
|
https://github.com/google/budou/blob/101224e6523186851f38ee57a6b2e7bdbd826de2/budou/nlapisegmenter.py#L256-L288
|
13,252
|
google/budou
|
budou/cachefactory.py
|
PickleCache.get
|
def get(self, key):
"""Gets a value by a key.
Args:
key (str): Key to retrieve the value.
Returns: Retrieved value.
"""
self._create_file_if_none_exists()
with open(self.filename, 'rb') as file_object:
cache_pickle = pickle.load(file_object)
val = cache_pickle.get(key, None)
return val
|
python
|
def get(self, key):
"""Gets a value by a key.
Args:
key (str): Key to retrieve the value.
Returns: Retrieved value.
"""
self._create_file_if_none_exists()
with open(self.filename, 'rb') as file_object:
cache_pickle = pickle.load(file_object)
val = cache_pickle.get(key, None)
return val
|
[
"def",
"get",
"(",
"self",
",",
"key",
")",
":",
"self",
".",
"_create_file_if_none_exists",
"(",
")",
"with",
"open",
"(",
"self",
".",
"filename",
",",
"'rb'",
")",
"as",
"file_object",
":",
"cache_pickle",
"=",
"pickle",
".",
"load",
"(",
"file_object",
")",
"val",
"=",
"cache_pickle",
".",
"get",
"(",
"key",
",",
"None",
")",
"return",
"val"
] |
Gets a value by a key.
Args:
key (str): Key to retrieve the value.
Returns: Retrieved value.
|
[
"Gets",
"a",
"value",
"by",
"a",
"key",
"."
] |
101224e6523186851f38ee57a6b2e7bdbd826de2
|
https://github.com/google/budou/blob/101224e6523186851f38ee57a6b2e7bdbd826de2/budou/cachefactory.py#L93-L105
|
13,253
|
google/budou
|
budou/cachefactory.py
|
PickleCache.set
|
def set(self, key, val):
"""Sets a value in a key.
Args:
key (str): Key for the value.
val: Value to set.
Returns:
Retrieved value.
"""
self._create_file_if_none_exists()
with open(self.filename, 'r+b') as file_object:
cache_pickle = pickle.load(file_object)
cache_pickle[key] = val
file_object.seek(0)
pickle.dump(cache_pickle, file_object)
|
python
|
def set(self, key, val):
"""Sets a value in a key.
Args:
key (str): Key for the value.
val: Value to set.
Returns:
Retrieved value.
"""
self._create_file_if_none_exists()
with open(self.filename, 'r+b') as file_object:
cache_pickle = pickle.load(file_object)
cache_pickle[key] = val
file_object.seek(0)
pickle.dump(cache_pickle, file_object)
|
[
"def",
"set",
"(",
"self",
",",
"key",
",",
"val",
")",
":",
"self",
".",
"_create_file_if_none_exists",
"(",
")",
"with",
"open",
"(",
"self",
".",
"filename",
",",
"'r+b'",
")",
"as",
"file_object",
":",
"cache_pickle",
"=",
"pickle",
".",
"load",
"(",
"file_object",
")",
"cache_pickle",
"[",
"key",
"]",
"=",
"val",
"file_object",
".",
"seek",
"(",
"0",
")",
"pickle",
".",
"dump",
"(",
"cache_pickle",
",",
"file_object",
")"
] |
Sets a value in a key.
Args:
key (str): Key for the value.
val: Value to set.
Returns:
Retrieved value.
|
[
"Sets",
"a",
"value",
"in",
"a",
"key",
"."
] |
101224e6523186851f38ee57a6b2e7bdbd826de2
|
https://github.com/google/budou/blob/101224e6523186851f38ee57a6b2e7bdbd826de2/budou/cachefactory.py#L107-L122
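[Annotation] The two PickleCache methods above (get and set) round-trip a dict through a pickle file. A self-contained sketch of that round trip, using tempfile so it runs anywhere; _create_file_if_none_exists is approximated by seeding the file with an empty dict. Note that rewriting in place after seek(0) without truncate() can leave stale trailing bytes when the new pickle is shorter, though pickle.load stops at the stream's end, so reads still work.

import os
import pickle
import tempfile

path = os.path.join(tempfile.mkdtemp(), 'cache.pickle')

# Seed the file with an empty dict, as _create_file_if_none_exists would.
with open(path, 'wb') as f:
    pickle.dump({}, f)

# set(): load, mutate, rewind, dump.
with open(path, 'r+b') as f:
    data = pickle.load(f)
    data['greeting'] = 'hello'
    f.seek(0)
    pickle.dump(data, f)

# get(): load and look up, defaulting to None.
with open(path, 'rb') as f:
    assert pickle.load(f).get('greeting') == 'hello'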
|
13,254
|
google/budou
|
budou/chunk.py
|
Chunk.serialize
|
def serialize(self):
"""Returns serialized chunk data in dictionary."""
return {
'word': self.word,
'pos': self.pos,
'label': self.label,
'dependency': self.dependency,
'has_cjk': self.has_cjk(),
}
|
python
|
def serialize(self):
"""Returns serialized chunk data in dictionary."""
return {
'word': self.word,
'pos': self.pos,
'label': self.label,
'dependency': self.dependency,
'has_cjk': self.has_cjk(),
}
|
[
"def",
"serialize",
"(",
"self",
")",
":",
"return",
"{",
"'word'",
":",
"self",
".",
"word",
",",
"'pos'",
":",
"self",
".",
"pos",
",",
"'label'",
":",
"self",
".",
"label",
",",
"'dependency'",
":",
"self",
".",
"dependency",
",",
"'has_cjk'",
":",
"self",
".",
"has_cjk",
"(",
")",
",",
"}"
] |
Returns serialized chunk data in dictionary.
|
[
"Returns",
"serialized",
"chunk",
"data",
"in",
"dictionary",
"."
] |
101224e6523186851f38ee57a6b2e7bdbd826de2
|
https://github.com/google/budou/blob/101224e6523186851f38ee57a6b2e7bdbd826de2/budou/chunk.py#L79-L87
|
13,255
|
google/budou
|
budou/chunk.py
|
Chunk.has_cjk
|
def has_cjk(self):
"""Checks if the word of the chunk contains CJK characters.
This is using unicode codepoint ranges from
https://github.com/nltk/nltk/blob/develop/nltk/tokenize/util.py#L149
Returns:
bool: True if the chunk has any CJK character.
"""
cjk_codepoint_ranges = [
(4352, 4607), (11904, 42191), (43072, 43135), (44032, 55215),
(63744, 64255), (65072, 65103), (65381, 65500), (131072, 196607)]
for char in self.word:
if any([start <= ord(char) <= end
for start, end in cjk_codepoint_ranges]):
return True
return False
|
python
|
def has_cjk(self):
"""Checks if the word of the chunk contains CJK characters.
This is using unicode codepoint ranges from
https://github.com/nltk/nltk/blob/develop/nltk/tokenize/util.py#L149
Returns:
bool: True if the chunk has any CJK character.
"""
cjk_codepoint_ranges = [
(4352, 4607), (11904, 42191), (43072, 43135), (44032, 55215),
(63744, 64255), (65072, 65103), (65381, 65500), (131072, 196607)]
for char in self.word:
if any([start <= ord(char) <= end
for start, end in cjk_codepoint_ranges]):
return True
return False
|
[
"def",
"has_cjk",
"(",
"self",
")",
":",
"cjk_codepoint_ranges",
"=",
"[",
"(",
"4352",
",",
"4607",
")",
",",
"(",
"11904",
",",
"42191",
")",
",",
"(",
"43072",
",",
"43135",
")",
",",
"(",
"44032",
",",
"55215",
")",
",",
"(",
"63744",
",",
"64255",
")",
",",
"(",
"65072",
",",
"65103",
")",
",",
"(",
"65381",
",",
"65500",
")",
",",
"(",
"131072",
",",
"196607",
")",
"]",
"for",
"char",
"in",
"self",
".",
"word",
":",
"if",
"any",
"(",
"[",
"start",
"<=",
"ord",
"(",
"char",
")",
"<=",
"end",
"for",
"start",
",",
"end",
"in",
"cjk_codepoint_ranges",
"]",
")",
":",
"return",
"True",
"return",
"False"
] |
Checks if the word of the chunk contains CJK characters.
This is using unicode codepoint ranges from
https://github.com/nltk/nltk/blob/develop/nltk/tokenize/util.py#L149
Returns:
bool: True if the chunk has any CJK character.
|
[
"Checks",
"if",
"the",
"word",
"of",
"the",
"chunk",
"contains",
"CJK",
"characters",
"."
] |
101224e6523186851f38ee57a6b2e7bdbd826de2
|
https://github.com/google/budou/blob/101224e6523186851f38ee57a6b2e7bdbd826de2/budou/chunk.py#L119-L135
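[Annotation] A quick, runnable check of the codepoint-range test above (ranges copied verbatim from the record; CJK ideographs and kana fall inside the 11904-42191 block):

CJK_RANGES = [
    (4352, 4607), (11904, 42191), (43072, 43135), (44032, 55215),
    (63744, 64255), (65072, 65103), (65381, 65500), (131072, 196607)]

def has_cjk(word):
    # True if any character falls inside one of the CJK codepoint blocks.
    return any(start <= ord(ch) <= end
               for ch in word
               for start, end in CJK_RANGES)

assert has_cjk('今日は')           # CJK ideographs and kana
assert has_cjk('mixed 今 text')    # a single CJK character is enough
assert not has_cjk('latin only')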
|
13,256
|
google/budou
|
budou/chunk.py
|
ChunkList.get_overlaps
|
def get_overlaps(self, offset, length):
"""Returns chunks overlapped with the given range.
Args:
offset (int): Begin offset of the range.
length (int): Length of the range.
Returns:
Overlapped chunks. (:obj:`budou.chunk.ChunkList`)
"""
# In case entity's offset points to a space just before the entity.
if ''.join([chunk.word for chunk in self])[offset] == ' ':
offset += 1
index = 0
result = ChunkList()
for chunk in self:
if offset < index + len(chunk.word) and index < offset + length:
result.append(chunk)
index += len(chunk.word)
return result
|
python
|
def get_overlaps(self, offset, length):
"""Returns chunks overlapped with the given range.
Args:
offset (int): Begin offset of the range.
length (int): Length of the range.
Returns:
Overlapped chunks. (:obj:`budou.chunk.ChunkList`)
"""
# In case entity's offset points to a space just before the entity.
if ''.join([chunk.word for chunk in self])[offset] == ' ':
offset += 1
index = 0
result = ChunkList()
for chunk in self:
if offset < index + len(chunk.word) and index < offset + length:
result.append(chunk)
index += len(chunk.word)
return result
|
[
"def",
"get_overlaps",
"(",
"self",
",",
"offset",
",",
"length",
")",
":",
"# In case entity's offset points to a space just before the entity.",
"if",
"''",
".",
"join",
"(",
"[",
"chunk",
".",
"word",
"for",
"chunk",
"in",
"self",
"]",
")",
"[",
"offset",
"]",
"==",
"' '",
":",
"offset",
"+=",
"1",
"index",
"=",
"0",
"result",
"=",
"ChunkList",
"(",
")",
"for",
"chunk",
"in",
"self",
":",
"if",
"offset",
"<",
"index",
"+",
"len",
"(",
"chunk",
".",
"word",
")",
"and",
"index",
"<",
"offset",
"+",
"length",
":",
"result",
".",
"append",
"(",
"chunk",
")",
"index",
"+=",
"len",
"(",
"chunk",
".",
"word",
")",
"return",
"result"
] |
Returns chunks overlapped with the given range.
Args:
offset (int): Begin offset of the range.
length (int): Length of the range.
Returns:
Overlapped chunks. (:obj:`budou.chunk.ChunkList`)
|
[
"Returns",
"chunks",
"overlapped",
"with",
"the",
"given",
"range",
"."
] |
101224e6523186851f38ee57a6b2e7bdbd826de2
|
https://github.com/google/budou/blob/101224e6523186851f38ee57a6b2e7bdbd826de2/budou/chunk.py#L189-L208
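[Annotation] One subtlety in get_overlaps above: the initial offset bump handles an entity offset that lands on the space just before the entity, plausibly because _get_entities (earlier in this dump) advances offsets by len(word) without counting inter-word spaces. A small self-contained sketch of that adjustment:

chunks = ['Tokyo', ' ', 'Tower']        # full text: 'Tokyo Tower'
text = ''.join(chunks)

def overlapping(offset, length):
    # Nudge the offset forward if it points at the space before the target.
    if text[offset] == ' ':
        offset += 1
    index, found = 0, []
    for word in chunks:
        if offset < index + len(word) and index < offset + length:
            found.append(word)
        index += len(word)
    return found

assert overlapping(6, 5) == ['Tower']   # exact offset
assert overlapping(5, 5) == ['Tower']   # off-by-one offset on the space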
|
13,257
|
google/budou
|
budou/chunk.py
|
ChunkList.swap
|
def swap(self, old_chunks, new_chunk):
"""Swaps old consecutive chunks with new chunk.
Args:
old_chunks (:obj:`budou.chunk.ChunkList`): List of consecutive Chunks to
be removed.
new_chunk (:obj:`budou.chunk.Chunk`): A Chunk to be inserted.
"""
indexes = [self.index(chunk) for chunk in old_chunks]
del self[indexes[0]:indexes[-1] + 1]
self.insert(indexes[0], new_chunk)
|
python
|
def swap(self, old_chunks, new_chunk):
"""Swaps old consecutive chunks with new chunk.
Args:
old_chunks (:obj:`budou.chunk.ChunkList`): List of consecutive Chunks to
be removed.
new_chunk (:obj:`budou.chunk.Chunk`): A Chunk to be inserted.
"""
indexes = [self.index(chunk) for chunk in old_chunks]
del self[indexes[0]:indexes[-1] + 1]
self.insert(indexes[0], new_chunk)
|
[
"def",
"swap",
"(",
"self",
",",
"old_chunks",
",",
"new_chunk",
")",
":",
"indexes",
"=",
"[",
"self",
".",
"index",
"(",
"chunk",
")",
"for",
"chunk",
"in",
"old_chunks",
"]",
"del",
"self",
"[",
"indexes",
"[",
"0",
"]",
":",
"indexes",
"[",
"-",
"1",
"]",
"+",
"1",
"]",
"self",
".",
"insert",
"(",
"indexes",
"[",
"0",
"]",
",",
"new_chunk",
")"
] |
Swaps old consecutive chunks with new chunk.
Args:
old_chunks (:obj:`budou.chunk.ChunkList`): List of consecutive Chunks to
be removed.
new_chunk (:obj:`budou.chunk.Chunk`): A Chunk to be inserted.
|
[
"Swaps",
"old",
"consecutive",
"chunks",
"with",
"new",
"chunk",
"."
] |
101224e6523186851f38ee57a6b2e7bdbd826de2
|
https://github.com/google/budou/blob/101224e6523186851f38ee57a6b2e7bdbd826de2/budou/chunk.py#L210-L220
|
13,258
|
google/budou
|
budou/chunk.py
|
ChunkList.resolve_dependencies
|
def resolve_dependencies(self):
"""Resolves chunk dependency by concatenating them.
"""
self._concatenate_inner(True)
self._concatenate_inner(False)
self._insert_breaklines()
|
python
|
def resolve_dependencies(self):
"""Resolves chunk dependency by concatenating them.
"""
self._concatenate_inner(True)
self._concatenate_inner(False)
self._insert_breaklines()
|
[
"def",
"resolve_dependencies",
"(",
"self",
")",
":",
"self",
".",
"_concatenate_inner",
"(",
"True",
")",
"self",
".",
"_concatenate_inner",
"(",
"False",
")",
"self",
".",
"_insert_breaklines",
"(",
")"
] |
Resolves chunk dependency by concatenating them.
|
[
"Resolves",
"chunk",
"dependency",
"by",
"concatenating",
"them",
"."
] |
101224e6523186851f38ee57a6b2e7bdbd826de2
|
https://github.com/google/budou/blob/101224e6523186851f38ee57a6b2e7bdbd826de2/budou/chunk.py#L222-L227
|
13,259
|
google/budou
|
budou/chunk.py
|
ChunkList._concatenate_inner
|
def _concatenate_inner(self, direction):
"""Concatenates chunks based on each chunk's dependency.
Args:
direction (bool): Direction of concatenation process. True for forward.
"""
tmp_bucket = []
source_chunks = self if direction else self[::-1]
target_chunks = ChunkList()
for chunk in source_chunks:
if (
# if the chunk has matched dependency, do concatenation.
chunk.dependency == direction or
# if the chunk is SPACE, concatenate to the previous chunk.
(direction is False and chunk.is_space())
):
tmp_bucket.append(chunk)
continue
tmp_bucket.append(chunk)
if not direction:
tmp_bucket = tmp_bucket[::-1]
new_word = ''.join([tmp_chunk.word for tmp_chunk in tmp_bucket])
new_chunk = Chunk(new_word, pos=chunk.pos, label=chunk.label,
dependency=chunk.dependency)
target_chunks.append(new_chunk)
tmp_bucket = ChunkList()
if tmp_bucket:
target_chunks += tmp_bucket
if not direction:
target_chunks = target_chunks[::-1]
self.list = target_chunks
|
python
|
def _concatenate_inner(self, direction):
"""Concatenates chunks based on each chunk's dependency.
Args:
direction (bool): Direction of concatenation process. True for forward.
"""
tmp_bucket = []
source_chunks = self if direction else self[::-1]
target_chunks = ChunkList()
for chunk in source_chunks:
if (
# if the chunk has matched dependency, do concatenation.
chunk.dependency == direction or
# if the chunk is SPACE, concatenate to the previous chunk.
(direction is False and chunk.is_space())
):
tmp_bucket.append(chunk)
continue
tmp_bucket.append(chunk)
if not direction:
tmp_bucket = tmp_bucket[::-1]
new_word = ''.join([tmp_chunk.word for tmp_chunk in tmp_bucket])
new_chunk = Chunk(new_word, pos=chunk.pos, label=chunk.label,
dependency=chunk.dependency)
target_chunks.append(new_chunk)
tmp_bucket = ChunkList()
if tmp_bucket:
target_chunks += tmp_bucket
if not direction:
target_chunks = target_chunks[::-1]
self.list = target_chunks
|
[
"def",
"_concatenate_inner",
"(",
"self",
",",
"direction",
")",
":",
"tmp_bucket",
"=",
"[",
"]",
"source_chunks",
"=",
"self",
"if",
"direction",
"else",
"self",
"[",
":",
":",
"-",
"1",
"]",
"target_chunks",
"=",
"ChunkList",
"(",
")",
"for",
"chunk",
"in",
"source_chunks",
":",
"if",
"(",
"# if the chunk has matched dependency, do concatenation.",
"chunk",
".",
"dependency",
"==",
"direction",
"or",
"# if the chunk is SPACE, concatenate to the previous chunk.",
"(",
"direction",
"is",
"False",
"and",
"chunk",
".",
"is_space",
"(",
")",
")",
")",
":",
"tmp_bucket",
".",
"append",
"(",
"chunk",
")",
"continue",
"tmp_bucket",
".",
"append",
"(",
"chunk",
")",
"if",
"not",
"direction",
":",
"tmp_bucket",
"=",
"tmp_bucket",
"[",
":",
":",
"-",
"1",
"]",
"new_word",
"=",
"''",
".",
"join",
"(",
"[",
"tmp_chunk",
".",
"word",
"for",
"tmp_chunk",
"in",
"tmp_bucket",
"]",
")",
"new_chunk",
"=",
"Chunk",
"(",
"new_word",
",",
"pos",
"=",
"chunk",
".",
"pos",
",",
"label",
"=",
"chunk",
".",
"label",
",",
"dependency",
"=",
"chunk",
".",
"dependency",
")",
"target_chunks",
".",
"append",
"(",
"new_chunk",
")",
"tmp_bucket",
"=",
"ChunkList",
"(",
")",
"if",
"tmp_bucket",
":",
"target_chunks",
"+=",
"tmp_bucket",
"if",
"not",
"direction",
":",
"target_chunks",
"=",
"target_chunks",
"[",
":",
":",
"-",
"1",
"]",
"self",
".",
"list",
"=",
"target_chunks"
] |
Concatenates chunks based on each chunk's dependency.
Args:
direction (bool): Direction of concatenation process. True for forward.
|
[
"Concatenates",
"chunks",
"based",
"on",
"each",
"chunk",
"s",
"dependency",
"."
] |
101224e6523186851f38ee57a6b2e7bdbd826de2
|
https://github.com/google/budou/blob/101224e6523186851f38ee57a6b2e7bdbd826de2/budou/chunk.py#L229-L259
|
13,260
|
google/budou
|
budou/chunk.py
|
ChunkList._insert_breaklines
|
def _insert_breaklines(self):
"""Inserts a breakline instead of a trailing space if the chunk is in CJK.
"""
target_chunks = ChunkList()
for chunk in self:
if chunk.word[-1] == ' ' and chunk.has_cjk():
chunk.word = chunk.word[:-1]
target_chunks.append(chunk)
target_chunks.append(chunk.breakline())
else:
target_chunks.append(chunk)
self.list = target_chunks
|
python
|
def _insert_breaklines(self):
"""Inserts a breakline instead of a trailing space if the chunk is in CJK.
"""
target_chunks = ChunkList()
for chunk in self:
if chunk.word[-1] == ' ' and chunk.has_cjk():
chunk.word = chunk.word[:-1]
target_chunks.append(chunk)
target_chunks.append(chunk.breakline())
else:
target_chunks.append(chunk)
self.list = target_chunks
|
[
"def",
"_insert_breaklines",
"(",
"self",
")",
":",
"target_chunks",
"=",
"ChunkList",
"(",
")",
"for",
"chunk",
"in",
"self",
":",
"if",
"chunk",
".",
"word",
"[",
"-",
"1",
"]",
"==",
"' '",
"and",
"chunk",
".",
"has_cjk",
"(",
")",
":",
"chunk",
".",
"word",
"=",
"chunk",
".",
"word",
"[",
":",
"-",
"1",
"]",
"target_chunks",
".",
"append",
"(",
"chunk",
")",
"target_chunks",
".",
"append",
"(",
"chunk",
".",
"breakline",
"(",
")",
")",
"else",
":",
"target_chunks",
".",
"append",
"(",
"chunk",
")",
"self",
".",
"list",
"=",
"target_chunks"
] |
Inserts a breakline instead of a trailing space if the chunk is in CJK.
|
[
"Inserts",
"a",
"breakline",
"instead",
"of",
"a",
"trailing",
"space",
"if",
"the",
"chunk",
"is",
"in",
"CJK",
"."
] |
101224e6523186851f38ee57a6b2e7bdbd826de2
|
https://github.com/google/budou/blob/101224e6523186851f38ee57a6b2e7bdbd826de2/budou/chunk.py#L261-L272
|
13,261
|
google/budou
|
budou/chunk.py
|
ChunkList.html_serialize
|
def html_serialize(self, attributes, max_length=None):
"""Returns concatenated HTML code with SPAN tag.
Args:
attributes (dict): A map of name-value pairs for attributes of output
SPAN tags.
max_length (:obj:`int`, optional): Maximum length of span enclosed chunk.
Returns:
The organized HTML code. (str)
"""
doc = ET.Element('span')
for chunk in self:
if (chunk.has_cjk() and
not (max_length and len(chunk.word) > max_length)):
ele = ET.Element('span')
ele.text = chunk.word
for key, val in attributes.items():
ele.attrib[key] = val
doc.append(ele)
else:
# add word without span tag for non-CJK text (e.g. English)
# by appending it after the last element
if doc.getchildren():
if doc.getchildren()[-1].tail is None:
doc.getchildren()[-1].tail = chunk.word
else:
doc.getchildren()[-1].tail += chunk.word
else:
if doc.text is None:
doc.text = chunk.word
else:
doc.text += chunk.word
result = ET.tostring(doc, encoding='utf-8').decode('utf-8')
result = html5lib.serialize(
html5lib.parseFragment(result), sanitize=True,
quote_attr_values='always')
return result
|
python
|
def html_serialize(self, attributes, max_length=None):
"""Returns concatenated HTML code with SPAN tag.
Args:
attributes (dict): A map of name-value pairs for attributes of output
SPAN tags.
max_length (:obj:`int`, optional): Maximum length of span enclosed chunk.
Returns:
The organized HTML code. (str)
"""
doc = ET.Element('span')
for chunk in self:
if (chunk.has_cjk() and
not (max_length and len(chunk.word) > max_length)):
ele = ET.Element('span')
ele.text = chunk.word
for key, val in attributes.items():
ele.attrib[key] = val
doc.append(ele)
else:
# add word without span tag for non-CJK text (e.g. English)
# by appending it after the last element
if doc.getchildren():
if doc.getchildren()[-1].tail is None:
doc.getchildren()[-1].tail = chunk.word
else:
doc.getchildren()[-1].tail += chunk.word
else:
if doc.text is None:
doc.text = chunk.word
else:
doc.text += chunk.word
result = ET.tostring(doc, encoding='utf-8').decode('utf-8')
result = html5lib.serialize(
html5lib.parseFragment(result), sanitize=True,
quote_attr_values='always')
return result
|
[
"def",
"html_serialize",
"(",
"self",
",",
"attributes",
",",
"max_length",
"=",
"None",
")",
":",
"doc",
"=",
"ET",
".",
"Element",
"(",
"'span'",
")",
"for",
"chunk",
"in",
"self",
":",
"if",
"(",
"chunk",
".",
"has_cjk",
"(",
")",
"and",
"not",
"(",
"max_length",
"and",
"len",
"(",
"chunk",
".",
"word",
")",
">",
"max_length",
")",
")",
":",
"ele",
"=",
"ET",
".",
"Element",
"(",
"'span'",
")",
"ele",
".",
"text",
"=",
"chunk",
".",
"word",
"for",
"key",
",",
"val",
"in",
"attributes",
".",
"items",
"(",
")",
":",
"ele",
".",
"attrib",
"[",
"key",
"]",
"=",
"val",
"doc",
".",
"append",
"(",
"ele",
")",
"else",
":",
"# add word without span tag for non-CJK text (e.g. English)",
"# by appending it after the last element",
"if",
"doc",
".",
"getchildren",
"(",
")",
":",
"if",
"doc",
".",
"getchildren",
"(",
")",
"[",
"-",
"1",
"]",
".",
"tail",
"is",
"None",
":",
"doc",
".",
"getchildren",
"(",
")",
"[",
"-",
"1",
"]",
".",
"tail",
"=",
"chunk",
".",
"word",
"else",
":",
"doc",
".",
"getchildren",
"(",
")",
"[",
"-",
"1",
"]",
".",
"tail",
"+=",
"chunk",
".",
"word",
"else",
":",
"if",
"doc",
".",
"text",
"is",
"None",
":",
"doc",
".",
"text",
"=",
"chunk",
".",
"word",
"else",
":",
"doc",
".",
"text",
"+=",
"chunk",
".",
"word",
"result",
"=",
"ET",
".",
"tostring",
"(",
"doc",
",",
"encoding",
"=",
"'utf-8'",
")",
".",
"decode",
"(",
"'utf-8'",
")",
"result",
"=",
"html5lib",
".",
"serialize",
"(",
"html5lib",
".",
"parseFragment",
"(",
"result",
")",
",",
"sanitize",
"=",
"True",
",",
"quote_attr_values",
"=",
"'always'",
")",
"return",
"result"
] |
Returns concatenated HTML code with SPAN tag.
Args:
attributes (dict): A map of name-value pairs for attributes of output
SPAN tags.
max_length (:obj:`int`, optional): Maximum length of span enclosed chunk.
Returns:
The organized HTML code. (str)
|
[
"Returns",
"concatenated",
"HTML",
"code",
"with",
"SPAN",
"tag",
"."
] |
101224e6523186851f38ee57a6b2e7bdbd826de2
|
https://github.com/google/budou/blob/101224e6523186851f38ee57a6b2e7bdbd826de2/budou/chunk.py#L274-L311
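[Annotation] The ElementTree tail handling above is the load-bearing part: non-CJK text must attach after the previous <span> rather than become a new element. A trimmed sketch of that mechanic (the html5lib sanitization pass is omitted; note also that getchildren() is removed in Python 3.9+, where list(doc) is the equivalent):

import xml.etree.ElementTree as ET

doc = ET.Element('span')

def append_word(word, wrap):
    if wrap:
        ele = ET.SubElement(doc, 'span')
        ele.text = word
    else:
        children = list(doc)                  # getchildren() equivalent
        if children:
            children[-1].tail = (children[-1].tail or '') + word
        else:
            doc.text = (doc.text or '') + word

append_word('今日は', wrap=True)    # CJK chunk -> its own <span>
append_word('World', wrap=False)    # non-CJK -> tail text after the <span>

html = ET.tostring(doc, encoding='unicode')
assert html == '<span><span>今日は</span>World</span>'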
|
13,262
|
c-w/gutenberg
|
gutenberg/acquire/text.py
|
_etextno_to_uri_subdirectory
|
def _etextno_to_uri_subdirectory(etextno):
"""Returns the subdirectory that an etextno will be found in a gutenberg
mirror. Generally, one finds the subdirectory by separating out each digit
of the etext number, and uses it for a directory. The exception here is for
etext numbers less than 10, which are prepended with a 0 for the directory
traversal.
>>> _etextno_to_uri_subdirectory(1)
'0/1'
>>> _etextno_to_uri_subdirectory(19)
'1/19'
>>> _etextno_to_uri_subdirectory(15453)
'1/5/4/5/15453'
"""
str_etextno = str(etextno).zfill(2)
all_but_last_digit = list(str_etextno[:-1])
subdir_part = "/".join(all_but_last_digit)
subdir = "{}/{}".format(subdir_part, etextno) # etextno not zfilled
return subdir
|
python
|
def _etextno_to_uri_subdirectory(etextno):
"""Returns the subdirectory that an etextno will be found in a gutenberg
mirror. Generally, one finds the subdirectory by separating out each digit
of the etext number, and uses it for a directory. The exception here is for
etext numbers less than 10, which are prepended with a 0 for the directory
traversal.
>>> _etextno_to_uri_subdirectory(1)
'0/1'
>>> _etextno_to_uri_subdirectory(19)
'1/19'
>>> _etextno_to_uri_subdirectory(15453)
'1/5/4/5/15453'
"""
str_etextno = str(etextno).zfill(2)
all_but_last_digit = list(str_etextno[:-1])
subdir_part = "/".join(all_but_last_digit)
subdir = "{}/{}".format(subdir_part, etextno) # etextno not zfilled
return subdir
|
[
"def",
"_etextno_to_uri_subdirectory",
"(",
"etextno",
")",
":",
"str_etextno",
"=",
"str",
"(",
"etextno",
")",
".",
"zfill",
"(",
"2",
")",
"all_but_last_digit",
"=",
"list",
"(",
"str_etextno",
"[",
":",
"-",
"1",
"]",
")",
"subdir_part",
"=",
"\"/\"",
".",
"join",
"(",
"all_but_last_digit",
")",
"subdir",
"=",
"\"{}/{}\"",
".",
"format",
"(",
"subdir_part",
",",
"etextno",
")",
"# etextno not zfilled",
"return",
"subdir"
] |
Returns the subdirectory that an etextno will be found in a gutenberg
mirror. Generally, one finds the subdirectory by separating out each digit
of the etext number, and uses it for a directory. The exception here is for
etext numbers less than 10, which are prepended with a 0 for the directory
traversal.
>>> _etextno_to_uri_subdirectory(1)
'0/1'
>>> _etextno_to_uri_subdirectory(19)
'1/19'
>>> _etextno_to_uri_subdirectory(15453)
'1/5/4/5/15453'
|
[
"Returns",
"the",
"subdirectory",
"that",
"an",
"etextno",
"will",
"be",
"found",
"in",
"a",
"gutenberg",
"mirror",
".",
"Generally",
"one",
"finds",
"the",
"subdirectory",
"by",
"separating",
"out",
"each",
"digit",
"of",
"the",
"etext",
"number",
"and",
"uses",
"it",
"for",
"a",
"directory",
".",
"The",
"exception",
"here",
"is",
"for",
"etext",
"numbers",
"less",
"than",
"10",
"which",
"are",
"prepended",
"with",
"a",
"0",
"for",
"the",
"directory",
"traversal",
"."
] |
d1ef3da6fba6c3636d452479ed6bcb17c7d4d246
|
https://github.com/c-w/gutenberg/blob/d1ef3da6fba6c3636d452479ed6bcb17c7d4d246/gutenberg/acquire/text.py#L30-L48
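[Annotation] The doctests in the record above pin down the scheme; a runnable transcription makes the zfill edge case explicit (text number 1 lives under '0/1' because the single digit is padded before the last digit is dropped):

def etextno_to_subdirectory(etextno):
    # Pad to two digits so 1 -> '01' -> directory '0'; then join every
    # digit except the last, and append the (unpadded) etext number.
    digits = str(etextno).zfill(2)
    return '{}/{}'.format('/'.join(digits[:-1]), etextno)

assert etextno_to_subdirectory(1) == '0/1'
assert etextno_to_subdirectory(19) == '1/19'
assert etextno_to_subdirectory(15453) == '1/5/4/5/15453'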
|
13,263
|
c-w/gutenberg
|
gutenberg/acquire/text.py
|
_format_download_uri_for_extension
|
def _format_download_uri_for_extension(etextno, extension, mirror=None):
"""Returns the download location on the Project Gutenberg servers for a
given text and extension. The list of available extensions for a given
text can be found via the formaturi metadata extractor.
"""
mirror = mirror or _GUTENBERG_MIRROR
root = mirror.strip().rstrip('/')
path = _etextno_to_uri_subdirectory(etextno)
uri = '{root}/{path}/{etextno}{extension}'.format(
root=root,
path=path,
etextno=etextno,
extension=extension)
return uri
|
python
|
def _format_download_uri_for_extension(etextno, extension, mirror=None):
"""Returns the download location on the Project Gutenberg servers for a
given text and extension. The list of available extensions for a given
text can be found via the formaturi metadata extractor.
"""
mirror = mirror or _GUTENBERG_MIRROR
root = mirror.strip().rstrip('/')
path = _etextno_to_uri_subdirectory(etextno)
uri = '{root}/{path}/{etextno}{extension}'.format(
root=root,
path=path,
etextno=etextno,
extension=extension)
return uri
|
[
"def",
"_format_download_uri_for_extension",
"(",
"etextno",
",",
"extension",
",",
"mirror",
"=",
"None",
")",
":",
"mirror",
"=",
"mirror",
"or",
"_GUTENBERG_MIRROR",
"root",
"=",
"mirror",
".",
"strip",
"(",
")",
".",
"rstrip",
"(",
"'/'",
")",
"path",
"=",
"_etextno_to_uri_subdirectory",
"(",
"etextno",
")",
"uri",
"=",
"'{root}/{path}/{etextno}{extension}'",
".",
"format",
"(",
"root",
"=",
"root",
",",
"path",
"=",
"path",
",",
"etextno",
"=",
"etextno",
",",
"extension",
"=",
"extension",
")",
"return",
"uri"
] |
Returns the download location on the Project Gutenberg servers for a
given text and extension. The list of available extensions for a given
text can be found via the formaturi metadata extractor.
|
[
"Returns",
"the",
"download",
"location",
"on",
"the",
"Project",
"Gutenberg",
"servers",
"for",
"a",
"given",
"text",
"and",
"extension",
".",
"The",
"list",
"of",
"available",
"extensions",
"for",
"a",
"given",
"text",
"can",
"be",
"found",
"via",
"the",
"formaturi",
"metadata",
"extractor",
"."
] |
d1ef3da6fba6c3636d452479ed6bcb17c7d4d246
|
https://github.com/c-w/gutenberg/blob/d1ef3da6fba6c3636d452479ed6bcb17c7d4d246/gutenberg/acquire/text.py#L64-L80
|
13,264
|
c-w/gutenberg
|
gutenberg/acquire/text.py
|
_format_download_uri
|
def _format_download_uri(etextno, mirror=None, prefer_ascii=False):
"""Returns the download location on the Project Gutenberg servers for a
given text.
Use prefer_ascii to control whether you want to fetch plaintext us-ascii
file first (default old behavior) or if you prefer UTF-8 then 8-bits then
plaintext.
Raises:
UnknownDownloadUri: If no download location can be found for the text.
"""
mirror = mirror or _GUTENBERG_MIRROR
if not _does_mirror_exist(mirror):
raise UnknownDownloadUriException(
'Could not reach Gutenberg mirror "{:s}". Try setting a '
'different mirror (https://www.gutenberg.org/MIRRORS.ALL) for '
'--mirror flag or GUTENBERG_MIRROR environment variable.'
.format(mirror))
# Check https://www.gutenberg.org/files/ for details about available
# extensions ;
# - .txt is plaintext us-ascii
# - -8.txt is 8-bit plaintext, multiple encodings
# - -0.txt is UTF-8
ascii_first = ('.txt', '-0.txt', '-8.txt')
utf8_first = ('-0.txt', '-8.txt', '.txt')
extensions = ascii_first if prefer_ascii else utf8_first
for extension in extensions:
uri = _format_download_uri_for_extension(etextno, extension, mirror)
if _does_uri_exist(uri):
return uri
raise UnknownDownloadUriException(
'Failed to find a textual download candidate for {} on {}. '
'Either the book does not exist or it is only available in '
'non-textual formats.'
.format(etextno, mirror))
|
python
|
def _format_download_uri(etextno, mirror=None, prefer_ascii=False):
"""Returns the download location on the Project Gutenberg servers for a
given text.
Use prefer_ascii to control whether you want to fetch plaintext us-ascii
file first (default old behavior) or if you prefer UTF-8 then 8-bits then
plaintext.
Raises:
UnknownDownloadUri: If no download location can be found for the text.
"""
mirror = mirror or _GUTENBERG_MIRROR
if not _does_mirror_exist(mirror):
raise UnknownDownloadUriException(
'Could not reach Gutenberg mirror "{:s}". Try setting a '
'different mirror (https://www.gutenberg.org/MIRRORS.ALL) for '
'--mirror flag or GUTENBERG_MIRROR environment variable.'
.format(mirror))
# Check https://www.gutenberg.org/files/ for details about available
# extensions ;
# - .txt is plaintext us-ascii
# - -8.txt is 8-bit plaintext, multiple encodings
# - -0.txt is UTF-8
ascii_first = ('.txt', '-0.txt', '-8.txt')
utf8_first = ('-0.txt', '-8.txt', '.txt')
extensions = ascii_first if prefer_ascii else utf8_first
for extension in extensions:
uri = _format_download_uri_for_extension(etextno, extension, mirror)
if _does_uri_exist(uri):
return uri
raise UnknownDownloadUriException(
'Failed to find a textual download candidate for {} on {}. '
'Either the book does not exist or it is only available in '
'non-textual formats.'
.format(etextno, mirror))
|
[
"def",
"_format_download_uri",
"(",
"etextno",
",",
"mirror",
"=",
"None",
",",
"prefer_ascii",
"=",
"False",
")",
":",
"mirror",
"=",
"mirror",
"or",
"_GUTENBERG_MIRROR",
"if",
"not",
"_does_mirror_exist",
"(",
"mirror",
")",
":",
"raise",
"UnknownDownloadUriException",
"(",
"'Could not reach Gutenberg mirror \"{:s}\". Try setting a '",
"'different mirror (https://www.gutenberg.org/MIRRORS.ALL) for '",
"'--mirror flag or GUTENBERG_MIRROR environment variable.'",
".",
"format",
"(",
"mirror",
")",
")",
"# Check https://www.gutenberg.org/files/ for details about available",
"# extensions ;",
"# - .txt is plaintext us-ascii",
"# - -8.txt is 8-bit plaintext, multiple encodings",
"# - -0.txt is UTF-8",
"ascii_first",
"=",
"(",
"'.txt'",
",",
"'-0.txt'",
",",
"'-8.txt'",
")",
"utf8_first",
"=",
"(",
"'-0.txt'",
",",
"'-8.txt'",
",",
"'.txt'",
")",
"extensions",
"=",
"ascii_first",
"if",
"prefer_ascii",
"else",
"utf8_first",
"for",
"extension",
"in",
"extensions",
":",
"uri",
"=",
"_format_download_uri_for_extension",
"(",
"etextno",
",",
"extension",
",",
"mirror",
")",
"if",
"_does_uri_exist",
"(",
"uri",
")",
":",
"return",
"uri",
"raise",
"UnknownDownloadUriException",
"(",
"'Failed to find a textual download candidate for {} on {}. '",
"'Either the book does not exist or it is only available in '",
"'non-textual formats.'",
".",
"format",
"(",
"etextno",
",",
"mirror",
")",
")"
] |
Returns the download location on the Project Gutenberg servers for a
given text.
Use prefer_ascii to control whether you want to fetch plaintext us-ascii
file first (default old behavior) or if you prefer UTF-8 then 8-bits then
plaintext.
Raises:
UnknownDownloadUri: If no download location can be found for the text.
|
[
"Returns",
"the",
"download",
"location",
"on",
"the",
"Project",
"Gutenberg",
"servers",
"for",
"a",
"given",
"text",
"."
] |
d1ef3da6fba6c3636d452479ed6bcb17c7d4d246
|
https://github.com/c-w/gutenberg/blob/d1ef3da6fba6c3636d452479ed6bcb17c7d4d246/gutenberg/acquire/text.py#L83-L119
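[Annotation] The extension-preference logic above reduces to "try the preferred order, take the first that exists on the mirror". A self-contained sketch with the two orderings copied from the record (the set of available extensions stands in for the _does_uri_exist probe):

ascii_first = ('.txt', '-0.txt', '-8.txt')
utf8_first = ('-0.txt', '-8.txt', '.txt')

def pick_extension(available, prefer_ascii=False):
    # Return the first preferred extension that the mirror actually hosts.
    for ext in (ascii_first if prefer_ascii else utf8_first):
        if ext in available:
            return ext
    raise LookupError('no textual format available')

assert pick_extension({'.txt', '-0.txt'}) == '-0.txt'            # UTF-8 wins
assert pick_extension({'.txt', '-0.txt'}, prefer_ascii=True) == '.txt'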
|
13,265
|
c-w/gutenberg
|
gutenberg/acquire/text.py
|
load_etext
|
def load_etext(etextno, refresh_cache=False, mirror=None, prefer_ascii=False):
"""Returns a unicode representation of the full body of a Project Gutenberg
text. After making an initial remote call to Project Gutenberg's servers,
the text is persisted locally.
"""
etextno = validate_etextno(etextno)
cached = os.path.join(_TEXT_CACHE, '{}.txt.gz'.format(etextno))
if refresh_cache:
remove(cached)
if not os.path.exists(cached):
makedirs(os.path.dirname(cached))
download_uri = _format_download_uri(etextno, mirror, prefer_ascii)
response = requests.get(download_uri)
# Ensure proper UTF-8 saving. There might be instances of ebooks or
# mirrors which advertise a broken encoding, and this will break
# downstream usages. For example, #55517 from aleph.gutenberg.org:
#
# from gutenberg.acquire import load_etext
# print(load_etext(55517, refresh_cache=True)[0:1000])
#
# response.encoding will be 'ISO-8859-1' while the file is UTF-8
if response.encoding != response.apparent_encoding:
response.encoding = response.apparent_encoding
text = response.text
with closing(gzip.open(cached, 'w')) as cache:
cache.write(text.encode('utf-8'))
with closing(gzip.open(cached, 'r')) as cache:
text = cache.read().decode('utf-8')
return text
|
python
|
def load_etext(etextno, refresh_cache=False, mirror=None, prefer_ascii=False):
"""Returns a unicode representation of the full body of a Project Gutenberg
text. After making an initial remote call to Project Gutenberg's servers,
the text is persisted locally.
"""
etextno = validate_etextno(etextno)
cached = os.path.join(_TEXT_CACHE, '{}.txt.gz'.format(etextno))
if refresh_cache:
remove(cached)
if not os.path.exists(cached):
makedirs(os.path.dirname(cached))
download_uri = _format_download_uri(etextno, mirror, prefer_ascii)
response = requests.get(download_uri)
# Ensure proper UTF-8 saving. There might be instances of ebooks or
# mirrors which advertise a broken encoding, and this will break
# downstream usages. For example, #55517 from aleph.gutenberg.org:
#
# from gutenberg.acquire import load_etext
# print(load_etext(55517, refresh_cache=True)[0:1000])
#
# response.encoding will be 'ISO-8859-1' while the file is UTF-8
if response.encoding != response.apparent_encoding:
response.encoding = response.apparent_encoding
text = response.text
with closing(gzip.open(cached, 'w')) as cache:
cache.write(text.encode('utf-8'))
with closing(gzip.open(cached, 'r')) as cache:
text = cache.read().decode('utf-8')
return text
|
[
"def",
"load_etext",
"(",
"etextno",
",",
"refresh_cache",
"=",
"False",
",",
"mirror",
"=",
"None",
",",
"prefer_ascii",
"=",
"False",
")",
":",
"etextno",
"=",
"validate_etextno",
"(",
"etextno",
")",
"cached",
"=",
"os",
".",
"path",
".",
"join",
"(",
"_TEXT_CACHE",
",",
"'{}.txt.gz'",
".",
"format",
"(",
"etextno",
")",
")",
"if",
"refresh_cache",
":",
"remove",
"(",
"cached",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"cached",
")",
":",
"makedirs",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"cached",
")",
")",
"download_uri",
"=",
"_format_download_uri",
"(",
"etextno",
",",
"mirror",
",",
"prefer_ascii",
")",
"response",
"=",
"requests",
".",
"get",
"(",
"download_uri",
")",
"# Ensure proper UTF-8 saving. There might be instances of ebooks or",
"# mirrors which advertise a broken encoding, and this will break",
"# downstream usages. For example, #55517 from aleph.gutenberg.org:",
"#",
"# from gutenberg.acquire import load_etext",
"# print(load_etext(55517, refresh_cache=True)[0:1000])",
"#",
"# response.encoding will be 'ISO-8859-1' while the file is UTF-8",
"if",
"response",
".",
"encoding",
"!=",
"response",
".",
"apparent_encoding",
":",
"response",
".",
"encoding",
"=",
"response",
".",
"apparent_encoding",
"text",
"=",
"response",
".",
"text",
"with",
"closing",
"(",
"gzip",
".",
"open",
"(",
"cached",
",",
"'w'",
")",
")",
"as",
"cache",
":",
"cache",
".",
"write",
"(",
"text",
".",
"encode",
"(",
"'utf-8'",
")",
")",
"with",
"closing",
"(",
"gzip",
".",
"open",
"(",
"cached",
",",
"'r'",
")",
")",
"as",
"cache",
":",
"text",
"=",
"cache",
".",
"read",
"(",
")",
".",
"decode",
"(",
"'utf-8'",
")",
"return",
"text"
] |
Returns a unicode representation of the full body of a Project Gutenberg
text. After making an initial remote call to Project Gutenberg's servers,
the text is persisted locally.
|
[
"Returns",
"a",
"unicode",
"representation",
"of",
"the",
"full",
"body",
"of",
"a",
"Project",
"Gutenberg",
"text",
".",
"After",
"making",
"an",
"initial",
"remote",
"call",
"to",
"Project",
"Gutenberg",
"s",
"servers",
"the",
"text",
"is",
"persisted",
"locally",
"."
] |
d1ef3da6fba6c3636d452479ed6bcb17c7d4d246
|
https://github.com/c-w/gutenberg/blob/d1ef3da6fba6c3636d452479ed6bcb17c7d4d246/gutenberg/acquire/text.py#L122-L153
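[Annotation] The caching half of load_etext is independent of the network: text is gzip-compressed on first fetch and re-read from disk afterwards. A self-contained sketch of that local round trip (the download and the apparent_encoding fix-up are skipped; closing() is unnecessary on modern Python, where gzip files are context managers, but is kept for fidelity):

import gzip
import os
import tempfile
from contextlib import closing

cached = os.path.join(tempfile.mkdtemp(), '12345.txt.gz')
text = 'Call me Ishmael.'  # stands in for the downloaded ebook body

if not os.path.exists(cached):
    with closing(gzip.open(cached, 'w')) as cache:
        cache.write(text.encode('utf-8'))

with closing(gzip.open(cached, 'r')) as cache:
    assert cache.read().decode('utf-8') == text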
|
13,266
|
c-w/gutenberg
|
gutenberg/_util/logging.py
|
disable_logging
|
def disable_logging(logger=None):
"""Context manager to temporarily suppress all logging for a given logger
or the root logger if no particular logger is specified.
"""
logger = logger or logging.getLogger()
disabled = logger.disabled
logger.disabled = True
yield
logger.disabled = disabled
|
python
|
def disable_logging(logger=None):
"""Context manager to temporarily suppress all logging for a given logger
or the root logger if no particular logger is specified.
"""
logger = logger or logging.getLogger()
disabled = logger.disabled
logger.disabled = True
yield
logger.disabled = disabled
|
[
"def",
"disable_logging",
"(",
"logger",
"=",
"None",
")",
":",
"logger",
"=",
"logger",
"or",
"logging",
".",
"getLogger",
"(",
")",
"disabled",
"=",
"logger",
".",
"disabled",
"logger",
".",
"disabled",
"=",
"True",
"yield",
"logger",
".",
"disabled",
"=",
"disabled"
] |
Context manager to temporarily suppress all logging for a given logger
or the root logger if no particular logger is specified.
|
[
"Context",
"manager",
"to",
"temporarily",
"suppress",
"all",
"logging",
"for",
"a",
"given",
"logger",
"or",
"the",
"root",
"logger",
"if",
"no",
"particular",
"logger",
"is",
"specified",
"."
] |
d1ef3da6fba6c3636d452479ed6bcb17c7d4d246
|
https://github.com/c-w/gutenberg/blob/d1ef3da6fba6c3636d452479ed6bcb17c7d4d246/gutenberg/_util/logging.py#L11-L20
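[Annotation] disable_logging is a generator, so it presumably carries a @contextlib.contextmanager decorator in the source file (the excerpt omits decorators). A runnable sketch of the same pattern; note that a bare yield would skip the restore if the body raised, so this variant adds try/finally as the defensive form:

import logging
from contextlib import contextmanager

@contextmanager
def disable_logging(logger=None):
    logger = logger or logging.getLogger()
    disabled = logger.disabled
    logger.disabled = True
    try:
        yield
    finally:
        logger.disabled = disabled   # restore even if the body raised

log = logging.getLogger('demo')
with disable_logging(log):
    assert log.disabled
assert not log.disabled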
|
13,267
|
c-w/gutenberg
|
gutenberg/_util/os.py
|
makedirs
|
def makedirs(*args, **kwargs):
"""Wrapper around os.makedirs that doesn't raise an exception if the
directory already exists.
"""
try:
os.makedirs(*args, **kwargs)
except OSError as ex:
if ex.errno != errno.EEXIST:
raise
|
python
|
def makedirs(*args, **kwargs):
"""Wrapper around os.makedirs that doesn't raise an exception if the
directory already exists.
"""
try:
os.makedirs(*args, **kwargs)
except OSError as ex:
if ex.errno != errno.EEXIST:
raise
|
[
"def",
"makedirs",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"try",
":",
"os",
".",
"makedirs",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"except",
"OSError",
"as",
"ex",
":",
"if",
"ex",
".",
"errno",
"!=",
"errno",
".",
"EEXIST",
":",
"raise"
] |
Wrapper around os.makedirs that doesn't raise an exception if the
directory already exists.
|
[
"Wrapper",
"around",
"os",
".",
"makedirs",
"that",
"doesn",
"t",
"raise",
"an",
"exception",
"if",
"the",
"directory",
"already",
"exists",
"."
] |
d1ef3da6fba6c3636d452479ed6bcb17c7d4d246
|
https://github.com/c-w/gutenberg/blob/d1ef3da6fba6c3636d452479ed6bcb17c7d4d246/gutenberg/_util/os.py#L12-L21
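[Annotation] On Python 3.2+ broadly the same EEXIST-tolerant behavior for the directory case is available directly via os.makedirs(path, exist_ok=True); the wrapper above keeps Python 2 compatibility. A quick check that repeated calls are no-ops:

import errno
import os
import tempfile

def makedirs(*args, **kwargs):
    try:
        os.makedirs(*args, **kwargs)
    except OSError as ex:
        if ex.errno != errno.EEXIST:
            raise

target = os.path.join(tempfile.mkdtemp(), 'a', 'b')
makedirs(target)
makedirs(target)                      # second call is a silent no-op
os.makedirs(target, exist_ok=True)    # the Python 3.2+ one-liner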
|
13,268
|
c-w/gutenberg
|
gutenberg/_util/os.py
|
remove
|
def remove(path):
"""Wrapper that switches between os.remove and shutil.rmtree depending on
whether the provided path is a file or directory.
"""
if not os.path.exists(path):
return
if os.path.isdir(path):
return shutil.rmtree(path)
if os.path.isfile(path):
return os.remove(path)
|
python
|
def remove(path):
"""Wrapper that switches between os.remove and shutil.rmtree depending on
whether the provided path is a file or directory.
"""
if not os.path.exists(path):
return
if os.path.isdir(path):
return shutil.rmtree(path)
if os.path.isfile(path):
return os.remove(path)
|
[
"def",
"remove",
"(",
"path",
")",
":",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"path",
")",
":",
"return",
"if",
"os",
".",
"path",
".",
"isdir",
"(",
"path",
")",
":",
"return",
"shutil",
".",
"rmtree",
"(",
"path",
")",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"path",
")",
":",
"return",
"os",
".",
"remove",
"(",
"path",
")"
] |
Wrapper that switches between os.remove and shutil.rmtree depending on
whether the provided path is a file or directory.
|
[
"Wrapper",
"that",
"switches",
"between",
"os",
".",
"remove",
"and",
"shutil",
".",
"rmtree",
"depending",
"on",
"whether",
"the",
"provided",
"path",
"is",
"a",
"file",
"or",
"directory",
"."
] |
d1ef3da6fba6c3636d452479ed6bcb17c7d4d246
|
https://github.com/c-w/gutenberg/blob/d1ef3da6fba6c3636d452479ed6bcb17c7d4d246/gutenberg/_util/os.py#L24-L36
|
13,269
|
c-w/gutenberg
|
gutenberg/_util/os.py
|
determine_encoding
|
def determine_encoding(path, default=None):
"""Determines the encoding of a file based on byte order marks.
Arguments:
path (str): The path to the file.
default (str, optional): The encoding to return if the byte-order-mark
lookup does not return an answer.
Returns:
str: The encoding of the file.
"""
byte_order_marks = (
('utf-8-sig', (codecs.BOM_UTF8, )),
('utf-16', (codecs.BOM_UTF16_LE, codecs.BOM_UTF16_BE)),
('utf-32', (codecs.BOM_UTF32_LE, codecs.BOM_UTF32_BE)),
)
try:
with open(path, 'rb') as infile:
raw = infile.read(4)
except IOError:
return default
for encoding, boms in byte_order_marks:
if any(raw.startswith(bom) for bom in boms):
return encoding
return default
|
python
|
def determine_encoding(path, default=None):
"""Determines the encoding of a file based on byte order marks.
Arguments:
path (str): The path to the file.
default (str, optional): The encoding to return if the byte-order-mark
lookup does not return an answer.
Returns:
str: The encoding of the file.
"""
byte_order_marks = (
('utf-8-sig', (codecs.BOM_UTF8, )),
('utf-16', (codecs.BOM_UTF16_LE, codecs.BOM_UTF16_BE)),
('utf-32', (codecs.BOM_UTF32_LE, codecs.BOM_UTF32_BE)),
)
try:
with open(path, 'rb') as infile:
raw = infile.read(4)
except IOError:
return default
for encoding, boms in byte_order_marks:
if any(raw.startswith(bom) for bom in boms):
return encoding
return default
|
[
"def",
"determine_encoding",
"(",
"path",
",",
"default",
"=",
"None",
")",
":",
"byte_order_marks",
"=",
"(",
"(",
"'utf-8-sig'",
",",
"(",
"codecs",
".",
"BOM_UTF8",
",",
")",
")",
",",
"(",
"'utf-16'",
",",
"(",
"codecs",
".",
"BOM_UTF16_LE",
",",
"codecs",
".",
"BOM_UTF16_BE",
")",
")",
",",
"(",
"'utf-32'",
",",
"(",
"codecs",
".",
"BOM_UTF32_LE",
",",
"codecs",
".",
"BOM_UTF32_BE",
")",
")",
",",
")",
"try",
":",
"with",
"open",
"(",
"path",
",",
"'rb'",
")",
"as",
"infile",
":",
"raw",
"=",
"infile",
".",
"read",
"(",
"4",
")",
"except",
"IOError",
":",
"return",
"default",
"for",
"encoding",
",",
"boms",
"in",
"byte_order_marks",
":",
"if",
"any",
"(",
"raw",
".",
"startswith",
"(",
"bom",
")",
"for",
"bom",
"in",
"boms",
")",
":",
"return",
"encoding",
"return",
"default"
] |
Determines the encoding of a file based on byte order marks.
Arguments:
path (str): The path to the file.
default (str, optional): The encoding to return if the byte-order-mark
lookup does not return an answer.
Returns:
str: The encoding of the file.
|
[
"Determines",
"the",
"encoding",
"of",
"a",
"file",
"based",
"on",
"byte",
"order",
"marks",
"."
] |
d1ef3da6fba6c3636d452479ed6bcb17c7d4d246
|
https://github.com/c-w/gutenberg/blob/d1ef3da6fba6c3636d452479ed6bcb17c7d4d246/gutenberg/_util/os.py#L39-L67
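[Annotation] A runnable check of the BOM sniffing above: codecs.BOM_UTF8 is the three bytes EF BB BF, so writing with the 'utf-8-sig' codec makes a file detectable, while a plain ASCII file falls through to the default. Reading four bytes suffices because the longest BOM (UTF-32's) is four bytes. One quirk inherited from the original ordering: a UTF-32-LE BOM (FF FE 00 00) also startswith the UTF-16-LE BOM (FF FE), so such files would be reported as utf-16.

import codecs
import os
import tempfile

BOMS = (
    ('utf-8-sig', (codecs.BOM_UTF8,)),
    ('utf-16', (codecs.BOM_UTF16_LE, codecs.BOM_UTF16_BE)),
    ('utf-32', (codecs.BOM_UTF32_LE, codecs.BOM_UTF32_BE)),
)

def sniff(path, default=None):
    with open(path, 'rb') as f:
        raw = f.read(4)
    for encoding, boms in BOMS:
        if any(raw.startswith(bom) for bom in boms):
            return encoding
    return default

tmp = tempfile.mkdtemp()
bom_file = os.path.join(tmp, 'bom.txt')
plain_file = os.path.join(tmp, 'plain.txt')
with open(bom_file, 'w', encoding='utf-8-sig') as f:
    f.write('hello')
with open(plain_file, 'w', encoding='ascii') as f:
    f.write('hello')

assert sniff(bom_file) == 'utf-8-sig'
assert sniff(plain_file, default='utf-8') == 'utf-8'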
|
13,270
|
c-w/gutenberg
|
gutenberg/_util/os.py
|
reopen_encoded
|
def reopen_encoded(fileobj, mode='r', fallback_encoding=None):
"""Makes sure that a file was opened with some valid encoding.
Arguments:
fileobj (file): The file-object.
mode (str, optional): The mode in which to re-open the file.
fallback_encoding (str, optional): The encoding in which to re-open
the file if it does not specify an encoding itself.
Returns:
file: The re-opened file.
"""
encoding = determine_encoding(fileobj.name, fallback_encoding)
fileobj.close()
return open(fileobj.name, mode, encoding=encoding)
|
python
|
def reopen_encoded(fileobj, mode='r', fallback_encoding=None):
"""Makes sure that a file was opened with some valid encoding.
Arguments:
fileobj (file): The file-object.
mode (str, optional): The mode in which to re-open the file.
fallback_encoding (str, optional): The encoding in which to re-open
the file if it does not specify an encoding itself.
Returns:
file: The re-opened file.
"""
encoding = determine_encoding(fileobj.name, fallback_encoding)
fileobj.close()
return open(fileobj.name, mode, encoding=encoding)
|
[
"def",
"reopen_encoded",
"(",
"fileobj",
",",
"mode",
"=",
"'r'",
",",
"fallback_encoding",
"=",
"None",
")",
":",
"encoding",
"=",
"determine_encoding",
"(",
"fileobj",
".",
"name",
",",
"fallback_encoding",
")",
"fileobj",
".",
"close",
"(",
")",
"return",
"open",
"(",
"fileobj",
".",
"name",
",",
"mode",
",",
"encoding",
"=",
"encoding",
")"
] |
Makes sure that a file was opened with some valid encoding.
Arguments:
fileobj (file): The file-object.
mode (str, optional): The mode in which to re-open the file.
fallback_encoding (str, optional): The encoding in which to re-open
the file if it does not specify an encoding itself.
Returns:
file: The re-opened file.
|
[
"Makes",
"sure",
"that",
"a",
"file",
"was",
"opened",
"with",
"some",
"valid",
"encoding",
"."
] |
d1ef3da6fba6c3636d452479ed6bcb17c7d4d246
|
https://github.com/c-w/gutenberg/blob/d1ef3da6fba6c3636d452479ed6bcb17c7d4d246/gutenberg/_util/os.py#L70-L85
|
13,271
|
c-w/gutenberg
|
gutenberg/query/api.py
|
get_metadata
|
def get_metadata(feature_name, etextno):
"""Looks up the value of a meta-data feature for a given text.
Arguments:
feature_name (str): The name of the meta-data to look up.
etextno (int): The identifier of the Gutenberg text for which to look
up the meta-data.
Returns:
frozenset: The values of the meta-data for the text or an empty set if
the text does not have meta-data associated with the feature.
Raises:
UnsupportedFeature: If there is no MetadataExtractor registered that
can extract meta-data for the given feature name.
"""
metadata_values = MetadataExtractor.get(feature_name).get_metadata(etextno)
return frozenset(metadata_values)
|
python
|
def get_metadata(feature_name, etextno):
"""Looks up the value of a meta-data feature for a given text.
Arguments:
feature_name (str): The name of the meta-data to look up.
etextno (int): The identifier of the Gutenberg text for which to look
up the meta-data.
Returns:
frozenset: The values of the meta-data for the text or an empty set if
the text does not have meta-data associated with the feature.
Raises:
UnsupportedFeature: If there is no MetadataExtractor registered that
can extract meta-data for the given feature name.
"""
metadata_values = MetadataExtractor.get(feature_name).get_metadata(etextno)
return frozenset(metadata_values)
|
[
"def",
"get_metadata",
"(",
"feature_name",
",",
"etextno",
")",
":",
"metadata_values",
"=",
"MetadataExtractor",
".",
"get",
"(",
"feature_name",
")",
".",
"get_metadata",
"(",
"etextno",
")",
"return",
"frozenset",
"(",
"metadata_values",
")"
] |
Looks up the value of a meta-data feature for a given text.
Arguments:
feature_name (str): The name of the meta-data to look up.
etextno (int): The identifier of the Gutenberg text for which to look
up the meta-data.
Returns:
frozenset: The values of the meta-data for the text or an empty set if
the text does not have meta-data associated with the feature.
Raises:
UnsupportedFeature: If there is no MetadataExtractor registered that
can extract meta-data for the given feature name.
|
[
"Looks",
"up",
"the",
"value",
"of",
"a",
"meta",
"-",
"data",
"feature",
"for",
"a",
"given",
"text",
"."
] |
d1ef3da6fba6c3636d452479ed6bcb17c7d4d246
|
https://github.com/c-w/gutenberg/blob/d1ef3da6fba6c3636d452479ed6bcb17c7d4d246/gutenberg/query/api.py#L20-L38
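A usage sketch for get_metadata, assuming a populated local metadata cache (etext number 2701 is Moby Dick on Project Gutenberg):

from gutenberg.query import get_metadata

print(get_metadata('title', 2701))   # e.g. frozenset({'Moby Dick; Or, The Whale'})
print(get_metadata('author', 2701))  # e.g. frozenset({'Melville, Herman'})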
|
13,272
|
c-w/gutenberg
|
gutenberg/query/api.py
|
get_etexts
|
def get_etexts(feature_name, value):
"""Looks up all the texts that have meta-data matching some criterion.
Arguments:
feature_name (str): The meta-data on which to select the texts.
value (str): The value of the meta-data on which to filter the texts.
Returns:
frozenset: The set of all the Project Gutenberg text identifiers that
match the provided query.
Raises:
UnsupportedFeature: If there is no MetadataExtractor registered that
can extract meta-data for the given feature name.
"""
matching_etexts = MetadataExtractor.get(feature_name).get_etexts(value)
return frozenset(matching_etexts)
|
python
|
def get_etexts(feature_name, value):
"""Looks up all the texts that have meta-data matching some criterion.
Arguments:
feature_name (str): The meta-data on which to select the texts.
value (str): The value of the meta-data on which to filter the texts.
Returns:
frozenset: The set of all the Project Gutenberg text identifiers that
match the provided query.
Raises:
UnsupportedFeature: If there is no MetadataExtractor registered that
can extract meta-data for the given feature name.
"""
matching_etexts = MetadataExtractor.get(feature_name).get_etexts(value)
return frozenset(matching_etexts)
|
[
"def",
"get_etexts",
"(",
"feature_name",
",",
"value",
")",
":",
"matching_etexts",
"=",
"MetadataExtractor",
".",
"get",
"(",
"feature_name",
")",
".",
"get_etexts",
"(",
"value",
")",
"return",
"frozenset",
"(",
"matching_etexts",
")"
] |
Looks up all the texts that have meta-data matching some criterion.
Arguments:
feature_name (str): The meta-data on which to select the texts.
value (str): The value of the meta-data on which to filter the texts.
Returns:
frozenset: The set of all the Project Gutenberg text identifiers that
match the provided query.
Raises:
UnsupportedFeature: If there is no MetadataExtractor registered that
can extract meta-data for the given feature name.
|
[
"Looks",
"up",
"all",
"the",
"texts",
"that",
"have",
"meta",
"-",
"data",
"matching",
"some",
"criterion",
"."
] |
d1ef3da6fba6c3636d452479ed6bcb17c7d4d246
|
https://github.com/c-w/gutenberg/blob/d1ef3da6fba6c3636d452479ed6bcb17c7d4d246/gutenberg/query/api.py#L41-L58
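The inverse lookup with get_etexts, again assuming a populated metadata cache:

from gutenberg.query import get_etexts

# All text identifiers whose 'author' feature matches the given value.
print(get_etexts('author', 'Melville, Herman'))  # e.g. frozenset({2701, ...})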
|
13,273
|
c-w/gutenberg
|
gutenberg/query/api.py
|
MetadataExtractor._uri_to_etext
|
def _uri_to_etext(cls, uri_ref):
"""Converts the representation used to identify a text in the
meta-data RDF graph to a human-friendly integer text identifier.
"""
try:
return validate_etextno(int(os.path.basename(uri_ref.toPython())))
except InvalidEtextIdException:
return None
|
python
|
def _uri_to_etext(cls, uri_ref):
"""Converts the representation used to identify a text in the
meta-data RDF graph to a human-friendly integer text identifier.
"""
try:
return validate_etextno(int(os.path.basename(uri_ref.toPython())))
except InvalidEtextIdException:
return None
|
[
"def",
"_uri_to_etext",
"(",
"cls",
",",
"uri_ref",
")",
":",
"try",
":",
"return",
"validate_etextno",
"(",
"int",
"(",
"os",
".",
"path",
".",
"basename",
"(",
"uri_ref",
".",
"toPython",
"(",
")",
")",
")",
")",
"except",
"InvalidEtextIdException",
":",
"return",
"None"
] |
Converts the representation used to identify a text in the
meta-data RDF graph to a human-friendly integer text identifier.
|
[
"Converts",
"the",
"representation",
"used",
"to",
"identify",
"a",
"text",
"in",
"the",
"meta",
"-",
"data",
"RDF",
"graph",
"to",
"a",
"human",
"-",
"friendly",
"integer",
"text",
"identifier",
"."
] |
d1ef3da6fba6c3636d452479ed6bcb17c7d4d246
|
https://github.com/c-w/gutenberg/blob/d1ef3da6fba6c3636d452479ed6bcb17c7d4d246/gutenberg/query/api.py#L127-L135
|
13,274
|
c-w/gutenberg
|
gutenberg/query/api.py
|
MetadataExtractor._implementations
|
def _implementations(cls):
"""Returns all the concrete subclasses of MetadataExtractor.
"""
if cls.__implementations:
return cls.__implementations
cls.__implementations = {}
for implementation in all_subclasses(MetadataExtractor):
try:
feature_name = implementation.feature_name()
cls.__implementations[feature_name] = implementation
except NotImplementedError:
pass
return cls.__implementations
|
python
|
def _implementations(cls):
"""Returns all the concrete subclasses of MetadataExtractor.
"""
if cls.__implementations:
return cls.__implementations
cls.__implementations = {}
for implementation in all_subclasses(MetadataExtractor):
try:
feature_name = implementation.feature_name()
cls.__implementations[feature_name] = implementation
except NotImplementedError:
pass
return cls.__implementations
|
[
"def",
"_implementations",
"(",
"cls",
")",
":",
"if",
"cls",
".",
"__implementations",
":",
"return",
"cls",
".",
"__implementations",
"cls",
".",
"__implementations",
"=",
"{",
"}",
"for",
"implementation",
"in",
"all_subclasses",
"(",
"MetadataExtractor",
")",
":",
"try",
":",
"feature_name",
"=",
"implementation",
".",
"feature_name",
"(",
")",
"cls",
".",
"__implementations",
"[",
"feature_name",
"]",
"=",
"implementation",
"except",
"NotImplementedError",
":",
"pass",
"return",
"cls",
".",
"__implementations"
] |
Returns all the concrete subclasses of MetadataExtractor.
|
[
"Returns",
"all",
"the",
"concrete",
"subclasses",
"of",
"MetadataExtractor",
"."
] |
d1ef3da6fba6c3636d452479ed6bcb17c7d4d246
|
https://github.com/c-w/gutenberg/blob/d1ef3da6fba6c3636d452479ed6bcb17c7d4d246/gutenberg/query/api.py#L138-L152
|
13,275
|
c-w/gutenberg
|
gutenberg/query/api.py
|
MetadataExtractor.get
|
def get(feature_name):
"""Returns the MetadataExtractor that can extract information about the
provided feature name.
Raises:
UnsupportedFeature: If no extractor exists for the feature name.
"""
implementations = MetadataExtractor._implementations()
try:
return implementations[feature_name]
except KeyError:
raise UnsupportedFeatureException(
'no MetadataExtractor registered for feature "{feature_name}" '
'(try any of the following: {supported_features})'
.format(feature_name=feature_name,
supported_features=', '.join(sorted(implementations))))
|
python
|
def get(feature_name):
"""Returns the MetadataExtractor that can extract information about the
provided feature name.
Raises:
UnsupportedFeature: If no extractor exists for the feature name.
"""
implementations = MetadataExtractor._implementations()
try:
return implementations[feature_name]
except KeyError:
raise UnsupportedFeatureException(
'no MetadataExtractor registered for feature "{feature_name}" '
'(try any of the following: {supported_features})'
.format(feature_name=feature_name,
supported_features=', '.join(sorted(implementations))))
|
[
"def",
"get",
"(",
"feature_name",
")",
":",
"implementations",
"=",
"MetadataExtractor",
".",
"_implementations",
"(",
")",
"try",
":",
"return",
"implementations",
"[",
"feature_name",
"]",
"except",
"KeyError",
":",
"raise",
"UnsupportedFeatureException",
"(",
"'no MetadataExtractor registered for feature \"{feature_name}\" '",
"'(try any of the following: {supported_features})'",
".",
"format",
"(",
"feature_name",
"=",
"feature_name",
",",
"supported_features",
"=",
"', '",
".",
"join",
"(",
"sorted",
"(",
"implementations",
")",
")",
")",
")"
] |
Returns the MetadataExtractor that can extract information about the
provided feature name.
Raises:
UnsupportedFeature: If no extractor exists for the feature name.
|
[
"Returns",
"the",
"MetadataExtractor",
"that",
"can",
"extract",
"information",
"about",
"the",
"provided",
"feature",
"name",
"."
] |
d1ef3da6fba6c3636d452479ed6bcb17c7d4d246
|
https://github.com/c-w/gutenberg/blob/d1ef3da6fba6c3636d452479ed6bcb17c7d4d246/gutenberg/query/api.py#L155-L171
|
13,276
|
c-w/gutenberg
|
gutenberg/acquire/metadata.py
|
set_metadata_cache
|
def set_metadata_cache(cache):
"""Sets the metadata cache object to use.
"""
global _METADATA_CACHE
if _METADATA_CACHE and _METADATA_CACHE.is_open:
_METADATA_CACHE.close()
_METADATA_CACHE = cache
|
python
|
def set_metadata_cache(cache):
"""Sets the metadata cache object to use.
"""
global _METADATA_CACHE
if _METADATA_CACHE and _METADATA_CACHE.is_open:
_METADATA_CACHE.close()
_METADATA_CACHE = cache
|
[
"def",
"set_metadata_cache",
"(",
"cache",
")",
":",
"global",
"_METADATA_CACHE",
"if",
"_METADATA_CACHE",
"and",
"_METADATA_CACHE",
".",
"is_open",
":",
"_METADATA_CACHE",
".",
"close",
"(",
")",
"_METADATA_CACHE",
"=",
"cache"
] |
Sets the metadata cache object to use.
|
[
"Sets",
"the",
"metadata",
"cache",
"object",
"to",
"use",
"."
] |
d1ef3da6fba6c3636d452479ed6bcb17c7d4d246
|
https://github.com/c-w/gutenberg/blob/d1ef3da6fba6c3636d452479ed6bcb17c7d4d246/gutenberg/acquire/metadata.py#L323-L332
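A sketch of installing an explicit cache backend before querying; the cache location is hypothetical, and populate() performs a one-time download of the full RDF catalog:

from gutenberg.acquire.metadata import SqliteMetadataCache, set_metadata_cache

cache = SqliteMetadataCache('/tmp/gutenberg.sqlite')
cache.populate()
set_metadata_cache(cache)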
|
13,277
|
c-w/gutenberg
|
gutenberg/acquire/metadata.py
|
_create_metadata_cache
|
def _create_metadata_cache(cache_location):
"""Creates a new metadata cache instance appropriate for this platform.
"""
cache_url = os.getenv('GUTENBERG_FUSEKI_URL')
if cache_url:
return FusekiMetadataCache(cache_location, cache_url)
try:
return SleepycatMetadataCache(cache_location)
except InvalidCacheException:
logging.warning('Unable to create cache based on BSD-DB. '
'Falling back to SQLite backend. '
'Performance may be degraded significantly.')
return SqliteMetadataCache(cache_location)
|
python
|
def _create_metadata_cache(cache_location):
"""Creates a new metadata cache instance appropriate for this platform.
"""
cache_url = os.getenv('GUTENBERG_FUSEKI_URL')
if cache_url:
return FusekiMetadataCache(cache_location, cache_url)
try:
return SleepycatMetadataCache(cache_location)
except InvalidCacheException:
logging.warning('Unable to create cache based on BSD-DB. '
'Falling back to SQLite backend. '
'Performance may be degraded significantly.')
return SqliteMetadataCache(cache_location)
|
[
"def",
"_create_metadata_cache",
"(",
"cache_location",
")",
":",
"cache_url",
"=",
"os",
".",
"getenv",
"(",
"'GUTENBERG_FUSEKI_URL'",
")",
"if",
"cache_url",
":",
"return",
"FusekiMetadataCache",
"(",
"cache_location",
",",
"cache_url",
")",
"try",
":",
"return",
"SleepycatMetadataCache",
"(",
"cache_location",
")",
"except",
"InvalidCacheException",
":",
"logging",
".",
"warning",
"(",
"'Unable to create cache based on BSD-DB. '",
"'Falling back to SQLite backend. '",
"'Performance may be degraded significantly.'",
")",
"return",
"SqliteMetadataCache",
"(",
"cache_location",
")"
] |
Creates a new metadata cache instance appropriate for this platform.
|
[
"Creates",
"a",
"new",
"metadata",
"cache",
"instance",
"appropriate",
"for",
"this",
"platform",
"."
] |
d1ef3da6fba6c3636d452479ed6bcb17c7d4d246
|
https://github.com/c-w/gutenberg/blob/d1ef3da6fba6c3636d452479ed6bcb17c7d4d246/gutenberg/acquire/metadata.py#L347-L362
|
13,278
|
c-w/gutenberg
|
gutenberg/acquire/metadata.py
|
MetadataCache.open
|
def open(self):
"""Opens an existing cache.
"""
try:
self.graph.open(self.cache_uri, create=False)
self._add_namespaces(self.graph)
self.is_open = True
except Exception:
raise InvalidCacheException('The cache is invalid or not created')
|
python
|
def open(self):
"""Opens an existing cache.
"""
try:
self.graph.open(self.cache_uri, create=False)
self._add_namespaces(self.graph)
self.is_open = True
except Exception:
raise InvalidCacheException('The cache is invalid or not created')
|
[
"def",
"open",
"(",
"self",
")",
":",
"try",
":",
"self",
".",
"graph",
".",
"open",
"(",
"self",
".",
"cache_uri",
",",
"create",
"=",
"False",
")",
"self",
".",
"_add_namespaces",
"(",
"self",
".",
"graph",
")",
"self",
".",
"is_open",
"=",
"True",
"except",
"Exception",
":",
"raise",
"InvalidCacheException",
"(",
"'The cache is invalid or not created'",
")"
] |
Opens an existing cache.
|
[
"Opens",
"an",
"existing",
"cache",
"."
] |
d1ef3da6fba6c3636d452479ed6bcb17c7d4d246
|
https://github.com/c-w/gutenberg/blob/d1ef3da6fba6c3636d452479ed6bcb17c7d4d246/gutenberg/acquire/metadata.py#L61-L70
|
13,279
|
c-w/gutenberg
|
gutenberg/acquire/metadata.py
|
MetadataCache.populate
|
def populate(self):
"""Populates a new cache.
"""
if self.exists:
raise CacheAlreadyExistsException('location: %s' % self.cache_uri)
self._populate_setup()
with closing(self.graph):
with self._download_metadata_archive() as metadata_archive:
for fact in self._iter_metadata_triples(metadata_archive):
self._add_to_graph(fact)
|
python
|
def populate(self):
"""Populates a new cache.
"""
if self.exists:
raise CacheAlreadyExistsException('location: %s' % self.cache_uri)
self._populate_setup()
with closing(self.graph):
with self._download_metadata_archive() as metadata_archive:
for fact in self._iter_metadata_triples(metadata_archive):
self._add_to_graph(fact)
|
[
"def",
"populate",
"(",
"self",
")",
":",
"if",
"self",
".",
"exists",
":",
"raise",
"CacheAlreadyExistsException",
"(",
"'location: %s'",
"%",
"self",
".",
"cache_uri",
")",
"self",
".",
"_populate_setup",
"(",
")",
"with",
"closing",
"(",
"self",
".",
"graph",
")",
":",
"with",
"self",
".",
"_download_metadata_archive",
"(",
")",
"as",
"metadata_archive",
":",
"for",
"fact",
"in",
"self",
".",
"_iter_metadata_triples",
"(",
"metadata_archive",
")",
":",
"self",
".",
"_add_to_graph",
"(",
"fact",
")"
] |
Populates a new cache.
|
[
"Populates",
"a",
"new",
"cache",
"."
] |
d1ef3da6fba6c3636d452479ed6bcb17c7d4d246
|
https://github.com/c-w/gutenberg/blob/d1ef3da6fba6c3636d452479ed6bcb17c7d4d246/gutenberg/acquire/metadata.py#L86-L98
|
13,280
|
c-w/gutenberg
|
gutenberg/acquire/metadata.py
|
MetadataCache.refresh
|
def refresh(self):
"""Refresh the cache by deleting the old one and creating a new one.
"""
if self.exists:
self.delete()
self.populate()
self.open()
|
python
|
def refresh(self):
"""Refresh the cache by deleting the old one and creating a new one.
"""
if self.exists:
self.delete()
self.populate()
self.open()
|
[
"def",
"refresh",
"(",
"self",
")",
":",
"if",
"self",
".",
"exists",
":",
"self",
".",
"delete",
"(",
")",
"self",
".",
"populate",
"(",
")",
"self",
".",
"open",
"(",
")"
] |
Refresh the cache by deleting the old one and creating a new one.
|
[
"Refresh",
"the",
"cache",
"by",
"deleting",
"the",
"old",
"one",
"and",
"creating",
"a",
"new",
"one",
"."
] |
d1ef3da6fba6c3636d452479ed6bcb17c7d4d246
|
https://github.com/c-w/gutenberg/blob/d1ef3da6fba6c3636d452479ed6bcb17c7d4d246/gutenberg/acquire/metadata.py#L112-L119
|
13,281
|
c-w/gutenberg
|
gutenberg/acquire/metadata.py
|
MetadataCache._download_metadata_archive
|
def _download_metadata_archive(self):
"""Makes a remote call to the Project Gutenberg servers and downloads
the entire Project Gutenberg meta-data catalog. The catalog describes
the texts on Project Gutenberg in RDF. The function yields the
path to a temporary local copy of the catalog.
"""
with tempfile.NamedTemporaryFile(delete=False) as metadata_archive:
shutil.copyfileobj(urlopen(self.catalog_source), metadata_archive)
yield metadata_archive.name
remove(metadata_archive.name)
|
python
|
def _download_metadata_archive(self):
"""Makes a remote call to the Project Gutenberg servers and downloads
the entire Project Gutenberg meta-data catalog. The catalog describes
the texts on Project Gutenberg in RDF. The function yields the
path to a temporary local copy of the catalog.
"""
with tempfile.NamedTemporaryFile(delete=False) as metadata_archive:
shutil.copyfileobj(urlopen(self.catalog_source), metadata_archive)
yield metadata_archive.name
remove(metadata_archive.name)
|
[
"def",
"_download_metadata_archive",
"(",
"self",
")",
":",
"with",
"tempfile",
".",
"NamedTemporaryFile",
"(",
"delete",
"=",
"False",
")",
"as",
"metadata_archive",
":",
"shutil",
".",
"copyfileobj",
"(",
"urlopen",
"(",
"self",
".",
"catalog_source",
")",
",",
"metadata_archive",
")",
"yield",
"metadata_archive",
".",
"name",
"remove",
"(",
"metadata_archive",
".",
"name",
")"
] |
Makes a remote call to the Project Gutenberg servers and downloads
the entire Project Gutenberg meta-data catalog. The catalog describes
the texts on Project Gutenberg in RDF. The function yields the
path to a temporary local copy of the catalog.
|
[
"Makes",
"a",
"remote",
"call",
"to",
"the",
"Project",
"Gutenberg",
"servers",
"and",
"downloads",
"the",
"entire",
"Project",
"Gutenberg",
"meta",
"-",
"data",
"catalog",
".",
"The",
"catalog",
"describes",
"the",
"texts",
"on",
"Project",
"Gutenberg",
"in",
"RDF",
".",
"The",
"function",
"returns",
"a",
"file",
"-",
"pointer",
"to",
"the",
"catalog",
"."
] |
d1ef3da6fba6c3636d452479ed6bcb17c7d4d246
|
https://github.com/c-w/gutenberg/blob/d1ef3da6fba6c3636d452479ed6bcb17c7d4d246/gutenberg/acquire/metadata.py#L138-L148
|
13,282
|
c-w/gutenberg
|
gutenberg/acquire/metadata.py
|
MetadataCache._metadata_is_invalid
|
def _metadata_is_invalid(cls, fact):
"""Determines if the fact is not well formed.
"""
return any(isinstance(token, URIRef) and ' ' in token
for token in fact)
|
python
|
def _metadata_is_invalid(cls, fact):
"""Determines if the fact is not well formed.
"""
return any(isinstance(token, URIRef) and ' ' in token
for token in fact)
|
[
"def",
"_metadata_is_invalid",
"(",
"cls",
",",
"fact",
")",
":",
"return",
"any",
"(",
"isinstance",
"(",
"token",
",",
"URIRef",
")",
"and",
"' '",
"in",
"token",
"for",
"token",
"in",
"fact",
")"
] |
Determines if the fact is not well formed.
|
[
"Determines",
"if",
"the",
"fact",
"is",
"not",
"well",
"formed",
"."
] |
d1ef3da6fba6c3636d452479ed6bcb17c7d4d246
|
https://github.com/c-w/gutenberg/blob/d1ef3da6fba6c3636d452479ed6bcb17c7d4d246/gutenberg/acquire/metadata.py#L151-L156
|
13,283
|
c-w/gutenberg
|
gutenberg/acquire/metadata.py
|
MetadataCache._iter_metadata_triples
|
def _iter_metadata_triples(cls, metadata_archive_path):
"""Yields all meta-data of Project Gutenberg texts contained in the
catalog dump.
"""
pg_rdf_regex = re.compile(r'pg\d+\.rdf$')
with closing(tarfile.open(metadata_archive_path)) as metadata_archive:
for item in metadata_archive:
if pg_rdf_regex.search(item.name):
with disable_logging():
extracted = metadata_archive.extractfile(item)
graph = Graph().parse(extracted)
for fact in graph:
if cls._metadata_is_invalid(fact):
logging.info('skipping invalid triple %s', fact)
else:
yield fact
|
python
|
def _iter_metadata_triples(cls, metadata_archive_path):
"""Yields all meta-data of Project Gutenberg texts contained in the
catalog dump.
"""
pg_rdf_regex = re.compile(r'pg\d+\.rdf$')
with closing(tarfile.open(metadata_archive_path)) as metadata_archive:
for item in metadata_archive:
if pg_rdf_regex.search(item.name):
with disable_logging():
extracted = metadata_archive.extractfile(item)
graph = Graph().parse(extracted)
for fact in graph:
if cls._metadata_is_invalid(fact):
logging.info('skipping invalid triple %s', fact)
else:
yield fact
|
[
"def",
"_iter_metadata_triples",
"(",
"cls",
",",
"metadata_archive_path",
")",
":",
"pg_rdf_regex",
"=",
"re",
".",
"compile",
"(",
"r'pg\\d+.rdf$'",
")",
"with",
"closing",
"(",
"tarfile",
".",
"open",
"(",
"metadata_archive_path",
")",
")",
"as",
"metadata_archive",
":",
"for",
"item",
"in",
"metadata_archive",
":",
"if",
"pg_rdf_regex",
".",
"search",
"(",
"item",
".",
"name",
")",
":",
"with",
"disable_logging",
"(",
")",
":",
"extracted",
"=",
"metadata_archive",
".",
"extractfile",
"(",
"item",
")",
"graph",
"=",
"Graph",
"(",
")",
".",
"parse",
"(",
"extracted",
")",
"for",
"fact",
"in",
"graph",
":",
"if",
"cls",
".",
"_metadata_is_invalid",
"(",
"fact",
")",
":",
"logging",
".",
"info",
"(",
"'skipping invalid triple %s'",
",",
"fact",
")",
"else",
":",
"yield",
"fact"
] |
Yields all meta-data of Project Gutenberg texts contained in the
catalog dump.
|
[
"Yields",
"all",
"meta",
"-",
"data",
"of",
"Project",
"Gutenberg",
"texts",
"contained",
"in",
"the",
"catalog",
"dump",
"."
] |
d1ef3da6fba6c3636d452479ed6bcb17c7d4d246
|
https://github.com/c-w/gutenberg/blob/d1ef3da6fba6c3636d452479ed6bcb17c7d4d246/gutenberg/acquire/metadata.py#L159-L175
|
13,284
|
c-w/gutenberg
|
gutenberg/acquire/metadata.py
|
FusekiMetadataCache._populate_setup
|
def _populate_setup(self):
"""Just create a local marker file since the actual database should
already be created on the Fuseki server.
"""
makedirs(os.path.dirname(self._cache_marker))
with codecs.open(self._cache_marker, 'w', encoding='utf-8') as fobj:
fobj.write(self.cache_uri)
self.graph.open(self.cache_uri)
|
python
|
def _populate_setup(self):
"""Just create a local marker file since the actual database should
already be created on the Fuseki server.
"""
makedirs(os.path.dirname(self._cache_marker))
with codecs.open(self._cache_marker, 'w', encoding='utf-8') as fobj:
fobj.write(self.cache_uri)
self.graph.open(self.cache_uri)
|
[
"def",
"_populate_setup",
"(",
"self",
")",
":",
"makedirs",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"self",
".",
"_cache_marker",
")",
")",
"with",
"codecs",
".",
"open",
"(",
"self",
".",
"_cache_marker",
",",
"'w'",
",",
"encoding",
"=",
"'utf-8'",
")",
"as",
"fobj",
":",
"fobj",
".",
"write",
"(",
"self",
".",
"cache_uri",
")",
"self",
".",
"graph",
".",
"open",
"(",
"self",
".",
"cache_uri",
")"
] |
Just create a local marker file since the actual database should
already be created on the Fuseki server.
|
[
"Just",
"create",
"a",
"local",
"marker",
"file",
"since",
"the",
"actual",
"database",
"should",
"already",
"be",
"created",
"on",
"the",
"Fuseki",
"server",
"."
] |
d1ef3da6fba6c3636d452479ed6bcb17c7d4d246
|
https://github.com/c-w/gutenberg/blob/d1ef3da6fba6c3636d452479ed6bcb17c7d4d246/gutenberg/acquire/metadata.py#L219-L227
|
13,285
|
c-w/gutenberg
|
gutenberg/acquire/metadata.py
|
FusekiMetadataCache.delete
|
def delete(self):
"""Deletes the local marker file and also any data in the Fuseki
server.
"""
MetadataCache.delete(self)
try:
self.graph.query('DELETE WHERE { ?s ?p ?o . }')
except ResultException:
# this is often just a false positive since Jena Fuseki does not
# return tuples for a deletion query, so swallowing the exception
# here is fine
logging.exception('error when deleting graph')
|
python
|
def delete(self):
"""Deletes the local marker file and also any data in the Fuseki
server.
"""
MetadataCache.delete(self)
try:
self.graph.query('DELETE WHERE { ?s ?p ?o . }')
except ResultException:
# this is often just a false positive since Jena Fuseki does not
# return tuples for a deletion query, so swallowing the exception
# here is fine
logging.exception('error when deleting graph')
|
[
"def",
"delete",
"(",
"self",
")",
":",
"MetadataCache",
".",
"delete",
"(",
"self",
")",
"try",
":",
"self",
".",
"graph",
".",
"query",
"(",
"'DELETE WHERE { ?s ?p ?o . }'",
")",
"except",
"ResultException",
":",
"# this is often just a false positive since Jena Fuseki does not",
"# return tuples for a deletion query, so swallowing the exception",
"# here is fine",
"logging",
".",
"exception",
"(",
"'error when deleting graph'",
")"
] |
Deletes the local marker file and also any data in the Fuseki
server.
|
[
"Deletes",
"the",
"local",
"marker",
"file",
"and",
"also",
"any",
"data",
"in",
"the",
"Fuseki",
"server",
"."
] |
d1ef3da6fba6c3636d452479ed6bcb17c7d4d246
|
https://github.com/c-w/gutenberg/blob/d1ef3da6fba6c3636d452479ed6bcb17c7d4d246/gutenberg/acquire/metadata.py#L229-L241
|
13,286
|
c-w/gutenberg
|
gutenberg/acquire/metadata.py
|
FusekiMetadataCache._metadata_is_invalid
|
def _metadata_is_invalid(cls, fact):
"""Filters out blank nodes since the SPARQLUpdateStore does not
support them.
"""
return (MetadataCache._metadata_is_invalid(fact)
or any(isinstance(token, BNode) for token in fact))
|
python
|
def _metadata_is_invalid(cls, fact):
"""Filters out blank nodes since the SPARQLUpdateStore does not
support them.
"""
return (MetadataCache._metadata_is_invalid(fact)
or any(isinstance(token, BNode) for token in fact))
|
[
"def",
"_metadata_is_invalid",
"(",
"cls",
",",
"fact",
")",
":",
"return",
"(",
"MetadataCache",
".",
"_metadata_is_invalid",
"(",
"fact",
")",
"or",
"any",
"(",
"isinstance",
"(",
"token",
",",
"BNode",
")",
"for",
"token",
"in",
"fact",
")",
")"
] |
Filters out blank nodes since the SPARQLUpdateStore does not
support them.
|
[
"Filters",
"out",
"blank",
"nodes",
"since",
"the",
"SPARQLUpdateStore",
"does",
"not",
"support",
"them",
"."
] |
d1ef3da6fba6c3636d452479ed6bcb17c7d4d246
|
https://github.com/c-w/gutenberg/blob/d1ef3da6fba6c3636d452479ed6bcb17c7d4d246/gutenberg/acquire/metadata.py#L269-L275
|
13,287
|
c-w/gutenberg
|
gutenberg/_util/objects.py
|
all_subclasses
|
def all_subclasses(cls):
"""Recursively returns all the subclasses of the provided class.
"""
subclasses = cls.__subclasses__()
descendants = (descendant for subclass in subclasses
for descendant in all_subclasses(subclass))
return set(subclasses) | set(descendants)
|
python
|
def all_subclasses(cls):
"""Recursively returns all the subclasses of the provided class.
"""
subclasses = cls.__subclasses__()
descendants = (descendant for subclass in subclasses
for descendant in all_subclasses(subclass))
return set(subclasses) | set(descendants)
|
[
"def",
"all_subclasses",
"(",
"cls",
")",
":",
"subclasses",
"=",
"cls",
".",
"__subclasses__",
"(",
")",
"descendants",
"=",
"(",
"descendant",
"for",
"subclass",
"in",
"subclasses",
"for",
"descendant",
"in",
"all_subclasses",
"(",
"subclass",
")",
")",
"return",
"set",
"(",
"subclasses",
")",
"|",
"set",
"(",
"descendants",
")"
] |
Recursively returns all the subclasses of the provided class.
|
[
"Recursively",
"returns",
"all",
"the",
"subclasses",
"of",
"the",
"provided",
"class",
"."
] |
d1ef3da6fba6c3636d452479ed6bcb17c7d4d246
|
https://github.com/c-w/gutenberg/blob/d1ef3da6fba6c3636d452479ed6bcb17c7d4d246/gutenberg/_util/objects.py#L4-L11
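A self-contained sketch of all_subclasses on a small hierarchy:

from gutenberg._util.objects import all_subclasses

class Base(object): pass
class Child(Base): pass
class Grandchild(Child): pass

# Direct and indirect subclasses are both collected.
assert all_subclasses(Base) == {Child, Grandchild}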
|
13,288
|
ralphbean/ansi2html
|
ansi2html/converter.py
|
Ansi2HTMLConverter._collapse_cursor
|
def _collapse_cursor(self, parts):
""" Act on any CursorMoveUp commands by deleting preceding tokens """
final_parts = []
for part in parts:
# Throw out empty string tokens ("")
if not part:
continue
# Go back, deleting every token in the last 'line'
if part == CursorMoveUp:
if final_parts:
final_parts.pop()
while final_parts and '\n' not in final_parts[-1]:
final_parts.pop()
continue
# Otherwise, just pass this token forward
final_parts.append(part)
return final_parts
|
python
|
def _collapse_cursor(self, parts):
""" Act on any CursorMoveUp commands by deleting preceding tokens """
final_parts = []
for part in parts:
# Throw out empty string tokens ("")
if not part:
continue
# Go back, deleting every token in the last 'line'
if part == CursorMoveUp:
if final_parts:
final_parts.pop()
while final_parts and '\n' not in final_parts[-1]:
final_parts.pop()
continue
# Otherwise, just pass this token forward
final_parts.append(part)
return final_parts
|
[
"def",
"_collapse_cursor",
"(",
"self",
",",
"parts",
")",
":",
"final_parts",
"=",
"[",
"]",
"for",
"part",
"in",
"parts",
":",
"# Throw out empty string tokens (\"\")",
"if",
"not",
"part",
":",
"continue",
"# Go back, deleting every token in the last 'line'",
"if",
"part",
"==",
"CursorMoveUp",
":",
"if",
"final_parts",
":",
"final_parts",
".",
"pop",
"(",
")",
"while",
"final_parts",
"and",
"'\\n'",
"not",
"in",
"final_parts",
"[",
"-",
"1",
"]",
":",
"final_parts",
".",
"pop",
"(",
")",
"continue",
"# Otherwise, just pass this token forward",
"final_parts",
".",
"append",
"(",
"part",
")",
"return",
"final_parts"
] |
Act on any CursorMoveUp commands by deleting preceding tokens
|
[
"Act",
"on",
"any",
"CursorMoveUp",
"commands",
"by",
"deleting",
"preceding",
"tokens"
] |
ac3b230f29d3ab180d29efd98c14ffef29707e2b
|
https://github.com/ralphbean/ansi2html/blob/ac3b230f29d3ab180d29efd98c14ffef29707e2b/ansi2html/converter.py#L413-L436
|
13,289
|
ralphbean/ansi2html
|
ansi2html/converter.py
|
Ansi2HTMLConverter.prepare
|
def prepare(self, ansi='', ensure_trailing_newline=False):
""" Load the contents of 'ansi' into this object """
body, styles = self.apply_regex(ansi)
if ensure_trailing_newline and _needs_extra_newline(body):
body += '\n'
self._attrs = {
'dark_bg': self.dark_bg,
'line_wrap': self.line_wrap,
'font_size': self.font_size,
'body': body,
'styles': styles,
}
return self._attrs
|
python
|
def prepare(self, ansi='', ensure_trailing_newline=False):
""" Load the contents of 'ansi' into this object """
body, styles = self.apply_regex(ansi)
if ensure_trailing_newline and _needs_extra_newline(body):
body += '\n'
self._attrs = {
'dark_bg': self.dark_bg,
'line_wrap': self.line_wrap,
'font_size': self.font_size,
'body': body,
'styles': styles,
}
return self._attrs
|
[
"def",
"prepare",
"(",
"self",
",",
"ansi",
"=",
"''",
",",
"ensure_trailing_newline",
"=",
"False",
")",
":",
"body",
",",
"styles",
"=",
"self",
".",
"apply_regex",
"(",
"ansi",
")",
"if",
"ensure_trailing_newline",
"and",
"_needs_extra_newline",
"(",
"body",
")",
":",
"body",
"+=",
"'\\n'",
"self",
".",
"_attrs",
"=",
"{",
"'dark_bg'",
":",
"self",
".",
"dark_bg",
",",
"'line_wrap'",
":",
"self",
".",
"line_wrap",
",",
"'font_size'",
":",
"self",
".",
"font_size",
",",
"'body'",
":",
"body",
",",
"'styles'",
":",
"styles",
",",
"}",
"return",
"self",
".",
"_attrs"
] |
Load the contents of 'ansi' into this object
|
[
"Load",
"the",
"contents",
"of",
"ansi",
"into",
"this",
"object"
] |
ac3b230f29d3ab180d29efd98c14ffef29707e2b
|
https://github.com/ralphbean/ansi2html/blob/ac3b230f29d3ab180d29efd98c14ffef29707e2b/ansi2html/converter.py#L438-L454
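prepare backs the converter's public convert method; a minimal sketch:

from ansi2html import Ansi2HTMLConverter

conv = Ansi2HTMLConverter()
attrs = conv.prepare('\x1b[31mred\x1b[0m plain')
print(attrs['body'])  # HTML fragment with the color codes rendered as styled spans
print(conv.convert('\x1b[31mred\x1b[0m plain'))  # complete HTML document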
|
13,290
|
PyO3/setuptools-rust
|
setuptools_rust/build_ext.py
|
build_ext.run
|
def run(self):
"""Run build_rust sub command """
if self.has_rust_extensions():
log.info("running build_rust")
build_rust = self.get_finalized_command("build_rust")
build_rust.inplace = self.inplace
build_rust.run()
_build_ext.run(self)
|
python
|
def run(self):
"""Run build_rust sub command """
if self.has_rust_extensions():
log.info("running build_rust")
build_rust = self.get_finalized_command("build_rust")
build_rust.inplace = self.inplace
build_rust.run()
_build_ext.run(self)
|
[
"def",
"run",
"(",
"self",
")",
":",
"if",
"self",
".",
"has_rust_extensions",
"(",
")",
":",
"log",
".",
"info",
"(",
"\"running build_rust\"",
")",
"build_rust",
"=",
"self",
".",
"get_finalized_command",
"(",
"\"build_rust\"",
")",
"build_rust",
".",
"inplace",
"=",
"self",
".",
"inplace",
"build_rust",
".",
"run",
"(",
")",
"_build_ext",
".",
"run",
"(",
"self",
")"
] |
Run build_rust sub command
|
[
"Run",
"build_rust",
"sub",
"command"
] |
cd3ecec5749927a5c69b8ea516fc918ae95d18ce
|
https://github.com/PyO3/setuptools-rust/blob/cd3ecec5749927a5c69b8ea516fc918ae95d18ce/setuptools_rust/build_ext.py#L20-L28
|
13,291
|
PyO3/setuptools-rust
|
setuptools_rust/extension.py
|
RustExtension.get_lib_name
|
def get_lib_name(self):
""" Parse Cargo.toml to get the name of the shared library. """
# We import in here to make sure the setup_requires are already installed
import toml
cfg = toml.load(self.path)
name = cfg.get("lib", {}).get("name")
if name is None:
name = cfg.get("package", {}).get("name")
if name is None:
raise Exception(
"Can not parse library name from Cargo.toml. "
"Cargo.toml missing value for 'name' key "
"in both the [package] section and the [lib] section"
)
name = re.sub(r"[./\\-]", "_", name)
return name
|
python
|
def get_lib_name(self):
""" Parse Cargo.toml to get the name of the shared library. """
# We import in here to make sure the setup_requires are already installed
import toml
cfg = toml.load(self.path)
name = cfg.get("lib", {}).get("name")
if name is None:
name = cfg.get("package", {}).get("name")
if name is None:
raise Exception(
"Can not parse library name from Cargo.toml. "
"Cargo.toml missing value for 'name' key "
"in both the [package] section and the [lib] section"
)
name = re.sub(r"[./\\-]", "_", name)
return name
|
[
"def",
"get_lib_name",
"(",
"self",
")",
":",
"# We import in here to make sure the the setup_requires are already installed",
"import",
"toml",
"cfg",
"=",
"toml",
".",
"load",
"(",
"self",
".",
"path",
")",
"name",
"=",
"cfg",
".",
"get",
"(",
"\"lib\"",
",",
"{",
"}",
")",
".",
"get",
"(",
"\"name\"",
")",
"if",
"name",
"is",
"None",
":",
"name",
"=",
"cfg",
".",
"get",
"(",
"\"package\"",
",",
"{",
"}",
")",
".",
"get",
"(",
"\"name\"",
")",
"if",
"name",
"is",
"None",
":",
"raise",
"Exception",
"(",
"\"Can not parse library name from Cargo.toml. \"",
"\"Cargo.toml missing value for 'name' key \"",
"\"in both the [package] section and the [lib] section\"",
")",
"name",
"=",
"re",
".",
"sub",
"(",
"r\"[./\\\\-]\"",
",",
"\"_\"",
",",
"name",
")",
"return",
"name"
] |
Parse Cargo.toml to get the name of the shared library.
|
[
"Parse",
"Cargo",
".",
"toml",
"to",
"get",
"the",
"name",
"of",
"the",
"shared",
"library",
"."
] |
cd3ecec5749927a5c69b8ea516fc918ae95d18ce
|
https://github.com/PyO3/setuptools-rust/blob/cd3ecec5749927a5c69b8ea516fc918ae95d18ce/setuptools_rust/extension.py#L106-L122
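A sketch of the lookup order get_lib_name implements; the manifest content is hypothetical:

from setuptools_rust import RustExtension

# Cargo.toml (hypothetical):
#   [package]
#   name = "my-ext"
#
# With no [lib] name override, the [package] name is used and
# separator characters are normalized to underscores.
ext = RustExtension('my_ext', 'Cargo.toml')
print(ext.get_lib_name())  # my_ext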
|
13,292
|
PyO3/setuptools-rust
|
setuptools_rust/tomlgen.py
|
find_rust_extensions
|
def find_rust_extensions(*directories, **kwargs):
"""Attempt to find Rust extensions in given directories.
This function will recurse through the given directories to find a
file whose name is ``libfile``. When such
a file is found, an extension is created, expecting the cargo
manifest file (``Cargo.toml``) to be next to that file. The
extension destination will be deduced from the name of the
directory where that ``libfile`` is contained.
Arguments:
directories (list, *optional*): a list of directories to walk
through recursively to find extensions. If none are given,
then the current directory will be used instead.
Keyword Arguments:
libfile (str): the name of the file to look for when searching
for Rust extensions. Defaults to ``lib.rs``, but might be
changed to allow defining more *Pythonic* filenames
(like ``__init__.rs``)!
Note:
All other keyword arguments will be directly passed to the
`RustExtension` instance created when an extension is found.
One may be interested in passing ``binding`` and ``strip``
options::
>>> import setuptools_rust as rust
>>> rust.find_rust_extensions(binding=rust.Binding.PyO3)
Example:
Consider the following project::
lib/
└ mylib/
└ rustext/
├ lib.rs
├ ...
└ Cargo.toml
setup.py
There is only one extension that can be found in the ``lib``
module::
>>> import setuptools_rust as rust
>>> for ext in rust.find_rust_extensions("lib"):
... print(ext.name, "=>", ext.path)
lib.mylib.rustext => lib/mylib/rustext/Cargo.toml
"""
# Get the file used to mark a Rust extension
libfile = kwargs.get("libfile", "lib.rs")
# Get the directories to explore
directories = directories or [os.getcwd()]
extensions = []
for directory in directories:
for base, dirs, files in os.walk(directory):
if libfile in files:
dotpath = os.path.relpath(base).replace(os.path.sep, ".")
tomlpath = os.path.join(base, "Cargo.toml")
ext = RustExtension(dotpath, tomlpath, **kwargs)
ext.libfile = os.path.join(base, libfile)
extensions.append(ext)
return extensions
|
python
|
def find_rust_extensions(*directories, **kwargs):
"""Attempt to find Rust extensions in given directories.
This function will recurse through the given directories to find a
file whose name is ``libfile``. When such
a file is found, an extension is created, expecting the cargo
manifest file (``Cargo.toml``) to be next to that file. The
extension destination will be deduced from the name of the
directory where that ``libfile`` is contained.
Arguments:
directories (list, *optional*): a list of directories to walk
through recursively to find extensions. If none are given,
then the current directory will be used instead.
Keyword Arguments:
libfile (str): the name of the file to look for when searching
for Rust extensions. Defaults to ``lib.rs``, but might be
changed to allow defining more *Pythonic* filenames
(like ``__init__.rs``)!
Note:
All other keyword arguments will be directly passed to the
`RustExtension` instance created when an extension is found.
One may be interested in passing ``binding`` and ``strip``
options::
>>> import setuptools_rust as rust
>>> rust.find_rust_extensions(binding=rust.Binding.PyO3)
Example:
Consider the following project::
lib/
└ mylib/
└ rustext/
├ lib.rs
├ ...
└ Cargo.toml
setup.py
There is only one extension that can be found in the ``lib``
module::
>>> import setuptools_rust as rust
>>> for ext in rust.find_rust_extensions("lib"):
... print(ext.name, "=>", ext.path)
lib.mylib.rustext => lib/mylib/rustext/Cargo.toml
"""
# Get the file used to mark a Rust extension
libfile = kwargs.get("libfile", "lib.rs")
# Get the directories to explore
directories = directories or [os.getcwd()]
extensions = []
for directory in directories:
for base, dirs, files in os.walk(directory):
if libfile in files:
dotpath = os.path.relpath(base).replace(os.path.sep, ".")
tomlpath = os.path.join(base, "Cargo.toml")
ext = RustExtension(dotpath, tomlpath, **kwargs)
ext.libfile = os.path.join(base, libfile)
extensions.append(ext)
return extensions
|
[
"def",
"find_rust_extensions",
"(",
"*",
"directories",
",",
"*",
"*",
"kwargs",
")",
":",
"# Get the file used to mark a Rust extension",
"libfile",
"=",
"kwargs",
".",
"get",
"(",
"\"libfile\"",
",",
"\"lib.rs\"",
")",
"# Get the directories to explore",
"directories",
"=",
"directories",
"or",
"[",
"os",
".",
"getcwd",
"(",
")",
"]",
"extensions",
"=",
"[",
"]",
"for",
"directory",
"in",
"directories",
":",
"for",
"base",
",",
"dirs",
",",
"files",
"in",
"os",
".",
"walk",
"(",
"directory",
")",
":",
"if",
"libfile",
"in",
"files",
":",
"dotpath",
"=",
"os",
".",
"path",
".",
"relpath",
"(",
"base",
")",
".",
"replace",
"(",
"os",
".",
"path",
".",
"sep",
",",
"\".\"",
")",
"tomlpath",
"=",
"os",
".",
"path",
".",
"join",
"(",
"base",
",",
"\"Cargo.toml\"",
")",
"ext",
"=",
"RustExtension",
"(",
"dotpath",
",",
"tomlpath",
",",
"*",
"*",
"kwargs",
")",
"ext",
".",
"libfile",
"=",
"os",
".",
"path",
".",
"join",
"(",
"base",
",",
"libfile",
")",
"extensions",
".",
"append",
"(",
"ext",
")",
"return",
"extensions"
] |
Attempt to find Rust extensions in given directories.
This function will recurse through the given directories to find a
file whose name is ``libfile``. When such
a file is found, an extension is created, expecting the cargo
manifest file (``Cargo.toml``) to be next to that file. The
extension destination will be deduced from the name of the
directory where that ``libfile`` is contained.
Arguments:
directories (list, *optional*): a list of directories to walk
through recursively to find extensions. If none are given,
then the current directory will be used instead.
Keyword Arguments:
libfile (str): the name of the file to look for when searching
for Rust extensions. Defaults to ``lib.rs``, but might be
changed to allow defining more *Pythonic* filenames
(like ``__init__.rs``)!
Note:
All other keyword arguments will be directly passed to the
`RustExtension` instance created when an extension is found.
One may be interested in passing ``binding`` and ``strip``
options::
>>> import setuptools_rust as rust
>>> rust.find_rust_extensions(binding=rust.Binding.PyO3)
Example:
Consider the following project::
lib/
└ mylib/
└ rustext/
├ lib.rs
├ ...
└ Cargo.toml
setup.py
There is only one extension that can be found in the ``lib``
module::
>>> import setuptools_rust as rust
>>> for ext in rust.find_rust_extensions("lib"):
... print(ext.name, "=>", ext.path)
lib.mylib.rustext => lib/mylib/rustext/Cargo.toml
|
[
"Attempt",
"to",
"find",
"Rust",
"extensions",
"in",
"given",
"directories",
"."
] |
cd3ecec5749927a5c69b8ea516fc918ae95d18ce
|
https://github.com/PyO3/setuptools-rust/blob/cd3ecec5749927a5c69b8ea516fc918ae95d18ce/setuptools_rust/tomlgen.py#L207-L274
|
13,293
|
gamechanger/mongothon
|
mongothon/events.py
|
EventHandlerRegistrar.register
|
def register(self, event, fn):
"""
Registers the given function as a handler to be applied
in response to the given event.
"""
# TODO: Can we check the method signature?
self._handler_dict.setdefault(event, [])
if fn not in self._handler_dict[event]:
self._handler_dict[event].append(fn)
|
python
|
def register(self, event, fn):
"""
Registers the given function as a handler to be applied
in response to the given event.
"""
# TODO: Can we check the method signature?
self._handler_dict.setdefault(event, [])
if fn not in self._handler_dict[event]:
self._handler_dict[event].append(fn)
|
[
"def",
"register",
"(",
"self",
",",
"event",
",",
"fn",
")",
":",
"# TODO: Can we check the method signature?",
"self",
".",
"_handler_dict",
".",
"setdefault",
"(",
"event",
",",
"[",
"]",
")",
"if",
"fn",
"not",
"in",
"self",
".",
"_handler_dict",
"[",
"event",
"]",
":",
"self",
".",
"_handler_dict",
"[",
"event",
"]",
".",
"append",
"(",
"fn",
")"
] |
Registers the given function as a handler to be applied
in response to the given event.
|
[
"Registers",
"the",
"given",
"function",
"as",
"a",
"handler",
"to",
"be",
"applied",
"in",
"response",
"to",
"the",
"the",
"given",
"event",
"."
] |
5305bdae8e38d09bfe7881f1edc99ac0a2e6b96b
|
https://github.com/gamechanger/mongothon/blob/5305bdae8e38d09bfe7881f1edc99ac0a2e6b96b/mongothon/events.py#L15-L24
|
13,294
|
gamechanger/mongothon
|
mongothon/events.py
|
EventHandlerRegistrar.apply
|
def apply(self, event, document, *args, **kwargs):
"""
Applies all middleware functions registered against the given
event, in order, to the given document.
"""
for fn in self._handler_dict.get(event, []):
fn(document, *args, **kwargs)
|
python
|
def apply(self, event, document, *args, **kwargs):
"""
Applies all middleware functions registered against the given
event, in order, to the given document.
"""
for fn in self._handler_dict.get(event, []):
fn(document, *args, **kwargs)
|
[
"def",
"apply",
"(",
"self",
",",
"event",
",",
"document",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"for",
"fn",
"in",
"self",
".",
"_handler_dict",
".",
"get",
"(",
"event",
",",
"[",
"]",
")",
":",
"fn",
"(",
"document",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] |
Applies all middleware functions registered against the given
event, in order, to the given document.
|
[
"Applies",
"all",
"middleware",
"functions",
"registered",
"against",
"the",
"given",
"event",
"in",
"order",
"to",
"the",
"given",
"document",
"."
] |
5305bdae8e38d09bfe7881f1edc99ac0a2e6b96b
|
https://github.com/gamechanger/mongothon/blob/5305bdae8e38d09bfe7881f1edc99ac0a2e6b96b/mongothon/events.py#L26-L32
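A sketch tying register, apply, and deregister together, assuming EventHandlerRegistrar can be constructed directly:

from mongothon.events import EventHandlerRegistrar

def log_save(document):
    print('saving %s' % document)

registrar = EventHandlerRegistrar()
registrar.register('did_save', log_save)
registrar.apply('did_save', {'name': 'test'})  # prints: saving {'name': 'test'}
registrar.deregister('did_save', log_save)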
|
13,295
|
gamechanger/mongothon
|
mongothon/events.py
|
EventHandlerRegistrar.deregister
|
def deregister(self, event, fn):
"""
Deregister the handler function from the given event.
"""
if event in self._handler_dict and fn in self._handler_dict[event]:
self._handler_dict[event].remove(fn)
|
python
|
def deregister(self, event, fn):
"""
Deregister the handler function from the given event.
"""
if event in self._handler_dict and fn in self._handler_dict[event]:
self._handler_dict[event].remove(fn)
|
[
"def",
"deregister",
"(",
"self",
",",
"event",
",",
"fn",
")",
":",
"if",
"event",
"in",
"self",
".",
"_handler_dict",
"and",
"fn",
"in",
"self",
".",
"_handler_dict",
"[",
"event",
"]",
":",
"self",
".",
"_handler_dict",
"[",
"event",
"]",
".",
"remove",
"(",
"fn",
")"
] |
Deregister the handler function from the given event.
|
[
"Deregister",
"the",
"handler",
"function",
"from",
"the",
"given",
"event",
"."
] |
5305bdae8e38d09bfe7881f1edc99ac0a2e6b96b
|
https://github.com/gamechanger/mongothon/blob/5305bdae8e38d09bfe7881f1edc99ac0a2e6b96b/mongothon/events.py#L34-L39
|
13,296
|
gamechanger/mongothon
|
mongothon/queries.py
|
ScopeBuilder.unpack_scope
|
def unpack_scope(cls, scope):
"""Unpacks the response from a scope function. The function should return
either a query, a query and a projection, or a query, a projection, and
a query options hash."""
query = {}
projection = {}
options = {}
if isinstance(scope, tuple):
if len(scope) > 3:
raise ValueError("Invalid scope")
if len(scope) >= 1:
query = scope[0]
if len(scope) >= 2:
projection = scope[1]
if len(scope) == 3:
options = scope[2]
elif isinstance(scope, dict):
query = scope
else:
raise ValueError("Invalid scope")
return query, projection, options
|
python
|
def unpack_scope(cls, scope):
"""Unpacks the response from a scope function. The function should return
either a query, a query and a projection, or a query, a projection, and
a query options hash."""
query = {}
projection = {}
options = {}
if isinstance(scope, tuple):
if len(scope) > 3:
raise ValueError("Invalid scope")
if len(scope) >= 1:
query = scope[0]
if len(scope) >= 2:
projection = scope[1]
if len(scope) == 3:
options = scope[2]
elif isinstance(scope, dict):
query = scope
else:
raise ValueError("Invalid scope")
return query, projection, options
|
[
"def",
"unpack_scope",
"(",
"cls",
",",
"scope",
")",
":",
"query",
"=",
"{",
"}",
"projection",
"=",
"{",
"}",
"options",
"=",
"{",
"}",
"if",
"isinstance",
"(",
"scope",
",",
"tuple",
")",
":",
"if",
"len",
"(",
"scope",
")",
">",
"3",
":",
"raise",
"ValueError",
"(",
"\"Invalid scope\"",
")",
"if",
"len",
"(",
"scope",
")",
">=",
"1",
":",
"query",
"=",
"scope",
"[",
"0",
"]",
"if",
"len",
"(",
"scope",
")",
">=",
"2",
":",
"projection",
"=",
"scope",
"[",
"1",
"]",
"if",
"len",
"(",
"scope",
")",
"==",
"3",
":",
"options",
"=",
"scope",
"[",
"2",
"]",
"elif",
"isinstance",
"(",
"scope",
",",
"dict",
")",
":",
"query",
"=",
"scope",
"else",
":",
"raise",
"ValueError",
"(",
"\"Invalid scope\"",
")",
"return",
"query",
",",
"projection",
",",
"options"
] |
Unpacks the response from a scope function. The function should return
either a query, a query and a projection, or a query, a projection, and
a query options hash.
|
[
"Unpacks",
"the",
"response",
"from",
"a",
"scope",
"function",
".",
"The",
"function",
"should",
"return",
"either",
"a",
"query",
"a",
"query",
"and",
"a",
"projection",
"or",
"a",
"query",
"a",
"projection",
"and",
"an",
"query",
"options",
"hash",
"."
] |
5305bdae8e38d09bfe7881f1edc99ac0a2e6b96b
|
https://github.com/gamechanger/mongothon/blob/5305bdae8e38d09bfe7881f1edc99ac0a2e6b96b/mongothon/queries.py#L24-L46
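A sketch of the three scope shapes unpack_scope accepts (the cls parameter suggests it is exposed as a classmethod):

from mongothon.queries import ScopeBuilder

ScopeBuilder.unpack_scope({'live': True})
# -> ({'live': True}, {}, {})
ScopeBuilder.unpack_scope(({'live': True}, {'name': 1}))
# -> ({'live': True}, {'name': 1}, {})
ScopeBuilder.unpack_scope(({'live': True}, {'name': 1}, {'limit': 10}))
# -> ({'live': True}, {'name': 1}, {'limit': 10})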
|
13,297
|
gamechanger/mongothon
|
mongothon/queries.py
|
ScopeBuilder.register_fn
|
def register_fn(cls, f):
"""Registers a scope function on this builder."""
def inner(self, *args, **kwargs):
try:
query, projection, options = cls.unpack_scope(f(*args, **kwargs))
new_query = deepcopy(self.query)
new_projection = deepcopy(self.projection)
new_options = deepcopy(self.options)
deep_merge(query, new_query)
new_projection.update(projection)
new_options.update(options)
return ScopeBuilder(self.model, self.fns, new_query,
new_projection, new_options)
except ValueError:
raise ValueError("Scope function \"{}\ returns an invalid scope".format(f.__name__))
setattr(cls, f.__name__, inner)
|
python
|
def register_fn(cls, f):
"""Registers a scope function on this builder."""
def inner(self, *args, **kwargs):
try:
query, projection, options = cls.unpack_scope(f(*args, **kwargs))
new_query = deepcopy(self.query)
new_projection = deepcopy(self.projection)
new_options = deepcopy(self.options)
deep_merge(query, new_query)
new_projection.update(projection)
new_options.update(options)
return ScopeBuilder(self.model, self.fns, new_query,
new_projection, new_options)
except ValueError:
raise ValueError("Scope function \"{}\ returns an invalid scope".format(f.__name__))
setattr(cls, f.__name__, inner)
|
[
"def",
"register_fn",
"(",
"cls",
",",
"f",
")",
":",
"def",
"inner",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"try",
":",
"query",
",",
"projection",
",",
"options",
"=",
"cls",
".",
"unpack_scope",
"(",
"f",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
")",
"new_query",
"=",
"deepcopy",
"(",
"self",
".",
"query",
")",
"new_projection",
"=",
"deepcopy",
"(",
"self",
".",
"projection",
")",
"new_options",
"=",
"deepcopy",
"(",
"self",
".",
"options",
")",
"deep_merge",
"(",
"query",
",",
"new_query",
")",
"new_projection",
".",
"update",
"(",
"projection",
")",
"new_options",
".",
"update",
"(",
"options",
")",
"return",
"ScopeBuilder",
"(",
"self",
".",
"model",
",",
"self",
".",
"fns",
",",
"new_query",
",",
"new_projection",
",",
"new_options",
")",
"except",
"ValueError",
":",
"raise",
"ValueError",
"(",
"\"Scope function \\\"{}\\ returns an invalid scope\"",
".",
"format",
"(",
"f",
".",
"__name__",
")",
")",
"setattr",
"(",
"cls",
",",
"f",
".",
"__name__",
",",
"inner",
")"
] |
Registers a scope function on this builder.
|
[
"Registers",
"a",
"scope",
"function",
"on",
"this",
"builder",
"."
] |
5305bdae8e38d09bfe7881f1edc99ac0a2e6b96b
|
https://github.com/gamechanger/mongothon/blob/5305bdae8e38d09bfe7881f1edc99ac0a2e6b96b/mongothon/queries.py#L50-L66
|
13,298
|
gamechanger/mongothon
|
mongothon/queries.py
|
ScopeBuilder.cursor
|
def cursor(self):
"""
Returns a cursor for the currently assembled query, creating it if
it doesn't already exist.
"""
if not self._active_cursor:
self._active_cursor = self.model.find(self.query,
self.projection or None,
**self.options)
return self._active_cursor
|
python
|
def cursor(self):
"""
Returns a cursor for the currently assembled query, creating it if
it doesn't already exist.
"""
if not self._active_cursor:
self._active_cursor = self.model.find(self.query,
self.projection or None,
**self.options)
return self._active_cursor
|
[
"def",
"cursor",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"_active_cursor",
":",
"self",
".",
"_active_cursor",
"=",
"self",
".",
"model",
".",
"find",
"(",
"self",
".",
"query",
",",
"self",
".",
"projection",
"or",
"None",
",",
"*",
"*",
"self",
".",
"options",
")",
"return",
"self",
".",
"_active_cursor"
] |
Returns a cursor for the currently assembled query, creating it if
it doesn't already exist.
|
[
"Returns",
"a",
"cursor",
"for",
"the",
"currently",
"assembled",
"query",
"creating",
"it",
"if",
"it",
"doesn",
"t",
"already",
"exist",
"."
] |
5305bdae8e38d09bfe7881f1edc99ac0a2e6b96b
|
https://github.com/gamechanger/mongothon/blob/5305bdae8e38d09bfe7881f1edc99ac0a2e6b96b/mongothon/queries.py#L80-L89
|
13,299
|
gamechanger/mongothon
|
mongothon/model.py
|
Model._ensure_object_id
|
def _ensure_object_id(cls, id):
"""Checks whether the given id is an ObjectId instance, and if not wraps it."""
if isinstance(id, ObjectId):
return id
if isinstance(id, basestring) and OBJECTIDEXPR.match(id):
return ObjectId(id)
return id
|
python
|
def _ensure_object_id(cls, id):
"""Checks whether the given id is an ObjectId instance, and if not wraps it."""
if isinstance(id, ObjectId):
return id
if isinstance(id, basestring) and OBJECTIDEXPR.match(id):
return ObjectId(id)
return id
|
[
"def",
"_ensure_object_id",
"(",
"cls",
",",
"id",
")",
":",
"if",
"isinstance",
"(",
"id",
",",
"ObjectId",
")",
":",
"return",
"id",
"if",
"isinstance",
"(",
"id",
",",
"basestring",
")",
"and",
"OBJECTIDEXPR",
".",
"match",
"(",
"id",
")",
":",
"return",
"ObjectId",
"(",
"id",
")",
"return",
"id"
] |
Checks whether the given id is an ObjectId instance, and if not wraps it.
|
[
"Checks",
"whether",
"the",
"given",
"id",
"is",
"an",
"ObjectId",
"instance",
"and",
"if",
"not",
"wraps",
"it",
"."
] |
5305bdae8e38d09bfe7881f1edc99ac0a2e6b96b
|
https://github.com/gamechanger/mongothon/blob/5305bdae8e38d09bfe7881f1edc99ac0a2e6b96b/mongothon/model.py#L69-L77
|