id
int32 0
252k
| repo
stringlengths 7
55
| path
stringlengths 4
127
| func_name
stringlengths 1
88
| original_string
stringlengths 75
19.8k
| language
stringclasses 1
value | code
stringlengths 75
19.8k
| code_tokens
list | docstring
stringlengths 3
17.3k
| docstring_tokens
list | sha
stringlengths 40
40
| url
stringlengths 87
242
|
|---|---|---|---|---|---|---|---|---|---|---|---|
240,700
|
campbellr/smashrun-client
|
smashrun/client.py
|
Smashrun.get_polyline
|
def get_polyline(self, id_num, style='google'):
    """Fetch the polyline for the activity identified by ``id_num``.

    :param id_num: Identifier of the activity.
    :param style: Polyline format to request; one of 'google' (the
        default), 'svg', or 'geojson'. The default format omits the
        style segment from the request URL.
    """
    path_segments = ['my', 'activities', id_num, 'polyline']
    if style != 'google':
        path_segments.append(style)
    return self._json(self._build_url(*path_segments))
|
python
|
def get_polyline(self, id_num, style='google'):
    """Retrieve the activity's polyline in the requested format.

    :param id_num: Identifier of the activity whose polyline is wanted.
    :param style: One of 'google' (default), 'svg', or 'geojson'.
        Non-default styles add an extra path segment to the URL.
    """
    segments = ['my', 'activities', id_num, 'polyline']
    non_default = style != 'google'
    if non_default:
        segments.append(style)
    endpoint = self._build_url(*segments)
    return self._json(endpoint)
|
[
"def",
"get_polyline",
"(",
"self",
",",
"id_num",
",",
"style",
"=",
"'google'",
")",
":",
"parts",
"=",
"[",
"'my'",
",",
"'activities'",
",",
"id_num",
",",
"'polyline'",
"]",
"if",
"style",
"!=",
"'google'",
":",
"parts",
".",
"append",
"(",
"style",
")",
"url",
"=",
"self",
".",
"_build_url",
"(",
"*",
"parts",
")",
"return",
"self",
".",
"_json",
"(",
"url",
")"
] |
Return the polyline of the activity with the given id.
:param style: The type of polyline to return. May be one of
'google', 'svg', or 'geojson'.
|
[
"Return",
"the",
"polyline",
"of",
"the",
"activity",
"with",
"the",
"given",
"id",
"."
] |
2522cb4d0545cf482a49a9533f12aac94c5aecdc
|
https://github.com/campbellr/smashrun-client/blob/2522cb4d0545cf482a49a9533f12aac94c5aecdc/smashrun/client.py#L115-L127
|
240,701
|
campbellr/smashrun-client
|
smashrun/client.py
|
Smashrun.get_splits
|
def get_splits(self, id_num, unit='mi'):
    """Fetch the splits for the activity identified by ``id_num``.

    :param id_num: Identifier of the activity.
    :param unit: Distance unit for the splits, 'mi' (default) or 'km'.
    """
    endpoint = self._build_url('my', 'activities', id_num, 'splits', unit)
    return self._json(endpoint)
|
python
|
def get_splits(self, id_num, unit='mi'):
    """Retrieve per-split data for one activity.

    :param id_num: Identifier of the activity whose splits are wanted.
    :param unit: Unit used to compute splits; either 'mi' (default)
        or 'km'.
    """
    return self._json(
        self._build_url('my', 'activities', id_num, 'splits', unit))
|
[
"def",
"get_splits",
"(",
"self",
",",
"id_num",
",",
"unit",
"=",
"'mi'",
")",
":",
"url",
"=",
"self",
".",
"_build_url",
"(",
"'my'",
",",
"'activities'",
",",
"id_num",
",",
"'splits'",
",",
"unit",
")",
"return",
"self",
".",
"_json",
"(",
"url",
")"
] |
Return the splits of the activity with the given id.
:param unit: The unit to use for splits. May be one of
'mi' or 'km'.
|
[
"Return",
"the",
"splits",
"of",
"the",
"activity",
"with",
"the",
"given",
"id",
"."
] |
2522cb4d0545cf482a49a9533f12aac94c5aecdc
|
https://github.com/campbellr/smashrun-client/blob/2522cb4d0545cf482a49a9533f12aac94c5aecdc/smashrun/client.py#L129-L138
|
240,702
|
campbellr/smashrun-client
|
smashrun/client.py
|
Smashrun.get_stats
|
def get_stats(self, year=None, month=None):
    """Return stats for the given year and month.

    :param year: Optional year to scope the stats to.
    :param month: Optional month to scope the stats to; requires
        ``year`` to also be given.
    :raises ValueError: If ``month`` is specified without ``year``.
    """
    parts = ['my', 'stats']
    if month and not year:
        raise ValueError("month cannot be specified without year")
    if year:
        parts.append(year)
    if month:
        # Bug fix: previously this appended ``year`` a second time,
        # yielding /my/stats/<year>/<year>; the month segment belongs here.
        parts.append(month)
    url = self._build_url(*parts)
    return self._json(url)
|
python
|
def get_stats(self, year=None, month=None):
    """Return stats for the given year and month.

    :param year: Optional year to restrict the stats to.
    :param month: Optional month to restrict the stats to; only valid
        together with ``year``.
    :raises ValueError: If ``month`` is given without ``year``.
    """
    parts = ['my', 'stats']
    if month and not year:
        raise ValueError("month cannot be specified without year")
    if year:
        parts.append(year)
    if month:
        # Bug fix: the original appended ``year`` again here, so the
        # month was never part of the URL.
        parts.append(month)
    url = self._build_url(*parts)
    return self._json(url)
|
[
"def",
"get_stats",
"(",
"self",
",",
"year",
"=",
"None",
",",
"month",
"=",
"None",
")",
":",
"parts",
"=",
"[",
"'my'",
",",
"'stats'",
"]",
"if",
"month",
"and",
"not",
"year",
":",
"raise",
"ValueError",
"(",
"\"month cannot be specified without year\"",
")",
"if",
"year",
":",
"parts",
".",
"append",
"(",
"year",
")",
"if",
"month",
":",
"parts",
".",
"append",
"(",
"year",
")",
"url",
"=",
"self",
".",
"_build_url",
"(",
"*",
"parts",
")",
"return",
"self",
".",
"_json",
"(",
"url",
")"
] |
Return stats for the given year and month.
|
[
"Return",
"stats",
"for",
"the",
"given",
"year",
"and",
"month",
"."
] |
2522cb4d0545cf482a49a9533f12aac94c5aecdc
|
https://github.com/campbellr/smashrun-client/blob/2522cb4d0545cf482a49a9533f12aac94c5aecdc/smashrun/client.py#L140-L150
|
240,703
|
campbellr/smashrun-client
|
smashrun/client.py
|
Smashrun.create_weight
|
def create_weight(self, weight, date=None):
    """Submit a new weight record.

    :param weight: The weight, in kilograms.
    :param date: The date the weight was recorded (must be timezone
        aware). If not specified, the current date will be used.
    :raises ValueError: If ``date`` is provided but not timezone aware.
    """
    payload = {'weightInKilograms': weight}
    if date:
        # NOTE(review): ``is_aware`` suggests an arrow-style date object
        # rather than a plain datetime — confirm at the call site.
        if not date.is_aware():
            raise ValueError("provided date is not timezone aware")
        payload.update(date=date.isoformat())
    headers = {'Content-Type': 'application/json; charset=utf8'}
    response = self.session.post(self._build_url('my', 'body', 'weight'),
                                 data=json.dumps(payload),
                                 headers=headers)
    response.raise_for_status()
    return response
|
python
|
def create_weight(self, weight, date=None):
    """Record a new body-weight entry on the service.

    :param weight: Weight in kilograms.
    :param date: Optional timezone-aware date of the measurement; the
        service defaults to today when omitted.
    :raises ValueError: When ``date`` lacks timezone information.
    """
    url = self._build_url('my', 'body', 'weight')
    body = {'weightInKilograms': weight}
    if date:
        # assumes ``date`` exposes is_aware()/isoformat() (arrow-style)
        # — TODO confirm against callers.
        if not date.is_aware():
            raise ValueError("provided date is not timezone aware")
        body.update(date=date.isoformat())
    resp = self.session.post(
        url,
        data=json.dumps(body),
        headers={'Content-Type': 'application/json; charset=utf8'},
    )
    resp.raise_for_status()
    return resp
|
[
"def",
"create_weight",
"(",
"self",
",",
"weight",
",",
"date",
"=",
"None",
")",
":",
"url",
"=",
"self",
".",
"_build_url",
"(",
"'my'",
",",
"'body'",
",",
"'weight'",
")",
"data",
"=",
"{",
"'weightInKilograms'",
":",
"weight",
"}",
"if",
"date",
":",
"if",
"not",
"date",
".",
"is_aware",
"(",
")",
":",
"raise",
"ValueError",
"(",
"\"provided date is not timezone aware\"",
")",
"data",
".",
"update",
"(",
"date",
"=",
"date",
".",
"isoformat",
"(",
")",
")",
"headers",
"=",
"{",
"'Content-Type'",
":",
"'application/json; charset=utf8'",
"}",
"r",
"=",
"self",
".",
"session",
".",
"post",
"(",
"url",
",",
"data",
"=",
"json",
".",
"dumps",
"(",
"data",
")",
",",
"headers",
"=",
"headers",
")",
"r",
".",
"raise_for_status",
"(",
")",
"return",
"r"
] |
Submit a new weight record.
:param weight: The weight, in kilograms.
:param date: The date the weight was recorded. If not
specified, the current date will be used.
|
[
"Submit",
"a",
"new",
"weight",
"record",
"."
] |
2522cb4d0545cf482a49a9533f12aac94c5aecdc
|
https://github.com/campbellr/smashrun-client/blob/2522cb4d0545cf482a49a9533f12aac94c5aecdc/smashrun/client.py#L167-L184
|
240,704
|
kyan001/PyConsoleIOTools
|
consoleiotools.py
|
pause
|
def pause(msg="Press Enter to Continue..."):
    """Show *msg* in yellow and block until the user presses Enter."""
    prompt = '\n' + Fore.YELLOW + msg + Fore.RESET
    print(prompt, end='')
    input()
|
python
|
def pause(msg="Press Enter to Continue..."):
    """Display a yellow prompt and wait for the user to hit Enter."""
    colored = ''.join(['\n', Fore.YELLOW, msg, Fore.RESET])
    print(colored, end='')
    input()
|
[
"def",
"pause",
"(",
"msg",
"=",
"\"Press Enter to Continue...\"",
")",
":",
"print",
"(",
"'\\n'",
"+",
"Fore",
".",
"YELLOW",
"+",
"msg",
"+",
"Fore",
".",
"RESET",
",",
"end",
"=",
"''",
")",
"input",
"(",
")"
] |
press to continue
|
[
"press",
"to",
"continue"
] |
cc0cc2b9caf62a0fd1a9a72952a6f8c4d17694c4
|
https://github.com/kyan001/PyConsoleIOTools/blob/cc0cc2b9caf62a0fd1a9a72952a6f8c4d17694c4/consoleiotools.py#L76-L79
|
240,705
|
fangpenlin/pyramid-handy
|
pyramid_handy/tweens/allow_origin.py
|
allow_origin_tween_factory
|
def allow_origin_tween_factory(handler, registry):
    """Allow cross origin XHR requests
    """
    # Pyramid tween factory: wraps ``handler`` so that responses to
    # requests from an allowed origin carry CORS headers.
    def allow_origin_tween(request):
        settings = request.registry.settings
        # Origin header sent by the browser; None for non-CORS requests.
        request_origin = request.headers.get('origin')
        def is_origin_allowed(origin):
            # 'api.allowed_origins' may be a list or a newline-separated
            # string (e.g. from an .ini file).
            allowed_origins = (
                request.registry.settings.get('api.allowed_origins', [])
            )
            if isinstance(allowed_origins, str):
                allowed_origins = allowed_origins.splitlines()
            if not origin:
                return False
            # Prefix match against each configured origin; note the
            # request origin is lowercased but the entries are not.
            for allowed_origin in allowed_origins:
                if origin.lower().startswith(allowed_origin):
                    return True
            return False
        def allow_origin_callback(request, response):
            """Set access-control-allow-origin et. al headers
            """
            # Each setting may be a literal string or a callable taking
            # the request and returning the header value.
            allowed_methods = settings.get(
                'api.allowed_methods',
                str('GET, POST, PUT, DELETE, PATCH, OPTIONS'),
            )
            if callable(allowed_methods):
                allowed_methods = allowed_methods(request)
            allowed_headers = settings.get(
                'api.allowed_headers',
                str('Content-Type, Authorization, Range'),
            )
            if callable(allowed_headers):
                allowed_headers = allowed_headers(request)
            allowed_credentials = settings.get(
                'api.allowed_credentials',
                str('true'),
            )
            if callable(allowed_credentials):
                allowed_credentials = allowed_credentials(request)
            # Echo the request's own origin instead of "*" so that
            # credentialed requests can succeed.
            response.headers['Access-Control-Allow-Origin'] = request_origin
            if allowed_credentials:
                response.headers[str('Access-Control-Allow-Credentials')] = \
                    str(allowed_credentials)
            if allowed_methods:
                response.headers[str('Access-Control-Allow-Methods')] = str(
                    allowed_methods,
                )
            if allowed_headers:
                response.headers[str('Access-Control-Allow-Headers')] = str(
                    allowed_headers,
                )
        # Requests from unknown origins pass through with no CORS headers.
        if not is_origin_allowed(request_origin):
            return handler(request)
        request.add_response_callback(allow_origin_callback)
        return handler(request)
    return allow_origin_tween
|
python
|
def allow_origin_tween_factory(handler, registry):
    """Allow cross origin XHR requests
    """
    # Tween factory for Pyramid: returns a wrapper around ``handler``
    # that attaches CORS headers when the request origin is allowed.
    def allow_origin_tween(request):
        settings = request.registry.settings
        # Browser-supplied Origin header; absent (None) for same-origin
        # or non-XHR requests.
        request_origin = request.headers.get('origin')
        def is_origin_allowed(origin):
            # Accepts either a list or a newline-separated string of
            # allowed origin prefixes.
            allowed_origins = (
                request.registry.settings.get('api.allowed_origins', [])
            )
            if isinstance(allowed_origins, str):
                allowed_origins = allowed_origins.splitlines()
            if not origin:
                return False
            # Prefix match per configured entry.
            for allowed_origin in allowed_origins:
                if origin.lower().startswith(allowed_origin):
                    return True
            return False
        def allow_origin_callback(request, response):
            """Set access-control-allow-origin et. al headers
            """
            # Settings may be strings or callables of the request.
            allowed_methods = settings.get(
                'api.allowed_methods',
                str('GET, POST, PUT, DELETE, PATCH, OPTIONS'),
            )
            if callable(allowed_methods):
                allowed_methods = allowed_methods(request)
            allowed_headers = settings.get(
                'api.allowed_headers',
                str('Content-Type, Authorization, Range'),
            )
            if callable(allowed_headers):
                allowed_headers = allowed_headers(request)
            allowed_credentials = settings.get(
                'api.allowed_credentials',
                str('true'),
            )
            if callable(allowed_credentials):
                allowed_credentials = allowed_credentials(request)
            # Reflect the caller's origin (required for credentialed CORS).
            response.headers['Access-Control-Allow-Origin'] = request_origin
            if allowed_credentials:
                response.headers[str('Access-Control-Allow-Credentials')] = \
                    str(allowed_credentials)
            if allowed_methods:
                response.headers[str('Access-Control-Allow-Methods')] = str(
                    allowed_methods,
                )
            if allowed_headers:
                response.headers[str('Access-Control-Allow-Headers')] = str(
                    allowed_headers,
                )
        # Disallowed origins: just run the handler, add no CORS headers.
        if not is_origin_allowed(request_origin):
            return handler(request)
        request.add_response_callback(allow_origin_callback)
        return handler(request)
    return allow_origin_tween
|
[
"def",
"allow_origin_tween_factory",
"(",
"handler",
",",
"registry",
")",
":",
"def",
"allow_origin_tween",
"(",
"request",
")",
":",
"settings",
"=",
"request",
".",
"registry",
".",
"settings",
"request_origin",
"=",
"request",
".",
"headers",
".",
"get",
"(",
"'origin'",
")",
"def",
"is_origin_allowed",
"(",
"origin",
")",
":",
"allowed_origins",
"=",
"(",
"request",
".",
"registry",
".",
"settings",
".",
"get",
"(",
"'api.allowed_origins'",
",",
"[",
"]",
")",
")",
"if",
"isinstance",
"(",
"allowed_origins",
",",
"str",
")",
":",
"allowed_origins",
"=",
"allowed_origins",
".",
"splitlines",
"(",
")",
"if",
"not",
"origin",
":",
"return",
"False",
"for",
"allowed_origin",
"in",
"allowed_origins",
":",
"if",
"origin",
".",
"lower",
"(",
")",
".",
"startswith",
"(",
"allowed_origin",
")",
":",
"return",
"True",
"return",
"False",
"def",
"allow_origin_callback",
"(",
"request",
",",
"response",
")",
":",
"\"\"\"Set access-control-allow-origin et. al headers\n\n \"\"\"",
"allowed_methods",
"=",
"settings",
".",
"get",
"(",
"'api.allowed_methods'",
",",
"str",
"(",
"'GET, POST, PUT, DELETE, PATCH, OPTIONS'",
")",
",",
")",
"if",
"callable",
"(",
"allowed_methods",
")",
":",
"allowed_methods",
"=",
"allowed_methods",
"(",
"request",
")",
"allowed_headers",
"=",
"settings",
".",
"get",
"(",
"'api.allowed_headers'",
",",
"str",
"(",
"'Content-Type, Authorization, Range'",
")",
",",
")",
"if",
"callable",
"(",
"allowed_headers",
")",
":",
"allowed_headers",
"=",
"allowed_headers",
"(",
"request",
")",
"allowed_credentials",
"=",
"settings",
".",
"get",
"(",
"'api.allowed_credentials'",
",",
"str",
"(",
"'true'",
")",
",",
")",
"if",
"callable",
"(",
"allowed_credentials",
")",
":",
"allowed_credentials",
"=",
"allowed_credentials",
"(",
"request",
")",
"response",
".",
"headers",
"[",
"'Access-Control-Allow-Origin'",
"]",
"=",
"request_origin",
"if",
"allowed_credentials",
":",
"response",
".",
"headers",
"[",
"str",
"(",
"'Access-Control-Allow-Credentials'",
")",
"]",
"=",
"str",
"(",
"allowed_credentials",
")",
"if",
"allowed_methods",
":",
"response",
".",
"headers",
"[",
"str",
"(",
"'Access-Control-Allow-Methods'",
")",
"]",
"=",
"str",
"(",
"allowed_methods",
",",
")",
"if",
"allowed_headers",
":",
"response",
".",
"headers",
"[",
"str",
"(",
"'Access-Control-Allow-Headers'",
")",
"]",
"=",
"str",
"(",
"allowed_headers",
",",
")",
"if",
"not",
"is_origin_allowed",
"(",
"request_origin",
")",
":",
"return",
"handler",
"(",
"request",
")",
"request",
".",
"add_response_callback",
"(",
"allow_origin_callback",
")",
"return",
"handler",
"(",
"request",
")",
"return",
"allow_origin_tween"
] |
Allow cross origin XHR requests
|
[
"Allow",
"cross",
"origin",
"XHR",
"requests"
] |
e3cbc19224ab1f0a14aab556990bceabd2d1f658
|
https://github.com/fangpenlin/pyramid-handy/blob/e3cbc19224ab1f0a14aab556990bceabd2d1f658/pyramid_handy/tweens/allow_origin.py#L6-L70
|
240,706
|
fr33jc/bang
|
bang/providers/hpcloud/reddwarf.py
|
DBSecurityGroupRules.create
|
def create(self, dbsecgroup_id, source_cidr, port=3306):
    """
    Creates a security group rule.
    :param str dbsecgroup_id: The ID of the security group in which this
        rule should be created.
    :param str source_cidr: The source IP address range from which access
        should be allowed.
    :param int port: The port number used by db clients to connect to the
        db server. This would have been specified at db instance creation
        time.
    :rtype: :class:`DBSecurityGroupRule`.
    """
    rule = {
        "security_group_id": dbsecgroup_id,
        "cidr": source_cidr,
        "from_port": port,
        "to_port": port,
    }
    return self._create("/security-group-rules",
                        {"security_group_rule": rule},
                        "security_group_rule")
|
python
|
def create(self, dbsecgroup_id, source_cidr, port=3306):
    """
    Creates a security group rule.
    :param str dbsecgroup_id: ID of the security group that should
        contain the new rule.
    :param str source_cidr: Source IP range to allow access from.
    :param int port: Port the db clients use to reach the server, as
        chosen at db instance creation time.
    :rtype: :class:`DBSecurityGroupRule`.
    """
    payload = {
        "security_group_rule": {
            "security_group_id": dbsecgroup_id,
            "cidr": source_cidr,
            "from_port": port,
            "to_port": port,
        },
    }
    return self._create(
        "/security-group-rules", payload, "security_group_rule")
|
[
"def",
"create",
"(",
"self",
",",
"dbsecgroup_id",
",",
"source_cidr",
",",
"port",
"=",
"3306",
")",
":",
"body",
"=",
"{",
"\"security_group_rule\"",
":",
"{",
"\"security_group_id\"",
":",
"dbsecgroup_id",
",",
"\"cidr\"",
":",
"source_cidr",
",",
"\"from_port\"",
":",
"port",
",",
"\"to_port\"",
":",
"port",
",",
"}",
"}",
"return",
"self",
".",
"_create",
"(",
"\"/security-group-rules\"",
",",
"body",
",",
"\"security_group_rule\"",
")"
] |
Creates a security group rule.
:param str dbsecgroup_id: The ID of the security group in which this
rule should be created.
:param str source_cidr: The source IP address range from which access
should be allowed.
:param int port: The port number used by db clients to connect to the
db server. This would have been specified at db instance creation
time.
:rtype: :class:`DBSecurityGroupRule`.
|
[
"Creates",
"a",
"security",
"group",
"rule",
"."
] |
8f000713f88d2a9a8c1193b63ca10a6578560c16
|
https://github.com/fr33jc/bang/blob/8f000713f88d2a9a8c1193b63ca10a6578560c16/bang/providers/hpcloud/reddwarf.py#L65-L89
|
240,707
|
dusty-phillips/opterator
|
examples/cp.py
|
main
|
def main(filename1, filename2, recursive=False, backup=False,
         suffix='~', *other_filenames):
    '''An example copy script with some example parameters that might
    be used in a file or directory copy command.
    :param recursive: -r --recursive copy directories
        recursively
    :param backup: -b --backup backup any files you copy over
    :param suffix: -S --suffix override the usual backup
        suffix '''
    # NOTE: the docstring above doubles as the opterator CLI spec;
    # the option definitions in it must not change.
    sources = [filename1, filename2, *other_filenames]
    destination = sources.pop()
    print("You asked to move %s to %s" % (sources, destination))
    if recursive:
        print("You asked to copy directories recursively.")
    if backup:
        print("You asked to backup any overwritten files.")
        print("You would use the suffix %s" % suffix)
|
python
|
def main(filename1, filename2, recursive=False, backup=False,
         suffix='~', *other_filenames):
    '''An example copy script with some example parameters that might
    be used in a file or directory copy command.
    :param recursive: -r --recursive copy directories
        recursively
    :param backup: -b --backup backup any files you copy over
    :param suffix: -S --suffix override the usual backup
        suffix '''
    # Docstring is consumed by opterator to build the option parser,
    # so its :param lines are functional and kept verbatim.
    all_names = [filename1, filename2] + list(other_filenames)
    target = all_names.pop()
    print("You asked to move %s to %s" % (all_names, target))
    if recursive:
        print("You asked to copy directories recursively.")
    if backup:
        print("You asked to backup any overwritten files.")
        print("You would use the suffix %s" % suffix)
|
[
"def",
"main",
"(",
"filename1",
",",
"filename2",
",",
"recursive",
"=",
"False",
",",
"backup",
"=",
"False",
",",
"suffix",
"=",
"'~'",
",",
"*",
"other_filenames",
")",
":",
"filenames",
"=",
"[",
"filename1",
",",
"filename2",
"]",
"+",
"list",
"(",
"other_filenames",
")",
"destination",
"=",
"filenames",
".",
"pop",
"(",
")",
"print",
"(",
"\"You asked to move %s to %s\"",
"%",
"(",
"filenames",
",",
"destination",
")",
")",
"if",
"recursive",
":",
"print",
"(",
"\"You asked to copy directories recursively.\"",
")",
"if",
"backup",
":",
"print",
"(",
"\"You asked to backup any overwritten files.\"",
")",
"print",
"(",
"\"You would use the suffix %s\"",
"%",
"suffix",
")"
] |
An example copy script with some example parameters that might
be used in a file or directory copy command.
:param recursive: -r --recursive copy directories
recursively
:param backup: -b --backup backup any files you copy over
:param suffix: -S --suffix override the usual backup
suffix
|
[
"An",
"example",
"copy",
"script",
"with",
"some",
"example",
"parameters",
"that",
"might",
"be",
"used",
"in",
"a",
"file",
"or",
"directory",
"copy",
"command",
"."
] |
84fe31f22c73dc0a3666ed82c179461b1799c257
|
https://github.com/dusty-phillips/opterator/blob/84fe31f22c73dc0a3666ed82c179461b1799c257/examples/cp.py#L5-L23
|
240,708
|
emin63/eyap
|
eyap/core/github_comments.py
|
GitHubCommentGroup.parse_date
|
def parse_date(my_date):
    """Coerce *my_date* into a naive ``datetime.datetime``.

    :param my_date: Either a ``datetime.datetime`` or a string in
        '%Y-%m-%dT%H:%M:%SZ' format.

    :return: A ``datetime.datetime`` with no timezone info.

    :raises ValueError: If ``my_date`` is neither of the accepted types.
    """
    if isinstance(my_date, str):
        parsed = datetime.datetime.strptime(my_date, '%Y-%m-%dT%H:%M:%SZ')
    elif isinstance(my_date, datetime.datetime):
        parsed = my_date
    else:
        raise ValueError('Unexpected date format for "%s" of type "%s"' % (
            str(my_date), type(my_date)))
    # Downstream comparisons require naive datetimes throughout.
    assert parsed.tzinfo is None, 'Unexpected tzinfo for date %s' % (
        parsed)
    return parsed
|
python
|
def parse_date(my_date):
    """Normalize *my_date* to a timezone-naive ``datetime.datetime``.

    :param my_date: A ``datetime.datetime`` (passed through) or a
        string in '%Y-%m-%dT%H:%M:%SZ' format (parsed).

    :return: The naive ``datetime.datetime``.

    :raises ValueError: For any other input type.
    """
    if isinstance(my_date, datetime.datetime):
        normalized = my_date
    elif isinstance(my_date, str):
        normalized = datetime.datetime.strptime(
            my_date, '%Y-%m-%dT%H:%M:%SZ')
    else:
        raise ValueError('Unexpected date format for "%s" of type "%s"' % (
            str(my_date), type(my_date)))
    # Guard: the rest of the module assumes naive datetimes.
    assert normalized.tzinfo is None, 'Unexpected tzinfo for date %s' % (
        normalized)
    return normalized
|
[
"def",
"parse_date",
"(",
"my_date",
")",
":",
"if",
"isinstance",
"(",
"my_date",
",",
"datetime",
".",
"datetime",
")",
":",
"result",
"=",
"my_date",
"elif",
"isinstance",
"(",
"my_date",
",",
"str",
")",
":",
"result",
"=",
"datetime",
".",
"datetime",
".",
"strptime",
"(",
"my_date",
",",
"'%Y-%m-%dT%H:%M:%SZ'",
")",
"else",
":",
"raise",
"ValueError",
"(",
"'Unexpected date format for \"%s\" of type \"%s\"'",
"%",
"(",
"str",
"(",
"my_date",
")",
",",
"type",
"(",
"my_date",
")",
")",
")",
"assert",
"result",
".",
"tzinfo",
"is",
"None",
",",
"'Unexpected tzinfo for date %s'",
"%",
"(",
"result",
")",
"return",
"result"
] |
Parse a date into canonical format of datetime.datetime.
:param my_date: Either datetime.datetime or string in
'%Y-%m-%dT%H:%M:%SZ' format.
~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-
:return: A datetime.datetime.
~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-
PURPOSE: Parse a date and make sure it has no time zone.
|
[
"Parse",
"a",
"date",
"into",
"canonical",
"format",
"of",
"datetime",
".",
"dateime",
"."
] |
a610761973b478ca0e864e970be05ce29d5994a5
|
https://github.com/emin63/eyap/blob/a610761973b478ca0e864e970be05ce29d5994a5/eyap/core/github_comments.py#L70-L94
|
240,709
|
emin63/eyap
|
eyap/core/github_comments.py
|
GitHubCommentGroup.get_thread_info
|
def get_thread_info(self, enforce_re=True, latest_date=None):
    """Return a json list with information about threads in the group.
    :param enforce_re=True: Whether to require titles to match
                            regexp in self.topic_re.
    :param latest_date=None: Optional datetime.datetime for latest
                             date to consider. Things past this
                             are ignored.
    ~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-
    :return: List of github items found.
    ~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-
    PURPOSE: Return a json list with information about threads
             in the group. Along with latest_date, this can be used
             to show issues.
    """
    result = []
    my_re = re.compile(self.topic_re)
    url = '%s/issues?sort=updated' % (self.base_url)
    # Normalize to a naive datetime; parse_date also accepts strings.
    latest_date = self.parse_date(latest_date) if latest_date else None
    # Walk github's paginated issue listing until there is no next page.
    while url:
        # Use basic auth only when a user is configured.
        kwargs = {} if not self.gh_info.user else {'auth': (
            self.gh_info.user, self.gh_info.token)}
        my_req = requests.get(url, params=self.params, **kwargs)
        my_json = my_req.json()
        for item in my_json:
            if (not enforce_re) or my_re.search(item['title']):
                idate = self.parse_date(item['updated_at'])
                # Ignore items updated after the cutoff date.
                if (latest_date is not None and idate > latest_date):
                    logging.debug('Skip %s since updated at %s > %s',
                                  item['title'], idate, latest_date)
                    continue
                result.append(item)
                # Early exit once the optional thread cap is reached.
                if self.max_threads is not None and len(
                        result) >= self.max_threads:
                    logging.debug('Stopping after max_threads=%i threads.',
                                  len(result))
                    return result
        url = None
        # Extract the rel="next" URL from the RFC-5988 Link header,
        # if present; otherwise the while loop terminates.
        if 'link' in my_req.headers:
            link = my_req.headers['link'].split(',')
            for thing in link:
                potential_url, part = thing.split('; ')
                if part == 'rel="next"':
                    url = potential_url.lstrip(' <').rstrip('> ')
    return result
|
python
|
def get_thread_info(self, enforce_re=True, latest_date=None):
    """Return a json list with information about threads in the group.
    :param enforce_re=True: Whether to require titles to match
                            regexp in self.topic_re.
    :param latest_date=None: Optional datetime.datetime for latest
                             date to consider. Things past this
                             are ignored.
    ~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-
    :return: List of github items found.
    ~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-
    PURPOSE: Return a json list with information about threads
             in the group. Along with latest_date, this can be used
             to show issues.
    """
    result = []
    my_re = re.compile(self.topic_re)
    url = '%s/issues?sort=updated' % (self.base_url)
    # Coerce the cutoff to a naive datetime (parse_date handles str).
    latest_date = self.parse_date(latest_date) if latest_date else None
    # Iterate over github's paginated issue listing.
    while url:
        # Anonymous access when no user configured; basic auth otherwise.
        kwargs = {} if not self.gh_info.user else {'auth': (
            self.gh_info.user, self.gh_info.token)}
        my_req = requests.get(url, params=self.params, **kwargs)
        my_json = my_req.json()
        for item in my_json:
            if (not enforce_re) or my_re.search(item['title']):
                idate = self.parse_date(item['updated_at'])
                # Drop items newer than the requested cutoff.
                if (latest_date is not None and idate > latest_date):
                    logging.debug('Skip %s since updated at %s > %s',
                                  item['title'], idate, latest_date)
                    continue
                result.append(item)
                # Stop as soon as the optional max_threads cap is hit.
                if self.max_threads is not None and len(
                        result) >= self.max_threads:
                    logging.debug('Stopping after max_threads=%i threads.',
                                  len(result))
                    return result
        url = None
        # Pull the next page's URL out of the Link header, if any.
        if 'link' in my_req.headers:
            link = my_req.headers['link'].split(',')
            for thing in link:
                potential_url, part = thing.split('; ')
                if part == 'rel="next"':
                    url = potential_url.lstrip(' <').rstrip('> ')
    return result
|
[
"def",
"get_thread_info",
"(",
"self",
",",
"enforce_re",
"=",
"True",
",",
"latest_date",
"=",
"None",
")",
":",
"result",
"=",
"[",
"]",
"my_re",
"=",
"re",
".",
"compile",
"(",
"self",
".",
"topic_re",
")",
"url",
"=",
"'%s/issues?sort=updated'",
"%",
"(",
"self",
".",
"base_url",
")",
"latest_date",
"=",
"self",
".",
"parse_date",
"(",
"latest_date",
")",
"if",
"latest_date",
"else",
"None",
"while",
"url",
":",
"kwargs",
"=",
"{",
"}",
"if",
"not",
"self",
".",
"gh_info",
".",
"user",
"else",
"{",
"'auth'",
":",
"(",
"self",
".",
"gh_info",
".",
"user",
",",
"self",
".",
"gh_info",
".",
"token",
")",
"}",
"my_req",
"=",
"requests",
".",
"get",
"(",
"url",
",",
"params",
"=",
"self",
".",
"params",
",",
"*",
"*",
"kwargs",
")",
"my_json",
"=",
"my_req",
".",
"json",
"(",
")",
"for",
"item",
"in",
"my_json",
":",
"if",
"(",
"not",
"enforce_re",
")",
"or",
"my_re",
".",
"search",
"(",
"item",
"[",
"'title'",
"]",
")",
":",
"idate",
"=",
"self",
".",
"parse_date",
"(",
"item",
"[",
"'updated_at'",
"]",
")",
"if",
"(",
"latest_date",
"is",
"not",
"None",
"and",
"idate",
">",
"latest_date",
")",
":",
"logging",
".",
"debug",
"(",
"'Skip %s since updated at %s > %s'",
",",
"item",
"[",
"'title'",
"]",
",",
"idate",
",",
"latest_date",
")",
"continue",
"result",
".",
"append",
"(",
"item",
")",
"if",
"self",
".",
"max_threads",
"is",
"not",
"None",
"and",
"len",
"(",
"result",
")",
">=",
"self",
".",
"max_threads",
":",
"logging",
".",
"debug",
"(",
"'Stopping after max_threads=%i threads.'",
",",
"len",
"(",
"result",
")",
")",
"return",
"result",
"url",
"=",
"None",
"if",
"'link'",
"in",
"my_req",
".",
"headers",
":",
"link",
"=",
"my_req",
".",
"headers",
"[",
"'link'",
"]",
".",
"split",
"(",
"','",
")",
"for",
"thing",
"in",
"link",
":",
"potential_url",
",",
"part",
"=",
"thing",
".",
"split",
"(",
"'; '",
")",
"if",
"part",
"==",
"'rel=\"next\"'",
":",
"url",
"=",
"potential_url",
".",
"lstrip",
"(",
"' <'",
")",
".",
"rstrip",
"(",
"'> '",
")",
"return",
"result"
] |
Return a json list with information about threads in the group.
:param enforce_re=True: Whether to require titles to match
regexp in self.topic_re.
:param latest_date=None: Optional datetime.datetime for latest
date to consider. Things past this
are ignored.
~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-
:return: List of github items found.
~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-
PURPOSE: Return a json list with information about threads
in the group. Along with latest_date, this can be used
to show issues.
|
[
"Return",
"a",
"json",
"list",
"with",
"information",
"about",
"threads",
"in",
"the",
"group",
"."
] |
a610761973b478ca0e864e970be05ce29d5994a5
|
https://github.com/emin63/eyap/blob/a610761973b478ca0e864e970be05ce29d5994a5/eyap/core/github_comments.py#L96-L147
|
240,710
|
emin63/eyap
|
eyap/core/github_comments.py
|
GitHubCommentGroup.export
|
def export(self, out_filename):
    """Write every matching thread into a zip archive at *out_filename*.

    Each archive member is named ``<number>__<title>`` and contains the
    thread's comments serialized as a JSON list.
    """
    with zipfile.ZipFile(out_filename, 'w', zipfile.ZIP_DEFLATED) as arc:
        infos = list(self.get_thread_info())
        for idx, info in enumerate(infos):
            logging.info('Working on item %i : %s', idx, info['number'])
            thread = GitHubCommentThread(
                self.gh_info.owner, self.gh_info.realm, info['title'],
                self.gh_info.user, self.gh_info.token,
                thread_id=info['number'])
            section = thread.get_comment_section()
            comment_dicts = [c.to_dict() for c in section.comments]
            arc.writestr('%i__%s' % (info['number'], info['title']),
                         json.dumps(comment_dicts))
|
python
|
def export(self, out_filename):
    """Export desired threads as a zip archive written to *out_filename*.

    One member per thread, named ``<number>__<title>``, holding the
    thread's comments as a JSON-encoded list of dicts.
    """
    with zipfile.ZipFile(out_filename, 'w', zipfile.ZIP_DEFLATED) as arc:
        for position, entry in enumerate(list(self.get_thread_info())):
            logging.info('Working on item %i : %s', position,
                         entry['number'])
            comment_thread = GitHubCommentThread(
                self.gh_info.owner, self.gh_info.realm, entry['title'],
                self.gh_info.user, self.gh_info.token,
                thread_id=entry['number'])
            comments = comment_thread.get_comment_section().comments
            serialized = json.dumps([c.to_dict() for c in comments])
            arc.writestr('%i__%s' % (entry['number'], entry['title']),
                         serialized)
|
[
"def",
"export",
"(",
"self",
",",
"out_filename",
")",
":",
"with",
"zipfile",
".",
"ZipFile",
"(",
"out_filename",
",",
"'w'",
",",
"zipfile",
".",
"ZIP_DEFLATED",
")",
"as",
"arc",
":",
"id_list",
"=",
"list",
"(",
"self",
".",
"get_thread_info",
"(",
")",
")",
"for",
"num",
",",
"my_info",
"in",
"enumerate",
"(",
"id_list",
")",
":",
"logging",
".",
"info",
"(",
"'Working on item %i : %s'",
",",
"num",
",",
"my_info",
"[",
"'number'",
"]",
")",
"my_thread",
"=",
"GitHubCommentThread",
"(",
"self",
".",
"gh_info",
".",
"owner",
",",
"self",
".",
"gh_info",
".",
"realm",
",",
"my_info",
"[",
"'title'",
"]",
",",
"self",
".",
"gh_info",
".",
"user",
",",
"self",
".",
"gh_info",
".",
"token",
",",
"thread_id",
"=",
"my_info",
"[",
"'number'",
"]",
")",
"csec",
"=",
"my_thread",
".",
"get_comment_section",
"(",
")",
"cdict",
"=",
"[",
"item",
".",
"to_dict",
"(",
")",
"for",
"item",
"in",
"csec",
".",
"comments",
"]",
"my_json",
"=",
"json",
".",
"dumps",
"(",
"cdict",
")",
"arc",
".",
"writestr",
"(",
"'%i__%s'",
"%",
"(",
"my_info",
"[",
"'number'",
"]",
",",
"my_info",
"[",
"'title'",
"]",
")",
",",
"my_json",
")"
] |
Export desired threads as a zipfile to out_filename.
|
[
"Export",
"desired",
"threads",
"as",
"a",
"zipfile",
"to",
"out_filename",
"."
] |
a610761973b478ca0e864e970be05ce29d5994a5
|
https://github.com/emin63/eyap/blob/a610761973b478ca0e864e970be05ce29d5994a5/eyap/core/github_comments.py#L149-L164
|
240,711
|
emin63/eyap
|
eyap/core/github_comments.py
|
GitHubCommentThread.sleep_if_necessary
|
def sleep_if_necessary(cls, user, token, endpoint='search', msg=''):
    """Sleep briefly when the github rate limit is nearly exhausted.

    :param user: Github username; falsy means anonymous access.
    :param token: Token paired with *user* for basic auth.
    :param endpoint: Rate-limit resource bucket to inspect.
    :param msg: Extra text appended to the warning log line.

    :return: True if we slept, False otherwise.
    """
    auth_kw = {'auth': (user, token)} if user else {}
    limits = requests.get('https://api.github.com/rate_limit', **auth_kw)
    remaining = limits.json()['resources'][endpoint]['remaining']
    logging.debug('Search remaining on github is at %s', remaining)
    sleep_time = 120 if remaining <= 5 else 0
    if not sleep_time:
        return False
    logging.warning('Sleep %i since github requests remaining = %i%s',
                    sleep_time, remaining, msg)
    time.sleep(sleep_time)
    return True
|
python
|
def sleep_if_necessary(cls, user, token, endpoint='search', msg=''):
"""Sleep a little if hit github recently to honor rate limit.
"""
my_kw = {'auth': (user, token)} if user else {}
info = requests.get('https://api.github.com/rate_limit', **my_kw)
info_dict = info.json()
remaining = info_dict['resources'][endpoint]['remaining']
logging.debug('Search remaining on github is at %s', remaining)
if remaining <= 5:
sleep_time = 120
else:
sleep_time = 0
if sleep_time:
logging.warning('Sleep %i since github requests remaining = %i%s',
sleep_time, remaining, msg)
time.sleep(sleep_time)
return True
return False
|
[
"def",
"sleep_if_necessary",
"(",
"cls",
",",
"user",
",",
"token",
",",
"endpoint",
"=",
"'search'",
",",
"msg",
"=",
"''",
")",
":",
"my_kw",
"=",
"{",
"'auth'",
":",
"(",
"user",
",",
"token",
")",
"}",
"if",
"user",
"else",
"{",
"}",
"info",
"=",
"requests",
".",
"get",
"(",
"'https://api.github.com/rate_limit'",
",",
"*",
"*",
"my_kw",
")",
"info_dict",
"=",
"info",
".",
"json",
"(",
")",
"remaining",
"=",
"info_dict",
"[",
"'resources'",
"]",
"[",
"endpoint",
"]",
"[",
"'remaining'",
"]",
"logging",
".",
"debug",
"(",
"'Search remaining on github is at %s'",
",",
"remaining",
")",
"if",
"remaining",
"<=",
"5",
":",
"sleep_time",
"=",
"120",
"else",
":",
"sleep_time",
"=",
"0",
"if",
"sleep_time",
":",
"logging",
".",
"warning",
"(",
"'Sleep %i since github requests remaining = %i%s'",
",",
"sleep_time",
",",
"remaining",
",",
"msg",
")",
"time",
".",
"sleep",
"(",
"sleep_time",
")",
"return",
"True",
"return",
"False"
] |
Sleep a little if hit github recently to honor rate limit.
|
[
"Sleep",
"a",
"little",
"if",
"hit",
"github",
"recently",
"to",
"honor",
"rate",
"limit",
"."
] |
a610761973b478ca0e864e970be05ce29d5994a5
|
https://github.com/emin63/eyap/blob/a610761973b478ca0e864e970be05ce29d5994a5/eyap/core/github_comments.py#L217-L236
|
240,712
|
emin63/eyap
|
eyap/core/github_comments.py
|
GitHubCommentThread.update_cache_key
|
def update_cache_key(cls, cache_key, item=None):
"""Get item in cache for cache_key and add item if item is not None.
"""
contents = cls.__thread_id_cache.get(cache_key, None)
if item is not None:
cls.__thread_id_cache[cache_key] = item
return contents
|
python
|
def update_cache_key(cls, cache_key, item=None):
"""Get item in cache for cache_key and add item if item is not None.
"""
contents = cls.__thread_id_cache.get(cache_key, None)
if item is not None:
cls.__thread_id_cache[cache_key] = item
return contents
|
[
"def",
"update_cache_key",
"(",
"cls",
",",
"cache_key",
",",
"item",
"=",
"None",
")",
":",
"contents",
"=",
"cls",
".",
"__thread_id_cache",
".",
"get",
"(",
"cache_key",
",",
"None",
")",
"if",
"item",
"is",
"not",
"None",
":",
"cls",
".",
"__thread_id_cache",
"[",
"cache_key",
"]",
"=",
"item",
"return",
"contents"
] |
Get item in cache for cache_key and add item if item is not None.
|
[
"Get",
"item",
"in",
"cache",
"for",
"cache_key",
"and",
"add",
"item",
"if",
"item",
"is",
"not",
"None",
"."
] |
a610761973b478ca0e864e970be05ce29d5994a5
|
https://github.com/emin63/eyap/blob/a610761973b478ca0e864e970be05ce29d5994a5/eyap/core/github_comments.py#L239-L246
|
240,713
|
emin63/eyap
|
eyap/core/github_comments.py
|
GitHubCommentThread.lookup_thread_id
|
def lookup_thread_id(self):
"""Lookup thread id as required by CommentThread.lookup_thread_id.
This implementation will query GitHub with the required parameters
to try and find the topic for the owner, realm, topic, etc., specified
in init.
"""
query_string = 'in:title "%s" repo:%s/%s' % (
self.topic, self.owner, self.realm)
cache_key = (self.owner, self.realm, self.topic)
result = self.lookup_cache_key(cache_key)
if result is not None:
my_req = self.raw_pull(result)
if my_req.status_code != 200:
result = None # Cached item was no good
elif my_req.json()['title'] != self.topic:
logging.debug('Title must have changed; ignore cache')
result = None
else:
logging.debug('Using cached thread id %s for %s', str(result),
str(cache_key))
return result
data, dummy_hdr = self.raw_search(self.user, self.token, query_string)
if data['total_count'] == 1: # unique match
if data['items'][0]['title'] == self.topic:
result = data['items'][0]['number']
else:
result = None
elif data['total_count'] > 1: # multiple matches since github doesn't
searched_data = [ # have unique search we must filter
item for item in data['items'] if item['title'] == self.topic]
if not searched_data: # no matches
return None
elif len(searched_data) > 1:
raise yap_exceptions.UnableToFindUniqueTopic(
self.topic, data['total_count'], '')
else:
assert len(searched_data) == 1, (
'Confused searching for topic "%s"' % str(self.topic))
result = searched_data[0]['number']
else:
result = None
self.update_cache_key(cache_key, result)
return result
|
python
|
def lookup_thread_id(self):
"""Lookup thread id as required by CommentThread.lookup_thread_id.
This implementation will query GitHub with the required parameters
to try and find the topic for the owner, realm, topic, etc., specified
in init.
"""
query_string = 'in:title "%s" repo:%s/%s' % (
self.topic, self.owner, self.realm)
cache_key = (self.owner, self.realm, self.topic)
result = self.lookup_cache_key(cache_key)
if result is not None:
my_req = self.raw_pull(result)
if my_req.status_code != 200:
result = None # Cached item was no good
elif my_req.json()['title'] != self.topic:
logging.debug('Title must have changed; ignore cache')
result = None
else:
logging.debug('Using cached thread id %s for %s', str(result),
str(cache_key))
return result
data, dummy_hdr = self.raw_search(self.user, self.token, query_string)
if data['total_count'] == 1: # unique match
if data['items'][0]['title'] == self.topic:
result = data['items'][0]['number']
else:
result = None
elif data['total_count'] > 1: # multiple matches since github doesn't
searched_data = [ # have unique search we must filter
item for item in data['items'] if item['title'] == self.topic]
if not searched_data: # no matches
return None
elif len(searched_data) > 1:
raise yap_exceptions.UnableToFindUniqueTopic(
self.topic, data['total_count'], '')
else:
assert len(searched_data) == 1, (
'Confused searching for topic "%s"' % str(self.topic))
result = searched_data[0]['number']
else:
result = None
self.update_cache_key(cache_key, result)
return result
|
[
"def",
"lookup_thread_id",
"(",
"self",
")",
":",
"query_string",
"=",
"'in:title \"%s\" repo:%s/%s'",
"%",
"(",
"self",
".",
"topic",
",",
"self",
".",
"owner",
",",
"self",
".",
"realm",
")",
"cache_key",
"=",
"(",
"self",
".",
"owner",
",",
"self",
".",
"realm",
",",
"self",
".",
"topic",
")",
"result",
"=",
"self",
".",
"lookup_cache_key",
"(",
"cache_key",
")",
"if",
"result",
"is",
"not",
"None",
":",
"my_req",
"=",
"self",
".",
"raw_pull",
"(",
"result",
")",
"if",
"my_req",
".",
"status_code",
"!=",
"200",
":",
"result",
"=",
"None",
"# Cached item was no good",
"elif",
"my_req",
".",
"json",
"(",
")",
"[",
"'title'",
"]",
"!=",
"self",
".",
"topic",
":",
"logging",
".",
"debug",
"(",
"'Title must have changed; ignore cache'",
")",
"result",
"=",
"None",
"else",
":",
"logging",
".",
"debug",
"(",
"'Using cached thread id %s for %s'",
",",
"str",
"(",
"result",
")",
",",
"str",
"(",
"cache_key",
")",
")",
"return",
"result",
"data",
",",
"dummy_hdr",
"=",
"self",
".",
"raw_search",
"(",
"self",
".",
"user",
",",
"self",
".",
"token",
",",
"query_string",
")",
"if",
"data",
"[",
"'total_count'",
"]",
"==",
"1",
":",
"# unique match",
"if",
"data",
"[",
"'items'",
"]",
"[",
"0",
"]",
"[",
"'title'",
"]",
"==",
"self",
".",
"topic",
":",
"result",
"=",
"data",
"[",
"'items'",
"]",
"[",
"0",
"]",
"[",
"'number'",
"]",
"else",
":",
"result",
"=",
"None",
"elif",
"data",
"[",
"'total_count'",
"]",
">",
"1",
":",
"# multiple matches since github doesn't",
"searched_data",
"=",
"[",
"# have unique search we must filter",
"item",
"for",
"item",
"in",
"data",
"[",
"'items'",
"]",
"if",
"item",
"[",
"'title'",
"]",
"==",
"self",
".",
"topic",
"]",
"if",
"not",
"searched_data",
":",
"# no matches",
"return",
"None",
"elif",
"len",
"(",
"searched_data",
")",
">",
"1",
":",
"raise",
"yap_exceptions",
".",
"UnableToFindUniqueTopic",
"(",
"self",
".",
"topic",
",",
"data",
"[",
"'total_count'",
"]",
",",
"''",
")",
"else",
":",
"assert",
"len",
"(",
"searched_data",
")",
"==",
"1",
",",
"(",
"'Confused searching for topic \"%s\"'",
"%",
"str",
"(",
"self",
".",
"topic",
")",
")",
"result",
"=",
"searched_data",
"[",
"0",
"]",
"[",
"'number'",
"]",
"else",
":",
"result",
"=",
"None",
"self",
".",
"update_cache_key",
"(",
"cache_key",
",",
"result",
")",
"return",
"result"
] |
Lookup thread id as required by CommentThread.lookup_thread_id.
This implementation will query GitHub with the required parameters
to try and find the topic for the owner, realm, topic, etc., specified
in init.
|
[
"Lookup",
"thread",
"id",
"as",
"required",
"by",
"CommentThread",
".",
"lookup_thread_id",
"."
] |
a610761973b478ca0e864e970be05ce29d5994a5
|
https://github.com/emin63/eyap/blob/a610761973b478ca0e864e970be05ce29d5994a5/eyap/core/github_comments.py#L254-L300
|
240,714
|
emin63/eyap
|
eyap/core/github_comments.py
|
GitHubCommentThread.raw_search
|
def raw_search(cls, user, token, query, page=0):
"""Do a raw search for github issues.
:arg user: Username to use in accessing github.
:arg token: Token to use in accessing github.
:arg query: String query to use in searching github.
:arg page=0: Number of pages to automatically paginate.
~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-
:returns: The pair (result, header) representing the result
from github along with the header.
~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-
PURPOSE: Search for issues on github. If page > 0 then we
will pull out up to page more pages via automatic
pagination. The best way to check if you got the
full results is to check if results['total_count']
matches len(results['items']).
"""
page = int(page)
kwargs = {} if not user else {'auth': (user, token)}
my_url = cls.search_url
data = {'items': []}
while my_url:
cls.sleep_if_necessary(
user, token, msg='\nquery="%s"' % str(query))
my_req = requests.get(my_url, params={'q': query}, **kwargs)
if my_req.status_code != 200:
raise GitHubAngry(
'Bad status code %s finding query %s because %s' % (
my_req.status_code, query, my_req.reason))
my_json = my_req.json()
assert isinstance(my_json['items'], list)
data['items'].extend(my_json.pop('items'))
data.update(my_json)
my_url = None
if page and my_req.links.get('next', False):
my_url = my_req.links['next']['url']
if my_url:
page = page - 1
logging.debug(
'Paginating %s in raw_search (%i more pages allowed)',
my_req.links, page)
return data, my_req.headers
|
python
|
def raw_search(cls, user, token, query, page=0):
"""Do a raw search for github issues.
:arg user: Username to use in accessing github.
:arg token: Token to use in accessing github.
:arg query: String query to use in searching github.
:arg page=0: Number of pages to automatically paginate.
~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-
:returns: The pair (result, header) representing the result
from github along with the header.
~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-
PURPOSE: Search for issues on github. If page > 0 then we
will pull out up to page more pages via automatic
pagination. The best way to check if you got the
full results is to check if results['total_count']
matches len(results['items']).
"""
page = int(page)
kwargs = {} if not user else {'auth': (user, token)}
my_url = cls.search_url
data = {'items': []}
while my_url:
cls.sleep_if_necessary(
user, token, msg='\nquery="%s"' % str(query))
my_req = requests.get(my_url, params={'q': query}, **kwargs)
if my_req.status_code != 200:
raise GitHubAngry(
'Bad status code %s finding query %s because %s' % (
my_req.status_code, query, my_req.reason))
my_json = my_req.json()
assert isinstance(my_json['items'], list)
data['items'].extend(my_json.pop('items'))
data.update(my_json)
my_url = None
if page and my_req.links.get('next', False):
my_url = my_req.links['next']['url']
if my_url:
page = page - 1
logging.debug(
'Paginating %s in raw_search (%i more pages allowed)',
my_req.links, page)
return data, my_req.headers
|
[
"def",
"raw_search",
"(",
"cls",
",",
"user",
",",
"token",
",",
"query",
",",
"page",
"=",
"0",
")",
":",
"page",
"=",
"int",
"(",
"page",
")",
"kwargs",
"=",
"{",
"}",
"if",
"not",
"user",
"else",
"{",
"'auth'",
":",
"(",
"user",
",",
"token",
")",
"}",
"my_url",
"=",
"cls",
".",
"search_url",
"data",
"=",
"{",
"'items'",
":",
"[",
"]",
"}",
"while",
"my_url",
":",
"cls",
".",
"sleep_if_necessary",
"(",
"user",
",",
"token",
",",
"msg",
"=",
"'\\nquery=\"%s\"'",
"%",
"str",
"(",
"query",
")",
")",
"my_req",
"=",
"requests",
".",
"get",
"(",
"my_url",
",",
"params",
"=",
"{",
"'q'",
":",
"query",
"}",
",",
"*",
"*",
"kwargs",
")",
"if",
"my_req",
".",
"status_code",
"!=",
"200",
":",
"raise",
"GitHubAngry",
"(",
"'Bad status code %s finding query %s because %s'",
"%",
"(",
"my_req",
".",
"status_code",
",",
"query",
",",
"my_req",
".",
"reason",
")",
")",
"my_json",
"=",
"my_req",
".",
"json",
"(",
")",
"assert",
"isinstance",
"(",
"my_json",
"[",
"'items'",
"]",
",",
"list",
")",
"data",
"[",
"'items'",
"]",
".",
"extend",
"(",
"my_json",
".",
"pop",
"(",
"'items'",
")",
")",
"data",
".",
"update",
"(",
"my_json",
")",
"my_url",
"=",
"None",
"if",
"page",
"and",
"my_req",
".",
"links",
".",
"get",
"(",
"'next'",
",",
"False",
")",
":",
"my_url",
"=",
"my_req",
".",
"links",
"[",
"'next'",
"]",
"[",
"'url'",
"]",
"if",
"my_url",
":",
"page",
"=",
"page",
"-",
"1",
"logging",
".",
"debug",
"(",
"'Paginating %s in raw_search (%i more pages allowed)'",
",",
"my_req",
".",
"links",
",",
"page",
")",
"return",
"data",
",",
"my_req",
".",
"headers"
] |
Do a raw search for github issues.
:arg user: Username to use in accessing github.
:arg token: Token to use in accessing github.
:arg query: String query to use in searching github.
:arg page=0: Number of pages to automatically paginate.
~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-
:returns: The pair (result, header) representing the result
from github along with the header.
~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-
PURPOSE: Search for issues on github. If page > 0 then we
will pull out up to page more pages via automatic
pagination. The best way to check if you got the
full results is to check if results['total_count']
matches len(results['items']).
|
[
"Do",
"a",
"raw",
"search",
"for",
"github",
"issues",
"."
] |
a610761973b478ca0e864e970be05ce29d5994a5
|
https://github.com/emin63/eyap/blob/a610761973b478ca0e864e970be05ce29d5994a5/eyap/core/github_comments.py#L303-L354
|
240,715
|
emin63/eyap
|
eyap/core/github_comments.py
|
GitHubCommentThread.raw_pull
|
def raw_pull(self, topic):
"""Do a raw pull of data for given topic down from github.
:arg topic: String topic (i.e., issue title).
~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-
:returns: Result of request data from github API.
~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-
PURPOSE: Encapsulate call that gets raw data from github.
"""
assert topic is not None, 'A topic of None is not allowed'
kwargs = {} if not self.user else {'auth': (self.user, self.token)}
my_req = requests.get('%s/issues/%s' % (
self.base_url, topic), **kwargs)
return my_req
|
python
|
def raw_pull(self, topic):
"""Do a raw pull of data for given topic down from github.
:arg topic: String topic (i.e., issue title).
~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-
:returns: Result of request data from github API.
~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-
PURPOSE: Encapsulate call that gets raw data from github.
"""
assert topic is not None, 'A topic of None is not allowed'
kwargs = {} if not self.user else {'auth': (self.user, self.token)}
my_req = requests.get('%s/issues/%s' % (
self.base_url, topic), **kwargs)
return my_req
|
[
"def",
"raw_pull",
"(",
"self",
",",
"topic",
")",
":",
"assert",
"topic",
"is",
"not",
"None",
",",
"'A topic of None is not allowed'",
"kwargs",
"=",
"{",
"}",
"if",
"not",
"self",
".",
"user",
"else",
"{",
"'auth'",
":",
"(",
"self",
".",
"user",
",",
"self",
".",
"token",
")",
"}",
"my_req",
"=",
"requests",
".",
"get",
"(",
"'%s/issues/%s'",
"%",
"(",
"self",
".",
"base_url",
",",
"topic",
")",
",",
"*",
"*",
"kwargs",
")",
"return",
"my_req"
] |
Do a raw pull of data for given topic down from github.
:arg topic: String topic (i.e., issue title).
~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-
:returns: Result of request data from github API.
~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-
PURPOSE: Encapsulate call that gets raw data from github.
|
[
"Do",
"a",
"raw",
"pull",
"of",
"data",
"for",
"given",
"topic",
"down",
"from",
"github",
"."
] |
a610761973b478ca0e864e970be05ce29d5994a5
|
https://github.com/emin63/eyap/blob/a610761973b478ca0e864e970be05ce29d5994a5/eyap/core/github_comments.py#L356-L374
|
240,716
|
emin63/eyap
|
eyap/core/github_comments.py
|
GitHubCommentThread.lookup_comment_list
|
def lookup_comment_list(self):
"""Lookup list of comments for an issue.
~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-
:returns: The pair (ISSUE, COMMENTS) where ISSUE is a dict for the
main issue and COMMENTS is a list of comments on the issue.
~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-
PURPOSE: Do the work of getting data from github, handling paging,
and so on.
"""
if self.thread_id is None:
return None, None
# Just pulling a single issue here so pagination shouldn't be problem
my_req = self.raw_pull(self.thread_id)
if my_req.status_code != 200:
raise GitHubAngry('Bad status code %s because %s' % (
my_req.status_code, my_req.reason))
issue_json = my_req.json()
comments_url = issue_json['comments_url'] + self.url_extras
kwargs = {} if not self.user else {'auth': (self.user, self.token)}
comments_json = []
while comments_url:
logging.debug('Pulling comments URL: %s', comments_url)
c_req = requests.get(comments_url, **kwargs)
my_json = c_req.json()
assert isinstance(my_json, list)
comments_json.extend(my_json)
comments_url = None
if 'link' in c_req.headers: # need to handle pagination.
logging.debug('Paginating in lookup_comment_list')
link = c_req.headers['link'].split(',')
for thing in link:
potential_url, part = thing.split('; ')
if part == 'rel="next"':
comments_url = potential_url.lstrip(' <').rstrip('> ')
return issue_json, comments_json
|
python
|
def lookup_comment_list(self):
"""Lookup list of comments for an issue.
~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-
:returns: The pair (ISSUE, COMMENTS) where ISSUE is a dict for the
main issue and COMMENTS is a list of comments on the issue.
~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-
PURPOSE: Do the work of getting data from github, handling paging,
and so on.
"""
if self.thread_id is None:
return None, None
# Just pulling a single issue here so pagination shouldn't be problem
my_req = self.raw_pull(self.thread_id)
if my_req.status_code != 200:
raise GitHubAngry('Bad status code %s because %s' % (
my_req.status_code, my_req.reason))
issue_json = my_req.json()
comments_url = issue_json['comments_url'] + self.url_extras
kwargs = {} if not self.user else {'auth': (self.user, self.token)}
comments_json = []
while comments_url:
logging.debug('Pulling comments URL: %s', comments_url)
c_req = requests.get(comments_url, **kwargs)
my_json = c_req.json()
assert isinstance(my_json, list)
comments_json.extend(my_json)
comments_url = None
if 'link' in c_req.headers: # need to handle pagination.
logging.debug('Paginating in lookup_comment_list')
link = c_req.headers['link'].split(',')
for thing in link:
potential_url, part = thing.split('; ')
if part == 'rel="next"':
comments_url = potential_url.lstrip(' <').rstrip('> ')
return issue_json, comments_json
|
[
"def",
"lookup_comment_list",
"(",
"self",
")",
":",
"if",
"self",
".",
"thread_id",
"is",
"None",
":",
"return",
"None",
",",
"None",
"# Just pulling a single issue here so pagination shouldn't be problem",
"my_req",
"=",
"self",
".",
"raw_pull",
"(",
"self",
".",
"thread_id",
")",
"if",
"my_req",
".",
"status_code",
"!=",
"200",
":",
"raise",
"GitHubAngry",
"(",
"'Bad status code %s because %s'",
"%",
"(",
"my_req",
".",
"status_code",
",",
"my_req",
".",
"reason",
")",
")",
"issue_json",
"=",
"my_req",
".",
"json",
"(",
")",
"comments_url",
"=",
"issue_json",
"[",
"'comments_url'",
"]",
"+",
"self",
".",
"url_extras",
"kwargs",
"=",
"{",
"}",
"if",
"not",
"self",
".",
"user",
"else",
"{",
"'auth'",
":",
"(",
"self",
".",
"user",
",",
"self",
".",
"token",
")",
"}",
"comments_json",
"=",
"[",
"]",
"while",
"comments_url",
":",
"logging",
".",
"debug",
"(",
"'Pulling comments URL: %s'",
",",
"comments_url",
")",
"c_req",
"=",
"requests",
".",
"get",
"(",
"comments_url",
",",
"*",
"*",
"kwargs",
")",
"my_json",
"=",
"c_req",
".",
"json",
"(",
")",
"assert",
"isinstance",
"(",
"my_json",
",",
"list",
")",
"comments_json",
".",
"extend",
"(",
"my_json",
")",
"comments_url",
"=",
"None",
"if",
"'link'",
"in",
"c_req",
".",
"headers",
":",
"# need to handle pagination.",
"logging",
".",
"debug",
"(",
"'Paginating in lookup_comment_list'",
")",
"link",
"=",
"c_req",
".",
"headers",
"[",
"'link'",
"]",
".",
"split",
"(",
"','",
")",
"for",
"thing",
"in",
"link",
":",
"potential_url",
",",
"part",
"=",
"thing",
".",
"split",
"(",
"'; '",
")",
"if",
"part",
"==",
"'rel=\"next\"'",
":",
"comments_url",
"=",
"potential_url",
".",
"lstrip",
"(",
"' <'",
")",
".",
"rstrip",
"(",
"'> '",
")",
"return",
"issue_json",
",",
"comments_json"
] |
Lookup list of comments for an issue.
~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-
:returns: The pair (ISSUE, COMMENTS) where ISSUE is a dict for the
main issue and COMMENTS is a list of comments on the issue.
~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-
PURPOSE: Do the work of getting data from github, handling paging,
and so on.
|
[
"Lookup",
"list",
"of",
"comments",
"for",
"an",
"issue",
"."
] |
a610761973b478ca0e864e970be05ce29d5994a5
|
https://github.com/emin63/eyap/blob/a610761973b478ca0e864e970be05ce29d5994a5/eyap/core/github_comments.py#L376-L417
|
240,717
|
emin63/eyap
|
eyap/core/github_comments.py
|
GitHubCommentThread.add_comment
|
def add_comment(self, body, allow_create=False, allow_hashes=True,
summary=None, hash_create=False):
"""Implement as required by CommentThread.add_comment.
:arg body: String/text of comment to add.
:arg allow_create=False: Whether to automatically create a new thread
if a thread does not exist (usually by calling
self.create_thread).
:arg allow_hashes=True: Whether to support hashtag mentions of other
topics and automatically insert comment in
body into those topics as well.
*IMPORTANT*: if you recursively call
add_comment to insert the hashes, you should
make sure to set this to False to prevent
infinite hash processing loops.
arg summary=None: Optional summary. If not given, we will
extract one from body automatically if
necessary.
:arg hash_create=False: Whether to allow creating new threads via
hash mentions.
~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-
:returns: Response object indicating whether added succesfully.
~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-
PURPOSE: This uses the GitHub API to try to add the given comment
to the desired thread.
"""
if self.thread_id is None:
self.thread_id = self.lookup_thread_id()
data = json.dumps({'body': body})
if self.thread_id is None:
if allow_create:
return self.create_thread(body)
else:
raise ValueError(
'Cannot find comment existing comment for %s' % self.topic)
result = requests.post('%s/issues/%s/comments' % (
self.base_url, self.thread_id), data, auth=(self.user, self.token))
if result.status_code != 201:
if result.reason == 'Not Found' and allow_create:
return self.create_thread(body)
else:
raise GitHubAngry(
'Bad status %s add_comment on %s because %s' % (
result.status_code, self.topic, result.reason))
if allow_hashes:
self.process_hashes(body, allow_create=hash_create)
return result
|
python
|
def add_comment(self, body, allow_create=False, allow_hashes=True,
summary=None, hash_create=False):
"""Implement as required by CommentThread.add_comment.
:arg body: String/text of comment to add.
:arg allow_create=False: Whether to automatically create a new thread
if a thread does not exist (usually by calling
self.create_thread).
:arg allow_hashes=True: Whether to support hashtag mentions of other
topics and automatically insert comment in
body into those topics as well.
*IMPORTANT*: if you recursively call
add_comment to insert the hashes, you should
make sure to set this to False to prevent
infinite hash processing loops.
arg summary=None: Optional summary. If not given, we will
extract one from body automatically if
necessary.
:arg hash_create=False: Whether to allow creating new threads via
hash mentions.
~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-
:returns: Response object indicating whether added succesfully.
~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-
PURPOSE: This uses the GitHub API to try to add the given comment
to the desired thread.
"""
if self.thread_id is None:
self.thread_id = self.lookup_thread_id()
data = json.dumps({'body': body})
if self.thread_id is None:
if allow_create:
return self.create_thread(body)
else:
raise ValueError(
'Cannot find comment existing comment for %s' % self.topic)
result = requests.post('%s/issues/%s/comments' % (
self.base_url, self.thread_id), data, auth=(self.user, self.token))
if result.status_code != 201:
if result.reason == 'Not Found' and allow_create:
return self.create_thread(body)
else:
raise GitHubAngry(
'Bad status %s add_comment on %s because %s' % (
result.status_code, self.topic, result.reason))
if allow_hashes:
self.process_hashes(body, allow_create=hash_create)
return result
|
[
"def",
"add_comment",
"(",
"self",
",",
"body",
",",
"allow_create",
"=",
"False",
",",
"allow_hashes",
"=",
"True",
",",
"summary",
"=",
"None",
",",
"hash_create",
"=",
"False",
")",
":",
"if",
"self",
".",
"thread_id",
"is",
"None",
":",
"self",
".",
"thread_id",
"=",
"self",
".",
"lookup_thread_id",
"(",
")",
"data",
"=",
"json",
".",
"dumps",
"(",
"{",
"'body'",
":",
"body",
"}",
")",
"if",
"self",
".",
"thread_id",
"is",
"None",
":",
"if",
"allow_create",
":",
"return",
"self",
".",
"create_thread",
"(",
"body",
")",
"else",
":",
"raise",
"ValueError",
"(",
"'Cannot find comment existing comment for %s'",
"%",
"self",
".",
"topic",
")",
"result",
"=",
"requests",
".",
"post",
"(",
"'%s/issues/%s/comments'",
"%",
"(",
"self",
".",
"base_url",
",",
"self",
".",
"thread_id",
")",
",",
"data",
",",
"auth",
"=",
"(",
"self",
".",
"user",
",",
"self",
".",
"token",
")",
")",
"if",
"result",
".",
"status_code",
"!=",
"201",
":",
"if",
"result",
".",
"reason",
"==",
"'Not Found'",
"and",
"allow_create",
":",
"return",
"self",
".",
"create_thread",
"(",
"body",
")",
"else",
":",
"raise",
"GitHubAngry",
"(",
"'Bad status %s add_comment on %s because %s'",
"%",
"(",
"result",
".",
"status_code",
",",
"self",
".",
"topic",
",",
"result",
".",
"reason",
")",
")",
"if",
"allow_hashes",
":",
"self",
".",
"process_hashes",
"(",
"body",
",",
"allow_create",
"=",
"hash_create",
")",
"return",
"result"
] |
Implement as required by CommentThread.add_comment.
:arg body: String/text of comment to add.
:arg allow_create=False: Whether to automatically create a new thread
if a thread does not exist (usually by calling
self.create_thread).
:arg allow_hashes=True: Whether to support hashtag mentions of other
topics and automatically insert comment in
body into those topics as well.
*IMPORTANT*: if you recursively call
add_comment to insert the hashes, you should
make sure to set this to False to prevent
infinite hash processing loops.
arg summary=None: Optional summary. If not given, we will
extract one from body automatically if
necessary.
:arg hash_create=False: Whether to allow creating new threads via
hash mentions.
~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-
:returns: Response object indicating whether added succesfully.
~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-
PURPOSE: This uses the GitHub API to try to add the given comment
to the desired thread.
|
[
"Implement",
"as",
"required",
"by",
"CommentThread",
".",
"add_comment",
"."
] |
a610761973b478ca0e864e970be05ce29d5994a5
|
https://github.com/emin63/eyap/blob/a610761973b478ca0e864e970be05ce29d5994a5/eyap/core/github_comments.py#L445-L504
|
240,718
|
emin63/eyap
|
eyap/core/github_comments.py
|
GitHubCommentThread.process_hashes
|
def process_hashes(self, body, allow_create=False):
"""Process any hashes mentioned and push them to related topics.
:arg body: Body of the comment to check for hashes and push out.
:arg allow_create=False: Whether to allow creating new topics
from hash tag mentions.
~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-
PURPOSE: Look for hashtags matching self.hashtag_re and when found,
add comment from body to those topics.
"""
hash_re = re.compile(self.hashtag_re)
hashes = hash_re.findall(body)
done = {self.topic.lower(): True}
for mention in hashes:
mention = mention.strip('#')
if mention.lower() in done:
continue # Do not duplicate hash mentions
new_thread = self.__class__(
owner=self.owner, realm=self.realm, topic=mention,
user=self.user, token=self.token)
my_comment = '# Hashtag copy from %s:\n%s' % (self.topic, body)
new_thread.add_comment(
my_comment, allow_create=allow_create,
allow_hashes=False) # allow_hashes=False to prevent inf loop
done[mention.lower()] = True
|
python
|
def process_hashes(self, body, allow_create=False):
"""Process any hashes mentioned and push them to related topics.
:arg body: Body of the comment to check for hashes and push out.
:arg allow_create=False: Whether to allow creating new topics
from hash tag mentions.
~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-
PURPOSE: Look for hashtags matching self.hashtag_re and when found,
add comment from body to those topics.
"""
hash_re = re.compile(self.hashtag_re)
hashes = hash_re.findall(body)
done = {self.topic.lower(): True}
for mention in hashes:
mention = mention.strip('#')
if mention.lower() in done:
continue # Do not duplicate hash mentions
new_thread = self.__class__(
owner=self.owner, realm=self.realm, topic=mention,
user=self.user, token=self.token)
my_comment = '# Hashtag copy from %s:\n%s' % (self.topic, body)
new_thread.add_comment(
my_comment, allow_create=allow_create,
allow_hashes=False) # allow_hashes=False to prevent inf loop
done[mention.lower()] = True
|
[
"def",
"process_hashes",
"(",
"self",
",",
"body",
",",
"allow_create",
"=",
"False",
")",
":",
"hash_re",
"=",
"re",
".",
"compile",
"(",
"self",
".",
"hashtag_re",
")",
"hashes",
"=",
"hash_re",
".",
"findall",
"(",
"body",
")",
"done",
"=",
"{",
"self",
".",
"topic",
".",
"lower",
"(",
")",
":",
"True",
"}",
"for",
"mention",
"in",
"hashes",
":",
"mention",
"=",
"mention",
".",
"strip",
"(",
"'#'",
")",
"if",
"mention",
".",
"lower",
"(",
")",
"in",
"done",
":",
"continue",
"# Do not duplicate hash mentions",
"new_thread",
"=",
"self",
".",
"__class__",
"(",
"owner",
"=",
"self",
".",
"owner",
",",
"realm",
"=",
"self",
".",
"realm",
",",
"topic",
"=",
"mention",
",",
"user",
"=",
"self",
".",
"user",
",",
"token",
"=",
"self",
".",
"token",
")",
"my_comment",
"=",
"'# Hashtag copy from %s:\\n%s'",
"%",
"(",
"self",
".",
"topic",
",",
"body",
")",
"new_thread",
".",
"add_comment",
"(",
"my_comment",
",",
"allow_create",
"=",
"allow_create",
",",
"allow_hashes",
"=",
"False",
")",
"# allow_hashes=False to prevent inf loop",
"done",
"[",
"mention",
".",
"lower",
"(",
")",
"]",
"=",
"True"
] |
Process any hashes mentioned and push them to related topics.
:arg body: Body of the comment to check for hashes and push out.
:arg allow_create=False: Whether to allow creating new topics
from hash tag mentions.
~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-
PURPOSE: Look for hashtags matching self.hashtag_re and when found,
add comment from body to those topics.
|
[
"Process",
"any",
"hashes",
"mentioned",
"and",
"push",
"them",
"to",
"related",
"topics",
"."
] |
a610761973b478ca0e864e970be05ce29d5994a5
|
https://github.com/emin63/eyap/blob/a610761973b478ca0e864e970be05ce29d5994a5/eyap/core/github_comments.py#L506-L534
|
240,719
|
emin63/eyap
|
eyap/core/github_comments.py
|
GitHubCommentThread.upload_attachment
|
def upload_attachment(self, location, data):
"""Upload attachment as required by CommentThread class.
See CommentThread.upload_attachment for details.
"""
self.validate_attachment_location(location)
content = data.read() if hasattr(data, 'read') else data
orig_content = content
if isinstance(content, bytes):
content = base64.b64encode(orig_content).decode('ascii')
else:
pass # Should be base64 encoded already
apath = '%s/%s' % (self.attachment_location, location)
url = '%s/contents/%s' % (self.base_url, apath)
result = requests.put(
url, auth=(self.user, self.token), data=json.dumps({
'message': 'file attachment %s' % location,
'content': content}))
if result.status_code != 201:
raise ValueError(
"Can't upload attachment %s due to error %s." % (
location, result.reason))
return '[%s](https://github.com/%s/%s/blob/master/%s)' % (
location, self.owner, self.realm, apath)
|
python
|
def upload_attachment(self, location, data):
"""Upload attachment as required by CommentThread class.
See CommentThread.upload_attachment for details.
"""
self.validate_attachment_location(location)
content = data.read() if hasattr(data, 'read') else data
orig_content = content
if isinstance(content, bytes):
content = base64.b64encode(orig_content).decode('ascii')
else:
pass # Should be base64 encoded already
apath = '%s/%s' % (self.attachment_location, location)
url = '%s/contents/%s' % (self.base_url, apath)
result = requests.put(
url, auth=(self.user, self.token), data=json.dumps({
'message': 'file attachment %s' % location,
'content': content}))
if result.status_code != 201:
raise ValueError(
"Can't upload attachment %s due to error %s." % (
location, result.reason))
return '[%s](https://github.com/%s/%s/blob/master/%s)' % (
location, self.owner, self.realm, apath)
|
[
"def",
"upload_attachment",
"(",
"self",
",",
"location",
",",
"data",
")",
":",
"self",
".",
"validate_attachment_location",
"(",
"location",
")",
"content",
"=",
"data",
".",
"read",
"(",
")",
"if",
"hasattr",
"(",
"data",
",",
"'read'",
")",
"else",
"data",
"orig_content",
"=",
"content",
"if",
"isinstance",
"(",
"content",
",",
"bytes",
")",
":",
"content",
"=",
"base64",
".",
"b64encode",
"(",
"orig_content",
")",
".",
"decode",
"(",
"'ascii'",
")",
"else",
":",
"pass",
"# Should be base64 encoded already",
"apath",
"=",
"'%s/%s'",
"%",
"(",
"self",
".",
"attachment_location",
",",
"location",
")",
"url",
"=",
"'%s/contents/%s'",
"%",
"(",
"self",
".",
"base_url",
",",
"apath",
")",
"result",
"=",
"requests",
".",
"put",
"(",
"url",
",",
"auth",
"=",
"(",
"self",
".",
"user",
",",
"self",
".",
"token",
")",
",",
"data",
"=",
"json",
".",
"dumps",
"(",
"{",
"'message'",
":",
"'file attachment %s'",
"%",
"location",
",",
"'content'",
":",
"content",
"}",
")",
")",
"if",
"result",
".",
"status_code",
"!=",
"201",
":",
"raise",
"ValueError",
"(",
"\"Can't upload attachment %s due to error %s.\"",
"%",
"(",
"location",
",",
"result",
".",
"reason",
")",
")",
"return",
"'[%s](https://github.com/%s/%s/blob/master/%s)'",
"%",
"(",
"location",
",",
"self",
".",
"owner",
",",
"self",
".",
"realm",
",",
"apath",
")"
] |
Upload attachment as required by CommentThread class.
See CommentThread.upload_attachment for details.
|
[
"Upload",
"attachment",
"as",
"required",
"by",
"CommentThread",
"class",
"."
] |
a610761973b478ca0e864e970be05ce29d5994a5
|
https://github.com/emin63/eyap/blob/a610761973b478ca0e864e970be05ce29d5994a5/eyap/core/github_comments.py#L547-L570
|
240,720
|
stevepeak/tornpsql
|
tornpsql/__init__.py
|
_Connection._reconnect
|
def _reconnect(self):
"""Closes the existing database connection and re-opens it."""
self.close()
self._db = psycopg2.connect(**self._db_args)
if self._search_path:
self.execute('set search_path=%s;' % self._search_path)
if self._timezone:
self.execute("set timezone='%s';" % self._timezone)
|
python
|
def _reconnect(self):
"""Closes the existing database connection and re-opens it."""
self.close()
self._db = psycopg2.connect(**self._db_args)
if self._search_path:
self.execute('set search_path=%s;' % self._search_path)
if self._timezone:
self.execute("set timezone='%s';" % self._timezone)
|
[
"def",
"_reconnect",
"(",
"self",
")",
":",
"self",
".",
"close",
"(",
")",
"self",
".",
"_db",
"=",
"psycopg2",
".",
"connect",
"(",
"*",
"*",
"self",
".",
"_db_args",
")",
"if",
"self",
".",
"_search_path",
":",
"self",
".",
"execute",
"(",
"'set search_path=%s;'",
"%",
"self",
".",
"_search_path",
")",
"if",
"self",
".",
"_timezone",
":",
"self",
".",
"execute",
"(",
"\"set timezone='%s';\"",
"%",
"self",
".",
"_timezone",
")"
] |
Closes the existing database connection and re-opens it.
|
[
"Closes",
"the",
"existing",
"database",
"connection",
"and",
"re",
"-",
"opens",
"it",
"."
] |
a109d0f95d6432d0e3b5eba1c9854357ba527f27
|
https://github.com/stevepeak/tornpsql/blob/a109d0f95d6432d0e3b5eba1c9854357ba527f27/tornpsql/__init__.py#L134-L143
|
240,721
|
stevepeak/tornpsql
|
tornpsql/__init__.py
|
_Connection._reregister_types
|
def _reregister_types(self):
"""Registers existing types for a new connection"""
for _type in self._register_types:
psycopg2.extensions.register_type(psycopg2.extensions.new_type(*_type))
|
python
|
def _reregister_types(self):
"""Registers existing types for a new connection"""
for _type in self._register_types:
psycopg2.extensions.register_type(psycopg2.extensions.new_type(*_type))
|
[
"def",
"_reregister_types",
"(",
"self",
")",
":",
"for",
"_type",
"in",
"self",
".",
"_register_types",
":",
"psycopg2",
".",
"extensions",
".",
"register_type",
"(",
"psycopg2",
".",
"extensions",
".",
"new_type",
"(",
"*",
"_type",
")",
")"
] |
Registers existing types for a new connection
|
[
"Registers",
"existing",
"types",
"for",
"a",
"new",
"connection"
] |
a109d0f95d6432d0e3b5eba1c9854357ba527f27
|
https://github.com/stevepeak/tornpsql/blob/a109d0f95d6432d0e3b5eba1c9854357ba527f27/tornpsql/__init__.py#L145-L148
|
240,722
|
stevepeak/tornpsql
|
tornpsql/__init__.py
|
_Connection.register_type
|
def register_type(self, oids, name, casting):
"""Callback to register data types when reconnect
"""
assert type(oids) is tuple
assert isinstance(name, basestring)
assert hasattr(casting, '__call__')
self._register_types.append((oids, name, casting))
psycopg2.extensions.register_type(psycopg2.extensions.new_type(oids, name, casting))
|
python
|
def register_type(self, oids, name, casting):
"""Callback to register data types when reconnect
"""
assert type(oids) is tuple
assert isinstance(name, basestring)
assert hasattr(casting, '__call__')
self._register_types.append((oids, name, casting))
psycopg2.extensions.register_type(psycopg2.extensions.new_type(oids, name, casting))
|
[
"def",
"register_type",
"(",
"self",
",",
"oids",
",",
"name",
",",
"casting",
")",
":",
"assert",
"type",
"(",
"oids",
")",
"is",
"tuple",
"assert",
"isinstance",
"(",
"name",
",",
"basestring",
")",
"assert",
"hasattr",
"(",
"casting",
",",
"'__call__'",
")",
"self",
".",
"_register_types",
".",
"append",
"(",
"(",
"oids",
",",
"name",
",",
"casting",
")",
")",
"psycopg2",
".",
"extensions",
".",
"register_type",
"(",
"psycopg2",
".",
"extensions",
".",
"new_type",
"(",
"oids",
",",
"name",
",",
"casting",
")",
")"
] |
Callback to register data types when reconnect
|
[
"Callback",
"to",
"register",
"data",
"types",
"when",
"reconnect"
] |
a109d0f95d6432d0e3b5eba1c9854357ba527f27
|
https://github.com/stevepeak/tornpsql/blob/a109d0f95d6432d0e3b5eba1c9854357ba527f27/tornpsql/__init__.py#L158-L165
|
240,723
|
stevepeak/tornpsql
|
tornpsql/__init__.py
|
_Connection.query
|
def query(self, query, *parameters, **kwargs):
"""Returns a row list for the given query and parameters."""
cursor = self._cursor()
try:
self._execute(cursor, query, parameters or None, kwargs)
if cursor.description:
column_names = [column.name for column in cursor.description]
res = [Row(zip(column_names, row)) for row in cursor.fetchall()]
cursor.close()
return res
except:
cursor.close()
raise
|
python
|
def query(self, query, *parameters, **kwargs):
"""Returns a row list for the given query and parameters."""
cursor = self._cursor()
try:
self._execute(cursor, query, parameters or None, kwargs)
if cursor.description:
column_names = [column.name for column in cursor.description]
res = [Row(zip(column_names, row)) for row in cursor.fetchall()]
cursor.close()
return res
except:
cursor.close()
raise
|
[
"def",
"query",
"(",
"self",
",",
"query",
",",
"*",
"parameters",
",",
"*",
"*",
"kwargs",
")",
":",
"cursor",
"=",
"self",
".",
"_cursor",
"(",
")",
"try",
":",
"self",
".",
"_execute",
"(",
"cursor",
",",
"query",
",",
"parameters",
"or",
"None",
",",
"kwargs",
")",
"if",
"cursor",
".",
"description",
":",
"column_names",
"=",
"[",
"column",
".",
"name",
"for",
"column",
"in",
"cursor",
".",
"description",
"]",
"res",
"=",
"[",
"Row",
"(",
"zip",
"(",
"column_names",
",",
"row",
")",
")",
"for",
"row",
"in",
"cursor",
".",
"fetchall",
"(",
")",
"]",
"cursor",
".",
"close",
"(",
")",
"return",
"res",
"except",
":",
"cursor",
".",
"close",
"(",
")",
"raise"
] |
Returns a row list for the given query and parameters.
|
[
"Returns",
"a",
"row",
"list",
"for",
"the",
"given",
"query",
"and",
"parameters",
"."
] |
a109d0f95d6432d0e3b5eba1c9854357ba527f27
|
https://github.com/stevepeak/tornpsql/blob/a109d0f95d6432d0e3b5eba1c9854357ba527f27/tornpsql/__init__.py#L187-L199
|
240,724
|
stevepeak/tornpsql
|
tornpsql/__init__.py
|
_Connection.iter
|
def iter(self, query, *parameters, **kwargs):
"""Returns a generator for records from the query."""
cursor = self._cursor()
try:
self._execute(cursor, query, parameters or None, kwargs)
if cursor.description:
column_names = [column.name for column in cursor.description]
while True:
record = cursor.fetchone()
if not record:
break
yield Row(zip(column_names, record))
raise StopIteration
except:
cursor.close()
raise
|
python
|
def iter(self, query, *parameters, **kwargs):
"""Returns a generator for records from the query."""
cursor = self._cursor()
try:
self._execute(cursor, query, parameters or None, kwargs)
if cursor.description:
column_names = [column.name for column in cursor.description]
while True:
record = cursor.fetchone()
if not record:
break
yield Row(zip(column_names, record))
raise StopIteration
except:
cursor.close()
raise
|
[
"def",
"iter",
"(",
"self",
",",
"query",
",",
"*",
"parameters",
",",
"*",
"*",
"kwargs",
")",
":",
"cursor",
"=",
"self",
".",
"_cursor",
"(",
")",
"try",
":",
"self",
".",
"_execute",
"(",
"cursor",
",",
"query",
",",
"parameters",
"or",
"None",
",",
"kwargs",
")",
"if",
"cursor",
".",
"description",
":",
"column_names",
"=",
"[",
"column",
".",
"name",
"for",
"column",
"in",
"cursor",
".",
"description",
"]",
"while",
"True",
":",
"record",
"=",
"cursor",
".",
"fetchone",
"(",
")",
"if",
"not",
"record",
":",
"break",
"yield",
"Row",
"(",
"zip",
"(",
"column_names",
",",
"record",
")",
")",
"raise",
"StopIteration",
"except",
":",
"cursor",
".",
"close",
"(",
")",
"raise"
] |
Returns a generator for records from the query.
|
[
"Returns",
"a",
"generator",
"for",
"records",
"from",
"the",
"query",
"."
] |
a109d0f95d6432d0e3b5eba1c9854357ba527f27
|
https://github.com/stevepeak/tornpsql/blob/a109d0f95d6432d0e3b5eba1c9854357ba527f27/tornpsql/__init__.py#L201-L217
|
240,725
|
stevepeak/tornpsql
|
tornpsql/__init__.py
|
_Connection.execute
|
def execute(self, query, *parameters, **kwargs):
"""Same as query, but do not process results. Always returns `None`."""
cursor = self._cursor()
try:
self._execute(cursor, query, parameters, kwargs)
except:
raise
finally:
cursor.close()
|
python
|
def execute(self, query, *parameters, **kwargs):
"""Same as query, but do not process results. Always returns `None`."""
cursor = self._cursor()
try:
self._execute(cursor, query, parameters, kwargs)
except:
raise
finally:
cursor.close()
|
[
"def",
"execute",
"(",
"self",
",",
"query",
",",
"*",
"parameters",
",",
"*",
"*",
"kwargs",
")",
":",
"cursor",
"=",
"self",
".",
"_cursor",
"(",
")",
"try",
":",
"self",
".",
"_execute",
"(",
"cursor",
",",
"query",
",",
"parameters",
",",
"kwargs",
")",
"except",
":",
"raise",
"finally",
":",
"cursor",
".",
"close",
"(",
")"
] |
Same as query, but do not process results. Always returns `None`.
|
[
"Same",
"as",
"query",
"but",
"do",
"not",
"process",
"results",
".",
"Always",
"returns",
"None",
"."
] |
a109d0f95d6432d0e3b5eba1c9854357ba527f27
|
https://github.com/stevepeak/tornpsql/blob/a109d0f95d6432d0e3b5eba1c9854357ba527f27/tornpsql/__init__.py#L219-L229
|
240,726
|
stevepeak/tornpsql
|
tornpsql/__init__.py
|
_Connection.get
|
def get(self, query, *parameters, **kwargs):
"""Returns the first row returned for the given query."""
rows = self.query(query, *parameters, **kwargs)
if not rows:
return None
elif len(rows) > 1:
raise ValueError('Multiple rows returned for get() query')
else:
return rows[0]
|
python
|
def get(self, query, *parameters, **kwargs):
"""Returns the first row returned for the given query."""
rows = self.query(query, *parameters, **kwargs)
if not rows:
return None
elif len(rows) > 1:
raise ValueError('Multiple rows returned for get() query')
else:
return rows[0]
|
[
"def",
"get",
"(",
"self",
",",
"query",
",",
"*",
"parameters",
",",
"*",
"*",
"kwargs",
")",
":",
"rows",
"=",
"self",
".",
"query",
"(",
"query",
",",
"*",
"parameters",
",",
"*",
"*",
"kwargs",
")",
"if",
"not",
"rows",
":",
"return",
"None",
"elif",
"len",
"(",
"rows",
")",
">",
"1",
":",
"raise",
"ValueError",
"(",
"'Multiple rows returned for get() query'",
")",
"else",
":",
"return",
"rows",
"[",
"0",
"]"
] |
Returns the first row returned for the given query.
|
[
"Returns",
"the",
"first",
"row",
"returned",
"for",
"the",
"given",
"query",
"."
] |
a109d0f95d6432d0e3b5eba1c9854357ba527f27
|
https://github.com/stevepeak/tornpsql/blob/a109d0f95d6432d0e3b5eba1c9854357ba527f27/tornpsql/__init__.py#L231-L239
|
240,727
|
stevepeak/tornpsql
|
tornpsql/__init__.py
|
_Connection.executemany
|
def executemany(self, query, *parameters):
"""Executes the given query against all the given param sequences.
"""
cursor = self._cursor()
try:
self._executemany(cursor, query, parameters)
if cursor.description:
column_names = [column.name for column in cursor.description]
res = [Row(zip(column_names, row)) for row in cursor.fetchall()]
cursor.close()
return res
except Exception: # pragma: no cover
cursor.close()
raise
|
python
|
def executemany(self, query, *parameters):
"""Executes the given query against all the given param sequences.
"""
cursor = self._cursor()
try:
self._executemany(cursor, query, parameters)
if cursor.description:
column_names = [column.name for column in cursor.description]
res = [Row(zip(column_names, row)) for row in cursor.fetchall()]
cursor.close()
return res
except Exception: # pragma: no cover
cursor.close()
raise
|
[
"def",
"executemany",
"(",
"self",
",",
"query",
",",
"*",
"parameters",
")",
":",
"cursor",
"=",
"self",
".",
"_cursor",
"(",
")",
"try",
":",
"self",
".",
"_executemany",
"(",
"cursor",
",",
"query",
",",
"parameters",
")",
"if",
"cursor",
".",
"description",
":",
"column_names",
"=",
"[",
"column",
".",
"name",
"for",
"column",
"in",
"cursor",
".",
"description",
"]",
"res",
"=",
"[",
"Row",
"(",
"zip",
"(",
"column_names",
",",
"row",
")",
")",
"for",
"row",
"in",
"cursor",
".",
"fetchall",
"(",
")",
"]",
"cursor",
".",
"close",
"(",
")",
"return",
"res",
"except",
"Exception",
":",
"# pragma: no cover",
"cursor",
".",
"close",
"(",
")",
"raise"
] |
Executes the given query against all the given param sequences.
|
[
"Executes",
"the",
"given",
"query",
"against",
"all",
"the",
"given",
"param",
"sequences",
"."
] |
a109d0f95d6432d0e3b5eba1c9854357ba527f27
|
https://github.com/stevepeak/tornpsql/blob/a109d0f95d6432d0e3b5eba1c9854357ba527f27/tornpsql/__init__.py#L241-L255
|
240,728
|
MacHu-GWU/pyknackhq-project
|
pyknackhq/zzz_manual_install.py
|
install
|
def install():
"""Install your package to all Python version you have installed on Windows.
"""
import os, shutil
_ROOT = os.getcwd()
_PACKAGE_NAME = os.path.basename(_ROOT)
print("Installing [%s] to all python version..." % _PACKAGE_NAME)
# find all Python release installed on this windows computer
installed_python_version = list()
for root, folder_list, _ in os.walk(r"C:\\"):
for folder in folder_list:
if folder.startswith("Python"):
if os.path.exists(os.path.join(root, folder, "pythonw.exe")):
installed_python_version.append(folder)
break
print("\tYou have installed: {0}".format(", ".join(installed_python_version)))
# remove __pycache__ folder and *.pyc file
print("\tRemoving *.pyc file ...")
pyc_folder_list = list()
for root, folder_list, _ in os.walk(_ROOT):
if os.path.basename(root) == "__pycache__":
pyc_folder_list.append(root)
for folder in pyc_folder_list:
shutil.rmtree(folder)
print("\t\tall *.pyc file has been removed.")
# install this package to all python version
for py_root in installed_python_version:
dst = os.path.join(r"C:\\", py_root, r"Lib\site-packages", _PACKAGE_NAME)
try:
shutil.rmtree(dst)
except:
pass
print("\tRemoved %s." % dst)
shutil.copytree(_ROOT, dst)
print("\tInstalled %s." % dst)
print("Complete!")
|
python
|
def install():
"""Install your package to all Python version you have installed on Windows.
"""
import os, shutil
_ROOT = os.getcwd()
_PACKAGE_NAME = os.path.basename(_ROOT)
print("Installing [%s] to all python version..." % _PACKAGE_NAME)
# find all Python release installed on this windows computer
installed_python_version = list()
for root, folder_list, _ in os.walk(r"C:\\"):
for folder in folder_list:
if folder.startswith("Python"):
if os.path.exists(os.path.join(root, folder, "pythonw.exe")):
installed_python_version.append(folder)
break
print("\tYou have installed: {0}".format(", ".join(installed_python_version)))
# remove __pycache__ folder and *.pyc file
print("\tRemoving *.pyc file ...")
pyc_folder_list = list()
for root, folder_list, _ in os.walk(_ROOT):
if os.path.basename(root) == "__pycache__":
pyc_folder_list.append(root)
for folder in pyc_folder_list:
shutil.rmtree(folder)
print("\t\tall *.pyc file has been removed.")
# install this package to all python version
for py_root in installed_python_version:
dst = os.path.join(r"C:\\", py_root, r"Lib\site-packages", _PACKAGE_NAME)
try:
shutil.rmtree(dst)
except:
pass
print("\tRemoved %s." % dst)
shutil.copytree(_ROOT, dst)
print("\tInstalled %s." % dst)
print("Complete!")
|
[
"def",
"install",
"(",
")",
":",
"import",
"os",
",",
"shutil",
"_ROOT",
"=",
"os",
".",
"getcwd",
"(",
")",
"_PACKAGE_NAME",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"_ROOT",
")",
"print",
"(",
"\"Installing [%s] to all python version...\"",
"%",
"_PACKAGE_NAME",
")",
"# find all Python release installed on this windows computer",
"installed_python_version",
"=",
"list",
"(",
")",
"for",
"root",
",",
"folder_list",
",",
"_",
"in",
"os",
".",
"walk",
"(",
"r\"C:\\\\\"",
")",
":",
"for",
"folder",
"in",
"folder_list",
":",
"if",
"folder",
".",
"startswith",
"(",
"\"Python\"",
")",
":",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"os",
".",
"path",
".",
"join",
"(",
"root",
",",
"folder",
",",
"\"pythonw.exe\"",
")",
")",
":",
"installed_python_version",
".",
"append",
"(",
"folder",
")",
"break",
"print",
"(",
"\"\\tYou have installed: {0}\"",
".",
"format",
"(",
"\", \"",
".",
"join",
"(",
"installed_python_version",
")",
")",
")",
"# remove __pycache__ folder and *.pyc file",
"print",
"(",
"\"\\tRemoving *.pyc file ...\"",
")",
"pyc_folder_list",
"=",
"list",
"(",
")",
"for",
"root",
",",
"folder_list",
",",
"_",
"in",
"os",
".",
"walk",
"(",
"_ROOT",
")",
":",
"if",
"os",
".",
"path",
".",
"basename",
"(",
"root",
")",
"==",
"\"__pycache__\"",
":",
"pyc_folder_list",
".",
"append",
"(",
"root",
")",
"for",
"folder",
"in",
"pyc_folder_list",
":",
"shutil",
".",
"rmtree",
"(",
"folder",
")",
"print",
"(",
"\"\\t\\tall *.pyc file has been removed.\"",
")",
"# install this package to all python version",
"for",
"py_root",
"in",
"installed_python_version",
":",
"dst",
"=",
"os",
".",
"path",
".",
"join",
"(",
"r\"C:\\\\\"",
",",
"py_root",
",",
"r\"Lib\\site-packages\"",
",",
"_PACKAGE_NAME",
")",
"try",
":",
"shutil",
".",
"rmtree",
"(",
"dst",
")",
"except",
":",
"pass",
"print",
"(",
"\"\\tRemoved %s.\"",
"%",
"dst",
")",
"shutil",
".",
"copytree",
"(",
"_ROOT",
",",
"dst",
")",
"print",
"(",
"\"\\tInstalled %s.\"",
"%",
"dst",
")",
"print",
"(",
"\"Complete!\"",
")"
] |
Install your package to all Python version you have installed on Windows.
|
[
"Install",
"your",
"package",
"to",
"all",
"Python",
"version",
"you",
"have",
"installed",
"on",
"Windows",
"."
] |
dd937f24d7b0a351ba3818eb746c31b29a8cc341
|
https://github.com/MacHu-GWU/pyknackhq-project/blob/dd937f24d7b0a351ba3818eb746c31b29a8cc341/pyknackhq/zzz_manual_install.py#L100-L141
|
240,729
|
romaryd/python-jsonrepo
|
jsonrepo/record.py
|
namedtuple_asdict
|
def namedtuple_asdict(obj):
"""
Serializing a nested namedtuple into a Python dict
"""
if obj is None:
return obj
if hasattr(obj, "_asdict"): # detect namedtuple
return OrderedDict(zip(obj._fields, (namedtuple_asdict(item)
for item in obj)))
if isinstance(obj, str): # iterables - strings
return obj
if hasattr(obj, "keys"): # iterables - mapping
return OrderedDict(zip(obj.keys(), (namedtuple_asdict(item)
for item in obj.values())))
if hasattr(obj, "__iter__"): # iterables - sequence
return type(obj)((namedtuple_asdict(item) for item in obj))
# non-iterable cannot contain namedtuples
return obj
|
python
|
def namedtuple_asdict(obj):
"""
Serializing a nested namedtuple into a Python dict
"""
if obj is None:
return obj
if hasattr(obj, "_asdict"): # detect namedtuple
return OrderedDict(zip(obj._fields, (namedtuple_asdict(item)
for item in obj)))
if isinstance(obj, str): # iterables - strings
return obj
if hasattr(obj, "keys"): # iterables - mapping
return OrderedDict(zip(obj.keys(), (namedtuple_asdict(item)
for item in obj.values())))
if hasattr(obj, "__iter__"): # iterables - sequence
return type(obj)((namedtuple_asdict(item) for item in obj))
# non-iterable cannot contain namedtuples
return obj
|
[
"def",
"namedtuple_asdict",
"(",
"obj",
")",
":",
"if",
"obj",
"is",
"None",
":",
"return",
"obj",
"if",
"hasattr",
"(",
"obj",
",",
"\"_asdict\"",
")",
":",
"# detect namedtuple",
"return",
"OrderedDict",
"(",
"zip",
"(",
"obj",
".",
"_fields",
",",
"(",
"namedtuple_asdict",
"(",
"item",
")",
"for",
"item",
"in",
"obj",
")",
")",
")",
"if",
"isinstance",
"(",
"obj",
",",
"str",
")",
":",
"# iterables - strings",
"return",
"obj",
"if",
"hasattr",
"(",
"obj",
",",
"\"keys\"",
")",
":",
"# iterables - mapping",
"return",
"OrderedDict",
"(",
"zip",
"(",
"obj",
".",
"keys",
"(",
")",
",",
"(",
"namedtuple_asdict",
"(",
"item",
")",
"for",
"item",
"in",
"obj",
".",
"values",
"(",
")",
")",
")",
")",
"if",
"hasattr",
"(",
"obj",
",",
"\"__iter__\"",
")",
":",
"# iterables - sequence",
"return",
"type",
"(",
"obj",
")",
"(",
"(",
"namedtuple_asdict",
"(",
"item",
")",
"for",
"item",
"in",
"obj",
")",
")",
"# non-iterable cannot contain namedtuples",
"return",
"obj"
] |
Serializing a nested namedtuple into a Python dict
|
[
"Serializing",
"a",
"nested",
"namedtuple",
"into",
"a",
"Python",
"dict"
] |
08a9c039a5bd21e93e9a6d1bce77d43e6e10b57d
|
https://github.com/romaryd/python-jsonrepo/blob/08a9c039a5bd21e93e9a6d1bce77d43e6e10b57d/jsonrepo/record.py#L11-L28
|
240,730
|
romaryd/python-jsonrepo
|
jsonrepo/record.py
|
DictRecord.from_json
|
def from_json(cls, json_dump):
"""
How to get a context from a json dump
"""
context = cls()
if json_dump is None:
return None
ctxt = json.loads(json_dump)
for k in ctxt:
context[k] = ctxt[k]
return context
|
python
|
def from_json(cls, json_dump):
"""
How to get a context from a json dump
"""
context = cls()
if json_dump is None:
return None
ctxt = json.loads(json_dump)
for k in ctxt:
context[k] = ctxt[k]
return context
|
[
"def",
"from_json",
"(",
"cls",
",",
"json_dump",
")",
":",
"context",
"=",
"cls",
"(",
")",
"if",
"json_dump",
"is",
"None",
":",
"return",
"None",
"ctxt",
"=",
"json",
".",
"loads",
"(",
"json_dump",
")",
"for",
"k",
"in",
"ctxt",
":",
"context",
"[",
"k",
"]",
"=",
"ctxt",
"[",
"k",
"]",
"return",
"context"
] |
How to get a context from a json dump
|
[
"How",
"to",
"get",
"a",
"context",
"from",
"a",
"json",
"dump"
] |
08a9c039a5bd21e93e9a6d1bce77d43e6e10b57d
|
https://github.com/romaryd/python-jsonrepo/blob/08a9c039a5bd21e93e9a6d1bce77d43e6e10b57d/jsonrepo/record.py#L54-L64
|
240,731
|
klmitch/tendril
|
tendril/manager.py
|
TendrilManager.find_tendril
|
def find_tendril(cls, proto, addr):
"""
Finds the tendril corresponding to the protocol and address
tuple. Returns the Tendril object, or raises KeyError if the
tendril is not tracked.
The address tuple is the tuple of the local address and the
remote address for the tendril.
"""
# First, normalize the proto
proto = proto.lower()
# Now, find and return the tendril
return cls._tendrils[proto][addr]
|
python
|
def find_tendril(cls, proto, addr):
"""
Finds the tendril corresponding to the protocol and address
tuple. Returns the Tendril object, or raises KeyError if the
tendril is not tracked.
The address tuple is the tuple of the local address and the
remote address for the tendril.
"""
# First, normalize the proto
proto = proto.lower()
# Now, find and return the tendril
return cls._tendrils[proto][addr]
|
[
"def",
"find_tendril",
"(",
"cls",
",",
"proto",
",",
"addr",
")",
":",
"# First, normalize the proto",
"proto",
"=",
"proto",
".",
"lower",
"(",
")",
"# Now, find and return the tendril",
"return",
"cls",
".",
"_tendrils",
"[",
"proto",
"]",
"[",
"addr",
"]"
] |
Finds the tendril corresponding to the protocol and address
tuple. Returns the Tendril object, or raises KeyError if the
tendril is not tracked.
The address tuple is the tuple of the local address and the
remote address for the tendril.
|
[
"Finds",
"the",
"tendril",
"corresponding",
"to",
"the",
"protocol",
"and",
"address",
"tuple",
".",
"Returns",
"the",
"Tendril",
"object",
"or",
"raises",
"KeyError",
"if",
"the",
"tendril",
"is",
"not",
"tracked",
"."
] |
207102c83e88f8f1fa7ba605ef0aab2ae9078b36
|
https://github.com/klmitch/tendril/blob/207102c83e88f8f1fa7ba605ef0aab2ae9078b36/tendril/manager.py#L99-L113
|
240,732
|
klmitch/tendril
|
tendril/manager.py
|
TendrilManager._track_tendril
|
def _track_tendril(self, tendril):
"""
Adds the tendril to the set of tracked tendrils.
"""
self.tendrils[tendril._tendril_key] = tendril
# Also add to _tendrils
self._tendrils.setdefault(tendril.proto, weakref.WeakValueDictionary())
self._tendrils[tendril.proto][tendril._tendril_key] = tendril
|
python
|
def _track_tendril(self, tendril):
"""
Adds the tendril to the set of tracked tendrils.
"""
self.tendrils[tendril._tendril_key] = tendril
# Also add to _tendrils
self._tendrils.setdefault(tendril.proto, weakref.WeakValueDictionary())
self._tendrils[tendril.proto][tendril._tendril_key] = tendril
|
[
"def",
"_track_tendril",
"(",
"self",
",",
"tendril",
")",
":",
"self",
".",
"tendrils",
"[",
"tendril",
".",
"_tendril_key",
"]",
"=",
"tendril",
"# Also add to _tendrils",
"self",
".",
"_tendrils",
".",
"setdefault",
"(",
"tendril",
".",
"proto",
",",
"weakref",
".",
"WeakValueDictionary",
"(",
")",
")",
"self",
".",
"_tendrils",
"[",
"tendril",
".",
"proto",
"]",
"[",
"tendril",
".",
"_tendril_key",
"]",
"=",
"tendril"
] |
Adds the tendril to the set of tracked tendrils.
|
[
"Adds",
"the",
"tendril",
"to",
"the",
"set",
"of",
"tracked",
"tendrils",
"."
] |
207102c83e88f8f1fa7ba605ef0aab2ae9078b36
|
https://github.com/klmitch/tendril/blob/207102c83e88f8f1fa7ba605ef0aab2ae9078b36/tendril/manager.py#L173-L182
|
240,733
|
klmitch/tendril
|
tendril/manager.py
|
TendrilManager._untrack_tendril
|
def _untrack_tendril(self, tendril):
"""
Removes the tendril from the set of tracked tendrils.
"""
try:
del self.tendrils[tendril._tendril_key]
except KeyError:
pass
# Also remove from _tendrils
try:
del self._tendrils[tendril.proto][tendril._tendril_key]
except KeyError:
pass
|
python
|
def _untrack_tendril(self, tendril):
"""
Removes the tendril from the set of tracked tendrils.
"""
try:
del self.tendrils[tendril._tendril_key]
except KeyError:
pass
# Also remove from _tendrils
try:
del self._tendrils[tendril.proto][tendril._tendril_key]
except KeyError:
pass
|
[
"def",
"_untrack_tendril",
"(",
"self",
",",
"tendril",
")",
":",
"try",
":",
"del",
"self",
".",
"tendrils",
"[",
"tendril",
".",
"_tendril_key",
"]",
"except",
"KeyError",
":",
"pass",
"# Also remove from _tendrils",
"try",
":",
"del",
"self",
".",
"_tendrils",
"[",
"tendril",
".",
"proto",
"]",
"[",
"tendril",
".",
"_tendril_key",
"]",
"except",
"KeyError",
":",
"pass"
] |
Removes the tendril from the set of tracked tendrils.
|
[
"Removes",
"the",
"tendril",
"from",
"the",
"set",
"of",
"tracked",
"tendrils",
"."
] |
207102c83e88f8f1fa7ba605ef0aab2ae9078b36
|
https://github.com/klmitch/tendril/blob/207102c83e88f8f1fa7ba605ef0aab2ae9078b36/tendril/manager.py#L184-L198
|
240,734
|
klmitch/tendril
|
tendril/manager.py
|
TendrilManager.get_local_addr
|
def get_local_addr(self, timeout=None):
"""
Retrieve the current local address.
:param timeout: If not given or given as ``None``, waits until
the local address is available. Otherwise,
waits for as long as specified. If the local
address is not set by the time the timeout
expires, returns ``None``.
"""
# If we're not running, just return None
if not self.running:
return None
# OK, we're running; wait on the _local_addr_event
if not self._local_addr_event.wait(timeout):
# Still not set after timeout
return None
# We have a local address!
return self._local_addr
|
python
|
def get_local_addr(self, timeout=None):
"""
Retrieve the current local address.
:param timeout: If not given or given as ``None``, waits until
the local address is available. Otherwise,
waits for as long as specified. If the local
address is not set by the time the timeout
expires, returns ``None``.
"""
# If we're not running, just return None
if not self.running:
return None
# OK, we're running; wait on the _local_addr_event
if not self._local_addr_event.wait(timeout):
# Still not set after timeout
return None
# We have a local address!
return self._local_addr
|
[
"def",
"get_local_addr",
"(",
"self",
",",
"timeout",
"=",
"None",
")",
":",
"# If we're not running, just return None",
"if",
"not",
"self",
".",
"running",
":",
"return",
"None",
"# OK, we're running; wait on the _local_addr_event",
"if",
"not",
"self",
".",
"_local_addr_event",
".",
"wait",
"(",
"timeout",
")",
":",
"# Still not set after timeout",
"return",
"None",
"# We have a local address!",
"return",
"self",
".",
"_local_addr"
] |
Retrieve the current local address.
:param timeout: If not given or given as ``None``, waits until
the local address is available. Otherwise,
waits for as long as specified. If the local
address is not set by the time the timeout
expires, returns ``None``.
|
[
"Retrieve",
"the",
"current",
"local",
"address",
"."
] |
207102c83e88f8f1fa7ba605ef0aab2ae9078b36
|
https://github.com/klmitch/tendril/blob/207102c83e88f8f1fa7ba605ef0aab2ae9078b36/tendril/manager.py#L291-L312
|
240,735
|
klmitch/tendril
|
tendril/manager.py
|
TendrilManager.connect
|
def connect(self, target, acceptor, wrapper=None):
"""
Initiate a connection from the tendril manager's endpoint.
Once the connection is completed, a Tendril object will be
created and passed to the given acceptor.
:param target: The target of the connection attempt.
:param acceptor: A callable which will initialize the state of
the new Tendril object.
:param wrapper: A callable taking, as its first argument, a
socket.socket object. The callable must
return a valid proxy for the socket.socket
object, which will subsequently be used to
communicate on the connection.
For passing extra arguments to the acceptor or the wrapper,
see the ``TendrilPartial`` class; for chaining together
multiple wrappers, see the ``WrapperChain`` class.
"""
if not self.running:
raise ValueError("TendrilManager not running")
# Check the target address
fam = utils.addr_info(target)
# Verify that we're in the right family
if self.addr_family != fam:
raise ValueError("address family mismatch")
|
python
|
def connect(self, target, acceptor, wrapper=None):
"""
Initiate a connection from the tendril manager's endpoint.
Once the connection is completed, a Tendril object will be
created and passed to the given acceptor.
:param target: The target of the connection attempt.
:param acceptor: A callable which will initialize the state of
the new Tendril object.
:param wrapper: A callable taking, as its first argument, a
socket.socket object. The callable must
return a valid proxy for the socket.socket
object, which will subsequently be used to
communicate on the connection.
For passing extra arguments to the acceptor or the wrapper,
see the ``TendrilPartial`` class; for chaining together
multiple wrappers, see the ``WrapperChain`` class.
"""
if not self.running:
raise ValueError("TendrilManager not running")
# Check the target address
fam = utils.addr_info(target)
# Verify that we're in the right family
if self.addr_family != fam:
raise ValueError("address family mismatch")
|
[
"def",
"connect",
"(",
"self",
",",
"target",
",",
"acceptor",
",",
"wrapper",
"=",
"None",
")",
":",
"if",
"not",
"self",
".",
"running",
":",
"raise",
"ValueError",
"(",
"\"TendrilManager not running\"",
")",
"# Check the target address",
"fam",
"=",
"utils",
".",
"addr_info",
"(",
"target",
")",
"# Verify that we're in the right family",
"if",
"self",
".",
"addr_family",
"!=",
"fam",
":",
"raise",
"ValueError",
"(",
"\"address family mismatch\"",
")"
] |
Initiate a connection from the tendril manager's endpoint.
Once the connection is completed, a Tendril object will be
created and passed to the given acceptor.
:param target: The target of the connection attempt.
:param acceptor: A callable which will initialize the state of
the new Tendril object.
:param wrapper: A callable taking, as its first argument, a
socket.socket object. The callable must
return a valid proxy for the socket.socket
object, which will subsequently be used to
communicate on the connection.
For passing extra arguments to the acceptor or the wrapper,
see the ``TendrilPartial`` class; for chaining together
multiple wrappers, see the ``WrapperChain`` class.
|
[
"Initiate",
"a",
"connection",
"from",
"the",
"tendril",
"manager",
"s",
"endpoint",
".",
"Once",
"the",
"connection",
"is",
"completed",
"a",
"Tendril",
"object",
"will",
"be",
"created",
"and",
"passed",
"to",
"the",
"given",
"acceptor",
"."
] |
207102c83e88f8f1fa7ba605ef0aab2ae9078b36
|
https://github.com/klmitch/tendril/blob/207102c83e88f8f1fa7ba605ef0aab2ae9078b36/tendril/manager.py#L341-L369
|
240,736
|
bagrat/pyerarchy
|
pyerarchy/core/node.py
|
Node.read
|
def read(self, *args, **kwargs):
"""Reads the node as a file
"""
with self.open('r') as f:
return f.read(*args, **kwargs)
|
python
|
def read(self, *args, **kwargs):
"""Reads the node as a file
"""
with self.open('r') as f:
return f.read(*args, **kwargs)
|
[
"def",
"read",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"with",
"self",
".",
"open",
"(",
"'r'",
")",
"as",
"f",
":",
"return",
"f",
".",
"read",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] |
Reads the node as a file
|
[
"Reads",
"the",
"node",
"as",
"a",
"file"
] |
75d71408af72c583b104d27ac02fbe164354ee91
|
https://github.com/bagrat/pyerarchy/blob/75d71408af72c583b104d27ac02fbe164354ee91/pyerarchy/core/node.py#L43-L47
|
240,737
|
bagrat/pyerarchy
|
pyerarchy/core/node.py
|
Node.ls
|
def ls(self):
"""List the children entities of the directory.
Raises exception if the object is a file.
:return:
"""
if self.isfile():
raise NotDirectoryError('Cannot ls() on non-directory node: {path}'.format(path=self._pyerarchy_path))
return os.listdir(self._pyerarchy_path)
|
python
|
def ls(self):
"""List the children entities of the directory.
Raises exception if the object is a file.
:return:
"""
if self.isfile():
raise NotDirectoryError('Cannot ls() on non-directory node: {path}'.format(path=self._pyerarchy_path))
return os.listdir(self._pyerarchy_path)
|
[
"def",
"ls",
"(",
"self",
")",
":",
"if",
"self",
".",
"isfile",
"(",
")",
":",
"raise",
"NotDirectoryError",
"(",
"'Cannot ls() on non-directory node: {path}'",
".",
"format",
"(",
"path",
"=",
"self",
".",
"_pyerarchy_path",
")",
")",
"return",
"os",
".",
"listdir",
"(",
"self",
".",
"_pyerarchy_path",
")"
] |
List the children entities of the directory.
Raises exception if the object is a file.
:return:
|
[
"List",
"the",
"children",
"entities",
"of",
"the",
"directory",
"."
] |
75d71408af72c583b104d27ac02fbe164354ee91
|
https://github.com/bagrat/pyerarchy/blob/75d71408af72c583b104d27ac02fbe164354ee91/pyerarchy/core/node.py#L54-L64
|
240,738
|
bagrat/pyerarchy
|
pyerarchy/core/node.py
|
Node.mkdir
|
def mkdir(self, children, mode=0o0755, return_node=True):
"""Creates child entities in directory.
Raises exception if the object is a file.
:param children: The list of children to be created.
:return: The child object, if one child is provided. None, otherwise.
"""
result = None
if isinstance(children, (str, unicode)):
if os.path.isabs(children):
raise BadValueError('Cannot mkdir an absolute path: {path}'.format(path=self._pyerarchy_path))
rel_path = os.path.join(self._pyerarchy_path, children)
os.makedirs(rel_path, mode)
if return_node:
result = Node(rel_path)
else:
for child in children:
self.mkdir(child, mode, False)
return result
|
python
|
def mkdir(self, children, mode=0o0755, return_node=True):
"""Creates child entities in directory.
Raises exception if the object is a file.
:param children: The list of children to be created.
:return: The child object, if one child is provided. None, otherwise.
"""
result = None
if isinstance(children, (str, unicode)):
if os.path.isabs(children):
raise BadValueError('Cannot mkdir an absolute path: {path}'.format(path=self._pyerarchy_path))
rel_path = os.path.join(self._pyerarchy_path, children)
os.makedirs(rel_path, mode)
if return_node:
result = Node(rel_path)
else:
for child in children:
self.mkdir(child, mode, False)
return result
|
[
"def",
"mkdir",
"(",
"self",
",",
"children",
",",
"mode",
"=",
"0o0755",
",",
"return_node",
"=",
"True",
")",
":",
"result",
"=",
"None",
"if",
"isinstance",
"(",
"children",
",",
"(",
"str",
",",
"unicode",
")",
")",
":",
"if",
"os",
".",
"path",
".",
"isabs",
"(",
"children",
")",
":",
"raise",
"BadValueError",
"(",
"'Cannot mkdir an absolute path: {path}'",
".",
"format",
"(",
"path",
"=",
"self",
".",
"_pyerarchy_path",
")",
")",
"rel_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"_pyerarchy_path",
",",
"children",
")",
"os",
".",
"makedirs",
"(",
"rel_path",
",",
"mode",
")",
"if",
"return_node",
":",
"result",
"=",
"Node",
"(",
"rel_path",
")",
"else",
":",
"for",
"child",
"in",
"children",
":",
"self",
".",
"mkdir",
"(",
"child",
",",
"mode",
",",
"False",
")",
"return",
"result"
] |
Creates child entities in directory.
Raises exception if the object is a file.
:param children: The list of children to be created.
:return: The child object, if one child is provided. None, otherwise.
|
[
"Creates",
"child",
"entities",
"in",
"directory",
"."
] |
75d71408af72c583b104d27ac02fbe164354ee91
|
https://github.com/bagrat/pyerarchy/blob/75d71408af72c583b104d27ac02fbe164354ee91/pyerarchy/core/node.py#L75-L98
|
240,739
|
sxslex/capitalize-name
|
capitalize_name/__init__.py
|
capitalize
|
def capitalize(
full_name,
articles=None,
separator_characters=None,
ignore_worls=None,
):
"""Returns the correct writing of a compound name, respecting the
first letters of the names in upper case."""
if articles is None:
articles = _ARTICLES
if separator_characters is None:
separator_characters = _SEPARATOR_CHARACTERS
if ignore_worls is None:
ignore_worls = _IGNORE_WORLS
new_full_name = full_name
if hasattr(new_full_name, 'strip'):
new_full_name = new_full_name.strip()
if not new_full_name:
return full_name
new_full_name = deep_unicode(new_full_name)
list_full_name = []
start_idx = 0
for step_idx, char in enumerate(list(new_full_name)):
if char in separator_characters:
list_full_name.extend(
[
_setting_word(
new_full_name[start_idx:step_idx],
separator_characters, ignore_worls,
articles if list_full_name else []
),
char
]
)
start_idx = step_idx + 1
list_full_name.append(
_setting_word(
new_full_name[start_idx:],
separator_characters, ignore_worls, articles
)
)
return ''.join(list_full_name)
|
python
|
def capitalize(
full_name,
articles=None,
separator_characters=None,
ignore_worls=None,
):
"""Returns the correct writing of a compound name, respecting the
first letters of the names in upper case."""
if articles is None:
articles = _ARTICLES
if separator_characters is None:
separator_characters = _SEPARATOR_CHARACTERS
if ignore_worls is None:
ignore_worls = _IGNORE_WORLS
new_full_name = full_name
if hasattr(new_full_name, 'strip'):
new_full_name = new_full_name.strip()
if not new_full_name:
return full_name
new_full_name = deep_unicode(new_full_name)
list_full_name = []
start_idx = 0
for step_idx, char in enumerate(list(new_full_name)):
if char in separator_characters:
list_full_name.extend(
[
_setting_word(
new_full_name[start_idx:step_idx],
separator_characters, ignore_worls,
articles if list_full_name else []
),
char
]
)
start_idx = step_idx + 1
list_full_name.append(
_setting_word(
new_full_name[start_idx:],
separator_characters, ignore_worls, articles
)
)
return ''.join(list_full_name)
|
[
"def",
"capitalize",
"(",
"full_name",
",",
"articles",
"=",
"None",
",",
"separator_characters",
"=",
"None",
",",
"ignore_worls",
"=",
"None",
",",
")",
":",
"if",
"articles",
"is",
"None",
":",
"articles",
"=",
"_ARTICLES",
"if",
"separator_characters",
"is",
"None",
":",
"separator_characters",
"=",
"_SEPARATOR_CHARACTERS",
"if",
"ignore_worls",
"is",
"None",
":",
"ignore_worls",
"=",
"_IGNORE_WORLS",
"new_full_name",
"=",
"full_name",
"if",
"hasattr",
"(",
"new_full_name",
",",
"'strip'",
")",
":",
"new_full_name",
"=",
"new_full_name",
".",
"strip",
"(",
")",
"if",
"not",
"new_full_name",
":",
"return",
"full_name",
"new_full_name",
"=",
"deep_unicode",
"(",
"new_full_name",
")",
"list_full_name",
"=",
"[",
"]",
"start_idx",
"=",
"0",
"for",
"step_idx",
",",
"char",
"in",
"enumerate",
"(",
"list",
"(",
"new_full_name",
")",
")",
":",
"if",
"char",
"in",
"separator_characters",
":",
"list_full_name",
".",
"extend",
"(",
"[",
"_setting_word",
"(",
"new_full_name",
"[",
"start_idx",
":",
"step_idx",
"]",
",",
"separator_characters",
",",
"ignore_worls",
",",
"articles",
"if",
"list_full_name",
"else",
"[",
"]",
")",
",",
"char",
"]",
")",
"start_idx",
"=",
"step_idx",
"+",
"1",
"list_full_name",
".",
"append",
"(",
"_setting_word",
"(",
"new_full_name",
"[",
"start_idx",
":",
"]",
",",
"separator_characters",
",",
"ignore_worls",
",",
"articles",
")",
")",
"return",
"''",
".",
"join",
"(",
"list_full_name",
")"
] |
Returns the correct writing of a compound name, respecting the
first letters of the names in upper case.
|
[
"Returns",
"the",
"correct",
"writing",
"of",
"a",
"compound",
"name",
"respecting",
"the",
"first",
"letters",
"of",
"the",
"names",
"in",
"upper",
"case",
"."
] |
98f288a3cffaecdb8aaee5154e783ba46849bccd
|
https://github.com/sxslex/capitalize-name/blob/98f288a3cffaecdb8aaee5154e783ba46849bccd/capitalize_name/__init__.py#L28-L69
|
240,740
|
sxslex/capitalize-name
|
capitalize_name/__init__.py
|
deep_unicode
|
def deep_unicode(s, encodings=None):
"""decode "DEEP" S using the codec registered for encoding."""
if encodings is None:
encodings = ['utf-8', 'latin-1']
if isinstance(s, (list, tuple)):
return [deep_unicode(i) for i in s]
if isinstance(s, dict):
return dict([
(deep_unicode(key), deep_unicode(s[key]))
for key in s
])
# in_dict = {}
# for key in s:
# in_dict[to_unicode(key)] = to_unicode(s[key])
# return in_dict
elif isinstance(s, str):
for encoding in encodings:
try:
return s.decode(encoding)
except:
pass
return s
|
python
|
def deep_unicode(s, encodings=None):
"""decode "DEEP" S using the codec registered for encoding."""
if encodings is None:
encodings = ['utf-8', 'latin-1']
if isinstance(s, (list, tuple)):
return [deep_unicode(i) for i in s]
if isinstance(s, dict):
return dict([
(deep_unicode(key), deep_unicode(s[key]))
for key in s
])
# in_dict = {}
# for key in s:
# in_dict[to_unicode(key)] = to_unicode(s[key])
# return in_dict
elif isinstance(s, str):
for encoding in encodings:
try:
return s.decode(encoding)
except:
pass
return s
|
[
"def",
"deep_unicode",
"(",
"s",
",",
"encodings",
"=",
"None",
")",
":",
"if",
"encodings",
"is",
"None",
":",
"encodings",
"=",
"[",
"'utf-8'",
",",
"'latin-1'",
"]",
"if",
"isinstance",
"(",
"s",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"return",
"[",
"deep_unicode",
"(",
"i",
")",
"for",
"i",
"in",
"s",
"]",
"if",
"isinstance",
"(",
"s",
",",
"dict",
")",
":",
"return",
"dict",
"(",
"[",
"(",
"deep_unicode",
"(",
"key",
")",
",",
"deep_unicode",
"(",
"s",
"[",
"key",
"]",
")",
")",
"for",
"key",
"in",
"s",
"]",
")",
"# in_dict = {}",
"# for key in s:",
"# in_dict[to_unicode(key)] = to_unicode(s[key])",
"# return in_dict",
"elif",
"isinstance",
"(",
"s",
",",
"str",
")",
":",
"for",
"encoding",
"in",
"encodings",
":",
"try",
":",
"return",
"s",
".",
"decode",
"(",
"encoding",
")",
"except",
":",
"pass",
"return",
"s"
] |
decode "DEEP" S using the codec registered for encoding.
|
[
"decode",
"DEEP",
"S",
"using",
"the",
"codec",
"registered",
"for",
"encoding",
"."
] |
98f288a3cffaecdb8aaee5154e783ba46849bccd
|
https://github.com/sxslex/capitalize-name/blob/98f288a3cffaecdb8aaee5154e783ba46849bccd/capitalize_name/__init__.py#L86-L107
|
240,741
|
sxslex/capitalize-name
|
capitalize_name/__init__.py
|
deep_encode
|
def deep_encode(s, encoding='utf-8', errors='strict'):
"""Encode "DEEP" S using the codec registered for encoding."""
# encoding defaults to the default encoding. errors may be given to set
# a different error handling scheme. Default is 'strict' meaning
# that encoding errors raise
# a UnicodeEncodeError. Other possible values are 'ignore', 'replace' and
# 'xmlcharrefreplace' as well as any other name registered with
# codecs.register_error that can handle UnicodeEncodeErrors.
s = deep_encode(s)
if sys.version_info.major < 3 and isinstance(s, unicode):
return s.encode(encoding, errors)
if isinstance(s, (list, tuple)):
return [deep_encode(i, encoding=encoding, errors=errors) for i in s]
if isinstance(s, dict):
return dict([
(
deep_encode(key, encoding=encoding, errors=errors),
deep_encode(s[key], encoding=encoding, errors=errors)
) for key in s
])
# new_dict = {}
# for key in s:
# new_dict[
# to_encode(key, encoding=encoding, errors=errors)
# ] = to_encode(s[key], encoding=encoding, errors=errors)
# return new_dict
return s
|
python
|
def deep_encode(s, encoding='utf-8', errors='strict'):
"""Encode "DEEP" S using the codec registered for encoding."""
# encoding defaults to the default encoding. errors may be given to set
# a different error handling scheme. Default is 'strict' meaning
# that encoding errors raise
# a UnicodeEncodeError. Other possible values are 'ignore', 'replace' and
# 'xmlcharrefreplace' as well as any other name registered with
# codecs.register_error that can handle UnicodeEncodeErrors.
s = deep_encode(s)
if sys.version_info.major < 3 and isinstance(s, unicode):
return s.encode(encoding, errors)
if isinstance(s, (list, tuple)):
return [deep_encode(i, encoding=encoding, errors=errors) for i in s]
if isinstance(s, dict):
return dict([
(
deep_encode(key, encoding=encoding, errors=errors),
deep_encode(s[key], encoding=encoding, errors=errors)
) for key in s
])
# new_dict = {}
# for key in s:
# new_dict[
# to_encode(key, encoding=encoding, errors=errors)
# ] = to_encode(s[key], encoding=encoding, errors=errors)
# return new_dict
return s
|
[
"def",
"deep_encode",
"(",
"s",
",",
"encoding",
"=",
"'utf-8'",
",",
"errors",
"=",
"'strict'",
")",
":",
"# encoding defaults to the default encoding. errors may be given to set",
"# a different error handling scheme. Default is 'strict' meaning",
"# that encoding errors raise",
"# a UnicodeEncodeError. Other possible values are 'ignore', 'replace' and",
"# 'xmlcharrefreplace' as well as any other name registered with",
"# codecs.register_error that can handle UnicodeEncodeErrors.",
"s",
"=",
"deep_encode",
"(",
"s",
")",
"if",
"sys",
".",
"version_info",
".",
"major",
"<",
"3",
"and",
"isinstance",
"(",
"s",
",",
"unicode",
")",
":",
"return",
"s",
".",
"encode",
"(",
"encoding",
",",
"errors",
")",
"if",
"isinstance",
"(",
"s",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"return",
"[",
"deep_encode",
"(",
"i",
",",
"encoding",
"=",
"encoding",
",",
"errors",
"=",
"errors",
")",
"for",
"i",
"in",
"s",
"]",
"if",
"isinstance",
"(",
"s",
",",
"dict",
")",
":",
"return",
"dict",
"(",
"[",
"(",
"deep_encode",
"(",
"key",
",",
"encoding",
"=",
"encoding",
",",
"errors",
"=",
"errors",
")",
",",
"deep_encode",
"(",
"s",
"[",
"key",
"]",
",",
"encoding",
"=",
"encoding",
",",
"errors",
"=",
"errors",
")",
")",
"for",
"key",
"in",
"s",
"]",
")",
"# new_dict = {}",
"# for key in s:",
"# new_dict[",
"# to_encode(key, encoding=encoding, errors=errors)",
"# ] = to_encode(s[key], encoding=encoding, errors=errors)",
"# return new_dict",
"return",
"s"
] |
Encode "DEEP" S using the codec registered for encoding.
|
[
"Encode",
"DEEP",
"S",
"using",
"the",
"codec",
"registered",
"for",
"encoding",
"."
] |
98f288a3cffaecdb8aaee5154e783ba46849bccd
|
https://github.com/sxslex/capitalize-name/blob/98f288a3cffaecdb8aaee5154e783ba46849bccd/capitalize_name/__init__.py#L110-L136
|
240,742
|
bretth/djset
|
djset/djset.py
|
DjBase.namespace
|
def namespace(self, key, glob=False):
"""Return a namespace for keyring"""
if not self.name:
self.name = os.environ['DJANGO_SETTINGS_MODULE']
ns = '.'.join([key, self._glob]) if glob else '.'.join([self.name, self._glob])
return ns
|
python
|
def namespace(self, key, glob=False):
"""Return a namespace for keyring"""
if not self.name:
self.name = os.environ['DJANGO_SETTINGS_MODULE']
ns = '.'.join([key, self._glob]) if glob else '.'.join([self.name, self._glob])
return ns
|
[
"def",
"namespace",
"(",
"self",
",",
"key",
",",
"glob",
"=",
"False",
")",
":",
"if",
"not",
"self",
".",
"name",
":",
"self",
".",
"name",
"=",
"os",
".",
"environ",
"[",
"'DJANGO_SETTINGS_MODULE'",
"]",
"ns",
"=",
"'.'",
".",
"join",
"(",
"[",
"key",
",",
"self",
".",
"_glob",
"]",
")",
"if",
"glob",
"else",
"'.'",
".",
"join",
"(",
"[",
"self",
".",
"name",
",",
"self",
".",
"_glob",
"]",
")",
"return",
"ns"
] |
Return a namespace for keyring
|
[
"Return",
"a",
"namespace",
"for",
"keyring"
] |
e04cbcadc311f6edec50a718415d0004aa304034
|
https://github.com/bretth/djset/blob/e04cbcadc311f6edec50a718415d0004aa304034/djset/djset.py#L31-L36
|
240,743
|
bretth/djset
|
djset/djset.py
|
DjBase.get
|
def get(self, key, prompt_default='', prompt_help=''):
"""Return a value from the environ or keyring"""
value = os.getenv(key)
if not value:
ns = self.namespace(key)
value = self.keyring.get_password(ns, key)
else:
ns = 'environ'
if not value:
ns = self.namespace(key, glob=True)
value = self.keyring.get_password(ns, key)
if not value:
ns = ''
if not value and self.prompt:
value = self._prompt_for_value(key, prompt_default, prompt_help)
if value:
self.set(key, value)
if ns:
self.kns[key] = ns
return value
|
python
|
def get(self, key, prompt_default='', prompt_help=''):
"""Return a value from the environ or keyring"""
value = os.getenv(key)
if not value:
ns = self.namespace(key)
value = self.keyring.get_password(ns, key)
else:
ns = 'environ'
if not value:
ns = self.namespace(key, glob=True)
value = self.keyring.get_password(ns, key)
if not value:
ns = ''
if not value and self.prompt:
value = self._prompt_for_value(key, prompt_default, prompt_help)
if value:
self.set(key, value)
if ns:
self.kns[key] = ns
return value
|
[
"def",
"get",
"(",
"self",
",",
"key",
",",
"prompt_default",
"=",
"''",
",",
"prompt_help",
"=",
"''",
")",
":",
"value",
"=",
"os",
".",
"getenv",
"(",
"key",
")",
"if",
"not",
"value",
":",
"ns",
"=",
"self",
".",
"namespace",
"(",
"key",
")",
"value",
"=",
"self",
".",
"keyring",
".",
"get_password",
"(",
"ns",
",",
"key",
")",
"else",
":",
"ns",
"=",
"'environ'",
"if",
"not",
"value",
":",
"ns",
"=",
"self",
".",
"namespace",
"(",
"key",
",",
"glob",
"=",
"True",
")",
"value",
"=",
"self",
".",
"keyring",
".",
"get_password",
"(",
"ns",
",",
"key",
")",
"if",
"not",
"value",
":",
"ns",
"=",
"''",
"if",
"not",
"value",
"and",
"self",
".",
"prompt",
":",
"value",
"=",
"self",
".",
"_prompt_for_value",
"(",
"key",
",",
"prompt_default",
",",
"prompt_help",
")",
"if",
"value",
":",
"self",
".",
"set",
"(",
"key",
",",
"value",
")",
"if",
"ns",
":",
"self",
".",
"kns",
"[",
"key",
"]",
"=",
"ns",
"return",
"value"
] |
Return a value from the environ or keyring
|
[
"Return",
"a",
"value",
"from",
"the",
"environ",
"or",
"keyring"
] |
e04cbcadc311f6edec50a718415d0004aa304034
|
https://github.com/bretth/djset/blob/e04cbcadc311f6edec50a718415d0004aa304034/djset/djset.py#L38-L57
|
240,744
|
bretth/djset
|
djset/djset.py
|
DjBase.set
|
def set(self, key, value, glob=False):
"""Set the key value pair in a local or global namespace"""
ns = self.namespace(key, glob)
self.keyring.set_password(ns, key, value)
|
python
|
def set(self, key, value, glob=False):
"""Set the key value pair in a local or global namespace"""
ns = self.namespace(key, glob)
self.keyring.set_password(ns, key, value)
|
[
"def",
"set",
"(",
"self",
",",
"key",
",",
"value",
",",
"glob",
"=",
"False",
")",
":",
"ns",
"=",
"self",
".",
"namespace",
"(",
"key",
",",
"glob",
")",
"self",
".",
"keyring",
".",
"set_password",
"(",
"ns",
",",
"key",
",",
"value",
")"
] |
Set the key value pair in a local or global namespace
|
[
"Set",
"the",
"key",
"value",
"pair",
"in",
"a",
"local",
"or",
"global",
"namespace"
] |
e04cbcadc311f6edec50a718415d0004aa304034
|
https://github.com/bretth/djset/blob/e04cbcadc311f6edec50a718415d0004aa304034/djset/djset.py#L59-L62
|
240,745
|
bretth/djset
|
djset/djset.py
|
DjBase.remove
|
def remove(self, key, glob=False):
"""Remove key value pair in a local or global namespace."""
ns = self.namespace(key, glob)
try:
self.keyring.delete_password(ns, key)
except PasswordDeleteError: # OSX and gnome have no delete method
self.set(key, '', glob)
|
python
|
def remove(self, key, glob=False):
"""Remove key value pair in a local or global namespace."""
ns = self.namespace(key, glob)
try:
self.keyring.delete_password(ns, key)
except PasswordDeleteError: # OSX and gnome have no delete method
self.set(key, '', glob)
|
[
"def",
"remove",
"(",
"self",
",",
"key",
",",
"glob",
"=",
"False",
")",
":",
"ns",
"=",
"self",
".",
"namespace",
"(",
"key",
",",
"glob",
")",
"try",
":",
"self",
".",
"keyring",
".",
"delete_password",
"(",
"ns",
",",
"key",
")",
"except",
"PasswordDeleteError",
":",
"# OSX and gnome have no delete method ",
"self",
".",
"set",
"(",
"key",
",",
"''",
",",
"glob",
")"
] |
Remove key value pair in a local or global namespace.
|
[
"Remove",
"key",
"value",
"pair",
"in",
"a",
"local",
"or",
"global",
"namespace",
"."
] |
e04cbcadc311f6edec50a718415d0004aa304034
|
https://github.com/bretth/djset/blob/e04cbcadc311f6edec50a718415d0004aa304034/djset/djset.py#L64-L70
|
240,746
|
bretth/djset
|
djset/djset.py
|
DjSecret.get
|
def get(self, key, prompt_default='', prompt_help=''):
"""
Return the value for key from the environment or keyring.
The keyring value is resolved from a local namespace or a global one.
"""
value = super(DjSecret, self).get(key, prompt_default, prompt_help='')
if not value and self.raise_on_none:
error_msg = "The %s setting is undefined in the environment and djset %s" % (key, self._glob)
raise self.raise_on_none(error_msg)
return value
|
python
|
def get(self, key, prompt_default='', prompt_help=''):
"""
Return the value for key from the environment or keyring.
The keyring value is resolved from a local namespace or a global one.
"""
value = super(DjSecret, self).get(key, prompt_default, prompt_help='')
if not value and self.raise_on_none:
error_msg = "The %s setting is undefined in the environment and djset %s" % (key, self._glob)
raise self.raise_on_none(error_msg)
return value
|
[
"def",
"get",
"(",
"self",
",",
"key",
",",
"prompt_default",
"=",
"''",
",",
"prompt_help",
"=",
"''",
")",
":",
"value",
"=",
"super",
"(",
"DjSecret",
",",
"self",
")",
".",
"get",
"(",
"key",
",",
"prompt_default",
",",
"prompt_help",
"=",
"''",
")",
"if",
"not",
"value",
"and",
"self",
".",
"raise_on_none",
":",
"error_msg",
"=",
"\"The %s setting is undefined in the environment and djset %s\"",
"%",
"(",
"key",
",",
"self",
".",
"_glob",
")",
"raise",
"self",
".",
"raise_on_none",
"(",
"error_msg",
")",
"return",
"value"
] |
Return the value for key from the environment or keyring.
The keyring value is resolved from a local namespace or a global one.
|
[
"Return",
"the",
"value",
"for",
"key",
"from",
"the",
"environment",
"or",
"keyring",
".",
"The",
"keyring",
"value",
"is",
"resolved",
"from",
"a",
"local",
"namespace",
"or",
"a",
"global",
"one",
"."
] |
e04cbcadc311f6edec50a718415d0004aa304034
|
https://github.com/bretth/djset/blob/e04cbcadc311f6edec50a718415d0004aa304034/djset/djset.py#L88-L97
|
240,747
|
IrvKalb/pygwidgets
|
pygwidgets/pygwidgets.py
|
PygWidgetsButton.draw
|
def draw(self):
"""Draws the button in its current state.
Should be called every time through the main loop
"""
if not self.visible:
return
# Blit the button's current appearance to the surface.
if self.isEnabled:
if self.mouseIsDown:
if self.mouseOverButton and self.lastMouseDownOverButton:
self.window.blit(self.surfaceDown, self.loc)
else:
self.window.blit(self.surfaceUp, self.loc)
else: # mouse is up
if self.mouseOverButton:
self.window.blit(self.surfaceOver, self.loc)
else:
self.window.blit(self.surfaceUp, self.loc)
else:
self.window.blit(self.surfaceDisabled, self.loc)
|
python
|
def draw(self):
"""Draws the button in its current state.
Should be called every time through the main loop
"""
if not self.visible:
return
# Blit the button's current appearance to the surface.
if self.isEnabled:
if self.mouseIsDown:
if self.mouseOverButton and self.lastMouseDownOverButton:
self.window.blit(self.surfaceDown, self.loc)
else:
self.window.blit(self.surfaceUp, self.loc)
else: # mouse is up
if self.mouseOverButton:
self.window.blit(self.surfaceOver, self.loc)
else:
self.window.blit(self.surfaceUp, self.loc)
else:
self.window.blit(self.surfaceDisabled, self.loc)
|
[
"def",
"draw",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"visible",
":",
"return",
"# Blit the button's current appearance to the surface.\r",
"if",
"self",
".",
"isEnabled",
":",
"if",
"self",
".",
"mouseIsDown",
":",
"if",
"self",
".",
"mouseOverButton",
"and",
"self",
".",
"lastMouseDownOverButton",
":",
"self",
".",
"window",
".",
"blit",
"(",
"self",
".",
"surfaceDown",
",",
"self",
".",
"loc",
")",
"else",
":",
"self",
".",
"window",
".",
"blit",
"(",
"self",
".",
"surfaceUp",
",",
"self",
".",
"loc",
")",
"else",
":",
"# mouse is up\r",
"if",
"self",
".",
"mouseOverButton",
":",
"self",
".",
"window",
".",
"blit",
"(",
"self",
".",
"surfaceOver",
",",
"self",
".",
"loc",
")",
"else",
":",
"self",
".",
"window",
".",
"blit",
"(",
"self",
".",
"surfaceUp",
",",
"self",
".",
"loc",
")",
"else",
":",
"self",
".",
"window",
".",
"blit",
"(",
"self",
".",
"surfaceDisabled",
",",
"self",
".",
"loc",
")"
] |
Draws the button in its current state.
Should be called every time through the main loop
|
[
"Draws",
"the",
"button",
"in",
"its",
"current",
"state",
".",
"Should",
"be",
"called",
"every",
"time",
"through",
"the",
"main",
"loop"
] |
a830d8885d4d209e471cb53816277d30db56273c
|
https://github.com/IrvKalb/pygwidgets/blob/a830d8885d4d209e471cb53816277d30db56273c/pygwidgets/pygwidgets.py#L444-L467
|
240,748
|
IrvKalb/pygwidgets
|
pygwidgets/pygwidgets.py
|
PygWidgetsButton._debug
|
def _debug(self):
"""This is just for debugging, so we can see what buttons would be drawn.
Not intended to be used in production."""
self.window.blit(self.surfaceUp, (self.loc[0], 10))
self.window.blit(self.surfaceOver, (self.loc[0], 60))
self.window.blit(self.surfaceDown, (self.loc[0], 110))
self.window.blit(self.surfaceDisabled, (self.loc[0], 160))
|
python
|
def _debug(self):
"""This is just for debugging, so we can see what buttons would be drawn.
Not intended to be used in production."""
self.window.blit(self.surfaceUp, (self.loc[0], 10))
self.window.blit(self.surfaceOver, (self.loc[0], 60))
self.window.blit(self.surfaceDown, (self.loc[0], 110))
self.window.blit(self.surfaceDisabled, (self.loc[0], 160))
|
[
"def",
"_debug",
"(",
"self",
")",
":",
"self",
".",
"window",
".",
"blit",
"(",
"self",
".",
"surfaceUp",
",",
"(",
"self",
".",
"loc",
"[",
"0",
"]",
",",
"10",
")",
")",
"self",
".",
"window",
".",
"blit",
"(",
"self",
".",
"surfaceOver",
",",
"(",
"self",
".",
"loc",
"[",
"0",
"]",
",",
"60",
")",
")",
"self",
".",
"window",
".",
"blit",
"(",
"self",
".",
"surfaceDown",
",",
"(",
"self",
".",
"loc",
"[",
"0",
"]",
",",
"110",
")",
")",
"self",
".",
"window",
".",
"blit",
"(",
"self",
".",
"surfaceDisabled",
",",
"(",
"self",
".",
"loc",
"[",
"0",
"]",
",",
"160",
")",
")"
] |
This is just for debugging, so we can see what buttons would be drawn.
Not intended to be used in production.
|
[
"This",
"is",
"just",
"for",
"debugging",
"so",
"we",
"can",
"see",
"what",
"buttons",
"would",
"be",
"drawn",
".",
"Not",
"intended",
"to",
"be",
"used",
"in",
"production",
"."
] |
a830d8885d4d209e471cb53816277d30db56273c
|
https://github.com/IrvKalb/pygwidgets/blob/a830d8885d4d209e471cb53816277d30db56273c/pygwidgets/pygwidgets.py#L470-L477
|
240,749
|
IrvKalb/pygwidgets
|
pygwidgets/pygwidgets.py
|
PygWidgetsCheckBox.draw
|
def draw(self):
"""Draws the checkbox."""
if not self.visible:
return
# Blit the current checkbox's image.
if self.isEnabled:
if self.mouseIsDown and self.lastMouseDownOverButton and self.mouseOverButton:
if self.value:
self.window.blit(self.surfaceOnDown, self.loc)
else:
self.window.blit(self.surfaceOffDown, self.loc)
else:
if self.value:
self.window.blit(self.surfaceOn, self.loc)
else:
self.window.blit(self.surfaceOff, self.loc)
else:
if self.value:
self.window.blit(self.surfaceOnDisabled, self.loc)
else:
self.window.blit(self.surfaceOffDisabled, self.loc)
|
python
|
def draw(self):
"""Draws the checkbox."""
if not self.visible:
return
# Blit the current checkbox's image.
if self.isEnabled:
if self.mouseIsDown and self.lastMouseDownOverButton and self.mouseOverButton:
if self.value:
self.window.blit(self.surfaceOnDown, self.loc)
else:
self.window.blit(self.surfaceOffDown, self.loc)
else:
if self.value:
self.window.blit(self.surfaceOn, self.loc)
else:
self.window.blit(self.surfaceOff, self.loc)
else:
if self.value:
self.window.blit(self.surfaceOnDisabled, self.loc)
else:
self.window.blit(self.surfaceOffDisabled, self.loc)
|
[
"def",
"draw",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"visible",
":",
"return",
"# Blit the current checkbox's image.\r",
"if",
"self",
".",
"isEnabled",
":",
"if",
"self",
".",
"mouseIsDown",
"and",
"self",
".",
"lastMouseDownOverButton",
"and",
"self",
".",
"mouseOverButton",
":",
"if",
"self",
".",
"value",
":",
"self",
".",
"window",
".",
"blit",
"(",
"self",
".",
"surfaceOnDown",
",",
"self",
".",
"loc",
")",
"else",
":",
"self",
".",
"window",
".",
"blit",
"(",
"self",
".",
"surfaceOffDown",
",",
"self",
".",
"loc",
")",
"else",
":",
"if",
"self",
".",
"value",
":",
"self",
".",
"window",
".",
"blit",
"(",
"self",
".",
"surfaceOn",
",",
"self",
".",
"loc",
")",
"else",
":",
"self",
".",
"window",
".",
"blit",
"(",
"self",
".",
"surfaceOff",
",",
"self",
".",
"loc",
")",
"else",
":",
"if",
"self",
".",
"value",
":",
"self",
".",
"window",
".",
"blit",
"(",
"self",
".",
"surfaceOnDisabled",
",",
"self",
".",
"loc",
")",
"else",
":",
"self",
".",
"window",
".",
"blit",
"(",
"self",
".",
"surfaceOffDisabled",
",",
"self",
".",
"loc",
")"
] |
Draws the checkbox.
|
[
"Draws",
"the",
"checkbox",
"."
] |
a830d8885d4d209e471cb53816277d30db56273c
|
https://github.com/IrvKalb/pygwidgets/blob/a830d8885d4d209e471cb53816277d30db56273c/pygwidgets/pygwidgets.py#L839-L863
|
240,750
|
IrvKalb/pygwidgets
|
pygwidgets/pygwidgets.py
|
PygWidgetsRadioButton.getSelectedRadioButton
|
def getSelectedRadioButton(self):
"""Returns the nickname of the currently selected radio button."""
radioButtonListInGroup = PygWidgetsRadioButton.__PygWidgets__Radio__Buttons__Groups__Dicts__[self.group]
for radioButton in radioButtonListInGroup:
if radioButton.getValue():
selectedNickname = radioButton.getNickname()
return selectedNickname
raise Exception('No radio button was selected')
|
python
|
def getSelectedRadioButton(self):
"""Returns the nickname of the currently selected radio button."""
radioButtonListInGroup = PygWidgetsRadioButton.__PygWidgets__Radio__Buttons__Groups__Dicts__[self.group]
for radioButton in radioButtonListInGroup:
if radioButton.getValue():
selectedNickname = radioButton.getNickname()
return selectedNickname
raise Exception('No radio button was selected')
|
[
"def",
"getSelectedRadioButton",
"(",
"self",
")",
":",
"radioButtonListInGroup",
"=",
"PygWidgetsRadioButton",
".",
"__PygWidgets__Radio__Buttons__Groups__Dicts__",
"[",
"self",
".",
"group",
"]",
"for",
"radioButton",
"in",
"radioButtonListInGroup",
":",
"if",
"radioButton",
".",
"getValue",
"(",
")",
":",
"selectedNickname",
"=",
"radioButton",
".",
"getNickname",
"(",
")",
"return",
"selectedNickname",
"raise",
"Exception",
"(",
"'No radio button was selected'",
")"
] |
Returns the nickname of the currently selected radio button.
|
[
"Returns",
"the",
"nickname",
"of",
"the",
"currently",
"selected",
"radio",
"button",
"."
] |
a830d8885d4d209e471cb53816277d30db56273c
|
https://github.com/IrvKalb/pygwidgets/blob/a830d8885d4d209e471cb53816277d30db56273c/pygwidgets/pygwidgets.py#L1248-L1256
|
240,751
|
IrvKalb/pygwidgets
|
pygwidgets/pygwidgets.py
|
PygWidgetsRadioButton.enableGroup
|
def enableGroup(self):
"""Enables all radio buttons in the group."""
radioButtonListInGroup = PygWidgetsRadioButton.__PygWidgets__Radio__Buttons__Groups__Dicts__[self.group]
for radioButton in radioButtonListInGroup:
radioButton.enable()
|
python
|
def enableGroup(self):
"""Enables all radio buttons in the group."""
radioButtonListInGroup = PygWidgetsRadioButton.__PygWidgets__Radio__Buttons__Groups__Dicts__[self.group]
for radioButton in radioButtonListInGroup:
radioButton.enable()
|
[
"def",
"enableGroup",
"(",
"self",
")",
":",
"radioButtonListInGroup",
"=",
"PygWidgetsRadioButton",
".",
"__PygWidgets__Radio__Buttons__Groups__Dicts__",
"[",
"self",
".",
"group",
"]",
"for",
"radioButton",
"in",
"radioButtonListInGroup",
":",
"radioButton",
".",
"enable",
"(",
")"
] |
Enables all radio buttons in the group.
|
[
"Enables",
"all",
"radio",
"buttons",
"in",
"the",
"group",
"."
] |
a830d8885d4d209e471cb53816277d30db56273c
|
https://github.com/IrvKalb/pygwidgets/blob/a830d8885d4d209e471cb53816277d30db56273c/pygwidgets/pygwidgets.py#L1289-L1293
|
240,752
|
IrvKalb/pygwidgets
|
pygwidgets/pygwidgets.py
|
PygWidgetsRadioButton.disableGroup
|
def disableGroup(self):
"""Disables all radio buttons in the group"""
radioButtonListInGroup = PygWidgetsRadioButton.__PygWidgets__Radio__Buttons__Groups__Dicts__[self.group]
for radioButton in radioButtonListInGroup:
radioButton.disable()
|
python
|
def disableGroup(self):
"""Disables all radio buttons in the group"""
radioButtonListInGroup = PygWidgetsRadioButton.__PygWidgets__Radio__Buttons__Groups__Dicts__[self.group]
for radioButton in radioButtonListInGroup:
radioButton.disable()
|
[
"def",
"disableGroup",
"(",
"self",
")",
":",
"radioButtonListInGroup",
"=",
"PygWidgetsRadioButton",
".",
"__PygWidgets__Radio__Buttons__Groups__Dicts__",
"[",
"self",
".",
"group",
"]",
"for",
"radioButton",
"in",
"radioButtonListInGroup",
":",
"radioButton",
".",
"disable",
"(",
")"
] |
Disables all radio buttons in the group
|
[
"Disables",
"all",
"radio",
"buttons",
"in",
"the",
"group"
] |
a830d8885d4d209e471cb53816277d30db56273c
|
https://github.com/IrvKalb/pygwidgets/blob/a830d8885d4d209e471cb53816277d30db56273c/pygwidgets/pygwidgets.py#L1301-L1305
|
240,753
|
IrvKalb/pygwidgets
|
pygwidgets/pygwidgets.py
|
DisplayText.draw
|
def draw(self):
"""Draws the current text in the window"""
if not self.visible:
return
self.window.blit(self.textImage, self.loc)
|
python
|
def draw(self):
"""Draws the current text in the window"""
if not self.visible:
return
self.window.blit(self.textImage, self.loc)
|
[
"def",
"draw",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"visible",
":",
"return",
"self",
".",
"window",
".",
"blit",
"(",
"self",
".",
"textImage",
",",
"self",
".",
"loc",
")"
] |
Draws the current text in the window
|
[
"Draws",
"the",
"current",
"text",
"in",
"the",
"window"
] |
a830d8885d4d209e471cb53816277d30db56273c
|
https://github.com/IrvKalb/pygwidgets/blob/a830d8885d4d209e471cb53816277d30db56273c/pygwidgets/pygwidgets.py#L1678-L1683
|
240,754
|
IrvKalb/pygwidgets
|
pygwidgets/pygwidgets.py
|
InputText._updateImage
|
def _updateImage(self):
"""Internal method to render text as an image."""
# Fill the background of the image
if self.backgroundColor is not None:
self.textImage.fill(self.backgroundColor)
# Render the text as a single line, and blit it onto the textImage surface
if self.mask is None:
lineSurface = self.font.render(self.text, True, self.textColor)
else:
nChars = len(self.text)
maskedText = self.mask * nChars
lineSurface = self.font.render(maskedText, True, self.textColor)
self.textImage.blit(lineSurface, (0, 0))
|
python
|
def _updateImage(self):
"""Internal method to render text as an image."""
# Fill the background of the image
if self.backgroundColor is not None:
self.textImage.fill(self.backgroundColor)
# Render the text as a single line, and blit it onto the textImage surface
if self.mask is None:
lineSurface = self.font.render(self.text, True, self.textColor)
else:
nChars = len(self.text)
maskedText = self.mask * nChars
lineSurface = self.font.render(maskedText, True, self.textColor)
self.textImage.blit(lineSurface, (0, 0))
|
[
"def",
"_updateImage",
"(",
"self",
")",
":",
"# Fill the background of the image\r",
"if",
"self",
".",
"backgroundColor",
"is",
"not",
"None",
":",
"self",
".",
"textImage",
".",
"fill",
"(",
"self",
".",
"backgroundColor",
")",
"# Render the text as a single line, and blit it onto the textImage surface\r",
"if",
"self",
".",
"mask",
"is",
"None",
":",
"lineSurface",
"=",
"self",
".",
"font",
".",
"render",
"(",
"self",
".",
"text",
",",
"True",
",",
"self",
".",
"textColor",
")",
"else",
":",
"nChars",
"=",
"len",
"(",
"self",
".",
"text",
")",
"maskedText",
"=",
"self",
".",
"mask",
"*",
"nChars",
"lineSurface",
"=",
"self",
".",
"font",
".",
"render",
"(",
"maskedText",
",",
"True",
",",
"self",
".",
"textColor",
")",
"self",
".",
"textImage",
".",
"blit",
"(",
"lineSurface",
",",
"(",
"0",
",",
"0",
")",
")"
] |
Internal method to render text as an image.
|
[
"Internal",
"method",
"to",
"render",
"text",
"as",
"an",
"image",
"."
] |
a830d8885d4d209e471cb53816277d30db56273c
|
https://github.com/IrvKalb/pygwidgets/blob/a830d8885d4d209e471cb53816277d30db56273c/pygwidgets/pygwidgets.py#L1783-L1796
|
240,755
|
IrvKalb/pygwidgets
|
pygwidgets/pygwidgets.py
|
InputText.draw
|
def draw(self):
"""Draws the Text in the window."""
if not self.visible:
return
# If this input text has focus, draw an outline around the text image
if self.focus:
pygame.draw.rect(self.window, self.focusColor, self.focusedImageRect, 1)
# Blit in the image of text (set earlier in _updateImage)
self.window.blit(self.textImage, self.loc)
# If this field has focus, see if it is time to blink the cursor
if self.focus:
self.cursorMsCounter = self.cursorMsCounter + self.clock.get_time()
if self.cursorMsCounter >= self.cursorSwitchMs:
self.cursorMsCounter = self.cursorMsCounter % self.cursorSwitchMs
self.cursorVisible = not self.cursorVisible
if self.cursorVisible:
cursorOffset = self.font.size(self.text[:self.cursorPosition])[0]
if self.cursorPosition > 0: # Try to get between characters
cursorOffset = cursorOffset - 1
if cursorOffset < self.width: # if the loc is within the text area, draw it
self.cursorLoc[0] = self.loc[0] + cursorOffset
self.window.blit(self.cursorSurface, self.cursorLoc)
self.clock.tick()
|
python
|
def draw(self):
"""Draws the Text in the window."""
if not self.visible:
return
# If this input text has focus, draw an outline around the text image
if self.focus:
pygame.draw.rect(self.window, self.focusColor, self.focusedImageRect, 1)
# Blit in the image of text (set earlier in _updateImage)
self.window.blit(self.textImage, self.loc)
# If this field has focus, see if it is time to blink the cursor
if self.focus:
self.cursorMsCounter = self.cursorMsCounter + self.clock.get_time()
if self.cursorMsCounter >= self.cursorSwitchMs:
self.cursorMsCounter = self.cursorMsCounter % self.cursorSwitchMs
self.cursorVisible = not self.cursorVisible
if self.cursorVisible:
cursorOffset = self.font.size(self.text[:self.cursorPosition])[0]
if self.cursorPosition > 0: # Try to get between characters
cursorOffset = cursorOffset - 1
if cursorOffset < self.width: # if the loc is within the text area, draw it
self.cursorLoc[0] = self.loc[0] + cursorOffset
self.window.blit(self.cursorSurface, self.cursorLoc)
self.clock.tick()
|
[
"def",
"draw",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"visible",
":",
"return",
"# If this input text has focus, draw an outline around the text image\r",
"if",
"self",
".",
"focus",
":",
"pygame",
".",
"draw",
".",
"rect",
"(",
"self",
".",
"window",
",",
"self",
".",
"focusColor",
",",
"self",
".",
"focusedImageRect",
",",
"1",
")",
"# Blit in the image of text (set earlier in _updateImage)\r",
"self",
".",
"window",
".",
"blit",
"(",
"self",
".",
"textImage",
",",
"self",
".",
"loc",
")",
"# If this field has focus, see if it is time to blink the cursor\r",
"if",
"self",
".",
"focus",
":",
"self",
".",
"cursorMsCounter",
"=",
"self",
".",
"cursorMsCounter",
"+",
"self",
".",
"clock",
".",
"get_time",
"(",
")",
"if",
"self",
".",
"cursorMsCounter",
">=",
"self",
".",
"cursorSwitchMs",
":",
"self",
".",
"cursorMsCounter",
"=",
"self",
".",
"cursorMsCounter",
"%",
"self",
".",
"cursorSwitchMs",
"self",
".",
"cursorVisible",
"=",
"not",
"self",
".",
"cursorVisible",
"if",
"self",
".",
"cursorVisible",
":",
"cursorOffset",
"=",
"self",
".",
"font",
".",
"size",
"(",
"self",
".",
"text",
"[",
":",
"self",
".",
"cursorPosition",
"]",
")",
"[",
"0",
"]",
"if",
"self",
".",
"cursorPosition",
">",
"0",
":",
"# Try to get between characters\r",
"cursorOffset",
"=",
"cursorOffset",
"-",
"1",
"if",
"cursorOffset",
"<",
"self",
".",
"width",
":",
"# if the loc is within the text area, draw it\r",
"self",
".",
"cursorLoc",
"[",
"0",
"]",
"=",
"self",
".",
"loc",
"[",
"0",
"]",
"+",
"cursorOffset",
"self",
".",
"window",
".",
"blit",
"(",
"self",
".",
"cursorSurface",
",",
"self",
".",
"cursorLoc",
")",
"self",
".",
"clock",
".",
"tick",
"(",
")"
] |
Draws the Text in the window.
|
[
"Draws",
"the",
"Text",
"in",
"the",
"window",
"."
] |
a830d8885d4d209e471cb53816277d30db56273c
|
https://github.com/IrvKalb/pygwidgets/blob/a830d8885d4d209e471cb53816277d30db56273c/pygwidgets/pygwidgets.py#L1924-L1951
|
240,756
|
IrvKalb/pygwidgets
|
pygwidgets/pygwidgets.py
|
InputText.setValue
|
def setValue(self, newText):
"""Sets new text into the field"""
self.text = newText
self.cursorPosition = len(self.text)
self._updateImage()
|
python
|
def setValue(self, newText):
"""Sets new text into the field"""
self.text = newText
self.cursorPosition = len(self.text)
self._updateImage()
|
[
"def",
"setValue",
"(",
"self",
",",
"newText",
")",
":",
"self",
".",
"text",
"=",
"newText",
"self",
".",
"cursorPosition",
"=",
"len",
"(",
"self",
".",
"text",
")",
"self",
".",
"_updateImage",
"(",
")"
] |
Sets new text into the field
|
[
"Sets",
"new",
"text",
"into",
"the",
"field"
] |
a830d8885d4d209e471cb53816277d30db56273c
|
https://github.com/IrvKalb/pygwidgets/blob/a830d8885d4d209e471cb53816277d30db56273c/pygwidgets/pygwidgets.py#L1963-L1967
|
240,757
|
IrvKalb/pygwidgets
|
pygwidgets/pygwidgets.py
|
InputText.clearText
|
def clearText(self, keepFocus=False):
"""Clear the text in the field"""
self.text = ''
self.focus = keepFocus
self._updateImage()
|
python
|
def clearText(self, keepFocus=False):
"""Clear the text in the field"""
self.text = ''
self.focus = keepFocus
self._updateImage()
|
[
"def",
"clearText",
"(",
"self",
",",
"keepFocus",
"=",
"False",
")",
":",
"self",
".",
"text",
"=",
"''",
"self",
".",
"focus",
"=",
"keepFocus",
"self",
".",
"_updateImage",
"(",
")"
] |
Clear the text in the field
|
[
"Clear",
"the",
"text",
"in",
"the",
"field"
] |
a830d8885d4d209e471cb53816277d30db56273c
|
https://github.com/IrvKalb/pygwidgets/blob/a830d8885d4d209e471cb53816277d30db56273c/pygwidgets/pygwidgets.py#L1977-L1981
|
240,758
|
IrvKalb/pygwidgets
|
pygwidgets/pygwidgets.py
|
Dragger.resetToPreviousLoc
|
def resetToPreviousLoc(self):
"""Resets the loc of the dragger to place where dragging started.
This could be used in a test situation if the dragger was dragged to an incorrect location.
"""
self.rect.left = self.startDraggingX
self.rect.top = self.startDraggingY
|
python
|
def resetToPreviousLoc(self):
"""Resets the loc of the dragger to place where dragging started.
This could be used in a test situation if the dragger was dragged to an incorrect location.
"""
self.rect.left = self.startDraggingX
self.rect.top = self.startDraggingY
|
[
"def",
"resetToPreviousLoc",
"(",
"self",
")",
":",
"self",
".",
"rect",
".",
"left",
"=",
"self",
".",
"startDraggingX",
"self",
".",
"rect",
".",
"top",
"=",
"self",
".",
"startDraggingY"
] |
Resets the loc of the dragger to place where dragging started.
This could be used in a test situation if the dragger was dragged to an incorrect location.
|
[
"Resets",
"the",
"loc",
"of",
"the",
"dragger",
"to",
"place",
"where",
"dragging",
"started",
".",
"This",
"could",
"be",
"used",
"in",
"a",
"test",
"situation",
"if",
"the",
"dragger",
"was",
"dragged",
"to",
"an",
"incorrect",
"location",
"."
] |
a830d8885d4d209e471cb53816277d30db56273c
|
https://github.com/IrvKalb/pygwidgets/blob/a830d8885d4d209e471cb53816277d30db56273c/pygwidgets/pygwidgets.py#L2124-L2131
|
240,759
|
IrvKalb/pygwidgets
|
pygwidgets/pygwidgets.py
|
Dragger.draw
|
def draw(self):
"""Draws the dragger at the current mouse location.
Should be called in every frame.
"""
if not self.visible:
return
if self.isEnabled:
# Draw the dragger's current appearance to the window.
if self.dragging:
self.window.blit(self.surfaceDown, self.rect)
else: # mouse is up
if self.mouseOver:
self.window.blit(self.surfaceOver, self.rect)
else:
self.window.blit(self.surfaceUp, self.rect)
else:
self.window.blit(self.surfaceDisabled, self.rect)
|
python
|
def draw(self):
"""Draws the dragger at the current mouse location.
Should be called in every frame.
"""
if not self.visible:
return
if self.isEnabled:
# Draw the dragger's current appearance to the window.
if self.dragging:
self.window.blit(self.surfaceDown, self.rect)
else: # mouse is up
if self.mouseOver:
self.window.blit(self.surfaceOver, self.rect)
else:
self.window.blit(self.surfaceUp, self.rect)
else:
self.window.blit(self.surfaceDisabled, self.rect)
|
[
"def",
"draw",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"visible",
":",
"return",
"if",
"self",
".",
"isEnabled",
":",
"# Draw the dragger's current appearance to the window.\r",
"if",
"self",
".",
"dragging",
":",
"self",
".",
"window",
".",
"blit",
"(",
"self",
".",
"surfaceDown",
",",
"self",
".",
"rect",
")",
"else",
":",
"# mouse is up\r",
"if",
"self",
".",
"mouseOver",
":",
"self",
".",
"window",
".",
"blit",
"(",
"self",
".",
"surfaceOver",
",",
"self",
".",
"rect",
")",
"else",
":",
"self",
".",
"window",
".",
"blit",
"(",
"self",
".",
"surfaceUp",
",",
"self",
".",
"rect",
")",
"else",
":",
"self",
".",
"window",
".",
"blit",
"(",
"self",
".",
"surfaceDisabled",
",",
"self",
".",
"rect",
")"
] |
Draws the dragger at the current mouse location.
Should be called in every frame.
|
[
"Draws",
"the",
"dragger",
"at",
"the",
"current",
"mouse",
"location",
".",
"Should",
"be",
"called",
"in",
"every",
"frame",
"."
] |
a830d8885d4d209e471cb53816277d30db56273c
|
https://github.com/IrvKalb/pygwidgets/blob/a830d8885d4d209e471cb53816277d30db56273c/pygwidgets/pygwidgets.py#L2133-L2153
|
240,760
|
IrvKalb/pygwidgets
|
pygwidgets/pygwidgets.py
|
Image.flipHorizontal
|
def flipHorizontal(self):
""" flips an image object horizontally
"""
self.flipH = not self.flipH
self._transmogrophy(self.angle, self.percent, self.scaleFromCenter, self.flipH, self.flipV)
|
python
|
def flipHorizontal(self):
""" flips an image object horizontally
"""
self.flipH = not self.flipH
self._transmogrophy(self.angle, self.percent, self.scaleFromCenter, self.flipH, self.flipV)
|
[
"def",
"flipHorizontal",
"(",
"self",
")",
":",
"self",
".",
"flipH",
"=",
"not",
"self",
".",
"flipH",
"self",
".",
"_transmogrophy",
"(",
"self",
".",
"angle",
",",
"self",
".",
"percent",
",",
"self",
".",
"scaleFromCenter",
",",
"self",
".",
"flipH",
",",
"self",
".",
"flipV",
")"
] |
flips an image object horizontally
|
[
"flips",
"an",
"image",
"object",
"horizontally"
] |
a830d8885d4d209e471cb53816277d30db56273c
|
https://github.com/IrvKalb/pygwidgets/blob/a830d8885d4d209e471cb53816277d30db56273c/pygwidgets/pygwidgets.py#L2205-L2210
|
240,761
|
IrvKalb/pygwidgets
|
pygwidgets/pygwidgets.py
|
Image.flipVertical
|
def flipVertical(self):
""" flips an image object vertically
"""
self.flipV = not self.flipV
self._transmogrophy(self.angle, self.percent, self.scaleFromCenter, self.flipH, self.flipV)
|
python
|
def flipVertical(self):
""" flips an image object vertically
"""
self.flipV = not self.flipV
self._transmogrophy(self.angle, self.percent, self.scaleFromCenter, self.flipH, self.flipV)
|
[
"def",
"flipVertical",
"(",
"self",
")",
":",
"self",
".",
"flipV",
"=",
"not",
"self",
".",
"flipV",
"self",
".",
"_transmogrophy",
"(",
"self",
".",
"angle",
",",
"self",
".",
"percent",
",",
"self",
".",
"scaleFromCenter",
",",
"self",
".",
"flipH",
",",
"self",
".",
"flipV",
")"
] |
flips an image object vertically
|
[
"flips",
"an",
"image",
"object",
"vertically"
] |
a830d8885d4d209e471cb53816277d30db56273c
|
https://github.com/IrvKalb/pygwidgets/blob/a830d8885d4d209e471cb53816277d30db56273c/pygwidgets/pygwidgets.py#L2212-L2217
|
240,762
|
IrvKalb/pygwidgets
|
pygwidgets/pygwidgets.py
|
Image._transmogrophy
|
def _transmogrophy(self, angle, percent, scaleFromCenter, flipH, flipV):
'''
Internal method to scale and rotate
'''
self.angle = angle % 360
self.percent = percent
self.scaleFromCenter = scaleFromCenter
previousRect = self.rect
previousCenter = previousRect.center
previousX = previousRect.x
previousY = previousRect.y
# Rotate - pygame rotates in the opposite direction
pygameAngle = -self.angle
rotatedImage = pygame.transform.rotate(self.originalImage, pygameAngle)
rotatedRect = rotatedImage.get_rect()
rotatedWidth = rotatedRect.width
rotatedHeight = rotatedRect.height
# Scale
newWidth = int(rotatedWidth * .01 * self.percent)
newHeight = int(rotatedHeight * .01 * self.percent)
self.image = pygame.transform.scale(rotatedImage, (newWidth, newHeight))
# Flip
if flipH:
self.image = pygame.transform.flip(self.image, True, False)
if flipV:
self.image = pygame.transform.flip(self.image, False, True)
# Placement
self.rect = self.image.get_rect()
if self.scaleFromCenter:
self.rect.center = previousCenter
else: # use previous X, Y
self.rect.x = previousX
self.rect.y = previousY
self.setLoc((self.rect.left, self.rect.top))
|
python
|
def _transmogrophy(self, angle, percent, scaleFromCenter, flipH, flipV):
'''
Internal method to scale and rotate
'''
self.angle = angle % 360
self.percent = percent
self.scaleFromCenter = scaleFromCenter
previousRect = self.rect
previousCenter = previousRect.center
previousX = previousRect.x
previousY = previousRect.y
# Rotate - pygame rotates in the opposite direction
pygameAngle = -self.angle
rotatedImage = pygame.transform.rotate(self.originalImage, pygameAngle)
rotatedRect = rotatedImage.get_rect()
rotatedWidth = rotatedRect.width
rotatedHeight = rotatedRect.height
# Scale
newWidth = int(rotatedWidth * .01 * self.percent)
newHeight = int(rotatedHeight * .01 * self.percent)
self.image = pygame.transform.scale(rotatedImage, (newWidth, newHeight))
# Flip
if flipH:
self.image = pygame.transform.flip(self.image, True, False)
if flipV:
self.image = pygame.transform.flip(self.image, False, True)
# Placement
self.rect = self.image.get_rect()
if self.scaleFromCenter:
self.rect.center = previousCenter
else: # use previous X, Y
self.rect.x = previousX
self.rect.y = previousY
self.setLoc((self.rect.left, self.rect.top))
|
[
"def",
"_transmogrophy",
"(",
"self",
",",
"angle",
",",
"percent",
",",
"scaleFromCenter",
",",
"flipH",
",",
"flipV",
")",
":",
"self",
".",
"angle",
"=",
"angle",
"%",
"360",
"self",
".",
"percent",
"=",
"percent",
"self",
".",
"scaleFromCenter",
"=",
"scaleFromCenter",
"previousRect",
"=",
"self",
".",
"rect",
"previousCenter",
"=",
"previousRect",
".",
"center",
"previousX",
"=",
"previousRect",
".",
"x",
"previousY",
"=",
"previousRect",
".",
"y",
"# Rotate - pygame rotates in the opposite direction\r",
"pygameAngle",
"=",
"-",
"self",
".",
"angle",
"rotatedImage",
"=",
"pygame",
".",
"transform",
".",
"rotate",
"(",
"self",
".",
"originalImage",
",",
"pygameAngle",
")",
"rotatedRect",
"=",
"rotatedImage",
".",
"get_rect",
"(",
")",
"rotatedWidth",
"=",
"rotatedRect",
".",
"width",
"rotatedHeight",
"=",
"rotatedRect",
".",
"height",
"# Scale\r",
"newWidth",
"=",
"int",
"(",
"rotatedWidth",
"*",
".01",
"*",
"self",
".",
"percent",
")",
"newHeight",
"=",
"int",
"(",
"rotatedHeight",
"*",
".01",
"*",
"self",
".",
"percent",
")",
"self",
".",
"image",
"=",
"pygame",
".",
"transform",
".",
"scale",
"(",
"rotatedImage",
",",
"(",
"newWidth",
",",
"newHeight",
")",
")",
"# Flip\r",
"if",
"flipH",
":",
"self",
".",
"image",
"=",
"pygame",
".",
"transform",
".",
"flip",
"(",
"self",
".",
"image",
",",
"True",
",",
"False",
")",
"if",
"flipV",
":",
"self",
".",
"image",
"=",
"pygame",
".",
"transform",
".",
"flip",
"(",
"self",
".",
"image",
",",
"False",
",",
"True",
")",
"# Placement\r",
"self",
".",
"rect",
"=",
"self",
".",
"image",
".",
"get_rect",
"(",
")",
"if",
"self",
".",
"scaleFromCenter",
":",
"self",
".",
"rect",
".",
"center",
"=",
"previousCenter",
"else",
":",
"# use previous X, Y\r",
"self",
".",
"rect",
".",
"x",
"=",
"previousX",
"self",
".",
"rect",
".",
"y",
"=",
"previousY",
"self",
".",
"setLoc",
"(",
"(",
"self",
".",
"rect",
".",
"left",
",",
"self",
".",
"rect",
".",
"top",
")",
")"
] |
Internal method to scale and rotate
|
[
"Internal",
"method",
"to",
"scale",
"and",
"rotate"
] |
a830d8885d4d209e471cb53816277d30db56273c
|
https://github.com/IrvKalb/pygwidgets/blob/a830d8885d4d209e471cb53816277d30db56273c/pygwidgets/pygwidgets.py#L2258-L2301
|
240,763
|
IrvKalb/pygwidgets
|
pygwidgets/pygwidgets.py
|
Image.draw
|
def draw(self):
"""Draws the image at the given location."""
if not self.visible:
return
self.window.blit(self.image, self.loc)
|
python
|
def draw(self):
"""Draws the image at the given location."""
if not self.visible:
return
self.window.blit(self.image, self.loc)
|
[
"def",
"draw",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"visible",
":",
"return",
"self",
".",
"window",
".",
"blit",
"(",
"self",
".",
"image",
",",
"self",
".",
"loc",
")"
] |
Draws the image at the given location.
|
[
"Draws",
"the",
"image",
"at",
"the",
"given",
"location",
"."
] |
a830d8885d4d209e471cb53816277d30db56273c
|
https://github.com/IrvKalb/pygwidgets/blob/a830d8885d4d209e471cb53816277d30db56273c/pygwidgets/pygwidgets.py#L2304-L2309
|
240,764
|
IrvKalb/pygwidgets
|
pygwidgets/pygwidgets.py
|
PygAnimation.play
|
def play(self):
"""Starts an animation playing."""
if self.state == PygAnimation.PLAYING:
pass # nothing to do
elif self.state == PygAnimation.STOPPED: # restart from beginning of animation
self.index = 0 # first image in list
self.elapsed = 0
self.playingStartTime = time.time()
self.elapsedStopTime = self.endTimesList[-1] # end of last animation image time
self.nextElapsedThreshold = self.endTimesList[0]
self.nIterationsLeft = self.nTimes # typically 1
elif self.state == PygAnimation.PAUSED: # restart where we left off
self.playingStartTime = time.time() - self.elapsedAtPause # recalc start time
self.elapsed = self.elapsedAtPause
self.elapsedStopTime = self.endTimesList[-1] # end of last animation image time
self.nextElapsedThreshold = self.endTimesList[self.index]
self.state = PygAnimation.PLAYING
|
python
|
def play(self):
"""Starts an animation playing."""
if self.state == PygAnimation.PLAYING:
pass # nothing to do
elif self.state == PygAnimation.STOPPED: # restart from beginning of animation
self.index = 0 # first image in list
self.elapsed = 0
self.playingStartTime = time.time()
self.elapsedStopTime = self.endTimesList[-1] # end of last animation image time
self.nextElapsedThreshold = self.endTimesList[0]
self.nIterationsLeft = self.nTimes # typically 1
elif self.state == PygAnimation.PAUSED: # restart where we left off
self.playingStartTime = time.time() - self.elapsedAtPause # recalc start time
self.elapsed = self.elapsedAtPause
self.elapsedStopTime = self.endTimesList[-1] # end of last animation image time
self.nextElapsedThreshold = self.endTimesList[self.index]
self.state = PygAnimation.PLAYING
|
[
"def",
"play",
"(",
"self",
")",
":",
"if",
"self",
".",
"state",
"==",
"PygAnimation",
".",
"PLAYING",
":",
"pass",
"# nothing to do\r",
"elif",
"self",
".",
"state",
"==",
"PygAnimation",
".",
"STOPPED",
":",
"# restart from beginning of animation\r",
"self",
".",
"index",
"=",
"0",
"# first image in list\r",
"self",
".",
"elapsed",
"=",
"0",
"self",
".",
"playingStartTime",
"=",
"time",
".",
"time",
"(",
")",
"self",
".",
"elapsedStopTime",
"=",
"self",
".",
"endTimesList",
"[",
"-",
"1",
"]",
"# end of last animation image time\r",
"self",
".",
"nextElapsedThreshold",
"=",
"self",
".",
"endTimesList",
"[",
"0",
"]",
"self",
".",
"nIterationsLeft",
"=",
"self",
".",
"nTimes",
"# typically 1\r",
"elif",
"self",
".",
"state",
"==",
"PygAnimation",
".",
"PAUSED",
":",
"# restart where we left off\r",
"self",
".",
"playingStartTime",
"=",
"time",
".",
"time",
"(",
")",
"-",
"self",
".",
"elapsedAtPause",
"# recalc start time\r",
"self",
".",
"elapsed",
"=",
"self",
".",
"elapsedAtPause",
"self",
".",
"elapsedStopTime",
"=",
"self",
".",
"endTimesList",
"[",
"-",
"1",
"]",
"# end of last animation image time\r",
"self",
".",
"nextElapsedThreshold",
"=",
"self",
".",
"endTimesList",
"[",
"self",
".",
"index",
"]",
"self",
".",
"state",
"=",
"PygAnimation",
".",
"PLAYING"
] |
Starts an animation playing.
|
[
"Starts",
"an",
"animation",
"playing",
"."
] |
a830d8885d4d209e471cb53816277d30db56273c
|
https://github.com/IrvKalb/pygwidgets/blob/a830d8885d4d209e471cb53816277d30db56273c/pygwidgets/pygwidgets.py#L2471-L2490
|
240,765
|
IrvKalb/pygwidgets
|
pygwidgets/pygwidgets.py
|
PygAnimation.stop
|
def stop(self):
"""Stops a a playing animation. A subsequent call to play will start from the beginning."""
if self.state == PygAnimation.PLAYING:
self.index = 0 # set up for first image in list
self.elapsed = 0
self.nIterationsLeft = 0
elif self.state == PygAnimation.STOPPED:
pass # nothing to do
elif self.state == PygAnimation.PAUSED:
self.index = 0 # set up for first image in list
self.elapsed = 0
self.state = PygAnimation.STOPPED
|
python
|
def stop(self):
"""Stops a a playing animation. A subsequent call to play will start from the beginning."""
if self.state == PygAnimation.PLAYING:
self.index = 0 # set up for first image in list
self.elapsed = 0
self.nIterationsLeft = 0
elif self.state == PygAnimation.STOPPED:
pass # nothing to do
elif self.state == PygAnimation.PAUSED:
self.index = 0 # set up for first image in list
self.elapsed = 0
self.state = PygAnimation.STOPPED
|
[
"def",
"stop",
"(",
"self",
")",
":",
"if",
"self",
".",
"state",
"==",
"PygAnimation",
".",
"PLAYING",
":",
"self",
".",
"index",
"=",
"0",
"# set up for first image in list\r",
"self",
".",
"elapsed",
"=",
"0",
"self",
".",
"nIterationsLeft",
"=",
"0",
"elif",
"self",
".",
"state",
"==",
"PygAnimation",
".",
"STOPPED",
":",
"pass",
"# nothing to do\r",
"elif",
"self",
".",
"state",
"==",
"PygAnimation",
".",
"PAUSED",
":",
"self",
".",
"index",
"=",
"0",
"# set up for first image in list\r",
"self",
".",
"elapsed",
"=",
"0",
"self",
".",
"state",
"=",
"PygAnimation",
".",
"STOPPED"
] |
Stops a a playing animation. A subsequent call to play will start from the beginning.
|
[
"Stops",
"a",
"a",
"playing",
"animation",
".",
"A",
"subsequent",
"call",
"to",
"play",
"will",
"start",
"from",
"the",
"beginning",
"."
] |
a830d8885d4d209e471cb53816277d30db56273c
|
https://github.com/IrvKalb/pygwidgets/blob/a830d8885d4d209e471cb53816277d30db56273c/pygwidgets/pygwidgets.py#L2492-L2506
|
240,766
|
IrvKalb/pygwidgets
|
pygwidgets/pygwidgets.py
|
PygAnimation.pause
|
def pause(self):
"""Pauses a playing animation. A subsequent call to play will continue where it left off."""
if self.state == PygAnimation.PLAYING:
self.elapsedAtPause = self.elapsed
# only change state if it was playing
self.state = PygAnimation.PAUSED
elif self.state == PygAnimation.STOPPED:
pass # nothing to do
elif self.state == PygAnimation.PAUSED:
pass
|
python
|
def pause(self):
"""Pauses a playing animation. A subsequent call to play will continue where it left off."""
if self.state == PygAnimation.PLAYING:
self.elapsedAtPause = self.elapsed
# only change state if it was playing
self.state = PygAnimation.PAUSED
elif self.state == PygAnimation.STOPPED:
pass # nothing to do
elif self.state == PygAnimation.PAUSED:
pass
|
[
"def",
"pause",
"(",
"self",
")",
":",
"if",
"self",
".",
"state",
"==",
"PygAnimation",
".",
"PLAYING",
":",
"self",
".",
"elapsedAtPause",
"=",
"self",
".",
"elapsed",
"# only change state if it was playing\r",
"self",
".",
"state",
"=",
"PygAnimation",
".",
"PAUSED",
"elif",
"self",
".",
"state",
"==",
"PygAnimation",
".",
"STOPPED",
":",
"pass",
"# nothing to do\r",
"elif",
"self",
".",
"state",
"==",
"PygAnimation",
".",
"PAUSED",
":",
"pass"
] |
Pauses a playing animation. A subsequent call to play will continue where it left off.
|
[
"Pauses",
"a",
"playing",
"animation",
".",
"A",
"subsequent",
"call",
"to",
"play",
"will",
"continue",
"where",
"it",
"left",
"off",
"."
] |
a830d8885d4d209e471cb53816277d30db56273c
|
https://github.com/IrvKalb/pygwidgets/blob/a830d8885d4d209e471cb53816277d30db56273c/pygwidgets/pygwidgets.py#L2508-L2519
|
240,767
|
IrvKalb/pygwidgets
|
pygwidgets/pygwidgets.py
|
PygAnimation.update
|
def update(self):
"""Updates the currently running animation.
This method should be called in every frame where you want an animation to run.
Its job is to figure out if it is time to move onto the next image in the animation.
"""
returnValue = False # typical return value
if self.state != PygAnimation.PLAYING:
return returnValue
# The job here is to figure out the index of the image to show
# and the matching elapsed time threshold for the current image
self.elapsed = (time.time() - self.playingStartTime)
if self.elapsed > self.elapsedStopTime: # anim finished
if self.loop: # restart the animation
self.playingStartTime = time.time()
self.nextElapsedThreshold = self.endTimesList[0]
else: # not looping
self.nIterationsLeft = self.nIterationsLeft - 1
if self.nIterationsLeft == 0: # done
self.state = PygAnimation.STOPPED
if self.callBack != None: # if there is a callBack
self.callBack(self.nickname) # do it
returnValue = True # animation has ended
else: # another iteration - start over again
self.playingStartTime = time.time()
self.nextElapsedThreshold = self.endTimesList[0]
self.index = 0
elif self.elapsed > self.nextElapsedThreshold:
# Time to move on to next picture
self.index = self.index + 1
self.nextElapsedThreshold = self.endTimesList[self.index]
return returnValue
|
python
|
def update(self):
"""Updates the currently running animation.
This method should be called in every frame where you want an animation to run.
Its job is to figure out if it is time to move onto the next image in the animation.
"""
returnValue = False # typical return value
if self.state != PygAnimation.PLAYING:
return returnValue
# The job here is to figure out the index of the image to show
# and the matching elapsed time threshold for the current image
self.elapsed = (time.time() - self.playingStartTime)
if self.elapsed > self.elapsedStopTime: # anim finished
if self.loop: # restart the animation
self.playingStartTime = time.time()
self.nextElapsedThreshold = self.endTimesList[0]
else: # not looping
self.nIterationsLeft = self.nIterationsLeft - 1
if self.nIterationsLeft == 0: # done
self.state = PygAnimation.STOPPED
if self.callBack != None: # if there is a callBack
self.callBack(self.nickname) # do it
returnValue = True # animation has ended
else: # another iteration - start over again
self.playingStartTime = time.time()
self.nextElapsedThreshold = self.endTimesList[0]
self.index = 0
elif self.elapsed > self.nextElapsedThreshold:
# Time to move on to next picture
self.index = self.index + 1
self.nextElapsedThreshold = self.endTimesList[self.index]
return returnValue
|
[
"def",
"update",
"(",
"self",
")",
":",
"returnValue",
"=",
"False",
"# typical return value\r",
"if",
"self",
".",
"state",
"!=",
"PygAnimation",
".",
"PLAYING",
":",
"return",
"returnValue",
"# The job here is to figure out the index of the image to show\r",
"# and the matching elapsed time threshold for the current image\r",
"self",
".",
"elapsed",
"=",
"(",
"time",
".",
"time",
"(",
")",
"-",
"self",
".",
"playingStartTime",
")",
"if",
"self",
".",
"elapsed",
">",
"self",
".",
"elapsedStopTime",
":",
"# anim finished\r",
"if",
"self",
".",
"loop",
":",
"# restart the animation\r",
"self",
".",
"playingStartTime",
"=",
"time",
".",
"time",
"(",
")",
"self",
".",
"nextElapsedThreshold",
"=",
"self",
".",
"endTimesList",
"[",
"0",
"]",
"else",
":",
"# not looping\r",
"self",
".",
"nIterationsLeft",
"=",
"self",
".",
"nIterationsLeft",
"-",
"1",
"if",
"self",
".",
"nIterationsLeft",
"==",
"0",
":",
"# done\r",
"self",
".",
"state",
"=",
"PygAnimation",
".",
"STOPPED",
"if",
"self",
".",
"callBack",
"!=",
"None",
":",
"# if there is a callBack\r",
"self",
".",
"callBack",
"(",
"self",
".",
"nickname",
")",
"# do it\r",
"returnValue",
"=",
"True",
"# animation has ended\r",
"else",
":",
"# another iteration - start over again\r",
"self",
".",
"playingStartTime",
"=",
"time",
".",
"time",
"(",
")",
"self",
".",
"nextElapsedThreshold",
"=",
"self",
".",
"endTimesList",
"[",
"0",
"]",
"self",
".",
"index",
"=",
"0",
"elif",
"self",
".",
"elapsed",
">",
"self",
".",
"nextElapsedThreshold",
":",
"# Time to move on to next picture\r",
"self",
".",
"index",
"=",
"self",
".",
"index",
"+",
"1",
"self",
".",
"nextElapsedThreshold",
"=",
"self",
".",
"endTimesList",
"[",
"self",
".",
"index",
"]",
"return",
"returnValue"
] |
Updates the currently running animation.
This method should be called in every frame where you want an animation to run.
Its job is to figure out if it is time to move onto the next image in the animation.
|
[
"Updates",
"the",
"currently",
"running",
"animation",
".",
"This",
"method",
"should",
"be",
"called",
"in",
"every",
"frame",
"where",
"you",
"want",
"an",
"animation",
"to",
"run",
".",
"Its",
"job",
"is",
"to",
"figure",
"out",
"if",
"it",
"is",
"time",
"to",
"move",
"onto",
"the",
"next",
"image",
"in",
"the",
"animation",
"."
] |
a830d8885d4d209e471cb53816277d30db56273c
|
https://github.com/IrvKalb/pygwidgets/blob/a830d8885d4d209e471cb53816277d30db56273c/pygwidgets/pygwidgets.py#L2521-L2557
|
240,768
|
IrvKalb/pygwidgets
|
pygwidgets/pygwidgets.py
|
PygAnimation.draw
|
def draw(self):
"""Draws the current frame of the animation
Should be called in every frame.
"""
# Assumes that self.index has been set earlier (typically in update method)
# it is used as the index of the current image/endTime/loc
theImage = self.imagesList[self.index] # choose the image to show
if theImage is None: # if there is no image to show
return
if self.visible:
theOffset = self.offsetsList[self.index]
theLoc = ((self.loc[0] + theOffset[0]), (self.loc[1] + theOffset[1]))
self.window.blit(theImage, theLoc)
|
python
|
def draw(self):
"""Draws the current frame of the animation
Should be called in every frame.
"""
# Assumes that self.index has been set earlier (typically in update method)
# it is used as the index of the current image/endTime/loc
theImage = self.imagesList[self.index] # choose the image to show
if theImage is None: # if there is no image to show
return
if self.visible:
theOffset = self.offsetsList[self.index]
theLoc = ((self.loc[0] + theOffset[0]), (self.loc[1] + theOffset[1]))
self.window.blit(theImage, theLoc)
|
[
"def",
"draw",
"(",
"self",
")",
":",
"# Assumes that self.index has been set earlier (typically in update method)\r",
"# it is used as the index of the current image/endTime/loc\r",
"theImage",
"=",
"self",
".",
"imagesList",
"[",
"self",
".",
"index",
"]",
"# choose the image to show\r",
"if",
"theImage",
"is",
"None",
":",
"# if there is no image to show\r",
"return",
"if",
"self",
".",
"visible",
":",
"theOffset",
"=",
"self",
".",
"offsetsList",
"[",
"self",
".",
"index",
"]",
"theLoc",
"=",
"(",
"(",
"self",
".",
"loc",
"[",
"0",
"]",
"+",
"theOffset",
"[",
"0",
"]",
")",
",",
"(",
"self",
".",
"loc",
"[",
"1",
"]",
"+",
"theOffset",
"[",
"1",
"]",
")",
")",
"self",
".",
"window",
".",
"blit",
"(",
"theImage",
",",
"theLoc",
")"
] |
Draws the current frame of the animation
Should be called in every frame.
|
[
"Draws",
"the",
"current",
"frame",
"of",
"the",
"animation",
"Should",
"be",
"called",
"in",
"every",
"frame",
"."
] |
a830d8885d4d209e471cb53816277d30db56273c
|
https://github.com/IrvKalb/pygwidgets/blob/a830d8885d4d209e471cb53816277d30db56273c/pygwidgets/pygwidgets.py#L2559-L2575
|
240,769
|
ironfroggy/django-better-cache
|
bettercache/decorators.py
|
CachedMethod.cache
|
def cache(cls, key_attrs, expires=None):
"""Decorates a method to provide cached-memoization using a
combination of the positional arguments, keyword argments, and
whitelisted instance attributes.
"""
def decorator(func):
@functools.wraps(func)
def wrapper(self, *args, **kwargs):
module = type(self).__module__
classname = type(self).__name__
method_name = func.__name__
data = {}
if isinstance(key_attrs, basestring):
_key_attrs = key_attrs.split()
else:
_key_attrs = key_attrs
for key_attr in _key_attrs:
key_value = getattr(self, key_attr)
if isinstance(key_value, dict):
key_value = ('dict', sorted(key_value.items()))
elif isinstance(key_value, set):
key_value = ('set', sorted(key_value))
else:
key_value = (type(key_value).__name__, key_value)
data[key_attr] = key_value
data = sorted(data.items())
result_cache, new = cls.get_or_create(
module=module,
classname=classname,
method_name=method_name,
data=data,
args=args,
kwargs=sorted(kwargs.items()),
)
if new:
result_cache.result = func(self, *args, **kwargs)
result_cache.save(expires)
return result_cache.result
return wrapper
return decorator
|
python
|
def cache(cls, key_attrs, expires=None):
"""Decorates a method to provide cached-memoization using a
combination of the positional arguments, keyword argments, and
whitelisted instance attributes.
"""
def decorator(func):
@functools.wraps(func)
def wrapper(self, *args, **kwargs):
module = type(self).__module__
classname = type(self).__name__
method_name = func.__name__
data = {}
if isinstance(key_attrs, basestring):
_key_attrs = key_attrs.split()
else:
_key_attrs = key_attrs
for key_attr in _key_attrs:
key_value = getattr(self, key_attr)
if isinstance(key_value, dict):
key_value = ('dict', sorted(key_value.items()))
elif isinstance(key_value, set):
key_value = ('set', sorted(key_value))
else:
key_value = (type(key_value).__name__, key_value)
data[key_attr] = key_value
data = sorted(data.items())
result_cache, new = cls.get_or_create(
module=module,
classname=classname,
method_name=method_name,
data=data,
args=args,
kwargs=sorted(kwargs.items()),
)
if new:
result_cache.result = func(self, *args, **kwargs)
result_cache.save(expires)
return result_cache.result
return wrapper
return decorator
|
[
"def",
"cache",
"(",
"cls",
",",
"key_attrs",
",",
"expires",
"=",
"None",
")",
":",
"def",
"decorator",
"(",
"func",
")",
":",
"@",
"functools",
".",
"wraps",
"(",
"func",
")",
"def",
"wrapper",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"module",
"=",
"type",
"(",
"self",
")",
".",
"__module__",
"classname",
"=",
"type",
"(",
"self",
")",
".",
"__name__",
"method_name",
"=",
"func",
".",
"__name__",
"data",
"=",
"{",
"}",
"if",
"isinstance",
"(",
"key_attrs",
",",
"basestring",
")",
":",
"_key_attrs",
"=",
"key_attrs",
".",
"split",
"(",
")",
"else",
":",
"_key_attrs",
"=",
"key_attrs",
"for",
"key_attr",
"in",
"_key_attrs",
":",
"key_value",
"=",
"getattr",
"(",
"self",
",",
"key_attr",
")",
"if",
"isinstance",
"(",
"key_value",
",",
"dict",
")",
":",
"key_value",
"=",
"(",
"'dict'",
",",
"sorted",
"(",
"key_value",
".",
"items",
"(",
")",
")",
")",
"elif",
"isinstance",
"(",
"key_value",
",",
"set",
")",
":",
"key_value",
"=",
"(",
"'set'",
",",
"sorted",
"(",
"key_value",
")",
")",
"else",
":",
"key_value",
"=",
"(",
"type",
"(",
"key_value",
")",
".",
"__name__",
",",
"key_value",
")",
"data",
"[",
"key_attr",
"]",
"=",
"key_value",
"data",
"=",
"sorted",
"(",
"data",
".",
"items",
"(",
")",
")",
"result_cache",
",",
"new",
"=",
"cls",
".",
"get_or_create",
"(",
"module",
"=",
"module",
",",
"classname",
"=",
"classname",
",",
"method_name",
"=",
"method_name",
",",
"data",
"=",
"data",
",",
"args",
"=",
"args",
",",
"kwargs",
"=",
"sorted",
"(",
"kwargs",
".",
"items",
"(",
")",
")",
",",
")",
"if",
"new",
":",
"result_cache",
".",
"result",
"=",
"func",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"result_cache",
".",
"save",
"(",
"expires",
")",
"return",
"result_cache",
".",
"result",
"return",
"wrapper",
"return",
"decorator"
] |
Decorates a method to provide cached-memoization using a
combination of the positional arguments, keyword argments, and
whitelisted instance attributes.
|
[
"Decorates",
"a",
"method",
"to",
"provide",
"cached",
"-",
"memoization",
"using",
"a",
"combination",
"of",
"the",
"positional",
"arguments",
"keyword",
"argments",
"and",
"whitelisted",
"instance",
"attributes",
"."
] |
5350e8c646cef1c1ca74eab176f856ddd9eaf5c3
|
https://github.com/ironfroggy/django-better-cache/blob/5350e8c646cef1c1ca74eab176f856ddd9eaf5c3/bettercache/decorators.py#L40-L87
|
240,770
|
crcresearch/py-utils
|
crc_nd/utils/error_mixins.py
|
CallerErrorMixin.assertCallerError
|
def assertCallerError(self, expected_arg0, callable_obj, *args, **kwargs):
"""
Assert that a callable raises an AssertionError with a particular argument.
:param expected_arg0: The expected value for the AssertionError instance's first argument
(i.e., instance.args[0]).
"""
try:
callable_obj(*args, **kwargs)
self.fail('Expected AssertionError, but no exception raised')
except AssertionError, exc:
self.assertEqual(exc.args[0], expected_arg0)
except Exception, exc:
self.fail('Expected AssertionError, but got %s' % repr(exc))
|
python
|
def assertCallerError(self, expected_arg0, callable_obj, *args, **kwargs):
"""
Assert that a callable raises an AssertionError with a particular argument.
:param expected_arg0: The expected value for the AssertionError instance's first argument
(i.e., instance.args[0]).
"""
try:
callable_obj(*args, **kwargs)
self.fail('Expected AssertionError, but no exception raised')
except AssertionError, exc:
self.assertEqual(exc.args[0], expected_arg0)
except Exception, exc:
self.fail('Expected AssertionError, but got %s' % repr(exc))
|
[
"def",
"assertCallerError",
"(",
"self",
",",
"expected_arg0",
",",
"callable_obj",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"try",
":",
"callable_obj",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"self",
".",
"fail",
"(",
"'Expected AssertionError, but no exception raised'",
")",
"except",
"AssertionError",
",",
"exc",
":",
"self",
".",
"assertEqual",
"(",
"exc",
".",
"args",
"[",
"0",
"]",
",",
"expected_arg0",
")",
"except",
"Exception",
",",
"exc",
":",
"self",
".",
"fail",
"(",
"'Expected AssertionError, but got %s'",
"%",
"repr",
"(",
"exc",
")",
")"
] |
Assert that a callable raises an AssertionError with a particular argument.
:param expected_arg0: The expected value for the AssertionError instance's first argument
(i.e., instance.args[0]).
|
[
"Assert",
"that",
"a",
"callable",
"raises",
"an",
"AssertionError",
"with",
"a",
"particular",
"argument",
"."
] |
04caf0425a047baf900da726cf47c42413b0dd81
|
https://github.com/crcresearch/py-utils/blob/04caf0425a047baf900da726cf47c42413b0dd81/crc_nd/utils/error_mixins.py#L23-L36
|
240,771
|
cdeboever3/cdpybio
|
cdpybio/pysamext.py
|
get_region_nt_counts
|
def get_region_nt_counts(region, bam, stranded=False):
"""
Get counts of each nucleotide from a bam file for a given region. If R1 and
R2 reads both overlap a position, only one count will be added. If the R1
and R2 reads disagree at a position they both overlap, that read pair is not
used for that position. Can optionally output strand-specific counts.
Parameters
----------
region : str or list
Region of type chrom:start-end, chrom:start-end:strand, or [chrom,
start, end]. The strand is ignored for chrom:start-end:strand. For
chrom:start-end, the coordinates are one-based inclusive. For example,
the query chr1:10-11 will give you the counts for the 10th and 11th
bases of chr1. For [chrom, start, end], the coordinates are zero-based
and end exclusive (like a bed file). The query [chr1, 9, 11] will give
you the coverage of the 10th and 11th bases of chr1. The region value is
passed directly to pysam's pileup function.
bam : pysam.calignmentfile.AlignmentFile or str
Bam file opened with pysam or path to bam file (must be sorted and
indexed).
stranded : boolean
Boolean indicating whether read data is stranded and stranded nucleotide
counts should be returned. Assumes R1 read on reverse strand implies +
strand coverage etc.
Returns
-------
counts : pandas.DataFrame
Data frame with the counts for each base in the region. The index of
this data frame is one-based for compatibility with VCF files.
"""
# TODO: I should figure out what the different possible values are that
# pysam could give me back (so far I only have ATCGN). Can I get deletions
# and insertions?
# TODO: This could probably be parallelized.
if type(bam) == str:
bam = pysam.AlignmentFile(bam, 'rb')
if type(region) is str:
r = parse_region(region)
if len(r) == 3:
chrom, start, end = r
elif len(r) == 4:
chrom, start, end, strand = r
start = int(start)
end = int(end)
ind = ['{}:{}'.format(chrom, x) for
x in range(start, end + 1)]
pp = bam.pileup(region=region, truncate=True)
elif type(region) is (list or tuple):
chrom, start, end = region
ind = ['{}:{}'.format(chrom, x) for
x in range(int(start) + 1, int(end) + 1)]
pp = bam.pileup(chrom, start, end, truncate=True)
cols = ['A', 'T', 'C', 'G', 'N']
if stranded:
cols = ['{}+'.format(x) for x in cols] + ['{}-'.format(x) for x in cols]
counts = pd.DataFrame(0, index=ind, columns=cols)
for pc in pp:
# Most of this code deals with R1 and R2 reads that overlap so that we
# don't get two counts from one fragment.
pos = pc.reference_pos + 1
r1_qnames = []
r1_nts = []
r2_qnames = []
r2_nts = []
for pr in pc.pileups:
qnames = [r1_qnames, r2_qnames][pr.alignment.is_read2]
nts = [r1_nts, r2_nts][pr.alignment.is_read2]
nt = _pos_nt(pr, pc.reference_pos, stranded)
if nt:
qnames.append(pr.alignment.qname)
nts.append(nt)
r1 = pd.Series(r1_nts, index=r1_qnames)
r2 = pd.Series(r2_nts, index=r2_qnames)
df = pd.DataFrame([r1, r2], index=['R1', 'R2']).T
singles = df[df.isnull().sum(axis=1) == 1]
doubles = df.dropna()
vcs = []
vcs.append(singles['R1'].value_counts())
vcs.append(singles['R2'].value_counts())
doubles = doubles[doubles.R1 == doubles.R2]
vcs.append(doubles.R1.value_counts())
for vc in vcs:
counts.ix['{}:{}'.format(chrom, pos), vc.index] += vc
return counts
|
python
|
def get_region_nt_counts(region, bam, stranded=False):
"""
Get counts of each nucleotide from a bam file for a given region. If R1 and
R2 reads both overlap a position, only one count will be added. If the R1
and R2 reads disagree at a position they both overlap, that read pair is not
used for that position. Can optionally output strand-specific counts.
Parameters
----------
region : str or list
Region of type chrom:start-end, chrom:start-end:strand, or [chrom,
start, end]. The strand is ignored for chrom:start-end:strand. For
chrom:start-end, the coordinates are one-based inclusive. For example,
the query chr1:10-11 will give you the counts for the 10th and 11th
bases of chr1. For [chrom, start, end], the coordinates are zero-based
and end exclusive (like a bed file). The query [chr1, 9, 11] will give
you the coverage of the 10th and 11th bases of chr1. The region value is
passed directly to pysam's pileup function.
bam : pysam.calignmentfile.AlignmentFile or str
Bam file opened with pysam or path to bam file (must be sorted and
indexed).
stranded : boolean
Boolean indicating whether read data is stranded and stranded nucleotide
counts should be returned. Assumes R1 read on reverse strand implies +
strand coverage etc.
Returns
-------
counts : pandas.DataFrame
Data frame with the counts for each base in the region. The index of
this data frame is one-based for compatibility with VCF files.
"""
# TODO: I should figure out what the different possible values are that
# pysam could give me back (so far I only have ATCGN). Can I get deletions
# and insertions?
# TODO: This could probably be parallelized.
if type(bam) == str:
bam = pysam.AlignmentFile(bam, 'rb')
if type(region) is str:
r = parse_region(region)
if len(r) == 3:
chrom, start, end = r
elif len(r) == 4:
chrom, start, end, strand = r
start = int(start)
end = int(end)
ind = ['{}:{}'.format(chrom, x) for
x in range(start, end + 1)]
pp = bam.pileup(region=region, truncate=True)
elif type(region) is (list or tuple):
chrom, start, end = region
ind = ['{}:{}'.format(chrom, x) for
x in range(int(start) + 1, int(end) + 1)]
pp = bam.pileup(chrom, start, end, truncate=True)
cols = ['A', 'T', 'C', 'G', 'N']
if stranded:
cols = ['{}+'.format(x) for x in cols] + ['{}-'.format(x) for x in cols]
counts = pd.DataFrame(0, index=ind, columns=cols)
for pc in pp:
# Most of this code deals with R1 and R2 reads that overlap so that we
# don't get two counts from one fragment.
pos = pc.reference_pos + 1
r1_qnames = []
r1_nts = []
r2_qnames = []
r2_nts = []
for pr in pc.pileups:
qnames = [r1_qnames, r2_qnames][pr.alignment.is_read2]
nts = [r1_nts, r2_nts][pr.alignment.is_read2]
nt = _pos_nt(pr, pc.reference_pos, stranded)
if nt:
qnames.append(pr.alignment.qname)
nts.append(nt)
r1 = pd.Series(r1_nts, index=r1_qnames)
r2 = pd.Series(r2_nts, index=r2_qnames)
df = pd.DataFrame([r1, r2], index=['R1', 'R2']).T
singles = df[df.isnull().sum(axis=1) == 1]
doubles = df.dropna()
vcs = []
vcs.append(singles['R1'].value_counts())
vcs.append(singles['R2'].value_counts())
doubles = doubles[doubles.R1 == doubles.R2]
vcs.append(doubles.R1.value_counts())
for vc in vcs:
counts.ix['{}:{}'.format(chrom, pos), vc.index] += vc
return counts
|
[
"def",
"get_region_nt_counts",
"(",
"region",
",",
"bam",
",",
"stranded",
"=",
"False",
")",
":",
"# TODO: I should figure out what the different possible values are that",
"# pysam could give me back (so far I only have ATCGN). Can I get deletions",
"# and insertions? ",
"# TODO: This could probably be parallelized.",
"if",
"type",
"(",
"bam",
")",
"==",
"str",
":",
"bam",
"=",
"pysam",
".",
"AlignmentFile",
"(",
"bam",
",",
"'rb'",
")",
"if",
"type",
"(",
"region",
")",
"is",
"str",
":",
"r",
"=",
"parse_region",
"(",
"region",
")",
"if",
"len",
"(",
"r",
")",
"==",
"3",
":",
"chrom",
",",
"start",
",",
"end",
"=",
"r",
"elif",
"len",
"(",
"r",
")",
"==",
"4",
":",
"chrom",
",",
"start",
",",
"end",
",",
"strand",
"=",
"r",
"start",
"=",
"int",
"(",
"start",
")",
"end",
"=",
"int",
"(",
"end",
")",
"ind",
"=",
"[",
"'{}:{}'",
".",
"format",
"(",
"chrom",
",",
"x",
")",
"for",
"x",
"in",
"range",
"(",
"start",
",",
"end",
"+",
"1",
")",
"]",
"pp",
"=",
"bam",
".",
"pileup",
"(",
"region",
"=",
"region",
",",
"truncate",
"=",
"True",
")",
"elif",
"type",
"(",
"region",
")",
"is",
"(",
"list",
"or",
"tuple",
")",
":",
"chrom",
",",
"start",
",",
"end",
"=",
"region",
"ind",
"=",
"[",
"'{}:{}'",
".",
"format",
"(",
"chrom",
",",
"x",
")",
"for",
"x",
"in",
"range",
"(",
"int",
"(",
"start",
")",
"+",
"1",
",",
"int",
"(",
"end",
")",
"+",
"1",
")",
"]",
"pp",
"=",
"bam",
".",
"pileup",
"(",
"chrom",
",",
"start",
",",
"end",
",",
"truncate",
"=",
"True",
")",
"cols",
"=",
"[",
"'A'",
",",
"'T'",
",",
"'C'",
",",
"'G'",
",",
"'N'",
"]",
"if",
"stranded",
":",
"cols",
"=",
"[",
"'{}+'",
".",
"format",
"(",
"x",
")",
"for",
"x",
"in",
"cols",
"]",
"+",
"[",
"'{}-'",
".",
"format",
"(",
"x",
")",
"for",
"x",
"in",
"cols",
"]",
"counts",
"=",
"pd",
".",
"DataFrame",
"(",
"0",
",",
"index",
"=",
"ind",
",",
"columns",
"=",
"cols",
")",
"for",
"pc",
"in",
"pp",
":",
"# Most of this code deals with R1 and R2 reads that overlap so that we",
"# don't get two counts from one fragment.",
"pos",
"=",
"pc",
".",
"reference_pos",
"+",
"1",
"r1_qnames",
"=",
"[",
"]",
"r1_nts",
"=",
"[",
"]",
"r2_qnames",
"=",
"[",
"]",
"r2_nts",
"=",
"[",
"]",
"for",
"pr",
"in",
"pc",
".",
"pileups",
":",
"qnames",
"=",
"[",
"r1_qnames",
",",
"r2_qnames",
"]",
"[",
"pr",
".",
"alignment",
".",
"is_read2",
"]",
"nts",
"=",
"[",
"r1_nts",
",",
"r2_nts",
"]",
"[",
"pr",
".",
"alignment",
".",
"is_read2",
"]",
"nt",
"=",
"_pos_nt",
"(",
"pr",
",",
"pc",
".",
"reference_pos",
",",
"stranded",
")",
"if",
"nt",
":",
"qnames",
".",
"append",
"(",
"pr",
".",
"alignment",
".",
"qname",
")",
"nts",
".",
"append",
"(",
"nt",
")",
"r1",
"=",
"pd",
".",
"Series",
"(",
"r1_nts",
",",
"index",
"=",
"r1_qnames",
")",
"r2",
"=",
"pd",
".",
"Series",
"(",
"r2_nts",
",",
"index",
"=",
"r2_qnames",
")",
"df",
"=",
"pd",
".",
"DataFrame",
"(",
"[",
"r1",
",",
"r2",
"]",
",",
"index",
"=",
"[",
"'R1'",
",",
"'R2'",
"]",
")",
".",
"T",
"singles",
"=",
"df",
"[",
"df",
".",
"isnull",
"(",
")",
".",
"sum",
"(",
"axis",
"=",
"1",
")",
"==",
"1",
"]",
"doubles",
"=",
"df",
".",
"dropna",
"(",
")",
"vcs",
"=",
"[",
"]",
"vcs",
".",
"append",
"(",
"singles",
"[",
"'R1'",
"]",
".",
"value_counts",
"(",
")",
")",
"vcs",
".",
"append",
"(",
"singles",
"[",
"'R2'",
"]",
".",
"value_counts",
"(",
")",
")",
"doubles",
"=",
"doubles",
"[",
"doubles",
".",
"R1",
"==",
"doubles",
".",
"R2",
"]",
"vcs",
".",
"append",
"(",
"doubles",
".",
"R1",
".",
"value_counts",
"(",
")",
")",
"for",
"vc",
"in",
"vcs",
":",
"counts",
".",
"ix",
"[",
"'{}:{}'",
".",
"format",
"(",
"chrom",
",",
"pos",
")",
",",
"vc",
".",
"index",
"]",
"+=",
"vc",
"return",
"counts"
] |
Get counts of each nucleotide from a bam file for a given region. If R1 and
R2 reads both overlap a position, only one count will be added. If the R1
and R2 reads disagree at a position they both overlap, that read pair is not
used for that position. Can optionally output strand-specific counts.
Parameters
----------
region : str or list
Region of type chrom:start-end, chrom:start-end:strand, or [chrom,
start, end]. The strand is ignored for chrom:start-end:strand. For
chrom:start-end, the coordinates are one-based inclusive. For example,
the query chr1:10-11 will give you the counts for the 10th and 11th
bases of chr1. For [chrom, start, end], the coordinates are zero-based
and end exclusive (like a bed file). The query [chr1, 9, 11] will give
you the coverage of the 10th and 11th bases of chr1. The region value is
passed directly to pysam's pileup function.
bam : pysam.calignmentfile.AlignmentFile or str
Bam file opened with pysam or path to bam file (must be sorted and
indexed).
stranded : boolean
Boolean indicating whether read data is stranded and stranded nucleotide
counts should be returned. Assumes R1 read on reverse strand implies +
strand coverage etc.
Returns
-------
counts : pandas.DataFrame
Data frame with the counts for each base in the region. The index of
this data frame is one-based for compatibility with VCF files.
|
[
"Get",
"counts",
"of",
"each",
"nucleotide",
"from",
"a",
"bam",
"file",
"for",
"a",
"given",
"region",
".",
"If",
"R1",
"and",
"R2",
"reads",
"both",
"overlap",
"a",
"position",
"only",
"one",
"count",
"will",
"be",
"added",
".",
"If",
"the",
"R1",
"and",
"R2",
"reads",
"disagree",
"at",
"a",
"position",
"they",
"both",
"overlap",
"that",
"read",
"pair",
"is",
"not",
"used",
"for",
"that",
"position",
".",
"Can",
"optionally",
"output",
"strand",
"-",
"specific",
"counts",
"."
] |
38efdf0e11d01bc00a135921cb91a19c03db5d5c
|
https://github.com/cdeboever3/cdpybio/blob/38efdf0e11d01bc00a135921cb91a19c03db5d5c/cdpybio/pysamext.py#L11-L103
|
240,772
|
cdeboever3/cdpybio
|
cdpybio/pysamext.py
|
_pos_nt
|
def _pos_nt(pr, pos, stranded=False):
"""
Given a pileup read and a position, return the base that is covered by the
read at the given position if the position is covered.
Parameters
----------
pr : pysam.calignmentfile.PileupRead
Region of type chrom:start-end, chrom:start-end:strand, or [chrom,
pos : int
Zero-based position of the nucleotide of interest in genomic
coordinates.
stranded : boolean
Boolean indicating whether data is stranded and stranded nucleotide
should be returned. Assumes R1 read on reverse strand implies + strand
coverage etc.
Returns
-------
nt : str or None
If None, then the read did not cover the position. If not None, returns
the nucleotide at that position (with + or - appended to indicate strand
if desired).
"""
nt = None
bases = dict(zip(pr.alignment.get_reference_positions(),
list(pr.alignment.seq.upper())))
if pos in bases.keys():
nt = bases[pos]
if nt and stranded:
strand = None
if pr.alignment.is_read1 and pr.alignment.is_reverse:
strand = '+'
if pr.alignment.is_read2 and not pr.alignment.is_reverse:
strand = '+'
if pr.alignment.is_read1 and not pr.alignment.is_reverse:
nt = str(Seq(nt).reverse_complement())
strand = '-'
if pr.alignment.is_read2 and pr.alignment.is_reverse:
nt = str(Seq(nt).reverse_complement())
strand = '-'
nt = '{}{}'.format(nt, strand)
return nt
|
python
|
def _pos_nt(pr, pos, stranded=False):
"""
Given a pileup read and a position, return the base that is covered by the
read at the given position if the position is covered.
Parameters
----------
pr : pysam.calignmentfile.PileupRead
Region of type chrom:start-end, chrom:start-end:strand, or [chrom,
pos : int
Zero-based position of the nucleotide of interest in genomic
coordinates.
stranded : boolean
Boolean indicating whether data is stranded and stranded nucleotide
should be returned. Assumes R1 read on reverse strand implies + strand
coverage etc.
Returns
-------
nt : str or None
If None, then the read did not cover the position. If not None, returns
the nucleotide at that position (with + or - appended to indicate strand
if desired).
"""
nt = None
bases = dict(zip(pr.alignment.get_reference_positions(),
list(pr.alignment.seq.upper())))
if pos in bases.keys():
nt = bases[pos]
if nt and stranded:
strand = None
if pr.alignment.is_read1 and pr.alignment.is_reverse:
strand = '+'
if pr.alignment.is_read2 and not pr.alignment.is_reverse:
strand = '+'
if pr.alignment.is_read1 and not pr.alignment.is_reverse:
nt = str(Seq(nt).reverse_complement())
strand = '-'
if pr.alignment.is_read2 and pr.alignment.is_reverse:
nt = str(Seq(nt).reverse_complement())
strand = '-'
nt = '{}{}'.format(nt, strand)
return nt
|
[
"def",
"_pos_nt",
"(",
"pr",
",",
"pos",
",",
"stranded",
"=",
"False",
")",
":",
"nt",
"=",
"None",
"bases",
"=",
"dict",
"(",
"zip",
"(",
"pr",
".",
"alignment",
".",
"get_reference_positions",
"(",
")",
",",
"list",
"(",
"pr",
".",
"alignment",
".",
"seq",
".",
"upper",
"(",
")",
")",
")",
")",
"if",
"pos",
"in",
"bases",
".",
"keys",
"(",
")",
":",
"nt",
"=",
"bases",
"[",
"pos",
"]",
"if",
"nt",
"and",
"stranded",
":",
"strand",
"=",
"None",
"if",
"pr",
".",
"alignment",
".",
"is_read1",
"and",
"pr",
".",
"alignment",
".",
"is_reverse",
":",
"strand",
"=",
"'+'",
"if",
"pr",
".",
"alignment",
".",
"is_read2",
"and",
"not",
"pr",
".",
"alignment",
".",
"is_reverse",
":",
"strand",
"=",
"'+'",
"if",
"pr",
".",
"alignment",
".",
"is_read1",
"and",
"not",
"pr",
".",
"alignment",
".",
"is_reverse",
":",
"nt",
"=",
"str",
"(",
"Seq",
"(",
"nt",
")",
".",
"reverse_complement",
"(",
")",
")",
"strand",
"=",
"'-'",
"if",
"pr",
".",
"alignment",
".",
"is_read2",
"and",
"pr",
".",
"alignment",
".",
"is_reverse",
":",
"nt",
"=",
"str",
"(",
"Seq",
"(",
"nt",
")",
".",
"reverse_complement",
"(",
")",
")",
"strand",
"=",
"'-'",
"nt",
"=",
"'{}{}'",
".",
"format",
"(",
"nt",
",",
"strand",
")",
"return",
"nt"
] |
Given a pileup read and a position, return the base that is covered by the
read at the given position if the position is covered.
Parameters
----------
pr : pysam.calignmentfile.PileupRead
Region of type chrom:start-end, chrom:start-end:strand, or [chrom,
pos : int
Zero-based position of the nucleotide of interest in genomic
coordinates.
stranded : boolean
Boolean indicating whether data is stranded and stranded nucleotide
should be returned. Assumes R1 read on reverse strand implies + strand
coverage etc.
Returns
-------
nt : str or None
If None, then the read did not cover the position. If not None, returns
the nucleotide at that position (with + or - appended to indicate strand
if desired).
|
[
"Given",
"a",
"pileup",
"read",
"and",
"a",
"position",
"return",
"the",
"base",
"that",
"is",
"covered",
"by",
"the",
"read",
"at",
"the",
"given",
"position",
"if",
"the",
"position",
"is",
"covered",
"."
] |
38efdf0e11d01bc00a135921cb91a19c03db5d5c
|
https://github.com/cdeboever3/cdpybio/blob/38efdf0e11d01bc00a135921cb91a19c03db5d5c/cdpybio/pysamext.py#L105-L150
|
240,773
|
cdeboever3/cdpybio
|
cdpybio/pysamext.py
|
nt_counts
|
def nt_counts(bam, positions, stranded=False, vcf=False, bed=False):
"""
Find the number of nucleotides covered at all positions in a bed or vcf
file.
Parameters
----------
bam : str or pysam.calignmentfile.AlignmentFile
Bam file opened with pysam or path to bam file (must
be sorted and indexed).
positions : str or pybedtools.BedTool
Path to bed or vcf file or pybedtools.BedTool object. The extension is
used to determine whether the file is a bed or vcf (.bed vs .vcf).
stranded : boolean
Boolean indicating whether read data is stranded and stranded nucleotide
counts should be returned. Assumes R1 read on reverse strand implies +
strand coverage etc.
vcf : boolean
Set to True if you are providing a vcf file that doesn't have a .vcf
suffix.
bed : boolean
Set to True if you are providing a bed file that doesn't have a .bed
suffix.
Returns
-------
counts : pandas.DataFrame
Data frame with the counts for each base in the region. The index of
this data frame is one-based for compatibility with VCF files.
"""
if not bed and not vcf:
if type(positions) == pbt.bedtool.BedTool:
df = positions.to_dataframe()
elif positions[-4:] == '.bed':
bed = True
elif positions[-4:] == '.vcf':
vcf = True
else:
sys.stderr.write('Positions must be BedTool, bed file, or vcf '
'file.\n')
if bed:
df = pbt.BedTool(positions).to_dataframe()
elif vcf:
from variants import vcf_as_df
tdf = vcf_as_df(positions)
df = pd.DataFrame(index=tdf.index)
df['chrom'] = tdf.CHROM
df['start'] = tdf.POS - 1
df['end'] = tdf.POS
res = []
for i in df.index:
region = [df.ix[i, 'chrom'], df.ix[i, 'start'], df.ix[i, 'end']]
res.append(get_region_nt_counts(region, bam, stranded))
res = pd.concat(res)
return res
|
python
|
def nt_counts(bam, positions, stranded=False, vcf=False, bed=False):
"""
Find the number of nucleotides covered at all positions in a bed or vcf
file.
Parameters
----------
bam : str or pysam.calignmentfile.AlignmentFile
Bam file opened with pysam or path to bam file (must
be sorted and indexed).
positions : str or pybedtools.BedTool
Path to bed or vcf file or pybedtools.BedTool object. The extension is
used to determine whether the file is a bed or vcf (.bed vs .vcf).
stranded : boolean
Boolean indicating whether read data is stranded and stranded nucleotide
counts should be returned. Assumes R1 read on reverse strand implies +
strand coverage etc.
vcf : boolean
Set to True if you are providing a vcf file that doesn't have a .vcf
suffix.
bed : boolean
Set to True if you are providing a bed file that doesn't have a .bed
suffix.
Returns
-------
counts : pandas.DataFrame
Data frame with the counts for each base in the region. The index of
this data frame is one-based for compatibility with VCF files.
"""
if not bed and not vcf:
if type(positions) == pbt.bedtool.BedTool:
df = positions.to_dataframe()
elif positions[-4:] == '.bed':
bed = True
elif positions[-4:] == '.vcf':
vcf = True
else:
sys.stderr.write('Positions must be BedTool, bed file, or vcf '
'file.\n')
if bed:
df = pbt.BedTool(positions).to_dataframe()
elif vcf:
from variants import vcf_as_df
tdf = vcf_as_df(positions)
df = pd.DataFrame(index=tdf.index)
df['chrom'] = tdf.CHROM
df['start'] = tdf.POS - 1
df['end'] = tdf.POS
res = []
for i in df.index:
region = [df.ix[i, 'chrom'], df.ix[i, 'start'], df.ix[i, 'end']]
res.append(get_region_nt_counts(region, bam, stranded))
res = pd.concat(res)
return res
|
[
"def",
"nt_counts",
"(",
"bam",
",",
"positions",
",",
"stranded",
"=",
"False",
",",
"vcf",
"=",
"False",
",",
"bed",
"=",
"False",
")",
":",
"if",
"not",
"bed",
"and",
"not",
"vcf",
":",
"if",
"type",
"(",
"positions",
")",
"==",
"pbt",
".",
"bedtool",
".",
"BedTool",
":",
"df",
"=",
"positions",
".",
"to_dataframe",
"(",
")",
"elif",
"positions",
"[",
"-",
"4",
":",
"]",
"==",
"'.bed'",
":",
"bed",
"=",
"True",
"elif",
"positions",
"[",
"-",
"4",
":",
"]",
"==",
"'.vcf'",
":",
"vcf",
"=",
"True",
"else",
":",
"sys",
".",
"stderr",
".",
"write",
"(",
"'Positions must be BedTool, bed file, or vcf '",
"'file.\\n'",
")",
"if",
"bed",
":",
"df",
"=",
"pbt",
".",
"BedTool",
"(",
"positions",
")",
".",
"to_dataframe",
"(",
")",
"elif",
"vcf",
":",
"from",
"variants",
"import",
"vcf_as_df",
"tdf",
"=",
"vcf_as_df",
"(",
"positions",
")",
"df",
"=",
"pd",
".",
"DataFrame",
"(",
"index",
"=",
"tdf",
".",
"index",
")",
"df",
"[",
"'chrom'",
"]",
"=",
"tdf",
".",
"CHROM",
"df",
"[",
"'start'",
"]",
"=",
"tdf",
".",
"POS",
"-",
"1",
"df",
"[",
"'end'",
"]",
"=",
"tdf",
".",
"POS",
"res",
"=",
"[",
"]",
"for",
"i",
"in",
"df",
".",
"index",
":",
"region",
"=",
"[",
"df",
".",
"ix",
"[",
"i",
",",
"'chrom'",
"]",
",",
"df",
".",
"ix",
"[",
"i",
",",
"'start'",
"]",
",",
"df",
".",
"ix",
"[",
"i",
",",
"'end'",
"]",
"]",
"res",
".",
"append",
"(",
"get_region_nt_counts",
"(",
"region",
",",
"bam",
",",
"stranded",
")",
")",
"res",
"=",
"pd",
".",
"concat",
"(",
"res",
")",
"return",
"res"
] |
Find the number of nucleotides covered at all positions in a bed or vcf
file.
Parameters
----------
bam : str or pysam.calignmentfile.AlignmentFile
Bam file opened with pysam or path to bam file (must
be sorted and indexed).
positions : str or pybedtools.BedTool
Path to bed or vcf file or pybedtools.BedTool object. The extension is
used to determine whether the file is a bed or vcf (.bed vs .vcf).
stranded : boolean
Boolean indicating whether read data is stranded and stranded nucleotide
counts should be returned. Assumes R1 read on reverse strand implies +
strand coverage etc.
vcf : boolean
Set to True if you are providing a vcf file that doesn't have a .vcf
suffix.
bed : boolean
Set to True if you are providing a bed file that doesn't have a .bed
suffix.
Returns
-------
counts : pandas.DataFrame
Data frame with the counts for each base in the region. The index of
this data frame is one-based for compatibility with VCF files.
|
[
"Find",
"the",
"number",
"of",
"nucleotides",
"covered",
"at",
"all",
"positions",
"in",
"a",
"bed",
"or",
"vcf",
"file",
"."
] |
38efdf0e11d01bc00a135921cb91a19c03db5d5c
|
https://github.com/cdeboever3/cdpybio/blob/38efdf0e11d01bc00a135921cb91a19c03db5d5c/cdpybio/pysamext.py#L152-L213
|
240,774
|
logston/py3s3
|
py3s3/storage.py
|
Storage.save
|
def save(self, name, file):
"""
Saves new content to the file specified by name. The content should be
a proper File object or any python file-like object, ready to be read
from the beginning.
"""
# Get the proper name for the file, as it will actually be saved.
if name is None:
name = file.name
if not hasattr(file, 'chunks'):
file = File(file, name=name)
name = self.get_available_name(name)
name = self._save(name, file)
# Store filenames with forward slashes, even on Windows
return name.replace('\\', '/')
|
python
|
def save(self, name, file):
"""
Saves new content to the file specified by name. The content should be
a proper File object or any python file-like object, ready to be read
from the beginning.
"""
# Get the proper name for the file, as it will actually be saved.
if name is None:
name = file.name
if not hasattr(file, 'chunks'):
file = File(file, name=name)
name = self.get_available_name(name)
name = self._save(name, file)
# Store filenames with forward slashes, even on Windows
return name.replace('\\', '/')
|
[
"def",
"save",
"(",
"self",
",",
"name",
",",
"file",
")",
":",
"# Get the proper name for the file, as it will actually be saved.",
"if",
"name",
"is",
"None",
":",
"name",
"=",
"file",
".",
"name",
"if",
"not",
"hasattr",
"(",
"file",
",",
"'chunks'",
")",
":",
"file",
"=",
"File",
"(",
"file",
",",
"name",
"=",
"name",
")",
"name",
"=",
"self",
".",
"get_available_name",
"(",
"name",
")",
"name",
"=",
"self",
".",
"_save",
"(",
"name",
",",
"file",
")",
"# Store filenames with forward slashes, even on Windows",
"return",
"name",
".",
"replace",
"(",
"'\\\\'",
",",
"'/'",
")"
] |
Saves new content to the file specified by name. The content should be
a proper File object or any python file-like object, ready to be read
from the beginning.
|
[
"Saves",
"new",
"content",
"to",
"the",
"file",
"specified",
"by",
"name",
".",
"The",
"content",
"should",
"be",
"a",
"proper",
"File",
"object",
"or",
"any",
"python",
"file",
"-",
"like",
"object",
"ready",
"to",
"be",
"read",
"from",
"the",
"beginning",
"."
] |
1910ca60c53a53d839d6f7b09c05b555f3bfccf4
|
https://github.com/logston/py3s3/blob/1910ca60c53a53d839d6f7b09c05b555f3bfccf4/py3s3/storage.py#L50-L67
|
240,775
|
logston/py3s3
|
py3s3/storage.py
|
S3Storage.request_signature
|
def request_signature(self, stringtosign):
"""
Construct a signature by making an RFC2104 HMAC-SHA1
of the following and converting it to Base64 UTF-8 encoded string.
"""
digest = hmac.new(
self.secret_key.encode(ENCODING),
stringtosign.encode(ENCODING),
hashlib.sha1
).digest()
return b64_string(digest)
|
python
|
def request_signature(self, stringtosign):
"""
Construct a signature by making an RFC2104 HMAC-SHA1
of the following and converting it to Base64 UTF-8 encoded string.
"""
digest = hmac.new(
self.secret_key.encode(ENCODING),
stringtosign.encode(ENCODING),
hashlib.sha1
).digest()
return b64_string(digest)
|
[
"def",
"request_signature",
"(",
"self",
",",
"stringtosign",
")",
":",
"digest",
"=",
"hmac",
".",
"new",
"(",
"self",
".",
"secret_key",
".",
"encode",
"(",
"ENCODING",
")",
",",
"stringtosign",
".",
"encode",
"(",
"ENCODING",
")",
",",
"hashlib",
".",
"sha1",
")",
".",
"digest",
"(",
")",
"return",
"b64_string",
"(",
"digest",
")"
] |
Construct a signature by making an RFC2104 HMAC-SHA1
of the following and converting it to Base64 UTF-8 encoded string.
|
[
"Construct",
"a",
"signature",
"by",
"making",
"an",
"RFC2104",
"HMAC",
"-",
"SHA1",
"of",
"the",
"following",
"and",
"converting",
"it",
"to",
"Base64",
"UTF",
"-",
"8",
"encoded",
"string",
"."
] |
1910ca60c53a53d839d6f7b09c05b555f3bfccf4
|
https://github.com/logston/py3s3/blob/1910ca60c53a53d839d6f7b09c05b555f3bfccf4/py3s3/storage.py#L205-L215
|
240,776
|
logston/py3s3
|
py3s3/storage.py
|
S3Storage._get_content_type
|
def _get_content_type(self, file):
"""
Return content type of file. If file does not
have a content type, make a guess.
"""
if file.mimetype:
return file.mimetype
# get file extension
_, extension = os.path.splitext(file.name)
extension = extension.strip('.')
# Make an educated guess about what the Content-Type should be.
return media_types[extension] if extension in media_types else 'binary/octet-stream'
|
python
|
def _get_content_type(self, file):
"""
Return content type of file. If file does not
have a content type, make a guess.
"""
if file.mimetype:
return file.mimetype
# get file extension
_, extension = os.path.splitext(file.name)
extension = extension.strip('.')
# Make an educated guess about what the Content-Type should be.
return media_types[extension] if extension in media_types else 'binary/octet-stream'
|
[
"def",
"_get_content_type",
"(",
"self",
",",
"file",
")",
":",
"if",
"file",
".",
"mimetype",
":",
"return",
"file",
".",
"mimetype",
"# get file extension",
"_",
",",
"extension",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"file",
".",
"name",
")",
"extension",
"=",
"extension",
".",
"strip",
"(",
"'.'",
")",
"# Make an educated guess about what the Content-Type should be.",
"return",
"media_types",
"[",
"extension",
"]",
"if",
"extension",
"in",
"media_types",
"else",
"'binary/octet-stream'"
] |
Return content type of file. If file does not
have a content type, make a guess.
|
[
"Return",
"content",
"type",
"of",
"file",
".",
"If",
"file",
"does",
"not",
"have",
"a",
"content",
"type",
"make",
"a",
"guess",
"."
] |
1910ca60c53a53d839d6f7b09c05b555f3bfccf4
|
https://github.com/logston/py3s3/blob/1910ca60c53a53d839d6f7b09c05b555f3bfccf4/py3s3/storage.py#L261-L274
|
240,777
|
logston/py3s3
|
py3s3/storage.py
|
S3Storage._put_file
|
def _put_file(self, file):
"""Send PUT request to S3 with file contents"""
post_params = {
'file_size': file.size,
'file_hash': file.md5hash(),
'content_type': self._get_content_type(file),
}
headers = self._request_headers('PUT', file.prefixed_name, post_params=post_params)
with closing(HTTPConnection(self.netloc)) as conn:
conn.request('PUT', file.prefixed_name, file.read(), headers=headers)
response = conn.getresponse()
if response.status not in (200,):
raise S3IOError(
'py3s3 PUT error. '
'Response status: {}. '
'Reason: {}. '
'Response Text: \n'
'{}'.format(response.status, response.reason, response.read()))
|
python
|
def _put_file(self, file):
"""Send PUT request to S3 with file contents"""
post_params = {
'file_size': file.size,
'file_hash': file.md5hash(),
'content_type': self._get_content_type(file),
}
headers = self._request_headers('PUT', file.prefixed_name, post_params=post_params)
with closing(HTTPConnection(self.netloc)) as conn:
conn.request('PUT', file.prefixed_name, file.read(), headers=headers)
response = conn.getresponse()
if response.status not in (200,):
raise S3IOError(
'py3s3 PUT error. '
'Response status: {}. '
'Reason: {}. '
'Response Text: \n'
'{}'.format(response.status, response.reason, response.read()))
|
[
"def",
"_put_file",
"(",
"self",
",",
"file",
")",
":",
"post_params",
"=",
"{",
"'file_size'",
":",
"file",
".",
"size",
",",
"'file_hash'",
":",
"file",
".",
"md5hash",
"(",
")",
",",
"'content_type'",
":",
"self",
".",
"_get_content_type",
"(",
"file",
")",
",",
"}",
"headers",
"=",
"self",
".",
"_request_headers",
"(",
"'PUT'",
",",
"file",
".",
"prefixed_name",
",",
"post_params",
"=",
"post_params",
")",
"with",
"closing",
"(",
"HTTPConnection",
"(",
"self",
".",
"netloc",
")",
")",
"as",
"conn",
":",
"conn",
".",
"request",
"(",
"'PUT'",
",",
"file",
".",
"prefixed_name",
",",
"file",
".",
"read",
"(",
")",
",",
"headers",
"=",
"headers",
")",
"response",
"=",
"conn",
".",
"getresponse",
"(",
")",
"if",
"response",
".",
"status",
"not",
"in",
"(",
"200",
",",
")",
":",
"raise",
"S3IOError",
"(",
"'py3s3 PUT error. '",
"'Response status: {}. '",
"'Reason: {}. '",
"'Response Text: \\n'",
"'{}'",
".",
"format",
"(",
"response",
".",
"status",
",",
"response",
".",
"reason",
",",
"response",
".",
"read",
"(",
")",
")",
")"
] |
Send PUT request to S3 with file contents
|
[
"Send",
"PUT",
"request",
"to",
"S3",
"with",
"file",
"contents"
] |
1910ca60c53a53d839d6f7b09c05b555f3bfccf4
|
https://github.com/logston/py3s3/blob/1910ca60c53a53d839d6f7b09c05b555f3bfccf4/py3s3/storage.py#L276-L298
|
240,778
|
logston/py3s3
|
py3s3/storage.py
|
S3Storage._get_file
|
def _get_file(self, prefixed_name):
"""
Return a signature for use in GET requests
"""
headers = self._request_headers('GET', prefixed_name)
file = S3ContentFile('')
with closing(HTTPConnection(self.netloc)) as conn:
conn.request('GET', prefixed_name, headers=headers)
response = conn.getresponse()
if not response.status in (200,):
if response.length is None:
# length == None seems to be returned from GET requests
# to non-existing files
raise S3FileDoesNotExistError(prefixed_name)
# catch all other cases
raise S3IOError(
'py3s3 GET error. '
'Response status: {}. '
'Reason: {}. '
'Response Text: \n'
'{}'.format(response.status, response.reason, response.read()))
file = S3ContentFile(response.read())
return file
|
python
|
def _get_file(self, prefixed_name):
"""
Return a signature for use in GET requests
"""
headers = self._request_headers('GET', prefixed_name)
file = S3ContentFile('')
with closing(HTTPConnection(self.netloc)) as conn:
conn.request('GET', prefixed_name, headers=headers)
response = conn.getresponse()
if not response.status in (200,):
if response.length is None:
# length == None seems to be returned from GET requests
# to non-existing files
raise S3FileDoesNotExistError(prefixed_name)
# catch all other cases
raise S3IOError(
'py3s3 GET error. '
'Response status: {}. '
'Reason: {}. '
'Response Text: \n'
'{}'.format(response.status, response.reason, response.read()))
file = S3ContentFile(response.read())
return file
|
[
"def",
"_get_file",
"(",
"self",
",",
"prefixed_name",
")",
":",
"headers",
"=",
"self",
".",
"_request_headers",
"(",
"'GET'",
",",
"prefixed_name",
")",
"file",
"=",
"S3ContentFile",
"(",
"''",
")",
"with",
"closing",
"(",
"HTTPConnection",
"(",
"self",
".",
"netloc",
")",
")",
"as",
"conn",
":",
"conn",
".",
"request",
"(",
"'GET'",
",",
"prefixed_name",
",",
"headers",
"=",
"headers",
")",
"response",
"=",
"conn",
".",
"getresponse",
"(",
")",
"if",
"not",
"response",
".",
"status",
"in",
"(",
"200",
",",
")",
":",
"if",
"response",
".",
"length",
"is",
"None",
":",
"# length == None seems to be returned from GET requests",
"# to non-existing files",
"raise",
"S3FileDoesNotExistError",
"(",
"prefixed_name",
")",
"# catch all other cases",
"raise",
"S3IOError",
"(",
"'py3s3 GET error. '",
"'Response status: {}. '",
"'Reason: {}. '",
"'Response Text: \\n'",
"'{}'",
".",
"format",
"(",
"response",
".",
"status",
",",
"response",
".",
"reason",
",",
"response",
".",
"read",
"(",
")",
")",
")",
"file",
"=",
"S3ContentFile",
"(",
"response",
".",
"read",
"(",
")",
")",
"return",
"file"
] |
Return a signature for use in GET requests
|
[
"Return",
"a",
"signature",
"for",
"use",
"in",
"GET",
"requests"
] |
1910ca60c53a53d839d6f7b09c05b555f3bfccf4
|
https://github.com/logston/py3s3/blob/1910ca60c53a53d839d6f7b09c05b555f3bfccf4/py3s3/storage.py#L300-L323
|
240,779
|
logston/py3s3
|
py3s3/storage.py
|
S3Storage.url
|
def url(self, name):
"""Return URL of resource"""
scheme = 'http'
path = self._prepend_name_prefix(name)
query = ''
fragment = ''
url_tuple = (scheme, self.netloc, path, query, fragment)
return urllib.parse.urlunsplit(url_tuple)
|
python
|
def url(self, name):
"""Return URL of resource"""
scheme = 'http'
path = self._prepend_name_prefix(name)
query = ''
fragment = ''
url_tuple = (scheme, self.netloc, path, query, fragment)
return urllib.parse.urlunsplit(url_tuple)
|
[
"def",
"url",
"(",
"self",
",",
"name",
")",
":",
"scheme",
"=",
"'http'",
"path",
"=",
"self",
".",
"_prepend_name_prefix",
"(",
"name",
")",
"query",
"=",
"''",
"fragment",
"=",
"''",
"url_tuple",
"=",
"(",
"scheme",
",",
"self",
".",
"netloc",
",",
"path",
",",
"query",
",",
"fragment",
")",
"return",
"urllib",
".",
"parse",
".",
"urlunsplit",
"(",
"url_tuple",
")"
] |
Return URL of resource
|
[
"Return",
"URL",
"of",
"resource"
] |
1910ca60c53a53d839d6f7b09c05b555f3bfccf4
|
https://github.com/logston/py3s3/blob/1910ca60c53a53d839d6f7b09c05b555f3bfccf4/py3s3/storage.py#L378-L385
|
240,780
|
pablorecio/Cobaya
|
src/cobaya/app.py
|
get_all_tasks
|
def get_all_tasks(conf):
"""Returns a list with every task registred on Hamster.
"""
db = HamsterDB(conf)
fact_list = db.all_facts_id
security_days = int(conf.get_option('tasks.security_days'))
today = datetime.today()
tasks = {}
for fact_id in fact_list:
ht = HamsterTask(fact_id, conf, db)
if ht.end_time:
end_time = ht.get_object_dates()[1]
if today - timedelta(security_days) <= end_time:
rt = ht.get_remote_task()
tasks[rt.task_id] = rt
db.close_connection()
print 'Obtained %d tasks' % len(tasks)
return tasks
|
python
|
def get_all_tasks(conf):
"""Returns a list with every task registred on Hamster.
"""
db = HamsterDB(conf)
fact_list = db.all_facts_id
security_days = int(conf.get_option('tasks.security_days'))
today = datetime.today()
tasks = {}
for fact_id in fact_list:
ht = HamsterTask(fact_id, conf, db)
if ht.end_time:
end_time = ht.get_object_dates()[1]
if today - timedelta(security_days) <= end_time:
rt = ht.get_remote_task()
tasks[rt.task_id] = rt
db.close_connection()
print 'Obtained %d tasks' % len(tasks)
return tasks
|
[
"def",
"get_all_tasks",
"(",
"conf",
")",
":",
"db",
"=",
"HamsterDB",
"(",
"conf",
")",
"fact_list",
"=",
"db",
".",
"all_facts_id",
"security_days",
"=",
"int",
"(",
"conf",
".",
"get_option",
"(",
"'tasks.security_days'",
")",
")",
"today",
"=",
"datetime",
".",
"today",
"(",
")",
"tasks",
"=",
"{",
"}",
"for",
"fact_id",
"in",
"fact_list",
":",
"ht",
"=",
"HamsterTask",
"(",
"fact_id",
",",
"conf",
",",
"db",
")",
"if",
"ht",
".",
"end_time",
":",
"end_time",
"=",
"ht",
".",
"get_object_dates",
"(",
")",
"[",
"1",
"]",
"if",
"today",
"-",
"timedelta",
"(",
"security_days",
")",
"<=",
"end_time",
":",
"rt",
"=",
"ht",
".",
"get_remote_task",
"(",
")",
"tasks",
"[",
"rt",
".",
"task_id",
"]",
"=",
"rt",
"db",
".",
"close_connection",
"(",
")",
"print",
"'Obtained %d tasks'",
"%",
"len",
"(",
"tasks",
")",
"return",
"tasks"
] |
Returns a list with every task registred on Hamster.
|
[
"Returns",
"a",
"list",
"with",
"every",
"task",
"registred",
"on",
"Hamster",
"."
] |
70b107dea5f31f51e7b6738da3c2a1df5b9f3f20
|
https://github.com/pablorecio/Cobaya/blob/70b107dea5f31f51e7b6738da3c2a1df5b9f3f20/src/cobaya/app.py#L94-L117
|
240,781
|
edwards-lab/libGWAS
|
libgwas/impute_parser.py
|
SetEncoding
|
def SetEncoding(sval):
"""Sets the encoding variable according to the text passed
:param sval: text specification for the desired model
"""
global encoding
s=sval.lower()
if s == "additive":
encoding = Encoding.Additive
elif s == "dominant":
encoding = Encoding.Dominant
elif s == "recessive":
encoding = Encoding.Recessive
elif s == "genotype":
encoding = Encoding.Genotype
elif s == "raw":
encoding = Encoding.Raw
else:
raise InvalidSelection("Invalid encoding, %s, selected" % (sval))
|
python
|
def SetEncoding(sval):
"""Sets the encoding variable according to the text passed
:param sval: text specification for the desired model
"""
global encoding
s=sval.lower()
if s == "additive":
encoding = Encoding.Additive
elif s == "dominant":
encoding = Encoding.Dominant
elif s == "recessive":
encoding = Encoding.Recessive
elif s == "genotype":
encoding = Encoding.Genotype
elif s == "raw":
encoding = Encoding.Raw
else:
raise InvalidSelection("Invalid encoding, %s, selected" % (sval))
|
[
"def",
"SetEncoding",
"(",
"sval",
")",
":",
"global",
"encoding",
"s",
"=",
"sval",
".",
"lower",
"(",
")",
"if",
"s",
"==",
"\"additive\"",
":",
"encoding",
"=",
"Encoding",
".",
"Additive",
"elif",
"s",
"==",
"\"dominant\"",
":",
"encoding",
"=",
"Encoding",
".",
"Dominant",
"elif",
"s",
"==",
"\"recessive\"",
":",
"encoding",
"=",
"Encoding",
".",
"Recessive",
"elif",
"s",
"==",
"\"genotype\"",
":",
"encoding",
"=",
"Encoding",
".",
"Genotype",
"elif",
"s",
"==",
"\"raw\"",
":",
"encoding",
"=",
"Encoding",
".",
"Raw",
"else",
":",
"raise",
"InvalidSelection",
"(",
"\"Invalid encoding, %s, selected\"",
"%",
"(",
"sval",
")",
")"
] |
Sets the encoding variable according to the text passed
:param sval: text specification for the desired model
|
[
"Sets",
"the",
"encoding",
"variable",
"according",
"to",
"the",
"text",
"passed"
] |
d68c9a083d443dfa5d7c5112de29010909cfe23f
|
https://github.com/edwards-lab/libGWAS/blob/d68c9a083d443dfa5d7c5112de29010909cfe23f/libgwas/impute_parser.py#L39-L57
|
240,782
|
edwards-lab/libGWAS
|
libgwas/impute_parser.py
|
Parser.load_family_details
|
def load_family_details(self, pheno_covar):
"""Load family data updating the pheno_covar with family ids found.
:param pheno_covar: Phenotype/covariate object
:return: None
"""
file = open(self.fam_details)
header = file.readline()
format = file.readline()
self.file_index = 0
mask_components = [] # 1s indicate an individual is to be masked out
for line in file:
words = line.strip().split()
indid = ":".join(words[0:2])
if DataParser.valid_indid(indid):
mask_components.append(0)
sex = int(words[5])
pheno = float(words[6])
pheno_covar.add_subject(indid, sex, pheno)
else:
mask_components.append(1)
mask_components = numpy.array(mask_components)
self.ind_mask = numpy.zeros(len(mask_components) * 2, dtype=numpy.int8).reshape(-1, 2)
self.ind_mask[0:, 0] = mask_components
self.ind_mask[0:, 1] = mask_components
self.ind_count = self.ind_mask.shape[0]
pheno_covar.freeze_subjects()
|
python
|
def load_family_details(self, pheno_covar):
"""Load family data updating the pheno_covar with family ids found.
:param pheno_covar: Phenotype/covariate object
:return: None
"""
file = open(self.fam_details)
header = file.readline()
format = file.readline()
self.file_index = 0
mask_components = [] # 1s indicate an individual is to be masked out
for line in file:
words = line.strip().split()
indid = ":".join(words[0:2])
if DataParser.valid_indid(indid):
mask_components.append(0)
sex = int(words[5])
pheno = float(words[6])
pheno_covar.add_subject(indid, sex, pheno)
else:
mask_components.append(1)
mask_components = numpy.array(mask_components)
self.ind_mask = numpy.zeros(len(mask_components) * 2, dtype=numpy.int8).reshape(-1, 2)
self.ind_mask[0:, 0] = mask_components
self.ind_mask[0:, 1] = mask_components
self.ind_count = self.ind_mask.shape[0]
pheno_covar.freeze_subjects()
|
[
"def",
"load_family_details",
"(",
"self",
",",
"pheno_covar",
")",
":",
"file",
"=",
"open",
"(",
"self",
".",
"fam_details",
")",
"header",
"=",
"file",
".",
"readline",
"(",
")",
"format",
"=",
"file",
".",
"readline",
"(",
")",
"self",
".",
"file_index",
"=",
"0",
"mask_components",
"=",
"[",
"]",
"# 1s indicate an individual is to be masked out",
"for",
"line",
"in",
"file",
":",
"words",
"=",
"line",
".",
"strip",
"(",
")",
".",
"split",
"(",
")",
"indid",
"=",
"\":\"",
".",
"join",
"(",
"words",
"[",
"0",
":",
"2",
"]",
")",
"if",
"DataParser",
".",
"valid_indid",
"(",
"indid",
")",
":",
"mask_components",
".",
"append",
"(",
"0",
")",
"sex",
"=",
"int",
"(",
"words",
"[",
"5",
"]",
")",
"pheno",
"=",
"float",
"(",
"words",
"[",
"6",
"]",
")",
"pheno_covar",
".",
"add_subject",
"(",
"indid",
",",
"sex",
",",
"pheno",
")",
"else",
":",
"mask_components",
".",
"append",
"(",
"1",
")",
"mask_components",
"=",
"numpy",
".",
"array",
"(",
"mask_components",
")",
"self",
".",
"ind_mask",
"=",
"numpy",
".",
"zeros",
"(",
"len",
"(",
"mask_components",
")",
"*",
"2",
",",
"dtype",
"=",
"numpy",
".",
"int8",
")",
".",
"reshape",
"(",
"-",
"1",
",",
"2",
")",
"self",
".",
"ind_mask",
"[",
"0",
":",
",",
"0",
"]",
"=",
"mask_components",
"self",
".",
"ind_mask",
"[",
"0",
":",
",",
"1",
"]",
"=",
"mask_components",
"self",
".",
"ind_count",
"=",
"self",
".",
"ind_mask",
".",
"shape",
"[",
"0",
"]",
"pheno_covar",
".",
"freeze_subjects",
"(",
")"
] |
Load family data updating the pheno_covar with family ids found.
:param pheno_covar: Phenotype/covariate object
:return: None
|
[
"Load",
"family",
"data",
"updating",
"the",
"pheno_covar",
"with",
"family",
"ids",
"found",
"."
] |
d68c9a083d443dfa5d7c5112de29010909cfe23f
|
https://github.com/edwards-lab/libGWAS/blob/d68c9a083d443dfa5d7c5112de29010909cfe23f/libgwas/impute_parser.py#L149-L176
|
240,783
|
edwards-lab/libGWAS
|
libgwas/impute_parser.py
|
Parser.load_genotypes
|
def load_genotypes(self):
"""Prepares the files for genotype parsing.
:return: None
"""
if self.file_index < len(self.archives):
self.current_file = self.archives[self.file_index]
info_filename = self.current_file.replace(Parser.gen_ext, Parser.info_ext)
if len(self.info_files) > 0:
info_filename = self.info_files[self.file_index]
self.info_file = open(info_filename)
self.info_file.readline() # Dump the header
if DataParser.compressed_pedigree:
self.freq_file = gzip.open("%s" % (self.current_file), 'rb')
else:
self.freq_file = open(self.current_file)
self.current_chrom = self.chroms[self.file_index]
self.file_index += 1
else:
raise StopIteration
|
python
|
def load_genotypes(self):
"""Prepares the files for genotype parsing.
:return: None
"""
if self.file_index < len(self.archives):
self.current_file = self.archives[self.file_index]
info_filename = self.current_file.replace(Parser.gen_ext, Parser.info_ext)
if len(self.info_files) > 0:
info_filename = self.info_files[self.file_index]
self.info_file = open(info_filename)
self.info_file.readline() # Dump the header
if DataParser.compressed_pedigree:
self.freq_file = gzip.open("%s" % (self.current_file), 'rb')
else:
self.freq_file = open(self.current_file)
self.current_chrom = self.chroms[self.file_index]
self.file_index += 1
else:
raise StopIteration
|
[
"def",
"load_genotypes",
"(",
"self",
")",
":",
"if",
"self",
".",
"file_index",
"<",
"len",
"(",
"self",
".",
"archives",
")",
":",
"self",
".",
"current_file",
"=",
"self",
".",
"archives",
"[",
"self",
".",
"file_index",
"]",
"info_filename",
"=",
"self",
".",
"current_file",
".",
"replace",
"(",
"Parser",
".",
"gen_ext",
",",
"Parser",
".",
"info_ext",
")",
"if",
"len",
"(",
"self",
".",
"info_files",
")",
">",
"0",
":",
"info_filename",
"=",
"self",
".",
"info_files",
"[",
"self",
".",
"file_index",
"]",
"self",
".",
"info_file",
"=",
"open",
"(",
"info_filename",
")",
"self",
".",
"info_file",
".",
"readline",
"(",
")",
"# Dump the header",
"if",
"DataParser",
".",
"compressed_pedigree",
":",
"self",
".",
"freq_file",
"=",
"gzip",
".",
"open",
"(",
"\"%s\"",
"%",
"(",
"self",
".",
"current_file",
")",
",",
"'rb'",
")",
"else",
":",
"self",
".",
"freq_file",
"=",
"open",
"(",
"self",
".",
"current_file",
")",
"self",
".",
"current_chrom",
"=",
"self",
".",
"chroms",
"[",
"self",
".",
"file_index",
"]",
"self",
".",
"file_index",
"+=",
"1",
"else",
":",
"raise",
"StopIteration"
] |
Prepares the files for genotype parsing.
:return: None
|
[
"Prepares",
"the",
"files",
"for",
"genotype",
"parsing",
"."
] |
d68c9a083d443dfa5d7c5112de29010909cfe23f
|
https://github.com/edwards-lab/libGWAS/blob/d68c9a083d443dfa5d7c5112de29010909cfe23f/libgwas/impute_parser.py#L178-L200
|
240,784
|
edwards-lab/libGWAS
|
libgwas/impute_parser.py
|
Parser.get_next_line
|
def get_next_line(self):
"""If we reach the end of the file, we simply open the next, until we \
run out of archives to process"""
line = self.freq_file.readline().strip().split()
if len(line) < 1:
self.load_genotypes()
line = self.freq_file.readline().strip().split()
info_line = self.info_file.readline().strip().split()
info = float(info_line[4])
exp_freq = float(info_line[3])
return line, info, exp_freq
|
python
|
def get_next_line(self):
"""If we reach the end of the file, we simply open the next, until we \
run out of archives to process"""
line = self.freq_file.readline().strip().split()
if len(line) < 1:
self.load_genotypes()
line = self.freq_file.readline().strip().split()
info_line = self.info_file.readline().strip().split()
info = float(info_line[4])
exp_freq = float(info_line[3])
return line, info, exp_freq
|
[
"def",
"get_next_line",
"(",
"self",
")",
":",
"line",
"=",
"self",
".",
"freq_file",
".",
"readline",
"(",
")",
".",
"strip",
"(",
")",
".",
"split",
"(",
")",
"if",
"len",
"(",
"line",
")",
"<",
"1",
":",
"self",
".",
"load_genotypes",
"(",
")",
"line",
"=",
"self",
".",
"freq_file",
".",
"readline",
"(",
")",
".",
"strip",
"(",
")",
".",
"split",
"(",
")",
"info_line",
"=",
"self",
".",
"info_file",
".",
"readline",
"(",
")",
".",
"strip",
"(",
")",
".",
"split",
"(",
")",
"info",
"=",
"float",
"(",
"info_line",
"[",
"4",
"]",
")",
"exp_freq",
"=",
"float",
"(",
"info_line",
"[",
"3",
"]",
")",
"return",
"line",
",",
"info",
",",
"exp_freq"
] |
If we reach the end of the file, we simply open the next, until we \
run out of archives to process
|
[
"If",
"we",
"reach",
"the",
"end",
"of",
"the",
"file",
"we",
"simply",
"open",
"the",
"next",
"until",
"we",
"\\",
"run",
"out",
"of",
"archives",
"to",
"process"
] |
d68c9a083d443dfa5d7c5112de29010909cfe23f
|
https://github.com/edwards-lab/libGWAS/blob/d68c9a083d443dfa5d7c5112de29010909cfe23f/libgwas/impute_parser.py#L202-L213
|
240,785
|
mayfield/shellish
|
shellish/command/command.py
|
parse_docstring
|
def parse_docstring(entity):
""" Return sanitized docstring from an entity. The first line of the
docstring is the title, and remaining lines are the details, aka git
style. """
doc = inspect.getdoc(entity)
if not doc:
return None, None
doc = doc.splitlines(keepends=True)
if not doc[0].strip():
doc.pop(0)
title = (doc and doc.pop(0).strip()) or None
if doc and not doc[0].strip():
doc.pop(0)
desc = ''.join(doc).rstrip() or None
return title, desc
|
python
|
def parse_docstring(entity):
""" Return sanitized docstring from an entity. The first line of the
docstring is the title, and remaining lines are the details, aka git
style. """
doc = inspect.getdoc(entity)
if not doc:
return None, None
doc = doc.splitlines(keepends=True)
if not doc[0].strip():
doc.pop(0)
title = (doc and doc.pop(0).strip()) or None
if doc and not doc[0].strip():
doc.pop(0)
desc = ''.join(doc).rstrip() or None
return title, desc
|
[
"def",
"parse_docstring",
"(",
"entity",
")",
":",
"doc",
"=",
"inspect",
".",
"getdoc",
"(",
"entity",
")",
"if",
"not",
"doc",
":",
"return",
"None",
",",
"None",
"doc",
"=",
"doc",
".",
"splitlines",
"(",
"keepends",
"=",
"True",
")",
"if",
"not",
"doc",
"[",
"0",
"]",
".",
"strip",
"(",
")",
":",
"doc",
".",
"pop",
"(",
"0",
")",
"title",
"=",
"(",
"doc",
"and",
"doc",
".",
"pop",
"(",
"0",
")",
".",
"strip",
"(",
")",
")",
"or",
"None",
"if",
"doc",
"and",
"not",
"doc",
"[",
"0",
"]",
".",
"strip",
"(",
")",
":",
"doc",
".",
"pop",
"(",
"0",
")",
"desc",
"=",
"''",
".",
"join",
"(",
"doc",
")",
".",
"rstrip",
"(",
")",
"or",
"None",
"return",
"title",
",",
"desc"
] |
Return sanitized docstring from an entity. The first line of the
docstring is the title, and remaining lines are the details, aka git
style.
|
[
"Return",
"sanitized",
"docstring",
"from",
"an",
"entity",
".",
"The",
"first",
"line",
"of",
"the",
"docstring",
"is",
"the",
"title",
"and",
"remaining",
"lines",
"are",
"the",
"details",
"aka",
"git",
"style",
"."
] |
df0f0e4612d138c34d8cb99b66ab5b8e47f1414a
|
https://github.com/mayfield/shellish/blob/df0f0e4612d138c34d8cb99b66ab5b8e47f1414a/shellish/command/command.py#L16-L30
|
240,786
|
mayfield/shellish
|
shellish/command/command.py
|
Command.parse_args
|
def parse_args(self, argv=None):
""" Return an argparse.Namespace of the argv string or sys.argv if
argv is None. """
arg_input = shlex.split(argv) if argv is not None else None
self.get_or_create_session()
return self.argparser.parse_args(arg_input)
|
python
|
def parse_args(self, argv=None):
""" Return an argparse.Namespace of the argv string or sys.argv if
argv is None. """
arg_input = shlex.split(argv) if argv is not None else None
self.get_or_create_session()
return self.argparser.parse_args(arg_input)
|
[
"def",
"parse_args",
"(",
"self",
",",
"argv",
"=",
"None",
")",
":",
"arg_input",
"=",
"shlex",
".",
"split",
"(",
"argv",
")",
"if",
"argv",
"is",
"not",
"None",
"else",
"None",
"self",
".",
"get_or_create_session",
"(",
")",
"return",
"self",
".",
"argparser",
".",
"parse_args",
"(",
"arg_input",
")"
] |
Return an argparse.Namespace of the argv string or sys.argv if
argv is None.
|
[
"Return",
"an",
"argparse",
".",
"Namespace",
"of",
"the",
"argv",
"string",
"or",
"sys",
".",
"argv",
"if",
"argv",
"is",
"None",
"."
] |
df0f0e4612d138c34d8cb99b66ab5b8e47f1414a
|
https://github.com/mayfield/shellish/blob/df0f0e4612d138c34d8cb99b66ab5b8e47f1414a/shellish/command/command.py#L105-L110
|
240,787
|
mayfield/shellish
|
shellish/command/command.py
|
Command.get_pager_spec
|
def get_pager_spec(self):
""" Find the best pager settings for this command. If the user has
specified overrides in the INI config file we prefer those. """
self_config = self.get_config()
pagercmd = self_config.get('pager')
istty = self_config.getboolean('pager_istty')
core_config = self.get_config('core')
if pagercmd is None:
pagercmd = core_config.get('pager')
if istty is None:
istty = core_config.get('pager_istty')
return {
"pagercmd": pagercmd,
"istty": istty
}
|
python
|
def get_pager_spec(self):
""" Find the best pager settings for this command. If the user has
specified overrides in the INI config file we prefer those. """
self_config = self.get_config()
pagercmd = self_config.get('pager')
istty = self_config.getboolean('pager_istty')
core_config = self.get_config('core')
if pagercmd is None:
pagercmd = core_config.get('pager')
if istty is None:
istty = core_config.get('pager_istty')
return {
"pagercmd": pagercmd,
"istty": istty
}
|
[
"def",
"get_pager_spec",
"(",
"self",
")",
":",
"self_config",
"=",
"self",
".",
"get_config",
"(",
")",
"pagercmd",
"=",
"self_config",
".",
"get",
"(",
"'pager'",
")",
"istty",
"=",
"self_config",
".",
"getboolean",
"(",
"'pager_istty'",
")",
"core_config",
"=",
"self",
".",
"get_config",
"(",
"'core'",
")",
"if",
"pagercmd",
"is",
"None",
":",
"pagercmd",
"=",
"core_config",
".",
"get",
"(",
"'pager'",
")",
"if",
"istty",
"is",
"None",
":",
"istty",
"=",
"core_config",
".",
"get",
"(",
"'pager_istty'",
")",
"return",
"{",
"\"pagercmd\"",
":",
"pagercmd",
",",
"\"istty\"",
":",
"istty",
"}"
] |
Find the best pager settings for this command. If the user has
specified overrides in the INI config file we prefer those.
|
[
"Find",
"the",
"best",
"pager",
"settings",
"for",
"this",
"command",
".",
"If",
"the",
"user",
"has",
"specified",
"overrides",
"in",
"the",
"INI",
"config",
"file",
"we",
"prefer",
"those",
"."
] |
df0f0e4612d138c34d8cb99b66ab5b8e47f1414a
|
https://github.com/mayfield/shellish/blob/df0f0e4612d138c34d8cb99b66ab5b8e47f1414a/shellish/command/command.py#L135-L149
|
240,788
|
mayfield/shellish
|
shellish/command/command.py
|
Command.run_wrap
|
def run_wrap(self, args):
""" Wrap some standard protocol around a command's run method. This
wrapper should generally never capture exceptions. It can look at
them and do things but prerun and postrun should always be symmetric.
Any exception suppression should happen in the `session.execute`. """
self.fire_event('prerun', args)
self.prerun(args)
try:
if self.session.allow_pager and self.use_pager:
desc = 'Command\: %s' % '-'.join(self.prog.split())
with paging.pager_redirect(desc, **self.get_pager_spec()):
result = self.run(args)
else:
result = self.run(args)
except (SystemExit, Exception) as e:
self.postrun(args, exc=e)
self.fire_event('postrun', args, exc=e)
raise e
else:
self.postrun(args, result=result)
self.fire_event('postrun', args, result=result)
return result
|
python
|
def run_wrap(self, args):
""" Wrap some standard protocol around a command's run method. This
wrapper should generally never capture exceptions. It can look at
them and do things but prerun and postrun should always be symmetric.
Any exception suppression should happen in the `session.execute`. """
self.fire_event('prerun', args)
self.prerun(args)
try:
if self.session.allow_pager and self.use_pager:
desc = 'Command\: %s' % '-'.join(self.prog.split())
with paging.pager_redirect(desc, **self.get_pager_spec()):
result = self.run(args)
else:
result = self.run(args)
except (SystemExit, Exception) as e:
self.postrun(args, exc=e)
self.fire_event('postrun', args, exc=e)
raise e
else:
self.postrun(args, result=result)
self.fire_event('postrun', args, result=result)
return result
|
[
"def",
"run_wrap",
"(",
"self",
",",
"args",
")",
":",
"self",
".",
"fire_event",
"(",
"'prerun'",
",",
"args",
")",
"self",
".",
"prerun",
"(",
"args",
")",
"try",
":",
"if",
"self",
".",
"session",
".",
"allow_pager",
"and",
"self",
".",
"use_pager",
":",
"desc",
"=",
"'Command\\: %s'",
"%",
"'-'",
".",
"join",
"(",
"self",
".",
"prog",
".",
"split",
"(",
")",
")",
"with",
"paging",
".",
"pager_redirect",
"(",
"desc",
",",
"*",
"*",
"self",
".",
"get_pager_spec",
"(",
")",
")",
":",
"result",
"=",
"self",
".",
"run",
"(",
"args",
")",
"else",
":",
"result",
"=",
"self",
".",
"run",
"(",
"args",
")",
"except",
"(",
"SystemExit",
",",
"Exception",
")",
"as",
"e",
":",
"self",
".",
"postrun",
"(",
"args",
",",
"exc",
"=",
"e",
")",
"self",
".",
"fire_event",
"(",
"'postrun'",
",",
"args",
",",
"exc",
"=",
"e",
")",
"raise",
"e",
"else",
":",
"self",
".",
"postrun",
"(",
"args",
",",
"result",
"=",
"result",
")",
"self",
".",
"fire_event",
"(",
"'postrun'",
",",
"args",
",",
"result",
"=",
"result",
")",
"return",
"result"
] |
Wrap some standard protocol around a command's run method. This
wrapper should generally never capture exceptions. It can look at
them and do things but prerun and postrun should always be symmetric.
Any exception suppression should happen in the `session.execute`.
|
[
"Wrap",
"some",
"standard",
"protocol",
"around",
"a",
"command",
"s",
"run",
"method",
".",
"This",
"wrapper",
"should",
"generally",
"never",
"capture",
"exceptions",
".",
"It",
"can",
"look",
"at",
"them",
"and",
"do",
"things",
"but",
"prerun",
"and",
"postrun",
"should",
"always",
"be",
"symmetric",
".",
"Any",
"exception",
"suppression",
"should",
"happen",
"in",
"the",
"session",
".",
"execute",
"."
] |
df0f0e4612d138c34d8cb99b66ab5b8e47f1414a
|
https://github.com/mayfield/shellish/blob/df0f0e4612d138c34d8cb99b66ab5b8e47f1414a/shellish/command/command.py#L151-L172
|
240,789
|
mayfield/shellish
|
shellish/command/command.py
|
Command.get_config
|
def get_config(self, section=None):
""" Return the merged end-user configuration for this command or a
specific section if set in `section`. """
config = self.session.config
section = self.config_section() if section is None else section
try:
return config[section]
except KeyError:
config.add_section(section)
return config[section]
|
python
|
def get_config(self, section=None):
""" Return the merged end-user configuration for this command or a
specific section if set in `section`. """
config = self.session.config
section = self.config_section() if section is None else section
try:
return config[section]
except KeyError:
config.add_section(section)
return config[section]
|
[
"def",
"get_config",
"(",
"self",
",",
"section",
"=",
"None",
")",
":",
"config",
"=",
"self",
".",
"session",
".",
"config",
"section",
"=",
"self",
".",
"config_section",
"(",
")",
"if",
"section",
"is",
"None",
"else",
"section",
"try",
":",
"return",
"config",
"[",
"section",
"]",
"except",
"KeyError",
":",
"config",
".",
"add_section",
"(",
"section",
")",
"return",
"config",
"[",
"section",
"]"
] |
Return the merged end-user configuration for this command or a
specific section if set in `section`.
|
[
"Return",
"the",
"merged",
"end",
"-",
"user",
"configuration",
"for",
"this",
"command",
"or",
"a",
"specific",
"section",
"if",
"set",
"in",
"section",
"."
] |
df0f0e4612d138c34d8cb99b66ab5b8e47f1414a
|
https://github.com/mayfield/shellish/blob/df0f0e4612d138c34d8cb99b66ab5b8e47f1414a/shellish/command/command.py#L194-L203
|
240,790
|
mayfield/shellish
|
shellish/command/command.py
|
Command.parent
|
def parent(self, parent):
""" Copy context from the parent into this instance as well as
adjusting or depth value to indicate where we exist in a command
tree. """
self._parent = parent
if parent:
pctx = dict((x, getattr(parent, x)) for x in parent.context_keys)
self.inject_context(pctx)
self.depth = parent.depth + 1
for command in self.subcommands.values():
command.parent = self # bump.
else:
self.depth = 0
|
python
|
def parent(self, parent):
""" Copy context from the parent into this instance as well as
adjusting or depth value to indicate where we exist in a command
tree. """
self._parent = parent
if parent:
pctx = dict((x, getattr(parent, x)) for x in parent.context_keys)
self.inject_context(pctx)
self.depth = parent.depth + 1
for command in self.subcommands.values():
command.parent = self # bump.
else:
self.depth = 0
|
[
"def",
"parent",
"(",
"self",
",",
"parent",
")",
":",
"self",
".",
"_parent",
"=",
"parent",
"if",
"parent",
":",
"pctx",
"=",
"dict",
"(",
"(",
"x",
",",
"getattr",
"(",
"parent",
",",
"x",
")",
")",
"for",
"x",
"in",
"parent",
".",
"context_keys",
")",
"self",
".",
"inject_context",
"(",
"pctx",
")",
"self",
".",
"depth",
"=",
"parent",
".",
"depth",
"+",
"1",
"for",
"command",
"in",
"self",
".",
"subcommands",
".",
"values",
"(",
")",
":",
"command",
".",
"parent",
"=",
"self",
"# bump.",
"else",
":",
"self",
".",
"depth",
"=",
"0"
] |
Copy context from the parent into this instance as well as
adjusting or depth value to indicate where we exist in a command
tree.
|
[
"Copy",
"context",
"from",
"the",
"parent",
"into",
"this",
"instance",
"as",
"well",
"as",
"adjusting",
"or",
"depth",
"value",
"to",
"indicate",
"where",
"we",
"exist",
"in",
"a",
"command",
"tree",
"."
] |
df0f0e4612d138c34d8cb99b66ab5b8e47f1414a
|
https://github.com/mayfield/shellish/blob/df0f0e4612d138c34d8cb99b66ab5b8e47f1414a/shellish/command/command.py#L210-L222
|
240,791
|
mayfield/shellish
|
shellish/command/command.py
|
Command.find_root
|
def find_root(self):
""" Traverse parent refs to top. """
cmd = self
while cmd.parent:
cmd = cmd.parent
return cmd
|
python
|
def find_root(self):
""" Traverse parent refs to top. """
cmd = self
while cmd.parent:
cmd = cmd.parent
return cmd
|
[
"def",
"find_root",
"(",
"self",
")",
":",
"cmd",
"=",
"self",
"while",
"cmd",
".",
"parent",
":",
"cmd",
"=",
"cmd",
".",
"parent",
"return",
"cmd"
] |
Traverse parent refs to top.
|
[
"Traverse",
"parent",
"refs",
"to",
"top",
"."
] |
df0f0e4612d138c34d8cb99b66ab5b8e47f1414a
|
https://github.com/mayfield/shellish/blob/df0f0e4612d138c34d8cb99b66ab5b8e47f1414a/shellish/command/command.py#L224-L229
|
240,792
|
mayfield/shellish
|
shellish/command/command.py
|
Command.inject_context
|
def inject_context(self, __context_dict__=None, **context):
""" Map context dict to this instance as attributes and keep note of
the keys being set so we can pass this along to any subcommands. """
context = context or __context_dict__
self.context_keys |= set(context.keys())
for key, value in context.items():
setattr(self, key, value)
for command in self.subcommands.values():
command.inject_context(context)
|
python
|
def inject_context(self, __context_dict__=None, **context):
""" Map context dict to this instance as attributes and keep note of
the keys being set so we can pass this along to any subcommands. """
context = context or __context_dict__
self.context_keys |= set(context.keys())
for key, value in context.items():
setattr(self, key, value)
for command in self.subcommands.values():
command.inject_context(context)
|
[
"def",
"inject_context",
"(",
"self",
",",
"__context_dict__",
"=",
"None",
",",
"*",
"*",
"context",
")",
":",
"context",
"=",
"context",
"or",
"__context_dict__",
"self",
".",
"context_keys",
"|=",
"set",
"(",
"context",
".",
"keys",
"(",
")",
")",
"for",
"key",
",",
"value",
"in",
"context",
".",
"items",
"(",
")",
":",
"setattr",
"(",
"self",
",",
"key",
",",
"value",
")",
"for",
"command",
"in",
"self",
".",
"subcommands",
".",
"values",
"(",
")",
":",
"command",
".",
"inject_context",
"(",
"context",
")"
] |
Map context dict to this instance as attributes and keep note of
the keys being set so we can pass this along to any subcommands.
|
[
"Map",
"context",
"dict",
"to",
"this",
"instance",
"as",
"attributes",
"and",
"keep",
"note",
"of",
"the",
"keys",
"being",
"set",
"so",
"we",
"can",
"pass",
"this",
"along",
"to",
"any",
"subcommands",
"."
] |
df0f0e4612d138c34d8cb99b66ab5b8e47f1414a
|
https://github.com/mayfield/shellish/blob/df0f0e4612d138c34d8cb99b66ab5b8e47f1414a/shellish/command/command.py#L231-L239
|
240,793
|
mayfield/shellish
|
shellish/command/command.py
|
Command.add_argument
|
def add_argument(self, *args, parser=None, autoenv=False, env=None,
complete=None, **kwargs):
""" Allow cleaner action supplementation. Autoenv will generate an
environment variable to be usable as a defaults setter based on the
command name and the dest property of the action. """
if parser is None:
parser = self.argparser
action = parser.add_argument(*args, **kwargs)
if autoenv:
if env is not None:
raise TypeError('Arguments `env` and `autoenv` are mutually '
'exclusive')
env = self._make_autoenv(action)
if env:
self.argparser.bind_env(action, env)
if autoenv:
self._autoenv_actions.add(action)
if complete:
action.complete = complete
return action
|
python
|
def add_argument(self, *args, parser=None, autoenv=False, env=None,
complete=None, **kwargs):
""" Allow cleaner action supplementation. Autoenv will generate an
environment variable to be usable as a defaults setter based on the
command name and the dest property of the action. """
if parser is None:
parser = self.argparser
action = parser.add_argument(*args, **kwargs)
if autoenv:
if env is not None:
raise TypeError('Arguments `env` and `autoenv` are mutually '
'exclusive')
env = self._make_autoenv(action)
if env:
self.argparser.bind_env(action, env)
if autoenv:
self._autoenv_actions.add(action)
if complete:
action.complete = complete
return action
|
[
"def",
"add_argument",
"(",
"self",
",",
"*",
"args",
",",
"parser",
"=",
"None",
",",
"autoenv",
"=",
"False",
",",
"env",
"=",
"None",
",",
"complete",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"parser",
"is",
"None",
":",
"parser",
"=",
"self",
".",
"argparser",
"action",
"=",
"parser",
".",
"add_argument",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"if",
"autoenv",
":",
"if",
"env",
"is",
"not",
"None",
":",
"raise",
"TypeError",
"(",
"'Arguments `env` and `autoenv` are mutually '",
"'exclusive'",
")",
"env",
"=",
"self",
".",
"_make_autoenv",
"(",
"action",
")",
"if",
"env",
":",
"self",
".",
"argparser",
".",
"bind_env",
"(",
"action",
",",
"env",
")",
"if",
"autoenv",
":",
"self",
".",
"_autoenv_actions",
".",
"add",
"(",
"action",
")",
"if",
"complete",
":",
"action",
".",
"complete",
"=",
"complete",
"return",
"action"
] |
Allow cleaner action supplementation. Autoenv will generate an
environment variable to be usable as a defaults setter based on the
command name and the dest property of the action.
|
[
"Allow",
"cleaner",
"action",
"supplementation",
".",
"Autoenv",
"will",
"generate",
"an",
"environment",
"variable",
"to",
"be",
"usable",
"as",
"a",
"defaults",
"setter",
"based",
"on",
"the",
"command",
"name",
"and",
"the",
"dest",
"property",
"of",
"the",
"action",
"."
] |
df0f0e4612d138c34d8cb99b66ab5b8e47f1414a
|
https://github.com/mayfield/shellish/blob/df0f0e4612d138c34d8cb99b66ab5b8e47f1414a/shellish/command/command.py#L270-L289
|
240,794
|
mayfield/shellish
|
shellish/command/command.py
|
Command._make_autoenv
|
def _make_autoenv(self, action):
""" Generate a suitable env variable for this action. This is
dependant on our subcommand hierarchy. Review the prog setter for
details. """
env = ('%s_%s' % (self.prog, action.dest)).upper()
env = re.sub(self.env_scrub_re, '', env.strip())
env = re.sub(self.env_flatten_re, '_', env)
if re.match('^[0-9]', env):
# Handle leading numbers.
env = '_%s' % env
return env
|
python
|
def _make_autoenv(self, action):
""" Generate a suitable env variable for this action. This is
dependant on our subcommand hierarchy. Review the prog setter for
details. """
env = ('%s_%s' % (self.prog, action.dest)).upper()
env = re.sub(self.env_scrub_re, '', env.strip())
env = re.sub(self.env_flatten_re, '_', env)
if re.match('^[0-9]', env):
# Handle leading numbers.
env = '_%s' % env
return env
|
[
"def",
"_make_autoenv",
"(",
"self",
",",
"action",
")",
":",
"env",
"=",
"(",
"'%s_%s'",
"%",
"(",
"self",
".",
"prog",
",",
"action",
".",
"dest",
")",
")",
".",
"upper",
"(",
")",
"env",
"=",
"re",
".",
"sub",
"(",
"self",
".",
"env_scrub_re",
",",
"''",
",",
"env",
".",
"strip",
"(",
")",
")",
"env",
"=",
"re",
".",
"sub",
"(",
"self",
".",
"env_flatten_re",
",",
"'_'",
",",
"env",
")",
"if",
"re",
".",
"match",
"(",
"'^[0-9]'",
",",
"env",
")",
":",
"# Handle leading numbers.",
"env",
"=",
"'_%s'",
"%",
"env",
"return",
"env"
] |
Generate a suitable env variable for this action. This is
dependant on our subcommand hierarchy. Review the prog setter for
details.
|
[
"Generate",
"a",
"suitable",
"env",
"variable",
"for",
"this",
"action",
".",
"This",
"is",
"dependant",
"on",
"our",
"subcommand",
"hierarchy",
".",
"Review",
"the",
"prog",
"setter",
"for",
"details",
"."
] |
df0f0e4612d138c34d8cb99b66ab5b8e47f1414a
|
https://github.com/mayfield/shellish/blob/df0f0e4612d138c34d8cb99b66ab5b8e47f1414a/shellish/command/command.py#L291-L301
|
240,795
|
mayfield/shellish
|
shellish/command/command.py
|
Command.add_file_argument
|
def add_file_argument(self, *args, mode='r', buffering=1,
filetype_options=None, **kwargs):
""" Add a tab-completion safe FileType argument. This argument
differs from a normal argparse.FileType based argument in that the
value is a factory function that returns a file handle instead of
providing an already open file handle. There are various reasons
why this is a better approach but it is also required to avoid
erroneous creation of files with shellish tab completion. """
type_ = supplement.SafeFileType(mode=mode, bufsize=buffering,
**filetype_options or {})
return self.add_argument(*args, type=type_, **kwargs)
|
python
|
def add_file_argument(self, *args, mode='r', buffering=1,
filetype_options=None, **kwargs):
""" Add a tab-completion safe FileType argument. This argument
differs from a normal argparse.FileType based argument in that the
value is a factory function that returns a file handle instead of
providing an already open file handle. There are various reasons
why this is a better approach but it is also required to avoid
erroneous creation of files with shellish tab completion. """
type_ = supplement.SafeFileType(mode=mode, bufsize=buffering,
**filetype_options or {})
return self.add_argument(*args, type=type_, **kwargs)
|
[
"def",
"add_file_argument",
"(",
"self",
",",
"*",
"args",
",",
"mode",
"=",
"'r'",
",",
"buffering",
"=",
"1",
",",
"filetype_options",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"type_",
"=",
"supplement",
".",
"SafeFileType",
"(",
"mode",
"=",
"mode",
",",
"bufsize",
"=",
"buffering",
",",
"*",
"*",
"filetype_options",
"or",
"{",
"}",
")",
"return",
"self",
".",
"add_argument",
"(",
"*",
"args",
",",
"type",
"=",
"type_",
",",
"*",
"*",
"kwargs",
")"
] |
Add a tab-completion safe FileType argument. This argument
differs from a normal argparse.FileType based argument in that the
value is a factory function that returns a file handle instead of
providing an already open file handle. There are various reasons
why this is a better approach but it is also required to avoid
erroneous creation of files with shellish tab completion.
|
[
"Add",
"a",
"tab",
"-",
"completion",
"safe",
"FileType",
"argument",
".",
"This",
"argument",
"differs",
"from",
"a",
"normal",
"argparse",
".",
"FileType",
"based",
"argument",
"in",
"that",
"the",
"value",
"is",
"a",
"factory",
"function",
"that",
"returns",
"a",
"file",
"handle",
"instead",
"of",
"providing",
"an",
"already",
"open",
"file",
"handle",
".",
"There",
"are",
"various",
"reasons",
"why",
"this",
"is",
"a",
"better",
"approach",
"but",
"it",
"is",
"also",
"required",
"to",
"avoid",
"erroneous",
"creation",
"of",
"files",
"with",
"shellish",
"tab",
"completion",
"."
] |
df0f0e4612d138c34d8cb99b66ab5b8e47f1414a
|
https://github.com/mayfield/shellish/blob/df0f0e4612d138c34d8cb99b66ab5b8e47f1414a/shellish/command/command.py#L303-L313
|
240,796
|
mayfield/shellish
|
shellish/command/command.py
|
Command.create_argparser
|
def create_argparser(self):
""" Factory for arg parser. Can be overridden as long as it returns
an ArgParser compatible instance. """
if self.desc:
if self.title:
fulldesc = '%s\n\n%s' % (self.title, self.desc)
else:
fulldesc = self.desc
else:
fulldesc = self.title
return self.ArgumentParser(command=self, prog=self.name,
description=fulldesc)
|
python
|
def create_argparser(self):
""" Factory for arg parser. Can be overridden as long as it returns
an ArgParser compatible instance. """
if self.desc:
if self.title:
fulldesc = '%s\n\n%s' % (self.title, self.desc)
else:
fulldesc = self.desc
else:
fulldesc = self.title
return self.ArgumentParser(command=self, prog=self.name,
description=fulldesc)
|
[
"def",
"create_argparser",
"(",
"self",
")",
":",
"if",
"self",
".",
"desc",
":",
"if",
"self",
".",
"title",
":",
"fulldesc",
"=",
"'%s\\n\\n%s'",
"%",
"(",
"self",
".",
"title",
",",
"self",
".",
"desc",
")",
"else",
":",
"fulldesc",
"=",
"self",
".",
"desc",
"else",
":",
"fulldesc",
"=",
"self",
".",
"title",
"return",
"self",
".",
"ArgumentParser",
"(",
"command",
"=",
"self",
",",
"prog",
"=",
"self",
".",
"name",
",",
"description",
"=",
"fulldesc",
")"
] |
Factory for arg parser. Can be overridden as long as it returns
an ArgParser compatible instance.
|
[
"Factory",
"for",
"arg",
"parser",
".",
"Can",
"be",
"overridden",
"as",
"long",
"as",
"it",
"returns",
"an",
"ArgParser",
"compatible",
"instance",
"."
] |
df0f0e4612d138c34d8cb99b66ab5b8e47f1414a
|
https://github.com/mayfield/shellish/blob/df0f0e4612d138c34d8cb99b66ab5b8e47f1414a/shellish/command/command.py#L320-L331
|
240,797
|
mayfield/shellish
|
shellish/command/command.py
|
Command.attach_session
|
def attach_session(self):
""" Create a session and inject it as context for this command and any
subcommands. """
assert self.session is None
root = self.find_root()
session = self.Session(root)
root.inject_context(session=session)
return session
|
python
|
def attach_session(self):
""" Create a session and inject it as context for this command and any
subcommands. """
assert self.session is None
root = self.find_root()
session = self.Session(root)
root.inject_context(session=session)
return session
|
[
"def",
"attach_session",
"(",
"self",
")",
":",
"assert",
"self",
".",
"session",
"is",
"None",
"root",
"=",
"self",
".",
"find_root",
"(",
")",
"session",
"=",
"self",
".",
"Session",
"(",
"root",
")",
"root",
".",
"inject_context",
"(",
"session",
"=",
"session",
")",
"return",
"session"
] |
Create a session and inject it as context for this command and any
subcommands.
|
[
"Create",
"a",
"session",
"and",
"inject",
"it",
"as",
"context",
"for",
"this",
"command",
"and",
"any",
"subcommands",
"."
] |
df0f0e4612d138c34d8cb99b66ab5b8e47f1414a
|
https://github.com/mayfield/shellish/blob/df0f0e4612d138c34d8cb99b66ab5b8e47f1414a/shellish/command/command.py#L333-L340
|
240,798
|
mayfield/shellish
|
shellish/command/command.py
|
Command._complete
|
def _complete(self, text, line, begin, end):
""" Do naive argument parsing so the completer has better ability to
understand expansion rules. """
line = line[:end] # Ignore characters following the cursor.
fullargs = self.split_line(line)[1:]
args = fullargs[:]
options = self.deep_scan_parser(self.argparser)
# Walk into options tree if subcommands are detected.
last_subcommand = None
while True:
for key, completers in options.items():
if key in args and hasattr(completers[0], 'items'):
args.remove(key)
last_subcommand = key
options = completers[0]
break
else:
break
if text == last_subcommand:
# We have to specially catch the case where the last argument is
# the key used to find our subparser. More specifically when the
# cursor is not preceded by a space too, as this prevents the
# completion routines from continuing. The simplest way without
# complicating the algo for coming up with our options list is to
# simply shortcut the completer by returning a single item.
# Subsequent tabs will work normally.
return {text}
# Look for incomplete actions.
choices = set(x for x in options
if x is not None and x.startswith(text))
arg_buf = []
pos_args = []
trailing_action = None
# The slice below skips the last arg if it is 'active'.
for x in reversed(args[:-1 if text else None]):
if x in options:
action = options[x][0]
action.consume(arg_buf)
pos_args.extend(arg_buf)
del arg_buf[:]
if action.full:
choices -= {action.key}
if not trailing_action:
trailing_action = action
if not action.full:
if action.reached_min:
choices |= action(self, text, fullargs)
choices -= {action.key}
else:
choices = action(self, text, fullargs)
break
else:
arg_buf.insert(0, x)
pos_args.extend(arg_buf)
# Feed any remaining arguments in the buffer to positionals so long as
# there isn't a trailing action that can still consume.
if None in options and (not trailing_action or trailing_action.full):
for x_action in options[None]:
x_action.consume(pos_args)
if not x_action.reached_min:
choices = x_action(self, text, fullargs)
break
elif not x_action.full:
choices |= x_action(self, text, fullargs)
return choices
|
python
|
def _complete(self, text, line, begin, end):
""" Do naive argument parsing so the completer has better ability to
understand expansion rules. """
line = line[:end] # Ignore characters following the cursor.
fullargs = self.split_line(line)[1:]
args = fullargs[:]
options = self.deep_scan_parser(self.argparser)
# Walk into options tree if subcommands are detected.
last_subcommand = None
while True:
for key, completers in options.items():
if key in args and hasattr(completers[0], 'items'):
args.remove(key)
last_subcommand = key
options = completers[0]
break
else:
break
if text == last_subcommand:
# We have to specially catch the case where the last argument is
# the key used to find our subparser. More specifically when the
# cursor is not preceded by a space too, as this prevents the
# completion routines from continuing. The simplest way without
# complicating the algo for coming up with our options list is to
# simply shortcut the completer by returning a single item.
# Subsequent tabs will work normally.
return {text}
# Look for incomplete actions.
choices = set(x for x in options
if x is not None and x.startswith(text))
arg_buf = []
pos_args = []
trailing_action = None
# The slice below skips the last arg if it is 'active'.
for x in reversed(args[:-1 if text else None]):
if x in options:
action = options[x][0]
action.consume(arg_buf)
pos_args.extend(arg_buf)
del arg_buf[:]
if action.full:
choices -= {action.key}
if not trailing_action:
trailing_action = action
if not action.full:
if action.reached_min:
choices |= action(self, text, fullargs)
choices -= {action.key}
else:
choices = action(self, text, fullargs)
break
else:
arg_buf.insert(0, x)
pos_args.extend(arg_buf)
# Feed any remaining arguments in the buffer to positionals so long as
# there isn't a trailing action that can still consume.
if None in options and (not trailing_action or trailing_action.full):
for x_action in options[None]:
x_action.consume(pos_args)
if not x_action.reached_min:
choices = x_action(self, text, fullargs)
break
elif not x_action.full:
choices |= x_action(self, text, fullargs)
return choices
|
[
"def",
"_complete",
"(",
"self",
",",
"text",
",",
"line",
",",
"begin",
",",
"end",
")",
":",
"line",
"=",
"line",
"[",
":",
"end",
"]",
"# Ignore characters following the cursor.",
"fullargs",
"=",
"self",
".",
"split_line",
"(",
"line",
")",
"[",
"1",
":",
"]",
"args",
"=",
"fullargs",
"[",
":",
"]",
"options",
"=",
"self",
".",
"deep_scan_parser",
"(",
"self",
".",
"argparser",
")",
"# Walk into options tree if subcommands are detected.",
"last_subcommand",
"=",
"None",
"while",
"True",
":",
"for",
"key",
",",
"completers",
"in",
"options",
".",
"items",
"(",
")",
":",
"if",
"key",
"in",
"args",
"and",
"hasattr",
"(",
"completers",
"[",
"0",
"]",
",",
"'items'",
")",
":",
"args",
".",
"remove",
"(",
"key",
")",
"last_subcommand",
"=",
"key",
"options",
"=",
"completers",
"[",
"0",
"]",
"break",
"else",
":",
"break",
"if",
"text",
"==",
"last_subcommand",
":",
"# We have to specially catch the case where the last argument is",
"# the key used to find our subparser. More specifically when the",
"# cursor is not preceded by a space too, as this prevents the",
"# completion routines from continuing. The simplest way without",
"# complicating the algo for coming up with our options list is to",
"# simply shortcut the completer by returning a single item.",
"# Subsequent tabs will work normally.",
"return",
"{",
"text",
"}",
"# Look for incomplete actions.",
"choices",
"=",
"set",
"(",
"x",
"for",
"x",
"in",
"options",
"if",
"x",
"is",
"not",
"None",
"and",
"x",
".",
"startswith",
"(",
"text",
")",
")",
"arg_buf",
"=",
"[",
"]",
"pos_args",
"=",
"[",
"]",
"trailing_action",
"=",
"None",
"# The slice below skips the last arg if it is 'active'.",
"for",
"x",
"in",
"reversed",
"(",
"args",
"[",
":",
"-",
"1",
"if",
"text",
"else",
"None",
"]",
")",
":",
"if",
"x",
"in",
"options",
":",
"action",
"=",
"options",
"[",
"x",
"]",
"[",
"0",
"]",
"action",
".",
"consume",
"(",
"arg_buf",
")",
"pos_args",
".",
"extend",
"(",
"arg_buf",
")",
"del",
"arg_buf",
"[",
":",
"]",
"if",
"action",
".",
"full",
":",
"choices",
"-=",
"{",
"action",
".",
"key",
"}",
"if",
"not",
"trailing_action",
":",
"trailing_action",
"=",
"action",
"if",
"not",
"action",
".",
"full",
":",
"if",
"action",
".",
"reached_min",
":",
"choices",
"|=",
"action",
"(",
"self",
",",
"text",
",",
"fullargs",
")",
"choices",
"-=",
"{",
"action",
".",
"key",
"}",
"else",
":",
"choices",
"=",
"action",
"(",
"self",
",",
"text",
",",
"fullargs",
")",
"break",
"else",
":",
"arg_buf",
".",
"insert",
"(",
"0",
",",
"x",
")",
"pos_args",
".",
"extend",
"(",
"arg_buf",
")",
"# Feed any remaining arguments in the buffer to positionals so long as",
"# there isn't a trailing action that can still consume.",
"if",
"None",
"in",
"options",
"and",
"(",
"not",
"trailing_action",
"or",
"trailing_action",
".",
"full",
")",
":",
"for",
"x_action",
"in",
"options",
"[",
"None",
"]",
":",
"x_action",
".",
"consume",
"(",
"pos_args",
")",
"if",
"not",
"x_action",
".",
"reached_min",
":",
"choices",
"=",
"x_action",
"(",
"self",
",",
"text",
",",
"fullargs",
")",
"break",
"elif",
"not",
"x_action",
".",
"full",
":",
"choices",
"|=",
"x_action",
"(",
"self",
",",
"text",
",",
"fullargs",
")",
"return",
"choices"
] |
Do naive argument parsing so the completer has better ability to
understand expansion rules.
|
[
"Do",
"naive",
"argument",
"parsing",
"so",
"the",
"completer",
"has",
"better",
"ability",
"to",
"understand",
"expansion",
"rules",
"."
] |
df0f0e4612d138c34d8cb99b66ab5b8e47f1414a
|
https://github.com/mayfield/shellish/blob/df0f0e4612d138c34d8cb99b66ab5b8e47f1414a/shellish/command/command.py#L360-L428
|
240,799
|
mayfield/shellish
|
shellish/command/command.py
|
Command.split_line
|
def split_line(self, line):
""" Try to do pure shlex.split unless it can't parse the line. In that
case we trim the input line until shlex can split the args and tack the
unparsable portion on as the last argument. """
remainder = []
while True:
try:
args = shlex.split(line)
except ValueError:
remainder.append(line[-1])
line = line[:-1]
else:
if remainder:
args.append(''.join(reversed(remainder)))
return args
|
python
|
def split_line(self, line):
""" Try to do pure shlex.split unless it can't parse the line. In that
case we trim the input line until shlex can split the args and tack the
unparsable portion on as the last argument. """
remainder = []
while True:
try:
args = shlex.split(line)
except ValueError:
remainder.append(line[-1])
line = line[:-1]
else:
if remainder:
args.append(''.join(reversed(remainder)))
return args
|
[
"def",
"split_line",
"(",
"self",
",",
"line",
")",
":",
"remainder",
"=",
"[",
"]",
"while",
"True",
":",
"try",
":",
"args",
"=",
"shlex",
".",
"split",
"(",
"line",
")",
"except",
"ValueError",
":",
"remainder",
".",
"append",
"(",
"line",
"[",
"-",
"1",
"]",
")",
"line",
"=",
"line",
"[",
":",
"-",
"1",
"]",
"else",
":",
"if",
"remainder",
":",
"args",
".",
"append",
"(",
"''",
".",
"join",
"(",
"reversed",
"(",
"remainder",
")",
")",
")",
"return",
"args"
] |
Try to do pure shlex.split unless it can't parse the line. In that
case we trim the input line until shlex can split the args and tack the
unparsable portion on as the last argument.
|
[
"Try",
"to",
"do",
"pure",
"shlex",
".",
"split",
"unless",
"it",
"can",
"t",
"parse",
"the",
"line",
".",
"In",
"that",
"case",
"we",
"trim",
"the",
"input",
"line",
"until",
"shlex",
"can",
"split",
"the",
"args",
"and",
"tack",
"the",
"unparsable",
"portion",
"on",
"as",
"the",
"last",
"argument",
"."
] |
df0f0e4612d138c34d8cb99b66ab5b8e47f1414a
|
https://github.com/mayfield/shellish/blob/df0f0e4612d138c34d8cb99b66ab5b8e47f1414a/shellish/command/command.py#L430-L444
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.