| id | repo | path | func_name | original_string | language | code | code_tokens | docstring | docstring_tokens | sha | url |
| int32 (0–252k) | stringlengths 7–55 | stringlengths 4–127 | stringlengths 1–88 | stringlengths 75–19.8k | stringclasses (1 value) | stringlengths 75–19.8k | list | stringlengths 3–17.3k | list | stringlengths 40 | stringlengths 87–242 |
|---|---|---|---|---|---|---|---|---|---|---|---|
240,600
|
universalcore/unicore.hub.client
|
unicore/hub/client/appclient.py
|
App.set
|
def set(self, field, value):
"""
Sets the value of an app field.
:param str field:
The name of the app field. Trying to set immutable fields
``uuid`` or ``key`` will raise a ValueError.
:param value:
The new value of the app field.
:raises: ValueError
"""
if field == 'uuid':
raise ValueError('uuid cannot be set')
elif field == 'key':
raise ValueError(
'key cannot be set. Use \'reset_key\' method')
else:
self.data[field] = value
|
python
|
def set(self, field, value):
"""
Sets the value of an app field.
:param str field:
The name of the app field. Trying to set immutable fields
``uuid`` or ``key`` will raise a ValueError.
:param value:
The new value of the app field.
:raises: ValueError
"""
if field == 'uuid':
raise ValueError('uuid cannot be set')
elif field == 'key':
raise ValueError(
'key cannot be set. Use \'reset_key\' method')
else:
self.data[field] = value
|
[
"def",
"set",
"(",
"self",
",",
"field",
",",
"value",
")",
":",
"if",
"field",
"==",
"'uuid'",
":",
"raise",
"ValueError",
"(",
"'uuid cannot be set'",
")",
"elif",
"field",
"==",
"'key'",
":",
"raise",
"ValueError",
"(",
"'key cannot be set. Use \\'reset_key\\' method'",
")",
"else",
":",
"self",
".",
"data",
"[",
"field",
"]",
"=",
"value"
] |
Sets the value of an app field.
:param str field:
The name of the app field. Trying to set immutable fields
``uuid`` or ``key`` will raise a ValueError.
:param value:
The new value of the app field.
:raises: ValueError
|
[
"Sets",
"the",
"value",
"of",
"an",
"app",
"field",
"."
] |
c706f4d31e493bd4e7ea8236780a9b271b850b8b
|
https://github.com/universalcore/unicore.hub.client/blob/c706f4d31e493bd4e7ea8236780a9b271b850b8b/unicore/hub/client/appclient.py#L66-L83
|
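A short sketch of how the `set` guard above behaves; `FakeApp` is a hypothetical stand-in that reuses the same logic so the snippet runs without a hub client.

class FakeApp:
    """Hypothetical stand-in mirroring App.set's guard logic."""
    def __init__(self):
        self.data = {'uuid': 'abc-123', 'title': 'old title'}

    def set(self, field, value):
        if field == 'uuid':
            raise ValueError('uuid cannot be set')
        elif field == 'key':
            raise ValueError("key cannot be set. Use 'reset_key' method")
        else:
            self.data[field] = value

app = FakeApp()
app.set('title', 'new title')           # mutable fields are written to app.data
try:
    app.set('uuid', 'other-uuid')        # immutable field
except ValueError as err:
    print(err)                           # -> uuid cannot be set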
240,601
|
universalcore/unicore.hub.client
|
unicore/hub/client/appclient.py
|
App.reset_key
|
def reset_key(self):
"""
Resets the app's key on the `unicore.hub` server.
:returns: str -- the new key
"""
new_key = self.client.reset_app_key(self.get('uuid'))
self.data['key'] = new_key
return new_key
|
python
|
def reset_key(self):
"""
Resets the app's key on the `unicore.hub` server.
:returns: str -- the new key
"""
new_key = self.client.reset_app_key(self.get('uuid'))
self.data['key'] = new_key
return new_key
|
[
"def",
"reset_key",
"(",
"self",
")",
":",
"new_key",
"=",
"self",
".",
"client",
".",
"reset_app_key",
"(",
"self",
".",
"get",
"(",
"'uuid'",
")",
")",
"self",
".",
"data",
"[",
"'key'",
"]",
"=",
"new_key",
"return",
"new_key"
] |
Resets the app's key on the `unicore.hub` server.
:returns: str -- the new key
|
[
"Resets",
"the",
"app",
"s",
"key",
"on",
"the",
"unicore",
".",
"hub",
"server",
"."
] |
c706f4d31e493bd4e7ea8236780a9b271b850b8b
|
https://github.com/universalcore/unicore.hub.client/blob/c706f4d31e493bd4e7ea8236780a9b271b850b8b/unicore/hub/client/appclient.py#L97-L105
|
240,602
|
mikerhodes/actionqueues
|
actionqueues/exceptionfactory.py
|
DoublingBackoffExceptionFactory.raise_exception
|
def raise_exception(self, original_exception=None):
"""Raise a retry exception if under the max retries. After, raise the
original_exception provided to this method or a generic Exception if
none provided.
"""
if self._executed_retries < self._max_retries:
curr_backoff = self._ms_backoff
self._executed_retries += 1
self._ms_backoff = self._ms_backoff * 2
raise ActionRetryException(curr_backoff)
else:
raise original_exception or Exception()
|
python
|
def raise_exception(self, original_exception=None):
"""Raise a retry exception if under the max retries. After, raise the
original_exception provided to this method or a generic Exception if
none provided.
"""
if self._executed_retries < self._max_retries:
curr_backoff = self._ms_backoff
self._executed_retries += 1
self._ms_backoff = self._ms_backoff * 2
raise ActionRetryException(curr_backoff)
else:
raise original_exception or Exception()
|
[
"def",
"raise_exception",
"(",
"self",
",",
"original_exception",
"=",
"None",
")",
":",
"if",
"self",
".",
"_executed_retries",
"<",
"self",
".",
"_max_retries",
":",
"curr_backoff",
"=",
"self",
".",
"_ms_backoff",
"self",
".",
"_executed_retries",
"+=",
"1",
"self",
".",
"_ms_backoff",
"=",
"self",
".",
"_ms_backoff",
"*",
"2",
"raise",
"ActionRetryException",
"(",
"curr_backoff",
")",
"else",
":",
"raise",
"original_exception",
"or",
"Exception",
"(",
")"
] |
Raise a retry exception if under the max retries. After, raise the
original_exception provided to this method or a generic Exception if
none provided.
|
[
"Raise",
"a",
"retry",
"exception",
"if",
"under",
"the",
"max",
"retries",
".",
"After",
"raise",
"the",
"original_exception",
"provided",
"to",
"this",
"method",
"or",
"a",
"generic",
"Exception",
"if",
"none",
"provided",
"."
] |
a7a78ab116abe88af95b5315dc9f34d40ce81eb2
|
https://github.com/mikerhodes/actionqueues/blob/a7a78ab116abe88af95b5315dc9f34d40ce81eb2/actionqueues/exceptionfactory.py#L20-L31
|
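To make the doubling behaviour above concrete, a small self-contained sketch: with three allowed retries and an initial backoff of 100 ms, successive calls raise retry exceptions carrying 100, 200 and 400 ms before the original exception is re-raised. The stand-in classes and initial values here are assumptions for illustration, not taken from the actionqueues package.

class ActionRetryException(Exception):
    """Stand-in retry exception carrying the backoff in milliseconds."""
    def __init__(self, ms_backoff):
        super().__init__(ms_backoff)
        self.ms_backoff = ms_backoff

class DoublingBackoff:
    """Stand-in mirroring raise_exception above with illustrative defaults."""
    def __init__(self, max_retries=3, ms_backoff=100):
        self._max_retries = max_retries
        self._ms_backoff = ms_backoff
        self._executed_retries = 0

    def raise_exception(self, original_exception=None):
        if self._executed_retries < self._max_retries:
            curr_backoff = self._ms_backoff
            self._executed_retries += 1
            self._ms_backoff *= 2
            raise ActionRetryException(curr_backoff)
        raise original_exception or Exception()

factory = DoublingBackoff()
for _ in range(4):
    try:
        factory.raise_exception(RuntimeError("gave up"))
    except ActionRetryException as exc:
        print("retry in", exc.ms_backoff, "ms")   # 100, then 200, then 400
    except RuntimeError as exc:
        print(exc)                                # gave up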
240,603
|
datakortet/dkfileutils
|
dkfileutils/listfiles.py
|
main
|
def main(): # pragma: nocover
"""Print checksum and file name for all files in the directory.
"""
p = argparse.ArgumentParser(add_help="Recursively list interesting files.")
p.add_argument(
'directory', nargs="?", default="",
help="The directory to process (current dir if omitted)."
)
p.add_argument(
'--verbose', '-v', action='store_true',
help="Increase verbosity."
)
args = p.parse_args()
args.curdir = os.getcwd()
if not args.directory:
args.direcotry = args.curdir
if args.verbose:
print(args)
for chsm, fname in list_files(args.directory):
print(chsm, fname)
|
python
|
def main(): # pragma: nocover
"""Print checksum and file name for all files in the directory.
"""
p = argparse.ArgumentParser(add_help="Recursively list interesting files.")
p.add_argument(
'directory', nargs="?", default="",
help="The directory to process (current dir if omitted)."
)
p.add_argument(
'--verbose', '-v', action='store_true',
help="Increase verbosity."
)
args = p.parse_args()
args.curdir = os.getcwd()
if not args.directory:
args.direcotry = args.curdir
if args.verbose:
print(args)
for chsm, fname in list_files(args.directory):
print(chsm, fname)
|
[
"def",
"main",
"(",
")",
":",
"# pragma: nocover",
"p",
"=",
"argparse",
".",
"ArgumentParser",
"(",
"add_help",
"=",
"\"Recursively list interesting files.\"",
")",
"p",
".",
"add_argument",
"(",
"'directory'",
",",
"nargs",
"=",
"\"?\"",
",",
"default",
"=",
"\"\"",
",",
"help",
"=",
"\"The directory to process (current dir if omitted).\"",
")",
"p",
".",
"add_argument",
"(",
"'--verbose'",
",",
"'-v'",
",",
"action",
"=",
"'store_true'",
",",
"help",
"=",
"\"Increase verbosity.\"",
")",
"args",
"=",
"p",
".",
"parse_args",
"(",
")",
"args",
".",
"curdir",
"=",
"os",
".",
"getcwd",
"(",
")",
"if",
"not",
"args",
".",
"directory",
":",
"args",
".",
"direcotry",
"=",
"args",
".",
"curdir",
"if",
"args",
".",
"verbose",
":",
"print",
"(",
"args",
")",
"for",
"chsm",
",",
"fname",
"in",
"list_files",
"(",
"args",
".",
"directory",
")",
":",
"print",
"(",
"chsm",
",",
"fname",
")"
] |
Print checksum and file name for all files in the directory.
|
[
"Print",
"checksum",
"and",
"file",
"name",
"for",
"all",
"files",
"in",
"the",
"directory",
"."
] |
924098d6e2edf88ad9b3ffdec9c74530f80a7d77
|
https://github.com/datakortet/dkfileutils/blob/924098d6e2edf88ad9b3ffdec9c74530f80a7d77/dkfileutils/listfiles.py#L85-L106
|
240,604
|
timeyyy/apptools
|
peasoup/pidutil.py
|
process_exists
|
def process_exists(pid=None):
"""
Evaluates a Pid Value defaults to the currently foucsed window
against the current open programs,
if there is a match returns the process name and pid
otherwise returns None, None
"""
if not pid:
pid = current_pid()
elif callable(pid):
pid = pid()
if pid and psutil.pid_exists(pid):
pname = psutil.Process(pid).name()
if os.name == 'nt':
return os.path.splitext(pname)[0], pid
return pname, pid
return None, None
|
python
|
def process_exists(pid=None):
"""
Evaluates a Pid Value defaults to the currently foucsed window
against the current open programs,
if there is a match returns the process name and pid
otherwise returns None, None
"""
if not pid:
pid = current_pid()
elif callable(pid):
pid = pid()
if pid and psutil.pid_exists(pid):
pname = psutil.Process(pid).name()
if os.name == 'nt':
return os.path.splitext(pname)[0], pid
return pname, pid
return None, None
|
[
"def",
"process_exists",
"(",
"pid",
"=",
"None",
")",
":",
"if",
"not",
"pid",
":",
"pid",
"=",
"current_pid",
"(",
")",
"elif",
"callable",
"(",
"pid",
")",
":",
"pid",
"=",
"pid",
"(",
")",
"if",
"pid",
"and",
"psutil",
".",
"pid_exists",
"(",
"pid",
")",
":",
"pname",
"=",
"psutil",
".",
"Process",
"(",
"pid",
")",
".",
"name",
"(",
")",
"if",
"os",
".",
"name",
"==",
"'nt'",
":",
"return",
"os",
".",
"path",
".",
"splitext",
"(",
"pname",
")",
"[",
"0",
"]",
",",
"pid",
"return",
"pname",
",",
"pid",
"return",
"None",
",",
"None"
] |
Evaluates a Pid Value defaults to the currently foucsed window
against the current open programs,
if there is a match returns the process name and pid
otherwise returns None, None
|
[
"Evaluates",
"a",
"Pid",
"Value",
"defaults",
"to",
"the",
"currently",
"foucsed",
"window",
"against",
"the",
"current",
"open",
"programs",
"if",
"there",
"is",
"a",
"match",
"returns",
"the",
"process",
"name",
"and",
"pid",
"otherwise",
"returns",
"None",
"None"
] |
d3c0f324b0c2689c35f5601348276f4efd6cb240
|
https://github.com/timeyyy/apptools/blob/d3c0f324b0c2689c35f5601348276f4efd6cb240/peasoup/pidutil.py#L22-L39
|
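A brief usage sketch for process_exists above, assuming the function and its psutil/os imports from the row are in scope: passing no argument falls back to the focused window's pid via the module's current_pid, a callable defers the pid lookup, and an unknown pid yields (None, None). The values shown are illustrative.

import os

# Our own pid is guaranteed to exist while this script runs.
name, pid = process_exists(os.getpid())
print(name, pid)              # e.g. ('python', 12345); '.exe' is stripped on Windows

# A pid that almost certainly does not exist yields (None, None).
print(process_exists(10 ** 7))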
240,605
|
timeyyy/apptools
|
peasoup/pidutil.py
|
get_active_window_pos
|
def get_active_window_pos():
'''screen coordinates massaged so that movewindow command works to
restore the window to the same position
returns x, y
'''
# http://stackoverflow.com/questions/26050788/in-bash-on-ubuntu-14-04-unity-how-can-i-get-the-total-size-of-an-open-window-i/26060527#26060527
cmd = ['xdotool','getactivewindow', 'getwindowgeometry']
res = subprocess.Popen(cmd, stdout = subprocess.PIPE, stderr= subprocess.PIPE).communicate()
stdout = res[0].decode('utf-8').splitlines()
pos = stdout[1].split(':')[1].split(',')
geo = stdout[2].split(':')[1].split('x')
x, y = int(pos[0].strip()), int(pos[1].split('(')[0].strip())
w, h = int(geo[0].strip()), int(geo[1].strip())
# get the window decorations
window_id = get_window_id()
cmd = ['xprop', '_NET_FRAME_EXTENTS', '-id', window_id]
res = subprocess.Popen(cmd, stdout = subprocess.PIPE, stderr= subprocess.PIPE).communicate()
decos = res[0].decode('utf-8').split('=')[1].split(',')
l, r = int(decos[0].strip()), int(decos[1].strip())
t, b = int(decos[2].strip()), int(decos[3].strip())
return x-l, y-t
|
python
|
def get_active_window_pos():
'''screen coordinates massaged so that movewindow command works to
restore the window to the same position
returns x, y
'''
# http://stackoverflow.com/questions/26050788/in-bash-on-ubuntu-14-04-unity-how-can-i-get-the-total-size-of-an-open-window-i/26060527#26060527
cmd = ['xdotool','getactivewindow', 'getwindowgeometry']
res = subprocess.Popen(cmd, stdout = subprocess.PIPE, stderr= subprocess.PIPE).communicate()
stdout = res[0].decode('utf-8').splitlines()
pos = stdout[1].split(':')[1].split(',')
geo = stdout[2].split(':')[1].split('x')
x, y = int(pos[0].strip()), int(pos[1].split('(')[0].strip())
w, h = int(geo[0].strip()), int(geo[1].strip())
# get the window decorations
window_id = get_window_id()
cmd = ['xprop', '_NET_FRAME_EXTENTS', '-id', window_id]
res = subprocess.Popen(cmd, stdout = subprocess.PIPE, stderr= subprocess.PIPE).communicate()
decos = res[0].decode('utf-8').split('=')[1].split(',')
l, r = int(decos[0].strip()), int(decos[1].strip())
t, b = int(decos[2].strip()), int(decos[3].strip())
return x-l, y-t
|
[
"def",
"get_active_window_pos",
"(",
")",
":",
"# http://stackoverflow.com/questions/26050788/in-bash-on-ubuntu-14-04-unity-how-can-i-get-the-total-size-of-an-open-window-i/26060527#26060527",
"cmd",
"=",
"[",
"'xdotool'",
",",
"'getactivewindow'",
",",
"'getwindowgeometry'",
"]",
"res",
"=",
"subprocess",
".",
"Popen",
"(",
"cmd",
",",
"stdout",
"=",
"subprocess",
".",
"PIPE",
",",
"stderr",
"=",
"subprocess",
".",
"PIPE",
")",
".",
"communicate",
"(",
")",
"stdout",
"=",
"res",
"[",
"0",
"]",
".",
"decode",
"(",
"'utf-8'",
")",
".",
"splitlines",
"(",
")",
"pos",
"=",
"stdout",
"[",
"1",
"]",
".",
"split",
"(",
"':'",
")",
"[",
"1",
"]",
".",
"split",
"(",
"','",
")",
"geo",
"=",
"stdout",
"[",
"2",
"]",
".",
"split",
"(",
"':'",
")",
"[",
"1",
"]",
".",
"split",
"(",
"'x'",
")",
"x",
",",
"y",
"=",
"int",
"(",
"pos",
"[",
"0",
"]",
".",
"strip",
"(",
")",
")",
",",
"int",
"(",
"pos",
"[",
"1",
"]",
".",
"split",
"(",
"'('",
")",
"[",
"0",
"]",
".",
"strip",
"(",
")",
")",
"w",
",",
"h",
"=",
"int",
"(",
"geo",
"[",
"0",
"]",
".",
"strip",
"(",
")",
")",
",",
"int",
"(",
"geo",
"[",
"1",
"]",
".",
"strip",
"(",
")",
")",
"# get the window decorations",
"window_id",
"=",
"get_window_id",
"(",
")",
"cmd",
"=",
"[",
"'xprop'",
",",
"'_NET_FRAME_EXTENTS'",
",",
"'-id'",
",",
"window_id",
"]",
"res",
"=",
"subprocess",
".",
"Popen",
"(",
"cmd",
",",
"stdout",
"=",
"subprocess",
".",
"PIPE",
",",
"stderr",
"=",
"subprocess",
".",
"PIPE",
")",
".",
"communicate",
"(",
")",
"decos",
"=",
"res",
"[",
"0",
"]",
".",
"decode",
"(",
"'utf-8'",
")",
".",
"split",
"(",
"'='",
")",
"[",
"1",
"]",
".",
"split",
"(",
"','",
")",
"l",
",",
"r",
"=",
"int",
"(",
"decos",
"[",
"0",
"]",
".",
"strip",
"(",
")",
")",
",",
"int",
"(",
"decos",
"[",
"1",
"]",
".",
"strip",
"(",
")",
")",
"t",
",",
"b",
"=",
"int",
"(",
"decos",
"[",
"2",
"]",
".",
"strip",
"(",
")",
")",
",",
"int",
"(",
"decos",
"[",
"3",
"]",
".",
"strip",
"(",
")",
")",
"return",
"x",
"-",
"l",
",",
"y",
"-",
"t"
] |
screen coordinates massaged so that movewindow command works to
restore the window to the same position
returns x, y
|
[
"screen",
"coordinates",
"massaged",
"so",
"that",
"movewindow",
"command",
"works",
"to",
"restore",
"the",
"window",
"to",
"the",
"same",
"position",
"returns",
"x",
"y"
] |
d3c0f324b0c2689c35f5601348276f4efd6cb240
|
https://github.com/timeyyy/apptools/blob/d3c0f324b0c2689c35f5601348276f4efd6cb240/peasoup/pidutil.py#L77-L99
|
240,606
|
timeyyy/apptools
|
peasoup/pidutil.py
|
get_active_title
|
def get_active_title():
'''returns the window title of the active window'''
if os.name == 'posix':
cmd = ['xdotool','getactivewindow','getwindowname']
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
title = proc.communicate()[0].decode('utf-8')
else:
raise NotImplementedError
return title
|
python
|
def get_active_title():
'''returns the window title of the active window'''
if os.name == 'posix':
cmd = ['xdotool','getactivewindow','getwindowname']
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
title = proc.communicate()[0].decode('utf-8')
else:
raise NotImplementedError
return title
|
[
"def",
"get_active_title",
"(",
")",
":",
"if",
"os",
".",
"name",
"==",
"'posix'",
":",
"cmd",
"=",
"[",
"'xdotool'",
",",
"'getactivewindow'",
",",
"'getwindowname'",
"]",
"proc",
"=",
"subprocess",
".",
"Popen",
"(",
"cmd",
",",
"stdout",
"=",
"subprocess",
".",
"PIPE",
",",
"stderr",
"=",
"subprocess",
".",
"PIPE",
")",
"title",
"=",
"proc",
".",
"communicate",
"(",
")",
"[",
"0",
"]",
".",
"decode",
"(",
"'utf-8'",
")",
"else",
":",
"raise",
"NotImplementedError",
"return",
"title"
] |
returns the window title of the active window
|
[
"returns",
"the",
"window",
"title",
"of",
"the",
"active",
"window"
] |
d3c0f324b0c2689c35f5601348276f4efd6cb240
|
https://github.com/timeyyy/apptools/blob/d3c0f324b0c2689c35f5601348276f4efd6cb240/peasoup/pidutil.py#L166-L174
|
240,607
|
timeyyy/apptools
|
peasoup/pidutil.py
|
get_processes
|
def get_processes():
'''returns process names owned by the user'''
user = getpass.getuser()
for proc in psutil.process_iter():
if proc.username() != user:
continue
pname = psutil.Process(proc.pid).name()
if os.name == 'nt':
pname = pname[:-4] # removiing .exe from end
yield pname
|
python
|
def get_processes():
'''returns process names owned by the user'''
user = getpass.getuser()
for proc in psutil.process_iter():
if proc.username() != user:
continue
pname = psutil.Process(proc.pid).name()
if os.name == 'nt':
pname = pname[:-4] # removiing .exe from end
yield pname
|
[
"def",
"get_processes",
"(",
")",
":",
"user",
"=",
"getpass",
".",
"getuser",
"(",
")",
"for",
"proc",
"in",
"psutil",
".",
"process_iter",
"(",
")",
":",
"if",
"proc",
".",
"username",
"(",
")",
"!=",
"user",
":",
"continue",
"pname",
"=",
"psutil",
".",
"Process",
"(",
"proc",
".",
"pid",
")",
".",
"name",
"(",
")",
"if",
"os",
".",
"name",
"==",
"'nt'",
":",
"pname",
"=",
"pname",
"[",
":",
"-",
"4",
"]",
"# removiing .exe from end",
"yield",
"pname"
] |
returns process names owned by the user
|
[
"returns",
"process",
"names",
"owned",
"by",
"the",
"user"
] |
d3c0f324b0c2689c35f5601348276f4efd6cb240
|
https://github.com/timeyyy/apptools/blob/d3c0f324b0c2689c35f5601348276f4efd6cb240/peasoup/pidutil.py#L176-L185
|
240,608
|
timeyyy/apptools
|
peasoup/pidutil.py
|
get_titles
|
def get_titles():
'''returns titles of all open windows'''
if os.name == 'posix':
for proc in get_processes():
cmd = ['xdotool','search','--name', proc]
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
window_ids = proc.communicate()[0].decode('utf-8')
if window_ids:
for window_id in window_ids.split('\n'):
cmd = ['xdotool','getwindowname',window_id]
proc = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
title = proc.communicate()[0].decode('utf-8')
try:
if title[-1] == '\n':
title = title[:-1]
yield title
except IndexError:
pass
else:
raise NotImplementedError
|
python
|
def get_titles():
'''returns titles of all open windows'''
if os.name == 'posix':
for proc in get_processes():
cmd = ['xdotool','search','--name', proc]
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
window_ids = proc.communicate()[0].decode('utf-8')
if window_ids:
for window_id in window_ids.split('\n'):
cmd = ['xdotool','getwindowname',window_id]
proc = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
title = proc.communicate()[0].decode('utf-8')
try:
if title[-1] == '\n':
title = title[:-1]
yield title
except IndexError:
pass
else:
raise NotImplementedError
|
[
"def",
"get_titles",
"(",
")",
":",
"if",
"os",
".",
"name",
"==",
"'posix'",
":",
"for",
"proc",
"in",
"get_processes",
"(",
")",
":",
"cmd",
"=",
"[",
"'xdotool'",
",",
"'search'",
",",
"'--name'",
",",
"proc",
"]",
"proc",
"=",
"subprocess",
".",
"Popen",
"(",
"cmd",
",",
"stdout",
"=",
"subprocess",
".",
"PIPE",
",",
"stderr",
"=",
"subprocess",
".",
"PIPE",
")",
"window_ids",
"=",
"proc",
".",
"communicate",
"(",
")",
"[",
"0",
"]",
".",
"decode",
"(",
"'utf-8'",
")",
"if",
"window_ids",
":",
"for",
"window_id",
"in",
"window_ids",
".",
"split",
"(",
"'\\n'",
")",
":",
"cmd",
"=",
"[",
"'xdotool'",
",",
"'getwindowname'",
",",
"window_id",
"]",
"proc",
"=",
"subprocess",
".",
"Popen",
"(",
"cmd",
",",
"stdout",
"=",
"subprocess",
".",
"PIPE",
",",
"stderr",
"=",
"subprocess",
".",
"PIPE",
")",
"title",
"=",
"proc",
".",
"communicate",
"(",
")",
"[",
"0",
"]",
".",
"decode",
"(",
"'utf-8'",
")",
"try",
":",
"if",
"title",
"[",
"-",
"1",
"]",
"==",
"'\\n'",
":",
"title",
"=",
"title",
"[",
":",
"-",
"1",
"]",
"yield",
"title",
"except",
"IndexError",
":",
"pass",
"else",
":",
"raise",
"NotImplementedError"
] |
returns titles of all open windows
|
[
"returns",
"titles",
"of",
"all",
"open",
"windows"
] |
d3c0f324b0c2689c35f5601348276f4efd6cb240
|
https://github.com/timeyyy/apptools/blob/d3c0f324b0c2689c35f5601348276f4efd6cb240/peasoup/pidutil.py#L187-L208
|
240,609
|
mirukan/whratio
|
whratio/ratio.py
|
get_gcd
|
def get_gcd(a, b):
"Return greatest common divisor for a and b."
while a:
a, b = b % a, a
return b
|
python
|
def get_gcd(a, b):
"Return greatest common divisor for a and b."
while a:
a, b = b % a, a
return b
|
[
"def",
"get_gcd",
"(",
"a",
",",
"b",
")",
":",
"while",
"a",
":",
"a",
",",
"b",
"=",
"b",
"%",
"a",
",",
"a",
"return",
"b"
] |
Return greatest common divisor for a and b.
|
[
"Return",
"greatest",
"common",
"divisor",
"for",
"a",
"and",
"b",
"."
] |
e19cf7346351649d196d2eb3369870841f7bfea5
|
https://github.com/mirukan/whratio/blob/e19cf7346351649d196d2eb3369870841f7bfea5/whratio/ratio.py#L8-L12
|
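The two-line loop above is the iterative Euclidean algorithm; a short trace and a couple of checks, assuming get_gcd from the row is in scope:

# Trace of get_gcd(12, 18):
#   a=12, b=18  ->  a, b = 18 % 12, 12  ->  a=6, b=12
#   a=6,  b=12  ->  a, b = 12 % 6,  6   ->  a=0, b=6
#   a=0 ends the loop; b == 6 is returned.
assert get_gcd(12, 18) == 6
assert get_gcd(7, 5) == 1     # coprime inputs give 1
assert get_gcd(0, 9) == 9     # gcd(0, n) == n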
240,610
|
tBaxter/tango-articles
|
build/lib/articles/signals.py
|
auto_tweet
|
def auto_tweet(sender, instance, *args, **kwargs):
"""
Allows auto-tweeting newly created object to twitter
on accounts configured in settings.
You MUST create an app to allow oAuth authentication to work:
-- https://dev.twitter.com/apps/
You also must set the app to "Read and Write" access level,
and create an access token. Whew.
"""
if not twitter or getattr(settings, 'TWITTER_SETTINGS') is False:
#print 'WARNING: Twitter account not configured.'
return False
if not kwargs.get('created'):
return False
twitter_key = settings.TWITTER_SETTINGS
try:
api = twitter.Api(
consumer_key = twitter_key['consumer_key'],
consumer_secret = twitter_key['consumer_secret'],
access_token_key = twitter_key['access_token_key'],
access_token_secret = twitter_key['access_token_secret']
)
except Exception as error:
print("failed to authenticate: {}".format(error))
text = instance.text
if instance.link:
link = instance.link
else:
link = instance.get_absolute_url()
text = '{} {}'.format(text, link)
try:
api.PostUpdate(text)
except Exception as error:
print("Error posting to twitter: {}".format(error))
|
python
|
def auto_tweet(sender, instance, *args, **kwargs):
"""
Allows auto-tweeting newly created object to twitter
on accounts configured in settings.
You MUST create an app to allow oAuth authentication to work:
-- https://dev.twitter.com/apps/
You also must set the app to "Read and Write" access level,
and create an access token. Whew.
"""
if not twitter or getattr(settings, 'TWITTER_SETTINGS') is False:
#print 'WARNING: Twitter account not configured.'
return False
if not kwargs.get('created'):
return False
twitter_key = settings.TWITTER_SETTINGS
try:
api = twitter.Api(
consumer_key = twitter_key['consumer_key'],
consumer_secret = twitter_key['consumer_secret'],
access_token_key = twitter_key['access_token_key'],
access_token_secret = twitter_key['access_token_secret']
)
except Exception as error:
print("failed to authenticate: {}".format(error))
text = instance.text
if instance.link:
link = instance.link
else:
link = instance.get_absolute_url()
text = '{} {}'.format(text, link)
try:
api.PostUpdate(text)
except Exception as error:
print("Error posting to twitter: {}".format(error))
|
[
"def",
"auto_tweet",
"(",
"sender",
",",
"instance",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"not",
"twitter",
"or",
"getattr",
"(",
"settings",
",",
"'TWITTER_SETTINGS'",
")",
"is",
"False",
":",
"#print 'WARNING: Twitter account not configured.'",
"return",
"False",
"if",
"not",
"kwargs",
".",
"get",
"(",
"'created'",
")",
":",
"return",
"False",
"twitter_key",
"=",
"settings",
".",
"TWITTER_SETTINGS",
"try",
":",
"api",
"=",
"twitter",
".",
"Api",
"(",
"consumer_key",
"=",
"twitter_key",
"[",
"'consumer_key'",
"]",
",",
"consumer_secret",
"=",
"twitter_key",
"[",
"'consumer_secret'",
"]",
",",
"access_token_key",
"=",
"twitter_key",
"[",
"'access_token_key'",
"]",
",",
"access_token_secret",
"=",
"twitter_key",
"[",
"'access_token_secret'",
"]",
")",
"except",
"Exception",
"as",
"error",
":",
"print",
"(",
"\"failed to authenticate: {}\"",
".",
"format",
"(",
"error",
")",
")",
"text",
"=",
"instance",
".",
"text",
"if",
"instance",
".",
"link",
":",
"link",
"=",
"instance",
".",
"link",
"else",
":",
"link",
"=",
"instance",
".",
"get_absolute_url",
"(",
")",
"text",
"=",
"'{} {}'",
".",
"format",
"(",
"text",
",",
"link",
")",
"try",
":",
"api",
".",
"PostUpdate",
"(",
"text",
")",
"except",
"Exception",
"as",
"error",
":",
"print",
"(",
"\"Error posting to twitter: {}\"",
".",
"format",
"(",
"error",
")",
")"
] |
Allows auto-tweeting newly created object to twitter
on accounts configured in settings.
You MUST create an app to allow oAuth authentication to work:
-- https://dev.twitter.com/apps/
You also must set the app to "Read and Write" access level,
and create an access token. Whew.
|
[
"Allows",
"auto",
"-",
"tweeting",
"newly",
"created",
"object",
"to",
"twitter",
"on",
"accounts",
"configured",
"in",
"settings",
"."
] |
93818dcca1b62042a4fc19af63474691b0fe931c
|
https://github.com/tBaxter/tango-articles/blob/93818dcca1b62042a4fc19af63474691b0fe931c/build/lib/articles/signals.py#L10-L52
|
240,611
|
lvh/txgeonames
|
txgeonames/client.py
|
GeonamesClient.postalCodeLookup
|
def postalCodeLookup(self, countryCode, postalCode):
"""
Looks up locations for this country and postal code.
"""
params = {"country": countryCode, "postalcode": postalCode}
d = self._call("postalCodeLookupJSON", params)
d.addCallback(operator.itemgetter("postalcodes"))
return d
|
python
|
def postalCodeLookup(self, countryCode, postalCode):
"""
Looks up locations for this country and postal code.
"""
params = {"country": countryCode, "postalcode": postalCode}
d = self._call("postalCodeLookupJSON", params)
d.addCallback(operator.itemgetter("postalcodes"))
return d
|
[
"def",
"postalCodeLookup",
"(",
"self",
",",
"countryCode",
",",
"postalCode",
")",
":",
"params",
"=",
"{",
"\"country\"",
":",
"countryCode",
",",
"\"postalcode\"",
":",
"postalCode",
"}",
"d",
"=",
"self",
".",
"_call",
"(",
"\"postalCodeLookupJSON\"",
",",
"params",
")",
"d",
".",
"addCallback",
"(",
"operator",
".",
"itemgetter",
"(",
"\"postalcodes\"",
")",
")",
"return",
"d"
] |
Looks up locations for this country and postal code.
|
[
"Looks",
"up",
"locations",
"for",
"this",
"country",
"and",
"postal",
"code",
"."
] |
0f1e6dbba0ee2c32563e12c9b8d654626848fb18
|
https://github.com/lvh/txgeonames/blob/0f1e6dbba0ee2c32563e12c9b8d654626848fb18/txgeonames/client.py#L34-L41
|
240,612
|
listen-lavender/webcrawl
|
webcrawl/prettyprint.py
|
_print
|
def _print(*args):
"""
Print txt by coding GBK.
*args
list, list of printing contents
"""
if not CFG.debug:
return
if not args:
return
encoding = 'gbk'
args = [_cs(a, encoding) for a in args]
f_back = None
try:
raise Exception
except:
f_back = sys.exc_traceback.tb_frame.f_back
f_name = f_back.f_code.co_name
filename = os.path.basename(f_back.f_code.co_filename)
m_name = os.path.splitext(filename)[0]
prefix = ('[%s.%s]'%(m_name, f_name)).ljust(20, ' ')
if os.name == 'nt':
for i in range(len(args)):
v = args [i]
if isinstance(v, str):
args[i] = v #v.decode('utf8').encode('gbk')
elif isinstance(v, unicode):
args[i] = v.encode('gbk')
print '[%s]'%str(datetime.datetime.now()), prefix, ' '.join(args)
|
python
|
def _print(*args):
"""
Print txt by coding GBK.
*args
list, list of printing contents
"""
if not CFG.debug:
return
if not args:
return
encoding = 'gbk'
args = [_cs(a, encoding) for a in args]
f_back = None
try:
raise Exception
except:
f_back = sys.exc_traceback.tb_frame.f_back
f_name = f_back.f_code.co_name
filename = os.path.basename(f_back.f_code.co_filename)
m_name = os.path.splitext(filename)[0]
prefix = ('[%s.%s]'%(m_name, f_name)).ljust(20, ' ')
if os.name == 'nt':
for i in range(len(args)):
v = args [i]
if isinstance(v, str):
args[i] = v #v.decode('utf8').encode('gbk')
elif isinstance(v, unicode):
args[i] = v.encode('gbk')
print '[%s]'%str(datetime.datetime.now()), prefix, ' '.join(args)
|
[
"def",
"_print",
"(",
"*",
"args",
")",
":",
"if",
"not",
"CFG",
".",
"debug",
":",
"return",
"if",
"not",
"args",
":",
"return",
"encoding",
"=",
"'gbk'",
"args",
"=",
"[",
"_cs",
"(",
"a",
",",
"encoding",
")",
"for",
"a",
"in",
"args",
"]",
"f_back",
"=",
"None",
"try",
":",
"raise",
"Exception",
"except",
":",
"f_back",
"=",
"sys",
".",
"exc_traceback",
".",
"tb_frame",
".",
"f_back",
"f_name",
"=",
"f_back",
".",
"f_code",
".",
"co_name",
"filename",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"f_back",
".",
"f_code",
".",
"co_filename",
")",
"m_name",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"filename",
")",
"[",
"0",
"]",
"prefix",
"=",
"(",
"'[%s.%s]'",
"%",
"(",
"m_name",
",",
"f_name",
")",
")",
".",
"ljust",
"(",
"20",
",",
"' '",
")",
"if",
"os",
".",
"name",
"==",
"'nt'",
":",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"args",
")",
")",
":",
"v",
"=",
"args",
"[",
"i",
"]",
"if",
"isinstance",
"(",
"v",
",",
"str",
")",
":",
"args",
"[",
"i",
"]",
"=",
"v",
"#v.decode('utf8').encode('gbk')",
"elif",
"isinstance",
"(",
"v",
",",
"unicode",
")",
":",
"args",
"[",
"i",
"]",
"=",
"v",
".",
"encode",
"(",
"'gbk'",
")",
"print",
"'[%s]'",
"%",
"str",
"(",
"datetime",
".",
"datetime",
".",
"now",
"(",
")",
")",
",",
"prefix",
",",
"' '",
".",
"join",
"(",
"args",
")"
] |
Print txt by coding GBK.
*args
list, list of printing contents
|
[
"Print",
"txt",
"by",
"coding",
"GBK",
"."
] |
905dcfa6e6934aac764045660c0efcef28eae1e6
|
https://github.com/listen-lavender/webcrawl/blob/905dcfa6e6934aac764045660c0efcef28eae1e6/webcrawl/prettyprint.py#L20-L50
|
240,613
|
listen-lavender/webcrawl
|
webcrawl/prettyprint.py
|
_print_err
|
def _print_err(*args):
"""
Print errors.
*args
list, list of printing contents
"""
if not CFG.debug:
return
if not args:
return
encoding = 'utf8' if os.name == 'posix' else 'gbk'
args = [_cs(a, encoding) for a in args]
f_back = None
try:
raise Exception
except:
f_back = sys.exc_traceback.tb_frame.f_back
f_name = f_back.f_code.co_name
filename = os.path.basename(f_back.f_code.co_filename)
m_name = os.path.splitext(filename)[0]
prefix = ('[%s.%s]'%(m_name, f_name)).ljust(20, ' ')
print bcolors.FAIL+'[%s]'%str(datetime.datetime.now()), prefix, ' '.join(args) + bcolors.ENDC
|
python
|
def _print_err(*args):
"""
Print errors.
*args
list, list of printing contents
"""
if not CFG.debug:
return
if not args:
return
encoding = 'utf8' if os.name == 'posix' else 'gbk'
args = [_cs(a, encoding) for a in args]
f_back = None
try:
raise Exception
except:
f_back = sys.exc_traceback.tb_frame.f_back
f_name = f_back.f_code.co_name
filename = os.path.basename(f_back.f_code.co_filename)
m_name = os.path.splitext(filename)[0]
prefix = ('[%s.%s]'%(m_name, f_name)).ljust(20, ' ')
print bcolors.FAIL+'[%s]'%str(datetime.datetime.now()), prefix, ' '.join(args) + bcolors.ENDC
|
[
"def",
"_print_err",
"(",
"*",
"args",
")",
":",
"if",
"not",
"CFG",
".",
"debug",
":",
"return",
"if",
"not",
"args",
":",
"return",
"encoding",
"=",
"'utf8'",
"if",
"os",
".",
"name",
"==",
"'posix'",
"else",
"'gbk'",
"args",
"=",
"[",
"_cs",
"(",
"a",
",",
"encoding",
")",
"for",
"a",
"in",
"args",
"]",
"f_back",
"=",
"None",
"try",
":",
"raise",
"Exception",
"except",
":",
"f_back",
"=",
"sys",
".",
"exc_traceback",
".",
"tb_frame",
".",
"f_back",
"f_name",
"=",
"f_back",
".",
"f_code",
".",
"co_name",
"filename",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"f_back",
".",
"f_code",
".",
"co_filename",
")",
"m_name",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"filename",
")",
"[",
"0",
"]",
"prefix",
"=",
"(",
"'[%s.%s]'",
"%",
"(",
"m_name",
",",
"f_name",
")",
")",
".",
"ljust",
"(",
"20",
",",
"' '",
")",
"print",
"bcolors",
".",
"FAIL",
"+",
"'[%s]'",
"%",
"str",
"(",
"datetime",
".",
"datetime",
".",
"now",
"(",
")",
")",
",",
"prefix",
",",
"' '",
".",
"join",
"(",
"args",
")",
"+",
"bcolors",
".",
"ENDC"
] |
Print errors.
*args
list, list of printing contents
|
[
"Print",
"errors",
"."
] |
905dcfa6e6934aac764045660c0efcef28eae1e6
|
https://github.com/listen-lavender/webcrawl/blob/905dcfa6e6934aac764045660c0efcef28eae1e6/webcrawl/prettyprint.py#L53-L76
|
240,614
|
listen-lavender/webcrawl
|
webcrawl/prettyprint.py
|
fileprint
|
def fileprint(filename, category, level=logging.DEBUG, maxBytes=1024*10124*100,
backupCount=0):
"""
Print files by file size.
filename
string, file name
category
string, category path of logs file in log directory
level
enumerated type of logging module, restrict whether logs to be printed or not
maxBytes
int, max limit of file size
backupCount
int, allowed numbers of file copys
"""
path = os.path.join(CFG.filedir, category, filename)
# Initialize filer
filer = logging.getLogger(filename)
frt = logging.Formatter('%(message)s')
hdr = RotatingFileHandler(path, 'a', maxBytes, backupCount, 'utf-8')
hdr.setFormatter(frt)
hdr._name = '##_rfh_##'
already_in = False
for _hdr in filer.handlers:
if _hdr._name == '##_rfh_##':
already_in = True
break
if not already_in:
filer.addHandler(hdr)
hdr = logging.StreamHandler(sys.stdout)
hdr.setFormatter(frt)
hdr._name = '##_sh_##'
already_in = False
for _hdr in filer.handlers:
if _hdr._name == '##_sh_##':
already_in = True
if not already_in:
filer.addHandler(hdr)
filer.setLevel(level)
def _wraper(*args):
if not args:
return
encoding = 'utf8' if os.name == 'posix' else 'gbk'
args = [_cu(a, encoding) for a in args]
filer.info(' '.join(args))
return _wraper, filer
|
python
|
def fileprint(filename, category, level=logging.DEBUG, maxBytes=1024*10124*100,
backupCount=0):
"""
Print files by file size.
filename
string, file name
category
string, category path of logs file in log directory
level
enumerated type of logging module, restrict whether logs to be printed or not
maxBytes
int, max limit of file size
backupCount
int, allowed numbers of file copys
"""
path = os.path.join(CFG.filedir, category, filename)
# Initialize filer
filer = logging.getLogger(filename)
frt = logging.Formatter('%(message)s')
hdr = RotatingFileHandler(path, 'a', maxBytes, backupCount, 'utf-8')
hdr.setFormatter(frt)
hdr._name = '##_rfh_##'
already_in = False
for _hdr in filer.handlers:
if _hdr._name == '##_rfh_##':
already_in = True
break
if not already_in:
filer.addHandler(hdr)
hdr = logging.StreamHandler(sys.stdout)
hdr.setFormatter(frt)
hdr._name = '##_sh_##'
already_in = False
for _hdr in filer.handlers:
if _hdr._name == '##_sh_##':
already_in = True
if not already_in:
filer.addHandler(hdr)
filer.setLevel(level)
def _wraper(*args):
if not args:
return
encoding = 'utf8' if os.name == 'posix' else 'gbk'
args = [_cu(a, encoding) for a in args]
filer.info(' '.join(args))
return _wraper, filer
|
[
"def",
"fileprint",
"(",
"filename",
",",
"category",
",",
"level",
"=",
"logging",
".",
"DEBUG",
",",
"maxBytes",
"=",
"1024",
"*",
"10124",
"*",
"100",
",",
"backupCount",
"=",
"0",
")",
":",
"path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"CFG",
".",
"filedir",
",",
"category",
",",
"filename",
")",
"# Initialize filer",
"filer",
"=",
"logging",
".",
"getLogger",
"(",
"filename",
")",
"frt",
"=",
"logging",
".",
"Formatter",
"(",
"'%(message)s'",
")",
"hdr",
"=",
"RotatingFileHandler",
"(",
"path",
",",
"'a'",
",",
"maxBytes",
",",
"backupCount",
",",
"'utf-8'",
")",
"hdr",
".",
"setFormatter",
"(",
"frt",
")",
"hdr",
".",
"_name",
"=",
"'##_rfh_##'",
"already_in",
"=",
"False",
"for",
"_hdr",
"in",
"filer",
".",
"handlers",
":",
"if",
"_hdr",
".",
"_name",
"==",
"'##_rfh_##'",
":",
"already_in",
"=",
"True",
"break",
"if",
"not",
"already_in",
":",
"filer",
".",
"addHandler",
"(",
"hdr",
")",
"hdr",
"=",
"logging",
".",
"StreamHandler",
"(",
"sys",
".",
"stdout",
")",
"hdr",
".",
"setFormatter",
"(",
"frt",
")",
"hdr",
".",
"_name",
"=",
"'##_sh_##'",
"already_in",
"=",
"False",
"for",
"_hdr",
"in",
"filer",
".",
"handlers",
":",
"if",
"_hdr",
".",
"_name",
"==",
"'##_sh_##'",
":",
"already_in",
"=",
"True",
"if",
"not",
"already_in",
":",
"filer",
".",
"addHandler",
"(",
"hdr",
")",
"filer",
".",
"setLevel",
"(",
"level",
")",
"def",
"_wraper",
"(",
"*",
"args",
")",
":",
"if",
"not",
"args",
":",
"return",
"encoding",
"=",
"'utf8'",
"if",
"os",
".",
"name",
"==",
"'posix'",
"else",
"'gbk'",
"args",
"=",
"[",
"_cu",
"(",
"a",
",",
"encoding",
")",
"for",
"a",
"in",
"args",
"]",
"filer",
".",
"info",
"(",
"' '",
".",
"join",
"(",
"args",
")",
")",
"return",
"_wraper",
",",
"filer"
] |
Print files by file size.
filename
string, file name
category
string, category path of logs file in log directory
level
enumerated type of logging module, restrict whether logs to be printed or not
maxBytes
int, max limit of file size
backupCount
int, allowed numbers of file copys
|
[
"Print",
"files",
"by",
"file",
"size",
"."
] |
905dcfa6e6934aac764045660c0efcef28eae1e6
|
https://github.com/listen-lavender/webcrawl/blob/905dcfa6e6934aac764045660c0efcef28eae1e6/webcrawl/prettyprint.py#L185-L236
|
240,615
|
DaveMcEwan/ndim
|
ndim_bezier.py
|
pt_on_bezier_curve
|
def pt_on_bezier_curve(P=[(0.0, 0.0)], t=0.5):
'''Return point at t on bezier curve defined by control points P.
'''
assert isinstance(P, list)
assert len(P) > 0
for p in P:
assert isinstance(p, tuple)
for i in p:
assert len(p) > 1
assert isinstance(i, float)
assert isinstance(t, float)
assert 0 <= t <= 1
O = len(P) - 1 # Order of curve
# Recurse down the orders calculating the next set of control points until
# there is only one left, which is the point we want.
Q = P
while O > 0:
Q = [pt_between_pts(Q[l], Q[l+1], t) for l in range(O)]
O -= 1
assert len(Q) == 1
return Q[0]
|
python
|
def pt_on_bezier_curve(P=[(0.0, 0.0)], t=0.5):
'''Return point at t on bezier curve defined by control points P.
'''
assert isinstance(P, list)
assert len(P) > 0
for p in P:
assert isinstance(p, tuple)
for i in p:
assert len(p) > 1
assert isinstance(i, float)
assert isinstance(t, float)
assert 0 <= t <= 1
O = len(P) - 1 # Order of curve
# Recurse down the orders calculating the next set of control points until
# there is only one left, which is the point we want.
Q = P
while O > 0:
Q = [pt_between_pts(Q[l], Q[l+1], t) for l in range(O)]
O -= 1
assert len(Q) == 1
return Q[0]
|
[
"def",
"pt_on_bezier_curve",
"(",
"P",
"=",
"[",
"(",
"0.0",
",",
"0.0",
")",
"]",
",",
"t",
"=",
"0.5",
")",
":",
"assert",
"isinstance",
"(",
"P",
",",
"list",
")",
"assert",
"len",
"(",
"P",
")",
">",
"0",
"for",
"p",
"in",
"P",
":",
"assert",
"isinstance",
"(",
"p",
",",
"tuple",
")",
"for",
"i",
"in",
"p",
":",
"assert",
"len",
"(",
"p",
")",
">",
"1",
"assert",
"isinstance",
"(",
"i",
",",
"float",
")",
"assert",
"isinstance",
"(",
"t",
",",
"float",
")",
"assert",
"0",
"<=",
"t",
"<=",
"1",
"O",
"=",
"len",
"(",
"P",
")",
"-",
"1",
"# Order of curve",
"# Recurse down the orders calculating the next set of control points until",
"# there is only one left, which is the point we want.",
"Q",
"=",
"P",
"while",
"O",
">",
"0",
":",
"Q",
"=",
"[",
"pt_between_pts",
"(",
"Q",
"[",
"l",
"]",
",",
"Q",
"[",
"l",
"+",
"1",
"]",
",",
"t",
")",
"for",
"l",
"in",
"range",
"(",
"O",
")",
"]",
"O",
"-=",
"1",
"assert",
"len",
"(",
"Q",
")",
"==",
"1",
"return",
"Q",
"[",
"0",
"]"
] |
Return point at t on bezier curve defined by control points P.
|
[
"Return",
"point",
"at",
"t",
"on",
"bezier",
"curve",
"defined",
"by",
"control",
"points",
"P",
"."
] |
f1ea023d3e597160fc1e9e11921de07af659f9d2
|
https://github.com/DaveMcEwan/ndim/blob/f1ea023d3e597160fc1e9e11921de07af659f9d2/ndim_bezier.py#L6-L29
|
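The while loop above is de Casteljau's algorithm: each pass replaces the control points with pointwise interpolations at t until a single point remains. A worked check, assuming pt_between_pts is plain linear interpolation as its name suggests:

# Quadratic curve with control points (0,0), (1,2), (2,0), evaluated at t = 0.5.
# Pass 1 yields (0.5, 1.0) and (1.5, 1.0); pass 2 collapses them to (1.0, 1.0).
P = [(0.0, 0.0), (1.0, 2.0), (2.0, 0.0)]
print(pt_on_bezier_curve(P, 0.5))   # -> (1.0, 1.0)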
240,616
|
DaveMcEwan/ndim
|
ndim_bezier.py
|
pts_on_bezier_curve
|
def pts_on_bezier_curve(P=[(0.0, 0.0)], n_seg=0):
'''Return list N+1 points representing N line segments on bezier curve
defined by control points P.
'''
assert isinstance(P, list)
assert len(P) > 0
for p in P:
assert isinstance(p, tuple)
for i in p:
assert len(p) > 1
assert isinstance(i, float)
assert isinstance(n_seg, int)
assert n_seg >= 0
return [pt_on_bezier_curve(P, float(i)/n_seg) for i in range(n_seg)] + [P[-1]]
|
python
|
def pts_on_bezier_curve(P=[(0.0, 0.0)], n_seg=0):
'''Return list N+1 points representing N line segments on bezier curve
defined by control points P.
'''
assert isinstance(P, list)
assert len(P) > 0
for p in P:
assert isinstance(p, tuple)
for i in p:
assert len(p) > 1
assert isinstance(i, float)
assert isinstance(n_seg, int)
assert n_seg >= 0
return [pt_on_bezier_curve(P, float(i)/n_seg) for i in range(n_seg)] + [P[-1]]
|
[
"def",
"pts_on_bezier_curve",
"(",
"P",
"=",
"[",
"(",
"0.0",
",",
"0.0",
")",
"]",
",",
"n_seg",
"=",
"0",
")",
":",
"assert",
"isinstance",
"(",
"P",
",",
"list",
")",
"assert",
"len",
"(",
"P",
")",
">",
"0",
"for",
"p",
"in",
"P",
":",
"assert",
"isinstance",
"(",
"p",
",",
"tuple",
")",
"for",
"i",
"in",
"p",
":",
"assert",
"len",
"(",
"p",
")",
">",
"1",
"assert",
"isinstance",
"(",
"i",
",",
"float",
")",
"assert",
"isinstance",
"(",
"n_seg",
",",
"int",
")",
"assert",
"n_seg",
">=",
"0",
"return",
"[",
"pt_on_bezier_curve",
"(",
"P",
",",
"float",
"(",
"i",
")",
"/",
"n_seg",
")",
"for",
"i",
"in",
"range",
"(",
"n_seg",
")",
"]",
"+",
"[",
"P",
"[",
"-",
"1",
"]",
"]"
] |
Return list N+1 points representing N line segments on bezier curve
defined by control points P.
|
[
"Return",
"list",
"N",
"+",
"1",
"points",
"representing",
"N",
"line",
"segments",
"on",
"bezier",
"curve",
"defined",
"by",
"control",
"points",
"P",
"."
] |
f1ea023d3e597160fc1e9e11921de07af659f9d2
|
https://github.com/DaveMcEwan/ndim/blob/f1ea023d3e597160fc1e9e11921de07af659f9d2/ndim_bezier.py#L32-L46
|
240,617
|
DaveMcEwan/ndim
|
ndim_bezier.py
|
bezier_curve_approx_len
|
def bezier_curve_approx_len(P=[(0.0, 0.0)]):
'''Return approximate length of a bezier curve defined by control points P.
Segment curve into N lines where N is the order of the curve, and accumulate
the length of the segments.
'''
assert isinstance(P, list)
assert len(P) > 0
for p in P:
assert isinstance(p, tuple)
for i in p:
assert len(p) > 1
assert isinstance(i, float)
n_seg = len(P) - 1
pts = pts_on_bezier_curve(P, n_seg)
return sum([distance_between_pts(pts[i], pts[i+1]) for i in range(n_seg)])
|
python
|
def bezier_curve_approx_len(P=[(0.0, 0.0)]):
'''Return approximate length of a bezier curve defined by control points P.
Segment curve into N lines where N is the order of the curve, and accumulate
the length of the segments.
'''
assert isinstance(P, list)
assert len(P) > 0
for p in P:
assert isinstance(p, tuple)
for i in p:
assert len(p) > 1
assert isinstance(i, float)
n_seg = len(P) - 1
pts = pts_on_bezier_curve(P, n_seg)
return sum([distance_between_pts(pts[i], pts[i+1]) for i in range(n_seg)])
|
[
"def",
"bezier_curve_approx_len",
"(",
"P",
"=",
"[",
"(",
"0.0",
",",
"0.0",
")",
"]",
")",
":",
"assert",
"isinstance",
"(",
"P",
",",
"list",
")",
"assert",
"len",
"(",
"P",
")",
">",
"0",
"for",
"p",
"in",
"P",
":",
"assert",
"isinstance",
"(",
"p",
",",
"tuple",
")",
"for",
"i",
"in",
"p",
":",
"assert",
"len",
"(",
"p",
")",
">",
"1",
"assert",
"isinstance",
"(",
"i",
",",
"float",
")",
"n_seg",
"=",
"len",
"(",
"P",
")",
"-",
"1",
"pts",
"=",
"pts_on_bezier_curve",
"(",
"P",
",",
"n_seg",
")",
"return",
"sum",
"(",
"[",
"distance_between_pts",
"(",
"pts",
"[",
"i",
"]",
",",
"pts",
"[",
"i",
"+",
"1",
"]",
")",
"for",
"i",
"in",
"range",
"(",
"n_seg",
")",
"]",
")"
] |
Return approximate length of a bezier curve defined by control points P.
Segment curve into N lines where N is the order of the curve, and accumulate
the length of the segments.
|
[
"Return",
"approximate",
"length",
"of",
"a",
"bezier",
"curve",
"defined",
"by",
"control",
"points",
"P",
".",
"Segment",
"curve",
"into",
"N",
"lines",
"where",
"N",
"is",
"the",
"order",
"of",
"the",
"curve",
"and",
"accumulate",
"the",
"length",
"of",
"the",
"segments",
"."
] |
f1ea023d3e597160fc1e9e11921de07af659f9d2
|
https://github.com/DaveMcEwan/ndim/blob/f1ea023d3e597160fc1e9e11921de07af659f9d2/ndim_bezier.py#L49-L64
|
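Because only len(P) - 1 chords are summed, the estimate above is exact for collinear control points and a slight underestimate for genuinely curved ones. A quick illustration, assuming distance_between_pts is the usual Euclidean distance:

# Collinear control points: the chord sum equals the true length.
print(bezier_curve_approx_len([(0.0, 0.0), (1.0, 0.0), (2.0, 0.0)]))   # -> 2.0

# Curved case: two chords give roughly 2.83, while the true arc length is about 2.96.
print(bezier_curve_approx_len([(0.0, 0.0), (1.0, 2.0), (2.0, 0.0)]))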
240,618
|
pwyliu/clancy
|
clancy/engage.py
|
engage
|
def engage(args, password):
"""
Construct payloads and POST to Red October
"""
if args['create']:
payload = {'Name': args['--user'], 'Password': password}
goodquit_json(api_call('create', args, payload))
elif args['delegate']:
payload = {
'Name': args['--user'], 'Password': password,
'Time': args['--time'], 'Uses': args['--uses']
}
goodquit_json(api_call('delegate', args, payload))
elif args['encrypt']:
payload = {
'Name': args['--user'], 'Password': password,
'Minimum': args['--min'], 'Owners': args['--owners'].split(','),
'Data': (args['--str'] if args['--file'] is None
else read_file(args['--file']))
}
goodquit_json(api_call('encrypt', args, payload))
elif args['decrypt']:
payload = {
'Name': args['--user'], 'Password': password,
'Data': (args['--str'] if args['--file'] is None
else read_file(args['--file']))
}
goodquit_json(api_call('decrypt', args, payload))
elif args['summary']:
payload = {'Name': args['--user'], 'Password': password}
goodquit_json(api_call('summary', args, payload))
elif args['change-password']:
args['newpass'] = getpass.getpass('New Password: ')
payload = {
'Name': args['--user'], 'Password': password,
'NewPassword': args['newpass']
}
goodquit_json(api_call('password', args, payload))
elif args['modify']:
payload = {
'Name': args['--user'], 'Password': password,
'Command': args['--action'], 'ToModify': args['--target']
}
goodquit_json(api_call('modify', args, payload))
|
python
|
def engage(args, password):
"""
Construct payloads and POST to Red October
"""
if args['create']:
payload = {'Name': args['--user'], 'Password': password}
goodquit_json(api_call('create', args, payload))
elif args['delegate']:
payload = {
'Name': args['--user'], 'Password': password,
'Time': args['--time'], 'Uses': args['--uses']
}
goodquit_json(api_call('delegate', args, payload))
elif args['encrypt']:
payload = {
'Name': args['--user'], 'Password': password,
'Minimum': args['--min'], 'Owners': args['--owners'].split(','),
'Data': (args['--str'] if args['--file'] is None
else read_file(args['--file']))
}
goodquit_json(api_call('encrypt', args, payload))
elif args['decrypt']:
payload = {
'Name': args['--user'], 'Password': password,
'Data': (args['--str'] if args['--file'] is None
else read_file(args['--file']))
}
goodquit_json(api_call('decrypt', args, payload))
elif args['summary']:
payload = {'Name': args['--user'], 'Password': password}
goodquit_json(api_call('summary', args, payload))
elif args['change-password']:
args['newpass'] = getpass.getpass('New Password: ')
payload = {
'Name': args['--user'], 'Password': password,
'NewPassword': args['newpass']
}
goodquit_json(api_call('password', args, payload))
elif args['modify']:
payload = {
'Name': args['--user'], 'Password': password,
'Command': args['--action'], 'ToModify': args['--target']
}
goodquit_json(api_call('modify', args, payload))
|
[
"def",
"engage",
"(",
"args",
",",
"password",
")",
":",
"if",
"args",
"[",
"'create'",
"]",
":",
"payload",
"=",
"{",
"'Name'",
":",
"args",
"[",
"'--user'",
"]",
",",
"'Password'",
":",
"password",
"}",
"goodquit_json",
"(",
"api_call",
"(",
"'create'",
",",
"args",
",",
"payload",
")",
")",
"elif",
"args",
"[",
"'delegate'",
"]",
":",
"payload",
"=",
"{",
"'Name'",
":",
"args",
"[",
"'--user'",
"]",
",",
"'Password'",
":",
"password",
",",
"'Time'",
":",
"args",
"[",
"'--time'",
"]",
",",
"'Uses'",
":",
"args",
"[",
"'--uses'",
"]",
"}",
"goodquit_json",
"(",
"api_call",
"(",
"'delegate'",
",",
"args",
",",
"payload",
")",
")",
"elif",
"args",
"[",
"'encrypt'",
"]",
":",
"payload",
"=",
"{",
"'Name'",
":",
"args",
"[",
"'--user'",
"]",
",",
"'Password'",
":",
"password",
",",
"'Minimum'",
":",
"args",
"[",
"'--min'",
"]",
",",
"'Owners'",
":",
"args",
"[",
"'--owners'",
"]",
".",
"split",
"(",
"','",
")",
",",
"'Data'",
":",
"(",
"args",
"[",
"'--str'",
"]",
"if",
"args",
"[",
"'--file'",
"]",
"is",
"None",
"else",
"read_file",
"(",
"args",
"[",
"'--file'",
"]",
")",
")",
"}",
"goodquit_json",
"(",
"api_call",
"(",
"'encrypt'",
",",
"args",
",",
"payload",
")",
")",
"elif",
"args",
"[",
"'decrypt'",
"]",
":",
"payload",
"=",
"{",
"'Name'",
":",
"args",
"[",
"'--user'",
"]",
",",
"'Password'",
":",
"password",
",",
"'Data'",
":",
"(",
"args",
"[",
"'--str'",
"]",
"if",
"args",
"[",
"'--file'",
"]",
"is",
"None",
"else",
"read_file",
"(",
"args",
"[",
"'--file'",
"]",
")",
")",
"}",
"goodquit_json",
"(",
"api_call",
"(",
"'decrypt'",
",",
"args",
",",
"payload",
")",
")",
"elif",
"args",
"[",
"'summary'",
"]",
":",
"payload",
"=",
"{",
"'Name'",
":",
"args",
"[",
"'--user'",
"]",
",",
"'Password'",
":",
"password",
"}",
"goodquit_json",
"(",
"api_call",
"(",
"'summary'",
",",
"args",
",",
"payload",
")",
")",
"elif",
"args",
"[",
"'change-password'",
"]",
":",
"args",
"[",
"'newpass'",
"]",
"=",
"getpass",
".",
"getpass",
"(",
"'New Password: '",
")",
"payload",
"=",
"{",
"'Name'",
":",
"args",
"[",
"'--user'",
"]",
",",
"'Password'",
":",
"password",
",",
"'NewPassword'",
":",
"args",
"[",
"'newpass'",
"]",
"}",
"goodquit_json",
"(",
"api_call",
"(",
"'password'",
",",
"args",
",",
"payload",
")",
")",
"elif",
"args",
"[",
"'modify'",
"]",
":",
"payload",
"=",
"{",
"'Name'",
":",
"args",
"[",
"'--user'",
"]",
",",
"'Password'",
":",
"password",
",",
"'Command'",
":",
"args",
"[",
"'--action'",
"]",
",",
"'ToModify'",
":",
"args",
"[",
"'--target'",
"]",
"}",
"goodquit_json",
"(",
"api_call",
"(",
"'modify'",
",",
"args",
",",
"payload",
")",
")"
] |
Construct payloads and POST to Red October
|
[
"Construct",
"payloads",
"and",
"POST",
"to",
"Red",
"October"
] |
cb15a5e2bb735ffce7a84b8413b04faa78c5039c
|
https://github.com/pwyliu/clancy/blob/cb15a5e2bb735ffce7a84b8413b04faa78c5039c/clancy/engage.py#L7-L56
|
240,619
|
honzamach/pynspect
|
pynspect/gparser.py
|
PynspectFilterParser.parse
|
def parse(self, data, filename='', debuglevel=0):
"""
Parse given data.
data:
A string containing the filter definition
filename:
Name of the file being parsed (for meaningful
error messages)
debuglevel:
Debug level to yacc
"""
self.lexer.filename = filename
self.lexer.reset_lineno()
if not data or data.isspace():
return []
return self.parser.parse(data, lexer=self.lexer, debug=debuglevel)
|
python
|
def parse(self, data, filename='', debuglevel=0):
"""
Parse given data.
data:
A string containing the filter definition
filename:
Name of the file being parsed (for meaningful
error messages)
debuglevel:
Debug level to yacc
"""
self.lexer.filename = filename
self.lexer.reset_lineno()
if not data or data.isspace():
return []
return self.parser.parse(data, lexer=self.lexer, debug=debuglevel)
|
[
"def",
"parse",
"(",
"self",
",",
"data",
",",
"filename",
"=",
"''",
",",
"debuglevel",
"=",
"0",
")",
":",
"self",
".",
"lexer",
".",
"filename",
"=",
"filename",
"self",
".",
"lexer",
".",
"reset_lineno",
"(",
")",
"if",
"not",
"data",
"or",
"data",
".",
"isspace",
"(",
")",
":",
"return",
"[",
"]",
"return",
"self",
".",
"parser",
".",
"parse",
"(",
"data",
",",
"lexer",
"=",
"self",
".",
"lexer",
",",
"debug",
"=",
"debuglevel",
")"
] |
Parse given data.
data:
A string containing the filter definition
filename:
Name of the file being parsed (for meaningful
error messages)
debuglevel:
Debug level to yacc
|
[
"Parse",
"given",
"data",
"."
] |
0582dcc1f7aafe50e25a21c792ea1b3367ea5881
|
https://github.com/honzamach/pynspect/blob/0582dcc1f7aafe50e25a21c792ea1b3367ea5881/pynspect/gparser.py#L219-L235
|
240,620
|
honzamach/pynspect
|
pynspect/gparser.py
|
PynspectFilterParser._create_factor_rule
|
def _create_factor_rule(tok):
"""
Simple helper method for creating factor node objects based on node name.
"""
if tok[0] == 'IPV4':
return IPV4Rule(tok[1])
if tok[0] == 'IPV6':
return IPV6Rule(tok[1])
if tok[0] == 'DATETIME':
return DatetimeRule(tok[1])
if tok[0] == 'TIMEDELTA':
return TimedeltaRule(tok[1])
if tok[0] == 'INTEGER':
return IntegerRule(tok[1])
if tok[0] == 'FLOAT':
return FloatRule(tok[1])
if tok[0] == 'VARIABLE':
return VariableRule(tok[1])
return ConstantRule(tok[1])
|
python
|
def _create_factor_rule(tok):
"""
Simple helper method for creating factor node objects based on node name.
"""
if tok[0] == 'IPV4':
return IPV4Rule(tok[1])
if tok[0] == 'IPV6':
return IPV6Rule(tok[1])
if tok[0] == 'DATETIME':
return DatetimeRule(tok[1])
if tok[0] == 'TIMEDELTA':
return TimedeltaRule(tok[1])
if tok[0] == 'INTEGER':
return IntegerRule(tok[1])
if tok[0] == 'FLOAT':
return FloatRule(tok[1])
if tok[0] == 'VARIABLE':
return VariableRule(tok[1])
return ConstantRule(tok[1])
|
[
"def",
"_create_factor_rule",
"(",
"tok",
")",
":",
"if",
"tok",
"[",
"0",
"]",
"==",
"'IPV4'",
":",
"return",
"IPV4Rule",
"(",
"tok",
"[",
"1",
"]",
")",
"if",
"tok",
"[",
"0",
"]",
"==",
"'IPV6'",
":",
"return",
"IPV6Rule",
"(",
"tok",
"[",
"1",
"]",
")",
"if",
"tok",
"[",
"0",
"]",
"==",
"'DATETIME'",
":",
"return",
"DatetimeRule",
"(",
"tok",
"[",
"1",
"]",
")",
"if",
"tok",
"[",
"0",
"]",
"==",
"'TIMEDELTA'",
":",
"return",
"TimedeltaRule",
"(",
"tok",
"[",
"1",
"]",
")",
"if",
"tok",
"[",
"0",
"]",
"==",
"'INTEGER'",
":",
"return",
"IntegerRule",
"(",
"tok",
"[",
"1",
"]",
")",
"if",
"tok",
"[",
"0",
"]",
"==",
"'FLOAT'",
":",
"return",
"FloatRule",
"(",
"tok",
"[",
"1",
"]",
")",
"if",
"tok",
"[",
"0",
"]",
"==",
"'VARIABLE'",
":",
"return",
"VariableRule",
"(",
"tok",
"[",
"1",
"]",
")",
"return",
"ConstantRule",
"(",
"tok",
"[",
"1",
"]",
")"
] |
Simple helper method for creating factor node objects based on node name.
|
[
"Simple",
"helper",
"method",
"for",
"creating",
"factor",
"node",
"objects",
"based",
"on",
"node",
"name",
"."
] |
0582dcc1f7aafe50e25a21c792ea1b3367ea5881
|
https://github.com/honzamach/pynspect/blob/0582dcc1f7aafe50e25a21c792ea1b3367ea5881/pynspect/gparser.py#L242-L260
|
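The helper above dispatches on the first element of a (TYPE, value) lexer token and falls through to ConstantRule for anything unmatched. Illustrative calls, with token values that are assumptions rather than taken from the pynspect grammar:

_create_factor_rule(('INTEGER', '42'))            # -> IntegerRule('42')
_create_factor_rule(('IPV4', '192.168.0.1'))      # -> IPV4Rule('192.168.0.1')
_create_factor_rule(('UNMATCHED', 'whatever'))    # -> ConstantRule('whatever')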
240,621
|
bitlabstudio/django-unshorten
|
unshorten/utils.py
|
unshorten_url
|
def unshorten_url(short_url):
"""Unshortens the short_url or returns None if not possible."""
short_url = short_url.strip()
if not short_url.startswith('http'):
short_url = 'http://{0}'.format(short_url)
try:
cached_url = UnshortenURL.objects.get(short_url=short_url)
except UnshortenURL.DoesNotExist:
cached_url = UnshortenURL(short_url=short_url)
else:
return cached_url.long_url
try:
resp = urllib2.urlopen(short_url)
except (
urllib2.HTTPError, urllib2.URLError,
httplib.HTTPException):
return None
if resp.code == 200:
cached_url.long_url = resp.url
cached_url.save()
return resp.url
|
python
|
def unshorten_url(short_url):
"""Unshortens the short_url or returns None if not possible."""
short_url = short_url.strip()
if not short_url.startswith('http'):
short_url = 'http://{0}'.format(short_url)
try:
cached_url = UnshortenURL.objects.get(short_url=short_url)
except UnshortenURL.DoesNotExist:
cached_url = UnshortenURL(short_url=short_url)
else:
return cached_url.long_url
try:
resp = urllib2.urlopen(short_url)
except (
urllib2.HTTPError, urllib2.URLError,
httplib.HTTPException):
return None
if resp.code == 200:
cached_url.long_url = resp.url
cached_url.save()
return resp.url
|
[
"def",
"unshorten_url",
"(",
"short_url",
")",
":",
"short_url",
"=",
"short_url",
".",
"strip",
"(",
")",
"if",
"not",
"short_url",
".",
"startswith",
"(",
"'http'",
")",
":",
"short_url",
"=",
"'http://{0}'",
".",
"format",
"(",
"short_url",
")",
"try",
":",
"cached_url",
"=",
"UnshortenURL",
".",
"objects",
".",
"get",
"(",
"short_url",
"=",
"short_url",
")",
"except",
"UnshortenURL",
".",
"DoesNotExist",
":",
"cached_url",
"=",
"UnshortenURL",
"(",
"short_url",
"=",
"short_url",
")",
"else",
":",
"return",
"cached_url",
".",
"long_url",
"try",
":",
"resp",
"=",
"urllib2",
".",
"urlopen",
"(",
"short_url",
")",
"except",
"(",
"urllib2",
".",
"HTTPError",
",",
"urllib2",
".",
"URLError",
",",
"httplib",
".",
"HTTPException",
")",
":",
"return",
"None",
"if",
"resp",
".",
"code",
"==",
"200",
":",
"cached_url",
".",
"long_url",
"=",
"resp",
".",
"url",
"cached_url",
".",
"save",
"(",
")",
"return",
"resp",
".",
"url"
] |
Unshortens the short_url or returns None if not possible.
|
[
"Unshortens",
"the",
"short_url",
"or",
"returns",
"None",
"if",
"not",
"possible",
"."
] |
6d184de908bb9df3aad5ac3fd9732d976afb6953
|
https://github.com/bitlabstudio/django-unshorten/blob/6d184de908bb9df3aad5ac3fd9732d976afb6953/unshorten/utils.py#L8-L31
|
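The row above is Python 2 code (urllib2 and httplib): a cache hit returns early through the try/except/else, otherwise the short URL is opened, redirects are followed, and the final URL is cached on a 200 response. A hedged usage sketch with a made-up short URL:

long_url = unshorten_url('bit.ly/example')      # 'http://' is prepended when missing
if long_url is None:
    print('could not resolve the short URL')    # network error or non-200 response
else:
    print(long_url)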
240,622
|
hobson/pug-dj
|
pug/dj/crawler/views.py
|
CrawlerRPC.stop
|
def stop(self, spider_name=None):
"""Stop the named running spider, or the first spider found, if spider_name is None"""
if spider_name is None:
spider_name = self.spider_name
else:
self.spider_name = spider_name
if self.spider_name is None:
self.spider_name = self.list_running()[0].split(':')[-1]
self.jsonrpc_call('crawler/engine', 'close_spider', self.spider_name)
|
python
|
def stop(self, spider_name=None):
"""Stop the named running spider, or the first spider found, if spider_name is None"""
if spider_name is None:
spider_name = self.spider_name
else:
self.spider_name = spider_name
if self.spider_name is None:
self.spider_name = self.list_running()[0].split(':')[-1]
self.jsonrpc_call('crawler/engine', 'close_spider', self.spider_name)
|
[
"def",
"stop",
"(",
"self",
",",
"spider_name",
"=",
"None",
")",
":",
"if",
"spider_name",
"is",
"None",
":",
"spider_name",
"=",
"self",
".",
"spider_name",
"else",
":",
"self",
".",
"spider_name",
"=",
"spider_name",
"if",
"self",
".",
"spider_name",
"is",
"None",
":",
"self",
".",
"spider_name",
"=",
"self",
".",
"list_running",
"(",
")",
"[",
"0",
"]",
".",
"split",
"(",
"':'",
")",
"[",
"-",
"1",
"]",
"self",
".",
"jsonrpc_call",
"(",
"'crawler/engine'",
",",
"'close_spider'",
",",
"self",
".",
"spider_name",
")"
] |
Stop the named running spider, or the first spider found, if spider_name is None
|
[
"Stop",
"the",
"named",
"running",
"spider",
"or",
"the",
"first",
"spider",
"found",
"if",
"spider_name",
"is",
"None"
] |
55678b08755a55366ce18e7d3b8ea8fa4491ab04
|
https://github.com/hobson/pug-dj/blob/55678b08755a55366ce18e7d3b8ea8fa4491ab04/pug/dj/crawler/views.py#L16-L24
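Hypothetical usage of the method above; how a CrawlerRPC instance is configured is not shown in this record, so rpc and the spider name are assumptions:
rpc.stop('news_spider')   # close a specific spider by name
rpc.stop()                # or close the first running spider found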
|
240,623
|
salimm/pylods
|
pylods/backend/pylodsc/mapper.py
|
CObjectMapper.copy
|
def copy(self):
'''
makes a clone copy of the mapper. It won't clone the serializers or deserializers and it won't copy the events
'''
try:
tmp = self.__class__()
except Exception:
tmp = self.__class__(self._pdict)
tmp._serializers = self._serializers
tmp.__deserializers = self.__deserializers
return tmp
|
python
|
def copy(self):
'''
makes a clone copy of the mapper. It won't clone the serializers or deserializers and it won't copy the events
'''
try:
tmp = self.__class__()
except Exception:
tmp = self.__class__(self._pdict)
tmp._serializers = self._serializers
tmp.__deserializers = self.__deserializers
return tmp
|
[
"def",
"copy",
"(",
"self",
")",
":",
"try",
":",
"tmp",
"=",
"self",
".",
"__class__",
"(",
")",
"except",
"Exception",
":",
"tmp",
"=",
"self",
".",
"__class__",
"(",
"self",
".",
"_pdict",
")",
"tmp",
".",
"_serializers",
"=",
"self",
".",
"_serializers",
"tmp",
".",
"__deserializers",
"=",
"self",
".",
"__deserializers",
"return",
"tmp"
] |
makes a clone copy of the mapper. It won't clone the serializers or deserializers and it won't copy the events
|
[
"makes",
"a",
"clone",
"copy",
"of",
"the",
"mapper",
".",
"It",
"won",
"t",
"clone",
"the",
"serializers",
"or",
"deserializers",
"and",
"it",
"won",
"t",
"copy",
"the",
"events"
] |
d089e2a9afb1fa8cb6c754933fc574b512757c40
|
https://github.com/salimm/pylods/blob/d089e2a9afb1fa8cb6c754933fc574b512757c40/pylods/backend/pylodsc/mapper.py#L95-L107
|
240,624
|
edeposit/edeposit.amqp.ltp
|
src/edeposit/amqp/ltp/checksum_generator.py
|
_get_required_fn
|
def _get_required_fn(fn, root_path):
"""
Definition of the MD5 file requires, that all paths will be absolute
for the package directory, not for the filesystem.
This function converts filesystem-absolute paths to package-absolute paths.
Args:
fn (str): Local/absolute path to the file.
root_path (str): Local/absolute path to the package directory.
Returns:
str: Package-absolute path to the file.
Raises:
ValueError: When `fn` is absolute and `root_path` relative or \
conversely.
"""
if not fn.startswith(root_path):
raise ValueError("Both paths have to be absolute or local!")
replacer = "/" if root_path.endswith("/") else ""
return fn.replace(root_path, replacer, 1)
|
python
|
def _get_required_fn(fn, root_path):
"""
Definition of the MD5 file requires, that all paths will be absolute
for the package directory, not for the filesystem.
This function converts filesystem-absolute paths to package-absolute paths.
Args:
fn (str): Local/absolute path to the file.
root_path (str): Local/absolute path to the package directory.
Returns:
str: Package-absolute path to the file.
Raises:
ValueError: When `fn` is absolute and `root_path` relative or \
conversely.
"""
if not fn.startswith(root_path):
raise ValueError("Both paths have to be absolute or local!")
replacer = "/" if root_path.endswith("/") else ""
return fn.replace(root_path, replacer, 1)
|
[
"def",
"_get_required_fn",
"(",
"fn",
",",
"root_path",
")",
":",
"if",
"not",
"fn",
".",
"startswith",
"(",
"root_path",
")",
":",
"raise",
"ValueError",
"(",
"\"Both paths have to be absolute or local!\"",
")",
"replacer",
"=",
"\"/\"",
"if",
"root_path",
".",
"endswith",
"(",
"\"/\"",
")",
"else",
"\"\"",
"return",
"fn",
".",
"replace",
"(",
"root_path",
",",
"replacer",
",",
"1",
")"
] |
Definition of the MD5 file requires, that all paths will be absolute
for the package directory, not for the filesystem.
This function converts filesystem-absolute paths to package-absolute paths.
Args:
fn (str): Local/absolute path to the file.
root_path (str): Local/absolute path to the package directory.
Returns:
str: Package-absolute path to the file.
Raises:
ValueError: When `fn` is absolute and `root_path` relative or \
conversely.
|
[
"Definition",
"of",
"the",
"MD5",
"file",
"requires",
"that",
"all",
"paths",
"will",
"be",
"absolute",
"for",
"the",
"package",
"directory",
"not",
"for",
"the",
"filesystem",
"."
] |
df9ac7ec6cbdbeaaeed438ca66df75ea967b6d8e
|
https://github.com/edeposit/edeposit.amqp.ltp/blob/df9ac7ec6cbdbeaaeed438ca66df75ea967b6d8e/src/edeposit/amqp/ltp/checksum_generator.py#L21-L44
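Worked example of the conversion above (the paths are illustrative only):
_get_required_fn('/tmp/package/data/file.xml', '/tmp/package')
# -> '/data/file.xml'
_get_required_fn('data/file.xml', '/tmp/package')
# -> raises ValueError, because fn does not start with root_path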
|
240,625
|
AN3223/fpbox
|
fpbox/funcs.py
|
partition
|
def partition(f, xs):
"""
Works similar to filter, except it returns a two-item tuple where the
first item is the sequence of items that passed the filter and the
second is a sequence of items that didn't pass the filter
"""
t = type(xs)
true = filter(f, xs)
false = [x for x in xs if x not in true]
return t(true), t(false)
|
python
|
def partition(f, xs):
"""
Works similar to filter, except it returns a two-item tuple where the
first item is the sequence of items that passed the filter and the
second is a sequence of items that didn't pass the filter
"""
t = type(xs)
true = filter(f, xs)
false = [x for x in xs if x not in true]
return t(true), t(false)
|
[
"def",
"partition",
"(",
"f",
",",
"xs",
")",
":",
"t",
"=",
"type",
"(",
"xs",
")",
"true",
"=",
"filter",
"(",
"f",
",",
"xs",
")",
"false",
"=",
"[",
"x",
"for",
"x",
"in",
"xs",
"if",
"x",
"not",
"in",
"true",
"]",
"return",
"t",
"(",
"true",
")",
",",
"t",
"(",
"false",
")"
] |
Works similar to filter, except it returns a two-item tuple where the
first item is the sequence of items that passed the filter and the
second is a sequence of items that didn't pass the filter
|
[
"Works",
"similar",
"to",
"filter",
"except",
"it",
"returns",
"a",
"two",
"-",
"item",
"tuple",
"where",
"the",
"first",
"item",
"is",
"the",
"sequence",
"of",
"items",
"that",
"passed",
"the",
"filter",
"and",
"the",
"second",
"is",
"a",
"sequence",
"of",
"items",
"that",
"didn",
"t",
"pass",
"the",
"filter"
] |
d3b88fa6d68b7673c58edf46c89a552a9aedd162
|
https://github.com/AN3223/fpbox/blob/d3b88fa6d68b7673c58edf46c89a552a9aedd162/fpbox/funcs.py#L61-L70
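Example for the function above. Note that it relies on Python 2 semantics, where filter() returns a sequence of the same kind as the input; under Python 3, filter() returns an iterator and the membership test would exhaust it:
evens, odds = partition(lambda x: x % 2 == 0, [1, 2, 3, 4, 5])
# evens == [2, 4], odds == [1, 3, 5]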
|
240,626
|
AN3223/fpbox
|
fpbox/funcs.py
|
lazy_binmap
|
def lazy_binmap(f, xs):
"""
Maps a binary function over a sequence. The function is applied to each item
and the item after it until the last item is reached.
"""
return (f(x, y) for x, y in zip(xs, xs[1:]))
|
python
|
def lazy_binmap(f, xs):
"""
Maps a binary function over a sequence. The function is applied to each item
and the item after it until the last item is reached.
"""
return (f(x, y) for x, y in zip(xs, xs[1:]))
|
[
"def",
"lazy_binmap",
"(",
"f",
",",
"xs",
")",
":",
"return",
"(",
"f",
"(",
"x",
",",
"y",
")",
"for",
"x",
",",
"y",
"in",
"zip",
"(",
"xs",
",",
"xs",
"[",
"1",
":",
"]",
")",
")"
] |
Maps a binary function over a sequence. The function is applied to each item
and the item after it until the last item is reached.
|
[
"Maps",
"a",
"binary",
"function",
"over",
"a",
"sequence",
".",
"The",
"function",
"is",
"applied",
"to",
"each",
"item",
"and",
"the",
"item",
"after",
"it",
"until",
"the",
"last",
"item",
"is",
"reached",
"."
] |
d3b88fa6d68b7673c58edf46c89a552a9aedd162
|
https://github.com/AN3223/fpbox/blob/d3b88fa6d68b7673c58edf46c89a552a9aedd162/fpbox/funcs.py#L73-L78
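Example of the pairwise mapping above; the result is a generator, so it is wrapped in list() here:
list(lazy_binmap(lambda a, b: a + b, [1, 2, 3, 4]))
# -> [3, 5, 7]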
|
240,627
|
AN3223/fpbox
|
fpbox/funcs.py
|
lazy_reverse_binmap
|
def lazy_reverse_binmap(f, xs):
"""
Same as lazy_binmap, except the parameters are flipped for the binary function
"""
return (f(y, x) for x, y in zip(xs, xs[1:]))
|
python
|
def lazy_reverse_binmap(f, xs):
"""
Same as lazy_binmap, except the parameters are flipped for the binary function
"""
return (f(y, x) for x, y in zip(xs, xs[1:]))
|
[
"def",
"lazy_reverse_binmap",
"(",
"f",
",",
"xs",
")",
":",
"return",
"(",
"f",
"(",
"y",
",",
"x",
")",
"for",
"x",
",",
"y",
"in",
"zip",
"(",
"xs",
",",
"xs",
"[",
"1",
":",
"]",
")",
")"
] |
Same as lazy_binmap, except the parameters are flipped for the binary function
|
[
"Same",
"as",
"lazy_binmap",
"except",
"the",
"parameters",
"are",
"flipped",
"for",
"the",
"binary",
"function"
] |
d3b88fa6d68b7673c58edf46c89a552a9aedd162
|
https://github.com/AN3223/fpbox/blob/d3b88fa6d68b7673c58edf46c89a552a9aedd162/fpbox/funcs.py#L81-L85
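Example of the flipped-argument variant above; with subtraction it yields successive differences:
list(lazy_reverse_binmap(lambda a, b: a - b, [1, 4, 9, 16]))
# -> [3, 5, 7]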
|
240,628
|
shreyaspotnis/rampage
|
rampage/ramps.py
|
analog_linear2_ramp
|
def analog_linear2_ramp(ramp_data, start_time, end_time, value_final,
time_subarray):
"""Use this when you want a discontinuous jump at the end of the linear ramp."""
value_initial = ramp_data["value"]
value_final2 = ramp_data["value_final"]
interp = (time_subarray - start_time)/(end_time - start_time)
return value_initial*(1.0 - interp) + value_final2*interp
|
python
|
def analog_linear2_ramp(ramp_data, start_time, end_time, value_final,
time_subarray):
"""Use this when you want a discontinuous jump at the end of the linear ramp."""
value_initial = ramp_data["value"]
value_final2 = ramp_data["value_final"]
interp = (time_subarray - start_time)/(end_time - start_time)
return value_initial*(1.0 - interp) + value_final2*interp
|
[
"def",
"analog_linear2_ramp",
"(",
"ramp_data",
",",
"start_time",
",",
"end_time",
",",
"value_final",
",",
"time_subarray",
")",
":",
"value_initial",
"=",
"ramp_data",
"[",
"\"value\"",
"]",
"value_final2",
"=",
"ramp_data",
"[",
"\"value_final\"",
"]",
"interp",
"=",
"(",
"time_subarray",
"-",
"start_time",
")",
"/",
"(",
"end_time",
"-",
"start_time",
")",
"return",
"value_initial",
"*",
"(",
"1.0",
"-",
"interp",
")",
"+",
"value_final2",
"*",
"interp"
] |
Use this when you want a discontinuous jump at the end of the linear ramp.
|
[
"Use",
"this",
"when",
"you",
"want",
"a",
"discontinuous",
"jump",
"at",
"the",
"end",
"of",
"the",
"linear",
"ramp",
"."
] |
e2565aef7ee16ee06523de975e8aa41aca14e3b2
|
https://github.com/shreyaspotnis/rampage/blob/e2565aef7ee16ee06523de975e8aa41aca14e3b2/rampage/ramps.py#L477-L483
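Worked numeric check of the linear interpolation above. The value_final argument (the next keyframe's value) is left unused; the segment ends at ramp_data['value_final'] instead, which is what produces the discontinuous jump when the next keyframe takes over. The concrete numbers are illustrative:
import numpy as np
ramp_data = {'value': 0.0, 'value_final': 1.0}
t = np.linspace(0.0, 1.0, 5)
analog_linear2_ramp(ramp_data, 0.0, 1.0, value_final=2.0, time_subarray=t)
# -> array([0.  , 0.25, 0.5 , 0.75, 1.  ])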
|
240,629
|
shreyaspotnis/rampage
|
rampage/ramps.py
|
KeyFrameList.bake
|
def bake(self):
"""Find absolute times for all keys.
Absolute time is stored in the KeyFrame dictionary as the variable
__abs_time__.
"""
self.unbake()
for key in self.dct:
self.get_absolute_time(key)
self.is_baked = True
|
python
|
def bake(self):
"""Find absolute times for all keys.
Absolute time is stored in the KeyFrame dictionary as the variable
__abs_time__.
"""
self.unbake()
for key in self.dct:
self.get_absolute_time(key)
self.is_baked = True
|
[
"def",
"bake",
"(",
"self",
")",
":",
"self",
".",
"unbake",
"(",
")",
"for",
"key",
"in",
"self",
".",
"dct",
":",
"self",
".",
"get_absolute_time",
"(",
"key",
")",
"self",
".",
"is_baked",
"=",
"True"
] |
Find absolute times for all keys.
Absolute time is stored in the KeyFrame dictionary as the variable
__abs_time__.
|
[
"Find",
"absolute",
"times",
"for",
"all",
"keys",
"."
] |
e2565aef7ee16ee06523de975e8aa41aca14e3b2
|
https://github.com/shreyaspotnis/rampage/blob/e2565aef7ee16ee06523de975e8aa41aca14e3b2/rampage/ramps.py#L54-L63
|
240,630
|
shreyaspotnis/rampage
|
rampage/ramps.py
|
KeyFrameList.unbake
|
def unbake(self):
"""Remove absolute times for all keys."""
for key in self.dct:
# pop __abs_time__ if it exists
self.dct[key].pop('__abs_time__', None)
self.is_baked = False
|
python
|
def unbake(self):
"""Remove absolute times for all keys."""
for key in self.dct:
# pop __abs_time__ if it exists
self.dct[key].pop('__abs_time__', None)
self.is_baked = False
|
[
"def",
"unbake",
"(",
"self",
")",
":",
"for",
"key",
"in",
"self",
".",
"dct",
":",
"# pop __abs_time__ if it exists",
"self",
".",
"dct",
"[",
"key",
"]",
".",
"pop",
"(",
"'__abs_time__'",
",",
"None",
")",
"self",
".",
"is_baked",
"=",
"False"
] |
Remove absolute times for all keys.
|
[
"Remove",
"absolute",
"times",
"for",
"all",
"keys",
"."
] |
e2565aef7ee16ee06523de975e8aa41aca14e3b2
|
https://github.com/shreyaspotnis/rampage/blob/e2565aef7ee16ee06523de975e8aa41aca14e3b2/rampage/ramps.py#L65-L70
|
240,631
|
shreyaspotnis/rampage
|
rampage/ramps.py
|
KeyFrameList.get_absolute_time
|
def get_absolute_time(self, key):
"""Returns the absolute time position of the key.
If absolute time positions are not calculated, then this function
calculates it.
"""
keyframe = self.dct[key]
try:
# if absolute time is already calculated, return that
return keyframe['__abs_time__']
except KeyError:
# if not, calculate by adding relative time to parent's time
if keyframe['parent'] is None:
keyframe['__abs_time__'] = keyframe['time']
else:
parent_time = self.get_absolute_time(keyframe['parent'])
abs_time = keyframe['time'] + parent_time
keyframe['__abs_time__'] = abs_time
return keyframe['__abs_time__']
|
python
|
def get_absolute_time(self, key):
"""Returns the absolute time position of the key.
If absolute time positions are not calculated, then this function
calculates it.
"""
keyframe = self.dct[key]
try:
# if absolute time is already calculated, return that
return keyframe['__abs_time__']
except KeyError:
# if not, calculate by adding relative time to parent's time
if keyframe['parent'] is None:
keyframe['__abs_time__'] = keyframe['time']
else:
parent_time = self.get_absolute_time(keyframe['parent'])
abs_time = keyframe['time'] + parent_time
keyframe['__abs_time__'] = abs_time
return keyframe['__abs_time__']
|
[
"def",
"get_absolute_time",
"(",
"self",
",",
"key",
")",
":",
"keyframe",
"=",
"self",
".",
"dct",
"[",
"key",
"]",
"try",
":",
"# if absolute time is already calculated, return that",
"return",
"keyframe",
"[",
"'__abs_time__'",
"]",
"except",
"KeyError",
":",
"# if not, calculate by adding relative time to parent's time",
"if",
"keyframe",
"[",
"'parent'",
"]",
"is",
"None",
":",
"keyframe",
"[",
"'__abs_time__'",
"]",
"=",
"keyframe",
"[",
"'time'",
"]",
"else",
":",
"parent_time",
"=",
"self",
".",
"get_absolute_time",
"(",
"keyframe",
"[",
"'parent'",
"]",
")",
"abs_time",
"=",
"keyframe",
"[",
"'time'",
"]",
"+",
"parent_time",
"keyframe",
"[",
"'__abs_time__'",
"]",
"=",
"abs_time",
"return",
"keyframe",
"[",
"'__abs_time__'",
"]"
] |
Returns the absolute time position of the key.
If absolute time positions are not calculated, then this function
calculates it.
|
[
"Returns",
"the",
"absolute",
"time",
"position",
"of",
"the",
"key",
"."
] |
e2565aef7ee16ee06523de975e8aa41aca14e3b2
|
https://github.com/shreyaspotnis/rampage/blob/e2565aef7ee16ee06523de975e8aa41aca14e3b2/rampage/ramps.py#L72-L90
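Sketch of the parent-chain resolution above, assuming kfl is a KeyFrameList whose dct holds relative times (the constructor itself is not shown in this record):
# kfl.dct = {
#     'load':  {'time': 0.0, 'parent': None},
#     'evap':  {'time': 2.0, 'parent': 'load'},
#     'image': {'time': 0.5, 'parent': 'evap'},
# }
kfl.get_absolute_time('image')   # -> 2.5, then cached in dct['image']['__abs_time__']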
|
240,632
|
shreyaspotnis/rampage
|
rampage/ramps.py
|
KeyFrameList.sorted_key_list
|
def sorted_key_list(self):
"""Returns list of keys sorted according to their absolute time."""
if not self.is_baked:
self.bake()
key_value_tuple = sorted(self.dct.items(),
key=lambda x: x[1]['__abs_time__'])
skl = [k[0] for k in key_value_tuple]
return skl
|
python
|
def sorted_key_list(self):
"""Returns list of keys sorted according to their absolute time."""
if not self.is_baked:
self.bake()
key_value_tuple = sorted(self.dct.items(),
key=lambda x: x[1]['__abs_time__'])
skl = [k[0] for k in key_value_tuple]
return skl
|
[
"def",
"sorted_key_list",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"is_baked",
":",
"self",
".",
"bake",
"(",
")",
"key_value_tuple",
"=",
"sorted",
"(",
"self",
".",
"dct",
".",
"items",
"(",
")",
",",
"key",
"=",
"lambda",
"x",
":",
"x",
"[",
"1",
"]",
"[",
"'__abs_time__'",
"]",
")",
"skl",
"=",
"[",
"k",
"[",
"0",
"]",
"for",
"k",
"in",
"key_value_tuple",
"]",
"return",
"skl"
] |
Returns list of keys sorted according to their absolute time.
|
[
"Returns",
"list",
"of",
"keys",
"sorted",
"according",
"to",
"their",
"absolute",
"time",
"."
] |
e2565aef7ee16ee06523de975e8aa41aca14e3b2
|
https://github.com/shreyaspotnis/rampage/blob/e2565aef7ee16ee06523de975e8aa41aca14e3b2/rampage/ramps.py#L92-L99
|
240,633
|
shreyaspotnis/rampage
|
rampage/ramps.py
|
KeyFrameList.set_time
|
def set_time(self, key_name, new_time):
"""Sets the time of key."""
self.unbake()
kf = self.dct[key_name]
kf['time'] = new_time
self.bake()
|
python
|
def set_time(self, key_name, new_time):
"""Sets the time of key."""
self.unbake()
kf = self.dct[key_name]
kf['time'] = new_time
self.bake()
|
[
"def",
"set_time",
"(",
"self",
",",
"key_name",
",",
"new_time",
")",
":",
"self",
".",
"unbake",
"(",
")",
"kf",
"=",
"self",
".",
"dct",
"[",
"key_name",
"]",
"kf",
"[",
"'time'",
"]",
"=",
"new_time",
"self",
".",
"bake",
"(",
")"
] |
Sets the time of key.
|
[
"Sets",
"the",
"time",
"of",
"key",
"."
] |
e2565aef7ee16ee06523de975e8aa41aca14e3b2
|
https://github.com/shreyaspotnis/rampage/blob/e2565aef7ee16ee06523de975e8aa41aca14e3b2/rampage/ramps.py#L101-L106
|
240,634
|
shreyaspotnis/rampage
|
rampage/ramps.py
|
KeyFrameList.set_comment
|
def set_comment(self, key_name, new_comment):
"""Sets the comment of key."""
kf = self.dct[key_name]
kf['comment'] = new_comment
|
python
|
def set_comment(self, key_name, new_comment):
"""Sets the comment of key."""
kf = self.dct[key_name]
kf['comment'] = new_comment
|
[
"def",
"set_comment",
"(",
"self",
",",
"key_name",
",",
"new_comment",
")",
":",
"kf",
"=",
"self",
".",
"dct",
"[",
"key_name",
"]",
"kf",
"[",
"'comment'",
"]",
"=",
"new_comment"
] |
Sets the comment of key.
|
[
"Sets",
"the",
"comment",
"of",
"key",
"."
] |
e2565aef7ee16ee06523de975e8aa41aca14e3b2
|
https://github.com/shreyaspotnis/rampage/blob/e2565aef7ee16ee06523de975e8aa41aca14e3b2/rampage/ramps.py#L108-L111
|
240,635
|
shreyaspotnis/rampage
|
rampage/ramps.py
|
KeyFrameList.set_parent
|
def set_parent(self, key_name, new_parent):
"""Sets the parent of the key."""
self.unbake()
kf = self.dct[key_name]
kf['parent'] = new_parent
self.bake()
|
python
|
def set_parent(self, key_name, new_parent):
"""Sets the parent of the key."""
self.unbake()
kf = self.dct[key_name]
kf['parent'] = new_parent
self.bake()
|
[
"def",
"set_parent",
"(",
"self",
",",
"key_name",
",",
"new_parent",
")",
":",
"self",
".",
"unbake",
"(",
")",
"kf",
"=",
"self",
".",
"dct",
"[",
"key_name",
"]",
"kf",
"[",
"'parent'",
"]",
"=",
"new_parent",
"self",
".",
"bake",
"(",
")"
] |
Sets the parent of the key.
|
[
"Sets",
"the",
"parent",
"of",
"the",
"key",
"."
] |
e2565aef7ee16ee06523de975e8aa41aca14e3b2
|
https://github.com/shreyaspotnis/rampage/blob/e2565aef7ee16ee06523de975e8aa41aca14e3b2/rampage/ramps.py#L113-L118
|
240,636
|
shreyaspotnis/rampage
|
rampage/ramps.py
|
KeyFrameList.is_ancestor
|
def is_ancestor(self, child_key_name, ancestor_key_name):
"""Returns True if ancestor lies in the ancestry tree of child."""
# all keys are descendents of None
if ancestor_key_name is None:
return True
one_up_parent = self.dct[child_key_name]['parent']
if child_key_name == ancestor_key_name:
# debatable semantics, but a person lies in his/her own
# ancestry tree
return True
elif one_up_parent is None:
return False
else:
return self.is_ancestor(one_up_parent, ancestor_key_name)
|
python
|
def is_ancestor(self, child_key_name, ancestor_key_name):
"""Returns True if ancestor lies in the ancestry tree of child."""
# all keys are descendents of None
if ancestor_key_name is None:
return True
one_up_parent = self.dct[child_key_name]['parent']
if child_key_name == ancestor_key_name:
# debatable semantics, but a person lies in his/her own
# ancestry tree
return True
elif one_up_parent is None:
return False
else:
return self.is_ancestor(one_up_parent, ancestor_key_name)
|
[
"def",
"is_ancestor",
"(",
"self",
",",
"child_key_name",
",",
"ancestor_key_name",
")",
":",
"# all keys are descendents of None",
"if",
"ancestor_key_name",
"is",
"None",
":",
"return",
"True",
"one_up_parent",
"=",
"self",
".",
"dct",
"[",
"child_key_name",
"]",
"[",
"'parent'",
"]",
"if",
"child_key_name",
"==",
"ancestor_key_name",
":",
"# debatable semantics, but a person lies in his/her own",
"# ancestry tree",
"return",
"True",
"elif",
"one_up_parent",
"is",
"None",
":",
"return",
"False",
"else",
":",
"return",
"self",
".",
"is_ancestor",
"(",
"one_up_parent",
",",
"ancestor_key_name",
")"
] |
Returns True if ancestor lies in the ancestry tree of child.
|
[
"Returns",
"True",
"if",
"ancestor",
"lies",
"in",
"the",
"ancestry",
"tree",
"of",
"child",
"."
] |
e2565aef7ee16ee06523de975e8aa41aca14e3b2
|
https://github.com/shreyaspotnis/rampage/blob/e2565aef7ee16ee06523de975e8aa41aca14e3b2/rampage/ramps.py#L149-L164
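Small example of the ancestry test above, reusing a parent chain like {'load': parent None, 'evap': parent 'load'} (names are illustrative):
kfl.is_ancestor('evap', 'load')   # -> True, 'load' is in 'evap''s chain
kfl.is_ancestor('evap', 'evap')   # -> True, a key lies in its own ancestry tree
kfl.is_ancestor('load', 'evap')   # -> False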
|
240,637
|
shreyaspotnis/rampage
|
rampage/ramps.py
|
KeyFrameList.add_hook
|
def add_hook(self, key_name, hook_name, hook_dict):
"""Add hook to the keyframe key_name."""
kf = self.dct[key_name]
if 'hooks' not in kf:
kf['hooks'] = {}
kf['hooks'][str(hook_name)] = hook_dict
|
python
|
def add_hook(self, key_name, hook_name, hook_dict):
"""Add hook to the keyframe key_name."""
kf = self.dct[key_name]
if 'hooks' not in kf:
kf['hooks'] = {}
kf['hooks'][str(hook_name)] = hook_dict
|
[
"def",
"add_hook",
"(",
"self",
",",
"key_name",
",",
"hook_name",
",",
"hook_dict",
")",
":",
"kf",
"=",
"self",
".",
"dct",
"[",
"key_name",
"]",
"if",
"'hooks'",
"not",
"in",
"kf",
":",
"kf",
"[",
"'hooks'",
"]",
"=",
"{",
"}",
"kf",
"[",
"'hooks'",
"]",
"[",
"str",
"(",
"hook_name",
")",
"]",
"=",
"hook_dict"
] |
Add hook to the keyframe key_name.
|
[
"Add",
"hook",
"to",
"the",
"keyframe",
"key_name",
"."
] |
e2565aef7ee16ee06523de975e8aa41aca14e3b2
|
https://github.com/shreyaspotnis/rampage/blob/e2565aef7ee16ee06523de975e8aa41aca14e3b2/rampage/ramps.py#L166-L171
|
240,638
|
shreyaspotnis/rampage
|
rampage/ramps.py
|
KeyFrameList.remove_hook
|
def remove_hook(self, key_name, hook_name):
"""Remove hook from the keyframe key_name."""
kf = self.dct[key_name]
if 'hooks' in kf:
if hook_name in kf['hooks']:
return kf['hooks'].pop(hook_name)
|
python
|
def remove_hook(self, key_name, hook_name):
"""Remove hook from the keyframe key_name."""
kf = self.dct[key_name]
if 'hooks' in kf:
if hook_name in kf['hooks']:
return kf['hooks'].pop(hook_name)
|
[
"def",
"remove_hook",
"(",
"self",
",",
"key_name",
",",
"hook_name",
")",
":",
"kf",
"=",
"self",
".",
"dct",
"[",
"key_name",
"]",
"if",
"'hooks'",
"in",
"kf",
":",
"if",
"hook_name",
"in",
"kf",
"[",
"'hooks'",
"]",
":",
"return",
"kf",
"[",
"'hooks'",
"]",
".",
"pop",
"(",
"hook_name",
")"
] |
Remove hook from the keyframe key_name.
|
[
"Remove",
"hook",
"from",
"the",
"keyframe",
"key_name",
"."
] |
e2565aef7ee16ee06523de975e8aa41aca14e3b2
|
https://github.com/shreyaspotnis/rampage/blob/e2565aef7ee16ee06523de975e8aa41aca14e3b2/rampage/ramps.py#L173-L178
|
240,639
|
shreyaspotnis/rampage
|
rampage/ramps.py
|
KeyFrameList.list_hooks
|
def list_hooks(self, key_name):
"""Return list of all hooks attached to key_name."""
kf = self.dct[key_name]
if 'hooks' not in kf:
return []
else:
return kf['hooks'].iterkeys()
|
python
|
def list_hooks(self, key_name):
"""Return list of all hooks attached to key_name."""
kf = self.dct[key_name]
if 'hooks' not in kf:
return []
else:
return kf['hooks'].iterkeys()
|
[
"def",
"list_hooks",
"(",
"self",
",",
"key_name",
")",
":",
"kf",
"=",
"self",
".",
"dct",
"[",
"key_name",
"]",
"if",
"'hooks'",
"not",
"in",
"kf",
":",
"return",
"[",
"]",
"else",
":",
"return",
"kf",
"[",
"'hooks'",
"]",
".",
"iterkeys",
"(",
")"
] |
Return list of all hooks attached to key_name.
|
[
"Return",
"list",
"of",
"all",
"hooks",
"attached",
"to",
"key_name",
"."
] |
e2565aef7ee16ee06523de975e8aa41aca14e3b2
|
https://github.com/shreyaspotnis/rampage/blob/e2565aef7ee16ee06523de975e8aa41aca14e3b2/rampage/ramps.py#L180-L186
|
240,640
|
shreyaspotnis/rampage
|
rampage/ramps.py
|
KeyFrameList.do_keyframes_overlap
|
def do_keyframes_overlap(self):
"""Checks for keyframs timing overlap.
Returns the name of the first keyframs that overlapped."""
skl = self.sorted_key_list()
for i in range(len(skl)-1):
this_time = self.dct[skl[i]]['__abs_time__']
next_time = self.dct[skl[i+1]]['__abs_time__']
if abs(next_time-this_time) < 1e-6:
# key frame times overlap
return skl[i]
# Return None if all passed
return None
|
python
|
def do_keyframes_overlap(self):
"""Checks for keyframs timing overlap.
Returns the name of the first keyframs that overlapped."""
skl = self.sorted_key_list()
for i in range(len(skl)-1):
this_time = self.dct[skl[i]]['__abs_time__']
next_time = self.dct[skl[i+1]]['__abs_time__']
if abs(next_time-this_time) < 1e-6:
# key frame times overlap
return skl[i]
# Return None if all passed
return None
|
[
"def",
"do_keyframes_overlap",
"(",
"self",
")",
":",
"skl",
"=",
"self",
".",
"sorted_key_list",
"(",
")",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"skl",
")",
"-",
"1",
")",
":",
"this_time",
"=",
"self",
".",
"dct",
"[",
"skl",
"[",
"i",
"]",
"]",
"[",
"'__abs_time__'",
"]",
"next_time",
"=",
"self",
".",
"dct",
"[",
"skl",
"[",
"i",
"+",
"1",
"]",
"]",
"[",
"'__abs_time__'",
"]",
"if",
"abs",
"(",
"next_time",
"-",
"this_time",
")",
"<",
"1e-6",
":",
"# key frame times overlap",
"return",
"skl",
"[",
"i",
"]",
"# Return None if all passed",
"return",
"None"
] |
Checks for keyframe timing overlap.
Returns the name of the first keyframe that overlapped.
|
[
"Checks",
"for",
"keyframs",
"timing",
"overlap",
"."
] |
e2565aef7ee16ee06523de975e8aa41aca14e3b2
|
https://github.com/shreyaspotnis/rampage/blob/e2565aef7ee16ee06523de975e8aa41aca14e3b2/rampage/ramps.py#L199-L211
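Two keyframes count as overlapping when their absolute times agree to within 1e-6. A minimal pre-flight check built on the method above might look like this (kfl is an assumed KeyFrameList instance):
clash = kfl.do_keyframes_overlap()
if clash is not None:
    raise ValueError('keyframe {} overlaps the next one in time'.format(clash))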
|
240,641
|
shreyaspotnis/rampage
|
rampage/ramps.py
|
Channel.del_unused_keyframes
|
def del_unused_keyframes(self):
"""Scans through list of keyframes in the channel and removes those
which are not in self.key_frame_list."""
skl = self.key_frame_list.sorted_key_list()
unused_keys = [k for k in self.dct['keys']
if k not in skl]
for k in unused_keys:
del self.dct['keys'][k]
|
python
|
def del_unused_keyframes(self):
"""Scans through list of keyframes in the channel and removes those
which are not in self.key_frame_list."""
skl = self.key_frame_list.sorted_key_list()
unused_keys = [k for k in self.dct['keys']
if k not in skl]
for k in unused_keys:
del self.dct['keys'][k]
|
[
"def",
"del_unused_keyframes",
"(",
"self",
")",
":",
"skl",
"=",
"self",
".",
"key_frame_list",
".",
"sorted_key_list",
"(",
")",
"unused_keys",
"=",
"[",
"k",
"for",
"k",
"in",
"self",
".",
"dct",
"[",
"'keys'",
"]",
"if",
"k",
"not",
"in",
"skl",
"]",
"for",
"k",
"in",
"unused_keys",
":",
"del",
"self",
".",
"dct",
"[",
"'keys'",
"]",
"[",
"k",
"]"
] |
Scans through list of keyframes in the channel and removes those
which are not in self.key_frame_list.
|
[
"Scans",
"through",
"list",
"of",
"keyframes",
"in",
"the",
"channel",
"and",
"removes",
"those",
"which",
"are",
"not",
"in",
"self",
".",
"key_frame_list",
"."
] |
e2565aef7ee16ee06523de975e8aa41aca14e3b2
|
https://github.com/shreyaspotnis/rampage/blob/e2565aef7ee16ee06523de975e8aa41aca14e3b2/rampage/ramps.py#L270-L277
|
240,642
|
shreyaspotnis/rampage
|
rampage/ramps.py
|
Channel.get_used_key_frames
|
def get_used_key_frames(self):
"""Returns a list of the keyframes used by this channel, sorted with
time. Each element in the list is a tuple. The first element is the
key_name and the second is the channel data at that keyframe."""
skl = self.key_frame_list.sorted_key_list()
# each element in used_key_frames is a tuple (key_name, key_dict)
used_key_frames = []
for kf in skl:
if kf in self.dct['keys']:
used_key_frames.append((kf, self.dct['keys'][kf]))
return used_key_frames
|
python
|
def get_used_key_frames(self):
"""Returns a list of the keyframes used by this channel, sorted with
time. Each element in the list is a tuple. The first element is the
key_name and the second is the channel data at that keyframe."""
skl = self.key_frame_list.sorted_key_list()
# each element in used_key_frames is a tuple (key_name, key_dict)
used_key_frames = []
for kf in skl:
if kf in self.dct['keys']:
used_key_frames.append((kf, self.dct['keys'][kf]))
return used_key_frames
|
[
"def",
"get_used_key_frames",
"(",
"self",
")",
":",
"skl",
"=",
"self",
".",
"key_frame_list",
".",
"sorted_key_list",
"(",
")",
"# each element in used_key_frames is a tuple (key_name, key_dict)",
"used_key_frames",
"=",
"[",
"]",
"for",
"kf",
"in",
"skl",
":",
"if",
"kf",
"in",
"self",
".",
"dct",
"[",
"'keys'",
"]",
":",
"used_key_frames",
".",
"append",
"(",
"(",
"kf",
",",
"self",
".",
"dct",
"[",
"'keys'",
"]",
"[",
"kf",
"]",
")",
")",
"return",
"used_key_frames"
] |
Returns a list of the keyframes used by this channel, sorted with
time. Each element in the list is a tuple. The first element is the
key_name and the second is the channel data at that keyframe.
|
[
"Returns",
"a",
"list",
"of",
"the",
"keyframes",
"used",
"by",
"this",
"channel",
"sorted",
"with",
"time",
".",
"Each",
"element",
"in",
"the",
"list",
"is",
"a",
"tuple",
".",
"The",
"first",
"element",
"is",
"the",
"key_name",
"and",
"the",
"second",
"is",
"the",
"channel",
"data",
"at",
"that",
"keyframe",
"."
] |
e2565aef7ee16ee06523de975e8aa41aca14e3b2
|
https://github.com/shreyaspotnis/rampage/blob/e2565aef7ee16ee06523de975e8aa41aca14e3b2/rampage/ramps.py#L284-L295
|
240,643
|
shreyaspotnis/rampage
|
rampage/ramps.py
|
Channel.get_ramp_regions
|
def get_ramp_regions(self):
"""Returns a numpy array where each element corresponds to whether to
ramp in that region or jump."""
skl = self.key_frame_list.sorted_key_list()
ramp_or_jump = np.zeros(len(skl) - 1)
used_key_frames = self.get_used_key_frame_list()
for region_number, start_key in enumerate(skl[:-1]):
if start_key in used_key_frames:
key_data = self.dct['keys'][start_key]
ramp_type = key_data['ramp_type']
if ramp_type != "jump":
# this means that a ramp starts in this region. Figure
# out where it ends
curr_key_num = used_key_frames.index(start_key)
end_key_number = curr_key_num + 1
# figure out if the current key was the last key
if end_key_number < len(used_key_frames):
# if it wasnt, then find the end region
end_key_name = used_key_frames[end_key_number]
end_region_index = skl.index(end_key_name)
ramp_or_jump[region_number:end_region_index] = 1
return ramp_or_jump
|
python
|
def get_ramp_regions(self):
"""Returns a numpy array where each element corresponds to whether to
ramp in that region or jump."""
skl = self.key_frame_list.sorted_key_list()
ramp_or_jump = np.zeros(len(skl) - 1)
used_key_frames = self.get_used_key_frame_list()
for region_number, start_key in enumerate(skl[:-1]):
if start_key in used_key_frames:
key_data = self.dct['keys'][start_key]
ramp_type = key_data['ramp_type']
if ramp_type != "jump":
# this means that a ramp starts in this region. Figure
# out where it ends
curr_key_num = used_key_frames.index(start_key)
end_key_number = curr_key_num + 1
# figure out if the current key was the last key
if end_key_number < len(used_key_frames):
# if it wasnt, then find the end region
end_key_name = used_key_frames[end_key_number]
end_region_index = skl.index(end_key_name)
ramp_or_jump[region_number:end_region_index] = 1
return ramp_or_jump
|
[
"def",
"get_ramp_regions",
"(",
"self",
")",
":",
"skl",
"=",
"self",
".",
"key_frame_list",
".",
"sorted_key_list",
"(",
")",
"ramp_or_jump",
"=",
"np",
".",
"zeros",
"(",
"len",
"(",
"skl",
")",
"-",
"1",
")",
"used_key_frames",
"=",
"self",
".",
"get_used_key_frame_list",
"(",
")",
"for",
"region_number",
",",
"start_key",
"in",
"enumerate",
"(",
"skl",
"[",
":",
"-",
"1",
"]",
")",
":",
"if",
"start_key",
"in",
"used_key_frames",
":",
"key_data",
"=",
"self",
".",
"dct",
"[",
"'keys'",
"]",
"[",
"start_key",
"]",
"ramp_type",
"=",
"key_data",
"[",
"'ramp_type'",
"]",
"if",
"ramp_type",
"!=",
"\"jump\"",
":",
"# this means that a ramp starts in this region. Figure",
"# out where it ends",
"curr_key_num",
"=",
"used_key_frames",
".",
"index",
"(",
"start_key",
")",
"end_key_number",
"=",
"curr_key_num",
"+",
"1",
"# figure out if the current key was the last key",
"if",
"end_key_number",
"<",
"len",
"(",
"used_key_frames",
")",
":",
"# if it wasnt, then find the end region",
"end_key_name",
"=",
"used_key_frames",
"[",
"end_key_number",
"]",
"end_region_index",
"=",
"skl",
".",
"index",
"(",
"end_key_name",
")",
"ramp_or_jump",
"[",
"region_number",
":",
"end_region_index",
"]",
"=",
"1",
"return",
"ramp_or_jump"
] |
Returns a numpy array where each element corresponds to whether to
ramp in that region or jump.
|
[
"Returns",
"a",
"numpy",
"array",
"where",
"each",
"element",
"corresponds",
"to",
"whether",
"to",
"ramp",
"in",
"that",
"region",
"or",
"jump",
"."
] |
e2565aef7ee16ee06523de975e8aa41aca14e3b2
|
https://github.com/shreyaspotnis/rampage/blob/e2565aef7ee16ee06523de975e8aa41aca14e3b2/rampage/ramps.py#L308-L329
|
240,644
|
shreyaspotnis/rampage
|
rampage/ramps.py
|
Channel.generate_ramp
|
def generate_ramp(self, time_div=4e-3):
"""Returns the generated ramp and a time array.
This function assumes a uniform time division throughout.
time_div - time resolution of the ramp.
"""
if self.dct['type'] == 'analog':
is_analog = True
else:
is_analog = False
skl = self.key_frame_list.sorted_key_list()
# each element in used_key_frames is a tuple (key_name, key_dict)
used_key_frames = self.get_used_key_frames()
max_time = self.key_frame_list.get_absolute_time(skl[-1]) + time_div
num_points = int(round(max_time/time_div))
time = np.arange(num_points) * time_div
# time = np.arange(0.0, max_time, time_div)
if is_analog:
voltage = np.zeros(time.shape, dtype=float)
else:
voltage = np.zeros(time.shape, dtype='uint32')
kf_times = np.array([self.key_frame_list.get_absolute_time(ukf[0])
for ukf in used_key_frames])
kf_positions = kf_times/time_div
if is_analog:
# set the start and the end part of the ramp
start_voltage = used_key_frames[0][1]['ramp_data']['value']
end_voltage = used_key_frames[-1][1]['ramp_data']['value']
voltage[0:kf_positions[0]] = start_voltage
voltage[kf_positions[-1]:] = end_voltage
else:
start_voltage = int(used_key_frames[0][1]['state'])
end_voltage = int(used_key_frames[-1][1]['state'])
voltage[0:kf_positions[0]] = start_voltage
voltage[kf_positions[-1]:] = end_voltage
for i in range(len(kf_times)-1):
start_time = kf_times[i]
end_time = kf_times[i+1]
start_index = kf_positions[i]
end_index = kf_positions[i+1]
time_subarray = time[start_index:end_index]
ramp_type = used_key_frames[i][1]['ramp_type']
ramp_data = used_key_frames[i][1]['ramp_data']
if is_analog:
value_final = used_key_frames[i+1][1]['ramp_data']['value']
else:
state = used_key_frames[i][1]['state']
if is_analog:
parms_tuple = (ramp_data, start_time, end_time, value_final,
time_subarray)
else:
parms_tuple = (ramp_data, start_time, end_time, state,
time_subarray)
if is_analog:
ramp_function = analog_ramp_functions[ramp_type]
else:
ramp_function = digital_ramp_functions[ramp_type]
voltage_sub = ramp_function(*parms_tuple)
voltage[start_index:end_index] = voltage_sub
# finally use the conversion and return the voltage
return time, self.convert_voltage(voltage, time)
|
python
|
def generate_ramp(self, time_div=4e-3):
"""Returns the generated ramp and a time array.
This function assumes a uniform time division throughout.
time_div - time resolution of the ramp.
"""
if self.dct['type'] == 'analog':
is_analog = True
else:
is_analog = False
skl = self.key_frame_list.sorted_key_list()
# each element in used_key_frames is a tuple (key_name, key_dict)
used_key_frames = self.get_used_key_frames()
max_time = self.key_frame_list.get_absolute_time(skl[-1]) + time_div
num_points = int(round(max_time/time_div))
time = np.arange(num_points) * time_div
# time = np.arange(0.0, max_time, time_div)
if is_analog:
voltage = np.zeros(time.shape, dtype=float)
else:
voltage = np.zeros(time.shape, dtype='uint32')
kf_times = np.array([self.key_frame_list.get_absolute_time(ukf[0])
for ukf in used_key_frames])
kf_positions = kf_times/time_div
if is_analog:
# set the start and the end part of the ramp
start_voltage = used_key_frames[0][1]['ramp_data']['value']
end_voltage = used_key_frames[-1][1]['ramp_data']['value']
voltage[0:kf_positions[0]] = start_voltage
voltage[kf_positions[-1]:] = end_voltage
else:
start_voltage = int(used_key_frames[0][1]['state'])
end_voltage = int(used_key_frames[-1][1]['state'])
voltage[0:kf_positions[0]] = start_voltage
voltage[kf_positions[-1]:] = end_voltage
for i in range(len(kf_times)-1):
start_time = kf_times[i]
end_time = kf_times[i+1]
start_index = kf_positions[i]
end_index = kf_positions[i+1]
time_subarray = time[start_index:end_index]
ramp_type = used_key_frames[i][1]['ramp_type']
ramp_data = used_key_frames[i][1]['ramp_data']
if is_analog:
value_final = used_key_frames[i+1][1]['ramp_data']['value']
else:
state = used_key_frames[i][1]['state']
if is_analog:
parms_tuple = (ramp_data, start_time, end_time, value_final,
time_subarray)
else:
parms_tuple = (ramp_data, start_time, end_time, state,
time_subarray)
if is_analog:
ramp_function = analog_ramp_functions[ramp_type]
else:
ramp_function = digital_ramp_functions[ramp_type]
voltage_sub = ramp_function(*parms_tuple)
voltage[start_index:end_index] = voltage_sub
# finally use the conversion and return the voltage
return time, self.convert_voltage(voltage, time)
|
[
"def",
"generate_ramp",
"(",
"self",
",",
"time_div",
"=",
"4e-3",
")",
":",
"if",
"self",
".",
"dct",
"[",
"'type'",
"]",
"==",
"'analog'",
":",
"is_analog",
"=",
"True",
"else",
":",
"is_analog",
"=",
"False",
"skl",
"=",
"self",
".",
"key_frame_list",
".",
"sorted_key_list",
"(",
")",
"# each element in used_key_frames is a tuple (key_name, key_dict)",
"used_key_frames",
"=",
"self",
".",
"get_used_key_frames",
"(",
")",
"max_time",
"=",
"self",
".",
"key_frame_list",
".",
"get_absolute_time",
"(",
"skl",
"[",
"-",
"1",
"]",
")",
"+",
"time_div",
"num_points",
"=",
"int",
"(",
"round",
"(",
"max_time",
"/",
"time_div",
")",
")",
"time",
"=",
"np",
".",
"arange",
"(",
"num_points",
")",
"*",
"time_div",
"# time = np.arange(0.0, max_time, time_div)",
"if",
"is_analog",
":",
"voltage",
"=",
"np",
".",
"zeros",
"(",
"time",
".",
"shape",
",",
"dtype",
"=",
"float",
")",
"else",
":",
"voltage",
"=",
"np",
".",
"zeros",
"(",
"time",
".",
"shape",
",",
"dtype",
"=",
"'uint32'",
")",
"kf_times",
"=",
"np",
".",
"array",
"(",
"[",
"self",
".",
"key_frame_list",
".",
"get_absolute_time",
"(",
"ukf",
"[",
"0",
"]",
")",
"for",
"ukf",
"in",
"used_key_frames",
"]",
")",
"kf_positions",
"=",
"kf_times",
"/",
"time_div",
"if",
"is_analog",
":",
"# set the start and the end part of the ramp",
"start_voltage",
"=",
"used_key_frames",
"[",
"0",
"]",
"[",
"1",
"]",
"[",
"'ramp_data'",
"]",
"[",
"'value'",
"]",
"end_voltage",
"=",
"used_key_frames",
"[",
"-",
"1",
"]",
"[",
"1",
"]",
"[",
"'ramp_data'",
"]",
"[",
"'value'",
"]",
"voltage",
"[",
"0",
":",
"kf_positions",
"[",
"0",
"]",
"]",
"=",
"start_voltage",
"voltage",
"[",
"kf_positions",
"[",
"-",
"1",
"]",
":",
"]",
"=",
"end_voltage",
"else",
":",
"start_voltage",
"=",
"int",
"(",
"used_key_frames",
"[",
"0",
"]",
"[",
"1",
"]",
"[",
"'state'",
"]",
")",
"end_voltage",
"=",
"int",
"(",
"used_key_frames",
"[",
"-",
"1",
"]",
"[",
"1",
"]",
"[",
"'state'",
"]",
")",
"voltage",
"[",
"0",
":",
"kf_positions",
"[",
"0",
"]",
"]",
"=",
"start_voltage",
"voltage",
"[",
"kf_positions",
"[",
"-",
"1",
"]",
":",
"]",
"=",
"end_voltage",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"kf_times",
")",
"-",
"1",
")",
":",
"start_time",
"=",
"kf_times",
"[",
"i",
"]",
"end_time",
"=",
"kf_times",
"[",
"i",
"+",
"1",
"]",
"start_index",
"=",
"kf_positions",
"[",
"i",
"]",
"end_index",
"=",
"kf_positions",
"[",
"i",
"+",
"1",
"]",
"time_subarray",
"=",
"time",
"[",
"start_index",
":",
"end_index",
"]",
"ramp_type",
"=",
"used_key_frames",
"[",
"i",
"]",
"[",
"1",
"]",
"[",
"'ramp_type'",
"]",
"ramp_data",
"=",
"used_key_frames",
"[",
"i",
"]",
"[",
"1",
"]",
"[",
"'ramp_data'",
"]",
"if",
"is_analog",
":",
"value_final",
"=",
"used_key_frames",
"[",
"i",
"+",
"1",
"]",
"[",
"1",
"]",
"[",
"'ramp_data'",
"]",
"[",
"'value'",
"]",
"else",
":",
"state",
"=",
"used_key_frames",
"[",
"i",
"]",
"[",
"1",
"]",
"[",
"'state'",
"]",
"if",
"is_analog",
":",
"parms_tuple",
"=",
"(",
"ramp_data",
",",
"start_time",
",",
"end_time",
",",
"value_final",
",",
"time_subarray",
")",
"else",
":",
"parms_tuple",
"=",
"(",
"ramp_data",
",",
"start_time",
",",
"end_time",
",",
"state",
",",
"time_subarray",
")",
"if",
"is_analog",
":",
"ramp_function",
"=",
"analog_ramp_functions",
"[",
"ramp_type",
"]",
"else",
":",
"ramp_function",
"=",
"digital_ramp_functions",
"[",
"ramp_type",
"]",
"voltage_sub",
"=",
"ramp_function",
"(",
"*",
"parms_tuple",
")",
"voltage",
"[",
"start_index",
":",
"end_index",
"]",
"=",
"voltage_sub",
"# finally use the conversion and return the voltage",
"return",
"time",
",",
"self",
".",
"convert_voltage",
"(",
"voltage",
",",
"time",
")"
] |
Returns the generated ramp and a time array.
This function assumes a uniform time division throughout.
time_div - time resolution of the ramp.
|
[
"Returns",
"the",
"generated",
"ramp",
"and",
"a",
"time",
"array",
"."
] |
e2565aef7ee16ee06523de975e8aa41aca14e3b2
|
https://github.com/shreyaspotnis/rampage/blob/e2565aef7ee16ee06523de975e8aa41aca14e3b2/rampage/ramps.py#L390-L457
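Hypothetical usage of the method above (how a Channel is constructed is not shown in this record; the default resolution is time_div=4e-3, i.e. 4 ms):
time, values = channel.generate_ramp(time_div=1e-3)   # 1 ms resolution
# time is a uniform numpy array; values has one entry per time step, either
# float voltages (analog channels) or uint32 states (digital channels).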
|
240,645
|
PonteIneptique/collatinus-python
|
pycollatinus/lemme.py
|
Lemme.possible_forms
|
def possible_forms(self):
""" Generate a list of possible forms for the current lemma
:returns: List of possible forms for the current lemma
:rtype: [str]
"""
forms = []
for morph in self.modele().morphos():
for desinence in self.modele().desinences(morph):
radicaux = self.radical(desinence.numRad())
if isinstance(radicaux, Radical):
forms.append(radicaux.gr() + desinence.gr())
else:
for rad in radicaux:
forms.append(rad.gr() + desinence.gr())
return list(set(forms))
|
python
|
def possible_forms(self):
""" Generate a list of possible forms for the current lemma
:returns: List of possible forms for the current lemma
:rtype: [str]
"""
forms = []
for morph in self.modele().morphos():
for desinence in self.modele().desinences(morph):
radicaux = self.radical(desinence.numRad())
if isinstance(radicaux, Radical):
forms.append(radicaux.gr() + desinence.gr())
else:
for rad in radicaux:
forms.append(rad.gr() + desinence.gr())
return list(set(forms))
|
[
"def",
"possible_forms",
"(",
"self",
")",
":",
"forms",
"=",
"[",
"]",
"for",
"morph",
"in",
"self",
".",
"modele",
"(",
")",
".",
"morphos",
"(",
")",
":",
"for",
"desinence",
"in",
"self",
".",
"modele",
"(",
")",
".",
"desinences",
"(",
"morph",
")",
":",
"radicaux",
"=",
"self",
".",
"radical",
"(",
"desinence",
".",
"numRad",
"(",
")",
")",
"if",
"isinstance",
"(",
"radicaux",
",",
"Radical",
")",
":",
"forms",
".",
"append",
"(",
"radicaux",
".",
"gr",
"(",
")",
"+",
"desinence",
".",
"gr",
"(",
")",
")",
"else",
":",
"for",
"rad",
"in",
"radicaux",
":",
"forms",
".",
"append",
"(",
"rad",
".",
"gr",
"(",
")",
"+",
"desinence",
".",
"gr",
"(",
")",
")",
"return",
"list",
"(",
"set",
"(",
"forms",
")",
")"
] |
Generate a list of possible forms for the current lemma
:returns: List of possible forms for the current lemma
:rtype: [str]
|
[
"Generate",
"a",
"list",
"of",
"possible",
"forms",
"for",
"the",
"current",
"lemma"
] |
fca37b0b77bc60f47d3c24ab42f6d0bdca6ba0f5
|
https://github.com/PonteIneptique/collatinus-python/blob/fca37b0b77bc60f47d3c24ab42f6d0bdca6ba0f5/pycollatinus/lemme.py#L338-L353
|
240,646
|
JonLiuFYI/pkdx
|
pkdx/pkdx/scrape_moves.py
|
get_moves
|
def get_moves():
"""Visit Bulbapedia and pull names and descriptions from the table, 'list of moves.' Save as JSON."""
page = requests.get('http://bulbapedia.bulbagarden.net/wiki/List_of_moves')
soup = bs4.BeautifulSoup(page.text)
table = soup.table.table
tablerows = [tr for tr in table.children if tr != '\n'][1:]
moves = {}
for tr in tablerows:
cells = tr.find_all('td')
move_name = cells[1].get_text().strip(' \n*').replace(' ', '-').lower()
move_id = int(cells[0].get_text().strip())
move_type = cells[2].get_text().strip()
move_ps = cells[3].get_text().strip()
moves[move_name] = {'id':move_id, 'type':move_type, 'ps':move_ps}
srcpath = path.dirname(__file__)
with io.open(path.join(srcpath, 'moves.json'), 'w', encoding='utf-8') as f:
f.write(json.dumps(moves, ensure_ascii=False))
|
python
|
def get_moves():
"""Visit Bulbapedia and pull names and descriptions from the table, 'list of moves.' Save as JSON."""
page = requests.get('http://bulbapedia.bulbagarden.net/wiki/List_of_moves')
soup = bs4.BeautifulSoup(page.text)
table = soup.table.table
tablerows = [tr for tr in table.children if tr != '\n'][1:]
moves = {}
for tr in tablerows:
cells = tr.find_all('td')
move_name = cells[1].get_text().strip(' \n*').replace(' ', '-').lower()
move_id = int(cells[0].get_text().strip())
move_type = cells[2].get_text().strip()
move_ps = cells[3].get_text().strip()
moves[move_name] = {'id':move_id, 'type':move_type, 'ps':move_ps}
srcpath = path.dirname(__file__)
with io.open(path.join(srcpath, 'moves.json'), 'w', encoding='utf-8') as f:
f.write(json.dumps(moves, ensure_ascii=False))
|
[
"def",
"get_moves",
"(",
")",
":",
"page",
"=",
"requests",
".",
"get",
"(",
"'http://bulbapedia.bulbagarden.net/wiki/List_of_moves'",
")",
"soup",
"=",
"bs4",
".",
"BeautifulSoup",
"(",
"page",
".",
"text",
")",
"table",
"=",
"soup",
".",
"table",
".",
"table",
"tablerows",
"=",
"[",
"tr",
"for",
"tr",
"in",
"table",
".",
"children",
"if",
"tr",
"!=",
"'\\n'",
"]",
"[",
"1",
":",
"]",
"moves",
"=",
"{",
"}",
"for",
"tr",
"in",
"tablerows",
":",
"cells",
"=",
"tr",
".",
"find_all",
"(",
"'td'",
")",
"move_name",
"=",
"cells",
"[",
"1",
"]",
".",
"get_text",
"(",
")",
".",
"strip",
"(",
"' \\n*'",
")",
".",
"replace",
"(",
"' '",
",",
"'-'",
")",
".",
"lower",
"(",
")",
"move_id",
"=",
"int",
"(",
"cells",
"[",
"0",
"]",
".",
"get_text",
"(",
")",
".",
"strip",
"(",
")",
")",
"move_type",
"=",
"cells",
"[",
"2",
"]",
".",
"get_text",
"(",
")",
".",
"strip",
"(",
")",
"move_ps",
"=",
"cells",
"[",
"3",
"]",
".",
"get_text",
"(",
")",
".",
"strip",
"(",
")",
"moves",
"[",
"move_name",
"]",
"=",
"{",
"'id'",
":",
"move_id",
",",
"'type'",
":",
"move_type",
",",
"'ps'",
":",
"move_ps",
"}",
"srcpath",
"=",
"path",
".",
"dirname",
"(",
"__file__",
")",
"with",
"io",
".",
"open",
"(",
"path",
".",
"join",
"(",
"srcpath",
",",
"'moves.json'",
")",
",",
"'w'",
",",
"encoding",
"=",
"'utf-8'",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"json",
".",
"dumps",
"(",
"moves",
",",
"ensure_ascii",
"=",
"False",
")",
")"
] |
Visit Bulbapedia and pull names and descriptions from the table, 'list of moves.' Save as JSON.
|
[
"Visit",
"Bulbapedia",
"and",
"pull",
"names",
"and",
"descriptions",
"from",
"the",
"table",
"list",
"of",
"moves",
".",
"Save",
"as",
"JSON",
"."
] |
269e9814df074e0df25972fad04539a644d73a3c
|
https://github.com/JonLiuFYI/pkdx/blob/269e9814df074e0df25972fad04539a644d73a3c/pkdx/pkdx/scrape_moves.py#L12-L31
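Usage sketch for the scraper above, assuming the same io / json imports as the module; moves.json is written next to scrape_moves.py, so adjust the path to where the file actually lands. The 'pound' key is a hypothetical example of a lowercased move name:
get_moves()                      # scrapes Bulbapedia and writes moves.json
with io.open('moves.json', encoding='utf-8') as f:
    moves = json.loads(f.read())
print(moves.get('pound'))        # e.g. {'id': ..., 'type': ..., 'ps': ...}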
|
240,647
|
robertchase/ergaleia
|
ergaleia/load_from_path.py
|
load_from_path
|
def load_from_path(path, filetype=None, has_filetype=True):
""" load file content from a file specified as dot-separated
The file is located according to logic in normalize_path,
and the contents are returned. (See Note 1)
Parameters: (see normalize_path)
path - dot-separated path
filetype - optional filetype
has_filetype - if True, treat last dot-delimited token as filetype
Notes:
1. If path is a file-like object, then data is read directly
from path, without trying to open it.
2. Non-string paths are returned immediately (excluding the
case in Note 1).
3. If has_filetype is True, filetype does not have to be specified.
If filetype is specified, has_filetype is ignored, and filetype
must match the last dot-delimited token exactly.
"""
if not isinstance(path, str):
try:
return path.read()
except AttributeError:
return path
path = normalize_path(path, filetype, has_filetype)
with open(path) as data:
return data.read()
|
python
|
def load_from_path(path, filetype=None, has_filetype=True):
""" load file content from a file specified as dot-separated
The file is located according to logic in normalize_path,
and the contents are returned. (See Note 1)
Parameters: (see normalize_path)
path - dot-separated path
filetype - optional filetype
has_filetype - if True, treat last dot-delimited token as filetype
Notes:
1. If path is a file-like object, then data is read directly
from path, without trying to open it.
2. Non-string paths are returned immediately (excluding the
case in Note 1).
3. If has_filetype is True, filetype does not have to be specified.
If filetype is specified, has_filetype is ignored, and filetype
must match the last dot-delimited token exactly.
"""
if not isinstance(path, str):
try:
return path.read()
except AttributeError:
return path
path = normalize_path(path, filetype, has_filetype)
with open(path) as data:
return data.read()
|
[
"def",
"load_from_path",
"(",
"path",
",",
"filetype",
"=",
"None",
",",
"has_filetype",
"=",
"True",
")",
":",
"if",
"not",
"isinstance",
"(",
"path",
",",
"str",
")",
":",
"try",
":",
"return",
"path",
".",
"read",
"(",
")",
"except",
"AttributeError",
":",
"return",
"path",
"path",
"=",
"normalize_path",
"(",
"path",
",",
"filetype",
",",
"has_filetype",
")",
"with",
"open",
"(",
"path",
")",
"as",
"data",
":",
"return",
"data",
".",
"read",
"(",
")"
] |
load file content from a file specified as dot-separated
The file is located according to logic in normalize_path,
and the contents are returned. (See Note 1)
Parameters: (see normalize_path)
path - dot-separated path
filetype - optional filetype
has_filetype - if True, treat last dot-delimited token as filetype
Notes:
1. If path is a file-like object, then data is read directly
from path, without trying to open it.
2. Non-string paths are returned immediately (excluding the
case in Note 1).
3. If has_filetype is True, filetype does not have to be specified.
If filetype is specified, has_filetype is ignored, and filetype
must match the last dot-delimited token exactly.
|
[
"load",
"file",
"content",
"from",
"a",
"file",
"specified",
"as",
"dot",
"-",
"separated"
] |
df8e9a4b18c563022a503faa27e822c9a5755490
|
https://github.com/robertchase/ergaleia/blob/df8e9a4b18c563022a503faa27e822c9a5755490/ergaleia/load_from_path.py#L9-L36
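Usage sketch for the loader above. The dot-separated path is illustrative; per the docstring, the trailing 'txt' token is treated as the filetype. File-like objects are read directly, so the StringIO call below works without touching the filesystem:
text = load_from_path('config.my_settings.txt')   # resolved via normalize_path
from io import StringIO
text = load_from_path(StringIO(u'already open'))  # -> 'already open'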
|
240,648
|
robertchase/ergaleia
|
ergaleia/load_from_path.py
|
load_lines_from_path
|
def load_lines_from_path(path, filetype=None, has_filetype=True):
""" load lines from a file specified as dot-separated
The file is located according to logic in normalize_path,
and a list of lines is returned. (See Note 1)
Parameters: (see normalize_path)
path - dot-separated path
filetype - optional filetype
has_filetype - if True, treat last dot-delimited token as filetype
Notes:
1. If path is a file-like object, then lines are read directly
from path, without trying to open it.
2. Non-string paths are returned immediately (excluding the
case in Note 1).
3. If has_filetype is True, filetype does not have to be specified.
If filetype is specified, has_filetype is ignored, and filetype
must match the last dot-delimited token exactly.
"""
if not isinstance(path, str):
try:
return path.readlines()
except AttributeError:
return path
path = normalize_path(path, filetype)
with open(path) as data:
return data.readlines()
|
python
|
def load_lines_from_path(path, filetype=None, has_filetype=True):
""" load lines from a file specified as dot-separated
The file is located according to logic in normalize_path,
and a list of lines is returned. (See Note 1)
Parameters: (see normalize_path)
path - dot-separated path
filetype - optional filetype
has_filetype - if True, treat last dot-delimited token as filetype
Notes:
1. If path is a file-like object, then lines are read directly
from path, without trying to open it.
2. Non-string paths are returned immediately (excluding the
case in Note 1).
3. If has_filetype is True, filetype does not have to be specified.
If filetype is specified, has_filetype is ignored, and filetype
must match the last dot-delimited token exactly.
"""
if not isinstance(path, str):
try:
return path.readlines()
except AttributeError:
return path
path = normalize_path(path, filetype)
with open(path) as data:
return data.readlines()
|
[
"def",
"load_lines_from_path",
"(",
"path",
",",
"filetype",
"=",
"None",
",",
"has_filetype",
"=",
"True",
")",
":",
"if",
"not",
"isinstance",
"(",
"path",
",",
"str",
")",
":",
"try",
":",
"return",
"path",
".",
"readlines",
"(",
")",
"except",
"AttributeError",
":",
"return",
"path",
"path",
"=",
"normalize_path",
"(",
"path",
",",
"filetype",
")",
"with",
"open",
"(",
"path",
")",
"as",
"data",
":",
"return",
"data",
".",
"readlines",
"(",
")"
] |
load lines from a file specified as dot-separated
The file is located according to logic in normalize_path,
and a list of lines is returned. (See Note 1)
Parameters: (see normalize_path)
path - dot-separated path
filetype - optional filetype
has_filetype - if True, treat last dot-delimited token as filetype
Notes:
1. If path is a file-like object, then lines are read directly
from path, without trying to open it.
2. Non-string paths are returned immediately (excluding the
case in Note 1).
3. If has_filetype is True, filetype does not have to be specified.
If filetype is specified, has_filetype is ignored, and filetype
must match the last dot-delimited token exactly.
|
[
"load",
"lines",
"from",
"a",
"file",
"specified",
"as",
"dot",
"-",
"separated"
] |
df8e9a4b18c563022a503faa27e822c9a5755490
|
https://github.com/robertchase/ergaleia/blob/df8e9a4b18c563022a503faa27e822c9a5755490/ergaleia/load_from_path.py#L39-L66
|
240,649
|
iamFIREcracker/aadbook
|
aadbook/contacts.py
|
Cache.save
|
def save(self):
"""Pickle the addressbook and a timestamp
"""
if self.contacts: # never write an empty addressbook
cache = {'contacts': self.contacts,
'aadbook_cache': CACHE_FORMAT_VERSION}
pickle.dump(cache, open(self._config.cache_filename, 'wb'))
|
python
|
def save(self):
"""Pickle the addressbook and a timestamp
"""
if self.contacts: # never write an empty addressbook
cache = {'contacts': self.contacts,
'aadbook_cache': CACHE_FORMAT_VERSION}
pickle.dump(cache, open(self._config.cache_filename, 'wb'))
|
[
"def",
"save",
"(",
"self",
")",
":",
"if",
"self",
".",
"contacts",
":",
"# never write a empty addressbook",
"cache",
"=",
"{",
"'contacts'",
":",
"self",
".",
"contacts",
",",
"'aadbook_cache'",
":",
"CACHE_FORMAT_VERSION",
"}",
"pickle",
".",
"dump",
"(",
"cache",
",",
"open",
"(",
"self",
".",
"_config",
".",
"cache_filename",
",",
"'wb'",
")",
")"
] |
Pickle the addressbook and a timestamp
|
[
"Pickle",
"the",
"addressbook",
"and",
"a",
"timestamp"
] |
d191e9d36a2309449ab91c1728eaf5901b7ef91c
|
https://github.com/iamFIREcracker/aadbook/blob/d191e9d36a2309449ab91c1728eaf5901b7ef91c/aadbook/contacts.py#L52-L59
|
240,650
|
ArabellaTech/aa-intercom
|
aa_intercom/mixins.py
|
IntercomUserMixin.get_intercom_data
|
def get_intercom_data(self):
"""Specify the user data sent to Intercom API"""
return {
"user_id": self.intercom_id,
"email": self.email,
"name": self.get_full_name(),
"last_request_at": self.last_login.strftime("%s") if self.last_login else "",
"created_at": self.date_joined.strftime("%s"),
"custom_attributes": {
"is_admin": self.is_superuser
}
}
|
python
|
def get_intercom_data(self):
"""Specify the user data sent to Intercom API"""
return {
"user_id": self.intercom_id,
"email": self.email,
"name": self.get_full_name(),
"last_request_at": self.last_login.strftime("%s") if self.last_login else "",
"created_at": self.date_joined.strftime("%s"),
"custom_attributes": {
"is_admin": self.is_superuser
}
}
|
[
"def",
"get_intercom_data",
"(",
"self",
")",
":",
"return",
"{",
"\"user_id\"",
":",
"self",
".",
"intercom_id",
",",
"\"email\"",
":",
"self",
".",
"email",
",",
"\"name\"",
":",
"self",
".",
"get_full_name",
"(",
")",
",",
"\"last_request_at\"",
":",
"self",
".",
"last_login",
".",
"strftime",
"(",
"\"%s\"",
")",
"if",
"self",
".",
"last_login",
"else",
"\"\"",
",",
"\"created_at\"",
":",
"self",
".",
"date_joined",
".",
"strftime",
"(",
"\"%s\"",
")",
",",
"\"custom_attributes\"",
":",
"{",
"\"is_admin\"",
":",
"self",
".",
"is_superuser",
"}",
"}"
] |
Specify the user data sent to Intercom API
|
[
"Specify",
"the",
"user",
"data",
"sent",
"to",
"Intercom",
"API"
] |
f7e2ab63967529660f9c2fe4f1d0bf3cec1502c2
|
https://github.com/ArabellaTech/aa-intercom/blob/f7e2ab63967529660f9c2fe4f1d0bf3cec1502c2/aa_intercom/mixins.py#L17-L28
|
240,651
|
noobermin/lspreader
|
lspreader/lspreader.py
|
get_list
|
def get_list(file,fmt):
'''makes a list out of the fmt from the LspOutput f using the format
i for int
f for float
d for double
s for string'''
out=[]
for i in fmt:
if i == 'i':
out.append(get_int(file));
elif i == 'f' or i == 'd':
out.append(get_float(file));
elif i == 's':
out.append(get_str(file));
else:
raise ValueError("Unexpected flag '{}'".format(i));
return out;
|
python
|
def get_list(file,fmt):
'''makes a list out of the fmt from the LspOutput f using the format
i for int
f for float
d for double
s for string'''
out=[]
for i in fmt:
if i == 'i':
out.append(get_int(file));
elif i == 'f' or i == 'd':
out.append(get_float(file));
elif i == 's':
out.append(get_str(file));
else:
raise ValueError("Unexpected flag '{}'".format(i));
return out;
|
[
"def",
"get_list",
"(",
"file",
",",
"fmt",
")",
":",
"out",
"=",
"[",
"]",
"for",
"i",
"in",
"fmt",
":",
"if",
"i",
"==",
"'i'",
":",
"out",
".",
"append",
"(",
"get_int",
"(",
"file",
")",
")",
"elif",
"i",
"==",
"'f'",
"or",
"i",
"==",
"'d'",
":",
"out",
".",
"append",
"(",
"get_float",
"(",
"file",
")",
")",
"elif",
"i",
"==",
"'s'",
":",
"out",
".",
"append",
"(",
"get_str",
"(",
"file",
")",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"Unexpected flag '{}'\"",
".",
"format",
"(",
"i",
")",
")",
"return",
"out"
] |
makes a list out of the fmt from the LspOutput f using the format
i for int
f for float
d for double
s for string
|
[
"makes",
"a",
"list",
"out",
"of",
"the",
"fmt",
"from",
"the",
"LspOutput",
"f",
"using",
"the",
"format",
"i",
"for",
"int",
"f",
"for",
"float",
"d",
"for",
"double",
"s",
"for",
"string"
] |
903b9d6427513b07986ffacf76cbca54e18d8be6
|
https://github.com/noobermin/lspreader/blob/903b9d6427513b07986ffacf76cbca54e18d8be6/lspreader/lspreader.py#L43-L59
|
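A minimal usage sketch for get_list above. It assumes lspreader is importable and that the open file really contains LSP-encoded values; the path and the format string are illustrative only.

from lspreader.lspreader import get_list  # assumes the package is on sys.path

with open('example.p4', 'rb') as f:       # hypothetical LSP dump file
    values = get_list(f, 'iif')           # read an int, an int, then a float, in that order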
240,652
|
noobermin/lspreader
|
lspreader/lspreader.py
|
flds_firstsort
|
def flds_firstsort(d):
'''
Perform a lexsort and return the sort indices and shape as a tuple.
'''
shape = [ len( np.unique(d[l]) )
for l in ['xs', 'ys', 'zs'] ];
si = np.lexsort((d['z'],d['y'],d['x']));
return si,shape;
|
python
|
def flds_firstsort(d):
'''
Perform a lexsort and return the sort indices and shape as a tuple.
'''
shape = [ len( np.unique(d[l]) )
for l in ['xs', 'ys', 'zs'] ];
si = np.lexsort((d['z'],d['y'],d['x']));
return si,shape;
|
[
"def",
"flds_firstsort",
"(",
"d",
")",
":",
"shape",
"=",
"[",
"len",
"(",
"np",
".",
"unique",
"(",
"d",
"[",
"l",
"]",
")",
")",
"for",
"l",
"in",
"[",
"'xs'",
",",
"'ys'",
",",
"'zs'",
"]",
"]",
"si",
"=",
"np",
".",
"lexsort",
"(",
"(",
"d",
"[",
"'z'",
"]",
",",
"d",
"[",
"'y'",
"]",
",",
"d",
"[",
"'x'",
"]",
")",
")",
"return",
"si",
",",
"shape"
] |
Perform a lexsort and return the sort indices and shape as a tuple.
|
[
"Perform",
"a",
"lexsort",
"and",
"return",
"the",
"sort",
"indices",
"and",
"shape",
"as",
"a",
"tuple",
"."
] |
903b9d6427513b07986ffacf76cbca54e18d8be6
|
https://github.com/noobermin/lspreader/blob/903b9d6427513b07986ffacf76cbca54e18d8be6/lspreader/lspreader.py#L155-L162
|
240,653
|
noobermin/lspreader
|
lspreader/lspreader.py
|
flds_sort
|
def flds_sort(d,s):
'''
Sort based on position. Sort with s as a tuple of the sort
indices and shape from first sort.
Parameters:
-----------
d -- the flds/sclr data
s -- (si, shape) sorting and shaping data from firstsort.
'''
labels = [ key for key in d.keys()
if key not in ['t', 'xs', 'ys', 'zs', 'fd', 'sd'] ];
si,shape = s;
for l in labels:
d[l] = d[l][si].reshape(shape);
d[l] = np.squeeze(d[l]);
return d;
|
python
|
def flds_sort(d,s):
'''
Sort based on position. Sort with s as a tuple of the sort
indices and shape from first sort.
Parameters:
-----------
d -- the flds/sclr data
s -- (si, shape) sorting and shaping data from firstsort.
'''
labels = [ key for key in d.keys()
if key not in ['t', 'xs', 'ys', 'zs', 'fd', 'sd'] ];
si,shape = s;
for l in labels:
d[l] = d[l][si].reshape(shape);
d[l] = np.squeeze(d[l]);
return d;
|
[
"def",
"flds_sort",
"(",
"d",
",",
"s",
")",
":",
"labels",
"=",
"[",
"key",
"for",
"key",
"in",
"d",
".",
"keys",
"(",
")",
"if",
"key",
"not",
"in",
"[",
"'t'",
",",
"'xs'",
",",
"'ys'",
",",
"'zs'",
",",
"'fd'",
",",
"'sd'",
"]",
"]",
"si",
",",
"shape",
"=",
"s",
"for",
"l",
"in",
"labels",
":",
"d",
"[",
"l",
"]",
"=",
"d",
"[",
"l",
"]",
"[",
"si",
"]",
".",
"reshape",
"(",
"shape",
")",
"d",
"[",
"l",
"]",
"=",
"np",
".",
"squeeze",
"(",
"d",
"[",
"l",
"]",
")",
"return",
"d"
] |
Sort based on position. Sort with s as a tuple of the sort
indices and shape from first sort.
Parameters:
-----------
d -- the flds/sclr data
s -- (si, shape) sorting and shaping data from firstsort.
|
[
"Sort",
"based",
"on",
"position",
".",
"Sort",
"with",
"s",
"as",
"a",
"tuple",
"of",
"the",
"sort",
"indices",
"and",
"shape",
"from",
"first",
"sort",
"."
] |
903b9d6427513b07986ffacf76cbca54e18d8be6
|
https://github.com/noobermin/lspreader/blob/903b9d6427513b07986ffacf76cbca54e18d8be6/lspreader/lspreader.py#L163-L180
|
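flds_firstsort and flds_sort above are meant to be used as a pair: the (indices, shape) tuple from the first dump is reused for later dumps on the same grid. A small self-contained sketch with fabricated data follows; it assumes lspreader and numpy are importable, and the 2x2x1 grid and the 'Ex' quantity are illustrative.

import numpy as np
from lspreader.lspreader import flds_firstsort, flds_sort

# Fabricate a tiny flds-style dict on a 2x2x1 grid.
x, y, z = np.meshgrid([0.0, 1.0], [0.0, 1.0], [0.0], indexing='ij')
d = {'x': x.ravel(), 'y': y.ravel(), 'z': z.ravel(),
     'xs': np.array([0.0, 1.0]), 'ys': np.array([0.0, 1.0]), 'zs': np.array([0.0]),
     'Ex': np.arange(4, dtype=float)}

s = flds_firstsort(d)     # compute (sort indices, grid shape) once
d = flds_sort(d, s)       # later dumps with the same grid can reuse s directly
print(d['Ex'].shape)      # -> (2, 2) after the reshape and squeeze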
240,654
|
noobermin/lspreader
|
lspreader/lspreader.py
|
read
|
def read(fname,**kw):
'''
Reads an lsp output file and returns a raw dump of data,
sectioned into quantities either as an dictionary or a typed numpy array.
Parameters:
-----------
fname -- filename of thing to read
Keyword Arguments:
------------------
vprint -- Verbose printer. Used in scripts
override -- (type, start) => A tuple of a dump type and a place to start
in the passed file, useful to attempting to read semicorrupted
files.
gzip -- Read as a gzip file.
flds/sclr Specific Arguments:
-----------------------------
var -- list of quantities to be read. For fields, this can consist
of strings that include vector components, e.g., 'Ex'. If
None (default), read all quantities.
keep_edges -- If set to truthy, then don't remove the edges from domains before
concatenation and don't reshape the flds data.
sort -- If not None, sort using these indices, useful for avoiding
resorting. If True and not an ndarray, just sort.
first_sort -- If truthy, sort, and return the sort data for future flds
that should have the same shape.
keep_xs -- Keep the xs's, that is, the grid information. Usually redundant
with x,y,z returned.
return_array -- If set to truthy, then try to return a numpy array with a dtype.
Requires of course that the quantities have the same shape.
'''
if test(kw,'gzip') and kw['gzip'] == 'guess':
kw['gzip'] = re.search(r'\.gz$', fname) is not None;
openf = gzip.open if test(kw, 'gzip') else open;
with openf(fname,'rb') as file:
if test(kw,'override'):
dump, start = kw['override'];
file.seek(start);
header = {'dump_type': dump};
if not test(kw, 'var') and 2 <= header['dump_type'] <= 3 :
raise ValueError(
"If you want to force to read as a scalar, you need to supply the quantities"
);
else:
header = get_header(file);
vprint = kw['vprint'] if test(kw, 'vprint') else lambda s: None;
if 2 <= header['dump_type'] <= 3 :
if not test(kw, 'var'):
var=[i[0] for i in header['quantities']];
else:
var=kw['var'];
keep_edges = test(kw, 'keep_edges');
first_sort = test(kw, 'first_sort');
if test(kw,'sort'):
sort = kw['sort']
else:
sort = None;
keep_xs = test(kw, 'keep_xs');
return_array = test(kw, 'return_array');
readers = {
1: lambda: read_particles(file, header),
2: lambda: read_flds(
file,header,var,vprint,
keep_edges=keep_edges,
first_sort=first_sort,
sort=sort,
keep_xs=keep_xs,
return_array=return_array),
3: lambda: read_flds(
file,header,var, vprint,
keep_edges=keep_edges,
first_sort=first_sort,
sort=sort,
keep_xs=keep_xs,
return_array=return_array,
vector=False),
6: lambda: read_movie(file, header),
10:lambda: read_pext(file,header)
};
try:
d = readers[header['dump_type']]();
except KeyError:
raise NotImplementedError("Other file types not implemented yet!");
return d;
|
python
|
def read(fname,**kw):
'''
Reads an lsp output file and returns a raw dump of data,
sectioned into quantities either as an dictionary or a typed numpy array.
Parameters:
-----------
fname -- filename of thing to read
Keyword Arguments:
------------------
vprint -- Verbose printer. Used in scripts
override -- (type, start) => A tuple of a dump type and a place to start
in the passed file, useful to attempting to read semicorrupted
files.
gzip -- Read as a gzip file.
flds/sclr Specific Arguments:
-----------------------------
var -- list of quantities to be read. For fields, this can consist
of strings that include vector components, e.g., 'Ex'. If
None (default), read all quantities.
keep_edges -- If set to truthy, then don't remove the edges from domains before
concatenation and don't reshape the flds data.
sort -- If not None, sort using these indices, useful for avoiding
resorting. If True and not an ndarray, just sort.
first_sort -- If truthy, sort, and return the sort data for future flds
that should have the same shape.
keep_xs -- Keep the xs's, that is, the grid information. Usually redundant
with x,y,z returned.
return_array -- If set to truthy, then try to return a numpy array with a dtype.
Requires of course that the quantities have the same shape.
'''
if test(kw,'gzip') and kw['gzip'] == 'guess':
kw['gzip'] = re.search(r'\.gz$', fname) is not None;
openf = gzip.open if test(kw, 'gzip') else open;
with openf(fname,'rb') as file:
if test(kw,'override'):
dump, start = kw['override'];
file.seek(start);
header = {'dump_type': dump};
if not test(kw, 'var') and 2 <= header['dump_type'] <= 3 :
raise ValueError(
"If you want to force to read as a scalar, you need to supply the quantities"
);
else:
header = get_header(file);
vprint = kw['vprint'] if test(kw, 'vprint') else lambda s: None;
if 2 <= header['dump_type'] <= 3 :
if not test(kw, 'var'):
var=[i[0] for i in header['quantities']];
else:
var=kw['var'];
keep_edges = test(kw, 'keep_edges');
first_sort = test(kw, 'first_sort');
if test(kw,'sort'):
sort = kw['sort']
else:
sort = None;
keep_xs = test(kw, 'keep_xs');
return_array = test(kw, 'return_array');
readers = {
1: lambda: read_particles(file, header),
2: lambda: read_flds(
file,header,var,vprint,
keep_edges=keep_edges,
first_sort=first_sort,
sort=sort,
keep_xs=keep_xs,
return_array=return_array),
3: lambda: read_flds(
file,header,var, vprint,
keep_edges=keep_edges,
first_sort=first_sort,
sort=sort,
keep_xs=keep_xs,
return_array=return_array,
vector=False),
6: lambda: read_movie(file, header),
10:lambda: read_pext(file,header)
};
try:
d = readers[header['dump_type']]();
except KeyError:
raise NotImplementedError("Other file types not implemented yet!");
return d;
|
[
"def",
"read",
"(",
"fname",
",",
"*",
"*",
"kw",
")",
":",
"if",
"test",
"(",
"kw",
",",
"'gzip'",
")",
"and",
"kw",
"[",
"'gzip'",
"]",
"==",
"'guess'",
":",
"kw",
"[",
"'gzip'",
"]",
"=",
"re",
".",
"search",
"(",
"r'\\.gz$'",
",",
"fname",
")",
"is",
"not",
"None",
"openf",
"=",
"gzip",
".",
"open",
"if",
"test",
"(",
"kw",
",",
"'gzip'",
")",
"else",
"open",
"with",
"openf",
"(",
"fname",
",",
"'rb'",
")",
"as",
"file",
":",
"if",
"test",
"(",
"kw",
",",
"'override'",
")",
":",
"dump",
",",
"start",
"=",
"kw",
"[",
"'override'",
"]",
"file",
".",
"seek",
"(",
"start",
")",
"header",
"=",
"{",
"'dump_type'",
":",
"dump",
"}",
"if",
"not",
"test",
"(",
"kw",
",",
"'var'",
")",
"and",
"2",
"<=",
"header",
"[",
"'dump_type'",
"]",
"<=",
"3",
":",
"raise",
"ValueError",
"(",
"\"If you want to force to read as a scalar, you need to supply the quantities\"",
")",
"else",
":",
"header",
"=",
"get_header",
"(",
"file",
")",
"vprint",
"=",
"kw",
"[",
"'vprint'",
"]",
"if",
"test",
"(",
"kw",
",",
"'vprint'",
")",
"else",
"lambda",
"s",
":",
"None",
"if",
"2",
"<=",
"header",
"[",
"'dump_type'",
"]",
"<=",
"3",
":",
"if",
"not",
"test",
"(",
"kw",
",",
"'var'",
")",
":",
"var",
"=",
"[",
"i",
"[",
"0",
"]",
"for",
"i",
"in",
"header",
"[",
"'quantities'",
"]",
"]",
"else",
":",
"var",
"=",
"kw",
"[",
"'var'",
"]",
"keep_edges",
"=",
"test",
"(",
"kw",
",",
"'keep_edges'",
")",
"first_sort",
"=",
"test",
"(",
"kw",
",",
"'first_sort'",
")",
"if",
"test",
"(",
"kw",
",",
"'sort'",
")",
":",
"sort",
"=",
"kw",
"[",
"'sort'",
"]",
"else",
":",
"sort",
"=",
"None",
"keep_xs",
"=",
"test",
"(",
"kw",
",",
"'keep_xs'",
")",
"return_array",
"=",
"test",
"(",
"kw",
",",
"'return_array'",
")",
"readers",
"=",
"{",
"1",
":",
"lambda",
":",
"read_particles",
"(",
"file",
",",
"header",
")",
",",
"2",
":",
"lambda",
":",
"read_flds",
"(",
"file",
",",
"header",
",",
"var",
",",
"vprint",
",",
"keep_edges",
"=",
"keep_edges",
",",
"first_sort",
"=",
"first_sort",
",",
"sort",
"=",
"sort",
",",
"keep_xs",
"=",
"keep_xs",
",",
"return_array",
"=",
"return_array",
")",
",",
"3",
":",
"lambda",
":",
"read_flds",
"(",
"file",
",",
"header",
",",
"var",
",",
"vprint",
",",
"keep_edges",
"=",
"keep_edges",
",",
"first_sort",
"=",
"first_sort",
",",
"sort",
"=",
"sort",
",",
"keep_xs",
"=",
"keep_xs",
",",
"return_array",
"=",
"return_array",
",",
"vector",
"=",
"False",
")",
",",
"6",
":",
"lambda",
":",
"read_movie",
"(",
"file",
",",
"header",
")",
",",
"10",
":",
"lambda",
":",
"read_pext",
"(",
"file",
",",
"header",
")",
"}",
"try",
":",
"d",
"=",
"readers",
"[",
"header",
"[",
"'dump_type'",
"]",
"]",
"(",
")",
"except",
"KeyError",
":",
"raise",
"NotImplementedError",
"(",
"\"Other file types not implemented yet!\"",
")",
"return",
"d"
] |
Reads an lsp output file and returns a raw dump of data,
sectioned into quantities either as an dictionary or a typed numpy array.
Parameters:
-----------
fname -- filename of thing to read
Keyword Arguments:
------------------
vprint -- Verbose printer. Used in scripts
override -- (type, start) => A tuple of a dump type and a place to start
in the passed file, useful to attempting to read semicorrupted
files.
gzip -- Read as a gzip file.
flds/sclr Specific Arguments:
-----------------------------
var -- list of quantities to be read. For fields, this can consist
of strings that include vector components, e.g., 'Ex'. If
None (default), read all quantities.
keep_edges -- If set to truthy, then don't remove the edges from domains before
concatenation and don't reshape the flds data.
sort -- If not None, sort using these indices, useful for avoiding
resorting. If True and not an ndarray, just sort.
first_sort -- If truthy, sort, and return the sort data for future flds
that should have the same shape.
keep_xs -- Keep the xs's, that is, the grid information. Usually redundant
with x,y,z returned.
return_array -- If set to truthy, then try to return a numpy array with a dtype.
Requires of course that the quantities have the same shape.
|
[
"Reads",
"an",
"lsp",
"output",
"file",
"and",
"returns",
"a",
"raw",
"dump",
"of",
"data",
"sectioned",
"into",
"quantities",
"either",
"as",
"an",
"dictionary",
"or",
"a",
"typed",
"numpy",
"array",
"."
] |
903b9d6427513b07986ffacf76cbca54e18d8be6
|
https://github.com/noobermin/lspreader/blob/903b9d6427513b07986ffacf76cbca54e18d8be6/lspreader/lspreader.py#L319-L407
|
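A usage sketch for read() above, using only keyword arguments named in its docstring. File names and quantity names are illustrative, and the sketch assumes lspreader is importable.

from lspreader.lspreader import read

flds = read('flds0.p4.gz', gzip='guess', var=['Ex', 'Ey'], first_sort=True)
pext = read('pext1.p4')   # the dump type is taken from the file header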
240,655
|
decryptus/httpdis
|
httpdis/httpdis.py
|
sigterm_handler
|
def sigterm_handler(signum, stack_frame):
"""
Just tell the server to exit.
WARNING: There are race conditions, for example with TimeoutSocket.accept.
We don't care: the user can just rekill the process after like 1 sec. if
the first kill did not work.
"""
# pylint: disable-msg=W0613
global _KILLED
for name, cmd in _COMMANDS.iteritems():
if cmd.at_stop:
LOG.info("at_stop: %r", name)
cmd.at_stop()
_KILLED = True
if _HTTP_SERVER:
_HTTP_SERVER.kill()
_HTTP_SERVER.server_close()
|
python
|
def sigterm_handler(signum, stack_frame):
"""
Just tell the server to exit.
WARNING: There are race conditions, for example with TimeoutSocket.accept.
We don't care: the user can just rekill the process after like 1 sec. if
the first kill did not work.
"""
# pylint: disable-msg=W0613
global _KILLED
for name, cmd in _COMMANDS.iteritems():
if cmd.at_stop:
LOG.info("at_stop: %r", name)
cmd.at_stop()
_KILLED = True
if _HTTP_SERVER:
_HTTP_SERVER.kill()
_HTTP_SERVER.server_close()
|
[
"def",
"sigterm_handler",
"(",
"signum",
",",
"stack_frame",
")",
":",
"# pylint: disable-msg=W0613",
"global",
"_KILLED",
"for",
"name",
",",
"cmd",
"in",
"_COMMANDS",
".",
"iteritems",
"(",
")",
":",
"if",
"cmd",
".",
"at_stop",
":",
"LOG",
".",
"info",
"(",
"\"at_stop: %r\"",
",",
"name",
")",
"cmd",
".",
"at_stop",
"(",
")",
"_KILLED",
"=",
"True",
"if",
"_HTTP_SERVER",
":",
"_HTTP_SERVER",
".",
"kill",
"(",
")",
"_HTTP_SERVER",
".",
"server_close",
"(",
")"
] |
Just tell the server to exit.
WARNING: There are race conditions, for example with TimeoutSocket.accept.
We don't care: the user can just rekill the process after like 1 sec. if
the first kill did not work.
|
[
"Just",
"tell",
"the",
"server",
"to",
"exit",
"."
] |
5d198cdc5558f416634602689b3df2c8aeb34984
|
https://github.com/decryptus/httpdis/blob/5d198cdc5558f416634602689b3df2c8aeb34984/httpdis/httpdis.py#L1114-L1134
|
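The handler above follows a common flag-plus-handler shutdown pattern. Below is a stripped-down, self-contained sketch of just that pattern; the real handler also runs the registered at_stop hooks and closes the HTTP server.

import signal

_KILLED = False

def sigterm_handler(signum, stack_frame):
    # flip the flag; the serve loop checks it and exits
    global _KILLED
    _KILLED = True

signal.signal(signal.SIGTERM, sigterm_handler)  # registration, as httpdis' init() does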
240,656
|
decryptus/httpdis
|
httpdis/httpdis.py
|
run
|
def run(options, http_req_handler = HttpReqHandler):
"""
Start and execute the server
"""
# pylint: disable-msg=W0613
global _HTTP_SERVER
for x in ('server_version', 'sys_version'):
if _OPTIONS.get(x) is not None:
setattr(http_req_handler, x, _OPTIONS[x])
_HTTP_SERVER = threading_tcp_server.KillableThreadingHTTPServer(
_OPTIONS,
(_OPTIONS['listen_addr'], _OPTIONS['listen_port']),
http_req_handler,
name = "httpdis")
for name, cmd in _COMMANDS.iteritems():
if cmd.at_start:
LOG.info("at_start: %r", name)
cmd.at_start(options)
LOG.info("will now serve")
while not _KILLED:
try:
_HTTP_SERVER.serve_until_killed()
except (socket.error, select.error), why:
if errno.EINTR == why[0]:
LOG.debug("interrupted system call")
elif errno.EBADF == why[0] and _KILLED:
LOG.debug("server close")
else:
raise
LOG.info("exiting")
|
python
|
def run(options, http_req_handler = HttpReqHandler):
"""
Start and execute the server
"""
# pylint: disable-msg=W0613
global _HTTP_SERVER
for x in ('server_version', 'sys_version'):
if _OPTIONS.get(x) is not None:
setattr(http_req_handler, x, _OPTIONS[x])
_HTTP_SERVER = threading_tcp_server.KillableThreadingHTTPServer(
_OPTIONS,
(_OPTIONS['listen_addr'], _OPTIONS['listen_port']),
http_req_handler,
name = "httpdis")
for name, cmd in _COMMANDS.iteritems():
if cmd.at_start:
LOG.info("at_start: %r", name)
cmd.at_start(options)
LOG.info("will now serve")
while not _KILLED:
try:
_HTTP_SERVER.serve_until_killed()
except (socket.error, select.error), why:
if errno.EINTR == why[0]:
LOG.debug("interrupted system call")
elif errno.EBADF == why[0] and _KILLED:
LOG.debug("server close")
else:
raise
LOG.info("exiting")
|
[
"def",
"run",
"(",
"options",
",",
"http_req_handler",
"=",
"HttpReqHandler",
")",
":",
"# pylint: disable-msg=W0613",
"global",
"_HTTP_SERVER",
"for",
"x",
"in",
"(",
"'server_version'",
",",
"'sys_version'",
")",
":",
"if",
"_OPTIONS",
".",
"get",
"(",
"x",
")",
"is",
"not",
"None",
":",
"setattr",
"(",
"http_req_handler",
",",
"x",
",",
"_OPTIONS",
"[",
"x",
"]",
")",
"_HTTP_SERVER",
"=",
"threading_tcp_server",
".",
"KillableThreadingHTTPServer",
"(",
"_OPTIONS",
",",
"(",
"_OPTIONS",
"[",
"'listen_addr'",
"]",
",",
"_OPTIONS",
"[",
"'listen_port'",
"]",
")",
",",
"http_req_handler",
",",
"name",
"=",
"\"httpdis\"",
")",
"for",
"name",
",",
"cmd",
"in",
"_COMMANDS",
".",
"iteritems",
"(",
")",
":",
"if",
"cmd",
".",
"at_start",
":",
"LOG",
".",
"info",
"(",
"\"at_start: %r\"",
",",
"name",
")",
"cmd",
".",
"at_start",
"(",
"options",
")",
"LOG",
".",
"info",
"(",
"\"will now serve\"",
")",
"while",
"not",
"_KILLED",
":",
"try",
":",
"_HTTP_SERVER",
".",
"serve_until_killed",
"(",
")",
"except",
"(",
"socket",
".",
"error",
",",
"select",
".",
"error",
")",
",",
"why",
":",
"if",
"errno",
".",
"EINTR",
"==",
"why",
"[",
"0",
"]",
":",
"LOG",
".",
"debug",
"(",
"\"interrupted system call\"",
")",
"elif",
"errno",
".",
"EBADF",
"==",
"why",
"[",
"0",
"]",
"and",
"_KILLED",
":",
"LOG",
".",
"debug",
"(",
"\"server close\"",
")",
"else",
":",
"raise",
"LOG",
".",
"info",
"(",
"\"exiting\"",
")"
] |
Start and execute the server
|
[
"Start",
"and",
"execute",
"the",
"server"
] |
5d198cdc5558f416634602689b3df2c8aeb34984
|
https://github.com/decryptus/httpdis/blob/5d198cdc5558f416634602689b3df2c8aeb34984/httpdis/httpdis.py#L1139-L1173
|
240,657
|
decryptus/httpdis
|
httpdis/httpdis.py
|
init
|
def init(options, use_sigterm_handler=True):
"""
Must be called just after registration, before anything else
"""
# pylint: disable-msg=W0613
global _AUTH, _OPTIONS
if isinstance(options, dict):
_OPTIONS = DEFAULT_OPTIONS.copy()
_OPTIONS.update(options)
else:
for optname, optvalue in DEFAULT_OPTIONS.iteritems():
if hasattr(options, optname):
_OPTIONS[optname] = getattr(options, optname)
else:
_OPTIONS[optname] = optvalue
if _OPTIONS['testmethods']:
def fortytwo(request):
"test GET method"
return 42
def ping(request):
"test POST method"
return request.payload_params()
register(fortytwo, 'GET')
register(ping, 'POST')
if _OPTIONS['auth_basic_file']:
_AUTH = HttpAuthentication(_OPTIONS['auth_basic_file'],
realm = _OPTIONS['auth_basic']).parse_file()
for name, cmd in _COMMANDS.iteritems():
if cmd.safe_init:
LOG.info("safe_init: %r", name)
cmd.safe_init(_OPTIONS)
if use_sigterm_handler:
# signal.signal(signal.SIGHUP, lambda *x: None) # XXX
signal.signal(signal.SIGTERM, sigterm_handler)
signal.signal(signal.SIGINT, sigterm_handler)
|
python
|
def init(options, use_sigterm_handler=True):
"""
Must be called just after registration, before anything else
"""
# pylint: disable-msg=W0613
global _AUTH, _OPTIONS
if isinstance(options, dict):
_OPTIONS = DEFAULT_OPTIONS.copy()
_OPTIONS.update(options)
else:
for optname, optvalue in DEFAULT_OPTIONS.iteritems():
if hasattr(options, optname):
_OPTIONS[optname] = getattr(options, optname)
else:
_OPTIONS[optname] = optvalue
if _OPTIONS['testmethods']:
def fortytwo(request):
"test GET method"
return 42
def ping(request):
"test POST method"
return request.payload_params()
register(fortytwo, 'GET')
register(ping, 'POST')
if _OPTIONS['auth_basic_file']:
_AUTH = HttpAuthentication(_OPTIONS['auth_basic_file'],
realm = _OPTIONS['auth_basic']).parse_file()
for name, cmd in _COMMANDS.iteritems():
if cmd.safe_init:
LOG.info("safe_init: %r", name)
cmd.safe_init(_OPTIONS)
if use_sigterm_handler:
# signal.signal(signal.SIGHUP, lambda *x: None) # XXX
signal.signal(signal.SIGTERM, sigterm_handler)
signal.signal(signal.SIGINT, sigterm_handler)
|
[
"def",
"init",
"(",
"options",
",",
"use_sigterm_handler",
"=",
"True",
")",
":",
"# pylint: disable-msg=W0613",
"global",
"_AUTH",
",",
"_OPTIONS",
"if",
"isinstance",
"(",
"options",
",",
"dict",
")",
":",
"_OPTIONS",
"=",
"DEFAULT_OPTIONS",
".",
"copy",
"(",
")",
"_OPTIONS",
".",
"update",
"(",
"options",
")",
"else",
":",
"for",
"optname",
",",
"optvalue",
"in",
"DEFAULT_OPTIONS",
".",
"iteritems",
"(",
")",
":",
"if",
"hasattr",
"(",
"options",
",",
"optname",
")",
":",
"_OPTIONS",
"[",
"optname",
"]",
"=",
"getattr",
"(",
"options",
",",
"optname",
")",
"else",
":",
"_OPTIONS",
"[",
"optname",
"]",
"=",
"optvalue",
"if",
"_OPTIONS",
"[",
"'testmethods'",
"]",
":",
"def",
"fortytwo",
"(",
"request",
")",
":",
"\"test GET method\"",
"return",
"42",
"def",
"ping",
"(",
"request",
")",
":",
"\"test POST method\"",
"return",
"request",
".",
"payload_params",
"(",
")",
"register",
"(",
"fortytwo",
",",
"'GET'",
")",
"register",
"(",
"ping",
",",
"'POST'",
")",
"if",
"_OPTIONS",
"[",
"'auth_basic_file'",
"]",
":",
"_AUTH",
"=",
"HttpAuthentication",
"(",
"_OPTIONS",
"[",
"'auth_basic_file'",
"]",
",",
"realm",
"=",
"_OPTIONS",
"[",
"'auth_basic'",
"]",
")",
".",
"parse_file",
"(",
")",
"for",
"name",
",",
"cmd",
"in",
"_COMMANDS",
".",
"iteritems",
"(",
")",
":",
"if",
"cmd",
".",
"safe_init",
":",
"LOG",
".",
"info",
"(",
"\"safe_init: %r\"",
",",
"name",
")",
"cmd",
".",
"safe_init",
"(",
"_OPTIONS",
")",
"if",
"use_sigterm_handler",
":",
"# signal.signal(signal.SIGHUP, lambda *x: None) # XXX",
"signal",
".",
"signal",
"(",
"signal",
".",
"SIGTERM",
",",
"sigterm_handler",
")",
"signal",
".",
"signal",
"(",
"signal",
".",
"SIGINT",
",",
"sigterm_handler",
")"
] |
Must be called just after registration, before anything else
|
[
"Must",
"be",
"called",
"just",
"after",
"registration",
"before",
"anything",
"else"
] |
5d198cdc5558f416634602689b3df2c8aeb34984
|
https://github.com/decryptus/httpdis/blob/5d198cdc5558f416634602689b3df2c8aeb34984/httpdis/httpdis.py#L1178-L1217
|
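Taken together, the register/init/run records describe the expected call order. A hedged sketch follows; the option keys shown are the ones referenced by run(), the remaining defaults come from DEFAULT_OPTIONS (not shown here), and the handler body is illustrative.

import httpdis.httpdis as httpdis  # assumes httpdis is importable

def hello(request):
    "toy GET command"
    return {'hello': 'world'}

options = {'listen_addr': '127.0.0.1', 'listen_port': 8670}  # illustrative values
httpdis.register(hello, 'GET')   # register commands first
httpdis.init(options)            # then init() ...
httpdis.run(options)             # ... then run(), which serves until SIGTERM/SIGINT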
240,658
|
decryptus/httpdis
|
httpdis/httpdis.py
|
HttpReqError.report
|
def report(self, req_handler):
"Send a response corresponding to this error to the client"
if self.exc:
req_handler.send_exception(self.code, self.exc, self.headers)
return
text = (self.text
or BaseHTTPRequestHandler.responses[self.code][1]
or "Unknown error")
getattr(req_handler, "send_error_%s" % self.ctype, 'send_error_msg')(self.code, text, self.headers)
|
python
|
def report(self, req_handler):
"Send a response corresponding to this error to the client"
if self.exc:
req_handler.send_exception(self.code, self.exc, self.headers)
return
text = (self.text
or BaseHTTPRequestHandler.responses[self.code][1]
or "Unknown error")
getattr(req_handler, "send_error_%s" % self.ctype, 'send_error_msg')(self.code, text, self.headers)
|
[
"def",
"report",
"(",
"self",
",",
"req_handler",
")",
":",
"if",
"self",
".",
"exc",
":",
"req_handler",
".",
"send_exception",
"(",
"self",
".",
"code",
",",
"self",
".",
"exc",
",",
"self",
".",
"headers",
")",
"return",
"text",
"=",
"(",
"self",
".",
"text",
"or",
"BaseHTTPRequestHandler",
".",
"responses",
"[",
"self",
".",
"code",
"]",
"[",
"1",
"]",
"or",
"\"Unknown error\"",
")",
"getattr",
"(",
"req_handler",
",",
"\"send_error_%s\"",
"%",
"self",
".",
"ctype",
",",
"'send_error_msg'",
")",
"(",
"self",
".",
"code",
",",
"text",
",",
"self",
".",
"headers",
")"
] |
Send a response corresponding to this error to the client
|
[
"Send",
"a",
"response",
"corresponding",
"to",
"this",
"error",
"to",
"the",
"client"
] |
5d198cdc5558f416634602689b3df2c8aeb34984
|
https://github.com/decryptus/httpdis/blob/5d198cdc5558f416634602689b3df2c8aeb34984/httpdis/httpdis.py#L224-L234
|
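report() is the receiving half of a raise/report pair: command code raises HttpReqError, and common_req() (a few records below) catches it and calls e.report(req_handler). The constructor call in this sketch is an assumption inferred from the attributes report() reads (code, text); check the class definition before relying on it.

def get_user(request):
    "toy command handler"
    user = None  # lookup elided
    if user is None:
        raise HttpReqError(404, "no such user")  # assumed signature: (code, text)
    return user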
240,659
|
decryptus/httpdis
|
httpdis/httpdis.py
|
HttpReqHandler.send_error_explain
|
def send_error_explain(self, code, message=None, headers=None, content_type=None):
"do not use directly"
if headers is None:
headers = {}
if code in self.responses:
if message is None:
message = self.responses[code][0]
explain = self.responses[code][1]
else:
explain = ""
if message is None:
message = ""
if not isinstance(headers, dict):
headers = {}
if not content_type:
if self._cmd and self._cmd.content_type:
content_type = self._cmd.content_type
else:
content_type = self._DEFAULT_CONTENT_TYPE
if self._cmd and self._cmd.charset:
charset = self._cmd.charset
else:
charset = DEFAULT_CHARSET
headers['Content-type'] = "%s; charset=%s" % (content_type, charset)
data = self._mk_error_explain_data(code, message, explain)
self.end_response(self.build_response(code, data, headers))
|
python
|
def send_error_explain(self, code, message=None, headers=None, content_type=None):
"do not use directly"
if headers is None:
headers = {}
if code in self.responses:
if message is None:
message = self.responses[code][0]
explain = self.responses[code][1]
else:
explain = ""
if message is None:
message = ""
if not isinstance(headers, dict):
headers = {}
if not content_type:
if self._cmd and self._cmd.content_type:
content_type = self._cmd.content_type
else:
content_type = self._DEFAULT_CONTENT_TYPE
if self._cmd and self._cmd.charset:
charset = self._cmd.charset
else:
charset = DEFAULT_CHARSET
headers['Content-type'] = "%s; charset=%s" % (content_type, charset)
data = self._mk_error_explain_data(code, message, explain)
self.end_response(self.build_response(code, data, headers))
|
[
"def",
"send_error_explain",
"(",
"self",
",",
"code",
",",
"message",
"=",
"None",
",",
"headers",
"=",
"None",
",",
"content_type",
"=",
"None",
")",
":",
"if",
"headers",
"is",
"None",
":",
"headers",
"=",
"{",
"}",
"if",
"code",
"in",
"self",
".",
"responses",
":",
"if",
"message",
"is",
"None",
":",
"message",
"=",
"self",
".",
"responses",
"[",
"code",
"]",
"[",
"0",
"]",
"explain",
"=",
"self",
".",
"responses",
"[",
"code",
"]",
"[",
"1",
"]",
"else",
":",
"explain",
"=",
"\"\"",
"if",
"message",
"is",
"None",
":",
"message",
"=",
"\"\"",
"if",
"not",
"isinstance",
"(",
"headers",
",",
"dict",
")",
":",
"headers",
"=",
"{",
"}",
"if",
"not",
"content_type",
":",
"if",
"self",
".",
"_cmd",
"and",
"self",
".",
"_cmd",
".",
"content_type",
":",
"content_type",
"=",
"self",
".",
"_cmd",
".",
"content_type",
"else",
":",
"content_type",
"=",
"self",
".",
"_DEFAULT_CONTENT_TYPE",
"if",
"self",
".",
"_cmd",
"and",
"self",
".",
"_cmd",
".",
"charset",
":",
"charset",
"=",
"self",
".",
"_cmd",
".",
"charset",
"else",
":",
"charset",
"=",
"DEFAULT_CHARSET",
"headers",
"[",
"'Content-type'",
"]",
"=",
"\"%s; charset=%s\"",
"%",
"(",
"content_type",
",",
"charset",
")",
"data",
"=",
"self",
".",
"_mk_error_explain_data",
"(",
"code",
",",
"message",
",",
"explain",
")",
"self",
".",
"end_response",
"(",
"self",
".",
"build_response",
"(",
"code",
",",
"data",
",",
"headers",
")",
")"
] |
do not use directly
|
[
"do",
"not",
"use",
"directly"
] |
5d198cdc5558f416634602689b3df2c8aeb34984
|
https://github.com/decryptus/httpdis/blob/5d198cdc5558f416634602689b3df2c8aeb34984/httpdis/httpdis.py#L489-L523
|
240,660
|
decryptus/httpdis
|
httpdis/httpdis.py
|
HttpReqHandler.send_exception
|
def send_exception(self, code, exc_info=None, headers=None):
"send an error response including a backtrace to the client"
if headers is None:
headers = {}
if not exc_info:
exc_info = sys.exc_info()
self.send_error_msg(code,
traceback.format_exception(*exc_info),
headers)
|
python
|
def send_exception(self, code, exc_info=None, headers=None):
"send an error response including a backtrace to the client"
if headers is None:
headers = {}
if not exc_info:
exc_info = sys.exc_info()
self.send_error_msg(code,
traceback.format_exception(*exc_info),
headers)
|
[
"def",
"send_exception",
"(",
"self",
",",
"code",
",",
"exc_info",
"=",
"None",
",",
"headers",
"=",
"None",
")",
":",
"if",
"headers",
"is",
"None",
":",
"headers",
"=",
"{",
"}",
"if",
"not",
"exc_info",
":",
"exc_info",
"=",
"sys",
".",
"exc_info",
"(",
")",
"self",
".",
"send_error_msg",
"(",
"code",
",",
"traceback",
".",
"format_exception",
"(",
"*",
"exc_info",
")",
",",
"headers",
")"
] |
send an error response including a backtrace to the client
|
[
"send",
"an",
"error",
"response",
"including",
"a",
"backtrace",
"to",
"the",
"client"
] |
5d198cdc5558f416634602689b3df2c8aeb34984
|
https://github.com/decryptus/httpdis/blob/5d198cdc5558f416634602689b3df2c8aeb34984/httpdis/httpdis.py#L540-L550
|
240,661
|
decryptus/httpdis
|
httpdis/httpdis.py
|
HttpReqHandler.send_error_json
|
def send_error_json(self, code, message, headers=None):
"send an error to the client. text message is formatted in a json stream"
if headers is None:
headers = {}
self.end_response(HttpResponseJson(code,
{'code': code,
'message': message},
headers))
|
python
|
def send_error_json(self, code, message, headers=None):
"send an error to the client. text message is formatted in a json stream"
if headers is None:
headers = {}
self.end_response(HttpResponseJson(code,
{'code': code,
'message': message},
headers))
|
[
"def",
"send_error_json",
"(",
"self",
",",
"code",
",",
"message",
",",
"headers",
"=",
"None",
")",
":",
"if",
"headers",
"is",
"None",
":",
"headers",
"=",
"{",
"}",
"self",
".",
"end_response",
"(",
"HttpResponseJson",
"(",
"code",
",",
"{",
"'code'",
":",
"code",
",",
"'message'",
":",
"message",
"}",
",",
"headers",
")",
")"
] |
send an error to the client. text message is formatted in a json stream
|
[
"send",
"an",
"error",
"to",
"the",
"client",
".",
"text",
"message",
"is",
"formatted",
"in",
"a",
"json",
"stream"
] |
5d198cdc5558f416634602689b3df2c8aeb34984
|
https://github.com/decryptus/httpdis/blob/5d198cdc5558f416634602689b3df2c8aeb34984/httpdis/httpdis.py#L552-L560
|
240,662
|
decryptus/httpdis
|
httpdis/httpdis.py
|
HttpReqHandler.common_req
|
def common_req(self, execute, send_body=True):
"Common code for GET and POST requests"
self._SERVER = {'CLIENT_ADDR_HOST': self.client_address[0],
'CLIENT_ADDR_PORT': self.client_address[1]}
self._to_log = True
self._cmd = None
self._payload = None
self._path = None
self._payload_params = None
self._query_params = {}
self._fragment = None
(cmd, res, req) = (None, None, None)
try:
try:
path = self._pathify() # pylint: disable-msg=W0612
cmd = path[1:]
res = execute(cmd)
except HttpReqError, e:
e.report(self)
except Exception:
try:
self.send_exception(500) # XXX 500
except Exception: # pylint: disable-msg=W0703
pass
raise
else:
if not isinstance(res, HttpResponse):
req = self.build_response()
if send_body:
req.add_data(res)
req.set_send_body(send_body)
else:
req = res
self.end_response(req)
except socket.error, e:
if e.errno in (errno.ECONNRESET, errno.EPIPE):
return
LOG.exception("exception - cmd=%r - method=%r", cmd, self.command)
except Exception: # pylint: disable-msg=W0703
LOG.exception("exception - cmd=%r - method=%r", cmd, self.command)
finally:
del req, res
|
python
|
def common_req(self, execute, send_body=True):
"Common code for GET and POST requests"
self._SERVER = {'CLIENT_ADDR_HOST': self.client_address[0],
'CLIENT_ADDR_PORT': self.client_address[1]}
self._to_log = True
self._cmd = None
self._payload = None
self._path = None
self._payload_params = None
self._query_params = {}
self._fragment = None
(cmd, res, req) = (None, None, None)
try:
try:
path = self._pathify() # pylint: disable-msg=W0612
cmd = path[1:]
res = execute(cmd)
except HttpReqError, e:
e.report(self)
except Exception:
try:
self.send_exception(500) # XXX 500
except Exception: # pylint: disable-msg=W0703
pass
raise
else:
if not isinstance(res, HttpResponse):
req = self.build_response()
if send_body:
req.add_data(res)
req.set_send_body(send_body)
else:
req = res
self.end_response(req)
except socket.error, e:
if e.errno in (errno.ECONNRESET, errno.EPIPE):
return
LOG.exception("exception - cmd=%r - method=%r", cmd, self.command)
except Exception: # pylint: disable-msg=W0703
LOG.exception("exception - cmd=%r - method=%r", cmd, self.command)
finally:
del req, res
|
[
"def",
"common_req",
"(",
"self",
",",
"execute",
",",
"send_body",
"=",
"True",
")",
":",
"self",
".",
"_SERVER",
"=",
"{",
"'CLIENT_ADDR_HOST'",
":",
"self",
".",
"client_address",
"[",
"0",
"]",
",",
"'CLIENT_ADDR_PORT'",
":",
"self",
".",
"client_address",
"[",
"1",
"]",
"}",
"self",
".",
"_to_log",
"=",
"True",
"self",
".",
"_cmd",
"=",
"None",
"self",
".",
"_payload",
"=",
"None",
"self",
".",
"_path",
"=",
"None",
"self",
".",
"_payload_params",
"=",
"None",
"self",
".",
"_query_params",
"=",
"{",
"}",
"self",
".",
"_fragment",
"=",
"None",
"(",
"cmd",
",",
"res",
",",
"req",
")",
"=",
"(",
"None",
",",
"None",
",",
"None",
")",
"try",
":",
"try",
":",
"path",
"=",
"self",
".",
"_pathify",
"(",
")",
"# pylint: disable-msg=W0612",
"cmd",
"=",
"path",
"[",
"1",
":",
"]",
"res",
"=",
"execute",
"(",
"cmd",
")",
"except",
"HttpReqError",
",",
"e",
":",
"e",
".",
"report",
"(",
"self",
")",
"except",
"Exception",
":",
"try",
":",
"self",
".",
"send_exception",
"(",
"500",
")",
"# XXX 500",
"except",
"Exception",
":",
"# pylint: disable-msg=W0703",
"pass",
"raise",
"else",
":",
"if",
"not",
"isinstance",
"(",
"res",
",",
"HttpResponse",
")",
":",
"req",
"=",
"self",
".",
"build_response",
"(",
")",
"if",
"send_body",
":",
"req",
".",
"add_data",
"(",
"res",
")",
"req",
".",
"set_send_body",
"(",
"send_body",
")",
"else",
":",
"req",
"=",
"res",
"self",
".",
"end_response",
"(",
"req",
")",
"except",
"socket",
".",
"error",
",",
"e",
":",
"if",
"e",
".",
"errno",
"in",
"(",
"errno",
".",
"ECONNRESET",
",",
"errno",
".",
"EPIPE",
")",
":",
"return",
"LOG",
".",
"exception",
"(",
"\"exception - cmd=%r - method=%r\"",
",",
"cmd",
",",
"self",
".",
"command",
")",
"except",
"Exception",
":",
"# pylint: disable-msg=W0703",
"LOG",
".",
"exception",
"(",
"\"exception - cmd=%r - method=%r\"",
",",
"cmd",
",",
"self",
".",
"command",
")",
"finally",
":",
"del",
"req",
",",
"res"
] |
Common code for GET and POST requests
|
[
"Common",
"code",
"for",
"GET",
"and",
"POST",
"requests"
] |
5d198cdc5558f416634602689b3df2c8aeb34984
|
https://github.com/decryptus/httpdis/blob/5d198cdc5558f416634602689b3df2c8aeb34984/httpdis/httpdis.py#L936-L982
|
240,663
|
skitazaki/python-clitool
|
clitool/cli.py
|
base_parser
|
def base_parser():
""" Create arguments parser with basic options and no help message.
* -c, --config: load configuration file.
* -v, --verbose: increase logging verbosity. `-v`, `-vv`, and `-vvv`.
* -q, --quiet: quiet logging except critical level.
* -o, --output: output file. (default=sys.stdout)
* --basedir: base directory. (default=os.getcwd)
* --input-encoding: input data encoding. (default=utf-8)
* --output-encoding: output data encoding. (default=utf-8)
* --processes: count of processes.
* --chunksize: a number of chunks submitted to the process pool.
:rtype: :class:`argparse.ArgumentParser`
"""
parser = argparse.ArgumentParser(add_help=False)
parser.add_argument("-c", "--config", dest="config",
type=argparse.FileType('r'),
metavar="FILE",
help="configuration file")
parser.add_argument("-o", "--output", dest="output",
type=argparse.FileType('w'),
metavar="FILE",
default=sys.stdout,
help="output file")
parser.add_argument("--basedir", dest="basedir",
default=os.getcwd(),
help="base directory")
parser.add_argument("--input-encoding", dest="input_encoding",
default=DEFAULT_ENCODING,
help="encoding of input source")
parser.add_argument("--output-encoding", dest="output_encoding",
default=DEFAULT_ENCODING,
help="encoding of output distination")
parser.add_argument("--processes", dest="processes", type=int,
help="number of processes")
parser.add_argument("--chunksize", dest="chunksize", type=int,
default=1,
help="number of chunks submitted to the process pool")
group = parser.add_mutually_exclusive_group()
group.add_argument("-v", "--verbose", dest="verbose",
action="count", default=0,
help="increase logging verbosity")
group.add_argument("-q", "--quiet", dest="quiet",
default=False, action="store_true",
help="set logging to quiet mode")
return parser
|
python
|
def base_parser():
""" Create arguments parser with basic options and no help message.
* -c, --config: load configuration file.
* -v, --verbose: increase logging verbosity. `-v`, `-vv`, and `-vvv`.
* -q, --quiet: quiet logging except critical level.
* -o, --output: output file. (default=sys.stdout)
* --basedir: base directory. (default=os.getcwd)
* --input-encoding: input data encoding. (default=utf-8)
* --output-encoding: output data encoding. (default=utf-8)
* --processes: count of processes.
* --chunksize: a number of chunks submitted to the process pool.
:rtype: :class:`argparse.ArgumentParser`
"""
parser = argparse.ArgumentParser(add_help=False)
parser.add_argument("-c", "--config", dest="config",
type=argparse.FileType('r'),
metavar="FILE",
help="configuration file")
parser.add_argument("-o", "--output", dest="output",
type=argparse.FileType('w'),
metavar="FILE",
default=sys.stdout,
help="output file")
parser.add_argument("--basedir", dest="basedir",
default=os.getcwd(),
help="base directory")
parser.add_argument("--input-encoding", dest="input_encoding",
default=DEFAULT_ENCODING,
help="encoding of input source")
parser.add_argument("--output-encoding", dest="output_encoding",
default=DEFAULT_ENCODING,
help="encoding of output distination")
parser.add_argument("--processes", dest="processes", type=int,
help="number of processes")
parser.add_argument("--chunksize", dest="chunksize", type=int,
default=1,
help="number of chunks submitted to the process pool")
group = parser.add_mutually_exclusive_group()
group.add_argument("-v", "--verbose", dest="verbose",
action="count", default=0,
help="increase logging verbosity")
group.add_argument("-q", "--quiet", dest="quiet",
default=False, action="store_true",
help="set logging to quiet mode")
return parser
|
[
"def",
"base_parser",
"(",
")",
":",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
"add_help",
"=",
"False",
")",
"parser",
".",
"add_argument",
"(",
"\"-c\"",
",",
"\"--config\"",
",",
"dest",
"=",
"\"config\"",
",",
"type",
"=",
"argparse",
".",
"FileType",
"(",
"'r'",
")",
",",
"metavar",
"=",
"\"FILE\"",
",",
"help",
"=",
"\"configuration file\"",
")",
"parser",
".",
"add_argument",
"(",
"\"-o\"",
",",
"\"--output\"",
",",
"dest",
"=",
"\"output\"",
",",
"type",
"=",
"argparse",
".",
"FileType",
"(",
"'w'",
")",
",",
"metavar",
"=",
"\"FILE\"",
",",
"default",
"=",
"sys",
".",
"stdout",
",",
"help",
"=",
"\"output file\"",
")",
"parser",
".",
"add_argument",
"(",
"\"--basedir\"",
",",
"dest",
"=",
"\"basedir\"",
",",
"default",
"=",
"os",
".",
"getcwd",
"(",
")",
",",
"help",
"=",
"\"base directory\"",
")",
"parser",
".",
"add_argument",
"(",
"\"--input-encoding\"",
",",
"dest",
"=",
"\"input_encoding\"",
",",
"default",
"=",
"DEFAULT_ENCODING",
",",
"help",
"=",
"\"encoding of input source\"",
")",
"parser",
".",
"add_argument",
"(",
"\"--output-encoding\"",
",",
"dest",
"=",
"\"output_encoding\"",
",",
"default",
"=",
"DEFAULT_ENCODING",
",",
"help",
"=",
"\"encoding of output distination\"",
")",
"parser",
".",
"add_argument",
"(",
"\"--processes\"",
",",
"dest",
"=",
"\"processes\"",
",",
"type",
"=",
"int",
",",
"help",
"=",
"\"number of processes\"",
")",
"parser",
".",
"add_argument",
"(",
"\"--chunksize\"",
",",
"dest",
"=",
"\"chunksize\"",
",",
"type",
"=",
"int",
",",
"default",
"=",
"1",
",",
"help",
"=",
"\"number of chunks submitted to the process pool\"",
")",
"group",
"=",
"parser",
".",
"add_mutually_exclusive_group",
"(",
")",
"group",
".",
"add_argument",
"(",
"\"-v\"",
",",
"\"--verbose\"",
",",
"dest",
"=",
"\"verbose\"",
",",
"action",
"=",
"\"count\"",
",",
"default",
"=",
"0",
",",
"help",
"=",
"\"increase logging verbosity\"",
")",
"group",
".",
"add_argument",
"(",
"\"-q\"",
",",
"\"--quiet\"",
",",
"dest",
"=",
"\"quiet\"",
",",
"default",
"=",
"False",
",",
"action",
"=",
"\"store_true\"",
",",
"help",
"=",
"\"set logging to quiet mode\"",
")",
"return",
"parser"
] |
Create arguments parser with basic options and no help message.
* -c, --config: load configuration file.
* -v, --verbose: increase logging verbosity. `-v`, `-vv`, and `-vvv`.
* -q, --quiet: quiet logging except critical level.
* -o, --output: output file. (default=sys.stdout)
* --basedir: base directory. (default=os.getcwd)
* --input-encoding: input data encoding. (default=utf-8)
* --output-encoding: output data encoding. (default=utf-8)
* --processes: count of processes.
* --chunksize: a number of chunks submitted to the process pool.
:rtype: :class:`argparse.ArgumentParser`
|
[
"Create",
"arguments",
"parser",
"with",
"basic",
"options",
"and",
"no",
"help",
"message",
"."
] |
4971f8d093d51c6fd0e6cc536bbb597f78b570ab
|
https://github.com/skitazaki/python-clitool/blob/4971f8d093d51c6fd0e6cc536bbb597f78b570ab/clitool/cli.py#L25-L82
|
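Because base_parser() is created with add_help=False, it is designed to be passed as a parent parser rather than used directly. A small sketch, assuming clitool is importable:

import argparse
from clitool.cli import base_parser

parser = argparse.ArgumentParser(description="my tool", parents=[base_parser()])
parser.add_argument("files", nargs="*", help="input files")
args = parser.parse_args(["-vv", "--chunksize", "8", "data.csv"])
print(args.verbose, args.chunksize, args.files)  # -> 2 8 ['data.csv']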
240,664
|
skitazaki/python-clitool
|
clitool/cli.py
|
cliconfig
|
def cliconfig(fp, env=None):
""" Load configuration data.
Given pointer is closed internally.
If ``None`` is given, force to exit.
More detailed information is available on underlying feature,
:mod:`clitool.config`.
:param fp: opened file pointer of configuration
:type fp: FileType
:param env: environment to load
:type env: str
:rtype: dict
"""
if fp is None:
raise SystemExit('No configuration file is given.')
from clitool.config import ConfigLoader
loader = ConfigLoader(fp)
cfg = loader.load(env)
if not fp.closed:
fp.close()
if not cfg:
logging.warn('Configuration may be empty.')
return cfg
|
python
|
def cliconfig(fp, env=None):
""" Load configuration data.
Given pointer is closed internally.
If ``None`` is given, force to exit.
More detailed information is available on underlying feature,
:mod:`clitool.config`.
:param fp: opened file pointer of configuration
:type fp: FileType
:param env: environment to load
:type env: str
:rtype: dict
"""
if fp is None:
raise SystemExit('No configuration file is given.')
from clitool.config import ConfigLoader
loader = ConfigLoader(fp)
cfg = loader.load(env)
if not fp.closed:
fp.close()
if not cfg:
logging.warn('Configuration may be empty.')
return cfg
|
[
"def",
"cliconfig",
"(",
"fp",
",",
"env",
"=",
"None",
")",
":",
"if",
"fp",
"is",
"None",
":",
"raise",
"SystemExit",
"(",
"'No configuration file is given.'",
")",
"from",
"clitool",
".",
"config",
"import",
"ConfigLoader",
"loader",
"=",
"ConfigLoader",
"(",
"fp",
")",
"cfg",
"=",
"loader",
".",
"load",
"(",
"env",
")",
"if",
"not",
"fp",
".",
"closed",
":",
"fp",
".",
"close",
"(",
")",
"if",
"not",
"cfg",
":",
"logging",
".",
"warn",
"(",
"'Configuration may be empty.'",
")",
"return",
"cfg"
] |
Load configuration data.
Given pointer is closed internally.
If ``None`` is given, force to exit.
More detailed information is available on underlying feature,
:mod:`clitool.config`.
:param fp: opened file pointer of configuration
:type fp: FileType
:param env: environment to load
:type env: str
:rtype: dict
|
[
"Load",
"configuration",
"data",
".",
"Given",
"pointer",
"is",
"closed",
"internally",
".",
"If",
"None",
"is",
"given",
"force",
"to",
"exit",
"."
] |
4971f8d093d51c6fd0e6cc536bbb597f78b570ab
|
https://github.com/skitazaki/python-clitool/blob/4971f8d093d51c6fd0e6cc536bbb597f78b570ab/clitool/cli.py#L172-L195
|
240,665
|
skitazaki/python-clitool
|
clitool/cli.py
|
clistream
|
def clistream(reporter, *args, **kwargs):
""" Handle stream data on command line interface,
and returns statistics of success, error, and total amount.
More detailed information is available on underlying feature,
:mod:`clitool.processor`.
:param Handler: [DEPRECATED] Handler for file-like streams.
(default: :class:`clitool.processor.CliHandler`)
:type Handler: object which supports `handle` method.
:param reporter: callback to report processed value
:type reporter: callable
:param delimiter: line delimiter [optional]
:type delimiter: string
:param args: functions to parse each item in the stream.
:param kwargs: keywords, including ``files`` and ``input_encoding``.
:rtype: list
"""
# Follow the rule of `parse_arguments()`
files = kwargs.get('files')
encoding = kwargs.get('input_encoding', DEFAULT_ENCODING)
processes = kwargs.get('processes')
chunksize = kwargs.get('chunksize')
from clitool.processor import CliHandler, Streamer
Handler = kwargs.get('Handler')
if Handler:
warnings.warn('"Handler" keyword will be removed from next release.',
DeprecationWarning)
else:
Handler = CliHandler
s = Streamer(reporter, processes=processes, *args)
handler = Handler(s, kwargs.get('delimiter'))
return handler.handle(files, encoding, chunksize)
|
python
|
def clistream(reporter, *args, **kwargs):
""" Handle stream data on command line interface,
and returns statistics of success, error, and total amount.
More detailed information is available on underlying feature,
:mod:`clitool.processor`.
:param Handler: [DEPRECATED] Handler for file-like streams.
(default: :class:`clitool.processor.CliHandler`)
:type Handler: object which supports `handle` method.
:param reporter: callback to report processed value
:type reporter: callable
:param delimiter: line delimiter [optional]
:type delimiter: string
:param args: functions to parse each item in the stream.
:param kwargs: keywords, including ``files`` and ``input_encoding``.
:rtype: list
"""
# Follow the rule of `parse_arguments()`
files = kwargs.get('files')
encoding = kwargs.get('input_encoding', DEFAULT_ENCODING)
processes = kwargs.get('processes')
chunksize = kwargs.get('chunksize')
from clitool.processor import CliHandler, Streamer
Handler = kwargs.get('Handler')
if Handler:
warnings.warn('"Handler" keyword will be removed from next release.',
DeprecationWarning)
else:
Handler = CliHandler
s = Streamer(reporter, processes=processes, *args)
handler = Handler(s, kwargs.get('delimiter'))
return handler.handle(files, encoding, chunksize)
|
[
"def",
"clistream",
"(",
"reporter",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"# Follow the rule of `parse_arguments()`",
"files",
"=",
"kwargs",
".",
"get",
"(",
"'files'",
")",
"encoding",
"=",
"kwargs",
".",
"get",
"(",
"'input_encoding'",
",",
"DEFAULT_ENCODING",
")",
"processes",
"=",
"kwargs",
".",
"get",
"(",
"'processes'",
")",
"chunksize",
"=",
"kwargs",
".",
"get",
"(",
"'chunksize'",
")",
"from",
"clitool",
".",
"processor",
"import",
"CliHandler",
",",
"Streamer",
"Handler",
"=",
"kwargs",
".",
"get",
"(",
"'Handler'",
")",
"if",
"Handler",
":",
"warnings",
".",
"warn",
"(",
"'\"Handler\" keyword will be removed from next release.'",
",",
"DeprecationWarning",
")",
"else",
":",
"Handler",
"=",
"CliHandler",
"s",
"=",
"Streamer",
"(",
"reporter",
",",
"processes",
"=",
"processes",
",",
"*",
"args",
")",
"handler",
"=",
"Handler",
"(",
"s",
",",
"kwargs",
".",
"get",
"(",
"'delimiter'",
")",
")",
"return",
"handler",
".",
"handle",
"(",
"files",
",",
"encoding",
",",
"chunksize",
")"
] |
Handle stream data on command line interface,
and returns statistics of success, error, and total amount.
More detailed information is available on underlying feature,
:mod:`clitool.processor`.
:param Handler: [DEPRECATED] Handler for file-like streams.
(default: :class:`clitool.processor.CliHandler`)
:type Handler: object which supports `handle` method.
:param reporter: callback to report processed value
:type reporter: callable
:param delimiter: line delimiter [optional]
:type delimiter: string
:param args: functions to parse each item in the stream.
:param kwargs: keywords, including ``files`` and ``input_encoding``.
:rtype: list
|
[
"Handle",
"stream",
"data",
"on",
"command",
"line",
"interface",
"and",
"returns",
"statistics",
"of",
"success",
"error",
"and",
"total",
"amount",
"."
] |
4971f8d093d51c6fd0e6cc536bbb597f78b570ab
|
https://github.com/skitazaki/python-clitool/blob/4971f8d093d51c6fd0e6cc536bbb597f78b570ab/clitool/cli.py#L198-L232
|
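A hedged usage sketch for clistream() above: the reporter receives each processed value and the return value is the success/error/total statistics. Whether ``files`` expects paths or open file objects is decided by parse_arguments(), which is not shown here, so the call below is illustrative only.

from clitool.cli import clistream  # assumes clitool is importable

results = []

def reporter(value):
    results.append(value)           # collect each processed item

stats = clistream(reporter, str.strip, files=['input.txt'])  # illustrative file name
print(stats)                        # success/error/total statistics, per the docstring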
240,666
|
jlesquembre/jlle
|
jlle/releaser/pypi.py
|
SetupConfig.no_input
|
def no_input(self):
"""Return whether the user wants to run in no-input mode.
Enable this mode by adding a ``no-input`` option::
[zest.releaser]
no-input = yes
The default when this option has not been set is False.
Standard config rules apply, so you can use upper or lower or
mixed case and specify 0, false, no or off for boolean False,
and 1, on, true or yes for boolean True.
"""
default = False
if self.config is None:
return default
try:
result = self.config.getboolean('zest.releaser', 'no-input')
except (NoSectionError, NoOptionError, ValueError):
return default
return result
|
python
|
def no_input(self):
"""Return whether the user wants to run in no-input mode.
Enable this mode by adding a ``no-input`` option::
[zest.releaser]
no-input = yes
The default when this option has not been set is False.
Standard config rules apply, so you can use upper or lower or
mixed case and specify 0, false, no or off for boolean False,
and 1, on, true or yes for boolean True.
"""
default = False
if self.config is None:
return default
try:
result = self.config.getboolean('zest.releaser', 'no-input')
except (NoSectionError, NoOptionError, ValueError):
return default
return result
|
[
"def",
"no_input",
"(",
"self",
")",
":",
"default",
"=",
"False",
"if",
"self",
".",
"config",
"is",
"None",
":",
"return",
"default",
"try",
":",
"result",
"=",
"self",
".",
"config",
".",
"getboolean",
"(",
"'zest.releaser'",
",",
"'no-input'",
")",
"except",
"(",
"NoSectionError",
",",
"NoOptionError",
",",
"ValueError",
")",
":",
"return",
"default",
"return",
"result"
] |
Return whether the user wants to run in no-input mode.
Enable this mode by adding a ``no-input`` option::
[zest.releaser]
no-input = yes
The default when this option has not been set is False.
Standard config rules apply, so you can use upper or lower or
mixed case and specify 0, false, no or off for boolean False,
and 1, on, true or yes for boolean True.
|
[
"Return",
"whether",
"the",
"user",
"wants",
"to",
"run",
"in",
"no",
"-",
"input",
"mode",
"."
] |
3645d8f203708355853ef911f4b887ae4d794826
|
https://github.com/jlesquembre/jlle/blob/3645d8f203708355853ef911f4b887ae4d794826/jlle/releaser/pypi.py#L112-L133
|
240,667
|
jlesquembre/jlle
|
jlle/releaser/pypi.py
|
PypiConfig.distutils_servers
|
def distutils_servers(self):
"""Return a list of known distutils servers for collective.dist.
If the config has an old pypi config, remove the default pypi
server from the list.
"""
if not multiple_pypi_support():
return []
try:
raw_index_servers = self.config.get('distutils', 'index-servers')
except (NoSectionError, NoOptionError):
return []
ignore_servers = ['']
if self.is_old_pypi_config():
# We have already asked about uploading to pypi using the normal
# upload.
ignore_servers.append('pypi')
# Yes, you can even have an old pypi config with a
# [distutils] server list.
index_servers = [
server.strip() for server in raw_index_servers.split('\n')
if server.strip() not in ignore_servers]
return index_servers
|
python
|
def distutils_servers(self):
"""Return a list of known distutils servers for collective.dist.
If the config has an old pypi config, remove the default pypi
server from the list.
"""
if not multiple_pypi_support():
return []
try:
raw_index_servers = self.config.get('distutils', 'index-servers')
except (NoSectionError, NoOptionError):
return []
ignore_servers = ['']
if self.is_old_pypi_config():
# We have already asked about uploading to pypi using the normal
# upload.
ignore_servers.append('pypi')
# Yes, you can even have an old pypi config with a
# [distutils] server list.
index_servers = [
server.strip() for server in raw_index_servers.split('\n')
if server.strip() not in ignore_servers]
return index_servers
|
[
"def",
"distutils_servers",
"(",
"self",
")",
":",
"if",
"not",
"multiple_pypi_support",
"(",
")",
":",
"return",
"[",
"]",
"try",
":",
"raw_index_servers",
"=",
"self",
".",
"config",
".",
"get",
"(",
"'distutils'",
",",
"'index-servers'",
")",
"except",
"(",
"NoSectionError",
",",
"NoOptionError",
")",
":",
"return",
"[",
"]",
"ignore_servers",
"=",
"[",
"''",
"]",
"if",
"self",
".",
"is_old_pypi_config",
"(",
")",
":",
"# We have already asked about uploading to pypi using the normal",
"# upload.",
"ignore_servers",
".",
"append",
"(",
"'pypi'",
")",
"# Yes, you can even have an old pypi config with a",
"# [distutils] server list.",
"index_servers",
"=",
"[",
"server",
".",
"strip",
"(",
")",
"for",
"server",
"in",
"raw_index_servers",
".",
"split",
"(",
"'\\n'",
")",
"if",
"server",
".",
"strip",
"(",
")",
"not",
"in",
"ignore_servers",
"]",
"return",
"index_servers"
] |
Return a list of known distutils servers for collective.dist.
If the config has an old pypi config, remove the default pypi
server from the list.
|
[
"Return",
"a",
"list",
"of",
"known",
"distutils",
"servers",
"for",
"collective",
".",
"dist",
"."
] |
3645d8f203708355853ef911f4b887ae4d794826
|
https://github.com/jlesquembre/jlle/blob/3645d8f203708355853ef911f4b887ae4d794826/jlle/releaser/pypi.py#L211-L233
|
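The filtering done by distutils_servers() above can be re-created standalone with the stdlib configparser (the record goes through self.config instead; the server names are illustrative):

from configparser import ConfigParser

cfg = ConfigParser()
cfg.read_string("""
[distutils]
index-servers =
    pypi
    internal
""")
raw = cfg.get('distutils', 'index-servers')
# mimic the record: drop blanks, and drop 'pypi' when an old-style pypi config exists
servers = [s.strip() for s in raw.split('\n') if s.strip() not in ('', 'pypi')]
print(servers)  # -> ['internal']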
240,668
|
jlesquembre/jlle
|
jlle/releaser/pypi.py
|
PypiConfig.want_release
|
def want_release(self):
"""Does the user normally want to release this package.
Some colleagues find it irritating to have to remember to
answer the question "Check out the tag (for tweaks or
pypi/distutils server upload)" with the non-default 'no' when
in 99 percent of the cases they just make a release specific
for a customer, so they always answer 'no' here. This is
where an extra config option comes in handy: you can influence
the default answer so you can just keep hitting 'Enter' until
zest.releaser is done.
Either in your ~/.pypirc or in a setup.cfg in a specific
package, add this when you want the default answer to this
question to be 'no':
[zest.releaser]
release = no
The default when this option has not been set is True.
Standard config rules apply, so you can use upper or lower or
mixed case and specify 0, false, no or off for boolean False,
and 1, on, true or yes for boolean True.
"""
default = True
if self.config is None:
return default
try:
result = self.config.getboolean('zest.releaser', 'release')
except (NoSectionError, NoOptionError, ValueError):
return default
return result
|
python
|
def want_release(self):
"""Does the user normally want to release this package.
Some colleagues find it irritating to have to remember to
answer the question "Check out the tag (for tweaks or
pypi/distutils server upload)" with the non-default 'no' when
in 99 percent of the cases they just make a release specific
for a customer, so they always answer 'no' here. This is
where an extra config option comes in handy: you can influence
the default answer so you can just keep hitting 'Enter' until
zest.releaser is done.
Either in your ~/.pypirc or in a setup.cfg in a specific
package, add this when you want the default answer to this
question to be 'no':
[zest.releaser]
release = no
The default when this option has not been set is True.
Standard config rules apply, so you can use upper or lower or
mixed case and specify 0, false, no or off for boolean False,
and 1, on, true or yes for boolean True.
"""
default = True
if self.config is None:
return default
try:
result = self.config.getboolean('zest.releaser', 'release')
except (NoSectionError, NoOptionError, ValueError):
return default
return result
|
[
"def",
"want_release",
"(",
"self",
")",
":",
"default",
"=",
"True",
"if",
"self",
".",
"config",
"is",
"None",
":",
"return",
"default",
"try",
":",
"result",
"=",
"self",
".",
"config",
".",
"getboolean",
"(",
"'zest.releaser'",
",",
"'release'",
")",
"except",
"(",
"NoSectionError",
",",
"NoOptionError",
",",
"ValueError",
")",
":",
"return",
"default",
"return",
"result"
] |
Does the user normally want to release this package.
Some colleagues find it irritating to have to remember to
answer the question "Check out the tag (for tweaks or
pypi/distutils server upload)" with the non-default 'no' when
in 99 percent of the cases they just make a release specific
for a customer, so they always answer 'no' here. This is
where an extra config option comes in handy: you can influence
the default answer so you can just keep hitting 'Enter' until
zest.releaser is done.
Either in your ~/.pypirc or in a setup.cfg in a specific
package, add this when you want the default answer to this
question to be 'no':
[zest.releaser]
release = no
The default when this option has not been set is True.
Standard config rules apply, so you can use upper or lower or
mixed case and specify 0, false, no or off for boolean False,
and 1, on, true or yes for boolean True.
|
[
"Does",
"the",
"user",
"normally",
"want",
"to",
"release",
"this",
"package",
"."
] |
3645d8f203708355853ef911f4b887ae4d794826
|
https://github.com/jlesquembre/jlle/blob/3645d8f203708355853ef911f4b887ae4d794826/jlle/releaser/pypi.py#L235-L267
|
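The boolean parsing described in the docstring above maps directly onto configparser.getboolean; a small sketch with a made-up config text:

from configparser import ConfigParser

cfg = ConfigParser()
cfg.read_string("[zest.releaser]\nrelease = no\n")
# 0/false/no/off and 1/true/yes/on are all accepted, case-insensitively
print(cfg.getboolean('zest.releaser', 'release'))  # False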
240,669
|
olsoneric/pedemath
|
pedemath/matrix.py
|
transpose_mat44
|
def transpose_mat44(src_mat, transpose_mat=None):
"""Create a transpose of a matrix."""
if not transpose_mat:
transpose_mat = Matrix44()
for i in range(4):
for j in range(4):
transpose_mat.data[i][j] = src_mat.data[j][i]
return transpose_mat
|
python
|
def transpose_mat44(src_mat, transpose_mat=None):
"""Create a transpose of a matrix."""
if not transpose_mat:
transpose_mat = Matrix44()
for i in range(4):
for j in range(4):
transpose_mat.data[i][j] = src_mat.data[j][i]
return transpose_mat
|
[
"def",
"transpose_mat44",
"(",
"src_mat",
",",
"transpose_mat",
"=",
"None",
")",
":",
"if",
"not",
"transpose_mat",
":",
"transpose_mat",
"=",
"Matrix44",
"(",
")",
"for",
"i",
"in",
"range",
"(",
"4",
")",
":",
"for",
"j",
"in",
"range",
"(",
"4",
")",
":",
"transpose_mat",
".",
"data",
"[",
"i",
"]",
"[",
"j",
"]",
"=",
"src_mat",
".",
"data",
"[",
"j",
"]",
"[",
"i",
"]",
"return",
"transpose_mat"
] |
Create a transpose of a matrix.
|
[
"Create",
"a",
"transpose",
"of",
"a",
"matrix",
"."
] |
4bffcfe7089e421d603eb0a9708b84789c2d16be
|
https://github.com/olsoneric/pedemath/blob/4bffcfe7089e421d603eb0a9708b84789c2d16be/pedemath/matrix.py#L42-L52
|
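The transpose above is just an index swap on the 4x4 .data lists; the same swap on a plain nested list, for illustration:

src = [[r * 4 + c for c in range(4)] for r in range(4)]
transposed = [[src[j][i] for j in range(4)] for i in range(4)]
assert transposed[1][0] == src[0][1] == 1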
240,670
|
olsoneric/pedemath
|
pedemath/matrix.py
|
is_affine_mat44
|
def is_affine_mat44(mat):
"""Return True if only tranlsate, rotate, and uniform scale components."""
# Ensure scale is uniform
if not (mat.data[0][0] == mat.data[1][1] == mat.data[2][2]):
return False
# Ensure row [3] is 0, 0, 0, 1
return (mat.data[0][3] == 0 and
mat.data[1][3] == 0 and
mat.data[2][3] == 0 and
mat.data[3][3] == 1)
|
python
|
def is_affine_mat44(mat):
"""Return True if only tranlsate, rotate, and uniform scale components."""
# Ensure scale is uniform
if not (mat.data[0][0] == mat.data[1][1] == mat.data[2][2]):
return False
# Ensure row [3] is 0, 0, 0, 1
return (mat.data[0][3] == 0 and
mat.data[1][3] == 0 and
mat.data[2][3] == 0 and
mat.data[3][3] == 1)
|
[
"def",
"is_affine_mat44",
"(",
"mat",
")",
":",
"# Ensure scale is uniform",
"if",
"not",
"(",
"mat",
".",
"data",
"[",
"0",
"]",
"[",
"0",
"]",
"==",
"mat",
".",
"data",
"[",
"1",
"]",
"[",
"1",
"]",
"==",
"mat",
".",
"data",
"[",
"2",
"]",
"[",
"2",
"]",
")",
":",
"return",
"False",
"# Ensure row [3] is 0, 0, 0, 1",
"return",
"(",
"mat",
".",
"data",
"[",
"0",
"]",
"[",
"3",
"]",
"==",
"0",
"and",
"mat",
".",
"data",
"[",
"1",
"]",
"[",
"3",
"]",
"==",
"0",
"and",
"mat",
".",
"data",
"[",
"2",
"]",
"[",
"3",
"]",
"==",
"0",
"and",
"mat",
".",
"data",
"[",
"3",
"]",
"[",
"3",
"]",
"==",
"1",
")"
] |
Return True if only tranlsate, rotate, and uniform scale components.
|
[
"Return",
"True",
"if",
"only",
"tranlsate",
"rotate",
"and",
"uniform",
"scale",
"components",
"."
] |
4bffcfe7089e421d603eb0a9708b84789c2d16be
|
https://github.com/olsoneric/pedemath/blob/4bffcfe7089e421d603eb0a9708b84789c2d16be/pedemath/matrix.py#L55-L66
|
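A quick illustration of the two conditions checked above (uniform diagonal scale and a 0, 0, 0, 1 last column), using a plain identity matrix in place of Matrix44.data:

identity = [[1 if r == c else 0 for c in range(4)] for r in range(4)]
uniform_scale = identity[0][0] == identity[1][1] == identity[2][2]
last_column_ok = (identity[0][3], identity[1][3],
                  identity[2][3], identity[3][3]) == (0, 0, 0, 1)
print(uniform_scale and last_column_ok)  # True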
240,671
|
olsoneric/pedemath
|
pedemath/matrix.py
|
invert_affine_mat44
|
def invert_affine_mat44(mat):
"""Assumes there is only rotate, translate, and uniform scale componenets
to the matrix.
"""
inverted = Matrix44()
# Transpose the 3x3 rotation component
for i in range(3):
for j in range(3):
inverted.data[i][j] = mat.data[j][i]
# Set translation: inverted_trans_vec3 = -inv(rot_mat33) * trans_vec3
for row in range(3):
inverted.data[3][row] = (
-inverted.data[0][row] * mat.data[3][0] +
-inverted.data[1][row] * mat.data[3][1] +
-inverted.data[2][row] * mat.data[3][2])
return inverted
|
python
|
def invert_affine_mat44(mat):
"""Assumes there is only rotate, translate, and uniform scale componenets
to the matrix.
"""
inverted = Matrix44()
# Transpose the 3x3 rotation component
for i in range(3):
for j in range(3):
inverted.data[i][j] = mat.data[j][i]
# Set translation: inverted_trans_vec3 = -inv(rot_mat33) * trans_vec3
for row in range(3):
inverted.data[3][row] = (
-inverted.data[0][row] * mat.data[3][0] +
-inverted.data[1][row] * mat.data[3][1] +
-inverted.data[2][row] * mat.data[3][2])
return inverted
|
[
"def",
"invert_affine_mat44",
"(",
"mat",
")",
":",
"inverted",
"=",
"Matrix44",
"(",
")",
"# Transpose the 3x3 rotation component",
"for",
"i",
"in",
"range",
"(",
"3",
")",
":",
"for",
"j",
"in",
"range",
"(",
"3",
")",
":",
"inverted",
".",
"data",
"[",
"i",
"]",
"[",
"j",
"]",
"=",
"mat",
".",
"data",
"[",
"j",
"]",
"[",
"i",
"]",
"# Set translation: inverted_trans_vec3 = -inv(rot_mat33) * trans_vec3",
"for",
"row",
"in",
"range",
"(",
"3",
")",
":",
"inverted",
".",
"data",
"[",
"3",
"]",
"[",
"row",
"]",
"=",
"(",
"-",
"inverted",
".",
"data",
"[",
"0",
"]",
"[",
"row",
"]",
"*",
"mat",
".",
"data",
"[",
"3",
"]",
"[",
"0",
"]",
"+",
"-",
"inverted",
".",
"data",
"[",
"1",
"]",
"[",
"row",
"]",
"*",
"mat",
".",
"data",
"[",
"3",
"]",
"[",
"1",
"]",
"+",
"-",
"inverted",
".",
"data",
"[",
"2",
"]",
"[",
"row",
"]",
"*",
"mat",
".",
"data",
"[",
"3",
"]",
"[",
"2",
"]",
")",
"return",
"inverted"
] |
Assumes there is only rotate, translate, and uniform scale componenets
to the matrix.
|
[
"Assumes",
"there",
"is",
"only",
"rotate",
"translate",
"and",
"uniform",
"scale",
"componenets",
"to",
"the",
"matrix",
"."
] |
4bffcfe7089e421d603eb0a9708b84789c2d16be
|
https://github.com/olsoneric/pedemath/blob/4bffcfe7089e421d603eb0a9708b84789c2d16be/pedemath/matrix.py#L69-L88
|
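For a pure translation (identity rotation) the inversion above reduces to negating the translation row; a sketch of that special case on plain lists:

tx, ty, tz = 2.0, 3.0, 4.0
rot_t = [[1, 0, 0], [0, 1, 0], [0, 0, 1]]  # transposed identity rotation
inv_row3 = [-(rot_t[0][r] * tx + rot_t[1][r] * ty + rot_t[2][r] * tz)
            for r in range(3)]
print(inv_row3)  # [-2.0, -3.0, -4.0]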
240,672
|
olsoneric/pedemath
|
pedemath/matrix.py
|
Matrix44.almost_equal
|
def almost_equal(self, mat2, places=7):
"""Return True if the values in mat2 equal the values in this
matrix.
"""
if not hasattr(mat2, "data"):
return False
for i in range(4):
if not (_float_almost_equal(self.data[i][0],
mat2.data[i][0], places) and
_float_almost_equal(self.data[i][1],
mat2.data[i][1], places) and
_float_almost_equal(self.data[i][2],
mat2.data[i][2], places) and
_float_almost_equal(self.data[i][3],
mat2.data[i][3], places)):
return False
return True
|
python
|
def almost_equal(self, mat2, places=7):
"""Return True if the values in mat2 equal the values in this
matrix.
"""
if not hasattr(mat2, "data"):
return False
for i in range(4):
if not (_float_almost_equal(self.data[i][0],
mat2.data[i][0], places) and
_float_almost_equal(self.data[i][1],
mat2.data[i][1], places) and
_float_almost_equal(self.data[i][2],
mat2.data[i][2], places) and
_float_almost_equal(self.data[i][3],
mat2.data[i][3], places)):
return False
return True
|
[
"def",
"almost_equal",
"(",
"self",
",",
"mat2",
",",
"places",
"=",
"7",
")",
":",
"if",
"not",
"hasattr",
"(",
"mat2",
",",
"\"data\"",
")",
":",
"return",
"False",
"for",
"i",
"in",
"range",
"(",
"4",
")",
":",
"if",
"not",
"(",
"_float_almost_equal",
"(",
"self",
".",
"data",
"[",
"i",
"]",
"[",
"0",
"]",
",",
"mat2",
".",
"data",
"[",
"i",
"]",
"[",
"0",
"]",
",",
"places",
")",
"and",
"_float_almost_equal",
"(",
"self",
".",
"data",
"[",
"i",
"]",
"[",
"1",
"]",
",",
"mat2",
".",
"data",
"[",
"i",
"]",
"[",
"1",
"]",
",",
"places",
")",
"and",
"_float_almost_equal",
"(",
"self",
".",
"data",
"[",
"i",
"]",
"[",
"2",
"]",
",",
"mat2",
".",
"data",
"[",
"i",
"]",
"[",
"2",
"]",
",",
"places",
")",
"and",
"_float_almost_equal",
"(",
"self",
".",
"data",
"[",
"i",
"]",
"[",
"3",
"]",
",",
"mat2",
".",
"data",
"[",
"i",
"]",
"[",
"3",
"]",
",",
"places",
")",
")",
":",
"return",
"False",
"return",
"True"
] |
Return True if the values in mat2 equal the values in this
matrix.
|
[
"Return",
"True",
"if",
"the",
"values",
"in",
"mat2",
"equal",
"the",
"values",
"in",
"this",
"matrix",
"."
] |
4bffcfe7089e421d603eb0a9708b84789c2d16be
|
https://github.com/olsoneric/pedemath/blob/4bffcfe7089e421d603eb0a9708b84789c2d16be/pedemath/matrix.py#L217-L236
|
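_float_almost_equal is not shown in this snippet; a plausible stand-in with the same signature (rounding the difference, as unittest.assertAlmostEqual does) illustrates the per-element tolerance:

def _float_almost_equal(a, b, places=7):
    # treat a and b as equal when they agree to `places` decimal places
    return round(a - b, places) == 0

print(_float_almost_equal(1.0, 1.0 + 1e-9))  # True
print(_float_almost_equal(1.0, 1.0 + 1e-3))  # False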
240,673
|
olsoneric/pedemath
|
pedemath/matrix.py
|
Matrix44.rot_from_vectors
|
def rot_from_vectors(start_vec, end_vec):
"""Return the rotation matrix to rotate from one vector to another."""
dot = start_vec.dot(end_vec)
# TODO: check if dot is a valid number
angle = math.acos(dot)
# TODO: check if angle is a valid number
cross = start_vec.cross(end_vec)
cross.normalize
rot_matrix = Matrix44.from_axis_angle(cross, angle)
# TODO: catch exception and return identity for invalid numbers
return rot_matrix
|
python
|
def rot_from_vectors(start_vec, end_vec):
"""Return the rotation matrix to rotate from one vector to another."""
dot = start_vec.dot(end_vec)
# TODO: check if dot is a valid number
angle = math.acos(dot)
# TODO: check if angle is a valid number
cross = start_vec.cross(end_vec)
cross.normalize
rot_matrix = Matrix44.from_axis_angle(cross, angle)
# TODO: catch exception and return identity for invalid numbers
return rot_matrix
|
[
"def",
"rot_from_vectors",
"(",
"start_vec",
",",
"end_vec",
")",
":",
"dot",
"=",
"start_vec",
".",
"dot",
"(",
"end_vec",
")",
"# TODO: check if dot is a valid number",
"angle",
"=",
"math",
".",
"acos",
"(",
"dot",
")",
"# TODO: check if angle is a valid number",
"cross",
"=",
"start_vec",
".",
"cross",
"(",
"end_vec",
")",
"cross",
".",
"normalize",
"rot_matrix",
"=",
"Matrix44",
".",
"from_axis_angle",
"(",
"cross",
",",
"angle",
")",
"# TODO: catch exception and return identity for invalid numbers",
"return",
"rot_matrix"
] |
Return the rotation matrix to rotate from one vector to another.
|
[
"Return",
"the",
"rotation",
"matrix",
"to",
"rotate",
"from",
"one",
"vector",
"to",
"another",
"."
] |
4bffcfe7089e421d603eb0a9708b84789c2d16be
|
https://github.com/olsoneric/pedemath/blob/4bffcfe7089e421d603eb0a9708b84789c2d16be/pedemath/matrix.py#L299-L311
|
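The dot/acos/cross sequence above, sketched on plain 3-tuples; both inputs are assumed to be unit vectors and the dot product is clamped so math.acos stays in its domain (the snippet itself leaves those checks as TODOs):

import math

def axis_angle(a, b):
    dot = max(-1.0, min(1.0, sum(x * y for x, y in zip(a, b))))
    angle = math.acos(dot)
    axis = (a[1] * b[2] - a[2] * b[1],
            a[2] * b[0] - a[0] * b[2],
            a[0] * b[1] - a[1] * b[0])
    return axis, angle

print(axis_angle((1, 0, 0), (0, 1, 0)))  # ((0, 0, 1), 1.570796...)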
240,674
|
olsoneric/pedemath
|
pedemath/matrix.py
|
Matrix44.get_trans
|
def get_trans(self, out_vec=None):
"""Return the translation portion of the matrix as a vector.
If out_vec is provided, store in out_vec instead of creating a new Vec3.
"""
if out_vec:
return out_vec.set(*self.data[3][:3])
return Vec3(*self.data[3][:3])
|
python
|
def get_trans(self, out_vec=None):
"""Return the translation portion of the matrix as a vector.
If out_vec is provided, store in out_vec instead of creating a new Vec3.
"""
if out_vec:
return out_vec.set(*self.data[3][:3])
return Vec3(*self.data[3][:3])
|
[
"def",
"get_trans",
"(",
"self",
",",
"out_vec",
"=",
"None",
")",
":",
"if",
"out_vec",
":",
"return",
"out_vec",
".",
"set",
"(",
"*",
"self",
".",
"data",
"[",
"3",
"]",
"[",
":",
"3",
"]",
")",
"return",
"Vec3",
"(",
"*",
"self",
".",
"data",
"[",
"3",
"]",
"[",
":",
"3",
"]",
")"
] |
Return the translation portion of the matrix as a vector.
If out_vec is provided, store in out_vec instead of creating a new Vec3.
|
[
"Return",
"the",
"translation",
"portion",
"of",
"the",
"matrix",
"as",
"a",
"vector",
"."
] |
4bffcfe7089e421d603eb0a9708b84789c2d16be
|
https://github.com/olsoneric/pedemath/blob/4bffcfe7089e421d603eb0a9708b84789c2d16be/pedemath/matrix.py#L322-L331
|
240,675
|
olsoneric/pedemath
|
pedemath/matrix.py
|
Matrix44.set_trans
|
def set_trans(self, trans_vec):
"""Set the translation components of the matrix."""
# Column major, translation components in column 3.
self.data[3][0] = trans_vec[0]
self.data[3][1] = trans_vec[1]
self.data[3][2] = trans_vec[2]
|
python
|
def set_trans(self, trans_vec):
"""Set the translation components of the matrix."""
# Column major, translation components in column 3.
self.data[3][0] = trans_vec[0]
self.data[3][1] = trans_vec[1]
self.data[3][2] = trans_vec[2]
|
[
"def",
"set_trans",
"(",
"self",
",",
"trans_vec",
")",
":",
"# Column major, translation components in column 3.",
"self",
".",
"data",
"[",
"3",
"]",
"[",
"0",
"]",
"=",
"trans_vec",
"[",
"0",
"]",
"self",
".",
"data",
"[",
"3",
"]",
"[",
"1",
"]",
"=",
"trans_vec",
"[",
"1",
"]",
"self",
".",
"data",
"[",
"3",
"]",
"[",
"2",
"]",
"=",
"trans_vec",
"[",
"2",
"]"
] |
Set the translation components of the matrix.
|
[
"Set",
"the",
"translation",
"components",
"of",
"the",
"matrix",
"."
] |
4bffcfe7089e421d603eb0a9708b84789c2d16be
|
https://github.com/olsoneric/pedemath/blob/4bffcfe7089e421d603eb0a9708b84789c2d16be/pedemath/matrix.py#L333-L339
|
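Rows of .data are the matrix columns here, so the translation lives at row index 3; setting and reading it back on a plain list shows the layout used by set_trans and get_trans:

data = [[0.0] * 4 for _ in range(4)]
data[3][0], data[3][1], data[3][2] = (5.0, 6.0, 7.0)
print(data[3][:3])  # [5.0, 6.0, 7.0]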
240,676
|
kumar303/desub
|
desub/desub.py
|
Desub.is_running
|
def is_running(self):
"""
True if the subprocess is running.
If it's a zombie then we call
:func:`desub.Desub.stop` to kill it with fire and return False.
"""
pp = self.pid
if pp:
try:
proc = psutil.Process(pp)
# Possible status:
# "STATUS_RUNNING", "STATUS_IDLE",
# "STATUS_SLEEPING", "STATUS_DISK_SLEEP",
# "STATUS_STOPPED", "STATUS_TRACING_STOP",
# "STATUS_ZOMBIE", "STATUS_DEAD",
# "STATUS_WAKING", "STATUS_LOCKED",
if proc.status in (psutil.STATUS_STOPPED,
psutil.STATUS_DEAD,
psutil.STATUS_ZOMBIE):
# The PID is still in the process table so call stop to
# remove the PID.
self.stop()
return False
else:
# OK, it's running.
return True
except psutil.NoSuchProcess:
pass
return False
|
python
|
def is_running(self):
"""
True if the subprocess is running.
If it's a zombie then we call
:func:`desub.Desub.stop` to kill it with fire and return False.
"""
pp = self.pid
if pp:
try:
proc = psutil.Process(pp)
# Possible status:
# "STATUS_RUNNING", "STATUS_IDLE",
# "STATUS_SLEEPING", "STATUS_DISK_SLEEP",
# "STATUS_STOPPED", "STATUS_TRACING_STOP",
# "STATUS_ZOMBIE", "STATUS_DEAD",
# "STATUS_WAKING", "STATUS_LOCKED",
if proc.status in (psutil.STATUS_STOPPED,
psutil.STATUS_DEAD,
psutil.STATUS_ZOMBIE):
# The PID is still in the process table so call stop to
# remove the PID.
self.stop()
return False
else:
# OK, it's running.
return True
except psutil.NoSuchProcess:
pass
return False
|
[
"def",
"is_running",
"(",
"self",
")",
":",
"pp",
"=",
"self",
".",
"pid",
"if",
"pp",
":",
"try",
":",
"proc",
"=",
"psutil",
".",
"Process",
"(",
"pp",
")",
"# Possible status:",
"# \"STATUS_RUNNING\", \"STATUS_IDLE\",",
"# \"STATUS_SLEEPING\", \"STATUS_DISK_SLEEP\",",
"# \"STATUS_STOPPED\", \"STATUS_TRACING_STOP\",",
"# \"STATUS_ZOMBIE\", \"STATUS_DEAD\",",
"# \"STATUS_WAKING\", \"STATUS_LOCKED\",",
"if",
"proc",
".",
"status",
"in",
"(",
"psutil",
".",
"STATUS_STOPPED",
",",
"psutil",
".",
"STATUS_DEAD",
",",
"psutil",
".",
"STATUS_ZOMBIE",
")",
":",
"# The PID is still in the process table so call stop to",
"# remove the PID.",
"self",
".",
"stop",
"(",
")",
"return",
"False",
"else",
":",
"# OK, it's running.",
"return",
"True",
"except",
"psutil",
".",
"NoSuchProcess",
":",
"pass",
"return",
"False"
] |
True if the subprocess is running.
If it's a zombie then we call
:func:`desub.Desub.stop` to kill it with fire and return False.
|
[
"True",
"if",
"the",
"subprocess",
"is",
"running",
"."
] |
2f495219b7d6d92e10a861d2a95cc10fe2a3483d
|
https://github.com/kumar303/desub/blob/2f495219b7d6d92e10a861d2a95cc10fe2a3483d/desub/desub.py#L117-L149
|
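A rough stand-alone equivalent of the liveness check above for current psutil releases, where status() is a method rather than an attribute; the function name is illustrative:

import os
import psutil

def pid_is_alive(pid):
    try:
        status = psutil.Process(pid).status()
    except psutil.NoSuchProcess:
        return False
    return status not in (psutil.STATUS_STOPPED,
                          psutil.STATUS_DEAD,
                          psutil.STATUS_ZOMBIE)

print(pid_is_alive(os.getpid()))  # True for the running interpreter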
240,677
|
kumar303/desub
|
desub/desub.py
|
Desub.pid
|
def pid(self):
"""
The integer PID of the subprocess or None.
"""
pf = self.path('cmd.pid')
if not os.path.exists(pf):
return None
with open(pf, 'r') as f:
return int(f.read())
|
python
|
def pid(self):
"""
The integer PID of the subprocess or None.
"""
pf = self.path('cmd.pid')
if not os.path.exists(pf):
return None
with open(pf, 'r') as f:
return int(f.read())
|
[
"def",
"pid",
"(",
"self",
")",
":",
"pf",
"=",
"self",
".",
"path",
"(",
"'cmd.pid'",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"pf",
")",
":",
"return",
"None",
"with",
"open",
"(",
"pf",
",",
"'r'",
")",
"as",
"f",
":",
"return",
"int",
"(",
"f",
".",
"read",
"(",
")",
")"
] |
The integer PID of the subprocess or None.
|
[
"The",
"integer",
"PID",
"of",
"the",
"subprocess",
"or",
"None",
"."
] |
2f495219b7d6d92e10a861d2a95cc10fe2a3483d
|
https://github.com/kumar303/desub/blob/2f495219b7d6d92e10a861d2a95cc10fe2a3483d/desub/desub.py#L162-L170
|
240,678
|
kumar303/desub
|
desub/desub.py
|
Desub.start
|
def start(self):
"""Start the subprocess."""
c_out, c_err = (open(self.path('cmd.stdout'), 'w'),
open(self.path('cmd.stderr'), 'w'))
kw = self.kw.copy()
kw['stdout'] = c_out
kw['stderr'] = c_err
if not kw.get('cwd', None):
kw['cwd'] = os.getcwd()
pr = subprocess.Popen(self.cmd_args, **kw)
with open(self.path('cmd.pid'), 'w') as f:
f.write(str(pr.pid))
|
python
|
def start(self):
"""Start the subprocess."""
c_out, c_err = (open(self.path('cmd.stdout'), 'w'),
open(self.path('cmd.stderr'), 'w'))
kw = self.kw.copy()
kw['stdout'] = c_out
kw['stderr'] = c_err
if not kw.get('cwd', None):
kw['cwd'] = os.getcwd()
pr = subprocess.Popen(self.cmd_args, **kw)
with open(self.path('cmd.pid'), 'w') as f:
f.write(str(pr.pid))
|
[
"def",
"start",
"(",
"self",
")",
":",
"c_out",
",",
"c_err",
"=",
"(",
"open",
"(",
"self",
".",
"path",
"(",
"'cmd.stdout'",
")",
",",
"'w'",
")",
",",
"open",
"(",
"self",
".",
"path",
"(",
"'cmd.stderr'",
")",
",",
"'w'",
")",
")",
"kw",
"=",
"self",
".",
"kw",
".",
"copy",
"(",
")",
"kw",
"[",
"'stdout'",
"]",
"=",
"c_out",
"kw",
"[",
"'stderr'",
"]",
"=",
"c_err",
"if",
"not",
"kw",
".",
"get",
"(",
"'cwd'",
",",
"None",
")",
":",
"kw",
"[",
"'cwd'",
"]",
"=",
"os",
".",
"getcwd",
"(",
")",
"pr",
"=",
"subprocess",
".",
"Popen",
"(",
"self",
".",
"cmd_args",
",",
"*",
"*",
"kw",
")",
"with",
"open",
"(",
"self",
".",
"path",
"(",
"'cmd.pid'",
")",
",",
"'w'",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"str",
"(",
"pr",
".",
"pid",
")",
")"
] |
Start the subprocess.
|
[
"Start",
"the",
"subprocess",
"."
] |
2f495219b7d6d92e10a861d2a95cc10fe2a3483d
|
https://github.com/kumar303/desub/blob/2f495219b7d6d92e10a861d2a95cc10fe2a3483d/desub/desub.py#L172-L183
|
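The start pattern above (redirect output to files, persist the PID) as a stand-alone sketch; the cmd.stdout/cmd.stderr/cmd.pid names mirror the snippet, the helper name and root directory are made up:

import os
import subprocess

def start_logged(cmd_args, root):
    out = open(os.path.join(root, 'cmd.stdout'), 'w')
    err = open(os.path.join(root, 'cmd.stderr'), 'w')
    proc = subprocess.Popen(cmd_args, stdout=out, stderr=err, cwd=os.getcwd())
    with open(os.path.join(root, 'cmd.pid'), 'w') as f:
        f.write(str(proc.pid))
    return proc

# e.g. start_logged(['sleep', '5'], '/tmp')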
240,679
|
kumar303/desub
|
desub/desub.py
|
Desub.stop
|
def stop(self, timeout=15):
"""Stop the subprocess.
Keyword Arguments
**timeout**
Time in seconds to wait for a process and its
children to exit.
"""
pp = self.pid
if pp:
try:
kill_process_nicely(pp, timeout=timeout)
except psutil.NoSuchProcess:
pass
|
python
|
def stop(self, timeout=15):
"""Stop the subprocess.
Keyword Arguments
**timeout**
Time in seconds to wait for a process and its
children to exit.
"""
pp = self.pid
if pp:
try:
kill_process_nicely(pp, timeout=timeout)
except psutil.NoSuchProcess:
pass
|
[
"def",
"stop",
"(",
"self",
",",
"timeout",
"=",
"15",
")",
":",
"pp",
"=",
"self",
".",
"pid",
"if",
"pp",
":",
"try",
":",
"kill_process_nicely",
"(",
"pp",
",",
"timeout",
"=",
"timeout",
")",
"except",
"psutil",
".",
"NoSuchProcess",
":",
"pass"
] |
Stop the subprocess.
Keyword Arguments
**timeout**
Time in seconds to wait for a process and its
children to exit.
|
[
"Stop",
"the",
"subprocess",
"."
] |
2f495219b7d6d92e10a861d2a95cc10fe2a3483d
|
https://github.com/kumar303/desub/blob/2f495219b7d6d92e10a861d2a95cc10fe2a3483d/desub/desub.py#L185-L199
|
240,680
|
inveniosoftware-attic/invenio-knowledge
|
invenio_knowledge/api.py
|
query_get_kb_by_type
|
def query_get_kb_by_type(kbtype):
"""Return a query to filter kb by type.
:param kbtype: type to filter (e.g: taxonomy)
:return: query to filter kb
"""
return models.KnwKB.query.filter_by(
kbtype=models.KnwKB.KNWKB_TYPES[kbtype])
|
python
|
def query_get_kb_by_type(kbtype):
"""Return a query to filter kb by type.
:param kbtype: type to filter (e.g: taxonomy)
:return: query to filter kb
"""
return models.KnwKB.query.filter_by(
kbtype=models.KnwKB.KNWKB_TYPES[kbtype])
|
[
"def",
"query_get_kb_by_type",
"(",
"kbtype",
")",
":",
"return",
"models",
".",
"KnwKB",
".",
"query",
".",
"filter_by",
"(",
"kbtype",
"=",
"models",
".",
"KnwKB",
".",
"KNWKB_TYPES",
"[",
"kbtype",
"]",
")"
] |
Return a query to filter kb by type.
:param kbtype: type to filter (e.g: taxonomy)
:return: query to filter kb
|
[
"Return",
"a",
"query",
"to",
"filter",
"kb",
"by",
"type",
"."
] |
b31722dc14243ca8f626f8b3bce9718d0119de55
|
https://github.com/inveniosoftware-attic/invenio-knowledge/blob/b31722dc14243ca8f626f8b3bce9718d0119de55/invenio_knowledge/api.py#L100-L107
|
240,681
|
inveniosoftware-attic/invenio-knowledge
|
invenio_knowledge/api.py
|
get_kb_mapping
|
def get_kb_mapping(kb_name="", key="", value="", match_type="e", default="",
limit=None):
"""Get one unique mapping. If not found, return default.
:param kb_name: the name of the kb
:param key: include only lines matching this on left side in the results
:param value: include only lines matching this on right side in the results
:param match_type: s = substring match, e = exact match
:param default: default value if no mapping is found
:return: a mapping
"""
mappings = get_kb_mappings(kb_name, key=key, value=value,
match_type=match_type, limit=limit)
if len(mappings) == 0:
return default
else:
return mappings[0]
|
python
|
def get_kb_mapping(kb_name="", key="", value="", match_type="e", default="",
limit=None):
"""Get one unique mapping. If not found, return default.
:param kb_name: the name of the kb
:param key: include only lines matching this on left side in the results
:param value: include only lines matching this on right side in the results
:param match_type: s = substring match, e = exact match
:param default: default value if no mapping is found
:return: a mapping
"""
mappings = get_kb_mappings(kb_name, key=key, value=value,
match_type=match_type, limit=limit)
if len(mappings) == 0:
return default
else:
return mappings[0]
|
[
"def",
"get_kb_mapping",
"(",
"kb_name",
"=",
"\"\"",
",",
"key",
"=",
"\"\"",
",",
"value",
"=",
"\"\"",
",",
"match_type",
"=",
"\"e\"",
",",
"default",
"=",
"\"\"",
",",
"limit",
"=",
"None",
")",
":",
"mappings",
"=",
"get_kb_mappings",
"(",
"kb_name",
",",
"key",
"=",
"key",
",",
"value",
"=",
"value",
",",
"match_type",
"=",
"match_type",
",",
"limit",
"=",
"limit",
")",
"if",
"len",
"(",
"mappings",
")",
"==",
"0",
":",
"return",
"default",
"else",
":",
"return",
"mappings",
"[",
"0",
"]"
] |
Get one unique mapping. If not found, return default.
:param kb_name: the name of the kb
:param key: include only lines matching this on left side in the results
:param value: include only lines matching this on right side in the results
:param match_type: s = substring match, e = exact match
:param default: default value if no mapping is found
:return: a mapping
|
[
"Get",
"one",
"unique",
"mapping",
".",
"If",
"not",
"found",
"return",
"default",
"."
] |
b31722dc14243ca8f626f8b3bce9718d0119de55
|
https://github.com/inveniosoftware-attic/invenio-knowledge/blob/b31722dc14243ca8f626f8b3bce9718d0119de55/invenio_knowledge/api.py#L170-L187
|
240,682
|
inveniosoftware-attic/invenio-knowledge
|
invenio_knowledge/api.py
|
add_kb_mapping
|
def add_kb_mapping(kb_name, key, value=""):
"""Add a new mapping to given kb.
:param kb_name: the name of the kb where to insert the new value
:param key: the key of the mapping
:param value: the value of the mapping
"""
kb = get_kb_by_name(kb_name)
if key in kb.kbrvals:
# update
kb.kbrvals[key].m_value = value
else:
# insert
kb.kbrvals.set(models.KnwKBRVAL(m_key=key, m_value=value))
|
python
|
def add_kb_mapping(kb_name, key, value=""):
"""Add a new mapping to given kb.
:param kb_name: the name of the kb where to insert the new value
:param key: the key of the mapping
:param value: the value of the mapping
"""
kb = get_kb_by_name(kb_name)
if key in kb.kbrvals:
# update
kb.kbrvals[key].m_value = value
else:
# insert
kb.kbrvals.set(models.KnwKBRVAL(m_key=key, m_value=value))
|
[
"def",
"add_kb_mapping",
"(",
"kb_name",
",",
"key",
",",
"value",
"=",
"\"\"",
")",
":",
"kb",
"=",
"get_kb_by_name",
"(",
"kb_name",
")",
"if",
"key",
"in",
"kb",
".",
"kbrvals",
":",
"# update",
"kb",
".",
"kbrvals",
"[",
"key",
"]",
".",
"m_value",
"=",
"value",
"else",
":",
"# insert",
"kb",
".",
"kbrvals",
".",
"set",
"(",
"models",
".",
"KnwKBRVAL",
"(",
"m_key",
"=",
"key",
",",
"m_value",
"=",
"value",
")",
")"
] |
Add a new mapping to given kb.
:param kb_name: the name of the kb where to insert the new value
:param key: the key of the mapping
:param value: the value of the mapping
|
[
"Add",
"a",
"new",
"mapping",
"to",
"given",
"kb",
"."
] |
b31722dc14243ca8f626f8b3bce9718d0119de55
|
https://github.com/inveniosoftware-attic/invenio-knowledge/blob/b31722dc14243ca8f626f8b3bce9718d0119de55/invenio_knowledge/api.py#L191-L204
|
240,683
|
inveniosoftware-attic/invenio-knowledge
|
invenio_knowledge/api.py
|
update_kb_mapping
|
def update_kb_mapping(kb_name, old_key, key, value):
"""Update an existing kb mapping with key old_key with a new key and value.
:param kb_name: the name of the kb where to insert the new value
:param old_key: the key of the mapping in the kb
:param key: the new key of the mapping
:param value: the new value of the mapping
"""
db.session.query(models.KnwKBRVAL).join(models.KnwKB) \
.filter(models.KnwKB.name == kb_name,
models.KnwKBRVAL.m_key == old_key) \
.update({"m_key": key, "m_value": value}, synchronize_session=False)
|
python
|
def update_kb_mapping(kb_name, old_key, key, value):
"""Update an existing kb mapping with key old_key with a new key and value.
:param kb_name: the name of the kb where to insert the new value
:param old_key: the key of the mapping in the kb
:param key: the new key of the mapping
:param value: the new value of the mapping
"""
db.session.query(models.KnwKBRVAL).join(models.KnwKB) \
.filter(models.KnwKB.name == kb_name,
models.KnwKBRVAL.m_key == old_key) \
.update({"m_key": key, "m_value": value}, synchronize_session=False)
|
[
"def",
"update_kb_mapping",
"(",
"kb_name",
",",
"old_key",
",",
"key",
",",
"value",
")",
":",
"db",
".",
"session",
".",
"query",
"(",
"models",
".",
"KnwKBRVAL",
")",
".",
"join",
"(",
"models",
".",
"KnwKB",
")",
".",
"filter",
"(",
"models",
".",
"KnwKB",
".",
"name",
"==",
"kb_name",
",",
"models",
".",
"KnwKBRVAL",
".",
"m_key",
"==",
"old_key",
")",
".",
"update",
"(",
"{",
"\"m_key\"",
":",
"key",
",",
"\"m_value\"",
":",
"value",
"}",
",",
"synchronize_session",
"=",
"False",
")"
] |
Update an existing kb mapping with key old_key with a new key and value.
:param kb_name: the name of the kb where to insert the new value
:param old_key: the key of the mapping in the kb
:param key: the new key of the mapping
:param value: the new value of the mapping
|
[
"Update",
"an",
"existing",
"kb",
"mapping",
"with",
"key",
"old_key",
"with",
"a",
"new",
"key",
"and",
"value",
"."
] |
b31722dc14243ca8f626f8b3bce9718d0119de55
|
https://github.com/inveniosoftware-attic/invenio-knowledge/blob/b31722dc14243ca8f626f8b3bce9718d0119de55/invenio_knowledge/api.py#L219-L230
|
240,684
|
inveniosoftware-attic/invenio-knowledge
|
invenio_knowledge/api.py
|
add_kb
|
def add_kb(kb_name=u"Untitled", kb_type=None, tries=10):
"""Add a new kb in database, return the id.
Add a new kb in database, and returns its id
The name of the kb will be 'Untitled#'
such that it is unique.
:param kb_name: the name of the kb
:param kb_type: the type of the kb, incl 'taxonomy' and 'dynamic'.
None for typical (leftside-rightside).
:param tries: exit after <n> retry
:return: the id of the newly created kb
"""
created = False
name = kb_name
i = 0
while(i < tries and created is False):
try:
kb = models.KnwKB(name=name, description="", kbtype=kb_type)
created = True
db.session.add(kb)
db.session.commit()
except IntegrityError:
db.session.rollback()
# get the highest id to calculate the new name
result = db.session.execute(
db.select([models.KnwKB.id])
.order_by(db.desc(models.KnwKB.id))
.limit(1)).first()
index = result[0] + 1 if result is not None else 1
name = kb_name + " " + str(index)
i = i + 1
created = False
except Exception:
db.session.rollback()
raise
if created is False:
# TODO raise the right exception
raise Exception(_("Can't create knowledge base \"%(name)s\".\n"
"Probabily the server is busy! "
"Try again later.", name=kb_name))
return kb.id
|
python
|
def add_kb(kb_name=u"Untitled", kb_type=None, tries=10):
"""Add a new kb in database, return the id.
Add a new kb in database, and returns its id
The name of the kb will be 'Untitled#'
such that it is unique.
:param kb_name: the name of the kb
:param kb_type: the type of the kb, incl 'taxonomy' and 'dynamic'.
None for typical (leftside-rightside).
:param tries: exit after <n> retry
:return: the id of the newly created kb
"""
created = False
name = kb_name
i = 0
while(i < tries and created is False):
try:
kb = models.KnwKB(name=name, description="", kbtype=kb_type)
created = True
db.session.add(kb)
db.session.commit()
except IntegrityError:
db.session.rollback()
# get the highest id to calculate the new name
result = db.session.execute(
db.select([models.KnwKB.id])
.order_by(db.desc(models.KnwKB.id))
.limit(1)).first()
index = result[0] + 1 if result is not None else 1
name = kb_name + " " + str(index)
i = i + 1
created = False
except Exception:
db.session.rollback()
raise
if created is False:
# TODO raise the right exception
raise Exception(_("Can't create knowledge base \"%(name)s\".\n"
"Probabily the server is busy! "
"Try again later.", name=kb_name))
return kb.id
|
[
"def",
"add_kb",
"(",
"kb_name",
"=",
"u\"Untitled\"",
",",
"kb_type",
"=",
"None",
",",
"tries",
"=",
"10",
")",
":",
"created",
"=",
"False",
"name",
"=",
"kb_name",
"i",
"=",
"0",
"while",
"(",
"i",
"<",
"tries",
"and",
"created",
"is",
"False",
")",
":",
"try",
":",
"kb",
"=",
"models",
".",
"KnwKB",
"(",
"name",
"=",
"name",
",",
"description",
"=",
"\"\"",
",",
"kbtype",
"=",
"kb_type",
")",
"created",
"=",
"True",
"db",
".",
"session",
".",
"add",
"(",
"kb",
")",
"db",
".",
"session",
".",
"commit",
"(",
")",
"except",
"IntegrityError",
":",
"db",
".",
"session",
".",
"rollback",
"(",
")",
"# get the highest id to calculate the new name",
"result",
"=",
"db",
".",
"session",
".",
"execute",
"(",
"db",
".",
"select",
"(",
"[",
"models",
".",
"KnwKB",
".",
"id",
"]",
")",
".",
"order_by",
"(",
"db",
".",
"desc",
"(",
"models",
".",
"KnwKB",
".",
"id",
")",
")",
".",
"limit",
"(",
"1",
")",
")",
".",
"first",
"(",
")",
"index",
"=",
"result",
"[",
"0",
"]",
"+",
"1",
"if",
"result",
"is",
"not",
"None",
"else",
"1",
"name",
"=",
"kb_name",
"+",
"\" \"",
"+",
"str",
"(",
"index",
")",
"i",
"=",
"i",
"+",
"1",
"created",
"=",
"False",
"except",
"Exception",
":",
"db",
".",
"session",
".",
"rollback",
"(",
")",
"raise",
"if",
"created",
"is",
"False",
":",
"# TODO raise the right exception",
"raise",
"Exception",
"(",
"_",
"(",
"\"Can't create knowledge base \\\"%(name)s\\\".\\n\"",
"\"Probabily the server is busy! \"",
"\"Try again later.\"",
",",
"name",
"=",
"kb_name",
")",
")",
"return",
"kb",
".",
"id"
] |
Add a new kb in database, return the id.
Add a new kb in database, and returns its id
The name of the kb will be 'Untitled#'
such that it is unique.
:param kb_name: the name of the kb
:param kb_type: the type of the kb, incl 'taxonomy' and 'dynamic'.
None for typical (leftside-rightside).
:param tries: exit after <n> retry
:return: the id of the newly created kb
|
[
"Add",
"a",
"new",
"kb",
"in",
"database",
"return",
"the",
"id",
"."
] |
b31722dc14243ca8f626f8b3bce9718d0119de55
|
https://github.com/inveniosoftware-attic/invenio-knowledge/blob/b31722dc14243ca8f626f8b3bce9718d0119de55/invenio_knowledge/api.py#L315-L358
|
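The retry loop above, reduced to its shape: on a uniqueness collision, pick a suffixed name and try again, giving up after a fixed number of attempts. Here ValueError stands in for the database IntegrityError, and the suffix is simply the attempt counter rather than the highest stored id:

def add_with_unique_name(create, base_name, tries=10):
    name = base_name
    for i in range(1, tries + 1):
        try:
            return create(name)
        except ValueError:
            name = "%s %d" % (base_name, i)
    raise RuntimeError("could not create %r after %d tries" % (base_name, tries))

taken = {"Untitled"}
def create(name):
    if name in taken:
        raise ValueError(name)
    taken.add(name)
    return name

print(add_with_unique_name(create, "Untitled"))  # Untitled 1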
240,685
|
inveniosoftware-attic/invenio-knowledge
|
invenio_knowledge/api.py
|
save_kb_dyn_config
|
def save_kb_dyn_config(kb_id, field, expression, collection=None):
"""Save a dynamic knowledge base configuration.
:param kb_id: the id
:param field: the field where values are extracted
:param expression: ..using this expression
:param collection: ..in a certain collection (default is all)
"""
# check that collection exists
if collection:
collection = Collection.query.filter_by(name=collection).one()
kb = get_kb_by_id(kb_id)
kb.set_dyn_config(field, expression, collection)
|
python
|
def save_kb_dyn_config(kb_id, field, expression, collection=None):
"""Save a dynamic knowledge base configuration.
:param kb_id: the id
:param field: the field where values are extracted
:param expression: ..using this expression
:param collection: ..in a certain collection (default is all)
"""
# check that collection exists
if collection:
collection = Collection.query.filter_by(name=collection).one()
kb = get_kb_by_id(kb_id)
kb.set_dyn_config(field, expression, collection)
|
[
"def",
"save_kb_dyn_config",
"(",
"kb_id",
",",
"field",
",",
"expression",
",",
"collection",
"=",
"None",
")",
":",
"# check that collection exists",
"if",
"collection",
":",
"collection",
"=",
"Collection",
".",
"query",
".",
"filter_by",
"(",
"name",
"=",
"collection",
")",
".",
"one",
"(",
")",
"kb",
"=",
"get_kb_by_id",
"(",
"kb_id",
")",
"kb",
".",
"set_dyn_config",
"(",
"field",
",",
"expression",
",",
"collection",
")"
] |
Save a dynamic knowledge base configuration.
:param kb_id: the id
:param field: the field where values are extracted
:param expression: ..using this expression
:param collection: ..in a certain collection (default is all)
|
[
"Save",
"a",
"dynamic",
"knowledge",
"base",
"configuration",
"."
] |
b31722dc14243ca8f626f8b3bce9718d0119de55
|
https://github.com/inveniosoftware-attic/invenio-knowledge/blob/b31722dc14243ca8f626f8b3bce9718d0119de55/invenio_knowledge/api.py#L368-L381
|
240,686
|
inveniosoftware-attic/invenio-knowledge
|
invenio_knowledge/api.py
|
kb_mapping_exists
|
def kb_mapping_exists(kb_name, key):
"""Return the information if a mapping exists.
:param kb_name: knowledge base name
:param key: left side (mapFrom)
"""
try:
kb = get_kb_by_name(kb_name)
except NoResultFound:
return False
return key in kb.kbrvals
|
python
|
def kb_mapping_exists(kb_name, key):
"""Return the information if a mapping exists.
:param kb_name: knowledge base name
:param key: left side (mapFrom)
"""
try:
kb = get_kb_by_name(kb_name)
except NoResultFound:
return False
return key in kb.kbrvals
|
[
"def",
"kb_mapping_exists",
"(",
"kb_name",
",",
"key",
")",
":",
"try",
":",
"kb",
"=",
"get_kb_by_name",
"(",
"kb_name",
")",
"except",
"NoResultFound",
":",
"return",
"False",
"return",
"key",
"in",
"kb",
".",
"kbrvals"
] |
Return the information if a mapping exists.
:param kb_name: knowledge base name
:param key: left side (mapFrom)
|
[
"Return",
"the",
"information",
"if",
"a",
"mapping",
"exists",
"."
] |
b31722dc14243ca8f626f8b3bce9718d0119de55
|
https://github.com/inveniosoftware-attic/invenio-knowledge/blob/b31722dc14243ca8f626f8b3bce9718d0119de55/invenio_knowledge/api.py#L384-L394
|
240,687
|
inveniosoftware-attic/invenio-knowledge
|
invenio_knowledge/api.py
|
delete_kb
|
def delete_kb(kb_name):
"""Delete given kb from database.
:param kb_name: knowledge base name
"""
db.session.delete(models.KnwKB.query.filter_by(
name=kb_name).one())
|
python
|
def delete_kb(kb_name):
"""Delete given kb from database.
:param kb_name: knowledge base name
"""
db.session.delete(models.KnwKB.query.filter_by(
name=kb_name).one())
|
[
"def",
"delete_kb",
"(",
"kb_name",
")",
":",
"db",
".",
"session",
".",
"delete",
"(",
"models",
".",
"KnwKB",
".",
"query",
".",
"filter_by",
"(",
"name",
"=",
"kb_name",
")",
".",
"one",
"(",
")",
")"
] |
Delete given kb from database.
:param kb_name: knowledge base name
|
[
"Delete",
"given",
"kb",
"from",
"database",
"."
] |
b31722dc14243ca8f626f8b3bce9718d0119de55
|
https://github.com/inveniosoftware-attic/invenio-knowledge/blob/b31722dc14243ca8f626f8b3bce9718d0119de55/invenio_knowledge/api.py#L398-L404
|
240,688
|
inveniosoftware-attic/invenio-knowledge
|
invenio_knowledge/api.py
|
get_kba_values
|
def get_kba_values(kb_name, searchname="", searchtype="s"):
"""Return an array of values "authority file" type = just values.
:param kb_name: name of kb
:param searchname: get these values, according to searchtype
:param searchtype: s=substring, e=exact, , sw=startswith
"""
if searchtype == 's' and searchname:
searchname = '%'+searchname+'%'
if searchtype == 'sw' and searchname: # startswith
searchname = searchname+'%'
if not searchname:
searchname = '%'
query = db.session.query(models.KnwKBRVAL).join(models.KnwKB) \
.filter(models.KnwKBRVAL.m_value.like(searchname),
models.KnwKB.name.like(kb_name))
return [(k.m_value,) for k in query.all()]
|
python
|
def get_kba_values(kb_name, searchname="", searchtype="s"):
"""Return an array of values "authority file" type = just values.
:param kb_name: name of kb
:param searchname: get these values, according to searchtype
:param searchtype: s=substring, e=exact, , sw=startswith
"""
if searchtype == 's' and searchname:
searchname = '%'+searchname+'%'
if searchtype == 'sw' and searchname: # startswith
searchname = searchname+'%'
if not searchname:
searchname = '%'
query = db.session.query(models.KnwKBRVAL).join(models.KnwKB) \
.filter(models.KnwKBRVAL.m_value.like(searchname),
models.KnwKB.name.like(kb_name))
return [(k.m_value,) for k in query.all()]
|
[
"def",
"get_kba_values",
"(",
"kb_name",
",",
"searchname",
"=",
"\"\"",
",",
"searchtype",
"=",
"\"s\"",
")",
":",
"if",
"searchtype",
"==",
"'s'",
"and",
"searchname",
":",
"searchname",
"=",
"'%'",
"+",
"searchname",
"+",
"'%'",
"if",
"searchtype",
"==",
"'sw'",
"and",
"searchname",
":",
"# startswith",
"searchname",
"=",
"searchname",
"+",
"'%'",
"if",
"not",
"searchname",
":",
"searchname",
"=",
"'%'",
"query",
"=",
"db",
".",
"session",
".",
"query",
"(",
"models",
".",
"KnwKBRVAL",
")",
".",
"join",
"(",
"models",
".",
"KnwKB",
")",
".",
"filter",
"(",
"models",
".",
"KnwKBRVAL",
".",
"m_value",
".",
"like",
"(",
"searchname",
")",
",",
"models",
".",
"KnwKB",
".",
"name",
".",
"like",
"(",
"kb_name",
")",
")",
"return",
"[",
"(",
"k",
".",
"m_value",
",",
")",
"for",
"k",
"in",
"query",
".",
"all",
"(",
")",
"]"
] |
Return an array of values "authority file" type = just values.
:param kb_name: name of kb
:param searchname: get these values, according to searchtype
:param searchtype: s=substring, e=exact, , sw=startswith
|
[
"Return",
"an",
"array",
"of",
"values",
"authority",
"file",
"type",
"=",
"just",
"values",
"."
] |
b31722dc14243ca8f626f8b3bce9718d0119de55
|
https://github.com/inveniosoftware-attic/invenio-knowledge/blob/b31722dc14243ca8f626f8b3bce9718d0119de55/invenio_knowledge/api.py#L427-L446
|
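The searchtype handling above (and in the key/value searches that follow) boils down to building a SQL LIKE pattern; a small sketch of just that step:

def like_pattern(term, searchtype='s'):
    # 's' -> substring match, 'sw' -> prefix match; an empty term matches all
    if searchtype == 's' and term:
        return '%' + term + '%'
    if searchtype == 'sw' and term:
        return term + '%'
    return term or '%'

print(like_pattern('acc', 's'))   # %acc%
print(like_pattern('acc', 'sw'))  # acc%
print(like_pattern('', 'e'))      # %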
240,689
|
inveniosoftware-attic/invenio-knowledge
|
invenio_knowledge/api.py
|
get_kbr_keys
|
def get_kbr_keys(kb_name, searchkey="", searchvalue="", searchtype='s'):
"""Return an array of keys.
:param kb_name: the name of the knowledge base
:param searchkey: search using this key
:param searchvalue: search using this value
:param searchtype: s = substring, e=exact
"""
if searchtype == 's' and searchkey:
searchkey = '%'+searchkey+'%'
if searchtype == 's' and searchvalue:
searchvalue = '%'+searchvalue+'%'
if searchtype == 'sw' and searchvalue: # startswith
searchvalue = searchvalue+'%'
if not searchvalue:
searchvalue = '%'
if not searchkey:
searchkey = '%'
query = db.session.query(models.KnwKBRVAL).join(models.KnwKB) \
.filter(models.KnwKBRVAL.m_key.like(searchkey),
models.KnwKBRVAL.m_value.like(searchvalue),
models.KnwKB.name.like(kb_name))
return [(k.m_key,) for k in query.all()]
|
python
|
def get_kbr_keys(kb_name, searchkey="", searchvalue="", searchtype='s'):
"""Return an array of keys.
:param kb_name: the name of the knowledge base
:param searchkey: search using this key
:param searchvalue: search using this value
:param searchtype: s = substring, e=exact
"""
if searchtype == 's' and searchkey:
searchkey = '%'+searchkey+'%'
if searchtype == 's' and searchvalue:
searchvalue = '%'+searchvalue+'%'
if searchtype == 'sw' and searchvalue: # startswith
searchvalue = searchvalue+'%'
if not searchvalue:
searchvalue = '%'
if not searchkey:
searchkey = '%'
query = db.session.query(models.KnwKBRVAL).join(models.KnwKB) \
.filter(models.KnwKBRVAL.m_key.like(searchkey),
models.KnwKBRVAL.m_value.like(searchvalue),
models.KnwKB.name.like(kb_name))
return [(k.m_key,) for k in query.all()]
|
[
"def",
"get_kbr_keys",
"(",
"kb_name",
",",
"searchkey",
"=",
"\"\"",
",",
"searchvalue",
"=",
"\"\"",
",",
"searchtype",
"=",
"'s'",
")",
":",
"if",
"searchtype",
"==",
"'s'",
"and",
"searchkey",
":",
"searchkey",
"=",
"'%'",
"+",
"searchkey",
"+",
"'%'",
"if",
"searchtype",
"==",
"'s'",
"and",
"searchvalue",
":",
"searchvalue",
"=",
"'%'",
"+",
"searchvalue",
"+",
"'%'",
"if",
"searchtype",
"==",
"'sw'",
"and",
"searchvalue",
":",
"# startswith",
"searchvalue",
"=",
"searchvalue",
"+",
"'%'",
"if",
"not",
"searchvalue",
":",
"searchvalue",
"=",
"'%'",
"if",
"not",
"searchkey",
":",
"searchkey",
"=",
"'%'",
"query",
"=",
"db",
".",
"session",
".",
"query",
"(",
"models",
".",
"KnwKBRVAL",
")",
".",
"join",
"(",
"models",
".",
"KnwKB",
")",
".",
"filter",
"(",
"models",
".",
"KnwKBRVAL",
".",
"m_key",
".",
"like",
"(",
"searchkey",
")",
",",
"models",
".",
"KnwKBRVAL",
".",
"m_value",
".",
"like",
"(",
"searchvalue",
")",
",",
"models",
".",
"KnwKB",
".",
"name",
".",
"like",
"(",
"kb_name",
")",
")",
"return",
"[",
"(",
"k",
".",
"m_key",
",",
")",
"for",
"k",
"in",
"query",
".",
"all",
"(",
")",
"]"
] |
Return an array of keys.
:param kb_name: the name of the knowledge base
:param searchkey: search using this key
:param searchvalue: search using this value
:param searchtype: s = substring, e=exact
|
[
"Return",
"an",
"array",
"of",
"keys",
"."
] |
b31722dc14243ca8f626f8b3bce9718d0119de55
|
https://github.com/inveniosoftware-attic/invenio-knowledge/blob/b31722dc14243ca8f626f8b3bce9718d0119de55/invenio_knowledge/api.py#L449-L473
|
240,690
|
inveniosoftware-attic/invenio-knowledge
|
invenio_knowledge/api.py
|
get_kbr_values
|
def get_kbr_values(kb_name, searchkey="", searchvalue="", searchtype='s',
use_memoise=False):
"""Return a tuple of values from key-value mapping kb.
:param kb_name: the name of the knowledge base
:param searchkey: search using this key
:param searchvalue: search using this value
:param searchtype: s=substring; e=exact
:param use_memoise: can we memoise while doing lookups?
:type use_memoise: bool
"""
try:
if use_memoise:
kb = get_kb_by_name_memoised(kb_name)
else:
kb = get_kb_by_name(kb_name)
except NoResultFound:
return []
return list(kb.get_kbr_values(searchkey, searchvalue, searchtype))
|
python
|
def get_kbr_values(kb_name, searchkey="", searchvalue="", searchtype='s',
use_memoise=False):
"""Return a tuple of values from key-value mapping kb.
:param kb_name: the name of the knowledge base
:param searchkey: search using this key
:param searchvalue: search using this value
:param searchtype: s=substring; e=exact
:param use_memoise: can we memoise while doing lookups?
:type use_memoise: bool
"""
try:
if use_memoise:
kb = get_kb_by_name_memoised(kb_name)
else:
kb = get_kb_by_name(kb_name)
except NoResultFound:
return []
return list(kb.get_kbr_values(searchkey, searchvalue, searchtype))
|
[
"def",
"get_kbr_values",
"(",
"kb_name",
",",
"searchkey",
"=",
"\"\"",
",",
"searchvalue",
"=",
"\"\"",
",",
"searchtype",
"=",
"'s'",
",",
"use_memoise",
"=",
"False",
")",
":",
"try",
":",
"if",
"use_memoise",
":",
"kb",
"=",
"get_kb_by_name_memoised",
"(",
"kb_name",
")",
"else",
":",
"kb",
"=",
"get_kb_by_name",
"(",
"kb_name",
")",
"except",
"NoResultFound",
":",
"return",
"[",
"]",
"return",
"list",
"(",
"kb",
".",
"get_kbr_values",
"(",
"searchkey",
",",
"searchvalue",
",",
"searchtype",
")",
")"
] |
Return a tuple of values from key-value mapping kb.
:param kb_name: the name of the knowledge base
:param searchkey: search using this key
:param searchvalue: search using this value
:param searchtype: s=substring; e=exact
:param use_memoise: can we memoise while doing lookups?
:type use_memoise: bool
|
[
"Return",
"a",
"tuple",
"of",
"values",
"from",
"key",
"-",
"value",
"mapping",
"kb",
"."
] |
b31722dc14243ca8f626f8b3bce9718d0119de55
|
https://github.com/inveniosoftware-attic/invenio-knowledge/blob/b31722dc14243ca8f626f8b3bce9718d0119de55/invenio_knowledge/api.py#L476-L494
|
240,691
|
inveniosoftware-attic/invenio-knowledge
|
invenio_knowledge/api.py
|
get_kbr_items
|
def get_kbr_items(kb_name, searchkey="", searchvalue="", searchtype='s'):
"""Return a list of dictionaries that match the search.
:param kb_name: the name of the knowledge base
:param searchkey: search using this key
:param searchvalue: search using this value
:param searchtype: s = substring, e=exact
:return: a list of dictionaries [{'key'=>x, 'value'=>y},..]
"""
kb = get_kb_by_name(kb_name)
return kb.get_kbr_items(searchkey, searchvalue, searchtype)
|
python
|
def get_kbr_items(kb_name, searchkey="", searchvalue="", searchtype='s'):
"""Return a list of dictionaries that match the search.
:param kb_name: the name of the knowledge base
:param searchkey: search using this key
:param searchvalue: search using this value
:param searchtype: s = substring, e=exact
:return: a list of dictionaries [{'key'=>x, 'value'=>y},..]
"""
kb = get_kb_by_name(kb_name)
return kb.get_kbr_items(searchkey, searchvalue, searchtype)
|
[
"def",
"get_kbr_items",
"(",
"kb_name",
",",
"searchkey",
"=",
"\"\"",
",",
"searchvalue",
"=",
"\"\"",
",",
"searchtype",
"=",
"'s'",
")",
":",
"kb",
"=",
"get_kb_by_name",
"(",
"kb_name",
")",
"return",
"kb",
".",
"get_kbr_items",
"(",
"searchkey",
",",
"searchvalue",
",",
"searchtype",
")"
] |
Return a list of dictionaries that match the search.
:param kb_name: the name of the knowledge base
:param searchkey: search using this key
:param searchvalue: search using this value
:param searchtype: s = substring, e=exact
:return: a list of dictionaries [{'key'=>x, 'value'=>y},..]
|
[
"Return",
"a",
"list",
"of",
"dictionaries",
"that",
"match",
"the",
"search",
"."
] |
b31722dc14243ca8f626f8b3bce9718d0119de55
|
https://github.com/inveniosoftware-attic/invenio-knowledge/blob/b31722dc14243ca8f626f8b3bce9718d0119de55/invenio_knowledge/api.py#L497-L507
|
240,692
|
inveniosoftware-attic/invenio-knowledge
|
invenio_knowledge/api.py
|
get_kbd_values_json
|
def get_kbd_values_json(kbname, searchwith=""):
"""Return values from searching a dynamic kb as a json-formatted string.
This IS probably the method you want.
:param kbname: name of the knowledge base
:param searchwith: a term to search with
"""
res = get_kbd_values(kbname, searchwith)
return json.dumps(res)
|
python
|
def get_kbd_values_json(kbname, searchwith=""):
"""Return values from searching a dynamic kb as a json-formatted string.
This IS probably the method you want.
:param kbname: name of the knowledge base
:param searchwith: a term to search with
"""
res = get_kbd_values(kbname, searchwith)
return json.dumps(res)
|
[
"def",
"get_kbd_values_json",
"(",
"kbname",
",",
"searchwith",
"=",
"\"\"",
")",
":",
"res",
"=",
"get_kbd_values",
"(",
"kbname",
",",
"searchwith",
")",
"return",
"json",
".",
"dumps",
"(",
"res",
")"
] |
Return values from searching a dynamic kb as a json-formatted string.
This IS probably the method you want.
:param kbname: name of the knowledge base
:param searchwith: a term to search with
|
[
"Return",
"values",
"from",
"searching",
"a",
"dynamic",
"kb",
"as",
"a",
"json",
"-",
"formatted",
"string",
"."
] |
b31722dc14243ca8f626f8b3bce9718d0119de55
|
https://github.com/inveniosoftware-attic/invenio-knowledge/blob/b31722dc14243ca8f626f8b3bce9718d0119de55/invenio_knowledge/api.py#L580-L589
|
240,693
|
inveniosoftware-attic/invenio-knowledge
|
invenio_knowledge/api.py
|
get_kbt_items
|
def get_kbt_items(taxonomyfilename, templatefilename, searchwith=""):
"""
Get items from taxonomy file using a templatefile.
If searchwith is defined, return only items that match with it.
:param taxonomyfilename: full path+name of the RDF file
:param templatefile: full path+name of the XSLT file
:param searchwith: a term to search with
"""
if processor_type == 1:
# lxml
doc = etree.XML(taxonomyfilename)
styledoc = etree.XML(templatefilename)
style = etree.XSLT(styledoc)
result = style(doc)
strres = str(result)
del result
del style
del styledoc
del doc
elif processor_type == 2:
# libxml2 & libxslt
styledoc = libxml2.parseFile(templatefilename)
style = libxslt.parseStylesheetDoc(styledoc)
doc = libxml2.parseFile(taxonomyfilename)
result = style.applyStylesheet(doc, None)
strres = style.saveResultToString(result)
style.freeStylesheet()
doc.freeDoc()
result.freeDoc()
else:
# no xml parser found
strres = ""
ritems = []
if len(strres) == 0:
return []
else:
lines = strres.split("\n")
for line in lines:
if searchwith:
if line.count(searchwith) > 0:
ritems.append(line)
else:
if len(line) > 0:
ritems.append(line)
return ritems
|
python
|
def get_kbt_items(taxonomyfilename, templatefilename, searchwith=""):
"""
Get items from taxonomy file using a templatefile.
If searchwith is defined, return only items that match with it.
:param taxonomyfilename: full path+name of the RDF file
:param templatefile: full path+name of the XSLT file
:param searchwith: a term to search with
"""
if processor_type == 1:
# lxml
doc = etree.XML(taxonomyfilename)
styledoc = etree.XML(templatefilename)
style = etree.XSLT(styledoc)
result = style(doc)
strres = str(result)
del result
del style
del styledoc
del doc
elif processor_type == 2:
# libxml2 & libxslt
styledoc = libxml2.parseFile(templatefilename)
style = libxslt.parseStylesheetDoc(styledoc)
doc = libxml2.parseFile(taxonomyfilename)
result = style.applyStylesheet(doc, None)
strres = style.saveResultToString(result)
style.freeStylesheet()
doc.freeDoc()
result.freeDoc()
else:
# no xml parser found
strres = ""
ritems = []
if len(strres) == 0:
return []
else:
lines = strres.split("\n")
for line in lines:
if searchwith:
if line.count(searchwith) > 0:
ritems.append(line)
else:
if len(line) > 0:
ritems.append(line)
return ritems
|
[
"def",
"get_kbt_items",
"(",
"taxonomyfilename",
",",
"templatefilename",
",",
"searchwith",
"=",
"\"\"",
")",
":",
"if",
"processor_type",
"==",
"1",
":",
"# lxml",
"doc",
"=",
"etree",
".",
"XML",
"(",
"taxonomyfilename",
")",
"styledoc",
"=",
"etree",
".",
"XML",
"(",
"templatefilename",
")",
"style",
"=",
"etree",
".",
"XSLT",
"(",
"styledoc",
")",
"result",
"=",
"style",
"(",
"doc",
")",
"strres",
"=",
"str",
"(",
"result",
")",
"del",
"result",
"del",
"style",
"del",
"styledoc",
"del",
"doc",
"elif",
"processor_type",
"==",
"2",
":",
"# libxml2 & libxslt",
"styledoc",
"=",
"libxml2",
".",
"parseFile",
"(",
"templatefilename",
")",
"style",
"=",
"libxslt",
".",
"parseStylesheetDoc",
"(",
"styledoc",
")",
"doc",
"=",
"libxml2",
".",
"parseFile",
"(",
"taxonomyfilename",
")",
"result",
"=",
"style",
".",
"applyStylesheet",
"(",
"doc",
",",
"None",
")",
"strres",
"=",
"style",
".",
"saveResultToString",
"(",
"result",
")",
"style",
".",
"freeStylesheet",
"(",
")",
"doc",
".",
"freeDoc",
"(",
")",
"result",
".",
"freeDoc",
"(",
")",
"else",
":",
"# no xml parser found",
"strres",
"=",
"\"\"",
"ritems",
"=",
"[",
"]",
"if",
"len",
"(",
"strres",
")",
"==",
"0",
":",
"return",
"[",
"]",
"else",
":",
"lines",
"=",
"strres",
".",
"split",
"(",
"\"\\n\"",
")",
"for",
"line",
"in",
"lines",
":",
"if",
"searchwith",
":",
"if",
"line",
".",
"count",
"(",
"searchwith",
")",
">",
"0",
":",
"ritems",
".",
"append",
"(",
"line",
")",
"else",
":",
"if",
"len",
"(",
"line",
")",
">",
"0",
":",
"ritems",
".",
"append",
"(",
"line",
")",
"return",
"ritems"
] |
Get items from taxonomy file using a templatefile.
If searchwith is defined, return only items that match with it.
:param taxonomyfilename: full path+name of the RDF file
:param templatefile: full path+name of the XSLT file
:param searchwith: a term to search with
|
[
"Get",
"items",
"from",
"taxonomy",
"file",
"using",
"a",
"templatefile",
"."
] |
b31722dc14243ca8f626f8b3bce9718d0119de55
|
https://github.com/inveniosoftware-attic/invenio-knowledge/blob/b31722dc14243ca8f626f8b3bce9718d0119de55/invenio_knowledge/api.py#L592-L639
|
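A sketch of the lxml branch above with documents parsed from disk (the file names are placeholders); non-empty lines of the transformed output become the returned items:

from lxml import etree

doc = etree.parse("taxonomy.rdf")
style = etree.XSLT(etree.parse("template.xslt"))
lines = [line for line in str(style(doc)).split("\n") if line]
print(lines[:5])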
240,694
|
campbellr/smashrun-client
|
smashrun/client.py
|
to_timestamp
|
def to_timestamp(dt):
"""Convert a datetime object to a unix timestamp.
Note that unlike a typical unix timestamp, this is seconds since 1970
*local time*, not UTC.
If the passed in object is already a timestamp, then that value is
simply returned unmodified.
"""
if isinstance(dt, int):
return dt
return int(total_seconds(dt.replace(tzinfo=None) -
datetime.datetime(1970, 1, 1)))
|
python
|
def to_timestamp(dt):
"""Convert a datetime object to a unix timestamp.
Note that unlike a typical unix timestamp, this is seconds since 1970
*local time*, not UTC.
If the passed in object is already a timestamp, then that value is
simply returned unmodified.
"""
if isinstance(dt, int):
return dt
return int(total_seconds(dt.replace(tzinfo=None) -
datetime.datetime(1970, 1, 1)))
|
[
"def",
"to_timestamp",
"(",
"dt",
")",
":",
"if",
"isinstance",
"(",
"dt",
",",
"int",
")",
":",
"return",
"dt",
"return",
"int",
"(",
"total_seconds",
"(",
"dt",
".",
"replace",
"(",
"tzinfo",
"=",
"None",
")",
"-",
"datetime",
".",
"datetime",
"(",
"1970",
",",
"1",
",",
"1",
")",
")",
")"
] |
Convert a datetime object to a unix timestamp.
Note that unlike a typical unix timestamp, this is seconds since 1970
*local time*, not UTC.
If the passed in object is already a timestamp, then that value is
simply returned unmodified.
|
[
"Convert",
"a",
"datetime",
"object",
"to",
"a",
"unix",
"timestamp",
"."
] |
2522cb4d0545cf482a49a9533f12aac94c5aecdc
|
https://github.com/campbellr/smashrun-client/blob/2522cb4d0545cf482a49a9533f12aac94c5aecdc/smashrun/client.py#L257-L269
|
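The "seconds since 1970, local time" arithmetic above, spelled out with timedelta.total_seconds (the total_seconds helper referenced in the snippet is defined elsewhere and not shown here):

import datetime

dt = datetime.datetime(2020, 1, 1, 12, 0, 0)
ts = int((dt.replace(tzinfo=None) - datetime.datetime(1970, 1, 1)).total_seconds())
print(ts)  # 1577880000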
240,695
|
campbellr/smashrun-client
|
smashrun/client.py
|
Smashrun.fetch_token
|
def fetch_token(self, **kwargs):
"""Fetch a new token using the supplied code.
:param str code: A previously obtained auth code.
"""
if 'client_secret' not in kwargs:
kwargs.update(client_secret=self.client_secret)
return self.session.fetch_token(token_url, **kwargs)
|
python
|
def fetch_token(self, **kwargs):
"""Fetch a new token using the supplied code.
:param str code: A previously obtained auth code.
"""
if 'client_secret' not in kwargs:
kwargs.update(client_secret=self.client_secret)
return self.session.fetch_token(token_url, **kwargs)
|
[
"def",
"fetch_token",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"'client_secret'",
"not",
"in",
"kwargs",
":",
"kwargs",
".",
"update",
"(",
"client_secret",
"=",
"self",
".",
"client_secret",
")",
"return",
"self",
".",
"session",
".",
"fetch_token",
"(",
"token_url",
",",
"*",
"*",
"kwargs",
")"
] |
Fetch a new token using the supplied code.
:param str code: A previously obtained auth code.
|
[
"Fetch",
"a",
"new",
"token",
"using",
"the",
"supplied",
"code",
"."
] |
2522cb4d0545cf482a49a9533f12aac94c5aecdc
|
https://github.com/campbellr/smashrun-client/blob/2522cb4d0545cf482a49a9533f12aac94c5aecdc/smashrun/client.py#L43-L51
|
240,696
|
campbellr/smashrun-client
|
smashrun/client.py
|
Smashrun.refresh_token
|
def refresh_token(self, **kwargs):
"""Refresh the authentication token.
:param str refresh_token: The refresh token to use. May be empty if
retrieved with ``fetch_token``.
"""
if 'client_secret' not in kwargs:
kwargs.update(client_secret=self.client_secret)
if 'client_id' not in kwargs:
kwargs.update(client_id=self.client_id)
return self.session.refresh_token(token_url, **kwargs)
|
python
|
def refresh_token(self, **kwargs):
"""Refresh the authentication token.
:param str refresh_token: The refresh token to use. May be empty if
retrieved with ``fetch_token``.
"""
if 'client_secret' not in kwargs:
kwargs.update(client_secret=self.client_secret)
if 'client_id' not in kwargs:
kwargs.update(client_id=self.client_id)
return self.session.refresh_token(token_url, **kwargs)
|
[
"def",
"refresh_token",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"'client_secret'",
"not",
"in",
"kwargs",
":",
"kwargs",
".",
"update",
"(",
"client_secret",
"=",
"self",
".",
"client_secret",
")",
"if",
"'client_id'",
"not",
"in",
"kwargs",
":",
"kwargs",
".",
"update",
"(",
"client_id",
"=",
"self",
".",
"client_id",
")",
"return",
"self",
".",
"session",
".",
"refresh_token",
"(",
"token_url",
",",
"*",
"*",
"kwargs",
")"
] |
Refresh the authentication token.
:param str refresh_token: The refresh token to use. May be empty if
retrieved with ``fetch_token``.
|
[
"Refresh",
"the",
"authentication",
"token",
"."
] |
2522cb4d0545cf482a49a9533f12aac94c5aecdc
|
https://github.com/campbellr/smashrun-client/blob/2522cb4d0545cf482a49a9533f12aac94c5aecdc/smashrun/client.py#L53-L64
|
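A matching sketch for refresh_token, under the same assumptions about how the client is constructed.

# Builds on the previous sketch: `client` and `token` come from fetch_token above.
# client_id and client_secret are merged in automatically when omitted.
new_token = client.refresh_token(refresh_token=token.get("refresh_token"))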
240,697
|
campbellr/smashrun-client
|
smashrun/client.py
|
Smashrun.get_activity
|
def get_activity(self, id_num):
"""Return the activity with the given id.
Note that this contains more detailed information than returned
by `get_activities`.
"""
url = self._build_url('my', 'activities', id_num)
return self._json(url)
|
python
|
def get_activity(self, id_num):
"""Return the activity with the given id.
Note that this contains more detailed information than returned
by `get_activities`.
"""
url = self._build_url('my', 'activities', id_num)
return self._json(url)
|
[
"def",
"get_activity",
"(",
"self",
",",
"id_num",
")",
":",
"url",
"=",
"self",
".",
"_build_url",
"(",
"'my'",
",",
"'activities'",
",",
"id_num",
")",
"return",
"self",
".",
"_json",
"(",
"url",
")"
] |
Return the activity with the given id.
Note that this contains more detailed information than returned
by `get_activities`.
|
[
"Return",
"the",
"activity",
"with",
"the",
"given",
"id",
"."
] |
2522cb4d0545cf482a49a9533f12aac94c5aecdc
|
https://github.com/campbellr/smashrun-client/blob/2522cb4d0545cf482a49a9533f12aac94c5aecdc/smashrun/client.py#L66-L74
|
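A small sketch of the ids/detail split the docstring mentions, assuming `client` is an authenticated Smashrun instance and that the 'ids' style yields bare activity ids (an assumption, not shown in these rows).

# Fetch lightweight ids first, then pull the fuller record for each one.
for activity_id in client.get_activities(style="ids", limit=5):
    detail = client.get_activity(activity_id)
    print(detail)  # richer than the summary records returned by get_activities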
240,698
|
campbellr/smashrun-client
|
smashrun/client.py
|
Smashrun.get_activities
|
def get_activities(self, count=10, since=None, style='summary',
limit=None):
"""Iterate over all activities, from newest to oldest.
:param count: The number of results to retrieve per page. If set to
``None``, pagination is disabled.
:param since: Return only activities since this date. Can be either
a timestamp or a datetime object.
:param style: The type of records to return. May be one of
'summary', 'briefs', 'ids', or 'extended'.
:param limit: The maximum number of activities to return for the given
query.
"""
params = {}
if since:
params.update(fromDate=to_timestamp(since))
parts = ['my', 'activities', 'search']
if style != 'summary':
parts.append(style)
url = self._build_url(*parts)
# TODO: return an Activity (or ActivitySummary?) class that can do
# things like convert date and time fields to proper datetime objects
return islice(self._iter(url, count, **params), limit)
|
python
|
def get_activities(self, count=10, since=None, style='summary',
limit=None):
"""Iterate over all activities, from newest to oldest.
:param count: The number of results to retrieve per page. If set to
``None``, pagination is disabled.
:param since: Return only activities since this date. Can be either
a timestamp or a datetime object.
:param style: The type of records to return. May be one of
'summary', 'briefs', 'ids', or 'extended'.
:param limit: The maximum number of activities to return for the given
query.
"""
params = {}
if since:
params.update(fromDate=to_timestamp(since))
parts = ['my', 'activities', 'search']
if style != 'summary':
parts.append(style)
url = self._build_url(*parts)
# TODO: return an Activity (or ActivitySummary?) class that can do
# things like convert date and time fields to proper datetime objects
return islice(self._iter(url, count, **params), limit)
|
[
"def",
"get_activities",
"(",
"self",
",",
"count",
"=",
"10",
",",
"since",
"=",
"None",
",",
"style",
"=",
"'summary'",
",",
"limit",
"=",
"None",
")",
":",
"params",
"=",
"{",
"}",
"if",
"since",
":",
"params",
".",
"update",
"(",
"fromDate",
"=",
"to_timestamp",
"(",
"since",
")",
")",
"parts",
"=",
"[",
"'my'",
",",
"'activities'",
",",
"'search'",
"]",
"if",
"style",
"!=",
"'summary'",
":",
"parts",
".",
"append",
"(",
"style",
")",
"url",
"=",
"self",
".",
"_build_url",
"(",
"*",
"parts",
")",
"# TODO: return an Activity (or ActivitySummary?) class that can do",
"# things like convert date and time fields to proper datetime objects",
"return",
"islice",
"(",
"self",
".",
"_iter",
"(",
"url",
",",
"count",
",",
"*",
"*",
"params",
")",
",",
"limit",
")"
] |
Iterate over all activities, from newest to oldest.
:param count: The number of results to retrieve per page. If set to
``None``, pagination is disabled.
:param since: Return only activities since this date. Can be either
a timestamp or a datetime object.
:param style: The type of records to return. May be one of
'summary', 'briefs', 'ids', or 'extended'.
:param limit: The maximum number of activities to return for the given
query.
|
[
"Iterate",
"over",
"all",
"activities",
"from",
"newest",
"to",
"oldest",
"."
] |
2522cb4d0545cf482a49a9533f12aac94c5aecdc
|
https://github.com/campbellr/smashrun-client/blob/2522cb4d0545cf482a49a9533f12aac94c5aecdc/smashrun/client.py#L76-L102
|
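A usage sketch for get_activities combining the since/style/limit parameters, again assuming an authenticated `client`; dates and limits are placeholder values.

import datetime

# Newest-to-oldest summaries since 1 Jan 2020, at most 25 of them,
# fetched lazily one page (count=10) at a time.
recent = client.get_activities(since=datetime.datetime(2020, 1, 1), limit=25)
for activity in recent:
    print(activity)

# Passing a unix timestamp for `since` also works, via the to_timestamp helper above.
briefs = client.get_activities(since=1577836800, style="briefs", limit=5)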
240,699
|
campbellr/smashrun-client
|
smashrun/client.py
|
Smashrun.get_notables
|
def get_notables(self, id_num):
"""Return the notables of the activity with the given id.
"""
url = self._build_url('my', 'activities', id_num, 'notables')
return self._json(url)
|
python
|
def get_notables(self, id_num):
"""Return the notables of the activity with the given id.
"""
url = self._build_url('my', 'activities', id_num, 'notables')
return self._json(url)
|
[
"def",
"get_notables",
"(",
"self",
",",
"id_num",
")",
":",
"url",
"=",
"self",
".",
"_build_url",
"(",
"'my'",
",",
"'activities'",
",",
"id_num",
",",
"'notables'",
")",
"return",
"self",
".",
"_json",
"(",
"url",
")"
] |
Return the notables of the activity with the given id.
|
[
"Return",
"the",
"notables",
"of",
"the",
"activity",
"with",
"the",
"given",
"id",
"."
] |
2522cb4d0545cf482a49a9533f12aac94c5aecdc
|
https://github.com/campbellr/smashrun-client/blob/2522cb4d0545cf482a49a9533f12aac94c5aecdc/smashrun/client.py#L109-L113
|