id
int32 0
252k
| repo
stringlengths 7
55
| path
stringlengths 4
127
| func_name
stringlengths 1
88
| original_string
stringlengths 75
19.8k
| language
stringclasses 1
value | code
stringlengths 75
19.8k
| code_tokens
list | docstring
stringlengths 3
17.3k
| docstring_tokens
list | sha
stringlengths 40
40
| url
stringlengths 87
242
|
|---|---|---|---|---|---|---|---|---|---|---|---|
238,200
|
emilydolson/avida-spatial-tools
|
avidaspatial/environment_file_components.py
|
genRandResources
|
def genRandResources(args, resources):
"""
Generates a list of the appropriate length containing a roughly equal
number of all resources in a random order
"""
randResources = []
nEach = int(args.nPatches // len(resources))
extras = int(args.nPatches % len(resources))
for i in range(nEach):
for res in resources:
randResources.append(res + str(i))
additional = random.sample(resources, extras)
for res in additional:
randResources.append(res + str(nEach))
random.shuffle(randResources)
return randResources
|
python
|
def genRandResources(args, resources):
"""
Generates a list of the appropriate length containing a roughly equal
number of all resources in a random order
"""
randResources = []
nEach = int(args.nPatches // len(resources))
extras = int(args.nPatches % len(resources))
for i in range(nEach):
for res in resources:
randResources.append(res + str(i))
additional = random.sample(resources, extras)
for res in additional:
randResources.append(res + str(nEach))
random.shuffle(randResources)
return randResources
|
[
"def",
"genRandResources",
"(",
"args",
",",
"resources",
")",
":",
"randResources",
"=",
"[",
"]",
"nEach",
"=",
"int",
"(",
"args",
".",
"nPatches",
"//",
"len",
"(",
"resources",
")",
")",
"extras",
"=",
"int",
"(",
"args",
".",
"nPatches",
"%",
"len",
"(",
"resources",
")",
")",
"for",
"i",
"in",
"range",
"(",
"nEach",
")",
":",
"for",
"res",
"in",
"resources",
":",
"randResources",
".",
"append",
"(",
"res",
"+",
"str",
"(",
"i",
")",
")",
"additional",
"=",
"random",
".",
"sample",
"(",
"resources",
",",
"extras",
")",
"for",
"res",
"in",
"additional",
":",
"randResources",
".",
"append",
"(",
"res",
"+",
"str",
"(",
"nEach",
")",
")",
"random",
".",
"shuffle",
"(",
"randResources",
")",
"return",
"randResources"
] |
Generates a list of the appropriate length containing a roughly equal
number of all resources in a random order
|
[
"Generates",
"a",
"list",
"of",
"the",
"appropriate",
"length",
"containing",
"a",
"roughly",
"equal",
"number",
"of",
"all",
"resources",
"in",
"a",
"random",
"order"
] |
7beb0166ccefad5fa722215b030ac2a53d62b59e
|
https://github.com/emilydolson/avida-spatial-tools/blob/7beb0166ccefad5fa722215b030ac2a53d62b59e/avidaspatial/environment_file_components.py#L210-L227
|
238,201
|
mlavin/argyle
|
argyle/system.py
|
add_ppas_from_file
|
def add_ppas_from_file(file_name, update=True):
"""Add personal package archive from a file list."""
for ppa in _read_lines_from_file(file_name):
add_ppa(ppa, update=False)
if update:
update_apt_sources()
|
python
|
def add_ppas_from_file(file_name, update=True):
"""Add personal package archive from a file list."""
for ppa in _read_lines_from_file(file_name):
add_ppa(ppa, update=False)
if update:
update_apt_sources()
|
[
"def",
"add_ppas_from_file",
"(",
"file_name",
",",
"update",
"=",
"True",
")",
":",
"for",
"ppa",
"in",
"_read_lines_from_file",
"(",
"file_name",
")",
":",
"add_ppa",
"(",
"ppa",
",",
"update",
"=",
"False",
")",
"if",
"update",
":",
"update_apt_sources",
"(",
")"
] |
Add personal package archive from a file list.
|
[
"Add",
"personal",
"package",
"archive",
"from",
"a",
"file",
"list",
"."
] |
92cc6e1dd9b8e7cb41c5098a79d05e14b8243d72
|
https://github.com/mlavin/argyle/blob/92cc6e1dd9b8e7cb41c5098a79d05e14b8243d72/argyle/system.py#L66-L72
|
238,202
|
mlavin/argyle
|
argyle/system.py
|
add_apt_source
|
def add_apt_source(source, key=None, update=True):
"""Adds source url to apt sources.list. Optional to pass the key url."""
# Make a backup of list
source_list = u'/etc/apt/sources.list'
sudo("cp %s{,.bak}" % source_list)
files.append(source_list, source, use_sudo=True)
if key:
# Fecth key from url and add
sudo(u"wget -q %s -O - | sudo apt-key add -" % key)
if update:
update_apt_sources()
|
python
|
def add_apt_source(source, key=None, update=True):
"""Adds source url to apt sources.list. Optional to pass the key url."""
# Make a backup of list
source_list = u'/etc/apt/sources.list'
sudo("cp %s{,.bak}" % source_list)
files.append(source_list, source, use_sudo=True)
if key:
# Fecth key from url and add
sudo(u"wget -q %s -O - | sudo apt-key add -" % key)
if update:
update_apt_sources()
|
[
"def",
"add_apt_source",
"(",
"source",
",",
"key",
"=",
"None",
",",
"update",
"=",
"True",
")",
":",
"# Make a backup of list",
"source_list",
"=",
"u'/etc/apt/sources.list'",
"sudo",
"(",
"\"cp %s{,.bak}\"",
"%",
"source_list",
")",
"files",
".",
"append",
"(",
"source_list",
",",
"source",
",",
"use_sudo",
"=",
"True",
")",
"if",
"key",
":",
"# Fecth key from url and add",
"sudo",
"(",
"u\"wget -q %s -O - | sudo apt-key add -\"",
"%",
"key",
")",
"if",
"update",
":",
"update_apt_sources",
"(",
")"
] |
Adds source url to apt sources.list. Optional to pass the key url.
|
[
"Adds",
"source",
"url",
"to",
"apt",
"sources",
".",
"list",
".",
"Optional",
"to",
"pass",
"the",
"key",
"url",
"."
] |
92cc6e1dd9b8e7cb41c5098a79d05e14b8243d72
|
https://github.com/mlavin/argyle/blob/92cc6e1dd9b8e7cb41c5098a79d05e14b8243d72/argyle/system.py#L76-L87
|
238,203
|
mlavin/argyle
|
argyle/system.py
|
add_sources_from_file
|
def add_sources_from_file(file_name, update=True):
"""
Add source urls from a file list.
The file should contain the source line to add followed by the
key url, if any, enclosed in parentheses.
Ex:
deb http://example.com/deb lucid main (http://example.com/key)
"""
key_regex = re.compile(r'(?P<source>[^()]*)(\s+\((?P<key>.*)\))?$')
for line in _read_lines_from_file(file_name):
kwargs = key_regex.match(line).groupdict()
kwargs['update'] = False
add_apt_source(**kwargs)
if update:
update_apt_sources()
|
python
|
def add_sources_from_file(file_name, update=True):
"""
Add source urls from a file list.
The file should contain the source line to add followed by the
key url, if any, enclosed in parentheses.
Ex:
deb http://example.com/deb lucid main (http://example.com/key)
"""
key_regex = re.compile(r'(?P<source>[^()]*)(\s+\((?P<key>.*)\))?$')
for line in _read_lines_from_file(file_name):
kwargs = key_regex.match(line).groupdict()
kwargs['update'] = False
add_apt_source(**kwargs)
if update:
update_apt_sources()
|
[
"def",
"add_sources_from_file",
"(",
"file_name",
",",
"update",
"=",
"True",
")",
":",
"key_regex",
"=",
"re",
".",
"compile",
"(",
"r'(?P<source>[^()]*)(\\s+\\((?P<key>.*)\\))?$'",
")",
"for",
"line",
"in",
"_read_lines_from_file",
"(",
"file_name",
")",
":",
"kwargs",
"=",
"key_regex",
".",
"match",
"(",
"line",
")",
".",
"groupdict",
"(",
")",
"kwargs",
"[",
"'update'",
"]",
"=",
"False",
"add_apt_source",
"(",
"*",
"*",
"kwargs",
")",
"if",
"update",
":",
"update_apt_sources",
"(",
")"
] |
Add source urls from a file list.
The file should contain the source line to add followed by the
key url, if any, enclosed in parentheses.
Ex:
deb http://example.com/deb lucid main (http://example.com/key)
|
[
"Add",
"source",
"urls",
"from",
"a",
"file",
"list",
".",
"The",
"file",
"should",
"contain",
"the",
"source",
"line",
"to",
"add",
"followed",
"by",
"the",
"key",
"url",
"if",
"any",
"enclosed",
"in",
"parentheses",
"."
] |
92cc6e1dd9b8e7cb41c5098a79d05e14b8243d72
|
https://github.com/mlavin/argyle/blob/92cc6e1dd9b8e7cb41c5098a79d05e14b8243d72/argyle/system.py#L91-L107
|
238,204
|
mlavin/argyle
|
argyle/system.py
|
create_user
|
def create_user(name, groups=None, key_file=None):
"""Create a user. Adds a key file to authorized_keys if given."""
groups = groups or []
if not user_exists(name):
for group in groups:
if not group_exists(group):
sudo(u"addgroup %s" % group)
groups = groups and u'-G %s' % u','.join(groups) or ''
sudo(u"useradd -m %s -s /bin/bash %s" % (groups, name))
sudo(u"passwd -d %s" % name)
if key_file:
sudo(u"mkdir -p /home/%s/.ssh" % name)
put(key_file, u"/home/%s/.ssh/authorized_keys" % name, use_sudo=True)
sudo(u"chown -R %(name)s:%(name)s /home/%(name)s/.ssh" % {'name': name})
|
python
|
def create_user(name, groups=None, key_file=None):
"""Create a user. Adds a key file to authorized_keys if given."""
groups = groups or []
if not user_exists(name):
for group in groups:
if not group_exists(group):
sudo(u"addgroup %s" % group)
groups = groups and u'-G %s' % u','.join(groups) or ''
sudo(u"useradd -m %s -s /bin/bash %s" % (groups, name))
sudo(u"passwd -d %s" % name)
if key_file:
sudo(u"mkdir -p /home/%s/.ssh" % name)
put(key_file, u"/home/%s/.ssh/authorized_keys" % name, use_sudo=True)
sudo(u"chown -R %(name)s:%(name)s /home/%(name)s/.ssh" % {'name': name})
|
[
"def",
"create_user",
"(",
"name",
",",
"groups",
"=",
"None",
",",
"key_file",
"=",
"None",
")",
":",
"groups",
"=",
"groups",
"or",
"[",
"]",
"if",
"not",
"user_exists",
"(",
"name",
")",
":",
"for",
"group",
"in",
"groups",
":",
"if",
"not",
"group_exists",
"(",
"group",
")",
":",
"sudo",
"(",
"u\"addgroup %s\"",
"%",
"group",
")",
"groups",
"=",
"groups",
"and",
"u'-G %s'",
"%",
"u','",
".",
"join",
"(",
"groups",
")",
"or",
"''",
"sudo",
"(",
"u\"useradd -m %s -s /bin/bash %s\"",
"%",
"(",
"groups",
",",
"name",
")",
")",
"sudo",
"(",
"u\"passwd -d %s\"",
"%",
"name",
")",
"if",
"key_file",
":",
"sudo",
"(",
"u\"mkdir -p /home/%s/.ssh\"",
"%",
"name",
")",
"put",
"(",
"key_file",
",",
"u\"/home/%s/.ssh/authorized_keys\"",
"%",
"name",
",",
"use_sudo",
"=",
"True",
")",
"sudo",
"(",
"u\"chown -R %(name)s:%(name)s /home/%(name)s/.ssh\"",
"%",
"{",
"'name'",
":",
"name",
"}",
")"
] |
Create a user. Adds a key file to authorized_keys if given.
|
[
"Create",
"a",
"user",
".",
"Adds",
"a",
"key",
"file",
"to",
"authorized_keys",
"if",
"given",
"."
] |
92cc6e1dd9b8e7cb41c5098a79d05e14b8243d72
|
https://github.com/mlavin/argyle/blob/92cc6e1dd9b8e7cb41c5098a79d05e14b8243d72/argyle/system.py#L111-L125
|
238,205
|
epfl-idevelop/epfl-ldap
|
epflldap/ldap_authenticate.py
|
Authenticator.authenticate
|
def authenticate(self, username, password):
"""
Authenticate the user with a bind on the LDAP server
"""
if username is None or password is None:
return False
# check the username
if not re.match("^[A-Za-z0-9_-]*$", username):
return False
user_dn = self.get_user_dn(username)
server = ldap3.Server(
self.uri,
use_ssl=self.use_ssl
)
connection = ldap3.Connection(server, user=user_dn, password=password)
return connection.bind()
|
python
|
def authenticate(self, username, password):
"""
Authenticate the user with a bind on the LDAP server
"""
if username is None or password is None:
return False
# check the username
if not re.match("^[A-Za-z0-9_-]*$", username):
return False
user_dn = self.get_user_dn(username)
server = ldap3.Server(
self.uri,
use_ssl=self.use_ssl
)
connection = ldap3.Connection(server, user=user_dn, password=password)
return connection.bind()
|
[
"def",
"authenticate",
"(",
"self",
",",
"username",
",",
"password",
")",
":",
"if",
"username",
"is",
"None",
"or",
"password",
"is",
"None",
":",
"return",
"False",
"# check the username",
"if",
"not",
"re",
".",
"match",
"(",
"\"^[A-Za-z0-9_-]*$\"",
",",
"username",
")",
":",
"return",
"False",
"user_dn",
"=",
"self",
".",
"get_user_dn",
"(",
"username",
")",
"server",
"=",
"ldap3",
".",
"Server",
"(",
"self",
".",
"uri",
",",
"use_ssl",
"=",
"self",
".",
"use_ssl",
")",
"connection",
"=",
"ldap3",
".",
"Connection",
"(",
"server",
",",
"user",
"=",
"user_dn",
",",
"password",
"=",
"password",
")",
"return",
"connection",
".",
"bind",
"(",
")"
] |
Authenticate the user with a bind on the LDAP server
|
[
"Authenticate",
"the",
"user",
"with",
"a",
"bind",
"on",
"the",
"LDAP",
"server"
] |
bebb94da3609d358bd83f31672eeaddcda872c5d
|
https://github.com/epfl-idevelop/epfl-ldap/blob/bebb94da3609d358bd83f31672eeaddcda872c5d/epflldap/ldap_authenticate.py#L34-L55
|
238,206
|
Brazelton-Lab/bio_utils
|
bio_utils/verifiers/binary.py
|
binary_guesser
|
def binary_guesser(handle, num_bytes=512):
"""Raise error if file not likely binary
Guesses if a file is binary, raises error if file is not likely binary,
then returns to location in file when handle passed to binary_guesser.
Args:
handle (file): File handle of file thought to be binary
num_bytes (int): Bytes of file to read to guess binary, more bytes
is often better but takes longer
Raises:
FormatError: Error raised if file is not likely binary
Example:
The following example demonstrate how to use binary_guesser.
Note: These doctests will not pass, examples are only in doctest
format as per convention. bio_utils uses pytests for testing.
>>> binary_guesser(open('test.binary'))
"""
text_chars = ''.join(map(chr, range(32, 127))) + '\n\r\t\b'
byte_chars = text_chars.encode()
handle_location = handle.tell()
first_block = handle.read(num_bytes)
if type(first_block) is str:
first_block = first_block.encode()
filtered_block = first_block.translate(None, delete=byte_chars)
handle.seek(handle_location) # Return to original handle location
if float(len(filtered_block)) / float(len(first_block)) > 0.30:
pass # File is likely binary
else:
msg = '{0} is probably not a binary file'.format(handle.name)
raise FormatError(message=msg)
|
python
|
def binary_guesser(handle, num_bytes=512):
"""Raise error if file not likely binary
Guesses if a file is binary, raises error if file is not likely binary,
then returns to location in file when handle passed to binary_guesser.
Args:
handle (file): File handle of file thought to be binary
num_bytes (int): Bytes of file to read to guess binary, more bytes
is often better but takes longer
Raises:
FormatError: Error raised if file is not likely binary
Example:
The following example demonstrate how to use binary_guesser.
Note: These doctests will not pass, examples are only in doctest
format as per convention. bio_utils uses pytests for testing.
>>> binary_guesser(open('test.binary'))
"""
text_chars = ''.join(map(chr, range(32, 127))) + '\n\r\t\b'
byte_chars = text_chars.encode()
handle_location = handle.tell()
first_block = handle.read(num_bytes)
if type(first_block) is str:
first_block = first_block.encode()
filtered_block = first_block.translate(None, delete=byte_chars)
handle.seek(handle_location) # Return to original handle location
if float(len(filtered_block)) / float(len(first_block)) > 0.30:
pass # File is likely binary
else:
msg = '{0} is probably not a binary file'.format(handle.name)
raise FormatError(message=msg)
|
[
"def",
"binary_guesser",
"(",
"handle",
",",
"num_bytes",
"=",
"512",
")",
":",
"text_chars",
"=",
"''",
".",
"join",
"(",
"map",
"(",
"chr",
",",
"range",
"(",
"32",
",",
"127",
")",
")",
")",
"+",
"'\\n\\r\\t\\b'",
"byte_chars",
"=",
"text_chars",
".",
"encode",
"(",
")",
"handle_location",
"=",
"handle",
".",
"tell",
"(",
")",
"first_block",
"=",
"handle",
".",
"read",
"(",
"num_bytes",
")",
"if",
"type",
"(",
"first_block",
")",
"is",
"str",
":",
"first_block",
"=",
"first_block",
".",
"encode",
"(",
")",
"filtered_block",
"=",
"first_block",
".",
"translate",
"(",
"None",
",",
"delete",
"=",
"byte_chars",
")",
"handle",
".",
"seek",
"(",
"handle_location",
")",
"# Return to original handle location",
"if",
"float",
"(",
"len",
"(",
"filtered_block",
")",
")",
"/",
"float",
"(",
"len",
"(",
"first_block",
")",
")",
">",
"0.30",
":",
"pass",
"# File is likely binary",
"else",
":",
"msg",
"=",
"'{0} is probably not a binary file'",
".",
"format",
"(",
"handle",
".",
"name",
")",
"raise",
"FormatError",
"(",
"message",
"=",
"msg",
")"
] |
Raise error if file not likely binary
Guesses if a file is binary, raises error if file is not likely binary,
then returns to location in file when handle passed to binary_guesser.
Args:
handle (file): File handle of file thought to be binary
num_bytes (int): Bytes of file to read to guess binary, more bytes
is often better but takes longer
Raises:
FormatError: Error raised if file is not likely binary
Example:
The following example demonstrate how to use binary_guesser.
Note: These doctests will not pass, examples are only in doctest
format as per convention. bio_utils uses pytests for testing.
>>> binary_guesser(open('test.binary'))
|
[
"Raise",
"error",
"if",
"file",
"not",
"likely",
"binary"
] |
5a7ddf13ee0bf4baaaeb6b2b99e01bf74aa132b7
|
https://github.com/Brazelton-Lab/bio_utils/blob/5a7ddf13ee0bf4baaaeb6b2b99e01bf74aa132b7/bio_utils/verifiers/binary.py#L45-L80
|
238,207
|
JukeboxPipeline/jukebox-core
|
src/jukeboxcore/gui/qtcompile.py
|
compile_ui
|
def compile_ui(uifile):
"""Compile the given Qt designer file. The compiled file will be in the same directory but ends with _ui.py.
:param uifile: filepath to the uifile
:type uifile: str
:returns: None
:rtype: None
:raises: None
"""
print "Compileing: %s" % uifile
outputpath = uifile.rsplit(os.path.extsep, 1)[0] + "_ui.py"
print "Outputfile: %s" % outputpath
outputfile = open(os.path.abspath(outputpath), "w")
pysideuic.compileUi(os.path.abspath(uifile), outputfile)
print "Done!"
|
python
|
def compile_ui(uifile):
"""Compile the given Qt designer file. The compiled file will be in the same directory but ends with _ui.py.
:param uifile: filepath to the uifile
:type uifile: str
:returns: None
:rtype: None
:raises: None
"""
print "Compileing: %s" % uifile
outputpath = uifile.rsplit(os.path.extsep, 1)[0] + "_ui.py"
print "Outputfile: %s" % outputpath
outputfile = open(os.path.abspath(outputpath), "w")
pysideuic.compileUi(os.path.abspath(uifile), outputfile)
print "Done!"
|
[
"def",
"compile_ui",
"(",
"uifile",
")",
":",
"print",
"\"Compileing: %s\"",
"%",
"uifile",
"outputpath",
"=",
"uifile",
".",
"rsplit",
"(",
"os",
".",
"path",
".",
"extsep",
",",
"1",
")",
"[",
"0",
"]",
"+",
"\"_ui.py\"",
"print",
"\"Outputfile: %s\"",
"%",
"outputpath",
"outputfile",
"=",
"open",
"(",
"os",
".",
"path",
".",
"abspath",
"(",
"outputpath",
")",
",",
"\"w\"",
")",
"pysideuic",
".",
"compileUi",
"(",
"os",
".",
"path",
".",
"abspath",
"(",
"uifile",
")",
",",
"outputfile",
")",
"print",
"\"Done!\""
] |
Compile the given Qt designer file. The compiled file will be in the same directory but ends with _ui.py.
:param uifile: filepath to the uifile
:type uifile: str
:returns: None
:rtype: None
:raises: None
|
[
"Compile",
"the",
"given",
"Qt",
"designer",
"file",
".",
"The",
"compiled",
"file",
"will",
"be",
"in",
"the",
"same",
"directory",
"but",
"ends",
"with",
"_ui",
".",
"py",
"."
] |
bac2280ca49940355270e4b69400ce9976ab2e6f
|
https://github.com/JukeboxPipeline/jukebox-core/blob/bac2280ca49940355270e4b69400ce9976ab2e6f/src/jukeboxcore/gui/qtcompile.py#L10-L24
|
238,208
|
JoaoFelipe/pyposast
|
pyposast/__init__.py
|
parse
|
def parse(code, filename='<unknown>', mode='exec', tree=None):
"""Parse the source into an AST node with PyPosAST.
Enhance nodes with positions
Arguments:
code -- code text
Keyword Arguments:
filename -- code path
mode -- execution mode (exec, eval, single)
tree -- current tree, if it was optimized
"""
visitor = Visitor(code, filename, mode, tree=tree)
return visitor.tree
|
python
|
def parse(code, filename='<unknown>', mode='exec', tree=None):
"""Parse the source into an AST node with PyPosAST.
Enhance nodes with positions
Arguments:
code -- code text
Keyword Arguments:
filename -- code path
mode -- execution mode (exec, eval, single)
tree -- current tree, if it was optimized
"""
visitor = Visitor(code, filename, mode, tree=tree)
return visitor.tree
|
[
"def",
"parse",
"(",
"code",
",",
"filename",
"=",
"'<unknown>'",
",",
"mode",
"=",
"'exec'",
",",
"tree",
"=",
"None",
")",
":",
"visitor",
"=",
"Visitor",
"(",
"code",
",",
"filename",
",",
"mode",
",",
"tree",
"=",
"tree",
")",
"return",
"visitor",
".",
"tree"
] |
Parse the source into an AST node with PyPosAST.
Enhance nodes with positions
Arguments:
code -- code text
Keyword Arguments:
filename -- code path
mode -- execution mode (exec, eval, single)
tree -- current tree, if it was optimized
|
[
"Parse",
"the",
"source",
"into",
"an",
"AST",
"node",
"with",
"PyPosAST",
".",
"Enhance",
"nodes",
"with",
"positions"
] |
497c88c66b451ff2cd7354be1af070c92e119f41
|
https://github.com/JoaoFelipe/pyposast/blob/497c88c66b451ff2cd7354be1af070c92e119f41/pyposast/__init__.py#L12-L27
|
238,209
|
JoaoFelipe/pyposast
|
pyposast/__init__.py
|
get_nodes
|
def get_nodes(code, desired_type, path="__main__", mode="exec", tree=None):
"""Find all nodes of a given type
Arguments:
code -- code text
desired_type -- ast Node or tuple
Keyword Arguments:
path -- code path
mode -- execution mode (exec, eval, single)
tree -- current tree, if it was optimized
"""
return _GetVisitor(parse(code, path, mode, tree), desired_type).result
|
python
|
def get_nodes(code, desired_type, path="__main__", mode="exec", tree=None):
"""Find all nodes of a given type
Arguments:
code -- code text
desired_type -- ast Node or tuple
Keyword Arguments:
path -- code path
mode -- execution mode (exec, eval, single)
tree -- current tree, if it was optimized
"""
return _GetVisitor(parse(code, path, mode, tree), desired_type).result
|
[
"def",
"get_nodes",
"(",
"code",
",",
"desired_type",
",",
"path",
"=",
"\"__main__\"",
",",
"mode",
"=",
"\"exec\"",
",",
"tree",
"=",
"None",
")",
":",
"return",
"_GetVisitor",
"(",
"parse",
"(",
"code",
",",
"path",
",",
"mode",
",",
"tree",
")",
",",
"desired_type",
")",
".",
"result"
] |
Find all nodes of a given type
Arguments:
code -- code text
desired_type -- ast Node or tuple
Keyword Arguments:
path -- code path
mode -- execution mode (exec, eval, single)
tree -- current tree, if it was optimized
|
[
"Find",
"all",
"nodes",
"of",
"a",
"given",
"type"
] |
497c88c66b451ff2cd7354be1af070c92e119f41
|
https://github.com/JoaoFelipe/pyposast/blob/497c88c66b451ff2cd7354be1af070c92e119f41/pyposast/__init__.py#L44-L58
|
238,210
|
mikeboers/sitetools
|
sitetools/sites.py
|
SysPathInserter.add
|
def add(self, path):
"""Add the given path to the decided place in sys.path"""
# sys.path always has absolute paths.
path = os.path.abspath(path)
# It must exist.
if not os.path.exists(path):
return
# It must not already be in sys.path.
if path in sys.path:
return
if self.index is not None:
sys.path.insert(self.index, path)
self.index += 1
else:
sys.path.append(path)
|
python
|
def add(self, path):
"""Add the given path to the decided place in sys.path"""
# sys.path always has absolute paths.
path = os.path.abspath(path)
# It must exist.
if not os.path.exists(path):
return
# It must not already be in sys.path.
if path in sys.path:
return
if self.index is not None:
sys.path.insert(self.index, path)
self.index += 1
else:
sys.path.append(path)
|
[
"def",
"add",
"(",
"self",
",",
"path",
")",
":",
"# sys.path always has absolute paths.",
"path",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"path",
")",
"# It must exist.",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"path",
")",
":",
"return",
"# It must not already be in sys.path.",
"if",
"path",
"in",
"sys",
".",
"path",
":",
"return",
"if",
"self",
".",
"index",
"is",
"not",
"None",
":",
"sys",
".",
"path",
".",
"insert",
"(",
"self",
".",
"index",
",",
"path",
")",
"self",
".",
"index",
"+=",
"1",
"else",
":",
"sys",
".",
"path",
".",
"append",
"(",
"path",
")"
] |
Add the given path to the decided place in sys.path
|
[
"Add",
"the",
"given",
"path",
"to",
"the",
"decided",
"place",
"in",
"sys",
".",
"path"
] |
1ec4eea6902b4a276f868a711b783dd965c123b7
|
https://github.com/mikeboers/sitetools/blob/1ec4eea6902b4a276f868a711b783dd965c123b7/sitetools/sites.py#L184-L202
|
238,211
|
moreati/subresource-integrity
|
subresource_integrity.py
|
generate
|
def generate(data, algorithms=(DEFAULT_ALOGRITHM,)):
"""Yields subresource integrity Hash objects for the given data &
algorithms
>>> for ihash in generate(b"alert('Hello, world.');"):
... print ('%s %s' % (ihash.algorithm, ihash.b58digest))
sha384 H8BRh8j48O9oYatfu5AZzq6A9RINhZO5H16dQZngK7T62em8MUt1FLm52t+eX6xO
>>> list(generate(b"alert('Hello, world.');", ['sha256', 'sha384']))
... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
[subresource_integrity.Hash('sha256', 'qz.../Tng=', ''),
subresource_integrity.Hash('sha384', 'H8BR...+eX6xO', '')]
"""
return (Hash.fromresource(data, algorithm) for algorithm in algorithms)
|
python
|
def generate(data, algorithms=(DEFAULT_ALOGRITHM,)):
"""Yields subresource integrity Hash objects for the given data &
algorithms
>>> for ihash in generate(b"alert('Hello, world.');"):
... print ('%s %s' % (ihash.algorithm, ihash.b58digest))
sha384 H8BRh8j48O9oYatfu5AZzq6A9RINhZO5H16dQZngK7T62em8MUt1FLm52t+eX6xO
>>> list(generate(b"alert('Hello, world.');", ['sha256', 'sha384']))
... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
[subresource_integrity.Hash('sha256', 'qz.../Tng=', ''),
subresource_integrity.Hash('sha384', 'H8BR...+eX6xO', '')]
"""
return (Hash.fromresource(data, algorithm) for algorithm in algorithms)
|
[
"def",
"generate",
"(",
"data",
",",
"algorithms",
"=",
"(",
"DEFAULT_ALOGRITHM",
",",
")",
")",
":",
"return",
"(",
"Hash",
".",
"fromresource",
"(",
"data",
",",
"algorithm",
")",
"for",
"algorithm",
"in",
"algorithms",
")"
] |
Yields subresource integrity Hash objects for the given data &
algorithms
>>> for ihash in generate(b"alert('Hello, world.');"):
... print ('%s %s' % (ihash.algorithm, ihash.b58digest))
sha384 H8BRh8j48O9oYatfu5AZzq6A9RINhZO5H16dQZngK7T62em8MUt1FLm52t+eX6xO
>>> list(generate(b"alert('Hello, world.');", ['sha256', 'sha384']))
... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
[subresource_integrity.Hash('sha256', 'qz.../Tng=', ''),
subresource_integrity.Hash('sha384', 'H8BR...+eX6xO', '')]
|
[
"Yields",
"subresource",
"integrity",
"Hash",
"objects",
"for",
"the",
"given",
"data",
"&",
"algorithms"
] |
c9f6cecddea85f1c7bb5562551a41b9678fbda21
|
https://github.com/moreati/subresource-integrity/blob/c9f6cecddea85f1c7bb5562551a41b9678fbda21/subresource_integrity.py#L102-L115
|
238,212
|
moreati/subresource-integrity
|
subresource_integrity.py
|
render
|
def render(data, algorithms=(DEFAULT_ALOGRITHM,), seperator=' '):
"""Returns a subresource integrity string for the given data &
algorithms
>>> data = b"alert('Hello, world.');"
>>> render(data)
'sha384-H8BRh8j48O9oYatfu5AZzq6A9RINhZO5H16dQZngK7T62em8MUt1FLm52t+eX6xO'
>>> print(render(data, ['sha256', 'sha384'], seperator='\\n'))
sha256-qznLcsROx4GACP2dm0UCKCzCG+HiZ1guq6ZZDob/Tng=
sha384-H8BRh8j48O9oYatfu5AZzq6A9RINhZO5H16dQZngK7T62em8MUt1FLm52t+eX6xO
"""
return seperator.join(str(ihash) for ihash in generate(data, algorithms))
|
python
|
def render(data, algorithms=(DEFAULT_ALOGRITHM,), seperator=' '):
"""Returns a subresource integrity string for the given data &
algorithms
>>> data = b"alert('Hello, world.');"
>>> render(data)
'sha384-H8BRh8j48O9oYatfu5AZzq6A9RINhZO5H16dQZngK7T62em8MUt1FLm52t+eX6xO'
>>> print(render(data, ['sha256', 'sha384'], seperator='\\n'))
sha256-qznLcsROx4GACP2dm0UCKCzCG+HiZ1guq6ZZDob/Tng=
sha384-H8BRh8j48O9oYatfu5AZzq6A9RINhZO5H16dQZngK7T62em8MUt1FLm52t+eX6xO
"""
return seperator.join(str(ihash) for ihash in generate(data, algorithms))
|
[
"def",
"render",
"(",
"data",
",",
"algorithms",
"=",
"(",
"DEFAULT_ALOGRITHM",
",",
")",
",",
"seperator",
"=",
"' '",
")",
":",
"return",
"seperator",
".",
"join",
"(",
"str",
"(",
"ihash",
")",
"for",
"ihash",
"in",
"generate",
"(",
"data",
",",
"algorithms",
")",
")"
] |
Returns a subresource integrity string for the given data &
algorithms
>>> data = b"alert('Hello, world.');"
>>> render(data)
'sha384-H8BRh8j48O9oYatfu5AZzq6A9RINhZO5H16dQZngK7T62em8MUt1FLm52t+eX6xO'
>>> print(render(data, ['sha256', 'sha384'], seperator='\\n'))
sha256-qznLcsROx4GACP2dm0UCKCzCG+HiZ1guq6ZZDob/Tng=
sha384-H8BRh8j48O9oYatfu5AZzq6A9RINhZO5H16dQZngK7T62em8MUt1FLm52t+eX6xO
|
[
"Returns",
"a",
"subresource",
"integrity",
"string",
"for",
"the",
"given",
"data",
"&",
"algorithms"
] |
c9f6cecddea85f1c7bb5562551a41b9678fbda21
|
https://github.com/moreati/subresource-integrity/blob/c9f6cecddea85f1c7bb5562551a41b9678fbda21/subresource_integrity.py#L118-L130
|
238,213
|
moreati/subresource-integrity
|
subresource_integrity.py
|
parse
|
def parse(integrity):
"""Returns a list of subresource integrity Hash objects parsed from a str
>>> parse(' sha256-47DEQpj8HBSa+/TImW+5JCeuQeRkm5NMpJWZG3hSuFU= ')
... # doctest: +ELLIPSIS
[subresource_integrity.Hash('sha256', '47DEQp...SuFU=', '')]
Hash objects are put in descending order of algorithmic strength
>>> parse('sha384-dOTZf16X8p34q2/kYyEFm0jh89uTjikhnzjeLeF0FHsEaYKb'
... '1A1cv+Lyv4Hk8vHd'
... ' '
... 'sha512-Q2bFTOhEALkN8hOms2FKTDLy7eugP2zFZ1T8LCvX42Fp3WoN'
... 'r3bjZSAHeOsHrbV1Fu9/A0EzCinRE7Af1ofPrw=='
... )
... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
[subresource_integrity.Hash('sha512', 'Q2b...zCinRE7Af1ofPrw==', ''),
subresource_integrity.Hash('sha384', 'dOT...Hk8vHd', '')]
Unrecognised hash algorithms are discarded
>>> parse('sha1-2jmj7l5rSw0yVb/vlWAYkK/YBwk=')
[]
"""
matches = _INTEGRITY_PATTERN.findall(integrity)
matches.sort(key=lambda t: RECOGNISED_ALGORITHMS.index(t[0]))
return [Hash.fromhash(*match) for match in matches]
|
python
|
def parse(integrity):
"""Returns a list of subresource integrity Hash objects parsed from a str
>>> parse(' sha256-47DEQpj8HBSa+/TImW+5JCeuQeRkm5NMpJWZG3hSuFU= ')
... # doctest: +ELLIPSIS
[subresource_integrity.Hash('sha256', '47DEQp...SuFU=', '')]
Hash objects are put in descending order of algorithmic strength
>>> parse('sha384-dOTZf16X8p34q2/kYyEFm0jh89uTjikhnzjeLeF0FHsEaYKb'
... '1A1cv+Lyv4Hk8vHd'
... ' '
... 'sha512-Q2bFTOhEALkN8hOms2FKTDLy7eugP2zFZ1T8LCvX42Fp3WoN'
... 'r3bjZSAHeOsHrbV1Fu9/A0EzCinRE7Af1ofPrw=='
... )
... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
[subresource_integrity.Hash('sha512', 'Q2b...zCinRE7Af1ofPrw==', ''),
subresource_integrity.Hash('sha384', 'dOT...Hk8vHd', '')]
Unrecognised hash algorithms are discarded
>>> parse('sha1-2jmj7l5rSw0yVb/vlWAYkK/YBwk=')
[]
"""
matches = _INTEGRITY_PATTERN.findall(integrity)
matches.sort(key=lambda t: RECOGNISED_ALGORITHMS.index(t[0]))
return [Hash.fromhash(*match) for match in matches]
|
[
"def",
"parse",
"(",
"integrity",
")",
":",
"matches",
"=",
"_INTEGRITY_PATTERN",
".",
"findall",
"(",
"integrity",
")",
"matches",
".",
"sort",
"(",
"key",
"=",
"lambda",
"t",
":",
"RECOGNISED_ALGORITHMS",
".",
"index",
"(",
"t",
"[",
"0",
"]",
")",
")",
"return",
"[",
"Hash",
".",
"fromhash",
"(",
"*",
"match",
")",
"for",
"match",
"in",
"matches",
"]"
] |
Returns a list of subresource integrity Hash objects parsed from a str
>>> parse(' sha256-47DEQpj8HBSa+/TImW+5JCeuQeRkm5NMpJWZG3hSuFU= ')
... # doctest: +ELLIPSIS
[subresource_integrity.Hash('sha256', '47DEQp...SuFU=', '')]
Hash objects are put in descending order of algorithmic strength
>>> parse('sha384-dOTZf16X8p34q2/kYyEFm0jh89uTjikhnzjeLeF0FHsEaYKb'
... '1A1cv+Lyv4Hk8vHd'
... ' '
... 'sha512-Q2bFTOhEALkN8hOms2FKTDLy7eugP2zFZ1T8LCvX42Fp3WoN'
... 'r3bjZSAHeOsHrbV1Fu9/A0EzCinRE7Af1ofPrw=='
... )
... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
[subresource_integrity.Hash('sha512', 'Q2b...zCinRE7Af1ofPrw==', ''),
subresource_integrity.Hash('sha384', 'dOT...Hk8vHd', '')]
Unrecognised hash algorithms are discarded
>>> parse('sha1-2jmj7l5rSw0yVb/vlWAYkK/YBwk=')
[]
|
[
"Returns",
"a",
"list",
"of",
"subresource",
"integrity",
"Hash",
"objects",
"parsed",
"from",
"a",
"str"
] |
c9f6cecddea85f1c7bb5562551a41b9678fbda21
|
https://github.com/moreati/subresource-integrity/blob/c9f6cecddea85f1c7bb5562551a41b9678fbda21/subresource_integrity.py#L133-L159
|
238,214
|
SeabornGames/RequestClient
|
seaborn/request_client/intellisense.py
|
ConnectionBasic.login
|
def login(self, username=None, password=None, login_url=None,
auth_url=None):
"""
This will automatically log the user into the pre-defined account
Feel free to overwrite this with an endpoint on endpoint load
:param username: str of the user name to login in as
:param password: str of the password to login as
:param login_url: str of the url for the server's login
:param auth_url: str of the url for the server's authorization login
:return: str of self._status
"""
return super(ConnectionBasic, self).login(username, password,
login_url, auth_url)
|
python
|
def login(self, username=None, password=None, login_url=None,
auth_url=None):
"""
This will automatically log the user into the pre-defined account
Feel free to overwrite this with an endpoint on endpoint load
:param username: str of the user name to login in as
:param password: str of the password to login as
:param login_url: str of the url for the server's login
:param auth_url: str of the url for the server's authorization login
:return: str of self._status
"""
return super(ConnectionBasic, self).login(username, password,
login_url, auth_url)
|
[
"def",
"login",
"(",
"self",
",",
"username",
"=",
"None",
",",
"password",
"=",
"None",
",",
"login_url",
"=",
"None",
",",
"auth_url",
"=",
"None",
")",
":",
"return",
"super",
"(",
"ConnectionBasic",
",",
"self",
")",
".",
"login",
"(",
"username",
",",
"password",
",",
"login_url",
",",
"auth_url",
")"
] |
This will automatically log the user into the pre-defined account
Feel free to overwrite this with an endpoint on endpoint load
:param username: str of the user name to login in as
:param password: str of the password to login as
:param login_url: str of the url for the server's login
:param auth_url: str of the url for the server's authorization login
:return: str of self._status
|
[
"This",
"will",
"automatically",
"log",
"the",
"user",
"into",
"the",
"pre",
"-",
"defined",
"account"
] |
21aeb951ddfdb6ee453ad0edc896ff224e06425d
|
https://github.com/SeabornGames/RequestClient/blob/21aeb951ddfdb6ee453ad0edc896ff224e06425d/seaborn/request_client/intellisense.py#L47-L61
|
238,215
|
20c/django-handleref
|
django_handleref/manager.py
|
HandleRefQuerySet.last_change
|
def last_change(self):
"""
queries the database for the most recent time an object was either created or
updated
returns datetime or None if db is empty
"""
try:
cdt = self.latest('created')
udt = self.latest('updated')
#print cdt, udt
return max(cdt.created, udt.updated)
except ObjectDoesNotExist:
return None
|
python
|
def last_change(self):
"""
queries the database for the most recent time an object was either created or
updated
returns datetime or None if db is empty
"""
try:
cdt = self.latest('created')
udt = self.latest('updated')
#print cdt, udt
return max(cdt.created, udt.updated)
except ObjectDoesNotExist:
return None
|
[
"def",
"last_change",
"(",
"self",
")",
":",
"try",
":",
"cdt",
"=",
"self",
".",
"latest",
"(",
"'created'",
")",
"udt",
"=",
"self",
".",
"latest",
"(",
"'updated'",
")",
"#print cdt, udt",
"return",
"max",
"(",
"cdt",
".",
"created",
",",
"udt",
".",
"updated",
")",
"except",
"ObjectDoesNotExist",
":",
"return",
"None"
] |
queries the database for the most recent time an object was either created or
updated
returns datetime or None if db is empty
|
[
"queries",
"the",
"database",
"for",
"the",
"most",
"recent",
"time",
"an",
"object",
"was",
"either",
"created",
"or",
"updated"
] |
ff4ca6ad39c68947e8a6d8e478daae4cd43663ca
|
https://github.com/20c/django-handleref/blob/ff4ca6ad39c68947e8a6d8e478daae4cd43663ca/django_handleref/manager.py#L15-L29
|
238,216
|
20c/django-handleref
|
django_handleref/manager.py
|
HandleRefQuerySet.since
|
def since(self, timestamp=None, version=None, deleted=False):
"""
Queries the database for objects updated since timestamp or version
Arguments:
timestamp <DateTime=None|int=None> if specified return all objects modified since
that specified time. If integer is submitted it is treated like a unix timestamp
version <int=None> if specified return all objects with a version greater
then the one specified
deleted <bool=False> if true include soft-deleted objects in the result
Either timestamp or version needs to be provided
"""
qset = self
if timestamp is not None:
if isinstance(timestamp, numbers.Real):
timestamp = datetime.datetime.fromtimestamp(timestamp)
qset = qset.filter(
models.Q(created__gt=timestamp) |
models.Q(updated__gt=timestamp)
)
if version is not None:
qset = qset.filter(version__gt=version)
if not deleted:
qset = qset.undeleted()
return qset
|
python
|
def since(self, timestamp=None, version=None, deleted=False):
"""
Queries the database for objects updated since timestamp or version
Arguments:
timestamp <DateTime=None|int=None> if specified return all objects modified since
that specified time. If integer is submitted it is treated like a unix timestamp
version <int=None> if specified return all objects with a version greater
then the one specified
deleted <bool=False> if true include soft-deleted objects in the result
Either timestamp or version needs to be provided
"""
qset = self
if timestamp is not None:
if isinstance(timestamp, numbers.Real):
timestamp = datetime.datetime.fromtimestamp(timestamp)
qset = qset.filter(
models.Q(created__gt=timestamp) |
models.Q(updated__gt=timestamp)
)
if version is not None:
qset = qset.filter(version__gt=version)
if not deleted:
qset = qset.undeleted()
return qset
|
[
"def",
"since",
"(",
"self",
",",
"timestamp",
"=",
"None",
",",
"version",
"=",
"None",
",",
"deleted",
"=",
"False",
")",
":",
"qset",
"=",
"self",
"if",
"timestamp",
"is",
"not",
"None",
":",
"if",
"isinstance",
"(",
"timestamp",
",",
"numbers",
".",
"Real",
")",
":",
"timestamp",
"=",
"datetime",
".",
"datetime",
".",
"fromtimestamp",
"(",
"timestamp",
")",
"qset",
"=",
"qset",
".",
"filter",
"(",
"models",
".",
"Q",
"(",
"created__gt",
"=",
"timestamp",
")",
"|",
"models",
".",
"Q",
"(",
"updated__gt",
"=",
"timestamp",
")",
")",
"if",
"version",
"is",
"not",
"None",
":",
"qset",
"=",
"qset",
".",
"filter",
"(",
"version__gt",
"=",
"version",
")",
"if",
"not",
"deleted",
":",
"qset",
"=",
"qset",
".",
"undeleted",
"(",
")",
"return",
"qset"
] |
Queries the database for objects updated since timestamp or version
Arguments:
timestamp <DateTime=None|int=None> if specified return all objects modified since
that specified time. If integer is submitted it is treated like a unix timestamp
version <int=None> if specified return all objects with a version greater
then the one specified
deleted <bool=False> if true include soft-deleted objects in the result
Either timestamp or version needs to be provided
|
[
"Queries",
"the",
"database",
"for",
"objects",
"updated",
"since",
"timestamp",
"or",
"version"
] |
ff4ca6ad39c68947e8a6d8e478daae4cd43663ca
|
https://github.com/20c/django-handleref/blob/ff4ca6ad39c68947e8a6d8e478daae4cd43663ca/django_handleref/manager.py#L31-L68
|
238,217
|
Brazelton-Lab/bio_utils
|
bio_utils/verifiers/fastq.py
|
fastq_verifier
|
def fastq_verifier(entries, ambiguous=False):
"""Raises error if invalid FASTQ format detected
Args:
entries (list): A list of FastqEntry instances
ambiguous (bool): Permit ambiguous bases, i.e. permit non-ACGTU bases
Raises:
FormatError: Error when FASTQ format incorrect with descriptive message
Example:
>>> from bio_utils.iterators import fastq_iter
>>> import os
>>> entries = r'@entry1{0}AAGGATTCG{0}+{0}112234432{0}' \
... r'@entry{0}AGGTCCCCCG{0}+{0}4229888884{0}' \
... r'@entry3{0}GCCTAGC{0}9ddsa5n'.format(os.linesep)
>>> fastq_entries = fastq_iter(iter(entries.split(os.linesep)))
>>> fastq_verifier(fastq_entries)
"""
if ambiguous:
regex = r'^@.+{0}[ACGTURYKMSWBDHVNX]+{0}' \
r'\+.*{0}[!"#$%&\'()*+,-./0123456' \
r'789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ' \
r'[\]^_`abcdefghijklmnopqrstuvwxyz' \
r'{{|}}~]+{0}$'.format(os.linesep)
else:
regex = r'^@.+{0}[ACGTU]+{0}' \
r'\+.*{0}[!-~]+{0}$'.format(os.linesep)
delimiter = r'{0}'.format(os.linesep)
for entry in entries:
if len(entry.sequence) != len(entry.quality):
msg = 'The number of bases in {0} does not match the number ' \
'of quality scores'.format(entry.id)
raise FormatError(message=msg)
try:
entry_verifier([entry.write()], regex, delimiter)
except FormatError as error:
if error.part == 0:
msg = 'Unknown Header Error with {0}'.format(entry.id)
raise FormatError(message=msg)
elif error.part == 1 and ambiguous:
msg = '{0} contains a base not in ' \
'[ACGTURYKMSWBDHVNX]'.format(entry.id)
raise FormatError(message=msg)
elif error.part == 1 and not ambiguous:
msg = '{0} contains a base not in ' \
'[ACGTU]'.format(entry.id)
raise FormatError(message=msg)
elif error.part == 2:
msg = 'Unknown error with line 3 of {0}'.format(entry.id)
raise FormatError(message=msg)
elif error.part == 3:
msg = r'{0} contains a quality score not in ' \
r'[!-~]'.format(entry.id)
raise FormatError(message=msg)
else:
msg = '{0}: Unknown Error: Likely a Bug'.format(entry.id)
raise FormatError(message=msg)
|
python
|
def fastq_verifier(entries, ambiguous=False):
"""Raises error if invalid FASTQ format detected
Args:
entries (list): A list of FastqEntry instances
ambiguous (bool): Permit ambiguous bases, i.e. permit non-ACGTU bases
Raises:
FormatError: Error when FASTQ format incorrect with descriptive message
Example:
>>> from bio_utils.iterators import fastq_iter
>>> import os
>>> entries = r'@entry1{0}AAGGATTCG{0}+{0}112234432{0}' \
... r'@entry{0}AGGTCCCCCG{0}+{0}4229888884{0}' \
... r'@entry3{0}GCCTAGC{0}9ddsa5n'.format(os.linesep)
>>> fastq_entries = fastq_iter(iter(entries.split(os.linesep)))
>>> fastq_verifier(fastq_entries)
"""
if ambiguous:
regex = r'^@.+{0}[ACGTURYKMSWBDHVNX]+{0}' \
r'\+.*{0}[!"#$%&\'()*+,-./0123456' \
r'789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ' \
r'[\]^_`abcdefghijklmnopqrstuvwxyz' \
r'{{|}}~]+{0}$'.format(os.linesep)
else:
regex = r'^@.+{0}[ACGTU]+{0}' \
r'\+.*{0}[!-~]+{0}$'.format(os.linesep)
delimiter = r'{0}'.format(os.linesep)
for entry in entries:
if len(entry.sequence) != len(entry.quality):
msg = 'The number of bases in {0} does not match the number ' \
'of quality scores'.format(entry.id)
raise FormatError(message=msg)
try:
entry_verifier([entry.write()], regex, delimiter)
except FormatError as error:
if error.part == 0:
msg = 'Unknown Header Error with {0}'.format(entry.id)
raise FormatError(message=msg)
elif error.part == 1 and ambiguous:
msg = '{0} contains a base not in ' \
'[ACGTURYKMSWBDHVNX]'.format(entry.id)
raise FormatError(message=msg)
elif error.part == 1 and not ambiguous:
msg = '{0} contains a base not in ' \
'[ACGTU]'.format(entry.id)
raise FormatError(message=msg)
elif error.part == 2:
msg = 'Unknown error with line 3 of {0}'.format(entry.id)
raise FormatError(message=msg)
elif error.part == 3:
msg = r'{0} contains a quality score not in ' \
r'[!-~]'.format(entry.id)
raise FormatError(message=msg)
else:
msg = '{0}: Unknown Error: Likely a Bug'.format(entry.id)
raise FormatError(message=msg)
|
[
"def",
"fastq_verifier",
"(",
"entries",
",",
"ambiguous",
"=",
"False",
")",
":",
"if",
"ambiguous",
":",
"regex",
"=",
"r'^@.+{0}[ACGTURYKMSWBDHVNX]+{0}'",
"r'\\+.*{0}[!\"#$%&\\'()*+,-./0123456'",
"r'789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ'",
"r'[\\]^_`abcdefghijklmnopqrstuvwxyz'",
"r'{{|}}~]+{0}$'",
".",
"format",
"(",
"os",
".",
"linesep",
")",
"else",
":",
"regex",
"=",
"r'^@.+{0}[ACGTU]+{0}'",
"r'\\+.*{0}[!-~]+{0}$'",
".",
"format",
"(",
"os",
".",
"linesep",
")",
"delimiter",
"=",
"r'{0}'",
".",
"format",
"(",
"os",
".",
"linesep",
")",
"for",
"entry",
"in",
"entries",
":",
"if",
"len",
"(",
"entry",
".",
"sequence",
")",
"!=",
"len",
"(",
"entry",
".",
"quality",
")",
":",
"msg",
"=",
"'The number of bases in {0} does not match the number '",
"'of quality scores'",
".",
"format",
"(",
"entry",
".",
"id",
")",
"raise",
"FormatError",
"(",
"message",
"=",
"msg",
")",
"try",
":",
"entry_verifier",
"(",
"[",
"entry",
".",
"write",
"(",
")",
"]",
",",
"regex",
",",
"delimiter",
")",
"except",
"FormatError",
"as",
"error",
":",
"if",
"error",
".",
"part",
"==",
"0",
":",
"msg",
"=",
"'Unknown Header Error with {0}'",
".",
"format",
"(",
"entry",
".",
"id",
")",
"raise",
"FormatError",
"(",
"message",
"=",
"msg",
")",
"elif",
"error",
".",
"part",
"==",
"1",
"and",
"ambiguous",
":",
"msg",
"=",
"'{0} contains a base not in '",
"'[ACGTURYKMSWBDHVNX]'",
".",
"format",
"(",
"entry",
".",
"id",
")",
"raise",
"FormatError",
"(",
"message",
"=",
"msg",
")",
"elif",
"error",
".",
"part",
"==",
"1",
"and",
"not",
"ambiguous",
":",
"msg",
"=",
"'{0} contains a base not in '",
"'[ACGTU]'",
".",
"format",
"(",
"entry",
".",
"id",
")",
"raise",
"FormatError",
"(",
"message",
"=",
"msg",
")",
"elif",
"error",
".",
"part",
"==",
"2",
":",
"msg",
"=",
"'Unknown error with line 3 of {0}'",
".",
"format",
"(",
"entry",
".",
"id",
")",
"raise",
"FormatError",
"(",
"message",
"=",
"msg",
")",
"elif",
"error",
".",
"part",
"==",
"3",
":",
"msg",
"=",
"r'{0} contains a quality score not in '",
"r'[!-~]'",
".",
"format",
"(",
"entry",
".",
"id",
")",
"raise",
"FormatError",
"(",
"message",
"=",
"msg",
")",
"else",
":",
"msg",
"=",
"'{0}: Unknown Error: Likely a Bug'",
".",
"format",
"(",
"entry",
".",
"id",
")",
"raise",
"FormatError",
"(",
"message",
"=",
"msg",
")"
] |
Raises error if invalid FASTQ format detected
Args:
entries (list): A list of FastqEntry instances
ambiguous (bool): Permit ambiguous bases, i.e. permit non-ACGTU bases
Raises:
FormatError: Error when FASTQ format incorrect with descriptive message
Example:
>>> from bio_utils.iterators import fastq_iter
>>> import os
>>> entries = r'@entry1{0}AAGGATTCG{0}+{0}112234432{0}' \
... r'@entry{0}AGGTCCCCCG{0}+{0}4229888884{0}' \
... r'@entry3{0}GCCTAGC{0}9ddsa5n'.format(os.linesep)
>>> fastq_entries = fastq_iter(iter(entries.split(os.linesep)))
>>> fastq_verifier(fastq_entries)
|
[
"Raises",
"error",
"if",
"invalid",
"FASTQ",
"format",
"detected"
] |
5a7ddf13ee0bf4baaaeb6b2b99e01bf74aa132b7
|
https://github.com/Brazelton-Lab/bio_utils/blob/5a7ddf13ee0bf4baaaeb6b2b99e01bf74aa132b7/bio_utils/verifiers/fastq.py#L46-L106
|
238,218
|
mikicz/arca
|
arca/backend/docker.py
|
DockerBackend.validate_configuration
|
def validate_configuration(self):
"""
Validates the provided settings.
* Checks ``inherit_image`` format.
* Checks ``use_registry_name`` format.
* Checks that ``apt_dependencies`` is not set when ``inherit_image`` is set.
:raise ArcaMisconfigured: If some of the settings aren't valid.
"""
super().validate_configuration()
if self.inherit_image is not None:
try:
assert len(str(self.inherit_image).split(":")) == 2
except (ValueError, AssertionError):
raise ArcaMisconfigured(f"Image '{self.inherit_image}' is not a valid value for the 'inherit_image'"
f"setting")
if self.inherit_image is not None and self.get_dependencies() is not None:
raise ArcaMisconfigured("An external image is used as a base image, "
"therefore Arca can't install dependencies.")
if self.use_registry_name is not None:
try:
assert 2 >= len(str(self.inherit_image).split("/")) <= 3
except ValueError:
raise ArcaMisconfigured(f"Registry '{self.use_registry_name}' is not valid value for the "
f"'use_registry_name' setting.")
|
python
|
def validate_configuration(self):
"""
Validates the provided settings.
* Checks ``inherit_image`` format.
* Checks ``use_registry_name`` format.
* Checks that ``apt_dependencies`` is not set when ``inherit_image`` is set.
:raise ArcaMisconfigured: If some of the settings aren't valid.
"""
super().validate_configuration()
if self.inherit_image is not None:
try:
assert len(str(self.inherit_image).split(":")) == 2
except (ValueError, AssertionError):
raise ArcaMisconfigured(f"Image '{self.inherit_image}' is not a valid value for the 'inherit_image'"
f"setting")
if self.inherit_image is not None and self.get_dependencies() is not None:
raise ArcaMisconfigured("An external image is used as a base image, "
"therefore Arca can't install dependencies.")
if self.use_registry_name is not None:
try:
assert 2 >= len(str(self.inherit_image).split("/")) <= 3
except ValueError:
raise ArcaMisconfigured(f"Registry '{self.use_registry_name}' is not valid value for the "
f"'use_registry_name' setting.")
|
[
"def",
"validate_configuration",
"(",
"self",
")",
":",
"super",
"(",
")",
".",
"validate_configuration",
"(",
")",
"if",
"self",
".",
"inherit_image",
"is",
"not",
"None",
":",
"try",
":",
"assert",
"len",
"(",
"str",
"(",
"self",
".",
"inherit_image",
")",
".",
"split",
"(",
"\":\"",
")",
")",
"==",
"2",
"except",
"(",
"ValueError",
",",
"AssertionError",
")",
":",
"raise",
"ArcaMisconfigured",
"(",
"f\"Image '{self.inherit_image}' is not a valid value for the 'inherit_image'\"",
"f\"setting\"",
")",
"if",
"self",
".",
"inherit_image",
"is",
"not",
"None",
"and",
"self",
".",
"get_dependencies",
"(",
")",
"is",
"not",
"None",
":",
"raise",
"ArcaMisconfigured",
"(",
"\"An external image is used as a base image, \"",
"\"therefore Arca can't install dependencies.\"",
")",
"if",
"self",
".",
"use_registry_name",
"is",
"not",
"None",
":",
"try",
":",
"assert",
"2",
">=",
"len",
"(",
"str",
"(",
"self",
".",
"inherit_image",
")",
".",
"split",
"(",
"\"/\"",
")",
")",
"<=",
"3",
"except",
"ValueError",
":",
"raise",
"ArcaMisconfigured",
"(",
"f\"Registry '{self.use_registry_name}' is not valid value for the \"",
"f\"'use_registry_name' setting.\"",
")"
] |
Validates the provided settings.
* Checks ``inherit_image`` format.
* Checks ``use_registry_name`` format.
* Checks that ``apt_dependencies`` is not set when ``inherit_image`` is set.
:raise ArcaMisconfigured: If some of the settings aren't valid.
|
[
"Validates",
"the",
"provided",
"settings",
"."
] |
e67fdc00be473ecf8ec16d024e1a3f2c47ca882c
|
https://github.com/mikicz/arca/blob/e67fdc00be473ecf8ec16d024e1a3f2c47ca882c/arca/backend/docker.py#L88-L116
|
238,219
|
mikicz/arca
|
arca/backend/docker.py
|
DockerBackend.get_dependencies
|
def get_dependencies(self) -> Optional[List[str]]:
""" Returns the ``apt_dependencies`` setting to a standardized format.
:raise ArcaMisconfigured: if the dependencies can't be converted into a list of strings
:return: List of dependencies, ``None`` if there are none.
"""
if not self.apt_dependencies:
return None
try:
dependencies = list([str(x).strip() for x in self.apt_dependencies])
except (TypeError, ValueError):
raise ArcaMisconfigured("Apk dependencies can't be converted into a list of strings")
if not len(dependencies):
return None
dependencies.sort()
return dependencies
|
python
|
def get_dependencies(self) -> Optional[List[str]]:
""" Returns the ``apt_dependencies`` setting to a standardized format.
:raise ArcaMisconfigured: if the dependencies can't be converted into a list of strings
:return: List of dependencies, ``None`` if there are none.
"""
if not self.apt_dependencies:
return None
try:
dependencies = list([str(x).strip() for x in self.apt_dependencies])
except (TypeError, ValueError):
raise ArcaMisconfigured("Apk dependencies can't be converted into a list of strings")
if not len(dependencies):
return None
dependencies.sort()
return dependencies
|
[
"def",
"get_dependencies",
"(",
"self",
")",
"->",
"Optional",
"[",
"List",
"[",
"str",
"]",
"]",
":",
"if",
"not",
"self",
".",
"apt_dependencies",
":",
"return",
"None",
"try",
":",
"dependencies",
"=",
"list",
"(",
"[",
"str",
"(",
"x",
")",
".",
"strip",
"(",
")",
"for",
"x",
"in",
"self",
".",
"apt_dependencies",
"]",
")",
"except",
"(",
"TypeError",
",",
"ValueError",
")",
":",
"raise",
"ArcaMisconfigured",
"(",
"\"Apk dependencies can't be converted into a list of strings\"",
")",
"if",
"not",
"len",
"(",
"dependencies",
")",
":",
"return",
"None",
"dependencies",
".",
"sort",
"(",
")",
"return",
"dependencies"
] |
Returns the ``apt_dependencies`` setting to a standardized format.
:raise ArcaMisconfigured: if the dependencies can't be converted into a list of strings
:return: List of dependencies, ``None`` if there are none.
|
[
"Returns",
"the",
"apt_dependencies",
"setting",
"to",
"a",
"standardized",
"format",
"."
] |
e67fdc00be473ecf8ec16d024e1a3f2c47ca882c
|
https://github.com/mikicz/arca/blob/e67fdc00be473ecf8ec16d024e1a3f2c47ca882c/arca/backend/docker.py#L131-L151
|
238,220
|
mikicz/arca
|
arca/backend/docker.py
|
DockerBackend.get_image_name
|
def get_image_name(self,
repo_path: Path,
requirements_option: RequirementsOptions,
dependencies: Optional[List[str]]) -> str:
""" Returns the name for images with installed requirements and dependencies.
"""
if self.inherit_image is None:
return self.get_arca_base_name()
else:
name, tag = str(self.inherit_image).split(":")
return f"arca_{name}_{tag}"
|
python
|
def get_image_name(self,
repo_path: Path,
requirements_option: RequirementsOptions,
dependencies: Optional[List[str]]) -> str:
""" Returns the name for images with installed requirements and dependencies.
"""
if self.inherit_image is None:
return self.get_arca_base_name()
else:
name, tag = str(self.inherit_image).split(":")
return f"arca_{name}_{tag}"
|
[
"def",
"get_image_name",
"(",
"self",
",",
"repo_path",
":",
"Path",
",",
"requirements_option",
":",
"RequirementsOptions",
",",
"dependencies",
":",
"Optional",
"[",
"List",
"[",
"str",
"]",
"]",
")",
"->",
"str",
":",
"if",
"self",
".",
"inherit_image",
"is",
"None",
":",
"return",
"self",
".",
"get_arca_base_name",
"(",
")",
"else",
":",
"name",
",",
"tag",
"=",
"str",
"(",
"self",
".",
"inherit_image",
")",
".",
"split",
"(",
"\":\"",
")",
"return",
"f\"arca_{name}_{tag}\""
] |
Returns the name for images with installed requirements and dependencies.
|
[
"Returns",
"the",
"name",
"for",
"images",
"with",
"installed",
"requirements",
"and",
"dependencies",
"."
] |
e67fdc00be473ecf8ec16d024e1a3f2c47ca882c
|
https://github.com/mikicz/arca/blob/e67fdc00be473ecf8ec16d024e1a3f2c47ca882c/arca/backend/docker.py#L163-L174
|
238,221
|
mikicz/arca
|
arca/backend/docker.py
|
DockerBackend.get_image_tag
|
def get_image_tag(self,
requirements_option: RequirementsOptions,
requirements_hash: Optional[str],
dependencies: Optional[List[str]]) -> str:
""" Returns the tag for images with the dependencies and requirements installed.
64-byte hexadecimal strings cannot be used as docker tags, so the prefixes are necessary.
Double hashing the dependencies and requirements hash to make the final tag shorter.
Prefixes:
* Image type:
* i – Inherited image
* a – Arca base image
* Requirements:
* r – Does have some kind of requirements
* s – Doesn't have requirements
* Dependencies:
* d – Does have dependencies
* e – Doesn't have dependencies
Possible outputs:
* Inherited images:
* `ise` – no requirements
* `ide_<hash(requirements)>` – with requirements
* From Arca base image:
* `<Arca version>_<Python version>_ase` – no requirements and no dependencies
* `<Arca version>_<Python version>_asd_<hash(dependencies)>` – only dependencies
* `<Arca version>_<Python version>_are_<hash(requirements)>` – only requirements
* `<Arca version>_<Python version>_ard_<hash(hash(dependencies) + hash(requirements))>`
– both requirements and dependencies
"""
prefix = ""
if self.inherit_image is None:
prefix = "{}_{}_".format(arca.__version__, self.get_python_version())
prefix += "i" if self.inherit_image is not None else "a"
prefix += "r" if requirements_option != RequirementsOptions.no_requirements else "s"
prefix += "d" if dependencies is not None else "e"
if self.inherit_image is not None:
if requirements_hash:
return prefix + "_" + requirements_hash
return prefix
if dependencies is None:
dependencies_hash = ""
else:
dependencies_hash = self.get_dependencies_hash(dependencies)
if requirements_hash and dependencies_hash:
return prefix + "_" + hashlib.sha256(bytes(requirements_hash + dependencies_hash, "utf-8")).hexdigest()
elif requirements_hash:
return f"{prefix}_{requirements_hash}"
elif dependencies_hash:
return f"{prefix}_{dependencies_hash}"
else:
return prefix
|
python
|
def get_image_tag(self,
requirements_option: RequirementsOptions,
requirements_hash: Optional[str],
dependencies: Optional[List[str]]) -> str:
""" Returns the tag for images with the dependencies and requirements installed.
64-byte hexadecimal strings cannot be used as docker tags, so the prefixes are necessary.
Double hashing the dependencies and requirements hash to make the final tag shorter.
Prefixes:
* Image type:
* i – Inherited image
* a – Arca base image
* Requirements:
* r – Does have some kind of requirements
* s – Doesn't have requirements
* Dependencies:
* d – Does have dependencies
* e – Doesn't have dependencies
Possible outputs:
* Inherited images:
* `ise` – no requirements
* `ide_<hash(requirements)>` – with requirements
* From Arca base image:
* `<Arca version>_<Python version>_ase` – no requirements and no dependencies
* `<Arca version>_<Python version>_asd_<hash(dependencies)>` – only dependencies
* `<Arca version>_<Python version>_are_<hash(requirements)>` – only requirements
* `<Arca version>_<Python version>_ard_<hash(hash(dependencies) + hash(requirements))>`
– both requirements and dependencies
"""
prefix = ""
if self.inherit_image is None:
prefix = "{}_{}_".format(arca.__version__, self.get_python_version())
prefix += "i" if self.inherit_image is not None else "a"
prefix += "r" if requirements_option != RequirementsOptions.no_requirements else "s"
prefix += "d" if dependencies is not None else "e"
if self.inherit_image is not None:
if requirements_hash:
return prefix + "_" + requirements_hash
return prefix
if dependencies is None:
dependencies_hash = ""
else:
dependencies_hash = self.get_dependencies_hash(dependencies)
if requirements_hash and dependencies_hash:
return prefix + "_" + hashlib.sha256(bytes(requirements_hash + dependencies_hash, "utf-8")).hexdigest()
elif requirements_hash:
return f"{prefix}_{requirements_hash}"
elif dependencies_hash:
return f"{prefix}_{dependencies_hash}"
else:
return prefix
|
[
"def",
"get_image_tag",
"(",
"self",
",",
"requirements_option",
":",
"RequirementsOptions",
",",
"requirements_hash",
":",
"Optional",
"[",
"str",
"]",
",",
"dependencies",
":",
"Optional",
"[",
"List",
"[",
"str",
"]",
"]",
")",
"->",
"str",
":",
"prefix",
"=",
"\"\"",
"if",
"self",
".",
"inherit_image",
"is",
"None",
":",
"prefix",
"=",
"\"{}_{}_\"",
".",
"format",
"(",
"arca",
".",
"__version__",
",",
"self",
".",
"get_python_version",
"(",
")",
")",
"prefix",
"+=",
"\"i\"",
"if",
"self",
".",
"inherit_image",
"is",
"not",
"None",
"else",
"\"a\"",
"prefix",
"+=",
"\"r\"",
"if",
"requirements_option",
"!=",
"RequirementsOptions",
".",
"no_requirements",
"else",
"\"s\"",
"prefix",
"+=",
"\"d\"",
"if",
"dependencies",
"is",
"not",
"None",
"else",
"\"e\"",
"if",
"self",
".",
"inherit_image",
"is",
"not",
"None",
":",
"if",
"requirements_hash",
":",
"return",
"prefix",
"+",
"\"_\"",
"+",
"requirements_hash",
"return",
"prefix",
"if",
"dependencies",
"is",
"None",
":",
"dependencies_hash",
"=",
"\"\"",
"else",
":",
"dependencies_hash",
"=",
"self",
".",
"get_dependencies_hash",
"(",
"dependencies",
")",
"if",
"requirements_hash",
"and",
"dependencies_hash",
":",
"return",
"prefix",
"+",
"\"_\"",
"+",
"hashlib",
".",
"sha256",
"(",
"bytes",
"(",
"requirements_hash",
"+",
"dependencies_hash",
",",
"\"utf-8\"",
")",
")",
".",
"hexdigest",
"(",
")",
"elif",
"requirements_hash",
":",
"return",
"f\"{prefix}_{requirements_hash}\"",
"elif",
"dependencies_hash",
":",
"return",
"f\"{prefix}_{dependencies_hash}\"",
"else",
":",
"return",
"prefix"
] |
Returns the tag for images with the dependencies and requirements installed.
64-byte hexadecimal strings cannot be used as docker tags, so the prefixes are necessary.
Double hashing the dependencies and requirements hash to make the final tag shorter.
Prefixes:
* Image type:
* i – Inherited image
* a – Arca base image
* Requirements:
* r – Does have some kind of requirements
* s – Doesn't have requirements
* Dependencies:
* d – Does have dependencies
* e – Doesn't have dependencies
Possible outputs:
* Inherited images:
* `ise` – no requirements
* `ide_<hash(requirements)>` – with requirements
* From Arca base image:
* `<Arca version>_<Python version>_ase` – no requirements and no dependencies
* `<Arca version>_<Python version>_asd_<hash(dependencies)>` – only dependencies
* `<Arca version>_<Python version>_are_<hash(requirements)>` – only requirements
* `<Arca version>_<Python version>_ard_<hash(hash(dependencies) + hash(requirements))>`
– both requirements and dependencies
|
[
"Returns",
"the",
"tag",
"for",
"images",
"with",
"the",
"dependencies",
"and",
"requirements",
"installed",
"."
] |
e67fdc00be473ecf8ec16d024e1a3f2c47ca882c
|
https://github.com/mikicz/arca/blob/e67fdc00be473ecf8ec16d024e1a3f2c47ca882c/arca/backend/docker.py#L181-L249
|
238,222
|
mikicz/arca
|
arca/backend/docker.py
|
DockerBackend.get_or_build_image
|
def get_or_build_image(self, name: str, tag: str, dockerfile: Union[str, Callable[..., str]], *,
pull=True, build_context: Optional[Path]=None):
"""
A proxy for commonly built images, returns them from the local system if they exist, tries to pull them if
pull isn't disabled, otherwise builds them by the definition in ``dockerfile``.
:param name: Name of the image
:param tag: Image tag
:param dockerfile: Dockerfile text or a callable (no arguments) that produces Dockerfile text
:param pull: If the image is not present locally, allow pulling from registry (default is ``True``)
:param build_context: A path to a folder. If it's provided, docker will build the image in the context
of this folder. (eg. if ``ADD`` is needed)
"""
if self.image_exists(name, tag):
logger.info("Image %s:%s exists", name, tag)
return
elif pull:
logger.info("Trying to pull image %s:%s", name, tag)
try:
self.client.images.pull(name, tag=tag)
logger.info("The image %s:%s was pulled from registry", name, tag)
return
except docker.errors.APIError:
logger.info("The image %s:%s can't be pulled, building locally.", name, tag)
if callable(dockerfile):
dockerfile = dockerfile()
try:
if build_context is None:
fileobj = BytesIO(bytes(dockerfile, "utf-8")) # required by the docker library
self.client.images.build(
fileobj=fileobj,
tag=f"{name}:{tag}"
)
else:
dockerfile_file = build_context / "dockerfile"
dockerfile_file.write_text(dockerfile)
self.client.images.build(
path=str(build_context.resolve()),
dockerfile=dockerfile_file.name,
tag=f"{name}:{tag}"
)
dockerfile_file.unlink()
except docker.errors.BuildError as e:
for line in e.build_log:
if isinstance(line, dict) and line.get("errorDetail") and line["errorDetail"].get("code") in {124, 143}:
raise BuildTimeoutError(f"Installing of requirements timeouted after "
f"{self.requirements_timeout} seconds.")
logger.exception(e)
raise BuildError("Building docker image failed, see extra info for details.", extra_info={
"build_log": e.build_log
})
|
python
|
def get_or_build_image(self, name: str, tag: str, dockerfile: Union[str, Callable[..., str]], *,
pull=True, build_context: Optional[Path]=None):
"""
A proxy for commonly built images, returns them from the local system if they exist, tries to pull them if
pull isn't disabled, otherwise builds them by the definition in ``dockerfile``.
:param name: Name of the image
:param tag: Image tag
:param dockerfile: Dockerfile text or a callable (no arguments) that produces Dockerfile text
:param pull: If the image is not present locally, allow pulling from registry (default is ``True``)
:param build_context: A path to a folder. If it's provided, docker will build the image in the context
of this folder. (eg. if ``ADD`` is needed)
"""
if self.image_exists(name, tag):
logger.info("Image %s:%s exists", name, tag)
return
elif pull:
logger.info("Trying to pull image %s:%s", name, tag)
try:
self.client.images.pull(name, tag=tag)
logger.info("The image %s:%s was pulled from registry", name, tag)
return
except docker.errors.APIError:
logger.info("The image %s:%s can't be pulled, building locally.", name, tag)
if callable(dockerfile):
dockerfile = dockerfile()
try:
if build_context is None:
fileobj = BytesIO(bytes(dockerfile, "utf-8")) # required by the docker library
self.client.images.build(
fileobj=fileobj,
tag=f"{name}:{tag}"
)
else:
dockerfile_file = build_context / "dockerfile"
dockerfile_file.write_text(dockerfile)
self.client.images.build(
path=str(build_context.resolve()),
dockerfile=dockerfile_file.name,
tag=f"{name}:{tag}"
)
dockerfile_file.unlink()
except docker.errors.BuildError as e:
for line in e.build_log:
if isinstance(line, dict) and line.get("errorDetail") and line["errorDetail"].get("code") in {124, 143}:
raise BuildTimeoutError(f"Installing of requirements timeouted after "
f"{self.requirements_timeout} seconds.")
logger.exception(e)
raise BuildError("Building docker image failed, see extra info for details.", extra_info={
"build_log": e.build_log
})
|
[
"def",
"get_or_build_image",
"(",
"self",
",",
"name",
":",
"str",
",",
"tag",
":",
"str",
",",
"dockerfile",
":",
"Union",
"[",
"str",
",",
"Callable",
"[",
"...",
",",
"str",
"]",
"]",
",",
"*",
",",
"pull",
"=",
"True",
",",
"build_context",
":",
"Optional",
"[",
"Path",
"]",
"=",
"None",
")",
":",
"if",
"self",
".",
"image_exists",
"(",
"name",
",",
"tag",
")",
":",
"logger",
".",
"info",
"(",
"\"Image %s:%s exists\"",
",",
"name",
",",
"tag",
")",
"return",
"elif",
"pull",
":",
"logger",
".",
"info",
"(",
"\"Trying to pull image %s:%s\"",
",",
"name",
",",
"tag",
")",
"try",
":",
"self",
".",
"client",
".",
"images",
".",
"pull",
"(",
"name",
",",
"tag",
"=",
"tag",
")",
"logger",
".",
"info",
"(",
"\"The image %s:%s was pulled from registry\"",
",",
"name",
",",
"tag",
")",
"return",
"except",
"docker",
".",
"errors",
".",
"APIError",
":",
"logger",
".",
"info",
"(",
"\"The image %s:%s can't be pulled, building locally.\"",
",",
"name",
",",
"tag",
")",
"if",
"callable",
"(",
"dockerfile",
")",
":",
"dockerfile",
"=",
"dockerfile",
"(",
")",
"try",
":",
"if",
"build_context",
"is",
"None",
":",
"fileobj",
"=",
"BytesIO",
"(",
"bytes",
"(",
"dockerfile",
",",
"\"utf-8\"",
")",
")",
"# required by the docker library",
"self",
".",
"client",
".",
"images",
".",
"build",
"(",
"fileobj",
"=",
"fileobj",
",",
"tag",
"=",
"f\"{name}:{tag}\"",
")",
"else",
":",
"dockerfile_file",
"=",
"build_context",
"/",
"\"dockerfile\"",
"dockerfile_file",
".",
"write_text",
"(",
"dockerfile",
")",
"self",
".",
"client",
".",
"images",
".",
"build",
"(",
"path",
"=",
"str",
"(",
"build_context",
".",
"resolve",
"(",
")",
")",
",",
"dockerfile",
"=",
"dockerfile_file",
".",
"name",
",",
"tag",
"=",
"f\"{name}:{tag}\"",
")",
"dockerfile_file",
".",
"unlink",
"(",
")",
"except",
"docker",
".",
"errors",
".",
"BuildError",
"as",
"e",
":",
"for",
"line",
"in",
"e",
".",
"build_log",
":",
"if",
"isinstance",
"(",
"line",
",",
"dict",
")",
"and",
"line",
".",
"get",
"(",
"\"errorDetail\"",
")",
"and",
"line",
"[",
"\"errorDetail\"",
"]",
".",
"get",
"(",
"\"code\"",
")",
"in",
"{",
"124",
",",
"143",
"}",
":",
"raise",
"BuildTimeoutError",
"(",
"f\"Installing of requirements timeouted after \"",
"f\"{self.requirements_timeout} seconds.\"",
")",
"logger",
".",
"exception",
"(",
"e",
")",
"raise",
"BuildError",
"(",
"\"Building docker image failed, see extra info for details.\"",
",",
"extra_info",
"=",
"{",
"\"build_log\"",
":",
"e",
".",
"build_log",
"}",
")"
] |
A proxy for commonly built images, returns them from the local system if they exist, tries to pull them if
pull isn't disabled, otherwise builds them by the definition in ``dockerfile``.
:param name: Name of the image
:param tag: Image tag
:param dockerfile: Dockerfile text or a callable (no arguments) that produces Dockerfile text
:param pull: If the image is not present locally, allow pulling from registry (default is ``True``)
:param build_context: A path to a folder. If it's provided, docker will build the image in the context
of this folder. (eg. if ``ADD`` is needed)
|
[
"A",
"proxy",
"for",
"commonly",
"built",
"images",
"returns",
"them",
"from",
"the",
"local",
"system",
"if",
"they",
"exist",
"tries",
"to",
"pull",
"them",
"if",
"pull",
"isn",
"t",
"disabled",
"otherwise",
"builds",
"them",
"by",
"the",
"definition",
"in",
"dockerfile",
"."
] |
e67fdc00be473ecf8ec16d024e1a3f2c47ca882c
|
https://github.com/mikicz/arca/blob/e67fdc00be473ecf8ec16d024e1a3f2c47ca882c/arca/backend/docker.py#L257-L316
|
238,223
|
mikicz/arca
|
arca/backend/docker.py
|
DockerBackend.get_inherit_image
|
def get_inherit_image(self) -> Tuple[str, str]:
    """ Parses the ``inherit_image`` setting, checks if the image is present locally and pulls it otherwise.

    :return: Returns the name and the tag of the image.
    :raise ArcaMisconfiguration: If the image can't be pulled from registries.
    """
    name, tag = str(self.inherit_image).split(":")

    # only hit the registry when the image isn't already available locally
    if not self.image_exists(name, tag):
        try:
            self.client.images.pull(name, tag)
        except docker.errors.APIError:
            raise ArcaMisconfigured(f"The specified image {self.inherit_image} from which Arca should inherit "
                                    f"can't be pulled")

    return name, tag
|
python
|
def get_inherit_image(self) -> Tuple[str, str]:
""" Parses the ``inherit_image`` setting, checks if the image is present locally and pulls it otherwise.
:return: Returns the name and the tag of the image.
:raise ArcaMisconfiguration: If the image can't be pulled from registries.
"""
name, tag = str(self.inherit_image).split(":")
if self.image_exists(name, tag):
return name, tag
try:
self.client.images.pull(name, tag)
except docker.errors.APIError:
raise ArcaMisconfigured(f"The specified image {self.inherit_image} from which Arca should inherit "
f"can't be pulled")
return name, tag
|
[
"def",
"get_inherit_image",
"(",
"self",
")",
"->",
"Tuple",
"[",
"str",
",",
"str",
"]",
":",
"name",
",",
"tag",
"=",
"str",
"(",
"self",
".",
"inherit_image",
")",
".",
"split",
"(",
"\":\"",
")",
"if",
"self",
".",
"image_exists",
"(",
"name",
",",
"tag",
")",
":",
"return",
"name",
",",
"tag",
"try",
":",
"self",
".",
"client",
".",
"images",
".",
"pull",
"(",
"name",
",",
"tag",
")",
"except",
"docker",
".",
"errors",
".",
"APIError",
":",
"raise",
"ArcaMisconfigured",
"(",
"f\"The specified image {self.inherit_image} from which Arca should inherit \"",
"f\"can't be pulled\"",
")",
"return",
"name",
",",
"tag"
] |
Parses the ``inherit_image`` setting, checks if the image is present locally and pulls it otherwise.
:return: Returns the name and the tag of the image.
:raise ArcaMisconfiguration: If the image can't be pulled from registries.
|
[
"Parses",
"the",
"inherit_image",
"setting",
"checks",
"if",
"the",
"image",
"is",
"present",
"locally",
"and",
"pulls",
"it",
"otherwise",
"."
] |
e67fdc00be473ecf8ec16d024e1a3f2c47ca882c
|
https://github.com/mikicz/arca/blob/e67fdc00be473ecf8ec16d024e1a3f2c47ca882c/arca/backend/docker.py#L394-L410
|
238,224
|
mikicz/arca
|
arca/backend/docker.py
|
DockerBackend.get_image_with_installed_dependencies
|
def get_image_with_installed_dependencies(self, image_name: str,
                                          dependencies: Optional[List[str]]) -> Tuple[str, str]:
    """
    Return name and tag of a image, based on the Arca python image, with installed dependencies defined
    by ``apt_dependencies``.

    :param image_name: Name of the image which will be ultimately used for the image.
    :param dependencies: List of dependencies in the standardized format.
    """
    python_version = self.get_python_version()

    # no extra dependencies -> the plain python base image is all that's needed
    if dependencies is None:
        return self.get_python_base(python_version, pull=not self.disable_pull)

    def install_dependencies_dockerfile():
        # resolved lazily, so the base image is only fetched when a build is actually required
        python_name, python_tag = self.get_python_base(python_version,
                                                       pull=not self.disable_pull)
        return self.INSTALL_DEPENDENCIES.format(
            name=python_name,
            tag=python_tag,
            dependencies=" ".join(self.get_dependencies())
        )

    image_tag = self.get_image_tag(RequirementsOptions.no_requirements, None, dependencies)

    self.get_or_build_image(image_name, image_tag, install_dependencies_dockerfile,
                            pull=not self.disable_pull)

    return image_name, image_tag
|
python
|
def get_image_with_installed_dependencies(self, image_name: str,
dependencies: Optional[List[str]]) -> Tuple[str, str]:
"""
Return name and tag of a image, based on the Arca python image, with installed dependencies defined
by ``apt_dependencies``.
:param image_name: Name of the image which will be ultimately used for the image.
:param dependencies: List of dependencies in the standardized format.
"""
python_version = self.get_python_version()
if dependencies is not None:
def install_dependencies_dockerfile():
python_name, python_tag = self.get_python_base(python_version,
pull=not self.disable_pull)
return self.INSTALL_DEPENDENCIES.format(
name=python_name,
tag=python_tag,
dependencies=" ".join(self.get_dependencies())
)
image_tag = self.get_image_tag(RequirementsOptions.no_requirements, None, dependencies)
self.get_or_build_image(image_name, image_tag, install_dependencies_dockerfile,
pull=not self.disable_pull)
return image_name, image_tag
else:
return self.get_python_base(python_version, pull=not self.disable_pull)
|
[
"def",
"get_image_with_installed_dependencies",
"(",
"self",
",",
"image_name",
":",
"str",
",",
"dependencies",
":",
"Optional",
"[",
"List",
"[",
"str",
"]",
"]",
")",
"->",
"Tuple",
"[",
"str",
",",
"str",
"]",
":",
"python_version",
"=",
"self",
".",
"get_python_version",
"(",
")",
"if",
"dependencies",
"is",
"not",
"None",
":",
"def",
"install_dependencies_dockerfile",
"(",
")",
":",
"python_name",
",",
"python_tag",
"=",
"self",
".",
"get_python_base",
"(",
"python_version",
",",
"pull",
"=",
"not",
"self",
".",
"disable_pull",
")",
"return",
"self",
".",
"INSTALL_DEPENDENCIES",
".",
"format",
"(",
"name",
"=",
"python_name",
",",
"tag",
"=",
"python_tag",
",",
"dependencies",
"=",
"\" \"",
".",
"join",
"(",
"self",
".",
"get_dependencies",
"(",
")",
")",
")",
"image_tag",
"=",
"self",
".",
"get_image_tag",
"(",
"RequirementsOptions",
".",
"no_requirements",
",",
"None",
",",
"dependencies",
")",
"self",
".",
"get_or_build_image",
"(",
"image_name",
",",
"image_tag",
",",
"install_dependencies_dockerfile",
",",
"pull",
"=",
"not",
"self",
".",
"disable_pull",
")",
"return",
"image_name",
",",
"image_tag",
"else",
":",
"return",
"self",
".",
"get_python_base",
"(",
"python_version",
",",
"pull",
"=",
"not",
"self",
".",
"disable_pull",
")"
] |
Return name and tag of a image, based on the Arca python image, with installed dependencies defined
by ``apt_dependencies``.
:param image_name: Name of the image which will be ultimately used for the image.
:param dependencies: List of dependencies in the standardized format.
|
[
"Return",
"name",
"and",
"tag",
"of",
"a",
"image",
"based",
"on",
"the",
"Arca",
"python",
"image",
"with",
"installed",
"dependencies",
"defined",
"by",
"apt_dependencies",
"."
] |
e67fdc00be473ecf8ec16d024e1a3f2c47ca882c
|
https://github.com/mikicz/arca/blob/e67fdc00be473ecf8ec16d024e1a3f2c47ca882c/arca/backend/docker.py#L438-L466
|
238,225
|
mikicz/arca
|
arca/backend/docker.py
|
DockerBackend.build_image
|
def build_image(self, image_name: str, image_tag: str,
                repo_path: Path,
                requirements_option: RequirementsOptions,
                dependencies: Optional[List[str]]):
    """ Builds an image for specific requirements and dependencies, based on the settings.

    :param image_name: How the image should be named
    :param image_tag: And what tag it should have.
    :param repo_path: Path to the cloned repository.
    :param requirements_option: How requirements are set in the repository.
    :param dependencies: List of dependencies (in the formalized format)
    :return: The Image instance.
    :rtype: docker.models.images.Image
    """
    # a custom base image was configured -- delegate instead of using the Arca python images
    if self.inherit_image is not None:
        return self.build_image_from_inherited_image(image_name, image_tag, repo_path, requirements_option)
    if requirements_option == RequirementsOptions.no_requirements:
        python_version = self.get_python_version()
        # no requirements and no dependencies, just return the basic image with the correct python installed
        if dependencies is None:
            base_name, base_tag = self.get_python_base(python_version, pull=not self.disable_pull)
            image = self.get_image(base_name, base_tag)
            # tag the image so ``build_image`` doesn't have to be called next time
            image.tag(image_name, image_tag)
            return image
        # extend the image with correct python by installing the dependencies
        def install_dependencies_dockerfile():
            # evaluated lazily by ``get_or_build_image`` only when the target image is missing
            base_name, base_tag = self.get_python_base(python_version, pull=not self.disable_pull)
            return self.INSTALL_DEPENDENCIES.format(
                name=base_name,
                tag=base_tag,
                dependencies=" ".join(dependencies)
            )
        self.get_or_build_image(image_name, image_tag, install_dependencies_dockerfile)
        return self.get_image(image_name, image_tag)
    else:  # doesn't have to be here, but the return right above was confusing
        def install_requirements_dockerfile():
            """ Returns a Dockerfile for installing pip requirements,
            based on a image with installed dependencies (or no extra dependencies)
            """
            dependencies_name, dependencies_tag = self.get_image_with_installed_dependencies(image_name,
                                                                                             dependencies)
            return self.get_install_requirements_dockerfile(
                name=dependencies_name,
                tag=dependencies_tag,
                repo_path=repo_path,
                requirements_option=requirements_option,
            )
        # built in the context of the repo's parent folder, presumably so the requirements
        # file can be copied into the image -- see get_install_requirements_dockerfile
        self.get_or_build_image(image_name, image_tag, install_requirements_dockerfile,
                                build_context=repo_path.parent, pull=False)
        return self.get_image(image_name, image_tag)
|
python
|
def build_image(self, image_name: str, image_tag: str,
repo_path: Path,
requirements_option: RequirementsOptions,
dependencies: Optional[List[str]]):
""" Builds an image for specific requirements and dependencies, based on the settings.
:param image_name: How the image should be named
:param image_tag: And what tag it should have.
:param repo_path: Path to the cloned repository.
:param requirements_option: How requirements are set in the repository.
:param dependencies: List of dependencies (in the formalized format)
:return: The Image instance.
:rtype: docker.models.images.Image
"""
if self.inherit_image is not None:
return self.build_image_from_inherited_image(image_name, image_tag, repo_path, requirements_option)
if requirements_option == RequirementsOptions.no_requirements:
python_version = self.get_python_version()
# no requirements and no dependencies, just return the basic image with the correct python installed
if dependencies is None:
base_name, base_tag = self.get_python_base(python_version, pull=not self.disable_pull)
image = self.get_image(base_name, base_tag)
# tag the image so ``build_image`` doesn't have to be called next time
image.tag(image_name, image_tag)
return image
# extend the image with correct python by installing the dependencies
def install_dependencies_dockerfile():
base_name, base_tag = self.get_python_base(python_version, pull=not self.disable_pull)
return self.INSTALL_DEPENDENCIES.format(
name=base_name,
tag=base_tag,
dependencies=" ".join(dependencies)
)
self.get_or_build_image(image_name, image_tag, install_dependencies_dockerfile)
return self.get_image(image_name, image_tag)
else: # doesn't have to be here, but the return right above was confusing
def install_requirements_dockerfile():
""" Returns a Dockerfile for installing pip requirements,
based on a image with installed dependencies (or no extra dependencies)
"""
dependencies_name, dependencies_tag = self.get_image_with_installed_dependencies(image_name,
dependencies)
return self.get_install_requirements_dockerfile(
name=dependencies_name,
tag=dependencies_tag,
repo_path=repo_path,
requirements_option=requirements_option,
)
self.get_or_build_image(image_name, image_tag, install_requirements_dockerfile,
build_context=repo_path.parent, pull=False)
return self.get_image(image_name, image_tag)
|
[
"def",
"build_image",
"(",
"self",
",",
"image_name",
":",
"str",
",",
"image_tag",
":",
"str",
",",
"repo_path",
":",
"Path",
",",
"requirements_option",
":",
"RequirementsOptions",
",",
"dependencies",
":",
"Optional",
"[",
"List",
"[",
"str",
"]",
"]",
")",
":",
"if",
"self",
".",
"inherit_image",
"is",
"not",
"None",
":",
"return",
"self",
".",
"build_image_from_inherited_image",
"(",
"image_name",
",",
"image_tag",
",",
"repo_path",
",",
"requirements_option",
")",
"if",
"requirements_option",
"==",
"RequirementsOptions",
".",
"no_requirements",
":",
"python_version",
"=",
"self",
".",
"get_python_version",
"(",
")",
"# no requirements and no dependencies, just return the basic image with the correct python installed",
"if",
"dependencies",
"is",
"None",
":",
"base_name",
",",
"base_tag",
"=",
"self",
".",
"get_python_base",
"(",
"python_version",
",",
"pull",
"=",
"not",
"self",
".",
"disable_pull",
")",
"image",
"=",
"self",
".",
"get_image",
"(",
"base_name",
",",
"base_tag",
")",
"# tag the image so ``build_image`` doesn't have to be called next time",
"image",
".",
"tag",
"(",
"image_name",
",",
"image_tag",
")",
"return",
"image",
"# extend the image with correct python by installing the dependencies",
"def",
"install_dependencies_dockerfile",
"(",
")",
":",
"base_name",
",",
"base_tag",
"=",
"self",
".",
"get_python_base",
"(",
"python_version",
",",
"pull",
"=",
"not",
"self",
".",
"disable_pull",
")",
"return",
"self",
".",
"INSTALL_DEPENDENCIES",
".",
"format",
"(",
"name",
"=",
"base_name",
",",
"tag",
"=",
"base_tag",
",",
"dependencies",
"=",
"\" \"",
".",
"join",
"(",
"dependencies",
")",
")",
"self",
".",
"get_or_build_image",
"(",
"image_name",
",",
"image_tag",
",",
"install_dependencies_dockerfile",
")",
"return",
"self",
".",
"get_image",
"(",
"image_name",
",",
"image_tag",
")",
"else",
":",
"# doesn't have to be here, but the return right above was confusing",
"def",
"install_requirements_dockerfile",
"(",
")",
":",
"\"\"\" Returns a Dockerfile for installing pip requirements,\n based on a image with installed dependencies (or no extra dependencies)\n \"\"\"",
"dependencies_name",
",",
"dependencies_tag",
"=",
"self",
".",
"get_image_with_installed_dependencies",
"(",
"image_name",
",",
"dependencies",
")",
"return",
"self",
".",
"get_install_requirements_dockerfile",
"(",
"name",
"=",
"dependencies_name",
",",
"tag",
"=",
"dependencies_tag",
",",
"repo_path",
"=",
"repo_path",
",",
"requirements_option",
"=",
"requirements_option",
",",
")",
"self",
".",
"get_or_build_image",
"(",
"image_name",
",",
"image_tag",
",",
"install_requirements_dockerfile",
",",
"build_context",
"=",
"repo_path",
".",
"parent",
",",
"pull",
"=",
"False",
")",
"return",
"self",
".",
"get_image",
"(",
"image_name",
",",
"image_tag",
")"
] |
Builds an image for specific requirements and dependencies, based on the settings.
:param image_name: How the image should be named
:param image_tag: And what tag it should have.
:param repo_path: Path to the cloned repository.
:param requirements_option: How requirements are set in the repository.
:param dependencies: List of dependencies (in the formalized format)
:return: The Image instance.
:rtype: docker.models.images.Image
|
[
"Builds",
"an",
"image",
"for",
"specific",
"requirements",
"and",
"dependencies",
"based",
"on",
"the",
"settings",
"."
] |
e67fdc00be473ecf8ec16d024e1a3f2c47ca882c
|
https://github.com/mikicz/arca/blob/e67fdc00be473ecf8ec16d024e1a3f2c47ca882c/arca/backend/docker.py#L468-L531
|
238,226
|
mikicz/arca
|
arca/backend/docker.py
|
DockerBackend.push_to_registry
|
def push_to_registry(self, image, image_tag: str):
    """ Pushes a local image to a registry based on the ``use_registry_name`` setting.

    :type image: docker.models.images.Image
    :raise PushToRegistryError: If the push fails.
    """
    registry_reference = f"{self.use_registry_name}:{image_tag}"

    # already tagged, so it's already pushed
    if registry_reference in image.tags:
        return

    image.tag(self.use_registry_name, image_tag)

    result = self.client.images.push(self.use_registry_name, image_tag).strip()  # drop the trailing empty line

    # the last can have one of two outputs, either
    # {"progressDetail":{},"aux":{"Tag":"<tag>","Digest":"sha256:<hash>","Size":<size>}}
    # when the push is successful, or
    # {"errorDetail": {"message":"<error_msg>"},"error":"<error_msg>"}
    # when the push is not successful
    last_line = json.loads(result.split("\n")[-1])

    if "error" in last_line:
        # roll back the registry tag so a broken push isn't mistaken for a finished one
        self.client.images.remove(registry_reference)
        raise PushToRegistryError(f"Push of the image failed because of: {last_line['error']}", full_output=result)

    logger.info("Pushed image to registry %s:%s", self.use_registry_name, image_tag)
    logger.debug("Info:\n%s", result)
|
python
|
def push_to_registry(self, image, image_tag: str):
""" Pushes a local image to a registry based on the ``use_registry_name`` setting.
:type image: docker.models.images.Image
:raise PushToRegistryError: If the push fails.
"""
# already tagged, so it's already pushed
if f"{self.use_registry_name}:{image_tag}" in image.tags:
return
image.tag(self.use_registry_name, image_tag)
result = self.client.images.push(self.use_registry_name, image_tag)
result = result.strip() # remove empty line at the end of output
# the last can have one of two outputs, either
# {"progressDetail":{},"aux":{"Tag":"<tag>","Digest":"sha256:<hash>","Size":<size>}}
# when the push is successful, or
# {"errorDetail": {"message":"<error_msg>"},"error":"<error_msg>"}
# when the push is not successful
last_line = json.loads(result.split("\n")[-1])
if "error" in last_line:
self.client.images.remove(f"{self.use_registry_name}:{image_tag}")
raise PushToRegistryError(f"Push of the image failed because of: {last_line['error']}", full_output=result)
logger.info("Pushed image to registry %s:%s", self.use_registry_name, image_tag)
logger.debug("Info:\n%s", result)
|
[
"def",
"push_to_registry",
"(",
"self",
",",
"image",
",",
"image_tag",
":",
"str",
")",
":",
"# already tagged, so it's already pushed",
"if",
"f\"{self.use_registry_name}:{image_tag}\"",
"in",
"image",
".",
"tags",
":",
"return",
"image",
".",
"tag",
"(",
"self",
".",
"use_registry_name",
",",
"image_tag",
")",
"result",
"=",
"self",
".",
"client",
".",
"images",
".",
"push",
"(",
"self",
".",
"use_registry_name",
",",
"image_tag",
")",
"result",
"=",
"result",
".",
"strip",
"(",
")",
"# remove empty line at the end of output",
"# the last can have one of two outputs, either",
"# {\"progressDetail\":{},\"aux\":{\"Tag\":\"<tag>\",\"Digest\":\"sha256:<hash>\",\"Size\":<size>}}",
"# when the push is successful, or",
"# {\"errorDetail\": {\"message\":\"<error_msg>\"},\"error\":\"<error_msg>\"}",
"# when the push is not successful",
"last_line",
"=",
"json",
".",
"loads",
"(",
"result",
".",
"split",
"(",
"\"\\n\"",
")",
"[",
"-",
"1",
"]",
")",
"if",
"\"error\"",
"in",
"last_line",
":",
"self",
".",
"client",
".",
"images",
".",
"remove",
"(",
"f\"{self.use_registry_name}:{image_tag}\"",
")",
"raise",
"PushToRegistryError",
"(",
"f\"Push of the image failed because of: {last_line['error']}\"",
",",
"full_output",
"=",
"result",
")",
"logger",
".",
"info",
"(",
"\"Pushed image to registry %s:%s\"",
",",
"self",
".",
"use_registry_name",
",",
"image_tag",
")",
"logger",
".",
"debug",
"(",
"\"Info:\\n%s\"",
",",
"result",
")"
] |
Pushes a local image to a registry based on the ``use_registry_name`` setting.
:type image: docker.models.images.Image
:raise PushToRegistryError: If the push fails.
|
[
"Pushes",
"a",
"local",
"image",
"to",
"a",
"registry",
"based",
"on",
"the",
"use_registry_name",
"setting",
"."
] |
e67fdc00be473ecf8ec16d024e1a3f2c47ca882c
|
https://github.com/mikicz/arca/blob/e67fdc00be473ecf8ec16d024e1a3f2c47ca882c/arca/backend/docker.py#L533-L562
|
238,227
|
mikicz/arca
|
arca/backend/docker.py
|
DockerBackend.image_exists
|
def image_exists(self, image_name, image_tag):
    """ Returns if the image exists locally.
    """
    try:
        self.get_image(image_name, image_tag)
    except docker.errors.ImageNotFound:
        return False
    else:
        return True
|
python
|
def image_exists(self, image_name, image_tag):
""" Returns if the image exists locally.
"""
try:
self.get_image(image_name, image_tag)
return True
except docker.errors.ImageNotFound:
return False
|
[
"def",
"image_exists",
"(",
"self",
",",
"image_name",
",",
"image_tag",
")",
":",
"try",
":",
"self",
".",
"get_image",
"(",
"image_name",
",",
"image_tag",
")",
"return",
"True",
"except",
"docker",
".",
"errors",
".",
"ImageNotFound",
":",
"return",
"False"
] |
Returns if the image exists locally.
|
[
"Returns",
"if",
"the",
"image",
"exists",
"locally",
"."
] |
e67fdc00be473ecf8ec16d024e1a3f2c47ca882c
|
https://github.com/mikicz/arca/blob/e67fdc00be473ecf8ec16d024e1a3f2c47ca882c/arca/backend/docker.py#L564-L571
|
238,228
|
mikicz/arca
|
arca/backend/docker.py
|
DockerBackend.container_running
|
def container_running(self, container_name):
    """
    Finds out if a container with name ``container_name`` is running.

    :return: :class:`Container <docker.models.containers.Container>` if it's running, ``None`` otherwise.
    :rtype: Optional[docker.models.container.Container]
    """
    candidates = self.client.containers.list(filters={
        "name": container_name,
        "status": "running",
    })

    # the name filter may match more than the exact name, so compare explicitly
    return next((container for container in candidates if container.name == container_name), None)
|
python
|
def container_running(self, container_name):
"""
Finds out if a container with name ``container_name`` is running.
:return: :class:`Container <docker.models.containers.Container>` if it's running, ``None`` otherwise.
:rtype: Optional[docker.models.container.Container]
"""
filters = {
"name": container_name,
"status": "running",
}
for container in self.client.containers.list(filters=filters):
if container_name == container.name:
return container
return None
|
[
"def",
"container_running",
"(",
"self",
",",
"container_name",
")",
":",
"filters",
"=",
"{",
"\"name\"",
":",
"container_name",
",",
"\"status\"",
":",
"\"running\"",
",",
"}",
"for",
"container",
"in",
"self",
".",
"client",
".",
"containers",
".",
"list",
"(",
"filters",
"=",
"filters",
")",
":",
"if",
"container_name",
"==",
"container",
".",
"name",
":",
"return",
"container",
"return",
"None"
] |
Finds out if a container with name ``container_name`` is running.
:return: :class:`Container <docker.models.containers.Container>` if it's running, ``None`` otherwise.
:rtype: Optional[docker.models.container.Container]
|
[
"Finds",
"out",
"if",
"a",
"container",
"with",
"name",
"container_name",
"is",
"running",
"."
] |
e67fdc00be473ecf8ec16d024e1a3f2c47ca882c
|
https://github.com/mikicz/arca/blob/e67fdc00be473ecf8ec16d024e1a3f2c47ca882c/arca/backend/docker.py#L603-L618
|
238,229
|
mikicz/arca
|
arca/backend/docker.py
|
DockerBackend.tar_files
|
def tar_files(self, path: Path) -> bytes:
    """ Returns a tar with the git repository.

    The folder at ``path`` is archived under the ``data`` top-level name.
    """
    buffer = BytesIO()

    with tarfile.TarFile(fileobj=buffer, mode='w') as archive:
        archive.add(str(path), arcname="data", recursive=True)

    return buffer.getvalue()
|
python
|
def tar_files(self, path: Path) -> bytes:
""" Returns a tar with the git repository.
"""
tarstream = BytesIO()
tar = tarfile.TarFile(fileobj=tarstream, mode='w')
tar.add(str(path), arcname="data", recursive=True)
tar.close()
return tarstream.getvalue()
|
[
"def",
"tar_files",
"(",
"self",
",",
"path",
":",
"Path",
")",
"->",
"bytes",
":",
"tarstream",
"=",
"BytesIO",
"(",
")",
"tar",
"=",
"tarfile",
".",
"TarFile",
"(",
"fileobj",
"=",
"tarstream",
",",
"mode",
"=",
"'w'",
")",
"tar",
".",
"add",
"(",
"str",
"(",
"path",
")",
",",
"arcname",
"=",
"\"data\"",
",",
"recursive",
"=",
"True",
")",
"tar",
".",
"close",
"(",
")",
"return",
"tarstream",
".",
"getvalue",
"(",
")"
] |
Returns a tar with the git repository.
|
[
"Returns",
"a",
"tar",
"with",
"the",
"git",
"repository",
"."
] |
e67fdc00be473ecf8ec16d024e1a3f2c47ca882c
|
https://github.com/mikicz/arca/blob/e67fdc00be473ecf8ec16d024e1a3f2c47ca882c/arca/backend/docker.py#L620-L627
|
238,230
|
mikicz/arca
|
arca/backend/docker.py
|
DockerBackend.tar_runner
|
def tar_runner(self):
    """ Returns a tar with the runner script.

    The script at ``self.RUNNER`` is stored as a single ``runner.py`` member.
    """
    script_bytes = self.RUNNER.read_bytes()

    buffer = BytesIO()

    with tarfile.TarFile(fileobj=buffer, mode='w') as archive:
        member = tarfile.TarInfo(name="runner.py")
        member.size = len(script_bytes)
        member.mtime = int(time.time())
        archive.addfile(member, BytesIO(script_bytes))

    return buffer.getvalue()
|
python
|
def tar_runner(self):
""" Returns a tar with the runner script.
"""
script_bytes = self.RUNNER.read_bytes()
tarstream = BytesIO()
tar = tarfile.TarFile(fileobj=tarstream, mode='w')
tarinfo = tarfile.TarInfo(name="runner.py")
tarinfo.size = len(script_bytes)
tarinfo.mtime = int(time.time())
tar.addfile(tarinfo, BytesIO(script_bytes))
tar.close()
return tarstream.getvalue()
|
[
"def",
"tar_runner",
"(",
"self",
")",
":",
"script_bytes",
"=",
"self",
".",
"RUNNER",
".",
"read_bytes",
"(",
")",
"tarstream",
"=",
"BytesIO",
"(",
")",
"tar",
"=",
"tarfile",
".",
"TarFile",
"(",
"fileobj",
"=",
"tarstream",
",",
"mode",
"=",
"'w'",
")",
"tarinfo",
"=",
"tarfile",
".",
"TarInfo",
"(",
"name",
"=",
"\"runner.py\"",
")",
"tarinfo",
".",
"size",
"=",
"len",
"(",
"script_bytes",
")",
"tarinfo",
".",
"mtime",
"=",
"int",
"(",
"time",
".",
"time",
"(",
")",
")",
"tar",
".",
"addfile",
"(",
"tarinfo",
",",
"BytesIO",
"(",
"script_bytes",
")",
")",
"tar",
".",
"close",
"(",
")",
"return",
"tarstream",
".",
"getvalue",
"(",
")"
] |
Returns a tar with the runner script.
|
[
"Returns",
"a",
"tar",
"with",
"the",
"runner",
"script",
"."
] |
e67fdc00be473ecf8ec16d024e1a3f2c47ca882c
|
https://github.com/mikicz/arca/blob/e67fdc00be473ecf8ec16d024e1a3f2c47ca882c/arca/backend/docker.py#L629-L644
|
238,231
|
mikicz/arca
|
arca/backend/docker.py
|
DockerBackend.tar_task_definition
|
def tar_task_definition(self, name: str, contents: str) -> bytes:
    """ Returns a tar with the task definition.

    :param name: Name of the file
    :param contents: Contents of the definition, utf-8
    """
    payload = contents.encode("utf-8")

    buffer = BytesIO()

    with tarfile.TarFile(fileobj=buffer, mode='w') as archive:
        member = tarfile.TarInfo(name=name)
        member.size = len(payload)
        member.mtime = int(time.time())
        archive.addfile(member, BytesIO(payload))

    return buffer.getvalue()
|
python
|
def tar_task_definition(self, name: str, contents: str) -> bytes:
""" Returns a tar with the task definition.
:param name: Name of the file
:param contents: Contens of the definition, utf-8
"""
tarstream = BytesIO()
tar = tarfile.TarFile(fileobj=tarstream, mode='w')
tarinfo = tarfile.TarInfo(name=name)
script_bytes = contents.encode("utf-8")
tarinfo.size = len(script_bytes)
tarinfo.mtime = int(time.time())
tar.addfile(tarinfo, BytesIO(script_bytes))
tar.close()
return tarstream.getvalue()
|
[
"def",
"tar_task_definition",
"(",
"self",
",",
"name",
":",
"str",
",",
"contents",
":",
"str",
")",
"->",
"bytes",
":",
"tarstream",
"=",
"BytesIO",
"(",
")",
"tar",
"=",
"tarfile",
".",
"TarFile",
"(",
"fileobj",
"=",
"tarstream",
",",
"mode",
"=",
"'w'",
")",
"tarinfo",
"=",
"tarfile",
".",
"TarInfo",
"(",
"name",
"=",
"name",
")",
"script_bytes",
"=",
"contents",
".",
"encode",
"(",
"\"utf-8\"",
")",
"tarinfo",
".",
"size",
"=",
"len",
"(",
"script_bytes",
")",
"tarinfo",
".",
"mtime",
"=",
"int",
"(",
"time",
".",
"time",
"(",
")",
")",
"tar",
".",
"addfile",
"(",
"tarinfo",
",",
"BytesIO",
"(",
"script_bytes",
")",
")",
"tar",
".",
"close",
"(",
")",
"return",
"tarstream",
".",
"getvalue",
"(",
")"
] |
Returns a tar with the task definition.
:param name: Name of the file
:param contents: Contens of the definition, utf-8
|
[
"Returns",
"a",
"tar",
"with",
"the",
"task",
"definition",
"."
] |
e67fdc00be473ecf8ec16d024e1a3f2c47ca882c
|
https://github.com/mikicz/arca/blob/e67fdc00be473ecf8ec16d024e1a3f2c47ca882c/arca/backend/docker.py#L646-L663
|
238,232
|
mikicz/arca
|
arca/backend/docker.py
|
DockerBackend.start_container
|
def start_container(self, image, container_name: str, repo_path: Path):
""" Starts a container with the image and name ``container_name`` and copies the repository into the container.
:type image: docker.models.images.Image
:rtype: docker.models.container.Container
"""
command = "bash -i"
if self.inherit_image:
command = "sh -i"
container = self.client.containers.run(image, command=command, detach=True, tty=True, name=container_name,
working_dir=str((Path("/srv/data") / self.cwd).resolve()),
auto_remove=True)
container.exec_run(["mkdir", "-p", "/srv/scripts"])
container.put_archive("/srv", self.tar_files(repo_path))
container.put_archive("/srv/scripts", self.tar_runner())
return container
|
python
|
def start_container(self, image, container_name: str, repo_path: Path):
""" Starts a container with the image and name ``container_name`` and copies the repository into the container.
:type image: docker.models.images.Image
:rtype: docker.models.container.Container
"""
command = "bash -i"
if self.inherit_image:
command = "sh -i"
container = self.client.containers.run(image, command=command, detach=True, tty=True, name=container_name,
working_dir=str((Path("/srv/data") / self.cwd).resolve()),
auto_remove=True)
container.exec_run(["mkdir", "-p", "/srv/scripts"])
container.put_archive("/srv", self.tar_files(repo_path))
container.put_archive("/srv/scripts", self.tar_runner())
return container
|
[
"def",
"start_container",
"(",
"self",
",",
"image",
",",
"container_name",
":",
"str",
",",
"repo_path",
":",
"Path",
")",
":",
"command",
"=",
"\"bash -i\"",
"if",
"self",
".",
"inherit_image",
":",
"command",
"=",
"\"sh -i\"",
"container",
"=",
"self",
".",
"client",
".",
"containers",
".",
"run",
"(",
"image",
",",
"command",
"=",
"command",
",",
"detach",
"=",
"True",
",",
"tty",
"=",
"True",
",",
"name",
"=",
"container_name",
",",
"working_dir",
"=",
"str",
"(",
"(",
"Path",
"(",
"\"/srv/data\"",
")",
"/",
"self",
".",
"cwd",
")",
".",
"resolve",
"(",
")",
")",
",",
"auto_remove",
"=",
"True",
")",
"container",
".",
"exec_run",
"(",
"[",
"\"mkdir\"",
",",
"\"-p\"",
",",
"\"/srv/scripts\"",
"]",
")",
"container",
".",
"put_archive",
"(",
"\"/srv\"",
",",
"self",
".",
"tar_files",
"(",
"repo_path",
")",
")",
"container",
".",
"put_archive",
"(",
"\"/srv/scripts\"",
",",
"self",
".",
"tar_runner",
"(",
")",
")",
"return",
"container"
] |
Starts a container with the image and name ``container_name`` and copies the repository into the container.
:type image: docker.models.images.Image
:rtype: docker.models.container.Container
|
[
"Starts",
"a",
"container",
"with",
"the",
"image",
"and",
"name",
"container_name",
"and",
"copies",
"the",
"repository",
"into",
"the",
"container",
"."
] |
e67fdc00be473ecf8ec16d024e1a3f2c47ca882c
|
https://github.com/mikicz/arca/blob/e67fdc00be473ecf8ec16d024e1a3f2c47ca882c/arca/backend/docker.py#L665-L684
|
238,233
|
mikicz/arca
|
arca/backend/docker.py
|
DockerBackend.get_container_name
|
def get_container_name(self, repo: str, branch: str, git_repo: Repo):
""" Returns the name of the container used for the repo.
"""
return "arca_{}_{}_{}".format(
self._arca.repo_id(repo),
branch,
self._arca.current_git_hash(repo, branch, git_repo, short=True)
)
|
python
|
def get_container_name(self, repo: str, branch: str, git_repo: Repo):
""" Returns the name of the container used for the repo.
"""
return "arca_{}_{}_{}".format(
self._arca.repo_id(repo),
branch,
self._arca.current_git_hash(repo, branch, git_repo, short=True)
)
|
[
"def",
"get_container_name",
"(",
"self",
",",
"repo",
":",
"str",
",",
"branch",
":",
"str",
",",
"git_repo",
":",
"Repo",
")",
":",
"return",
"\"arca_{}_{}_{}\"",
".",
"format",
"(",
"self",
".",
"_arca",
".",
"repo_id",
"(",
"repo",
")",
",",
"branch",
",",
"self",
".",
"_arca",
".",
"current_git_hash",
"(",
"repo",
",",
"branch",
",",
"git_repo",
",",
"short",
"=",
"True",
")",
")"
] |
Returns the name of the container used for the repo.
|
[
"Returns",
"the",
"name",
"of",
"the",
"container",
"used",
"for",
"the",
"repo",
"."
] |
e67fdc00be473ecf8ec16d024e1a3f2c47ca882c
|
https://github.com/mikicz/arca/blob/e67fdc00be473ecf8ec16d024e1a3f2c47ca882c/arca/backend/docker.py#L732-L739
|
238,234
|
mikicz/arca
|
arca/backend/docker.py
|
DockerBackend.run
|
def run(self, repo: str, branch: str, task: Task, git_repo: Repo, repo_path: Path) -> Result:
""" Gets or builds an image for the repo, gets or starts a container for the image and runs the script.
:param repo: Repository URL
:param branch: Branch ane
:param task: :class:`Task` to run.
:param git_repo: :class:`Repo <git.repo.base.Repo>` of the cloned repository.
:param repo_path: :class:`Path <pathlib.Path>` to the cloned location.
"""
self.check_docker_access()
container_name = self.get_container_name(repo, branch, git_repo)
container = self.container_running(container_name)
if container is None:
image = self.get_image_for_repo(repo, branch, git_repo, repo_path)
container = self.start_container(image, container_name, repo_path)
task_filename, task_json = self.serialized_task(task)
container.put_archive("/srv/scripts", self.tar_task_definition(task_filename, task_json))
res = None
try:
command = ["timeout"]
if self.inherit_image:
if self.alpine_inherited or b"Alpine" in container.exec_run(["cat", "/etc/issue"], tty=True).output:
self.alpine_inherited = True
command = ["timeout", "-t"]
command += [str(task.timeout),
"python",
"/srv/scripts/runner.py",
f"/srv/scripts/{task_filename}"]
logger.debug("Running command %s", " ".join(command))
res = container.exec_run(command, tty=True)
# 124 is the standard, 143 on alpine
if res.exit_code in {124, 143}:
raise BuildTimeoutError(f"The task timeouted after {task.timeout} seconds.")
return Result(res.output)
except BuildError: # can be raised by :meth:`Result.__init__`
raise
except Exception as e:
logger.exception(e)
if res is not None:
logger.warning(res.output)
raise BuildError("The build failed", extra_info={
"exception": e,
"output": res if res is None else res.output
})
finally:
if not self.keep_container_running:
container.kill(signal.SIGKILL)
else:
self._containers.add(container)
|
python
|
def run(self, repo: str, branch: str, task: Task, git_repo: Repo, repo_path: Path) -> Result:
""" Gets or builds an image for the repo, gets or starts a container for the image and runs the script.
:param repo: Repository URL
:param branch: Branch ane
:param task: :class:`Task` to run.
:param git_repo: :class:`Repo <git.repo.base.Repo>` of the cloned repository.
:param repo_path: :class:`Path <pathlib.Path>` to the cloned location.
"""
self.check_docker_access()
container_name = self.get_container_name(repo, branch, git_repo)
container = self.container_running(container_name)
if container is None:
image = self.get_image_for_repo(repo, branch, git_repo, repo_path)
container = self.start_container(image, container_name, repo_path)
task_filename, task_json = self.serialized_task(task)
container.put_archive("/srv/scripts", self.tar_task_definition(task_filename, task_json))
res = None
try:
command = ["timeout"]
if self.inherit_image:
if self.alpine_inherited or b"Alpine" in container.exec_run(["cat", "/etc/issue"], tty=True).output:
self.alpine_inherited = True
command = ["timeout", "-t"]
command += [str(task.timeout),
"python",
"/srv/scripts/runner.py",
f"/srv/scripts/{task_filename}"]
logger.debug("Running command %s", " ".join(command))
res = container.exec_run(command, tty=True)
# 124 is the standard, 143 on alpine
if res.exit_code in {124, 143}:
raise BuildTimeoutError(f"The task timeouted after {task.timeout} seconds.")
return Result(res.output)
except BuildError: # can be raised by :meth:`Result.__init__`
raise
except Exception as e:
logger.exception(e)
if res is not None:
logger.warning(res.output)
raise BuildError("The build failed", extra_info={
"exception": e,
"output": res if res is None else res.output
})
finally:
if not self.keep_container_running:
container.kill(signal.SIGKILL)
else:
self._containers.add(container)
|
[
"def",
"run",
"(",
"self",
",",
"repo",
":",
"str",
",",
"branch",
":",
"str",
",",
"task",
":",
"Task",
",",
"git_repo",
":",
"Repo",
",",
"repo_path",
":",
"Path",
")",
"->",
"Result",
":",
"self",
".",
"check_docker_access",
"(",
")",
"container_name",
"=",
"self",
".",
"get_container_name",
"(",
"repo",
",",
"branch",
",",
"git_repo",
")",
"container",
"=",
"self",
".",
"container_running",
"(",
"container_name",
")",
"if",
"container",
"is",
"None",
":",
"image",
"=",
"self",
".",
"get_image_for_repo",
"(",
"repo",
",",
"branch",
",",
"git_repo",
",",
"repo_path",
")",
"container",
"=",
"self",
".",
"start_container",
"(",
"image",
",",
"container_name",
",",
"repo_path",
")",
"task_filename",
",",
"task_json",
"=",
"self",
".",
"serialized_task",
"(",
"task",
")",
"container",
".",
"put_archive",
"(",
"\"/srv/scripts\"",
",",
"self",
".",
"tar_task_definition",
"(",
"task_filename",
",",
"task_json",
")",
")",
"res",
"=",
"None",
"try",
":",
"command",
"=",
"[",
"\"timeout\"",
"]",
"if",
"self",
".",
"inherit_image",
":",
"if",
"self",
".",
"alpine_inherited",
"or",
"b\"Alpine\"",
"in",
"container",
".",
"exec_run",
"(",
"[",
"\"cat\"",
",",
"\"/etc/issue\"",
"]",
",",
"tty",
"=",
"True",
")",
".",
"output",
":",
"self",
".",
"alpine_inherited",
"=",
"True",
"command",
"=",
"[",
"\"timeout\"",
",",
"\"-t\"",
"]",
"command",
"+=",
"[",
"str",
"(",
"task",
".",
"timeout",
")",
",",
"\"python\"",
",",
"\"/srv/scripts/runner.py\"",
",",
"f\"/srv/scripts/{task_filename}\"",
"]",
"logger",
".",
"debug",
"(",
"\"Running command %s\"",
",",
"\" \"",
".",
"join",
"(",
"command",
")",
")",
"res",
"=",
"container",
".",
"exec_run",
"(",
"command",
",",
"tty",
"=",
"True",
")",
"# 124 is the standard, 143 on alpine",
"if",
"res",
".",
"exit_code",
"in",
"{",
"124",
",",
"143",
"}",
":",
"raise",
"BuildTimeoutError",
"(",
"f\"The task timeouted after {task.timeout} seconds.\"",
")",
"return",
"Result",
"(",
"res",
".",
"output",
")",
"except",
"BuildError",
":",
"# can be raised by :meth:`Result.__init__`",
"raise",
"except",
"Exception",
"as",
"e",
":",
"logger",
".",
"exception",
"(",
"e",
")",
"if",
"res",
"is",
"not",
"None",
":",
"logger",
".",
"warning",
"(",
"res",
".",
"output",
")",
"raise",
"BuildError",
"(",
"\"The build failed\"",
",",
"extra_info",
"=",
"{",
"\"exception\"",
":",
"e",
",",
"\"output\"",
":",
"res",
"if",
"res",
"is",
"None",
"else",
"res",
".",
"output",
"}",
")",
"finally",
":",
"if",
"not",
"self",
".",
"keep_container_running",
":",
"container",
".",
"kill",
"(",
"signal",
".",
"SIGKILL",
")",
"else",
":",
"self",
".",
"_containers",
".",
"add",
"(",
"container",
")"
] |
Gets or builds an image for the repo, gets or starts a container for the image and runs the script.
:param repo: Repository URL
:param branch: Branch ane
:param task: :class:`Task` to run.
:param git_repo: :class:`Repo <git.repo.base.Repo>` of the cloned repository.
:param repo_path: :class:`Path <pathlib.Path>` to the cloned location.
|
[
"Gets",
"or",
"builds",
"an",
"image",
"for",
"the",
"repo",
"gets",
"or",
"starts",
"a",
"container",
"for",
"the",
"image",
"and",
"runs",
"the",
"script",
"."
] |
e67fdc00be473ecf8ec16d024e1a3f2c47ca882c
|
https://github.com/mikicz/arca/blob/e67fdc00be473ecf8ec16d024e1a3f2c47ca882c/arca/backend/docker.py#L741-L803
|
238,235
|
mikicz/arca
|
arca/backend/docker.py
|
DockerBackend.stop_containers
|
def stop_containers(self):
""" Stops all containers used by this instance of the backend.
"""
while len(self._containers):
container = self._containers.pop()
try:
container.kill(signal.SIGKILL)
except docker.errors.APIError: # probably doesn't exist anymore
pass
|
python
|
def stop_containers(self):
""" Stops all containers used by this instance of the backend.
"""
while len(self._containers):
container = self._containers.pop()
try:
container.kill(signal.SIGKILL)
except docker.errors.APIError: # probably doesn't exist anymore
pass
|
[
"def",
"stop_containers",
"(",
"self",
")",
":",
"while",
"len",
"(",
"self",
".",
"_containers",
")",
":",
"container",
"=",
"self",
".",
"_containers",
".",
"pop",
"(",
")",
"try",
":",
"container",
".",
"kill",
"(",
"signal",
".",
"SIGKILL",
")",
"except",
"docker",
".",
"errors",
".",
"APIError",
":",
"# probably doesn't exist anymore",
"pass"
] |
Stops all containers used by this instance of the backend.
|
[
"Stops",
"all",
"containers",
"used",
"by",
"this",
"instance",
"of",
"the",
"backend",
"."
] |
e67fdc00be473ecf8ec16d024e1a3f2c47ca882c
|
https://github.com/mikicz/arca/blob/e67fdc00be473ecf8ec16d024e1a3f2c47ca882c/arca/backend/docker.py#L805-L813
|
238,236
|
mikicz/arca
|
arca/backend/docker.py
|
DockerBackend.get_install_requirements_dockerfile
|
def get_install_requirements_dockerfile(self, name: str, tag: str, repo_path: Path,
requirements_option: RequirementsOptions) -> str:
"""
Returns the content of a Dockerfile that will install requirements based on the repository,
prioritizing Pipfile or Pipfile.lock and falling back on requirements.txt files
"""
if requirements_option == RequirementsOptions.requirements_txt:
target_file = "requirements.txt"
requirements_files = [repo_path / self.requirements_location]
install_cmd = "pip"
cmd_arguments = "install -r /srv/requirements.txt"
elif requirements_option == RequirementsOptions.pipfile:
target_file = ""
requirements_files = [repo_path / self.pipfile_location / "Pipfile",
repo_path / self.pipfile_location / "Pipfile.lock"]
install_cmd = "pipenv"
cmd_arguments = "install --system --ignore-pipfile --deploy"
else:
raise ValueError("Invalid requirements_option")
dockerfile = self.INSTALL_REQUIREMENTS.format(
name=name,
tag=tag,
timeout=self.requirements_timeout,
target_file=target_file,
requirements_files=" ".join(str(x.relative_to(repo_path.parent)) for x in requirements_files),
cmd_arguments=cmd_arguments,
install_cmd=install_cmd
)
logger.debug("Installing Python requirements with Dockerfile: %s", dockerfile)
return dockerfile
|
python
|
def get_install_requirements_dockerfile(self, name: str, tag: str, repo_path: Path,
requirements_option: RequirementsOptions) -> str:
"""
Returns the content of a Dockerfile that will install requirements based on the repository,
prioritizing Pipfile or Pipfile.lock and falling back on requirements.txt files
"""
if requirements_option == RequirementsOptions.requirements_txt:
target_file = "requirements.txt"
requirements_files = [repo_path / self.requirements_location]
install_cmd = "pip"
cmd_arguments = "install -r /srv/requirements.txt"
elif requirements_option == RequirementsOptions.pipfile:
target_file = ""
requirements_files = [repo_path / self.pipfile_location / "Pipfile",
repo_path / self.pipfile_location / "Pipfile.lock"]
install_cmd = "pipenv"
cmd_arguments = "install --system --ignore-pipfile --deploy"
else:
raise ValueError("Invalid requirements_option")
dockerfile = self.INSTALL_REQUIREMENTS.format(
name=name,
tag=tag,
timeout=self.requirements_timeout,
target_file=target_file,
requirements_files=" ".join(str(x.relative_to(repo_path.parent)) for x in requirements_files),
cmd_arguments=cmd_arguments,
install_cmd=install_cmd
)
logger.debug("Installing Python requirements with Dockerfile: %s", dockerfile)
return dockerfile
|
[
"def",
"get_install_requirements_dockerfile",
"(",
"self",
",",
"name",
":",
"str",
",",
"tag",
":",
"str",
",",
"repo_path",
":",
"Path",
",",
"requirements_option",
":",
"RequirementsOptions",
")",
"->",
"str",
":",
"if",
"requirements_option",
"==",
"RequirementsOptions",
".",
"requirements_txt",
":",
"target_file",
"=",
"\"requirements.txt\"",
"requirements_files",
"=",
"[",
"repo_path",
"/",
"self",
".",
"requirements_location",
"]",
"install_cmd",
"=",
"\"pip\"",
"cmd_arguments",
"=",
"\"install -r /srv/requirements.txt\"",
"elif",
"requirements_option",
"==",
"RequirementsOptions",
".",
"pipfile",
":",
"target_file",
"=",
"\"\"",
"requirements_files",
"=",
"[",
"repo_path",
"/",
"self",
".",
"pipfile_location",
"/",
"\"Pipfile\"",
",",
"repo_path",
"/",
"self",
".",
"pipfile_location",
"/",
"\"Pipfile.lock\"",
"]",
"install_cmd",
"=",
"\"pipenv\"",
"cmd_arguments",
"=",
"\"install --system --ignore-pipfile --deploy\"",
"else",
":",
"raise",
"ValueError",
"(",
"\"Invalid requirements_option\"",
")",
"dockerfile",
"=",
"self",
".",
"INSTALL_REQUIREMENTS",
".",
"format",
"(",
"name",
"=",
"name",
",",
"tag",
"=",
"tag",
",",
"timeout",
"=",
"self",
".",
"requirements_timeout",
",",
"target_file",
"=",
"target_file",
",",
"requirements_files",
"=",
"\" \"",
".",
"join",
"(",
"str",
"(",
"x",
".",
"relative_to",
"(",
"repo_path",
".",
"parent",
")",
")",
"for",
"x",
"in",
"requirements_files",
")",
",",
"cmd_arguments",
"=",
"cmd_arguments",
",",
"install_cmd",
"=",
"install_cmd",
")",
"logger",
".",
"debug",
"(",
"\"Installing Python requirements with Dockerfile: %s\"",
",",
"dockerfile",
")",
"return",
"dockerfile"
] |
Returns the content of a Dockerfile that will install requirements based on the repository,
prioritizing Pipfile or Pipfile.lock and falling back on requirements.txt files
|
[
"Returns",
"the",
"content",
"of",
"a",
"Dockerfile",
"that",
"will",
"install",
"requirements",
"based",
"on",
"the",
"repository",
"prioritizing",
"Pipfile",
"or",
"Pipfile",
".",
"lock",
"and",
"falling",
"back",
"on",
"requirements",
".",
"txt",
"files"
] |
e67fdc00be473ecf8ec16d024e1a3f2c47ca882c
|
https://github.com/mikicz/arca/blob/e67fdc00be473ecf8ec16d024e1a3f2c47ca882c/arca/backend/docker.py#L815-L850
|
238,237
|
mapmyfitness/jtime
|
jtime/jtime.py
|
status
|
def status():
"""
Gets the worklog status for the current branch
"""
import pdb; pdb.set_trace()
branch = git.branch
issue = jira.get_issue(branch)
if not issue:
return
# Print the title
title = issue.fields.summary
print "(%s) %s" % (branch, title)
# Print the status
status = issue.fields.status.name
assignee = issue.fields.assignee.name
in_progress = jira.get_datetime_issue_in_progress(issue)
if in_progress:
in_progress_string = in_progress.strftime("%a %x %I:%M %p")
print ' Status: %s as of %s' % (status, in_progress_string)
else:
print ' Status: %s' % status
print ' Assignee: %s' % assignee
# Print the worklogs
# Get the timespent and return 0m if it does not exist
time_spent = '0m'
try:
time_spent = issue.fields.timetracking.timeSpent
except:
pass
worklogs = jira.get_worklog(issue)
print "\nTime logged (%s):" % time_spent
if worklogs:
for worklog in worklogs:
worklog_hash = worklog.raw
author = worklog_hash['author']['name']
time_spent = worklog_hash.get('timeSpent', '0m')
created = dateutil.parser.parse(worklog_hash['started'])
created_pattern = '%a %x ' # Adding extra space for formatting
if not created.hour == created.minute == created.second == 0:
created = created.astimezone(tzlocal())
created_pattern = '%a %x %I:%M %p'
comment = worklog_hash.get('comment', '<no comment>')
updated_string = created.strftime(created_pattern)
print " %s - %s (%s): %s" % (updated_string, author, time_spent, comment)
else:
print " No worklogs"
cycle_time = jira.get_cycle_time(issue)
if cycle_time:
print '\nCycle Time: %.1f days' % cycle_time
# Print the time elapsed since the last mark
elapsed_time = jira.get_elapsed_time(issue)
if elapsed_time:
print '\n\033[0;32m%s elapsed\033[00m (use "jtime log ." to log elapsed time or "jtime log <duration> (ex. 30m, 1h etc.)" to log a specific amount of time)' % (elapsed_time)
else:
print '\n\033[0;32m0m elapsed\033[00m'
|
python
|
def status():
"""
Gets the worklog status for the current branch
"""
import pdb; pdb.set_trace()
branch = git.branch
issue = jira.get_issue(branch)
if not issue:
return
# Print the title
title = issue.fields.summary
print "(%s) %s" % (branch, title)
# Print the status
status = issue.fields.status.name
assignee = issue.fields.assignee.name
in_progress = jira.get_datetime_issue_in_progress(issue)
if in_progress:
in_progress_string = in_progress.strftime("%a %x %I:%M %p")
print ' Status: %s as of %s' % (status, in_progress_string)
else:
print ' Status: %s' % status
print ' Assignee: %s' % assignee
# Print the worklogs
# Get the timespent and return 0m if it does not exist
time_spent = '0m'
try:
time_spent = issue.fields.timetracking.timeSpent
except:
pass
worklogs = jira.get_worklog(issue)
print "\nTime logged (%s):" % time_spent
if worklogs:
for worklog in worklogs:
worklog_hash = worklog.raw
author = worklog_hash['author']['name']
time_spent = worklog_hash.get('timeSpent', '0m')
created = dateutil.parser.parse(worklog_hash['started'])
created_pattern = '%a %x ' # Adding extra space for formatting
if not created.hour == created.minute == created.second == 0:
created = created.astimezone(tzlocal())
created_pattern = '%a %x %I:%M %p'
comment = worklog_hash.get('comment', '<no comment>')
updated_string = created.strftime(created_pattern)
print " %s - %s (%s): %s" % (updated_string, author, time_spent, comment)
else:
print " No worklogs"
cycle_time = jira.get_cycle_time(issue)
if cycle_time:
print '\nCycle Time: %.1f days' % cycle_time
# Print the time elapsed since the last mark
elapsed_time = jira.get_elapsed_time(issue)
if elapsed_time:
print '\n\033[0;32m%s elapsed\033[00m (use "jtime log ." to log elapsed time or "jtime log <duration> (ex. 30m, 1h etc.)" to log a specific amount of time)' % (elapsed_time)
else:
print '\n\033[0;32m0m elapsed\033[00m'
|
[
"def",
"status",
"(",
")",
":",
"import",
"pdb",
"pdb",
".",
"set_trace",
"(",
")",
"branch",
"=",
"git",
".",
"branch",
"issue",
"=",
"jira",
".",
"get_issue",
"(",
"branch",
")",
"if",
"not",
"issue",
":",
"return",
"# Print the title",
"title",
"=",
"issue",
".",
"fields",
".",
"summary",
"print",
"\"(%s) %s\"",
"%",
"(",
"branch",
",",
"title",
")",
"# Print the status",
"status",
"=",
"issue",
".",
"fields",
".",
"status",
".",
"name",
"assignee",
"=",
"issue",
".",
"fields",
".",
"assignee",
".",
"name",
"in_progress",
"=",
"jira",
".",
"get_datetime_issue_in_progress",
"(",
"issue",
")",
"if",
"in_progress",
":",
"in_progress_string",
"=",
"in_progress",
".",
"strftime",
"(",
"\"%a %x %I:%M %p\"",
")",
"print",
"' Status: %s as of %s'",
"%",
"(",
"status",
",",
"in_progress_string",
")",
"else",
":",
"print",
"' Status: %s'",
"%",
"status",
"print",
"' Assignee: %s'",
"%",
"assignee",
"# Print the worklogs",
"# Get the timespent and return 0m if it does not exist",
"time_spent",
"=",
"'0m'",
"try",
":",
"time_spent",
"=",
"issue",
".",
"fields",
".",
"timetracking",
".",
"timeSpent",
"except",
":",
"pass",
"worklogs",
"=",
"jira",
".",
"get_worklog",
"(",
"issue",
")",
"print",
"\"\\nTime logged (%s):\"",
"%",
"time_spent",
"if",
"worklogs",
":",
"for",
"worklog",
"in",
"worklogs",
":",
"worklog_hash",
"=",
"worklog",
".",
"raw",
"author",
"=",
"worklog_hash",
"[",
"'author'",
"]",
"[",
"'name'",
"]",
"time_spent",
"=",
"worklog_hash",
".",
"get",
"(",
"'timeSpent'",
",",
"'0m'",
")",
"created",
"=",
"dateutil",
".",
"parser",
".",
"parse",
"(",
"worklog_hash",
"[",
"'started'",
"]",
")",
"created_pattern",
"=",
"'%a %x '",
"# Adding extra space for formatting",
"if",
"not",
"created",
".",
"hour",
"==",
"created",
".",
"minute",
"==",
"created",
".",
"second",
"==",
"0",
":",
"created",
"=",
"created",
".",
"astimezone",
"(",
"tzlocal",
"(",
")",
")",
"created_pattern",
"=",
"'%a %x %I:%M %p'",
"comment",
"=",
"worklog_hash",
".",
"get",
"(",
"'comment'",
",",
"'<no comment>'",
")",
"updated_string",
"=",
"created",
".",
"strftime",
"(",
"created_pattern",
")",
"print",
"\" %s - %s (%s): %s\"",
"%",
"(",
"updated_string",
",",
"author",
",",
"time_spent",
",",
"comment",
")",
"else",
":",
"print",
"\" No worklogs\"",
"cycle_time",
"=",
"jira",
".",
"get_cycle_time",
"(",
"issue",
")",
"if",
"cycle_time",
":",
"print",
"'\\nCycle Time: %.1f days'",
"%",
"cycle_time",
"# Print the time elapsed since the last mark",
"elapsed_time",
"=",
"jira",
".",
"get_elapsed_time",
"(",
"issue",
")",
"if",
"elapsed_time",
":",
"print",
"'\\n\\033[0;32m%s elapsed\\033[00m (use \"jtime log .\" to log elapsed time or \"jtime log <duration> (ex. 30m, 1h etc.)\" to log a specific amount of time)'",
"%",
"(",
"elapsed_time",
")",
"else",
":",
"print",
"'\\n\\033[0;32m0m elapsed\\033[00m'"
] |
Gets the worklog status for the current branch
|
[
"Gets",
"the",
"worklog",
"status",
"for",
"the",
"current",
"branch"
] |
402fb6b40ac7a78c23fd02fac50c6dbe49e5ebfd
|
https://github.com/mapmyfitness/jtime/blob/402fb6b40ac7a78c23fd02fac50c6dbe49e5ebfd/jtime/jtime.py#L58-L125
|
238,238
|
mapmyfitness/jtime
|
jtime/jtime.py
|
log
|
def log(duration, message=None, use_last_commit_message=False):
"""
Log time against the current active issue
"""
branch = git.branch
issue = jira.get_issue(branch)
# Create the comment
comment = "Working on issue %s" % branch
if message:
comment = message
elif use_last_commit_message:
comment = git.get_last_commit_message()
if issue:
# If the duration is provided use it, otherwise use the elapsed time since the last mark
duration = jira.get_elapsed_time(issue) if duration == '.' else duration
if duration:
# Add the worklog
jira.add_worklog(issue, timeSpent=duration, adjustEstimate=None, newEstimate=None, reduceBy=None,
comment=comment)
print "Logged %s against issue %s (%s)" % (duration, branch, comment)
else:
print "No time logged, less than 0m elapsed."
|
python
|
def log(duration, message=None, use_last_commit_message=False):
"""
Log time against the current active issue
"""
branch = git.branch
issue = jira.get_issue(branch)
# Create the comment
comment = "Working on issue %s" % branch
if message:
comment = message
elif use_last_commit_message:
comment = git.get_last_commit_message()
if issue:
# If the duration is provided use it, otherwise use the elapsed time since the last mark
duration = jira.get_elapsed_time(issue) if duration == '.' else duration
if duration:
# Add the worklog
jira.add_worklog(issue, timeSpent=duration, adjustEstimate=None, newEstimate=None, reduceBy=None,
comment=comment)
print "Logged %s against issue %s (%s)" % (duration, branch, comment)
else:
print "No time logged, less than 0m elapsed."
|
[
"def",
"log",
"(",
"duration",
",",
"message",
"=",
"None",
",",
"use_last_commit_message",
"=",
"False",
")",
":",
"branch",
"=",
"git",
".",
"branch",
"issue",
"=",
"jira",
".",
"get_issue",
"(",
"branch",
")",
"# Create the comment",
"comment",
"=",
"\"Working on issue %s\"",
"%",
"branch",
"if",
"message",
":",
"comment",
"=",
"message",
"elif",
"use_last_commit_message",
":",
"comment",
"=",
"git",
".",
"get_last_commit_message",
"(",
")",
"if",
"issue",
":",
"# If the duration is provided use it, otherwise use the elapsed time since the last mark",
"duration",
"=",
"jira",
".",
"get_elapsed_time",
"(",
"issue",
")",
"if",
"duration",
"==",
"'.'",
"else",
"duration",
"if",
"duration",
":",
"# Add the worklog",
"jira",
".",
"add_worklog",
"(",
"issue",
",",
"timeSpent",
"=",
"duration",
",",
"adjustEstimate",
"=",
"None",
",",
"newEstimate",
"=",
"None",
",",
"reduceBy",
"=",
"None",
",",
"comment",
"=",
"comment",
")",
"print",
"\"Logged %s against issue %s (%s)\"",
"%",
"(",
"duration",
",",
"branch",
",",
"comment",
")",
"else",
":",
"print",
"\"No time logged, less than 0m elapsed.\""
] |
Log time against the current active issue
|
[
"Log",
"time",
"against",
"the",
"current",
"active",
"issue"
] |
402fb6b40ac7a78c23fd02fac50c6dbe49e5ebfd
|
https://github.com/mapmyfitness/jtime/blob/402fb6b40ac7a78c23fd02fac50c6dbe49e5ebfd/jtime/jtime.py#L135-L159
|
238,239
|
mapmyfitness/jtime
|
jtime/jtime.py
|
mark
|
def mark():
"""
Mark the start time for active work on an issue
"""
branch = git.branch
issue = jira.get_issue(branch)
if not issue:
return
worklogs = jira.get_worklog(issue)
marked = False
if worklogs:
# If we have worklogs, change the updated time of the last log to the mark
marked = jira.touch_last_worklog(issue)
mark_time = datetime.datetime.now(dateutil.tz.tzlocal()).strftime("%I:%M %p")
print "Set mark at %s on %s by touching last work log" % (mark_time, branch)
else:
# If we don't have worklogs, mark the issue as in progress if that is an available transition
jira.workflow_transition(issue, 'Open')
marked = jira.workflow_transition(issue, 'In Progress')
mark_time = datetime.datetime.now(dateutil.tz.tzlocal()).strftime("%I:%M %p")
print 'Set mark at %s on %s by changing status to "In Progress"' % (mark_time, branch)
if not marked:
print "ERROR: Issue %s is has a status of %s and has no worklogs. You must log some time or re-open the issue to proceed." % \
(branch, issue.fields.status.name)
|
python
|
def mark():
"""
Mark the start time for active work on an issue
"""
branch = git.branch
issue = jira.get_issue(branch)
if not issue:
return
worklogs = jira.get_worklog(issue)
marked = False
if worklogs:
# If we have worklogs, change the updated time of the last log to the mark
marked = jira.touch_last_worklog(issue)
mark_time = datetime.datetime.now(dateutil.tz.tzlocal()).strftime("%I:%M %p")
print "Set mark at %s on %s by touching last work log" % (mark_time, branch)
else:
# If we don't have worklogs, mark the issue as in progress if that is an available transition
jira.workflow_transition(issue, 'Open')
marked = jira.workflow_transition(issue, 'In Progress')
mark_time = datetime.datetime.now(dateutil.tz.tzlocal()).strftime("%I:%M %p")
print 'Set mark at %s on %s by changing status to "In Progress"' % (mark_time, branch)
if not marked:
print "ERROR: Issue %s is has a status of %s and has no worklogs. You must log some time or re-open the issue to proceed." % \
(branch, issue.fields.status.name)
|
[
"def",
"mark",
"(",
")",
":",
"branch",
"=",
"git",
".",
"branch",
"issue",
"=",
"jira",
".",
"get_issue",
"(",
"branch",
")",
"if",
"not",
"issue",
":",
"return",
"worklogs",
"=",
"jira",
".",
"get_worklog",
"(",
"issue",
")",
"marked",
"=",
"False",
"if",
"worklogs",
":",
"# If we have worklogs, change the updated time of the last log to the mark",
"marked",
"=",
"jira",
".",
"touch_last_worklog",
"(",
"issue",
")",
"mark_time",
"=",
"datetime",
".",
"datetime",
".",
"now",
"(",
"dateutil",
".",
"tz",
".",
"tzlocal",
"(",
")",
")",
".",
"strftime",
"(",
"\"%I:%M %p\"",
")",
"print",
"\"Set mark at %s on %s by touching last work log\"",
"%",
"(",
"mark_time",
",",
"branch",
")",
"else",
":",
"# If we don't have worklogs, mark the issue as in progress if that is an available transition",
"jira",
".",
"workflow_transition",
"(",
"issue",
",",
"'Open'",
")",
"marked",
"=",
"jira",
".",
"workflow_transition",
"(",
"issue",
",",
"'In Progress'",
")",
"mark_time",
"=",
"datetime",
".",
"datetime",
".",
"now",
"(",
"dateutil",
".",
"tz",
".",
"tzlocal",
"(",
")",
")",
".",
"strftime",
"(",
"\"%I:%M %p\"",
")",
"print",
"'Set mark at %s on %s by changing status to \"In Progress\"'",
"%",
"(",
"mark_time",
",",
"branch",
")",
"if",
"not",
"marked",
":",
"print",
"\"ERROR: Issue %s is has a status of %s and has no worklogs. You must log some time or re-open the issue to proceed.\"",
"%",
"(",
"branch",
",",
"issue",
".",
"fields",
".",
"status",
".",
"name",
")"
] |
Mark the start time for active work on an issue
|
[
"Mark",
"the",
"start",
"time",
"for",
"active",
"work",
"on",
"an",
"issue"
] |
402fb6b40ac7a78c23fd02fac50c6dbe49e5ebfd
|
https://github.com/mapmyfitness/jtime/blob/402fb6b40ac7a78c23fd02fac50c6dbe49e5ebfd/jtime/jtime.py#L162-L187
|
238,240
|
mapmyfitness/jtime
|
jtime/jtime.py
|
main
|
def main():
"""
Set up the context and connectors
"""
try:
init()
except custom_exceptions.NotConfigured:
configure()
init()
# Adding this in case users are trying to run without adding a jira url.
# I would like to take this out in a release or two.
# TODO: REMOVE
except (AttributeError, ConfigParser.NoOptionError):
logging.error('It appears that your configuration is invalid, please reconfigure the app and try again.')
configure()
init()
parser = argparse.ArgumentParser()
# Now simply auto-discovering the methods listed in this module
current_module = sys.modules[__name__]
module_methods = [getattr(current_module, a, None) for a in dir(current_module)
if isinstance(getattr(current_module, a, None), types.FunctionType)
and a != 'main']
argh.add_commands(parser, module_methods)
# Putting the error logging after the app is initialized because
# we want to adhere to the user's preferences
try:
argh.dispatch(parser)
# We don't want to report keyboard interrupts to rollbar
except (KeyboardInterrupt, SystemExit):
raise
except Exception as e:
if isinstance(e, jira.exceptions.JIRAError) and "HTTP 400" in e:
logging.warning('It appears that your authentication with {0} is invalid. Please re-configure jtime: `jtime configure` with the correct credentials'.format(configuration.load_config['jira'].get('url')))
elif configured.get('jira').get('error_reporting', True):
# Configure rollbar so that we report errors
import rollbar
from . import __version__ as version
root_path = os.path.dirname(os.path.realpath(__file__))
rollbar.init('7541b8e188044831b6728fa8475eab9f', 'v%s' % version, root=root_path)
logging.error('Sorry. It appears that there was an error when handling your command. '
'This error has been reported to our error tracking system. To disable '
'this reporting, please re-configure the app: `jtime config`.')
extra_data = {
# grab the command that we're running
'cmd': sys.argv[1],
# we really don't want to see jtime in the args
'args': sys.argv[2:],
# lets grab anything useful, python version?
'python': str(sys.version),
}
# We really shouldn't thit this line of code when running tests, so let's not cover it.
rollbar.report_exc_info(extra_data=extra_data) # pragma: no cover
else:
logging.error('It appears that there was an error when handling your command.')
raise
|
python
|
def main():
"""
Set up the context and connectors
"""
try:
init()
except custom_exceptions.NotConfigured:
configure()
init()
# Adding this in case users are trying to run without adding a jira url.
# I would like to take this out in a release or two.
# TODO: REMOVE
except (AttributeError, ConfigParser.NoOptionError):
logging.error('It appears that your configuration is invalid, please reconfigure the app and try again.')
configure()
init()
parser = argparse.ArgumentParser()
# Now simply auto-discovering the methods listed in this module
current_module = sys.modules[__name__]
module_methods = [getattr(current_module, a, None) for a in dir(current_module)
if isinstance(getattr(current_module, a, None), types.FunctionType)
and a != 'main']
argh.add_commands(parser, module_methods)
# Putting the error logging after the app is initialized because
# we want to adhere to the user's preferences
try:
argh.dispatch(parser)
# We don't want to report keyboard interrupts to rollbar
except (KeyboardInterrupt, SystemExit):
raise
except Exception as e:
if isinstance(e, jira.exceptions.JIRAError) and "HTTP 400" in e:
logging.warning('It appears that your authentication with {0} is invalid. Please re-configure jtime: `jtime configure` with the correct credentials'.format(configuration.load_config['jira'].get('url')))
elif configured.get('jira').get('error_reporting', True):
# Configure rollbar so that we report errors
import rollbar
from . import __version__ as version
root_path = os.path.dirname(os.path.realpath(__file__))
rollbar.init('7541b8e188044831b6728fa8475eab9f', 'v%s' % version, root=root_path)
logging.error('Sorry. It appears that there was an error when handling your command. '
'This error has been reported to our error tracking system. To disable '
'this reporting, please re-configure the app: `jtime config`.')
extra_data = {
# grab the command that we're running
'cmd': sys.argv[1],
# we really don't want to see jtime in the args
'args': sys.argv[2:],
# lets grab anything useful, python version?
'python': str(sys.version),
}
# We really shouldn't thit this line of code when running tests, so let's not cover it.
rollbar.report_exc_info(extra_data=extra_data) # pragma: no cover
else:
logging.error('It appears that there was an error when handling your command.')
raise
|
[
"def",
"main",
"(",
")",
":",
"try",
":",
"init",
"(",
")",
"except",
"custom_exceptions",
".",
"NotConfigured",
":",
"configure",
"(",
")",
"init",
"(",
")",
"# Adding this in case users are trying to run without adding a jira url.",
"# I would like to take this out in a release or two.",
"# TODO: REMOVE",
"except",
"(",
"AttributeError",
",",
"ConfigParser",
".",
"NoOptionError",
")",
":",
"logging",
".",
"error",
"(",
"'It appears that your configuration is invalid, please reconfigure the app and try again.'",
")",
"configure",
"(",
")",
"init",
"(",
")",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
")",
"# Now simply auto-discovering the methods listed in this module",
"current_module",
"=",
"sys",
".",
"modules",
"[",
"__name__",
"]",
"module_methods",
"=",
"[",
"getattr",
"(",
"current_module",
",",
"a",
",",
"None",
")",
"for",
"a",
"in",
"dir",
"(",
"current_module",
")",
"if",
"isinstance",
"(",
"getattr",
"(",
"current_module",
",",
"a",
",",
"None",
")",
",",
"types",
".",
"FunctionType",
")",
"and",
"a",
"!=",
"'main'",
"]",
"argh",
".",
"add_commands",
"(",
"parser",
",",
"module_methods",
")",
"# Putting the error logging after the app is initialized because",
"# we want to adhere to the user's preferences",
"try",
":",
"argh",
".",
"dispatch",
"(",
"parser",
")",
"# We don't want to report keyboard interrupts to rollbar",
"except",
"(",
"KeyboardInterrupt",
",",
"SystemExit",
")",
":",
"raise",
"except",
"Exception",
"as",
"e",
":",
"if",
"isinstance",
"(",
"e",
",",
"jira",
".",
"exceptions",
".",
"JIRAError",
")",
"and",
"\"HTTP 400\"",
"in",
"e",
":",
"logging",
".",
"warning",
"(",
"'It appears that your authentication with {0} is invalid. Please re-configure jtime: `jtime configure` with the correct credentials'",
".",
"format",
"(",
"configuration",
".",
"load_config",
"[",
"'jira'",
"]",
".",
"get",
"(",
"'url'",
")",
")",
")",
"elif",
"configured",
".",
"get",
"(",
"'jira'",
")",
".",
"get",
"(",
"'error_reporting'",
",",
"True",
")",
":",
"# Configure rollbar so that we report errors",
"import",
"rollbar",
"from",
".",
"import",
"__version__",
"as",
"version",
"root_path",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"os",
".",
"path",
".",
"realpath",
"(",
"__file__",
")",
")",
"rollbar",
".",
"init",
"(",
"'7541b8e188044831b6728fa8475eab9f'",
",",
"'v%s'",
"%",
"version",
",",
"root",
"=",
"root_path",
")",
"logging",
".",
"error",
"(",
"'Sorry. It appears that there was an error when handling your command. '",
"'This error has been reported to our error tracking system. To disable '",
"'this reporting, please re-configure the app: `jtime config`.'",
")",
"extra_data",
"=",
"{",
"# grab the command that we're running",
"'cmd'",
":",
"sys",
".",
"argv",
"[",
"1",
"]",
",",
"# we really don't want to see jtime in the args",
"'args'",
":",
"sys",
".",
"argv",
"[",
"2",
":",
"]",
",",
"# lets grab anything useful, python version?",
"'python'",
":",
"str",
"(",
"sys",
".",
"version",
")",
",",
"}",
"# We really shouldn't thit this line of code when running tests, so let's not cover it.",
"rollbar",
".",
"report_exc_info",
"(",
"extra_data",
"=",
"extra_data",
")",
"# pragma: no cover",
"else",
":",
"logging",
".",
"error",
"(",
"'It appears that there was an error when handling your command.'",
")",
"raise"
] |
Set up the context and connectors
|
[
"Set",
"up",
"the",
"context",
"and",
"connectors"
] |
402fb6b40ac7a78c23fd02fac50c6dbe49e5ebfd
|
https://github.com/mapmyfitness/jtime/blob/402fb6b40ac7a78c23fd02fac50c6dbe49e5ebfd/jtime/jtime.py#L264-L321
|
238,241
|
anlutro/russell
|
russell/content.py
|
Entry.from_string
|
def from_string(cls, contents, **kwargs):
"""
Given a markdown string, create an Entry object.
Usually subclasses will want to customize the parts of the markdown
where you provide values for attributes like public - this can be done
by overriding the process_meta method.
"""
lines = contents.splitlines()
title = None
description = None
line = lines.pop(0)
while line != '':
if not title and line.startswith('#'):
title = line[1:].strip()
elif line.startswith('title:'):
title = line[6:].strip()
elif line.startswith('description:'):
description = line[12:].strip()
elif line.startswith('subtitle:'):
kwargs['subtitle'] = line[9:].strip()
elif line.startswith('comments:'):
try:
kwargs['allow_comments'] = _str_to_bool(line[9:])
except ValueError:
LOG.warning('invalid boolean value for comments', exc_info=True)
cls.process_meta(line, kwargs)
line = lines.pop(0)
# the only lines left should be the actual contents
body = '\n'.join(lines).strip()
excerpt = _get_excerpt(body)
if description is None:
description = _get_description(excerpt, 160)
if issubclass(cls, Post):
kwargs['excerpt'] = render_markdown(excerpt)
body = render_markdown(body)
return cls(title=title, body=body, description=description, **kwargs)
|
python
|
def from_string(cls, contents, **kwargs):
"""
Given a markdown string, create an Entry object.
Usually subclasses will want to customize the parts of the markdown
where you provide values for attributes like public - this can be done
by overriding the process_meta method.
"""
lines = contents.splitlines()
title = None
description = None
line = lines.pop(0)
while line != '':
if not title and line.startswith('#'):
title = line[1:].strip()
elif line.startswith('title:'):
title = line[6:].strip()
elif line.startswith('description:'):
description = line[12:].strip()
elif line.startswith('subtitle:'):
kwargs['subtitle'] = line[9:].strip()
elif line.startswith('comments:'):
try:
kwargs['allow_comments'] = _str_to_bool(line[9:])
except ValueError:
LOG.warning('invalid boolean value for comments', exc_info=True)
cls.process_meta(line, kwargs)
line = lines.pop(0)
# the only lines left should be the actual contents
body = '\n'.join(lines).strip()
excerpt = _get_excerpt(body)
if description is None:
description = _get_description(excerpt, 160)
if issubclass(cls, Post):
kwargs['excerpt'] = render_markdown(excerpt)
body = render_markdown(body)
return cls(title=title, body=body, description=description, **kwargs)
|
[
"def",
"from_string",
"(",
"cls",
",",
"contents",
",",
"*",
"*",
"kwargs",
")",
":",
"lines",
"=",
"contents",
".",
"splitlines",
"(",
")",
"title",
"=",
"None",
"description",
"=",
"None",
"line",
"=",
"lines",
".",
"pop",
"(",
"0",
")",
"while",
"line",
"!=",
"''",
":",
"if",
"not",
"title",
"and",
"line",
".",
"startswith",
"(",
"'#'",
")",
":",
"title",
"=",
"line",
"[",
"1",
":",
"]",
".",
"strip",
"(",
")",
"elif",
"line",
".",
"startswith",
"(",
"'title:'",
")",
":",
"title",
"=",
"line",
"[",
"6",
":",
"]",
".",
"strip",
"(",
")",
"elif",
"line",
".",
"startswith",
"(",
"'description:'",
")",
":",
"description",
"=",
"line",
"[",
"12",
":",
"]",
".",
"strip",
"(",
")",
"elif",
"line",
".",
"startswith",
"(",
"'subtitle:'",
")",
":",
"kwargs",
"[",
"'subtitle'",
"]",
"=",
"line",
"[",
"9",
":",
"]",
".",
"strip",
"(",
")",
"elif",
"line",
".",
"startswith",
"(",
"'comments:'",
")",
":",
"try",
":",
"kwargs",
"[",
"'allow_comments'",
"]",
"=",
"_str_to_bool",
"(",
"line",
"[",
"9",
":",
"]",
")",
"except",
"ValueError",
":",
"LOG",
".",
"warning",
"(",
"'invalid boolean value for comments'",
",",
"exc_info",
"=",
"True",
")",
"cls",
".",
"process_meta",
"(",
"line",
",",
"kwargs",
")",
"line",
"=",
"lines",
".",
"pop",
"(",
"0",
")",
"# the only lines left should be the actual contents",
"body",
"=",
"'\\n'",
".",
"join",
"(",
"lines",
")",
".",
"strip",
"(",
")",
"excerpt",
"=",
"_get_excerpt",
"(",
"body",
")",
"if",
"description",
"is",
"None",
":",
"description",
"=",
"_get_description",
"(",
"excerpt",
",",
"160",
")",
"if",
"issubclass",
"(",
"cls",
",",
"Post",
")",
":",
"kwargs",
"[",
"'excerpt'",
"]",
"=",
"render_markdown",
"(",
"excerpt",
")",
"body",
"=",
"render_markdown",
"(",
"body",
")",
"return",
"cls",
"(",
"title",
"=",
"title",
",",
"body",
"=",
"body",
",",
"description",
"=",
"description",
",",
"*",
"*",
"kwargs",
")"
] |
Given a markdown string, create an Entry object.
Usually subclasses will want to customize the parts of the markdown
where you provide values for attributes like public - this can be done
by overriding the process_meta method.
|
[
"Given",
"a",
"markdown",
"string",
"create",
"an",
"Entry",
"object",
"."
] |
6e4a95929f031926d3acd5d9e6c9ca7bb896b1b5
|
https://github.com/anlutro/russell/blob/6e4a95929f031926d3acd5d9e6c9ca7bb896b1b5/russell/content.py#L107-L148
|
238,242
|
anlutro/russell
|
russell/content.py
|
Entry.process_meta
|
def process_meta(cls, line, kwargs):
"""
Process a line of metadata found in the markdown.
Lines are usually in the format of "key: value".
Modify the kwargs dict in order to change or add new kwargs that should
be passed to the class's constructor.
"""
if line.startswith('slug:'):
kwargs['slug'] = line[5:].strip()
elif line.startswith('public:'):
try:
kwargs['public'] = _str_to_bool(line[7:])
except ValueError:
LOG.warning('invalid boolean value for public', exc_info=True)
elif line.startswith('private:'):
try:
kwargs['public'] = not _str_to_bool(line[8:])
except ValueError:
LOG.warning('invalid boolean value for private', exc_info=True)
|
python
|
def process_meta(cls, line, kwargs):
"""
Process a line of metadata found in the markdown.
Lines are usually in the format of "key: value".
Modify the kwargs dict in order to change or add new kwargs that should
be passed to the class's constructor.
"""
if line.startswith('slug:'):
kwargs['slug'] = line[5:].strip()
elif line.startswith('public:'):
try:
kwargs['public'] = _str_to_bool(line[7:])
except ValueError:
LOG.warning('invalid boolean value for public', exc_info=True)
elif line.startswith('private:'):
try:
kwargs['public'] = not _str_to_bool(line[8:])
except ValueError:
LOG.warning('invalid boolean value for private', exc_info=True)
|
[
"def",
"process_meta",
"(",
"cls",
",",
"line",
",",
"kwargs",
")",
":",
"if",
"line",
".",
"startswith",
"(",
"'slug:'",
")",
":",
"kwargs",
"[",
"'slug'",
"]",
"=",
"line",
"[",
"5",
":",
"]",
".",
"strip",
"(",
")",
"elif",
"line",
".",
"startswith",
"(",
"'public:'",
")",
":",
"try",
":",
"kwargs",
"[",
"'public'",
"]",
"=",
"_str_to_bool",
"(",
"line",
"[",
"7",
":",
"]",
")",
"except",
"ValueError",
":",
"LOG",
".",
"warning",
"(",
"'invalid boolean value for public'",
",",
"exc_info",
"=",
"True",
")",
"elif",
"line",
".",
"startswith",
"(",
"'private:'",
")",
":",
"try",
":",
"kwargs",
"[",
"'public'",
"]",
"=",
"not",
"_str_to_bool",
"(",
"line",
"[",
"8",
":",
"]",
")",
"except",
"ValueError",
":",
"LOG",
".",
"warning",
"(",
"'invalid boolean value for private'",
",",
"exc_info",
"=",
"True",
")"
] |
Process a line of metadata found in the markdown.
Lines are usually in the format of "key: value".
Modify the kwargs dict in order to change or add new kwargs that should
be passed to the class's constructor.
|
[
"Process",
"a",
"line",
"of",
"metadata",
"found",
"in",
"the",
"markdown",
"."
] |
6e4a95929f031926d3acd5d9e6c9ca7bb896b1b5
|
https://github.com/anlutro/russell/blob/6e4a95929f031926d3acd5d9e6c9ca7bb896b1b5/russell/content.py#L151-L173
|
238,243
|
anlutro/russell
|
russell/content.py
|
Entry.from_file
|
def from_file(cls, path, **kwargs):
"""
Given a markdown file, get an Entry object.
"""
LOG.debug('creating %s from "%s"', cls, path)
# the filename will be the default slug - can be overridden later
kwargs['slug'] = os.path.splitext(os.path.basename(path))[0]
# TODO: ideally this should be part of the Post class.
# if a pubdate isn't explicitly passed, get it from the file metadata
# instead. note that it might still be overriden later on while reading
# the file contents.
if issubclass(cls, Post) and not kwargs.get('pubdate'):
# you would think creation always comes before modification, but you
# can manually modify a file's modification date to one earlier than
# the creation date. this lets you set a post's pubdate by running
# the command `touch`. we support this behaviour by simply finding
# the chronologically earliest date of creation and modification.
timestamp = min(os.path.getctime(path), os.path.getmtime(path))
kwargs['pubdate'] = datetime.fromtimestamp(timestamp)
with open(path, 'r') as file:
entry = cls.from_string(file.read(), **kwargs)
return entry
|
python
|
def from_file(cls, path, **kwargs):
"""
Given a markdown file, get an Entry object.
"""
LOG.debug('creating %s from "%s"', cls, path)
# the filename will be the default slug - can be overridden later
kwargs['slug'] = os.path.splitext(os.path.basename(path))[0]
# TODO: ideally this should be part of the Post class.
# if a pubdate isn't explicitly passed, get it from the file metadata
# instead. note that it might still be overriden later on while reading
# the file contents.
if issubclass(cls, Post) and not kwargs.get('pubdate'):
# you would think creation always comes before modification, but you
# can manually modify a file's modification date to one earlier than
# the creation date. this lets you set a post's pubdate by running
# the command `touch`. we support this behaviour by simply finding
# the chronologically earliest date of creation and modification.
timestamp = min(os.path.getctime(path), os.path.getmtime(path))
kwargs['pubdate'] = datetime.fromtimestamp(timestamp)
with open(path, 'r') as file:
entry = cls.from_string(file.read(), **kwargs)
return entry
|
[
"def",
"from_file",
"(",
"cls",
",",
"path",
",",
"*",
"*",
"kwargs",
")",
":",
"LOG",
".",
"debug",
"(",
"'creating %s from \"%s\"'",
",",
"cls",
",",
"path",
")",
"# the filename will be the default slug - can be overridden later",
"kwargs",
"[",
"'slug'",
"]",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"os",
".",
"path",
".",
"basename",
"(",
"path",
")",
")",
"[",
"0",
"]",
"# TODO: ideally this should be part of the Post class.",
"# if a pubdate isn't explicitly passed, get it from the file metadata",
"# instead. note that it might still be overriden later on while reading",
"# the file contents.",
"if",
"issubclass",
"(",
"cls",
",",
"Post",
")",
"and",
"not",
"kwargs",
".",
"get",
"(",
"'pubdate'",
")",
":",
"# you would think creation always comes before modification, but you",
"# can manually modify a file's modification date to one earlier than",
"# the creation date. this lets you set a post's pubdate by running",
"# the command `touch`. we support this behaviour by simply finding",
"# the chronologically earliest date of creation and modification.",
"timestamp",
"=",
"min",
"(",
"os",
".",
"path",
".",
"getctime",
"(",
"path",
")",
",",
"os",
".",
"path",
".",
"getmtime",
"(",
"path",
")",
")",
"kwargs",
"[",
"'pubdate'",
"]",
"=",
"datetime",
".",
"fromtimestamp",
"(",
"timestamp",
")",
"with",
"open",
"(",
"path",
",",
"'r'",
")",
"as",
"file",
":",
"entry",
"=",
"cls",
".",
"from_string",
"(",
"file",
".",
"read",
"(",
")",
",",
"*",
"*",
"kwargs",
")",
"return",
"entry"
] |
Given a markdown file, get an Entry object.
|
[
"Given",
"a",
"markdown",
"file",
"get",
"an",
"Entry",
"object",
"."
] |
6e4a95929f031926d3acd5d9e6c9ca7bb896b1b5
|
https://github.com/anlutro/russell/blob/6e4a95929f031926d3acd5d9e6c9ca7bb896b1b5/russell/content.py#L176-L201
|
238,244
|
anlutro/russell
|
russell/content.py
|
Post.make_tag
|
def make_tag(cls, tag_name):
"""
Make a Tag object from a tag name. Registers it with the content manager
if possible.
"""
if cls.cm:
return cls.cm.make_tag(tag_name)
return Tag(tag_name.strip())
|
python
|
def make_tag(cls, tag_name):
"""
Make a Tag object from a tag name. Registers it with the content manager
if possible.
"""
if cls.cm:
return cls.cm.make_tag(tag_name)
return Tag(tag_name.strip())
|
[
"def",
"make_tag",
"(",
"cls",
",",
"tag_name",
")",
":",
"if",
"cls",
".",
"cm",
":",
"return",
"cls",
".",
"cm",
".",
"make_tag",
"(",
"tag_name",
")",
"return",
"Tag",
"(",
"tag_name",
".",
"strip",
"(",
")",
")"
] |
Make a Tag object from a tag name. Registers it with the content manager
if possible.
|
[
"Make",
"a",
"Tag",
"object",
"from",
"a",
"tag",
"name",
".",
"Registers",
"it",
"with",
"the",
"content",
"manager",
"if",
"possible",
"."
] |
6e4a95929f031926d3acd5d9e6c9ca7bb896b1b5
|
https://github.com/anlutro/russell/blob/6e4a95929f031926d3acd5d9e6c9ca7bb896b1b5/russell/content.py#L244-L251
|
238,245
|
JukeboxPipeline/jukebox-core
|
src/jukeboxcore/gui/widgets/browser.py
|
AbstractLevel.set_model
|
def set_model(self, m):
"""Set the model for the level
:param m: the model that the level should use
:type m: QtCore.QAbstractItemModel
:returns: None
:rtype: None
:raises: None
"""
self._model = m
self.new_root.emit(QtCore.QModelIndex())
self.model_changed(m)
|
python
|
def set_model(self, m):
"""Set the model for the level
:param m: the model that the level should use
:type m: QtCore.QAbstractItemModel
:returns: None
:rtype: None
:raises: None
"""
self._model = m
self.new_root.emit(QtCore.QModelIndex())
self.model_changed(m)
|
[
"def",
"set_model",
"(",
"self",
",",
"m",
")",
":",
"self",
".",
"_model",
"=",
"m",
"self",
".",
"new_root",
".",
"emit",
"(",
"QtCore",
".",
"QModelIndex",
"(",
")",
")",
"self",
".",
"model_changed",
"(",
"m",
")"
] |
Set the model for the level
:param m: the model that the level should use
:type m: QtCore.QAbstractItemModel
:returns: None
:rtype: None
:raises: None
|
[
"Set",
"the",
"model",
"for",
"the",
"level"
] |
bac2280ca49940355270e4b69400ce9976ab2e6f
|
https://github.com/JukeboxPipeline/jukebox-core/blob/bac2280ca49940355270e4b69400ce9976ab2e6f/src/jukeboxcore/gui/widgets/browser.py#L32-L43
|
238,246
|
JukeboxPipeline/jukebox-core
|
src/jukeboxcore/gui/widgets/browser.py
|
AbstractTreeBrowser.set_model
|
def set_model(self, model):
"""Set all levels\' model to the given one
:param m: the model that the levels should use
:type m: QtCore.QAbstractItemModel
:returns: None
:rtype: None
:raises: None
"""
# do the set model in reverse!
# set model might trigger an update for the lower levels
# but the lower ones have a different model, so it will fail anyways
# this way the initial state after set_model is correct.
self.model = model
self._levels[0].set_model(model)
|
python
|
def set_model(self, model):
"""Set all levels\' model to the given one
:param m: the model that the levels should use
:type m: QtCore.QAbstractItemModel
:returns: None
:rtype: None
:raises: None
"""
# do the set model in reverse!
# set model might trigger an update for the lower levels
# but the lower ones have a different model, so it will fail anyways
# this way the initial state after set_model is correct.
self.model = model
self._levels[0].set_model(model)
|
[
"def",
"set_model",
"(",
"self",
",",
"model",
")",
":",
"# do the set model in reverse!",
"# set model might trigger an update for the lower levels",
"# but the lower ones have a different model, so it will fail anyways",
"# this way the initial state after set_model is correct.",
"self",
".",
"model",
"=",
"model",
"self",
".",
"_levels",
"[",
"0",
"]",
".",
"set_model",
"(",
"model",
")"
] |
Set all levels\' model to the given one
:param m: the model that the levels should use
:type m: QtCore.QAbstractItemModel
:returns: None
:rtype: None
:raises: None
|
[
"Set",
"all",
"levels",
"\\",
"model",
"to",
"the",
"given",
"one"
] |
bac2280ca49940355270e4b69400ce9976ab2e6f
|
https://github.com/JukeboxPipeline/jukebox-core/blob/bac2280ca49940355270e4b69400ce9976ab2e6f/src/jukeboxcore/gui/widgets/browser.py#L197-L211
|
238,247
|
JukeboxPipeline/jukebox-core
|
src/jukeboxcore/gui/widgets/browser.py
|
AbstractTreeBrowser.set_root
|
def set_root(self, depth, index):
"""Set the level\'s root of the given depth to index
:param depth: the depth level
:type depth: int
:param index: the new root index
:type index: QtCore.QModelIndex
:returns: None
:rtype: None
:raises: None
"""
if depth < len(self._levels):
self._levels[depth].set_root(index)
|
python
|
def set_root(self, depth, index):
"""Set the level\'s root of the given depth to index
:param depth: the depth level
:type depth: int
:param index: the new root index
:type index: QtCore.QModelIndex
:returns: None
:rtype: None
:raises: None
"""
if depth < len(self._levels):
self._levels[depth].set_root(index)
|
[
"def",
"set_root",
"(",
"self",
",",
"depth",
",",
"index",
")",
":",
"if",
"depth",
"<",
"len",
"(",
"self",
".",
"_levels",
")",
":",
"self",
".",
"_levels",
"[",
"depth",
"]",
".",
"set_root",
"(",
"index",
")"
] |
Set the level\'s root of the given depth to index
:param depth: the depth level
:type depth: int
:param index: the new root index
:type index: QtCore.QModelIndex
:returns: None
:rtype: None
:raises: None
|
[
"Set",
"the",
"level",
"\\",
"s",
"root",
"of",
"the",
"given",
"depth",
"to",
"index"
] |
bac2280ca49940355270e4b69400ce9976ab2e6f
|
https://github.com/JukeboxPipeline/jukebox-core/blob/bac2280ca49940355270e4b69400ce9976ab2e6f/src/jukeboxcore/gui/widgets/browser.py#L213-L225
|
238,248
|
JukeboxPipeline/jukebox-core
|
src/jukeboxcore/gui/widgets/browser.py
|
AbstractTreeBrowser._new_level
|
def _new_level(self, depth):
"""Create a new level and header and connect signals
:param depth: the depth level
:type depth: int
:returns: None
:rtype: None
:raises: None
"""
l = self.create_level(depth)
h = self.create_header(depth)
self.add_lvl_to_ui(l, h)
l.new_root.connect(partial(self.set_root, depth+1))
self._levels.append(l)
|
python
|
def _new_level(self, depth):
"""Create a new level and header and connect signals
:param depth: the depth level
:type depth: int
:returns: None
:rtype: None
:raises: None
"""
l = self.create_level(depth)
h = self.create_header(depth)
self.add_lvl_to_ui(l, h)
l.new_root.connect(partial(self.set_root, depth+1))
self._levels.append(l)
|
[
"def",
"_new_level",
"(",
"self",
",",
"depth",
")",
":",
"l",
"=",
"self",
".",
"create_level",
"(",
"depth",
")",
"h",
"=",
"self",
".",
"create_header",
"(",
"depth",
")",
"self",
".",
"add_lvl_to_ui",
"(",
"l",
",",
"h",
")",
"l",
".",
"new_root",
".",
"connect",
"(",
"partial",
"(",
"self",
".",
"set_root",
",",
"depth",
"+",
"1",
")",
")",
"self",
".",
"_levels",
".",
"append",
"(",
"l",
")"
] |
Create a new level and header and connect signals
:param depth: the depth level
:type depth: int
:returns: None
:rtype: None
:raises: None
|
[
"Create",
"a",
"new",
"level",
"and",
"header",
"and",
"connect",
"signals"
] |
bac2280ca49940355270e4b69400ce9976ab2e6f
|
https://github.com/JukeboxPipeline/jukebox-core/blob/bac2280ca49940355270e4b69400ce9976ab2e6f/src/jukeboxcore/gui/widgets/browser.py#L227-L240
|
238,249
|
JukeboxPipeline/jukebox-core
|
src/jukeboxcore/gui/widgets/browser.py
|
CBLevel.set_root
|
def set_root(self, index):
"""Set the given index as root index of the combobox
:param index: the new root index
:type index: QtCore.QModelIndex
:returns: None
:rtype: None
:raises: None
"""
if not index.isValid():
self.setCurrentIndex(-1)
return
if self.model() != index.model():
self.setModel(index.model())
self.setRootModelIndex(index)
if self.model().rowCount(index):
self.setCurrentIndex(0)
else:
self.setCurrentIndex(-1)
|
python
|
def set_root(self, index):
"""Set the given index as root index of the combobox
:param index: the new root index
:type index: QtCore.QModelIndex
:returns: None
:rtype: None
:raises: None
"""
if not index.isValid():
self.setCurrentIndex(-1)
return
if self.model() != index.model():
self.setModel(index.model())
self.setRootModelIndex(index)
if self.model().rowCount(index):
self.setCurrentIndex(0)
else:
self.setCurrentIndex(-1)
|
[
"def",
"set_root",
"(",
"self",
",",
"index",
")",
":",
"if",
"not",
"index",
".",
"isValid",
"(",
")",
":",
"self",
".",
"setCurrentIndex",
"(",
"-",
"1",
")",
"return",
"if",
"self",
".",
"model",
"(",
")",
"!=",
"index",
".",
"model",
"(",
")",
":",
"self",
".",
"setModel",
"(",
"index",
".",
"model",
"(",
")",
")",
"self",
".",
"setRootModelIndex",
"(",
"index",
")",
"if",
"self",
".",
"model",
"(",
")",
".",
"rowCount",
"(",
"index",
")",
":",
"self",
".",
"setCurrentIndex",
"(",
"0",
")",
"else",
":",
"self",
".",
"setCurrentIndex",
"(",
"-",
"1",
")"
] |
Set the given index as root index of the combobox
:param index: the new root index
:type index: QtCore.QModelIndex
:returns: None
:rtype: None
:raises: None
|
[
"Set",
"the",
"given",
"index",
"as",
"root",
"index",
"of",
"the",
"combobox"
] |
bac2280ca49940355270e4b69400ce9976ab2e6f
|
https://github.com/JukeboxPipeline/jukebox-core/blob/bac2280ca49940355270e4b69400ce9976ab2e6f/src/jukeboxcore/gui/widgets/browser.py#L315-L333
|
238,250
|
JukeboxPipeline/jukebox-core
|
src/jukeboxcore/gui/widgets/browser.py
|
CBLevel.selected_indexes
|
def selected_indexes(self, ):
"""Return the current index
:returns: the current index in a list
:rtype: list of QtCore.QModelIndex
:raises: None
"""
i = self.model().index(self.currentIndex(), 0, self.rootModelIndex())
return [i]
|
python
|
def selected_indexes(self, ):
"""Return the current index
:returns: the current index in a list
:rtype: list of QtCore.QModelIndex
:raises: None
"""
i = self.model().index(self.currentIndex(), 0, self.rootModelIndex())
return [i]
|
[
"def",
"selected_indexes",
"(",
"self",
",",
")",
":",
"i",
"=",
"self",
".",
"model",
"(",
")",
".",
"index",
"(",
"self",
".",
"currentIndex",
"(",
")",
",",
"0",
",",
"self",
".",
"rootModelIndex",
"(",
")",
")",
"return",
"[",
"i",
"]"
] |
Return the current index
:returns: the current index in a list
:rtype: list of QtCore.QModelIndex
:raises: None
|
[
"Return",
"the",
"current",
"index"
] |
bac2280ca49940355270e4b69400ce9976ab2e6f
|
https://github.com/JukeboxPipeline/jukebox-core/blob/bac2280ca49940355270e4b69400ce9976ab2e6f/src/jukeboxcore/gui/widgets/browser.py#L335-L343
|
238,251
|
JukeboxPipeline/jukebox-core
|
src/jukeboxcore/gui/widgets/browser.py
|
ComboBoxBrowser.create_header
|
def create_header(self, depth):
"""Create and return a widget that will be used as a header for the given depth
Override this method if you want to have header widgets.
The default implementation returns None.
You can return None if you do not want a header for the given depth
:param depth: the depth level
:type depth: int
:returns: a Widget that is used for the header or None
:rtype: QtGui.QWidget|None
:raises: None
"""
if not (depth >= 0 and depth < len(self._headertexts)):
return
txt = self._headertexts[depth]
if txt is None:
return
lbl = QtGui.QLabel(txt, self)
return lbl
|
python
|
def create_header(self, depth):
"""Create and return a widget that will be used as a header for the given depth
Override this method if you want to have header widgets.
The default implementation returns None.
You can return None if you do not want a header for the given depth
:param depth: the depth level
:type depth: int
:returns: a Widget that is used for the header or None
:rtype: QtGui.QWidget|None
:raises: None
"""
if not (depth >= 0 and depth < len(self._headertexts)):
return
txt = self._headertexts[depth]
if txt is None:
return
lbl = QtGui.QLabel(txt, self)
return lbl
|
[
"def",
"create_header",
"(",
"self",
",",
"depth",
")",
":",
"if",
"not",
"(",
"depth",
">=",
"0",
"and",
"depth",
"<",
"len",
"(",
"self",
".",
"_headertexts",
")",
")",
":",
"return",
"txt",
"=",
"self",
".",
"_headertexts",
"[",
"depth",
"]",
"if",
"txt",
"is",
"None",
":",
"return",
"lbl",
"=",
"QtGui",
".",
"QLabel",
"(",
"txt",
",",
"self",
")",
"return",
"lbl"
] |
Create and return a widget that will be used as a header for the given depth
Override this method if you want to have header widgets.
The default implementation returns None.
You can return None if you do not want a header for the given depth
:param depth: the depth level
:type depth: int
:returns: a Widget that is used for the header or None
:rtype: QtGui.QWidget|None
:raises: None
|
[
"Create",
"and",
"return",
"a",
"widget",
"that",
"will",
"be",
"used",
"as",
"a",
"header",
"for",
"the",
"given",
"depth"
] |
bac2280ca49940355270e4b69400ce9976ab2e6f
|
https://github.com/JukeboxPipeline/jukebox-core/blob/bac2280ca49940355270e4b69400ce9976ab2e6f/src/jukeboxcore/gui/widgets/browser.py#L423-L442
|
238,252
|
JukeboxPipeline/jukebox-core
|
src/jukeboxcore/gui/widgets/browser.py
|
ListLevel.model_changed
|
def model_changed(self, model):
"""Apply the model to the combobox
When a level instance is created, the model is None. So it has to be set afterwards.
Then this method will be called and your level should somehow use the model
:param model: the model that the level should use
:type model: QtCore.QAbstractItemModel
:returns: None
:rtype: None
:raises: None
"""
self.setModel(model)
# to update all lists belwo
# current changed is not triggered by setModel somehow
if model is not None:
self.setCurrentIndex(self.model().index(0, 0))
|
python
|
def model_changed(self, model):
"""Apply the model to the combobox
When a level instance is created, the model is None. So it has to be set afterwards.
Then this method will be called and your level should somehow use the model
:param model: the model that the level should use
:type model: QtCore.QAbstractItemModel
:returns: None
:rtype: None
:raises: None
"""
self.setModel(model)
# to update all lists belwo
# current changed is not triggered by setModel somehow
if model is not None:
self.setCurrentIndex(self.model().index(0, 0))
|
[
"def",
"model_changed",
"(",
"self",
",",
"model",
")",
":",
"self",
".",
"setModel",
"(",
"model",
")",
"# to update all lists belwo",
"# current changed is not triggered by setModel somehow",
"if",
"model",
"is",
"not",
"None",
":",
"self",
".",
"setCurrentIndex",
"(",
"self",
".",
"model",
"(",
")",
".",
"index",
"(",
"0",
",",
"0",
")",
")"
] |
Apply the model to the combobox
When a level instance is created, the model is None. So it has to be set afterwards.
Then this method will be called and your level should somehow use the model
:param model: the model that the level should use
:type model: QtCore.QAbstractItemModel
:returns: None
:rtype: None
:raises: None
|
[
"Apply",
"the",
"model",
"to",
"the",
"combobox"
] |
bac2280ca49940355270e4b69400ce9976ab2e6f
|
https://github.com/JukeboxPipeline/jukebox-core/blob/bac2280ca49940355270e4b69400ce9976ab2e6f/src/jukeboxcore/gui/widgets/browser.py#L476-L492
|
238,253
|
JukeboxPipeline/jukebox-core
|
src/jukeboxcore/gui/widgets/browser.py
|
ListLevel.set_root
|
def set_root(self, index):
"""Set the given index as root index of list
:param index: the new root index
:type index: QtCore.QModelIndex
:returns: None
:rtype: None
:raises: None
"""
if not index.isValid():
self.setModel(None) # so we will not see toplevel stuff
self.setCurrentIndex(QtCore.QModelIndex())
self.new_root.emit(QtCore.QModelIndex())
return
if self.model() != index.model():
self.setModel(index.model())
self.setRootIndex(index)
if self.model().hasChildren(index):
self.setCurrentIndex(self.model().index(0, 0, index))
self.new_root.emit(self.model().index(0, 0, index))
else:
self.new_root.emit(QtCore.QModelIndex())
|
python
|
def set_root(self, index):
"""Set the given index as root index of list
:param index: the new root index
:type index: QtCore.QModelIndex
:returns: None
:rtype: None
:raises: None
"""
if not index.isValid():
self.setModel(None) # so we will not see toplevel stuff
self.setCurrentIndex(QtCore.QModelIndex())
self.new_root.emit(QtCore.QModelIndex())
return
if self.model() != index.model():
self.setModel(index.model())
self.setRootIndex(index)
if self.model().hasChildren(index):
self.setCurrentIndex(self.model().index(0, 0, index))
self.new_root.emit(self.model().index(0, 0, index))
else:
self.new_root.emit(QtCore.QModelIndex())
|
[
"def",
"set_root",
"(",
"self",
",",
"index",
")",
":",
"if",
"not",
"index",
".",
"isValid",
"(",
")",
":",
"self",
".",
"setModel",
"(",
"None",
")",
"# so we will not see toplevel stuff",
"self",
".",
"setCurrentIndex",
"(",
"QtCore",
".",
"QModelIndex",
"(",
")",
")",
"self",
".",
"new_root",
".",
"emit",
"(",
"QtCore",
".",
"QModelIndex",
"(",
")",
")",
"return",
"if",
"self",
".",
"model",
"(",
")",
"!=",
"index",
".",
"model",
"(",
")",
":",
"self",
".",
"setModel",
"(",
"index",
".",
"model",
"(",
")",
")",
"self",
".",
"setRootIndex",
"(",
"index",
")",
"if",
"self",
".",
"model",
"(",
")",
".",
"hasChildren",
"(",
"index",
")",
":",
"self",
".",
"setCurrentIndex",
"(",
"self",
".",
"model",
"(",
")",
".",
"index",
"(",
"0",
",",
"0",
",",
"index",
")",
")",
"self",
".",
"new_root",
".",
"emit",
"(",
"self",
".",
"model",
"(",
")",
".",
"index",
"(",
"0",
",",
"0",
",",
"index",
")",
")",
"else",
":",
"self",
".",
"new_root",
".",
"emit",
"(",
"QtCore",
".",
"QModelIndex",
"(",
")",
")"
] |
Set the given index as root index of list
:param index: the new root index
:type index: QtCore.QModelIndex
:returns: None
:rtype: None
:raises: None
|
[
"Set",
"the",
"given",
"index",
"as",
"root",
"index",
"of",
"list"
] |
bac2280ca49940355270e4b69400ce9976ab2e6f
|
https://github.com/JukeboxPipeline/jukebox-core/blob/bac2280ca49940355270e4b69400ce9976ab2e6f/src/jukeboxcore/gui/widgets/browser.py#L494-L515
|
238,254
|
JukeboxPipeline/jukebox-core
|
src/jukeboxcore/gui/widgets/browser.py
|
ListLevel.set_index
|
def set_index(self, index):
"""Set the current index to the row of the given index
:param index: the index to set the level to
:type index: QtCore.QModelIndex
:returns: None
:rtype: None
:raises: None
"""
self.setCurrentIndex(index)
self.new_root.emit(index)
self.scrollTo(index)
|
python
|
def set_index(self, index):
"""Set the current index to the row of the given index
:param index: the index to set the level to
:type index: QtCore.QModelIndex
:returns: None
:rtype: None
:raises: None
"""
self.setCurrentIndex(index)
self.new_root.emit(index)
self.scrollTo(index)
|
[
"def",
"set_index",
"(",
"self",
",",
"index",
")",
":",
"self",
".",
"setCurrentIndex",
"(",
"index",
")",
"self",
".",
"new_root",
".",
"emit",
"(",
"index",
")",
"self",
".",
"scrollTo",
"(",
"index",
")"
] |
Set the current index to the row of the given index
:param index: the index to set the level to
:type index: QtCore.QModelIndex
:returns: None
:rtype: None
:raises: None
|
[
"Set",
"the",
"current",
"index",
"to",
"the",
"row",
"of",
"the",
"given",
"index"
] |
bac2280ca49940355270e4b69400ce9976ab2e6f
|
https://github.com/JukeboxPipeline/jukebox-core/blob/bac2280ca49940355270e4b69400ce9976ab2e6f/src/jukeboxcore/gui/widgets/browser.py#L544-L555
|
238,255
|
JukeboxPipeline/jukebox-core
|
src/jukeboxcore/gui/widgets/browser.py
|
ListLevel.resizeEvent
|
def resizeEvent(self, event):
"""Schedules an item layout if resize mode is \"adjust\". Somehow this is
needed for correctly scaling down items.
The reason this was reimplemented was the CommentDelegate.
:param event: the resize event
:type event: QtCore.QEvent
:returns: None
:rtype: None
:raises: None
"""
if self.resizeMode() == self.Adjust:
self.scheduleDelayedItemsLayout()
return super(ListLevel, self).resizeEvent(event)
|
python
|
def resizeEvent(self, event):
"""Schedules an item layout if resize mode is \"adjust\". Somehow this is
needed for correctly scaling down items.
The reason this was reimplemented was the CommentDelegate.
:param event: the resize event
:type event: QtCore.QEvent
:returns: None
:rtype: None
:raises: None
"""
if self.resizeMode() == self.Adjust:
self.scheduleDelayedItemsLayout()
return super(ListLevel, self).resizeEvent(event)
|
[
"def",
"resizeEvent",
"(",
"self",
",",
"event",
")",
":",
"if",
"self",
".",
"resizeMode",
"(",
")",
"==",
"self",
".",
"Adjust",
":",
"self",
".",
"scheduleDelayedItemsLayout",
"(",
")",
"return",
"super",
"(",
"ListLevel",
",",
"self",
")",
".",
"resizeEvent",
"(",
"event",
")"
] |
Schedules an item layout if resize mode is \"adjust\". Somehow this is
needed for correctly scaling down items.
The reason this was reimplemented was the CommentDelegate.
:param event: the resize event
:type event: QtCore.QEvent
:returns: None
:rtype: None
:raises: None
|
[
"Schedules",
"an",
"item",
"layout",
"if",
"resize",
"mode",
"is",
"\\",
"adjust",
"\\",
".",
"Somehow",
"this",
"is",
"needed",
"for",
"correctly",
"scaling",
"down",
"items",
"."
] |
bac2280ca49940355270e4b69400ce9976ab2e6f
|
https://github.com/JukeboxPipeline/jukebox-core/blob/bac2280ca49940355270e4b69400ce9976ab2e6f/src/jukeboxcore/gui/widgets/browser.py#L557-L571
|
238,256
|
JukeboxPipeline/jukebox-core
|
src/jukeboxcore/gui/widgets/browser.py
|
CommentBrowser.create_level
|
def create_level(self, depth):
"""Create and return a level for the given depth
The model and root of the level will be automatically set by the browser.
:param depth: the depth level that the level should handle
:type depth: int
:returns: a new level for the given depth
:rtype: :class:`jukeboxcore.gui.widgets.browser.AbstractLevel`
:raises: None
"""
ll = ListLevel(parent=self)
ll.setEditTriggers(ll.DoubleClicked | ll.SelectedClicked | ll.CurrentChanged)
#ll.setSelectionBehavior(ll.SelectRows)
ll.setResizeMode(ll.Adjust)
self.delegate = CommentDelegate(ll)
ll.setItemDelegate(self.delegate)
ll.setVerticalScrollMode(ll.ScrollPerPixel)
return ll
|
python
|
def create_level(self, depth):
"""Create and return a level for the given depth
The model and root of the level will be automatically set by the browser.
:param depth: the depth level that the level should handle
:type depth: int
:returns: a new level for the given depth
:rtype: :class:`jukeboxcore.gui.widgets.browser.AbstractLevel`
:raises: None
"""
ll = ListLevel(parent=self)
ll.setEditTriggers(ll.DoubleClicked | ll.SelectedClicked | ll.CurrentChanged)
#ll.setSelectionBehavior(ll.SelectRows)
ll.setResizeMode(ll.Adjust)
self.delegate = CommentDelegate(ll)
ll.setItemDelegate(self.delegate)
ll.setVerticalScrollMode(ll.ScrollPerPixel)
return ll
|
[
"def",
"create_level",
"(",
"self",
",",
"depth",
")",
":",
"ll",
"=",
"ListLevel",
"(",
"parent",
"=",
"self",
")",
"ll",
".",
"setEditTriggers",
"(",
"ll",
".",
"DoubleClicked",
"|",
"ll",
".",
"SelectedClicked",
"|",
"ll",
".",
"CurrentChanged",
")",
"#ll.setSelectionBehavior(ll.SelectRows)",
"ll",
".",
"setResizeMode",
"(",
"ll",
".",
"Adjust",
")",
"self",
".",
"delegate",
"=",
"CommentDelegate",
"(",
"ll",
")",
"ll",
".",
"setItemDelegate",
"(",
"self",
".",
"delegate",
")",
"ll",
".",
"setVerticalScrollMode",
"(",
"ll",
".",
"ScrollPerPixel",
")",
"return",
"ll"
] |
Create and return a level for the given depth
The model and root of the level will be automatically set by the browser.
:param depth: the depth level that the level should handle
:type depth: int
:returns: a new level for the given depth
:rtype: :class:`jukeboxcore.gui.widgets.browser.AbstractLevel`
:raises: None
|
[
"Create",
"and",
"return",
"a",
"level",
"for",
"the",
"given",
"depth"
] |
bac2280ca49940355270e4b69400ce9976ab2e6f
|
https://github.com/JukeboxPipeline/jukebox-core/blob/bac2280ca49940355270e4b69400ce9976ab2e6f/src/jukeboxcore/gui/widgets/browser.py#L677-L695
|
238,257
|
gisce/heman
|
heman/api/form/__init__.py
|
get_first_lang
|
def get_first_lang():
"""Get the first lang of Accept-Language Header.
"""
request_lang = request.headers.get('Accept-Language').split(',')
if request_lang:
lang = locale.normalize(request_lang[0]).split('.')[0]
else:
lang = False
return lang
|
python
|
def get_first_lang():
"""Get the first lang of Accept-Language Header.
"""
request_lang = request.headers.get('Accept-Language').split(',')
if request_lang:
lang = locale.normalize(request_lang[0]).split('.')[0]
else:
lang = False
return lang
|
[
"def",
"get_first_lang",
"(",
")",
":",
"request_lang",
"=",
"request",
".",
"headers",
".",
"get",
"(",
"'Accept-Language'",
")",
".",
"split",
"(",
"','",
")",
"if",
"request_lang",
":",
"lang",
"=",
"locale",
".",
"normalize",
"(",
"request_lang",
"[",
"0",
"]",
")",
".",
"split",
"(",
"'.'",
")",
"[",
"0",
"]",
"else",
":",
"lang",
"=",
"False",
"return",
"lang"
] |
Get the first lang of Accept-Language Header.
|
[
"Get",
"the",
"first",
"lang",
"of",
"Accept",
"-",
"Language",
"Header",
"."
] |
cf09fca09953f12454b2910ddfa9d7586709657b
|
https://github.com/gisce/heman/blob/cf09fca09953f12454b2910ddfa9d7586709657b/heman/api/form/__init__.py#L19-L27
|
238,258
|
siemens/django-dingos
|
dingos/core/utilities.py
|
set_dict
|
def set_dict(dictionary, value, command='set', *keys):
"""
set_dict takes a dictionary, the value to
enter into the dictionary, a command of what
to do with the value, and a sequence of keys.
d = {}
set_dict(d,1,'append','level 1','level 2')
-> d['level 1']['level 2'] = [1]
set_dict(d,2,'append','level 1','level 2')
-> d['level 1']['level 2'] = [1,2]
"""
existing = dictionary
for i in range(0, len(keys) - 1):
if keys[i] in existing:
existing = existing[keys[i]]
else:
existing[keys[i]] = existing.__class__()
existing = existing[keys[i]]
if command == 'set':
existing[keys[len(keys) - 1]] = value
elif command == 'append':
if keys[len(keys) - 1] in existing:
existing[keys[len(keys) - 1]].append(value)
else:
existing[keys[len(keys) - 1]] = [value]
elif command == 'set_or_append':
if keys[len(keys) - 1] in existing:
if type(keys[len(keys) - 1]) == type([]):
existing[keys[len(keys) - 1]].append(value)
else:
existing[keys[len(keys) - 1]] = [existing[keys[len(keys) - 1]], value]
else:
existing[keys[len(keys) - 1]] = value
elif command == 'insert':
if keys[len(keys) - 1] in existing:
if not value in existing[keys[len(keys) - 1]]:
existing[keys[len(keys) - 1]].append(value)
else:
existing[keys[len(keys) - 1]] = [value]
|
python
|
def set_dict(dictionary, value, command='set', *keys):
"""
set_dict takes a dictionary, the value to
enter into the dictionary, a command of what
to do with the value, and a sequence of keys.
d = {}
set_dict(d,1,'append','level 1','level 2')
-> d['level 1']['level 2'] = [1]
set_dict(d,2,'append','level 1','level 2')
-> d['level 1']['level 2'] = [1,2]
"""
existing = dictionary
for i in range(0, len(keys) - 1):
if keys[i] in existing:
existing = existing[keys[i]]
else:
existing[keys[i]] = existing.__class__()
existing = existing[keys[i]]
if command == 'set':
existing[keys[len(keys) - 1]] = value
elif command == 'append':
if keys[len(keys) - 1] in existing:
existing[keys[len(keys) - 1]].append(value)
else:
existing[keys[len(keys) - 1]] = [value]
elif command == 'set_or_append':
if keys[len(keys) - 1] in existing:
if type(keys[len(keys) - 1]) == type([]):
existing[keys[len(keys) - 1]].append(value)
else:
existing[keys[len(keys) - 1]] = [existing[keys[len(keys) - 1]], value]
else:
existing[keys[len(keys) - 1]] = value
elif command == 'insert':
if keys[len(keys) - 1] in existing:
if not value in existing[keys[len(keys) - 1]]:
existing[keys[len(keys) - 1]].append(value)
else:
existing[keys[len(keys) - 1]] = [value]
|
[
"def",
"set_dict",
"(",
"dictionary",
",",
"value",
",",
"command",
"=",
"'set'",
",",
"*",
"keys",
")",
":",
"existing",
"=",
"dictionary",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"keys",
")",
"-",
"1",
")",
":",
"if",
"keys",
"[",
"i",
"]",
"in",
"existing",
":",
"existing",
"=",
"existing",
"[",
"keys",
"[",
"i",
"]",
"]",
"else",
":",
"existing",
"[",
"keys",
"[",
"i",
"]",
"]",
"=",
"existing",
".",
"__class__",
"(",
")",
"existing",
"=",
"existing",
"[",
"keys",
"[",
"i",
"]",
"]",
"if",
"command",
"==",
"'set'",
":",
"existing",
"[",
"keys",
"[",
"len",
"(",
"keys",
")",
"-",
"1",
"]",
"]",
"=",
"value",
"elif",
"command",
"==",
"'append'",
":",
"if",
"keys",
"[",
"len",
"(",
"keys",
")",
"-",
"1",
"]",
"in",
"existing",
":",
"existing",
"[",
"keys",
"[",
"len",
"(",
"keys",
")",
"-",
"1",
"]",
"]",
".",
"append",
"(",
"value",
")",
"else",
":",
"existing",
"[",
"keys",
"[",
"len",
"(",
"keys",
")",
"-",
"1",
"]",
"]",
"=",
"[",
"value",
"]",
"elif",
"command",
"==",
"'set_or_append'",
":",
"if",
"keys",
"[",
"len",
"(",
"keys",
")",
"-",
"1",
"]",
"in",
"existing",
":",
"if",
"type",
"(",
"keys",
"[",
"len",
"(",
"keys",
")",
"-",
"1",
"]",
")",
"==",
"type",
"(",
"[",
"]",
")",
":",
"existing",
"[",
"keys",
"[",
"len",
"(",
"keys",
")",
"-",
"1",
"]",
"]",
".",
"append",
"(",
"value",
")",
"else",
":",
"existing",
"[",
"keys",
"[",
"len",
"(",
"keys",
")",
"-",
"1",
"]",
"]",
"=",
"[",
"existing",
"[",
"keys",
"[",
"len",
"(",
"keys",
")",
"-",
"1",
"]",
"]",
",",
"value",
"]",
"else",
":",
"existing",
"[",
"keys",
"[",
"len",
"(",
"keys",
")",
"-",
"1",
"]",
"]",
"=",
"value",
"elif",
"command",
"==",
"'insert'",
":",
"if",
"keys",
"[",
"len",
"(",
"keys",
")",
"-",
"1",
"]",
"in",
"existing",
":",
"if",
"not",
"value",
"in",
"existing",
"[",
"keys",
"[",
"len",
"(",
"keys",
")",
"-",
"1",
"]",
"]",
":",
"existing",
"[",
"keys",
"[",
"len",
"(",
"keys",
")",
"-",
"1",
"]",
"]",
".",
"append",
"(",
"value",
")",
"else",
":",
"existing",
"[",
"keys",
"[",
"len",
"(",
"keys",
")",
"-",
"1",
"]",
"]",
"=",
"[",
"value",
"]"
] |
set_dict takes a dictionary, the value to
enter into the dictionary, a command of what
to do with the value, and a sequence of keys.
d = {}
set_dict(d,1,'append','level 1','level 2')
-> d['level 1']['level 2'] = [1]
set_dict(d,2,'append','level 1','level 2')
-> d['level 1']['level 2'] = [1,2]
|
[
"set_dict",
"takes",
"a",
"dictionary",
"the",
"value",
"to",
"enter",
"into",
"the",
"dictionary",
"a",
"command",
"of",
"what",
"to",
"do",
"with",
"the",
"value",
"and",
"a",
"sequence",
"of",
"keys",
"."
] |
7154f75b06d2538568e2f2455a76f3d0db0b7d70
|
https://github.com/siemens/django-dingos/blob/7154f75b06d2538568e2f2455a76f3d0db0b7d70/dingos/core/utilities.py#L58-L103
|
238,259
|
siemens/django-dingos
|
dingos/core/utilities.py
|
search_by_re_list
|
def search_by_re_list(re_list, text):
"""
Given a list of compiled regular expressions,
try to search in text with each matcher until the first
match occurs. Return the group-dict for that first match.
"""
for matcher in re_list:
m = matcher.search(text)
if m:
return m.groupdict()
return None
|
python
|
def search_by_re_list(re_list, text):
"""
Given a list of compiled regular expressions,
try to search in text with each matcher until the first
match occurs. Return the group-dict for that first match.
"""
for matcher in re_list:
m = matcher.search(text)
if m:
return m.groupdict()
return None
|
[
"def",
"search_by_re_list",
"(",
"re_list",
",",
"text",
")",
":",
"for",
"matcher",
"in",
"re_list",
":",
"m",
"=",
"matcher",
".",
"search",
"(",
"text",
")",
"if",
"m",
":",
"return",
"m",
".",
"groupdict",
"(",
")",
"return",
"None"
] |
Given a list of compiled regular expressions,
try to search in text with each matcher until the first
match occurs. Return the group-dict for that first match.
|
[
"Given",
"a",
"list",
"of",
"compiled",
"regular",
"expressions",
"try",
"to",
"search",
"in",
"text",
"with",
"each",
"matcher",
"until",
"the",
"first",
"match",
"occurs",
".",
"Return",
"the",
"group",
"-",
"dict",
"for",
"that",
"first",
"match",
"."
] |
7154f75b06d2538568e2f2455a76f3d0db0b7d70
|
https://github.com/siemens/django-dingos/blob/7154f75b06d2538568e2f2455a76f3d0db0b7d70/dingos/core/utilities.py#L132-L142
|
238,260
|
JoaoFelipe/pyposast
|
pyposast/visitor.py
|
extract_code
|
def extract_code(lines, node, lstrip="", ljoin="\n", strip=""):
"""Get corresponding text in the code
Arguments:
lines -- code splitted by linebreak
node -- PyPosAST enhanced node
Keyword Arguments:
lstrip -- During extraction, strip lines with this arg (default="")
ljoin -- During extraction, join lines with this arg (default="\n")
strip -- After extraction, strip all code with this arg (default="")
"""
first_line, first_col = node.first_line - 1, node.first_col
last_line, last_col = node.last_line - 1, node.last_col
if first_line == last_line:
return lines[first_line][first_col:last_col].strip(strip)
result = []
# Add first line
result.append(lines[first_line][first_col:].strip(lstrip))
# Add middle lines
if first_line + 1 != last_line:
for line in range(first_line + 1, last_line):
result.append(lines[line].strip(lstrip))
# Add last line
result.append(lines[last_line][:last_col].strip(lstrip))
return ljoin.join(result).strip(strip)
|
python
|
def extract_code(lines, node, lstrip="", ljoin="\n", strip=""):
"""Get corresponding text in the code
Arguments:
lines -- code splitted by linebreak
node -- PyPosAST enhanced node
Keyword Arguments:
lstrip -- During extraction, strip lines with this arg (default="")
ljoin -- During extraction, join lines with this arg (default="\n")
strip -- After extraction, strip all code with this arg (default="")
"""
first_line, first_col = node.first_line - 1, node.first_col
last_line, last_col = node.last_line - 1, node.last_col
if first_line == last_line:
return lines[first_line][first_col:last_col].strip(strip)
result = []
# Add first line
result.append(lines[first_line][first_col:].strip(lstrip))
# Add middle lines
if first_line + 1 != last_line:
for line in range(first_line + 1, last_line):
result.append(lines[line].strip(lstrip))
# Add last line
result.append(lines[last_line][:last_col].strip(lstrip))
return ljoin.join(result).strip(strip)
|
[
"def",
"extract_code",
"(",
"lines",
",",
"node",
",",
"lstrip",
"=",
"\"\"",
",",
"ljoin",
"=",
"\"\\n\"",
",",
"strip",
"=",
"\"\"",
")",
":",
"first_line",
",",
"first_col",
"=",
"node",
".",
"first_line",
"-",
"1",
",",
"node",
".",
"first_col",
"last_line",
",",
"last_col",
"=",
"node",
".",
"last_line",
"-",
"1",
",",
"node",
".",
"last_col",
"if",
"first_line",
"==",
"last_line",
":",
"return",
"lines",
"[",
"first_line",
"]",
"[",
"first_col",
":",
"last_col",
"]",
".",
"strip",
"(",
"strip",
")",
"result",
"=",
"[",
"]",
"# Add first line",
"result",
".",
"append",
"(",
"lines",
"[",
"first_line",
"]",
"[",
"first_col",
":",
"]",
".",
"strip",
"(",
"lstrip",
")",
")",
"# Add middle lines",
"if",
"first_line",
"+",
"1",
"!=",
"last_line",
":",
"for",
"line",
"in",
"range",
"(",
"first_line",
"+",
"1",
",",
"last_line",
")",
":",
"result",
".",
"append",
"(",
"lines",
"[",
"line",
"]",
".",
"strip",
"(",
"lstrip",
")",
")",
"# Add last line",
"result",
".",
"append",
"(",
"lines",
"[",
"last_line",
"]",
"[",
":",
"last_col",
"]",
".",
"strip",
"(",
"lstrip",
")",
")",
"return",
"ljoin",
".",
"join",
"(",
"result",
")",
".",
"strip",
"(",
"strip",
")"
] |
Get corresponding text in the code
Arguments:
lines -- code splitted by linebreak
node -- PyPosAST enhanced node
Keyword Arguments:
lstrip -- During extraction, strip lines with this arg (default="")
ljoin -- During extraction, join lines with this arg (default="\n")
strip -- After extraction, strip all code with this arg (default="")
|
[
"Get",
"corresponding",
"text",
"in",
"the",
"code"
] |
497c88c66b451ff2cd7354be1af070c92e119f41
|
https://github.com/JoaoFelipe/pyposast/blob/497c88c66b451ff2cd7354be1af070c92e119f41/pyposast/visitor.py#L30-L58
|
238,261
|
JoaoFelipe/pyposast
|
pyposast/visitor.py
|
LineProvenanceVisitor.dnode
|
def dnode(self, node):
"""Duplicate node and adjust it for deslocated line and column"""
new_node = copy(node)
new_node.lineno += self.dline
new_node.col_offset += self.dcol
return new_node
|
python
|
def dnode(self, node):
"""Duplicate node and adjust it for deslocated line and column"""
new_node = copy(node)
new_node.lineno += self.dline
new_node.col_offset += self.dcol
return new_node
|
[
"def",
"dnode",
"(",
"self",
",",
"node",
")",
":",
"new_node",
"=",
"copy",
"(",
"node",
")",
"new_node",
".",
"lineno",
"+=",
"self",
".",
"dline",
"new_node",
".",
"col_offset",
"+=",
"self",
".",
"dcol",
"return",
"new_node"
] |
Duplicate node and adjust it for deslocated line and column
|
[
"Duplicate",
"node",
"and",
"adjust",
"it",
"for",
"deslocated",
"line",
"and",
"column"
] |
497c88c66b451ff2cd7354be1af070c92e119f41
|
https://github.com/JoaoFelipe/pyposast/blob/497c88c66b451ff2cd7354be1af070c92e119f41/pyposast/visitor.py#L119-L124
|
238,262
|
JoaoFelipe/pyposast
|
pyposast/visitor.py
|
LineProvenanceVisitor.dposition
|
def dposition(self, node, dcol=0):
"""Return deslocated line and column"""
nnode = self.dnode(node)
return (nnode.lineno, nnode.col_offset + dcol)
|
python
|
def dposition(self, node, dcol=0):
"""Return deslocated line and column"""
nnode = self.dnode(node)
return (nnode.lineno, nnode.col_offset + dcol)
|
[
"def",
"dposition",
"(",
"self",
",",
"node",
",",
"dcol",
"=",
"0",
")",
":",
"nnode",
"=",
"self",
".",
"dnode",
"(",
"node",
")",
"return",
"(",
"nnode",
".",
"lineno",
",",
"nnode",
".",
"col_offset",
"+",
"dcol",
")"
] |
Return deslocated line and column
|
[
"Return",
"deslocated",
"line",
"and",
"column"
] |
497c88c66b451ff2cd7354be1af070c92e119f41
|
https://github.com/JoaoFelipe/pyposast/blob/497c88c66b451ff2cd7354be1af070c92e119f41/pyposast/visitor.py#L126-L129
|
238,263
|
JoaoFelipe/pyposast
|
pyposast/visitor.py
|
LineProvenanceVisitor.calculate_infixop
|
def calculate_infixop(self, node, previous, next_node):
"""Create new node for infixop"""
previous_position = (previous.last_line, previous.last_col - 1)
position = (next_node.first_line, next_node.first_col + 1)
possible = []
for ch in OPERATORS[node.__class__]:
try:
pos = self.operators[ch].find_previous(position)
if previous_position < pos[1] < position:
possible.append(pos)
except KeyError:
pass
if not possible:
raise ValueError("not a single {} between {} and {}".format(
OPERATORS[node.__class__], previous_position, position))
return NodeWithPosition(
*min(possible, key=lambda x: tuple(map(sub, position, x[0])))
)
|
python
|
def calculate_infixop(self, node, previous, next_node):
"""Create new node for infixop"""
previous_position = (previous.last_line, previous.last_col - 1)
position = (next_node.first_line, next_node.first_col + 1)
possible = []
for ch in OPERATORS[node.__class__]:
try:
pos = self.operators[ch].find_previous(position)
if previous_position < pos[1] < position:
possible.append(pos)
except KeyError:
pass
if not possible:
raise ValueError("not a single {} between {} and {}".format(
OPERATORS[node.__class__], previous_position, position))
return NodeWithPosition(
*min(possible, key=lambda x: tuple(map(sub, position, x[0])))
)
|
[
"def",
"calculate_infixop",
"(",
"self",
",",
"node",
",",
"previous",
",",
"next_node",
")",
":",
"previous_position",
"=",
"(",
"previous",
".",
"last_line",
",",
"previous",
".",
"last_col",
"-",
"1",
")",
"position",
"=",
"(",
"next_node",
".",
"first_line",
",",
"next_node",
".",
"first_col",
"+",
"1",
")",
"possible",
"=",
"[",
"]",
"for",
"ch",
"in",
"OPERATORS",
"[",
"node",
".",
"__class__",
"]",
":",
"try",
":",
"pos",
"=",
"self",
".",
"operators",
"[",
"ch",
"]",
".",
"find_previous",
"(",
"position",
")",
"if",
"previous_position",
"<",
"pos",
"[",
"1",
"]",
"<",
"position",
":",
"possible",
".",
"append",
"(",
"pos",
")",
"except",
"KeyError",
":",
"pass",
"if",
"not",
"possible",
":",
"raise",
"ValueError",
"(",
"\"not a single {} between {} and {}\"",
".",
"format",
"(",
"OPERATORS",
"[",
"node",
".",
"__class__",
"]",
",",
"previous_position",
",",
"position",
")",
")",
"return",
"NodeWithPosition",
"(",
"*",
"min",
"(",
"possible",
",",
"key",
"=",
"lambda",
"x",
":",
"tuple",
"(",
"map",
"(",
"sub",
",",
"position",
",",
"x",
"[",
"0",
"]",
")",
")",
")",
")"
] |
Create new node for infixop
|
[
"Create",
"new",
"node",
"for",
"infixop"
] |
497c88c66b451ff2cd7354be1af070c92e119f41
|
https://github.com/JoaoFelipe/pyposast/blob/497c88c66b451ff2cd7354be1af070c92e119f41/pyposast/visitor.py#L131-L150
|
238,264
|
JoaoFelipe/pyposast
|
pyposast/visitor.py
|
LineProvenanceVisitor.calculate_unaryop
|
def calculate_unaryop(self, node, next_node):
"""Create new node for unaryop"""
position = (next_node.first_line, next_node.first_col + 1)
possible = []
for ch in OPERATORS[node.__class__]:
try:
pos = self.operators[ch].find_previous(position)
if pos[1] < position:
possible.append(pos)
except KeyError:
pass
return NodeWithPosition(
*min(possible, key=lambda x: tuple(map(sub, position, x[0])))
)
|
python
|
def calculate_unaryop(self, node, next_node):
"""Create new node for unaryop"""
position = (next_node.first_line, next_node.first_col + 1)
possible = []
for ch in OPERATORS[node.__class__]:
try:
pos = self.operators[ch].find_previous(position)
if pos[1] < position:
possible.append(pos)
except KeyError:
pass
return NodeWithPosition(
*min(possible, key=lambda x: tuple(map(sub, position, x[0])))
)
|
[
"def",
"calculate_unaryop",
"(",
"self",
",",
"node",
",",
"next_node",
")",
":",
"position",
"=",
"(",
"next_node",
".",
"first_line",
",",
"next_node",
".",
"first_col",
"+",
"1",
")",
"possible",
"=",
"[",
"]",
"for",
"ch",
"in",
"OPERATORS",
"[",
"node",
".",
"__class__",
"]",
":",
"try",
":",
"pos",
"=",
"self",
".",
"operators",
"[",
"ch",
"]",
".",
"find_previous",
"(",
"position",
")",
"if",
"pos",
"[",
"1",
"]",
"<",
"position",
":",
"possible",
".",
"append",
"(",
"pos",
")",
"except",
"KeyError",
":",
"pass",
"return",
"NodeWithPosition",
"(",
"*",
"min",
"(",
"possible",
",",
"key",
"=",
"lambda",
"x",
":",
"tuple",
"(",
"map",
"(",
"sub",
",",
"position",
",",
"x",
"[",
"0",
"]",
")",
")",
")",
")"
] |
Create new node for unaryop
|
[
"Create",
"new",
"node",
"for",
"unaryop"
] |
497c88c66b451ff2cd7354be1af070c92e119f41
|
https://github.com/JoaoFelipe/pyposast/blob/497c88c66b451ff2cd7354be1af070c92e119f41/pyposast/visitor.py#L152-L166
|
238,265
|
JoaoFelipe/pyposast
|
pyposast/visitor.py
|
LineProvenanceVisitor.uid_something_colon
|
def uid_something_colon(self, node):
""" Creates op_pos for node from uid to colon """
node.op_pos = [
NodeWithPosition(node.uid, (node.first_line, node.first_col))
]
position = (node.body[0].first_line, node.body[0].first_col)
last, first = self.operators[':'].find_previous(position)
node.op_pos.append(NodeWithPosition(last, first))
return last
|
python
|
def uid_something_colon(self, node):
""" Creates op_pos for node from uid to colon """
node.op_pos = [
NodeWithPosition(node.uid, (node.first_line, node.first_col))
]
position = (node.body[0].first_line, node.body[0].first_col)
last, first = self.operators[':'].find_previous(position)
node.op_pos.append(NodeWithPosition(last, first))
return last
|
[
"def",
"uid_something_colon",
"(",
"self",
",",
"node",
")",
":",
"node",
".",
"op_pos",
"=",
"[",
"NodeWithPosition",
"(",
"node",
".",
"uid",
",",
"(",
"node",
".",
"first_line",
",",
"node",
".",
"first_col",
")",
")",
"]",
"position",
"=",
"(",
"node",
".",
"body",
"[",
"0",
"]",
".",
"first_line",
",",
"node",
".",
"body",
"[",
"0",
"]",
".",
"first_col",
")",
"last",
",",
"first",
"=",
"self",
".",
"operators",
"[",
"':'",
"]",
".",
"find_previous",
"(",
"position",
")",
"node",
".",
"op_pos",
".",
"append",
"(",
"NodeWithPosition",
"(",
"last",
",",
"first",
")",
")",
"return",
"last"
] |
Creates op_pos for node from uid to colon
|
[
"Creates",
"op_pos",
"for",
"node",
"from",
"uid",
"to",
"colon"
] |
497c88c66b451ff2cd7354be1af070c92e119f41
|
https://github.com/JoaoFelipe/pyposast/blob/497c88c66b451ff2cd7354be1af070c92e119f41/pyposast/visitor.py#L168-L176
|
238,266
|
JoaoFelipe/pyposast
|
pyposast/visitor.py
|
LineProvenanceVisitor.optional_else
|
def optional_else(self, node, last):
""" Create op_pos for optional else """
if node.orelse:
min_first_max_last(node, node.orelse[-1])
if 'else' in self.operators:
position = (node.orelse[0].first_line, node.orelse[0].first_col)
_, efirst = self.operators['else'].find_previous(position)
if efirst and efirst > last:
elast, _ = self.operators[':'].find_previous(position)
node.op_pos.append(NodeWithPosition(elast, efirst))
|
python
|
def optional_else(self, node, last):
""" Create op_pos for optional else """
if node.orelse:
min_first_max_last(node, node.orelse[-1])
if 'else' in self.operators:
position = (node.orelse[0].first_line, node.orelse[0].first_col)
_, efirst = self.operators['else'].find_previous(position)
if efirst and efirst > last:
elast, _ = self.operators[':'].find_previous(position)
node.op_pos.append(NodeWithPosition(elast, efirst))
|
[
"def",
"optional_else",
"(",
"self",
",",
"node",
",",
"last",
")",
":",
"if",
"node",
".",
"orelse",
":",
"min_first_max_last",
"(",
"node",
",",
"node",
".",
"orelse",
"[",
"-",
"1",
"]",
")",
"if",
"'else'",
"in",
"self",
".",
"operators",
":",
"position",
"=",
"(",
"node",
".",
"orelse",
"[",
"0",
"]",
".",
"first_line",
",",
"node",
".",
"orelse",
"[",
"0",
"]",
".",
"first_col",
")",
"_",
",",
"efirst",
"=",
"self",
".",
"operators",
"[",
"'else'",
"]",
".",
"find_previous",
"(",
"position",
")",
"if",
"efirst",
"and",
"efirst",
">",
"last",
":",
"elast",
",",
"_",
"=",
"self",
".",
"operators",
"[",
"':'",
"]",
".",
"find_previous",
"(",
"position",
")",
"node",
".",
"op_pos",
".",
"append",
"(",
"NodeWithPosition",
"(",
"elast",
",",
"efirst",
")",
")"
] |
Create op_pos for optional else
|
[
"Create",
"op_pos",
"for",
"optional",
"else"
] |
497c88c66b451ff2cd7354be1af070c92e119f41
|
https://github.com/JoaoFelipe/pyposast/blob/497c88c66b451ff2cd7354be1af070c92e119f41/pyposast/visitor.py#L178-L187
|
238,267
|
JoaoFelipe/pyposast
|
pyposast/visitor.py
|
LineProvenanceVisitor.comma_separated_list
|
def comma_separated_list(self, node, subnodes):
"""Process comma separated list """
for item in subnodes:
position = (item.last_line, item.last_col)
first, last = find_next_comma(self.lcode, position)
if first: # comma exists
node.op_pos.append(NodeWithPosition(last, first))
|
python
|
def comma_separated_list(self, node, subnodes):
"""Process comma separated list """
for item in subnodes:
position = (item.last_line, item.last_col)
first, last = find_next_comma(self.lcode, position)
if first: # comma exists
node.op_pos.append(NodeWithPosition(last, first))
|
[
"def",
"comma_separated_list",
"(",
"self",
",",
"node",
",",
"subnodes",
")",
":",
"for",
"item",
"in",
"subnodes",
":",
"position",
"=",
"(",
"item",
".",
"last_line",
",",
"item",
".",
"last_col",
")",
"first",
",",
"last",
"=",
"find_next_comma",
"(",
"self",
".",
"lcode",
",",
"position",
")",
"if",
"first",
":",
"# comma exists",
"node",
".",
"op_pos",
".",
"append",
"(",
"NodeWithPosition",
"(",
"last",
",",
"first",
")",
")"
] |
Process comma separated list
|
[
"Process",
"comma",
"separated",
"list"
] |
497c88c66b451ff2cd7354be1af070c92e119f41
|
https://github.com/JoaoFelipe/pyposast/blob/497c88c66b451ff2cd7354be1af070c92e119f41/pyposast/visitor.py#L189-L195
|
238,268
|
JoaoFelipe/pyposast
|
pyposast/visitor.py
|
LineProvenanceVisitor.find_next_comma
|
def find_next_comma(self, node, sub):
"""Find comma after sub andd add NodeWithPosition in node"""
position = (sub.last_line, sub.last_col)
first, last = find_next_comma(self.lcode, position)
if first: # comma exists
node.op_pos.append(NodeWithPosition(last, first))
|
python
|
def find_next_comma(self, node, sub):
"""Find comma after sub andd add NodeWithPosition in node"""
position = (sub.last_line, sub.last_col)
first, last = find_next_comma(self.lcode, position)
if first: # comma exists
node.op_pos.append(NodeWithPosition(last, first))
|
[
"def",
"find_next_comma",
"(",
"self",
",",
"node",
",",
"sub",
")",
":",
"position",
"=",
"(",
"sub",
".",
"last_line",
",",
"sub",
".",
"last_col",
")",
"first",
",",
"last",
"=",
"find_next_comma",
"(",
"self",
".",
"lcode",
",",
"position",
")",
"if",
"first",
":",
"# comma exists",
"node",
".",
"op_pos",
".",
"append",
"(",
"NodeWithPosition",
"(",
"last",
",",
"first",
")",
")"
] |
Find comma after sub andd add NodeWithPosition in node
|
[
"Find",
"comma",
"after",
"sub",
"andd",
"add",
"NodeWithPosition",
"in",
"node"
] |
497c88c66b451ff2cd7354be1af070c92e119f41
|
https://github.com/JoaoFelipe/pyposast/blob/497c88c66b451ff2cd7354be1af070c92e119f41/pyposast/visitor.py#L586-L591
|
238,269
|
brinkframework/brink
|
brink/models.py
|
Model.fields
|
def fields(self):
"""
Provides an iterable for all model fields.
"""
for attr, value in self._meta.fields.items():
if isinstance(value, Field):
yield attr, value
|
python
|
def fields(self):
"""
Provides an iterable for all model fields.
"""
for attr, value in self._meta.fields.items():
if isinstance(value, Field):
yield attr, value
|
[
"def",
"fields",
"(",
"self",
")",
":",
"for",
"attr",
",",
"value",
"in",
"self",
".",
"_meta",
".",
"fields",
".",
"items",
"(",
")",
":",
"if",
"isinstance",
"(",
"value",
",",
"Field",
")",
":",
"yield",
"attr",
",",
"value"
] |
Provides an iterable for all model fields.
|
[
"Provides",
"an",
"iterable",
"for",
"all",
"model",
"fields",
"."
] |
e837ee35a57140994b4e761cc756af172e5d5aa1
|
https://github.com/brinkframework/brink/blob/e837ee35a57140994b4e761cc756af172e5d5aa1/brink/models.py#L100-L106
|
238,270
|
brinkframework/brink
|
brink/models.py
|
Model.wrap
|
def wrap(self, data):
"""
Wraps and consumes an arbitrary dictionary into the model.
"""
for name, field in self.fields:
try:
self._state[name] = field.consume(
self._state.get(name, None), data[name])
except KeyError:
self._state[name] = None
|
python
|
def wrap(self, data):
"""
Wraps and consumes an arbitrary dictionary into the model.
"""
for name, field in self.fields:
try:
self._state[name] = field.consume(
self._state.get(name, None), data[name])
except KeyError:
self._state[name] = None
|
[
"def",
"wrap",
"(",
"self",
",",
"data",
")",
":",
"for",
"name",
",",
"field",
"in",
"self",
".",
"fields",
":",
"try",
":",
"self",
".",
"_state",
"[",
"name",
"]",
"=",
"field",
".",
"consume",
"(",
"self",
".",
"_state",
".",
"get",
"(",
"name",
",",
"None",
")",
",",
"data",
"[",
"name",
"]",
")",
"except",
"KeyError",
":",
"self",
".",
"_state",
"[",
"name",
"]",
"=",
"None"
] |
Wraps and consumes an arbitrary dictionary into the model.
|
[
"Wraps",
"and",
"consumes",
"an",
"arbitrary",
"dictionary",
"into",
"the",
"model",
"."
] |
e837ee35a57140994b4e761cc756af172e5d5aa1
|
https://github.com/brinkframework/brink/blob/e837ee35a57140994b4e761cc756af172e5d5aa1/brink/models.py#L120-L129
|
238,271
|
brinkframework/brink
|
brink/models.py
|
Model.validate
|
def validate(self):
"""
Validates all field values for the model.
"""
errors = {}
for name, field in self.fields:
try:
field.validate(self._state.get(name))
except Exception as e:
errors[name] = e
if len(errors) is not 0:
raise Exception(errors)
return True
|
python
|
def validate(self):
"""
Validates all field values for the model.
"""
errors = {}
for name, field in self.fields:
try:
field.validate(self._state.get(name))
except Exception as e:
errors[name] = e
if len(errors) is not 0:
raise Exception(errors)
return True
|
[
"def",
"validate",
"(",
"self",
")",
":",
"errors",
"=",
"{",
"}",
"for",
"name",
",",
"field",
"in",
"self",
".",
"fields",
":",
"try",
":",
"field",
".",
"validate",
"(",
"self",
".",
"_state",
".",
"get",
"(",
"name",
")",
")",
"except",
"Exception",
"as",
"e",
":",
"errors",
"[",
"name",
"]",
"=",
"e",
"if",
"len",
"(",
"errors",
")",
"is",
"not",
"0",
":",
"raise",
"Exception",
"(",
"errors",
")",
"return",
"True"
] |
Validates all field values for the model.
|
[
"Validates",
"all",
"field",
"values",
"for",
"the",
"model",
"."
] |
e837ee35a57140994b4e761cc756af172e5d5aa1
|
https://github.com/brinkframework/brink/blob/e837ee35a57140994b4e761cc756af172e5d5aa1/brink/models.py#L131-L147
|
238,272
|
brinkframework/brink
|
brink/models.py
|
Model.save
|
async def save(self):
"""
Persists the model to the database. If the model holds no primary key,
a new one will automatically created by RethinkDB. Otherwise it will
overwrite the current model persisted to the database.
"""
if hasattr(self, "before_save"):
self.before_save()
query = r.table(self.table_name)
if self._state.get("id"):
query = query \
.get(self._state.get("id")) \
.update(self.__db_repr, return_changes=True)
else:
query = query \
.insert(self.__db_repr, return_changes=True)
resp = await query.run(await conn.get())
try:
changes = resp["changes"]
if len(changes) > 0:
self.wrap(resp["changes"][0]["new_val"])
except KeyError:
raise UnexpectedDbResponse()
if resp["skipped"] > 0:
raise UnexpectedDbResponse(
"Model with id `%s` not found in the database." %
self._state.get("id"))
return self
|
python
|
async def save(self):
"""
Persists the model to the database. If the model holds no primary key,
a new one will automatically created by RethinkDB. Otherwise it will
overwrite the current model persisted to the database.
"""
if hasattr(self, "before_save"):
self.before_save()
query = r.table(self.table_name)
if self._state.get("id"):
query = query \
.get(self._state.get("id")) \
.update(self.__db_repr, return_changes=True)
else:
query = query \
.insert(self.__db_repr, return_changes=True)
resp = await query.run(await conn.get())
try:
changes = resp["changes"]
if len(changes) > 0:
self.wrap(resp["changes"][0]["new_val"])
except KeyError:
raise UnexpectedDbResponse()
if resp["skipped"] > 0:
raise UnexpectedDbResponse(
"Model with id `%s` not found in the database." %
self._state.get("id"))
return self
|
[
"async",
"def",
"save",
"(",
"self",
")",
":",
"if",
"hasattr",
"(",
"self",
",",
"\"before_save\"",
")",
":",
"self",
".",
"before_save",
"(",
")",
"query",
"=",
"r",
".",
"table",
"(",
"self",
".",
"table_name",
")",
"if",
"self",
".",
"_state",
".",
"get",
"(",
"\"id\"",
")",
":",
"query",
"=",
"query",
".",
"get",
"(",
"self",
".",
"_state",
".",
"get",
"(",
"\"id\"",
")",
")",
".",
"update",
"(",
"self",
".",
"__db_repr",
",",
"return_changes",
"=",
"True",
")",
"else",
":",
"query",
"=",
"query",
".",
"insert",
"(",
"self",
".",
"__db_repr",
",",
"return_changes",
"=",
"True",
")",
"resp",
"=",
"await",
"query",
".",
"run",
"(",
"await",
"conn",
".",
"get",
"(",
")",
")",
"try",
":",
"changes",
"=",
"resp",
"[",
"\"changes\"",
"]",
"if",
"len",
"(",
"changes",
")",
">",
"0",
":",
"self",
".",
"wrap",
"(",
"resp",
"[",
"\"changes\"",
"]",
"[",
"0",
"]",
"[",
"\"new_val\"",
"]",
")",
"except",
"KeyError",
":",
"raise",
"UnexpectedDbResponse",
"(",
")",
"if",
"resp",
"[",
"\"skipped\"",
"]",
">",
"0",
":",
"raise",
"UnexpectedDbResponse",
"(",
"\"Model with id `%s` not found in the database.\"",
"%",
"self",
".",
"_state",
".",
"get",
"(",
"\"id\"",
")",
")",
"return",
"self"
] |
Persists the model to the database. If the model holds no primary key,
a new one will automatically created by RethinkDB. Otherwise it will
overwrite the current model persisted to the database.
|
[
"Persists",
"the",
"model",
"to",
"the",
"database",
".",
"If",
"the",
"model",
"holds",
"no",
"primary",
"key",
"a",
"new",
"one",
"will",
"automatically",
"created",
"by",
"RethinkDB",
".",
"Otherwise",
"it",
"will",
"overwrite",
"the",
"current",
"model",
"persisted",
"to",
"the",
"database",
"."
] |
e837ee35a57140994b4e761cc756af172e5d5aa1
|
https://github.com/brinkframework/brink/blob/e837ee35a57140994b4e761cc756af172e5d5aa1/brink/models.py#L149-L184
|
238,273
|
brinkframework/brink
|
brink/models.py
|
Model.delete
|
async def delete(self):
"""
Deletes the model from the database.
"""
await r.table_name(self.table_name) \
.get(self.id) \
.delete() \
.run(await conn.get())
|
python
|
async def delete(self):
"""
Deletes the model from the database.
"""
await r.table_name(self.table_name) \
.get(self.id) \
.delete() \
.run(await conn.get())
|
[
"async",
"def",
"delete",
"(",
"self",
")",
":",
"await",
"r",
".",
"table_name",
"(",
"self",
".",
"table_name",
")",
".",
"get",
"(",
"self",
".",
"id",
")",
".",
"delete",
"(",
")",
".",
"run",
"(",
"await",
"conn",
".",
"get",
"(",
")",
")"
] |
Deletes the model from the database.
|
[
"Deletes",
"the",
"model",
"from",
"the",
"database",
"."
] |
e837ee35a57140994b4e761cc756af172e5d5aa1
|
https://github.com/brinkframework/brink/blob/e837ee35a57140994b4e761cc756af172e5d5aa1/brink/models.py#L186-L193
|
238,274
|
Bystroushaak/pyDHTMLParser
|
src/dhtmlparser/__init__.py
|
_raw_split
|
def _raw_split(itxt):
"""
Parse HTML from text into array filled with tags end text.
Source code is little bit unintutive, because it is state machine parser.
For better understanding, look at http://bit.ly/1rXRcJj
Example::
>>> dhtmlparser._raw_split('<html><tag params="true"></html>')
['<html>', '<tag params="true">', '</html>']
Args:
itxt (str): Input HTML text, which will be parsed.
Returns:
list: List of strings (input splitted to tags and text).
"""
echr = ""
buff = ["", "", "", ""]
content = ""
array = []
next_state = 0
inside_tag = False
escaped = False
COMMENT_START = ["-", "!", "<"]
COMMENT_END = ["-", "-"]
gc.disable()
for c in itxt:
# content
if next_state == StateEnum.content:
if c == "<":
if content:
array.append(content)
content = c
next_state = StateEnum.tag
inside_tag = False
else:
content += c
# html tag
elif next_state == StateEnum.tag:
if c == ">":
array.append(content + c)
content = ""
next_state = StateEnum.content
elif c == "'" or c == '"':
echr = c
content += c
next_state = StateEnum.parameter
elif c == "-" and buff[:3] == COMMENT_START:
if content[:-3]:
array.append(content[:-3])
content = content[-3:] + c
next_state = StateEnum.comment
else:
if c == "<": # jump back into tag instead of content
array.append(content)
inside_tag = True
content = ""
content += c
# quotes "" / ''
elif next_state == StateEnum.parameter:
if c == echr and not escaped: # end of quotes
next_state = StateEnum.tag
# unescaped end of line - this is good for invalid HTML like
# <a href=something">..., because it allows recovery
if c == "\n" and not escaped and buff[0] == ">":
next_state = StateEnum.content
inside_tag = False
content += c
escaped = not escaped if c == "\\" else False
# html comments
elif next_state == StateEnum.comment:
if c == ">" and buff[:2] == COMMENT_END:
next_state = StateEnum.tag if inside_tag else StateEnum.content
inside_tag = False
array.append(content + c)
content = ""
else:
content += c
# rotate buffer
buff = _rotate_buff(buff)
buff[0] = c
gc.enable()
if content:
array.append(content)
return array
|
python
|
def _raw_split(itxt):
"""
Parse HTML from text into array filled with tags end text.
Source code is little bit unintutive, because it is state machine parser.
For better understanding, look at http://bit.ly/1rXRcJj
Example::
>>> dhtmlparser._raw_split('<html><tag params="true"></html>')
['<html>', '<tag params="true">', '</html>']
Args:
itxt (str): Input HTML text, which will be parsed.
Returns:
list: List of strings (input splitted to tags and text).
"""
echr = ""
buff = ["", "", "", ""]
content = ""
array = []
next_state = 0
inside_tag = False
escaped = False
COMMENT_START = ["-", "!", "<"]
COMMENT_END = ["-", "-"]
gc.disable()
for c in itxt:
# content
if next_state == StateEnum.content:
if c == "<":
if content:
array.append(content)
content = c
next_state = StateEnum.tag
inside_tag = False
else:
content += c
# html tag
elif next_state == StateEnum.tag:
if c == ">":
array.append(content + c)
content = ""
next_state = StateEnum.content
elif c == "'" or c == '"':
echr = c
content += c
next_state = StateEnum.parameter
elif c == "-" and buff[:3] == COMMENT_START:
if content[:-3]:
array.append(content[:-3])
content = content[-3:] + c
next_state = StateEnum.comment
else:
if c == "<": # jump back into tag instead of content
array.append(content)
inside_tag = True
content = ""
content += c
# quotes "" / ''
elif next_state == StateEnum.parameter:
if c == echr and not escaped: # end of quotes
next_state = StateEnum.tag
# unescaped end of line - this is good for invalid HTML like
# <a href=something">..., because it allows recovery
if c == "\n" and not escaped and buff[0] == ">":
next_state = StateEnum.content
inside_tag = False
content += c
escaped = not escaped if c == "\\" else False
# html comments
elif next_state == StateEnum.comment:
if c == ">" and buff[:2] == COMMENT_END:
next_state = StateEnum.tag if inside_tag else StateEnum.content
inside_tag = False
array.append(content + c)
content = ""
else:
content += c
# rotate buffer
buff = _rotate_buff(buff)
buff[0] = c
gc.enable()
if content:
array.append(content)
return array
|
[
"def",
"_raw_split",
"(",
"itxt",
")",
":",
"echr",
"=",
"\"\"",
"buff",
"=",
"[",
"\"\"",
",",
"\"\"",
",",
"\"\"",
",",
"\"\"",
"]",
"content",
"=",
"\"\"",
"array",
"=",
"[",
"]",
"next_state",
"=",
"0",
"inside_tag",
"=",
"False",
"escaped",
"=",
"False",
"COMMENT_START",
"=",
"[",
"\"-\"",
",",
"\"!\"",
",",
"\"<\"",
"]",
"COMMENT_END",
"=",
"[",
"\"-\"",
",",
"\"-\"",
"]",
"gc",
".",
"disable",
"(",
")",
"for",
"c",
"in",
"itxt",
":",
"# content",
"if",
"next_state",
"==",
"StateEnum",
".",
"content",
":",
"if",
"c",
"==",
"\"<\"",
":",
"if",
"content",
":",
"array",
".",
"append",
"(",
"content",
")",
"content",
"=",
"c",
"next_state",
"=",
"StateEnum",
".",
"tag",
"inside_tag",
"=",
"False",
"else",
":",
"content",
"+=",
"c",
"# html tag",
"elif",
"next_state",
"==",
"StateEnum",
".",
"tag",
":",
"if",
"c",
"==",
"\">\"",
":",
"array",
".",
"append",
"(",
"content",
"+",
"c",
")",
"content",
"=",
"\"\"",
"next_state",
"=",
"StateEnum",
".",
"content",
"elif",
"c",
"==",
"\"'\"",
"or",
"c",
"==",
"'\"'",
":",
"echr",
"=",
"c",
"content",
"+=",
"c",
"next_state",
"=",
"StateEnum",
".",
"parameter",
"elif",
"c",
"==",
"\"-\"",
"and",
"buff",
"[",
":",
"3",
"]",
"==",
"COMMENT_START",
":",
"if",
"content",
"[",
":",
"-",
"3",
"]",
":",
"array",
".",
"append",
"(",
"content",
"[",
":",
"-",
"3",
"]",
")",
"content",
"=",
"content",
"[",
"-",
"3",
":",
"]",
"+",
"c",
"next_state",
"=",
"StateEnum",
".",
"comment",
"else",
":",
"if",
"c",
"==",
"\"<\"",
":",
"# jump back into tag instead of content",
"array",
".",
"append",
"(",
"content",
")",
"inside_tag",
"=",
"True",
"content",
"=",
"\"\"",
"content",
"+=",
"c",
"# quotes \"\" / ''",
"elif",
"next_state",
"==",
"StateEnum",
".",
"parameter",
":",
"if",
"c",
"==",
"echr",
"and",
"not",
"escaped",
":",
"# end of quotes",
"next_state",
"=",
"StateEnum",
".",
"tag",
"# unescaped end of line - this is good for invalid HTML like",
"# <a href=something\">..., because it allows recovery",
"if",
"c",
"==",
"\"\\n\"",
"and",
"not",
"escaped",
"and",
"buff",
"[",
"0",
"]",
"==",
"\">\"",
":",
"next_state",
"=",
"StateEnum",
".",
"content",
"inside_tag",
"=",
"False",
"content",
"+=",
"c",
"escaped",
"=",
"not",
"escaped",
"if",
"c",
"==",
"\"\\\\\"",
"else",
"False",
"# html comments",
"elif",
"next_state",
"==",
"StateEnum",
".",
"comment",
":",
"if",
"c",
"==",
"\">\"",
"and",
"buff",
"[",
":",
"2",
"]",
"==",
"COMMENT_END",
":",
"next_state",
"=",
"StateEnum",
".",
"tag",
"if",
"inside_tag",
"else",
"StateEnum",
".",
"content",
"inside_tag",
"=",
"False",
"array",
".",
"append",
"(",
"content",
"+",
"c",
")",
"content",
"=",
"\"\"",
"else",
":",
"content",
"+=",
"c",
"# rotate buffer",
"buff",
"=",
"_rotate_buff",
"(",
"buff",
")",
"buff",
"[",
"0",
"]",
"=",
"c",
"gc",
".",
"enable",
"(",
")",
"if",
"content",
":",
"array",
".",
"append",
"(",
"content",
")",
"return",
"array"
] |
Parse HTML from text into array filled with tags end text.
Source code is little bit unintutive, because it is state machine parser.
For better understanding, look at http://bit.ly/1rXRcJj
Example::
>>> dhtmlparser._raw_split('<html><tag params="true"></html>')
['<html>', '<tag params="true">', '</html>']
Args:
itxt (str): Input HTML text, which will be parsed.
Returns:
list: List of strings (input splitted to tags and text).
|
[
"Parse",
"HTML",
"from",
"text",
"into",
"array",
"filled",
"with",
"tags",
"end",
"text",
"."
] |
4756f93dd048500b038ece2323fe26e46b6bfdea
|
https://github.com/Bystroushaak/pyDHTMLParser/blob/4756f93dd048500b038ece2323fe26e46b6bfdea/src/dhtmlparser/__init__.py#L43-L150
|
238,275
|
Bystroushaak/pyDHTMLParser
|
src/dhtmlparser/__init__.py
|
_indexOfEndTag
|
def _indexOfEndTag(istack):
"""
Go through `istack` and search endtag. Element at first index is considered
as opening tag.
Args:
istack (list): List of :class:`.HTMLElement` objects.
Returns:
int: Index of end tag or 0 if not found.
"""
if len(istack) <= 0:
return 0
if not istack[0].isOpeningTag():
return 0
cnt = 0
opener = istack[0]
for index, el in enumerate(istack[1:]):
if el.isOpeningTag() and \
el.getTagName().lower() == opener.getTagName().lower():
cnt += 1
elif el.isEndTagTo(opener):
if cnt == 0:
return index + 1
cnt -= 1
return 0
|
python
|
def _indexOfEndTag(istack):
"""
Go through `istack` and search endtag. Element at first index is considered
as opening tag.
Args:
istack (list): List of :class:`.HTMLElement` objects.
Returns:
int: Index of end tag or 0 if not found.
"""
if len(istack) <= 0:
return 0
if not istack[0].isOpeningTag():
return 0
cnt = 0
opener = istack[0]
for index, el in enumerate(istack[1:]):
if el.isOpeningTag() and \
el.getTagName().lower() == opener.getTagName().lower():
cnt += 1
elif el.isEndTagTo(opener):
if cnt == 0:
return index + 1
cnt -= 1
return 0
|
[
"def",
"_indexOfEndTag",
"(",
"istack",
")",
":",
"if",
"len",
"(",
"istack",
")",
"<=",
"0",
":",
"return",
"0",
"if",
"not",
"istack",
"[",
"0",
"]",
".",
"isOpeningTag",
"(",
")",
":",
"return",
"0",
"cnt",
"=",
"0",
"opener",
"=",
"istack",
"[",
"0",
"]",
"for",
"index",
",",
"el",
"in",
"enumerate",
"(",
"istack",
"[",
"1",
":",
"]",
")",
":",
"if",
"el",
".",
"isOpeningTag",
"(",
")",
"and",
"el",
".",
"getTagName",
"(",
")",
".",
"lower",
"(",
")",
"==",
"opener",
".",
"getTagName",
"(",
")",
".",
"lower",
"(",
")",
":",
"cnt",
"+=",
"1",
"elif",
"el",
".",
"isEndTagTo",
"(",
"opener",
")",
":",
"if",
"cnt",
"==",
"0",
":",
"return",
"index",
"+",
"1",
"cnt",
"-=",
"1",
"return",
"0"
] |
Go through `istack` and search endtag. Element at first index is considered
as opening tag.
Args:
istack (list): List of :class:`.HTMLElement` objects.
Returns:
int: Index of end tag or 0 if not found.
|
[
"Go",
"through",
"istack",
"and",
"search",
"endtag",
".",
"Element",
"at",
"first",
"index",
"is",
"considered",
"as",
"opening",
"tag",
"."
] |
4756f93dd048500b038ece2323fe26e46b6bfdea
|
https://github.com/Bystroushaak/pyDHTMLParser/blob/4756f93dd048500b038ece2323fe26e46b6bfdea/src/dhtmlparser/__init__.py#L153-L183
|
238,276
|
Bystroushaak/pyDHTMLParser
|
src/dhtmlparser/__init__.py
|
_parseDOM
|
def _parseDOM(istack):
"""
Recursively go through element array and create DOM.
Args:
istack (list): List of :class:`.HTMLElement` objects.
Returns:
list: DOM tree as list.
"""
ostack = []
end_tag_index = 0
def neither_nonpair_or_end_or_comment(el):
return not (el.isNonPairTag() or el.isEndTag() or el.isComment())
index = 0
while index < len(istack):
el = istack[index]
# check if this is pair tag
end_tag_index = _indexOfEndTag(istack[index:])
if end_tag_index == 0 and neither_nonpair_or_end_or_comment(el):
el.isNonPairTag(True)
if end_tag_index == 0:
if not el.isEndTag():
ostack.append(el)
else:
el.childs = _parseDOM(istack[index + 1: end_tag_index + index])
el.endtag = istack[end_tag_index + index] # reference to endtag
el.endtag.openertag = el
ostack.append(el)
ostack.append(el.endtag)
index = end_tag_index + index
index += 1
return ostack
|
python
|
def _parseDOM(istack):
"""
Recursively go through element array and create DOM.
Args:
istack (list): List of :class:`.HTMLElement` objects.
Returns:
list: DOM tree as list.
"""
ostack = []
end_tag_index = 0
def neither_nonpair_or_end_or_comment(el):
return not (el.isNonPairTag() or el.isEndTag() or el.isComment())
index = 0
while index < len(istack):
el = istack[index]
# check if this is pair tag
end_tag_index = _indexOfEndTag(istack[index:])
if end_tag_index == 0 and neither_nonpair_or_end_or_comment(el):
el.isNonPairTag(True)
if end_tag_index == 0:
if not el.isEndTag():
ostack.append(el)
else:
el.childs = _parseDOM(istack[index + 1: end_tag_index + index])
el.endtag = istack[end_tag_index + index] # reference to endtag
el.endtag.openertag = el
ostack.append(el)
ostack.append(el.endtag)
index = end_tag_index + index
index += 1
return ostack
|
[
"def",
"_parseDOM",
"(",
"istack",
")",
":",
"ostack",
"=",
"[",
"]",
"end_tag_index",
"=",
"0",
"def",
"neither_nonpair_or_end_or_comment",
"(",
"el",
")",
":",
"return",
"not",
"(",
"el",
".",
"isNonPairTag",
"(",
")",
"or",
"el",
".",
"isEndTag",
"(",
")",
"or",
"el",
".",
"isComment",
"(",
")",
")",
"index",
"=",
"0",
"while",
"index",
"<",
"len",
"(",
"istack",
")",
":",
"el",
"=",
"istack",
"[",
"index",
"]",
"# check if this is pair tag",
"end_tag_index",
"=",
"_indexOfEndTag",
"(",
"istack",
"[",
"index",
":",
"]",
")",
"if",
"end_tag_index",
"==",
"0",
"and",
"neither_nonpair_or_end_or_comment",
"(",
"el",
")",
":",
"el",
".",
"isNonPairTag",
"(",
"True",
")",
"if",
"end_tag_index",
"==",
"0",
":",
"if",
"not",
"el",
".",
"isEndTag",
"(",
")",
":",
"ostack",
".",
"append",
"(",
"el",
")",
"else",
":",
"el",
".",
"childs",
"=",
"_parseDOM",
"(",
"istack",
"[",
"index",
"+",
"1",
":",
"end_tag_index",
"+",
"index",
"]",
")",
"el",
".",
"endtag",
"=",
"istack",
"[",
"end_tag_index",
"+",
"index",
"]",
"# reference to endtag",
"el",
".",
"endtag",
".",
"openertag",
"=",
"el",
"ostack",
".",
"append",
"(",
"el",
")",
"ostack",
".",
"append",
"(",
"el",
".",
"endtag",
")",
"index",
"=",
"end_tag_index",
"+",
"index",
"index",
"+=",
"1",
"return",
"ostack"
] |
Recursively go through element array and create DOM.
Args:
istack (list): List of :class:`.HTMLElement` objects.
Returns:
list: DOM tree as list.
|
[
"Recursively",
"go",
"through",
"element",
"array",
"and",
"create",
"DOM",
"."
] |
4756f93dd048500b038ece2323fe26e46b6bfdea
|
https://github.com/Bystroushaak/pyDHTMLParser/blob/4756f93dd048500b038ece2323fe26e46b6bfdea/src/dhtmlparser/__init__.py#L186-L227
|
238,277
|
Bystroushaak/pyDHTMLParser
|
src/dhtmlparser/__init__.py
|
makeDoubleLinked
|
def makeDoubleLinked(dom, parent=None):
"""
Standard output from `dhtmlparser` is single-linked tree. This will make it
double-linked.
Args:
dom (obj): :class:`.HTMLElement` instance.
parent (obj, default None): Don't use this, it is used in recursive
call.
"""
dom.parent = parent
for child in dom.childs:
child.parent = dom
makeDoubleLinked(child, dom)
|
python
|
def makeDoubleLinked(dom, parent=None):
"""
Standard output from `dhtmlparser` is single-linked tree. This will make it
double-linked.
Args:
dom (obj): :class:`.HTMLElement` instance.
parent (obj, default None): Don't use this, it is used in recursive
call.
"""
dom.parent = parent
for child in dom.childs:
child.parent = dom
makeDoubleLinked(child, dom)
|
[
"def",
"makeDoubleLinked",
"(",
"dom",
",",
"parent",
"=",
"None",
")",
":",
"dom",
".",
"parent",
"=",
"parent",
"for",
"child",
"in",
"dom",
".",
"childs",
":",
"child",
".",
"parent",
"=",
"dom",
"makeDoubleLinked",
"(",
"child",
",",
"dom",
")"
] |
Standard output from `dhtmlparser` is single-linked tree. This will make it
double-linked.
Args:
dom (obj): :class:`.HTMLElement` instance.
parent (obj, default None): Don't use this, it is used in recursive
call.
|
[
"Standard",
"output",
"from",
"dhtmlparser",
"is",
"single",
"-",
"linked",
"tree",
".",
"This",
"will",
"make",
"it",
"double",
"-",
"linked",
"."
] |
4756f93dd048500b038ece2323fe26e46b6bfdea
|
https://github.com/Bystroushaak/pyDHTMLParser/blob/4756f93dd048500b038ece2323fe26e46b6bfdea/src/dhtmlparser/__init__.py#L266-L280
|
238,278
|
Bystroushaak/pyDHTMLParser
|
src/dhtmlparser/__init__.py
|
removeTags
|
def removeTags(dom):
"""
Remove all tags from `dom` and obtain plaintext representation.
Args:
dom (str, obj, array): str, HTMLElement instance or array of elements.
Returns:
str: Plain string without tags.
"""
# python 2 / 3 shill
try:
string_type = basestring
except NameError:
string_type = str
# initialize stack with proper value (based on dom parameter)
element_stack = None
if type(dom) in [list, tuple]:
element_stack = dom
elif isinstance(dom, HTMLElement):
element_stack = dom.childs if dom.isTag() else [dom]
elif isinstance(dom, string_type):
element_stack = parseString(dom).childs
else:
element_stack = dom
# remove all tags
output = ""
while element_stack:
el = element_stack.pop(0)
if not (el.isTag() or el.isComment() or not el.getTagName()):
output += el.__str__()
if el.childs:
element_stack = el.childs + element_stack
return output
|
python
|
def removeTags(dom):
"""
Remove all tags from `dom` and obtain plaintext representation.
Args:
dom (str, obj, array): str, HTMLElement instance or array of elements.
Returns:
str: Plain string without tags.
"""
# python 2 / 3 shill
try:
string_type = basestring
except NameError:
string_type = str
# initialize stack with proper value (based on dom parameter)
element_stack = None
if type(dom) in [list, tuple]:
element_stack = dom
elif isinstance(dom, HTMLElement):
element_stack = dom.childs if dom.isTag() else [dom]
elif isinstance(dom, string_type):
element_stack = parseString(dom).childs
else:
element_stack = dom
# remove all tags
output = ""
while element_stack:
el = element_stack.pop(0)
if not (el.isTag() or el.isComment() or not el.getTagName()):
output += el.__str__()
if el.childs:
element_stack = el.childs + element_stack
return output
|
[
"def",
"removeTags",
"(",
"dom",
")",
":",
"# python 2 / 3 shill",
"try",
":",
"string_type",
"=",
"basestring",
"except",
"NameError",
":",
"string_type",
"=",
"str",
"# initialize stack with proper value (based on dom parameter)",
"element_stack",
"=",
"None",
"if",
"type",
"(",
"dom",
")",
"in",
"[",
"list",
",",
"tuple",
"]",
":",
"element_stack",
"=",
"dom",
"elif",
"isinstance",
"(",
"dom",
",",
"HTMLElement",
")",
":",
"element_stack",
"=",
"dom",
".",
"childs",
"if",
"dom",
".",
"isTag",
"(",
")",
"else",
"[",
"dom",
"]",
"elif",
"isinstance",
"(",
"dom",
",",
"string_type",
")",
":",
"element_stack",
"=",
"parseString",
"(",
"dom",
")",
".",
"childs",
"else",
":",
"element_stack",
"=",
"dom",
"# remove all tags",
"output",
"=",
"\"\"",
"while",
"element_stack",
":",
"el",
"=",
"element_stack",
".",
"pop",
"(",
"0",
")",
"if",
"not",
"(",
"el",
".",
"isTag",
"(",
")",
"or",
"el",
".",
"isComment",
"(",
")",
"or",
"not",
"el",
".",
"getTagName",
"(",
")",
")",
":",
"output",
"+=",
"el",
".",
"__str__",
"(",
")",
"if",
"el",
".",
"childs",
":",
"element_stack",
"=",
"el",
".",
"childs",
"+",
"element_stack",
"return",
"output"
] |
Remove all tags from `dom` and obtain plaintext representation.
Args:
dom (str, obj, array): str, HTMLElement instance or array of elements.
Returns:
str: Plain string without tags.
|
[
"Remove",
"all",
"tags",
"from",
"dom",
"and",
"obtain",
"plaintext",
"representation",
"."
] |
4756f93dd048500b038ece2323fe26e46b6bfdea
|
https://github.com/Bystroushaak/pyDHTMLParser/blob/4756f93dd048500b038ece2323fe26e46b6bfdea/src/dhtmlparser/__init__.py#L283-L321
|
238,279
|
pip-services3-python/pip-services3-commons-python
|
pip_services3_commons/reflect/MethodReflector.py
|
MethodReflector.has_method
|
def has_method(obj, name):
"""
Checks if object has a method with specified name.
:param obj: an object to introspect.
:param name: a name of the method to check.
:return: true if the object has the method and false if it doesn't.
"""
if obj == None:
raise Exception("Object cannot be null")
if name == None:
raise Exception("Method name cannot be null")
name = name.lower()
for method_name in dir(obj):
if method_name.lower() != name:
continue
method = getattr(obj, method_name)
if MethodReflector._is_method(method, method_name):
return True
return False
|
python
|
def has_method(obj, name):
"""
Checks if object has a method with specified name.
:param obj: an object to introspect.
:param name: a name of the method to check.
:return: true if the object has the method and false if it doesn't.
"""
if obj == None:
raise Exception("Object cannot be null")
if name == None:
raise Exception("Method name cannot be null")
name = name.lower()
for method_name in dir(obj):
if method_name.lower() != name:
continue
method = getattr(obj, method_name)
if MethodReflector._is_method(method, method_name):
return True
return False
|
[
"def",
"has_method",
"(",
"obj",
",",
"name",
")",
":",
"if",
"obj",
"==",
"None",
":",
"raise",
"Exception",
"(",
"\"Object cannot be null\"",
")",
"if",
"name",
"==",
"None",
":",
"raise",
"Exception",
"(",
"\"Method name cannot be null\"",
")",
"name",
"=",
"name",
".",
"lower",
"(",
")",
"for",
"method_name",
"in",
"dir",
"(",
"obj",
")",
":",
"if",
"method_name",
".",
"lower",
"(",
")",
"!=",
"name",
":",
"continue",
"method",
"=",
"getattr",
"(",
"obj",
",",
"method_name",
")",
"if",
"MethodReflector",
".",
"_is_method",
"(",
"method",
",",
"method_name",
")",
":",
"return",
"True",
"return",
"False"
] |
Checks if object has a method with specified name.
:param obj: an object to introspect.
:param name: a name of the method to check.
:return: true if the object has the method and false if it doesn't.
|
[
"Checks",
"if",
"object",
"has",
"a",
"method",
"with",
"specified",
"name",
"."
] |
22cbbb3e91e49717f65c083d36147fdb07ba9e3b
|
https://github.com/pip-services3-python/pip-services3-commons-python/blob/22cbbb3e91e49717f65c083d36147fdb07ba9e3b/pip_services3_commons/reflect/MethodReflector.py#L42-L68
|
238,280
|
pip-services3-python/pip-services3-commons-python
|
pip_services3_commons/reflect/MethodReflector.py
|
MethodReflector.invoke_method
|
def invoke_method(obj, name, *args):
"""
Invokes an object method by its name with specified parameters.
:param obj: an object to invoke.
:param name: a name of the method to invoke.
:param args: a list of method arguments.
:return: the result of the method invocation or null if method returns void.
"""
if obj == None:
raise Exception("Object cannot be null")
if name == None:
raise Exception("Method name cannot be null")
name = name.lower()
try:
for method_name in dir(obj):
if method_name.lower() != name:
continue
method = getattr(obj, method_name)
if MethodReflector._is_method(method, method_name):
return method(*args)
except:
pass
return None
|
python
|
def invoke_method(obj, name, *args):
"""
Invokes an object method by its name with specified parameters.
:param obj: an object to invoke.
:param name: a name of the method to invoke.
:param args: a list of method arguments.
:return: the result of the method invocation or null if method returns void.
"""
if obj == None:
raise Exception("Object cannot be null")
if name == None:
raise Exception("Method name cannot be null")
name = name.lower()
try:
for method_name in dir(obj):
if method_name.lower() != name:
continue
method = getattr(obj, method_name)
if MethodReflector._is_method(method, method_name):
return method(*args)
except:
pass
return None
|
[
"def",
"invoke_method",
"(",
"obj",
",",
"name",
",",
"*",
"args",
")",
":",
"if",
"obj",
"==",
"None",
":",
"raise",
"Exception",
"(",
"\"Object cannot be null\"",
")",
"if",
"name",
"==",
"None",
":",
"raise",
"Exception",
"(",
"\"Method name cannot be null\"",
")",
"name",
"=",
"name",
".",
"lower",
"(",
")",
"try",
":",
"for",
"method_name",
"in",
"dir",
"(",
"obj",
")",
":",
"if",
"method_name",
".",
"lower",
"(",
")",
"!=",
"name",
":",
"continue",
"method",
"=",
"getattr",
"(",
"obj",
",",
"method_name",
")",
"if",
"MethodReflector",
".",
"_is_method",
"(",
"method",
",",
"method_name",
")",
":",
"return",
"method",
"(",
"*",
"args",
")",
"except",
":",
"pass",
"return",
"None"
] |
Invokes an object method by its name with specified parameters.
:param obj: an object to invoke.
:param name: a name of the method to invoke.
:param args: a list of method arguments.
:return: the result of the method invocation or null if method returns void.
|
[
"Invokes",
"an",
"object",
"method",
"by",
"its",
"name",
"with",
"specified",
"parameters",
"."
] |
22cbbb3e91e49717f65c083d36147fdb07ba9e3b
|
https://github.com/pip-services3-python/pip-services3-commons-python/blob/22cbbb3e91e49717f65c083d36147fdb07ba9e3b/pip_services3_commons/reflect/MethodReflector.py#L72-L103
|
238,281
|
pip-services3-python/pip-services3-commons-python
|
pip_services3_commons/reflect/MethodReflector.py
|
MethodReflector.get_method_names
|
def get_method_names(obj):
"""
Gets names of all methods implemented in specified object.
:param obj: an object to introspect.
:return: a list with method names.
"""
method_names = []
for method_name in dir(obj):
method = getattr(obj, method_name)
if MethodReflector._is_method(method, method_name):
method_names.append(method_name)
return method_names
|
python
|
def get_method_names(obj):
"""
Gets names of all methods implemented in specified object.
:param obj: an object to introspect.
:return: a list with method names.
"""
method_names = []
for method_name in dir(obj):
method = getattr(obj, method_name)
if MethodReflector._is_method(method, method_name):
method_names.append(method_name)
return method_names
|
[
"def",
"get_method_names",
"(",
"obj",
")",
":",
"method_names",
"=",
"[",
"]",
"for",
"method_name",
"in",
"dir",
"(",
"obj",
")",
":",
"method",
"=",
"getattr",
"(",
"obj",
",",
"method_name",
")",
"if",
"MethodReflector",
".",
"_is_method",
"(",
"method",
",",
"method_name",
")",
":",
"method_names",
".",
"append",
"(",
"method_name",
")",
"return",
"method_names"
] |
Gets names of all methods implemented in specified object.
:param obj: an object to introspect.
:return: a list with method names.
|
[
"Gets",
"names",
"of",
"all",
"methods",
"implemented",
"in",
"specified",
"object",
"."
] |
22cbbb3e91e49717f65c083d36147fdb07ba9e3b
|
https://github.com/pip-services3-python/pip-services3-commons-python/blob/22cbbb3e91e49717f65c083d36147fdb07ba9e3b/pip_services3_commons/reflect/MethodReflector.py#L107-L124
|
238,282
|
humilis/humilis-lambdautils
|
lambdautils/kinesis.py
|
send_to_delivery_stream
|
def send_to_delivery_stream(events, stream_name):
"""Sends a list of events to a Firehose delivery stream."""
if not events:
logger.info("No events provided: nothing delivered to Firehose")
return
records = []
for event in events:
if not isinstance(event, str):
# csv events already have a newline
event = json.dumps(event) + "\n"
records.append({"Data": event})
firehose = boto3.client("firehose")
logger.info("Delivering %s records to Firehose stream '%s'",
len(records), stream_name)
resp = firehose.put_record_batch(
DeliveryStreamName=stream_name,
Records=records)
return resp
|
python
|
def send_to_delivery_stream(events, stream_name):
"""Sends a list of events to a Firehose delivery stream."""
if not events:
logger.info("No events provided: nothing delivered to Firehose")
return
records = []
for event in events:
if not isinstance(event, str):
# csv events already have a newline
event = json.dumps(event) + "\n"
records.append({"Data": event})
firehose = boto3.client("firehose")
logger.info("Delivering %s records to Firehose stream '%s'",
len(records), stream_name)
resp = firehose.put_record_batch(
DeliveryStreamName=stream_name,
Records=records)
return resp
|
[
"def",
"send_to_delivery_stream",
"(",
"events",
",",
"stream_name",
")",
":",
"if",
"not",
"events",
":",
"logger",
".",
"info",
"(",
"\"No events provided: nothing delivered to Firehose\"",
")",
"return",
"records",
"=",
"[",
"]",
"for",
"event",
"in",
"events",
":",
"if",
"not",
"isinstance",
"(",
"event",
",",
"str",
")",
":",
"# csv events already have a newline",
"event",
"=",
"json",
".",
"dumps",
"(",
"event",
")",
"+",
"\"\\n\"",
"records",
".",
"append",
"(",
"{",
"\"Data\"",
":",
"event",
"}",
")",
"firehose",
"=",
"boto3",
".",
"client",
"(",
"\"firehose\"",
")",
"logger",
".",
"info",
"(",
"\"Delivering %s records to Firehose stream '%s'\"",
",",
"len",
"(",
"records",
")",
",",
"stream_name",
")",
"resp",
"=",
"firehose",
".",
"put_record_batch",
"(",
"DeliveryStreamName",
"=",
"stream_name",
",",
"Records",
"=",
"records",
")",
"return",
"resp"
] |
Sends a list of events to a Firehose delivery stream.
|
[
"Sends",
"a",
"list",
"of",
"events",
"to",
"a",
"Firehose",
"delivery",
"stream",
"."
] |
58f75eb5ace23523c283708d56a9193181ea7e8e
|
https://github.com/humilis/humilis-lambdautils/blob/58f75eb5ace23523c283708d56a9193181ea7e8e/lambdautils/kinesis.py#L87-L105
|
238,283
|
humilis/humilis-lambdautils
|
lambdautils/kinesis.py
|
send_to_kinesis_stream
|
def send_to_kinesis_stream(events, stream_name, partition_key=None,
packer=None, serializer=json.dumps):
"""Sends events to a Kinesis stream."""
if not events:
logger.info("No events provided: nothing delivered to Firehose")
return
records = []
for event in events:
if not partition_key:
partition_key_value = str(uuid.uuid4())
elif hasattr(partition_key, "__call__"):
partition_key_value = partition_key(event)
else:
partition_key_value = partition_key
if not isinstance(event, str):
event = serializer(event)
if packer:
event = packer(event)
record = {"Data": event,
"PartitionKey": partition_key_value}
records.append(record)
kinesis = boto3.client("kinesis")
resp = kinesis.put_records(StreamName=stream_name, Records=records)
return resp
|
python
|
def send_to_kinesis_stream(events, stream_name, partition_key=None,
packer=None, serializer=json.dumps):
"""Sends events to a Kinesis stream."""
if not events:
logger.info("No events provided: nothing delivered to Firehose")
return
records = []
for event in events:
if not partition_key:
partition_key_value = str(uuid.uuid4())
elif hasattr(partition_key, "__call__"):
partition_key_value = partition_key(event)
else:
partition_key_value = partition_key
if not isinstance(event, str):
event = serializer(event)
if packer:
event = packer(event)
record = {"Data": event,
"PartitionKey": partition_key_value}
records.append(record)
kinesis = boto3.client("kinesis")
resp = kinesis.put_records(StreamName=stream_name, Records=records)
return resp
|
[
"def",
"send_to_kinesis_stream",
"(",
"events",
",",
"stream_name",
",",
"partition_key",
"=",
"None",
",",
"packer",
"=",
"None",
",",
"serializer",
"=",
"json",
".",
"dumps",
")",
":",
"if",
"not",
"events",
":",
"logger",
".",
"info",
"(",
"\"No events provided: nothing delivered to Firehose\"",
")",
"return",
"records",
"=",
"[",
"]",
"for",
"event",
"in",
"events",
":",
"if",
"not",
"partition_key",
":",
"partition_key_value",
"=",
"str",
"(",
"uuid",
".",
"uuid4",
"(",
")",
")",
"elif",
"hasattr",
"(",
"partition_key",
",",
"\"__call__\"",
")",
":",
"partition_key_value",
"=",
"partition_key",
"(",
"event",
")",
"else",
":",
"partition_key_value",
"=",
"partition_key",
"if",
"not",
"isinstance",
"(",
"event",
",",
"str",
")",
":",
"event",
"=",
"serializer",
"(",
"event",
")",
"if",
"packer",
":",
"event",
"=",
"packer",
"(",
"event",
")",
"record",
"=",
"{",
"\"Data\"",
":",
"event",
",",
"\"PartitionKey\"",
":",
"partition_key_value",
"}",
"records",
".",
"append",
"(",
"record",
")",
"kinesis",
"=",
"boto3",
".",
"client",
"(",
"\"kinesis\"",
")",
"resp",
"=",
"kinesis",
".",
"put_records",
"(",
"StreamName",
"=",
"stream_name",
",",
"Records",
"=",
"records",
")",
"return",
"resp"
] |
Sends events to a Kinesis stream.
|
[
"Sends",
"events",
"to",
"a",
"Kinesis",
"stream",
"."
] |
58f75eb5ace23523c283708d56a9193181ea7e8e
|
https://github.com/humilis/humilis-lambdautils/blob/58f75eb5ace23523c283708d56a9193181ea7e8e/lambdautils/kinesis.py#L108-L136
|
238,284
|
jonafato/Flask-Copilot
|
flask_copilot/__init__.py
|
Copilot.init_app
|
def init_app(self, app):
"""Register the extension with the application.
Args:
app (flask.Flask): The application to register with.
"""
app.url_rule_class = partial(NavigationRule, copilot=self)
app.context_processor(self.inject_context)
|
python
|
def init_app(self, app):
"""Register the extension with the application.
Args:
app (flask.Flask): The application to register with.
"""
app.url_rule_class = partial(NavigationRule, copilot=self)
app.context_processor(self.inject_context)
|
[
"def",
"init_app",
"(",
"self",
",",
"app",
")",
":",
"app",
".",
"url_rule_class",
"=",
"partial",
"(",
"NavigationRule",
",",
"copilot",
"=",
"self",
")",
"app",
".",
"context_processor",
"(",
"self",
".",
"inject_context",
")"
] |
Register the extension with the application.
Args:
app (flask.Flask): The application to register with.
|
[
"Register",
"the",
"extension",
"with",
"the",
"application",
"."
] |
aca87d2a981b964be75e2b3b68cd69f3949acff3
|
https://github.com/jonafato/Flask-Copilot/blob/aca87d2a981b964be75e2b3b68cd69f3949acff3/flask_copilot/__init__.py#L47-L54
|
238,285
|
jonafato/Flask-Copilot
|
flask_copilot/__init__.py
|
Copilot.inject_context
|
def inject_context(self):
"""Return a dict used for a template context."""
navbar = filter(lambda entry: entry.visible, self.navbar_entries)
return {'navbar': navbar}
|
python
|
def inject_context(self):
"""Return a dict used for a template context."""
navbar = filter(lambda entry: entry.visible, self.navbar_entries)
return {'navbar': navbar}
|
[
"def",
"inject_context",
"(",
"self",
")",
":",
"navbar",
"=",
"filter",
"(",
"lambda",
"entry",
":",
"entry",
".",
"visible",
",",
"self",
".",
"navbar_entries",
")",
"return",
"{",
"'navbar'",
":",
"navbar",
"}"
] |
Return a dict used for a template context.
|
[
"Return",
"a",
"dict",
"used",
"for",
"a",
"template",
"context",
"."
] |
aca87d2a981b964be75e2b3b68cd69f3949acff3
|
https://github.com/jonafato/Flask-Copilot/blob/aca87d2a981b964be75e2b3b68cd69f3949acff3/flask_copilot/__init__.py#L56-L59
|
238,286
|
jonafato/Flask-Copilot
|
flask_copilot/__init__.py
|
Copilot.register_entry
|
def register_entry(self, navbar_kwargs):
"""Register a navbar entry with the copilot.
Args:
navbar_kwargs (dict): Arguments passed to the
:class:`NavbarEntry` instance.
"""
# Add a new rule for each level in the path.
path = navbar_kwargs.pop('path')
# If a single object is used rather than an iterable (including
# a single string), wrap it before using.
if not hasattr(path, '__iter__') or isinstance(path, basestring):
path = [path]
entry_group = self.navbar_entries
# HACK: I'd like to intelligently replace the URL rule in the
# case where the intended rule is provided, but the function has
# already created a blank "placeholder" rule for it. There are
# probably nicer ways to approach this, but it works.
for name, is_last in iter_islast(path):
kwargs = deepcopy(navbar_kwargs)
kwargs['name'] = name
for existing_entry in entry_group:
# If there's an existing entry for this "link", use it
# instead of creating a new one. If this existing entry
# has no rule and this is the last item in ``path``, the
# rule was intended to be assigned to this entry, so
# overwrite the blank rule with the one provided via
# ``navbar_kwargs``.
if existing_entry.name == name:
entry = existing_entry
if is_last:
entry.endpoint = kwargs['endpoint']
break
else:
# If we can't find an existing entry, create one with a
# blank endpoint. If this rule is not the final one in
# the list, the endpoint was not intended for this, so
# don't assign it.
if not is_last:
kwargs['endpoint'] = None
entry = NavbarEntry(**kwargs)
entry_group.add(entry)
entry_group = entry.children
|
python
|
def register_entry(self, navbar_kwargs):
"""Register a navbar entry with the copilot.
Args:
navbar_kwargs (dict): Arguments passed to the
:class:`NavbarEntry` instance.
"""
# Add a new rule for each level in the path.
path = navbar_kwargs.pop('path')
# If a single object is used rather than an iterable (including
# a single string), wrap it before using.
if not hasattr(path, '__iter__') or isinstance(path, basestring):
path = [path]
entry_group = self.navbar_entries
# HACK: I'd like to intelligently replace the URL rule in the
# case where the intended rule is provided, but the function has
# already created a blank "placeholder" rule for it. There are
# probably nicer ways to approach this, but it works.
for name, is_last in iter_islast(path):
kwargs = deepcopy(navbar_kwargs)
kwargs['name'] = name
for existing_entry in entry_group:
# If there's an existing entry for this "link", use it
# instead of creating a new one. If this existing entry
# has no rule and this is the last item in ``path``, the
# rule was intended to be assigned to this entry, so
# overwrite the blank rule with the one provided via
# ``navbar_kwargs``.
if existing_entry.name == name:
entry = existing_entry
if is_last:
entry.endpoint = kwargs['endpoint']
break
else:
# If we can't find an existing entry, create one with a
# blank endpoint. If this rule is not the final one in
# the list, the endpoint was not intended for this, so
# don't assign it.
if not is_last:
kwargs['endpoint'] = None
entry = NavbarEntry(**kwargs)
entry_group.add(entry)
entry_group = entry.children
|
[
"def",
"register_entry",
"(",
"self",
",",
"navbar_kwargs",
")",
":",
"# Add a new rule for each level in the path.",
"path",
"=",
"navbar_kwargs",
".",
"pop",
"(",
"'path'",
")",
"# If a single object is used rather than an iterable (including",
"# a single string), wrap it before using.",
"if",
"not",
"hasattr",
"(",
"path",
",",
"'__iter__'",
")",
"or",
"isinstance",
"(",
"path",
",",
"basestring",
")",
":",
"path",
"=",
"[",
"path",
"]",
"entry_group",
"=",
"self",
".",
"navbar_entries",
"# HACK: I'd like to intelligently replace the URL rule in the",
"# case where the intended rule is provided, but the function has",
"# already created a blank \"placeholder\" rule for it. There are",
"# probably nicer ways to approach this, but it works.",
"for",
"name",
",",
"is_last",
"in",
"iter_islast",
"(",
"path",
")",
":",
"kwargs",
"=",
"deepcopy",
"(",
"navbar_kwargs",
")",
"kwargs",
"[",
"'name'",
"]",
"=",
"name",
"for",
"existing_entry",
"in",
"entry_group",
":",
"# If there's an existing entry for this \"link\", use it",
"# instead of creating a new one. If this existing entry",
"# has no rule and this is the last item in ``path``, the",
"# rule was intended to be assigned to this entry, so",
"# overwrite the blank rule with the one provided via",
"# ``navbar_kwargs``.",
"if",
"existing_entry",
".",
"name",
"==",
"name",
":",
"entry",
"=",
"existing_entry",
"if",
"is_last",
":",
"entry",
".",
"endpoint",
"=",
"kwargs",
"[",
"'endpoint'",
"]",
"break",
"else",
":",
"# If we can't find an existing entry, create one with a",
"# blank endpoint. If this rule is not the final one in",
"# the list, the endpoint was not intended for this, so",
"# don't assign it.",
"if",
"not",
"is_last",
":",
"kwargs",
"[",
"'endpoint'",
"]",
"=",
"None",
"entry",
"=",
"NavbarEntry",
"(",
"*",
"*",
"kwargs",
")",
"entry_group",
".",
"add",
"(",
"entry",
")",
"entry_group",
"=",
"entry",
".",
"children"
] |
Register a navbar entry with the copilot.
Args:
navbar_kwargs (dict): Arguments passed to the
:class:`NavbarEntry` instance.
|
[
"Register",
"a",
"navbar",
"entry",
"with",
"the",
"copilot",
"."
] |
aca87d2a981b964be75e2b3b68cd69f3949acff3
|
https://github.com/jonafato/Flask-Copilot/blob/aca87d2a981b964be75e2b3b68cd69f3949acff3/flask_copilot/__init__.py#L61-L104
|
238,287
|
cyrus-/cypy
|
cypy/np/__init__.py
|
DirectedAdjacencyMatrix.reversed
|
def reversed(self):
"""Create a connectivity matrix where each incoming edge becomes outgoing."""
n_rows = len(self)
reversed = DirectedAdjacencyMatrix(n_rows, self.dtype)
for r, row in enumerate(py.prog_iter(self)):
for c in row:
reversed[c].append(r)
return reversed
|
python
|
def reversed(self):
"""Create a connectivity matrix where each incoming edge becomes outgoing."""
n_rows = len(self)
reversed = DirectedAdjacencyMatrix(n_rows, self.dtype)
for r, row in enumerate(py.prog_iter(self)):
for c in row:
reversed[c].append(r)
return reversed
|
[
"def",
"reversed",
"(",
"self",
")",
":",
"n_rows",
"=",
"len",
"(",
"self",
")",
"reversed",
"=",
"DirectedAdjacencyMatrix",
"(",
"n_rows",
",",
"self",
".",
"dtype",
")",
"for",
"r",
",",
"row",
"in",
"enumerate",
"(",
"py",
".",
"prog_iter",
"(",
"self",
")",
")",
":",
"for",
"c",
"in",
"row",
":",
"reversed",
"[",
"c",
"]",
".",
"append",
"(",
"r",
")",
"return",
"reversed"
] |
Create a connectivity matrix where each incoming edge becomes outgoing.
|
[
"Create",
"a",
"connectivity",
"matrix",
"where",
"each",
"incoming",
"edge",
"becomes",
"outgoing",
"."
] |
04bb59e91fa314e8cf987743189c77a9b6bc371d
|
https://github.com/cyrus-/cypy/blob/04bb59e91fa314e8cf987743189c77a9b6bc371d/cypy/np/__init__.py#L80-L88
|
238,288
|
cnobile2012/pololu-motors
|
pololu/motors/crc7.py
|
crc7
|
def crc7(data):
"""
Compute CRC of a whole message.
"""
crc = 0
for c in data:
crc = CRC7_TABLE[crc ^ c]
return crc
|
python
|
def crc7(data):
"""
Compute CRC of a whole message.
"""
crc = 0
for c in data:
crc = CRC7_TABLE[crc ^ c]
return crc
|
[
"def",
"crc7",
"(",
"data",
")",
":",
"crc",
"=",
"0",
"for",
"c",
"in",
"data",
":",
"crc",
"=",
"CRC7_TABLE",
"[",
"crc",
"^",
"c",
"]",
"return",
"crc"
] |
Compute CRC of a whole message.
|
[
"Compute",
"CRC",
"of",
"a",
"whole",
"message",
"."
] |
453d2283a63cfe15cda96cad6dffa73372d52a7c
|
https://github.com/cnobile2012/pololu-motors/blob/453d2283a63cfe15cda96cad6dffa73372d52a7c/pololu/motors/crc7.py#L26-L35
|
238,289
|
crypto101/arthur
|
arthur/ui.py
|
_unhandledInput
|
def _unhandledInput(event, workbench, launcher):
"""Handles input events that weren't handled anywhere else.
"""
if event == "ctrl w":
raise urwid.ExitMainLoop()
elif event == "esc":
workbench.clear()
workbench.display(launcher)
return True
|
python
|
def _unhandledInput(event, workbench, launcher):
"""Handles input events that weren't handled anywhere else.
"""
if event == "ctrl w":
raise urwid.ExitMainLoop()
elif event == "esc":
workbench.clear()
workbench.display(launcher)
return True
|
[
"def",
"_unhandledInput",
"(",
"event",
",",
"workbench",
",",
"launcher",
")",
":",
"if",
"event",
"==",
"\"ctrl w\"",
":",
"raise",
"urwid",
".",
"ExitMainLoop",
"(",
")",
"elif",
"event",
"==",
"\"esc\"",
":",
"workbench",
".",
"clear",
"(",
")",
"workbench",
".",
"display",
"(",
"launcher",
")",
"return",
"True"
] |
Handles input events that weren't handled anywhere else.
|
[
"Handles",
"input",
"events",
"that",
"weren",
"t",
"handled",
"anywhere",
"else",
"."
] |
c32e693fb5af17eac010e3b20f7653ed6e11eb6a
|
https://github.com/crypto101/arthur/blob/c32e693fb5af17eac010e3b20f7653ed6e11eb6a/arthur/ui.py#L90-L99
|
238,290
|
crypto101/arthur
|
arthur/ui.py
|
_runPopUp
|
def _runPopUp(workbench, popUp):
"""Displays the pop-up on the workbench and gets a completion
notification deferred. When that fires, undisplay the pop-up and
return the result of the notification deferred verbatim.
"""
workbench.display(popUp)
d = popUp.notifyCompleted()
d.addCallback(_popUpCompleted, workbench)
return d
|
python
|
def _runPopUp(workbench, popUp):
"""Displays the pop-up on the workbench and gets a completion
notification deferred. When that fires, undisplay the pop-up and
return the result of the notification deferred verbatim.
"""
workbench.display(popUp)
d = popUp.notifyCompleted()
d.addCallback(_popUpCompleted, workbench)
return d
|
[
"def",
"_runPopUp",
"(",
"workbench",
",",
"popUp",
")",
":",
"workbench",
".",
"display",
"(",
"popUp",
")",
"d",
"=",
"popUp",
".",
"notifyCompleted",
"(",
")",
"d",
".",
"addCallback",
"(",
"_popUpCompleted",
",",
"workbench",
")",
"return",
"d"
] |
Displays the pop-up on the workbench and gets a completion
notification deferred. When that fires, undisplay the pop-up and
return the result of the notification deferred verbatim.
|
[
"Displays",
"the",
"pop",
"-",
"up",
"on",
"the",
"workbench",
"and",
"gets",
"a",
"completion",
"notification",
"deferred",
".",
"When",
"that",
"fires",
"undisplay",
"the",
"pop",
"-",
"up",
"and",
"return",
"the",
"result",
"of",
"the",
"notification",
"deferred",
"verbatim",
"."
] |
c32e693fb5af17eac010e3b20f7653ed6e11eb6a
|
https://github.com/crypto101/arthur/blob/c32e693fb5af17eac010e3b20f7653ed6e11eb6a/arthur/ui.py#L330-L340
|
238,291
|
crypto101/arthur
|
arthur/ui.py
|
Workbench.display
|
def display(self, tool):
"""Displays the given tool above the current layer, and sets the
title to its name.
"""
self._tools.append(tool)
self._justDisplay(tool)
|
python
|
def display(self, tool):
"""Displays the given tool above the current layer, and sets the
title to its name.
"""
self._tools.append(tool)
self._justDisplay(tool)
|
[
"def",
"display",
"(",
"self",
",",
"tool",
")",
":",
"self",
".",
"_tools",
".",
"append",
"(",
"tool",
")",
"self",
".",
"_justDisplay",
"(",
"tool",
")"
] |
Displays the given tool above the current layer, and sets the
title to its name.
|
[
"Displays",
"the",
"given",
"tool",
"above",
"the",
"current",
"layer",
"and",
"sets",
"the",
"title",
"to",
"its",
"name",
"."
] |
c32e693fb5af17eac010e3b20f7653ed6e11eb6a
|
https://github.com/crypto101/arthur/blob/c32e693fb5af17eac010e3b20f7653ed6e11eb6a/arthur/ui.py#L30-L36
|
238,292
|
crypto101/arthur
|
arthur/ui.py
|
Workbench._justDisplay
|
def _justDisplay(self, tool):
"""
Displays the given tool. Does not register it in the tools list.
"""
self.header.title.set_text(tool.name)
body, _options = self.widget.contents["body"]
overlay = urwid.Overlay(tool.widget, body, *tool.position)
self._surface = urwid.AttrMap(overlay, "foreground")
self.widget.contents["body"] = self._surface, None
|
python
|
def _justDisplay(self, tool):
"""
Displays the given tool. Does not register it in the tools list.
"""
self.header.title.set_text(tool.name)
body, _options = self.widget.contents["body"]
overlay = urwid.Overlay(tool.widget, body, *tool.position)
self._surface = urwid.AttrMap(overlay, "foreground")
self.widget.contents["body"] = self._surface, None
|
[
"def",
"_justDisplay",
"(",
"self",
",",
"tool",
")",
":",
"self",
".",
"header",
".",
"title",
".",
"set_text",
"(",
"tool",
".",
"name",
")",
"body",
",",
"_options",
"=",
"self",
".",
"widget",
".",
"contents",
"[",
"\"body\"",
"]",
"overlay",
"=",
"urwid",
".",
"Overlay",
"(",
"tool",
".",
"widget",
",",
"body",
",",
"*",
"tool",
".",
"position",
")",
"self",
".",
"_surface",
"=",
"urwid",
".",
"AttrMap",
"(",
"overlay",
",",
"\"foreground\"",
")",
"self",
".",
"widget",
".",
"contents",
"[",
"\"body\"",
"]",
"=",
"self",
".",
"_surface",
",",
"None"
] |
Displays the given tool. Does not register it in the tools list.
|
[
"Displays",
"the",
"given",
"tool",
".",
"Does",
"not",
"register",
"it",
"in",
"the",
"tools",
"list",
"."
] |
c32e693fb5af17eac010e3b20f7653ed6e11eb6a
|
https://github.com/crypto101/arthur/blob/c32e693fb5af17eac010e3b20f7653ed6e11eb6a/arthur/ui.py#L39-L48
|
238,293
|
crypto101/arthur
|
arthur/ui.py
|
Workbench.undisplay
|
def undisplay(self):
"""Undisplays the top tool.
This actually forces a complete re-render.
"""
self._tools.pop()
self._justClear()
for tool in self._tools:
self._justDisplay(tool)
|
python
|
def undisplay(self):
"""Undisplays the top tool.
This actually forces a complete re-render.
"""
self._tools.pop()
self._justClear()
for tool in self._tools:
self._justDisplay(tool)
|
[
"def",
"undisplay",
"(",
"self",
")",
":",
"self",
".",
"_tools",
".",
"pop",
"(",
")",
"self",
".",
"_justClear",
"(",
")",
"for",
"tool",
"in",
"self",
".",
"_tools",
":",
"self",
".",
"_justDisplay",
"(",
"tool",
")"
] |
Undisplays the top tool.
This actually forces a complete re-render.
|
[
"Undisplays",
"the",
"top",
"tool",
"."
] |
c32e693fb5af17eac010e3b20f7653ed6e11eb6a
|
https://github.com/crypto101/arthur/blob/c32e693fb5af17eac010e3b20f7653ed6e11eb6a/arthur/ui.py#L51-L59
|
238,294
|
crypto101/arthur
|
arthur/ui.py
|
_ButtonPopUp._makeButtons
|
def _makeButtons(self):
"""Makes buttons and wires them up.
"""
self.button = button = urwid.Button(u"OK")
urwid.connect_signal(button, "click", self._completed)
return [self.button]
|
python
|
def _makeButtons(self):
"""Makes buttons and wires them up.
"""
self.button = button = urwid.Button(u"OK")
urwid.connect_signal(button, "click", self._completed)
return [self.button]
|
[
"def",
"_makeButtons",
"(",
"self",
")",
":",
"self",
".",
"button",
"=",
"button",
"=",
"urwid",
".",
"Button",
"(",
"u\"OK\"",
")",
"urwid",
".",
"connect_signal",
"(",
"button",
",",
"\"click\"",
",",
"self",
".",
"_completed",
")",
"return",
"[",
"self",
".",
"button",
"]"
] |
Makes buttons and wires them up.
|
[
"Makes",
"buttons",
"and",
"wires",
"them",
"up",
"."
] |
c32e693fb5af17eac010e3b20f7653ed6e11eb6a
|
https://github.com/crypto101/arthur/blob/c32e693fb5af17eac010e3b20f7653ed6e11eb6a/arthur/ui.py#L226-L232
|
238,295
|
crypto101/arthur
|
arthur/ui.py
|
_Prompt._makeTextWidgets
|
def _makeTextWidgets(self):
"""Makes an editable prompt widget.
"""
self.prompt = urwid.Edit(self.promptText, multiline=False)
return [self.prompt]
|
python
|
def _makeTextWidgets(self):
"""Makes an editable prompt widget.
"""
self.prompt = urwid.Edit(self.promptText, multiline=False)
return [self.prompt]
|
[
"def",
"_makeTextWidgets",
"(",
"self",
")",
":",
"self",
".",
"prompt",
"=",
"urwid",
".",
"Edit",
"(",
"self",
".",
"promptText",
",",
"multiline",
"=",
"False",
")",
"return",
"[",
"self",
".",
"prompt",
"]"
] |
Makes an editable prompt widget.
|
[
"Makes",
"an",
"editable",
"prompt",
"widget",
"."
] |
c32e693fb5af17eac010e3b20f7653ed6e11eb6a
|
https://github.com/crypto101/arthur/blob/c32e693fb5af17eac010e3b20f7653ed6e11eb6a/arthur/ui.py#L306-L311
|
238,296
|
humilis/humilis-lambdautils
|
lambdautils/state.py
|
_secrets_table_name
|
def _secrets_table_name(environment=None, stage=None):
"""Name of the secrets table associated to a humilis deployment."""
if environment is None:
environment = os.environ.get("HUMILIS_ENVIRONMENT")
if stage is None:
stage = os.environ.get("HUMILIS_STAGE")
if environment:
if stage:
return "{environment}-{stage}-secrets".format(**locals())
else:
return "{environment}-secrets".format(**locals())
|
python
|
def _secrets_table_name(environment=None, stage=None):
"""Name of the secrets table associated to a humilis deployment."""
if environment is None:
environment = os.environ.get("HUMILIS_ENVIRONMENT")
if stage is None:
stage = os.environ.get("HUMILIS_STAGE")
if environment:
if stage:
return "{environment}-{stage}-secrets".format(**locals())
else:
return "{environment}-secrets".format(**locals())
|
[
"def",
"_secrets_table_name",
"(",
"environment",
"=",
"None",
",",
"stage",
"=",
"None",
")",
":",
"if",
"environment",
"is",
"None",
":",
"environment",
"=",
"os",
".",
"environ",
".",
"get",
"(",
"\"HUMILIS_ENVIRONMENT\"",
")",
"if",
"stage",
"is",
"None",
":",
"stage",
"=",
"os",
".",
"environ",
".",
"get",
"(",
"\"HUMILIS_STAGE\"",
")",
"if",
"environment",
":",
"if",
"stage",
":",
"return",
"\"{environment}-{stage}-secrets\"",
".",
"format",
"(",
"*",
"*",
"locals",
"(",
")",
")",
"else",
":",
"return",
"\"{environment}-secrets\"",
".",
"format",
"(",
"*",
"*",
"locals",
"(",
")",
")"
] |
Name of the secrets table associated to a humilis deployment.
|
[
"Name",
"of",
"the",
"secrets",
"table",
"associated",
"to",
"a",
"humilis",
"deployment",
"."
] |
58f75eb5ace23523c283708d56a9193181ea7e8e
|
https://github.com/humilis/humilis-lambdautils/blob/58f75eb5ace23523c283708d56a9193181ea7e8e/lambdautils/state.py#L27-L39
|
238,297
|
humilis/humilis-lambdautils
|
lambdautils/state.py
|
_state_table_name
|
def _state_table_name(environment=None, layer=None, stage=None):
"""The name of the state table associated to a humilis deployment."""
if environment is None:
# For backwards compatiblity
environment = os.environ.get("HUMILIS_ENVIRONMENT")
if layer is None:
layer = os.environ.get("HUMILIS_LAYER")
if stage is None:
stage = os.environ.get("HUMILIS_STAGE")
if environment:
if stage:
return "{environment}-{layer}-{stage}-state".format(
**locals())
else:
return "{environment}-{layer}-state".format(**locals())
|
python
|
def _state_table_name(environment=None, layer=None, stage=None):
"""The name of the state table associated to a humilis deployment."""
if environment is None:
# For backwards compatiblity
environment = os.environ.get("HUMILIS_ENVIRONMENT")
if layer is None:
layer = os.environ.get("HUMILIS_LAYER")
if stage is None:
stage = os.environ.get("HUMILIS_STAGE")
if environment:
if stage:
return "{environment}-{layer}-{stage}-state".format(
**locals())
else:
return "{environment}-{layer}-state".format(**locals())
|
[
"def",
"_state_table_name",
"(",
"environment",
"=",
"None",
",",
"layer",
"=",
"None",
",",
"stage",
"=",
"None",
")",
":",
"if",
"environment",
"is",
"None",
":",
"# For backwards compatiblity",
"environment",
"=",
"os",
".",
"environ",
".",
"get",
"(",
"\"HUMILIS_ENVIRONMENT\"",
")",
"if",
"layer",
"is",
"None",
":",
"layer",
"=",
"os",
".",
"environ",
".",
"get",
"(",
"\"HUMILIS_LAYER\"",
")",
"if",
"stage",
"is",
"None",
":",
"stage",
"=",
"os",
".",
"environ",
".",
"get",
"(",
"\"HUMILIS_STAGE\"",
")",
"if",
"environment",
":",
"if",
"stage",
":",
"return",
"\"{environment}-{layer}-{stage}-state\"",
".",
"format",
"(",
"*",
"*",
"locals",
"(",
")",
")",
"else",
":",
"return",
"\"{environment}-{layer}-state\"",
".",
"format",
"(",
"*",
"*",
"locals",
"(",
")",
")"
] |
The name of the state table associated to a humilis deployment.
|
[
"The",
"name",
"of",
"the",
"state",
"table",
"associated",
"to",
"a",
"humilis",
"deployment",
"."
] |
58f75eb5ace23523c283708d56a9193181ea7e8e
|
https://github.com/humilis/humilis-lambdautils/blob/58f75eb5ace23523c283708d56a9193181ea7e8e/lambdautils/state.py#L47-L63
|
238,298
|
humilis/humilis-lambdautils
|
lambdautils/state.py
|
_get_secret_from_vault
|
def _get_secret_from_vault(
key, environment=None, stage=None, namespace=None,
wait_exponential_multiplier=50, wait_exponential_max=5000,
stop_max_delay=10000):
"""Retrieves a secret from the secrets vault."""
# Get the encrypted secret from DynamoDB
table_name = _secrets_table_name(environment=environment, stage=stage)
if namespace:
key = "{}:{}".format(namespace, key)
if table_name is None:
logger.warning("Can't produce secrets table name: unable to retrieve "
"secret '{}'".format(key))
return
client = boto3.client('dynamodb')
logger.info("Retriving key '{}' from table '{}'".format(
key, table_name))
@retry(retry_on_exception=_is_critical_exception,
wait_exponential_multiplier=wait_exponential_multiplier,
wait_exponential_max=wait_exponential_max,
stop_max_delay=stop_max_delay)
def get_item():
try:
return client.get_item(
TableName=table_name,
Key={'id': {'S': key}}).get('Item', {}).get(
'value', {}).get('B')
except Exception as err:
if _is_dynamodb_critical_exception(err):
raise CriticalError(err)
else:
raise
encrypted = get_item()
if encrypted is None:
return
# Decrypt using KMS
client = boto3.client('kms')
try:
value = client.decrypt(CiphertextBlob=encrypted)['Plaintext'].decode()
except ClientError:
logger.error("KMS error when trying to decrypt secret")
traceback.print_exc()
return
try:
value = json.loads(value)
except (TypeError, ValueError):
# It's ok, the client should know how to deal with the value
pass
return value
|
python
|
def _get_secret_from_vault(
key, environment=None, stage=None, namespace=None,
wait_exponential_multiplier=50, wait_exponential_max=5000,
stop_max_delay=10000):
"""Retrieves a secret from the secrets vault."""
# Get the encrypted secret from DynamoDB
table_name = _secrets_table_name(environment=environment, stage=stage)
if namespace:
key = "{}:{}".format(namespace, key)
if table_name is None:
logger.warning("Can't produce secrets table name: unable to retrieve "
"secret '{}'".format(key))
return
client = boto3.client('dynamodb')
logger.info("Retriving key '{}' from table '{}'".format(
key, table_name))
@retry(retry_on_exception=_is_critical_exception,
wait_exponential_multiplier=wait_exponential_multiplier,
wait_exponential_max=wait_exponential_max,
stop_max_delay=stop_max_delay)
def get_item():
try:
return client.get_item(
TableName=table_name,
Key={'id': {'S': key}}).get('Item', {}).get(
'value', {}).get('B')
except Exception as err:
if _is_dynamodb_critical_exception(err):
raise CriticalError(err)
else:
raise
encrypted = get_item()
if encrypted is None:
return
# Decrypt using KMS
client = boto3.client('kms')
try:
value = client.decrypt(CiphertextBlob=encrypted)['Plaintext'].decode()
except ClientError:
logger.error("KMS error when trying to decrypt secret")
traceback.print_exc()
return
try:
value = json.loads(value)
except (TypeError, ValueError):
# It's ok, the client should know how to deal with the value
pass
return value
|
[
"def",
"_get_secret_from_vault",
"(",
"key",
",",
"environment",
"=",
"None",
",",
"stage",
"=",
"None",
",",
"namespace",
"=",
"None",
",",
"wait_exponential_multiplier",
"=",
"50",
",",
"wait_exponential_max",
"=",
"5000",
",",
"stop_max_delay",
"=",
"10000",
")",
":",
"# Get the encrypted secret from DynamoDB",
"table_name",
"=",
"_secrets_table_name",
"(",
"environment",
"=",
"environment",
",",
"stage",
"=",
"stage",
")",
"if",
"namespace",
":",
"key",
"=",
"\"{}:{}\"",
".",
"format",
"(",
"namespace",
",",
"key",
")",
"if",
"table_name",
"is",
"None",
":",
"logger",
".",
"warning",
"(",
"\"Can't produce secrets table name: unable to retrieve \"",
"\"secret '{}'\"",
".",
"format",
"(",
"key",
")",
")",
"return",
"client",
"=",
"boto3",
".",
"client",
"(",
"'dynamodb'",
")",
"logger",
".",
"info",
"(",
"\"Retriving key '{}' from table '{}'\"",
".",
"format",
"(",
"key",
",",
"table_name",
")",
")",
"@",
"retry",
"(",
"retry_on_exception",
"=",
"_is_critical_exception",
",",
"wait_exponential_multiplier",
"=",
"wait_exponential_multiplier",
",",
"wait_exponential_max",
"=",
"wait_exponential_max",
",",
"stop_max_delay",
"=",
"stop_max_delay",
")",
"def",
"get_item",
"(",
")",
":",
"try",
":",
"return",
"client",
".",
"get_item",
"(",
"TableName",
"=",
"table_name",
",",
"Key",
"=",
"{",
"'id'",
":",
"{",
"'S'",
":",
"key",
"}",
"}",
")",
".",
"get",
"(",
"'Item'",
",",
"{",
"}",
")",
".",
"get",
"(",
"'value'",
",",
"{",
"}",
")",
".",
"get",
"(",
"'B'",
")",
"except",
"Exception",
"as",
"err",
":",
"if",
"_is_dynamodb_critical_exception",
"(",
"err",
")",
":",
"raise",
"CriticalError",
"(",
"err",
")",
"else",
":",
"raise",
"encrypted",
"=",
"get_item",
"(",
")",
"if",
"encrypted",
"is",
"None",
":",
"return",
"# Decrypt using KMS",
"client",
"=",
"boto3",
".",
"client",
"(",
"'kms'",
")",
"try",
":",
"value",
"=",
"client",
".",
"decrypt",
"(",
"CiphertextBlob",
"=",
"encrypted",
")",
"[",
"'Plaintext'",
"]",
".",
"decode",
"(",
")",
"except",
"ClientError",
":",
"logger",
".",
"error",
"(",
"\"KMS error when trying to decrypt secret\"",
")",
"traceback",
".",
"print_exc",
"(",
")",
"return",
"try",
":",
"value",
"=",
"json",
".",
"loads",
"(",
"value",
")",
"except",
"(",
"TypeError",
",",
"ValueError",
")",
":",
"# It's ok, the client should know how to deal with the value",
"pass",
"return",
"value"
] |
Retrieves a secret from the secrets vault.
|
[
"Retrieves",
"a",
"secret",
"from",
"the",
"secrets",
"vault",
"."
] |
58f75eb5ace23523c283708d56a9193181ea7e8e
|
https://github.com/humilis/humilis-lambdautils/blob/58f75eb5ace23523c283708d56a9193181ea7e8e/lambdautils/state.py#L78-L134
|
238,299
|
humilis/humilis-lambdautils
|
lambdautils/state.py
|
get_secret
|
def get_secret(key, *args, **kwargs):
"""Retrieves a secret."""
env_value = os.environ.get(key.replace('.', '_').upper())
if not env_value:
# Backwards compatibility: the deprecated secrets vault
return _get_secret_from_vault(key, *args, **kwargs)
return env_value
|
python
|
def get_secret(key, *args, **kwargs):
"""Retrieves a secret."""
env_value = os.environ.get(key.replace('.', '_').upper())
if not env_value:
# Backwards compatibility: the deprecated secrets vault
return _get_secret_from_vault(key, *args, **kwargs)
return env_value
|
[
"def",
"get_secret",
"(",
"key",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"env_value",
"=",
"os",
".",
"environ",
".",
"get",
"(",
"key",
".",
"replace",
"(",
"'.'",
",",
"'_'",
")",
".",
"upper",
"(",
")",
")",
"if",
"not",
"env_value",
":",
"# Backwards compatibility: the deprecated secrets vault",
"return",
"_get_secret_from_vault",
"(",
"key",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"env_value"
] |
Retrieves a secret.
|
[
"Retrieves",
"a",
"secret",
"."
] |
58f75eb5ace23523c283708d56a9193181ea7e8e
|
https://github.com/humilis/humilis-lambdautils/blob/58f75eb5ace23523c283708d56a9193181ea7e8e/lambdautils/state.py#L137-L143
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.