Dataset schema (one row per function):

| Column | Type | Size |
|---|---|---|
| id | int32 | 0 - 252k |
| repo | string | 7 - 55 chars |
| path | string | 4 - 127 chars |
| func_name | string | 1 - 88 chars |
| original_string | string | 75 - 19.8k chars |
| language | string | 1 class |
| code | string | 75 - 19.8k chars |
| code_tokens | list | - |
| docstring | string | 3 - 17.3k chars |
| docstring_tokens | list | - |
| sha | string | 40 chars |
| url | string | 87 - 242 chars |
239,600
|
emilydolson/avida-spatial-tools
|
avidaspatial/transform_data.py
|
cluster_types
|
def cluster_types(types, max_clust=12):
    """
    Generates a dictionary mapping each binary number in types to an integer
    from 0 to max_clust. Hierarchical clustering is used to determine which
    binary numbers should map to the same integer.
    """
    if len(types) < max_clust:
        max_clust = len(types)
    # Do actual clustering
    cluster_dict = do_clustering(types, max_clust)
    cluster_ranks = rank_clusters(cluster_dict)
    # Create a dictionary mapping binary numbers to indices
    ranks = {}
    for key in cluster_dict:
        for typ in cluster_dict[key]:
            ranks[typ] = cluster_ranks[key]
    return ranks
|
python
|
7beb0166ccefad5fa722215b030ac2a53d62b59e
|
https://github.com/emilydolson/avida-spatial-tools/blob/7beb0166ccefad5fa722215b030ac2a53d62b59e/avidaspatial/transform_data.py#L178-L198
|
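A hypothetical usage sketch for cluster_types above; the import path follows the row's `path` field, and the exact grouping depends on the unshown do_clustering/rank_clusters helpers.

# Hypothetical usage; assumes the avidaspatial package is installed.
from avidaspatial.transform_data import cluster_types

types = ['0b110', '0b111', '0b1', '0b0']
ranks = cluster_types(types, max_clust=2)
# e.g. {'0b110': 1, '0b111': 1, '0b1': 0, '0b0': 0} -- the exact grouping
# depends on the hierarchical clustering performed by do_clustering()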
239,601
|
emilydolson/avida-spatial-tools
|
avidaspatial/transform_data.py
|
rank_types
|
def rank_types(types):
    """
    Takes a list of binary numbers and returns a dictionary mapping each
    binary number to an integer indicating its rank within the list.
    This is basically the better alternative to cluster_types: it works
    in that perfect world where we have few enough types to represent each
    as its own color.
    """
    include_null = '0b0' in types
    sorted_types = deepcopy(types)
    for i in range(len(sorted_types)):
        sorted_types[i] = int(sorted_types[i], 2)
    sorted_types.sort()
    ranks = {}
    for t in types:
        # int(t, 2) parses the '0b...' string safely (avoids eval on data)
        ranks[t] = sorted_types.index(int(t, 2)) + int(not include_null)
    return ranks
|
python
|
7beb0166ccefad5fa722215b030ac2a53d62b59e
|
https://github.com/emilydolson/avida-spatial-tools/blob/7beb0166ccefad5fa722215b030ac2a53d62b59e/avidaspatial/transform_data.py#L201-L220
|
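A small behavior sketch for rank_types, assuming the package is importable: '0b0', when present, always maps to 0; otherwise ranks start at 1 so that 0 stays reserved for the null type.

from avidaspatial.transform_data import rank_types

print(rank_types(['0b11', '0b1']))         # {'0b11': 2, '0b1': 1}
print(rank_types(['0b11', '0b1', '0b0']))  # {'0b11': 2, '0b1': 1, '0b0': 0}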
239,602
|
emilydolson/avida-spatial-tools
|
avidaspatial/transform_data.py
|
make_count_grid
|
def make_count_grid(data):
    """
    Takes a 2 or 3d grid of strings representing binary numbers.
    Returns a grid of the same dimensions in which each binary number has been
    replaced by an integer indicating the number of ones that were in that
    number.
    """
    data = deepcopy(data)
    for i in range(len(data)):
        for j in range(len(data[i])):
            for k in range(len(data[i][j])):
                if type(data[i][j][k]) is list:
                    for l in range(len(data[i][j][k])):
                        try:
                            # Replace each element so the grid keeps its shape
                            data[i][j][k][l] = data[i][j][k][l].count("1")
                        except AttributeError:
                            # Non-strings (e.g. sets) fall back to their length
                            data[i][j][k][l] = len(data[i][j][k][l])
                else:
                    try:
                        data[i][j][k] = data[i][j][k].count("1")
                    except AttributeError:
                        data[i][j][k] = len(data[i][j][k])
    return data
|
python
|
7beb0166ccefad5fa722215b030ac2a53d62b59e
|
https://github.com/emilydolson/avida-spatial-tools/blob/7beb0166ccefad5fa722215b030ac2a53d62b59e/avidaspatial/transform_data.py#L223-L248
|
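A worked example for make_count_grid, assuming the package is importable: each '0b...' string is replaced in place by its count of one-bits.

from avidaspatial.transform_data import make_count_grid

grid = [[["0b110", "0b1"]], [["0b0", "0b111"]]]
print(make_count_grid(grid))  # [[[2, 1]], [[0, 3]]]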
239,603
|
emilydolson/avida-spatial-tools
|
avidaspatial/transform_data.py
|
make_optimal_phenotype_grid
|
def make_optimal_phenotype_grid(environment, phenotypes):
    """
    Takes an EnvironmentFile object and a 2d array of phenotypes and returns
    a 2d array in which each location contains an index representing the
    distance between the phenotype in that location and the optimal phenotype
    for that location.
    This is achieved by using the task list in the EnvironmentFile to convert
    the phenotypes to sets of tasks, and comparing them to the sets of
    resources in the environment. So if the environment file that you created
    the EnvironmentFile object from for some reason doesn't contain all of the
    tasks, or doesn't contain them in the right order, this won't work. If this
    is the environment file that you used for the run of Avida that generated
    this data, you should be fine.
    """
    world_size = environment.size
    phenotypes = deepcopy(phenotypes)
    for i in range(world_size[1]):
        for j in range(world_size[0]):
            for k in range(len(phenotypes[i][j])):
                phenotype = phenotype_to_res_set(phenotypes[i][j][k],
                                                 environment.tasks)
                diff = len(environment[i][j].symmetric_difference(phenotype))
                phenotypes[i][j][k] = diff
    return phenotypes
|
python
|
7beb0166ccefad5fa722215b030ac2a53d62b59e
|
https://github.com/emilydolson/avida-spatial-tools/blob/7beb0166ccefad5fa722215b030ac2a53d62b59e/avidaspatial/transform_data.py#L251-L277
|
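The distance used by make_optimal_phenotype_grid is just the size of the symmetric difference between two task sets; a self-contained illustration with made-up task names:

optimal = {"not", "nand", "and"}   # resources available at a location
actual = {"nand", "and", "ornot"}  # tasks the local phenotype performs
print(len(optimal.symmetric_difference(actual)))  # 2: 'not' missing, 'ornot' extra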
239,604
|
Brazelton-Lab/bio_utils
|
bio_utils/iterators/fastq.py
|
fastq_iter
|
def fastq_iter(handle, header=None):
    """Iterate over FASTQ file and return FASTQ entries
    Args:
        handle (file): FASTQ file handle, can be any iterator so long as
            it returns subsequent "lines" of a FASTQ entry
        header (str): Header line of next FASTQ entry, if 'handle' has been
            partially read and you want to start iterating at the next entry,
            read the next FASTQ header and pass it to this variable when
            calling fastq_iter. See 'Examples.'
    Yields:
        FastqEntry: class containing all FASTQ data
    Raises:
        IOError: If FASTQ entry doesn't start with '@'
    Examples:
        The following two examples demonstrate how to use fastq_iter.
        Note: These doctests will not pass, examples are only in doctest
        format as per convention. bio_utils uses pytests for testing.
        >>> for entry in fastq_iter(open('test.fastq')):
        ...     print(entry.id)  # Print FASTQ id
        ...     print(entry.description)  # Print FASTQ description
        ...     print(entry.sequence)  # Print FASTQ sequence
        ...     print(entry.quality)  # Print FASTQ quality scores
        ...     print(entry.write())  # Print full FASTQ entry
        >>> fastq_handle = open('test.fastq')
        >>> next(fastq_handle)  # Skip first entry header
        >>> next(fastq_handle)  # Skip first entry sequence
        >>> next(fastq_handle)  # Skip line with '+'
        >>> next(fastq_handle)  # Skip first entry quality scores
        >>> first_line = next(fastq_handle)  # Read second entry header
        >>> for entry in fastq_iter(fastq_handle, header=first_line):
        ...     print(entry.id)  # Print FASTQ id
        ...     print(entry.description)  # Print FASTQ description
        ...     print(entry.sequence)  # Print FASTQ sequence
        ...     print(entry.quality)  # Print FASTQ quality scores
        ...     print(entry.write())  # Print full FASTQ entry
    """
    # Speed tricks: reduces function calls
    append = list.append
    join = str.join
    strip = str.strip
    next_line = next
    if header is None:
        header = next(handle)  # Read first FASTQ entry header
    # Check if input is text or bytestream
    if isinstance(header, bytes):
        def next_line(i):
            return next(i).decode('utf-8')
        header = strip(header.decode('utf-8'))
    else:
        header = strip(header)
    try:  # Manually construct a for loop to improve speed by using 'next'
        while True:  # Loop until StopIteration Exception raised
            line = strip(next_line(handle))
            data = FastqEntry()
            if not header[0] == '@':
                raise IOError('Bad FASTQ format: no "@" at beginning of line')
            try:
                data.id, data.description = header[1:].split(' ', 1)
            except ValueError:  # No description
                data.id = header[1:]
                data.description = ''
            # Obtain sequence
            sequence_list = []
            while line and not line[0] == '+' and not line[0] == '#':
                append(sequence_list, line)
                line = strip(next_line(handle))
            data.sequence = join('', sequence_list)
            line = strip(next_line(handle))  # Skip line containing only '+'
            # Obtain quality scores
            quality_list = []
            seq_len = len(data.sequence)
            qual_len = 0
            while line and qual_len < seq_len:
                append(quality_list, line)
                qual_len += len(line)
                line = strip(next_line(handle))  # Raises StopIteration at EOF
            header = line  # Store current line so it's not lost next iteration
            data.quality = join('', quality_list)
            yield data
    except StopIteration:  # Yield last FASTQ entry
        data.quality = join('', quality_list)
        yield data
|
python
|
5a7ddf13ee0bf4baaaeb6b2b99e01bf74aa132b7
|
https://github.com/Brazelton-Lab/bio_utils/blob/5a7ddf13ee0bf4baaaeb6b2b99e01bf74aa132b7/bio_utils/iterators/fastq.py#L77-L181
|
239,605
|
Brazelton-Lab/bio_utils
|
bio_utils/iterators/fastq.py
|
FastqEntry.write
|
def write(self):
    """Return FASTQ formatted string
    Returns:
        str: FASTQ formatted string containing entire FASTQ entry
    """
    if self.description:
        return '@{0} {1}{4}{2}{4}+{4}{3}{4}'.format(self.id,
                                                    self.description,
                                                    self.sequence,
                                                    self.quality,
                                                    os.linesep)
    else:
        return '@{0}{3}{1}{3}+{3}{2}{3}'.format(self.id,
                                                self.sequence,
                                                self.quality,
                                                os.linesep)
|
python
|
5a7ddf13ee0bf4baaaeb6b2b99e01bf74aa132b7
|
https://github.com/Brazelton-Lab/bio_utils/blob/5a7ddf13ee0bf4baaaeb6b2b99e01bf74aa132b7/bio_utils/iterators/fastq.py#L57-L74
|
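The template in write() above, shown standalone with made-up field values: with a description the entry renders as '@id description', then sequence, '+', and quality, each terminated by os.linesep.

import os

s = '@{0} {1}{4}{2}{4}+{4}{3}{4}'.format('read1', 'length=4', 'ACGT', 'IIII',
                                         os.linesep)
print(s)
# @read1 length=4
# ACGT
# +
# IIII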
239,606
|
crypto101/arthur
|
arthur/run.py
|
buildWorkbenchWithLauncher
|
def buildWorkbenchWithLauncher():
    """Builds a workbench.
    The workbench has a launcher with all of the default tools. The
    launcher will be displayed on the workbench.
    """
    workbench = ui.Workbench()
    tools = [exercises.SearchTool()]
    launcher = ui.Launcher(workbench, tools)
    workbench.display(launcher)
    return workbench, launcher
|
python
|
c32e693fb5af17eac010e3b20f7653ed6e11eb6a
|
https://github.com/crypto101/arthur/blob/c32e693fb5af17eac010e3b20f7653ed6e11eb6a/arthur/run.py#L7-L20
|
239,607
|
crypto101/arthur
|
arthur/run.py
|
buildMainLoop
|
def buildMainLoop(workbench, launcher, **kwargs):
    """Builds a main loop from the given workbench and launcher.
    The main loop will have the default palette, as well as the
    default unused key handler. The key handler will have a reference
    to the workbench and launcher so that it can clear the screen.
    The extra keyword arguments are passed to the main loop.
    """
    unhandledInput = partial(ui._unhandledInput,
                             workbench=workbench,
                             launcher=launcher)
    mainLoop = urwid.MainLoop(widget=workbench.widget,
                              palette=ui.DEFAULT_PALETTE,
                              unhandled_input=unhandledInput,
                              event_loop=urwid.TwistedEventLoop(),
                              **kwargs)
    return mainLoop
|
python
|
c32e693fb5af17eac010e3b20f7653ed6e11eb6a
|
https://github.com/crypto101/arthur/blob/c32e693fb5af17eac010e3b20f7653ed6e11eb6a/arthur/run.py#L23-L40
|
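A sketch of how the two builders above presumably fit together; arthur's real entry point may wire in more.

workbench, launcher = buildWorkbenchWithLauncher()
mainLoop = buildMainLoop(workbench, launcher)
mainLoop.run()  # urwid.MainLoop.run() blocks until the UI exits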
239,608
|
cdumay/cdumay-rest-client
|
src/cdumay_rest_client/errors.py
|
from_status
|
def from_status(status, message=None, extra=None):
    """ Try to create an error from status code
    :param int status: HTTP status
    :param str message: Body content
    :param dict extra: Additional info
    :return: An error
    :rtype: cdumay_rest_client.errors.Error
    """
    if status in HTTP_STATUS_CODES:
        return HTTP_STATUS_CODES[status](message=message, extra=extra)
    else:
        return Error(
            code=status, message=message if message else "Unknown Error",
            extra=extra
        )
|
python
|
bca34d45748cb8227a7492af5ccfead3d8ab435d
|
https://github.com/cdumay/cdumay-rest-client/blob/bca34d45748cb8227a7492af5ccfead3d8ab435d/src/cdumay_rest_client/errors.py#L335-L350
|
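A behavior sketch for from_status, assuming the package is installed: known codes resolve to their registered Error subclass, unknown codes fall back to a generic Error.

from cdumay_rest_client.errors import from_status

err = from_status(404, message="no such resource")  # a 404-specific subclass
fallback = from_status(599)  # generic Error, message "Unknown Error"
# attribute names assumed from the constructor call in the row above
print(fallback.code, fallback.message)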
239,609
|
cdumay/cdumay-rest-client
|
src/cdumay_rest_client/errors.py
|
from_response
|
def from_response(response, url):
    """ Try to create an error from an HTTP response
    :param requests.Response response: HTTP response
    :param str url: URL attained
    :return: An error
    :rtype: cdumay_rest_client.errors.Error
    """
    # noinspection PyBroadException
    try:
        data = response.json()
        if not isinstance(data, dict):
            return from_status(
                response.status_code, response.text,
                extra=dict(url=url, response=response.text)
            )
        code = data.get('code', response.status_code)
        if code in HTTP_STATUS_CODES:
            return HTTP_STATUS_CODES[code](**ErrorSchema().load(data))
        else:
            return Error(**ErrorSchema().load(data))
    except Exception:
        return from_status(
            response.status_code, response.text,
            extra=dict(url=url, response=response.text)
        )
|
python
|
bca34d45748cb8227a7492af5ccfead3d8ab435d
|
https://github.com/cdumay/cdumay-rest-client/blob/bca34d45748cb8227a7492af5ccfead3d8ab435d/src/cdumay_rest_client/errors.py#L353-L379
|
239,610
|
FujiMakoto/AgentML
|
agentml/__init__.py
|
AgentML.sort
|
def sort(self):
    """
    Sort triggers and their associated responses
    """
    # Sort triggers by word and character length first
    for priority, triggers in self._triggers.items():
        self._log.debug('Sorting priority {priority} triggers'.format(priority=priority))
        # Get and sort our atomic and wildcard patterns
        atomics = [trigger for trigger in triggers if trigger.pattern_is_atomic]
        wildcards = [trigger for trigger in triggers if not trigger.pattern_is_atomic]
        atomics = sorted(atomics, key=lambda trigger: (trigger.pattern_words, trigger.pattern_len), reverse=True)
        wildcards = sorted(wildcards, key=lambda trigger: (trigger.pattern_words, trigger.pattern_len),
                           reverse=True)
        # Replace our sorted triggers
        self._triggers[priority] = atomics + wildcards
    # Finally, sort triggers by priority
    self._sorted_triggers = []
    for triggers in [self._triggers[priority] for priority in sorted(self._triggers.keys(), reverse=True)]:
        for trigger in triggers:
            self._sorted_triggers.append(trigger)
    self.sorted = True
|
python
|
c8cb64b460d876666bf29ea2c682189874c7c403
|
https://github.com/FujiMakoto/AgentML/blob/c8cb64b460d876666bf29ea2c682189874c7c403/agentml/__init__.py#L147-L173
|
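The two-level sort key above, shown in isolation with made-up patterns: tuples of (word count, character length) sorted in reverse put the longest, most specific patterns first.

patterns = [('hello world', 2, 11), ('hi', 1, 2), ('good morning all', 3, 16)]
ordered = sorted(patterns, key=lambda p: (p[1], p[2]), reverse=True)
print([p[0] for p in ordered])  # ['good morning all', 'hello world', 'hi']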
239,611
|
FujiMakoto/AgentML
|
agentml/__init__.py
|
AgentML.interpreter
|
def interpreter(self):
    """
    Launch an AML interpreter session for testing
    """
    while True:
        message = input('[#] ')
        if message.lower().strip() == 'exit':
            break
        reply = self.get_reply('#interpreter#', message)
        if not reply:
            print('No reply received.', end='\n\n')
            continue
        # typewrite(reply, end='\n\n') TODO
        print(reply, end='\n\n')
|
python
|
c8cb64b460d876666bf29ea2c682189874c7c403
|
https://github.com/FujiMakoto/AgentML/blob/c8cb64b460d876666bf29ea2c682189874c7c403/agentml/__init__.py#L546-L561
|
239,612
|
TestInABox/stackInABox
|
stackinabox/util/responses/core.py
|
responses_callback
|
def responses_callback(request):
    """Responses Request Handler.
    Converts a call intercepted by Responses to
    the Stack-In-A-Box infrastructure
    :param request: request object
    :returns: tuple - (int, dict, string) containing:
        int - the HTTP response status code
        dict - the headers for the HTTP response
        string - HTTP string response
    """
    method = request.method
    headers = CaseInsensitiveDict()
    request_headers = CaseInsensitiveDict()
    request_headers.update(request.headers)
    request.headers = request_headers
    uri = request.url
    return StackInABox.call_into(method,
                                 request,
                                 uri,
                                 headers)
|
python
|
63ee457401e9a88d987f85f513eb512dcb12d984
|
https://github.com/TestInABox/stackInABox/blob/63ee457401e9a88d987f85f513eb512dcb12d984/stackinabox/util/responses/core.py#L19-L41
|
239,613
|
TestInABox/stackInABox
|
stackinabox/util/responses/core.py
|
registration
|
def registration(uri):
    """Responses handler registration.
    Registers a handler for a given URI with Responses
    so that it can be intercepted and handed to
    Stack-In-A-Box.
    :param uri: URI used for the base of the HTTP requests
    :returns: n/a
    """
    # log the URI that is used to access the Stack-In-A-Box services
    logger.debug('Registering Stack-In-A-Box at {0} under Python Responses'
                 .format(uri))
    # tell Stack-In-A-Box what URI to match with
    StackInABox.update_uri(uri)
    # Build the regex for the URI and register all HTTP verbs
    # with Responses; a raw string keeps the \d escape intact
    regex = re.compile(r'(http)?s?(://)?{0}:?(\d+)?/'.format(uri),
                       re.I)
    METHODS = [
        responses.DELETE,
        responses.GET,
        responses.HEAD,
        responses.OPTIONS,
        responses.PATCH,
        responses.POST,
        responses.PUT
    ]
    for method in METHODS:
        responses.add_callback(method,
                               regex,
                               callback=responses_callback)
|
python
|
63ee457401e9a88d987f85f513eb512dcb12d984
|
https://github.com/TestInABox/stackInABox/blob/63ee457401e9a88d987f85f513eb512dcb12d984/stackinabox/util/responses/core.py#L44-L78
|
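A hypothetical test wiring for registration(); responses.activate and requests are real APIs, while the URI and any service behind it are made up.

import requests
import responses

@responses.activate
def probe():
    registration('localhost')  # from the row above
    resp = requests.get('http://localhost/hello/')  # intercepted, no real socket
    print(resp.status_code)

probe()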
239,614
|
bwesterb/sarah
|
src/cacheDecorators.py
|
cacheOnSameArgs
|
def cacheOnSameArgs(timeout=None):
    """ Caches the return of the function until the specified time has
        elapsed or the arguments change. If timeout is None it will not
        be considered. """
    if isinstance(timeout, int):
        timeout = datetime.timedelta(0, timeout)

    def decorator(f):
        _cache = [None]

        def wrapper(*args, **kwargs):
            if _cache[0] is not None:
                cached_ret, dt, cached_args, cached_kwargs = _cache[0]
                if (timeout is not None and
                        dt + timeout <= datetime.datetime.now()):
                    _cache[0] = None
                if (cached_args, cached_kwargs) != (args, kwargs):
                    _cache[0] = None
            if _cache[0] is None:
                ret = f(*args, **kwargs)
                _cache[0] = (ret, datetime.datetime.now(), args, kwargs)
            return _cache[0][0]
        return wrapper
    return decorator
|
python
|
a9e46e875dfff1dc11255d714bb736e5eb697809
|
https://github.com/bwesterb/sarah/blob/a9e46e875dfff1dc11255d714bb736e5eb697809/src/cacheDecorators.py#L5-L28
|
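A self-contained usage of the decorator above: a repeat call with the same arguments returns the cached value, and changed arguments invalidate the cache.

import datetime  # required by cacheOnSameArgs

calls = []

@cacheOnSameArgs(timeout=60)
def slow_square(x):
    calls.append(x)
    return x * x

print(slow_square(3), slow_square(3), len(calls))  # 9 9 1  (one real call)
print(slow_square(4), len(calls))                  # 16 2   (new args, recomputed)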
239,615
|
baguette-io/baguette-messaging
|
farine/discovery.py
|
import_module
|
def import_module(module):
    """
    | Given a module `service`, try to import it.
    | It will autodiscover all the entrypoints
    | and add them in `ENTRYPOINTS`.
    :param module: The module's name to import.
    :type module: str
    :rtype: None
    :raises SystemExit: When the service/module to start is not found.
    """
    try:
        __import__('{0}.service'.format(module))
    except ImportError:
        LOGGER.error('No module/service found. Quit.')
        sys.exit(0)
|
python
|
8d1c4707ea7eace8617fed2d97df2fcc9d0cdee1
|
https://github.com/baguette-io/baguette-messaging/blob/8d1c4707ea7eace8617fed2d97df2fcc9d0cdee1/farine/discovery.py#L19-L34
|
239,616
|
baguette-io/baguette-messaging
|
farine/discovery.py
|
import_models
|
def import_models(module):
    """
    | Given a module `service`, try to import its models module.
    :param module: The module's name to import the models.
    :type module: str
    :rtype: list
    :returns: all the models defined.
    """
    try:
        module = importlib.import_module('{0}.models'.format(module))
    except ImportError:
        return []
    else:
        clsmembers = inspect.getmembers(
            module,
            lambda member: (inspect.isclass(member) and
                            member.__module__ == module.__name__))
        return [kls for name, kls in clsmembers]
|
python
|
8d1c4707ea7eace8617fed2d97df2fcc9d0cdee1
|
https://github.com/baguette-io/baguette-messaging/blob/8d1c4707ea7eace8617fed2d97df2fcc9d0cdee1/farine/discovery.py#L36-L51
|
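The discovery predicate used by import_models, demonstrated on a stdlib module: keep only classes actually defined in the module itself, not ones imported into it.

import importlib
import inspect

mod = importlib.import_module('collections')
clsmembers = inspect.getmembers(
    mod, lambda m: inspect.isclass(m) and m.__module__ == mod.__name__)
print([name for name, _ in clsmembers][:3])  # e.g. ['ChainMap', 'Counter', 'OrderedDict']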
239,617
|
baguette-io/baguette-messaging
|
farine/discovery.py
|
start
|
def start():
    """
    | Start all the registered entrypoints
    | that have been added to `ENTRYPOINTS`.
    :rtype: bool
    """
    pool = gevent.threadpool.ThreadPool(len(ENTRYPOINTS))
    for entrypoint, callback, args, kwargs in ENTRYPOINTS:
        cname = callback.__name__
        # 1. Retrieve the class which owns the callback
        for name, klass in inspect.getmembers(sys.modules[callback.__module__], inspect.isclass):
            if hasattr(klass, cname):
                service_name = name.lower()
                break
        # 2. Start the entrypoint
        callback = getattr(klass(), cname)
        kwargs.update({'service': service_name, 'callback': callback, 'callback_name': cname})
        LOGGER.info('Start service %s[%s].', service_name.capitalize(), cname)
        obj = entrypoint(*args, **kwargs)
        pool.spawn(obj.start, *args, **kwargs)
    pool.join()
    return True
|
python
|
8d1c4707ea7eace8617fed2d97df2fcc9d0cdee1
|
https://github.com/baguette-io/baguette-messaging/blob/8d1c4707ea7eace8617fed2d97df2fcc9d0cdee1/farine/discovery.py#L53-L75
|
239,618
|
TestInABox/stackInABox
|
stackinabox/stack.py
|
StackInABox.update_uri
|
def update_uri(cls, uri):
    """Set the URI of the StackInABox framework.
    :param uri: the base URI used to match the service.
    """
    logger.debug('Request: Update URI to {0}'.format(uri))
    local_store.instance.base_url = uri
|
python
|
63ee457401e9a88d987f85f513eb512dcb12d984
|
https://github.com/TestInABox/stackInABox/blob/63ee457401e9a88d987f85f513eb512dcb12d984/stackinabox/stack.py#L111-L118
|
239,619
|
TestInABox/stackInABox
|
stackinabox/stack.py
|
StackInABox.get_services_url
|
def get_services_url(url, base_url):
    """Get the URI from a given URL.
    :returns: URI within the URL
    """
    length = len(base_url)
    checks = ['http://', 'https://']
    for check in checks:
        if url.startswith(check):
            length = length + len(check)
            break
    result = url[length:]
    logger.debug('{0} from {1} equals {2}'
                 .format(base_url, url, result))
    return result
|
python
|
63ee457401e9a88d987f85f513eb512dcb12d984
|
https://github.com/TestInABox/stackInABox/blob/63ee457401e9a88d987f85f513eb512dcb12d984/stackinabox/stack.py#L149-L165
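A quick self-contained check of the stripping logic above; the helper name strip_base and the sample URLs are illustrative only, and the logging call is omitted so the sketch runs on its own:

def strip_base(url, base_url):
    # mirrors get_services_url: skip the scheme, then the base_url prefix
    length = len(base_url)
    for check in ('http://', 'https://'):
        if url.startswith(check):
            length += len(check)
            break
    return url[length:]

print(strip_base('http://localhost/keystone/v2.0/tokens', 'localhost'))
# -> '/keystone/v2.0/tokens'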
|
239,620
|
TestInABox/stackInABox
|
stackinabox/stack.py
|
StackInABox.base_url
|
def base_url(self, value):
"""Set the Base URL property, updating all associated services."""
logger.debug('StackInABox({0}): Updating URL from {1} to {2}'
.format(self.__id, self.__base_url, value))
self.__base_url = value
for k, v in six.iteritems(self.services):
matcher, service = v
service.base_url = StackInABox.__get_service_url(value,
service.name)
logger.debug('StackInABox({0}): Service {1} has url {2}'
.format(self.__id, service.name, service.base_url))
|
python
|
def base_url(self, value):
"""Set the Base URL property, updating all associated services."""
logger.debug('StackInABox({0}): Updating URL from {1} to {2}'
.format(self.__id, self.__base_url, value))
self.__base_url = value
for k, v in six.iteritems(self.services):
matcher, service = v
service.base_url = StackInABox.__get_service_url(value,
service.name)
logger.debug('StackInABox({0}): Service {1} has url {2}'
.format(self.__id, service.name, service.base_url))
|
[
"def",
"base_url",
"(",
"self",
",",
"value",
")",
":",
"logger",
".",
"debug",
"(",
"'StackInABox({0}): Updating URL from {1} to {2}'",
".",
"format",
"(",
"self",
".",
"__id",
",",
"self",
".",
"__base_url",
",",
"value",
")",
")",
"self",
".",
"__base_url",
"=",
"value",
"for",
"k",
",",
"v",
"in",
"six",
".",
"iteritems",
"(",
"self",
".",
"services",
")",
":",
"matcher",
",",
"service",
"=",
"v",
"service",
".",
"base_url",
"=",
"StackInABox",
".",
"__get_service_url",
"(",
"value",
",",
"service",
".",
"name",
")",
"logger",
".",
"debug",
"(",
"'StackInABox({0}): Service {1} has url {2}'",
".",
"format",
"(",
"self",
".",
"__id",
",",
"service",
".",
"name",
",",
"service",
".",
"base_url",
")",
")"
] |
Set the Base URL property, updating all associated services.
|
[
"Set",
"the",
"Base",
"URL",
"property",
"updating",
"all",
"associated",
"services",
"."
] |
63ee457401e9a88d987f85f513eb512dcb12d984
|
https://github.com/TestInABox/stackInABox/blob/63ee457401e9a88d987f85f513eb512dcb12d984/stackinabox/stack.py#L173-L183
|
239,621
|
TestInABox/stackInABox
|
stackinabox/stack.py
|
StackInABox.reset
|
def reset(self):
"""Reset StackInABox to a like-new state."""
logger.debug('StackInABox({0}): Resetting...'
.format(self.__id))
for k, v in six.iteritems(self.services):
matcher, service = v
logger.debug('StackInABox({0}): Resetting Service {1}'
.format(self.__id, service.name))
service.reset()
self.services = {}
self.holds = {}
logger.debug('StackInABox({0}): Reset Complete'
.format(self.__id))
|
python
|
def reset(self):
"""Reset StackInABox to a like-new state."""
logger.debug('StackInABox({0}): Resetting...'
.format(self.__id))
for k, v in six.iteritems(self.services):
matcher, service = v
logger.debug('StackInABox({0}): Resetting Service {1}'
.format(self.__id, service.name))
service.reset()
self.services = {}
self.holds = {}
logger.debug('StackInABox({0}): Reset Complete'
.format(self.__id))
|
[
"def",
"reset",
"(",
"self",
")",
":",
"logger",
".",
"debug",
"(",
"'StackInABox({0}): Resetting...'",
".",
"format",
"(",
"self",
".",
"__id",
")",
")",
"for",
"k",
",",
"v",
"in",
"six",
".",
"iteritems",
"(",
"self",
".",
"services",
")",
":",
"matcher",
",",
"service",
"=",
"v",
"logger",
".",
"debug",
"(",
"'StackInABox({0}): Resetting Service {1}'",
".",
"format",
"(",
"self",
".",
"__id",
",",
"service",
".",
"name",
")",
")",
"service",
".",
"reset",
"(",
")",
"self",
".",
"services",
"=",
"{",
"}",
"self",
".",
"holds",
"=",
"{",
"}",
"logger",
".",
"debug",
"(",
"'StackInABox({0}): Reset Complete'",
".",
"format",
"(",
"self",
".",
"__id",
")",
")"
] |
Reset StackInABox to a like-new state.
|
[
"Reset",
"StackInABox",
"to",
"a",
"like",
"-",
"new",
"state",
"."
] |
63ee457401e9a88d987f85f513eb512dcb12d984
|
https://github.com/TestInABox/stackInABox/blob/63ee457401e9a88d987f85f513eb512dcb12d984/stackinabox/stack.py#L185-L199
|
239,622
|
baguette-io/baguette-messaging
|
farine/mixins.py
|
EntryPointMixin.main_callback
|
def main_callback(self, *args, **kwargs):
"""
Main callback called when an event is received from an entry point.
    :returns: The result of calling the entry point's callback.
:raises NotImplementedError: When the entrypoint doesn't have the required attributes.
"""
if not self.callback:
raise NotImplementedError('Entrypoints must declare `callback`')
if not self.settings:
raise NotImplementedError('Entrypoints must declare `settings`')
self.callback.im_self.db = None
#1. Start all the middlewares
with self.debug(*args, **kwargs):
with self.database():
#2. `Real` callback
result = self.callback(*args, **kwargs)#pylint: disable=not-callable
return result
|
python
|
def main_callback(self, *args, **kwargs):
"""
Main callback called when an event is received from an entry point.
    :returns: The result of calling the entry point's callback.
:raises NotImplementedError: When the entrypoint doesn't have the required attributes.
"""
if not self.callback:
raise NotImplementedError('Entrypoints must declare `callback`')
if not self.settings:
raise NotImplementedError('Entrypoints must declare `settings`')
self.callback.im_self.db = None
#1. Start all the middlewares
with self.debug(*args, **kwargs):
with self.database():
#2. `Real` callback
result = self.callback(*args, **kwargs)#pylint: disable=not-callable
return result
|
[
"def",
"main_callback",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"not",
"self",
".",
"callback",
":",
"raise",
"NotImplementedError",
"(",
"'Entrypoints must declare `callback`'",
")",
"if",
"not",
"self",
".",
"settings",
":",
"raise",
"NotImplementedError",
"(",
"'Entrypoints must declare `settings`'",
")",
"self",
".",
"callback",
".",
"im_self",
".",
"db",
"=",
"None",
"#1. Start all the middlewares",
"with",
"self",
".",
"debug",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"with",
"self",
".",
"database",
"(",
")",
":",
"#2. `Real` callback",
"result",
"=",
"self",
".",
"callback",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"#pylint: disable=not-callable",
"return",
"result"
] |
Main callback called when an event is received from an entry point.
:returns: The result of calling the entry point's callback.
:raises NotImplementedError: When the entrypoint doesn't have the required attributes.
|
[
"Main",
"callback",
"called",
"when",
"an",
"event",
"is",
"received",
"from",
"an",
"entry",
"point",
"."
] |
8d1c4707ea7eace8617fed2d97df2fcc9d0cdee1
|
https://github.com/baguette-io/baguette-messaging/blob/8d1c4707ea7eace8617fed2d97df2fcc9d0cdee1/farine/mixins.py#L20-L40
|
239,623
|
fredericklussier/Lifepo4weredPy
|
lifepo4weredPy/functions.py
|
canRead
|
def canRead(variable):
"""
    Indicate whether an element can be read.
:param variable: the element to evaluate.
:type variable: Lifepo4weredEnum
:return: true when read access is available, otherwise false.
:rtype: bool
:raises ValueError: if parameter value is not a member of Lifepo4weredEnum.
"""
if variable not in variablesEnum:
raise ValueError('Use a lifepo4wered enum element as parameter.')
return lifepo4weredSO.access_lifepo4wered(variable.value, defines.ACCESS_READ)
|
python
|
def canRead(variable):
"""
    Indicate whether an element can be read.
:param variable: the element to evaluate.
:type variable: Lifepo4weredEnum
:return: true when read access is available, otherwise false.
:rtype: bool
:raises ValueError: if parameter value is not a member of Lifepo4weredEnum.
"""
if variable not in variablesEnum:
raise ValueError('Use a lifepo4wered enum element as parameter.')
return lifepo4weredSO.access_lifepo4wered(variable.value, defines.ACCESS_READ)
|
[
"def",
"canRead",
"(",
"variable",
")",
":",
"if",
"variable",
"not",
"in",
"variablesEnum",
":",
"raise",
"ValueError",
"(",
"'Use a lifepo4wered enum element as parameter.'",
")",
"return",
"lifepo4weredSO",
".",
"access_lifepo4wered",
"(",
"variable",
".",
"value",
",",
"defines",
".",
"ACCESS_READ",
")"
] |
Indicate whether an element can be read.
:param variable: the element to evaluate.
:type variable: Lifepo4weredEnum
:return: true when read access is available, otherwise false.
:rtype: bool
:raises ValueError: if parameter value is not a member of Lifepo4weredEnum.
|
[
"mention",
"if",
"an",
"element",
"can",
"be",
"read",
"."
] |
f5b3fedf896d0d616bf5ae603953e5541d493640
|
https://github.com/fredericklussier/Lifepo4weredPy/blob/f5b3fedf896d0d616bf5ae603953e5541d493640/lifepo4weredPy/functions.py#L12-L25
|
239,624
|
fredericklussier/Lifepo4weredPy
|
lifepo4weredPy/functions.py
|
canWrite
|
def canWrite(variable):
"""
    Indicate whether an element can be written.
:param variable: the element to evaluate.
:type variable: Lifepo4weredEnum
:return: true when write access is available, otherwise false
:rtype: bool
:raises ValueError: if parameter value is not a member of Lifepo4weredEnum
"""
if variable not in variablesEnum:
raise ValueError('Use a lifepo4wered enum element as parameter.')
return lifepo4weredSO.access_lifepo4wered(variable.value, defines.ACCESS_WRITE)
|
python
|
def canWrite(variable):
"""
    Indicate whether an element can be written.
:param variable: the element to evaluate.
:type variable: Lifepo4weredEnum
:return: true when write access is available, otherwise false
:rtype: bool
:raises ValueError: if parameter value is not a member of Lifepo4weredEnum
"""
if variable not in variablesEnum:
raise ValueError('Use a lifepo4wered enum element as parameter.')
return lifepo4weredSO.access_lifepo4wered(variable.value, defines.ACCESS_WRITE)
|
[
"def",
"canWrite",
"(",
"variable",
")",
":",
"if",
"variable",
"not",
"in",
"variablesEnum",
":",
"raise",
"ValueError",
"(",
"'Use a lifepo4wered enum element as parameter.'",
")",
"return",
"lifepo4weredSO",
".",
"access_lifepo4wered",
"(",
"variable",
".",
"value",
",",
"defines",
".",
"ACCESS_WRITE",
")"
] |
Indicate whether an element can be written.
:param variable: the element to evaluate.
:type variable: Lifepo4weredEnum
:return: true when write access is available, otherwise false
:rtype: bool
:raises ValueError: if parameter value is not a member of Lifepo4weredEnum
|
[
"mention",
"if",
"an",
"element",
"can",
"be",
"written",
"."
] |
f5b3fedf896d0d616bf5ae603953e5541d493640
|
https://github.com/fredericklussier/Lifepo4weredPy/blob/f5b3fedf896d0d616bf5ae603953e5541d493640/lifepo4weredPy/functions.py#L27-L40
|
239,625
|
fredericklussier/Lifepo4weredPy
|
lifepo4weredPy/functions.py
|
read
|
def read(variable):
"""
read an element from LiFePO4wered.
:param variable: the element to read.
:type variable: Lifepo4weredEnum
:return: the value of the element
:rtype: int
:raises ValueError: if parameter value is not a member of Lifepo4weredEnum
"""
if variable not in variablesEnum:
raise ValueError('Use a lifepo4wered enum element as read parameter.')
if canRead(variable):
return lifepo4weredSO.read_lifepo4wered(variable.value)
else:
raise RuntimeError('You cannot read {0} value, just write it'.format(variable.name))
|
python
|
def read(variable):
"""
read an element from LiFePO4wered.
:param variable: the element to read.
:type variable: Lifepo4weredEnum
:return: the value of the element
:rtype: int
:raises ValueError: if parameter value is not a member of Lifepo4weredEnum
"""
if variable not in variablesEnum:
raise ValueError('Use a lifepo4wered enum element as read parameter.')
if canRead(variable):
return lifepo4weredSO.read_lifepo4wered(variable.value)
else:
raise RuntimeError('You cannot read {0} value, just write it'.format(variable.name))
|
[
"def",
"read",
"(",
"variable",
")",
":",
"if",
"variable",
"not",
"in",
"variablesEnum",
":",
"raise",
"ValueError",
"(",
"'Use a lifepo4wered enum element as read parameter.'",
")",
"if",
"canRead",
"(",
"variable",
")",
":",
"return",
"lifepo4weredSO",
".",
"read_lifepo4wered",
"(",
"variable",
".",
"value",
")",
"else",
":",
"raise",
"RuntimeError",
"(",
"'You cannot read {0} value, just write it'",
".",
"format",
"(",
"variable",
".",
"name",
")",
")"
] |
read an element from LiFePO4wered.
:param variable: the element to read.
:type variable: Lifepo4weredEnum
:return: the value of the element
:rtype: int
:raises ValueError: if parameter value is not a member of Lifepo4weredEnum
|
[
"read",
"an",
"element",
"from",
"LiFePO4wered",
"."
] |
f5b3fedf896d0d616bf5ae603953e5541d493640
|
https://github.com/fredericklussier/Lifepo4weredPy/blob/f5b3fedf896d0d616bf5ae603953e5541d493640/lifepo4weredPy/functions.py#L42-L58
|
239,626
|
fredericklussier/Lifepo4weredPy
|
lifepo4weredPy/functions.py
|
write
|
def write(variable, value):
"""
write an element to LiFePO4wered.
:param variable: the element.
:type variable: Lifepo4weredEnum
:param int value: the value to write.
:return: the written value
:rtype: int
:raises ValueError: if variable parameter is not a member of Lifepo4weredEnum
    :raises TypeError: if value is not an int type
"""
if variable not in variablesEnum:
raise ValueError('Use a lifepo4wered enum element as write element.')
if isinstance(value, int) is False:
        raise TypeError('Use an int as value.')
if canWrite(variable):
return lifepo4weredSO.write_lifepo4wered(variable.value, value)
else:
raise RuntimeError('You cannot write {0} value, just read it'.format(variable.name))
|
python
|
def write(variable, value):
"""
write an element to LiFePO4wered.
:param variable: the element.
:type variable: Lifepo4weredEnum
:param int value: the value to write.
:return: the written value
:rtype: int
:raises ValueError: if variable parameter is not a member of Lifepo4weredEnum
    :raises TypeError: if value is not an int type
"""
if variable not in variablesEnum:
raise ValueError('Use a lifepo4wered enum element as write element.')
if isinstance(value, int) is False:
        raise TypeError('Use an int as value.')
if canWrite(variable):
return lifepo4weredSO.write_lifepo4wered(variable.value, value)
else:
raise RuntimeError('You cannot write {0} value, just read it'.format(variable.name))
|
[
"def",
"write",
"(",
"variable",
",",
"value",
")",
":",
"if",
"variable",
"not",
"in",
"variablesEnum",
":",
"raise",
"ValueError",
"(",
"'Use a lifepo4wered enum element as write element.'",
")",
"if",
"isinstance",
"(",
"value",
",",
"int",
")",
"is",
"False",
":",
"raise",
"TypeError",
"(",
"'Use a int as value.'",
")",
"if",
"canWrite",
"(",
"variable",
")",
":",
"return",
"lifepo4weredSO",
".",
"write_lifepo4wered",
"(",
"variable",
".",
"value",
",",
"value",
")",
"else",
":",
"raise",
"RuntimeError",
"(",
"'You cannot write {0} value, just read it'",
".",
"format",
"(",
"variable",
".",
"name",
")",
")"
] |
write an element to LiFePO4wered.
:param variable: the element.
:type variable: Lifepo4weredEnum
:param int value: the value to write.
:return: the written value
:rtype: int
:raises ValueError: if variable parameter is not a member of Lifepo4weredEnum
:raises TypeError: if value is not an int type
|
[
"write",
"an",
"element",
"to",
"LiFePO4wered",
"."
] |
f5b3fedf896d0d616bf5ae603953e5541d493640
|
https://github.com/fredericklussier/Lifepo4weredPy/blob/f5b3fedf896d0d616bf5ae603953e5541d493640/lifepo4weredPy/functions.py#L60-L81
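A hedged usage sketch of the four access helpers above. The import layout and the VOUT enum member are assumptions for illustration, not confirmed by this excerpt, and the calls require the LiFePO4wered hardware and shared library to actually run:

from lifepo4weredPy.functions import canRead, canWrite, read, write
from lifepo4weredPy import Lifepo4weredEnum  # hypothetical import path

element = Lifepo4weredEnum.VOUT  # hypothetical enum member
if canRead(element):
    print(read(element))   # read() raises RuntimeError for write-only elements
if canWrite(element):
    write(element, 3300)   # illustrative value; must be an int, else TypeError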
|
239,627
|
majerteam/sqla_inspect
|
sqla_inspect/ascii.py
|
force_encoding
|
def force_encoding(value, encoding='utf-8'):
"""
Return a string encoded in the provided encoding
"""
if not isinstance(value, (str, unicode)):
value = str(value)
if isinstance(value, unicode):
value = value.encode(encoding)
elif encoding != 'utf-8':
value = value.decode('utf-8').encode(encoding)
return value
|
python
|
def force_encoding(value, encoding='utf-8'):
"""
Return a string encoded in the provided encoding
"""
if not isinstance(value, (str, unicode)):
value = str(value)
if isinstance(value, unicode):
value = value.encode(encoding)
elif encoding != 'utf-8':
value = value.decode('utf-8').encode(encoding)
return value
|
[
"def",
"force_encoding",
"(",
"value",
",",
"encoding",
"=",
"'utf-8'",
")",
":",
"if",
"not",
"isinstance",
"(",
"value",
",",
"(",
"str",
",",
"unicode",
")",
")",
":",
"value",
"=",
"str",
"(",
"value",
")",
"if",
"isinstance",
"(",
"value",
",",
"unicode",
")",
":",
"value",
"=",
"value",
".",
"encode",
"(",
"encoding",
")",
"elif",
"encoding",
"!=",
"'utf-8'",
":",
"value",
"=",
"value",
".",
"decode",
"(",
"'utf-8'",
")",
".",
"encode",
"(",
"encoding",
")",
"return",
"value"
] |
Return a string encoded in the provided encoding
|
[
"Return",
"a",
"string",
"encoded",
"in",
"the",
"provided",
"encoding"
] |
67edb5541e6a56b0a657d3774d1e19c1110cd402
|
https://github.com/majerteam/sqla_inspect/blob/67edb5541e6a56b0a657d3774d1e19c1110cd402/sqla_inspect/ascii.py#L35-L45
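A minimal sketch of the behaviour, assuming Python 2 (this module relies on the Python 2 str/unicode split):

# Python 2 assumed: str is a byte string, unicode is a text string.
from sqla_inspect.ascii import force_encoding

print(repr(force_encoding(u'caf\xe9')))             # 'caf\xc3\xa9' (utf-8 bytes)
print(repr(force_encoding(u'caf\xe9', 'latin-1')))  # 'caf\xe9'
print(repr(force_encoding(42)))                     # '42' - non-strings are coerced first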
|
239,628
|
majerteam/sqla_inspect
|
sqla_inspect/ascii.py
|
force_unicode
|
def force_unicode(value):
"""
    return a utf-8 unicode entry
"""
if not isinstance(value, (str, unicode)):
value = unicode(value)
if isinstance(value, str):
value = value.decode('utf-8')
return value
|
python
|
def force_unicode(value):
"""
    return a utf-8 unicode entry
"""
if not isinstance(value, (str, unicode)):
value = unicode(value)
if isinstance(value, str):
value = value.decode('utf-8')
return value
|
[
"def",
"force_unicode",
"(",
"value",
")",
":",
"if",
"not",
"isinstance",
"(",
"value",
",",
"(",
"str",
",",
"unicode",
")",
")",
":",
"value",
"=",
"unicode",
"(",
"value",
")",
"if",
"isinstance",
"(",
"value",
",",
"str",
")",
":",
"value",
"=",
"value",
".",
"decode",
"(",
"'utf-8'",
")",
"return",
"value"
] |
return a utf-8 unicode entry
|
[
"return",
"an",
"utf",
"-",
"8",
"unicode",
"entry"
] |
67edb5541e6a56b0a657d3774d1e19c1110cd402
|
https://github.com/majerteam/sqla_inspect/blob/67edb5541e6a56b0a657d3774d1e19c1110cd402/sqla_inspect/ascii.py#L48-L56
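The counterpart sketch for decoding, again assuming Python 2:

from sqla_inspect.ascii import force_unicode

print(repr(force_unicode('caf\xc3\xa9')))  # u'caf\xe9' - bytes decoded as utf-8
print(repr(force_unicode(3.14)))           # u'3.14' - non-strings go through unicode()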
|
239,629
|
majerteam/sqla_inspect
|
sqla_inspect/ascii.py
|
camel_case_to_name
|
def camel_case_to_name(name):
"""
Used to convert a classname to a lowercase name
"""
def convert_func(val):
return "_" + val.group(0).lower()
return name[0].lower() + re.sub(r'([A-Z])', convert_func, name[1:])
|
python
|
def camel_case_to_name(name):
"""
Used to convert a classname to a lowercase name
"""
def convert_func(val):
return "_" + val.group(0).lower()
return name[0].lower() + re.sub(r'([A-Z])', convert_func, name[1:])
|
[
"def",
"camel_case_to_name",
"(",
"name",
")",
":",
"def",
"convert_func",
"(",
"val",
")",
":",
"return",
"\"_\"",
"+",
"val",
".",
"group",
"(",
"0",
")",
".",
"lower",
"(",
")",
"return",
"name",
"[",
"0",
"]",
".",
"lower",
"(",
")",
"+",
"re",
".",
"sub",
"(",
"r'([A-Z])'",
",",
"convert_func",
",",
"name",
"[",
"1",
":",
"]",
")"
] |
Used to convert a classname to a lowercase name
|
[
"Used",
"to",
"convert",
"a",
"classname",
"to",
"a",
"lowercase",
"name"
] |
67edb5541e6a56b0a657d3774d1e19c1110cd402
|
https://github.com/majerteam/sqla_inspect/blob/67edb5541e6a56b0a657d3774d1e19c1110cd402/sqla_inspect/ascii.py#L59-L65
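Since the function is pure it is easy to sanity-check; the block below restates it verbatim so the example runs on its own:

import re

def camel_case_to_name(name):
    def convert_func(val):
        return "_" + val.group(0).lower()
    return name[0].lower() + re.sub(r'([A-Z])', convert_func, name[1:])

print(camel_case_to_name('UserAccount'))  # user_account
print(camel_case_to_name('HTTPServer'))   # h_t_t_p_server - every capital splits;
                                          # acronyms get no special treatment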
|
239,630
|
majerteam/sqla_inspect
|
sqla_inspect/ascii.py
|
to_utf8
|
def to_utf8(datas):
"""
Force utf8 string entries in the given datas
"""
res = datas
if isinstance(datas, dict):
res = {}
for key, value in datas.items():
key = to_utf8(key)
value = to_utf8(value)
res[key] = value
elif isinstance(datas, (list, tuple)):
res = []
for data in datas:
res.append(to_utf8(data))
elif isinstance(datas, unicode):
res = datas.encode('utf-8')
return res
|
python
|
def to_utf8(datas):
"""
Force utf8 string entries in the given datas
"""
res = datas
if isinstance(datas, dict):
res = {}
for key, value in datas.items():
key = to_utf8(key)
value = to_utf8(value)
res[key] = value
elif isinstance(datas, (list, tuple)):
res = []
for data in datas:
res.append(to_utf8(data))
elif isinstance(datas, unicode):
res = datas.encode('utf-8')
return res
|
[
"def",
"to_utf8",
"(",
"datas",
")",
":",
"res",
"=",
"datas",
"if",
"isinstance",
"(",
"datas",
",",
"dict",
")",
":",
"res",
"=",
"{",
"}",
"for",
"key",
",",
"value",
"in",
"datas",
".",
"items",
"(",
")",
":",
"key",
"=",
"to_utf8",
"(",
"key",
")",
"value",
"=",
"to_utf8",
"(",
"value",
")",
"res",
"[",
"key",
"]",
"=",
"value",
"elif",
"isinstance",
"(",
"datas",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"res",
"=",
"[",
"]",
"for",
"data",
"in",
"datas",
":",
"res",
".",
"append",
"(",
"to_utf8",
"(",
"data",
")",
")",
"elif",
"isinstance",
"(",
"datas",
",",
"unicode",
")",
":",
"res",
"=",
"datas",
".",
"encode",
"(",
"'utf-8'",
")",
"return",
"res"
] |
Force utf8 string entries in the given datas
|
[
"Force",
"utf8",
"string",
"entries",
"in",
"the",
"given",
"datas"
] |
67edb5541e6a56b0a657d3774d1e19c1110cd402
|
https://github.com/majerteam/sqla_inspect/blob/67edb5541e6a56b0a657d3774d1e19c1110cd402/sqla_inspect/ascii.py#L86-L106
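A small sketch of the recursion, assuming Python 2 as above:

from sqla_inspect.ascii import to_utf8

data = {u'name': u'caf\xe9', u'tags': (u'a', u'b'), u'count': 3}
print(to_utf8(data))
# {'count': 3, 'name': 'caf\xc3\xa9', 'tags': ['a', 'b']}
# keys and values are encoded recursively; tuples come back as lists,
# and non-unicode leaves (like the int) pass through unchanged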
|
239,631
|
baszoetekouw/janus-py
|
sr/sr.py
|
ServiceRegistry.list_eids
|
def list_eids(self):
"""
Returns a list of all known eids
"""
entities = self.list()
return sorted([int(eid) for eid in entities])
|
python
|
def list_eids(self):
"""
Returns a list of all known eids
"""
entities = self.list()
return sorted([int(eid) for eid in entities])
|
[
"def",
"list_eids",
"(",
"self",
")",
":",
"entities",
"=",
"self",
".",
"list",
"(",
")",
"return",
"sorted",
"(",
"[",
"int",
"(",
"eid",
")",
"for",
"eid",
"in",
"entities",
"]",
")"
] |
Returns a list of all known eids
|
[
"Returns",
"a",
"list",
"of",
"all",
"known",
"eids"
] |
4f2034436eef010ec8d77e168f6198123b5eb226
|
https://github.com/baszoetekouw/janus-py/blob/4f2034436eef010ec8d77e168f6198123b5eb226/sr/sr.py#L150-L155
|
239,632
|
baszoetekouw/janus-py
|
sr/sr.py
|
ServiceRegistry.get_by_entityid
|
def get_by_entityid(self, entityid):
"""
Returns the entity with the given entity ID as a dict
"""
data = self.list(entityid=entityid)
if len(data) == 0:
return None
eid = int( next(iter(data)) )
entity = self.get(eid)
self.debug(0x01,entity)
return entity
|
python
|
def get_by_entityid(self, entityid):
"""
Returns the entity with the given entity ID as a dict
"""
data = self.list(entityid=entityid)
if len(data) == 0:
return None
eid = int( next(iter(data)) )
entity = self.get(eid)
self.debug(0x01,entity)
return entity
|
[
"def",
"get_by_entityid",
"(",
"self",
",",
"entityid",
")",
":",
"data",
"=",
"self",
".",
"list",
"(",
"entityid",
"=",
"entityid",
")",
"if",
"len",
"(",
"data",
")",
"==",
"0",
":",
"return",
"None",
"eid",
"=",
"int",
"(",
"next",
"(",
"iter",
"(",
"data",
")",
")",
")",
"entity",
"=",
"self",
".",
"get",
"(",
"eid",
")",
"self",
".",
"debug",
"(",
"0x01",
",",
"entity",
")",
"return",
"entity"
] |
Returns the entity with the given entity ID as a dict
|
[
"Returns",
"the",
"entity",
"with",
"the",
"given",
"entity",
"ID",
"as",
"a",
"dict"
] |
4f2034436eef010ec8d77e168f6198123b5eb226
|
https://github.com/baszoetekouw/janus-py/blob/4f2034436eef010ec8d77e168f6198123b5eb226/sr/sr.py#L157-L169
|
239,633
|
baszoetekouw/janus-py
|
sr/sr.py
|
ServiceRegistry.get
|
def get(self, eid):
"""
Returns a dict with the complete record of the entity with the given eID
"""
data = self._http_req('connections/%u' % eid)
self.debug(0x01, data['decoded'])
return data['decoded']
|
python
|
def get(self, eid):
"""
Returns a dict with the complete record of the entity with the given eID
"""
data = self._http_req('connections/%u' % eid)
self.debug(0x01, data['decoded'])
return data['decoded']
|
[
"def",
"get",
"(",
"self",
",",
"eid",
")",
":",
"data",
"=",
"self",
".",
"_http_req",
"(",
"'connections/%u'",
"%",
"eid",
")",
"self",
".",
"debug",
"(",
"0x01",
",",
"data",
"[",
"'decoded'",
"]",
")",
"return",
"data",
"[",
"'decoded'",
"]"
] |
Returns a dict with the complete record of the entity with the given eID
|
[
"Returns",
"a",
"dict",
"with",
"the",
"complete",
"record",
"of",
"the",
"entity",
"with",
"the",
"given",
"eID"
] |
4f2034436eef010ec8d77e168f6198123b5eb226
|
https://github.com/baszoetekouw/janus-py/blob/4f2034436eef010ec8d77e168f6198123b5eb226/sr/sr.py#L171-L177
|
239,634
|
baszoetekouw/janus-py
|
sr/sr.py
|
ServiceRegistry.delete
|
def delete(self, eid):
"""
Removes the entity with the given eid
"""
result = self._http_req('connections/%u' % eid, method='DELETE')
status = result['status']
if not status == 302:
raise ServiceRegistryError(status, "Could not delete entity %u: %u" % (eid,status))
self.debug(0x01,result)
return result['decoded']
|
python
|
def delete(self, eid):
"""
Removes the entity with the given eid
"""
result = self._http_req('connections/%u' % eid, method='DELETE')
status = result['status']
if not status == 302:
raise ServiceRegistryError(status, "Could not delete entity %u: %u" % (eid,status))
self.debug(0x01,result)
return result['decoded']
|
[
"def",
"delete",
"(",
"self",
",",
"eid",
")",
":",
"result",
"=",
"self",
".",
"_http_req",
"(",
"'connections/%u'",
"%",
"eid",
",",
"method",
"=",
"'DELETE'",
")",
"status",
"=",
"result",
"[",
"'status'",
"]",
"if",
"not",
"status",
"==",
"302",
":",
"raise",
"ServiceRegistryError",
"(",
"status",
",",
"\"Could not delete entity %u: %u\"",
"%",
"(",
"eid",
",",
"status",
")",
")",
"self",
".",
"debug",
"(",
"0x01",
",",
"result",
")",
"return",
"result",
"[",
"'decoded'",
"]"
] |
Removes the entity with the given eid
|
[
"Removes",
"the",
"entity",
"with",
"the",
"given",
"eid"
] |
4f2034436eef010ec8d77e168f6198123b5eb226
|
https://github.com/baszoetekouw/janus-py/blob/4f2034436eef010ec8d77e168f6198123b5eb226/sr/sr.py#L233-L243
|
239,635
|
baszoetekouw/janus-py
|
sr/sr.py
|
ServiceRegistry.add
|
def add(self, entity):
"""
Adds the supplied dict as a new entity
"""
result = self._http_req('connections', method='POST', payload=entity)
status = result['status']
if not status==201:
raise ServiceRegistryError(status,"Couldn't add entity")
self.debug(0x01,result)
return result['decoded']
|
python
|
def add(self, entity):
"""
Adds the supplied dict as a new entity
"""
result = self._http_req('connections', method='POST', payload=entity)
status = result['status']
if not status==201:
raise ServiceRegistryError(status,"Couldn't add entity")
self.debug(0x01,result)
return result['decoded']
|
[
"def",
"add",
"(",
"self",
",",
"entity",
")",
":",
"result",
"=",
"self",
".",
"_http_req",
"(",
"'connections'",
",",
"method",
"=",
"'POST'",
",",
"payload",
"=",
"entity",
")",
"status",
"=",
"result",
"[",
"'status'",
"]",
"if",
"not",
"status",
"==",
"201",
":",
"raise",
"ServiceRegistryError",
"(",
"status",
",",
"\"Couldn't add entity\"",
")",
"self",
".",
"debug",
"(",
"0x01",
",",
"result",
")",
"return",
"result",
"[",
"'decoded'",
"]"
] |
Adds the supplied dict as a new entity
|
[
"Adds",
"the",
"supplied",
"dict",
"as",
"a",
"new",
"entity"
] |
4f2034436eef010ec8d77e168f6198123b5eb226
|
https://github.com/baszoetekouw/janus-py/blob/4f2034436eef010ec8d77e168f6198123b5eb226/sr/sr.py#L245-L255
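A hedged end-to-end sketch of the CRUD helpers above. The ServiceRegistry constructor is not shown in this excerpt, so its arguments here are placeholders, and the 'type' key is just one field seen elsewhere in this file:

from sr import ServiceRegistry  # assumed import path

registry = ServiceRegistry('https://janus.example.org', 'user', 'secret')  # hypothetical signature

for eid in registry.list_eids():               # sorted integer eids
    print(eid, registry.get(eid).get('type'))  # full record comes back as a dict

created = registry.add({'type': 'saml20-sp'})  # expects HTTP 201, else ServiceRegistryError
registry.delete(42)                            # expects HTTP 302, else ServiceRegistryError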
|
239,636
|
baszoetekouw/janus-py
|
sr/sr.py
|
ServiceRegistry.connectiontable
|
def connectiontable(self, state='prodaccepted'):
"""
Returns a matrix of all entities showing which ones are connected together.
"""
entities = self.list_full(state)
# sort entities
idps = OrderedDict()
sps = OrderedDict()
for eid, entity in entities.items():
if entity['isActive'] and entity['state']==state:
if entity['type']=='saml20-idp':
idps[eid] = entity
elif entity['type']=='saml20-sp':
sps[eid] = entity
else:
raise(ServiceRegistryError(0,"unknown type `%s' for eid=%s" % (entity['type'],entity['id'])))
	# use numpy here for the connection matrix to simplify extraction of rows and columns later on
max_eid = max(entities.keys())
#print(max_eid)
connections = dok_matrix((max_eid+1,max_eid+1), dtype=np.bool_)
for idp_eid, idp_entity in idps.items():
#pprint(idp_entity)
sps_allowed = set([e['id'] for e in idp_entity['allowedConnections']])
for sp_eid, sp_entity in sps.items():
idps_allowed = set([ e['id'] for e in sp_entity['allowedConnections'] ])
acl = ( idp_entity['allowAllEntities'] or (sp_eid in sps_allowed ) ) \
and ( sp_entity[ 'allowAllEntities'] or (idp_eid in idps_allowed) )
connections[idp_eid,sp_eid] = acl
self._connections[state] = dict()
self._connections[state]['idp'] = idps
self._connections[state]['sp' ] = sps
self._connections[state]['acl'] = connections
return connections
|
python
|
def connectiontable(self, state='prodaccepted'):
"""
Returns a matrix of all entities showing which ones are connected together.
"""
entities = self.list_full(state)
# sort entities
idps = OrderedDict()
sps = OrderedDict()
for eid, entity in entities.items():
if entity['isActive'] and entity['state']==state:
if entity['type']=='saml20-idp':
idps[eid] = entity
elif entity['type']=='saml20-sp':
sps[eid] = entity
else:
raise(ServiceRegistryError(0,"unknown type `%s' for eid=%s" % (entity['type'],entity['id'])))
	# use numpy here for the connection matrix to simplify extraction of rows and columns later on
max_eid = max(entities.keys())
#print(max_eid)
connections = dok_matrix((max_eid+1,max_eid+1), dtype=np.bool_)
for idp_eid, idp_entity in idps.items():
#pprint(idp_entity)
sps_allowed = set([e['id'] for e in idp_entity['allowedConnections']])
for sp_eid, sp_entity in sps.items():
idps_allowed = set([ e['id'] for e in sp_entity['allowedConnections'] ])
acl = ( idp_entity['allowAllEntities'] or (sp_eid in sps_allowed ) ) \
and ( sp_entity[ 'allowAllEntities'] or (idp_eid in idps_allowed) )
connections[idp_eid,sp_eid] = acl
self._connections[state] = dict()
self._connections[state]['idp'] = idps
self._connections[state]['sp' ] = sps
self._connections[state]['acl'] = connections
return connections
|
[
"def",
"connectiontable",
"(",
"self",
",",
"state",
"=",
"'prodaccepted'",
")",
":",
"entities",
"=",
"self",
".",
"list_full",
"(",
"state",
")",
"# sort entities",
"idps",
"=",
"OrderedDict",
"(",
")",
"sps",
"=",
"OrderedDict",
"(",
")",
"for",
"eid",
",",
"entity",
"in",
"entities",
".",
"items",
"(",
")",
":",
"if",
"entity",
"[",
"'isActive'",
"]",
"and",
"entity",
"[",
"'state'",
"]",
"==",
"state",
":",
"if",
"entity",
"[",
"'type'",
"]",
"==",
"'saml20-idp'",
":",
"idps",
"[",
"eid",
"]",
"=",
"entity",
"elif",
"entity",
"[",
"'type'",
"]",
"==",
"'saml20-sp'",
":",
"sps",
"[",
"eid",
"]",
"=",
"entity",
"else",
":",
"raise",
"(",
"ServiceRegistryError",
"(",
"0",
",",
"\"unknown type `%s' for eid=%s\"",
"%",
"(",
"entity",
"[",
"'type'",
"]",
",",
"entity",
"[",
"'id'",
"]",
")",
")",
")",
"# use numpy here for the connection matrix to simplify extraction of rows and columns lateron",
"max_eid",
"=",
"max",
"(",
"entities",
".",
"keys",
"(",
")",
")",
"#print(max_eid)",
"connections",
"=",
"dok_matrix",
"(",
"(",
"max_eid",
"+",
"1",
",",
"max_eid",
"+",
"1",
")",
",",
"dtype",
"=",
"np",
".",
"bool_",
")",
"for",
"idp_eid",
",",
"idp_entity",
"in",
"idps",
".",
"items",
"(",
")",
":",
"#pprint(idp_entity)",
"sps_allowed",
"=",
"set",
"(",
"[",
"e",
"[",
"'id'",
"]",
"for",
"e",
"in",
"idp_entity",
"[",
"'allowedConnections'",
"]",
"]",
")",
"for",
"sp_eid",
",",
"sp_entity",
"in",
"sps",
".",
"items",
"(",
")",
":",
"idps_allowed",
"=",
"set",
"(",
"[",
"e",
"[",
"'id'",
"]",
"for",
"e",
"in",
"sp_entity",
"[",
"'allowedConnections'",
"]",
"]",
")",
"acl",
"=",
"(",
"idp_entity",
"[",
"'allowAllEntities'",
"]",
"or",
"(",
"sp_eid",
"in",
"sps_allowed",
")",
")",
"and",
"(",
"sp_entity",
"[",
"'allowAllEntities'",
"]",
"or",
"(",
"idp_eid",
"in",
"idps_allowed",
")",
")",
"connections",
"[",
"idp_eid",
",",
"sp_eid",
"]",
"=",
"acl",
"self",
".",
"_connections",
"[",
"state",
"]",
"=",
"dict",
"(",
")",
"self",
".",
"_connections",
"[",
"state",
"]",
"[",
"'idp'",
"]",
"=",
"idps",
"self",
".",
"_connections",
"[",
"state",
"]",
"[",
"'sp'",
"]",
"=",
"sps",
"self",
".",
"_connections",
"[",
"state",
"]",
"[",
"'acl'",
"]",
"=",
"connections",
"return",
"connections"
] |
Returns a matrix of all entities showing which ones are connected together.
|
[
"Returns",
"a",
"matrix",
"of",
"all",
"entities",
"showing",
"which",
"ones",
"are",
"connected",
"together",
"."
] |
4f2034436eef010ec8d77e168f6198123b5eb226
|
https://github.com/baszoetekouw/janus-py/blob/4f2034436eef010ec8d77e168f6198123b5eb226/sr/sr.py#L258-L296
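The mutual-consent rule at the heart of the double loop can be checked in isolation. This self-contained restatement uses hypothetical minimal entity dicts with only the fields the rule reads:

def connected(idp, sp, idp_eid, sp_eid):
    # both sides must allow the link, either globally or explicitly
    sps_allowed = {e['id'] for e in idp['allowedConnections']}
    idps_allowed = {e['id'] for e in sp['allowedConnections']}
    return ((idp['allowAllEntities'] or sp_eid in sps_allowed) and
            (sp['allowAllEntities'] or idp_eid in idps_allowed))

idp = {'allowAllEntities': False, 'allowedConnections': [{'id': 7}]}
sp = {'allowAllEntities': True, 'allowedConnections': []}
print(connected(idp, sp, idp_eid=3, sp_eid=7))  # True - idp lists sp 7, sp allows all
print(connected(idp, sp, idp_eid=3, sp_eid=8))  # False - sp 8 is not on the idp's list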
|
239,637
|
staffanm/layeredconfig
|
layeredconfig/layeredconfig.py
|
LayeredConfig.write
|
def write(config):
"""Commits any pending modifications, ie save a configuration file if
it has been marked "dirty" as a result of an normal
assignment. The modifications are written to the first
writable source in this config object.
.. note::
        This is a static method, i.e. not a method on any object
instance. This is because all attribute access on a
LayeredConfig object is meant to retrieve configuration
settings.
:param config: The configuration object to save
:type config: layeredconfig.LayeredConfig
"""
root = config
while root._parent:
root = root._parent
for source in root._sources:
if source.writable and source.dirty:
source.save()
|
python
|
def write(config):
"""Commits any pending modifications, ie save a configuration file if
it has been marked "dirty" as a result of an normal
assignment. The modifications are written to the first
writable source in this config object.
.. note::
        This is a static method, i.e. not a method on any object
instance. This is because all attribute access on a
LayeredConfig object is meant to retrieve configuration
settings.
:param config: The configuration object to save
:type config: layeredconfig.LayeredConfig
"""
root = config
while root._parent:
root = root._parent
for source in root._sources:
if source.writable and source.dirty:
source.save()
|
[
"def",
"write",
"(",
"config",
")",
":",
"root",
"=",
"config",
"while",
"root",
".",
"_parent",
":",
"root",
"=",
"root",
".",
"_parent",
"for",
"source",
"in",
"root",
".",
"_sources",
":",
"if",
"source",
".",
"writable",
"and",
"source",
".",
"dirty",
":",
"source",
".",
"save",
"(",
")"
] |
Commits any pending modifications, i.e. saves a configuration file if
it has been marked "dirty" as a result of a normal
assignment. The modifications are written to every
writable, dirty source in this config object.
.. note::
This is a static method, i.e. not a method on any object
instance. This is because all attribute access on a
LayeredConfig object is meant to retrieve configuration
settings.
:param config: The configuration object to save
:type config: layeredconfig.LayeredConfig
|
[
"Commits",
"any",
"pending",
"modifications",
"ie",
"save",
"a",
"configuration",
"file",
"if",
"it",
"has",
"been",
"marked",
"dirty",
"as",
"a",
"result",
"of",
"an",
"normal",
"assignment",
".",
"The",
"modifications",
"are",
"written",
"to",
"the",
"first",
"writable",
"source",
"in",
"this",
"config",
"object",
"."
] |
f3dad66729854f5c34910c7533f88d39b223a977
|
https://github.com/staffanm/layeredconfig/blob/f3dad66729854f5c34910c7533f88d39b223a977/layeredconfig/layeredconfig.py#L102-L125
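A hedged usage sketch; Defaults and INIFile are standard layeredconfig source classes, but the exact setup here is illustrative:

from layeredconfig import LayeredConfig, Defaults, INIFile

config = LayeredConfig(Defaults({'greeting': 'hello'}), INIFile('app.ini'))
config.greeting = 'hi'        # a normal assignment marks the INIFile source dirty
LayeredConfig.write(config)   # static call: saves every writable, dirty source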
|
239,638
|
staffanm/layeredconfig
|
layeredconfig/layeredconfig.py
|
LayeredConfig.dump
|
def dump(config):
"""Returns the entire content of the config object in a way that can
be easily examined, compared or dumped to a string or file.
:param config: The configuration object to dump
:rtype: dict
"""
def _dump(element):
if not isinstance(element, config.__class__):
return element
section = dict()
for key, subsection in element._subsections.items():
section[key] = _dump(subsection)
for key in element:
section[key] = getattr(element, key)
return section
return _dump(config)
|
python
|
def dump(config):
"""Returns the entire content of the config object in a way that can
be easily examined, compared or dumped to a string or file.
:param config: The configuration object to dump
:rtype: dict
"""
def _dump(element):
if not isinstance(element, config.__class__):
return element
section = dict()
for key, subsection in element._subsections.items():
section[key] = _dump(subsection)
for key in element:
section[key] = getattr(element, key)
return section
return _dump(config)
|
[
"def",
"dump",
"(",
"config",
")",
":",
"def",
"_dump",
"(",
"element",
")",
":",
"if",
"not",
"isinstance",
"(",
"element",
",",
"config",
".",
"__class__",
")",
":",
"return",
"element",
"section",
"=",
"dict",
"(",
")",
"for",
"key",
",",
"subsection",
"in",
"element",
".",
"_subsections",
".",
"items",
"(",
")",
":",
"section",
"[",
"key",
"]",
"=",
"_dump",
"(",
"subsection",
")",
"for",
"key",
"in",
"element",
":",
"section",
"[",
"key",
"]",
"=",
"getattr",
"(",
"element",
",",
"key",
")",
"return",
"section",
"return",
"_dump",
"(",
"config",
")"
] |
Returns the entire content of the config object in a way that can
be easily examined, compared or dumped to a string or file.
:param config: The configuration object to dump
:rtype: dict
|
[
"Returns",
"the",
"entire",
"content",
"of",
"the",
"config",
"object",
"in",
"a",
"way",
"that",
"can",
"be",
"easily",
"examined",
"compared",
"or",
"dumped",
"to",
"a",
"string",
"or",
"file",
"."
] |
f3dad66729854f5c34910c7533f88d39b223a977
|
https://github.com/staffanm/layeredconfig/blob/f3dad66729854f5c34910c7533f88d39b223a977/layeredconfig/layeredconfig.py#L157-L176
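A hedged sketch, assuming (as the layeredconfig docs suggest) that nested dicts passed to Defaults become subsections:

from layeredconfig import LayeredConfig, Defaults

config = LayeredConfig(Defaults({'greeting': 'hello', 'db': {'port': 5432}}))
print(LayeredConfig.dump(config))
# {'greeting': 'hello', 'db': {'port': 5432}} - subsections come back as nested dicts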
|
239,639
|
jameswenzel/PokeAPI.py
|
PokeAPI.py
|
_make_methods
|
def _make_methods():
"Automagically generates methods based on the API endpoints"
for k, v in PokeAPI().get_endpoints().items():
string = "\t@BaseAPI._memoize\n"
string += ("\tdef get_{0}(self, id_or_name='', limit=None,"
.format(k.replace('-', '_')) + ' offset=None):\n')
string += ("\t\tparams = self._parse_params(locals().copy(), " +
"['id_or_name'])\n")
string += "\t\tquery_string = '{0}/'\n".format(v.split('/')[-2])
string += "\t\tquery_string += str(id_or_name) + '?'\n"
string += '\t\tquery_string += params\n'
string += '\t\treturn self._get(query_string)\n'
print(string)
|
python
|
def _make_methods():
"Automagically generates methods based on the API endpoints"
for k, v in PokeAPI().get_endpoints().items():
string = "\t@BaseAPI._memoize\n"
string += ("\tdef get_{0}(self, id_or_name='', limit=None,"
.format(k.replace('-', '_')) + ' offset=None):\n')
string += ("\t\tparams = self._parse_params(locals().copy(), " +
"['id_or_name'])\n")
string += "\t\tquery_string = '{0}/'\n".format(v.split('/')[-2])
string += "\t\tquery_string += str(id_or_name) + '?'\n"
string += '\t\tquery_string += params\n'
string += '\t\treturn self._get(query_string)\n'
print(string)
|
[
"def",
"_make_methods",
"(",
")",
":",
"for",
"k",
",",
"v",
"in",
"PokeAPI",
"(",
")",
".",
"get_endpoints",
"(",
")",
".",
"items",
"(",
")",
":",
"string",
"=",
"\"\\t@BaseAPI._memoize\\n\"",
"string",
"+=",
"(",
"\"\\tdef get_{0}(self, id_or_name='', limit=None,\"",
".",
"format",
"(",
"k",
".",
"replace",
"(",
"'-'",
",",
"'_'",
")",
")",
"+",
"' offset=None):\\n'",
")",
"string",
"+=",
"(",
"\"\\t\\tparams = self._parse_params(locals().copy(), \"",
"+",
"\"['id_or_name'])\\n\"",
")",
"string",
"+=",
"\"\\t\\tquery_string = '{0}/'\\n\"",
".",
"format",
"(",
"v",
".",
"split",
"(",
"'/'",
")",
"[",
"-",
"2",
"]",
")",
"string",
"+=",
"\"\\t\\tquery_string += str(id_or_name) + '?'\\n\"",
"string",
"+=",
"'\\t\\tquery_string += params\\n'",
"string",
"+=",
"'\\t\\treturn self._get(query_string)\\n'",
"print",
"(",
"string",
")"
] |
Automagically generates methods based on the API endpoints
|
[
"Automagically",
"generates",
"methods",
"based",
"on",
"the",
"API",
"endpoints"
] |
9d0000c84456a2da4409e3e9141cc0b48ac1271f
|
https://github.com/jameswenzel/PokeAPI.py/blob/9d0000c84456a2da4409e3e9141cc0b48ac1271f/PokeAPI.py#L402-L414
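For a hypothetical endpoint pair such as ('pokemon', 'https://pokeapi.co/api/v2/pokemon/'), the printed template expands to roughly the following (tabs rendered as indentation; BaseAPI comes from the surrounding module, so this is the generated shape, not a standalone script):

@BaseAPI._memoize
def get_pokemon(self, id_or_name='', limit=None, offset=None):
    params = self._parse_params(locals().copy(), ['id_or_name'])
    query_string = 'pokemon/'
    query_string += str(id_or_name) + '?'
    query_string += params
    return self._get(query_string)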
|
239,640
|
emilydolson/avida-spatial-tools
|
avidaspatial/visualizations.py
|
heat_map
|
def heat_map(grid, name, **kwargs):
"""
Generic function for making a heat map based on the values in a grid.
Arguments: grid - the grid of numbers or binary strings to be visualized.
name - string indicating what the file storing the image
should be called.
kwargs:
palette - a seaborn palette (list of RGB values) indicating
how to color values. Will be converted to a continuous
colormap if necessary
denom - the maximum value of numbers in the grid (only used
if the grid actually contains numbers). This is used
to normalize values and use the full dynamic range of
                the color palette.
"""
denom, palette = get_kwargs(grid, kwargs)
if "mask_zeros" in kwargs:
mask_zeros = kwargs["mask_zeros"]
else:
mask_zeros = False
grid = color_grid(grid, palette, denom, mask_zeros)
make_imshow_plot(grid, name)
|
python
|
def heat_map(grid, name, **kwargs):
"""
Generic function for making a heat map based on the values in a grid.
Arguments: grid - the grid of numbers or binary strings to be visualized.
name - string indicating what the file storing the image
should be called.
kwargs:
palette - a seaborn palette (list of RGB values) indicating
how to color values. Will be converted to a continuous
colormap if necessary
denom - the maximum value of numbers in the grid (only used
if the grid actually contains numbers). This is used
to normalize values and use the full dynamic range of
                the color palette.
"""
denom, palette = get_kwargs(grid, kwargs)
if "mask_zeros" in kwargs:
mask_zeros = kwargs["mask_zeros"]
else:
mask_zeros = False
grid = color_grid(grid, palette, denom, mask_zeros)
make_imshow_plot(grid, name)
|
[
"def",
"heat_map",
"(",
"grid",
",",
"name",
",",
"*",
"*",
"kwargs",
")",
":",
"denom",
",",
"palette",
"=",
"get_kwargs",
"(",
"grid",
",",
"kwargs",
")",
"if",
"\"mask_zeros\"",
"in",
"kwargs",
":",
"mask_zeros",
"=",
"kwargs",
"[",
"\"mask_zeros\"",
"]",
"else",
":",
"mask_zeros",
"=",
"False",
"grid",
"=",
"color_grid",
"(",
"grid",
",",
"palette",
",",
"denom",
",",
"mask_zeros",
")",
"make_imshow_plot",
"(",
"grid",
",",
"name",
")"
] |
Generic function for making a heat map based on the values in a grid.
Arguments: grid - the grid of numbers or binary strings to be visualized.
name - string indicating what the file storing the image
should be called.
kwargs:
palette - a seaborn palette (list of RGB values) indicating
how to color values. Will be converted to a continuous
colormap if necessary
denom - the maximum value of numbers in the grid (only used
if the grid actually contains numbers). This is used
to normalize values and use the full dynamic range of
                the color palette.
|
[
"Generic",
"function",
"for",
"making",
"a",
"heat",
"map",
"based",
"on",
"the",
"values",
"in",
"a",
"grid",
"."
] |
7beb0166ccefad5fa722215b030ac2a53d62b59e
|
https://github.com/emilydolson/avida-spatial-tools/blob/7beb0166ccefad5fa722215b030ac2a53d62b59e/avidaspatial/visualizations.py#L14-L38
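A hedged usage sketch, assuming the module is importable and that a plain numeric grid with denom set to the grid maximum is acceptable input, as the docstring suggests:

from avidaspatial.visualizations import heat_map  # assumed import path

grid = [[0, 1, 2],
        [3, 4, 5]]
heat_map(grid, 'demo_heatmap', denom=5, mask_zeros=True)
# zeros render white because mask_zeros is set; other values span the colormap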
|
239,641
|
emilydolson/avida-spatial-tools
|
avidaspatial/visualizations.py
|
plot_phens
|
def plot_phens(phen_grid, **kwargs):
"""
Plots circles colored according to the values in phen_grid.
-1 serves as a sentinel value, indicating that a circle should not be
plotted in that location.
"""
denom, palette = get_kwargs(phen_grid, kwargs, True)
grid = color_grid(phen_grid, palette, denom)
for i in range(len(grid)):
for j in range(len(grid[i])):
if grid[i][j] != -1 and tuple(grid[i][j]) != -1:
plt.gca().add_patch(plt.Circle((j, i),
radius=.3, lw=1, ec="black",
facecolor=grid[i][j], zorder=2))
|
python
|
def plot_phens(phen_grid, **kwargs):
"""
Plots circles colored according to the values in phen_grid.
-1 serves as a sentinel value, indicating that a circle should not be
plotted in that location.
"""
denom, palette = get_kwargs(phen_grid, kwargs, True)
grid = color_grid(phen_grid, palette, denom)
for i in range(len(grid)):
for j in range(len(grid[i])):
if grid[i][j] != -1 and tuple(grid[i][j]) != -1:
plt.gca().add_patch(plt.Circle((j, i),
radius=.3, lw=1, ec="black",
facecolor=grid[i][j], zorder=2))
|
[
"def",
"plot_phens",
"(",
"phen_grid",
",",
"*",
"*",
"kwargs",
")",
":",
"denom",
",",
"palette",
"=",
"get_kwargs",
"(",
"phen_grid",
",",
"kwargs",
",",
"True",
")",
"grid",
"=",
"color_grid",
"(",
"phen_grid",
",",
"palette",
",",
"denom",
")",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"grid",
")",
")",
":",
"for",
"j",
"in",
"range",
"(",
"len",
"(",
"grid",
"[",
"i",
"]",
")",
")",
":",
"if",
"grid",
"[",
"i",
"]",
"[",
"j",
"]",
"!=",
"-",
"1",
"and",
"tuple",
"(",
"grid",
"[",
"i",
"]",
"[",
"j",
"]",
")",
"!=",
"-",
"1",
":",
"plt",
".",
"gca",
"(",
")",
".",
"add_patch",
"(",
"plt",
".",
"Circle",
"(",
"(",
"j",
",",
"i",
")",
",",
"radius",
"=",
".3",
",",
"lw",
"=",
"1",
",",
"ec",
"=",
"\"black\"",
",",
"facecolor",
"=",
"grid",
"[",
"i",
"]",
"[",
"j",
"]",
",",
"zorder",
"=",
"2",
")",
")"
] |
Plots circles colored according to the values in phen_grid.
-1 serves as a sentinel value, indicating that a circle should not be
plotted in that location.
|
[
"Plots",
"circles",
"colored",
"according",
"to",
"the",
"values",
"in",
"phen_grid",
"."
] |
7beb0166ccefad5fa722215b030ac2a53d62b59e
|
https://github.com/emilydolson/avida-spatial-tools/blob/7beb0166ccefad5fa722215b030ac2a53d62b59e/avidaspatial/visualizations.py#L163-L179
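A hedged sketch of driving plot_phens directly; the axis setup is illustrative, since the plotting helpers that normally surround this call are not shown in this excerpt:

import matplotlib.pyplot as plt
from avidaspatial.visualizations import plot_phens  # assumed import path

phens = [['0b1', '0b10'],
         ['0b11', -1]]        # the -1 cell is skipped entirely
plt.figure()
plt.xlim(-0.5, 1.5)
plt.ylim(-0.5, 1.5)
plot_phens(phens)
plt.savefig('phens.png')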
|
239,642
|
emilydolson/avida-spatial-tools
|
avidaspatial/visualizations.py
|
plot_phens_circles
|
def plot_phens_circles(phen_grid, **kwargs):
"""
Plots phenotypes represented as concentric circles. Each circle represents
one task that the phenotype can perform, with larger circles representing
more complex tasks.
Arguments: phen_grid - a 2D array of strings representing binary numbers
kwargs:
palette - a seaborn palette (list of RGB values) indicating
how to color values. Will be converted to a continuous
colormap if necessary
denom - the maximum value of numbers in the grid (only used
if the grid actually contains numbers). This is used
to normalize values and use the full dynamic range of
                the color palette.
    TODO: come up with a way to represent organisms that don't do any tasks.
"""
denom, palette = get_kwargs(phen_grid, kwargs, True)
n_tasks = len(palette)
grid = phen_grid
for i in range(len(grid)):
for j in range(len(grid[i])):
if grid[i][j] != -1 and int(grid[i][j], 2) != -1 and \
int(grid[i][j], 2) != 0:
first = True
b_ind = grid[i][j].find("b")
phen = grid[i][j][b_ind+1:]
for k in range(len(phen)):
if int(phen[k]) == 1:
plt.gca().add_patch(
plt.Circle(
(j, i), radius=(n_tasks - k)*.05,
lw=.1 if first else 0, ec="black",
facecolor=palette[k], zorder=2+k))
first = False
elif int(grid[i][j], 2) == 0:
plt.gca().add_patch(
plt.Circle(
(j, i), radius=(n_tasks)*.05,
lw=.1, ec="black",
facecolor="grey", zorder=2))
|
python
|
def plot_phens_circles(phen_grid, **kwargs):
"""
Plots phenotypes represented as concentric circles. Each circle represents
one task that the phenotype can perform, with larger circles representing
more complex tasks.
Arguments: phen_grid - a 2D array of strings representing binary numbers
kwargs:
palette - a seaborn palette (list of RGB values) indicating
how to color values. Will be converted to a continuous
colormap if necessary
denom - the maximum value of numbers in the grid (only used
if the grid actually contains numbers). This is used
to normalize values and use the full dynamic range of
                the color palette.
    TODO: come up with a way to represent organisms that don't do any tasks.
"""
denom, palette = get_kwargs(phen_grid, kwargs, True)
n_tasks = len(palette)
grid = phen_grid
for i in range(len(grid)):
for j in range(len(grid[i])):
if grid[i][j] != -1 and int(grid[i][j], 2) != -1 and \
int(grid[i][j], 2) != 0:
first = True
b_ind = grid[i][j].find("b")
phen = grid[i][j][b_ind+1:]
for k in range(len(phen)):
if int(phen[k]) == 1:
plt.gca().add_patch(
plt.Circle(
(j, i), radius=(n_tasks - k)*.05,
lw=.1 if first else 0, ec="black",
facecolor=palette[k], zorder=2+k))
first = False
elif int(grid[i][j], 2) == 0:
plt.gca().add_patch(
plt.Circle(
(j, i), radius=(n_tasks)*.05,
lw=.1, ec="black",
facecolor="grey", zorder=2))
|
[
"def",
"plot_phens_circles",
"(",
"phen_grid",
",",
"*",
"*",
"kwargs",
")",
":",
"denom",
",",
"palette",
"=",
"get_kwargs",
"(",
"phen_grid",
",",
"kwargs",
",",
"True",
")",
"n_tasks",
"=",
"len",
"(",
"palette",
")",
"grid",
"=",
"phen_grid",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"grid",
")",
")",
":",
"for",
"j",
"in",
"range",
"(",
"len",
"(",
"grid",
"[",
"i",
"]",
")",
")",
":",
"if",
"grid",
"[",
"i",
"]",
"[",
"j",
"]",
"!=",
"-",
"1",
"and",
"int",
"(",
"grid",
"[",
"i",
"]",
"[",
"j",
"]",
",",
"2",
")",
"!=",
"-",
"1",
"and",
"int",
"(",
"grid",
"[",
"i",
"]",
"[",
"j",
"]",
",",
"2",
")",
"!=",
"0",
":",
"first",
"=",
"True",
"b_ind",
"=",
"grid",
"[",
"i",
"]",
"[",
"j",
"]",
".",
"find",
"(",
"\"b\"",
")",
"phen",
"=",
"grid",
"[",
"i",
"]",
"[",
"j",
"]",
"[",
"b_ind",
"+",
"1",
":",
"]",
"for",
"k",
"in",
"range",
"(",
"len",
"(",
"phen",
")",
")",
":",
"if",
"int",
"(",
"phen",
"[",
"k",
"]",
")",
"==",
"1",
":",
"plt",
".",
"gca",
"(",
")",
".",
"add_patch",
"(",
"plt",
".",
"Circle",
"(",
"(",
"j",
",",
"i",
")",
",",
"radius",
"=",
"(",
"n_tasks",
"-",
"k",
")",
"*",
".05",
",",
"lw",
"=",
".1",
"if",
"first",
"else",
"0",
",",
"ec",
"=",
"\"black\"",
",",
"facecolor",
"=",
"palette",
"[",
"k",
"]",
",",
"zorder",
"=",
"2",
"+",
"k",
")",
")",
"first",
"=",
"False",
"elif",
"int",
"(",
"grid",
"[",
"i",
"]",
"[",
"j",
"]",
",",
"2",
")",
"==",
"0",
":",
"plt",
".",
"gca",
"(",
")",
".",
"add_patch",
"(",
"plt",
".",
"Circle",
"(",
"(",
"j",
",",
"i",
")",
",",
"radius",
"=",
"(",
"n_tasks",
")",
"*",
".05",
",",
"lw",
"=",
".1",
",",
"ec",
"=",
"\"black\"",
",",
"facecolor",
"=",
"\"grey\"",
",",
"zorder",
"=",
"2",
")",
")"
] |
Plots phenotypes represented as concentric circles. Each circle represents
one task that the phenotype can perform, with larger circles representing
more complex tasks.
Arguments: phen_grid - a 2D array of strings representing binary numbers
kwargs:
palette - a seaborn palette (list of RGB values) indicating
how to color values. Will be converted to a continuous
colormap if necessary
denom - the maximum value of numbers in the grid (only used
if the grid actually contains numbers). This is used
to normalize values and use the full dynamic range of
                the color palette.
TODO: come up with a way to represent organisms that don't do any tasks.
|
[
"Plots",
"phenotypes",
"represented",
"as",
"concentric",
"circles",
".",
"Each",
"circle",
"represents",
"one",
"task",
"that",
"the",
"phenotype",
"can",
"perform",
"with",
"larger",
"circles",
"representing",
"more",
"complex",
"tasks",
"."
] |
7beb0166ccefad5fa722215b030ac2a53d62b59e
|
https://github.com/emilydolson/avida-spatial-tools/blob/7beb0166ccefad5fa722215b030ac2a53d62b59e/avidaspatial/visualizations.py#L182-L227
|
239,643
|
emilydolson/avida-spatial-tools
|
avidaspatial/visualizations.py
|
plot_phens_blits
|
def plot_phens_blits(phen_grid, patches, **kwargs):
"""
A version of plot_phens designed to be used in animations. Takes a 2D array
of phenotypes and a list of matplotlib patch objects that have already
been added to the current axes and recolors the patches based on the array.
"""
denom, palette = get_kwargs(phen_grid, kwargs)
grid = color_grid(phen_grid, palette, denom)
for i in range(len(grid)):
for j in range(len(grid[i])):
curr_patch = patches[i * len(grid[i]) + j]
if grid[i][j] == -1:
curr_patch.set_visible(False)
else:
curr_patch.set_facecolor(grid[i][j])
curr_patch.set_visible(True)
return patches
|
python
|
def plot_phens_blits(phen_grid, patches, **kwargs):
"""
A version of plot_phens designed to be used in animations. Takes a 2D array
of phenotypes and a list of matplotlib patch objects that have already
been added to the current axes and recolors the patches based on the array.
"""
denom, palette = get_kwargs(phen_grid, kwargs)
grid = color_grid(phen_grid, palette, denom)
for i in range(len(grid)):
for j in range(len(grid[i])):
curr_patch = patches[i * len(grid[i]) + j]
if grid[i][j] == -1:
curr_patch.set_visible(False)
else:
curr_patch.set_facecolor(grid[i][j])
curr_patch.set_visible(True)
return patches
|
[
"def",
"plot_phens_blits",
"(",
"phen_grid",
",",
"patches",
",",
"*",
"*",
"kwargs",
")",
":",
"denom",
",",
"palette",
"=",
"get_kwargs",
"(",
"phen_grid",
",",
"kwargs",
")",
"grid",
"=",
"color_grid",
"(",
"phen_grid",
",",
"palette",
",",
"denom",
")",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"grid",
")",
")",
":",
"for",
"j",
"in",
"range",
"(",
"len",
"(",
"grid",
"[",
"i",
"]",
")",
")",
":",
"curr_patch",
"=",
"patches",
"[",
"i",
"*",
"len",
"(",
"grid",
"[",
"i",
"]",
")",
"+",
"j",
"]",
"if",
"grid",
"[",
"i",
"]",
"[",
"j",
"]",
"==",
"-",
"1",
":",
"curr_patch",
".",
"set_visible",
"(",
"False",
")",
"else",
":",
"curr_patch",
".",
"set_facecolor",
"(",
"grid",
"[",
"i",
"]",
"[",
"j",
"]",
")",
"curr_patch",
".",
"set_visible",
"(",
"True",
")",
"return",
"patches"
] |
A version of plot_phens designed to be used in animations. Takes a 2D array
of phenotypes and a list of matplotlib patch objects that have already
been added to the current axes and recolors the patches based on the array.
|
[
"A",
"version",
"of",
"plot_phens",
"designed",
"to",
"be",
"used",
"in",
"animations",
".",
"Takes",
"a",
"2D",
"array",
"of",
"phenotypes",
"and",
"a",
"list",
"of",
"matplotlib",
"patch",
"objects",
"that",
"have",
"already",
"been",
"added",
"to",
"the",
"current",
"axes",
"and",
"recolors",
"the",
"patches",
"based",
"on",
"the",
"array",
"."
] |
7beb0166ccefad5fa722215b030ac2a53d62b59e
|
https://github.com/emilydolson/avida-spatial-tools/blob/7beb0166ccefad5fa722215b030ac2a53d62b59e/avidaspatial/visualizations.py#L230-L249
|
239,644
|
emilydolson/avida-spatial-tools
|
avidaspatial/visualizations.py
|
color_array_by_value
|
def color_array_by_value(value, palette, denom, mask_zeros):
"""
Figure out the appropriate RGB or RGBA color for the given numerical
value based on the palette, denom, and whether zeros should be masked.
"""
if value == -1: # sentinel value
return -1
if value == 0 and mask_zeros: # This value is masked
if type(palette) is list:
return (1, 1, 1)
return (1, 1, 1, 1)
if type(palette) is list: # This is a palette
return palette[value]
# This is continuous data so the palette is actually a colormap
return palette(float(value)/float(denom))
|
python
|
def color_array_by_value(value, palette, denom, mask_zeros):
"""
Figure out the appropriate RGB or RGBA color for the given numerical
value based on the palette, denom, and whether zeros should be masked.
"""
if value == -1: # sentinel value
return -1
if value == 0 and mask_zeros: # This value is masked
if type(palette) is list:
return (1, 1, 1)
return (1, 1, 1, 1)
if type(palette) is list: # This is a palette
return palette[value]
# This is continuous data so the palette is actually a colormap
return palette(float(value)/float(denom))
|
[
"def",
"color_array_by_value",
"(",
"value",
",",
"palette",
",",
"denom",
",",
"mask_zeros",
")",
":",
"if",
"value",
"==",
"-",
"1",
":",
"# sentinel value",
"return",
"-",
"1",
"if",
"value",
"==",
"0",
"and",
"mask_zeros",
":",
"# This value is masked",
"if",
"type",
"(",
"palette",
")",
"is",
"list",
":",
"return",
"(",
"1",
",",
"1",
",",
"1",
")",
"return",
"(",
"1",
",",
"1",
",",
"1",
",",
"1",
")",
"if",
"type",
"(",
"palette",
")",
"is",
"list",
":",
"# This is a palette",
"return",
"palette",
"[",
"value",
"]",
"# This is continuous data so the palette is actually a colormap",
"return",
"palette",
"(",
"float",
"(",
"value",
")",
"/",
"float",
"(",
"denom",
")",
")"
] |
Figure out the appropriate RGB or RGBA color for the given numerical
value based on the palette, denom, and whether zeros should be masked.
|
[
"Figure",
"out",
"the",
"appropriate",
"RGB",
"or",
"RGBA",
"color",
"for",
"the",
"given",
"numerical",
"value",
"based",
"on",
"the",
"palette",
"denom",
"and",
"whether",
"zeros",
"should",
"be",
"masked",
"."
] |
7beb0166ccefad5fa722215b030ac2a53d62b59e
|
https://github.com/emilydolson/avida-spatial-tools/blob/7beb0166ccefad5fa722215b030ac2a53d62b59e/avidaspatial/visualizations.py#L405-L422
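The dispatch between discrete palettes and continuous colormaps can be exercised directly; the import path is an assumption:

import matplotlib.cm as cm
from avidaspatial.visualizations import color_array_by_value  # assumed path

palette = [(1, 0, 0), (0, 1, 0), (0, 0, 1)]           # a list, so values index it
print(color_array_by_value(2, palette, 2, False))     # (0, 0, 1)
print(color_array_by_value(0, palette, 2, True))      # (1, 1, 1) - masked zero
print(color_array_by_value(1, cm.viridis, 2, False))  # RGBA at 0.5 on the colormap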
|
239,645
|
emilydolson/avida-spatial-tools
|
avidaspatial/visualizations.py
|
color_array_by_hue_mix
|
def color_array_by_hue_mix(value, palette):
"""
Figure out the appropriate color for a binary string value by averaging
    the colors corresponding to the indices of each one that it contains. Makes
for visualizations that intuitively show patch overlap.
"""
if int(value, 2) > 0:
# Convert bits to list and reverse order to avoid issues with
# differing lengths
int_list = [int(i) for i in list(value[2:])]
int_list.reverse()
# since this is a 1D array, we need the zeroth elements
# of np.nonzero.
locs = np.nonzero(int_list)[0]
# print(locs)
# print(palette)
rgb_vals = [palette[i] for i in locs]
rgb = [0]*len(rgb_vals[0]) # We don't know if it's rgb or rgba
for val in rgb_vals:
for index in range(len(val)):
rgb[index] += val[index]
for i in range(len(rgb)):
rgb[i] /= len(locs)
return tuple(rgb)
if int(value, 2) == 0:
return (1, 1, 1) if len(palette[0]) == 3 else (1, 1, 1, 1)
return -1
|
python
|
def color_array_by_hue_mix(value, palette):
"""
Figure out the appropriate color for a binary string value by averaging
    the colors corresponding to the indices of each one that it contains. Makes
for visualizations that intuitively show patch overlap.
"""
if int(value, 2) > 0:
# Convert bits to list and reverse order to avoid issues with
# differing lengths
int_list = [int(i) for i in list(value[2:])]
int_list.reverse()
# since this is a 1D array, we need the zeroth elements
# of np.nonzero.
locs = np.nonzero(int_list)[0]
# print(locs)
# print(palette)
rgb_vals = [palette[i] for i in locs]
rgb = [0]*len(rgb_vals[0]) # We don't know if it's rgb or rgba
for val in rgb_vals:
for index in range(len(val)):
rgb[index] += val[index]
for i in range(len(rgb)):
rgb[i] /= len(locs)
return tuple(rgb)
if int(value, 2) == 0:
return (1, 1, 1) if len(palette[0]) == 3 else (1, 1, 1, 1)
return -1
|
[
"def",
"color_array_by_hue_mix",
"(",
"value",
",",
"palette",
")",
":",
"if",
"int",
"(",
"value",
",",
"2",
")",
">",
"0",
":",
"# Convert bits to list and reverse order to avoid issues with",
"# differing lengths",
"int_list",
"=",
"[",
"int",
"(",
"i",
")",
"for",
"i",
"in",
"list",
"(",
"value",
"[",
"2",
":",
"]",
")",
"]",
"int_list",
".",
"reverse",
"(",
")",
"# since this is a 1D array, we need the zeroth elements",
"# of np.nonzero.",
"locs",
"=",
"np",
".",
"nonzero",
"(",
"int_list",
")",
"[",
"0",
"]",
"# print(locs)",
"# print(palette)",
"rgb_vals",
"=",
"[",
"palette",
"[",
"i",
"]",
"for",
"i",
"in",
"locs",
"]",
"rgb",
"=",
"[",
"0",
"]",
"*",
"len",
"(",
"rgb_vals",
"[",
"0",
"]",
")",
"# We don't know if it's rgb or rgba",
"for",
"val",
"in",
"rgb_vals",
":",
"for",
"index",
"in",
"range",
"(",
"len",
"(",
"val",
")",
")",
":",
"rgb",
"[",
"index",
"]",
"+=",
"val",
"[",
"index",
"]",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"rgb",
")",
")",
":",
"rgb",
"[",
"i",
"]",
"/=",
"len",
"(",
"locs",
")",
"return",
"tuple",
"(",
"rgb",
")",
"if",
"int",
"(",
"value",
",",
"2",
")",
"==",
"0",
":",
"return",
"(",
"1",
",",
"1",
",",
"1",
")",
"if",
"len",
"(",
"palette",
"[",
"0",
"]",
")",
"==",
"3",
"else",
"(",
"1",
",",
"1",
",",
"1",
",",
"1",
")",
"return",
"-",
"1"
] |
Figure out the appropriate color for a binary string value by averaging
the colors corresponding to the indices of each one that it contains. Makes
for visualizations that intuitively show patch overlap.
|
[
"Figure",
"out",
"the",
"appropriate",
"color",
"for",
"a",
"binary",
"string",
"value",
"by",
"averaging",
"the",
"colors",
"corresponding",
"the",
"indices",
"of",
"each",
"one",
"that",
"it",
"contains",
".",
"Makes",
"for",
"visualizations",
"that",
"intuitively",
"show",
"patch",
"overlap",
"."
] |
7beb0166ccefad5fa722215b030ac2a53d62b59e
|
https://github.com/emilydolson/avida-spatial-tools/blob/7beb0166ccefad5fa722215b030ac2a53d62b59e/avidaspatial/visualizations.py#L425-L458
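A worked example of the averaging (illustrative, assuming the same import path as above): with a red/green/blue palette, '0b11' has ones at indices 0 and 1, so the result is the mean of red and green.

from avidaspatial.visualizations import color_array_by_hue_mix

palette = [(1, 0, 0), (0, 1, 0), (0, 0, 1)]       # red, green, blue
print(color_array_by_hue_mix("0b11", palette))    # bits 0 and 1 -> (0.5, 0.5, 0.0)
print(color_array_by_hue_mix("0b100", palette))   # bit 2 only   -> (0.0, 0.0, 1.0)
print(color_array_by_hue_mix("0b0", palette))     # empty patch  -> (1, 1, 1)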
|
239,646
|
emilydolson/avida-spatial-tools
|
avidaspatial/visualizations.py
|
color_percentages
|
def color_percentages(file_list, n_tasks=9, file_name="color_percent.png",
intensification_factor=1.2):
"""
Creates an image in which each cell in the avida grid is represented as
a square of 9 sub-cells. Each of these 9 sub-cells represents a different
task, and is colored such that cooler colors represent more complex tasks.
The saturation of each sub-cell indicates the percentage of grids in the
given data-set in which the organism in that cell could perform the
corresponding task.
    Inputs: file_list - list of names of avida task grid files to be used
in making figure.
intensification_factor (default 1.2): A number to multiply
the percentage of organisms doing a task by in order to increase
visibility. This can be useful in cases where a lot of the
percentages are too low to be easily visualized.
Returns: Grid indicating appropriate color values for images.
"""
# Load data
data = task_percentages(load_grid_data(file_list))
# Initialize grid
grid = [[]] * len(data)*3
for i in range(len(grid)):
grid[i] = [[]]*len(data[0])*3
# Color grid
for i in range(len(data)):
for j in range(len(data[i])):
for k in range(3): # create grid of sub-cells
for l in range(3):
if len(data[i][j]) > k*3+l:
# build a color in matplotlib's preferred hsv format
arr = np.zeros((1, 1, 3))
arr[0, 0, 1] = float(data[i][j][k*3 + l]) \
* intensification_factor # saturate based on data
arr[0, 0, 0] = (k*3 + l)/9.0 # hue based on task
arr[0, 0, 2] = 1 # value is always 1
rgb = matplotlib.colors.hsv_to_rgb(arr) # convert rgb
grid[i*3+k][j*3+l] = list(rgb[0][0])
else:
grid[i*3+k][j*3+l] = (1, 1, 1, 1)
return make_imshow_plot(grid, "colorpercentages")
|
python
|
def color_percentages(file_list, n_tasks=9, file_name="color_percent.png",
intensification_factor=1.2):
"""
Creates an image in which each cell in the avida grid is represented as
a square of 9 sub-cells. Each of these 9 sub-cells represents a different
task, and is colored such that cooler colors represent more complex tasks.
The saturation of each sub-cell indicates the percentage of grids in the
given data-set in which the organism in that cell could perform the
corresponding task.
    Inputs: file_list - list of names of avida task grid files to be used
in making figure.
intensification_factor (default 1.2): A number to multiply
the percentage of organisms doing a task by in order to increase
visibility. This can be useful in cases where a lot of the
percentages are too low to be easily visualized.
Returns: Grid indicating appropriate color values for images.
"""
# Load data
data = task_percentages(load_grid_data(file_list))
# Initialize grid
grid = [[]] * len(data)*3
for i in range(len(grid)):
grid[i] = [[]]*len(data[0])*3
# Color grid
for i in range(len(data)):
for j in range(len(data[i])):
for k in range(3): # create grid of sub-cells
for l in range(3):
if len(data[i][j]) > k*3+l:
# build a color in matplotlib's preferred hsv format
arr = np.zeros((1, 1, 3))
arr[0, 0, 1] = float(data[i][j][k*3 + l]) \
* intensification_factor # saturate based on data
arr[0, 0, 0] = (k*3 + l)/9.0 # hue based on task
arr[0, 0, 2] = 1 # value is always 1
rgb = matplotlib.colors.hsv_to_rgb(arr) # convert rgb
grid[i*3+k][j*3+l] = list(rgb[0][0])
else:
grid[i*3+k][j*3+l] = (1, 1, 1, 1)
return make_imshow_plot(grid, "colorpercentages")
|
[
"def",
"color_percentages",
"(",
"file_list",
",",
"n_tasks",
"=",
"9",
",",
"file_name",
"=",
"\"color_percent.png\"",
",",
"intensification_factor",
"=",
"1.2",
")",
":",
"# Load data",
"data",
"=",
"task_percentages",
"(",
"load_grid_data",
"(",
"file_list",
")",
")",
"# Initialize grid",
"grid",
"=",
"[",
"[",
"]",
"]",
"*",
"len",
"(",
"data",
")",
"*",
"3",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"grid",
")",
")",
":",
"grid",
"[",
"i",
"]",
"=",
"[",
"[",
"]",
"]",
"*",
"len",
"(",
"data",
"[",
"0",
"]",
")",
"*",
"3",
"# Color grid",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"data",
")",
")",
":",
"for",
"j",
"in",
"range",
"(",
"len",
"(",
"data",
"[",
"i",
"]",
")",
")",
":",
"for",
"k",
"in",
"range",
"(",
"3",
")",
":",
"# create grid of sub-cells",
"for",
"l",
"in",
"range",
"(",
"3",
")",
":",
"if",
"len",
"(",
"data",
"[",
"i",
"]",
"[",
"j",
"]",
")",
">",
"k",
"*",
"3",
"+",
"l",
":",
"# build a color in matplotlib's preferred hsv format",
"arr",
"=",
"np",
".",
"zeros",
"(",
"(",
"1",
",",
"1",
",",
"3",
")",
")",
"arr",
"[",
"0",
",",
"0",
",",
"1",
"]",
"=",
"float",
"(",
"data",
"[",
"i",
"]",
"[",
"j",
"]",
"[",
"k",
"*",
"3",
"+",
"l",
"]",
")",
"*",
"intensification_factor",
"# saturate based on data",
"arr",
"[",
"0",
",",
"0",
",",
"0",
"]",
"=",
"(",
"k",
"*",
"3",
"+",
"l",
")",
"/",
"9.0",
"# hue based on task",
"arr",
"[",
"0",
",",
"0",
",",
"2",
"]",
"=",
"1",
"# value is always 1",
"rgb",
"=",
"matplotlib",
".",
"colors",
".",
"hsv_to_rgb",
"(",
"arr",
")",
"# convert rgb",
"grid",
"[",
"i",
"*",
"3",
"+",
"k",
"]",
"[",
"j",
"*",
"3",
"+",
"l",
"]",
"=",
"list",
"(",
"rgb",
"[",
"0",
"]",
"[",
"0",
"]",
")",
"else",
":",
"grid",
"[",
"i",
"*",
"3",
"+",
"k",
"]",
"[",
"j",
"*",
"3",
"+",
"l",
"]",
"=",
"(",
"1",
",",
"1",
",",
"1",
",",
"1",
")",
"return",
"make_imshow_plot",
"(",
"grid",
",",
"\"colorpercentages\"",
")"
] |
Creates an image in which each cell in the avida grid is represented as
a square of 9 sub-cells. Each of these 9 sub-cells represents a different
task, and is colored such that cooler colors represent more complex tasks.
The saturation of each sub-cell indicates the percentage of grids in the
given data-set in which the organism in that cell could perform the
corresponding task.
Inputs: file_list - list of names of avida task grid files to be used
in making figure.
intensification_factor (default 1.2): A number to multiply
the percentage of organisms doing a task by in order to increase
visibility. This can be useful in cases where a lot of the
percentages are too low to be easily visualized.
Returns: Grid indicating appropriate color values for images.
|
[
"Creates",
"an",
"image",
"in",
"which",
"each",
"cell",
"in",
"the",
"avida",
"grid",
"is",
"represented",
"as",
"a",
"square",
"of",
"9",
"sub",
"-",
"cells",
".",
"Each",
"of",
"these",
"9",
"sub",
"-",
"cells",
"represents",
"a",
"different",
"task",
"and",
"is",
"colored",
"such",
"that",
"cooler",
"colors",
"represent",
"more",
"complex",
"tasks",
".",
"The",
"saturation",
"of",
"each",
"sub",
"-",
"cell",
"indicates",
"the",
"percentage",
"of",
"grids",
"in",
"the",
"given",
"data",
"-",
"set",
"in",
"which",
"the",
"organism",
"in",
"that",
"cell",
"could",
"perform",
"the",
"corresponding",
"task",
"."
] |
7beb0166ccefad5fa722215b030ac2a53d62b59e
|
https://github.com/emilydolson/avida-spatial-tools/blob/7beb0166ccefad5fa722215b030ac2a53d62b59e/avidaspatial/visualizations.py#L461-L507
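The hue/saturation encoding can be checked in isolation; this standalone sketch (illustrative numbers, no Avida data involved) reproduces the colour of a single sub-cell for task index 4 performed in half of the grids:

import numpy as np
import matplotlib.colors

task_index, fraction, intensification = 4, 0.5, 1.2
arr = np.zeros((1, 1, 3))
arr[0, 0, 0] = task_index / 9.0                 # hue encodes which task
arr[0, 0, 1] = fraction * intensification       # saturation encodes how often it is done
arr[0, 0, 2] = 1                                # value is always 1
print(matplotlib.colors.hsv_to_rgb(arr)[0][0])  # the RGB triple for that sub-cell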
|
239,647
|
emilydolson/avida-spatial-tools
|
avidaspatial/visualizations.py
|
make_imshow_plot
|
def make_imshow_plot(grid, name):
"""
Takes a grid of RGB or RGBA values and a filename to save the figure into.
Generates a figure by coloring all grid cells appropriately.
"""
plt.tick_params(labelbottom="off", labeltop="off", labelleft="off",
labelright="off", bottom="off", top="off", left="off",
right="off")
plt.imshow(grid, interpolation="nearest", aspect=1, zorder=1)
plt.tight_layout()
plt.savefig(name, dpi=1000, bbox_inches="tight")
|
python
|
def make_imshow_plot(grid, name):
"""
Takes a grid of RGB or RGBA values and a filename to save the figure into.
Generates a figure by coloring all grid cells appropriately.
"""
plt.tick_params(labelbottom="off", labeltop="off", labelleft="off",
labelright="off", bottom="off", top="off", left="off",
right="off")
plt.imshow(grid, interpolation="nearest", aspect=1, zorder=1)
plt.tight_layout()
plt.savefig(name, dpi=1000, bbox_inches="tight")
|
[
"def",
"make_imshow_plot",
"(",
"grid",
",",
"name",
")",
":",
"plt",
".",
"tick_params",
"(",
"labelbottom",
"=",
"\"off\"",
",",
"labeltop",
"=",
"\"off\"",
",",
"labelleft",
"=",
"\"off\"",
",",
"labelright",
"=",
"\"off\"",
",",
"bottom",
"=",
"\"off\"",
",",
"top",
"=",
"\"off\"",
",",
"left",
"=",
"\"off\"",
",",
"right",
"=",
"\"off\"",
")",
"plt",
".",
"imshow",
"(",
"grid",
",",
"interpolation",
"=",
"\"nearest\"",
",",
"aspect",
"=",
"1",
",",
"zorder",
"=",
"1",
")",
"plt",
".",
"tight_layout",
"(",
")",
"plt",
".",
"savefig",
"(",
"name",
",",
"dpi",
"=",
"1000",
",",
"bbox_inches",
"=",
"\"tight\"",
")"
] |
Takes a grid of RGB or RGBA values and a filename to save the figure into.
Generates a figure by coloring all grid cells appropriately.
|
[
"Takes",
"a",
"grid",
"of",
"RGB",
"or",
"RGBA",
"values",
"and",
"a",
"filename",
"to",
"save",
"the",
"figure",
"into",
".",
"Generates",
"a",
"figure",
"by",
"coloring",
"all",
"grid",
"cells",
"appropriately",
"."
] |
7beb0166ccefad5fa722215b030ac2a53d62b59e
|
https://github.com/emilydolson/avida-spatial-tools/blob/7beb0166ccefad5fa722215b030ac2a53d62b59e/avidaspatial/visualizations.py#L531-L541
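A toy call, assuming the same import path; note that the string "off" arguments to tick_params date from older matplotlib releases, and newer versions expect booleans (e.g. bottom=False) instead. The grid and output filename are illustrative.

from avidaspatial.visualizations import make_imshow_plot

grid = [[(1, 0, 0), (0, 1, 0)],     # a 2x2 grid of RGB tuples
        [(0, 0, 1), (1, 1, 1)]]
make_imshow_plot(grid, "demo.png")  # renders the grid and saves it at dpi=1000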
|
239,648
|
JukeboxPipeline/jukebox-core
|
src/jukeboxcore/filesys.py
|
copy_file
|
def copy_file(old, new):
"""Copy the old file to the location of the new file
:param old: The file to copy
:type old: :class:`JB_File`
:param new: The JB_File for the new location
:type new: :class:`JB_File`
:returns: None
:rtype: None
:raises: None
"""
oldp = old.get_fullpath()
newp = new.get_fullpath()
log.info("Copying %s to %s", oldp, newp)
new.create_directory()
shutil.copy(oldp, newp)
|
python
|
def copy_file(old, new):
"""Copy the old file to the location of the new file
:param old: The file to copy
:type old: :class:`JB_File`
:param new: The JB_File for the new location
:type new: :class:`JB_File`
:returns: None
:rtype: None
:raises: None
"""
oldp = old.get_fullpath()
newp = new.get_fullpath()
log.info("Copying %s to %s", oldp, newp)
new.create_directory()
shutil.copy(oldp, newp)
|
[
"def",
"copy_file",
"(",
"old",
",",
"new",
")",
":",
"oldp",
"=",
"old",
".",
"get_fullpath",
"(",
")",
"newp",
"=",
"new",
".",
"get_fullpath",
"(",
")",
"log",
".",
"info",
"(",
"\"Copying %s to %s\"",
",",
"oldp",
",",
"newp",
")",
"new",
".",
"create_directory",
"(",
")",
"shutil",
".",
"copy",
"(",
"oldp",
",",
"newp",
")"
] |
Copy the old file to the location of the new file
:param old: The file to copy
:type old: :class:`JB_File`
:param new: The JB_File for the new location
:type new: :class:`JB_File`
:returns: None
:rtype: None
:raises: None
|
[
"Copy",
"the",
"old",
"file",
"to",
"the",
"location",
"of",
"the",
"new",
"file"
] |
bac2280ca49940355270e4b69400ce9976ab2e6f
|
https://github.com/JukeboxPipeline/jukebox-core/blob/bac2280ca49940355270e4b69400ce9976ab2e6f/src/jukeboxcore/filesys.py#L13-L28
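The essential pattern (ensure the target directory exists, then copy) also works without JB_File; a plain-path sketch with a hypothetical helper name:

import os
import shutil

def copy_with_dirs(src, dst):
    """Copy src to dst, creating dst's parent directory first if needed."""
    parent = os.path.dirname(dst)
    if parent:
        os.makedirs(parent, exist_ok=True)
    shutil.copy(src, dst)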
|
239,649
|
JukeboxPipeline/jukebox-core
|
src/jukeboxcore/filesys.py
|
delete_file
|
def delete_file(f):
"""Delete the given file
:param f: the file to delete
:type f: :class:`JB_File`
:returns: None
:rtype: None
:raises: :class:`OSError`
"""
fp = f.get_fullpath()
log.info("Deleting file %s", fp)
os.remove(fp)
|
python
|
def delete_file(f):
"""Delete the given file
:param f: the file to delete
:type f: :class:`JB_File`
:returns: None
:rtype: None
:raises: :class:`OSError`
"""
fp = f.get_fullpath()
log.info("Deleting file %s", fp)
os.remove(fp)
|
[
"def",
"delete_file",
"(",
"f",
")",
":",
"fp",
"=",
"f",
".",
"get_fullpath",
"(",
")",
"log",
".",
"info",
"(",
"\"Deleting file %s\"",
",",
"fp",
")",
"os",
".",
"remove",
"(",
"fp",
")"
] |
Delete the given file
:param f: the file to delete
:type f: :class:`JB_File`
:returns: None
:rtype: None
:raises: :class:`OSError`
|
[
"Delete",
"the",
"given",
"file"
] |
bac2280ca49940355270e4b69400ce9976ab2e6f
|
https://github.com/JukeboxPipeline/jukebox-core/blob/bac2280ca49940355270e4b69400ce9976ab2e6f/src/jukeboxcore/filesys.py#L31-L42
|
239,650
|
JukeboxPipeline/jukebox-core
|
src/jukeboxcore/filesys.py
|
TaskFileInfo.get_next
|
def get_next(cls, task, releasetype, typ, descriptor=None):
    """Returns a TaskFileInfo with the next available version and the provided info
:param task: the task of the taskfile
:type task: :class:`jukeboxcore.djadapter.models.Task`
:param releasetype: the releasetype
:type releasetype: str - :data:`jukeboxcore.djadapter.RELEASETYPES`
:param typ: the file type, see :data:`TaskFileInfo.TYPES`
:type typ: str
:param descriptor: the descriptor, if the taskfile has one.
:type descriptor: str|None
    :returns: a TaskFileInfo object with the next available version and the provided info
:rtype: :class:`TaskFileInfo`
:raises: None
"""
qs = dj.taskfiles.filter(task=task, releasetype=releasetype, descriptor=descriptor, typ=typ)
if qs.exists():
ver = qs.aggregate(Max('version'))['version__max']+1
else:
ver = 1
return TaskFileInfo(task=task, version=ver, releasetype=releasetype, typ=typ, descriptor=descriptor)
|
python
|
def get_next(cls, task, releasetype, typ, descriptor=None):
    """Returns a TaskFileInfo with the next available version and the provided info
:param task: the task of the taskfile
:type task: :class:`jukeboxcore.djadapter.models.Task`
:param releasetype: the releasetype
:type releasetype: str - :data:`jukeboxcore.djadapter.RELEASETYPES`
:param typ: the file type, see :data:`TaskFileInfo.TYPES`
:type typ: str
:param descriptor: the descriptor, if the taskfile has one.
:type descriptor: str|None
    :returns: a TaskFileInfo object with the next available version and the provided info
:rtype: :class:`TaskFileInfo`
:raises: None
"""
qs = dj.taskfiles.filter(task=task, releasetype=releasetype, descriptor=descriptor, typ=typ)
if qs.exists():
ver = qs.aggregate(Max('version'))['version__max']+1
else:
ver = 1
return TaskFileInfo(task=task, version=ver, releasetype=releasetype, typ=typ, descriptor=descriptor)
|
[
"def",
"get_next",
"(",
"cls",
",",
"task",
",",
"releasetype",
",",
"typ",
",",
"descriptor",
"=",
"None",
")",
":",
"qs",
"=",
"dj",
".",
"taskfiles",
".",
"filter",
"(",
"task",
"=",
"task",
",",
"releasetype",
"=",
"releasetype",
",",
"descriptor",
"=",
"descriptor",
",",
"typ",
"=",
"typ",
")",
"if",
"qs",
".",
"exists",
"(",
")",
":",
"ver",
"=",
"qs",
".",
"aggregate",
"(",
"Max",
"(",
"'version'",
")",
")",
"[",
"'version__max'",
"]",
"+",
"1",
"else",
":",
"ver",
"=",
"1",
"return",
"TaskFileInfo",
"(",
"task",
"=",
"task",
",",
"version",
"=",
"ver",
",",
"releasetype",
"=",
"releasetype",
",",
"typ",
"=",
"typ",
",",
"descriptor",
"=",
"descriptor",
")"
] |
Returns a TaskFileInfo with the next available version and the provided info
:param task: the task of the taskfile
:type task: :class:`jukeboxcore.djadapter.models.Task`
:param releasetype: the releasetype
:type releasetype: str - :data:`jukeboxcore.djadapter.RELEASETYPES`
:param typ: the file type, see :data:`TaskFileInfo.TYPES`
:type typ: str
:param descriptor: the descriptor, if the taskfile has one.
:type descriptor: str|None
:returns: a TaskFileInfo object with the next available version and the provided info
:rtype: :class:`TaskFileInfo`
:raises: None
|
[
"Returns",
"a",
"TaskFileInfo",
"that",
"with",
"the",
"next",
"available",
"version",
"and",
"the",
"provided",
"info"
] |
bac2280ca49940355270e4b69400ce9976ab2e6f
|
https://github.com/JukeboxPipeline/jukebox-core/blob/bac2280ca49940355270e4b69400ce9976ab2e6f/src/jukeboxcore/filesys.py#L150-L170
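Stripped of the Django queryset, the versioning rule is simply "one past the current maximum, or 1 for the first file"; a self-contained sketch of that rule (next_version is a hypothetical name):

def next_version(existing_versions):
    """Return max(existing_versions) + 1, or 1 when nothing exists yet."""
    return max(existing_versions) + 1 if existing_versions else 1

assert next_version([]) == 1
assert next_version([1, 2, 5]) == 6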
|
239,651
|
JukeboxPipeline/jukebox-core
|
src/jukeboxcore/filesys.py
|
TaskFileInfo.create_from_taskfile
|
def create_from_taskfile(self, taskfile):
"""Create a new TaskFileInfo and return it for the given taskfile
:param taskfile: the taskfile to represent
:type taskfile: :class:`jukeboxcore.djadapter.models.TaskFile`
:returns: a taskfileinfo
:rtype: :class:`TaskFileInfo`
:raises: None
"""
return TaskFileInfo(task=taskfile.task, version=taskfile.version, releasetype=taskfile.releasetype,
descriptor=taskfile.descriptor, typ=taskfile.typ)
|
python
|
def create_from_taskfile(self, taskfile):
"""Create a new TaskFileInfo and return it for the given taskfile
:param taskfile: the taskfile to represent
:type taskfile: :class:`jukeboxcore.djadapter.models.TaskFile`
:returns: a taskfileinfo
:rtype: :class:`TaskFileInfo`
:raises: None
"""
return TaskFileInfo(task=taskfile.task, version=taskfile.version, releasetype=taskfile.releasetype,
descriptor=taskfile.descriptor, typ=taskfile.typ)
|
[
"def",
"create_from_taskfile",
"(",
"self",
",",
"taskfile",
")",
":",
"return",
"TaskFileInfo",
"(",
"task",
"=",
"taskfile",
".",
"task",
",",
"version",
"=",
"taskfile",
".",
"version",
",",
"releasetype",
"=",
"taskfile",
".",
"releasetype",
",",
"descriptor",
"=",
"taskfile",
".",
"descriptor",
",",
"typ",
"=",
"taskfile",
".",
"typ",
")"
] |
Create a new TaskFileInfo and return it for the given taskfile
:param taskfile: the taskfile to represent
:type taskfile: :class:`jukeboxcore.djadapter.models.TaskFile`
:returns: a taskfileinfo
:rtype: :class:`TaskFileInfo`
:raises: None
|
[
"Create",
"a",
"new",
"TaskFileInfo",
"and",
"return",
"it",
"for",
"the",
"given",
"taskfile"
] |
bac2280ca49940355270e4b69400ce9976ab2e6f
|
https://github.com/JukeboxPipeline/jukebox-core/blob/bac2280ca49940355270e4b69400ce9976ab2e6f/src/jukeboxcore/filesys.py#L173-L183
|
239,652
|
JukeboxPipeline/jukebox-core
|
src/jukeboxcore/filesys.py
|
TaskFileInfo.create_db_entry
|
def create_db_entry(self, comment=''):
"""Create a db entry for this task file info
    and link it with an optional comment
:param comment: a comment for the task file entry
:type comment: str
    :returns: The created TaskFile django instance and the created note. If the comment was empty, None is returned for the note instead
:rtype: tuple of :class:`dj.models.TaskFile` and :class:`dj.models.Note`
:raises: ValidationError, If the comment could not be created, the TaskFile is deleted and the Exception is propagated.
"""
jbfile = JB_File(self)
p = jbfile.get_fullpath()
user = dj.get_current_user()
tf = dj.models.TaskFile(path=p, task=self.task, version=self.version,
releasetype=self.releasetype, descriptor=self.descriptor,
typ=self.typ, user=user)
tf.full_clean()
tf.save()
note = None
if comment:
try:
note = dj.models.Note(user=user, parent=tf, content=comment)
note.full_clean()
note.save()
        except Exception as e:
tf.delete()
raise e
return tf, note
|
python
|
def create_db_entry(self, comment=''):
"""Create a db entry for this task file info
    and link it with an optional comment
:param comment: a comment for the task file entry
:type comment: str
    :returns: The created TaskFile django instance and the created note. If the comment was empty, None is returned for the note instead
:rtype: tuple of :class:`dj.models.TaskFile` and :class:`dj.models.Note`
:raises: ValidationError, If the comment could not be created, the TaskFile is deleted and the Exception is propagated.
"""
jbfile = JB_File(self)
p = jbfile.get_fullpath()
user = dj.get_current_user()
tf = dj.models.TaskFile(path=p, task=self.task, version=self.version,
releasetype=self.releasetype, descriptor=self.descriptor,
typ=self.typ, user=user)
tf.full_clean()
tf.save()
note = None
if comment:
try:
note = dj.models.Note(user=user, parent=tf, content=comment)
note.full_clean()
note.save()
        except Exception as e:
tf.delete()
raise e
return tf, note
|
[
"def",
"create_db_entry",
"(",
"self",
",",
"comment",
"=",
"''",
")",
":",
"jbfile",
"=",
"JB_File",
"(",
"self",
")",
"p",
"=",
"jbfile",
".",
"get_fullpath",
"(",
")",
"user",
"=",
"dj",
".",
"get_current_user",
"(",
")",
"tf",
"=",
"dj",
".",
"models",
".",
"TaskFile",
"(",
"path",
"=",
"p",
",",
"task",
"=",
"self",
".",
"task",
",",
"version",
"=",
"self",
".",
"version",
",",
"releasetype",
"=",
"self",
".",
"releasetype",
",",
"descriptor",
"=",
"self",
".",
"descriptor",
",",
"typ",
"=",
"self",
".",
"typ",
",",
"user",
"=",
"user",
")",
"tf",
".",
"full_clean",
"(",
")",
"tf",
".",
"save",
"(",
")",
"note",
"=",
"None",
"if",
"comment",
":",
"try",
":",
"note",
"=",
"dj",
".",
"models",
".",
"Note",
"(",
"user",
"=",
"user",
",",
"parent",
"=",
"tf",
",",
"content",
"=",
"comment",
")",
"note",
".",
"full_clean",
"(",
")",
"note",
".",
"save",
"(",
")",
"except",
"Exception",
",",
"e",
":",
"tf",
".",
"delete",
"(",
")",
"raise",
"e",
"return",
"tf",
",",
"note"
] |
Create a db entry for this task file info
and link it with an optional comment
:param comment: a comment for the task file entry
:type comment: str
:returns: The created TaskFile django instance and the created note. If the comment was empty, None is returned for the note instead
:rtype: tuple of :class:`dj.models.TaskFile` and :class:`dj.models.Note`
:raises: ValidationError, If the comment could not be created, the TaskFile is deleted and the Exception is propagated.
|
[
"Create",
"a",
"db",
"entry",
"for",
"this",
"task",
"file",
"info",
"and",
"link",
"it",
"with",
"a",
"optional",
"comment"
] |
bac2280ca49940355270e4b69400ce9976ab2e6f
|
https://github.com/JukeboxPipeline/jukebox-core/blob/bac2280ca49940355270e4b69400ce9976ab2e6f/src/jukeboxcore/filesys.py#L195-L222
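The try/except around the Note is a compensating-action pattern: if the dependent record cannot be created, the freshly saved parent is deleted so no orphan survives. A generic sketch of the same idea, with hypothetical callables standing in for the ORM calls:

def create_pair(create_parent, create_child, delete_parent):
    """Create a parent, then a child; undo the parent if the child fails."""
    parent = create_parent()
    try:
        child = create_child(parent)
    except Exception:
        delete_parent(parent)  # compensate before re-raising
        raise
    return parent, child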
|
239,653
|
JukeboxPipeline/jukebox-core
|
src/jukeboxcore/filesys.py
|
AttrElement.get_dir
|
def get_dir(self, obj):
    """Return the dirattr of obj formatted with the dirformat specified in the constructor.
    If the attr is None then ``None`` is returned, not the string ``\'None\'``.
:param obj: the fileinfo with information.
:type obj: :class:`FileInfo`
:returns: the directory or None
:rtype: str|None
:raises: None
"""
if self._dirattr is None:
return
a = attrgetter(self._dirattr)(obj)
if a is None:
return
s = self._dirformat % a
return s
|
python
|
def get_dir(self, obj):
    """Return the dirattr of obj formatted with the dirformat specified in the constructor.
    If the attr is None then ``None`` is returned, not the string ``\'None\'``.
:param obj: the fileinfo with information.
:type obj: :class:`FileInfo`
:returns: the directory or None
:rtype: str|None
:raises: None
"""
if self._dirattr is None:
return
a = attrgetter(self._dirattr)(obj)
if a is None:
return
s = self._dirformat % a
return s
|
[
"def",
"get_dir",
"(",
"self",
",",
"obj",
")",
":",
"if",
"self",
".",
"_dirattr",
"is",
"None",
":",
"return",
"a",
"=",
"attrgetter",
"(",
"self",
".",
"_dirattr",
")",
"(",
"obj",
")",
"if",
"a",
"is",
"None",
":",
"return",
"s",
"=",
"self",
".",
"_dirformat",
"%",
"a",
"return",
"s"
] |
Return the dirattr of obj formatted with the dirformat specified in the constructor.
If the attr is None then ``None`` is returned, not the string ``'None'``.
:param obj: the fileinfo with information.
:type obj: :class:`FileInfo`
:returns: the directory or None
:rtype: str|None
:raises: None
|
[
"Return",
"the",
"dirattr",
"of",
"obj",
"formatted",
"with",
"the",
"dirfomat",
"specified",
"in",
"the",
"constructor",
".",
"If",
"the",
"attr",
"is",
"None",
"then",
"None",
"is",
"returned",
"not",
"the",
"string",
"\\",
"None",
"\\",
"."
] |
bac2280ca49940355270e4b69400ce9976ab2e6f
|
https://github.com/JukeboxPipeline/jukebox-core/blob/bac2280ca49940355270e4b69400ce9976ab2e6f/src/jukeboxcore/filesys.py#L359-L375
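The two pieces of machinery here are operator.attrgetter (which also resolves dotted attribute paths) and %-formatting; a standalone sketch with made-up stand-ins for a FileInfo (get_chunk below works the same way):

from operator import attrgetter

class Task:
    name = "shading"

class Info:                           # stand-in for a FileInfo
    task = Task()
    version = 5

info = Info()
print(attrgetter("task.name")(info))  # dotted lookup, as with _dirattr -> shading
print("v%03d" % info.version)         # a plausible _dirformat -> v005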
|
239,654
|
JukeboxPipeline/jukebox-core
|
src/jukeboxcore/filesys.py
|
AttrElement.get_chunk
|
def get_chunk(self, obj):
    """Return the chunkattr of obj formatted with the chunkformat specified in the constructor.
    If the attr is None then ``None`` is returned, not the string ``\'None\'``.
:param obj: the fileinfo with information.
:type obj: :class:`FileInfo`
:returns: the chunk or None
:rtype: str|None
:raises: None
"""
if self._chunkattr is None:
return
a = attrgetter(self._chunkattr)(obj)
if a is None:
return
s = self._chunkformat % a
return s
|
python
|
def get_chunk(self, obj):
    """Return the chunkattr of obj formatted with the chunkformat specified in the constructor.
    If the attr is None then ``None`` is returned, not the string ``\'None\'``.
:param obj: the fileinfo with information.
:type obj: :class:`FileInfo`
:returns: the chunk or None
:rtype: str|None
:raises: None
"""
if self._chunkattr is None:
return
a = attrgetter(self._chunkattr)(obj)
if a is None:
return
s = self._chunkformat % a
return s
|
[
"def",
"get_chunk",
"(",
"self",
",",
"obj",
")",
":",
"if",
"self",
".",
"_chunkattr",
"is",
"None",
":",
"return",
"a",
"=",
"attrgetter",
"(",
"self",
".",
"_chunkattr",
")",
"(",
"obj",
")",
"if",
"a",
"is",
"None",
":",
"return",
"s",
"=",
"self",
".",
"_chunkformat",
"%",
"a",
"return",
"s"
] |
Return the chunkattr of obj formatted with the chunkformat specified in the constructor.
If the attr is None then ``None`` is returned, not the string ``'None'``.
:param obj: the fileinfo with information.
:type obj: :class:`FileInfo`
:returns: the chunk or None
:rtype: str|None
:raises: None
|
[
"Return",
"the",
"chunkattr",
"of",
"obj",
"formatted",
"with",
"the",
"chunkfomat",
"specified",
"in",
"the",
"constructor",
"If",
"the",
"attr",
"is",
"None",
"then",
"None",
"is",
"returned",
"not",
"the",
"string",
"\\",
"None",
"\\",
"."
] |
bac2280ca49940355270e4b69400ce9976ab2e6f
|
https://github.com/JukeboxPipeline/jukebox-core/blob/bac2280ca49940355270e4b69400ce9976ab2e6f/src/jukeboxcore/filesys.py#L377-L393
|
239,655
|
JukeboxPipeline/jukebox-core
|
src/jukeboxcore/filesys.py
|
JB_File.get_ext
|
def get_ext(self, obj=None):
"""Return the file extension
:param obj: the fileinfo with information. If None, this will use the stored object of JB_File
:type obj: :class:`FileInfo`
:returns: the file extension
:rtype: str
:raises: None
"""
if obj is None:
obj = self._obj
return self._extel.get_ext(obj)
|
python
|
def get_ext(self, obj=None):
"""Return the file extension
:param obj: the fileinfo with information. If None, this will use the stored object of JB_File
:type obj: :class:`FileInfo`
:returns: the file extension
:rtype: str
:raises: None
"""
if obj is None:
obj = self._obj
return self._extel.get_ext(obj)
|
[
"def",
"get_ext",
"(",
"self",
",",
"obj",
"=",
"None",
")",
":",
"if",
"obj",
"is",
"None",
":",
"obj",
"=",
"self",
".",
"_obj",
"return",
"self",
".",
"_extel",
".",
"get_ext",
"(",
"obj",
")"
] |
Return the file extension
:param obj: the fileinfo with information. If None, this will use the stored object of JB_File
:type obj: :class:`FileInfo`
:returns: the file extension
:rtype: str
:raises: None
|
[
"Return",
"the",
"file",
"extension"
] |
bac2280ca49940355270e4b69400ce9976ab2e6f
|
https://github.com/JukeboxPipeline/jukebox-core/blob/bac2280ca49940355270e4b69400ce9976ab2e6f/src/jukeboxcore/filesys.py#L545-L556
|
239,656
|
JukeboxPipeline/jukebox-core
|
src/jukeboxcore/filesys.py
|
JB_File.get_name
|
def get_name(self, obj=None, withext=True):
"""Return the filename
:param obj: the fileinfo with information. If None, this will use the stored object of JB_File
:type obj: :class:`FileInfo`
:param withext: If True, return with the fileextension.
:type withext: bool
:returns: the filename, default is with fileextension
:rtype: str
:raises: None
"""
if obj is None:
obj = self._obj
chunks = []
for e in self._elements:
c = e.get_chunk(obj)
if c is not None:
chunks.append(c)
name = '_'.join(chunks)
if withext:
name = os.extsep.join([name, self.get_ext(obj)])
return name
|
python
|
def get_name(self, obj=None, withext=True):
"""Return the filename
:param obj: the fileinfo with information. If None, this will use the stored object of JB_File
:type obj: :class:`FileInfo`
:param withext: If True, return with the fileextension.
:type withext: bool
:returns: the filename, default is with fileextension
:rtype: str
:raises: None
"""
if obj is None:
obj = self._obj
chunks = []
for e in self._elements:
c = e.get_chunk(obj)
if c is not None:
chunks.append(c)
name = '_'.join(chunks)
if withext:
name = os.extsep.join([name, self.get_ext(obj)])
return name
|
[
"def",
"get_name",
"(",
"self",
",",
"obj",
"=",
"None",
",",
"withext",
"=",
"True",
")",
":",
"if",
"obj",
"is",
"None",
":",
"obj",
"=",
"self",
".",
"_obj",
"chunks",
"=",
"[",
"]",
"for",
"e",
"in",
"self",
".",
"_elements",
":",
"c",
"=",
"e",
".",
"get_chunk",
"(",
"obj",
")",
"if",
"c",
"is",
"not",
"None",
":",
"chunks",
".",
"append",
"(",
"c",
")",
"name",
"=",
"'_'",
".",
"join",
"(",
"chunks",
")",
"if",
"withext",
":",
"name",
"=",
"os",
".",
"extsep",
".",
"join",
"(",
"[",
"name",
",",
"self",
".",
"get_ext",
"(",
"obj",
")",
"]",
")",
"return",
"name"
] |
Return the filename
:param obj: the fileinfo with information. If None, this will use the stored object of JB_File
:type obj: :class:`FileInfo`
:param withext: If True, return with the fileextension.
:type withext: bool
:returns: the filename, default is with fileextension
:rtype: str
:raises: None
|
[
"Return",
"the",
"filename"
] |
bac2280ca49940355270e4b69400ce9976ab2e6f
|
https://github.com/JukeboxPipeline/jukebox-core/blob/bac2280ca49940355270e4b69400ce9976ab2e6f/src/jukeboxcore/filesys.py#L577-L598
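With the elements reduced to their chunks, the name assembly is just filtering out None, joining on underscores, and appending the extension; a sketch with made-up chunk values and an assumed extension:

import os

chunks = ["shot010", None, "v005"]                   # one chunk per element; None is skipped
name = "_".join(c for c in chunks if c is not None)
name = os.extsep.join([name, "mb"])                  # withext=True, extension assumed "mb"
print(name)                                          # -> shot010_v005.mb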
|
239,657
|
JukeboxPipeline/jukebox-core
|
src/jukeboxcore/filesys.py
|
JB_File.get_fullpath
|
def get_fullpath(self, withext=True):
"""Return the filepath with the filename
:param withext: If True, return with the fileextension.
:type withext: bool
    :returns: the normalized full path, including the filename
    :rtype: str
:raises: None
"""
p = self.get_path(self._obj)
n = self.get_name(self._obj, withext)
fp = os.path.join(p,n)
return os.path.normpath(fp)
|
python
|
def get_fullpath(self, withext=True):
"""Return the filepath with the filename
:param withext: If True, return with the fileextension.
:type withext: bool
    :returns: the normalized full path, including the filename
    :rtype: str
:raises: None
"""
p = self.get_path(self._obj)
n = self.get_name(self._obj, withext)
fp = os.path.join(p,n)
return os.path.normpath(fp)
|
[
"def",
"get_fullpath",
"(",
"self",
",",
"withext",
"=",
"True",
")",
":",
"p",
"=",
"self",
".",
"get_path",
"(",
"self",
".",
"_obj",
")",
"n",
"=",
"self",
".",
"get_name",
"(",
"self",
".",
"_obj",
",",
"withext",
")",
"fp",
"=",
"os",
".",
"path",
".",
"join",
"(",
"p",
",",
"n",
")",
"return",
"os",
".",
"path",
".",
"normpath",
"(",
"fp",
")"
] |
Return the filepath with the filename
:param withext: If True, return with the fileextension.
:type withext: bool
:returns: the normalized full path, including the filename
:rtype: str
:raises: None
|
[
"Return",
"the",
"filepath",
"with",
"the",
"filename"
] |
bac2280ca49940355270e4b69400ce9976ab2e6f
|
https://github.com/JukeboxPipeline/jukebox-core/blob/bac2280ca49940355270e4b69400ce9976ab2e6f/src/jukeboxcore/filesys.py#L600-L612
|
239,658
|
JukeboxPipeline/jukebox-core
|
src/jukeboxcore/filesys.py
|
JB_File.create_directory
|
def create_directory(self, path=None):
"""Create the directory for the given path. If path is None use the path of this instance
:param path: the path to create
:type path: str
:returns: None
:rtype: None
:raises: OSError
"""
if path is None:
path = self.get_path()
if not os.path.exists(path):
os.makedirs(path)
|
python
|
def create_directory(self, path=None):
"""Create the directory for the given path. If path is None use the path of this instance
:param path: the path to create
:type path: str
:returns: None
:rtype: None
:raises: OSError
"""
if path is None:
path = self.get_path()
if not os.path.exists(path):
os.makedirs(path)
|
[
"def",
"create_directory",
"(",
"self",
",",
"path",
"=",
"None",
")",
":",
"if",
"path",
"is",
"None",
":",
"path",
"=",
"self",
".",
"get_path",
"(",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"path",
")",
":",
"os",
".",
"makedirs",
"(",
"path",
")"
] |
Create the directory for the given path. If path is None use the path of this instance
:param path: the path to create
:type path: str
:returns: None
:rtype: None
:raises: OSError
|
[
"Create",
"the",
"directory",
"for",
"the",
"given",
"path",
".",
"If",
"path",
"is",
"None",
"use",
"the",
"path",
"of",
"this",
"instance"
] |
bac2280ca49940355270e4b69400ce9976ab2e6f
|
https://github.com/JukeboxPipeline/jukebox-core/blob/bac2280ca49940355270e4b69400ce9976ab2e6f/src/jukeboxcore/filesys.py#L643-L655
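Note that the exists-then-makedirs sequence can race when two processes create the same path at once; since Python 3.2 the same effect is a single call (the path below is illustrative):

import os

os.makedirs("some/deep/path", exist_ok=True)  # no error if the path already exists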
|
239,659
|
sbarham/dsrt
|
build/lib/dsrt/application/Application.py
|
Application.corpus
|
def corpus(self):
'''Command to add a corpus to the dsrt library'''
# Initialize the addcorpus subcommand's argparser
description = '''The corpus subcommand has a number of subcommands of its own, including:
list\t-\tlists all available corpora in dsrt's library
add\t-\tadds a corpus to dsrt's library'''
parser = argparse.ArgumentParser(description=description)
self.init_corpus_args(parser)
# parse the args we got
args = parser.parse_args(sys.argv[2:3])
corpus_command = 'corpus_' + args.corpus_command
if not hasattr(self, corpus_command):
print('Unrecognized corpus command.')
parser.print_help()
exit(1)
getattr(self, corpus_command)()
|
python
|
def corpus(self):
'''Command to add a corpus to the dsrt library'''
# Initialize the addcorpus subcommand's argparser
description = '''The corpus subcommand has a number of subcommands of its own, including:
list\t-\tlists all available corpora in dsrt's library
add\t-\tadds a corpus to dsrt's library'''
parser = argparse.ArgumentParser(description=description)
self.init_corpus_args(parser)
# parse the args we got
args = parser.parse_args(sys.argv[2:3])
corpus_command = 'corpus_' + args.corpus_command
if not hasattr(self, corpus_command):
print('Unrecognized corpus command.')
parser.print_help()
exit(1)
getattr(self, corpus_command)()
|
[
"def",
"corpus",
"(",
"self",
")",
":",
"# Initialize the addcorpus subcommand's argparser",
"description",
"=",
"'''The corpus subcommand has a number of subcommands of its own, including:\n list\\t-\\tlists all available corpora in dsrt's library\n add\\t-\\tadds a corpus to dsrt's library'''",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
"description",
"=",
"description",
")",
"self",
".",
"init_corpus_args",
"(",
"parser",
")",
"# parse the args we got",
"args",
"=",
"parser",
".",
"parse_args",
"(",
"sys",
".",
"argv",
"[",
"2",
":",
"3",
"]",
")",
"corpus_command",
"=",
"'corpus_'",
"+",
"args",
".",
"corpus_command",
"if",
"not",
"hasattr",
"(",
"self",
",",
"corpus_command",
")",
":",
"print",
"(",
"'Unrecognized corpus command.'",
")",
"parser",
".",
"print_help",
"(",
")",
"exit",
"(",
"1",
")",
"getattr",
"(",
"self",
",",
"corpus_command",
")",
"(",
")"
] |
Command to add a corpus to the dsrt library
|
[
"Command",
"to",
"add",
"a",
"corpus",
"to",
"the",
"dsrt",
"library"
] |
bc664739f2f52839461d3e72773b71146fd56a9a
|
https://github.com/sbarham/dsrt/blob/bc664739f2f52839461d3e72773b71146fd56a9a/build/lib/dsrt/application/Application.py#L69-L90
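The getattr dispatch used by corpus (and by dataset below) is a small, reusable pattern for git-style subcommands; a self-contained sketch, with a made-up CLI class rather than the library's own:

import sys

class CLI:
    def corpus_list(self):
        print("listing corpora...")

    def dispatch(self, group, command):
        handler = group + "_" + command          # e.g. "corpus" + "_" + "list"
        if not hasattr(self, handler):
            print("Unrecognized %s command." % group)
            sys.exit(1)
        getattr(self, handler)()

CLI().dispatch("corpus", "list")                 # -> listing corpora...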
|
239,660
|
sbarham/dsrt
|
build/lib/dsrt/application/Application.py
|
Application.dataset
|
def dataset(self):
'''Command for manipulating or viewing datasets; has a number of subcommands'''
# Initialize the addcorpus subcommand's argparser
description = '''The dataset subcommand has a number of subcommands of its own, including:
list\t-\tlists all available datasets in dsrt's library
prepare\t-\tprocesses a corpus into a dataset and adds the processed dataset to dsrt's library'''
parser = argparse.ArgumentParser(description=description)
self.init_dataset_args(parser)
# parse the args we got
args = parser.parse_args(sys.argv[2:3])
corpus_command = 'dataset_' + args.dataset_command
if not hasattr(self, corpus_command):
print('Unrecognized dataset command.')
parser.print_help()
exit(1)
getattr(self, corpus_command)()
|
python
|
def dataset(self):
'''Command for manipulating or viewing datasets; has a number of subcommands'''
# Initialize the addcorpus subcommand's argparser
description = '''The dataset subcommand has a number of subcommands of its own, including:
list\t-\tlists all available datasets in dsrt's library
prepare\t-\tprocesses a corpus into a dataset and adds the processed dataset to dsrt's library'''
parser = argparse.ArgumentParser(description=description)
self.init_dataset_args(parser)
# parse the args we got
args = parser.parse_args(sys.argv[2:3])
corpus_command = 'dataset_' + args.dataset_command
if not hasattr(self, corpus_command):
print('Unrecognized dataset command.')
parser.print_help()
exit(1)
getattr(self, corpus_command)()
|
[
"def",
"dataset",
"(",
"self",
")",
":",
"# Initialize the addcorpus subcommand's argparser",
"description",
"=",
"'''The dataset subcommand has a number of subcommands of its own, including:\n list\\t-\\tlists all available datasets in dsrt's library\n prepare\\t-\\tprocesses a corpus into a dataset and adds the processed dataset to dsrt's library'''",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
"description",
"=",
"description",
")",
"self",
".",
"init_dataset_args",
"(",
"parser",
")",
"# parse the args we got",
"args",
"=",
"parser",
".",
"parse_args",
"(",
"sys",
".",
"argv",
"[",
"2",
":",
"3",
"]",
")",
"corpus_command",
"=",
"'dataset_'",
"+",
"args",
".",
"dataset_command",
"if",
"not",
"hasattr",
"(",
"self",
",",
"corpus_command",
")",
":",
"print",
"(",
"'Unrecognized dataset command.'",
")",
"parser",
".",
"print_help",
"(",
")",
"exit",
"(",
"1",
")",
"getattr",
"(",
"self",
",",
"corpus_command",
")",
"(",
")"
] |
Command for manipulating or viewing datasets; has a number of subcommands
|
[
"Command",
"for",
"manipulating",
"or",
"viewing",
"datasets",
";",
"has",
"a",
"number",
"of",
"subcommands"
] |
bc664739f2f52839461d3e72773b71146fd56a9a
|
https://github.com/sbarham/dsrt/blob/bc664739f2f52839461d3e72773b71146fd56a9a/build/lib/dsrt/application/Application.py#L115-L136
|
239,661
|
sbarham/dsrt
|
build/lib/dsrt/application/Application.py
|
Application.dataset_prepare
|
def dataset_prepare(self):
'''Subcommand of dataset for processing a corpus into a dataset'''
# Initialize the prepare subcommand's argparser
parser = argparse.ArgumentParser(description='Preprocess a raw dialogue corpus into a dsrt dataset')
self.init_dataset_prepare_args(parser)
# Parse the args we got
args = parser.parse_args(sys.argv[3:])
args.config = ConfigurationLoader(args.config).load().data_config
print(CLI_DIVIDER + '\n')
Preprocessor(**vars(args)).run()
|
python
|
def dataset_prepare(self):
'''Subcommand of dataset for processing a corpus into a dataset'''
# Initialize the prepare subcommand's argparser
parser = argparse.ArgumentParser(description='Preprocess a raw dialogue corpus into a dsrt dataset')
self.init_dataset_prepare_args(parser)
# Parse the args we got
args = parser.parse_args(sys.argv[3:])
args.config = ConfigurationLoader(args.config).load().data_config
print(CLI_DIVIDER + '\n')
Preprocessor(**vars(args)).run()
|
[
"def",
"dataset_prepare",
"(",
"self",
")",
":",
"# Initialize the prepare subcommand's argparser",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
"description",
"=",
"'Preprocess a raw dialogue corpus into a dsrt dataset'",
")",
"self",
".",
"init_dataset_prepare_args",
"(",
"parser",
")",
"# Parse the args we got",
"args",
"=",
"parser",
".",
"parse_args",
"(",
"sys",
".",
"argv",
"[",
"3",
":",
"]",
")",
"args",
".",
"config",
"=",
"ConfigurationLoader",
"(",
"args",
".",
"config",
")",
".",
"load",
"(",
")",
".",
"data_config",
"print",
"(",
"CLI_DIVIDER",
"+",
"'\\n'",
")",
"Preprocessor",
"(",
"*",
"*",
"vars",
"(",
"args",
")",
")",
".",
"run",
"(",
")"
] |
Subcommand of dataset for processing a corpus into a dataset
|
[
"Subcommand",
"of",
"dataset",
"for",
"processing",
"a",
"corpus",
"into",
"a",
"dataset"
] |
bc664739f2f52839461d3e72773b71146fd56a9a
|
https://github.com/sbarham/dsrt/blob/bc664739f2f52839461d3e72773b71146fd56a9a/build/lib/dsrt/application/Application.py#L138-L150
|
239,662
|
sbarham/dsrt
|
build/lib/dsrt/application/Application.py
|
Application.dataset_list
|
def dataset_list(self):
'''Subcommand of dataset for listing available datasets'''
# Initialize the prepare subcommand's argparser
parser = argparse.ArgumentParser(description='Preprocess a raw dialogue corpus into a dsrt dataset')
self.init_dataset_list_args(parser)
# Parse the args we got
args = parser.parse_args(sys.argv[3:])
print(CLI_DIVIDER + '\n')
dsrt.application.utils.list_dataset()
|
python
|
def dataset_list(self):
'''Subcommand of dataset for listing available datasets'''
# Initialize the prepare subcommand's argparser
parser = argparse.ArgumentParser(description='Preprocess a raw dialogue corpus into a dsrt dataset')
self.init_dataset_list_args(parser)
# Parse the args we got
args = parser.parse_args(sys.argv[3:])
print(CLI_DIVIDER + '\n')
dsrt.application.utils.list_dataset()
|
[
"def",
"dataset_list",
"(",
"self",
")",
":",
"# Initialize the prepare subcommand's argparser",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
"description",
"=",
"'Preprocess a raw dialogue corpus into a dsrt dataset'",
")",
"self",
".",
"init_dataset_list_args",
"(",
"parser",
")",
"# Parse the args we got",
"args",
"=",
"parser",
".",
"parse_args",
"(",
"sys",
".",
"argv",
"[",
"3",
":",
"]",
")",
"print",
"(",
"CLI_DIVIDER",
"+",
"'\\n'",
")",
"dsrt",
".",
"application",
".",
"utils",
".",
"list_dataset",
"(",
")"
] |
Subcommand of dataset for listing available datasets
|
[
"Subcommand",
"of",
"dataset",
"for",
"listing",
"available",
"datasets"
] |
bc664739f2f52839461d3e72773b71146fd56a9a
|
https://github.com/sbarham/dsrt/blob/bc664739f2f52839461d3e72773b71146fd56a9a/build/lib/dsrt/application/Application.py#L152-L163
|
239,663
|
sbarham/dsrt
|
build/lib/dsrt/application/Application.py
|
Application.train
|
def train(self):
'''The 'train' subcommand'''
# Initialize the train subcommand's argparser
parser = argparse.ArgumentParser(description='Train a dialogue model on a dialogue corpus or a dsrt dataset')
self.init_train_args(parser)
# Parse the args we got
args = parser.parse_args(sys.argv[2:])
args.config = ConfigurationLoader(args.config).load().model_config
print(CLI_DIVIDER + '\n')
Trainer(**vars(args)).run()
|
python
|
def train(self):
'''The 'train' subcommand'''
# Initialize the train subcommand's argparser
parser = argparse.ArgumentParser(description='Train a dialogue model on a dialogue corpus or a dsrt dataset')
self.init_train_args(parser)
# Parse the args we got
args = parser.parse_args(sys.argv[2:])
args.config = ConfigurationLoader(args.config).load().model_config
print(CLI_DIVIDER + '\n')
Trainer(**vars(args)).run()
|
[
"def",
"train",
"(",
"self",
")",
":",
"# Initialize the train subcommand's argparser",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
"description",
"=",
"'Train a dialogue model on a dialogue corpus or a dsrt dataset'",
")",
"self",
".",
"init_train_args",
"(",
"parser",
")",
"# Parse the args we got",
"args",
"=",
"parser",
".",
"parse_args",
"(",
"sys",
".",
"argv",
"[",
"2",
":",
"]",
")",
"args",
".",
"config",
"=",
"ConfigurationLoader",
"(",
"args",
".",
"config",
")",
".",
"load",
"(",
")",
".",
"model_config",
"print",
"(",
"CLI_DIVIDER",
"+",
"'\\n'",
")",
"Trainer",
"(",
"*",
"*",
"vars",
"(",
"args",
")",
")",
".",
"run",
"(",
")"
] |
The 'train' subcommand
|
[
"The",
"train",
"subcommand"
] |
bc664739f2f52839461d3e72773b71146fd56a9a
|
https://github.com/sbarham/dsrt/blob/bc664739f2f52839461d3e72773b71146fd56a9a/build/lib/dsrt/application/Application.py#L165-L177
|
239,664
|
sbarham/dsrt
|
build/lib/dsrt/application/Application.py
|
Application.load_corpus
|
def load_corpus(self, path, config):
'''Load a dialogue corpus; eventually, support pickles and potentially other formats'''
# use the default dataset if no path is provided
# TODO -- change this to use a pre-saved dataset
if path == '':
path = self.default_path_to_corpus
self.data = Corpus(path=path, config=self.data_config)
|
python
|
def load_corpus(self, path, config):
'''Load a dialogue corpus; eventually, support pickles and potentially other formats'''
# use the default dataset if no path is provided
# TODO -- change this to use a pre-saved dataset
if path == '':
path = self.default_path_to_corpus
self.data = Corpus(path=path, config=self.data_config)
|
[
"def",
"load_corpus",
"(",
"self",
",",
"path",
",",
"config",
")",
":",
"# use the default dataset if no path is provided",
"# TODO -- change this to use a pre-saved dataset",
"if",
"path",
"==",
"''",
":",
"path",
"=",
"self",
".",
"default_path_to_corpus",
"self",
".",
"data",
"=",
"Corpus",
"(",
"path",
"=",
"path",
",",
"config",
"=",
"self",
".",
"data_config",
")"
] |
Load a dialogue corpus; eventually, support pickles and potentially other formats
|
[
"Load",
"a",
"dialogue",
"corpus",
";",
"eventually",
"support",
"pickles",
"and",
"potentially",
"other",
"formats"
] |
bc664739f2f52839461d3e72773b71146fd56a9a
|
https://github.com/sbarham/dsrt/blob/bc664739f2f52839461d3e72773b71146fd56a9a/build/lib/dsrt/application/Application.py#L197-L205
|
239,665
|
sbarham/dsrt
|
build/lib/dsrt/application/Application.py
|
Application.init_dataset_prepare_args
|
def init_dataset_prepare_args(self, parser):
'''Only invoked conditionally if subcommand is 'prepare' '''
parser.add_argument('-f', '--configuration', dest='config', default=DEFAULT_USER_CONFIG_PATH,
help='the path to the configuration file to use -- ./config.yaml by default')
parser.add_argument('-c', '--corpus-name', help='the name of the corpus to process')
parser.add_argument('-n', '--dataset-name', help='the name to assign the newly processed dataset')
|
python
|
def init_dataset_prepare_args(self, parser):
'''Only invoked conditionally if subcommand is 'prepare' '''
parser.add_argument('-f', '--configuration', dest='config', default=DEFAULT_USER_CONFIG_PATH,
help='the path to the configuration file to use -- ./config.yaml by default')
parser.add_argument('-c', '--corpus-name', help='the name of the corpus to process')
parser.add_argument('-n', '--dataset-name', help='the name to assign the newly processed dataset')
|
[
"def",
"init_dataset_prepare_args",
"(",
"self",
",",
"parser",
")",
":",
"parser",
".",
"add_argument",
"(",
"'-f'",
",",
"'--configuration'",
",",
"dest",
"=",
"'config'",
",",
"default",
"=",
"DEFAULT_USER_CONFIG_PATH",
",",
"help",
"=",
"'the path to the configuration file to use -- ./config.yaml by default'",
")",
"parser",
".",
"add_argument",
"(",
"'-c'",
",",
"'--corpus-name'",
",",
"help",
"=",
"'the name of the corpus to process'",
")",
"parser",
".",
"add_argument",
"(",
"'-n'",
",",
"'--dataset-name'",
",",
"help",
"=",
"'the name to assign the newly processed dataset'",
")"
] |
Only invoked conditionally if subcommand is 'prepare'
|
[
"Only",
"invoked",
"conditionally",
"if",
"subcommand",
"is",
"prepare"
] |
bc664739f2f52839461d3e72773b71146fd56a9a
|
https://github.com/sbarham/dsrt/blob/bc664739f2f52839461d3e72773b71146fd56a9a/build/lib/dsrt/application/Application.py#L231-L236
|
239,666
|
steder/goose
|
goose/core.py
|
executeBatch
|
def executeBatch(cursor, sql,
regex=r"(?mx) ([^';]* (?:'[^']*'[^';]*)*)",
comment_regex=r"(?mx) (?:^\s*$)|(?:--.*$)"):
"""
Takes a SQL file and executes it as many separate statements.
TODO: replace regexes with something easier to grok and extend.
"""
# First, strip comments
sql = "\n".join([x.strip().replace("%", "%%") for x in re.split(comment_regex, sql) if x.strip()])
# Stored procedures don't work with the above regex because many of them are
    # made up of multiple sql statements each delimited with a single ;
# where the regexes assume each statement delimited by a ; is a complete
# statement to send to mysql and execute.
#
    # Here I'm simply checking for the delimiter statements (which seem to be
# mysql-only) and then using them as markers to start accumulating statements.
# So the first delimiter is the signal to start accumulating
# and the second delimiter is the signal to combine them into
# single sql compound statement and send it to mysql.
in_proc = False
statements = []
for st in re.split(regex, sql)[1:][::2]:
if st.strip().lower().startswith("delimiter"):
in_proc = not in_proc
if statements and not in_proc:
procedure = ";".join(statements)
statements = []
cursor.execute(procedure)
# skip the delimiter line
continue
if in_proc:
statements.append(st)
else:
cursor.execute(st)
|
python
|
def executeBatch(cursor, sql,
regex=r"(?mx) ([^';]* (?:'[^']*'[^';]*)*)",
comment_regex=r"(?mx) (?:^\s*$)|(?:--.*$)"):
"""
Takes a SQL file and executes it as many separate statements.
TODO: replace regexes with something easier to grok and extend.
"""
# First, strip comments
sql = "\n".join([x.strip().replace("%", "%%") for x in re.split(comment_regex, sql) if x.strip()])
# Stored procedures don't work with the above regex because many of them are
    # made up of multiple sql statements each delimited with a single ;
# where the regexes assume each statement delimited by a ; is a complete
# statement to send to mysql and execute.
#
    # Here I'm simply checking for the delimiter statements (which seem to be
# mysql-only) and then using them as markers to start accumulating statements.
# So the first delimiter is the signal to start accumulating
# and the second delimiter is the signal to combine them into
# single sql compound statement and send it to mysql.
in_proc = False
statements = []
for st in re.split(regex, sql)[1:][::2]:
if st.strip().lower().startswith("delimiter"):
in_proc = not in_proc
if statements and not in_proc:
procedure = ";".join(statements)
statements = []
cursor.execute(procedure)
# skip the delimiter line
continue
if in_proc:
statements.append(st)
else:
cursor.execute(st)
|
[
"def",
"executeBatch",
"(",
"cursor",
",",
"sql",
",",
"regex",
"=",
"r\"(?mx) ([^';]* (?:'[^']*'[^';]*)*)\"",
",",
"comment_regex",
"=",
"r\"(?mx) (?:^\\s*$)|(?:--.*$)\"",
")",
":",
"# First, strip comments",
"sql",
"=",
"\"\\n\"",
".",
"join",
"(",
"[",
"x",
".",
"strip",
"(",
")",
".",
"replace",
"(",
"\"%\"",
",",
"\"%%\"",
")",
"for",
"x",
"in",
"re",
".",
"split",
"(",
"comment_regex",
",",
"sql",
")",
"if",
"x",
".",
"strip",
"(",
")",
"]",
")",
"# Stored procedures don't work with the above regex because many of them are",
"# made up multiple sql statements each delimited with a single ;",
"# where the regexes assume each statement delimited by a ; is a complete",
"# statement to send to mysql and execute.",
"#",
"# Here i'm simply checking for the delimiter statements (which seem to be",
"# mysql-only) and then using them as markers to start accumulating statements.",
"# So the first delimiter is the signal to start accumulating",
"# and the second delimiter is the signal to combine them into",
"# single sql compound statement and send it to mysql.",
"in_proc",
"=",
"False",
"statements",
"=",
"[",
"]",
"for",
"st",
"in",
"re",
".",
"split",
"(",
"regex",
",",
"sql",
")",
"[",
"1",
":",
"]",
"[",
":",
":",
"2",
"]",
":",
"if",
"st",
".",
"strip",
"(",
")",
".",
"lower",
"(",
")",
".",
"startswith",
"(",
"\"delimiter\"",
")",
":",
"in_proc",
"=",
"not",
"in_proc",
"if",
"statements",
"and",
"not",
"in_proc",
":",
"procedure",
"=",
"\";\"",
".",
"join",
"(",
"statements",
")",
"statements",
"=",
"[",
"]",
"cursor",
".",
"execute",
"(",
"procedure",
")",
"# skip the delimiter line",
"continue",
"if",
"in_proc",
":",
"statements",
".",
"append",
"(",
"st",
")",
"else",
":",
"cursor",
".",
"execute",
"(",
"st",
")"
] |
Takes a SQL file and executes it as many separate statements.
TODO: replace regexes with something easier to grok and extend.
|
[
"Takes",
"a",
"SQL",
"file",
"and",
"executes",
"it",
"as",
"many",
"separate",
"statements",
"."
] |
e9290b39fdd7b3842052e40995ee833eaf8f15db
|
https://github.com/steder/goose/blob/e9290b39fdd7b3842052e40995ee833eaf8f15db/goose/core.py#L30-L69
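A simplified restatement of the DELIMITER accumulation (the statement list is assumed already split, and the quote-aware regex is ignored), with a stub cursor that prints what it would run instead of touching a database:

class PrintCursor:
    """Stand-in for a DB-API cursor; prints instead of executing."""
    def execute(self, statement):
        print("EXECUTE:", statement.strip())

def run_batch(cursor, statements):
    in_proc, acc = False, []
    for st in statements:
        if st.strip().lower().startswith("delimiter"):
            in_proc = not in_proc
            if acc and not in_proc:
                cursor.execute(";".join(acc))  # send the whole procedure at once
                acc = []
            continue
        if in_proc:
            acc.append(st)
        else:
            cursor.execute(st)

run_batch(PrintCursor(), [
    "CREATE TABLE t (id INT)",
    "DELIMITER //",
    "CREATE PROCEDURE p() BEGIN",
    "SELECT 1",
    "END",
    "DELIMITER ;",
])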
|
239,667
|
steder/goose
|
goose/core.py
|
DatabaseMigrator.howToMigrate
|
def howToMigrate(self, fromVersion, toVersion=None):
"""Given a starting version and an ending version
    returns filenames of all the migrations in that range, which is
    half-open: [fromVersion, toVersion)
"""
# slice notation [start:end:step]
# by adding a step of 1 we make a slice from 0:0 be empty
# rather than containing the whole list.
if toVersion is not None:
return self.migrations[fromVersion:toVersion:1]
else:
return self.migrations[fromVersion::1]
|
python
|
def howToMigrate(self, fromVersion, toVersion=None):
"""Given a starting version and an ending version
    returns filenames of all the migrations in that range, which is
    half-open: [fromVersion, toVersion)
"""
# slice notation [start:end:step]
# by adding a step of 1 we make a slice from 0:0 be empty
# rather than containing the whole list.
if toVersion is not None:
return self.migrations[fromVersion:toVersion:1]
else:
return self.migrations[fromVersion::1]
|
[
"def",
"howToMigrate",
"(",
"self",
",",
"fromVersion",
",",
"toVersion",
"=",
"None",
")",
":",
"# slice notation [start:end:step]",
"# by adding a step of 1 we make a slice from 0:0 be empty",
"# rather than containing the whole list.",
"if",
"toVersion",
"is",
"not",
"None",
":",
"return",
"self",
".",
"migrations",
"[",
"fromVersion",
":",
"toVersion",
":",
"1",
"]",
"else",
":",
"return",
"self",
".",
"migrations",
"[",
"fromVersion",
":",
":",
"1",
"]"
] |
Given a starting version and an ending version
returns filenames of all the migrations in that range, which is
half-open: [fromVersion, toVersion)
|
[
"Given",
"a",
"starting",
"version",
"and",
"an",
"ending",
"version",
"returns",
"filenames",
"of",
"all",
"the",
"migrations",
"in",
"that",
"range",
"exclusive",
"."
] |
e9290b39fdd7b3842052e40995ee833eaf8f15db
|
https://github.com/steder/goose/blob/e9290b39fdd7b3842052e40995ee833eaf8f15db/goose/core.py#L113-L125
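Because Python slices are half-open, the method maps directly onto list slicing; a quick check with plain strings standing in for migration filenames:

migrations = ["001_init.sql", "002_users.sql", "003_index.sql", "004_audit.sql"]
print(migrations[1:3:1])   # howToMigrate(1, 3) -> ['002_users.sql', '003_index.sql']
print(migrations[2::1])    # howToMigrate(2)    -> ['003_index.sql', '004_audit.sql']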
|
239,668
|
steder/goose
|
goose/core.py
|
DatabaseMigrator.runSql
|
def runSql(self, migrationName, version):
"""
    Given a migration name and version, look up the sql file and run it.
"""
sys.stdout.write("Running migration %s to version %s: ..."%(migrationName, version))
sqlPath = os.path.join(self.migrationDirectory, migrationName)
sql = open(sqlPath, "r").read()
try:
if self.session.is_active:
            print("session is active")
self.session.commit()
self.session.begin()
executeBatch(self.session, sql)
self.session.add(models.Migration(version, migrationName))
except:
print "\n"
self.session.rollback()
raise
else:
self.session.commit()
sys.stdout.write("\r")
sys.stdout.flush()
sys.stdout.write("Running migration %s to version %s: SUCCESS!\n"%(migrationName, version))
|
python
|
def runSql(self, migrationName, version):
"""
Given a migration name and version lookup the sql file and run it.
"""
sys.stdout.write("Running migration %s to version %s: ..."%(migrationName, version))
sqlPath = os.path.join(self.migrationDirectory, migrationName)
sql = open(sqlPath, "r").read()
try:
if self.session.is_active:
print "session is active"
self.session.commit()
self.session.begin()
executeBatch(self.session, sql)
self.session.add(models.Migration(version, migrationName))
except:
print "\n"
self.session.rollback()
raise
else:
self.session.commit()
sys.stdout.write("\r")
sys.stdout.flush()
sys.stdout.write("Running migration %s to version %s: SUCCESS!\n"%(migrationName, version))
|
[
"def",
"runSql",
"(",
"self",
",",
"migrationName",
",",
"version",
")",
":",
"sys",
".",
"stdout",
".",
"write",
"(",
"\"Running migration %s to version %s: ...\"",
"%",
"(",
"migrationName",
",",
"version",
")",
")",
"sqlPath",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"migrationDirectory",
",",
"migrationName",
")",
"sql",
"=",
"open",
"(",
"sqlPath",
",",
"\"r\"",
")",
".",
"read",
"(",
")",
"try",
":",
"if",
"self",
".",
"session",
".",
"is_active",
":",
"print",
"\"session is active\"",
"self",
".",
"session",
".",
"commit",
"(",
")",
"self",
".",
"session",
".",
"begin",
"(",
")",
"executeBatch",
"(",
"self",
".",
"session",
",",
"sql",
")",
"self",
".",
"session",
".",
"add",
"(",
"models",
".",
"Migration",
"(",
"version",
",",
"migrationName",
")",
")",
"except",
":",
"print",
"\"\\n\"",
"self",
".",
"session",
".",
"rollback",
"(",
")",
"raise",
"else",
":",
"self",
".",
"session",
".",
"commit",
"(",
")",
"sys",
".",
"stdout",
".",
"write",
"(",
"\"\\r\"",
")",
"sys",
".",
"stdout",
".",
"flush",
"(",
")",
"sys",
".",
"stdout",
".",
"write",
"(",
"\"Running migration %s to version %s: SUCCESS!\\n\"",
"%",
"(",
"migrationName",
",",
"version",
")",
")"
] |
Given a migration name and version lookup the sql file and run it.
|
[
"Given",
"a",
"migration",
"name",
"and",
"version",
"lookup",
"the",
"sql",
"file",
"and",
"run",
"it",
"."
] |
e9290b39fdd7b3842052e40995ee833eaf8f15db
|
https://github.com/steder/goose/blob/e9290b39fdd7b3842052e40995ee833eaf8f15db/goose/core.py#L140-L162
|
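The control flow in runSql is the standard DB-API transaction idiom: roll back and re-raise on any failure, and commit only in the `else` branch, which runs when the `try` body finished cleanly. A minimal sketch against a generic DB-API connection, with sqlite3 standing in for goose's SQLAlchemy session:

import sqlite3

def run_in_transaction(conn, sql):
    cursor = conn.cursor()
    try:
        cursor.execute(sql)   # any exception here leaves the commit unreached
    except Exception:
        conn.rollback()       # undo partial work before propagating
        raise
    else:
        conn.commit()         # reached only when execute() succeeded

conn = sqlite3.connect(":memory:")
run_in_transaction(conn, "CREATE TABLE migration (version INTEGER)")
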
239,669
|
realestate-com-au/dashmat
|
dashmat/core_modules/splunk/splunk-sdk-1.3.0/splunklib/modularinput/event_writer.py
|
EventWriter.write_event
|
def write_event(self, event):
"""Writes an ``Event`` object to Splunk.
:param event: An ``Event`` object.
"""
if not self.header_written:
self._out.write("<stream>")
self.header_written = True
event.write_to(self._out)
|
python
|
def write_event(self, event):
"""Writes an ``Event`` object to Splunk.
:param event: An ``Event`` object.
"""
if not self.header_written:
self._out.write("<stream>")
self.header_written = True
event.write_to(self._out)
|
[
"def",
"write_event",
"(",
"self",
",",
"event",
")",
":",
"if",
"not",
"self",
".",
"header_written",
":",
"self",
".",
"_out",
".",
"write",
"(",
"\"<stream>\"",
")",
"self",
".",
"header_written",
"=",
"True",
"event",
".",
"write_to",
"(",
"self",
".",
"_out",
")"
] |
Writes an ``Event`` object to Splunk.
:param event: An ``Event`` object.
|
[
"Writes",
"an",
"Event",
"object",
"to",
"Splunk",
"."
] |
433886e52698f0ddb9956f087b76041966c3bcd1
|
https://github.com/realestate-com-au/dashmat/blob/433886e52698f0ddb9956f087b76041966c3bcd1/dashmat/core_modules/splunk/splunk-sdk-1.3.0/splunklib/modularinput/event_writer.py#L50-L60
|
239,670
|
realestate-com-au/dashmat
|
dashmat/core_modules/splunk/splunk-sdk-1.3.0/splunklib/modularinput/event_writer.py
|
EventWriter.log
|
def log(self, severity, message):
"""Logs messages about the state of this modular input to Splunk.
These messages will show up in Splunk's internal logs.
:param severity: ``string``, severity of message, see severities defined as class constants.
:param message: ``string``, message to log.
"""
self._err.write("%s %s\n" % (severity, message))
self._err.flush()
|
python
|
def log(self, severity, message):
"""Logs messages about the state of this modular input to Splunk.
These messages will show up in Splunk's internal logs.
:param severity: ``string``, severity of message, see severities defined as class constants.
:param message: ``string``, message to log.
"""
self._err.write("%s %s\n" % (severity, message))
self._err.flush()
|
[
"def",
"log",
"(",
"self",
",",
"severity",
",",
"message",
")",
":",
"self",
".",
"_err",
".",
"write",
"(",
"\"%s %s\\n\"",
"%",
"(",
"severity",
",",
"message",
")",
")",
"self",
".",
"_err",
".",
"flush",
"(",
")"
] |
Logs messages about the state of this modular input to Splunk.
These messages will show up in Splunk's internal logs.
:param severity: ``string``, severity of message, see severities defined as class constants.
:param message: ``string``, message to log.
|
[
"Logs",
"messages",
"about",
"the",
"state",
"of",
"this",
"modular",
"input",
"to",
"Splunk",
".",
"These",
"messages",
"will",
"show",
"up",
"in",
"Splunk",
"s",
"internal",
"logs",
"."
] |
433886e52698f0ddb9956f087b76041966c3bcd1
|
https://github.com/realestate-com-au/dashmat/blob/433886e52698f0ddb9956f087b76041966c3bcd1/dashmat/core_modules/splunk/splunk-sdk-1.3.0/splunklib/modularinput/event_writer.py#L62-L71
|
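Both EventWriter methods are small enough to exercise with in-memory streams. The stand-in below mirrors the two behaviours shown in these entries: the <stream> header is emitted lazily, exactly once, and log lines are plain "severity message" pairs. The severity names are assumptions modelled on the docstring's mention of class constants.

import io

class MiniEventWriter:
    INFO, ERROR = "INFO", "ERROR"   # assumed severity constants

    def __init__(self, out, err):
        self._out, self._err = out, err
        self.header_written = False

    def write_event(self, payload):
        if not self.header_written:      # header goes out exactly once
            self._out.write("<stream>")
            self.header_written = True
        self._out.write(payload)

    def log(self, severity, message):
        self._err.write("%s %s\n" % (severity, message))
        self._err.flush()

out, err = io.StringIO(), io.StringIO()
w = MiniEventWriter(out, err)
w.write_event("<event/>")
w.write_event("<event/>")
w.log(MiniEventWriter.INFO, "two events written")
print(out.getvalue())  # <stream><event/><event/>
print(err.getvalue())  # INFO two events written
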
239,671
|
rackerlabs/txkazoo
|
txkazoo/client.py
|
TxKazooClient
|
def TxKazooClient(reactor, pool, client):
"""Create a client for txkazoo.
:param twisted.internet.interfaces.IReactorThreads reactor: The reactor
used to interact with the thread pool.
:param ThreadPool pool: The thread pool to which blocking calls will be
deferred.
:param kazoo.client.KazooClient client: The blocking Kazoo client, whose
blocking methods will be deferred to the thread pool.
:return: An object with a similar interface to the Kazoo client, but
returning deferreds for all blocking methods. The actual method calls
will be executed in the thread pool.
"""
make_thimble = partial(Thimble, reactor, pool)
wrapper = _RunCallbacksInReactorThreadWrapper(reactor, client)
client_thimble = make_thimble(wrapper, _blocking_client_methods)
def _Lock(path, identifier=None):
"""Return a wrapped :class:`kazoo.recipe.lock.Lock` for this client."""
lock = client.Lock(path, identifier)
return Thimble(reactor, pool, lock, _blocking_lock_methods)
client_thimble.Lock = _Lock
client_thimble.SetPartitioner = partial(_SetPartitionerWrapper,
reactor, pool, client)
# Expose these so e.g. recipes can access them from the kzclient
client.reactor = reactor
client.pool = pool
client.kazoo_client = client
return client_thimble
|
python
|
def TxKazooClient(reactor, pool, client):
"""Create a client for txkazoo.
:param twisted.internet.interfaces.IReactorThreads reactor: The reactor
used to interact with the thread pool.
:param ThreadPool pool: The thread pool to which blocking calls will be
deferred.
:param kazoo.client.KazooClient client: The blocking Kazoo client, whose
blocking methods will be deferred to the thread pool.
:return: An object with a similar interface to the Kazoo client, but
returning deferreds for all blocking methods. The actual method calls
will be executed in the thread pool.
"""
make_thimble = partial(Thimble, reactor, pool)
wrapper = _RunCallbacksInReactorThreadWrapper(reactor, client)
client_thimble = make_thimble(wrapper, _blocking_client_methods)
def _Lock(path, identifier=None):
"""Return a wrapped :class:`kazoo.recipe.lock.Lock` for this client."""
lock = client.Lock(path, identifier)
return Thimble(reactor, pool, lock, _blocking_lock_methods)
client_thimble.Lock = _Lock
client_thimble.SetPartitioner = partial(_SetPartitionerWrapper,
reactor, pool, client)
# Expose these so e.g. recipes can access them from the kzclient
client.reactor = reactor
client.pool = pool
client.kazoo_client = client
return client_thimble
|
[
"def",
"TxKazooClient",
"(",
"reactor",
",",
"pool",
",",
"client",
")",
":",
"make_thimble",
"=",
"partial",
"(",
"Thimble",
",",
"reactor",
",",
"pool",
")",
"wrapper",
"=",
"_RunCallbacksInReactorThreadWrapper",
"(",
"reactor",
",",
"client",
")",
"client_thimble",
"=",
"make_thimble",
"(",
"wrapper",
",",
"_blocking_client_methods",
")",
"def",
"_Lock",
"(",
"path",
",",
"identifier",
"=",
"None",
")",
":",
"\"\"\"Return a wrapped :class:`kazoo.recipe.lock.Lock` for this client.\"\"\"",
"lock",
"=",
"client",
".",
"Lock",
"(",
"path",
",",
"identifier",
")",
"return",
"Thimble",
"(",
"reactor",
",",
"pool",
",",
"lock",
",",
"_blocking_lock_methods",
")",
"client_thimble",
".",
"Lock",
"=",
"_Lock",
"client_thimble",
".",
"SetPartitioner",
"=",
"partial",
"(",
"_SetPartitionerWrapper",
",",
"reactor",
",",
"pool",
",",
"client",
")",
"# Expose these so e.g. recipes can access them from the kzclient",
"client",
".",
"reactor",
"=",
"reactor",
"client",
".",
"pool",
"=",
"pool",
"client",
".",
"kazoo_client",
"=",
"client",
"return",
"client_thimble"
] |
Create a client for txkazoo.
:param twisted.internet.interfaces.IReactorThreads reactor: The reactor
used to interact with the thread pool.
:param ThreadPool pool: The thread pool to which blocking calls will be
deferred.
:param kazoo.client.KazooClient client: The blocking Kazoo client, whose
blocking methods will be deferred to the thread pool.
:return: An object with a similar interface to the Kazoo client, but
returning deferreds for all blocking methods. The actual method calls
will be executed in the thread pool.
|
[
"Create",
"a",
"client",
"for",
"txkazoo",
"."
] |
a0989138cc08df7acd1d410f7e48708553839f46
|
https://github.com/rackerlabs/txkazoo/blob/a0989138cc08df7acd1d410f7e48708553839f46/txkazoo/client.py#L115-L147
|
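`make_thimble = partial(Thimble, reactor, pool)` is plain argument pre-binding with functools.partial: every wrapper stamped out afterwards shares one reactor and one thread pool. A toy version with a tuple-returning function in place of the real Thimble:

from functools import partial

def thimble(reactor, pool, wrapped, methods):
    # Stand-in for the real Thimble: just records what it was built from.
    return (reactor, pool, wrapped, methods)

make_thimble = partial(thimble, "reactor", "pool")   # bind the shared pieces once
print(make_thimble("client", ["get", "set"]))
# ('reactor', 'pool', 'client', ['get', 'set'])
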
239,672
|
rackerlabs/txkazoo
|
txkazoo/client.py
|
_RunCallbacksInReactorThreadWrapper.add_listener
|
def add_listener(self, listener):
"""Add the given listener to the wrapped client.
The listener will be wrapped, so that it will be called in the reactor
thread. This way, it can safely use Twisted APIs.
"""
internal_listener = partial(self._call_in_reactor_thread, listener)
self._internal_listeners[listener] = internal_listener
return self._client.add_listener(internal_listener)
|
python
|
def add_listener(self, listener):
"""Add the given listener to the wrapped client.
The listener will be wrapped, so that it will be called in the reactor
thread. This way, it can safely use Twisted APIs.
"""
internal_listener = partial(self._call_in_reactor_thread, listener)
self._internal_listeners[listener] = internal_listener
return self._client.add_listener(internal_listener)
|
[
"def",
"add_listener",
"(",
"self",
",",
"listener",
")",
":",
"internal_listener",
"=",
"partial",
"(",
"self",
".",
"_call_in_reactor_thread",
",",
"listener",
")",
"self",
".",
"_internal_listeners",
"[",
"listener",
"]",
"=",
"internal_listener",
"return",
"self",
".",
"_client",
".",
"add_listener",
"(",
"internal_listener",
")"
] |
Add the given listener to the wrapped client.
The listener will be wrapped, so that it will be called in the reactor
thread. This way, it can safely use Twisted APIs.
|
[
"Add",
"the",
"given",
"listener",
"to",
"the",
"wrapped",
"client",
"."
] |
a0989138cc08df7acd1d410f7e48708553839f46
|
https://github.com/rackerlabs/txkazoo/blob/a0989138cc08df7acd1d410f7e48708553839f46/txkazoo/client.py#L49-L57
|
239,673
|
rackerlabs/txkazoo
|
txkazoo/client.py
|
_RunCallbacksInReactorThreadWrapper.remove_listener
|
def remove_listener(self, listener):
"""Remove the given listener from the wrapped client.
:param listener: A listener previously passed to :meth:`add_listener`.
"""
internal_listener = self._internal_listeners.pop(listener)
return self._client.remove_listener(internal_listener)
|
python
|
def remove_listener(self, listener):
"""Remove the given listener from the wrapped client.
:param listener: A listener previously passed to :meth:`add_listener`.
"""
internal_listener = self._internal_listeners.pop(listener)
return self._client.remove_listener(internal_listener)
|
[
"def",
"remove_listener",
"(",
"self",
",",
"listener",
")",
":",
"internal_listener",
"=",
"self",
".",
"_internal_listeners",
".",
"pop",
"(",
"listener",
")",
"return",
"self",
".",
"_client",
".",
"remove_listener",
"(",
"internal_listener",
")"
] |
Remove the given listener from the wrapped client.
:param listener: A listener previously passed to :meth:`add_listener`.
|
[
"Remove",
"the",
"given",
"listener",
"from",
"the",
"wrapped",
"client",
"."
] |
a0989138cc08df7acd1d410f7e48708553839f46
|
https://github.com/rackerlabs/txkazoo/blob/a0989138cc08df7acd1d410f7e48708553839f46/txkazoo/client.py#L59-L65
|
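add_listener and remove_listener cooperate through `_internal_listeners`: the wrapper is remembered keyed by the caller's original callback, so the exact same wrapper object can be unregistered later. A self-contained sketch of the pattern, with the reactor call faked by a direct invocation:

from functools import partial

def call_from_thread(fn, *args, **kwargs):
    fn(*args, **kwargs)          # stand-in for reactor.callFromThread

class ListenerRegistry:
    def __init__(self):
        self._registered = []    # what the wrapped client would hold
        self._internal = {}      # original listener -> wrapper

    def add_listener(self, listener):
        wrapper = partial(call_from_thread, listener)
        self._internal[listener] = wrapper
        self._registered.append(wrapper)

    def remove_listener(self, listener):
        wrapper = self._internal.pop(listener)   # the object we registered
        self._registered.remove(wrapper)

def on_state(state):
    print("state:", state)

reg = ListenerRegistry()
reg.add_listener(on_state)
reg.remove_listener(on_state)    # succeeds: the wrapper was remembered
print(reg._registered)           # []
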
239,674
|
rackerlabs/txkazoo
|
txkazoo/client.py
|
_RunCallbacksInReactorThreadWrapper._wrapped_method_with_watch_fn
|
def _wrapped_method_with_watch_fn(self, f, *args, **kwargs):
"""A wrapped method with a watch function.
When this method is called, it will call the underlying method with
the same arguments, *except* that if the ``watch`` argument isn't
:data:`None`, it will be replaced with a wrapper around that watch
function, so that the watch function will be called in the reactor
thread. This means that the watch function can safely use Twisted
APIs.
"""
bound_args = signature(f).bind(*args, **kwargs)
orig_watch = bound_args.arguments.get("watch")
if orig_watch is not None:
wrapped_watch = partial(self._call_in_reactor_thread, orig_watch)
wrapped_watch = wraps(orig_watch)(wrapped_watch)
bound_args.arguments["watch"] = wrapped_watch
return f(**bound_args.arguments)
|
python
|
def _wrapped_method_with_watch_fn(self, f, *args, **kwargs):
"""A wrapped method with a watch function.
When this method is called, it will call the underlying method with
the same arguments, *except* that if the ``watch`` argument isn't
:data:`None`, it will be replaced with a wrapper around that watch
function, so that the watch function will be called in the reactor
thread. This means that the watch function can safely use Twisted
APIs.
"""
bound_args = signature(f).bind(*args, **kwargs)
orig_watch = bound_args.arguments.get("watch")
if orig_watch is not None:
wrapped_watch = partial(self._call_in_reactor_thread, orig_watch)
wrapped_watch = wraps(orig_watch)(wrapped_watch)
bound_args.arguments["watch"] = wrapped_watch
return f(**bound_args.arguments)
|
[
"def",
"_wrapped_method_with_watch_fn",
"(",
"self",
",",
"f",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"bound_args",
"=",
"signature",
"(",
"f",
")",
".",
"bind",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"orig_watch",
"=",
"bound_args",
".",
"arguments",
".",
"get",
"(",
"\"watch\"",
")",
"if",
"orig_watch",
"is",
"not",
"None",
":",
"wrapped_watch",
"=",
"partial",
"(",
"self",
".",
"_call_in_reactor_thread",
",",
"orig_watch",
")",
"wrapped_watch",
"=",
"wraps",
"(",
"orig_watch",
")",
"(",
"wrapped_watch",
")",
"bound_args",
".",
"arguments",
"[",
"\"watch\"",
"]",
"=",
"wrapped_watch",
"return",
"f",
"(",
"*",
"*",
"bound_args",
".",
"arguments",
")"
] |
A wrapped method with a watch function.
When this method is called, it will call the underlying method with
the same arguments, *except* that if the ``watch`` argument isn't
:data:`None`, it will be replaced with a wrapper around that watch
function, so that the watch function will be called in the reactor
thread. This means that the watch function can safely use Twisted
APIs.
|
[
"A",
"wrapped",
"method",
"with",
"a",
"watch",
"function",
"."
] |
a0989138cc08df7acd1d410f7e48708553839f46
|
https://github.com/rackerlabs/txkazoo/blob/a0989138cc08df7acd1d410f7e48708553839f46/txkazoo/client.py#L67-L85
|
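The interesting trick here is `signature(f).bind(*args, **kwargs)`: it locates the `watch` argument whether the caller passed it positionally or by keyword, without the wrapper having to know f's parameter order. A runnable reduction:

from inspect import signature

def get(path, watch=None):
    return path, watch

def with_wrapped_watch(f, *args, **kwargs):
    bound = signature(f).bind(*args, **kwargs)
    watch = bound.arguments.get("watch")        # absent -> None (bind skips defaults)
    if watch is not None:
        bound.arguments["watch"] = lambda event: watch(("wrapped", event))
    return f(**bound.arguments)

print(with_wrapped_watch(get, "/node", print))        # watch passed positionally
print(with_wrapped_watch(get, "/node", watch=print))  # watch passed by keyword
print(with_wrapped_watch(get, "/node"))               # no watch -> left untouched
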
239,675
|
rackerlabs/txkazoo
|
txkazoo/client.py
|
_RunCallbacksInReactorThreadWrapper._call_in_reactor_thread
|
def _call_in_reactor_thread(self, f, *args, **kwargs):
"""Call the given function with args in the reactor thread."""
self._reactor.callFromThread(f, *args, **kwargs)
|
python
|
def _call_in_reactor_thread(self, f, *args, **kwargs):
"""Call the given function with args in the reactor thread."""
self._reactor.callFromThread(f, *args, **kwargs)
|
[
"def",
"_call_in_reactor_thread",
"(",
"self",
",",
"f",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"self",
".",
"_reactor",
".",
"callFromThread",
"(",
"f",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] |
Call the given function with args in the reactor thread.
|
[
"Call",
"the",
"given",
"function",
"with",
"args",
"in",
"the",
"reactor",
"thread",
"."
] |
a0989138cc08df7acd1d410f7e48708553839f46
|
https://github.com/rackerlabs/txkazoo/blob/a0989138cc08df7acd1d410f7e48708553839f46/txkazoo/client.py#L87-L89
|
239,676
|
sbarham/dsrt
|
dsrt/application/Application.py
|
Application.converse
|
def converse(self):
'''The 'converse' subcommand'''
# Initialize the converse subcommand's argparser
parser = argparse.ArgumentParser(description='Initiate a conversation with a trained dialogue model')
self.init_converse_args(parser)
# Parse the args we got
args = parser.parse_args(sys.argv[2:])
args.config = ConfigurationLoader(args.config).load().conversation_config
print(CLI_DIVIDER + '\n')
Conversant(**vars(args)).run()
|
python
|
def converse(self):
'''The 'converse' subcommand'''
# Initialize the converse subcommand's argparser
parser = argparse.ArgumentParser(description='Initiate a conversation with a trained dialogue model')
self.init_converse_args(parser)
# Parse the args we got
args = parser.parse_args(sys.argv[2:])
args.config = ConfigurationLoader(args.config).load().conversation_config
print(CLI_DIVIDER + '\n')
Conversant(**vars(args)).run()
|
[
"def",
"converse",
"(",
"self",
")",
":",
"# Initialize the converse subcommand's argparser",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
"description",
"=",
"'Initiate a conversation with a trained dialogue model'",
")",
"self",
".",
"init_converse_args",
"(",
"parser",
")",
"# Parse the args we got",
"args",
"=",
"parser",
".",
"parse_args",
"(",
"sys",
".",
"argv",
"[",
"2",
":",
"]",
")",
"args",
".",
"config",
"=",
"ConfigurationLoader",
"(",
"args",
".",
"config",
")",
".",
"load",
"(",
")",
".",
"conversation_config",
"print",
"(",
"CLI_DIVIDER",
"+",
"'\\n'",
")",
"Conversant",
"(",
"*",
"*",
"vars",
"(",
"args",
")",
")",
".",
"run",
"(",
")"
] |
The 'converse' subcommand
|
[
"The",
"converse",
"subcommand"
] |
bc664739f2f52839461d3e72773b71146fd56a9a
|
https://github.com/sbarham/dsrt/blob/bc664739f2f52839461d3e72773b71146fd56a9a/dsrt/application/Application.py#L179-L191
|
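The sys.argv[2:] slice is the two-level CLI dispatch pattern: argv[1] names the subcommand, and each subcommand builds its own parser over whatever follows. A minimal sketch; the flag names are borrowed from these entries, the rest is hypothetical:

import argparse

def main(argv):
    command = argv[1] if len(argv) > 1 else None
    if command == "converse":
        parser = argparse.ArgumentParser(prog="dsrt converse")
        parser.add_argument("-m", "--model", dest="model_name")
        args = parser.parse_args(argv[2:])   # only the subcommand's own flags
        print("would converse with model:", args.model_name)

main(["dsrt", "converse", "-m", "baseline"])
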
239,677
|
sbarham/dsrt
|
dsrt/application/Application.py
|
Application.init_converse_args
|
def init_converse_args(self, parser):
'''Only invoked conditionally if subcommand is 'converse' '''
parser.add_argument('-f', '--configuration', dest='config', default=DEFAULT_USER_CONFIG_PATH,
help='the path to the configuration file to use -- ./config.yaml by default')
parser.add_argument('-m', '--model', dest='model_name', help='the name of the (pretrained) dialogue model to use')
|
python
|
def init_converse_args(self, parser):
'''Only invoked conditionally if subcommand is 'converse' '''
parser.add_argument('-f', '--configuration', dest='config', default=DEFAULT_USER_CONFIG_PATH,
help='the path to the configuration file to use -- ./config.yaml by default')
parser.add_argument('-m', '--model', dest='model_name', help='the name of the (pretrained) dialogue model to use')
|
[
"def",
"init_converse_args",
"(",
"self",
",",
"parser",
")",
":",
"parser",
".",
"add_argument",
"(",
"'-f'",
",",
"'--configuration'",
",",
"dest",
"=",
"'config'",
",",
"default",
"=",
"DEFAULT_USER_CONFIG_PATH",
",",
"help",
"=",
"'the path to the configuration file to use -- ./config.yaml by default'",
")",
"parser",
".",
"add_argument",
"(",
"'-m'",
",",
"'--model'",
",",
"dest",
"=",
"'model_name'",
",",
"help",
"=",
"'the name of the (pretrained) dialogue model to use'",
")"
] |
Only invoked conditionally if subcommand is 'converse'
|
[
"Only",
"invoked",
"conditionally",
"if",
"subcommand",
"is",
"converse"
] |
bc664739f2f52839461d3e72773b71146fd56a9a
|
https://github.com/sbarham/dsrt/blob/bc664739f2f52839461d3e72773b71146fd56a9a/dsrt/application/Application.py#L251-L255
|
239,678
|
capslockwizard/zdock-parser
|
zdock_parser/__init__.py
|
_read_next_timestep
|
def _read_next_timestep(self, ts=None):
"""copy next frame into timestep"""
if self.ts.frame >= self.n_frames-1:
raise IOError(errno.EIO, 'trying to go over trajectory limit')
if ts is None:
ts = self.ts
ts.frame += 1
self.zdock_inst._set_pose_num(ts.frame+1)
ts._pos = self.zdock_inst.static_mobile_copy_uni.trajectory.ts._pos
return ts
|
python
|
def _read_next_timestep(self, ts=None):
"""copy next frame into timestep"""
if self.ts.frame >= self.n_frames-1:
raise IOError(errno.EIO, 'trying to go over trajectory limit')
if ts is None:
ts = self.ts
ts.frame += 1
self.zdock_inst._set_pose_num(ts.frame+1)
ts._pos = self.zdock_inst.static_mobile_copy_uni.trajectory.ts._pos
return ts
|
[
"def",
"_read_next_timestep",
"(",
"self",
",",
"ts",
"=",
"None",
")",
":",
"if",
"self",
".",
"ts",
".",
"frame",
">=",
"self",
".",
"n_frames",
"-",
"1",
":",
"raise",
"IOError",
"(",
"errno",
".",
"EIO",
",",
"'trying to go over trajectory limit'",
")",
"if",
"ts",
"is",
"None",
":",
"ts",
"=",
"self",
".",
"ts",
"ts",
".",
"frame",
"+=",
"1",
"self",
".",
"zdock_inst",
".",
"_set_pose_num",
"(",
"ts",
".",
"frame",
"+",
"1",
")",
"ts",
".",
"_pos",
"=",
"self",
".",
"zdock_inst",
".",
"static_mobile_copy_uni",
".",
"trajectory",
".",
"ts",
".",
"_pos",
"return",
"ts"
] |
copy next frame into timestep
|
[
"copy",
"next",
"frame",
"into",
"timestep"
] |
3c83d47775c985f595cf0a7e4e6fefca5dbc9ed7
|
https://github.com/capslockwizard/zdock-parser/blob/3c83d47775c985f595cf0a7e4e6fefca5dbc9ed7/zdock_parser/__init__.py#L36-L48
|
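Stripped of the MDAnalysis plumbing, the reader's contract is a guarded advance: step the frame counter, but raise EIO instead of running off the end of the trajectory. A minimal analogue:

import errno

class FrameCursor:
    def __init__(self, n_frames):
        self.n_frames = n_frames
        self.frame = 0

    def next_frame(self):
        if self.frame >= self.n_frames - 1:
            raise IOError(errno.EIO, "trying to go over trajectory limit")
        self.frame += 1
        return self.frame

cursor = FrameCursor(n_frames=2)
print(cursor.next_frame())       # 1 -- the last valid frame
try:
    cursor.next_frame()
except OSError as exc:           # IOError is an alias of OSError in Python 3
    print(exc)                   # [Errno 5] trying to go over trajectory limit
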
239,679
|
PhilippeFerreiraDeSousa/bitext-matching
|
lib/enpc_aligner/IBM2_func.py
|
viterbi_alignment
|
def viterbi_alignment(es, fs, t, a):
'''
return
dictionary
e in es -> f in fs
'''
max_a = collections.defaultdict(float)
l_e = len(es)
l_f = len(fs)
for (j, e) in enumerate(es, 1):
current_max = (0, -1)
for (i, f) in enumerate(fs, 1):
val = t[(e, f)] * a[(i, j, l_e, l_f)]
# select the first one among the maximum candidates
if current_max[1] < val:
current_max = (i, val)
max_a[j] = current_max[0]
return max_a
|
python
|
def viterbi_alignment(es, fs, t, a):
'''
return
dictionary
e in es -> f in fs
'''
max_a = collections.defaultdict(float)
l_e = len(es)
l_f = len(fs)
for (j, e) in enumerate(es, 1):
current_max = (0, -1)
for (i, f) in enumerate(fs, 1):
val = t[(e, f)] * a[(i, j, l_e, l_f)]
# select the first one among the maximum candidates
if current_max[1] < val:
current_max = (i, val)
max_a[j] = current_max[0]
return max_a
|
[
"def",
"viterbi_alignment",
"(",
"es",
",",
"fs",
",",
"t",
",",
"a",
")",
":",
"max_a",
"=",
"collections",
".",
"defaultdict",
"(",
"float",
")",
"l_e",
"=",
"len",
"(",
"es",
")",
"l_f",
"=",
"len",
"(",
"fs",
")",
"for",
"(",
"j",
",",
"e",
")",
"in",
"enumerate",
"(",
"es",
",",
"1",
")",
":",
"current_max",
"=",
"(",
"0",
",",
"-",
"1",
")",
"for",
"(",
"i",
",",
"f",
")",
"in",
"enumerate",
"(",
"fs",
",",
"1",
")",
":",
"val",
"=",
"t",
"[",
"(",
"e",
",",
"f",
")",
"]",
"*",
"a",
"[",
"(",
"i",
",",
"j",
",",
"l_e",
",",
"l_f",
")",
"]",
"# select the first one among the maximum candidates\r",
"if",
"current_max",
"[",
"1",
"]",
"<",
"val",
":",
"current_max",
"=",
"(",
"i",
",",
"val",
")",
"max_a",
"[",
"j",
"]",
"=",
"current_max",
"[",
"0",
"]",
"return",
"max_a"
] |
return
dictionary
e in es -> f in fs
|
[
"return",
"dictionary",
"e",
"in",
"es",
"-",
">",
"f",
"in",
"fs"
] |
195c3e98775cfa5e63e4bb0bb1da6f741880d980
|
https://github.com/PhilippeFerreiraDeSousa/bitext-matching/blob/195c3e98775cfa5e63e4bb0bb1da6f741880d980/lib/enpc_aligner/IBM2_func.py#L85-L102
|
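A tiny worked run of viterbi_alignment with made-up probabilities: two English tokens, two foreign tokens, `t` holding lexical translation probabilities and `a` holding alignment probabilities keyed by (i, j, l_e, l_f). Using defaultdicts makes every unlisted pair's probability 0.

import collections

t = collections.defaultdict(float, {
    ("the", "le"): 0.7, ("the", "chat"): 0.1,
    ("cat", "le"): 0.05, ("cat", "chat"): 0.8,
})
a = collections.defaultdict(float, {
    (1, 1, 2, 2): 0.9, (2, 1, 2, 2): 0.1,
    (1, 2, 2, 2): 0.2, (2, 2, 2, 2): 0.8,
})

# Using viterbi_alignment as defined in the entry above:
print(dict(viterbi_alignment(["the", "cat"], ["le", "chat"], t, a)))
# {1: 1, 2: 2} -- English position j mapped to its best foreign position i
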
239,680
|
tiffon/take
|
take/scanner.py
|
Scanner._make_marker_token
|
def _make_marker_token(self, type_):
"""Make a token that has no content"""
tok = Token(type_,
'',
self.line,
self.line_num,
self.start,
self.start)
return tok
|
python
|
def _make_marker_token(self, type_):
"""Make a token that has no content"""
tok = Token(type_,
'',
self.line,
self.line_num,
self.start,
self.start)
return tok
|
[
"def",
"_make_marker_token",
"(",
"self",
",",
"type_",
")",
":",
"tok",
"=",
"Token",
"(",
"type_",
",",
"''",
",",
"self",
".",
"line",
",",
"self",
".",
"line_num",
",",
"self",
".",
"start",
",",
"self",
".",
"start",
")",
"return",
"tok"
] |
Make a token that has no content
|
[
"Make",
"a",
"token",
"that",
"has",
"no",
"content"
] |
907a2c4a72f5cbd357eadd4837fa4cae23647096
|
https://github.com/tiffon/take/blob/907a2c4a72f5cbd357eadd4837fa4cae23647096/take/scanner.py#L134-L142
|
239,681
|
pip-services3-python/pip-services3-commons-python
|
pip_services3_commons/convert/JsonConverter.py
|
JsonConverter.to_nullable_map
|
def to_nullable_map(value):
"""
Converts JSON string into map object or returns null when conversion is not possible.
:param value: the JSON string to convert.
:return: Map object value or null when conversion is not supported.
"""
if value == None:
return None
# Parse JSON
try:
value = json.loads(value)
return RecursiveMapConverter.to_nullable_map(value)
except:
return None
|
python
|
def to_nullable_map(value):
"""
Converts JSON string into map object or returns null when conversion is not possible.
:param value: the JSON string to convert.
:return: Map object value or null when conversion is not supported.
"""
if value == None:
return None
# Parse JSON
try:
value = json.loads(value)
return RecursiveMapConverter.to_nullable_map(value)
except:
return None
|
[
"def",
"to_nullable_map",
"(",
"value",
")",
":",
"if",
"value",
"==",
"None",
":",
"return",
"None",
"# Parse JSON",
"try",
":",
"value",
"=",
"json",
".",
"loads",
"(",
"value",
")",
"return",
"RecursiveMapConverter",
".",
"to_nullable_map",
"(",
"value",
")",
"except",
":",
"return",
"None"
] |
Converts JSON string into map object or returns null when conversion is not possible.
:param value: the JSON string to convert.
:return: Map object value or null when conversion is not supported.
|
[
"Converts",
"JSON",
"string",
"into",
"map",
"object",
"or",
"returns",
"null",
"when",
"conversion",
"is",
"not",
"possible",
"."
] |
22cbbb3e91e49717f65c083d36147fdb07ba9e3b
|
https://github.com/pip-services3-python/pip-services3-commons-python/blob/22cbbb3e91e49717f65c083d36147fdb07ba9e3b/pip_services3_commons/convert/JsonConverter.py#L53-L69
|
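The method's contract is easy to check in isolation: any parse failure yields None rather than an exception. The sketch below keeps json.loads but drops the RecursiveMapConverter step, which is out of scope for this entry:

import json

def to_nullable_map(value):
    if value is None:
        return None
    try:
        return json.loads(value)          # RecursiveMapConverter step omitted
    except ValueError:                    # json.JSONDecodeError subclasses this
        return None

print(to_nullable_map('{"a": 1}'))        # {'a': 1}
print(to_nullable_map("not json"))        # None
print(to_nullable_map(None))              # None
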
239,682
|
pip-services3-python/pip-services3-commons-python
|
pip_services3_commons/convert/JsonConverter.py
|
JsonConverter.to_map_with_default
|
def to_map_with_default(value, default_value):
"""
Converts JSON string into map object or returns default value when conversion is not possible.
:param value: the JSON string to convert.
:param default_value: the default value.
:return: Map object value or default when conversion is not supported.
"""
result = JsonConverter.to_nullable_map(value)
return result if result != None else default_value
|
python
|
def to_map_with_default(value, default_value):
"""
Converts JSON string into map object or returns default value when conversion is not possible.
:param value: the JSON string to convert.
:param default_value: the default value.
:return: Map object value or default when conversion is not supported.
"""
result = JsonConverter.to_nullable_map(value)
return result if result != None else default_value
|
[
"def",
"to_map_with_default",
"(",
"value",
",",
"default_value",
")",
":",
"result",
"=",
"JsonConverter",
".",
"to_nullable_map",
"(",
"value",
")",
"return",
"result",
"if",
"result",
"!=",
"None",
"else",
"default_value"
] |
Converts JSON string into map object or returns default value when conversion is not possible.
:param value: the JSON string to convert.
:param default_value: the default value.
:return: Map object value or default when conversion is not supported.
|
[
"Converts",
"JSON",
"string",
"into",
"map",
"object",
"or",
"returns",
"default",
"value",
"when",
"conversion",
"is",
"not",
"possible",
"."
] |
22cbbb3e91e49717f65c083d36147fdb07ba9e3b
|
https://github.com/pip-services3-python/pip-services3-commons-python/blob/22cbbb3e91e49717f65c083d36147fdb07ba9e3b/pip_services3_commons/convert/JsonConverter.py#L84-L95
|
239,683
|
roboogle/gtkmvc3
|
gtkmvco/gtkmvc3/progen/model.py
|
ProgenModel.generate_project
|
def generate_project(self):
"""
Generate the whole project. Returns True if at least one
file has been generated, False otherwise."""
# checks needed properties
if not self.name or not self.destdir or \
not os.path.isdir(self.destdir):
raise ValueError("Empty or invalid property values: run with 'help' command")
_log("Generating project '%s'" % self.name)
_log("Destination directory is: '%s'" % self.destdir)
top = os.path.join(self.destdir, self.name)
src = os.path.join(top, self.src_name)
resources = os.path.join(top, self.res_name)
utils = os.path.join(src, "utils")
if self.complex:
models = os.path.join(src, "models")
ctrls = os.path.join(src, "ctrls")
views = os.path.join(src, "views")
else: models = ctrls = views = src
res = self.__generate_tree(top, src, resources, models, ctrls, views, utils)
res = self.__generate_classes(models, ctrls, views) or res
res = self.__mksrc(os.path.join(utils, "globals.py"), templates.glob) or res
if self.complex: self.templ.update({'model_import' : "from models.application import ApplModel",
'ctrl_import' : "from ctrls.application import ApplCtrl",
'view_import' : "from views.application import ApplView"})
else: self.templ.update({'model_import' : "from ApplModel import ApplModel",
'ctrl_import' : "from ApplCtrl import ApplCtrl",
'view_import' : "from ApplView import ApplView"})
res = self.__mksrc(os.path.join(top, "%s.py" % self.name), templates.main) or res
# builder file
if self.builder:
res = self.__generate_builder(resources) or res
if self.dist_gtkmvc3: res = self.__copy_framework(os.path.join(resources, "external")) or res
if not res: _log("No actions were taken")
else: _log("Done")
return res
|
python
|
def generate_project(self):
"""
Generate the whole project. Returns True if at least one
file has been generated, False otherwise."""
# checks needed properties
if not self.name or not self.destdir or \
not os.path.isdir(self.destdir):
raise ValueError("Empty or invalid property values: run with 'help' command")
_log("Generating project '%s'" % self.name)
_log("Destination directory is: '%s'" % self.destdir)
top = os.path.join(self.destdir, self.name)
src = os.path.join(top, self.src_name)
resources = os.path.join(top, self.res_name)
utils = os.path.join(src, "utils")
if self.complex:
models = os.path.join(src, "models")
ctrls = os.path.join(src, "ctrls")
views = os.path.join(src, "views")
else: models = ctrls = views = src
res = self.__generate_tree(top, src, resources, models, ctrls, views, utils)
res = self.__generate_classes(models, ctrls, views) or res
res = self.__mksrc(os.path.join(utils, "globals.py"), templates.glob) or res
if self.complex: self.templ.update({'model_import' : "from models.application import ApplModel",
'ctrl_import' : "from ctrls.application import ApplCtrl",
'view_import' : "from views.application import ApplView"})
else: self.templ.update({'model_import' : "from ApplModel import ApplModel",
'ctrl_import' : "from ApplCtrl import ApplCtrl",
'view_import' : "from ApplView import ApplView"})
res = self.__mksrc(os.path.join(top, "%s.py" % self.name), templates.main) or res
# builder file
if self.builder:
res = self.__generate_builder(resources) or res
if self.dist_gtkmvc3: res = self.__copy_framework(os.path.join(resources, "external")) or res
if not res: _log("No actions were taken")
else: _log("Done")
return res
|
[
"def",
"generate_project",
"(",
"self",
")",
":",
"# checks needed properties",
"if",
"not",
"self",
".",
"name",
"or",
"not",
"self",
".",
"destdir",
"or",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"self",
".",
"destdir",
")",
":",
"raise",
"ValueError",
"(",
"\"Empty or invalid property values: run with 'help' command\"",
")",
"_log",
"(",
"\"Generating project '%s'\"",
"%",
"self",
".",
"name",
")",
"_log",
"(",
"\"Destination directory is: '%s'\"",
"%",
"self",
".",
"destdir",
")",
"top",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"destdir",
",",
"self",
".",
"name",
")",
"src",
"=",
"os",
".",
"path",
".",
"join",
"(",
"top",
",",
"self",
".",
"src_name",
")",
"resources",
"=",
"os",
".",
"path",
".",
"join",
"(",
"top",
",",
"self",
".",
"res_name",
")",
"utils",
"=",
"os",
".",
"path",
".",
"join",
"(",
"src",
",",
"\"utils\"",
")",
"if",
"self",
".",
"complex",
":",
"models",
"=",
"os",
".",
"path",
".",
"join",
"(",
"src",
",",
"\"models\"",
")",
"ctrls",
"=",
"os",
".",
"path",
".",
"join",
"(",
"src",
",",
"\"ctrls\"",
")",
"views",
"=",
"os",
".",
"path",
".",
"join",
"(",
"src",
",",
"\"views\"",
")",
"else",
":",
"models",
"=",
"ctrls",
"=",
"views",
"=",
"src",
"res",
"=",
"self",
".",
"__generate_tree",
"(",
"top",
",",
"src",
",",
"resources",
",",
"models",
",",
"ctrls",
",",
"views",
",",
"utils",
")",
"res",
"=",
"self",
".",
"__generate_classes",
"(",
"models",
",",
"ctrls",
",",
"views",
")",
"or",
"res",
"res",
"=",
"self",
".",
"__mksrc",
"(",
"os",
".",
"path",
".",
"join",
"(",
"utils",
",",
"\"globals.py\"",
")",
",",
"templates",
".",
"glob",
")",
"or",
"res",
"if",
"self",
".",
"complex",
":",
"self",
".",
"templ",
".",
"update",
"(",
"{",
"'model_import'",
":",
"\"from models.application import ApplModel\"",
",",
"'ctrl_import'",
":",
"\"from ctrls.application import ApplCtrl\"",
",",
"'view_import'",
":",
"\"from views.application import ApplView\"",
"}",
")",
"else",
":",
"self",
".",
"templ",
".",
"update",
"(",
"{",
"'model_import'",
":",
"\"from ApplModel import ApplModel\"",
",",
"'ctrl_import'",
":",
"\"from ApplCtrl import ApplCtrl\"",
",",
"'view_import'",
":",
"\"from ApplView import ApplView\"",
"}",
")",
"res",
"=",
"self",
".",
"__mksrc",
"(",
"os",
".",
"path",
".",
"join",
"(",
"top",
",",
"\"%s.py\"",
"%",
"self",
".",
"name",
")",
",",
"templates",
".",
"main",
")",
"or",
"res",
"# builder file",
"if",
"self",
".",
"builder",
":",
"res",
"=",
"self",
".",
"__generate_builder",
"(",
"resources",
")",
"or",
"res",
"if",
"self",
".",
"dist_gtkmvc3",
":",
"res",
"=",
"self",
".",
"__copy_framework",
"(",
"os",
".",
"path",
".",
"join",
"(",
"resources",
",",
"\"external\"",
")",
")",
"or",
"res",
"if",
"not",
"res",
":",
"_log",
"(",
"\"No actions were taken\"",
")",
"else",
":",
"_log",
"(",
"\"Done\"",
")",
"return",
"res"
] |
Generate the whole project. Returns True if at least one
file has been generated, False otherwise.
|
[
"Generate",
"the",
"whole",
"project",
".",
"Returns",
"True",
"if",
"at",
"least",
"one",
"file",
"has",
"been",
"generated",
"False",
"otherwise",
"."
] |
63405fd8d2056be26af49103b13a8d5e57fe4dff
|
https://github.com/roboogle/gtkmvc3/blob/63405fd8d2056be26af49103b13a8d5e57fe4dff/gtkmvco/gtkmvc3/progen/model.py#L97-L141
|
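generate_project threads its return value through every step with `res = step() or res`: the flag ends True as soon as any step reports doing work, and later no-op steps cannot reset it. A short demonstration:

def noop():      return False   # nothing generated
def make_file(): return True    # generated something

res = noop()
res = make_file() or res
res = noop() or res
print(res)   # True -- one productive step is enough
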
239,684
|
roboogle/gtkmvc3
|
gtkmvco/gtkmvc3/progen/model.py
|
ProgenModel.__generate_tree
|
def __generate_tree(self, top, src, resources, models, ctrls, views, utils):
"""Creates directories and packages"""
res = self.__mkdir(top)
for fn in (src, models, ctrls, views, utils): res = self.__mkpkg(fn) or res
res = self.__mkdir(resources) or res
res = self.__mkdir(os.path.join(resources, "ui", "builder")) or res
res = self.__mkdir(os.path.join(resources, "ui", "styles")) or res
res = self.__mkdir(os.path.join(resources, "external")) or res
return res
|
python
|
def __generate_tree(self, top, src, resources, models, ctrls, views, utils):
"""Creates directories and packages"""
res = self.__mkdir(top)
for fn in (src, models, ctrls, views, utils): res = self.__mkpkg(fn) or res
res = self.__mkdir(resources) or res
res = self.__mkdir(os.path.join(resources, "ui", "builder")) or res
res = self.__mkdir(os.path.join(resources, "ui", "styles")) or res
res = self.__mkdir(os.path.join(resources, "external")) or res
return res
|
[
"def",
"__generate_tree",
"(",
"self",
",",
"top",
",",
"src",
",",
"resources",
",",
"models",
",",
"ctrls",
",",
"views",
",",
"utils",
")",
":",
"res",
"=",
"self",
".",
"__mkdir",
"(",
"top",
")",
"for",
"fn",
"in",
"(",
"src",
",",
"models",
",",
"ctrls",
",",
"views",
",",
"utils",
")",
":",
"res",
"=",
"self",
".",
"__mkpkg",
"(",
"fn",
")",
"or",
"res",
"res",
"=",
"self",
".",
"__mkdir",
"(",
"resources",
")",
"or",
"res",
"res",
"=",
"self",
".",
"__mkdir",
"(",
"os",
".",
"path",
".",
"join",
"(",
"resources",
",",
"\"ui\"",
",",
"\"builder\"",
")",
")",
"or",
"res",
"res",
"=",
"self",
".",
"__mkdir",
"(",
"os",
".",
"path",
".",
"join",
"(",
"resources",
",",
"\"ui\"",
",",
"\"styles\"",
")",
")",
"or",
"res",
"res",
"=",
"self",
".",
"__mkdir",
"(",
"os",
".",
"path",
".",
"join",
"(",
"resources",
",",
"\"external\"",
")",
")",
"or",
"res",
"return",
"res"
] |
Creates directories and packages
|
[
"Creates",
"directories",
"and",
"packages"
] |
63405fd8d2056be26af49103b13a8d5e57fe4dff
|
https://github.com/roboogle/gtkmvc3/blob/63405fd8d2056be26af49103b13a8d5e57fe4dff/gtkmvco/gtkmvc3/progen/model.py#L144-L152
|
239,685
|
knetsolutions/smallcli
|
smallcli/resources/ssh.py
|
ssh.scp_put
|
def scp_put(self, src, dst):
"""Copy src file from local system to dst on remote system."""
cmd = [ 'scp',
'-B',
'-oStrictHostKeyChecking=no',
'-oUserKnownHostsFile=/dev/null',
'-oLogLevel=ERROR']
if self._key is not None:
cmd.extend(['-i', self._key])
cmd.append(src)
remote = ''
if self._user is not None:
remote += self._user + '@'
remote += self._ip + ':' + dst
cmd.append(remote)
try:
# Actually ignore output on success, but capture stderr on failure
subprocess.check_output(cmd, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as ex:
raise RuntimeError('scp returned exit status %d:\n%s'
% (ex.returncode, ex.output.strip()))
|
python
|
def scp_put(self, src, dst):
"""Copy src file from local system to dst on remote system."""
cmd = [ 'scp',
'-B',
'-oStrictHostKeyChecking=no',
'-oUserKnownHostsFile=/dev/null',
'-oLogLevel=ERROR']
if self._key is not None:
cmd.extend(['-i', self._key])
cmd.append(src)
remote = ''
if self._user is not None:
remote += self._user + '@'
remote += self._ip + ':' + dst
cmd.append(remote)
try:
# Actually ignore output on success, but capture stderr on failure
subprocess.check_output(cmd, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as ex:
raise RuntimeError('scp returned exit status %d:\n%s'
% (ex.returncode, ex.output.strip()))
|
[
"def",
"scp_put",
"(",
"self",
",",
"src",
",",
"dst",
")",
":",
"cmd",
"=",
"[",
"'scp'",
",",
"'-B'",
",",
"'-oStrictHostKeyChecking=no'",
",",
"'-oUserKnownHostsFile=/dev/null'",
",",
"'-oLogLevel=ERROR'",
"]",
"if",
"self",
".",
"_key",
"is",
"not",
"None",
":",
"cmd",
".",
"extend",
"(",
"[",
"'-i'",
",",
"self",
".",
"_key",
"]",
")",
"cmd",
".",
"append",
"(",
"src",
")",
"remote",
"=",
"''",
"if",
"self",
".",
"_user",
"is",
"not",
"None",
":",
"remote",
"+=",
"self",
".",
"_user",
"+",
"'@'",
"remote",
"+=",
"self",
".",
"_ip",
"+",
"':'",
"+",
"dst",
"cmd",
".",
"append",
"(",
"remote",
")",
"try",
":",
"# Actually ignore output on success, but capture stderr on failure",
"subprocess",
".",
"check_output",
"(",
"cmd",
",",
"stderr",
"=",
"subprocess",
".",
"STDOUT",
")",
"except",
"subprocess",
".",
"CalledProcessError",
"as",
"ex",
":",
"raise",
"RuntimeError",
"(",
"'scp returned exit status %d:\\n%s'",
"%",
"(",
"ex",
".",
"returncode",
",",
"ex",
".",
"output",
".",
"strip",
"(",
")",
")",
")"
] |
Copy src file from local system to dst on remote system.
|
[
"Copy",
"src",
"file",
"from",
"local",
"system",
"to",
"dst",
"on",
"remote",
"system",
"."
] |
4e0e4407e0f827d2abcb13401fddf4d747d1655e
|
https://github.com/knetsolutions/smallcli/blob/4e0e4407e0f827d2abcb13401fddf4d747d1655e/smallcli/resources/ssh.py#L49-L69
|
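The subprocess pattern is worth isolating: routing stderr into check_output's captured stream means a failing command's diagnostics land on ex.output, ready for the RuntimeError message. A runnable reduction, assuming a POSIX `ls`; the original predates Python 3, so a .decode() is added here for the bytes output:

import subprocess

try:
    subprocess.check_output(["ls", "/no/such/path"], stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as ex:
    print("command failed with exit status %d:\n%s"
          % (ex.returncode, ex.output.decode().strip()))
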
239,686
|
etcher-be/elib_miz
|
elib_miz/mission.py
|
BaseMissionObject.get_group_by_id
|
def get_group_by_id(self, group_id: str) -> typing.Optional['Group']:
"""
Gets a group by id
Args:
group_id: group id
Returns: Group
"""
VALID_POSITIVE_INT.validate(group_id, 'get_group_by_id', exc=ValueError)
for group in self.groups:
if group.group_id == group_id:
return group
return None
|
python
|
def get_group_by_id(self, group_id: str) -> typing.Optional['Group']:
"""
Gets a group by id
Args:
group_id: group id
Returns: Group
"""
VALID_POSITIVE_INT.validate(group_id, 'get_group_by_id', exc=ValueError)
for group in self.groups:
if group.group_id == group_id:
return group
return None
|
[
"def",
"get_group_by_id",
"(",
"self",
",",
"group_id",
":",
"str",
")",
"->",
"typing",
".",
"Optional",
"[",
"'Group'",
"]",
":",
"VALID_POSITIVE_INT",
".",
"validate",
"(",
"group_id",
",",
"'get_group_by_id'",
",",
"exc",
"=",
"ValueError",
")",
"for",
"group",
"in",
"self",
".",
"groups",
":",
"if",
"group",
".",
"group_id",
"==",
"group_id",
":",
"return",
"group",
"return",
"None"
] |
Gets a group by id
Args:
group_id: group id
Returns: Group
|
[
"Gets",
"a",
"group",
"by",
"id"
] |
f28db58fadb2cd9341e0ae4d65101c0cc7d8f3d7
|
https://github.com/etcher-be/elib_miz/blob/f28db58fadb2cd9341e0ae4d65101c0cc7d8f3d7/elib_miz/mission.py#L114-L128
|
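This getter and the several that follow (by name, for units, and so on) share one shape: validate the key, linear-scan a generator, return the match or None; a miss is not an error. A compact self-contained version of the pattern:

from typing import Iterator, Optional

class Group:
    def __init__(self, group_id: int):
        self.group_id = group_id

class Mission:
    def __init__(self, groups):
        self._groups = list(groups)

    @property
    def groups(self) -> Iterator[Group]:
        yield from self._groups

    def get_group_by_id(self, group_id: int) -> Optional[Group]:
        for group in self.groups:
            if group.group_id == group_id:
                return group
        return None

mission = Mission([Group(1), Group(2)])
print(mission.get_group_by_id(2).group_id)   # 2
print(mission.get_group_by_id(99))           # None -- a miss is not an error
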
239,687
|
etcher-be/elib_miz
|
elib_miz/mission.py
|
BaseMissionObject.get_clients_groups
|
def get_clients_groups(self) -> typing.Iterator['Group']:
"""
Gets all clients groups
Returns: generator of Groups
"""
for group in self.groups:
if group.group_is_client_group:
yield group
|
python
|
def get_clients_groups(self) -> typing.Iterator['Group']:
"""
Gets all clients groups
Returns: generator of Groups
"""
for group in self.groups:
if group.group_is_client_group:
yield group
|
[
"def",
"get_clients_groups",
"(",
"self",
")",
"->",
"typing",
".",
"Iterator",
"[",
"'Group'",
"]",
":",
"for",
"group",
"in",
"self",
".",
"groups",
":",
"if",
"group",
".",
"group_is_client_group",
":",
"yield",
"group"
] |
Gets all clients groups
Returns: generator of Groups
|
[
"Gets",
"all",
"clients",
"groups"
] |
f28db58fadb2cd9341e0ae4d65101c0cc7d8f3d7
|
https://github.com/etcher-be/elib_miz/blob/f28db58fadb2cd9341e0ae4d65101c0cc7d8f3d7/elib_miz/mission.py#L130-L139
|
239,688
|
etcher-be/elib_miz
|
elib_miz/mission.py
|
BaseMissionObject.get_group_by_name
|
def get_group_by_name(self, group_name: str) -> typing.Optional['Group']:
"""
Gets a group from its name
Args:
group_name:
Returns: Group
"""
VALID_STR.validate(group_name, 'get_group_by_name')
for group in self.groups:
if group.group_name == group_name:
return group
return None
|
python
|
def get_group_by_name(self, group_name: str) -> typing.Optional['Group']:
"""
Gets a group from its name
Args:
group_name:
Returns: Group
"""
VALID_STR.validate(group_name, 'get_group_by_name')
for group in self.groups:
if group.group_name == group_name:
return group
return None
|
[
"def",
"get_group_by_name",
"(",
"self",
",",
"group_name",
":",
"str",
")",
"->",
"typing",
".",
"Optional",
"[",
"'Group'",
"]",
":",
"VALID_STR",
".",
"validate",
"(",
"group_name",
",",
"'get_group_by_name'",
")",
"for",
"group",
"in",
"self",
".",
"groups",
":",
"if",
"group",
".",
"group_name",
"==",
"group_name",
":",
"return",
"group",
"return",
"None"
] |
Gets a group from its name
Args:
group_name:
Returns: Group
|
[
"Gets",
"a",
"group",
"from",
"its",
"name"
] |
f28db58fadb2cd9341e0ae4d65101c0cc7d8f3d7
|
https://github.com/etcher-be/elib_miz/blob/f28db58fadb2cd9341e0ae4d65101c0cc7d8f3d7/elib_miz/mission.py#L141-L155
|
239,689
|
etcher-be/elib_miz
|
elib_miz/mission.py
|
BaseMissionObject.get_unit_by_name
|
def get_unit_by_name(self, unit_name: str) -> typing.Optional['BaseUnit']:
"""
Gets a unit from its name
Args:
unit_name: unit name
Returns:
"""
VALID_STR.validate(unit_name, 'get_unit_by_name')
for unit in self.units:
if unit.unit_name == unit_name:
return unit
return None
|
python
|
def get_unit_by_name(self, unit_name: str) -> typing.Optional['BaseUnit']:
"""
Gets a unit from its name
Args:
unit_name: unit name
Returns:
"""
VALID_STR.validate(unit_name, 'get_unit_by_name')
for unit in self.units:
if unit.unit_name == unit_name:
return unit
return None
|
[
"def",
"get_unit_by_name",
"(",
"self",
",",
"unit_name",
":",
"str",
")",
"->",
"typing",
".",
"Optional",
"[",
"'BaseUnit'",
"]",
":",
"VALID_STR",
".",
"validate",
"(",
"unit_name",
",",
"'get_unit_by_name'",
")",
"for",
"unit",
"in",
"self",
".",
"units",
":",
"if",
"unit",
".",
"unit_name",
"==",
"unit_name",
":",
"return",
"unit",
"return",
"None"
] |
Gets a unit from its name
Args:
unit_name: unit name
Returns:
|
[
"Gets",
"a",
"unit",
"from",
"its",
"name"
] |
f28db58fadb2cd9341e0ae4d65101c0cc7d8f3d7
|
https://github.com/etcher-be/elib_miz/blob/f28db58fadb2cd9341e0ae4d65101c0cc7d8f3d7/elib_miz/mission.py#L157-L171
|
239,690
|
etcher-be/elib_miz
|
elib_miz/mission.py
|
BaseMissionObject.get_unit_by_id
|
def get_unit_by_id(self, unit_id: str) -> typing.Optional['BaseUnit']:
"""
Gets a unit from its ID
Args:
unit_id: unit id
Returns: Unit
"""
VALID_POSITIVE_INT.validate(unit_id, 'get_unit_by_id')
for unit in self.units:
if unit.unit_id == unit_id:
return unit
return None
|
python
|
def get_unit_by_id(self, unit_id: str) -> typing.Optional['BaseUnit']:
"""
Gets a unit from its ID
Args:
unit_id: unit id
Returns: Unit
"""
VALID_POSITIVE_INT.validate(unit_id, 'get_unit_by_id')
for unit in self.units:
if unit.unit_id == unit_id:
return unit
return None
|
[
"def",
"get_unit_by_id",
"(",
"self",
",",
"unit_id",
":",
"str",
")",
"->",
"typing",
".",
"Optional",
"[",
"'BaseUnit'",
"]",
":",
"VALID_POSITIVE_INT",
".",
"validate",
"(",
"unit_id",
",",
"'get_unit_by_id'",
")",
"for",
"unit",
"in",
"self",
".",
"units",
":",
"if",
"unit",
".",
"unit_id",
"==",
"unit_id",
":",
"return",
"unit",
"return",
"None"
] |
Gets a unit from its ID
Args:
unit_id: unit id
Returns: Unit
|
[
"Gets",
"a",
"unit",
"from",
"its",
"ID"
] |
f28db58fadb2cd9341e0ae4d65101c0cc7d8f3d7
|
https://github.com/etcher-be/elib_miz/blob/f28db58fadb2cd9341e0ae4d65101c0cc7d8f3d7/elib_miz/mission.py#L173-L187
|
239,691
|
etcher-be/elib_miz
|
elib_miz/mission.py
|
BaseMissionObject.units
|
def units(self) -> typing.Iterator['BaseUnit']:
"""
Iterates over all units
Returns: generator of Unit
"""
for group in self.groups:
for unit in group.units:
yield unit
|
python
|
def units(self) -> typing.Iterator['BaseUnit']:
"""
Iterates over all units
Returns: generator of Unit
"""
for group in self.groups:
for unit in group.units:
yield unit
|
[
"def",
"units",
"(",
"self",
")",
"->",
"typing",
".",
"Iterator",
"[",
"'BaseUnit'",
"]",
":",
"for",
"group",
"in",
"self",
".",
"groups",
":",
"for",
"unit",
"in",
"group",
".",
"units",
":",
"yield",
"unit"
] |
Iterates over all units
Returns: generator of Unit
|
[
"Iterates",
"over",
"all",
"units"
] |
f28db58fadb2cd9341e0ae4d65101c0cc7d8f3d7
|
https://github.com/etcher-be/elib_miz/blob/f28db58fadb2cd9341e0ae4d65101c0cc7d8f3d7/elib_miz/mission.py#L190-L199
|
239,692
|
etcher-be/elib_miz
|
elib_miz/mission.py
|
BaseMissionObject.groups
|
def groups(self) -> typing.Iterator['Group']:
"""
Iterates over all groups
Returns: generator of Group
"""
for country in self.countries:
for group in country.groups:
yield group
|
python
|
def groups(self) -> typing.Iterator['Group']:
"""
Iterates over all groups
Returns: generator of Group
"""
for country in self.countries:
for group in country.groups:
yield group
|
[
"def",
"groups",
"(",
"self",
")",
"->",
"typing",
".",
"Iterator",
"[",
"'Group'",
"]",
":",
"for",
"country",
"in",
"self",
".",
"countries",
":",
"for",
"group",
"in",
"country",
".",
"groups",
":",
"yield",
"group"
] |
Iterates over all groups
Returns: generator of Group
|
[
"Iterates",
"over",
"all",
"groups"
] |
f28db58fadb2cd9341e0ae4d65101c0cc7d8f3d7
|
https://github.com/etcher-be/elib_miz/blob/f28db58fadb2cd9341e0ae4d65101c0cc7d8f3d7/elib_miz/mission.py#L202-L211
|
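units and groups are chained generators: each level just re-yields from the level below it, so nothing is materialised until a caller iterates. A toy mission tree showing the same flattening:

countries = {"blue": {"g1": ["u1", "u2"]}, "red": {"g2": ["u3"]}}

def groups():
    for country in countries.values():
        yield from country                # group names

def units():
    for country in countries.values():
        for unit_list in country.values():
            yield from unit_list

print(list(groups()))  # ['g1', 'g2']
print(list(units()))   # ['u1', 'u2', 'u3']
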
239,693
|
etcher-be/elib_miz
|
elib_miz/mission.py
|
Coalition.get_country_by_name
|
def get_country_by_name(self, country_name) -> 'Country':
"""
Gets a country in this coalition by its name
Args:
country_name: country name
Returns: Country
"""
VALID_STR.validate(country_name, 'get_country_by_name', exc=ValueError)
if country_name not in self._countries_by_name.keys():
for country in self.countries:
if country.country_name == country_name:
return country
raise ValueError(country_name)
else:
return self._countries_by_name[country_name]
|
python
|
def get_country_by_name(self, country_name) -> 'Country':
"""
Gets a country in this coalition by its name
Args:
country_name: country name
Returns: Country
"""
VALID_STR.validate(country_name, 'get_country_by_name', exc=ValueError)
if country_name not in self._countries_by_name.keys():
for country in self.countries:
if country.country_name == country_name:
return country
raise ValueError(country_name)
else:
return self._countries_by_name[country_name]
|
[
"def",
"get_country_by_name",
"(",
"self",
",",
"country_name",
")",
"->",
"'Country'",
":",
"VALID_STR",
".",
"validate",
"(",
"country_name",
",",
"'get_country_by_name'",
",",
"exc",
"=",
"ValueError",
")",
"if",
"country_name",
"not",
"in",
"self",
".",
"_countries_by_name",
".",
"keys",
"(",
")",
":",
"for",
"country",
"in",
"self",
".",
"countries",
":",
"if",
"country",
".",
"country_name",
"==",
"country_name",
":",
"return",
"country",
"raise",
"ValueError",
"(",
"country_name",
")",
"else",
":",
"return",
"self",
".",
"_countries_by_name",
"[",
"country_name",
"]"
] |
Gets a country in this coalition by its name
Args:
country_name: country name
Returns: Country
|
[
"Gets",
"a",
"country",
"in",
"this",
"coalition",
"by",
"its",
"name"
] |
f28db58fadb2cd9341e0ae4d65101c0cc7d8f3d7
|
https://github.com/etcher-be/elib_miz/blob/f28db58fadb2cd9341e0ae4d65101c0cc7d8f3d7/elib_miz/mission.py#L578-L595
|
239,694
|
etcher-be/elib_miz
|
elib_miz/mission.py
|
Coalition.get_country_by_id
|
def get_country_by_id(self, country_id) -> 'Country':
"""
Gets a country in this coalition by its ID
Args:
country_id: country Id
Returns: Country
"""
VALID_POSITIVE_INT.validate(country_id, 'get_country_by_id', exc=ValueError)
if country_id not in self._countries_by_id.keys():
for country in self.countries:
if country.country_id == country_id:
return country
raise ValueError(country_id)
else:
return self._countries_by_id[country_id]
|
python
|
def get_country_by_id(self, country_id) -> 'Country':
"""
Gets a country in this coalition by its ID
Args:
country_id: country Id
Returns: Country
"""
VALID_POSITIVE_INT.validate(country_id, 'get_country_by_id', exc=ValueError)
if country_id not in self._countries_by_id.keys():
for country in self.countries:
if country.country_id == country_id:
return country
raise ValueError(country_id)
else:
return self._countries_by_id[country_id]
|
[
"def",
"get_country_by_id",
"(",
"self",
",",
"country_id",
")",
"->",
"'Country'",
":",
"VALID_POSITIVE_INT",
".",
"validate",
"(",
"country_id",
",",
"'get_country_by_id'",
",",
"exc",
"=",
"ValueError",
")",
"if",
"country_id",
"not",
"in",
"self",
".",
"_countries_by_id",
".",
"keys",
"(",
")",
":",
"for",
"country",
"in",
"self",
".",
"countries",
":",
"if",
"country",
".",
"country_id",
"==",
"country_id",
":",
"return",
"country",
"raise",
"ValueError",
"(",
"country_id",
")",
"else",
":",
"return",
"self",
".",
"_countries_by_id",
"[",
"country_id",
"]"
] |
Gets a country in this coalition by its ID
Args:
country_id: country Id
Returns: Country
|
[
"Gets",
"a",
"country",
"in",
"this",
"coalition",
"by",
"its",
"ID"
] |
f28db58fadb2cd9341e0ae4d65101c0cc7d8f3d7
|
https://github.com/etcher-be/elib_miz/blob/f28db58fadb2cd9341e0ae4d65101c0cc7d8f3d7/elib_miz/mission.py#L597-L614
|
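Unlike the group and unit getters earlier, the country getters are cache-first and strict: try the lookup dict, fall back to a scan, and raise ValueError on a genuine miss. A self-contained analogue:

class Coalition:
    def __init__(self, countries):
        self.countries = countries                        # list of dicts
        self._countries_by_id = {c["id"]: c for c in countries}

    def get_country_by_id(self, country_id):
        if country_id not in self._countries_by_id:
            for country in self.countries:                # fallback scan
                if country["id"] == country_id:
                    return country
            raise ValueError(country_id)                  # strict: a miss raises
        return self._countries_by_id[country_id]

blue = Coalition([{"id": 1, "name": "France"}])
print(blue.get_country_by_id(1)["name"])   # France
# blue.get_country_by_id(7) would raise ValueError(7)
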
239,695
|
mrstephenneal/PyBundle
|
PyBundle/PyBundle.py
|
bundle_dir
|
def bundle_dir():
"""Handle resource management within an executable file."""
if frozen():
directory = sys._MEIPASS
else:
directory = os.path.dirname(os.path.abspath(stack()[1][1]))
if os.path.exists(directory):
return directory
|
python
|
def bundle_dir():
"""Handle resource management within an executable file."""
if frozen():
directory = sys._MEIPASS
else:
directory = os.path.dirname(os.path.abspath(stack()[1][1]))
if os.path.exists(directory):
return directory
|
[
"def",
"bundle_dir",
"(",
")",
":",
"if",
"frozen",
"(",
")",
":",
"directory",
"=",
"sys",
".",
"_MEIPASS",
"else",
":",
"directory",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"os",
".",
"path",
".",
"abspath",
"(",
"stack",
"(",
")",
"[",
"1",
"]",
"[",
"1",
"]",
")",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"directory",
")",
":",
"return",
"directory"
] |
Handle resource management within an executable file.
|
[
"Handle",
"resource",
"management",
"within",
"an",
"executable",
"file",
"."
] |
44f538d6d6a2acaac55b4305db9427a5d7b14164
|
https://github.com/mrstephenneal/PyBundle/blob/44f538d6d6a2acaac55b4305db9427a5d7b14164/PyBundle/PyBundle.py#L25-L32
|
239,696
|
mrstephenneal/PyBundle
|
PyBundle/PyBundle.py
|
resource_path
|
def resource_path(relative):
"""Adjust path for executable use in executable file"""
if hasattr(sys, "_MEIPASS"):
return os.path.join(sys._MEIPASS, relative)
return os.path.join(relative)
|
python
|
def resource_path(relative):
"""Adjust path for executable use in executable file"""
if hasattr(sys, "_MEIPASS"):
return os.path.join(sys._MEIPASS, relative)
return os.path.join(relative)
|
[
"def",
"resource_path",
"(",
"relative",
")",
":",
"if",
"hasattr",
"(",
"sys",
",",
"\"_MEIPASS\"",
")",
":",
"return",
"os",
".",
"path",
".",
"join",
"(",
"sys",
".",
"_MEIPASS",
",",
"relative",
")",
"return",
"os",
".",
"path",
".",
"join",
"(",
"relative",
")"
] |
Adjust path for executable use in executable file
|
[
"Adjust",
"path",
"for",
"executable",
"use",
"in",
"executable",
"file"
] |
44f538d6d6a2acaac55b4305db9427a5d7b14164
|
https://github.com/mrstephenneal/PyBundle/blob/44f538d6d6a2acaac55b4305db9427a5d7b14164/PyBundle/PyBundle.py#L35-L39
|
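Both PyBundle helpers pivot on sys._MEIPASS, the temporary directory a PyInstaller one-file executable unpacks itself into; in a normal interpreter the attribute is absent. The behaviour of resource_path is easy to see directly:

import os
import sys

def resource_path(relative):
    if hasattr(sys, "_MEIPASS"):             # set only inside a PyInstaller build
        return os.path.join(sys._MEIPASS, relative)
    return os.path.join(relative)

# Run from source this is just the relative path; inside a frozen one-file
# build it would point into the unpacked temp directory instead.
print(resource_path(os.path.join("data", "config.yaml")))   # data/config.yaml
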
239,697
|
pip-services3-python/pip-services3-commons-python
|
pip_services3_commons/refer/Descriptor.py
|
Descriptor.from_string
|
def from_string(value):
"""
Parses colon-separated list of descriptor fields and returns them as a Descriptor.
:param value: colon-separated descriptor fields to initialize Descriptor.
:return: a newly created Descriptor.
"""
if value == None or len(value) == 0:
return None
tokens = value.split(":")
if len(tokens) != 5:
raise ConfigException(
None, "BAD_DESCRIPTOR", "Descriptor " + str(value) + " is in wrong format"
).with_details("descriptor", value)
return Descriptor(tokens[0].strip(), tokens[1].strip(), tokens[2].strip(), tokens[3].strip(), tokens[4].strip())
|
python
|
def from_string(value):
"""
Parses colon-separated list of descriptor fields and returns them as a Descriptor.
:param value: colon-separated descriptor fields to initialize Descriptor.
:return: a newly created Descriptor.
"""
if value == None or len(value) == 0:
return None
tokens = value.split(":")
if len(tokens) != 5:
raise ConfigException(
None, "BAD_DESCRIPTOR", "Descriptor " + str(value) + " is in wrong format"
).with_details("descriptor", value)
return Descriptor(tokens[0].strip(), tokens[1].strip(), tokens[2].strip(), tokens[3].strip(), tokens[4].strip())
|
[
"def",
"from_string",
"(",
"value",
")",
":",
"if",
"value",
"==",
"None",
"or",
"len",
"(",
"value",
")",
"==",
"0",
":",
"return",
"None",
"tokens",
"=",
"value",
".",
"split",
"(",
"\":\"",
")",
"if",
"len",
"(",
"tokens",
")",
"!=",
"5",
":",
"raise",
"ConfigException",
"(",
"None",
",",
"\"BAD_DESCRIPTOR\"",
",",
"\"Descriptor \"",
"+",
"str",
"(",
"value",
")",
"+",
"\" is in wrong format\"",
")",
".",
"with_details",
"(",
"\"descriptor\"",
",",
"value",
")",
"return",
"Descriptor",
"(",
"tokens",
"[",
"0",
"]",
".",
"strip",
"(",
")",
",",
"tokens",
"[",
"1",
"]",
".",
"strip",
"(",
")",
",",
"tokens",
"[",
"2",
"]",
".",
"strip",
"(",
")",
",",
"tokens",
"[",
"3",
"]",
".",
"strip",
"(",
")",
",",
"tokens",
"[",
"4",
"]",
".",
"strip",
"(",
")",
")"
] |
Parses colon-separated list of descriptor fields and returns them as a Descriptor.
:param value: colon-separated descriptor fields to initialize Descriptor.
:return: a newly created Descriptor.
|
[
"Parses",
"colon",
"-",
"separated",
"list",
"of",
"descriptor",
"fields",
"and",
"returns",
"them",
"as",
"a",
"Descriptor",
"."
] |
22cbbb3e91e49717f65c083d36147fdb07ba9e3b
|
https://github.com/pip-services3-python/pip-services3-commons-python/blob/22cbbb3e91e49717f65c083d36147fdb07ba9e3b/pip_services3_commons/refer/Descriptor.py#L199-L216
|
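The parse itself is a split-and-strip over exactly five colon-separated fields; in the pip-services convention those are group, type, kind, name, and version (stated here from the library's documentation, not from this entry). Checking the tokenisation on a padded string:

value = " pip-services : logger : console : default : 1.0 "
tokens = [token.strip() for token in value.split(":")]
print(tokens)        # ['pip-services', 'logger', 'console', 'default', '1.0']
print(len(tokens))   # 5 -- any other count raises BAD_DESCRIPTOR above
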
239,698
|
FujiMakoto/AgentML
|
agentml/parser/tags/random.py
|
Random.value
|
def value(self):
"""
Fetch a random weighted choice
"""
choice = weighted_choice(self._responses)
# If the choice is a tuple, join the elements into a single mapped string
if isinstance(choice, tuple):
return ''.join(map(str, choice)).strip()
# Otherwise, return the choice itself as a string
return str(choice)
|
python
|
def value(self):
"""
Fetch a random weighted choice
"""
choice = weighted_choice(self._responses)
# If the choice is a tuple, join the elements into a single mapped string
if isinstance(choice, tuple):
return ''.join(map(str, choice)).strip()
# Otherwise, return the choice itself as a string
return str(choice)
|
[
"def",
"value",
"(",
"self",
")",
":",
"choice",
"=",
"weighted_choice",
"(",
"self",
".",
"_responses",
")",
"# If the choice is a tuple, join the elements into a single mapped string",
"if",
"isinstance",
"(",
"choice",
",",
"tuple",
")",
":",
"return",
"''",
".",
"join",
"(",
"map",
"(",
"str",
",",
"choice",
")",
")",
".",
"strip",
"(",
")",
"# Otherwise, return the choice itself as a string",
"return",
"str",
"(",
"choice",
")"
] |
Fetch a random weighted choice
|
[
"Fetch",
"a",
"random",
"weighted",
"choice"
] |
c8cb64b460d876666bf29ea2c682189874c7c403
|
https://github.com/FujiMakoto/AgentML/blob/c8cb64b460d876666bf29ea2c682189874c7c403/agentml/parser/tags/random.py#L45-L56
|
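Random.value depends on a weighted_choice helper that the record does not show. Below is a self-contained sketch under the assumption that the responses are (item, weight) pairs; the real AgentML helper may be implemented differently:

import random

def weighted_choice(choices):
    # Assumed contract: choices is an iterable of (item, weight) pairs.
    items, weights = zip(*choices)
    return random.choices(items, weights=weights, k=1)[0]

# Tuple choices are flattened into one string, mirroring Random.value.
choice = weighted_choice([(("Hello", ", ", "world"), 3), ("Goodbye.", 1)])
if isinstance(choice, tuple):
    print(''.join(map(str, choice)).strip())
else:
    print(str(choice))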
239,699
|
kaniblu/pydumper
|
dumper/__init__.py
|
dump
|
def dump(obj, name, path=None, ext="dat", overwrite=True, silent=False):
"""
Dumps the object to disk with given name and extension.
Optionally the path can be specified as well. (But nothing stops
    you from adding path to the name.)
"""
if path and os.path.isfile(path):
        raise ValueError("Specified path is a file.")
filename = __get_filename(path, name, ext)
if not overwrite and os.path.exists(filename):
if not silent:
            raise ValueError("Specified output filename already exists.")
return
with open(filename, "wb") as f:
pickle.dump(obj, f)
|
python
|
def dump(obj, name, path=None, ext="dat", overwrite=True, silent=False):
"""
Dumps the object to disk with given name and extension.
Optionally the path can be specified as well. (But nothing stops
    you from adding path to the name.)
"""
if path and os.path.isfile(path):
        raise ValueError("Specified path is a file.")
filename = __get_filename(path, name, ext)
if not overwrite and os.path.exists(filename):
if not silent:
            raise ValueError("Specified output filename already exists.")
return
with open(filename, "wb") as f:
pickle.dump(obj, f)
|
[
"def",
"dump",
"(",
"obj",
",",
"name",
",",
"path",
"=",
"None",
",",
"ext",
"=",
"\"dat\"",
",",
"overwrite",
"=",
"True",
",",
"silent",
"=",
"False",
")",
":",
"if",
"path",
"and",
"os",
".",
"path",
".",
"isfile",
"(",
"path",
")",
":",
"raise",
"ValueException",
"(",
"\"Specified path is a file.\"",
")",
"filename",
"=",
"__get_filename",
"(",
"path",
",",
"name",
",",
"ext",
")",
"if",
"not",
"overwrite",
"and",
"os",
".",
"path",
".",
"exists",
"(",
"filename",
")",
":",
"if",
"not",
"silent",
":",
"raise",
"ValueException",
"(",
"\"Specified output filename already exists.\"",
")",
"return",
"with",
"open",
"(",
"filename",
",",
"\"wb\"",
")",
"as",
"f",
":",
"pickle",
".",
"dump",
"(",
"obj",
",",
"f",
")"
] |
Dumps the object to disk with given name and extension.
Optionally the path can be specified as well. (But nothing stops
you from adding path to the name.)
|
[
"Dumps",
"the",
"object",
"to",
"disk",
"with",
"given",
"name",
"and",
"extension",
".",
"Optionally",
"the",
"path",
"can",
"be",
"specified",
"as",
"well",
".",
"(",
"But",
"nothing",
"stops",
"you",
"from",
"adding",
"path",
"to",
"the",
"name",
"."
] |
ce61b96b09604b52d4bab667ac1862755ca21f3b
|
https://github.com/kaniblu/pydumper/blob/ce61b96b09604b52d4bab667ac1862755ca21f3b/dumper/__init__.py#L14-L31
|
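The dump function relies on a private __get_filename helper that the record does not include. A self-contained sketch with a hypothetical stand-in for that helper:

import os
import pickle

def __get_filename(path, name, ext):
    # Hypothetical stand-in: join the optional directory with "<name>.<ext>".
    filename = "%s.%s" % (name, ext)
    return os.path.join(path, filename) if path else filename

def dump(obj, name, path=None, ext="dat", overwrite=True, silent=False):
    if path and os.path.isfile(path):
        raise ValueError("Specified path is a file.")
    filename = __get_filename(path, name, ext)
    if not overwrite and os.path.exists(filename):
        if not silent:
            raise ValueError("Specified output filename already exists.")
        return
    with open(filename, "wb") as f:
        pickle.dump(obj, f)

dump({"answer": 42}, "example")  # writes example.dat in the working directory

Since the payload goes through pickle, loading such files can execute arbitrary code; only read them back from trusted sources.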