Dataset columns (value size ranges as shown by the viewer):
- repository_name: string, 7 to 55 characters
- func_path_in_repository: string, 4 to 223 characters
- func_name: string, 1 to 134 characters
- whole_func_string: string, 75 to 104k characters
- language: string, 1 distinct value
- func_code_string: string, 75 to 104k characters
- func_code_tokens: list, 19 to 28.4k items
- func_documentation_string: string, 1 to 46.9k characters
- func_documentation_tokens: list, 1 to 1.97k items
- split_name: string, 1 distinct value
- func_code_url: string, 87 to 315 characters

Each record below gives these fields in this order, with a line containing only '|' separating consecutive fields.
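As a quick orientation before the records, here is a hedged sketch of how these columns could be read if this split were loaded with the Hugging Face datasets library; the dataset identifier "path/to/this-dataset" is a placeholder, not the real name.

# Hypothetical sketch: iterating over the columns listed above.
# "path/to/this-dataset" is a placeholder identifier, not the actual one.
from datasets import load_dataset

ds = load_dataset("path/to/this-dataset", split="train")
for row in ds.select(range(2)):
    print(row["repository_name"], row["func_name"])
    print(row["func_documentation_string"][:80])
    print(row["func_code_url"])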
tsnaomi/finnsyll
|
finnsyll/phonology.py
|
sonseq
|
def sonseq(word):
'''Return True if 'word' does not violate sonority sequencing.'''
parts = re.split(r'([ieaouäöy]+)', word, flags=re.I | re.U)
onset, coda = parts[0], parts[-1]
# simplex onset Finnish complex onset
if len(onset) <= 1 or onset.lower() in ONSETS:
# simplex coda Finnish complex coda
return len(coda) <= 1 # or coda in codas_inventory
return False
|
python
|
def sonseq(word):
'''Return True if 'word' does not violate sonority sequencing.'''
parts = re.split(r'([ieaouäöy]+)', word, flags=re.I | re.U)
onset, coda = parts[0], parts[-1]
# simplex onset Finnish complex onset
if len(onset) <= 1 or onset.lower() in ONSETS:
# simplex coda Finnish complex coda
return len(coda) <= 1 # or coda in codas_inventory
return False
|
[
"def",
"sonseq",
"(",
"word",
")",
":",
"parts",
"=",
"re",
".",
"split",
"(",
"r'([ieaouäöy]+)'",
",",
"word",
",",
"flags",
"=",
"re",
".",
"I",
"|",
"re",
".",
"U",
")",
"onset",
",",
"coda",
"=",
"parts",
"[",
"0",
"]",
",",
"parts",
"[",
"-",
"1",
"]",
"# simplex onset Finnish complex onset",
"if",
"len",
"(",
"onset",
")",
"<=",
"1",
"or",
"onset",
".",
"lower",
"(",
")",
"in",
"ONSETS",
":",
"# simplex coda Finnish complex coda",
"return",
"len",
"(",
"coda",
")",
"<=",
"1",
"# or coda in codas_inventory",
"return",
"False"
] |
Return True if 'word' does not violate sonority sequencing.
|
[
"Return",
"True",
"if",
"word",
"does",
"not",
"violate",
"sonority",
"sequencing",
"."
] |
train
|
https://github.com/tsnaomi/finnsyll/blob/6a42740311688c946a636a3e2304866c7aa041b3/finnsyll/phonology.py#L154-L164
|
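A minimal runnable sketch of how the sonseq function above behaves; the ONSETS set here is a small illustrative subset, not the full inventory defined in finnsyll/phonology.py.

import re

# Illustrative subset of permitted Finnish complex onsets (assumption).
ONSETS = {'pl', 'pr', 'tr', 'kl', 'kr', 'sp', 'st'}

def sonseq(word):
    '''Return True if 'word' does not violate sonority sequencing.'''
    parts = re.split(r'([ieaouäöy]+)', word, flags=re.I | re.U)
    onset, coda = parts[0], parts[-1]
    if len(onset) <= 1 or onset.lower() in ONSETS:
        return len(coda) <= 1
    return False

print(sonseq('kala'))   # True: simplex onset 'k', empty coda
print(sonseq('trama'))  # True: 'tr' is in the illustrative onset set
print(sonseq('mtava'))  # False: 'mt' is not a permitted complex onset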
tsnaomi/finnsyll
|
finnsyll/phonology.py
|
harmonic
|
def harmonic(word):
'''Return True if the word's vowels agree in frontness/backness.'''
depth = {'ä': 0, 'ö': 0, 'y': 0, 'a': 1, 'o': 1, 'u': 1}
vowels = filter(lambda ch: is_front(ch) or is_back(ch), word)
depths = (depth[x.lower()] for x in vowels)
return len(set(depths)) < 2
|
python
|
def harmonic(word):
'''Return True if the word's vowels agree in frontness/backness.'''
depth = {'ä': 0, 'ö': 0, 'y': 0, 'a': 1, 'o': 1, 'u': 1}
vowels = filter(lambda ch: is_front(ch) or is_back(ch), word)
depths = (depth[x.lower()] for x in vowels)
return len(set(depths)) < 2
|
[
"def",
"harmonic",
"(",
"word",
")",
":",
"depth",
"=",
"{",
"'ä'",
":",
"0",
",",
"'ö'",
":",
"0",
",",
"'y'",
":",
"0",
",",
"'a'",
":",
"1",
",",
"'o'",
":",
"1",
",",
"'u'",
":",
"1",
"}",
"vowels",
"=",
"filter",
"(",
"lambda",
"ch",
":",
"is_front",
"(",
"ch",
")",
"or",
"is_back",
"(",
"ch",
")",
",",
"word",
")",
"depths",
"=",
"(",
"depth",
"[",
"x",
".",
"lower",
"(",
")",
"]",
"for",
"x",
"in",
"vowels",
")",
"return",
"len",
"(",
"set",
"(",
"depths",
")",
")",
"<",
"2"
] |
Return True if the word's vowels agree in frontness/backness.
|
[
"Return",
"True",
"if",
"the",
"word",
"s",
"vowels",
"agree",
"in",
"frontness",
"/",
"backness",
"."
] |
train
|
https://github.com/tsnaomi/finnsyll/blob/6a42740311688c946a636a3e2304866c7aa041b3/finnsyll/phonology.py#L172-L178
|
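A similar self-contained sketch for harmonic above; is_front and is_back are stand-ins assumed here, since their real definitions live elsewhere in finnsyll/phonology.py.

# Stand-in helpers (assumptions); the real ones are defined elsewhere in phonology.py.
def is_front(ch):
    return ch.lower() in 'äöy'

def is_back(ch):
    return ch.lower() in 'aou'

def harmonic(word):
    '''Return True if the word's vowels agree in frontness/backness.'''
    depth = {'ä': 0, 'ö': 0, 'y': 0, 'a': 1, 'o': 1, 'u': 1}
    vowels = filter(lambda ch: is_front(ch) or is_back(ch), word)
    depths = (depth[x.lower()] for x in vowels)
    return len(set(depths)) < 2

print(harmonic('pöytä'))    # True: ö, y, ä are all front vowels
print(harmonic('olympia'))  # False: mixes back (o, a) and front (y) vowels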
saltant-org/saltant-py
|
saltant/models/executable_task_type.py
|
ExecutableTaskType.put
|
def put(self):
"""Updates this task type on the saltant server.
Returns:
:class:`saltant.models.container_task_type.ExecutableTaskType`:
An executable task type model instance representing the task type
just updated.
"""
return self.manager.put(
id=self.id,
name=self.name,
description=self.description,
command_to_run=self.command_to_run,
environment_variables=self.environment_variables,
required_arguments=self.required_arguments,
required_arguments_default_values=(
self.required_arguments_default_values
),
json_file_option=self.json_file_option,
)
|
python
|
def put(self):
"""Updates this task type on the saltant server.
Returns:
:class:`saltant.models.container_task_type.ExecutableTaskType`:
An executable task type model instance representing the task type
just updated.
"""
return self.manager.put(
id=self.id,
name=self.name,
description=self.description,
command_to_run=self.command_to_run,
environment_variables=self.environment_variables,
required_arguments=self.required_arguments,
required_arguments_default_values=(
self.required_arguments_default_values
),
json_file_option=self.json_file_option,
)
|
[
"def",
"put",
"(",
"self",
")",
":",
"return",
"self",
".",
"manager",
".",
"put",
"(",
"id",
"=",
"self",
".",
"id",
",",
"name",
"=",
"self",
".",
"name",
",",
"description",
"=",
"self",
".",
"description",
",",
"command_to_run",
"=",
"self",
".",
"command_to_run",
",",
"environment_variables",
"=",
"self",
".",
"environment_variables",
",",
"required_arguments",
"=",
"self",
".",
"required_arguments",
",",
"required_arguments_default_values",
"=",
"(",
"self",
".",
"required_arguments_default_values",
")",
",",
"json_file_option",
"=",
"self",
".",
"json_file_option",
",",
")"
] |
Updates this task type on the saltant server.
Returns:
:class:`saltant.models.container_task_type.ExecutableTaskType`:
An executable task type model instance representing the task type
just updated.
|
[
"Updates",
"this",
"task",
"type",
"on",
"the",
"saltant",
"server",
"."
] |
train
|
https://github.com/saltant-org/saltant-py/blob/bf3bdbc4ec9c772c7f621f8bd6a76c5932af68be/saltant/models/executable_task_type.py#L84-L103
|
saltant-org/saltant-py
|
saltant/models/executable_task_type.py
|
ExecutableTaskTypeManager.create
|
def create(
self,
name,
command_to_run,
description="",
environment_variables=None,
required_arguments=None,
required_arguments_default_values=None,
json_file_option=None,
extra_data_to_post=None,
):
"""Create a container task type.
Args:
name (str): The name of the task.
command_to_run (str): The command to run to execute the task.
description (str, optional): The description of the task type.
environment_variables (list, optional): The environment
variables required on the host to execute the task.
required_arguments (list, optional): The argument names for
the task type.
required_arguments_default_values (dict, optional): Default
values for the task's required arguments.
json_file_option (str, optional): The name of a command line
option, e.g., --json-file, which accepts a JSON-encoded
file for the command to run.
extra_data_to_post (dict, optional): Extra key-value pairs
to add to the request data. This is useful for
subclasses which require extra parameters.
Returns:
:class:`saltant.models.container_task_type.ExecutableTaskType`:
An executable task type model instance representing the
task type just created.
"""
# Add in extra data specific to container task types
if extra_data_to_post is None:
extra_data_to_post = {}
extra_data_to_post.update({"json_file_option": json_file_option})
# Call the parent create function
return super(ExecutableTaskTypeManager, self).create(
name=name,
command_to_run=command_to_run,
description=description,
environment_variables=environment_variables,
required_arguments=required_arguments,
required_arguments_default_values=required_arguments_default_values,
extra_data_to_post=extra_data_to_post,
)
|
python
|
def create(
self,
name,
command_to_run,
description="",
environment_variables=None,
required_arguments=None,
required_arguments_default_values=None,
json_file_option=None,
extra_data_to_post=None,
):
"""Create a container task type.
Args:
name (str): The name of the task.
command_to_run (str): The command to run to execute the task.
description (str, optional): The description of the task type.
environment_variables (list, optional): The environment
variables required on the host to execute the task.
required_arguments (list, optional): The argument names for
the task type.
required_arguments_default_values (dict, optional): Default
values for the task's required arguments.
json_file_option (str, optional): The name of a command line
option, e.g., --json-file, which accepts a JSON-encoded
file for the command to run.
extra_data_to_post (dict, optional): Extra key-value pairs
to add to the request data. This is useful for
subclasses which require extra parameters.
Returns:
:class:`saltant.models.container_task_type.ExecutableTaskType`:
An executable task type model instance representing the
task type just created.
"""
# Add in extra data specific to container task types
if extra_data_to_post is None:
extra_data_to_post = {}
extra_data_to_post.update({"json_file_option": json_file_option})
# Call the parent create function
return super(ExecutableTaskTypeManager, self).create(
name=name,
command_to_run=command_to_run,
description=description,
environment_variables=environment_variables,
required_arguments=required_arguments,
required_arguments_default_values=required_arguments_default_values,
extra_data_to_post=extra_data_to_post,
)
|
[
"def",
"create",
"(",
"self",
",",
"name",
",",
"command_to_run",
",",
"description",
"=",
"\"\"",
",",
"environment_variables",
"=",
"None",
",",
"required_arguments",
"=",
"None",
",",
"required_arguments_default_values",
"=",
"None",
",",
"json_file_option",
"=",
"None",
",",
"extra_data_to_post",
"=",
"None",
",",
")",
":",
"# Add in extra data specific to container task types",
"if",
"extra_data_to_post",
"is",
"None",
":",
"extra_data_to_post",
"=",
"{",
"}",
"extra_data_to_post",
".",
"update",
"(",
"{",
"\"json_file_option\"",
":",
"json_file_option",
"}",
")",
"# Call the parent create function",
"return",
"super",
"(",
"ExecutableTaskTypeManager",
",",
"self",
")",
".",
"create",
"(",
"name",
"=",
"name",
",",
"command_to_run",
"=",
"command_to_run",
",",
"description",
"=",
"description",
",",
"environment_variables",
"=",
"environment_variables",
",",
"required_arguments",
"=",
"required_arguments",
",",
"required_arguments_default_values",
"=",
"required_arguments_default_values",
",",
"extra_data_to_post",
"=",
"extra_data_to_post",
",",
")"
] |
Create a container task type.
Args:
name (str): The name of the task.
command_to_run (str): The command to run to execute the task.
description (str, optional): The description of the task type.
environment_variables (list, optional): The environment
variables required on the host to execute the task.
required_arguments (list, optional): The argument names for
the task type.
required_arguments_default_values (dict, optional): Default
values for the task's required arguments.
json_file_option (str, optional): The name of a command line
option, e.g., --json-file, which accepts a JSON-encoded
file for the command to run.
extra_data_to_post (dict, optional): Extra key-value pairs
to add to the request data. This is useful for
subclasses which require extra parameters.
Returns:
:class:`saltant.models.container_task_type.ExecutableTaskType`:
An executable task type model instance representing the
task type just created.
|
[
"Create",
"a",
"container",
"task",
"type",
"."
] |
train
|
https://github.com/saltant-org/saltant-py/blob/bf3bdbc4ec9c772c7f621f8bd6a76c5932af68be/saltant/models/executable_task_type.py#L122-L172
|
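To show the call shape of ExecutableTaskTypeManager.create above, a hedged sketch follows; the task_types object is assumed to be an already-configured ExecutableTaskTypeManager (client construction is outside this record), and all argument values are invented.

# Assumes 'task_types' is an already-configured ExecutableTaskTypeManager;
# the argument values below are illustrative only.
new_type = task_types.create(
    name="fastqc",
    command_to_run="fastqc",
    description="Run FastQC on an input file",
    environment_variables=["AWS_ACCESS_KEY_ID"],
    required_arguments=["input_path"],
    required_arguments_default_values={"input_path": "/data/reads.fastq"},
    json_file_option="--json-file",
)
print(new_type.name)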
saltant-org/saltant-py
|
saltant/models/executable_task_type.py
|
ExecutableTaskTypeManager.put
|
def put(
self,
id,
name,
description,
command_to_run,
environment_variables,
required_arguments,
required_arguments_default_values,
json_file_option,
extra_data_to_put=None,
):
"""Updates a task type on the saltant server.
Args:
id (int): The ID of the task type.
name (str): The name of the task type.
description (str): The description of the task type.
command_to_run (str): The command to run to execute the task.
environment_variables (list): The environment variables
required on the host to execute the task.
required_arguments (list): The argument names for the task type.
required_arguments_default_values (dict): Default values for
the tasks required arguments.
json_file_option (str): The name of a command line option,
e.g., --json-file, which accepts a JSON-encoded file for
the command to run.
extra_data_to_put (dict, optional): Extra key-value pairs to
add to the request data. This is useful for subclasses
which require extra parameters.
"""
# Add in extra data specific to container task types
if extra_data_to_put is None:
extra_data_to_put = {}
extra_data_to_put.update({"json_file_option": json_file_option})
# Call the parent create function
return super(ExecutableTaskTypeManager, self).put(
id=id,
name=name,
description=description,
command_to_run=command_to_run,
environment_variables=environment_variables,
required_arguments=required_arguments,
required_arguments_default_values=(
required_arguments_default_values
),
extra_data_to_put=extra_data_to_put,
)
|
python
|
def put(
self,
id,
name,
description,
command_to_run,
environment_variables,
required_arguments,
required_arguments_default_values,
json_file_option,
extra_data_to_put=None,
):
"""Updates a task type on the saltant server.
Args:
id (int): The ID of the task type.
name (str): The name of the task type.
description (str): The description of the task type.
command_to_run (str): The command to run to execute the task.
environment_variables (list): The environment variables
required on the host to execute the task.
required_arguments (list): The argument names for the task type.
required_arguments_default_values (dict): Default values for
the tasks required arguments.
json_file_option (str): The name of a command line option,
e.g., --json-file, which accepts a JSON-encoded file for
the command to run.
extra_data_to_put (dict, optional): Extra key-value pairs to
add to the request data. This is useful for subclasses
which require extra parameters.
"""
# Add in extra data specific to container task types
if extra_data_to_put is None:
extra_data_to_put = {}
extra_data_to_put.update({"json_file_option": json_file_option})
# Call the parent create function
return super(ExecutableTaskTypeManager, self).put(
id=id,
name=name,
description=description,
command_to_run=command_to_run,
environment_variables=environment_variables,
required_arguments=required_arguments,
required_arguments_default_values=(
required_arguments_default_values
),
extra_data_to_put=extra_data_to_put,
)
|
[
"def",
"put",
"(",
"self",
",",
"id",
",",
"name",
",",
"description",
",",
"command_to_run",
",",
"environment_variables",
",",
"required_arguments",
",",
"required_arguments_default_values",
",",
"json_file_option",
",",
"extra_data_to_put",
"=",
"None",
",",
")",
":",
"# Add in extra data specific to container task types",
"if",
"extra_data_to_put",
"is",
"None",
":",
"extra_data_to_put",
"=",
"{",
"}",
"extra_data_to_put",
".",
"update",
"(",
"{",
"\"json_file_option\"",
":",
"json_file_option",
"}",
")",
"# Call the parent create function",
"return",
"super",
"(",
"ExecutableTaskTypeManager",
",",
"self",
")",
".",
"put",
"(",
"id",
"=",
"id",
",",
"name",
"=",
"name",
",",
"description",
"=",
"description",
",",
"command_to_run",
"=",
"command_to_run",
",",
"environment_variables",
"=",
"environment_variables",
",",
"required_arguments",
"=",
"required_arguments",
",",
"required_arguments_default_values",
"=",
"(",
"required_arguments_default_values",
")",
",",
"extra_data_to_put",
"=",
"extra_data_to_put",
",",
")"
] |
Updates a task type on the saltant server.
Args:
id (int): The ID of the task type.
name (str): The name of the task type.
description (str): The description of the task type.
command_to_run (str): The command to run to execute the task.
environment_variables (list): The environment variables
required on the host to execute the task.
required_arguments (list): The argument names for the task type.
required_arguments_default_values (dict): Default values for
the tasks required arguments.
json_file_option (str): The name of a command line option,
e.g., --json-file, which accepts a JSON-encoded file for
the command to run.
extra_data_to_put (dict, optional): Extra key-value pairs to
add to the request data. This is useful for subclasses
which require extra parameters.
|
[
"Updates",
"a",
"task",
"type",
"on",
"the",
"saltant",
"server",
"."
] |
train
|
https://github.com/saltant-org/saltant-py/blob/bf3bdbc4ec9c772c7f621f8bd6a76c5932af68be/saltant/models/executable_task_type.py#L174-L223
|
OLC-Bioinformatics/sipprverse
|
sipprverse_reporter/reports.py
|
Reports.reporter
|
def reporter(self, analysistype='genesippr'):
"""
Creates a report of the genesippr results
:param analysistype: The variable to use when accessing attributes in the metadata object
"""
logging.info('Creating {} report'.format(analysistype))
# Create a dictionary to link all the genera with their genes
genusgenes = dict()
# The organism-specific targets are in .tfa files in the target path
targetpath = str()
for sample in self.runmetadata.samples:
if sample.general.bestassemblyfile != 'NA':
targetpath = sample[analysistype].targetpath
for organismfile in glob(os.path.join(targetpath, '*.tfa')):
organism = os.path.splitext(os.path.basename(organismfile))[0]
# Use BioPython to extract all the gene names from the file
for record in SeqIO.parse(open(organismfile), 'fasta'):
# Append the gene names to the genus-specific list
try:
genusgenes[organism].add(record.id.split('_')[0])
except (KeyError, IndexError):
genusgenes[organism] = set()
genusgenes[organism].add(record.id.split('_')[0])
# Determine from which genera the gene hits were sourced
for sample in self.runmetadata.samples:
# Initialise the list to store the genera
sample[analysistype].targetgenera = list()
if sample.general.bestassemblyfile != 'NA':
for organism in genusgenes:
# Iterate through all the genesippr hits and attribute each gene to the appropriate genus
for gene in sample[analysistype].results:
# If the gene name is in the genes from that organism, add the genus name to the list of
# genera found in the sample
if gene.split('_')[0] in genusgenes[organism]:
if organism not in sample[analysistype].targetgenera:
sample[analysistype].targetgenera.append(organism)
# Create the path in which the reports are stored
make_path(self.reportpath)
# The report will have every gene for all genera in the header
header = 'Strain,Genus,{},\n'.format(','.join(self.genelist))
data = str()
with open(os.path.join(self.reportpath, analysistype + '.csv'), 'w') as report:
for sample in self.runmetadata.samples:
sample[analysistype].report_output = list()
if sample.general.bestassemblyfile != 'NA':
# Add the genus/genera found in the sample
data += '{},{},'.format(sample.name, ';'.join(sample[analysistype].targetgenera))
best_dict = dict()
if sample[analysistype].results:
gene_check = list()
# Find the best match for all the hits
for target, pid in sample[analysistype].results.items():
gene_name = target.split('_')[0]
for gene in self.genelist:
# If the key matches a gene in the list of genes
if gene == gene_name:
# If the percent identity is better, update the dictionary
try:
if float(pid) > best_dict[gene]:
best_dict[gene] = float(pid)
except KeyError:
best_dict[gene] = float(pid)
for gene in self.genelist:
# If the gene was not found in the sample, print an empty cell in the report
try:
best_dict[gene]
except KeyError:
data += ','
# Print the required information for the gene
for name, identity in sample[analysistype].results.items():
if name.split('_')[0] == gene and gene not in gene_check:
data += '{pid}%'.format(pid=best_dict[gene])
try:
if not sample.general.trimmedcorrectedfastqfiles[0].endswith('.fasta'):
data += ' ({avgd} +/- {std}),'\
.format(avgd=sample[analysistype].avgdepth[name],
std=sample[analysistype].standarddev[name])
else:
data += ','
except IndexError:
data += ','
gene_check.append(gene)
# Add the simplified results to the object - used in the assembly pipeline report
sample[analysistype].report_output.append(gene)
# Add a newline after each sample
data += '\n'
# Add a newline if the sample did not have any gene hits
else:
data += '\n'
# Write the header and data to file
report.write(header)
report.write(data)
|
python
|
def reporter(self, analysistype='genesippr'):
"""
Creates a report of the genesippr results
:param analysistype: The variable to use when accessing attributes in the metadata object
"""
logging.info('Creating {} report'.format(analysistype))
# Create a dictionary to link all the genera with their genes
genusgenes = dict()
# The organism-specific targets are in .tfa files in the target path
targetpath = str()
for sample in self.runmetadata.samples:
if sample.general.bestassemblyfile != 'NA':
targetpath = sample[analysistype].targetpath
for organismfile in glob(os.path.join(targetpath, '*.tfa')):
organism = os.path.splitext(os.path.basename(organismfile))[0]
# Use BioPython to extract all the gene names from the file
for record in SeqIO.parse(open(organismfile), 'fasta'):
# Append the gene names to the genus-specific list
try:
genusgenes[organism].add(record.id.split('_')[0])
except (KeyError, IndexError):
genusgenes[organism] = set()
genusgenes[organism].add(record.id.split('_')[0])
# Determine from which genera the gene hits were sourced
for sample in self.runmetadata.samples:
# Initialise the list to store the genera
sample[analysistype].targetgenera = list()
if sample.general.bestassemblyfile != 'NA':
for organism in genusgenes:
# Iterate through all the genesippr hits and attribute each gene to the appropriate genus
for gene in sample[analysistype].results:
# If the gene name is in the genes from that organism, add the genus name to the list of
# genera found in the sample
if gene.split('_')[0] in genusgenes[organism]:
if organism not in sample[analysistype].targetgenera:
sample[analysistype].targetgenera.append(organism)
# Create the path in which the reports are stored
make_path(self.reportpath)
# The report will have every gene for all genera in the header
header = 'Strain,Genus,{},\n'.format(','.join(self.genelist))
data = str()
with open(os.path.join(self.reportpath, analysistype + '.csv'), 'w') as report:
for sample in self.runmetadata.samples:
sample[analysistype].report_output = list()
if sample.general.bestassemblyfile != 'NA':
# Add the genus/genera found in the sample
data += '{},{},'.format(sample.name, ';'.join(sample[analysistype].targetgenera))
best_dict = dict()
if sample[analysistype].results:
gene_check = list()
# Find the best match for all the hits
for target, pid in sample[analysistype].results.items():
gene_name = target.split('_')[0]
for gene in self.genelist:
# If the key matches a gene in the list of genes
if gene == gene_name:
# If the percent identity is better, update the dictionary
try:
if float(pid) > best_dict[gene]:
best_dict[gene] = float(pid)
except KeyError:
best_dict[gene] = float(pid)
for gene in self.genelist:
# If the gene was not found in the sample, print an empty cell in the report
try:
best_dict[gene]
except KeyError:
data += ','
# Print the required information for the gene
for name, identity in sample[analysistype].results.items():
if name.split('_')[0] == gene and gene not in gene_check:
data += '{pid}%'.format(pid=best_dict[gene])
try:
if not sample.general.trimmedcorrectedfastqfiles[0].endswith('.fasta'):
data += ' ({avgd} +/- {std}),'\
.format(avgd=sample[analysistype].avgdepth[name],
std=sample[analysistype].standarddev[name])
else:
data += ','
except IndexError:
data += ','
gene_check.append(gene)
# Add the simplified results to the object - used in the assembly pipeline report
sample[analysistype].report_output.append(gene)
# Add a newline after each sample
data += '\n'
# Add a newline if the sample did not have any gene hits
else:
data += '\n'
# Write the header and data to file
report.write(header)
report.write(data)
|
[
"def",
"reporter",
"(",
"self",
",",
"analysistype",
"=",
"'genesippr'",
")",
":",
"logging",
".",
"info",
"(",
"'Creating {} report'",
".",
"format",
"(",
"analysistype",
")",
")",
"# Create a dictionary to link all the genera with their genes",
"genusgenes",
"=",
"dict",
"(",
")",
"# The organism-specific targets are in .tfa files in the target path",
"targetpath",
"=",
"str",
"(",
")",
"for",
"sample",
"in",
"self",
".",
"runmetadata",
".",
"samples",
":",
"if",
"sample",
".",
"general",
".",
"bestassemblyfile",
"!=",
"'NA'",
":",
"targetpath",
"=",
"sample",
"[",
"analysistype",
"]",
".",
"targetpath",
"for",
"organismfile",
"in",
"glob",
"(",
"os",
".",
"path",
".",
"join",
"(",
"targetpath",
",",
"'*.tfa'",
")",
")",
":",
"organism",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"os",
".",
"path",
".",
"basename",
"(",
"organismfile",
")",
")",
"[",
"0",
"]",
"# Use BioPython to extract all the gene names from the file",
"for",
"record",
"in",
"SeqIO",
".",
"parse",
"(",
"open",
"(",
"organismfile",
")",
",",
"'fasta'",
")",
":",
"# Append the gene names to the genus-specific list",
"try",
":",
"genusgenes",
"[",
"organism",
"]",
".",
"add",
"(",
"record",
".",
"id",
".",
"split",
"(",
"'_'",
")",
"[",
"0",
"]",
")",
"except",
"(",
"KeyError",
",",
"IndexError",
")",
":",
"genusgenes",
"[",
"organism",
"]",
"=",
"set",
"(",
")",
"genusgenes",
"[",
"organism",
"]",
".",
"add",
"(",
"record",
".",
"id",
".",
"split",
"(",
"'_'",
")",
"[",
"0",
"]",
")",
"# Determine from which genera the gene hits were sourced",
"for",
"sample",
"in",
"self",
".",
"runmetadata",
".",
"samples",
":",
"# Initialise the list to store the genera",
"sample",
"[",
"analysistype",
"]",
".",
"targetgenera",
"=",
"list",
"(",
")",
"if",
"sample",
".",
"general",
".",
"bestassemblyfile",
"!=",
"'NA'",
":",
"for",
"organism",
"in",
"genusgenes",
":",
"# Iterate through all the genesippr hits and attribute each gene to the appropriate genus",
"for",
"gene",
"in",
"sample",
"[",
"analysistype",
"]",
".",
"results",
":",
"# If the gene name is in the genes from that organism, add the genus name to the list of",
"# genera found in the sample",
"if",
"gene",
".",
"split",
"(",
"'_'",
")",
"[",
"0",
"]",
"in",
"genusgenes",
"[",
"organism",
"]",
":",
"if",
"organism",
"not",
"in",
"sample",
"[",
"analysistype",
"]",
".",
"targetgenera",
":",
"sample",
"[",
"analysistype",
"]",
".",
"targetgenera",
".",
"append",
"(",
"organism",
")",
"# Create the path in which the reports are stored",
"make_path",
"(",
"self",
".",
"reportpath",
")",
"# The report will have every gene for all genera in the header",
"header",
"=",
"'Strain,Genus,{},\\n'",
".",
"format",
"(",
"','",
".",
"join",
"(",
"self",
".",
"genelist",
")",
")",
"data",
"=",
"str",
"(",
")",
"with",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"reportpath",
",",
"analysistype",
"+",
"'.csv'",
")",
",",
"'w'",
")",
"as",
"report",
":",
"for",
"sample",
"in",
"self",
".",
"runmetadata",
".",
"samples",
":",
"sample",
"[",
"analysistype",
"]",
".",
"report_output",
"=",
"list",
"(",
")",
"if",
"sample",
".",
"general",
".",
"bestassemblyfile",
"!=",
"'NA'",
":",
"# Add the genus/genera found in the sample",
"data",
"+=",
"'{},{},'",
".",
"format",
"(",
"sample",
".",
"name",
",",
"';'",
".",
"join",
"(",
"sample",
"[",
"analysistype",
"]",
".",
"targetgenera",
")",
")",
"best_dict",
"=",
"dict",
"(",
")",
"if",
"sample",
"[",
"analysistype",
"]",
".",
"results",
":",
"gene_check",
"=",
"list",
"(",
")",
"# Find the best match for all the hits",
"for",
"target",
",",
"pid",
"in",
"sample",
"[",
"analysistype",
"]",
".",
"results",
".",
"items",
"(",
")",
":",
"gene_name",
"=",
"target",
".",
"split",
"(",
"'_'",
")",
"[",
"0",
"]",
"for",
"gene",
"in",
"self",
".",
"genelist",
":",
"# If the key matches a gene in the list of genes",
"if",
"gene",
"==",
"gene_name",
":",
"# If the percent identity is better, update the dictionary",
"try",
":",
"if",
"float",
"(",
"pid",
")",
">",
"best_dict",
"[",
"gene",
"]",
":",
"best_dict",
"[",
"gene",
"]",
"=",
"float",
"(",
"pid",
")",
"except",
"KeyError",
":",
"best_dict",
"[",
"gene",
"]",
"=",
"float",
"(",
"pid",
")",
"for",
"gene",
"in",
"self",
".",
"genelist",
":",
"# If the gene was not found in the sample, print an empty cell in the report",
"try",
":",
"best_dict",
"[",
"gene",
"]",
"except",
"KeyError",
":",
"data",
"+=",
"','",
"# Print the required information for the gene",
"for",
"name",
",",
"identity",
"in",
"sample",
"[",
"analysistype",
"]",
".",
"results",
".",
"items",
"(",
")",
":",
"if",
"name",
".",
"split",
"(",
"'_'",
")",
"[",
"0",
"]",
"==",
"gene",
"and",
"gene",
"not",
"in",
"gene_check",
":",
"data",
"+=",
"'{pid}%'",
".",
"format",
"(",
"pid",
"=",
"best_dict",
"[",
"gene",
"]",
")",
"try",
":",
"if",
"not",
"sample",
".",
"general",
".",
"trimmedcorrectedfastqfiles",
"[",
"0",
"]",
".",
"endswith",
"(",
"'.fasta'",
")",
":",
"data",
"+=",
"' ({avgd} +/- {std}),'",
".",
"format",
"(",
"avgd",
"=",
"sample",
"[",
"analysistype",
"]",
".",
"avgdepth",
"[",
"name",
"]",
",",
"std",
"=",
"sample",
"[",
"analysistype",
"]",
".",
"standarddev",
"[",
"name",
"]",
")",
"else",
":",
"data",
"+=",
"','",
"except",
"IndexError",
":",
"data",
"+=",
"','",
"gene_check",
".",
"append",
"(",
"gene",
")",
"# Add the simplified results to the object - used in the assembly pipeline report",
"sample",
"[",
"analysistype",
"]",
".",
"report_output",
".",
"append",
"(",
"gene",
")",
"# Add a newline after each sample",
"data",
"+=",
"'\\n'",
"# Add a newline if the sample did not have any gene hits",
"else",
":",
"data",
"+=",
"'\\n'",
"# Write the header and data to file",
"report",
".",
"write",
"(",
"header",
")",
"report",
".",
"write",
"(",
"data",
")"
] |
Creates a report of the genesippr results
:param analysistype: The variable to use when accessing attributes in the metadata object
|
[
"Creates",
"a",
"report",
"of",
"the",
"genesippr",
"results",
":",
"param",
"analysistype",
":",
"The",
"variable",
"to",
"use",
"when",
"accessing",
"attributes",
"in",
"the",
"metadata",
"object"
] |
train
|
https://github.com/OLC-Bioinformatics/sipprverse/blob/d4f10cdf8e1a39dac0953db61c21c97efc6006de/sipprverse_reporter/reports.py#L24-L115
|
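The heart of Reports.reporter above is selecting, for each gene in self.genelist, the best percent identity among hits whose names begin with that gene; a simplified stand-alone sketch of that selection (invented hit names and identities, dict.get instead of try/except) is:

# Toy results dict: hit name -> percent identity (values are illustrative).
results = {'stx1_1_AB123': '99.1', 'stx1_2_CD456': '97.4', 'eae_1_EF789': '100.0'}
genelist = ['stx1', 'stx2', 'eae']

best_dict = {}
for target, pid in results.items():
    gene_name = target.split('_')[0]
    if gene_name in genelist:
        # Keep the highest percent identity seen for each gene.
        if float(pid) > best_dict.get(gene_name, 0.0):
            best_dict[gene_name] = float(pid)

print(best_dict)  # {'stx1': 99.1, 'eae': 100.0}; stx2 absent -> empty report cell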
OLC-Bioinformatics/sipprverse
|
sipprverse_reporter/reports.py
|
Reports.genusspecific
|
def genusspecific(self, analysistype='genesippr'):
"""
Creates simplified genus-specific reports. Instead of the % ID and the fold coverage, a simple +/- scheme is
used for presence/absence
:param analysistype: The variable to use when accessing attributes in the metadata object
"""
# Dictionary to store all the output strings
results = dict()
for genus, genelist in self.genedict.items():
# Initialise the dictionary with the appropriate genus
results[genus] = str()
for sample in self.runmetadata.samples:
try:
# Find the samples that match the current genus - note that samples with multiple hits will be
# represented in multiple outputs
if genus in sample[analysistype].targetgenera:
# Populate the results string with the sample name
results[genus] += '{},'.format(sample.name)
# Iterate through all the genes associated with this genus. If the gene is in the current
# sample, add a + to the string, otherwise, add a -
for gene in genelist:
if gene.lower() in [target[0].lower().split('_')[0] for target in
sample[analysistype].results.items()]:
results[genus] += '+,'
else:
results[genus] += '-,'
results[genus] += '\n'
# If the sample is missing the targetgenera attribute, then it is ignored for these reports
except AttributeError:
pass
# Create and populate the genus-specific reports
for genus, resultstring in results.items():
# Only create the report if there are results for the current genus
if resultstring:
with open(os.path.join(self.reportpath, '{}_genesippr.csv'.format(genus)), 'w') as genusreport:
# Write the header to the report - Strain plus add the genes associated with the genus
genusreport.write('Strain,{}\n'.format(','.join(self.genedict[genus])))
# Write the results to the report
genusreport.write(resultstring)
|
python
|
def genusspecific(self, analysistype='genesippr'):
"""
Creates simplified genus-specific reports. Instead of the % ID and the fold coverage, a simple +/- scheme is
used for presence/absence
:param analysistype: The variable to use when accessing attributes in the metadata object
"""
# Dictionary to store all the output strings
results = dict()
for genus, genelist in self.genedict.items():
# Initialise the dictionary with the appropriate genus
results[genus] = str()
for sample in self.runmetadata.samples:
try:
# Find the samples that match the current genus - note that samples with multiple hits will be
# represented in multiple outputs
if genus in sample[analysistype].targetgenera:
# Populate the results string with the sample name
results[genus] += '{},'.format(sample.name)
# Iterate through all the genes associated with this genus. If the gene is in the current
# sample, add a + to the string, otherwise, add a -
for gene in genelist:
if gene.lower() in [target[0].lower().split('_')[0] for target in
sample[analysistype].results.items()]:
results[genus] += '+,'
else:
results[genus] += '-,'
results[genus] += '\n'
# If the sample is missing the targetgenera attribute, then it is ignored for these reports
except AttributeError:
pass
# Create and populate the genus-specific reports
for genus, resultstring in results.items():
# Only create the report if there are results for the current genus
if resultstring:
with open(os.path.join(self.reportpath, '{}_genesippr.csv'.format(genus)), 'w') as genusreport:
# Write the header to the report - Strain plus add the genes associated with the genus
genusreport.write('Strain,{}\n'.format(','.join(self.genedict[genus])))
# Write the results to the report
genusreport.write(resultstring)
|
[
"def",
"genusspecific",
"(",
"self",
",",
"analysistype",
"=",
"'genesippr'",
")",
":",
"# Dictionary to store all the output strings",
"results",
"=",
"dict",
"(",
")",
"for",
"genus",
",",
"genelist",
"in",
"self",
".",
"genedict",
".",
"items",
"(",
")",
":",
"# Initialise the dictionary with the appropriate genus",
"results",
"[",
"genus",
"]",
"=",
"str",
"(",
")",
"for",
"sample",
"in",
"self",
".",
"runmetadata",
".",
"samples",
":",
"try",
":",
"# Find the samples that match the current genus - note that samples with multiple hits will be",
"# represented in multiple outputs",
"if",
"genus",
"in",
"sample",
"[",
"analysistype",
"]",
".",
"targetgenera",
":",
"# Populate the results string with the sample name",
"results",
"[",
"genus",
"]",
"+=",
"'{},'",
".",
"format",
"(",
"sample",
".",
"name",
")",
"# Iterate through all the genes associated with this genus. If the gene is in the current",
"# sample, add a + to the string, otherwise, add a -",
"for",
"gene",
"in",
"genelist",
":",
"if",
"gene",
".",
"lower",
"(",
")",
"in",
"[",
"target",
"[",
"0",
"]",
".",
"lower",
"(",
")",
".",
"split",
"(",
"'_'",
")",
"[",
"0",
"]",
"for",
"target",
"in",
"sample",
"[",
"analysistype",
"]",
".",
"results",
".",
"items",
"(",
")",
"]",
":",
"results",
"[",
"genus",
"]",
"+=",
"'+,'",
"else",
":",
"results",
"[",
"genus",
"]",
"+=",
"'-,'",
"results",
"[",
"genus",
"]",
"+=",
"'\\n'",
"# If the sample is missing the targetgenera attribute, then it is ignored for these reports",
"except",
"AttributeError",
":",
"pass",
"# Create and populate the genus-specific reports",
"for",
"genus",
",",
"resultstring",
"in",
"results",
".",
"items",
"(",
")",
":",
"# Only create the report if there are results for the current genus",
"if",
"resultstring",
":",
"with",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"reportpath",
",",
"'{}_genesippr.csv'",
".",
"format",
"(",
"genus",
")",
")",
",",
"'w'",
")",
"as",
"genusreport",
":",
"# Write the header to the report - Strain plus add the genes associated with the genus",
"genusreport",
".",
"write",
"(",
"'Strain,{}\\n'",
".",
"format",
"(",
"','",
".",
"join",
"(",
"self",
".",
"genedict",
"[",
"genus",
"]",
")",
")",
")",
"# Write the results to the report",
"genusreport",
".",
"write",
"(",
"resultstring",
")"
] |
Creates simplified genus-specific reports. Instead of the % ID and the fold coverage, a simple +/- scheme is
used for presence/absence
:param analysistype: The variable to use when accessing attributes in the metadata object
|
[
"Creates",
"simplified",
"genus",
"-",
"specific",
"reports",
".",
"Instead",
"of",
"the",
"%",
"ID",
"and",
"the",
"fold",
"coverage",
"a",
"simple",
"+",
"/",
"-",
"scheme",
"is",
"used",
"for",
"presence",
"/",
"absence",
":",
"param",
"analysistype",
":",
"The",
"variable",
"to",
"use",
"when",
"accessing",
"attributes",
"in",
"the",
"metadata",
"object"
] |
train
|
https://github.com/OLC-Bioinformatics/sipprverse/blob/d4f10cdf8e1a39dac0953db61c21c97efc6006de/sipprverse_reporter/reports.py#L117-L155
|
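Reports.genusspecific above reduces each sample to a +/- row over the genus-specific gene panel; a compact sketch of that presence/absence encoding, with invented gene names and hits, is:

# Illustrative gene panel for one genus and the hits found in one sample.
genelist = ['invA', 'stn', 'hilA']
sample_hits = {'invA_1_XY': 99.8, 'hilA_3_ZW': 98.2}

row = 'sample01,'
found = [name.lower().split('_')[0] for name in sample_hits]
for gene in genelist:
    row += '+,' if gene.lower() in found else '-,'
row += '\n'

print(row)  # sample01,+,-,+,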
OLC-Bioinformatics/sipprverse
|
sipprverse_reporter/reports.py
|
Reports.gdcsreporter
|
def gdcsreporter(self, analysistype='GDCS'):
"""
Creates a report of the GDCS results
:param analysistype: The variable to use when accessing attributes in the metadata object
"""
logging.info('Creating {} report'.format(analysistype))
# Initialise list to store all the GDCS genes, and genera in the analysis
gdcs = list()
genera = list()
for sample in self.runmetadata.samples:
if sample.general.bestassemblyfile != 'NA':
if os.path.isdir(sample[analysistype].targetpath):
# Update the fai dict with all the genes in the analysis, rather than just those with baited hits
self.gdcs_fai(sample)
sample[analysistype].createreport = True
# Determine which genera are present in the analysis
if sample.general.closestrefseqgenus not in genera:
genera.append(sample.general.closestrefseqgenus)
try:
# Add all the GDCS genes to the list
for gene in sorted(sample[analysistype].faidict):
if gene not in gdcs:
gdcs.append(gene)
except AttributeError:
sample[analysistype].createreport = False
else:
sample[analysistype].createreport = False
else:
sample[analysistype].createreport = False
sample.general.incomplete = True
header = 'Strain,Genus,Matches,MeanCoverage,Pass/Fail,{},\n'.format(','.join(gdcs))
data = str()
with open(os.path.join(self.reportpath, '{}.csv'.format(analysistype)), 'w') as report:
# Sort the samples in the report based on the closest refseq genus e.g. all samples with the same genus
# will be grouped together in the report
for genus in genera:
for sample in self.runmetadata.samples:
if sample.general.closestrefseqgenus == genus:
if sample[analysistype].createreport:
sample[analysistype].totaldepth = list()
# Add the sample to the report if it matches the current genus
# if genus == sample.general.closestrefseqgenus:
data += '{},{},'.format(sample.name, genus)
# Initialise a variable to store the number of GDCS genes were matched
count = 0
# As I want the count to be in the report before all the gene results, this string will
# store the specific sample information, and will be added to data once count is known
specific = str()
for gene in gdcs:
# As there are different genes present in the GDCS databases for each organism of
# interest, genes that did not match because they're absent in the specific database are
# indicated using an X
if gene not in [result for result in sample[analysistype].faidict]:
specific += 'X,'
else:
try:
# Report the necessary information for each gene result
identity = sample[analysistype].results[gene]
specific += '{}% ({} +/- {}),'\
.format(identity, sample[analysistype].avgdepth[gene],
sample[analysistype].standarddev[gene])
sample[analysistype].totaldepth.append(
float(sample[analysistype].avgdepth[gene]))
count += 1
# If the gene was missing from the results attribute, add a - to the cell
except (KeyError, AttributeError):
sample.general.incomplete = True
specific += '-,'
# Calculate the mean depth of the genes and the standard deviation
sample[analysistype].mean = numpy.mean(sample[analysistype].totaldepth)
sample[analysistype].stddev = numpy.std(sample[analysistype].totaldepth)
# Determine whether the sample pass the necessary quality criteria:
# Pass, all GDCS, mean coverage greater than 20X coverage;
# ?: Indeterminate value;
# -: Fail value
# Allow one missing GDCS to still be considered a pass
if count >= len(sample[analysistype].faidict) - 1:
if sample[analysistype].mean > 20:
quality = '+'
else:
quality = '?'
sample.general.incomplete = True
else:
quality = '-'
sample.general.incomplete = True
# Add the count, mean depth with standard deviation, the pass/fail determination,
# and the total number of GDCS genes as well as the results
data += '{hits}/{total},{mean} +/- {std},{fail},{gdcs}\n'\
.format(hits=str(count),
total=len(sample[analysistype].faidict),
mean='{:.2f}'.format(sample[analysistype].mean),
std='{:.2f}'.format(sample[analysistype].stddev),
fail=quality,
gdcs=specific)
# # Any samples with a best assembly of 'NA' are considered incomplete.
# else:
# data += '{},{},,,-\n'.format(sample.name, sample.general.closestrefseqgenus)
# sample.general.incomplete = True
elif sample.general.closestrefseqgenus == 'NA':
data += '{}\n'.format(sample.name)
sample.general.incomplete = True
# Write the header and data to file
report.write(header)
report.write(data)
|
python
|
def gdcsreporter(self, analysistype='GDCS'):
"""
Creates a report of the GDCS results
:param analysistype: The variable to use when accessing attributes in the metadata object
"""
logging.info('Creating {} report'.format(analysistype))
# Initialise list to store all the GDCS genes, and genera in the analysis
gdcs = list()
genera = list()
for sample in self.runmetadata.samples:
if sample.general.bestassemblyfile != 'NA':
if os.path.isdir(sample[analysistype].targetpath):
# Update the fai dict with all the genes in the analysis, rather than just those with baited hits
self.gdcs_fai(sample)
sample[analysistype].createreport = True
# Determine which genera are present in the analysis
if sample.general.closestrefseqgenus not in genera:
genera.append(sample.general.closestrefseqgenus)
try:
# Add all the GDCS genes to the list
for gene in sorted(sample[analysistype].faidict):
if gene not in gdcs:
gdcs.append(gene)
except AttributeError:
sample[analysistype].createreport = False
else:
sample[analysistype].createreport = False
else:
sample[analysistype].createreport = False
sample.general.incomplete = True
header = 'Strain,Genus,Matches,MeanCoverage,Pass/Fail,{},\n'.format(','.join(gdcs))
data = str()
with open(os.path.join(self.reportpath, '{}.csv'.format(analysistype)), 'w') as report:
# Sort the samples in the report based on the closest refseq genus e.g. all samples with the same genus
# will be grouped together in the report
for genus in genera:
for sample in self.runmetadata.samples:
if sample.general.closestrefseqgenus == genus:
if sample[analysistype].createreport:
sample[analysistype].totaldepth = list()
# Add the sample to the report if it matches the current genus
# if genus == sample.general.closestrefseqgenus:
data += '{},{},'.format(sample.name, genus)
# Initialise a variable to store the number of GDCS genes were matched
count = 0
# As I want the count to be in the report before all the gene results, this string will
# store the specific sample information, and will be added to data once count is known
specific = str()
for gene in gdcs:
# As there are different genes present in the GDCS databases for each organism of
# interest, genes that did not match because they're absent in the specific database are
# indicated using an X
if gene not in [result for result in sample[analysistype].faidict]:
specific += 'X,'
else:
try:
# Report the necessary information for each gene result
identity = sample[analysistype].results[gene]
specific += '{}% ({} +/- {}),'\
.format(identity, sample[analysistype].avgdepth[gene],
sample[analysistype].standarddev[gene])
sample[analysistype].totaldepth.append(
float(sample[analysistype].avgdepth[gene]))
count += 1
# If the gene was missing from the results attribute, add a - to the cell
except (KeyError, AttributeError):
sample.general.incomplete = True
specific += '-,'
# Calculate the mean depth of the genes and the standard deviation
sample[analysistype].mean = numpy.mean(sample[analysistype].totaldepth)
sample[analysistype].stddev = numpy.std(sample[analysistype].totaldepth)
# Determine whether the sample pass the necessary quality criteria:
# Pass, all GDCS, mean coverage greater than 20X coverage;
# ?: Indeterminate value;
# -: Fail value
# Allow one missing GDCS to still be considered a pass
if count >= len(sample[analysistype].faidict) - 1:
if sample[analysistype].mean > 20:
quality = '+'
else:
quality = '?'
sample.general.incomplete = True
else:
quality = '-'
sample.general.incomplete = True
# Add the count, mean depth with standard deviation, the pass/fail determination,
# and the total number of GDCS genes as well as the results
data += '{hits}/{total},{mean} +/- {std},{fail},{gdcs}\n'\
.format(hits=str(count),
total=len(sample[analysistype].faidict),
mean='{:.2f}'.format(sample[analysistype].mean),
std='{:.2f}'.format(sample[analysistype].stddev),
fail=quality,
gdcs=specific)
# # Any samples with a best assembly of 'NA' are considered incomplete.
# else:
# data += '{},{},,,-\n'.format(sample.name, sample.general.closestrefseqgenus)
# sample.general.incomplete = True
elif sample.general.closestrefseqgenus == 'NA':
data += '{}\n'.format(sample.name)
sample.general.incomplete = True
# Write the header and data to file
report.write(header)
report.write(data)
|
[
"def",
"gdcsreporter",
"(",
"self",
",",
"analysistype",
"=",
"'GDCS'",
")",
":",
"logging",
".",
"info",
"(",
"'Creating {} report'",
".",
"format",
"(",
"analysistype",
")",
")",
"# Initialise list to store all the GDCS genes, and genera in the analysis",
"gdcs",
"=",
"list",
"(",
")",
"genera",
"=",
"list",
"(",
")",
"for",
"sample",
"in",
"self",
".",
"runmetadata",
".",
"samples",
":",
"if",
"sample",
".",
"general",
".",
"bestassemblyfile",
"!=",
"'NA'",
":",
"if",
"os",
".",
"path",
".",
"isdir",
"(",
"sample",
"[",
"analysistype",
"]",
".",
"targetpath",
")",
":",
"# Update the fai dict with all the genes in the analysis, rather than just those with baited hits",
"self",
".",
"gdcs_fai",
"(",
"sample",
")",
"sample",
"[",
"analysistype",
"]",
".",
"createreport",
"=",
"True",
"# Determine which genera are present in the analysis",
"if",
"sample",
".",
"general",
".",
"closestrefseqgenus",
"not",
"in",
"genera",
":",
"genera",
".",
"append",
"(",
"sample",
".",
"general",
".",
"closestrefseqgenus",
")",
"try",
":",
"# Add all the GDCS genes to the list",
"for",
"gene",
"in",
"sorted",
"(",
"sample",
"[",
"analysistype",
"]",
".",
"faidict",
")",
":",
"if",
"gene",
"not",
"in",
"gdcs",
":",
"gdcs",
".",
"append",
"(",
"gene",
")",
"except",
"AttributeError",
":",
"sample",
"[",
"analysistype",
"]",
".",
"createreport",
"=",
"False",
"else",
":",
"sample",
"[",
"analysistype",
"]",
".",
"createreport",
"=",
"False",
"else",
":",
"sample",
"[",
"analysistype",
"]",
".",
"createreport",
"=",
"False",
"sample",
".",
"general",
".",
"incomplete",
"=",
"True",
"header",
"=",
"'Strain,Genus,Matches,MeanCoverage,Pass/Fail,{},\\n'",
".",
"format",
"(",
"','",
".",
"join",
"(",
"gdcs",
")",
")",
"data",
"=",
"str",
"(",
")",
"with",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"reportpath",
",",
"'{}.csv'",
".",
"format",
"(",
"analysistype",
")",
")",
",",
"'w'",
")",
"as",
"report",
":",
"# Sort the samples in the report based on the closest refseq genus e.g. all samples with the same genus",
"# will be grouped together in the report",
"for",
"genus",
"in",
"genera",
":",
"for",
"sample",
"in",
"self",
".",
"runmetadata",
".",
"samples",
":",
"if",
"sample",
".",
"general",
".",
"closestrefseqgenus",
"==",
"genus",
":",
"if",
"sample",
"[",
"analysistype",
"]",
".",
"createreport",
":",
"sample",
"[",
"analysistype",
"]",
".",
"totaldepth",
"=",
"list",
"(",
")",
"# Add the sample to the report if it matches the current genus",
"# if genus == sample.general.closestrefseqgenus:",
"data",
"+=",
"'{},{},'",
".",
"format",
"(",
"sample",
".",
"name",
",",
"genus",
")",
"# Initialise a variable to store the number of GDCS genes were matched",
"count",
"=",
"0",
"# As I want the count to be in the report before all the gene results, this string will",
"# store the specific sample information, and will be added to data once count is known",
"specific",
"=",
"str",
"(",
")",
"for",
"gene",
"in",
"gdcs",
":",
"# As there are different genes present in the GDCS databases for each organism of",
"# interest, genes that did not match because they're absent in the specific database are",
"# indicated using an X",
"if",
"gene",
"not",
"in",
"[",
"result",
"for",
"result",
"in",
"sample",
"[",
"analysistype",
"]",
".",
"faidict",
"]",
":",
"specific",
"+=",
"'X,'",
"else",
":",
"try",
":",
"# Report the necessary information for each gene result",
"identity",
"=",
"sample",
"[",
"analysistype",
"]",
".",
"results",
"[",
"gene",
"]",
"specific",
"+=",
"'{}% ({} +/- {}),'",
".",
"format",
"(",
"identity",
",",
"sample",
"[",
"analysistype",
"]",
".",
"avgdepth",
"[",
"gene",
"]",
",",
"sample",
"[",
"analysistype",
"]",
".",
"standarddev",
"[",
"gene",
"]",
")",
"sample",
"[",
"analysistype",
"]",
".",
"totaldepth",
".",
"append",
"(",
"float",
"(",
"sample",
"[",
"analysistype",
"]",
".",
"avgdepth",
"[",
"gene",
"]",
")",
")",
"count",
"+=",
"1",
"# If the gene was missing from the results attribute, add a - to the cell",
"except",
"(",
"KeyError",
",",
"AttributeError",
")",
":",
"sample",
".",
"general",
".",
"incomplete",
"=",
"True",
"specific",
"+=",
"'-,'",
"# Calculate the mean depth of the genes and the standard deviation",
"sample",
"[",
"analysistype",
"]",
".",
"mean",
"=",
"numpy",
".",
"mean",
"(",
"sample",
"[",
"analysistype",
"]",
".",
"totaldepth",
")",
"sample",
"[",
"analysistype",
"]",
".",
"stddev",
"=",
"numpy",
".",
"std",
"(",
"sample",
"[",
"analysistype",
"]",
".",
"totaldepth",
")",
"# Determine whether the sample pass the necessary quality criteria:",
"# Pass, all GDCS, mean coverage greater than 20X coverage;",
"# ?: Indeterminate value;",
"# -: Fail value",
"# Allow one missing GDCS to still be considered a pass",
"if",
"count",
">=",
"len",
"(",
"sample",
"[",
"analysistype",
"]",
".",
"faidict",
")",
"-",
"1",
":",
"if",
"sample",
"[",
"analysistype",
"]",
".",
"mean",
">",
"20",
":",
"quality",
"=",
"'+'",
"else",
":",
"quality",
"=",
"'?'",
"sample",
".",
"general",
".",
"incomplete",
"=",
"True",
"else",
":",
"quality",
"=",
"'-'",
"sample",
".",
"general",
".",
"incomplete",
"=",
"True",
"# Add the count, mean depth with standard deviation, the pass/fail determination,",
"# and the total number of GDCS genes as well as the results",
"data",
"+=",
"'{hits}/{total},{mean} +/- {std},{fail},{gdcs}\\n'",
".",
"format",
"(",
"hits",
"=",
"str",
"(",
"count",
")",
",",
"total",
"=",
"len",
"(",
"sample",
"[",
"analysistype",
"]",
".",
"faidict",
")",
",",
"mean",
"=",
"'{:.2f}'",
".",
"format",
"(",
"sample",
"[",
"analysistype",
"]",
".",
"mean",
")",
",",
"std",
"=",
"'{:.2f}'",
".",
"format",
"(",
"sample",
"[",
"analysistype",
"]",
".",
"stddev",
")",
",",
"fail",
"=",
"quality",
",",
"gdcs",
"=",
"specific",
")",
"# # Any samples with a best assembly of 'NA' are considered incomplete.",
"# else:",
"# data += '{},{},,,-\\n'.format(sample.name, sample.general.closestrefseqgenus)",
"# sample.general.incomplete = True",
"elif",
"sample",
".",
"general",
".",
"closestrefseqgenus",
"==",
"'NA'",
":",
"data",
"+=",
"'{}\\n'",
".",
"format",
"(",
"sample",
".",
"name",
")",
"sample",
".",
"general",
".",
"incomplete",
"=",
"True",
"# Write the header and data to file",
"report",
".",
"write",
"(",
"header",
")",
"report",
".",
"write",
"(",
"data",
")"
] |
Creates a report of the GDCS results
:param analysistype: The variable to use when accessing attributes in the metadata object
|
[
"Creates",
"a",
"report",
"of",
"the",
"GDCS",
"results",
":",
"param",
"analysistype",
":",
"The",
"variable",
"to",
"use",
"when",
"accessing",
"attributes",
"in",
"the",
"metadata",
"object"
] |
train
|
https://github.com/OLC-Bioinformatics/sipprverse/blob/d4f10cdf8e1a39dac0953db61c21c97efc6006de/sipprverse_reporter/reports.py#L157-L260
|
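The pass/fail call in Reports.gdcsreporter above rests on two thresholds: at most one GDCS gene may be missing, and the mean coverage must exceed 20X. A tiny sketch of that decision rule with invented values:

def gdcs_quality(count, total, mean_depth):
    """Return '+', '?' or '-' using the same thresholds as gdcsreporter."""
    # Allow one missing GDCS gene and still consider the sample a pass.
    if count >= total - 1:
        return '+' if mean_depth > 20 else '?'
    return '-'

print(gdcs_quality(53, 53, 41.7))  # '+'  all genes found, good coverage
print(gdcs_quality(52, 53, 12.3))  # '?'  coverage below the 20X threshold
print(gdcs_quality(40, 53, 35.0))  # '-'  too many genes missing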
OLC-Bioinformatics/sipprverse
|
sipprverse_reporter/reports.py
|
Reports.gdcs_fai
|
def gdcs_fai(sample, analysistype='GDCS'):
"""
GDCS analyses need to use the .fai file supplied in the targets folder rather than the one created following
reverse baiting
:param sample: sample object
:param analysistype: current analysis being performed
"""
try:
# Find the .fai file in the target path
sample[analysistype].faifile = glob(os.path.join(sample[analysistype].targetpath, '*.fai'))[0]
except IndexError:
target_file = glob(os.path.join(sample[analysistype].targetpath, '*.fasta'))[0]
samindex = SamtoolsFaidxCommandline(reference=target_file)
map(StringIO, samindex(cwd=sample[analysistype].targetpath))
sample[analysistype].faifile = glob(os.path.join(sample[analysistype].targetpath, '*.fai'))[0]
# Get the fai file into a dictionary to be used in parsing results
try:
with open(sample[analysistype].faifile, 'r') as faifile:
for line in faifile:
data = line.split('\t')
try:
sample[analysistype].faidict[data[0]] = int(data[1])
except KeyError:
sample[analysistype].faidict = dict()
sample[analysistype].faidict[data[0]] = int(data[1])
except FileNotFoundError:
pass
|
python
|
def gdcs_fai(sample, analysistype='GDCS'):
"""
GDCS analyses need to use the .fai file supplied in the targets folder rather than the one created following
reverse baiting
:param sample: sample object
:param analysistype: current analysis being performed
"""
try:
# Find the .fai file in the target path
sample[analysistype].faifile = glob(os.path.join(sample[analysistype].targetpath, '*.fai'))[0]
except IndexError:
target_file = glob(os.path.join(sample[analysistype].targetpath, '*.fasta'))[0]
samindex = SamtoolsFaidxCommandline(reference=target_file)
map(StringIO, samindex(cwd=sample[analysistype].targetpath))
sample[analysistype].faifile = glob(os.path.join(sample[analysistype].targetpath, '*.fai'))[0]
# Get the fai file into a dictionary to be used in parsing results
try:
with open(sample[analysistype].faifile, 'r') as faifile:
for line in faifile:
data = line.split('\t')
try:
sample[analysistype].faidict[data[0]] = int(data[1])
except KeyError:
sample[analysistype].faidict = dict()
sample[analysistype].faidict[data[0]] = int(data[1])
except FileNotFoundError:
pass
|
[
"def",
"gdcs_fai",
"(",
"sample",
",",
"analysistype",
"=",
"'GDCS'",
")",
":",
"try",
":",
"# Find the .fai file in the target path",
"sample",
"[",
"analysistype",
"]",
".",
"faifile",
"=",
"glob",
"(",
"os",
".",
"path",
".",
"join",
"(",
"sample",
"[",
"analysistype",
"]",
".",
"targetpath",
",",
"'*.fai'",
")",
")",
"[",
"0",
"]",
"except",
"IndexError",
":",
"target_file",
"=",
"glob",
"(",
"os",
".",
"path",
".",
"join",
"(",
"sample",
"[",
"analysistype",
"]",
".",
"targetpath",
",",
"'*.fasta'",
")",
")",
"[",
"0",
"]",
"samindex",
"=",
"SamtoolsFaidxCommandline",
"(",
"reference",
"=",
"target_file",
")",
"map",
"(",
"StringIO",
",",
"samindex",
"(",
"cwd",
"=",
"sample",
"[",
"analysistype",
"]",
".",
"targetpath",
")",
")",
"sample",
"[",
"analysistype",
"]",
".",
"faifile",
"=",
"glob",
"(",
"os",
".",
"path",
".",
"join",
"(",
"sample",
"[",
"analysistype",
"]",
".",
"targetpath",
",",
"'*.fai'",
")",
")",
"[",
"0",
"]",
"# Get the fai file into a dictionary to be used in parsing results",
"try",
":",
"with",
"open",
"(",
"sample",
"[",
"analysistype",
"]",
".",
"faifile",
",",
"'r'",
")",
"as",
"faifile",
":",
"for",
"line",
"in",
"faifile",
":",
"data",
"=",
"line",
".",
"split",
"(",
"'\\t'",
")",
"try",
":",
"sample",
"[",
"analysistype",
"]",
".",
"faidict",
"[",
"data",
"[",
"0",
"]",
"]",
"=",
"int",
"(",
"data",
"[",
"1",
"]",
")",
"except",
"KeyError",
":",
"sample",
"[",
"analysistype",
"]",
".",
"faidict",
"=",
"dict",
"(",
")",
"sample",
"[",
"analysistype",
"]",
".",
"faidict",
"[",
"data",
"[",
"0",
"]",
"]",
"=",
"int",
"(",
"data",
"[",
"1",
"]",
")",
"except",
"FileNotFoundError",
":",
"pass"
] |
GDCS analyses need to use the .fai file supplied in the targets folder rather than the one created following
reverse baiting
:param sample: sample object
:param analysistype: current analysis being performed
|
[
"GDCS",
"analyses",
"need",
"to",
"use",
"the",
".",
"fai",
"file",
"supplied",
"in",
"the",
"targets",
"folder",
"rather",
"than",
"the",
"one",
"created",
"following",
"reverse",
"baiting",
":",
"param",
"sample",
":",
"sample",
"object",
":",
"param",
"analysistype",
":",
"current",
"analysis",
"being",
"performed"
] |
train
|
https://github.com/OLC-Bioinformatics/sipprverse/blob/d4f10cdf8e1a39dac0953db61c21c97efc6006de/sipprverse_reporter/reports.py#L263-L289
|
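Reports.gdcs_fai above just loads a samtools .fai index into a name-to-length dictionary; a minimal stand-alone version of that parsing step (no sample object, and 'targets.fasta.fai' is a hypothetical path) is:

# Parse a samtools faidx index (.fai) into {sequence_name: length}.
# 'targets.fasta.fai' is a hypothetical path used for illustration.
faidict = {}
with open('targets.fasta.fai', 'r') as faifile:
    for line in faifile:
        fields = line.split('\t')
        # Column 1 is the sequence name, column 2 its length in bases.
        faidict[fields[0]] = int(fields[1])

print(len(faidict), 'target sequences indexed')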
OLC-Bioinformatics/sipprverse
|
sipprverse_reporter/reports.py
|
Reports.sixteensreporter
|
def sixteensreporter(self, analysistype='sixteens_full'):
"""
Creates a report of the results
:param analysistype: The variable to use when accessing attributes in the metadata object
"""
# Create the path in which the reports are stored
make_path(self.reportpath)
# Initialise the header and data strings
header = 'Strain,Gene,PercentIdentity,Genus,FoldCoverage\n'
data = ''
with open(os.path.join(self.reportpath, analysistype + '.csv'), 'w') as report:
with open(os.path.join(self.reportpath, analysistype + '_sequences.fa'), 'w') as sequences:
for sample in self.runmetadata.samples:
try:
# Select the best hit of all the full-length 16S genes mapped
sample[analysistype].besthit = sorted(sample[analysistype].results.items(),
key=operator.itemgetter(1), reverse=True)[0][0]
# Add the sample name to the data string
data += sample.name + ','
                    # Find the record that matches the best hit, and extract the necessary values to be placed in the
# data string
for name, identity in sample[analysistype].results.items():
if name == sample[analysistype].besthit:
data += '{},{},{},{}\n'.format(name, identity, sample[analysistype].genus,
sample[analysistype].avgdepth[name])
# Create a FASTA-formatted sequence output of the 16S sequence
record = SeqRecord(Seq(sample[analysistype].sequences[name],
IUPAC.unambiguous_dna),
id='{}_{}'.format(sample.name, '16S'),
description='')
SeqIO.write(record, sequences, 'fasta')
except (KeyError, IndexError):
data += '{}\n'.format(sample.name)
# Write the results to the report
report.write(header)
report.write(data)
|
python
|
def sixteensreporter(self, analysistype='sixteens_full'):
"""
Creates a report of the results
:param analysistype: The variable to use when accessing attributes in the metadata object
"""
# Create the path in which the reports are stored
make_path(self.reportpath)
# Initialise the header and data strings
header = 'Strain,Gene,PercentIdentity,Genus,FoldCoverage\n'
data = ''
with open(os.path.join(self.reportpath, analysistype + '.csv'), 'w') as report:
with open(os.path.join(self.reportpath, analysistype + '_sequences.fa'), 'w') as sequences:
for sample in self.runmetadata.samples:
try:
# Select the best hit of all the full-length 16S genes mapped
sample[analysistype].besthit = sorted(sample[analysistype].results.items(),
key=operator.itemgetter(1), reverse=True)[0][0]
# Add the sample name to the data string
data += sample.name + ','
                    # Find the record that matches the best hit, and extract the necessary values to be placed in the
# data string
for name, identity in sample[analysistype].results.items():
if name == sample[analysistype].besthit:
data += '{},{},{},{}\n'.format(name, identity, sample[analysistype].genus,
sample[analysistype].avgdepth[name])
# Create a FASTA-formatted sequence output of the 16S sequence
record = SeqRecord(Seq(sample[analysistype].sequences[name],
IUPAC.unambiguous_dna),
id='{}_{}'.format(sample.name, '16S'),
description='')
SeqIO.write(record, sequences, 'fasta')
except (KeyError, IndexError):
data += '{}\n'.format(sample.name)
# Write the results to the report
report.write(header)
report.write(data)
|
[
"def",
"sixteensreporter",
"(",
"self",
",",
"analysistype",
"=",
"'sixteens_full'",
")",
":",
"# Create the path in which the reports are stored",
"make_path",
"(",
"self",
".",
"reportpath",
")",
"# Initialise the header and data strings",
"header",
"=",
"'Strain,Gene,PercentIdentity,Genus,FoldCoverage\\n'",
"data",
"=",
"''",
"with",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"reportpath",
",",
"analysistype",
"+",
"'.csv'",
")",
",",
"'w'",
")",
"as",
"report",
":",
"with",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"reportpath",
",",
"analysistype",
"+",
"'_sequences.fa'",
")",
",",
"'w'",
")",
"as",
"sequences",
":",
"for",
"sample",
"in",
"self",
".",
"runmetadata",
".",
"samples",
":",
"try",
":",
"# Select the best hit of all the full-length 16S genes mapped",
"sample",
"[",
"analysistype",
"]",
".",
"besthit",
"=",
"sorted",
"(",
"sample",
"[",
"analysistype",
"]",
".",
"results",
".",
"items",
"(",
")",
",",
"key",
"=",
"operator",
".",
"itemgetter",
"(",
"1",
")",
",",
"reverse",
"=",
"True",
")",
"[",
"0",
"]",
"[",
"0",
"]",
"# Add the sample name to the data string",
"data",
"+=",
"sample",
".",
"name",
"+",
"','",
"# Find the record that matches the best hit, and extract the necessary values to be place in the",
"# data string",
"for",
"name",
",",
"identity",
"in",
"sample",
"[",
"analysistype",
"]",
".",
"results",
".",
"items",
"(",
")",
":",
"if",
"name",
"==",
"sample",
"[",
"analysistype",
"]",
".",
"besthit",
":",
"data",
"+=",
"'{},{},{},{}\\n'",
".",
"format",
"(",
"name",
",",
"identity",
",",
"sample",
"[",
"analysistype",
"]",
".",
"genus",
",",
"sample",
"[",
"analysistype",
"]",
".",
"avgdepth",
"[",
"name",
"]",
")",
"# Create a FASTA-formatted sequence output of the 16S sequence",
"record",
"=",
"SeqRecord",
"(",
"Seq",
"(",
"sample",
"[",
"analysistype",
"]",
".",
"sequences",
"[",
"name",
"]",
",",
"IUPAC",
".",
"unambiguous_dna",
")",
",",
"id",
"=",
"'{}_{}'",
".",
"format",
"(",
"sample",
".",
"name",
",",
"'16S'",
")",
",",
"description",
"=",
"''",
")",
"SeqIO",
".",
"write",
"(",
"record",
",",
"sequences",
",",
"'fasta'",
")",
"except",
"(",
"KeyError",
",",
"IndexError",
")",
":",
"data",
"+=",
"'{}\\n'",
".",
"format",
"(",
"sample",
".",
"name",
")",
"# Write the results to the report",
"report",
".",
"write",
"(",
"header",
")",
"report",
".",
"write",
"(",
"data",
")"
] |
Creates a report of the results
:param analysistype: The variable to use when accessing attributes in the metadata object
|
[
"Creates",
"a",
"report",
"of",
"the",
"results",
":",
"param",
"analysistype",
":",
"The",
"variable",
"to",
"use",
"when",
"accessing",
"attributes",
"in",
"the",
"metadata",
"object"
] |
train
|
https://github.com/OLC-Bioinformatics/sipprverse/blob/d4f10cdf8e1a39dac0953db61c21c97efc6006de/sipprverse_reporter/reports.py#L291-L326
|
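Editor's note (illustrative sketch, not part of the dataset record above): sixteensreporter selects the best 16S hit by sorting a {gene: percent identity} mapping on its values and taking the first key. The same selection can be written with max(); the mapping below is a hypothetical example, not data from the pipeline.

results = {'gene_a': 87.5, 'gene_b': 99.1, 'gene_c': 92.4}  # hypothetical percent identities
best_hit = max(results.items(), key=lambda item: item[1])[0]
print(best_hit)  # gene_b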
OLC-Bioinformatics/sipprverse
|
sipprverse_reporter/reports.py
|
Reports.confindr_reporter
|
def confindr_reporter(self, analysistype='confindr'):
"""
Creates a final report of all the ConFindr results
"""
# Initialise the data strings
data = 'Strain,Genus,NumContamSNVs,ContamStatus,PercentContam,PercentContamSTD\n'
with open(os.path.join(self.reportpath, analysistype + '.csv'), 'w') as report:
# Iterate through all the results
for sample in self.runmetadata.samples:
data += '{str},{genus},{numcontamsnv},{status},{pc},{pcs}\n'.format(
str=sample.name,
genus=sample.confindr.genus,
numcontamsnv=sample.confindr.num_contaminated_snvs,
status=sample.confindr.contam_status,
pc=sample.confindr.percent_contam,
pcs=sample.confindr.percent_contam_std
)
# Write the string to the report
report.write(data)
|
python
|
def confindr_reporter(self, analysistype='confindr'):
"""
Creates a final report of all the ConFindr results
"""
# Initialise the data strings
data = 'Strain,Genus,NumContamSNVs,ContamStatus,PercentContam,PercentContamSTD\n'
with open(os.path.join(self.reportpath, analysistype + '.csv'), 'w') as report:
# Iterate through all the results
for sample in self.runmetadata.samples:
data += '{str},{genus},{numcontamsnv},{status},{pc},{pcs}\n'.format(
str=sample.name,
genus=sample.confindr.genus,
numcontamsnv=sample.confindr.num_contaminated_snvs,
status=sample.confindr.contam_status,
pc=sample.confindr.percent_contam,
pcs=sample.confindr.percent_contam_std
)
# Write the string to the report
report.write(data)
|
[
"def",
"confindr_reporter",
"(",
"self",
",",
"analysistype",
"=",
"'confindr'",
")",
":",
"# Initialise the data strings",
"data",
"=",
"'Strain,Genus,NumContamSNVs,ContamStatus,PercentContam,PercentContamSTD\\n'",
"with",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"reportpath",
",",
"analysistype",
"+",
"'.csv'",
")",
",",
"'w'",
")",
"as",
"report",
":",
"# Iterate through all the results",
"for",
"sample",
"in",
"self",
".",
"runmetadata",
".",
"samples",
":",
"data",
"+=",
"'{str},{genus},{numcontamsnv},{status},{pc},{pcs}\\n'",
".",
"format",
"(",
"str",
"=",
"sample",
".",
"name",
",",
"genus",
"=",
"sample",
".",
"confindr",
".",
"genus",
",",
"numcontamsnv",
"=",
"sample",
".",
"confindr",
".",
"num_contaminated_snvs",
",",
"status",
"=",
"sample",
".",
"confindr",
".",
"contam_status",
",",
"pc",
"=",
"sample",
".",
"confindr",
".",
"percent_contam",
",",
"pcs",
"=",
"sample",
".",
"confindr",
".",
"percent_contam_std",
")",
"# Write the string to the report",
"report",
".",
"write",
"(",
"data",
")"
] |
Creates a final report of all the ConFindr results
|
[
"Creates",
"a",
"final",
"report",
"of",
"all",
"the",
"ConFindr",
"results"
] |
train
|
https://github.com/OLC-Bioinformatics/sipprverse/blob/d4f10cdf8e1a39dac0953db61c21c97efc6006de/sipprverse_reporter/reports.py#L328-L346
|
OLC-Bioinformatics/sipprverse
|
sipprverse_reporter/reports.py
|
Reports.methodreporter
|
def methodreporter(self):
"""
Create final reports collating results from all the individual iterations through the method pipeline
"""
# Ensure that the analyses are set to complete
self.analysescomplete = True
# Reset the report path to original value
self.reportpath = os.path.join(self.path, 'reports')
# Clear the runmetadata - it will be populated with all the metadata from completemetadata
self.runmetadata = MetadataObject()
self.runmetadata.samples = list()
# As the samples were entered into self.completemetadata depending on when they passed the quality threshold,
# this list is not ordered numerically/alphabetically like the original runmetadata. Reset the order.
for strain in self.samples:
for sample in self.completemetadata:
if sample.name == strain:
# Append the sample to the ordered list of objects
self.runmetadata.samples.append(sample)
# Create the reports
self.reporter()
self.genusspecific()
self.sixteensreporter()
self.gdcsreporter()
self.confindr_reporter()
|
python
|
def methodreporter(self):
"""
Create final reports collating results from all the individual iterations through the method pipeline
"""
# Ensure that the analyses are set to complete
self.analysescomplete = True
# Reset the report path to original value
self.reportpath = os.path.join(self.path, 'reports')
# Clear the runmetadata - it will be populated with all the metadata from completemetadata
self.runmetadata = MetadataObject()
self.runmetadata.samples = list()
# As the samples were entered into self.completemetadata depending on when they passed the quality threshold,
# this list is not ordered numerically/alphabetically like the original runmetadata. Reset the order.
for strain in self.samples:
for sample in self.completemetadata:
if sample.name == strain:
# Append the sample to the ordered list of objects
self.runmetadata.samples.append(sample)
# Create the reports
self.reporter()
self.genusspecific()
self.sixteensreporter()
self.gdcsreporter()
self.confindr_reporter()
|
[
"def",
"methodreporter",
"(",
"self",
")",
":",
"# Ensure that the analyses are set to complete",
"self",
".",
"analysescomplete",
"=",
"True",
"# Reset the report path to original value",
"self",
".",
"reportpath",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"path",
",",
"'reports'",
")",
"# Clear the runmetadata - it will be populated with all the metadata from completemetadata",
"self",
".",
"runmetadata",
"=",
"MetadataObject",
"(",
")",
"self",
".",
"runmetadata",
".",
"samples",
"=",
"list",
"(",
")",
"# As the samples were entered into self.completemetadata depending on when they passed the quality threshold,",
"# this list is not ordered numerically/alphabetically like the original runmetadata. Reset the order.",
"for",
"strain",
"in",
"self",
".",
"samples",
":",
"for",
"sample",
"in",
"self",
".",
"completemetadata",
":",
"if",
"sample",
".",
"name",
"==",
"strain",
":",
"# Append the sample to the ordered list of objects",
"self",
".",
"runmetadata",
".",
"samples",
".",
"append",
"(",
"sample",
")",
"# Create the reports",
"self",
".",
"reporter",
"(",
")",
"self",
".",
"genusspecific",
"(",
")",
"self",
".",
"sixteensreporter",
"(",
")",
"self",
".",
"gdcsreporter",
"(",
")",
"self",
".",
"confindr_reporter",
"(",
")"
] |
Create final reports collating results from all the individual iterations through the method pipeline
|
[
"Create",
"final",
"reports",
"collating",
"results",
"from",
"all",
"the",
"individual",
"iterations",
"through",
"the",
"method",
"pipeline"
] |
train
|
https://github.com/OLC-Bioinformatics/sipprverse/blob/d4f10cdf8e1a39dac0953db61c21c97efc6006de/sipprverse_reporter/reports.py#L348-L371
|
OLC-Bioinformatics/sipprverse
|
sipprverse_reporter/reports.py
|
ReportImage.main
|
def main(self):
"""
Run the methods required to create the genesippr report summary image
"""
self.dataframe_setup()
self.figure_populate(self.outputfolder,
self.image_report,
self.header_list,
self.samples,
'genesippr',
'report',
fail=self.fail)
|
python
|
def main(self):
"""
Run the methods required to create the genesippr report summary image
"""
self.dataframe_setup()
self.figure_populate(self.outputfolder,
self.image_report,
self.header_list,
self.samples,
'genesippr',
'report',
fail=self.fail)
|
[
"def",
"main",
"(",
"self",
")",
":",
"self",
".",
"dataframe_setup",
"(",
")",
"self",
".",
"figure_populate",
"(",
"self",
".",
"outputfolder",
",",
"self",
".",
"image_report",
",",
"self",
".",
"header_list",
",",
"self",
".",
"samples",
",",
"'genesippr'",
",",
"'report'",
",",
"fail",
"=",
"self",
".",
"fail",
")"
] |
Run the methods required to create the genesippr report summary image
|
[
"Run",
"the",
"methods",
"required",
"to",
"create",
"the",
"genesippr",
"report",
"summary",
"image"
] |
train
|
https://github.com/OLC-Bioinformatics/sipprverse/blob/d4f10cdf8e1a39dac0953db61c21c97efc6006de/sipprverse_reporter/reports.py#L417-L428
|
OLC-Bioinformatics/sipprverse
|
sipprverse_reporter/reports.py
|
ReportImage.data_sanitise
|
def data_sanitise(self, inputstring, header=None):
"""
Format the data to be consistent with heatmaps
:param inputstring: string containing data to be formatted
:param header: class of the data - certain categories have specific formatting requirements
:return: the formatted output string
"""
if str(inputstring) == 'nan':
outputstring = 0
elif '%' in str(inputstring):
group = re.findall('(\d+)\..+', str(inputstring))
outputstring = group[0]
elif header == 'Pass/Fail':
if str(inputstring) == '+':
outputstring = '100'
else:
outputstring = -100
self.fail = True
elif header == 'ContamStatus':
if str(inputstring) == 'Clean':
outputstring = '100'
else:
outputstring = -100
self.fail = True
elif header == 'MeanCoverage':
cov = float(str(inputstring).split(' ')[0])
if cov >= 20:
outputstring = 100
else:
outputstring = -100
self.fail = True
else:
outputstring = str(inputstring)
return outputstring
|
python
|
def data_sanitise(self, inputstring, header=None):
"""
Format the data to be consistent with heatmaps
:param inputstring: string containing data to be formatted
:param header: class of the data - certain categories have specific formatting requirements
:return: the formatted output string
"""
if str(inputstring) == 'nan':
outputstring = 0
elif '%' in str(inputstring):
group = re.findall('(\d+)\..+', str(inputstring))
outputstring = group[0]
elif header == 'Pass/Fail':
if str(inputstring) == '+':
outputstring = '100'
else:
outputstring = -100
self.fail = True
elif header == 'ContamStatus':
if str(inputstring) == 'Clean':
outputstring = '100'
else:
outputstring = -100
self.fail = True
elif header == 'MeanCoverage':
cov = float(str(inputstring).split(' ')[0])
if cov >= 20:
outputstring = 100
else:
outputstring = -100
self.fail = True
else:
outputstring = str(inputstring)
return outputstring
|
[
"def",
"data_sanitise",
"(",
"self",
",",
"inputstring",
",",
"header",
"=",
"None",
")",
":",
"if",
"str",
"(",
"inputstring",
")",
"==",
"'nan'",
":",
"outputstring",
"=",
"0",
"elif",
"'%'",
"in",
"str",
"(",
"inputstring",
")",
":",
"group",
"=",
"re",
".",
"findall",
"(",
"'(\\d+)\\..+'",
",",
"str",
"(",
"inputstring",
")",
")",
"outputstring",
"=",
"group",
"[",
"0",
"]",
"elif",
"header",
"==",
"'Pass/Fail'",
":",
"if",
"str",
"(",
"inputstring",
")",
"==",
"'+'",
":",
"outputstring",
"=",
"'100'",
"else",
":",
"outputstring",
"=",
"-",
"100",
"self",
".",
"fail",
"=",
"True",
"elif",
"header",
"==",
"'ContamStatus'",
":",
"if",
"str",
"(",
"inputstring",
")",
"==",
"'Clean'",
":",
"outputstring",
"=",
"'100'",
"else",
":",
"outputstring",
"=",
"-",
"100",
"self",
".",
"fail",
"=",
"True",
"elif",
"header",
"==",
"'MeanCoverage'",
":",
"cov",
"=",
"float",
"(",
"str",
"(",
"inputstring",
")",
".",
"split",
"(",
"' '",
")",
"[",
"0",
"]",
")",
"if",
"cov",
">=",
"20",
":",
"outputstring",
"=",
"100",
"else",
":",
"outputstring",
"=",
"-",
"100",
"self",
".",
"fail",
"=",
"True",
"else",
":",
"outputstring",
"=",
"str",
"(",
"inputstring",
")",
"return",
"outputstring"
] |
Format the data to be consistent with heatmaps
:param inputstring: string containing data to be formatted
:param header: class of the data - certain categories have specific formatting requirements
:return: the formatted output string
|
[
"Format",
"the",
"data",
"to",
"be",
"consistent",
"with",
"heatmaps",
":",
"param",
"inputstring",
":",
"string",
"containing",
"data",
"to",
"be",
"formatted",
":",
"param",
"header",
":",
"class",
"of",
"the",
"data",
"-",
"certain",
"categories",
"have",
"specific",
"formatting",
"requirements",
":",
"return",
":",
"the",
"formatted",
"output",
"string"
] |
train
|
https://github.com/OLC-Bioinformatics/sipprverse/blob/d4f10cdf8e1a39dac0953db61c21c97efc6006de/sipprverse_reporter/reports.py#L430-L463
|
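Editor's note (illustrative sketch, not part of the dataset record above): data_sanitise maps heterogeneous report values onto heatmap-friendly numbers. The standalone function below reproduces the same idea; its name, sample values and thresholds are assumptions for illustration only, not part of the sipprverse API.

import re

def sanitise(value, header=None):
    # Map a report cell to a numeric score: positive for pass/detected, -100 for fail, 0 for missing.
    text = str(value)
    if text == 'nan':
        return 0
    if '%' in text:
        match = re.findall(r'(\d+)\..+', text)
        return int(match[0]) if match else 0
    if header == 'Pass/Fail':
        return 100 if text == '+' else -100
    if header == 'ContamStatus':
        return 100 if text == 'Clean' else -100
    if header == 'MeanCoverage':
        return 100 if float(text.split(' ')[0]) >= 20 else -100
    return text

print(sanitise('98.7%'))                               # 98
print(sanitise('+', header='Pass/Fail'))               # 100
print(sanitise('12.3 (0.9)', header='MeanCoverage'))   # -100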
OLC-Bioinformatics/sipprverse
|
sipprverse_reporter/reports.py
|
ReportImage.dataframe_setup
|
def dataframe_setup(self):
"""
Set-up a report to store the desired header: sanitized string combinations
"""
# Initialise a dictionary to store the sanitized headers and strings
genesippr_dict = dict()
# Try to open all the reports - use pandas to extract the results from any report that exists
try:
sippr_matrix = pd.read_csv(os.path.join(self.reportpath, 'genesippr.csv'),
delimiter=',', index_col=0).T.to_dict()
except FileNotFoundError:
sippr_matrix = dict()
try:
conf_matrix = pd.read_csv(os.path.join(self.reportpath, 'confindr_report.csv'),
delimiter=',', index_col=0).T.to_dict()
except FileNotFoundError:
conf_matrix = dict()
try:
gdcs_matrix = pd.read_csv(os.path.join(self.reportpath, 'GDCS.csv'),
delimiter=',', index_col=0).T.to_dict()
except FileNotFoundError:
gdcs_matrix = dict()
# Populate the header:sanitized string dictionary with results from all strains
for sample in self.metadata:
genesippr_dict[sample.name] = dict()
try:
genesippr_dict[sample.name]['eae'] = self.data_sanitise(sippr_matrix[sample.name]['eae'])
except KeyError:
genesippr_dict[sample.name]['eae'] = 0
try:
genesippr_dict[sample.name]['hlyAEc'] = self.data_sanitise(sippr_matrix[sample.name]['hlyAEc'])
except KeyError:
genesippr_dict[sample.name]['hlyAEc'] = 0
try:
genesippr_dict[sample.name]['VT1'] = self.data_sanitise(sippr_matrix[sample.name]['VT1'])
except KeyError:
genesippr_dict[sample.name]['VT1'] = 0
try:
genesippr_dict[sample.name]['VT2'] = self.data_sanitise(sippr_matrix[sample.name]['VT2'])
except KeyError:
genesippr_dict[sample.name]['VT2'] = 0
try:
genesippr_dict[sample.name]['hlyALm'] = self.data_sanitise(sippr_matrix[sample.name]['hlyALm'])
except KeyError:
genesippr_dict[sample.name]['hlyALm'] = 0
try:
genesippr_dict[sample.name]['IGS'] = self.data_sanitise(sippr_matrix[sample.name]['IGS'])
except KeyError:
genesippr_dict[sample.name]['IGS'] = 0
try:
genesippr_dict[sample.name]['inlJ'] = self.data_sanitise(sippr_matrix[sample.name]['inlJ'])
except KeyError:
genesippr_dict[sample.name]['inlJ'] = 0
try:
genesippr_dict[sample.name]['invA'] = self.data_sanitise(sippr_matrix[sample.name]['invA'])
except KeyError:
genesippr_dict[sample.name]['invA'] = 0
try:
genesippr_dict[sample.name]['stn'] = self.data_sanitise(sippr_matrix[sample.name]['stn'])
except KeyError:
genesippr_dict[sample.name]['stn'] = 0
try:
genesippr_dict[sample.name]['GDCS'] = self.data_sanitise(gdcs_matrix[sample.name]['Pass/Fail'],
header='Pass/Fail')
except KeyError:
genesippr_dict[sample.name]['GDCS'] = 0
try:
genesippr_dict[sample.name]['Contamination'] = self.data_sanitise(
conf_matrix[sample.name]['ContamStatus'], header='ContamStatus')
except KeyError:
genesippr_dict[sample.name]['Contamination'] = 0
try:
genesippr_dict[sample.name]['Coverage'] = self.data_sanitise(
gdcs_matrix[sample.name]['MeanCoverage'], header='MeanCoverage')
except KeyError:
genesippr_dict[sample.name]['Coverage'] = 0
# Create a report from the header: sanitized string dictionary to be used in the creation of the report image
with open(self.image_report, 'w') as csv:
data = '{}\n'.format(','.join(self.header_list))
for strain in sorted(genesippr_dict):
data += '{str},'.format(str=strain)
for header in self.header_list[1:]:
data += '{value},'.format(value=genesippr_dict[strain][header])
data = data.rstrip(',')
data += '\n'
csv.write(data)
|
python
|
def dataframe_setup(self):
"""
Set-up a report to store the desired header: sanitized string combinations
"""
# Initialise a dictionary to store the sanitized headers and strings
genesippr_dict = dict()
# Try to open all the reports - use pandas to extract the results from any report that exists
try:
sippr_matrix = pd.read_csv(os.path.join(self.reportpath, 'genesippr.csv'),
delimiter=',', index_col=0).T.to_dict()
except FileNotFoundError:
sippr_matrix = dict()
try:
conf_matrix = pd.read_csv(os.path.join(self.reportpath, 'confindr_report.csv'),
delimiter=',', index_col=0).T.to_dict()
except FileNotFoundError:
conf_matrix = dict()
try:
gdcs_matrix = pd.read_csv(os.path.join(self.reportpath, 'GDCS.csv'),
delimiter=',', index_col=0).T.to_dict()
except FileNotFoundError:
gdcs_matrix = dict()
# Populate the header:sanitized string dictionary with results from all strains
for sample in self.metadata:
genesippr_dict[sample.name] = dict()
try:
genesippr_dict[sample.name]['eae'] = self.data_sanitise(sippr_matrix[sample.name]['eae'])
except KeyError:
genesippr_dict[sample.name]['eae'] = 0
try:
genesippr_dict[sample.name]['hlyAEc'] = self.data_sanitise(sippr_matrix[sample.name]['hlyAEc'])
except KeyError:
genesippr_dict[sample.name]['hlyAEc'] = 0
try:
genesippr_dict[sample.name]['VT1'] = self.data_sanitise(sippr_matrix[sample.name]['VT1'])
except KeyError:
genesippr_dict[sample.name]['VT1'] = 0
try:
genesippr_dict[sample.name]['VT2'] = self.data_sanitise(sippr_matrix[sample.name]['VT2'])
except KeyError:
genesippr_dict[sample.name]['VT2'] = 0
try:
genesippr_dict[sample.name]['hlyALm'] = self.data_sanitise(sippr_matrix[sample.name]['hlyALm'])
except KeyError:
genesippr_dict[sample.name]['hlyALm'] = 0
try:
genesippr_dict[sample.name]['IGS'] = self.data_sanitise(sippr_matrix[sample.name]['IGS'])
except KeyError:
genesippr_dict[sample.name]['IGS'] = 0
try:
genesippr_dict[sample.name]['inlJ'] = self.data_sanitise(sippr_matrix[sample.name]['inlJ'])
except KeyError:
genesippr_dict[sample.name]['inlJ'] = 0
try:
genesippr_dict[sample.name]['invA'] = self.data_sanitise(sippr_matrix[sample.name]['invA'])
except KeyError:
genesippr_dict[sample.name]['invA'] = 0
try:
genesippr_dict[sample.name]['stn'] = self.data_sanitise(sippr_matrix[sample.name]['stn'])
except KeyError:
genesippr_dict[sample.name]['stn'] = 0
try:
genesippr_dict[sample.name]['GDCS'] = self.data_sanitise(gdcs_matrix[sample.name]['Pass/Fail'],
header='Pass/Fail')
except KeyError:
genesippr_dict[sample.name]['GDCS'] = 0
try:
genesippr_dict[sample.name]['Contamination'] = self.data_sanitise(
conf_matrix[sample.name]['ContamStatus'], header='ContamStatus')
except KeyError:
genesippr_dict[sample.name]['Contamination'] = 0
try:
genesippr_dict[sample.name]['Coverage'] = self.data_sanitise(
gdcs_matrix[sample.name]['MeanCoverage'], header='MeanCoverage')
except KeyError:
genesippr_dict[sample.name]['Coverage'] = 0
# Create a report from the header: sanitized string dictionary to be used in the creation of the report image
with open(self.image_report, 'w') as csv:
data = '{}\n'.format(','.join(self.header_list))
for strain in sorted(genesippr_dict):
data += '{str},'.format(str=strain)
for header in self.header_list[1:]:
data += '{value},'.format(value=genesippr_dict[strain][header])
data = data.rstrip(',')
data += '\n'
csv.write(data)
|
[
"def",
"dataframe_setup",
"(",
"self",
")",
":",
"# Initialise a dictionary to store the sanitized headers and strings",
"genesippr_dict",
"=",
"dict",
"(",
")",
"# Try to open all the reports - use pandas to extract the results from any report that exists",
"try",
":",
"sippr_matrix",
"=",
"pd",
".",
"read_csv",
"(",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"reportpath",
",",
"'genesippr.csv'",
")",
",",
"delimiter",
"=",
"','",
",",
"index_col",
"=",
"0",
")",
".",
"T",
".",
"to_dict",
"(",
")",
"except",
"FileNotFoundError",
":",
"sippr_matrix",
"=",
"dict",
"(",
")",
"try",
":",
"conf_matrix",
"=",
"pd",
".",
"read_csv",
"(",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"reportpath",
",",
"'confindr_report.csv'",
")",
",",
"delimiter",
"=",
"','",
",",
"index_col",
"=",
"0",
")",
".",
"T",
".",
"to_dict",
"(",
")",
"except",
"FileNotFoundError",
":",
"conf_matrix",
"=",
"dict",
"(",
")",
"try",
":",
"gdcs_matrix",
"=",
"pd",
".",
"read_csv",
"(",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"reportpath",
",",
"'GDCS.csv'",
")",
",",
"delimiter",
"=",
"','",
",",
"index_col",
"=",
"0",
")",
".",
"T",
".",
"to_dict",
"(",
")",
"except",
"FileNotFoundError",
":",
"gdcs_matrix",
"=",
"dict",
"(",
")",
"# Populate the header:sanitized string dictionary with results from all strains",
"for",
"sample",
"in",
"self",
".",
"metadata",
":",
"genesippr_dict",
"[",
"sample",
".",
"name",
"]",
"=",
"dict",
"(",
")",
"try",
":",
"genesippr_dict",
"[",
"sample",
".",
"name",
"]",
"[",
"'eae'",
"]",
"=",
"self",
".",
"data_sanitise",
"(",
"sippr_matrix",
"[",
"sample",
".",
"name",
"]",
"[",
"'eae'",
"]",
")",
"except",
"KeyError",
":",
"genesippr_dict",
"[",
"sample",
".",
"name",
"]",
"[",
"'eae'",
"]",
"=",
"0",
"try",
":",
"genesippr_dict",
"[",
"sample",
".",
"name",
"]",
"[",
"'hlyAEc'",
"]",
"=",
"self",
".",
"data_sanitise",
"(",
"sippr_matrix",
"[",
"sample",
".",
"name",
"]",
"[",
"'hlyAEc'",
"]",
")",
"except",
"KeyError",
":",
"genesippr_dict",
"[",
"sample",
".",
"name",
"]",
"[",
"'hlyAEc'",
"]",
"=",
"0",
"try",
":",
"genesippr_dict",
"[",
"sample",
".",
"name",
"]",
"[",
"'VT1'",
"]",
"=",
"self",
".",
"data_sanitise",
"(",
"sippr_matrix",
"[",
"sample",
".",
"name",
"]",
"[",
"'VT1'",
"]",
")",
"except",
"KeyError",
":",
"genesippr_dict",
"[",
"sample",
".",
"name",
"]",
"[",
"'VT1'",
"]",
"=",
"0",
"try",
":",
"genesippr_dict",
"[",
"sample",
".",
"name",
"]",
"[",
"'VT2'",
"]",
"=",
"self",
".",
"data_sanitise",
"(",
"sippr_matrix",
"[",
"sample",
".",
"name",
"]",
"[",
"'VT2'",
"]",
")",
"except",
"KeyError",
":",
"genesippr_dict",
"[",
"sample",
".",
"name",
"]",
"[",
"'VT2'",
"]",
"=",
"0",
"try",
":",
"genesippr_dict",
"[",
"sample",
".",
"name",
"]",
"[",
"'hlyALm'",
"]",
"=",
"self",
".",
"data_sanitise",
"(",
"sippr_matrix",
"[",
"sample",
".",
"name",
"]",
"[",
"'hlyALm'",
"]",
")",
"except",
"KeyError",
":",
"genesippr_dict",
"[",
"sample",
".",
"name",
"]",
"[",
"'hlyALm'",
"]",
"=",
"0",
"try",
":",
"genesippr_dict",
"[",
"sample",
".",
"name",
"]",
"[",
"'IGS'",
"]",
"=",
"self",
".",
"data_sanitise",
"(",
"sippr_matrix",
"[",
"sample",
".",
"name",
"]",
"[",
"'IGS'",
"]",
")",
"except",
"KeyError",
":",
"genesippr_dict",
"[",
"sample",
".",
"name",
"]",
"[",
"'IGS'",
"]",
"=",
"0",
"try",
":",
"genesippr_dict",
"[",
"sample",
".",
"name",
"]",
"[",
"'inlJ'",
"]",
"=",
"self",
".",
"data_sanitise",
"(",
"sippr_matrix",
"[",
"sample",
".",
"name",
"]",
"[",
"'inlJ'",
"]",
")",
"except",
"KeyError",
":",
"genesippr_dict",
"[",
"sample",
".",
"name",
"]",
"[",
"'inlJ'",
"]",
"=",
"0",
"try",
":",
"genesippr_dict",
"[",
"sample",
".",
"name",
"]",
"[",
"'invA'",
"]",
"=",
"self",
".",
"data_sanitise",
"(",
"sippr_matrix",
"[",
"sample",
".",
"name",
"]",
"[",
"'invA'",
"]",
")",
"except",
"KeyError",
":",
"genesippr_dict",
"[",
"sample",
".",
"name",
"]",
"[",
"'invA'",
"]",
"=",
"0",
"try",
":",
"genesippr_dict",
"[",
"sample",
".",
"name",
"]",
"[",
"'stn'",
"]",
"=",
"self",
".",
"data_sanitise",
"(",
"sippr_matrix",
"[",
"sample",
".",
"name",
"]",
"[",
"'stn'",
"]",
")",
"except",
"KeyError",
":",
"genesippr_dict",
"[",
"sample",
".",
"name",
"]",
"[",
"'stn'",
"]",
"=",
"0",
"try",
":",
"genesippr_dict",
"[",
"sample",
".",
"name",
"]",
"[",
"'GDCS'",
"]",
"=",
"self",
".",
"data_sanitise",
"(",
"gdcs_matrix",
"[",
"sample",
".",
"name",
"]",
"[",
"'Pass/Fail'",
"]",
",",
"header",
"=",
"'Pass/Fail'",
")",
"except",
"KeyError",
":",
"genesippr_dict",
"[",
"sample",
".",
"name",
"]",
"[",
"'GDCS'",
"]",
"=",
"0",
"try",
":",
"genesippr_dict",
"[",
"sample",
".",
"name",
"]",
"[",
"'Contamination'",
"]",
"=",
"self",
".",
"data_sanitise",
"(",
"conf_matrix",
"[",
"sample",
".",
"name",
"]",
"[",
"'ContamStatus'",
"]",
",",
"header",
"=",
"'ContamStatus'",
")",
"except",
"KeyError",
":",
"genesippr_dict",
"[",
"sample",
".",
"name",
"]",
"[",
"'Contamination'",
"]",
"=",
"0",
"try",
":",
"genesippr_dict",
"[",
"sample",
".",
"name",
"]",
"[",
"'Coverage'",
"]",
"=",
"self",
".",
"data_sanitise",
"(",
"gdcs_matrix",
"[",
"sample",
".",
"name",
"]",
"[",
"'MeanCoverage'",
"]",
",",
"header",
"=",
"'MeanCoverage'",
")",
"except",
"KeyError",
":",
"genesippr_dict",
"[",
"sample",
".",
"name",
"]",
"[",
"'Coverage'",
"]",
"=",
"0",
"# Create a report from the header: sanitized string dictionary to be used in the creation of the report image",
"with",
"open",
"(",
"self",
".",
"image_report",
",",
"'w'",
")",
"as",
"csv",
":",
"data",
"=",
"'{}\\n'",
".",
"format",
"(",
"','",
".",
"join",
"(",
"self",
".",
"header_list",
")",
")",
"for",
"strain",
"in",
"sorted",
"(",
"genesippr_dict",
")",
":",
"data",
"+=",
"'{str},'",
".",
"format",
"(",
"str",
"=",
"strain",
")",
"for",
"header",
"in",
"self",
".",
"header_list",
"[",
"1",
":",
"]",
":",
"data",
"+=",
"'{value},'",
".",
"format",
"(",
"value",
"=",
"genesippr_dict",
"[",
"strain",
"]",
"[",
"header",
"]",
")",
"data",
"=",
"data",
".",
"rstrip",
"(",
"','",
")",
"data",
"+=",
"'\\n'",
"csv",
".",
"write",
"(",
"data",
")"
] |
Set-up a report to store the desired header: sanitized string combinations
|
[
"Set",
"-",
"up",
"a",
"report",
"to",
"store",
"the",
"desired",
"header",
":",
"sanitized",
"string",
"combinations"
] |
train
|
https://github.com/OLC-Bioinformatics/sipprverse/blob/d4f10cdf8e1a39dac0953db61c21c97efc6006de/sipprverse_reporter/reports.py#L465-L551
|
OLC-Bioinformatics/sipprverse
|
sipprverse_reporter/reports.py
|
ReportImage.figure_populate
|
def figure_populate(outputpath, csv, xlabels, ylabels, analysistype, description, fail=False):
"""
Create the report image from the summary report created in self.dataframesetup
:param outputpath: Path in which the outputs are to be created
:param csv: Name of the report file from which data are to be extracted
:param xlabels: List of all the labels to use on the x-axis
:param ylabels: List of all the labels to use on the y-axis
:param analysistype: String of the analysis type
:param description: String describing the analysis: set to either template for the empty heatmap created prior
to analyses or report for normal functionality
:param fail: Boolean of whether any samples have failed the quality checks - used for determining the palette
"""
# Create a data frame from the summary report
df = pd.read_csv(
os.path.join(outputpath, csv),
delimiter=',',
index_col=0)
# Set the palette appropriately - 'template' uses only grey
if description == 'template':
cmap = ['#a0a0a0']
# 'fail' uses red (fail), grey (not detected), and green (detected/pass)
elif fail:
cmap = ['#ff0000', '#a0a0a0', '#00cc00']
# Otherwise only use grey (not detected) and green (detected/pass)
else:
cmap = ['#a0a0a0', '#00cc00']
# Use seaborn to create a heatmap of the data
plot = sns.heatmap(df,
cbar=False,
linewidths=.5,
cmap=cmap)
# Move the x-axis to the top of the plot
plot.xaxis.set_ticks_position('top')
# Remove the y-labels
plot.set_ylabel('')
# Set the x-tick labels as a slice of the x-labels list (first entry is not required, as it makes the
    # report image look crowded). Rotate the x-tick labels 90 degrees
plot.set_xticklabels(xlabels[1:], rotation=90)
# Set the y-tick labels from the supplied list
plot.set_yticklabels(ylabels, rotation=0)
# Create the figure
fig = plot.get_figure()
# Save the figure in .png format, using the bbox_inches='tight' option to ensure that everything is scaled
fig.savefig(os.path.join(outputpath, '{at}_{desc}.png'.format(at=analysistype,
desc=description)),
bbox_inches='tight'
)
|
python
|
def figure_populate(outputpath, csv, xlabels, ylabels, analysistype, description, fail=False):
"""
Create the report image from the summary report created in self.dataframesetup
:param outputpath: Path in which the outputs are to be created
:param csv: Name of the report file from which data are to be extracted
:param xlabels: List of all the labels to use on the x-axis
:param ylabels: List of all the labels to use on the y-axis
:param analysistype: String of the analysis type
:param description: String describing the analysis: set to either template for the empty heatmap created prior
to analyses or report for normal functionality
:param fail: Boolean of whether any samples have failed the quality checks - used for determining the palette
"""
# Create a data frame from the summary report
df = pd.read_csv(
os.path.join(outputpath, csv),
delimiter=',',
index_col=0)
# Set the palette appropriately - 'template' uses only grey
if description == 'template':
cmap = ['#a0a0a0']
# 'fail' uses red (fail), grey (not detected), and green (detected/pass)
elif fail:
cmap = ['#ff0000', '#a0a0a0', '#00cc00']
# Otherwise only use grey (not detected) and green (detected/pass)
else:
cmap = ['#a0a0a0', '#00cc00']
# Use seaborn to create a heatmap of the data
plot = sns.heatmap(df,
cbar=False,
linewidths=.5,
cmap=cmap)
# Move the x-axis to the top of the plot
plot.xaxis.set_ticks_position('top')
# Remove the y-labels
plot.set_ylabel('')
# Set the x-tick labels as a slice of the x-labels list (first entry is not required, as it makes the
    # report image look crowded). Rotate the x-tick labels 90 degrees
plot.set_xticklabels(xlabels[1:], rotation=90)
# Set the y-tick labels from the supplied list
plot.set_yticklabels(ylabels, rotation=0)
# Create the figure
fig = plot.get_figure()
# Save the figure in .png format, using the bbox_inches='tight' option to ensure that everything is scaled
fig.savefig(os.path.join(outputpath, '{at}_{desc}.png'.format(at=analysistype,
desc=description)),
bbox_inches='tight'
)
|
[
"def",
"figure_populate",
"(",
"outputpath",
",",
"csv",
",",
"xlabels",
",",
"ylabels",
",",
"analysistype",
",",
"description",
",",
"fail",
"=",
"False",
")",
":",
"# Create a data frame from the summary report",
"df",
"=",
"pd",
".",
"read_csv",
"(",
"os",
".",
"path",
".",
"join",
"(",
"outputpath",
",",
"csv",
")",
",",
"delimiter",
"=",
"','",
",",
"index_col",
"=",
"0",
")",
"# Set the palette appropriately - 'template' uses only grey",
"if",
"description",
"==",
"'template'",
":",
"cmap",
"=",
"[",
"'#a0a0a0'",
"]",
"# 'fail' uses red (fail), grey (not detected), and green (detected/pass)",
"elif",
"fail",
":",
"cmap",
"=",
"[",
"'#ff0000'",
",",
"'#a0a0a0'",
",",
"'#00cc00'",
"]",
"# Otherwise only use grey (not detected) and green (detected/pass)",
"else",
":",
"cmap",
"=",
"[",
"'#a0a0a0'",
",",
"'#00cc00'",
"]",
"# Use seaborn to create a heatmap of the data",
"plot",
"=",
"sns",
".",
"heatmap",
"(",
"df",
",",
"cbar",
"=",
"False",
",",
"linewidths",
"=",
".5",
",",
"cmap",
"=",
"cmap",
")",
"# Move the x-axis to the top of the plot",
"plot",
".",
"xaxis",
".",
"set_ticks_position",
"(",
"'top'",
")",
"# Remove the y-labels",
"plot",
".",
"set_ylabel",
"(",
"''",
")",
"# Set the x-tick labels as a slice of the x-labels list (first entry is not required, as it makes the",
"# report image look crowded. Rotate the x-tick labels 90 degrees",
"plot",
".",
"set_xticklabels",
"(",
"xlabels",
"[",
"1",
":",
"]",
",",
"rotation",
"=",
"90",
")",
"# Set the y-tick labels from the supplied list",
"plot",
".",
"set_yticklabels",
"(",
"ylabels",
",",
"rotation",
"=",
"0",
")",
"# Create the figure",
"fig",
"=",
"plot",
".",
"get_figure",
"(",
")",
"# Save the figure in .png format, using the bbox_inches='tight' option to ensure that everything is scaled",
"fig",
".",
"savefig",
"(",
"os",
".",
"path",
".",
"join",
"(",
"outputpath",
",",
"'{at}_{desc}.png'",
".",
"format",
"(",
"at",
"=",
"analysistype",
",",
"desc",
"=",
"description",
")",
")",
",",
"bbox_inches",
"=",
"'tight'",
")"
] |
Create the report image from the summary report created in self.dataframesetup
:param outputpath: Path in which the outputs are to be created
:param csv: Name of the report file from which data are to be extracted
:param xlabels: List of all the labels to use on the x-axis
:param ylabels: List of all the labels to use on the y-axis
:param analysistype: String of the analysis type
:param description: String describing the analysis: set to either template for the empty heatmap created prior
to analyses or report for normal functionality
:param fail: Boolean of whether any samples have failed the quality checks - used for determining the palette
|
[
"Create",
"the",
"report",
"image",
"from",
"the",
"summary",
"report",
"created",
"in",
"self",
".",
"dataframesetup",
":",
"param",
"outputpath",
":",
"Path",
"in",
"which",
"the",
"outputs",
"are",
"to",
"be",
"created",
":",
"param",
"csv",
":",
"Name",
"of",
"the",
"report",
"file",
"from",
"which",
"data",
"are",
"to",
"be",
"extracted",
":",
"param",
"xlabels",
":",
"List",
"of",
"all",
"the",
"labels",
"to",
"use",
"on",
"the",
"x",
"-",
"axis",
":",
"param",
"ylabels",
":",
"List",
"of",
"all",
"the",
"labels",
"to",
"use",
"on",
"the",
"y",
"-",
"axis",
":",
"param",
"analysistype",
":",
"String",
"of",
"the",
"analysis",
"type",
":",
"param",
"description",
":",
"String",
"describing",
"the",
"analysis",
":",
"set",
"to",
"either",
"template",
"for",
"the",
"empty",
"heatmap",
"created",
"prior",
"to",
"analyses",
"or",
"report",
"for",
"normal",
"functionality",
":",
"param",
"fail",
":",
"Boolean",
"of",
"whether",
"any",
"samples",
"have",
"failed",
"the",
"quality",
"checks",
"-",
"used",
"for",
"determining",
"the",
"palette"
] |
train
|
https://github.com/OLC-Bioinformatics/sipprverse/blob/d4f10cdf8e1a39dac0953db61c21c97efc6006de/sipprverse_reporter/reports.py#L554-L600
|
pyBookshelf/bookshelf
|
bookshelf/api_v2/os_helpers.py
|
add_usr_local_bin_to_path
|
def add_usr_local_bin_to_path(log=False):
""" adds /usr/local/bin to $PATH """
if log:
bookshelf2.logging_helpers.log_green('inserts /usr/local/bin into PATH')
with settings(hide('warnings', 'running', 'stdout', 'stderr'),
capture=True):
try:
sudo('echo "export PATH=/usr/local/bin:$PATH" '
'|sudo /usr/bin/tee /etc/profile.d/fix-path.sh')
return True
except:
raise SystemExit(1)
|
python
|
def add_usr_local_bin_to_path(log=False):
""" adds /usr/local/bin to $PATH """
if log:
bookshelf2.logging_helpers.log_green('inserts /usr/local/bin into PATH')
with settings(hide('warnings', 'running', 'stdout', 'stderr'),
capture=True):
try:
sudo('echo "export PATH=/usr/local/bin:$PATH" '
'|sudo /usr/bin/tee /etc/profile.d/fix-path.sh')
return True
except:
raise SystemExit(1)
|
[
"def",
"add_usr_local_bin_to_path",
"(",
"log",
"=",
"False",
")",
":",
"if",
"log",
":",
"bookshelf2",
".",
"logging_helpers",
".",
"log_green",
"(",
"'inserts /usr/local/bin into PATH'",
")",
"with",
"settings",
"(",
"hide",
"(",
"'warnings'",
",",
"'running'",
",",
"'stdout'",
",",
"'stderr'",
")",
",",
"capture",
"=",
"True",
")",
":",
"try",
":",
"sudo",
"(",
"'echo \"export PATH=/usr/local/bin:$PATH\" '",
"'|sudo /usr/bin/tee /etc/profile.d/fix-path.sh'",
")",
"return",
"True",
"except",
":",
"raise",
"SystemExit",
"(",
"1",
")"
] |
adds /usr/local/bin to $PATH
|
[
"adds",
"/",
"usr",
"/",
"local",
"/",
"bin",
"to",
"$PATH"
] |
train
|
https://github.com/pyBookshelf/bookshelf/blob/a6770678e735de95b194f6e6989223970db5f654/bookshelf/api_v2/os_helpers.py#L12-L24
|
pyBookshelf/bookshelf
|
bookshelf/api_v2/os_helpers.py
|
dir_attribs
|
def dir_attribs(location, mode=None, owner=None,
group=None, recursive=False, use_sudo=False):
""" cuisine dir_attribs doesn't do sudo, so we implement our own
Updates the mode/owner/group for the given remote directory."""
args = ''
if recursive:
args = args + ' -R '
if mode:
if use_sudo:
sudo('chmod %s %s %s' % (args, mode, location))
else:
run('chmod %s %s %s' % (args, mode, location))
if owner:
if use_sudo:
sudo('chown %s %s %s' % (args, owner, location))
else:
run('chown %s %s %s' % (args, owner, location))
if group:
if use_sudo:
sudo('chgrp %s %s %s' % (args, group, location))
else:
run('chgrp %s %s %s' % (args, group, location))
return True
|
python
|
def dir_attribs(location, mode=None, owner=None,
group=None, recursive=False, use_sudo=False):
""" cuisine dir_attribs doesn't do sudo, so we implement our own
Updates the mode/owner/group for the given remote directory."""
args = ''
if recursive:
args = args + ' -R '
if mode:
if use_sudo:
sudo('chmod %s %s %s' % (args, mode, location))
else:
run('chmod %s %s %s' % (args, mode, location))
if owner:
if use_sudo:
sudo('chown %s %s %s' % (args, owner, location))
else:
run('chown %s %s %s' % (args, owner, location))
if group:
if use_sudo:
sudo('chgrp %s %s %s' % (args, group, location))
else:
run('chgrp %s %s %s' % (args, group, location))
return True
|
[
"def",
"dir_attribs",
"(",
"location",
",",
"mode",
"=",
"None",
",",
"owner",
"=",
"None",
",",
"group",
"=",
"None",
",",
"recursive",
"=",
"False",
",",
"use_sudo",
"=",
"False",
")",
":",
"args",
"=",
"''",
"if",
"recursive",
":",
"args",
"=",
"args",
"+",
"' -R '",
"if",
"mode",
":",
"if",
"use_sudo",
":",
"sudo",
"(",
"'chmod %s %s %s'",
"%",
"(",
"args",
",",
"mode",
",",
"location",
")",
")",
"else",
":",
"run",
"(",
"'chmod %s %s %s'",
"%",
"(",
"args",
",",
"mode",
",",
"location",
")",
")",
"if",
"owner",
":",
"if",
"use_sudo",
":",
"sudo",
"(",
"'chown %s %s %s'",
"%",
"(",
"args",
",",
"owner",
",",
"location",
")",
")",
"else",
":",
"run",
"(",
"'chown %s %s %s'",
"%",
"(",
"args",
",",
"owner",
",",
"location",
")",
")",
"if",
"group",
":",
"if",
"use_sudo",
":",
"sudo",
"(",
"'chgrp %s %s %s'",
"%",
"(",
"args",
",",
"group",
",",
"location",
")",
")",
"else",
":",
"run",
"(",
"'chgrp %s %s %s'",
"%",
"(",
"args",
",",
"group",
",",
"location",
")",
")",
"return",
"True"
] |
cuisine dir_attribs doesn't do sudo, so we implement our own
Updates the mode/owner/group for the given remote directory.
|
[
"cuisine",
"dir_attribs",
"doesn",
"t",
"do",
"sudo",
"so",
"we",
"implement",
"our",
"own",
"Updates",
"the",
"mode",
"/",
"owner",
"/",
"group",
"for",
"the",
"given",
"remote",
"directory",
"."
] |
train
|
https://github.com/pyBookshelf/bookshelf/blob/a6770678e735de95b194f6e6989223970db5f654/bookshelf/api_v2/os_helpers.py#L35-L58
|
pyBookshelf/bookshelf
|
bookshelf/api_v2/os_helpers.py
|
dir_ensure
|
def dir_ensure(location, recursive=False, mode=None,
owner=None, group=None, use_sudo=False):
""" cuisine dir_ensure doesn't do sudo, so we implement our own
Ensures that there is a remote directory at the given location,
optionally updating its mode/owner/group.
If we are not updating the owner/group then this can be done as a single
ssh call, so use that method, otherwise set owner/group after creation."""
args = ''
if recursive:
args = args + ' -p '
if not dir_exists(location):
if use_sudo:
sudo('mkdir %s %s' % (args, location))
else:
run('mkdir %s %s' % (args, location))
if owner or group or mode:
if use_sudo:
dir_attribs(location,
owner=owner,
group=group,
mode=mode,
recursive=recursive,
use_sudo=True)
else:
dir_attribs(location,
owner=owner,
group=group,
mode=mode,
recursive=recursive)
return True
|
python
|
def dir_ensure(location, recursive=False, mode=None,
owner=None, group=None, use_sudo=False):
""" cuisine dir_ensure doesn't do sudo, so we implement our own
Ensures that there is a remote directory at the given location,
optionally updating its mode/owner/group.
If we are not updating the owner/group then this can be done as a single
ssh call, so use that method, otherwise set owner/group after creation."""
args = ''
if recursive:
args = args + ' -p '
if not dir_exists(location):
if use_sudo:
sudo('mkdir %s %s' % (args, location))
else:
run('mkdir %s %s' % (args, location))
if owner or group or mode:
if use_sudo:
dir_attribs(location,
owner=owner,
group=group,
mode=mode,
recursive=recursive,
use_sudo=True)
else:
dir_attribs(location,
owner=owner,
group=group,
mode=mode,
recursive=recursive)
return True
|
[
"def",
"dir_ensure",
"(",
"location",
",",
"recursive",
"=",
"False",
",",
"mode",
"=",
"None",
",",
"owner",
"=",
"None",
",",
"group",
"=",
"None",
",",
"use_sudo",
"=",
"False",
")",
":",
"args",
"=",
"''",
"if",
"recursive",
":",
"args",
"=",
"args",
"+",
"' -p '",
"if",
"not",
"dir_exists",
"(",
"location",
")",
":",
"if",
"use_sudo",
":",
"sudo",
"(",
"'mkdir %s %s'",
"%",
"(",
"args",
",",
"location",
")",
")",
"else",
":",
"run",
"(",
"'mkdir %s %s'",
"%",
"(",
"args",
",",
"location",
")",
")",
"if",
"owner",
"or",
"group",
"or",
"mode",
":",
"if",
"use_sudo",
":",
"dir_attribs",
"(",
"location",
",",
"owner",
"=",
"owner",
",",
"group",
"=",
"group",
",",
"mode",
"=",
"mode",
",",
"recursive",
"=",
"recursive",
",",
"use_sudo",
"=",
"True",
")",
"else",
":",
"dir_attribs",
"(",
"location",
",",
"owner",
"=",
"owner",
",",
"group",
"=",
"group",
",",
"mode",
"=",
"mode",
",",
"recursive",
"=",
"recursive",
")",
"return",
"True"
] |
cuisine dir_ensure doesn't do sudo, so we implement our own
Ensures that there is a remote directory at the given location,
optionally updating its mode/owner/group.
If we are not updating the owner/group then this can be done as a single
ssh call, so use that method, otherwise set owner/group after creation.
|
[
"cuisine",
"dir_ensure",
"doesn",
"t",
"do",
"sudo",
"so",
"we",
"implement",
"our",
"own",
"Ensures",
"that",
"there",
"is",
"a",
"remote",
"directory",
"at",
"the",
"given",
"location",
"optionally",
"updating",
"its",
"mode",
"/",
"owner",
"/",
"group",
".",
"If",
"we",
"are",
"not",
"updating",
"the",
"owner",
"/",
"group",
"then",
"this",
"can",
"be",
"done",
"as",
"a",
"single",
"ssh",
"call",
"so",
"use",
"that",
"method",
"otherwise",
"set",
"owner",
"/",
"group",
"after",
"creation",
"."
] |
train
|
https://github.com/pyBookshelf/bookshelf/blob/a6770678e735de95b194f6e6989223970db5f654/bookshelf/api_v2/os_helpers.py#L61-L93
|
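Editor's note (illustrative sketch, not part of the dataset record above): dir_ensure follows an "ensure the directory exists, then optionally fix its attributes" pattern over SSH via Fabric. A local analogue using only the standard library shows the same shape; the helper name, paths and mode below are hypothetical example values, not part of the bookshelf API.

import os
import tempfile

def ensure_dir(path, mode=None):
    # Create the directory (with parents) if it is missing, then optionally set its mode.
    if not os.path.isdir(path):
        os.makedirs(path, exist_ok=True)
    if mode is not None:
        os.chmod(path, mode)
    return True

base = tempfile.mkdtemp()
target = os.path.join(base, 'data', 'reports')
ensure_dir(target, mode=0o755)
print(os.path.isdir(target))  # True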
pyBookshelf/bookshelf
|
bookshelf/api_v2/os_helpers.py
|
dir_exists
|
def dir_exists(location, use_sudo=False):
"""Tells if there is a remote directory at the given location."""
with settings(hide('running', 'stdout', 'stderr'), warn_only=True):
if use_sudo:
# convert return code 0 to True
return not bool(sudo('test -d %s' % (location)).return_code)
else:
return not bool(run('test -d %s' % (location)).return_code)
|
python
|
def dir_exists(location, use_sudo=False):
"""Tells if there is a remote directory at the given location."""
with settings(hide('running', 'stdout', 'stderr'), warn_only=True):
if use_sudo:
# convert return code 0 to True
return not bool(sudo('test -d %s' % (location)).return_code)
else:
return not bool(run('test -d %s' % (location)).return_code)
|
[
"def",
"dir_exists",
"(",
"location",
",",
"use_sudo",
"=",
"False",
")",
":",
"with",
"settings",
"(",
"hide",
"(",
"'running'",
",",
"'stdout'",
",",
"'stderr'",
")",
",",
"warn_only",
"=",
"True",
")",
":",
"if",
"use_sudo",
":",
"# convert return code 0 to True",
"return",
"not",
"bool",
"(",
"sudo",
"(",
"'test -d %s'",
"%",
"(",
"location",
")",
")",
".",
"return_code",
")",
"else",
":",
"return",
"not",
"bool",
"(",
"run",
"(",
"'test -d %s'",
"%",
"(",
"location",
")",
")",
".",
"return_code",
")"
] |
Tells if there is a remote directory at the given location.
|
[
"Tells",
"if",
"there",
"is",
"a",
"remote",
"directory",
"at",
"the",
"given",
"location",
"."
] |
train
|
https://github.com/pyBookshelf/bookshelf/blob/a6770678e735de95b194f6e6989223970db5f654/bookshelf/api_v2/os_helpers.py#L96-L103
|
pyBookshelf/bookshelf
|
bookshelf/api_v2/os_helpers.py
|
disable_env_reset_on_sudo
|
def disable_env_reset_on_sudo(log=False):
""" updates /etc/sudoers so that users from %wheel keep their
environment when executing a sudo call
"""
if log:
bookshelf2.logging_helpers.log_green('disabling env reset on sudo')
file_append('/etc/sudoers',
'Defaults:%wheel !env_reset,!secure_path',
use_sudo=True,
partial=True)
return True
|
python
|
def disable_env_reset_on_sudo(log=False):
""" updates /etc/sudoers so that users from %wheel keep their
environment when executing a sudo call
"""
if log:
bookshelf2.logging_helpers.log_green('disabling env reset on sudo')
file_append('/etc/sudoers',
'Defaults:%wheel !env_reset,!secure_path',
use_sudo=True,
partial=True)
return True
|
[
"def",
"disable_env_reset_on_sudo",
"(",
"log",
"=",
"False",
")",
":",
"if",
"log",
":",
"bookshelf2",
".",
"logging_helpers",
".",
"log_green",
"(",
"'disabling env reset on sudo'",
")",
"file_append",
"(",
"'/etc/sudoers'",
",",
"'Defaults:%wheel !env_reset,!secure_path'",
",",
"use_sudo",
"=",
"True",
",",
"partial",
"=",
"True",
")",
"return",
"True"
] |
updates /etc/sudoers so that users from %wheel keep their
environment when executing a sudo call
|
[
"updates",
"/",
"etc",
"/",
"sudoers",
"so",
"that",
"users",
"from",
"%wheel",
"keep",
"their",
"environment",
"when",
"executing",
"a",
"sudo",
"call"
] |
train
|
https://github.com/pyBookshelf/bookshelf/blob/a6770678e735de95b194f6e6989223970db5f654/bookshelf/api_v2/os_helpers.py#L106-L117
|
pyBookshelf/bookshelf
|
bookshelf/api_v2/os_helpers.py
|
disable_requiretty_on_sudoers
|
def disable_requiretty_on_sudoers(log=False):
""" allow sudo calls through ssh without a tty """
if log:
bookshelf2.logging_helpers.log_green(
'disabling requiretty on sudo calls')
comment_line('/etc/sudoers',
'^Defaults.*requiretty', use_sudo=True)
return True
|
python
|
def disable_requiretty_on_sudoers(log=False):
""" allow sudo calls through ssh without a tty """
if log:
bookshelf2.logging_helpers.log_green(
'disabling requiretty on sudo calls')
comment_line('/etc/sudoers',
'^Defaults.*requiretty', use_sudo=True)
return True
|
[
"def",
"disable_requiretty_on_sudoers",
"(",
"log",
"=",
"False",
")",
":",
"if",
"log",
":",
"bookshelf2",
".",
"logging_helpers",
".",
"log_green",
"(",
"'disabling requiretty on sudo calls'",
")",
"comment_line",
"(",
"'/etc/sudoers'",
",",
"'^Defaults.*requiretty'",
",",
"use_sudo",
"=",
"True",
")",
"return",
"True"
] |
allow sudo calls through ssh without a tty
|
[
"allow",
"sudo",
"calls",
"through",
"ssh",
"without",
"a",
"tty"
] |
train
|
https://github.com/pyBookshelf/bookshelf/blob/a6770678e735de95b194f6e6989223970db5f654/bookshelf/api_v2/os_helpers.py#L120-L128
|
pyBookshelf/bookshelf
|
bookshelf/api_v2/os_helpers.py
|
file_attribs
|
def file_attribs(location,
mode=None,
owner=None,
group=None,
use_sudo=False,
recursive=True):
"""Updates the mode/owner/group for the remote file at the given
location."""
return dir_attribs(location=location,
mode=mode,
owner=owner,
group=group,
recursive=recursive,
use_sudo=False)
|
python
|
def file_attribs(location,
mode=None,
owner=None,
group=None,
use_sudo=False,
recursive=True):
"""Updates the mode/owner/group for the remote file at the given
location."""
return dir_attribs(location=location,
mode=mode,
owner=owner,
group=group,
recursive=recursive,
use_sudo=False)
|
[
"def",
"file_attribs",
"(",
"location",
",",
"mode",
"=",
"None",
",",
"owner",
"=",
"None",
",",
"group",
"=",
"None",
",",
"use_sudo",
"=",
"False",
",",
"recursive",
"=",
"True",
")",
":",
"return",
"dir_attribs",
"(",
"location",
"=",
"location",
",",
"mode",
"=",
"mode",
",",
"owner",
"=",
"owner",
",",
"group",
"=",
"group",
",",
"recursive",
"=",
"recursive",
",",
"use_sudo",
"=",
"False",
")"
] |
Updates the mode/owner/group for the remote file at the given
location.
|
[
"Updates",
"the",
"mode",
"/",
"owner",
"/",
"group",
"for",
"the",
"remote",
"file",
"at",
"the",
"given",
"location",
"."
] |
train
|
https://github.com/pyBookshelf/bookshelf/blob/a6770678e735de95b194f6e6989223970db5f654/bookshelf/api_v2/os_helpers.py#L182-L195
|
pyBookshelf/bookshelf
|
bookshelf/api_v2/os_helpers.py
|
os_release
|
def os_release():
""" returns /etc/os-release in a dictionary """
with settings(hide('warnings', 'running', 'stderr'),
warn_only=True, capture=True):
release = {}
data = run('cat /etc/os-release')
for line in data.split('\n'):
if not line:
continue
parts = line.split('=')
if len(parts) == 2:
release[parts[0]] = parts[1].strip('\n\r"')
return release
|
python
|
def os_release():
""" returns /etc/os-release in a dictionary """
with settings(hide('warnings', 'running', 'stderr'),
warn_only=True, capture=True):
release = {}
data = run('cat /etc/os-release')
for line in data.split('\n'):
if not line:
continue
parts = line.split('=')
if len(parts) == 2:
release[parts[0]] = parts[1].strip('\n\r"')
return release
|
[
"def",
"os_release",
"(",
")",
":",
"with",
"settings",
"(",
"hide",
"(",
"'warnings'",
",",
"'running'",
",",
"'stderr'",
")",
",",
"warn_only",
"=",
"True",
",",
"capture",
"=",
"True",
")",
":",
"release",
"=",
"{",
"}",
"data",
"=",
"run",
"(",
"'cat /etc/os-release'",
")",
"for",
"line",
"in",
"data",
".",
"split",
"(",
"'\\n'",
")",
":",
"if",
"not",
"line",
":",
"continue",
"parts",
"=",
"line",
".",
"split",
"(",
"'='",
")",
"if",
"len",
"(",
"parts",
")",
"==",
"2",
":",
"release",
"[",
"parts",
"[",
"0",
"]",
"]",
"=",
"parts",
"[",
"1",
"]",
".",
"strip",
"(",
"'\\n\\r\"'",
")",
"return",
"release"
] |
returns /etc/os-release in a dictionary
|
[
"returns",
"/",
"etc",
"/",
"os",
"-",
"release",
"in",
"a",
"dictionary"
] |
train
|
https://github.com/pyBookshelf/bookshelf/blob/a6770678e735de95b194f6e6989223970db5f654/bookshelf/api_v2/os_helpers.py#L198-L212
|
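Editor's note (illustrative sketch, not part of the dataset record above): os_release runs `cat /etc/os-release` over Fabric and splits each KEY=VALUE line into a dictionary. The same parsing can be exercised locally with the standard library alone; the path is the conventional one and is read only if it exists.

def parse_os_release(path='/etc/os-release'):
    # Parse KEY=VALUE lines into a dict, stripping surrounding quotes, as the remote helper does.
    release = {}
    try:
        with open(path) as handle:
            for line in handle:
                line = line.strip()
                if not line or '=' not in line:
                    continue
                key, _, value = line.partition('=')
                release[key] = value.strip('"')
    except FileNotFoundError:
        pass
    return release

print(parse_os_release().get('ID', 'unknown'))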
pyBookshelf/bookshelf
|
bookshelf/api_v2/os_helpers.py
|
linux_distribution
|
def linux_distribution():
""" returns the linux distribution in lower case """
with settings(hide('warnings', 'running', 'stdout', 'stderr'),
warn_only=True, capture=True):
data = os_release()
return(data['ID'])
|
python
|
def linux_distribution():
""" returns the linux distribution in lower case """
with settings(hide('warnings', 'running', 'stdout', 'stderr'),
warn_only=True, capture=True):
data = os_release()
return(data['ID'])
|
[
"def",
"linux_distribution",
"(",
")",
":",
"with",
"settings",
"(",
"hide",
"(",
"'warnings'",
",",
"'running'",
",",
"'stdout'",
",",
"'stderr'",
")",
",",
"warn_only",
"=",
"True",
",",
"capture",
"=",
"True",
")",
":",
"data",
"=",
"os_release",
"(",
")",
"return",
"(",
"data",
"[",
"'ID'",
"]",
")"
] |
returns the linux distribution in lower case
|
[
"returns",
"the",
"linux",
"distribution",
"in",
"lower",
"case"
] |
train
|
https://github.com/pyBookshelf/bookshelf/blob/a6770678e735de95b194f6e6989223970db5f654/bookshelf/api_v2/os_helpers.py#L215-L220
|
pyBookshelf/bookshelf
|
bookshelf/api_v2/os_helpers.py
|
lsb_release
|
def lsb_release():
""" returns /etc/lsb-release in a dictionary """
with settings(hide('warnings', 'running'), capture=True):
_lsb_release = {}
data = sudo('cat /etc/lsb-release')
for line in data.split('\n'):
if not line:
continue
parts = line.split('=')
if len(parts) == 2:
_lsb_release[parts[0]] = parts[1].strip('\n\r"')
return _lsb_release
|
python
|
def lsb_release():
""" returns /etc/lsb-release in a dictionary """
with settings(hide('warnings', 'running'), capture=True):
_lsb_release = {}
data = sudo('cat /etc/lsb-release')
for line in data.split('\n'):
if not line:
continue
parts = line.split('=')
if len(parts) == 2:
_lsb_release[parts[0]] = parts[1].strip('\n\r"')
return _lsb_release
|
[
"def",
"lsb_release",
"(",
")",
":",
"with",
"settings",
"(",
"hide",
"(",
"'warnings'",
",",
"'running'",
")",
",",
"capture",
"=",
"True",
")",
":",
"_lsb_release",
"=",
"{",
"}",
"data",
"=",
"sudo",
"(",
"'cat /etc/lsb-release'",
")",
"for",
"line",
"in",
"data",
".",
"split",
"(",
"'\\n'",
")",
":",
"if",
"not",
"line",
":",
"continue",
"parts",
"=",
"line",
".",
"split",
"(",
"'='",
")",
"if",
"len",
"(",
"parts",
")",
"==",
"2",
":",
"_lsb_release",
"[",
"parts",
"[",
"0",
"]",
"]",
"=",
"parts",
"[",
"1",
"]",
".",
"strip",
"(",
"'\\n\\r\"'",
")",
"return",
"_lsb_release"
] |
returns /etc/lsb-release in a dictionary
|
[
"returns",
"/",
"etc",
"/",
"lsb",
"-",
"release",
"in",
"a",
"dictionary"
] |
train
|
https://github.com/pyBookshelf/bookshelf/blob/a6770678e735de95b194f6e6989223970db5f654/bookshelf/api_v2/os_helpers.py#L223-L236
|
pyBookshelf/bookshelf
|
bookshelf/api_v2/os_helpers.py
|
restart_service
|
def restart_service(service, log=False):
""" restarts a service """
with settings():
if log:
bookshelf2.logging_helpers.log_yellow(
'stoping service %s' % service)
sudo('service %s stop' % service)
if log:
bookshelf2.logging_helpers.log_yellow(
'starting service %s' % service)
sudo('service %s start' % service)
return True
|
python
|
def restart_service(service, log=False):
""" restarts a service """
with settings():
if log:
bookshelf2.logging_helpers.log_yellow(
'stoping service %s' % service)
sudo('service %s stop' % service)
if log:
bookshelf2.logging_helpers.log_yellow(
'starting service %s' % service)
sudo('service %s start' % service)
return True
|
[
"def",
"restart_service",
"(",
"service",
",",
"log",
"=",
"False",
")",
":",
"with",
"settings",
"(",
")",
":",
"if",
"log",
":",
"bookshelf2",
".",
"logging_helpers",
".",
"log_yellow",
"(",
"'stoping service %s'",
"%",
"service",
")",
"sudo",
"(",
"'service %s stop'",
"%",
"service",
")",
"if",
"log",
":",
"bookshelf2",
".",
"logging_helpers",
".",
"log_yellow",
"(",
"'starting service %s'",
"%",
"service",
")",
"sudo",
"(",
"'service %s start'",
"%",
"service",
")",
"return",
"True"
] |
restarts a service
|
[
"restarts",
"a",
"service"
] |
train
|
https://github.com/pyBookshelf/bookshelf/blob/a6770678e735de95b194f6e6989223970db5f654/bookshelf/api_v2/os_helpers.py#L246-L257
|
pyBookshelf/bookshelf
|
bookshelf/api_v2/os_helpers.py
|
systemd
|
def systemd(service, start=True, enabled=True, unmask=False, restart=False):
""" manipulates systemd services """
with settings(hide('warnings', 'running', 'stdout', 'stderr'),
warn_only=True, capture=True):
if restart:
sudo('systemctl restart %s' % service)
else:
if start:
sudo('systemctl start %s' % service)
else:
sudo('systemctl stop %s' % service)
if enabled:
sudo('systemctl enable %s' % service)
else:
sudo('systemctl disable %s' % service)
if unmask:
sudo('systemctl unmask %s' % service)
|
python
|
def systemd(service, start=True, enabled=True, unmask=False, restart=False):
""" manipulates systemd services """
with settings(hide('warnings', 'running', 'stdout', 'stderr'),
warn_only=True, capture=True):
if restart:
sudo('systemctl restart %s' % service)
else:
if start:
sudo('systemctl start %s' % service)
else:
sudo('systemctl stop %s' % service)
if enabled:
sudo('systemctl enable %s' % service)
else:
sudo('systemctl disable %s' % service)
if unmask:
sudo('systemctl unmask %s' % service)
|
[
"def",
"systemd",
"(",
"service",
",",
"start",
"=",
"True",
",",
"enabled",
"=",
"True",
",",
"unmask",
"=",
"False",
",",
"restart",
"=",
"False",
")",
":",
"with",
"settings",
"(",
"hide",
"(",
"'warnings'",
",",
"'running'",
",",
"'stdout'",
",",
"'stderr'",
")",
",",
"warn_only",
"=",
"True",
",",
"capture",
"=",
"True",
")",
":",
"if",
"restart",
":",
"sudo",
"(",
"'systemctl restart %s'",
"%",
"service",
")",
"else",
":",
"if",
"start",
":",
"sudo",
"(",
"'systemctl start %s'",
"%",
"service",
")",
"else",
":",
"sudo",
"(",
"'systemctl stop %s'",
"%",
"service",
")",
"if",
"enabled",
":",
"sudo",
"(",
"'systemctl enable %s'",
"%",
"service",
")",
"else",
":",
"sudo",
"(",
"'systemctl disable %s'",
"%",
"service",
")",
"if",
"unmask",
":",
"sudo",
"(",
"'systemctl unmask %s'",
"%",
"service",
")"
] |
manipulates systemd services
|
[
"manipulates",
"systemd",
"services"
] |
train
|
https://github.com/pyBookshelf/bookshelf/blob/a6770678e735de95b194f6e6989223970db5f654/bookshelf/api_v2/os_helpers.py#L260-L280
|
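A usage sketch for systemd, assuming an active Fabric host with sudo rights; 'nginx' is only an example unit name. Note that the enable/disable and unmask branches only run when restart is False.
from bookshelf.api_v2.os_helpers import systemd
# start and enable a unit
systemd('nginx', start=True, enabled=True)
# restart only; the enabled/unmask flags are ignored in this branch
systemd('nginx', restart=True)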
pyBookshelf/bookshelf
|
bookshelf/api_v2/os_helpers.py
|
install_os_updates
|
def install_os_updates(distribution, force=False):
""" installs OS updates """
if ('centos' in distribution or
'rhel' in distribution or
'redhat' in distribution):
bookshelf2.logging_helpers.log_green('installing OS updates')
sudo("yum -y --quiet clean all")
sudo("yum group mark convert")
sudo("yum -y --quiet update")
if ('ubuntu' in distribution or
'debian' in distribution):
with settings(hide('warnings', 'running', 'stdout', 'stderr'),
warn_only=False, capture=True):
sudo("DEBIAN_FRONTEND=noninteractive apt-get update")
if force:
sudo("sudo DEBIAN_FRONTEND=noninteractive apt-get -y -o "
"Dpkg::Options::='--force-confdef' "
"-o Dpkg::Options::='--force-confold' upgrade --force-yes")
else:
sudo("sudo DEBIAN_FRONTEND=noninteractive apt-get -y -o "
"Dpkg::Options::='--force-confdef' -o "
"Dpkg::Options::='--force-confold' upgrade")
|
python
|
def install_os_updates(distribution, force=False):
""" installs OS updates """
if ('centos' in distribution or
'rhel' in distribution or
'redhat' in distribution):
bookshelf2.logging_helpers.log_green('installing OS updates')
sudo("yum -y --quiet clean all")
sudo("yum group mark convert")
sudo("yum -y --quiet update")
if ('ubuntu' in distribution or
'debian' in distribution):
with settings(hide('warnings', 'running', 'stdout', 'stderr'),
warn_only=False, capture=True):
sudo("DEBIAN_FRONTEND=noninteractive apt-get update")
if force:
sudo("sudo DEBIAN_FRONTEND=noninteractive apt-get -y -o "
"Dpkg::Options::='--force-confdef' "
"-o Dpkg::Options::='--force-confold' upgrade --force-yes")
else:
sudo("sudo DEBIAN_FRONTEND=noninteractive apt-get -y -o "
"Dpkg::Options::='--force-confdef' -o "
"Dpkg::Options::='--force-confold' upgrade")
|
[
"def",
"install_os_updates",
"(",
"distribution",
",",
"force",
"=",
"False",
")",
":",
"if",
"(",
"'centos'",
"in",
"distribution",
"or",
"'rhel'",
"in",
"distribution",
"or",
"'redhat'",
"in",
"distribution",
")",
":",
"bookshelf2",
".",
"logging_helpers",
".",
"log_green",
"(",
"'installing OS updates'",
")",
"sudo",
"(",
"\"yum -y --quiet clean all\"",
")",
"sudo",
"(",
"\"yum group mark convert\"",
")",
"sudo",
"(",
"\"yum -y --quiet update\"",
")",
"if",
"(",
"'ubuntu'",
"in",
"distribution",
"or",
"'debian'",
"in",
"distribution",
")",
":",
"with",
"settings",
"(",
"hide",
"(",
"'warnings'",
",",
"'running'",
",",
"'stdout'",
",",
"'stderr'",
")",
",",
"warn_only",
"=",
"False",
",",
"capture",
"=",
"True",
")",
":",
"sudo",
"(",
"\"DEBIAN_FRONTEND=noninteractive apt-get update\"",
")",
"if",
"force",
":",
"sudo",
"(",
"\"sudo DEBIAN_FRONTEND=noninteractive apt-get -y -o \"",
"\"Dpkg::Options::='--force-confdef' \"",
"\"-o Dpkg::Options::='--force-confold' upgrade --force-yes\"",
")",
"else",
":",
"sudo",
"(",
"\"sudo DEBIAN_FRONTEND=noninteractive apt-get -y -o \"",
"\"Dpkg::Options::='--force-confdef' -o \"",
"\"Dpkg::Options::='--force-confold' upgrade\"",
")"
] |
installs OS updates
|
[
"installs",
"OS",
"updates"
] |
train
|
https://github.com/pyBookshelf/bookshelf/blob/a6770678e735de95b194f6e6989223970db5f654/bookshelf/api_v2/os_helpers.py#L319-L341
|
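A usage sketch combining linux_distribution and install_os_updates from the same module, assuming an active Fabric host with sudo rights.
from bookshelf.api_v2.os_helpers import install_os_updates, linux_distribution
# detect the distribution on the current host, then apply pending updates
distro = linux_distribution()
install_os_updates(distro, force=False)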
moralrecordings/mrcrowbar
|
mrcrowbar/statistics.py
|
Stats.ansi_format
|
def ansi_format( self, width=64, height=12 ):
"""Return a human readable ANSI-terminal printout of the stats.
width
Custom width for the graph (in characters).
height
Custom height for the graph (in characters).
"""
from mrcrowbar.ansi import format_bar_graph_iter
if (256 % width) != 0:
raise ValueError( 'Width of the histogram must be a divisor of 256' )
elif (width <= 0):
raise ValueError( 'Width of the histogram must be greater than zero' )
elif (width > 256):
raise ValueError( 'Width of the histogram must be less than or equal to 256' )
buckets = self.histogram( width )
result = []
for line in format_bar_graph_iter( buckets, width=width, height=height ):
result.append( ' {}\n'.format( line ) )
result.append( '╘'+('═'*width)+'╛\n' )
result.append( 'entropy: {:.10f}\n'.format( self.entropy ) )
result.append( 'samples: {}'.format( self.samples ) )
return ''.join( result )
|
python
|
def ansi_format( self, width=64, height=12 ):
"""Return a human readable ANSI-terminal printout of the stats.
width
Custom width for the graph (in characters).
height
Custom height for the graph (in characters).
"""
from mrcrowbar.ansi import format_bar_graph_iter
if (256 % width) != 0:
raise ValueError( 'Width of the histogram must be a divisor of 256' )
elif (width <= 0):
raise ValueError( 'Width of the histogram must be greater than zero' )
elif (width > 256):
raise ValueError( 'Width of the histogram must be less than or equal to 256' )
buckets = self.histogram( width )
result = []
for line in format_bar_graph_iter( buckets, width=width, height=height ):
result.append( ' {}\n'.format( line ) )
result.append( '╘'+('═'*width)+'╛\n' )
result.append( 'entropy: {:.10f}\n'.format( self.entropy ) )
result.append( 'samples: {}'.format( self.samples ) )
return ''.join( result )
|
[
"def",
"ansi_format",
"(",
"self",
",",
"width",
"=",
"64",
",",
"height",
"=",
"12",
")",
":",
"from",
"mrcrowbar",
".",
"ansi",
"import",
"format_bar_graph_iter",
"if",
"(",
"256",
"%",
"width",
")",
"!=",
"0",
":",
"raise",
"ValueError",
"(",
"'Width of the histogram must be a divisor of 256'",
")",
"elif",
"(",
"width",
"<=",
"0",
")",
":",
"raise",
"ValueError",
"(",
"'Width of the histogram must be greater than zero'",
")",
"elif",
"(",
"width",
">",
"256",
")",
":",
"raise",
"ValueError",
"(",
"'Width of the histogram must be less than or equal to 256'",
")",
"buckets",
"=",
"self",
".",
"histogram",
"(",
"width",
")",
"result",
"=",
"[",
"]",
"for",
"line",
"in",
"format_bar_graph_iter",
"(",
"buckets",
",",
"width",
"=",
"width",
",",
"height",
"=",
"height",
")",
":",
"result",
".",
"append",
"(",
"' {}\\n'",
".",
"format",
"(",
"line",
")",
")",
"result",
".",
"append",
"(",
"'╘'+(",
"'",
"═",
"'*wid",
"t",
"h)+'╛",
"\\",
"n",
"' )",
"",
"result",
".",
"append",
"(",
"'entropy: {:.10f}\\n'",
".",
"format",
"(",
"self",
".",
"entropy",
")",
")",
"result",
".",
"append",
"(",
"'samples: {}'",
".",
"format",
"(",
"self",
".",
"samples",
")",
")",
"return",
"''",
".",
"join",
"(",
"result",
")"
] |
Return a human readable ANSI-terminal printout of the stats.
width
Custom width for the graph (in characters).
height
Custom height for the graph (in characters).
|
[
"Return",
"a",
"human",
"readable",
"ANSI",
"-",
"terminal",
"printout",
"of",
"the",
"stats",
"."
] |
train
|
https://github.com/moralrecordings/mrcrowbar/blob/b1ed882c4555552e7656b2d84aca543184577fa3/mrcrowbar/statistics.py#L34-L59
|
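A usage sketch for Stats.ansi_format, assuming stats is an already-constructed mrcrowbar.statistics.Stats instance (construction is not shown in this excerpt); the width must evenly divide 256.
# 64 divides 256, so this width is accepted; e.g. width=48 would raise ValueError
print(stats.ansi_format(width=64, height=8))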
saltant-org/saltant-py
|
saltant/models/task_whitelist.py
|
TaskWhitelist.put
|
def put(self):
"""Updates this task whitelist on the saltant server.
Returns:
:class:`saltant.models.task_whitelist.TaskWhitelist`:
A task whitelist model instance representing the task
whitelist just updated.
"""
return self.manager.put(
id=self.id,
name=self.name,
description=self.description,
whitelisted_container_task_types=(
self.whitelisted_container_task_types
),
whitelisted_executable_task_types=(
self.whitelisted_executable_task_types
),
)
|
python
|
def put(self):
"""Updates this task whitelist on the saltant server.
Returns:
:class:`saltant.models.task_whitelist.TaskWhitelist`:
A task whitelist model instance representing the task
whitelist just updated.
"""
return self.manager.put(
id=self.id,
name=self.name,
description=self.description,
whitelisted_container_task_types=(
self.whitelisted_container_task_types
),
whitelisted_executable_task_types=(
self.whitelisted_executable_task_types
),
)
|
[
"def",
"put",
"(",
"self",
")",
":",
"return",
"self",
".",
"manager",
".",
"put",
"(",
"id",
"=",
"self",
".",
"id",
",",
"name",
"=",
"self",
".",
"name",
",",
"description",
"=",
"self",
".",
"description",
",",
"whitelisted_container_task_types",
"=",
"(",
"self",
".",
"whitelisted_container_task_types",
")",
",",
"whitelisted_executable_task_types",
"=",
"(",
"self",
".",
"whitelisted_executable_task_types",
")",
",",
")"
] |
Updates this task whitelist on the saltant server.
Returns:
:class:`saltant.models.task_whitelist.TaskWhitelist`:
A task whitelist model instance representing the task
whitelist just updated.
|
[
"Updates",
"this",
"task",
"whitelist",
"on",
"the",
"saltant",
"server",
"."
] |
train
|
https://github.com/saltant-org/saltant-py/blob/bf3bdbc4ec9c772c7f621f8bd6a76c5932af68be/saltant/models/task_whitelist.py#L96-L114
|
saltant-org/saltant-py
|
saltant/models/task_whitelist.py
|
TaskWhitelistManager.create
|
def create(
self,
name,
description="",
whitelisted_container_task_types=None,
whitelisted_executable_task_types=None,
):
"""Create a task whitelist.
Args:
name (str): The name of the task whitelist.
description (str, optional): A description of the task whitelist.
whitelisted_container_task_types (list, optional): A list of
whitelisted container task type IDs.
whitelisted_executable_task_types (list, optional): A list
of whitelisted executable task type IDs.
Returns:
:class:`saltant.models.task_whitelist.TaskWhitelist`:
A task whitelist model instance representing the task
whitelist just created.
"""
# Translate whitelists None to [] if necessary
if whitelisted_container_task_types is None:
whitelisted_container_task_types = []
if whitelisted_executable_task_types is None:
whitelisted_executable_task_types = []
# Create the object
request_url = self._client.base_api_url + self.list_url
data_to_post = {
"name": name,
"description": description,
"whitelisted_container_task_types": whitelisted_container_task_types,
"whitelisted_executable_task_types": whitelisted_executable_task_types,
}
response = self._client.session.post(request_url, data=data_to_post)
# Validate that the request was successful
self.validate_request_success(
response_text=response.text,
request_url=request_url,
status_code=response.status_code,
expected_status_code=HTTP_201_CREATED,
)
# Return a model instance representing the task instance
return self.response_data_to_model_instance(response.json())
|
python
|
def create(
self,
name,
description="",
whitelisted_container_task_types=None,
whitelisted_executable_task_types=None,
):
"""Create a task whitelist.
Args:
name (str): The name of the task whitelist.
description (str, optional): A description of the task whitelist.
whitelisted_container_task_types (list, optional): A list of
whitelisted container task type IDs.
whitelisted_executable_task_types (list, optional): A list
of whitelisted executable task type IDs.
Returns:
:class:`saltant.models.task_whitelist.TaskWhitelist`:
A task whitelist model instance representing the task
whitelist just created.
"""
# Translate whitelists None to [] if necessary
if whitelisted_container_task_types is None:
whitelisted_container_task_types = []
if whitelisted_executable_task_types is None:
whitelisted_executable_task_types = []
# Create the object
request_url = self._client.base_api_url + self.list_url
data_to_post = {
"name": name,
"description": description,
"whitelisted_container_task_types": whitelisted_container_task_types,
"whitelisted_executable_task_types": whitelisted_executable_task_types,
}
response = self._client.session.post(request_url, data=data_to_post)
# Validate that the request was successful
self.validate_request_success(
response_text=response.text,
request_url=request_url,
status_code=response.status_code,
expected_status_code=HTTP_201_CREATED,
)
# Return a model instance representing the task instance
return self.response_data_to_model_instance(response.json())
|
[
"def",
"create",
"(",
"self",
",",
"name",
",",
"description",
"=",
"\"\"",
",",
"whitelisted_container_task_types",
"=",
"None",
",",
"whitelisted_executable_task_types",
"=",
"None",
",",
")",
":",
"# Translate whitelists None to [] if necessary",
"if",
"whitelisted_container_task_types",
"is",
"None",
":",
"whitelisted_container_task_types",
"=",
"[",
"]",
"if",
"whitelisted_executable_task_types",
"is",
"None",
":",
"whitelisted_executable_task_types",
"=",
"[",
"]",
"# Create the object",
"request_url",
"=",
"self",
".",
"_client",
".",
"base_api_url",
"+",
"self",
".",
"list_url",
"data_to_post",
"=",
"{",
"\"name\"",
":",
"name",
",",
"\"description\"",
":",
"description",
",",
"\"whitelisted_container_task_types\"",
":",
"whitelisted_container_task_types",
",",
"\"whitelisted_executable_task_types\"",
":",
"whitelisted_executable_task_types",
",",
"}",
"response",
"=",
"self",
".",
"_client",
".",
"session",
".",
"post",
"(",
"request_url",
",",
"data",
"=",
"data_to_post",
")",
"# Validate that the request was successful",
"self",
".",
"validate_request_success",
"(",
"response_text",
"=",
"response",
".",
"text",
",",
"request_url",
"=",
"request_url",
",",
"status_code",
"=",
"response",
".",
"status_code",
",",
"expected_status_code",
"=",
"HTTP_201_CREATED",
",",
")",
"# Return a model instance representing the task instance",
"return",
"self",
".",
"response_data_to_model_instance",
"(",
"response",
".",
"json",
"(",
")",
")"
] |
Create a task whitelist.
Args:
name (str): The name of the task whitelist.
description (str, optional): A description of the task whitelist.
whitelisted_container_task_types (list, optional): A list of
whitelisted container task type IDs.
whitelisted_executable_task_types (list, optional): A list
of whitelisted executable task type IDs.
Returns:
:class:`saltant.models.task_whitelist.TaskWhitelist`:
A task whitelist model instance representing the task
whitelist just created.
|
[
"Create",
"a",
"task",
"whitelist",
"."
] |
train
|
https://github.com/saltant-org/saltant-py/blob/bf3bdbc4ec9c772c7f621f8bd6a76c5932af68be/saltant/models/task_whitelist.py#L163-L212
|
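A usage sketch for TaskWhitelistManager.create, assuming manager is an already-configured TaskWhitelistManager bound to an authenticated saltant client (client construction is not shown here); the task type IDs are placeholders.
whitelist = manager.create(
    name='gpu-jobs',
    description='task types allowed on GPU workers',
    whitelisted_container_task_types=[1, 2],  # placeholder IDs
    whitelisted_executable_task_types=[7],    # placeholder IDs
)
print(whitelist.id, whitelist.name)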
saltant-org/saltant-py
|
saltant/models/task_whitelist.py
|
TaskWhitelistManager.patch
|
def patch(
self,
id,
name=None,
description=None,
whitelisted_container_task_types=None,
whitelisted_executable_task_types=None,
):
"""Partially updates a task whitelist on the saltant server.
Args:
id (int): The ID of the task whitelist.
name (str, optional): The name of the task whitelist.
description (str, optional): A description of the task whitelist.
whitelisted_container_task_types (list, optional): A list of
whitelisted container task type IDs.
whitelisted_executable_task_types (list, optional): A list
of whitelisted executable task type IDs.
Returns:
:class:`saltant.models.task_whitelist.TaskWhitelist`:
A task whitelist model instance representing the task
whitelist just updated.
"""
# Update the object
request_url = self._client.base_api_url + self.detail_url.format(id=id)
data_to_patch = {}
if name is not None:
data_to_patch["name"] = name
if description is not None:
data_to_patch["description"] = description
if whitelisted_container_task_types is not None:
data_to_patch[
"whitelisted_container_task_types"
] = whitelisted_container_task_types
if whitelisted_executable_task_types is not None:
data_to_patch[
"whitelisted_executable_task_types"
] = whitelisted_executable_task_types
response = self._client.session.patch(request_url, data=data_to_patch)
# Validate that the request was successful
self.validate_request_success(
response_text=response.text,
request_url=request_url,
status_code=response.status_code,
expected_status_code=HTTP_200_OK,
)
# Return a model instance representing the task instance
return self.response_data_to_model_instance(response.json())
|
python
|
def patch(
self,
id,
name=None,
description=None,
whitelisted_container_task_types=None,
whitelisted_executable_task_types=None,
):
"""Partially updates a task whitelist on the saltant server.
Args:
id (int): The ID of the task whitelist.
name (str, optional): The name of the task whitelist.
description (str, optional): A description of the task whitelist.
whitelisted_container_task_types (list, optional): A list of
whitelisted container task type IDs.
whitelisted_executable_task_types (list, optional): A list
of whitelisted executable task type IDs.
Returns:
:class:`saltant.models.task_whitelist.TaskWhitelist`:
A task whitelist model instance representing the task
whitelist just updated.
"""
# Update the object
request_url = self._client.base_api_url + self.detail_url.format(id=id)
data_to_patch = {}
if name is not None:
data_to_patch["name"] = name
if description is not None:
data_to_patch["description"] = description
if whitelisted_container_task_types is not None:
data_to_patch[
"whitelisted_container_task_types"
] = whitelisted_container_task_types
if whitelisted_executable_task_types is not None:
data_to_patch[
"whitelisted_executable_task_types"
] = whitelisted_executable_task_types
response = self._client.session.patch(request_url, data=data_to_patch)
# Validate that the request was successful
self.validate_request_success(
response_text=response.text,
request_url=request_url,
status_code=response.status_code,
expected_status_code=HTTP_200_OK,
)
# Return a model instance representing the task instance
return self.response_data_to_model_instance(response.json())
|
[
"def",
"patch",
"(",
"self",
",",
"id",
",",
"name",
"=",
"None",
",",
"description",
"=",
"None",
",",
"whitelisted_container_task_types",
"=",
"None",
",",
"whitelisted_executable_task_types",
"=",
"None",
",",
")",
":",
"# Update the object",
"request_url",
"=",
"self",
".",
"_client",
".",
"base_api_url",
"+",
"self",
".",
"detail_url",
".",
"format",
"(",
"id",
"=",
"id",
")",
"data_to_patch",
"=",
"{",
"}",
"if",
"name",
"is",
"not",
"None",
":",
"data_to_patch",
"[",
"\"name\"",
"]",
"=",
"name",
"if",
"description",
"is",
"not",
"None",
":",
"data_to_patch",
"[",
"\"description\"",
"]",
"=",
"description",
"if",
"whitelisted_container_task_types",
"is",
"not",
"None",
":",
"data_to_patch",
"[",
"\"whitelisted_container_task_types\"",
"]",
"=",
"whitelisted_container_task_types",
"if",
"whitelisted_executable_task_types",
"is",
"not",
"None",
":",
"data_to_patch",
"[",
"\"whitelisted_executable_task_types\"",
"]",
"=",
"whitelisted_executable_task_types",
"response",
"=",
"self",
".",
"_client",
".",
"session",
".",
"patch",
"(",
"request_url",
",",
"data",
"=",
"data_to_patch",
")",
"# Validate that the request was successful",
"self",
".",
"validate_request_success",
"(",
"response_text",
"=",
"response",
".",
"text",
",",
"request_url",
"=",
"request_url",
",",
"status_code",
"=",
"response",
".",
"status_code",
",",
"expected_status_code",
"=",
"HTTP_200_OK",
",",
")",
"# Return a model instance representing the task instance",
"return",
"self",
".",
"response_data_to_model_instance",
"(",
"response",
".",
"json",
"(",
")",
")"
] |
Partially updates a task whitelist on the saltant server.
Args:
id (int): The ID of the task whitelist.
name (str, optional): The name of the task whitelist.
description (str, optional): A description of the task whitelist.
whitelisted_container_task_types (list, optional): A list of
whitelisted container task type IDs.
whitelisted_executable_task_types (list, optional): A list
of whitelisted executable task type IDs.
Returns:
:class:`saltant.models.task_whitelist.TaskWhitelist`:
A task whitelist model instance representing the task
whitelist just updated.
|
[
"Partially",
"updates",
"a",
"task",
"whitelist",
"on",
"the",
"saltant",
"server",
"."
] |
train
|
https://github.com/saltant-org/saltant-py/blob/bf3bdbc4ec9c772c7f621f8bd6a76c5932af68be/saltant/models/task_whitelist.py#L214-L270
|
moralrecordings/mrcrowbar
|
mrcrowbar/utils.py
|
enable_logging
|
def enable_logging( level='WARNING' ):
"""Enable sending logs to stderr. Useful for shell sessions.
level
Logging threshold, as defined in the logging module of the Python
standard library. Defaults to 'WARNING'.
"""
log = logging.getLogger( 'mrcrowbar' )
log.setLevel( level )
out = logging.StreamHandler()
out.setLevel( level )
form = logging.Formatter( '[%(levelname)s] %(name)s - %(message)s' )
out.setFormatter( form )
log.addHandler( out )
|
python
|
def enable_logging( level='WARNING' ):
"""Enable sending logs to stderr. Useful for shell sessions.
level
Logging threshold, as defined in the logging module of the Python
standard library. Defaults to 'WARNING'.
"""
log = logging.getLogger( 'mrcrowbar' )
log.setLevel( level )
out = logging.StreamHandler()
out.setLevel( level )
form = logging.Formatter( '[%(levelname)s] %(name)s - %(message)s' )
out.setFormatter( form )
log.addHandler( out )
|
[
"def",
"enable_logging",
"(",
"level",
"=",
"'WARNING'",
")",
":",
"log",
"=",
"logging",
".",
"getLogger",
"(",
"'mrcrowbar'",
")",
"log",
".",
"setLevel",
"(",
"level",
")",
"out",
"=",
"logging",
".",
"StreamHandler",
"(",
")",
"out",
".",
"setLevel",
"(",
"level",
")",
"form",
"=",
"logging",
".",
"Formatter",
"(",
"'[%(levelname)s] %(name)s - %(message)s'",
")",
"out",
".",
"setFormatter",
"(",
"form",
")",
"log",
".",
"addHandler",
"(",
"out",
")"
] |
Enable sending logs to stderr. Useful for shell sessions.
level
Logging threshold, as defined in the logging module of the Python
standard library. Defaults to 'WARNING'.
|
[
"Enable",
"sending",
"logs",
"to",
"stderr",
".",
"Useful",
"for",
"shell",
"sessions",
"."
] |
train
|
https://github.com/moralrecordings/mrcrowbar/blob/b1ed882c4555552e7656b2d84aca543184577fa3/mrcrowbar/utils.py#L15-L28
|
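A one-line usage sketch for enable_logging; the level argument accepts the standard logging level names.
from mrcrowbar.utils import enable_logging
enable_logging('DEBUG')  # mrcrowbar log records now go to stderr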
moralrecordings/mrcrowbar
|
mrcrowbar/utils.py
|
find_all_iter
|
def find_all_iter( source, substring, start=None, end=None, overlap=False ):
"""Iterate through every location a substring can be found in a source string.
source
The source string to search.
start
Start offset to read from (default: start)
end
End offset to stop reading at (default: end)
overlap
Whether to return overlapping matches (default: false)
"""
data = source
base = 0
if end is not None:
data = data[:end]
if start is not None:
data = data[start:]
base = start
pointer = 0
increment = 1 if overlap else (len( substring ) or 1)
while True:
pointer = data.find( substring, pointer )
if pointer == -1:
return
yield base+pointer
pointer += increment
|
python
|
def find_all_iter( source, substring, start=None, end=None, overlap=False ):
"""Iterate through every location a substring can be found in a source string.
source
The source string to search.
start
Start offset to read from (default: start)
end
End offset to stop reading at (default: end)
overlap
Whether to return overlapping matches (default: false)
"""
data = source
base = 0
if end is not None:
data = data[:end]
if start is not None:
data = data[start:]
base = start
pointer = 0
increment = 1 if overlap else (len( substring ) or 1)
while True:
pointer = data.find( substring, pointer )
if pointer == -1:
return
yield base+pointer
pointer += increment
|
[
"def",
"find_all_iter",
"(",
"source",
",",
"substring",
",",
"start",
"=",
"None",
",",
"end",
"=",
"None",
",",
"overlap",
"=",
"False",
")",
":",
"data",
"=",
"source",
"base",
"=",
"0",
"if",
"end",
"is",
"not",
"None",
":",
"data",
"=",
"data",
"[",
":",
"end",
"]",
"if",
"start",
"is",
"not",
"None",
":",
"data",
"=",
"data",
"[",
"start",
":",
"]",
"base",
"=",
"start",
"pointer",
"=",
"0",
"increment",
"=",
"1",
"if",
"overlap",
"else",
"(",
"len",
"(",
"substring",
")",
"or",
"1",
")",
"while",
"True",
":",
"pointer",
"=",
"data",
".",
"find",
"(",
"substring",
",",
"pointer",
")",
"if",
"pointer",
"==",
"-",
"1",
":",
"return",
"yield",
"base",
"+",
"pointer",
"pointer",
"+=",
"increment"
] |
Iterate through every location a substring can be found in a source string.
source
The source string to search.
start
Start offset to read from (default: start)
end
End offset to stop reading at (default: end)
overlap
Whether to return overlapping matches (default: false)
|
[
"Iterate",
"through",
"every",
"location",
"a",
"substring",
"can",
"be",
"found",
"in",
"a",
"source",
"string",
"."
] |
train
|
https://github.com/moralrecordings/mrcrowbar/blob/b1ed882c4555552e7656b2d84aca543184577fa3/mrcrowbar/utils.py#L51-L80
|
moralrecordings/mrcrowbar
|
mrcrowbar/utils.py
|
find_all
|
def find_all( source, substring, start=None, end=None, overlap=False ):
"""Return every location a substring can be found in a source string.
source
The source string to search.
start
Start offset to read from (default: start)
end
End offset to stop reading at (default: end)
overlap
Whether to return overlapping matches (default: false)
"""
return [x for x in find_all_iter( source, substring, start, end, overlap )]
|
python
|
def find_all( source, substring, start=None, end=None, overlap=False ):
"""Return every location a substring can be found in a source string.
source
The source string to search.
start
Start offset to read from (default: start)
end
End offset to stop reading at (default: end)
overlap
Whether to return overlapping matches (default: false)
"""
return [x for x in find_all_iter( source, substring, start, end, overlap )]
|
[
"def",
"find_all",
"(",
"source",
",",
"substring",
",",
"start",
"=",
"None",
",",
"end",
"=",
"None",
",",
"overlap",
"=",
"False",
")",
":",
"return",
"[",
"x",
"for",
"x",
"in",
"find_all_iter",
"(",
"source",
",",
"substring",
",",
"start",
",",
"end",
",",
"overlap",
")",
"]"
] |
Return every location a substring can be found in a source string.
source
The source string to search.
start
Start offset to read from (default: start)
end
End offset to stop reading at (default: end)
overlap
Whether to return overlapping matches (default: false)
|
[
"Return",
"every",
"location",
"a",
"substring",
"can",
"be",
"found",
"in",
"a",
"source",
"string",
"."
] |
train
|
https://github.com/moralrecordings/mrcrowbar/blob/b1ed882c4555552e7656b2d84aca543184577fa3/mrcrowbar/utils.py#L83-L98
|
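A usage sketch for find_all showing the effect of the overlap flag; the expected results follow directly from the implementation above.
from mrcrowbar.utils import find_all
find_all('aaaa', 'aa')                # [0, 2]  (non-overlapping matches)
find_all('aaaa', 'aa', overlap=True)  # [0, 1, 2]
find_all(b'banana', b'an', start=2)   # [3]  (offsets are relative to the full source)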
moralrecordings/mrcrowbar
|
mrcrowbar/utils.py
|
basic_diff
|
def basic_diff( source1, source2, start=None, end=None ):
"""Perform a basic diff between two equal-sized binary strings and
return a list of (offset, size) tuples denoting the differences.
source1
The first byte string source.
source2
The second byte string source.
start
Start offset to read from (default: start)
end
End offset to stop reading at (default: end)
"""
start = start if start is not None else 0
end = end if end is not None else min( len( source1 ), len( source2 ) )
end_point = min( end, len( source1 ), len( source2 ) )
pointer = start
diff_start = None
results = []
while pointer < end_point:
if source1[pointer] != source2[pointer]:
if diff_start is None:
diff_start = pointer
else:
if diff_start is not None:
results.append( (diff_start, pointer-diff_start) )
diff_start = None
pointer += 1
if diff_start is not None:
results.append( (diff_start, pointer-diff_start) )
diff_start = None
return results
|
python
|
def basic_diff( source1, source2, start=None, end=None ):
"""Perform a basic diff between two equal-sized binary strings and
return a list of (offset, size) tuples denoting the differences.
source1
The first byte string source.
source2
The second byte string source.
start
Start offset to read from (default: start)
end
End offset to stop reading at (default: end)
"""
start = start if start is not None else 0
end = end if end is not None else min( len( source1 ), len( source2 ) )
end_point = min( end, len( source1 ), len( source2 ) )
pointer = start
diff_start = None
results = []
while pointer < end_point:
if source1[pointer] != source2[pointer]:
if diff_start is None:
diff_start = pointer
else:
if diff_start is not None:
results.append( (diff_start, pointer-diff_start) )
diff_start = None
pointer += 1
if diff_start is not None:
results.append( (diff_start, pointer-diff_start) )
diff_start = None
return results
|
[
"def",
"basic_diff",
"(",
"source1",
",",
"source2",
",",
"start",
"=",
"None",
",",
"end",
"=",
"None",
")",
":",
"start",
"=",
"start",
"if",
"start",
"is",
"not",
"None",
"else",
"0",
"end",
"=",
"end",
"if",
"end",
"is",
"not",
"None",
"else",
"min",
"(",
"len",
"(",
"source1",
")",
",",
"len",
"(",
"source2",
")",
")",
"end_point",
"=",
"min",
"(",
"end",
",",
"len",
"(",
"source1",
")",
",",
"len",
"(",
"source2",
")",
")",
"pointer",
"=",
"start",
"diff_start",
"=",
"None",
"results",
"=",
"[",
"]",
"while",
"pointer",
"<",
"end_point",
":",
"if",
"source1",
"[",
"pointer",
"]",
"!=",
"source2",
"[",
"pointer",
"]",
":",
"if",
"diff_start",
"is",
"None",
":",
"diff_start",
"=",
"pointer",
"else",
":",
"if",
"diff_start",
"is",
"not",
"None",
":",
"results",
".",
"append",
"(",
"(",
"diff_start",
",",
"pointer",
"-",
"diff_start",
")",
")",
"diff_start",
"=",
"None",
"pointer",
"+=",
"1",
"if",
"diff_start",
"is",
"not",
"None",
":",
"results",
".",
"append",
"(",
"(",
"diff_start",
",",
"pointer",
"-",
"diff_start",
")",
")",
"diff_start",
"=",
"None",
"return",
"results"
] |
Perform a basic diff between two equal-sized binary strings and
return a list of (offset, size) tuples denoting the differences.
source1
The first byte string source.
source2
The second byte string source.
start
Start offset to read from (default: start)
end
End offset to stop reading at (default: end)
|
[
"Perform",
"a",
"basic",
"diff",
"between",
"two",
"equal",
"-",
"sized",
"binary",
"strings",
"and",
"return",
"a",
"list",
"of",
"(",
"offset",
"size",
")",
"tuples",
"denoting",
"the",
"differences",
"."
] |
train
|
https://github.com/moralrecordings/mrcrowbar/blob/b1ed882c4555552e7656b2d84aca543184577fa3/mrcrowbar/utils.py#L101-L137
|
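A usage sketch for basic_diff; each tuple gives the (offset, length) of a differing run, per the implementation above.
from mrcrowbar.utils import basic_diff
basic_diff(b'ABCDEF', b'AxCDyF')  # [(1, 1), (4, 1)]
basic_diff(b'ABCDEF', b'ABCDEF')  # []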
moralrecordings/mrcrowbar
|
mrcrowbar/utils.py
|
hexdump_iter
|
def hexdump_iter( source, start=None, end=None, length=None, major_len=8, minor_len=4, colour=True, address_base=None ):
"""Return the contents of a byte string in tabular hexadecimal/ASCII format.
source
The byte string to print.
start
Start offset to read from (default: start)
end
End offset to stop reading at (default: end)
length
Length to read in (optional replacement for end)
major_len
Number of hexadecimal groups per line
minor_len
Number of bytes per hexadecimal group
colour
Add ANSI colour formatting to output (default: true)
address_base
Base address to use for labels (default: start)
Raises ValueError if both end and length are defined.
"""
assert is_bytes( source )
start, end = bounds( start, end, length, len( source ) )
start = max( start, 0 )
end = min( end, len( source ) )
if len( source ) == 0 or (start == end == 0):
return
address_base_offset = address_base-start if address_base is not None else 0
for offset in range( start, end, minor_len*major_len ):
yield ansi.format_hexdump_line( source, offset, end, major_len, minor_len, colour, address_base_offset=address_base_offset )
return
|
python
|
def hexdump_iter( source, start=None, end=None, length=None, major_len=8, minor_len=4, colour=True, address_base=None ):
"""Return the contents of a byte string in tabular hexadecimal/ASCII format.
source
The byte string to print.
start
Start offset to read from (default: start)
end
End offset to stop reading at (default: end)
length
Length to read in (optional replacement for end)
major_len
Number of hexadecimal groups per line
minor_len
Number of bytes per hexadecimal group
colour
Add ANSI colour formatting to output (default: true)
address_base
Base address to use for labels (default: start)
Raises ValueError if both end and length are defined.
"""
assert is_bytes( source )
start, end = bounds( start, end, length, len( source ) )
start = max( start, 0 )
end = min( end, len( source ) )
if len( source ) == 0 or (start == end == 0):
return
address_base_offset = address_base-start if address_base is not None else 0
for offset in range( start, end, minor_len*major_len ):
yield ansi.format_hexdump_line( source, offset, end, major_len, minor_len, colour, address_base_offset=address_base_offset )
return
|
[
"def",
"hexdump_iter",
"(",
"source",
",",
"start",
"=",
"None",
",",
"end",
"=",
"None",
",",
"length",
"=",
"None",
",",
"major_len",
"=",
"8",
",",
"minor_len",
"=",
"4",
",",
"colour",
"=",
"True",
",",
"address_base",
"=",
"None",
")",
":",
"assert",
"is_bytes",
"(",
"source",
")",
"start",
",",
"end",
"=",
"bounds",
"(",
"start",
",",
"end",
",",
"length",
",",
"len",
"(",
"source",
")",
")",
"start",
"=",
"max",
"(",
"start",
",",
"0",
")",
"end",
"=",
"min",
"(",
"end",
",",
"len",
"(",
"source",
")",
")",
"if",
"len",
"(",
"source",
")",
"==",
"0",
"or",
"(",
"start",
"==",
"end",
"==",
"0",
")",
":",
"return",
"address_base_offset",
"=",
"address_base",
"-",
"start",
"if",
"address_base",
"is",
"not",
"None",
"else",
"0",
"for",
"offset",
"in",
"range",
"(",
"start",
",",
"end",
",",
"minor_len",
"*",
"major_len",
")",
":",
"yield",
"ansi",
".",
"format_hexdump_line",
"(",
"source",
",",
"offset",
",",
"end",
",",
"major_len",
",",
"minor_len",
",",
"colour",
",",
"address_base_offset",
"=",
"address_base_offset",
")",
"return"
] |
Return the contents of a byte string in tabular hexadecimal/ASCII format.
source
The byte string to print.
start
Start offset to read from (default: start)
end
End offset to stop reading at (default: end)
length
Length to read in (optional replacement for end)
major_len
Number of hexadecimal groups per line
minor_len
Number of bytes per hexadecimal group
colour
Add ANSI colour formatting to output (default: true)
address_base
Base address to use for labels (default: start)
Raises ValueError if both end and length are defined.
|
[
"Return",
"the",
"contents",
"of",
"a",
"byte",
"string",
"in",
"tabular",
"hexadecimal",
"/",
"ASCII",
"format",
".",
"source",
"The",
"byte",
"string",
"to",
"print",
"."
] |
train
|
https://github.com/moralrecordings/mrcrowbar/blob/b1ed882c4555552e7656b2d84aca543184577fa3/mrcrowbar/utils.py#L160-L200
|
moralrecordings/mrcrowbar
|
mrcrowbar/utils.py
|
hexdump
|
def hexdump( source, start=None, end=None, length=None, major_len=8, minor_len=4, colour=True, address_base=None ):
"""Print the contents of a byte string in tabular hexadecimal/ASCII format.
source
The byte string to print.
start
Start offset to read from (default: start)
end
End offset to stop reading at (default: end)
length
Length to read in (optional replacement for end)
major_len
Number of hexadecimal groups per line
minor_len
Number of bytes per hexadecimal group
colour
Add ANSI colour formatting to output (default: true)
address_base
Base address to use for labels (default: start)
Raises ValueError if both end and length are defined.
"""
for line in hexdump_iter( source, start, end, length, major_len, minor_len, colour, address_base ):
print( line )
|
python
|
def hexdump( source, start=None, end=None, length=None, major_len=8, minor_len=4, colour=True, address_base=None ):
"""Print the contents of a byte string in tabular hexadecimal/ASCII format.
source
The byte string to print.
start
Start offset to read from (default: start)
end
End offset to stop reading at (default: end)
length
Length to read in (optional replacement for end)
major_len
Number of hexadecimal groups per line
minor_len
Number of bytes per hexadecimal group
colour
Add ANSI colour formatting to output (default: true)
address_base
Base address to use for labels (default: start)
Raises ValueError if both end and length are defined.
"""
for line in hexdump_iter( source, start, end, length, major_len, minor_len, colour, address_base ):
print( line )
|
[
"def",
"hexdump",
"(",
"source",
",",
"start",
"=",
"None",
",",
"end",
"=",
"None",
",",
"length",
"=",
"None",
",",
"major_len",
"=",
"8",
",",
"minor_len",
"=",
"4",
",",
"colour",
"=",
"True",
",",
"address_base",
"=",
"None",
")",
":",
"for",
"line",
"in",
"hexdump_iter",
"(",
"source",
",",
"start",
",",
"end",
",",
"length",
",",
"major_len",
",",
"minor_len",
",",
"colour",
",",
"address_base",
")",
":",
"print",
"(",
"line",
")"
] |
Print the contents of a byte string in tabular hexadecimal/ASCII format.
source
The byte string to print.
start
Start offset to read from (default: start)
end
End offset to stop reading at (default: end)
length
Length to read in (optional replacement for end)
major_len
Number of hexadecimal groups per line
minor_len
Number of bytes per hexadecimal group
colour
Add ANSI colour formatting to output (default: true)
address_base
Base address to use for labels (default: start)
Raises ValueError if both end and length are defined.
|
[
"Print",
"the",
"contents",
"of",
"a",
"byte",
"string",
"in",
"tabular",
"hexadecimal",
"/",
"ASCII",
"format",
".",
"source",
"The",
"byte",
"string",
"to",
"print",
"."
] |
train
|
https://github.com/moralrecordings/mrcrowbar/blob/b1ed882c4555552e7656b2d84aca543184577fa3/mrcrowbar/utils.py#L203-L233
|
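A usage sketch for hexdump; colour=False keeps the output free of ANSI escape codes, and end and length are mutually exclusive.
from mrcrowbar.utils import hexdump
data = bytes(range(256))
hexdump(data, length=64, colour=False)           # first 64 bytes
hexdump(data, start=128, end=160, colour=False)  # a 32-byte window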
moralrecordings/mrcrowbar
|
mrcrowbar/utils.py
|
hexdump_diff_iter
|
def hexdump_diff_iter( source1, source2, start=None, end=None, length=None, major_len=8, minor_len=4, colour=True, before=2, after=2, address_base=None ):
"""Returns the differences between two byte strings in tabular hexadecimal/ASCII format.
source1
The first byte string source.
source2
The second byte string source.
start
Start offset to read from (default: start)
end
End offset to stop reading at (default: end)
length
Length to read in (optional replacement for end)
major_len
Number of hexadecimal groups per line
minor_len
Number of bytes per hexadecimal group
colour
Add ANSI colour formatting to output (default: true)
before
Number of lines of context preceeding a match to show
after
Number of lines of context following a match to show
address_base
Base address to use for labels (default: start)
Raises ValueError if both end and length are defined.
"""
stride = minor_len*major_len
start = start if start is not None else 0
end = end if end is not None else max( len( source1 ), len( source2 ) )
start = max( start, 0 )
end = min( end, max( len( source1 ), len( source2 ) ) )
address_base_offset = address_base-start if address_base is not None else 0
diff_lines = []
for offset in range( start, end, stride ):
if source1[offset:offset+stride] != source2[offset:offset+stride]:
diff_lines.append( offset )
show_all = before is None or after is None
if show_all:
show_lines = {x: (2 if x in diff_lines else 1) for x in range( start, end, stride )}
else:
show_lines = {x: (2 if x in diff_lines else 0) for x in range( start, end, stride )}
for index in diff_lines:
for b in [index-(x+1)*stride for x in range( before )]:
if b in show_lines and show_lines[b] == 0:
show_lines[b] = 1
for a in [index+(x+1)*stride for x in range( after )]:
if a in show_lines and show_lines[a] == 0:
show_lines[a] = 1
skip = False
for offset in sorted( show_lines.keys() ):
if skip == True and show_lines[offset] != 0:
yield '...'
skip = False
if show_lines[offset] == 2:
check = basic_diff( source1, source2, start=offset, end=offset+stride )
highlights = {}
for (o, l) in check:
for i in range( o, o+l ):
highlights[i] = DIFF_COLOUR_MAP[0]
if offset < len( source1 ):
yield ansi.format_hexdump_line( source1, offset, min( end, len( source1 ) ), major_len, minor_len, colour, prefix='-', highlight_addr=DIFF_COLOUR_MAP[0], highlight_map=highlights, address_base_offset=address_base_offset )
highlights = {k: DIFF_COLOUR_MAP[1] for k in highlights.keys()}
if offset < len( source2 ):
yield ansi.format_hexdump_line( source2, offset, min( end, len( source2 ) ), major_len, minor_len, colour, prefix='+' , highlight_addr=DIFF_COLOUR_MAP[1], highlight_map=highlights, address_base_offset=address_base_offset )
elif show_lines[offset] == 1:
yield ansi.format_hexdump_line( source1, offset, end, major_len, minor_len, colour, prefix=' ', address_base_offset=address_base_offset )
elif show_lines[offset] == 0:
skip = True
if skip == True:
yield '...'
skip = False
return
|
python
|
def hexdump_diff_iter( source1, source2, start=None, end=None, length=None, major_len=8, minor_len=4, colour=True, before=2, after=2, address_base=None ):
"""Returns the differences between two byte strings in tabular hexadecimal/ASCII format.
source1
The first byte string source.
source2
The second byte string source.
start
Start offset to read from (default: start)
end
End offset to stop reading at (default: end)
length
Length to read in (optional replacement for end)
major_len
Number of hexadecimal groups per line
minor_len
Number of bytes per hexadecimal group
colour
Add ANSI colour formatting to output (default: true)
before
Number of lines of context preceeding a match to show
after
Number of lines of context following a match to show
address_base
Base address to use for labels (default: start)
Raises ValueError if both end and length are defined.
"""
stride = minor_len*major_len
start = start if start is not None else 0
end = end if end is not None else max( len( source1 ), len( source2 ) )
start = max( start, 0 )
end = min( end, max( len( source1 ), len( source2 ) ) )
address_base_offset = address_base-start if address_base is not None else 0
diff_lines = []
for offset in range( start, end, stride ):
if source1[offset:offset+stride] != source2[offset:offset+stride]:
diff_lines.append( offset )
show_all = before is None or after is None
if show_all:
show_lines = {x: (2 if x in diff_lines else 1) for x in range( start, end, stride )}
else:
show_lines = {x: (2 if x in diff_lines else 0) for x in range( start, end, stride )}
for index in diff_lines:
for b in [index-(x+1)*stride for x in range( before )]:
if b in show_lines and show_lines[b] == 0:
show_lines[b] = 1
for a in [index+(x+1)*stride for x in range( after )]:
if a in show_lines and show_lines[a] == 0:
show_lines[a] = 1
skip = False
for offset in sorted( show_lines.keys() ):
if skip == True and show_lines[offset] != 0:
yield '...'
skip = False
if show_lines[offset] == 2:
check = basic_diff( source1, source2, start=offset, end=offset+stride )
highlights = {}
for (o, l) in check:
for i in range( o, o+l ):
highlights[i] = DIFF_COLOUR_MAP[0]
if offset < len( source1 ):
yield ansi.format_hexdump_line( source1, offset, min( end, len( source1 ) ), major_len, minor_len, colour, prefix='-', highlight_addr=DIFF_COLOUR_MAP[0], highlight_map=highlights, address_base_offset=address_base_offset )
highlights = {k: DIFF_COLOUR_MAP[1] for k in highlights.keys()}
if offset < len( source2 ):
yield ansi.format_hexdump_line( source2, offset, min( end, len( source2 ) ), major_len, minor_len, colour, prefix='+' , highlight_addr=DIFF_COLOUR_MAP[1], highlight_map=highlights, address_base_offset=address_base_offset )
elif show_lines[offset] == 1:
yield ansi.format_hexdump_line( source1, offset, end, major_len, minor_len, colour, prefix=' ', address_base_offset=address_base_offset )
elif show_lines[offset] == 0:
skip = True
if skip == True:
yield '...'
skip = False
return
|
[
"def",
"hexdump_diff_iter",
"(",
"source1",
",",
"source2",
",",
"start",
"=",
"None",
",",
"end",
"=",
"None",
",",
"length",
"=",
"None",
",",
"major_len",
"=",
"8",
",",
"minor_len",
"=",
"4",
",",
"colour",
"=",
"True",
",",
"before",
"=",
"2",
",",
"after",
"=",
"2",
",",
"address_base",
"=",
"None",
")",
":",
"stride",
"=",
"minor_len",
"*",
"major_len",
"start",
"=",
"start",
"if",
"start",
"is",
"not",
"None",
"else",
"0",
"end",
"=",
"end",
"if",
"end",
"is",
"not",
"None",
"else",
"max",
"(",
"len",
"(",
"source1",
")",
",",
"len",
"(",
"source2",
")",
")",
"start",
"=",
"max",
"(",
"start",
",",
"0",
")",
"end",
"=",
"min",
"(",
"end",
",",
"max",
"(",
"len",
"(",
"source1",
")",
",",
"len",
"(",
"source2",
")",
")",
")",
"address_base_offset",
"=",
"address_base",
"-",
"start",
"if",
"address_base",
"is",
"not",
"None",
"else",
"0",
"diff_lines",
"=",
"[",
"]",
"for",
"offset",
"in",
"range",
"(",
"start",
",",
"end",
",",
"stride",
")",
":",
"if",
"source1",
"[",
"offset",
":",
"offset",
"+",
"stride",
"]",
"!=",
"source2",
"[",
"offset",
":",
"offset",
"+",
"stride",
"]",
":",
"diff_lines",
".",
"append",
"(",
"offset",
")",
"show_all",
"=",
"before",
"is",
"None",
"or",
"after",
"is",
"None",
"if",
"show_all",
":",
"show_lines",
"=",
"{",
"x",
":",
"(",
"2",
"if",
"x",
"in",
"diff_lines",
"else",
"1",
")",
"for",
"x",
"in",
"range",
"(",
"start",
",",
"end",
",",
"stride",
")",
"}",
"else",
":",
"show_lines",
"=",
"{",
"x",
":",
"(",
"2",
"if",
"x",
"in",
"diff_lines",
"else",
"0",
")",
"for",
"x",
"in",
"range",
"(",
"start",
",",
"end",
",",
"stride",
")",
"}",
"for",
"index",
"in",
"diff_lines",
":",
"for",
"b",
"in",
"[",
"index",
"-",
"(",
"x",
"+",
"1",
")",
"*",
"stride",
"for",
"x",
"in",
"range",
"(",
"before",
")",
"]",
":",
"if",
"b",
"in",
"show_lines",
"and",
"show_lines",
"[",
"b",
"]",
"==",
"0",
":",
"show_lines",
"[",
"b",
"]",
"=",
"1",
"for",
"a",
"in",
"[",
"index",
"+",
"(",
"x",
"+",
"1",
")",
"*",
"stride",
"for",
"x",
"in",
"range",
"(",
"after",
")",
"]",
":",
"if",
"a",
"in",
"show_lines",
"and",
"show_lines",
"[",
"a",
"]",
"==",
"0",
":",
"show_lines",
"[",
"a",
"]",
"=",
"1",
"skip",
"=",
"False",
"for",
"offset",
"in",
"sorted",
"(",
"show_lines",
".",
"keys",
"(",
")",
")",
":",
"if",
"skip",
"==",
"True",
"and",
"show_lines",
"[",
"offset",
"]",
"!=",
"0",
":",
"yield",
"'...'",
"skip",
"=",
"False",
"if",
"show_lines",
"[",
"offset",
"]",
"==",
"2",
":",
"check",
"=",
"basic_diff",
"(",
"source1",
",",
"source2",
",",
"start",
"=",
"offset",
",",
"end",
"=",
"offset",
"+",
"stride",
")",
"highlights",
"=",
"{",
"}",
"for",
"(",
"o",
",",
"l",
")",
"in",
"check",
":",
"for",
"i",
"in",
"range",
"(",
"o",
",",
"o",
"+",
"l",
")",
":",
"highlights",
"[",
"i",
"]",
"=",
"DIFF_COLOUR_MAP",
"[",
"0",
"]",
"if",
"offset",
"<",
"len",
"(",
"source1",
")",
":",
"yield",
"ansi",
".",
"format_hexdump_line",
"(",
"source1",
",",
"offset",
",",
"min",
"(",
"end",
",",
"len",
"(",
"source1",
")",
")",
",",
"major_len",
",",
"minor_len",
",",
"colour",
",",
"prefix",
"=",
"'-'",
",",
"highlight_addr",
"=",
"DIFF_COLOUR_MAP",
"[",
"0",
"]",
",",
"highlight_map",
"=",
"highlights",
",",
"address_base_offset",
"=",
"address_base_offset",
")",
"highlights",
"=",
"{",
"k",
":",
"DIFF_COLOUR_MAP",
"[",
"1",
"]",
"for",
"k",
"in",
"highlights",
".",
"keys",
"(",
")",
"}",
"if",
"offset",
"<",
"len",
"(",
"source2",
")",
":",
"yield",
"ansi",
".",
"format_hexdump_line",
"(",
"source2",
",",
"offset",
",",
"min",
"(",
"end",
",",
"len",
"(",
"source2",
")",
")",
",",
"major_len",
",",
"minor_len",
",",
"colour",
",",
"prefix",
"=",
"'+'",
",",
"highlight_addr",
"=",
"DIFF_COLOUR_MAP",
"[",
"1",
"]",
",",
"highlight_map",
"=",
"highlights",
",",
"address_base_offset",
"=",
"address_base_offset",
")",
"elif",
"show_lines",
"[",
"offset",
"]",
"==",
"1",
":",
"yield",
"ansi",
".",
"format_hexdump_line",
"(",
"source1",
",",
"offset",
",",
"end",
",",
"major_len",
",",
"minor_len",
",",
"colour",
",",
"prefix",
"=",
"' '",
",",
"address_base_offset",
"=",
"address_base_offset",
")",
"elif",
"show_lines",
"[",
"offset",
"]",
"==",
"0",
":",
"skip",
"=",
"True",
"if",
"skip",
"==",
"True",
":",
"yield",
"'...'",
"skip",
"=",
"False",
"return"
] |
Returns the differences between two byte strings in tabular hexadecimal/ASCII format.
source1
The first byte string source.
source2
The second byte string source.
start
Start offset to read from (default: start)
end
End offset to stop reading at (default: end)
length
Length to read in (optional replacement for end)
major_len
Number of hexadecimal groups per line
minor_len
Number of bytes per hexadecimal group
colour
Add ANSI colour formatting to output (default: true)
before
Number of lines of context preceeding a match to show
after
Number of lines of context following a match to show
address_base
Base address to use for labels (default: start)
Raises ValueError if both end and length are defined.
|
[
"Returns",
"the",
"differences",
"between",
"two",
"byte",
"strings",
"in",
"tabular",
"hexadecimal",
"/",
"ASCII",
"format",
"."
] |
train
|
https://github.com/moralrecordings/mrcrowbar/blob/b1ed882c4555552e7656b2d84aca543184577fa3/mrcrowbar/utils.py#L236-L324
|
moralrecordings/mrcrowbar
|
mrcrowbar/utils.py
|
hexdump_diff
|
def hexdump_diff( source1, source2, start=None, end=None, length=None, major_len=8, minor_len=4, colour=True, before=2, after=2, address_base=None ):
"""Returns the differences between two byte strings in tabular hexadecimal/ASCII format.
source1
The first byte string source.
source2
The second byte string source.
start
Start offset to read from (default: start)
end
End offset to stop reading at (default: end)
length
Length to read in (optional replacement for end)
major_len
Number of hexadecimal groups per line
minor_len
Number of bytes per hexadecimal group
colour
Add ANSI colour formatting to output (default: true)
before
Number of lines of context preceeding a match to show
after
Number of lines of context following a match to show
address_base
Base address to use for labels (default: start)
Raises ValueError if both end and length are defined.
"""
for line in hexdump_diff_iter( source1, source2, start, end, length, major_len, minor_len, colour, before, after, address_base ):
print( line )
|
python
|
def hexdump_diff( source1, source2, start=None, end=None, length=None, major_len=8, minor_len=4, colour=True, before=2, after=2, address_base=None ):
"""Returns the differences between two byte strings in tabular hexadecimal/ASCII format.
source1
The first byte string source.
source2
The second byte string source.
start
Start offset to read from (default: start)
end
End offset to stop reading at (default: end)
length
Length to read in (optional replacement for end)
major_len
Number of hexadecimal groups per line
minor_len
Number of bytes per hexadecimal group
colour
Add ANSI colour formatting to output (default: true)
before
Number of lines of context preceeding a match to show
after
Number of lines of context following a match to show
address_base
Base address to use for labels (default: start)
Raises ValueError if both end and length are defined.
"""
for line in hexdump_diff_iter( source1, source2, start, end, length, major_len, minor_len, colour, before, after, address_base ):
print( line )
|
[
"def",
"hexdump_diff",
"(",
"source1",
",",
"source2",
",",
"start",
"=",
"None",
",",
"end",
"=",
"None",
",",
"length",
"=",
"None",
",",
"major_len",
"=",
"8",
",",
"minor_len",
"=",
"4",
",",
"colour",
"=",
"True",
",",
"before",
"=",
"2",
",",
"after",
"=",
"2",
",",
"address_base",
"=",
"None",
")",
":",
"for",
"line",
"in",
"hexdump_diff_iter",
"(",
"source1",
",",
"source2",
",",
"start",
",",
"end",
",",
"length",
",",
"major_len",
",",
"minor_len",
",",
"colour",
",",
"before",
",",
"after",
",",
"address_base",
")",
":",
"print",
"(",
"line",
")"
] |
Returns the differences between two byte strings in tabular hexadecimal/ASCII format.
source1
The first byte string source.
source2
The second byte string source.
start
Start offset to read from (default: start)
end
End offset to stop reading at (default: end)
length
Length to read in (optional replacement for end)
major_len
Number of hexadecimal groups per line
minor_len
Number of bytes per hexadecimal group
colour
Add ANSI colour formatting to output (default: true)
before
Number of lines of context preceeding a match to show
after
Number of lines of context following a match to show
address_base
Base address to use for labels (default: start)
Raises ValueError if both end and length are defined.
|
[
"Returns",
"the",
"differences",
"between",
"two",
"byte",
"strings",
"in",
"tabular",
"hexadecimal",
"/",
"ASCII",
"format",
"."
] |
train
|
https://github.com/moralrecordings/mrcrowbar/blob/b1ed882c4555552e7656b2d84aca543184577fa3/mrcrowbar/utils.py#L327-L366
|
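A usage sketch for hexdump_diff; unchanged regions outside the before/after context window are collapsed to '...'.
from mrcrowbar.utils import hexdump_diff
a = bytes(256)
b = bytearray(a)
b[100] = 0xff
hexdump_diff(a, bytes(b), colour=False)  # prints the differing line plus two context lines either side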
moralrecordings/mrcrowbar
|
mrcrowbar/utils.py
|
unpack_bits
|
def unpack_bits( byte ):
"""Expand a bitfield into a 64-bit int (8 bool bytes)."""
longbits = byte & (0x00000000000000ff)
longbits = (longbits | (longbits<<28)) & (0x0000000f0000000f)
longbits = (longbits | (longbits<<14)) & (0x0003000300030003)
longbits = (longbits | (longbits<<7)) & (0x0101010101010101)
return longbits
|
python
|
def unpack_bits( byte ):
"""Expand a bitfield into a 64-bit int (8 bool bytes)."""
longbits = byte & (0x00000000000000ff)
longbits = (longbits | (longbits<<28)) & (0x0000000f0000000f)
longbits = (longbits | (longbits<<14)) & (0x0003000300030003)
longbits = (longbits | (longbits<<7)) & (0x0101010101010101)
return longbits
|
[
"def",
"unpack_bits",
"(",
"byte",
")",
":",
"longbits",
"=",
"byte",
"&",
"(",
"0x00000000000000ff",
")",
"longbits",
"=",
"(",
"longbits",
"|",
"(",
"longbits",
"<<",
"28",
")",
")",
"&",
"(",
"0x0000000f0000000f",
")",
"longbits",
"=",
"(",
"longbits",
"|",
"(",
"longbits",
"<<",
"14",
")",
")",
"&",
"(",
"0x0003000300030003",
")",
"longbits",
"=",
"(",
"longbits",
"|",
"(",
"longbits",
"<<",
"7",
")",
")",
"&",
"(",
"0x0101010101010101",
")",
"return",
"longbits"
] |
Expand a bitfield into a 64-bit int (8 bool bytes).
|
[
"Expand",
"a",
"bitfield",
"into",
"a",
"64",
"-",
"bit",
"int",
"(",
"8",
"bool",
"bytes",
")",
"."
] |
train
|
https://github.com/moralrecordings/mrcrowbar/blob/b1ed882c4555552e7656b2d84aca543184577fa3/mrcrowbar/utils.py#L376-L382
|
moralrecordings/mrcrowbar
|
mrcrowbar/utils.py
|
pack_bits
|
def pack_bits( longbits ):
"""Crunch a 64-bit int (8 bool bytes) into a bitfield."""
byte = longbits & (0x0101010101010101)
byte = (byte | (byte>>7)) & (0x0003000300030003)
byte = (byte | (byte>>14)) & (0x0000000f0000000f)
byte = (byte | (byte>>28)) & (0x00000000000000ff)
return byte
|
python
|
def pack_bits( longbits ):
"""Crunch a 64-bit int (8 bool bytes) into a bitfield."""
byte = longbits & (0x0101010101010101)
byte = (byte | (byte>>7)) & (0x0003000300030003)
byte = (byte | (byte>>14)) & (0x0000000f0000000f)
byte = (byte | (byte>>28)) & (0x00000000000000ff)
return byte
|
[
"def",
"pack_bits",
"(",
"longbits",
")",
":",
"byte",
"=",
"longbits",
"&",
"(",
"0x0101010101010101",
")",
"byte",
"=",
"(",
"byte",
"|",
"(",
"byte",
">>",
"7",
")",
")",
"&",
"(",
"0x0003000300030003",
")",
"byte",
"=",
"(",
"byte",
"|",
"(",
"byte",
">>",
"14",
")",
")",
"&",
"(",
"0x0000000f0000000f",
")",
"byte",
"=",
"(",
"byte",
"|",
"(",
"byte",
">>",
"28",
")",
")",
"&",
"(",
"0x00000000000000ff",
")",
"return",
"byte"
] |
Crunch a 64-bit int (8 bool bytes) into a bitfield.
|
[
"Crunch",
"a",
"64",
"-",
"bit",
"int",
"(",
"8",
"bool",
"bytes",
")",
"into",
"a",
"bitfield",
"."
] |
train
|
https://github.com/moralrecordings/mrcrowbar/blob/b1ed882c4555552e7656b2d84aca543184577fa3/mrcrowbar/utils.py#L385-L391
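A hedged round-trip sketch pairing pack_bits with unpack_bits from the previous entry (same assumed import path); the two functions invert each other for every byte value.
from mrcrowbar.utils import pack_bits, unpack_bits

assert pack_bits(0x0100010000000001) == 0b10100001  # inverse of the unpack_bits example
for byte in range(256):
    assert pack_bits(unpack_bits(byte)) == byte  # full round trip over all byte values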
|
moralrecordings/mrcrowbar
|
mrcrowbar/utils.py
|
pixdump_iter
|
def pixdump_iter( source, start=None, end=None, length=None, width=64, height=None, palette=None ):
"""Return the contents of a byte string as a 256 colour image.
source
The byte string to print.
start
Start offset to read from (default: start)
end
End offset to stop reading at (default: end)
length
Length to read in (optional replacement for end)
width
Width of image to render in pixels (default: 64)
height
Height of image to render in pixels (default: auto)
palette
List of Colours to use (default: test palette)
"""
assert is_bytes( source )
if not palette:
palette = colour.TEST_PALETTE
start = 0 if (start is None) else start
if (end is not None) and (length is not None):
raise ValueError( 'Can\'t define both an end and a length!' )
elif (length is not None):
end = start+length
elif (end is not None):
pass
else:
end = len( source )
start = max( start, 0 )
end = min( end, len( source ) )
if len( source ) == 0 or (start == end == 0):
return iter(())
if height is None:
height = math.ceil( (end-start)/width )
def data_fetch( x_pos, y_pos, frame ):
index = y_pos*width + x_pos + start
if index >= end:
return (0, 0, 0, 0)
return palette[source[index]]
return ansi.format_image_iter( data_fetch, width=width, height=height )
|
python
|
def pixdump_iter( source, start=None, end=None, length=None, width=64, height=None, palette=None ):
"""Return the contents of a byte string as a 256 colour image.
source
The byte string to print.
start
Start offset to read from (default: start)
end
End offset to stop reading at (default: end)
length
Length to read in (optional replacement for end)
width
Width of image to render in pixels (default: 64)
height
Height of image to render in pixels (default: auto)
palette
List of Colours to use (default: test palette)
"""
assert is_bytes( source )
if not palette:
palette = colour.TEST_PALETTE
start = 0 if (start is None) else start
if (end is not None) and (length is not None):
raise ValueError( 'Can\'t define both an end and a length!' )
elif (length is not None):
end = start+length
elif (end is not None):
pass
else:
end = len( source )
start = max( start, 0 )
end = min( end, len( source ) )
if len( source ) == 0 or (start == end == 0):
return iter(())
if height is None:
height = math.ceil( (end-start)/width )
def data_fetch( x_pos, y_pos, frame ):
index = y_pos*width + x_pos + start
if index >= end:
return (0, 0, 0, 0)
return palette[source[index]]
return ansi.format_image_iter( data_fetch, width=width, height=height )
|
[
"def",
"pixdump_iter",
"(",
"source",
",",
"start",
"=",
"None",
",",
"end",
"=",
"None",
",",
"length",
"=",
"None",
",",
"width",
"=",
"64",
",",
"height",
"=",
"None",
",",
"palette",
"=",
"None",
")",
":",
"assert",
"is_bytes",
"(",
"source",
")",
"if",
"not",
"palette",
":",
"palette",
"=",
"colour",
".",
"TEST_PALETTE",
"start",
"=",
"0",
"if",
"(",
"start",
"is",
"None",
")",
"else",
"start",
"if",
"(",
"end",
"is",
"not",
"None",
")",
"and",
"(",
"length",
"is",
"not",
"None",
")",
":",
"raise",
"ValueError",
"(",
"'Can\\'t define both an end and a length!'",
")",
"elif",
"(",
"length",
"is",
"not",
"None",
")",
":",
"end",
"=",
"start",
"+",
"length",
"elif",
"(",
"end",
"is",
"not",
"None",
")",
":",
"pass",
"else",
":",
"end",
"=",
"len",
"(",
"source",
")",
"start",
"=",
"max",
"(",
"start",
",",
"0",
")",
"end",
"=",
"min",
"(",
"end",
",",
"len",
"(",
"source",
")",
")",
"if",
"len",
"(",
"source",
")",
"==",
"0",
"or",
"(",
"start",
"==",
"end",
"==",
"0",
")",
":",
"return",
"iter",
"(",
"(",
")",
")",
"if",
"height",
"is",
"None",
":",
"height",
"=",
"math",
".",
"ceil",
"(",
"(",
"end",
"-",
"start",
")",
"/",
"width",
")",
"def",
"data_fetch",
"(",
"x_pos",
",",
"y_pos",
",",
"frame",
")",
":",
"index",
"=",
"y_pos",
"*",
"width",
"+",
"x_pos",
"+",
"start",
"if",
"index",
">=",
"end",
":",
"return",
"(",
"0",
",",
"0",
",",
"0",
",",
"0",
")",
"return",
"palette",
"[",
"source",
"[",
"index",
"]",
"]",
"return",
"ansi",
".",
"format_image_iter",
"(",
"data_fetch",
",",
"width",
"=",
"width",
",",
"height",
"=",
"height",
")"
] |
Return the contents of a byte string as a 256 colour image.
source
The byte string to print.
start
Start offset to read from (default: start)
end
End offset to stop reading at (default: end)
length
Length to read in (optional replacement for end)
width
Width of image to render in pixels (default: 64)
height
Height of image to render in pixels (default: auto)
palette
List of Colours to use (default: test palette)
|
[
"Return",
"the",
"contents",
"of",
"a",
"byte",
"string",
"as",
"a",
"256",
"colour",
"image",
"."
] |
train
|
https://github.com/moralrecordings/mrcrowbar/blob/b1ed882c4555552e7656b2d84aca543184577fa3/mrcrowbar/utils.py#L394-L447
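A minimal consumption sketch for pixdump_iter, with the import path assumed from the file path; leaving palette as None uses the test palette mentioned in the docstring.
from mrcrowbar import utils

data = bytes(range(256))
# Render the 256 byte values as a 16x16 block; each yielded line is an ANSI-formatted string
for line in utils.pixdump_iter(data, width=16):
    print(line)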
|
moralrecordings/mrcrowbar
|
mrcrowbar/utils.py
|
pixdump
|
def pixdump( source, start=None, end=None, length=None, width=64, height=None, palette=None ):
"""Print the contents of a byte string as a 256 colour image.
source
The byte string to print.
start
Start offset to read from (default: start)
end
End offset to stop reading at (default: end)
length
Length to read in (optional replacement for end)
width
Width of image to render in pixels (default: 64)
height
Height of image to render in pixels (default: auto)
palette
List of Colours to use (default: test palette)
"""
for line in pixdump_iter( source, start, end, length, width, height, palette ):
print( line )
|
python
|
def pixdump( source, start=None, end=None, length=None, width=64, height=None, palette=None ):
"""Print the contents of a byte string as a 256 colour image.
source
The byte string to print.
start
Start offset to read from (default: start)
end
End offset to stop reading at (default: end)
length
Length to read in (optional replacement for end)
width
Width of image to render in pixels (default: 64)
height
Height of image to render in pixels (default: auto)
palette
List of Colours to use (default: test palette)
"""
for line in pixdump_iter( source, start, end, length, width, height, palette ):
print( line )
|
[
"def",
"pixdump",
"(",
"source",
",",
"start",
"=",
"None",
",",
"end",
"=",
"None",
",",
"length",
"=",
"None",
",",
"width",
"=",
"64",
",",
"height",
"=",
"None",
",",
"palette",
"=",
"None",
")",
":",
"for",
"line",
"in",
"pixdump_iter",
"(",
"source",
",",
"start",
",",
"end",
",",
"length",
",",
"width",
",",
"height",
",",
"palette",
")",
":",
"print",
"(",
"line",
")"
] |
Print the contents of a byte string as a 256 colour image.
source
The byte string to print.
start
Start offset to read from (default: start)
end
End offset to stop reading at (default: end)
length
Length to read in (optional replacement for end)
width
Width of image to render in pixels (default: 64)
height
Height of image to render in pixels (default: auto)
palette
List of Colours to use (default: test palette)
|
[
"Print",
"the",
"contents",
"of",
"a",
"byte",
"string",
"as",
"a",
"256",
"colour",
"image",
"."
] |
train
|
https://github.com/moralrecordings/mrcrowbar/blob/b1ed882c4555552e7656b2d84aca543184577fa3/mrcrowbar/utils.py#L450-L476
|
moralrecordings/mrcrowbar
|
mrcrowbar/utils.py
|
BitReader.set_offset
|
def set_offset( self, offset ):
"""Set the current read offset (in bytes) for the instance."""
assert offset in range( len( self.buffer ) )
self.pos = offset
self._fill_buffer()
|
python
|
def set_offset( self, offset ):
"""Set the current read offset (in bytes) for the instance."""
assert offset in range( len( self.buffer ) )
self.pos = offset
self._fill_buffer()
|
[
"def",
"set_offset",
"(",
"self",
",",
"offset",
")",
":",
"assert",
"offset",
"in",
"range",
"(",
"len",
"(",
"self",
".",
"buffer",
")",
")",
"self",
".",
"pos",
"=",
"offset",
"self",
".",
"_fill_buffer",
"(",
")"
] |
Set the current read offset (in bytes) for the instance.
|
[
"Set",
"the",
"current",
"read",
"offset",
"(",
"in",
"bytes",
")",
"for",
"the",
"instance",
"."
] |
train
|
https://github.com/moralrecordings/mrcrowbar/blob/b1ed882c4555552e7656b2d84aca543184577fa3/mrcrowbar/utils.py#L538-L542
|
moralrecordings/mrcrowbar
|
mrcrowbar/utils.py
|
BitReader.get_bits
|
def get_bits( self, count ):
"""Get an integer containing the next [count] bits from the source."""
result = 0
for i in range( count ):
if self.bits_remaining <= 0:
self._fill_buffer()
if self.bits_reverse:
bit = (1 if (self.current_bits & (0x80 << 8*(self.bytes_to_cache-1))) else 0)
self.current_bits <<= 1
self.current_bits &= 0xff
else:
bit = (self.current_bits & 1)
self.current_bits >>= 1
self.bits_remaining -= 1
if self.output_reverse:
result <<= 1
result |= bit
else:
result |= bit << i
return result
|
python
|
def get_bits( self, count ):
"""Get an integer containing the next [count] bits from the source."""
result = 0
for i in range( count ):
if self.bits_remaining <= 0:
self._fill_buffer()
if self.bits_reverse:
bit = (1 if (self.current_bits & (0x80 << 8*(self.bytes_to_cache-1))) else 0)
self.current_bits <<= 1
self.current_bits &= 0xff
else:
bit = (self.current_bits & 1)
self.current_bits >>= 1
self.bits_remaining -= 1
if self.output_reverse:
result <<= 1
result |= bit
else:
result |= bit << i
return result
|
[
"def",
"get_bits",
"(",
"self",
",",
"count",
")",
":",
"result",
"=",
"0",
"for",
"i",
"in",
"range",
"(",
"count",
")",
":",
"if",
"self",
".",
"bits_remaining",
"<=",
"0",
":",
"self",
".",
"_fill_buffer",
"(",
")",
"if",
"self",
".",
"bits_reverse",
":",
"bit",
"=",
"(",
"1",
"if",
"(",
"self",
".",
"current_bits",
"&",
"(",
"0x80",
"<<",
"8",
"*",
"(",
"self",
".",
"bytes_to_cache",
"-",
"1",
")",
")",
")",
"else",
"0",
")",
"self",
".",
"current_bits",
"<<=",
"1",
"self",
".",
"current_bits",
"&=",
"0xff",
"else",
":",
"bit",
"=",
"(",
"self",
".",
"current_bits",
"&",
"1",
")",
"self",
".",
"current_bits",
">>=",
"1",
"self",
".",
"bits_remaining",
"-=",
"1",
"if",
"self",
".",
"output_reverse",
":",
"result",
"<<=",
"1",
"result",
"|=",
"bit",
"else",
":",
"result",
"|=",
"bit",
"<<",
"i",
"return",
"result"
] |
Get an integer containing the next [count] bits from the source.
|
[
"Get",
"an",
"integer",
"containing",
"the",
"next",
"[",
"count",
"]",
"bits",
"from",
"the",
"source",
"."
] |
train
|
https://github.com/moralrecordings/mrcrowbar/blob/b1ed882c4555552e7656b2d84aca543184577fa3/mrcrowbar/utils.py#L545-L566
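A hypothetical usage sketch for BitReader.get_bits. The BitReader constructor is not shown in this entry, so the BitReader(b'\xa1', 0) call below assumes a (buffer, start offset) signature with default LSB-first settings.
from mrcrowbar.utils import BitReader

reader = BitReader(b'\xa1', 0)  # assumed constructor: source bytes plus a starting offset
low = reader.get_bits(4)        # with bits_reverse/output_reverse off, bits come out LSB-first
high = reader.get_bits(4)
print(hex(low), hex(high))      # expected 0x1 then 0xa under those assumptions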
|
moralrecordings/mrcrowbar
|
mrcrowbar/utils.py
|
BitWriter.put_bits
|
def put_bits( self, value, count ):
"""Push bits into the target.
value
Integer containing bits to push, ordered from least-significant bit to
most-significant bit.
count
Number of bits to push to the target.
"""
for _ in range( count ):
# bits are retrieved from the source LSB first
bit = (value & 1)
value >>= 1
# however, bits are put into the result based on the rule
if self.bits_reverse:
if self.insert_at_msb:
self.current_bits |= (bit << (self.bits_remaining-1))
else:
self.current_bits <<= 1
self.current_bits |= bit
else:
if self.insert_at_msb:
self.current_bits >>= 1
self.current_bits |= (bit << 7)
else:
self.current_bits |= (bit << (8-self.bits_remaining))
self.bits_remaining -= 1
if self.bits_remaining <= 0:
self.output.append( self.current_bits )
self.current_bits = 0
self.bits_remaining = 8
|
python
|
def put_bits( self, value, count ):
"""Push bits into the target.
value
Integer containing bits to push, ordered from least-significant bit to
most-significant bit.
count
Number of bits to push to the target.
"""
for _ in range( count ):
# bits are retrieved from the source LSB first
bit = (value & 1)
value >>= 1
# however, bits are put into the result based on the rule
if self.bits_reverse:
if self.insert_at_msb:
self.current_bits |= (bit << (self.bits_remaining-1))
else:
self.current_bits <<= 1
self.current_bits |= bit
else:
if self.insert_at_msb:
self.current_bits >>= 1
self.current_bits |= (bit << 7)
else:
self.current_bits |= (bit << (8-self.bits_remaining))
self.bits_remaining -= 1
if self.bits_remaining <= 0:
self.output.append( self.current_bits )
self.current_bits = 0
self.bits_remaining = 8
|
[
"def",
"put_bits",
"(",
"self",
",",
"value",
",",
"count",
")",
":",
"for",
"_",
"in",
"range",
"(",
"count",
")",
":",
"# bits are retrieved from the source LSB first",
"bit",
"=",
"(",
"value",
"&",
"1",
")",
"value",
">>=",
"1",
"# however, bits are put into the result based on the rule",
"if",
"self",
".",
"bits_reverse",
":",
"if",
"self",
".",
"insert_at_msb",
":",
"self",
".",
"current_bits",
"|=",
"(",
"bit",
"<<",
"(",
"self",
".",
"bits_remaining",
"-",
"1",
")",
")",
"else",
":",
"self",
".",
"current_bits",
"<<=",
"1",
"self",
".",
"current_bits",
"|=",
"bit",
"else",
":",
"if",
"self",
".",
"insert_at_msb",
":",
"self",
".",
"current_bits",
">>=",
"1",
"self",
".",
"current_bits",
"|=",
"(",
"bit",
"<<",
"7",
")",
"else",
":",
"self",
".",
"current_bits",
"|=",
"(",
"bit",
"<<",
"(",
"8",
"-",
"self",
".",
"bits_remaining",
")",
")",
"self",
".",
"bits_remaining",
"-=",
"1",
"if",
"self",
".",
"bits_remaining",
"<=",
"0",
":",
"self",
".",
"output",
".",
"append",
"(",
"self",
".",
"current_bits",
")",
"self",
".",
"current_bits",
"=",
"0",
"self",
".",
"bits_remaining",
"=",
"8"
] |
Push bits into the target.
value
Integer containing bits to push, ordered from least-significant bit to
most-significant bit.
count
Number of bits to push to the target.
|
[
"Push",
"bits",
"into",
"the",
"target",
"."
] |
train
|
https://github.com/moralrecordings/mrcrowbar/blob/b1ed882c4555552e7656b2d84aca543184577fa3/mrcrowbar/utils.py#L592-L627
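A hypothetical sketch for BitWriter.put_bits. The BitWriter constructor is not shown in this entry, so BitWriter() below assumes a default (non-reversed, LSB-first) configuration; get_buffer from the next entry reads the result back.
from mrcrowbar.utils import BitWriter

writer = BitWriter()        # assumed default constructor
writer.put_bits(0x1, 4)     # low nibble first...
writer.put_bits(0xa, 4)     # ...then the high nibble
print(writer.get_buffer())  # expected b'\xa1' under the assumed defaults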
|
moralrecordings/mrcrowbar
|
mrcrowbar/utils.py
|
BitWriter.get_buffer
|
def get_buffer( self ):
"""Return a byte string containing the target as currently written."""
last_byte = self.current_bits if (self.bits_remaining < 8) else None
result = self.output
if last_byte is not None:
result = bytearray( result )
result.append( last_byte )
if self.bytes_reverse:
return bytes( reversed( result ) )
else:
return bytes( result )
|
python
|
def get_buffer( self ):
"""Return a byte string containing the target as currently written."""
last_byte = self.current_bits if (self.bits_remaining < 8) else None
result = self.output
if last_byte is not None:
result = bytearray( result )
result.append( last_byte )
if self.bytes_reverse:
return bytes( reversed( result ) )
else:
return bytes( result )
|
[
"def",
"get_buffer",
"(",
"self",
")",
":",
"last_byte",
"=",
"self",
".",
"current_bits",
"if",
"(",
"self",
".",
"bits_remaining",
"<",
"8",
")",
"else",
"None",
"result",
"=",
"self",
".",
"output",
"if",
"last_byte",
"is",
"not",
"None",
":",
"result",
"=",
"bytearray",
"(",
"result",
")",
"result",
".",
"append",
"(",
"last_byte",
")",
"if",
"self",
".",
"bytes_reverse",
":",
"return",
"bytes",
"(",
"reversed",
"(",
"result",
")",
")",
"else",
":",
"return",
"bytes",
"(",
"result",
")"
] |
Return a byte string containing the target as currently written.
|
[
"Return",
"a",
"byte",
"string",
"containing",
"the",
"target",
"as",
"currently",
"written",
"."
] |
train
|
https://github.com/moralrecordings/mrcrowbar/blob/b1ed882c4555552e7656b2d84aca543184577fa3/mrcrowbar/utils.py#L629-L641
|
Riffstation/Flask-Philo-SQLAlchemy
|
flask_philo_sqlalchemy/orm.py
|
BaseManager.get_for_update
|
def get_for_update(self, connection_name='DEFAULT', **kwargs):
"""
http://docs.sqlalchemy.org/en/latest/orm/query.html?highlight=update#sqlalchemy.orm.query.Query.with_for_update # noqa
"""
if not kwargs:
raise InvalidQueryError(
"Can not execute a query without parameters")
obj = self.pool.connections[connection_name].session.query(
self._model).with_for_update(
nowait=True, of=self._model).filter_by(**kwargs).first()
if not obj:
raise NotFoundError('Object not found')
return obj
|
python
|
def get_for_update(self, connection_name='DEFAULT', **kwargs):
"""
http://docs.sqlalchemy.org/en/latest/orm/query.html?highlight=update#sqlalchemy.orm.query.Query.with_for_update # noqa
"""
if not kwargs:
raise InvalidQueryError(
"Can not execute a query without parameters")
obj = self.pool.connections[connection_name].session.query(
self._model).with_for_update(
nowait=True, of=self._model).filter_by(**kwargs).first()
if not obj:
raise NotFoundError('Object not found')
return obj
|
[
"def",
"get_for_update",
"(",
"self",
",",
"connection_name",
"=",
"'DEFAULT'",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"not",
"kwargs",
":",
"raise",
"InvalidQueryError",
"(",
"\"Can not execute a query without parameters\"",
")",
"obj",
"=",
"self",
".",
"pool",
".",
"connections",
"[",
"connection_name",
"]",
".",
"session",
".",
"query",
"(",
"self",
".",
"_model",
")",
".",
"with_for_update",
"(",
"nowait",
"=",
"True",
",",
"of",
"=",
"self",
".",
"_model",
")",
".",
"filter_by",
"(",
"*",
"*",
"kwargs",
")",
".",
"first",
"(",
")",
"if",
"not",
"obj",
":",
"raise",
"NotFoundError",
"(",
"'Object not found'",
")",
"return",
"obj"
] |
http://docs.sqlalchemy.org/en/latest/orm/query.html?highlight=update#sqlalchemy.orm.query.Query.with_for_update # noqa
|
[
"http",
":",
"//",
"docs",
".",
"sqlalchemy",
".",
"org",
"/",
"en",
"/",
"latest",
"/",
"orm",
"/",
"query",
".",
"html?highlight",
"=",
"update#sqlalchemy",
".",
"orm",
".",
"query",
".",
"Query",
".",
"with_for_update",
"#",
"noqa"
] |
train
|
https://github.com/Riffstation/Flask-Philo-SQLAlchemy/blob/71598bb603b8458a2cf9f7989f71d8f1c77fafb9/flask_philo_sqlalchemy/orm.py#L35-L49
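A hypothetical usage sketch for get_for_update; user_manager below stands for a BaseManager bound to some model and, like the other names, is illustrative rather than taken from this entry.
# Lock the matching row with SELECT ... FOR UPDATE NOWAIT before editing it
try:
    user = user_manager.get_for_update(id=42)  # NotFoundError's import location is not shown in this entry
    user.username = 'locked-edit'
except NotFoundError:
    print('no row with id=42')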
|
tsnaomi/finnsyll
|
finnsyll/prev/v07.py
|
syllabify
|
def syllabify(word, compound=None):
'''Syllabify the given word, whether simplex or complex.'''
if compound is None:
compound = bool(re.search(r'(-| |=)', word))
syllabify = _syllabify_compound if compound else _syllabify
syll, rules = syllabify(word)
yield syll, rules
n = 7
if 'T4' in rules:
yield syllabify(word, T4=False)
n -= 1
if 'e' in rules:
yield syllabify(word, T1E=False)
n -= 1
if 'e' in rules and 'T4' in rules:
yield syllabify(word, T4=False, T1E=False)
n -= 1
# yield empty syllabifications and rules
for i in range(n):
yield '', ''
|
python
|
def syllabify(word, compound=None):
'''Syllabify the given word, whether simplex or complex.'''
if compound is None:
compound = bool(re.search(r'(-| |=)', word))
syllabify = _syllabify_compound if compound else _syllabify
syll, rules = syllabify(word)
yield syll, rules
n = 7
if 'T4' in rules:
yield syllabify(word, T4=False)
n -= 1
if 'e' in rules:
yield syllabify(word, T1E=False)
n -= 1
if 'e' in rules and 'T4' in rules:
yield syllabify(word, T4=False, T1E=False)
n -= 1
# yield empty syllabifications and rules
for i in range(n):
yield '', ''
|
[
"def",
"syllabify",
"(",
"word",
",",
"compound",
"=",
"None",
")",
":",
"if",
"compound",
"is",
"None",
":",
"compound",
"=",
"bool",
"(",
"re",
".",
"search",
"(",
"r'(-| |=)'",
",",
"word",
")",
")",
"syllabify",
"=",
"_syllabify_compound",
"if",
"compound",
"else",
"_syllabify",
"syll",
",",
"rules",
"=",
"syllabify",
"(",
"word",
")",
"yield",
"syll",
",",
"rules",
"n",
"=",
"7",
"if",
"'T4'",
"in",
"rules",
":",
"yield",
"syllabify",
"(",
"word",
",",
"T4",
"=",
"False",
")",
"n",
"-=",
"1",
"if",
"'e'",
"in",
"rules",
":",
"yield",
"syllabify",
"(",
"word",
",",
"T1E",
"=",
"False",
")",
"n",
"-=",
"1",
"if",
"'e'",
"in",
"rules",
"and",
"'T4'",
"in",
"rules",
":",
"yield",
"syllabify",
"(",
"word",
",",
"T4",
"=",
"False",
",",
"T1E",
"=",
"False",
")",
"n",
"-=",
"1",
"# yield empty syllabifications and rules",
"for",
"i",
"in",
"range",
"(",
"n",
")",
":",
"yield",
"''",
",",
"''"
] |
Syllabify the given word, whether simplex or complex.
|
[
"Syllabify",
"the",
"given",
"word",
"whether",
"simplex",
"or",
"complex",
"."
] |
train
|
https://github.com/tsnaomi/finnsyll/blob/6a42740311688c946a636a3e2304866c7aa041b3/finnsyll/prev/v07.py#L17-L43
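A minimal consumption sketch for the syllabify generator above; the import path is assumed from the file path, the example word is illustrative, and the generator pads its output with empty strings up to a fixed count.
from finnsyll.prev.v07 import syllabify

for syll, rules in syllabify('kaupunki'):
    if syll:  # skip the empty padding entries
        print(syll, rules)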
|
danishabdullah/algen
|
algen/compilers.py
|
ModelCompiler.convert_case
|
def convert_case(name):
"""Converts name from CamelCase to snake_case"""
s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name)
return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()
|
python
|
def convert_case(name):
"""Converts name from CamelCase to snake_case"""
s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name)
return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()
|
[
"def",
"convert_case",
"(",
"name",
")",
":",
"s1",
"=",
"re",
".",
"sub",
"(",
"'(.)([A-Z][a-z]+)'",
",",
"r'\\1_\\2'",
",",
"name",
")",
"return",
"re",
".",
"sub",
"(",
"'([a-z0-9])([A-Z])'",
",",
"r'\\1_\\2'",
",",
"s1",
")",
".",
"lower",
"(",
")"
] |
Converts name from CamelCase to snake_case
|
[
"Converts",
"name",
"from",
"CamelCase",
"to",
"snake_case"
] |
train
|
https://github.com/danishabdullah/algen/blob/642ba26d8721c588fce505ac67528070c1edc264/algen/compilers.py#L35-L38
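A standalone re-run of the same two regex substitutions, showing what they produce for a couple of illustrative names.
import re

def convert_case(name):
    # same two substitutions as in the entry above
    s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name)
    return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()

print(convert_case('UserAccount'))       # user_account
print(convert_case('HTTPResponseCode'))  # http_response_code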
|
danishabdullah/algen
|
algen/compilers.py
|
ModelCompiler.table_name
|
def table_name(self):
"""Pluralises the class_name using utterly simple algo and returns as table_name"""
if not self.class_name:
raise ValueError
else:
tbl_name = ModelCompiler.convert_case(self.class_name)
last_letter = tbl_name[-1]
if last_letter in ("y",):
return "{}ies".format(tbl_name[:-1])
elif last_letter in ("s",):
return "{}es".format(tbl_name)
else:
return "{}s".format(tbl_name)
|
python
|
def table_name(self):
"""Pluralises the class_name using utterly simple algo and returns as table_name"""
if not self.class_name:
raise ValueError
else:
tbl_name = ModelCompiler.convert_case(self.class_name)
last_letter = tbl_name[-1]
if last_letter in ("y",):
return "{}ies".format(tbl_name[:-1])
elif last_letter in ("s",):
return "{}es".format(tbl_name)
else:
return "{}s".format(tbl_name)
|
[
"def",
"table_name",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"class_name",
":",
"raise",
"ValueError",
"else",
":",
"tbl_name",
"=",
"ModelCompiler",
".",
"convert_case",
"(",
"self",
".",
"class_name",
")",
"last_letter",
"=",
"tbl_name",
"[",
"-",
"1",
"]",
"if",
"last_letter",
"in",
"(",
"\"y\"",
",",
")",
":",
"return",
"\"{}ies\"",
".",
"format",
"(",
"tbl_name",
"[",
":",
"-",
"1",
"]",
")",
"elif",
"last_letter",
"in",
"(",
"\"s\"",
",",
")",
":",
"return",
"\"{}es\"",
".",
"format",
"(",
"tbl_name",
")",
"else",
":",
"return",
"\"{}s\"",
".",
"format",
"(",
"tbl_name",
")"
] |
Pluralises the class_name using utterly simple algo and returns as table_name
|
[
"Pluralises",
"the",
"class_name",
"using",
"utterly",
"simple",
"algo",
"and",
"returns",
"as",
"table_name"
] |
train
|
https://github.com/danishabdullah/algen/blob/642ba26d8721c588fce505ac67528070c1edc264/algen/compilers.py#L41-L53
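A standalone sketch of the same naive pluralisation branches, for illustration only; the real method also snake_cases the class name first via convert_case.
def pluralise(tbl_name):
    # mirrors the three branches in the entry above
    if tbl_name.endswith('y'):
        return tbl_name[:-1] + 'ies'
    if tbl_name.endswith('s'):
        return tbl_name + 'es'
    return tbl_name + 's'

print(pluralise('company'))  # companies
print(pluralise('address'))  # addresses
print(pluralise('user'))     # users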
|
danishabdullah/algen
|
algen/compilers.py
|
ModelCompiler.types
|
def types(self):
"""All the unique types found in user supplied model"""
res = []
for column in self.column_definitions:
tmp = column.get('type', None)
res.append(ModelCompiler.get_column_type(tmp)) if tmp else False
res = list(set(res))
return res
|
python
|
def types(self):
"""All the unique types found in user supplied model"""
res = []
for column in self.column_definitions:
tmp = column.get('type', None)
res.append(ModelCompiler.get_column_type(tmp)) if tmp else False
res = list(set(res))
return res
|
[
"def",
"types",
"(",
"self",
")",
":",
"res",
"=",
"[",
"]",
"for",
"column",
"in",
"self",
".",
"column_definitions",
":",
"tmp",
"=",
"column",
".",
"get",
"(",
"'type'",
",",
"None",
")",
"res",
".",
"append",
"(",
"ModelCompiler",
".",
"get_column_type",
"(",
"tmp",
")",
")",
"if",
"tmp",
"else",
"False",
"res",
"=",
"list",
"(",
"set",
"(",
"res",
")",
")",
"return",
"res"
] |
All the unique types found in user supplied model
|
[
"All",
"the",
"unique",
"types",
"found",
"in",
"user",
"supplied",
"model"
] |
train
|
https://github.com/danishabdullah/algen/blob/642ba26d8721c588fce505ac67528070c1edc264/algen/compilers.py#L71-L78
|
danishabdullah/algen
|
algen/compilers.py
|
ModelCompiler.basic_types
|
def basic_types(self):
"""Returns non-postgres types referenced in user supplied model """
if not self.foreign_key_definitions:
return self.standard_types
else:
tmp = self.standard_types
tmp.append('ForeignKey')
return tmp
|
python
|
def basic_types(self):
"""Returns non-postgres types referenced in user supplied model """
if not self.foreign_key_definitions:
return self.standard_types
else:
tmp = self.standard_types
tmp.append('ForeignKey')
return tmp
|
[
"def",
"basic_types",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"foreign_key_definitions",
":",
"return",
"self",
".",
"standard_types",
"else",
":",
"tmp",
"=",
"self",
".",
"standard_types",
"tmp",
".",
"append",
"(",
"'ForeignKey'",
")",
"return",
"tmp"
] |
Returns non-postgres types referenced in user supplied model
|
[
"Returns",
"non",
"-",
"postgres",
"types",
"referenced",
"in",
"user",
"supplied",
"model"
] |
train
|
https://github.com/danishabdullah/algen/blob/642ba26d8721c588fce505ac67528070c1edc264/algen/compilers.py#L91-L98
|
danishabdullah/algen
|
algen/compilers.py
|
ModelCompiler.primary_keys
|
def primary_keys(self):
"""Returns the primary keys referenced in user supplied model"""
res = []
for column in self.column_definitions:
if 'primary_key' in column.keys():
tmp = column.get('primary_key', None)
res.append(column['name']) if tmp else False
return res
|
python
|
def primary_keys(self):
"""Returns the primary keys referenced in user supplied model"""
res = []
for column in self.column_definitions:
if 'primary_key' in column.keys():
tmp = column.get('primary_key', None)
res.append(column['name']) if tmp else False
return res
|
[
"def",
"primary_keys",
"(",
"self",
")",
":",
"res",
"=",
"[",
"]",
"for",
"column",
"in",
"self",
".",
"column_definitions",
":",
"if",
"'primary_key'",
"in",
"column",
".",
"keys",
"(",
")",
":",
"tmp",
"=",
"column",
".",
"get",
"(",
"'primary_key'",
",",
"None",
")",
"res",
".",
"append",
"(",
"column",
"[",
"'name'",
"]",
")",
"if",
"tmp",
"else",
"False",
"return",
"res"
] |
Returns the primary keys referenced in user supplied model
|
[
"Returns",
"the",
"primary",
"keys",
"referenced",
"in",
"user",
"supplied",
"model"
] |
train
|
https://github.com/danishabdullah/algen/blob/642ba26d8721c588fce505ac67528070c1edc264/algen/compilers.py#L105-L112
|
danishabdullah/algen
|
algen/compilers.py
|
ModelCompiler.compiled_named_imports
|
def compiled_named_imports(self):
"""Returns compiled named imports required for the model"""
res = []
if self.postgres_types:
res.append(
ALCHEMY_TEMPLATES.named_import.safe_substitute(
module='sqlalchemy.dialects.postgresql',
labels=", ".join(self.postgres_types)))
if self.mutable_dict_types:
res.append(
ALCHEMY_TEMPLATES.named_import.safe_substitute(
module='sqlalchemy.ext.mutable', labels='MutableDict'
))
return "\n".join(res)
|
python
|
def compiled_named_imports(self):
"""Returns compiled named imports required for the model"""
res = []
if self.postgres_types:
res.append(
ALCHEMY_TEMPLATES.named_import.safe_substitute(
module='sqlalchemy.dialects.postgresql',
labels=", ".join(self.postgres_types)))
if self.mutable_dict_types:
res.append(
ALCHEMY_TEMPLATES.named_import.safe_substitute(
module='sqlalchemy.ext.mutable', labels='MutableDict'
))
return "\n".join(res)
|
[
"def",
"compiled_named_imports",
"(",
"self",
")",
":",
"res",
"=",
"[",
"]",
"if",
"self",
".",
"postgres_types",
":",
"res",
".",
"append",
"(",
"ALCHEMY_TEMPLATES",
".",
"named_import",
".",
"safe_substitute",
"(",
"module",
"=",
"'sqlalchemy.dialects.postgresql'",
",",
"labels",
"=",
"\", \"",
".",
"join",
"(",
"self",
".",
"postgres_types",
")",
")",
")",
"if",
"self",
".",
"mutable_dict_types",
":",
"res",
".",
"append",
"(",
"ALCHEMY_TEMPLATES",
".",
"named_import",
".",
"safe_substitute",
"(",
"module",
"=",
"'sqlalchemy.ext.mutable'",
",",
"labels",
"=",
"'MutableDict'",
")",
")",
"return",
"\"\\n\"",
".",
"join",
"(",
"res",
")"
] |
Returns compiled named imports required for the model
|
[
"Returns",
"compiled",
"named",
"imports",
"required",
"for",
"the",
"model"
] |
train
|
https://github.com/danishabdullah/algen/blob/642ba26d8721c588fce505ac67528070c1edc264/algen/compilers.py#L115-L128
|
danishabdullah/algen
|
algen/compilers.py
|
ModelCompiler.compiled_orm_imports
|
def compiled_orm_imports(self):
"""Returns compiled named imports required for the model"""
module = 'sqlalchemy.orm'
labels = []
if self.relationship_definitions:
labels.append("relationship")
return ALCHEMY_TEMPLATES.named_import.safe_substitute(module=module, labels=", ".join(labels))
|
python
|
def compiled_orm_imports(self):
"""Returns compiled named imports required for the model"""
module = 'sqlalchemy.orm'
labels = []
if self.relationship_definitions:
labels.append("relationship")
return ALCHEMY_TEMPLATES.named_import.safe_substitute(module=module, labels=", ".join(labels))
|
[
"def",
"compiled_orm_imports",
"(",
"self",
")",
":",
"module",
"=",
"'sqlalchemy.orm'",
"labels",
"=",
"[",
"]",
"if",
"self",
".",
"relationship_definitions",
":",
"labels",
".",
"append",
"(",
"\"relationship\"",
")",
"return",
"ALCHEMY_TEMPLATES",
".",
"named_import",
".",
"safe_substitute",
"(",
"module",
"=",
"module",
",",
"labels",
"=",
"\", \"",
".",
"join",
"(",
"labels",
")",
")"
] |
Returns compiled named imports required for the model
|
[
"Returns",
"compiled",
"named",
"imports",
"required",
"for",
"the",
"model"
] |
train
|
https://github.com/danishabdullah/algen/blob/642ba26d8721c588fce505ac67528070c1edc264/algen/compilers.py#L131-L137
|
danishabdullah/algen
|
algen/compilers.py
|
ModelCompiler.compiled_columns
|
def compiled_columns(self):
"""Returns compiled column definitions"""
def get_column_args(column):
tmp = []
for arg_name, arg_val in column.items():
if arg_name not in ('name', 'type'):
if arg_name in ('server_default', 'server_onupdate'):
arg_val = '"{}"'.format(arg_val)
tmp.append(ALCHEMY_TEMPLATES.column_arg.safe_substitute(arg_name=arg_name,
arg_val=arg_val))
return ", ".join(tmp)
res = []
for column in self.column_definitions:
column_args = get_column_args(column)
column_type, type_params = ModelCompiler.get_col_type_info(column.get('type'))
column_name = column.get('name')
if column_type in MUTABLE_DICT_TYPES:
column_type = ALCHEMY_TEMPLATES.mutable_dict_type.safe_substitute(type=column_type,
type_params=type_params)
type_params = ''
res.append(
ALCHEMY_TEMPLATES.column_definition.safe_substitute(column_name=column_name,
column_type=column_type,
column_args=column_args,
type_params=type_params))
join_string = "\n" + self.tab
return join_string.join(res)
|
python
|
def compiled_columns(self):
"""Returns compiled column definitions"""
def get_column_args(column):
tmp = []
for arg_name, arg_val in column.items():
if arg_name not in ('name', 'type'):
if arg_name in ('server_default', 'server_onupdate'):
arg_val = '"{}"'.format(arg_val)
tmp.append(ALCHEMY_TEMPLATES.column_arg.safe_substitute(arg_name=arg_name,
arg_val=arg_val))
return ", ".join(tmp)
res = []
for column in self.column_definitions:
column_args = get_column_args(column)
column_type, type_params = ModelCompiler.get_col_type_info(column.get('type'))
column_name = column.get('name')
if column_type in MUTABLE_DICT_TYPES:
column_type = ALCHEMY_TEMPLATES.mutable_dict_type.safe_substitute(type=column_type,
type_params=type_params)
type_params = ''
res.append(
ALCHEMY_TEMPLATES.column_definition.safe_substitute(column_name=column_name,
column_type=column_type,
column_args=column_args,
type_params=type_params))
join_string = "\n" + self.tab
return join_string.join(res)
|
[
"def",
"compiled_columns",
"(",
"self",
")",
":",
"def",
"get_column_args",
"(",
"column",
")",
":",
"tmp",
"=",
"[",
"]",
"for",
"arg_name",
",",
"arg_val",
"in",
"column",
".",
"items",
"(",
")",
":",
"if",
"arg_name",
"not",
"in",
"(",
"'name'",
",",
"'type'",
")",
":",
"if",
"arg_name",
"in",
"(",
"'server_default'",
",",
"'server_onupdate'",
")",
":",
"arg_val",
"=",
"'\"{}\"'",
".",
"format",
"(",
"arg_val",
")",
"tmp",
".",
"append",
"(",
"ALCHEMY_TEMPLATES",
".",
"column_arg",
".",
"safe_substitute",
"(",
"arg_name",
"=",
"arg_name",
",",
"arg_val",
"=",
"arg_val",
")",
")",
"return",
"\", \"",
".",
"join",
"(",
"tmp",
")",
"res",
"=",
"[",
"]",
"for",
"column",
"in",
"self",
".",
"column_definitions",
":",
"column_args",
"=",
"get_column_args",
"(",
"column",
")",
"column_type",
",",
"type_params",
"=",
"ModelCompiler",
".",
"get_col_type_info",
"(",
"column",
".",
"get",
"(",
"'type'",
")",
")",
"column_name",
"=",
"column",
".",
"get",
"(",
"'name'",
")",
"if",
"column_type",
"in",
"MUTABLE_DICT_TYPES",
":",
"column_type",
"=",
"ALCHEMY_TEMPLATES",
".",
"mutable_dict_type",
".",
"safe_substitute",
"(",
"type",
"=",
"column_type",
",",
"type_params",
"=",
"type_params",
")",
"type_params",
"=",
"''",
"res",
".",
"append",
"(",
"ALCHEMY_TEMPLATES",
".",
"column_definition",
".",
"safe_substitute",
"(",
"column_name",
"=",
"column_name",
",",
"column_type",
"=",
"column_type",
",",
"column_args",
"=",
"column_args",
",",
"type_params",
"=",
"type_params",
")",
")",
"join_string",
"=",
"\"\\n\"",
"+",
"self",
".",
"tab",
"return",
"join_string",
".",
"join",
"(",
"res",
")"
] |
Returns compiled column definitions
|
[
"Returns",
"compiled",
"column",
"definitions"
] |
train
|
https://github.com/danishabdullah/algen/blob/642ba26d8721c588fce505ac67528070c1edc264/algen/compilers.py#L140-L168
|
danishabdullah/algen
|
algen/compilers.py
|
ModelCompiler.compiled_foreign_keys
|
def compiled_foreign_keys(self):
"""Returns compiled foreign key definitions"""
def get_column_args(column):
tmp = []
for arg_name, arg_val in column.items():
if arg_name not in ('name', 'type', 'reference'):
if arg_name in ('server_default', 'server_onupdate'):
arg_val = '"{}"'.format(arg_val)
tmp.append(ALCHEMY_TEMPLATES.column_arg.safe_substitute(arg_name=arg_name,
arg_val=arg_val))
return ", ".join(tmp)
def get_fkey_args(column):
table = column['reference']['table']
column = column['reference']['column']
return ALCHEMY_TEMPLATES.foreign_key_arg.safe_substitute(reference_table=table, reference_column=column)
res = []
for column in self.foreign_key_definitions:
column_args = get_column_args(column)
column_type, type_params = ModelCompiler.get_col_type_info(column.get('type'))
column_name = column.get('name')
reference = get_fkey_args(column)
if column_type in MUTABLE_DICT_TYPES:
column_type = ALCHEMY_TEMPLATES.mutable_dict_type.safe_substitute(type=column_type,
type_params=type_params)
type_params = ''
res.append(
ALCHEMY_TEMPLATES.foreign_key.safe_substitute(column_name=column_name,
column_type=column_type,
column_args=column_args,
foreign_key_args=reference,
type_params=type_params))
join_string = "\n" + self.tab
return join_string.join(res)
|
python
|
def compiled_foreign_keys(self):
"""Returns compiled foreign key definitions"""
def get_column_args(column):
tmp = []
for arg_name, arg_val in column.items():
if arg_name not in ('name', 'type', 'reference'):
if arg_name in ('server_default', 'server_onupdate'):
arg_val = '"{}"'.format(arg_val)
tmp.append(ALCHEMY_TEMPLATES.column_arg.safe_substitute(arg_name=arg_name,
arg_val=arg_val))
return ", ".join(tmp)
def get_fkey_args(column):
table = column['reference']['table']
column = column['reference']['column']
return ALCHEMY_TEMPLATES.foreign_key_arg.safe_substitute(reference_table=table, reference_column=column)
res = []
for column in self.foreign_key_definitions:
column_args = get_column_args(column)
column_type, type_params = ModelCompiler.get_col_type_info(column.get('type'))
column_name = column.get('name')
reference = get_fkey_args(column)
if column_type in MUTABLE_DICT_TYPES:
column_type = ALCHEMY_TEMPLATES.mutable_dict_type.safe_substitute(type=column_type,
type_params=type_params)
type_params = ''
res.append(
ALCHEMY_TEMPLATES.foreign_key.safe_substitute(column_name=column_name,
column_type=column_type,
column_args=column_args,
foreign_key_args=reference,
type_params=type_params))
join_string = "\n" + self.tab
return join_string.join(res)
|
[
"def",
"compiled_foreign_keys",
"(",
"self",
")",
":",
"def",
"get_column_args",
"(",
"column",
")",
":",
"tmp",
"=",
"[",
"]",
"for",
"arg_name",
",",
"arg_val",
"in",
"column",
".",
"items",
"(",
")",
":",
"if",
"arg_name",
"not",
"in",
"(",
"'name'",
",",
"'type'",
",",
"'reference'",
")",
":",
"if",
"arg_name",
"in",
"(",
"'server_default'",
",",
"'server_onupdate'",
")",
":",
"arg_val",
"=",
"'\"{}\"'",
".",
"format",
"(",
"arg_val",
")",
"tmp",
".",
"append",
"(",
"ALCHEMY_TEMPLATES",
".",
"column_arg",
".",
"safe_substitute",
"(",
"arg_name",
"=",
"arg_name",
",",
"arg_val",
"=",
"arg_val",
")",
")",
"return",
"\", \"",
".",
"join",
"(",
"tmp",
")",
"def",
"get_fkey_args",
"(",
"column",
")",
":",
"table",
"=",
"column",
"[",
"'reference'",
"]",
"[",
"'table'",
"]",
"column",
"=",
"column",
"[",
"'reference'",
"]",
"[",
"'column'",
"]",
"return",
"ALCHEMY_TEMPLATES",
".",
"foreign_key_arg",
".",
"safe_substitute",
"(",
"reference_table",
"=",
"table",
",",
"reference_column",
"=",
"column",
")",
"res",
"=",
"[",
"]",
"for",
"column",
"in",
"self",
".",
"foreign_key_definitions",
":",
"column_args",
"=",
"get_column_args",
"(",
"column",
")",
"column_type",
",",
"type_params",
"=",
"ModelCompiler",
".",
"get_col_type_info",
"(",
"column",
".",
"get",
"(",
"'type'",
")",
")",
"column_name",
"=",
"column",
".",
"get",
"(",
"'name'",
")",
"reference",
"=",
"get_fkey_args",
"(",
"column",
")",
"if",
"column_type",
"in",
"MUTABLE_DICT_TYPES",
":",
"column_type",
"=",
"ALCHEMY_TEMPLATES",
".",
"mutable_dict_type",
".",
"safe_substitute",
"(",
"type",
"=",
"column_type",
",",
"type_params",
"=",
"type_params",
")",
"type_params",
"=",
"''",
"res",
".",
"append",
"(",
"ALCHEMY_TEMPLATES",
".",
"foreign_key",
".",
"safe_substitute",
"(",
"column_name",
"=",
"column_name",
",",
"column_type",
"=",
"column_type",
",",
"column_args",
"=",
"column_args",
",",
"foreign_key_args",
"=",
"reference",
",",
"type_params",
"=",
"type_params",
")",
")",
"join_string",
"=",
"\"\\n\"",
"+",
"self",
".",
"tab",
"return",
"join_string",
".",
"join",
"(",
"res",
")"
] |
Returns compiled foreign key definitions
|
[
"Returns",
"compiled",
"foreign",
"key",
"definitions"
] |
train
|
https://github.com/danishabdullah/algen/blob/642ba26d8721c588fce505ac67528070c1edc264/algen/compilers.py#L171-L206
|
danishabdullah/algen
|
algen/compilers.py
|
ModelCompiler.compiled_relationships
|
def compiled_relationships(self):
"""Returns compiled relationship definitions"""
def get_column_args(column):
tmp = []
for arg_name, arg_val in column.items():
if arg_name not in ('name', 'type', 'reference', 'class'):
if arg_name in ('back_populates', ):
arg_val = "'{}'".format(arg_val)
tmp.append(ALCHEMY_TEMPLATES.column_arg.safe_substitute(arg_name=arg_name,
arg_val=arg_val))
return ", ".join(tmp)
res = []
for column in self.relationship_definitions:
column_args = get_column_args(column)
column_name = column.get('name')
cls_name = column.get("class")
res.append(
ALCHEMY_TEMPLATES.relationship.safe_substitute(column_name=column_name,
column_args=column_args,
class_name=cls_name))
join_string = "\n" + self.tab
return join_string.join(res)
|
python
|
def compiled_relationships(self):
"""Returns compiled relationship definitions"""
def get_column_args(column):
tmp = []
for arg_name, arg_val in column.items():
if arg_name not in ('name', 'type', 'reference', 'class'):
if arg_name in ('back_populates', ):
arg_val = "'{}'".format(arg_val)
tmp.append(ALCHEMY_TEMPLATES.column_arg.safe_substitute(arg_name=arg_name,
arg_val=arg_val))
return ", ".join(tmp)
res = []
for column in self.relationship_definitions:
column_args = get_column_args(column)
column_name = column.get('name')
cls_name = column.get("class")
res.append(
ALCHEMY_TEMPLATES.relationship.safe_substitute(column_name=column_name,
column_args=column_args,
class_name=cls_name))
join_string = "\n" + self.tab
return join_string.join(res)
|
[
"def",
"compiled_relationships",
"(",
"self",
")",
":",
"def",
"get_column_args",
"(",
"column",
")",
":",
"tmp",
"=",
"[",
"]",
"for",
"arg_name",
",",
"arg_val",
"in",
"column",
".",
"items",
"(",
")",
":",
"if",
"arg_name",
"not",
"in",
"(",
"'name'",
",",
"'type'",
",",
"'reference'",
",",
"'class'",
")",
":",
"if",
"arg_name",
"in",
"(",
"'back_populates'",
",",
")",
":",
"arg_val",
"=",
"\"'{}'\"",
".",
"format",
"(",
"arg_val",
")",
"tmp",
".",
"append",
"(",
"ALCHEMY_TEMPLATES",
".",
"column_arg",
".",
"safe_substitute",
"(",
"arg_name",
"=",
"arg_name",
",",
"arg_val",
"=",
"arg_val",
")",
")",
"return",
"\", \"",
".",
"join",
"(",
"tmp",
")",
"res",
"=",
"[",
"]",
"for",
"column",
"in",
"self",
".",
"relationship_definitions",
":",
"column_args",
"=",
"get_column_args",
"(",
"column",
")",
"column_name",
"=",
"column",
".",
"get",
"(",
"'name'",
")",
"cls_name",
"=",
"column",
".",
"get",
"(",
"\"class\"",
")",
"res",
".",
"append",
"(",
"ALCHEMY_TEMPLATES",
".",
"relationship",
".",
"safe_substitute",
"(",
"column_name",
"=",
"column_name",
",",
"column_args",
"=",
"column_args",
",",
"class_name",
"=",
"cls_name",
")",
")",
"join_string",
"=",
"\"\\n\"",
"+",
"self",
".",
"tab",
"return",
"join_string",
".",
"join",
"(",
"res",
")"
] |
Returns compiled relationship definitions
|
[
"Returns",
"compiled",
"relationship",
"definitions"
] |
train
|
https://github.com/danishabdullah/algen/blob/642ba26d8721c588fce505ac67528070c1edc264/algen/compilers.py#L209-L232
|
danishabdullah/algen
|
algen/compilers.py
|
ModelCompiler.columns
|
def columns(self):
"""Return names of all the addressable columns (including foreign keys) referenced in user supplied model"""
res = [col['name'] for col in self.column_definitions]
res.extend([col['name'] for col in self.foreign_key_definitions])
return res
|
python
|
def columns(self):
"""Return names of all the addressable columns (including foreign keys) referenced in user supplied model"""
res = [col['name'] for col in self.column_definitions]
res.extend([col['name'] for col in self.foreign_key_definitions])
return res
|
[
"def",
"columns",
"(",
"self",
")",
":",
"res",
"=",
"[",
"col",
"[",
"'name'",
"]",
"for",
"col",
"in",
"self",
".",
"column_definitions",
"]",
"res",
".",
"extend",
"(",
"[",
"col",
"[",
"'name'",
"]",
"for",
"col",
"in",
"self",
".",
"foreign_key_definitions",
"]",
")",
"return",
"res"
] |
Return names of all the addressable columns (including foreign keys) referenced in user supplied model
|
[
"Return",
"names",
"of",
"all",
"the",
"addressable",
"columns",
"(",
"including",
"foreign",
"keys",
")",
"referenced",
"in",
"user",
"supplied",
"model"
] |
train
|
https://github.com/danishabdullah/algen/blob/642ba26d8721c588fce505ac67528070c1edc264/algen/compilers.py#L235-L239
|
danishabdullah/algen
|
algen/compilers.py
|
ModelCompiler.compiled_init_func
|
def compiled_init_func(self):
"""Returns compiled init function"""
def get_column_assignment(column_name):
return ALCHEMY_TEMPLATES.col_assignment.safe_substitute(col_name=column_name)
def get_compiled_args(arg_name):
return ALCHEMY_TEMPLATES.func_arg.safe_substitute(arg_name=arg_name)
join_string = "\n" + self.tab + self.tab
column_assignments = join_string.join([get_column_assignment(n) for n in self.columns])
init_args = ", ".join(get_compiled_args(n) for n in self.columns)
return ALCHEMY_TEMPLATES.init_function.safe_substitute(col_assignments=column_assignments,
init_args=init_args)
|
python
|
def compiled_init_func(self):
"""Returns compiled init function"""
def get_column_assignment(column_name):
return ALCHEMY_TEMPLATES.col_assignment.safe_substitute(col_name=column_name)
def get_compiled_args(arg_name):
return ALCHEMY_TEMPLATES.func_arg.safe_substitute(arg_name=arg_name)
join_string = "\n" + self.tab + self.tab
column_assignments = join_string.join([get_column_assignment(n) for n in self.columns])
init_args = ", ".join(get_compiled_args(n) for n in self.columns)
return ALCHEMY_TEMPLATES.init_function.safe_substitute(col_assignments=column_assignments,
init_args=init_args)
|
[
"def",
"compiled_init_func",
"(",
"self",
")",
":",
"def",
"get_column_assignment",
"(",
"column_name",
")",
":",
"return",
"ALCHEMY_TEMPLATES",
".",
"col_assignment",
".",
"safe_substitute",
"(",
"col_name",
"=",
"column_name",
")",
"def",
"get_compiled_args",
"(",
"arg_name",
")",
":",
"return",
"ALCHEMY_TEMPLATES",
".",
"func_arg",
".",
"safe_substitute",
"(",
"arg_name",
"=",
"arg_name",
")",
"join_string",
"=",
"\"\\n\"",
"+",
"self",
".",
"tab",
"+",
"self",
".",
"tab",
"column_assignments",
"=",
"join_string",
".",
"join",
"(",
"[",
"get_column_assignment",
"(",
"n",
")",
"for",
"n",
"in",
"self",
".",
"columns",
"]",
")",
"init_args",
"=",
"\", \"",
".",
"join",
"(",
"get_compiled_args",
"(",
"n",
")",
"for",
"n",
"in",
"self",
".",
"columns",
")",
"return",
"ALCHEMY_TEMPLATES",
".",
"init_function",
".",
"safe_substitute",
"(",
"col_assignments",
"=",
"column_assignments",
",",
"init_args",
"=",
"init_args",
")"
] |
Returns compiled init function
|
[
"Returns",
"compiled",
"init",
"function"
] |
train
|
https://github.com/danishabdullah/algen/blob/642ba26d8721c588fce505ac67528070c1edc264/algen/compilers.py#L242-L255
|
danishabdullah/algen
|
algen/compilers.py
|
ModelCompiler.compiled_update_func
|
def compiled_update_func(self):
"""Returns compiled update function"""
def get_not_none_col_assignment(column_name):
return ALCHEMY_TEMPLATES.not_none_col_assignment.safe_substitute(col_name=column_name)
def get_compiled_args(arg_name):
return ALCHEMY_TEMPLATES.func_arg.safe_substitute(arg_name=arg_name)
join_string = "\n" + self.tab + self.tab
columns = [n for n in self.columns if n not in self.primary_keys]
not_none_col_assignments = join_string.join([get_not_none_col_assignment(n) for n in columns])
update_args = ", ".join(get_compiled_args(n) for n in columns)
return ALCHEMY_TEMPLATES.update_function.safe_substitute(not_none_col_assignments=not_none_col_assignments,
update_args=update_args,
class_name=self.class_name)
|
python
|
def compiled_update_func(self):
"""Returns compiled update function"""
def get_not_none_col_assignment(column_name):
return ALCHEMY_TEMPLATES.not_none_col_assignment.safe_substitute(col_name=column_name)
def get_compiled_args(arg_name):
return ALCHEMY_TEMPLATES.func_arg.safe_substitute(arg_name=arg_name)
join_string = "\n" + self.tab + self.tab
columns = [n for n in self.columns if n not in self.primary_keys]
not_none_col_assignments = join_string.join([get_not_none_col_assignment(n) for n in columns])
update_args = ", ".join(get_compiled_args(n) for n in columns)
return ALCHEMY_TEMPLATES.update_function.safe_substitute(not_none_col_assignments=not_none_col_assignments,
update_args=update_args,
class_name=self.class_name)
|
[
"def",
"compiled_update_func",
"(",
"self",
")",
":",
"def",
"get_not_none_col_assignment",
"(",
"column_name",
")",
":",
"return",
"ALCHEMY_TEMPLATES",
".",
"not_none_col_assignment",
".",
"safe_substitute",
"(",
"col_name",
"=",
"column_name",
")",
"def",
"get_compiled_args",
"(",
"arg_name",
")",
":",
"return",
"ALCHEMY_TEMPLATES",
".",
"func_arg",
".",
"safe_substitute",
"(",
"arg_name",
"=",
"arg_name",
")",
"join_string",
"=",
"\"\\n\"",
"+",
"self",
".",
"tab",
"+",
"self",
".",
"tab",
"columns",
"=",
"[",
"n",
"for",
"n",
"in",
"self",
".",
"columns",
"if",
"n",
"not",
"in",
"self",
".",
"primary_keys",
"]",
"not_none_col_assignments",
"=",
"join_string",
".",
"join",
"(",
"[",
"get_not_none_col_assignment",
"(",
"n",
")",
"for",
"n",
"in",
"columns",
"]",
")",
"update_args",
"=",
"\", \"",
".",
"join",
"(",
"get_compiled_args",
"(",
"n",
")",
"for",
"n",
"in",
"columns",
")",
"return",
"ALCHEMY_TEMPLATES",
".",
"update_function",
".",
"safe_substitute",
"(",
"not_none_col_assignments",
"=",
"not_none_col_assignments",
",",
"update_args",
"=",
"update_args",
",",
"class_name",
"=",
"self",
".",
"class_name",
")"
] |
Returns compiled update function
|
[
"Returns",
"compiled",
"update",
"function"
] |
train
|
https://github.com/danishabdullah/algen/blob/642ba26d8721c588fce505ac67528070c1edc264/algen/compilers.py#L258-L273
|
danishabdullah/algen
|
algen/compilers.py
|
ModelCompiler.compiled_hash_func
|
def compiled_hash_func(self):
"""Returns compiled hash function based on hash of stringified primary_keys.
This isn't the most efficient way"""
def get_primary_key_str(pkey_name):
return "str(self.{})".format(pkey_name)
hash_str = "+ ".join([get_primary_key_str(n) for n in self.primary_keys])
return ALCHEMY_TEMPLATES.hash_function.safe_substitute(concated_primary_key_strs=hash_str)
|
python
|
def compiled_hash_func(self):
"""Returns compiled hash function based on hash of stringified primary_keys.
This isn't the most efficient way"""
def get_primary_key_str(pkey_name):
return "str(self.{})".format(pkey_name)
hash_str = "+ ".join([get_primary_key_str(n) for n in self.primary_keys])
return ALCHEMY_TEMPLATES.hash_function.safe_substitute(concated_primary_key_strs=hash_str)
|
[
"def",
"compiled_hash_func",
"(",
"self",
")",
":",
"def",
"get_primary_key_str",
"(",
"pkey_name",
")",
":",
"return",
"\"str(self.{})\"",
".",
"format",
"(",
"pkey_name",
")",
"hash_str",
"=",
"\"+ \"",
".",
"join",
"(",
"[",
"get_primary_key_str",
"(",
"n",
")",
"for",
"n",
"in",
"self",
".",
"primary_keys",
"]",
")",
"return",
"ALCHEMY_TEMPLATES",
".",
"hash_function",
".",
"safe_substitute",
"(",
"concated_primary_key_strs",
"=",
"hash_str",
")"
] |
Returns compiled hash function based on hash of stringified primary_keys.
This isn't the most efficient way
|
[
"Returns",
"compiled",
"hash",
"function",
"based",
"on",
"hash",
"of",
"stringified",
"primary_keys",
".",
"This",
"isn",
"t",
"the",
"most",
"efficient",
"way"
] |
train
|
https://github.com/danishabdullah/algen/blob/642ba26d8721c588fce505ac67528070c1edc264/algen/compilers.py#L276-L284
|
danishabdullah/algen
|
algen/compilers.py
|
ModelCompiler.representation_function_compiler
|
def representation_function_compiler(self, func_name):
"""Generic function can be used to compile __repr__ or __unicode__ or __str__"""
def get_col_accessor(col):
return ALCHEMY_TEMPLATES.col_accessor.safe_substitute(col=col)
def get_col_evaluator(col):
return ALCHEMY_TEMPLATES.col_evaluator.safe_substitute(col=col)
col_evaluators = ", ".join([get_col_evaluator(n) for n in self.primary_keys])
col_accessors = ", ".join([get_col_accessor(n) for n in self.primary_keys])
return ALCHEMY_TEMPLATES.representor_function.safe_substitute(func_name=func_name,
col_accessors=col_accessors,
col_evaluators=col_evaluators,
class_name=self.class_name)
|
python
|
def representation_function_compiler(self, func_name):
"""Generic function can be used to compile __repr__ or __unicode__ or __str__"""
def get_col_accessor(col):
return ALCHEMY_TEMPLATES.col_accessor.safe_substitute(col=col)
def get_col_evaluator(col):
return ALCHEMY_TEMPLATES.col_evaluator.safe_substitute(col=col)
col_evaluators = ", ".join([get_col_evaluator(n) for n in self.primary_keys])
col_accessors = ", ".join([get_col_accessor(n) for n in self.primary_keys])
return ALCHEMY_TEMPLATES.representor_function.safe_substitute(func_name=func_name,
col_accessors=col_accessors,
col_evaluators=col_evaluators,
class_name=self.class_name)
|
[
"def",
"representation_function_compiler",
"(",
"self",
",",
"func_name",
")",
":",
"def",
"get_col_accessor",
"(",
"col",
")",
":",
"return",
"ALCHEMY_TEMPLATES",
".",
"col_accessor",
".",
"safe_substitute",
"(",
"col",
"=",
"col",
")",
"def",
"get_col_evaluator",
"(",
"col",
")",
":",
"return",
"ALCHEMY_TEMPLATES",
".",
"col_evaluator",
".",
"safe_substitute",
"(",
"col",
"=",
"col",
")",
"col_evaluators",
"=",
"\", \"",
".",
"join",
"(",
"[",
"get_col_evaluator",
"(",
"n",
")",
"for",
"n",
"in",
"self",
".",
"primary_keys",
"]",
")",
"col_accessors",
"=",
"\", \"",
".",
"join",
"(",
"[",
"get_col_accessor",
"(",
"n",
")",
"for",
"n",
"in",
"self",
".",
"primary_keys",
"]",
")",
"return",
"ALCHEMY_TEMPLATES",
".",
"representor_function",
".",
"safe_substitute",
"(",
"func_name",
"=",
"func_name",
",",
"col_accessors",
"=",
"col_accessors",
",",
"col_evaluators",
"=",
"col_evaluators",
",",
"class_name",
"=",
"self",
".",
"class_name",
")"
] |
Generic function can be used to compile __repr__ or __unicode__ or __str__
|
[
"Generic",
"function",
"can",
"be",
"used",
"to",
"compile",
"__repr__",
"or",
"__unicode__",
"or",
"__str__"
] |
train
|
https://github.com/danishabdullah/algen/blob/642ba26d8721c588fce505ac67528070c1edc264/algen/compilers.py#L308-L323
|
danishabdullah/algen
|
algen/compilers.py
|
ModelCompiler.compiled_model
|
def compiled_model(self):
"""Returns compile ORM class for the user supplied model"""
return ALCHEMY_TEMPLATES.model.safe_substitute(class_name=self.class_name,
table_name=self.table_name,
column_definitions=self.compiled_columns,
init_function=self.compiled_init_func,
update_function=self.compiled_update_func,
hash_function=self.compiled_hash_func,
eq_function=self.compiled_eq_func,
neq_function=self.compiled_neq_func,
str_function=self.compiled_str_func,
unicode_function=self.compiled_unicode_func,
repr_function=self.compiled_repr_func,
types=", ".join(self.basic_types),
username=self.username,
foreign_keys=self.compiled_foreign_keys,
relationships=self.compiled_relationships,
named_imports=self.compiled_named_imports,
orm_imports=self.compiled_orm_imports,
get_proxy_cls_function=self.compiled_proxy_cls_func,
add_function=ALCHEMY_TEMPLATES.add_function.template,
delete_function=ALCHEMY_TEMPLATES.delete_function.template,
to_dict_function=ALCHEMY_TEMPLATES.to_dict_function.template,
to_proxy_function=ALCHEMY_TEMPLATES.to_proxy_function.template,
from_proxy_function=ALCHEMY_TEMPLATES.from_proxy_function.template)
|
python
|
def compiled_model(self):
"""Returns compile ORM class for the user supplied model"""
return ALCHEMY_TEMPLATES.model.safe_substitute(class_name=self.class_name,
table_name=self.table_name,
column_definitions=self.compiled_columns,
init_function=self.compiled_init_func,
update_function=self.compiled_update_func,
hash_function=self.compiled_hash_func,
eq_function=self.compiled_eq_func,
neq_function=self.compiled_neq_func,
str_function=self.compiled_str_func,
unicode_function=self.compiled_unicode_func,
repr_function=self.compiled_repr_func,
types=", ".join(self.basic_types),
username=self.username,
foreign_keys=self.compiled_foreign_keys,
relationships=self.compiled_relationships,
named_imports=self.compiled_named_imports,
orm_imports=self.compiled_orm_imports,
get_proxy_cls_function=self.compiled_proxy_cls_func,
add_function=ALCHEMY_TEMPLATES.add_function.template,
delete_function=ALCHEMY_TEMPLATES.delete_function.template,
to_dict_function=ALCHEMY_TEMPLATES.to_dict_function.template,
to_proxy_function=ALCHEMY_TEMPLATES.to_proxy_function.template,
from_proxy_function=ALCHEMY_TEMPLATES.from_proxy_function.template)
|
[
"def",
"compiled_model",
"(",
"self",
")",
":",
"return",
"ALCHEMY_TEMPLATES",
".",
"model",
".",
"safe_substitute",
"(",
"class_name",
"=",
"self",
".",
"class_name",
",",
"table_name",
"=",
"self",
".",
"table_name",
",",
"column_definitions",
"=",
"self",
".",
"compiled_columns",
",",
"init_function",
"=",
"self",
".",
"compiled_init_func",
",",
"update_function",
"=",
"self",
".",
"compiled_update_func",
",",
"hash_function",
"=",
"self",
".",
"compiled_hash_func",
",",
"eq_function",
"=",
"self",
".",
"compiled_eq_func",
",",
"neq_function",
"=",
"self",
".",
"compiled_neq_func",
",",
"str_function",
"=",
"self",
".",
"compiled_str_func",
",",
"unicode_function",
"=",
"self",
".",
"compiled_unicode_func",
",",
"repr_function",
"=",
"self",
".",
"compiled_repr_func",
",",
"types",
"=",
"\", \"",
".",
"join",
"(",
"self",
".",
"basic_types",
")",
",",
"username",
"=",
"self",
".",
"username",
",",
"foreign_keys",
"=",
"self",
".",
"compiled_foreign_keys",
",",
"relationships",
"=",
"self",
".",
"compiled_relationships",
",",
"named_imports",
"=",
"self",
".",
"compiled_named_imports",
",",
"orm_imports",
"=",
"self",
".",
"compiled_orm_imports",
",",
"get_proxy_cls_function",
"=",
"self",
".",
"compiled_proxy_cls_func",
",",
"add_function",
"=",
"ALCHEMY_TEMPLATES",
".",
"add_function",
".",
"template",
",",
"delete_function",
"=",
"ALCHEMY_TEMPLATES",
".",
"delete_function",
".",
"template",
",",
"to_dict_function",
"=",
"ALCHEMY_TEMPLATES",
".",
"to_dict_function",
".",
"template",
",",
"to_proxy_function",
"=",
"ALCHEMY_TEMPLATES",
".",
"to_proxy_function",
".",
"template",
",",
"from_proxy_function",
"=",
"ALCHEMY_TEMPLATES",
".",
"from_proxy_function",
".",
"template",
")"
] |
Returns compiled ORM class for the user supplied model
|
[
"Returns",
"compile",
"ORM",
"class",
"for",
"the",
"user",
"supplied",
"model"
] |
train
|
https://github.com/danishabdullah/algen/blob/642ba26d8721c588fce505ac67528070c1edc264/algen/compilers.py#L346-L370
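The compiled_model entry above is, at its core, template substitution: ALCHEMY_TEMPLATES.model is assumed to wrap a string.Template, and safe_substitute fills in the compiled pieces. A toy, standard-library-only sketch of the same mechanism (the template text and values are invented for illustration, not taken from algen):

from string import Template

# A drastically shrunken stand-in for ALCHEMY_TEMPLATES.model.
model_template = Template(
    "class $class_name(Base):\n"
    "    __tablename__ = '$table_name'\n"
    "$column_definitions\n"
)

# safe_substitute fills the known placeholders and, unlike substitute(),
# leaves any unknown ones in place instead of raising KeyError.
print(model_template.safe_substitute(
    class_name="User",
    table_name="users",
    column_definitions="    id = Column(Integer, primary_key=True)",
))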
|
saltant-org/saltant-py
|
saltant/models/task_queue.py
|
TaskQueue.put
|
def put(self):
"""Updates this task queue on the saltant server.
Returns:
:class:`saltant.models.task_queue.TaskQueue`:
A task queue model instance representing the task queue
just updated.
"""
return self.manager.put(
id=self.id,
name=self.name,
description=self.description,
private=self.private,
runs_executable_tasks=self.runs_executable_tasks,
runs_docker_container_tasks=self.runs_docker_container_tasks,
runs_singularity_container_tasks=self.runs_singularity_container_tasks,
active=self.active,
whitelists=self.whitelists,
)
|
python
|
def put(self):
"""Updates this task queue on the saltant server.
Returns:
:class:`saltant.models.task_queue.TaskQueue`:
A task queue model instance representing the task queue
just updated.
"""
return self.manager.put(
id=self.id,
name=self.name,
description=self.description,
private=self.private,
runs_executable_tasks=self.runs_executable_tasks,
runs_docker_container_tasks=self.runs_docker_container_tasks,
runs_singularity_container_tasks=self.runs_singularity_container_tasks,
active=self.active,
whitelists=self.whitelists,
)
|
[
"def",
"put",
"(",
"self",
")",
":",
"return",
"self",
".",
"manager",
".",
"put",
"(",
"id",
"=",
"self",
".",
"id",
",",
"name",
"=",
"self",
".",
"name",
",",
"description",
"=",
"self",
".",
"description",
",",
"private",
"=",
"self",
".",
"private",
",",
"runs_executable_tasks",
"=",
"self",
".",
"runs_executable_tasks",
",",
"runs_docker_container_tasks",
"=",
"self",
".",
"runs_docker_container_tasks",
",",
"runs_singularity_container_tasks",
"=",
"self",
".",
"runs_singularity_container_tasks",
",",
"active",
"=",
"self",
".",
"active",
",",
"whitelists",
"=",
"self",
".",
"whitelists",
",",
")"
] |
Updates this task queue on the saltant server.
Returns:
:class:`saltant.models.task_queue.TaskQueue`:
A task queue model instance representing the task queue
just updated.
|
[
"Updates",
"this",
"task",
"queue",
"on",
"the",
"saltant",
"server",
"."
] |
train
|
https://github.com/saltant-org/saltant-py/blob/bf3bdbc4ec9c772c7f621f8bd6a76c5932af68be/saltant/models/task_queue.py#L119-L137
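A hedged usage sketch for TaskQueue.put above. How the client and manager are constructed is not shown in this entry, so the import path, the Client arguments, and the task_queues attribute below are assumptions rather than documented API; only the final put() call mirrors the method documented here.

# All saltant-specific setup names here are assumptions.
from saltant.client import Client  # assumed import path

client = Client(
    base_api_url="https://example.org/api/",  # placeholder server URL
    auth_api_token="<api-token>",             # placeholder token; argument name assumed
)
queue = client.task_queues.get(name="default-queue")  # assumed manager attribute

# put() pushes every current attribute back to the server in one full update,
# so mutate the instance first, then call it once.
queue.description = "Queue for nightly maintenance jobs"
queue.active = True
updated_queue = queue.put()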
|
saltant-org/saltant-py
|
saltant/models/task_queue.py
|
TaskQueueManager.get
|
def get(self, id=None, name=None):
"""Get a task queue.
        Either the id xor the name of the task queue must be specified.
Args:
            id (int, optional): The id of the task queue to get.
            name (str, optional): The name of the task queue to get.
Returns:
:class:`saltant.models.task_queue.TaskQueue`:
A task queue model instance representing the task queue
requested.
Raises:
ValueError: Neither id nor name were set *or* both id and
name were set.
"""
# Validate arguments - use an xor
if not (id is None) ^ (name is None):
raise ValueError("Either id or name must be set (but not both!)")
# If it's just ID provided, call the parent function
if id is not None:
return super(TaskQueueManager, self).get(id=id)
# Try getting the task queue by name
return self.list(filters={"name": name})[0]
|
python
|
def get(self, id=None, name=None):
"""Get a task queue.
        Either the id xor the name of the task queue must be specified.
Args:
            id (int, optional): The id of the task queue to get.
            name (str, optional): The name of the task queue to get.
Returns:
:class:`saltant.models.task_queue.TaskQueue`:
A task queue model instance representing the task queue
requested.
Raises:
ValueError: Neither id nor name were set *or* both id and
name were set.
"""
# Validate arguments - use an xor
if not (id is None) ^ (name is None):
raise ValueError("Either id or name must be set (but not both!)")
# If it's just ID provided, call the parent function
if id is not None:
return super(TaskQueueManager, self).get(id=id)
# Try getting the task queue by name
return self.list(filters={"name": name})[0]
|
[
"def",
"get",
"(",
"self",
",",
"id",
"=",
"None",
",",
"name",
"=",
"None",
")",
":",
"# Validate arguments - use an xor",
"if",
"not",
"(",
"id",
"is",
"None",
")",
"^",
"(",
"name",
"is",
"None",
")",
":",
"raise",
"ValueError",
"(",
"\"Either id or name must be set (but not both!)\"",
")",
"# If it's just ID provided, call the parent function",
"if",
"id",
"is",
"not",
"None",
":",
"return",
"super",
"(",
"TaskQueueManager",
",",
"self",
")",
".",
"get",
"(",
"id",
"=",
"id",
")",
"# Try getting the task queue by name",
"return",
"self",
".",
"list",
"(",
"filters",
"=",
"{",
"\"name\"",
":",
"name",
"}",
")",
"[",
"0",
"]"
] |
Get a task queue.
        Either the id xor the name of the task queue must be specified.
Args:
            id (int, optional): The id of the task queue to get.
            name (str, optional): The name of the task queue to get.
Returns:
:class:`saltant.models.task_queue.TaskQueue`:
A task queue model instance representing the task queue
requested.
Raises:
ValueError: Neither id nor name were set *or* both id and
name were set.
|
[
"Get",
"a",
"task",
"queue",
"."
] |
train
|
https://github.com/saltant-org/saltant-py/blob/bf3bdbc4ec9c772c7f621f8bd6a76c5932af68be/saltant/models/task_queue.py#L156-L183
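The argument validation in TaskQueueManager.get above is a small reusable pattern: exactly one of two selectors must be given. A standalone sketch of the same check, with no saltant objects involved:

def get_by_id_or_name(id=None, name=None):
    # The xor of the two "is None" checks is True when both selectors are
    # missing or both are given; either situation is an error.
    if not (id is None) ^ (name is None):
        raise ValueError("Either id or name must be set (but not both!)")
    if id is not None:
        return {"looked_up_by": "id", "value": id}
    return {"looked_up_by": "name", "value": name}

print(get_by_id_or_name(name="default-queue"))  # looked up by name
print(get_by_id_or_name(id=7))                  # looked up by id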
|
saltant-org/saltant-py
|
saltant/models/task_queue.py
|
TaskQueueManager.create
|
def create(
self,
name,
description="",
private=False,
runs_executable_tasks=True,
runs_docker_container_tasks=True,
runs_singularity_container_tasks=True,
active=True,
whitelists=None,
):
"""Create a task queue.
Args:
name (str): The name of the task queue.
description (str, optional): A description of the task queue.
private (bool, optional): A boolean specifying whether the
queue is exclusive to its creator. Defaults to False.
runs_executable_tasks (bool, optional): A Boolean specifying
whether the queue runs executable tasks. Defaults to
True.
runs_docker_container_tasks (bool, optional): A Boolean
specifying whether the queue runs container tasks that
run in Docker containers. Defaults to True.
runs_singularity_container_tasks (bool, optional): A Boolean
specifying whether the queue runs container tasks that
run in Singularity containers. Defaults to True.
active (bool, optional): A boolean specifying whether the
                queue is active. Defaults to True.
whitelists (list, optional): A list of task whitelist IDs.
Defaults to None (which gets translated to []).
Returns:
:class:`saltant.models.task_queue.TaskQueue`:
A task queue model instance representing the task queue
just created.
"""
# Translate whitelists None to [] if necessary
if whitelists is None:
whitelists = []
# Create the object
request_url = self._client.base_api_url + self.list_url
data_to_post = {
"name": name,
"description": description,
"private": private,
"runs_executable_tasks": runs_executable_tasks,
"runs_docker_container_tasks": runs_docker_container_tasks,
"runs_singularity_container_tasks": runs_singularity_container_tasks,
"active": active,
"whitelists": whitelists,
}
response = self._client.session.post(request_url, data=data_to_post)
# Validate that the request was successful
self.validate_request_success(
response_text=response.text,
request_url=request_url,
status_code=response.status_code,
expected_status_code=HTTP_201_CREATED,
)
# Return a model instance representing the task instance
return self.response_data_to_model_instance(response.json())
|
python
|
def create(
self,
name,
description="",
private=False,
runs_executable_tasks=True,
runs_docker_container_tasks=True,
runs_singularity_container_tasks=True,
active=True,
whitelists=None,
):
"""Create a task queue.
Args:
name (str): The name of the task queue.
description (str, optional): A description of the task queue.
private (bool, optional): A boolean specifying whether the
queue is exclusive to its creator. Defaults to False.
runs_executable_tasks (bool, optional): A Boolean specifying
whether the queue runs executable tasks. Defaults to
True.
runs_docker_container_tasks (bool, optional): A Boolean
specifying whether the queue runs container tasks that
run in Docker containers. Defaults to True.
runs_singularity_container_tasks (bool, optional): A Boolean
specifying whether the queue runs container tasks that
run in Singularity containers. Defaults to True.
active (bool, optional): A boolean specifying whether the
                queue is active. Defaults to True.
whitelists (list, optional): A list of task whitelist IDs.
Defaults to None (which gets translated to []).
Returns:
:class:`saltant.models.task_queue.TaskQueue`:
A task queue model instance representing the task queue
just created.
"""
# Translate whitelists None to [] if necessary
if whitelists is None:
whitelists = []
# Create the object
request_url = self._client.base_api_url + self.list_url
data_to_post = {
"name": name,
"description": description,
"private": private,
"runs_executable_tasks": runs_executable_tasks,
"runs_docker_container_tasks": runs_docker_container_tasks,
"runs_singularity_container_tasks": runs_singularity_container_tasks,
"active": active,
"whitelists": whitelists,
}
response = self._client.session.post(request_url, data=data_to_post)
# Validate that the request was successful
self.validate_request_success(
response_text=response.text,
request_url=request_url,
status_code=response.status_code,
expected_status_code=HTTP_201_CREATED,
)
# Return a model instance representing the task instance
return self.response_data_to_model_instance(response.json())
|
[
"def",
"create",
"(",
"self",
",",
"name",
",",
"description",
"=",
"\"\"",
",",
"private",
"=",
"False",
",",
"runs_executable_tasks",
"=",
"True",
",",
"runs_docker_container_tasks",
"=",
"True",
",",
"runs_singularity_container_tasks",
"=",
"True",
",",
"active",
"=",
"True",
",",
"whitelists",
"=",
"None",
",",
")",
":",
"# Translate whitelists None to [] if necessary",
"if",
"whitelists",
"is",
"None",
":",
"whitelists",
"=",
"[",
"]",
"# Create the object",
"request_url",
"=",
"self",
".",
"_client",
".",
"base_api_url",
"+",
"self",
".",
"list_url",
"data_to_post",
"=",
"{",
"\"name\"",
":",
"name",
",",
"\"description\"",
":",
"description",
",",
"\"private\"",
":",
"private",
",",
"\"runs_executable_tasks\"",
":",
"runs_executable_tasks",
",",
"\"runs_docker_container_tasks\"",
":",
"runs_docker_container_tasks",
",",
"\"runs_singularity_container_tasks\"",
":",
"runs_singularity_container_tasks",
",",
"\"active\"",
":",
"active",
",",
"\"whitelists\"",
":",
"whitelists",
",",
"}",
"response",
"=",
"self",
".",
"_client",
".",
"session",
".",
"post",
"(",
"request_url",
",",
"data",
"=",
"data_to_post",
")",
"# Validate that the request was successful",
"self",
".",
"validate_request_success",
"(",
"response_text",
"=",
"response",
".",
"text",
",",
"request_url",
"=",
"request_url",
",",
"status_code",
"=",
"response",
".",
"status_code",
",",
"expected_status_code",
"=",
"HTTP_201_CREATED",
",",
")",
"# Return a model instance representing the task instance",
"return",
"self",
".",
"response_data_to_model_instance",
"(",
"response",
".",
"json",
"(",
")",
")"
] |
Create a task queue.
Args:
name (str): The name of the task queue.
description (str, optional): A description of the task queue.
private (bool, optional): A boolean specifying whether the
queue is exclusive to its creator. Defaults to False.
runs_executable_tasks (bool, optional): A Boolean specifying
whether the queue runs executable tasks. Defaults to
True.
runs_docker_container_tasks (bool, optional): A Boolean
specifying whether the queue runs container tasks that
run in Docker containers. Defaults to True.
runs_singularity_container_tasks (bool, optional): A Boolean
specifying whether the queue runs container tasks that
run in Singularity containers. Defaults to True.
active (bool, optional): A boolean specifying whether the
                queue is active. Defaults to True.
whitelists (list, optional): A list of task whitelist IDs.
Defaults to None (which gets translated to []).
Returns:
:class:`saltant.models.task_queue.TaskQueue`:
A task queue model instance representing the task queue
just created.
|
[
"Create",
"a",
"task",
"queue",
"."
] |
train
|
https://github.com/saltant-org/saltant-py/blob/bf3bdbc4ec9c772c7f621f8bd6a76c5932af68be/saltant/models/task_queue.py#L185-L250
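What TaskQueueManager.create above does at the HTTP level, sketched with requests. The endpoint path and the token header format are assumptions standing in for self.list_url and the client's session configuration:

import requests

session = requests.Session()
session.headers.update({"Authorization": "Token <api-token>"})  # placeholder auth

payload = {
    "name": "nightly-queue",
    "description": "Runs nightly maintenance tasks",
    "private": False,
    "runs_executable_tasks": True,
    "runs_docker_container_tasks": True,
    "runs_singularity_container_tasks": True,
    "active": True,
    "whitelists": [],  # create() substitutes [] when None is passed
}

# The '/taskqueues/' path is a guess standing in for self.list_url.
response = session.post("https://example.org/api/taskqueues/", data=payload)
assert response.status_code == 201, response.text  # HTTP_201_CREATED
new_queue = response.json()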
|
saltant-org/saltant-py
|
saltant/models/task_queue.py
|
TaskQueueManager.patch
|
def patch(
self,
id,
name=None,
description=None,
private=None,
runs_executable_tasks=None,
runs_docker_container_tasks=None,
runs_singularity_container_tasks=None,
active=None,
whitelists=None,
):
"""Partially updates a task queue on the saltant server.
Args:
id (int): The ID of the task queue.
name (str, optional): The name of the task queue.
description (str, optional): The description of the task
queue.
            private (bool, optional): A Boolean signalling whether the
queue can only be used by its associated user.
runs_executable_tasks (bool, optional): A Boolean specifying
whether the queue runs executable tasks.
runs_docker_container_tasks (bool, optional): A Boolean
specifying whether the queue runs container tasks that
run in Docker containers.
runs_singularity_container_tasks (bool, optional): A Boolean
specifying whether the queue runs container tasks that
run in Singularity containers.
            active (bool, optional): A Boolean signalling whether the
queue is active.
whitelists (list, optional): A list of task whitelist IDs.
Returns:
:class:`saltant.models.task_queue.TaskQueue`:
A task queue model instance representing the task queue
just updated.
"""
# Update the object
request_url = self._client.base_api_url + self.detail_url.format(id=id)
data_to_patch = {}
if name is not None:
data_to_patch["name"] = name
if description is not None:
data_to_patch["description"] = description
if private is not None:
data_to_patch["private"] = private
if runs_executable_tasks is not None:
data_to_patch["runs_executable_tasks"] = runs_executable_tasks
if runs_docker_container_tasks is not None:
data_to_patch[
"runs_docker_container_tasks"
] = runs_docker_container_tasks
if runs_singularity_container_tasks is not None:
data_to_patch[
"runs_singularity_container_tasks"
] = runs_singularity_container_tasks
if active is not None:
data_to_patch["active"] = active
if whitelists is not None:
data_to_patch["whitelists"] = whitelists
response = self._client.session.patch(request_url, data=data_to_patch)
# Validate that the request was successful
self.validate_request_success(
response_text=response.text,
request_url=request_url,
status_code=response.status_code,
expected_status_code=HTTP_200_OK,
)
# Return a model instance representing the task instance
return self.response_data_to_model_instance(response.json())
|
python
|
def patch(
self,
id,
name=None,
description=None,
private=None,
runs_executable_tasks=None,
runs_docker_container_tasks=None,
runs_singularity_container_tasks=None,
active=None,
whitelists=None,
):
"""Partially updates a task queue on the saltant server.
Args:
id (int): The ID of the task queue.
name (str, optional): The name of the task queue.
description (str, optional): The description of the task
queue.
            private (bool, optional): A Boolean signalling whether the
queue can only be used by its associated user.
runs_executable_tasks (bool, optional): A Boolean specifying
whether the queue runs executable tasks.
runs_docker_container_tasks (bool, optional): A Boolean
specifying whether the queue runs container tasks that
run in Docker containers.
runs_singularity_container_tasks (bool, optional): A Boolean
specifying whether the queue runs container tasks that
run in Singularity containers.
            active (bool, optional): A Boolean signalling whether the
queue is active.
whitelists (list, optional): A list of task whitelist IDs.
Returns:
:class:`saltant.models.task_queue.TaskQueue`:
A task queue model instance representing the task queue
just updated.
"""
# Update the object
request_url = self._client.base_api_url + self.detail_url.format(id=id)
data_to_patch = {}
if name is not None:
data_to_patch["name"] = name
if description is not None:
data_to_patch["description"] = description
if private is not None:
data_to_patch["private"] = private
if runs_executable_tasks is not None:
data_to_patch["runs_executable_tasks"] = runs_executable_tasks
if runs_docker_container_tasks is not None:
data_to_patch[
"runs_docker_container_tasks"
] = runs_docker_container_tasks
if runs_singularity_container_tasks is not None:
data_to_patch[
"runs_singularity_container_tasks"
] = runs_singularity_container_tasks
if active is not None:
data_to_patch["active"] = active
if whitelists is not None:
data_to_patch["whitelists"] = whitelists
response = self._client.session.patch(request_url, data=data_to_patch)
# Validate that the request was successful
self.validate_request_success(
response_text=response.text,
request_url=request_url,
status_code=response.status_code,
expected_status_code=HTTP_200_OK,
)
# Return a model instance representing the task instance
return self.response_data_to_model_instance(response.json())
|
[
"def",
"patch",
"(",
"self",
",",
"id",
",",
"name",
"=",
"None",
",",
"description",
"=",
"None",
",",
"private",
"=",
"None",
",",
"runs_executable_tasks",
"=",
"None",
",",
"runs_docker_container_tasks",
"=",
"None",
",",
"runs_singularity_container_tasks",
"=",
"None",
",",
"active",
"=",
"None",
",",
"whitelists",
"=",
"None",
",",
")",
":",
"# Update the object",
"request_url",
"=",
"self",
".",
"_client",
".",
"base_api_url",
"+",
"self",
".",
"detail_url",
".",
"format",
"(",
"id",
"=",
"id",
")",
"data_to_patch",
"=",
"{",
"}",
"if",
"name",
"is",
"not",
"None",
":",
"data_to_patch",
"[",
"\"name\"",
"]",
"=",
"name",
"if",
"description",
"is",
"not",
"None",
":",
"data_to_patch",
"[",
"\"description\"",
"]",
"=",
"description",
"if",
"private",
"is",
"not",
"None",
":",
"data_to_patch",
"[",
"\"private\"",
"]",
"=",
"private",
"if",
"runs_executable_tasks",
"is",
"not",
"None",
":",
"data_to_patch",
"[",
"\"runs_executable_tasks\"",
"]",
"=",
"runs_executable_tasks",
"if",
"runs_docker_container_tasks",
"is",
"not",
"None",
":",
"data_to_patch",
"[",
"\"runs_docker_container_tasks\"",
"]",
"=",
"runs_docker_container_tasks",
"if",
"runs_singularity_container_tasks",
"is",
"not",
"None",
":",
"data_to_patch",
"[",
"\"runs_singularity_container_tasks\"",
"]",
"=",
"runs_singularity_container_tasks",
"if",
"active",
"is",
"not",
"None",
":",
"data_to_patch",
"[",
"\"active\"",
"]",
"=",
"active",
"if",
"whitelists",
"is",
"not",
"None",
":",
"data_to_patch",
"[",
"\"whitelists\"",
"]",
"=",
"whitelists",
"response",
"=",
"self",
".",
"_client",
".",
"session",
".",
"patch",
"(",
"request_url",
",",
"data",
"=",
"data_to_patch",
")",
"# Validate that the request was successful",
"self",
".",
"validate_request_success",
"(",
"response_text",
"=",
"response",
".",
"text",
",",
"request_url",
"=",
"request_url",
",",
"status_code",
"=",
"response",
".",
"status_code",
",",
"expected_status_code",
"=",
"HTTP_200_OK",
",",
")",
"# Return a model instance representing the task instance",
"return",
"self",
".",
"response_data_to_model_instance",
"(",
"response",
".",
"json",
"(",
")",
")"
] |
Partially updates a task queue on the saltant server.
Args:
id (int): The ID of the task queue.
name (str, optional): The name of the task queue.
description (str, optional): The description of the task
queue.
            private (bool, optional): A Boolean signalling whether the
queue can only be used by its associated user.
runs_executable_tasks (bool, optional): A Boolean specifying
whether the queue runs executable tasks.
runs_docker_container_tasks (bool, optional): A Boolean
specifying whether the queue runs container tasks that
run in Docker containers.
runs_singularity_container_tasks (bool, optional): A Boolean
specifying whether the queue runs container tasks that
run in Singularity containers.
            active (bool, optional): A Boolean signalling whether the
queue is active.
whitelists (list, optional): A list of task whitelist IDs.
Returns:
:class:`saltant.models.task_queue.TaskQueue`:
A task queue model instance representing the task queue
just updated.
|
[
"Partially",
"updates",
"a",
"task",
"queue",
"on",
"the",
"saltant",
"server",
"."
] |
train
|
https://github.com/saltant-org/saltant-py/blob/bf3bdbc4ec9c772c7f621f8bd6a76c5932af68be/saltant/models/task_queue.py#L252-L334
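The chain of `if ... is not None` checks in patch() above only exists to build a payload limited to the fields the caller actually supplied. A compact, runnable sketch of the same idea:

def build_patch_payload(**fields):
    # Keep only keyword arguments that were explicitly provided, i.e. not
    # left at their None default; False and 0 are real values and survive.
    return {key: value for key, value in fields.items() if value is not None}

payload = build_patch_payload(
    name=None,                  # omitted from the payload
    description="New purpose",  # included
    active=False,               # included, because False is not None
)
print(payload)  # {'description': 'New purpose', 'active': False}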
|
saltant-org/saltant-py
|
saltant/models/task_queue.py
|
TaskQueueManager.put
|
def put(
self,
id,
name,
description,
private,
runs_executable_tasks,
runs_docker_container_tasks,
runs_singularity_container_tasks,
active,
whitelists,
):
"""Updates a task queue on the saltant server.
Args:
id (int): The ID of the task queue.
name (str): The name of the task queue.
description (str): The description of the task queue.
            private (bool): A Boolean signalling whether the queue can
only be used by its associated user.
runs_executable_tasks (bool): A Boolean specifying whether
the queue runs executable tasks.
runs_docker_container_tasks (bool): A Boolean specifying
whether the queue runs container tasks that run in
Docker containers.
runs_singularity_container_tasks (bool): A Boolean
specifying whether the queue runs container tasks that
run in Singularity containers.
            active (bool): A Boolean signalling whether the queue is
active.
whitelists (list): A list of task whitelist IDs.
Returns:
:class:`saltant.models.task_queue.TaskQueue`:
A task queue model instance representing the task queue
just updated.
"""
# Update the object
request_url = self._client.base_api_url + self.detail_url.format(id=id)
data_to_put = {
"name": name,
"description": description,
"private": private,
"runs_executable_tasks": runs_executable_tasks,
"runs_docker_container_tasks": runs_docker_container_tasks,
"runs_singularity_container_tasks": runs_singularity_container_tasks,
"active": active,
"whitelists": whitelists,
}
response = self._client.session.put(request_url, data=data_to_put)
# Validate that the request was successful
self.validate_request_success(
response_text=response.text,
request_url=request_url,
status_code=response.status_code,
expected_status_code=HTTP_200_OK,
)
# Return a model instance representing the task instance
return self.response_data_to_model_instance(response.json())
|
python
|
def put(
self,
id,
name,
description,
private,
runs_executable_tasks,
runs_docker_container_tasks,
runs_singularity_container_tasks,
active,
whitelists,
):
"""Updates a task queue on the saltant server.
Args:
id (int): The ID of the task queue.
name (str): The name of the task queue.
description (str): The description of the task queue.
            private (bool): A Boolean signalling whether the queue can
only be used by its associated user.
runs_executable_tasks (bool): A Boolean specifying whether
the queue runs executable tasks.
runs_docker_container_tasks (bool): A Boolean specifying
whether the queue runs container tasks that run in
Docker containers.
runs_singularity_container_tasks (bool): A Boolean
specifying whether the queue runs container tasks that
run in Singularity containers.
            active (bool): A Boolean signalling whether the queue is
active.
whitelists (list): A list of task whitelist IDs.
Returns:
:class:`saltant.models.task_queue.TaskQueue`:
A task queue model instance representing the task queue
just updated.
"""
# Update the object
request_url = self._client.base_api_url + self.detail_url.format(id=id)
data_to_put = {
"name": name,
"description": description,
"private": private,
"runs_executable_tasks": runs_executable_tasks,
"runs_docker_container_tasks": runs_docker_container_tasks,
"runs_singularity_container_tasks": runs_singularity_container_tasks,
"active": active,
"whitelists": whitelists,
}
response = self._client.session.put(request_url, data=data_to_put)
# Validate that the request was successful
self.validate_request_success(
response_text=response.text,
request_url=request_url,
status_code=response.status_code,
expected_status_code=HTTP_200_OK,
)
# Return a model instance representing the task instance
return self.response_data_to_model_instance(response.json())
|
[
"def",
"put",
"(",
"self",
",",
"id",
",",
"name",
",",
"description",
",",
"private",
",",
"runs_executable_tasks",
",",
"runs_docker_container_tasks",
",",
"runs_singularity_container_tasks",
",",
"active",
",",
"whitelists",
",",
")",
":",
"# Update the object",
"request_url",
"=",
"self",
".",
"_client",
".",
"base_api_url",
"+",
"self",
".",
"detail_url",
".",
"format",
"(",
"id",
"=",
"id",
")",
"data_to_put",
"=",
"{",
"\"name\"",
":",
"name",
",",
"\"description\"",
":",
"description",
",",
"\"private\"",
":",
"private",
",",
"\"runs_executable_tasks\"",
":",
"runs_executable_tasks",
",",
"\"runs_docker_container_tasks\"",
":",
"runs_docker_container_tasks",
",",
"\"runs_singularity_container_tasks\"",
":",
"runs_singularity_container_tasks",
",",
"\"active\"",
":",
"active",
",",
"\"whitelists\"",
":",
"whitelists",
",",
"}",
"response",
"=",
"self",
".",
"_client",
".",
"session",
".",
"put",
"(",
"request_url",
",",
"data",
"=",
"data_to_put",
")",
"# Validate that the request was successful",
"self",
".",
"validate_request_success",
"(",
"response_text",
"=",
"response",
".",
"text",
",",
"request_url",
"=",
"request_url",
",",
"status_code",
"=",
"response",
".",
"status_code",
",",
"expected_status_code",
"=",
"HTTP_200_OK",
",",
")",
"# Return a model instance representing the task instance",
"return",
"self",
".",
"response_data_to_model_instance",
"(",
"response",
".",
"json",
"(",
")",
")"
] |
Updates a task queue on the saltant server.
Args:
id (int): The ID of the task queue.
name (str): The name of the task queue.
description (str): The description of the task queue.
            private (bool): A Boolean signalling whether the queue can
only be used by its associated user.
runs_executable_tasks (bool): A Boolean specifying whether
the queue runs executable tasks.
runs_docker_container_tasks (bool): A Boolean specifying
whether the queue runs container tasks that run in
Docker containers.
runs_singularity_container_tasks (bool): A Boolean
specifying whether the queue runs container tasks that
run in Singularity containers.
            active (bool): A Boolean signalling whether the queue is
active.
whitelists (list): A list of task whitelist IDs.
Returns:
:class:`saltant.models.task_queue.TaskQueue`:
A task queue model instance representing the task queue
just updated.
|
[
"Updates",
"a",
"task",
"queue",
"on",
"the",
"saltant",
"server",
"."
] |
train
|
https://github.com/saltant-org/saltant-py/blob/bf3bdbc4ec9c772c7f621f8bd6a76c5932af68be/saltant/models/task_queue.py#L336-L397
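TaskQueueManager.put above is the full-update counterpart of patch(): every writable field must be re-sent, not just the ones that changed. A requests-level sketch of the difference; the URL and auth header are placeholders, not values from this entry:

import requests

session = requests.Session()
session.headers.update({"Authorization": "Token <api-token>"})  # placeholder auth
url = "https://example.org/api/taskqueues/7/"  # placeholder detail URL for queue id 7

# PUT: the complete state travels with the request.
full_state = {
    "name": "nightly-queue",
    "description": "Runs nightly maintenance tasks",
    "private": False,
    "runs_executable_tasks": True,
    "runs_docker_container_tasks": True,
    "runs_singularity_container_tasks": True,
    "active": False,
    "whitelists": [],
}
session.put(url, data=full_state)

# PATCH: only the field being changed travels with the request.
session.patch(url, data={"active": False})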
|
OLC-Bioinformatics/sipprverse
|
sixteenS/sixteens_full.py
|
SixteenSBait.main
|
def main(self):
"""
Run the required methods in the appropriate order
"""
self.targets()
self.bait(k=49)
self.reversebait(maskmiddle='t', k=19)
self.subsample_reads()
|
python
|
def main(self):
"""
Run the required methods in the appropriate order
"""
self.targets()
self.bait(k=49)
self.reversebait(maskmiddle='t', k=19)
self.subsample_reads()
|
[
"def",
"main",
"(",
"self",
")",
":",
"self",
".",
"targets",
"(",
")",
"self",
".",
"bait",
"(",
"k",
"=",
"49",
")",
"self",
".",
"reversebait",
"(",
"maskmiddle",
"=",
"'t'",
",",
"k",
"=",
"19",
")",
"self",
".",
"subsample_reads",
"(",
")"
] |
Run the required methods in the appropriate order
|
[
"Run",
"the",
"required",
"methods",
"in",
"the",
"appropriate",
"order"
] |
train
|
https://github.com/OLC-Bioinformatics/sipprverse/blob/d4f10cdf8e1a39dac0953db61c21c97efc6006de/sixteenS/sixteens_full.py#L31-L38
|
OLC-Bioinformatics/sipprverse
|
sixteenS/sixteens_full.py
|
SixteenSBait.targets
|
def targets(self):
"""
Create the GenObject for the analysis type, create the hash file for baiting (if necessary)
"""
for sample in self.runmetadata:
if sample.general.bestassemblyfile != 'NA':
setattr(sample, self.analysistype, GenObject())
sample[self.analysistype].runanalysis = True
sample[self.analysistype].targetpath = self.targetpath
baitpath = os.path.join(self.targetpath, 'bait')
sample[self.analysistype].baitfile = glob(os.path.join(baitpath, '*.fa'))[0]
try:
sample[self.analysistype].outputdir = os.path.join(sample.run.outputdirectory, self.analysistype)
except AttributeError:
sample[self.analysistype].outputdir = \
os.path.join(sample.general.outputdirectory, self.analysistype)
sample.run.outputdirectory = sample.general.outputdirectory
sample[self.analysistype].logout = os.path.join(sample[self.analysistype].outputdir, 'logout.txt')
sample[self.analysistype].logerr = os.path.join(sample[self.analysistype].outputdir, 'logerr.txt')
sample[self.analysistype].baitedfastq = os.path.join(sample[self.analysistype].outputdir,
'{}_targetMatches.fastq'.format(self.analysistype))
sample[self.analysistype].complete = False
|
python
|
def targets(self):
"""
Create the GenObject for the analysis type, create the hash file for baiting (if necessary)
"""
for sample in self.runmetadata:
if sample.general.bestassemblyfile != 'NA':
setattr(sample, self.analysistype, GenObject())
sample[self.analysistype].runanalysis = True
sample[self.analysistype].targetpath = self.targetpath
baitpath = os.path.join(self.targetpath, 'bait')
sample[self.analysistype].baitfile = glob(os.path.join(baitpath, '*.fa'))[0]
try:
sample[self.analysistype].outputdir = os.path.join(sample.run.outputdirectory, self.analysistype)
except AttributeError:
sample[self.analysistype].outputdir = \
os.path.join(sample.general.outputdirectory, self.analysistype)
sample.run.outputdirectory = sample.general.outputdirectory
sample[self.analysistype].logout = os.path.join(sample[self.analysistype].outputdir, 'logout.txt')
sample[self.analysistype].logerr = os.path.join(sample[self.analysistype].outputdir, 'logerr.txt')
sample[self.analysistype].baitedfastq = os.path.join(sample[self.analysistype].outputdir,
'{}_targetMatches.fastq'.format(self.analysistype))
sample[self.analysistype].complete = False
|
[
"def",
"targets",
"(",
"self",
")",
":",
"for",
"sample",
"in",
"self",
".",
"runmetadata",
":",
"if",
"sample",
".",
"general",
".",
"bestassemblyfile",
"!=",
"'NA'",
":",
"setattr",
"(",
"sample",
",",
"self",
".",
"analysistype",
",",
"GenObject",
"(",
")",
")",
"sample",
"[",
"self",
".",
"analysistype",
"]",
".",
"runanalysis",
"=",
"True",
"sample",
"[",
"self",
".",
"analysistype",
"]",
".",
"targetpath",
"=",
"self",
".",
"targetpath",
"baitpath",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"targetpath",
",",
"'bait'",
")",
"sample",
"[",
"self",
".",
"analysistype",
"]",
".",
"baitfile",
"=",
"glob",
"(",
"os",
".",
"path",
".",
"join",
"(",
"baitpath",
",",
"'*.fa'",
")",
")",
"[",
"0",
"]",
"try",
":",
"sample",
"[",
"self",
".",
"analysistype",
"]",
".",
"outputdir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"sample",
".",
"run",
".",
"outputdirectory",
",",
"self",
".",
"analysistype",
")",
"except",
"AttributeError",
":",
"sample",
"[",
"self",
".",
"analysistype",
"]",
".",
"outputdir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"sample",
".",
"general",
".",
"outputdirectory",
",",
"self",
".",
"analysistype",
")",
"sample",
".",
"run",
".",
"outputdirectory",
"=",
"sample",
".",
"general",
".",
"outputdirectory",
"sample",
"[",
"self",
".",
"analysistype",
"]",
".",
"logout",
"=",
"os",
".",
"path",
".",
"join",
"(",
"sample",
"[",
"self",
".",
"analysistype",
"]",
".",
"outputdir",
",",
"'logout.txt'",
")",
"sample",
"[",
"self",
".",
"analysistype",
"]",
".",
"logerr",
"=",
"os",
".",
"path",
".",
"join",
"(",
"sample",
"[",
"self",
".",
"analysistype",
"]",
".",
"outputdir",
",",
"'logerr.txt'",
")",
"sample",
"[",
"self",
".",
"analysistype",
"]",
".",
"baitedfastq",
"=",
"os",
".",
"path",
".",
"join",
"(",
"sample",
"[",
"self",
".",
"analysistype",
"]",
".",
"outputdir",
",",
"'{}_targetMatches.fastq'",
".",
"format",
"(",
"self",
".",
"analysistype",
")",
")",
"sample",
"[",
"self",
".",
"analysistype",
"]",
".",
"complete",
"=",
"False"
] |
Create the GenObject for the analysis type, create the hash file for baiting (if necessary)
|
[
"Create",
"the",
"GenObject",
"for",
"the",
"analysis",
"type",
"create",
"the",
"hash",
"file",
"for",
"baiting",
"(",
"if",
"necessary",
")"
] |
train
|
https://github.com/OLC-Bioinformatics/sipprverse/blob/d4f10cdf8e1a39dac0953db61c21c97efc6006de/sixteenS/sixteens_full.py#L40-L61
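The per-sample wiring in SixteenSBait.targets above boils down to a handful of path derivations. A standard-library sketch with placeholder paths, leaving out the GenObject and metadata plumbing:

import os
from glob import glob

analysistype = 'sixteens_full'           # assumed analysis name
targetpath = '/databases/16S'            # placeholder database root
outputdirectory = '/analyses/sample1'    # placeholder per-sample output root

# The bait file is simply the first FASTA found in <targetpath>/bait.
baitpath = os.path.join(targetpath, 'bait')
matches = glob(os.path.join(baitpath, '*.fa'))
baitfile = matches[0] if matches else None  # the real code assumes exactly one bait FASTA

# Every per-analysis output location is derived from the sample's output directory.
outputdir = os.path.join(outputdirectory, analysistype)
logout = os.path.join(outputdir, 'logout.txt')
logerr = os.path.join(outputdir, 'logerr.txt')
baitedfastq = os.path.join(outputdir, '{}_targetMatches.fastq'.format(analysistype))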
|
OLC-Bioinformatics/sipprverse
|
sixteenS/sixteens_full.py
|
SixteenSSipper.targets
|
def targets(self):
"""
Using the data from the BLAST analyses, set the targets folder, and create the 'mapping file'. This is the
genera-specific FASTA file that will be used for all the reference mapping; it replaces the 'bait file' in the
code
"""
logging.info('Performing analysis with {} targets folder'.format(self.analysistype))
for sample in self.runmetadata:
if sample.general.bestassemblyfile != 'NA':
sample[self.analysistype].targetpath = \
os.path.join(self.targetpath, 'genera', sample[self.analysistype].genus, '')
# There is a relatively strict databasing scheme necessary for the custom targets. Eventually,
# there will be a helper script to combine individual files into a properly formatted combined file
try:
sample[self.analysistype].mappingfile = glob('{}*.fa'
.format(sample[self.analysistype].targetpath))[0]
# If the fasta file is missing, raise a custom error
except IndexError as e:
# noinspection PyPropertyAccess
e.args = ['Cannot find the combined fasta file in {}. Please note that the file must have a '
'.fasta extension'.format(sample[self.analysistype].targetpath)]
if os.path.isdir(sample[self.analysistype].targetpath):
raise
else:
sample.general.bestassemblyfile = 'NA'
|
python
|
def targets(self):
"""
Using the data from the BLAST analyses, set the targets folder, and create the 'mapping file'. This is the
genera-specific FASTA file that will be used for all the reference mapping; it replaces the 'bait file' in the
code
"""
logging.info('Performing analysis with {} targets folder'.format(self.analysistype))
for sample in self.runmetadata:
if sample.general.bestassemblyfile != 'NA':
sample[self.analysistype].targetpath = \
os.path.join(self.targetpath, 'genera', sample[self.analysistype].genus, '')
# There is a relatively strict databasing scheme necessary for the custom targets. Eventually,
# there will be a helper script to combine individual files into a properly formatted combined file
try:
sample[self.analysistype].mappingfile = glob('{}*.fa'
.format(sample[self.analysistype].targetpath))[0]
# If the fasta file is missing, raise a custom error
except IndexError as e:
# noinspection PyPropertyAccess
e.args = ['Cannot find the combined fasta file in {}. Please note that the file must have a '
'.fasta extension'.format(sample[self.analysistype].targetpath)]
if os.path.isdir(sample[self.analysistype].targetpath):
raise
else:
sample.general.bestassemblyfile = 'NA'
|
[
"def",
"targets",
"(",
"self",
")",
":",
"logging",
".",
"info",
"(",
"'Performing analysis with {} targets folder'",
".",
"format",
"(",
"self",
".",
"analysistype",
")",
")",
"for",
"sample",
"in",
"self",
".",
"runmetadata",
":",
"if",
"sample",
".",
"general",
".",
"bestassemblyfile",
"!=",
"'NA'",
":",
"sample",
"[",
"self",
".",
"analysistype",
"]",
".",
"targetpath",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"targetpath",
",",
"'genera'",
",",
"sample",
"[",
"self",
".",
"analysistype",
"]",
".",
"genus",
",",
"''",
")",
"# There is a relatively strict databasing scheme necessary for the custom targets. Eventually,",
"# there will be a helper script to combine individual files into a properly formatted combined file",
"try",
":",
"sample",
"[",
"self",
".",
"analysistype",
"]",
".",
"mappingfile",
"=",
"glob",
"(",
"'{}*.fa'",
".",
"format",
"(",
"sample",
"[",
"self",
".",
"analysistype",
"]",
".",
"targetpath",
")",
")",
"[",
"0",
"]",
"# If the fasta file is missing, raise a custom error",
"except",
"IndexError",
"as",
"e",
":",
"# noinspection PyPropertyAccess",
"e",
".",
"args",
"=",
"[",
"'Cannot find the combined fasta file in {}. Please note that the file must have a '",
"'.fasta extension'",
".",
"format",
"(",
"sample",
"[",
"self",
".",
"analysistype",
"]",
".",
"targetpath",
")",
"]",
"if",
"os",
".",
"path",
".",
"isdir",
"(",
"sample",
"[",
"self",
".",
"analysistype",
"]",
".",
"targetpath",
")",
":",
"raise",
"else",
":",
"sample",
".",
"general",
".",
"bestassemblyfile",
"=",
"'NA'"
] |
Using the data from the BLAST analyses, set the targets folder, and create the 'mapping file'. This is the
genera-specific FASTA file that will be used for all the reference mapping; it replaces the 'bait file' in the
code
|
[
"Using",
"the",
"data",
"from",
"the",
"BLAST",
"analyses",
"set",
"the",
"targets",
"folder",
"and",
"create",
"the",
"mapping",
"file",
".",
"This",
"is",
"the",
"genera",
"-",
"specific",
"FASTA",
"file",
"that",
"will",
"be",
"used",
"for",
"all",
"the",
"reference",
"mapping",
";",
"it",
"replaces",
"the",
"bait",
"file",
"in",
"the",
"code"
] |
train
|
https://github.com/OLC-Bioinformatics/sipprverse/blob/d4f10cdf8e1a39dac0953db61c21c97efc6006de/sixteenS/sixteens_full.py#L82-L106
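SixteenSSipper.targets above uses a less common error-handling pattern: it rewrites the IndexError's message and only re-raises when the genus folder exists but holds no FASTA. A standalone sketch of that pattern; the path is a placeholder, and the glob pattern and '.fasta' wording are kept as in the original:

import os
from glob import glob

def find_mapping_file(targetpath):
    try:
        return glob('{}*.fa'.format(targetpath))[0]
    except IndexError as e:
        # Replace the unhelpful IndexError message with a targeted one.
        e.args = ['Cannot find the combined fasta file in {}. Please note that the '
                  'file must have a .fasta extension'.format(targetpath)]
        if os.path.isdir(targetpath):
            raise        # the folder exists but contains no FASTA: surface the error
        return None      # the folder is missing entirely: caller marks the sample 'NA'

print(find_mapping_file('/databases/16S/genera/Escherichia/'))  # placeholder path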
|
OLC-Bioinformatics/sipprverse
|
sixteenS/sixteens_full.py
|
SixteenS.runner
|
def runner(self):
"""
Run the necessary methods in the correct order
"""
logging.info('Starting {} analysis pipeline'.format(self.analysistype))
if not self.pipeline:
# If the metadata has been passed from the method script, self.pipeline must still be false in order to
# get Sippr() to function correctly, but the metadata shouldn't be recreated
try:
_ = vars(self.runmetadata)['samples']
except AttributeError:
# Create the objects to be used in the analyses
objects = Objectprep(self)
objects.objectprep()
self.runmetadata = objects.samples
else:
for sample in self.runmetadata.samples:
setattr(sample, self.analysistype, GenObject())
sample.run.outputdirectory = sample.general.outputdirectory
self.threads = int(self.cpus / len(self.runmetadata.samples)) \
if self.cpus / len(self.runmetadata.samples) > 1 \
else 1
# Use a custom sippr method to use the full reference database as bait, and run mirabait against the FASTQ
# reads - do not perform reference mapping yet
SixteenSBait(self, self.cutoff)
# Subsample 1000 reads from the FASTQ files
self.subsample()
# Convert the subsampled FASTQ files to FASTA format
self.fasta()
# Create BLAST databases if required
self.makeblastdb()
# Run BLAST analyses of the subsampled FASTA files against the NCBI 16S reference database
self.blast()
# Parse the BLAST results
self.blastparse()
# Feed the BLAST results into a modified sippr method to perform reference mapping using the calculated
# genus of the sample as the mapping file
SixteenSSipper(self, self.cutoff)
# Create reports
self.reporter()
|
python
|
def runner(self):
"""
Run the necessary methods in the correct order
"""
logging.info('Starting {} analysis pipeline'.format(self.analysistype))
if not self.pipeline:
# If the metadata has been passed from the method script, self.pipeline must still be false in order to
# get Sippr() to function correctly, but the metadata shouldn't be recreated
try:
_ = vars(self.runmetadata)['samples']
except AttributeError:
# Create the objects to be used in the analyses
objects = Objectprep(self)
objects.objectprep()
self.runmetadata = objects.samples
else:
for sample in self.runmetadata.samples:
setattr(sample, self.analysistype, GenObject())
sample.run.outputdirectory = sample.general.outputdirectory
self.threads = int(self.cpus / len(self.runmetadata.samples)) \
if self.cpus / len(self.runmetadata.samples) > 1 \
else 1
# Use a custom sippr method to use the full reference database as bait, and run mirabait against the FASTQ
# reads - do not perform reference mapping yet
SixteenSBait(self, self.cutoff)
# Subsample 1000 reads from the FASTQ files
self.subsample()
# Convert the subsampled FASTQ files to FASTA format
self.fasta()
# Create BLAST databases if required
self.makeblastdb()
# Run BLAST analyses of the subsampled FASTA files against the NCBI 16S reference database
self.blast()
# Parse the BLAST results
self.blastparse()
# Feed the BLAST results into a modified sippr method to perform reference mapping using the calculated
# genus of the sample as the mapping file
SixteenSSipper(self, self.cutoff)
# Create reports
self.reporter()
|
[
"def",
"runner",
"(",
"self",
")",
":",
"logging",
".",
"info",
"(",
"'Starting {} analysis pipeline'",
".",
"format",
"(",
"self",
".",
"analysistype",
")",
")",
"if",
"not",
"self",
".",
"pipeline",
":",
"# If the metadata has been passed from the method script, self.pipeline must still be false in order to",
"# get Sippr() to function correctly, but the metadata shouldn't be recreated",
"try",
":",
"_",
"=",
"vars",
"(",
"self",
".",
"runmetadata",
")",
"[",
"'samples'",
"]",
"except",
"AttributeError",
":",
"# Create the objects to be used in the analyses",
"objects",
"=",
"Objectprep",
"(",
"self",
")",
"objects",
".",
"objectprep",
"(",
")",
"self",
".",
"runmetadata",
"=",
"objects",
".",
"samples",
"else",
":",
"for",
"sample",
"in",
"self",
".",
"runmetadata",
".",
"samples",
":",
"setattr",
"(",
"sample",
",",
"self",
".",
"analysistype",
",",
"GenObject",
"(",
")",
")",
"sample",
".",
"run",
".",
"outputdirectory",
"=",
"sample",
".",
"general",
".",
"outputdirectory",
"self",
".",
"threads",
"=",
"int",
"(",
"self",
".",
"cpus",
"/",
"len",
"(",
"self",
".",
"runmetadata",
".",
"samples",
")",
")",
"if",
"self",
".",
"cpus",
"/",
"len",
"(",
"self",
".",
"runmetadata",
".",
"samples",
")",
">",
"1",
"else",
"1",
"# Use a custom sippr method to use the full reference database as bait, and run mirabait against the FASTQ",
"# reads - do not perform reference mapping yet",
"SixteenSBait",
"(",
"self",
",",
"self",
".",
"cutoff",
")",
"# Subsample 1000 reads from the FASTQ files",
"self",
".",
"subsample",
"(",
")",
"# Convert the subsampled FASTQ files to FASTA format",
"self",
".",
"fasta",
"(",
")",
"# Create BLAST databases if required",
"self",
".",
"makeblastdb",
"(",
")",
"# Run BLAST analyses of the subsampled FASTA files against the NCBI 16S reference database",
"self",
".",
"blast",
"(",
")",
"# Parse the BLAST results",
"self",
".",
"blastparse",
"(",
")",
"# Feed the BLAST results into a modified sippr method to perform reference mapping using the calculated",
"# genus of the sample as the mapping file",
"SixteenSSipper",
"(",
"self",
",",
"self",
".",
"cutoff",
")",
"# Create reports",
"self",
".",
"reporter",
"(",
")"
] |
Run the necessary methods in the correct order
|
[
"Run",
"the",
"necessary",
"methods",
"in",
"the",
"correct",
"order"
] |
train
|
https://github.com/OLC-Bioinformatics/sipprverse/blob/d4f10cdf8e1a39dac0953db61c21c97efc6006de/sixteenS/sixteens_full.py#L111-L151
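The thread budget computed near the top of runner() above divides the available CPUs evenly across samples but never drops below one thread per sample. Isolated for clarity:

def threads_per_sample(cpus, sample_count):
    # Integer share of CPUs per sample, floored at 1 when samples outnumber CPUs.
    return int(cpus / sample_count) if cpus / sample_count > 1 else 1

print(threads_per_sample(16, 4))  # 4
print(threads_per_sample(4, 8))   # 1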
|
OLC-Bioinformatics/sipprverse
|
sixteenS/sixteens_full.py
|
SixteenS.subsample
|
def subsample(self):
"""
Subsample 1000 reads from the baited files
"""
# Create the threads for the analysis
logging.info('Subsampling FASTQ reads')
for _ in range(self.cpus):
threads = Thread(target=self.subsamplethreads, args=())
threads.setDaemon(True)
threads.start()
with progressbar(self.runmetadata.samples) as bar:
for sample in bar:
if sample.general.bestassemblyfile != 'NA':
# Set the name of the subsampled FASTQ file
sample[self.analysistype].subsampledfastq = \
os.path.splitext(sample[self.analysistype].baitedfastq)[0] + '_subsampled.fastq'
# Set the system call
sample[self.analysistype].seqtkcall = 'reformat.sh in={} out={} samplereadstarget=1000'\
.format(sample[self.analysistype].baitedfastq,
sample[self.analysistype].subsampledfastq)
# Add the sample to the queue
self.samplequeue.put(sample)
self.samplequeue.join()
|
python
|
def subsample(self):
"""
Subsample 1000 reads from the baited files
"""
# Create the threads for the analysis
logging.info('Subsampling FASTQ reads')
for _ in range(self.cpus):
threads = Thread(target=self.subsamplethreads, args=())
threads.setDaemon(True)
threads.start()
with progressbar(self.runmetadata.samples) as bar:
for sample in bar:
if sample.general.bestassemblyfile != 'NA':
# Set the name of the subsampled FASTQ file
sample[self.analysistype].subsampledfastq = \
os.path.splitext(sample[self.analysistype].baitedfastq)[0] + '_subsampled.fastq'
# Set the system call
sample[self.analysistype].seqtkcall = 'reformat.sh in={} out={} samplereadstarget=1000'\
.format(sample[self.analysistype].baitedfastq,
sample[self.analysistype].subsampledfastq)
# Add the sample to the queue
self.samplequeue.put(sample)
self.samplequeue.join()
|
[
"def",
"subsample",
"(",
"self",
")",
":",
"# Create the threads for the analysis",
"logging",
".",
"info",
"(",
"'Subsampling FASTQ reads'",
")",
"for",
"_",
"in",
"range",
"(",
"self",
".",
"cpus",
")",
":",
"threads",
"=",
"Thread",
"(",
"target",
"=",
"self",
".",
"subsamplethreads",
",",
"args",
"=",
"(",
")",
")",
"threads",
".",
"setDaemon",
"(",
"True",
")",
"threads",
".",
"start",
"(",
")",
"with",
"progressbar",
"(",
"self",
".",
"runmetadata",
".",
"samples",
")",
"as",
"bar",
":",
"for",
"sample",
"in",
"bar",
":",
"if",
"sample",
".",
"general",
".",
"bestassemblyfile",
"!=",
"'NA'",
":",
"# Set the name of the subsampled FASTQ file",
"sample",
"[",
"self",
".",
"analysistype",
"]",
".",
"subsampledfastq",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"sample",
"[",
"self",
".",
"analysistype",
"]",
".",
"baitedfastq",
")",
"[",
"0",
"]",
"+",
"'_subsampled.fastq'",
"# Set the system call",
"sample",
"[",
"self",
".",
"analysistype",
"]",
".",
"seqtkcall",
"=",
"'reformat.sh in={} out={} samplereadstarget=1000'",
".",
"format",
"(",
"sample",
"[",
"self",
".",
"analysistype",
"]",
".",
"baitedfastq",
",",
"sample",
"[",
"self",
".",
"analysistype",
"]",
".",
"subsampledfastq",
")",
"# Add the sample to the queue",
"self",
".",
"samplequeue",
".",
"put",
"(",
"sample",
")",
"self",
".",
"samplequeue",
".",
"join",
"(",
")"
] |
Subsample 1000 reads from the baited files
|
[
"Subsample",
"1000",
"reads",
"from",
"the",
"baited",
"files"
] |
train
|
https://github.com/OLC-Bioinformatics/sipprverse/blob/d4f10cdf8e1a39dac0953db61c21c97efc6006de/sixteenS/sixteens_full.py#L153-L175
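subsample() above, like most methods in this class, follows the same concurrency recipe: start one daemon worker thread per CPU, feed work items into a Queue, then block on join() until every item is marked done. A minimal, runnable sketch of that recipe with a print in place of the external subsampling call:

from queue import Queue
from threading import Thread

work_queue = Queue()

def worker():
    # Daemon workers loop forever; task_done() is what lets join() return
    # once every queued item has been processed.
    while True:
        item = work_queue.get()
        print('processing', item)
        work_queue.task_done()

for _ in range(4):  # stands in for self.cpus
    thread = Thread(target=worker, args=())
    thread.setDaemon(True)  # kept as in the original; thread.daemon = True is the modern form
    thread.start()

for sample_name in ['sample1', 'sample2', 'sample3']:
    work_queue.put(sample_name)
work_queue.join()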
|
OLC-Bioinformatics/sipprverse
|
sixteenS/sixteens_full.py
|
SixteenS.fasta
|
def fasta(self):
"""
Convert the subsampled reads to FASTA format using reformat.sh
"""
logging.info('Converting FASTQ files to FASTA format')
# Create the threads for the analysis
for _ in range(self.cpus):
threads = Thread(target=self.fastathreads, args=())
threads.setDaemon(True)
threads.start()
with progressbar(self.runmetadata.samples) as bar:
for sample in bar:
if sample.general.bestassemblyfile != 'NA':
# Set the name as the FASTA file - the same as the FASTQ, but with .fa file extension
sample[self.analysistype].fasta = \
os.path.splitext(sample[self.analysistype].subsampledfastq)[0] + '.fa'
# Set the system call
sample[self.analysistype].reformatcall = 'reformat.sh in={fastq} out={fasta}'\
.format(fastq=sample[self.analysistype].subsampledfastq,
fasta=sample[self.analysistype].fasta)
# Add the sample to the queue
self.fastaqueue.put(sample)
self.fastaqueue.join()
|
python
|
def fasta(self):
"""
Convert the subsampled reads to FASTA format using reformat.sh
"""
logging.info('Converting FASTQ files to FASTA format')
# Create the threads for the analysis
for _ in range(self.cpus):
threads = Thread(target=self.fastathreads, args=())
threads.setDaemon(True)
threads.start()
with progressbar(self.runmetadata.samples) as bar:
for sample in bar:
if sample.general.bestassemblyfile != 'NA':
# Set the name as the FASTA file - the same as the FASTQ, but with .fa file extension
sample[self.analysistype].fasta = \
os.path.splitext(sample[self.analysistype].subsampledfastq)[0] + '.fa'
# Set the system call
sample[self.analysistype].reformatcall = 'reformat.sh in={fastq} out={fasta}'\
.format(fastq=sample[self.analysistype].subsampledfastq,
fasta=sample[self.analysistype].fasta)
# Add the sample to the queue
self.fastaqueue.put(sample)
self.fastaqueue.join()
|
[
"def",
"fasta",
"(",
"self",
")",
":",
"logging",
".",
"info",
"(",
"'Converting FASTQ files to FASTA format'",
")",
"# Create the threads for the analysis",
"for",
"_",
"in",
"range",
"(",
"self",
".",
"cpus",
")",
":",
"threads",
"=",
"Thread",
"(",
"target",
"=",
"self",
".",
"fastathreads",
",",
"args",
"=",
"(",
")",
")",
"threads",
".",
"setDaemon",
"(",
"True",
")",
"threads",
".",
"start",
"(",
")",
"with",
"progressbar",
"(",
"self",
".",
"runmetadata",
".",
"samples",
")",
"as",
"bar",
":",
"for",
"sample",
"in",
"bar",
":",
"if",
"sample",
".",
"general",
".",
"bestassemblyfile",
"!=",
"'NA'",
":",
"# Set the name as the FASTA file - the same as the FASTQ, but with .fa file extension",
"sample",
"[",
"self",
".",
"analysistype",
"]",
".",
"fasta",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"sample",
"[",
"self",
".",
"analysistype",
"]",
".",
"subsampledfastq",
")",
"[",
"0",
"]",
"+",
"'.fa'",
"# Set the system call",
"sample",
"[",
"self",
".",
"analysistype",
"]",
".",
"reformatcall",
"=",
"'reformat.sh in={fastq} out={fasta}'",
".",
"format",
"(",
"fastq",
"=",
"sample",
"[",
"self",
".",
"analysistype",
"]",
".",
"subsampledfastq",
",",
"fasta",
"=",
"sample",
"[",
"self",
".",
"analysistype",
"]",
".",
"fasta",
")",
"# Add the sample to the queue",
"self",
".",
"fastaqueue",
".",
"put",
"(",
"sample",
")",
"self",
".",
"fastaqueue",
".",
"join",
"(",
")"
] |
Convert the subsampled reads to FASTA format using reformat.sh
|
[
"Convert",
"the",
"subsampled",
"reads",
"to",
"FASTA",
"format",
"using",
"reformat",
".",
"sh"
] |
train
|
https://github.com/OLC-Bioinformatics/sipprverse/blob/d4f10cdf8e1a39dac0953db61c21c97efc6006de/sixteenS/sixteens_full.py#L194-L216
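The only new wrinkle in fasta() above is how the output name and the external command are derived; reformat.sh itself is a BBMap tool that must be installed separately and is only referenced, not run, in this sketch. Placeholder paths are used:

import os

subsampledfastq = '/analyses/sample1/sixteens_full_targetMatches_subsampled.fastq'  # placeholder
fasta = os.path.splitext(subsampledfastq)[0] + '.fa'  # swap the extension for .fa
reformatcall = 'reformat.sh in={fastq} out={fasta}'.format(fastq=subsampledfastq, fasta=fasta)
print(reformatcall)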
|
OLC-Bioinformatics/sipprverse
|
sixteenS/sixteens_full.py
|
SixteenS.makeblastdb
|
def makeblastdb(self):
"""
Makes blast database files from targets as necessary
"""
# Iterate through the samples to set the bait file.
for sample in self.runmetadata.samples:
if sample.general.bestassemblyfile != 'NA':
# Remove the file extension
db = os.path.splitext(sample[self.analysistype].baitfile)[0]
# Add '.nhr' for searching below
nhr = '{}.nhr'.format(db)
# Check for already existing database files
if not os.path.isfile(str(nhr)):
# Create the databases
command = 'makeblastdb -in {} -parse_seqids -max_file_sz 2GB -dbtype nucl -out {}'\
.format(sample[self.analysistype].baitfile, db)
out, err = run_subprocess(command)
write_to_logfile(command,
command,
self.logfile, sample.general.logout, sample.general.logerr,
sample[self.analysistype].logout, sample[self.analysistype].logerr)
write_to_logfile(out,
err,
self.logfile, sample.general.logout, sample.general.logerr,
sample[self.analysistype].logout, sample[self.analysistype].logerr)
|
python
|
def makeblastdb(self):
"""
Makes blast database files from targets as necessary
"""
# Iterate through the samples to set the bait file.
for sample in self.runmetadata.samples:
if sample.general.bestassemblyfile != 'NA':
# Remove the file extension
db = os.path.splitext(sample[self.analysistype].baitfile)[0]
# Add '.nhr' for searching below
nhr = '{}.nhr'.format(db)
# Check for already existing database files
if not os.path.isfile(str(nhr)):
# Create the databases
command = 'makeblastdb -in {} -parse_seqids -max_file_sz 2GB -dbtype nucl -out {}'\
.format(sample[self.analysistype].baitfile, db)
out, err = run_subprocess(command)
write_to_logfile(command,
command,
self.logfile, sample.general.logout, sample.general.logerr,
sample[self.analysistype].logout, sample[self.analysistype].logerr)
write_to_logfile(out,
err,
self.logfile, sample.general.logout, sample.general.logerr,
sample[self.analysistype].logout, sample[self.analysistype].logerr)
|
[
"def",
"makeblastdb",
"(",
"self",
")",
":",
"# Iterate through the samples to set the bait file.",
"for",
"sample",
"in",
"self",
".",
"runmetadata",
".",
"samples",
":",
"if",
"sample",
".",
"general",
".",
"bestassemblyfile",
"!=",
"'NA'",
":",
"# Remove the file extension",
"db",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"sample",
"[",
"self",
".",
"analysistype",
"]",
".",
"baitfile",
")",
"[",
"0",
"]",
"# Add '.nhr' for searching below",
"nhr",
"=",
"'{}.nhr'",
".",
"format",
"(",
"db",
")",
"# Check for already existing database files",
"if",
"not",
"os",
".",
"path",
".",
"isfile",
"(",
"str",
"(",
"nhr",
")",
")",
":",
"# Create the databases",
"command",
"=",
"'makeblastdb -in {} -parse_seqids -max_file_sz 2GB -dbtype nucl -out {}'",
".",
"format",
"(",
"sample",
"[",
"self",
".",
"analysistype",
"]",
".",
"baitfile",
",",
"db",
")",
"out",
",",
"err",
"=",
"run_subprocess",
"(",
"command",
")",
"write_to_logfile",
"(",
"command",
",",
"command",
",",
"self",
".",
"logfile",
",",
"sample",
".",
"general",
".",
"logout",
",",
"sample",
".",
"general",
".",
"logerr",
",",
"sample",
"[",
"self",
".",
"analysistype",
"]",
".",
"logout",
",",
"sample",
"[",
"self",
".",
"analysistype",
"]",
".",
"logerr",
")",
"write_to_logfile",
"(",
"out",
",",
"err",
",",
"self",
".",
"logfile",
",",
"sample",
".",
"general",
".",
"logout",
",",
"sample",
".",
"general",
".",
"logerr",
",",
"sample",
"[",
"self",
".",
"analysistype",
"]",
".",
"logout",
",",
"sample",
"[",
"self",
".",
"analysistype",
"]",
".",
"logerr",
")"
] |
Makes blast database files from targets as necessary
|
[
"Makes",
"blast",
"database",
"files",
"from",
"targets",
"as",
"necessary"
] |
train
|
https://github.com/OLC-Bioinformatics/sipprverse/blob/d4f10cdf8e1a39dac0953db61c21c97efc6006de/sixteenS/sixteens_full.py#L235-L259
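makeblastdb() above builds a BLAST nucleotide database only when the '.nhr' index from a previous run is absent. A sketch of that check-then-build step, using subprocess.run in place of the project's run_subprocess/write_to_logfile helpers; it assumes the NCBI BLAST+ makeblastdb binary is on PATH:

import os
import subprocess

def ensure_blast_db(baitfile):
    db = os.path.splitext(baitfile)[0]
    # '.nhr' is one of the files makeblastdb writes, so its presence means
    # the database was already built on a previous run.
    if os.path.isfile('{}.nhr'.format(db)):
        return '', ''
    command = ('makeblastdb -in {} -parse_seqids -max_file_sz 2GB '
               '-dbtype nucl -out {}'.format(baitfile, db))
    completed = subprocess.run(command, shell=True, capture_output=True, text=True)
    return completed.stdout, completed.stderr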
|
OLC-Bioinformatics/sipprverse
|
sixteenS/sixteens_full.py
|
SixteenS.blast
|
def blast(self):
"""
Run BLAST analyses of the subsampled FASTQ reads against the NCBI 16S reference database
"""
logging.info('BLASTing FASTA files against {} database'.format(self.analysistype))
for _ in range(self.cpus):
threads = Thread(target=self.blastthreads, args=())
threads.setDaemon(True)
threads.start()
with progressbar(self.runmetadata.samples) as bar:
for sample in bar:
if sample.general.bestassemblyfile != 'NA':
# Set the name of the BLAST report
sample[self.analysistype].blastreport = os.path.join(
sample[self.analysistype].outputdir,
'{}_{}_blastresults.csv'.format(sample.name, self.analysistype))
# Use the NCBI BLASTn command line wrapper module from BioPython to set the parameters of the search
blastn = NcbiblastnCommandline(query=sample[self.analysistype].fasta,
db=os.path.splitext(sample[self.analysistype].baitfile)[0],
max_target_seqs=1,
num_threads=self.threads,
outfmt="'6 qseqid sseqid positive mismatch gaps evalue "
"bitscore slen length qstart qend qseq sstart send sseq'",
out=sample[self.analysistype].blastreport)
# Add a string of the command to the metadata object
sample[self.analysistype].blastcall = str(blastn)
# Add the object and the command to the BLAST queue
self.blastqueue.put((sample, blastn))
self.blastqueue.join()
|
python
|
def blast(self):
"""
Run BLAST analyses of the subsampled FASTQ reads against the NCBI 16S reference database
"""
logging.info('BLASTing FASTA files against {} database'.format(self.analysistype))
for _ in range(self.cpus):
threads = Thread(target=self.blastthreads, args=())
threads.setDaemon(True)
threads.start()
with progressbar(self.runmetadata.samples) as bar:
for sample in bar:
if sample.general.bestassemblyfile != 'NA':
# Set the name of the BLAST report
sample[self.analysistype].blastreport = os.path.join(
sample[self.analysistype].outputdir,
'{}_{}_blastresults.csv'.format(sample.name, self.analysistype))
# Use the NCBI BLASTn command line wrapper module from BioPython to set the parameters of the search
blastn = NcbiblastnCommandline(query=sample[self.analysistype].fasta,
db=os.path.splitext(sample[self.analysistype].baitfile)[0],
max_target_seqs=1,
num_threads=self.threads,
outfmt="'6 qseqid sseqid positive mismatch gaps evalue "
"bitscore slen length qstart qend qseq sstart send sseq'",
out=sample[self.analysistype].blastreport)
# Add a string of the command to the metadata object
sample[self.analysistype].blastcall = str(blastn)
# Add the object and the command to the BLAST queue
self.blastqueue.put((sample, blastn))
self.blastqueue.join()
|
[
"def",
"blast",
"(",
"self",
")",
":",
"logging",
".",
"info",
"(",
"'BLASTing FASTA files against {} database'",
".",
"format",
"(",
"self",
".",
"analysistype",
")",
")",
"for",
"_",
"in",
"range",
"(",
"self",
".",
"cpus",
")",
":",
"threads",
"=",
"Thread",
"(",
"target",
"=",
"self",
".",
"blastthreads",
",",
"args",
"=",
"(",
")",
")",
"threads",
".",
"setDaemon",
"(",
"True",
")",
"threads",
".",
"start",
"(",
")",
"with",
"progressbar",
"(",
"self",
".",
"runmetadata",
".",
"samples",
")",
"as",
"bar",
":",
"for",
"sample",
"in",
"bar",
":",
"if",
"sample",
".",
"general",
".",
"bestassemblyfile",
"!=",
"'NA'",
":",
"# Set the name of the BLAST report",
"sample",
"[",
"self",
".",
"analysistype",
"]",
".",
"blastreport",
"=",
"os",
".",
"path",
".",
"join",
"(",
"sample",
"[",
"self",
".",
"analysistype",
"]",
".",
"outputdir",
",",
"'{}_{}_blastresults.csv'",
".",
"format",
"(",
"sample",
".",
"name",
",",
"self",
".",
"analysistype",
")",
")",
"# Use the NCBI BLASTn command line wrapper module from BioPython to set the parameters of the search",
"blastn",
"=",
"NcbiblastnCommandline",
"(",
"query",
"=",
"sample",
"[",
"self",
".",
"analysistype",
"]",
".",
"fasta",
",",
"db",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"sample",
"[",
"self",
".",
"analysistype",
"]",
".",
"baitfile",
")",
"[",
"0",
"]",
",",
"max_target_seqs",
"=",
"1",
",",
"num_threads",
"=",
"self",
".",
"threads",
",",
"outfmt",
"=",
"\"'6 qseqid sseqid positive mismatch gaps evalue \"",
"\"bitscore slen length qstart qend qseq sstart send sseq'\"",
",",
"out",
"=",
"sample",
"[",
"self",
".",
"analysistype",
"]",
".",
"blastreport",
")",
"# Add a string of the command to the metadata object",
"sample",
"[",
"self",
".",
"analysistype",
"]",
".",
"blastcall",
"=",
"str",
"(",
"blastn",
")",
"# Add the object and the command to the BLAST queue",
"self",
".",
"blastqueue",
".",
"put",
"(",
"(",
"sample",
",",
"blastn",
")",
")",
"self",
".",
"blastqueue",
".",
"join",
"(",
")"
] |
Run BLAST analyses of the subsampled FASTQ reads against the NCBI 16S reference database
|
[
"Run",
"BLAST",
"analyses",
"of",
"the",
"subsampled",
"FASTQ",
"reads",
"against",
"the",
"NCBI",
"16S",
"reference",
"database"
] |
train
|
https://github.com/OLC-Bioinformatics/sipprverse/blob/d4f10cdf8e1a39dac0953db61c21c97efc6006de/sixteenS/sixteens_full.py#L261-L289
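A minimal sketch of the Biopython wrapper used by SixteenS.blast above; the query, database prefix and output path are placeholders rather than files from the repository, and running it requires NCBI BLAST+ on the PATH.
from Bio.Blast.Applications import NcbiblastnCommandline

# Build the blastn command; str() gives the shell command, calling the object runs it.
blastn = NcbiblastnCommandline(
    query='sample_16S.fasta',              # placeholder baited FASTA
    db='ncbi_16S_db',                      # placeholder database prefix (no extension)
    max_target_seqs=1,
    num_threads=4,
    outfmt="'6 qseqid sseqid positive mismatch gaps evalue "
           "bitscore slen length qstart qend qseq sstart send sseq'",
    out='sample_16S_blastresults.csv')
print(str(blastn))          # the command string stored in sample[...].blastcall above
stdout, stderr = blastn()   # execute the search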
|
OLC-Bioinformatics/sipprverse
|
sixteenS/sixteens_full.py
|
SixteenS.blastparse
|
def blastparse(self):
"""
Parse the blast results, and store necessary data in dictionaries in sample object
"""
logging.info('Parsing BLAST results')
# Load the NCBI 16S reference database as a dictionary
for sample in self.runmetadata.samples:
if sample.general.bestassemblyfile != 'NA':
# Load the NCBI 16S reference database as a dictionary
dbrecords = SeqIO.to_dict(SeqIO.parse(sample[self.analysistype].baitfile, 'fasta'))
# Allow for no BLAST results
if os.path.isfile(sample[self.analysistype].blastreport):
# Initialise a dictionary to store the number of times a genus is the best hit
sample[self.analysistype].frequency = dict()
# Open the sequence profile file as a dictionary
blastdict = DictReader(open(sample[self.analysistype].blastreport),
fieldnames=self.fieldnames, dialect='excel-tab')
recorddict = dict()
for record in blastdict:
# Create the subject id. It will look like this: gi|1018196593|ref|NR_136472.1|
subject = record['subject_id']
# Extract the genus name. Use the subject id as a key in the dictionary of the reference db.
# It will return the full record e.g. gi|1018196593|ref|NR_136472.1| Escherichia marmotae
# strain HT073016 16S ribosomal RNA, partial sequence
# This full description can be manipulated to extract the genus e.g. Escherichia
genus = dbrecords[subject].description.split('|')[-1].split()[0]
# Increment the number of times this genus was found, or initialise the dictionary with this
# genus the first time it is seen
try:
sample[self.analysistype].frequency[genus] += 1
except KeyError:
sample[self.analysistype].frequency[genus] = 1
try:
recorddict[dbrecords[subject].description] += 1
except KeyError:
recorddict[dbrecords[subject].description] = 1
# Sort the dictionary based on the number of times a genus is seen
sample[self.analysistype].sortedgenera = sorted(sample[self.analysistype].frequency.items(),
key=operator.itemgetter(1), reverse=True)
try:
# Extract the top result, and set it as the genus of the sample
sample[self.analysistype].genus = sample[self.analysistype].sortedgenera[0][0]
# Previous code relies on having the closest refseq genus, so set this as above
# sample.general.closestrefseqgenus = sample[self.analysistype].genus
except IndexError:
# Populate attributes with 'NA'
sample[self.analysistype].sortedgenera = 'NA'
sample[self.analysistype].genus = 'NA'
# sample.general.closestrefseqgenus = 'NA'
else:
# Populate attributes with 'NA'
sample[self.analysistype].sortedgenera = 'NA'
sample[self.analysistype].genus = 'NA'
# sample.general.closestrefseqgenus = 'NA'
else:
# Populate attributes with 'NA'
sample[self.analysistype].sortedgenera = 'NA'
sample[self.analysistype].genus = 'NA'
|
python
|
def blastparse(self):
"""
Parse the blast results, and store necessary data in dictionaries in sample object
"""
logging.info('Parsing BLAST results')
# Load the NCBI 16S reference database as a dictionary
for sample in self.runmetadata.samples:
if sample.general.bestassemblyfile != 'NA':
# Load the NCBI 16S reference database as a dictionary
dbrecords = SeqIO.to_dict(SeqIO.parse(sample[self.analysistype].baitfile, 'fasta'))
# Allow for no BLAST results
if os.path.isfile(sample[self.analysistype].blastreport):
# Initialise a dictionary to store the number of times a genus is the best hit
sample[self.analysistype].frequency = dict()
# Open the sequence profile file as a dictionary
blastdict = DictReader(open(sample[self.analysistype].blastreport),
fieldnames=self.fieldnames, dialect='excel-tab')
recorddict = dict()
for record in blastdict:
# Create the subject id. It will look like this: gi|1018196593|ref|NR_136472.1|
subject = record['subject_id']
# Extract the genus name. Use the subject id as a key in the dictionary of the reference db.
# It will return the full record e.g. gi|1018196593|ref|NR_136472.1| Escherichia marmotae
# strain HT073016 16S ribosomal RNA, partial sequence
# This full description can be manipulated to extract the genus e.g. Escherichia
genus = dbrecords[subject].description.split('|')[-1].split()[0]
# Increment the number of times this genus was found, or initialise the dictionary with this
# genus the first time it is seen
try:
sample[self.analysistype].frequency[genus] += 1
except KeyError:
sample[self.analysistype].frequency[genus] = 1
try:
recorddict[dbrecords[subject].description] += 1
except KeyError:
recorddict[dbrecords[subject].description] = 1
# Sort the dictionary based on the number of times a genus is seen
sample[self.analysistype].sortedgenera = sorted(sample[self.analysistype].frequency.items(),
key=operator.itemgetter(1), reverse=True)
try:
# Extract the top result, and set it as the genus of the sample
sample[self.analysistype].genus = sample[self.analysistype].sortedgenera[0][0]
# Previous code relies on having the closest refseq genus, so set this as above
# sample.general.closestrefseqgenus = sample[self.analysistype].genus
except IndexError:
# Populate attributes with 'NA'
sample[self.analysistype].sortedgenera = 'NA'
sample[self.analysistype].genus = 'NA'
# sample.general.closestrefseqgenus = 'NA'
else:
# Populate attributes with 'NA'
sample[self.analysistype].sortedgenera = 'NA'
sample[self.analysistype].genus = 'NA'
# sample.general.closestrefseqgenus = 'NA'
else:
# Populate attributes with 'NA'
sample[self.analysistype].sortedgenera = 'NA'
sample[self.analysistype].genus = 'NA'
|
[
"def",
"blastparse",
"(",
"self",
")",
":",
"logging",
".",
"info",
"(",
"'Parsing BLAST results'",
")",
"# Load the NCBI 16S reference database as a dictionary",
"for",
"sample",
"in",
"self",
".",
"runmetadata",
".",
"samples",
":",
"if",
"sample",
".",
"general",
".",
"bestassemblyfile",
"!=",
"'NA'",
":",
"# Load the NCBI 16S reference database as a dictionary",
"dbrecords",
"=",
"SeqIO",
".",
"to_dict",
"(",
"SeqIO",
".",
"parse",
"(",
"sample",
"[",
"self",
".",
"analysistype",
"]",
".",
"baitfile",
",",
"'fasta'",
")",
")",
"# Allow for no BLAST results",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"sample",
"[",
"self",
".",
"analysistype",
"]",
".",
"blastreport",
")",
":",
"# Initialise a dictionary to store the number of times a genus is the best hit",
"sample",
"[",
"self",
".",
"analysistype",
"]",
".",
"frequency",
"=",
"dict",
"(",
")",
"# Open the sequence profile file as a dictionary",
"blastdict",
"=",
"DictReader",
"(",
"open",
"(",
"sample",
"[",
"self",
".",
"analysistype",
"]",
".",
"blastreport",
")",
",",
"fieldnames",
"=",
"self",
".",
"fieldnames",
",",
"dialect",
"=",
"'excel-tab'",
")",
"recorddict",
"=",
"dict",
"(",
")",
"for",
"record",
"in",
"blastdict",
":",
"# Create the subject id. It will look like this: gi|1018196593|ref|NR_136472.1|",
"subject",
"=",
"record",
"[",
"'subject_id'",
"]",
"# Extract the genus name. Use the subject id as a key in the dictionary of the reference db.",
"# It will return the full record e.g. gi|1018196593|ref|NR_136472.1| Escherichia marmotae",
"# strain HT073016 16S ribosomal RNA, partial sequence",
"# This full description can be manipulated to extract the genus e.g. Escherichia",
"genus",
"=",
"dbrecords",
"[",
"subject",
"]",
".",
"description",
".",
"split",
"(",
"'|'",
")",
"[",
"-",
"1",
"]",
".",
"split",
"(",
")",
"[",
"0",
"]",
"# Increment the number of times this genus was found, or initialise the dictionary with this",
"# genus the first time it is seen",
"try",
":",
"sample",
"[",
"self",
".",
"analysistype",
"]",
".",
"frequency",
"[",
"genus",
"]",
"+=",
"1",
"except",
"KeyError",
":",
"sample",
"[",
"self",
".",
"analysistype",
"]",
".",
"frequency",
"[",
"genus",
"]",
"=",
"1",
"try",
":",
"recorddict",
"[",
"dbrecords",
"[",
"subject",
"]",
".",
"description",
"]",
"+=",
"1",
"except",
"KeyError",
":",
"recorddict",
"[",
"dbrecords",
"[",
"subject",
"]",
".",
"description",
"]",
"=",
"1",
"# Sort the dictionary based on the number of times a genus is seen",
"sample",
"[",
"self",
".",
"analysistype",
"]",
".",
"sortedgenera",
"=",
"sorted",
"(",
"sample",
"[",
"self",
".",
"analysistype",
"]",
".",
"frequency",
".",
"items",
"(",
")",
",",
"key",
"=",
"operator",
".",
"itemgetter",
"(",
"1",
")",
",",
"reverse",
"=",
"True",
")",
"try",
":",
"# Extract the top result, and set it as the genus of the sample",
"sample",
"[",
"self",
".",
"analysistype",
"]",
".",
"genus",
"=",
"sample",
"[",
"self",
".",
"analysistype",
"]",
".",
"sortedgenera",
"[",
"0",
"]",
"[",
"0",
"]",
"# Previous code relies on having the closest refseq genus, so set this as above",
"# sample.general.closestrefseqgenus = sample[self.analysistype].genus",
"except",
"IndexError",
":",
"# Populate attributes with 'NA'",
"sample",
"[",
"self",
".",
"analysistype",
"]",
".",
"sortedgenera",
"=",
"'NA'",
"sample",
"[",
"self",
".",
"analysistype",
"]",
".",
"genus",
"=",
"'NA'",
"# sample.general.closestrefseqgenus = 'NA'",
"else",
":",
"# Populate attributes with 'NA'",
"sample",
"[",
"self",
".",
"analysistype",
"]",
".",
"sortedgenera",
"=",
"'NA'",
"sample",
"[",
"self",
".",
"analysistype",
"]",
".",
"genus",
"=",
"'NA'",
"# sample.general.closestrefseqgenus = 'NA'",
"else",
":",
"# Populate attributes with 'NA'",
"sample",
"[",
"self",
".",
"analysistype",
"]",
".",
"sortedgenera",
"=",
"'NA'",
"sample",
"[",
"self",
".",
"analysistype",
"]",
".",
"genus",
"=",
"'NA'"
] |
Parse the blast results, and store necessary data in dictionaries in sample object
|
[
"Parse",
"the",
"blast",
"results",
"and",
"store",
"necessary",
"data",
"in",
"dictionaries",
"in",
"sample",
"object"
] |
train
|
https://github.com/OLC-Bioinformatics/sipprverse/blob/d4f10cdf8e1a39dac0953db61c21c97efc6006de/sixteenS/sixteens_full.py#L304-L361
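The genus tally at the heart of SixteenS.blastparse above can be expressed with collections.Counter; a small self-contained sketch using the example descriptions quoted in the function's own comments:
from collections import Counter

descriptions = [
    'gi|1018196593|ref|NR_136472.1| Escherichia marmotae strain HT073016 16S ribosomal RNA, partial sequence',
    'gi|631251361|ref|NR_112558.1| Escherichia coli strain JCM 1649 16S ribosomal RNA, partial sequence',
    'gi|1018196593|ref|NR_136472.1| Escherichia marmotae strain HT073016 16S ribosomal RNA, partial sequence',
]
# Genus = first word after the last '|' of the description, exactly as in blastparse.
frequency = Counter(desc.split('|')[-1].split()[0] for desc in descriptions)
genus = frequency.most_common(1)[0][0] if frequency else 'NA'
print(genus)   # Escherichia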
|
OLC-Bioinformatics/sipprverse
|
sixteenS/sixteens_full.py
|
SixteenS.reporter
|
def reporter(self):
"""
Creates a report of the results
"""
# Create the path in which the reports are stored
make_path(self.reportpath)
logging.info('Creating {} report'.format(self.analysistype))
# Initialise the header and data strings
header = 'Strain,Gene,PercentIdentity,Genus,FoldCoverage\n'
data = ''
with open(self.sixteens_report, 'w') as report:
with open(os.path.join(self.reportpath, self.analysistype + '_sequences.fa'), 'w') as sequences:
for sample in self.runmetadata.samples:
# Initialise
sample[self.analysistype].sixteens_match = 'NA'
sample[self.analysistype].species = 'NA'
try:
# Select the best hit of all the full-length 16S genes mapped - for 16S use the hit with the
# fewest number of SNPs rather than the highest percent identity
sample[self.analysistype].besthit = sorted(sample[self.analysistype].resultssnp.items(),
key=operator.itemgetter(1))[0][0]
# Parse the baited FASTA file to pull out the the description of the hit
for record in SeqIO.parse(sample[self.analysistype].baitfile, 'fasta'):
# If the best hit e.g. gi|631251361|ref|NR_112558.1| is present in the current record,
# gi|631251361|ref|NR_112558.1| Escherichia coli strain JCM 1649 16S ribosomal RNA ...,
# extract the match and the species
if sample[self.analysistype].besthit in record.id:
# Set the best match and species from the records
sample[self.analysistype].sixteens_match = record.description.split(' 16S')[0]
sample[self.analysistype].species = \
sample[self.analysistype].sixteens_match.split('|')[-1].split()[1]
# Add the sample name to the data string
data += sample.name + ','
# Find the record that matches the best hit, and extract the necessary values to be place in the
# data string
for name, identity in sample[self.analysistype].results.items():
if name == sample[self.analysistype].besthit:
data += '{},{},{},{}\n'.format(name, identity, sample[self.analysistype].genus,
sample[self.analysistype].avgdepth[name])
# Create a FASTA-formatted sequence output of the 16S sequence
record = SeqRecord(Seq(sample[self.analysistype].sequences[name],
IUPAC.unambiguous_dna),
id='{}_{}'.format(sample.name, '16S'),
description='')
SeqIO.write(record, sequences, 'fasta')
except (AttributeError, IndexError):
data += '{}\n'.format(sample.name)
# Write the results to the report
report.write(header)
report.write(data)
|
python
|
def reporter(self):
"""
Creates a report of the results
"""
# Create the path in which the reports are stored
make_path(self.reportpath)
logging.info('Creating {} report'.format(self.analysistype))
# Initialise the header and data strings
header = 'Strain,Gene,PercentIdentity,Genus,FoldCoverage\n'
data = ''
with open(self.sixteens_report, 'w') as report:
with open(os.path.join(self.reportpath, self.analysistype + '_sequences.fa'), 'w') as sequences:
for sample in self.runmetadata.samples:
# Initialise
sample[self.analysistype].sixteens_match = 'NA'
sample[self.analysistype].species = 'NA'
try:
# Select the best hit of all the full-length 16S genes mapped - for 16S use the hit with the
# fewest number of SNPs rather than the highest percent identity
sample[self.analysistype].besthit = sorted(sample[self.analysistype].resultssnp.items(),
key=operator.itemgetter(1))[0][0]
# Parse the baited FASTA file to pull out the the description of the hit
for record in SeqIO.parse(sample[self.analysistype].baitfile, 'fasta'):
# If the best hit e.g. gi|631251361|ref|NR_112558.1| is present in the current record,
# gi|631251361|ref|NR_112558.1| Escherichia coli strain JCM 1649 16S ribosomal RNA ...,
# extract the match and the species
if sample[self.analysistype].besthit in record.id:
# Set the best match and species from the records
sample[self.analysistype].sixteens_match = record.description.split(' 16S')[0]
sample[self.analysistype].species = \
sample[self.analysistype].sixteens_match.split('|')[-1].split()[1]
# Add the sample name to the data string
data += sample.name + ','
# Find the record that matches the best hit, and extract the necessary values to be place in the
# data string
for name, identity in sample[self.analysistype].results.items():
if name == sample[self.analysistype].besthit:
data += '{},{},{},{}\n'.format(name, identity, sample[self.analysistype].genus,
sample[self.analysistype].avgdepth[name])
# Create a FASTA-formatted sequence output of the 16S sequence
record = SeqRecord(Seq(sample[self.analysistype].sequences[name],
IUPAC.unambiguous_dna),
id='{}_{}'.format(sample.name, '16S'),
description='')
SeqIO.write(record, sequences, 'fasta')
except (AttributeError, IndexError):
data += '{}\n'.format(sample.name)
# Write the results to the report
report.write(header)
report.write(data)
|
[
"def",
"reporter",
"(",
"self",
")",
":",
"# Create the path in which the reports are stored",
"make_path",
"(",
"self",
".",
"reportpath",
")",
"logging",
".",
"info",
"(",
"'Creating {} report'",
".",
"format",
"(",
"self",
".",
"analysistype",
")",
")",
"# Initialise the header and data strings",
"header",
"=",
"'Strain,Gene,PercentIdentity,Genus,FoldCoverage\\n'",
"data",
"=",
"''",
"with",
"open",
"(",
"self",
".",
"sixteens_report",
",",
"'w'",
")",
"as",
"report",
":",
"with",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"reportpath",
",",
"self",
".",
"analysistype",
"+",
"'_sequences.fa'",
")",
",",
"'w'",
")",
"as",
"sequences",
":",
"for",
"sample",
"in",
"self",
".",
"runmetadata",
".",
"samples",
":",
"# Initialise",
"sample",
"[",
"self",
".",
"analysistype",
"]",
".",
"sixteens_match",
"=",
"'NA'",
"sample",
"[",
"self",
".",
"analysistype",
"]",
".",
"species",
"=",
"'NA'",
"try",
":",
"# Select the best hit of all the full-length 16S genes mapped - for 16S use the hit with the",
"# fewest number of SNPs rather than the highest percent identity",
"sample",
"[",
"self",
".",
"analysistype",
"]",
".",
"besthit",
"=",
"sorted",
"(",
"sample",
"[",
"self",
".",
"analysistype",
"]",
".",
"resultssnp",
".",
"items",
"(",
")",
",",
"key",
"=",
"operator",
".",
"itemgetter",
"(",
"1",
")",
")",
"[",
"0",
"]",
"[",
"0",
"]",
"# Parse the baited FASTA file to pull out the the description of the hit",
"for",
"record",
"in",
"SeqIO",
".",
"parse",
"(",
"sample",
"[",
"self",
".",
"analysistype",
"]",
".",
"baitfile",
",",
"'fasta'",
")",
":",
"# If the best hit e.g. gi|631251361|ref|NR_112558.1| is present in the current record,",
"# gi|631251361|ref|NR_112558.1| Escherichia coli strain JCM 1649 16S ribosomal RNA ...,",
"# extract the match and the species",
"if",
"sample",
"[",
"self",
".",
"analysistype",
"]",
".",
"besthit",
"in",
"record",
".",
"id",
":",
"# Set the best match and species from the records",
"sample",
"[",
"self",
".",
"analysistype",
"]",
".",
"sixteens_match",
"=",
"record",
".",
"description",
".",
"split",
"(",
"' 16S'",
")",
"[",
"0",
"]",
"sample",
"[",
"self",
".",
"analysistype",
"]",
".",
"species",
"=",
"sample",
"[",
"self",
".",
"analysistype",
"]",
".",
"sixteens_match",
".",
"split",
"(",
"'|'",
")",
"[",
"-",
"1",
"]",
".",
"split",
"(",
")",
"[",
"1",
"]",
"# Add the sample name to the data string",
"data",
"+=",
"sample",
".",
"name",
"+",
"','",
"# Find the record that matches the best hit, and extract the necessary values to be place in the",
"# data string",
"for",
"name",
",",
"identity",
"in",
"sample",
"[",
"self",
".",
"analysistype",
"]",
".",
"results",
".",
"items",
"(",
")",
":",
"if",
"name",
"==",
"sample",
"[",
"self",
".",
"analysistype",
"]",
".",
"besthit",
":",
"data",
"+=",
"'{},{},{},{}\\n'",
".",
"format",
"(",
"name",
",",
"identity",
",",
"sample",
"[",
"self",
".",
"analysistype",
"]",
".",
"genus",
",",
"sample",
"[",
"self",
".",
"analysistype",
"]",
".",
"avgdepth",
"[",
"name",
"]",
")",
"# Create a FASTA-formatted sequence output of the 16S sequence",
"record",
"=",
"SeqRecord",
"(",
"Seq",
"(",
"sample",
"[",
"self",
".",
"analysistype",
"]",
".",
"sequences",
"[",
"name",
"]",
",",
"IUPAC",
".",
"unambiguous_dna",
")",
",",
"id",
"=",
"'{}_{}'",
".",
"format",
"(",
"sample",
".",
"name",
",",
"'16S'",
")",
",",
"description",
"=",
"''",
")",
"SeqIO",
".",
"write",
"(",
"record",
",",
"sequences",
",",
"'fasta'",
")",
"except",
"(",
"AttributeError",
",",
"IndexError",
")",
":",
"data",
"+=",
"'{}\\n'",
".",
"format",
"(",
"sample",
".",
"name",
")",
"# Write the results to the report",
"report",
".",
"write",
"(",
"header",
")",
"report",
".",
"write",
"(",
"data",
")"
] |
Creates a report of the results
|
[
"Creates",
"a",
"report",
"of",
"the",
"results"
] |
train
|
https://github.com/OLC-Bioinformatics/sipprverse/blob/d4f10cdf8e1a39dac0953db61c21c97efc6006de/sixteenS/sixteens_full.py#L364-L413
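A minimal sketch of the SeqRecord/SeqIO.write step that reporter uses to emit each sample's 16S sequence; the sequence and file name are placeholders. The original also passes IUPAC.unambiguous_dna, which only works on Biopython releases that still ship Bio.Alphabet (removed in 1.78), so the alphabet argument is omitted here.
from Bio import SeqIO
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord

record = SeqRecord(Seq('ACGTACGTACGT'),        # placeholder 16S sequence
                   id='sample1_16S',           # '{}_{}'.format(sample.name, '16S') in reporter
                   description='')
with open('sixteens_sequences.fa', 'w') as handle:
    SeqIO.write(record, handle, 'fasta')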
|
QunarOPS/qg.core
|
qg/core/observer.py
|
Observable.add_listener
|
def add_listener(self, evt_name, fn):
"""添加观察者函数。
:params evt_name: 事件名称
:params fn: 要注册的触发函数函数
.. note::
允许一个函数多次注册,多次注册意味着一次 :func:`fire_event` 多次调用。
"""
self._listeners.setdefault(evt_name, [])
listeners = self.__get_listeners(evt_name)
listeners.append(fn)
|
python
|
def add_listener(self, evt_name, fn):
"""添加观察者函数。
:params evt_name: 事件名称
:params fn: 要注册的触发函数函数
.. note::
允许一个函数多次注册,多次注册意味着一次 :func:`fire_event` 多次调用。
"""
self._listeners.setdefault(evt_name, [])
listeners = self.__get_listeners(evt_name)
listeners.append(fn)
|
[
"def",
"add_listener",
"(",
"self",
",",
"evt_name",
",",
"fn",
")",
":",
"self",
".",
"_listeners",
".",
"setdefault",
"(",
"evt_name",
",",
"[",
"]",
")",
"listeners",
"=",
"self",
".",
"__get_listeners",
"(",
"evt_name",
")",
"listeners",
".",
"append",
"(",
"fn",
")"
] |
添加观察者函数。
:params evt_name: 事件名称
:params fn: 要注册的触发函数函数
.. note::
允许一个函数多次注册,多次注册意味着一次 :func:`fire_event` 多次调用。
|
[
"添加观察者函数。"
] |
train
|
https://github.com/QunarOPS/qg.core/blob/d5d7e36ea140cfe73e1b1850e8c96960b02a1ed3/qg/core/observer.py#L59-L70
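add_listener relies on dict.setdefault to create each event's list on first use, and its (Chinese) docstring notes that the same function may be registered several times, in which case one fire_event call invokes it once per registration. A tiny standalone illustration of that registry behaviour, not the qg.core class itself:
listeners = {}

def on_save(evt, *args):
    print('on_save called')

# setdefault creates the list the first time an event name is seen.
listeners.setdefault('saved', []).append(on_save)
listeners.setdefault('saved', []).append(on_save)   # duplicate registration is allowed
print(len(listeners['saved']))   # 2 -> on_save would run twice per fire_event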
|
QunarOPS/qg.core
|
qg/core/observer.py
|
Observable.remove_listener
|
def remove_listener(self, evt_name, fn, remove_all=False):
"""删除观察者函数。
:params evt_name: 事件名称
:params fn: 要注册的触发函数函数
:params remove_all: 是否删除fn在evt_name中的所有注册\n
如果为 `True`,则删除所有\n
如果为 `False`,则按注册先后顺序删除第一个\n
.. note::
允许一个函数多次注册,多次注册意味着一次时间多次调用。
"""
listeners = self.__get_listeners(evt_name)
if not self.has_listener(evt_name, fn):
raise ObservableError(
"function %r does not exist in the %r event",
fn, evt_name)
if remove_all:
listeners[:] = [i for i in listeners if i != fn]
else:
listeners.remove(fn)
|
python
|
def remove_listener(self, evt_name, fn, remove_all=False):
"""删除观察者函数。
:params evt_name: 事件名称
:params fn: 要注册的触发函数函数
:params remove_all: 是否删除fn在evt_name中的所有注册\n
如果为 `True`,则删除所有\n
如果为 `False`,则按注册先后顺序删除第一个\n
.. note::
允许一个函数多次注册,多次注册意味着一次时间多次调用。
"""
listeners = self.__get_listeners(evt_name)
if not self.has_listener(evt_name, fn):
raise ObservableError(
"function %r does not exist in the %r event",
fn, evt_name)
if remove_all:
listeners[:] = [i for i in listeners if i != fn]
else:
listeners.remove(fn)
|
[
"def",
"remove_listener",
"(",
"self",
",",
"evt_name",
",",
"fn",
",",
"remove_all",
"=",
"False",
")",
":",
"listeners",
"=",
"self",
".",
"__get_listeners",
"(",
"evt_name",
")",
"if",
"not",
"self",
".",
"has_listener",
"(",
"evt_name",
",",
"fn",
")",
":",
"raise",
"ObservableError",
"(",
"\"function %r does not exist in the %r event\"",
",",
"fn",
",",
"evt_name",
")",
"if",
"remove_all",
":",
"listeners",
"[",
":",
"]",
"=",
"[",
"i",
"for",
"i",
"in",
"listeners",
"if",
"i",
"!=",
"fn",
"]",
"else",
":",
"listeners",
".",
"remove",
"(",
"fn",
")"
] |
删除观察者函数。
:params evt_name: 事件名称
:params fn: 要注册的触发函数函数
:params remove_all: 是否删除fn在evt_name中的所有注册\n
如果为 `True`,则删除所有\n
如果为 `False`,则按注册先后顺序删除第一个\n
.. note::
允许一个函数多次注册,多次注册意味着一次时间多次调用。
|
[
"删除观察者函数。"
] |
train
|
https://github.com/QunarOPS/qg.core/blob/d5d7e36ea140cfe73e1b1850e8c96960b02a1ed3/qg/core/observer.py#L72-L92
|
QunarOPS/qg.core
|
qg/core/observer.py
|
Observable.has_listener
|
def has_listener(self, evt_name, fn):
"""指定listener是否存在
:params evt_name: 事件名称
:params fn: 要注册的触发函数函数
"""
listeners = self.__get_listeners(evt_name)
return fn in listeners
|
python
|
def has_listener(self, evt_name, fn):
"""指定listener是否存在
:params evt_name: 事件名称
:params fn: 要注册的触发函数函数
"""
listeners = self.__get_listeners(evt_name)
return fn in listeners
|
[
"def",
"has_listener",
"(",
"self",
",",
"evt_name",
",",
"fn",
")",
":",
"listeners",
"=",
"self",
".",
"__get_listeners",
"(",
"evt_name",
")",
"return",
"fn",
"in",
"listeners"
] |
指定listener是否存在
:params evt_name: 事件名称
:params fn: 要注册的触发函数函数
|
[
"指定listener是否存在"
] |
train
|
https://github.com/QunarOPS/qg.core/blob/d5d7e36ea140cfe73e1b1850e8c96960b02a1ed3/qg/core/observer.py#L94-L101
|
QunarOPS/qg.core
|
qg/core/observer.py
|
Observable.fire_event
|
def fire_event(self, evt_name, *args, **kwargs):
"""触发事件
:params evt_name: 事件名称
:params args: 给事件接受者的参数
:params kwargs: 给事件接受者的参数
"""
listeners = self.__get_listeners(evt_name)
evt = self.generate_event(evt_name)
for listener in listeners:
listener(evt, *args, **kwargs)
|
python
|
def fire_event(self, evt_name, *args, **kwargs):
"""触发事件
:params evt_name: 事件名称
:params args: 给事件接受者的参数
:params kwargs: 给事件接受者的参数
"""
listeners = self.__get_listeners(evt_name)
evt = self.generate_event(evt_name)
for listener in listeners:
listener(evt, *args, **kwargs)
|
[
"def",
"fire_event",
"(",
"self",
",",
"evt_name",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"listeners",
"=",
"self",
".",
"__get_listeners",
"(",
"evt_name",
")",
"evt",
"=",
"self",
".",
"generate_event",
"(",
"evt_name",
")",
"for",
"listener",
"in",
"listeners",
":",
"listener",
"(",
"evt",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] |
触发事件
:params evt_name: 事件名称
:params args: 给事件接受者的参数
:params kwargs: 给事件接受者的参数
|
[
"触发事件"
] |
train
|
https://github.com/QunarOPS/qg.core/blob/d5d7e36ea140cfe73e1b1850e8c96960b02a1ed3/qg/core/observer.py#L118-L128
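fire_event looks up the registered listeners, builds an event object via generate_event (not shown in this excerpt), and calls each listener with that event plus the caller's arguments. A hedged end-to-end sketch of the same dispatch pattern; MiniObservable is a toy stand-in rather than the qg.core Observable, and it passes the event name where the real class passes a generated event object:
class MiniObservable(object):
    def __init__(self):
        self._listeners = {}

    def add_listener(self, evt_name, fn):
        self._listeners.setdefault(evt_name, []).append(fn)

    def remove_listener(self, evt_name, fn):
        self._listeners.get(evt_name, []).remove(fn)

    def fire_event(self, evt_name, *args, **kwargs):
        for fn in list(self._listeners.get(evt_name, [])):
            fn(evt_name, *args, **kwargs)

def audit(evt, user):
    print('%s by %s' % (evt, user))

obs = MiniObservable()
obs.add_listener('login', audit)
obs.fire_event('login', user='alice')    # prints: login by alice
obs.remove_listener('login', audit)
obs.fire_event('login', user='bob')      # nothing registered any more, nothing printed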
|
pyBookshelf/bookshelf
|
bookshelf/api_v2/ec2.py
|
create_server_ec2
|
def create_server_ec2(connection,
region,
disk_name,
disk_size,
ami,
key_pair,
instance_type,
tags={},
security_groups=None,
delete_on_termination=True,
log=False,
wait_for_ssh_available=True):
"""
Creates EC2 Instance
"""
if log:
log_green("Started...")
log_yellow("...Creating EC2 instance...")
ebs_volume = EBSBlockDeviceType()
ebs_volume.size = disk_size
bdm = BlockDeviceMapping()
bdm[disk_name] = ebs_volume
# get an ec2 ami image object with our choosen ami
image = connection.get_all_images(ami)[0]
# start a new instance
reservation = image.run(1, 1,
key_name=key_pair,
security_groups=security_groups,
block_device_map=bdm,
instance_type=instance_type)
# and get our instance_id
instance = reservation.instances[0]
# and loop and wait until ssh is available
while instance.state == u'pending':
if log:
log_yellow("Instance state: %s" % instance.state)
sleep(10)
instance.update()
if log:
log_green("Instance state: %s" % instance.state)
if wait_for_ssh_available:
wait_for_ssh(instance.public_dns_name)
# update the EBS volumes to be deleted on instance termination
if delete_on_termination:
for dev, bd in instance.block_device_mapping.items():
instance.modify_attribute('BlockDeviceMapping',
["%s=%d" % (dev, 1)])
# add a tag to our instance
connection.create_tags([instance.id], tags)
if log:
log_green("Public dns: %s" % instance.public_dns_name)
# returns our new instance
return instance
|
python
|
def create_server_ec2(connection,
region,
disk_name,
disk_size,
ami,
key_pair,
instance_type,
tags={},
security_groups=None,
delete_on_termination=True,
log=False,
wait_for_ssh_available=True):
"""
Creates EC2 Instance
"""
if log:
log_green("Started...")
log_yellow("...Creating EC2 instance...")
ebs_volume = EBSBlockDeviceType()
ebs_volume.size = disk_size
bdm = BlockDeviceMapping()
bdm[disk_name] = ebs_volume
# get an ec2 ami image object with our choosen ami
image = connection.get_all_images(ami)[0]
# start a new instance
reservation = image.run(1, 1,
key_name=key_pair,
security_groups=security_groups,
block_device_map=bdm,
instance_type=instance_type)
# and get our instance_id
instance = reservation.instances[0]
# and loop and wait until ssh is available
while instance.state == u'pending':
if log:
log_yellow("Instance state: %s" % instance.state)
sleep(10)
instance.update()
if log:
log_green("Instance state: %s" % instance.state)
if wait_for_ssh_available:
wait_for_ssh(instance.public_dns_name)
# update the EBS volumes to be deleted on instance termination
if delete_on_termination:
for dev, bd in instance.block_device_mapping.items():
instance.modify_attribute('BlockDeviceMapping',
["%s=%d" % (dev, 1)])
# add a tag to our instance
connection.create_tags([instance.id], tags)
if log:
log_green("Public dns: %s" % instance.public_dns_name)
# returns our new instance
return instance
|
[
"def",
"create_server_ec2",
"(",
"connection",
",",
"region",
",",
"disk_name",
",",
"disk_size",
",",
"ami",
",",
"key_pair",
",",
"instance_type",
",",
"tags",
"=",
"{",
"}",
",",
"security_groups",
"=",
"None",
",",
"delete_on_termination",
"=",
"True",
",",
"log",
"=",
"False",
",",
"wait_for_ssh_available",
"=",
"True",
")",
":",
"if",
"log",
":",
"log_green",
"(",
"\"Started...\"",
")",
"log_yellow",
"(",
"\"...Creating EC2 instance...\"",
")",
"ebs_volume",
"=",
"EBSBlockDeviceType",
"(",
")",
"ebs_volume",
".",
"size",
"=",
"disk_size",
"bdm",
"=",
"BlockDeviceMapping",
"(",
")",
"bdm",
"[",
"disk_name",
"]",
"=",
"ebs_volume",
"# get an ec2 ami image object with our choosen ami",
"image",
"=",
"connection",
".",
"get_all_images",
"(",
"ami",
")",
"[",
"0",
"]",
"# start a new instance",
"reservation",
"=",
"image",
".",
"run",
"(",
"1",
",",
"1",
",",
"key_name",
"=",
"key_pair",
",",
"security_groups",
"=",
"security_groups",
",",
"block_device_map",
"=",
"bdm",
",",
"instance_type",
"=",
"instance_type",
")",
"# and get our instance_id",
"instance",
"=",
"reservation",
".",
"instances",
"[",
"0",
"]",
"# and loop and wait until ssh is available",
"while",
"instance",
".",
"state",
"==",
"u'pending'",
":",
"if",
"log",
":",
"log_yellow",
"(",
"\"Instance state: %s\"",
"%",
"instance",
".",
"state",
")",
"sleep",
"(",
"10",
")",
"instance",
".",
"update",
"(",
")",
"if",
"log",
":",
"log_green",
"(",
"\"Instance state: %s\"",
"%",
"instance",
".",
"state",
")",
"if",
"wait_for_ssh_available",
":",
"wait_for_ssh",
"(",
"instance",
".",
"public_dns_name",
")",
"# update the EBS volumes to be deleted on instance termination",
"if",
"delete_on_termination",
":",
"for",
"dev",
",",
"bd",
"in",
"instance",
".",
"block_device_mapping",
".",
"items",
"(",
")",
":",
"instance",
".",
"modify_attribute",
"(",
"'BlockDeviceMapping'",
",",
"[",
"\"%s=%d\"",
"%",
"(",
"dev",
",",
"1",
")",
"]",
")",
"# add a tag to our instance",
"connection",
".",
"create_tags",
"(",
"[",
"instance",
".",
"id",
"]",
",",
"tags",
")",
"if",
"log",
":",
"log_green",
"(",
"\"Public dns: %s\"",
"%",
"instance",
".",
"public_dns_name",
")",
"# returns our new instance",
"return",
"instance"
] |
Creates EC2 Instance
|
[
"Creates",
"EC2",
"Instance"
] |
train
|
https://github.com/pyBookshelf/bookshelf/blob/a6770678e735de95b194f6e6989223970db5f654/bookshelf/api_v2/ec2.py#L54-L115
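create_server_ec2 expects an already-open boto 2 EC2 connection together with AMI, key pair and block-device details. A hedged usage sketch: boto.ec2.connect_to_region is assumed because the function uses boto 2 style calls (EBSBlockDeviceType, get_all_images, image.run), the region, AMI id, key pair and security group below are placeholders, and real AWS credentials are required to run it.
import boto.ec2

connection = boto.ec2.connect_to_region('us-east-1')   # credentials from env / ~/.boto

instance = create_server_ec2(connection=connection,
                             region='us-east-1',
                             disk_name='/dev/sda1',
                             disk_size=40,
                             ami='ami-0123456789abcdef0',    # placeholder AMI id
                             key_pair='my-keypair',          # placeholder key pair name
                             instance_type='t2.medium',
                             tags={'Name': 'bookshelf-test'},
                             security_groups=['default'],
                             log=True)
print(instance.id, instance.public_dns_name)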
|
pyBookshelf/bookshelf
|
bookshelf/api_v2/ec2.py
|
destroy_ebs_volume
|
def destroy_ebs_volume(connection, region, volume_id, log=False):
""" destroys an ebs volume """
if ebs_volume_exists(connection, region, volume_id):
if log:
log_yellow('destroying EBS volume ...')
try:
connection.delete_volume(volume_id)
except:
# our EBS volume may be gone, but AWS info tables are stale
# wait a bit and ask again
sleep(5)
if not ebs_volume_exists(connection, region, volume_id):
pass
else:
raise("Couldn't delete EBS volume")
|
python
|
def destroy_ebs_volume(connection, region, volume_id, log=False):
""" destroys an ebs volume """
if ebs_volume_exists(connection, region, volume_id):
if log:
log_yellow('destroying EBS volume ...')
try:
connection.delete_volume(volume_id)
except:
# our EBS volume may be gone, but AWS info tables are stale
# wait a bit and ask again
sleep(5)
if not ebs_volume_exists(connection, region, volume_id):
pass
else:
raise("Couldn't delete EBS volume")
|
[
"def",
"destroy_ebs_volume",
"(",
"connection",
",",
"region",
",",
"volume_id",
",",
"log",
"=",
"False",
")",
":",
"if",
"ebs_volume_exists",
"(",
"connection",
",",
"region",
",",
"volume_id",
")",
":",
"if",
"log",
":",
"log_yellow",
"(",
"'destroying EBS volume ...'",
")",
"try",
":",
"connection",
".",
"delete_volume",
"(",
"volume_id",
")",
"except",
":",
"# our EBS volume may be gone, but AWS info tables are stale",
"# wait a bit and ask again",
"sleep",
"(",
"5",
")",
"if",
"not",
"ebs_volume_exists",
"(",
"connection",
",",
"region",
",",
"volume_id",
")",
":",
"pass",
"else",
":",
"raise",
"(",
"\"Couldn't delete EBS volume\"",
")"
] |
destroys an ebs volume
|
[
"destroys",
"an",
"ebs",
"volume"
] |
train
|
https://github.com/pyBookshelf/bookshelf/blob/a6770678e735de95b194f6e6989223970db5f654/bookshelf/api_v2/ec2.py#L118-L133
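One caveat about the retry branch above: raise("Couldn't delete EBS volume") raises a plain string, which modern Python rejects (exceptions must derive from BaseException), so the error path would itself fail. A suggested shape for that final check; RuntimeError is an assumption here, any Exception subclass would do, and this is not the repository's own code:
# inside destroy_ebs_volume's except-branch, after the 5 second sleep
if not ebs_volume_exists(connection, region, volume_id):
    pass
else:
    raise RuntimeError("Couldn't delete EBS volume")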
|
pyBookshelf/bookshelf
|
bookshelf/api_v2/ec2.py
|
destroy_ec2
|
def destroy_ec2(connection, region, instance_id, log=False):
""" terminates the instance """
data = get_ec2_info(connection=connection,
instance_id=instance_id,
region=region)
instance = connection.terminate_instances(instance_ids=[data['id']])[0]
if log:
log_yellow('destroying instance ...')
while instance.state != "terminated":
if log:
log_yellow("Instance state: %s" % instance.state)
sleep(10)
instance.update()
volume_id = data['volume']
if volume_id:
destroy_ebs_volume(connection, region, volume_id)
|
python
|
def destroy_ec2(connection, region, instance_id, log=False):
""" terminates the instance """
data = get_ec2_info(connection=connection,
instance_id=instance_id,
region=region)
instance = connection.terminate_instances(instance_ids=[data['id']])[0]
if log:
log_yellow('destroying instance ...')
while instance.state != "terminated":
if log:
log_yellow("Instance state: %s" % instance.state)
sleep(10)
instance.update()
volume_id = data['volume']
if volume_id:
destroy_ebs_volume(connection, region, volume_id)
|
[
"def",
"destroy_ec2",
"(",
"connection",
",",
"region",
",",
"instance_id",
",",
"log",
"=",
"False",
")",
":",
"data",
"=",
"get_ec2_info",
"(",
"connection",
"=",
"connection",
",",
"instance_id",
"=",
"instance_id",
",",
"region",
"=",
"region",
")",
"instance",
"=",
"connection",
".",
"terminate_instances",
"(",
"instance_ids",
"=",
"[",
"data",
"[",
"'id'",
"]",
"]",
")",
"[",
"0",
"]",
"if",
"log",
":",
"log_yellow",
"(",
"'destroying instance ...'",
")",
"while",
"instance",
".",
"state",
"!=",
"\"terminated\"",
":",
"if",
"log",
":",
"log_yellow",
"(",
"\"Instance state: %s\"",
"%",
"instance",
".",
"state",
")",
"sleep",
"(",
"10",
")",
"instance",
".",
"update",
"(",
")",
"volume_id",
"=",
"data",
"[",
"'volume'",
"]",
"if",
"volume_id",
":",
"destroy_ebs_volume",
"(",
"connection",
",",
"region",
",",
"volume_id",
")"
] |
terminates the instance
|
[
"terminates",
"the",
"instance"
] |
train
|
https://github.com/pyBookshelf/bookshelf/blob/a6770678e735de95b194f6e6989223970db5f654/bookshelf/api_v2/ec2.py#L136-L153
|
pyBookshelf/bookshelf
|
bookshelf/api_v2/ec2.py
|
down_ec2
|
def down_ec2(connection, instance_id, region, log=False):
""" shutdown of an existing EC2 instance """
# get the instance_id from the state file, and stop the instance
instance = connection.stop_instances(instance_ids=instance_id)[0]
while instance.state != "stopped":
if log:
log_yellow("Instance state: %s" % instance.state)
sleep(10)
instance.update()
if log:
log_green('Instance state: %s' % instance.state)
|
python
|
def down_ec2(connection, instance_id, region, log=False):
""" shutdown of an existing EC2 instance """
# get the instance_id from the state file, and stop the instance
instance = connection.stop_instances(instance_ids=instance_id)[0]
while instance.state != "stopped":
if log:
log_yellow("Instance state: %s" % instance.state)
sleep(10)
instance.update()
if log:
log_green('Instance state: %s' % instance.state)
|
[
"def",
"down_ec2",
"(",
"connection",
",",
"instance_id",
",",
"region",
",",
"log",
"=",
"False",
")",
":",
"# get the instance_id from the state file, and stop the instance",
"instance",
"=",
"connection",
".",
"stop_instances",
"(",
"instance_ids",
"=",
"instance_id",
")",
"[",
"0",
"]",
"while",
"instance",
".",
"state",
"!=",
"\"stopped\"",
":",
"if",
"log",
":",
"log_yellow",
"(",
"\"Instance state: %s\"",
"%",
"instance",
".",
"state",
")",
"sleep",
"(",
"10",
")",
"instance",
".",
"update",
"(",
")",
"if",
"log",
":",
"log_green",
"(",
"'Instance state: %s'",
"%",
"instance",
".",
"state",
")"
] |
shutdown of an existing EC2 instance
|
[
"shutdown",
"of",
"an",
"existing",
"EC2",
"instance"
] |
train
|
https://github.com/pyBookshelf/bookshelf/blob/a6770678e735de95b194f6e6989223970db5f654/bookshelf/api_v2/ec2.py#L156-L166
|
pyBookshelf/bookshelf
|
bookshelf/api_v2/ec2.py
|
ebs_volume_exists
|
def ebs_volume_exists(connection, region, volume_id):
""" finds out if a ebs volume exists """
for vol in connection.get_all_volumes():
if vol.id == volume_id:
return True
return False
|
python
|
def ebs_volume_exists(connection, region, volume_id):
""" finds out if a ebs volume exists """
for vol in connection.get_all_volumes():
if vol.id == volume_id:
return True
return False
|
[
"def",
"ebs_volume_exists",
"(",
"connection",
",",
"region",
",",
"volume_id",
")",
":",
"for",
"vol",
"in",
"connection",
".",
"get_all_volumes",
"(",
")",
":",
"if",
"vol",
".",
"id",
"==",
"volume_id",
":",
"return",
"True",
"return",
"False"
] |
finds out if a ebs volume exists
|
[
"finds",
"out",
"if",
"a",
"ebs",
"volume",
"exists"
] |
train
|
https://github.com/pyBookshelf/bookshelf/blob/a6770678e735de95b194f6e6989223970db5f654/bookshelf/api_v2/ec2.py#L169-L174
|
pyBookshelf/bookshelf
|
bookshelf/api_v2/ec2.py
|
get_ec2_info
|
def get_ec2_info(connection,
instance_id,
region,
username=None):
""" queries EC2 for details about a particular instance_id
"""
instance = connection.get_only_instances(
filters={'instance_id': instance_id}
)[0]
data = instance.__dict__
data['state'] = instance.state
data['cloud_type'] = 'ec2'
try:
volume = connection.get_all_volumes(
filters={'attachment.instance-id': instance.id}
)[0].id
data['volume'] = volume
except:
data['volume'] = ''
return data
|
python
|
def get_ec2_info(connection,
instance_id,
region,
username=None):
""" queries EC2 for details about a particular instance_id
"""
instance = connection.get_only_instances(
filters={'instance_id': instance_id}
)[0]
data = instance.__dict__
data['state'] = instance.state
data['cloud_type'] = 'ec2'
try:
volume = connection.get_all_volumes(
filters={'attachment.instance-id': instance.id}
)[0].id
data['volume'] = volume
except:
data['volume'] = ''
return data
|
[
"def",
"get_ec2_info",
"(",
"connection",
",",
"instance_id",
",",
"region",
",",
"username",
"=",
"None",
")",
":",
"instance",
"=",
"connection",
".",
"get_only_instances",
"(",
"filters",
"=",
"{",
"'instance_id'",
":",
"instance_id",
"}",
")",
"[",
"0",
"]",
"data",
"=",
"instance",
".",
"__dict__",
"data",
"[",
"'state'",
"]",
"=",
"instance",
".",
"state",
"data",
"[",
"'cloud_type'",
"]",
"=",
"'ec2'",
"try",
":",
"volume",
"=",
"connection",
".",
"get_all_volumes",
"(",
"filters",
"=",
"{",
"'attachment.instance-id'",
":",
"instance",
".",
"id",
"}",
")",
"[",
"0",
"]",
".",
"id",
"data",
"[",
"'volume'",
"]",
"=",
"volume",
"except",
":",
"data",
"[",
"'volume'",
"]",
"=",
"''",
"return",
"data"
] |
queries EC2 for details about a particular instance_id
|
[
"queries",
"EC2",
"for",
"details",
"about",
"a",
"particular",
"instance_id"
] |
train
|
https://github.com/pyBookshelf/bookshelf/blob/a6770678e735de95b194f6e6989223970db5f654/bookshelf/api_v2/ec2.py#L181-L202
|
pyBookshelf/bookshelf
|
bookshelf/api_v2/ec2.py
|
up_ec2
|
def up_ec2(connection,
region,
instance_id,
wait_for_ssh_available=True,
log=False,
timeout=600):
""" boots an existing ec2_instance """
# boot the ec2 instance
instance = connection.start_instances(instance_ids=instance_id)[0]
instance.update()
while instance.state != "running" and timeout > 1:
log_yellow("Instance state: %s" % instance.state)
if log:
log_yellow("Instance state: %s" % instance.state)
sleep(10)
timeout = timeout - 10
instance.update()
# and make sure we don't return until the instance is fully up
if wait_for_ssh_available:
wait_for_ssh(instance.ip_address)
|
python
|
def up_ec2(connection,
region,
instance_id,
wait_for_ssh_available=True,
log=False,
timeout=600):
""" boots an existing ec2_instance """
# boot the ec2 instance
instance = connection.start_instances(instance_ids=instance_id)[0]
instance.update()
while instance.state != "running" and timeout > 1:
log_yellow("Instance state: %s" % instance.state)
if log:
log_yellow("Instance state: %s" % instance.state)
sleep(10)
timeout = timeout - 10
instance.update()
# and make sure we don't return until the instance is fully up
if wait_for_ssh_available:
wait_for_ssh(instance.ip_address)
|
[
"def",
"up_ec2",
"(",
"connection",
",",
"region",
",",
"instance_id",
",",
"wait_for_ssh_available",
"=",
"True",
",",
"log",
"=",
"False",
",",
"timeout",
"=",
"600",
")",
":",
"# boot the ec2 instance",
"instance",
"=",
"connection",
".",
"start_instances",
"(",
"instance_ids",
"=",
"instance_id",
")",
"[",
"0",
"]",
"instance",
".",
"update",
"(",
")",
"while",
"instance",
".",
"state",
"!=",
"\"running\"",
"and",
"timeout",
">",
"1",
":",
"log_yellow",
"(",
"\"Instance state: %s\"",
"%",
"instance",
".",
"state",
")",
"if",
"log",
":",
"log_yellow",
"(",
"\"Instance state: %s\"",
"%",
"instance",
".",
"state",
")",
"sleep",
"(",
"10",
")",
"timeout",
"=",
"timeout",
"-",
"10",
"instance",
".",
"update",
"(",
")",
"# and make sure we don't return until the instance is fully up",
"if",
"wait_for_ssh_available",
":",
"wait_for_ssh",
"(",
"instance",
".",
"ip_address",
")"
] |
boots an existing ec2_instance
|
[
"boots",
"an",
"existing",
"ec2_instance"
] |
train
|
https://github.com/pyBookshelf/bookshelf/blob/a6770678e735de95b194f6e6989223970db5f654/bookshelf/api_v2/ec2.py#L205-L226
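The EC2 helpers above (down_ec2, up_ec2, get_ec2_info, destroy_ec2) all take the same boto 2 connection plus an instance id and region. A hedged lifecycle sketch; the region and instance id are placeholders and real AWS credentials are required:
import boto.ec2

connection = boto.ec2.connect_to_region('us-east-1')
instance_id = 'i-0123456789abcdef0'                          # placeholder instance id

down_ec2(connection, instance_id, 'us-east-1', log=True)     # stop the instance
up_ec2(connection, 'us-east-1', instance_id, log=True)       # boot it again and wait for SSH
info = get_ec2_info(connection, instance_id, 'us-east-1')
print(info['state'], info['volume'])
destroy_ec2(connection, 'us-east-1', instance_id, log=True)  # terminate it and drop its EBS volume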
|
tsnaomi/finnsyll
|
finnsyll/prev/v03.py
|
apply_T4
|
def apply_T4(word):
'''An agglutination diphthong that ends in /u, y/ usually contains a
syllable boundary when -C# or -CCV follow, e.g., [lau.ka.us],
[va.ka.ut.taa].'''
WORD = word.split('.')
for i, v in enumerate(WORD):
# i % 2 != 0 prevents this rule from applying to first, third, etc.
# syllables, which receive stress (WSP)
if is_consonant(v[-1]) and i % 2 != 0:
if i + 1 == len(WORD) or is_consonant(WORD[i + 1][0]):
vv = u_or_y_final_diphthongs(v)
if vv and not is_long(vv.group(1)):
I = vv.start(1) + 1
WORD[i] = v[:I] + '.' + v[I:]
WORD = '.'.join(WORD)
RULE = ' T4' if word != WORD else ''
return WORD, RULE
|
python
|
def apply_T4(word):
'''An agglutination diphthong that ends in /u, y/ usually contains a
syllable boundary when -C# or -CCV follow, e.g., [lau.ka.us],
[va.ka.ut.taa].'''
WORD = word.split('.')
for i, v in enumerate(WORD):
# i % 2 != 0 prevents this rule from applying to first, third, etc.
# syllables, which receive stress (WSP)
if is_consonant(v[-1]) and i % 2 != 0:
if i + 1 == len(WORD) or is_consonant(WORD[i + 1][0]):
vv = u_or_y_final_diphthongs(v)
if vv and not is_long(vv.group(1)):
I = vv.start(1) + 1
WORD[i] = v[:I] + '.' + v[I:]
WORD = '.'.join(WORD)
RULE = ' T4' if word != WORD else ''
return WORD, RULE
|
[
"def",
"apply_T4",
"(",
"word",
")",
":",
"WORD",
"=",
"word",
".",
"split",
"(",
"'.'",
")",
"for",
"i",
",",
"v",
"in",
"enumerate",
"(",
"WORD",
")",
":",
"# i % 2 != 0 prevents this rule from applying to first, third, etc.",
"# syllables, which receive stress (WSP)",
"if",
"is_consonant",
"(",
"v",
"[",
"-",
"1",
"]",
")",
"and",
"i",
"%",
"2",
"!=",
"0",
":",
"if",
"i",
"+",
"1",
"==",
"len",
"(",
"WORD",
")",
"or",
"is_consonant",
"(",
"WORD",
"[",
"i",
"+",
"1",
"]",
"[",
"0",
"]",
")",
":",
"vv",
"=",
"u_or_y_final_diphthongs",
"(",
"v",
")",
"if",
"vv",
"and",
"not",
"is_long",
"(",
"vv",
".",
"group",
"(",
"1",
")",
")",
":",
"I",
"=",
"vv",
".",
"start",
"(",
"1",
")",
"+",
"1",
"WORD",
"[",
"i",
"]",
"=",
"v",
"[",
":",
"I",
"]",
"+",
"'.'",
"+",
"v",
"[",
"I",
":",
"]",
"WORD",
"=",
"'.'",
".",
"join",
"(",
"WORD",
")",
"RULE",
"=",
"' T4'",
"if",
"word",
"!=",
"WORD",
"else",
"''",
"return",
"WORD",
",",
"RULE"
] |
An agglutination diphthong that ends in /u, y/ usually contains a
syllable boundary when -C# or -CCV follow, e.g., [lau.ka.us],
[va.ka.ut.taa].
|
[
"An",
"agglutination",
"diphthong",
"that",
"ends",
"in",
"/",
"u",
"y",
"/",
"usually",
"contains",
"a",
"syllable",
"boundary",
"when",
"-",
"C#",
"or",
"-",
"CCV",
"follow",
"e",
".",
"g",
".",
"[",
"lau",
".",
"ka",
".",
"us",
"]",
"[",
"va",
".",
"ka",
".",
"ut",
".",
"taa",
"]",
"."
] |
train
|
https://github.com/tsnaomi/finnsyll/blob/6a42740311688c946a636a3e2304866c7aa041b3/finnsyll/prev/v03.py#L137-L159
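apply_T4 takes a partially syllabified word (syllables joined by '.') and returns the re-syllabified word plus a marker for whether rule T4 fired. An illustration based on the docstring's own [lau.ka.us] example; the exact behaviour depends on helpers (is_consonant, u_or_y_final_diphthongs, is_long) not shown in this excerpt, so treat the expected output as indicative:
word, rule = apply_T4('lau.kaus')
print(word, rule)    # expected along the lines of: lau.ka.us  T4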
|
OLC-Bioinformatics/sipprverse
|
cgecore/utility.py
|
seqs_from_file
|
def seqs_from_file(filename, exit_on_err=False, return_qual=False):
"""Extract sequences from a file
Name:
seqs_from_file
Author(s):
Martin C F Thomsen
Date:
18 Jul 2013
Description:
Iterator which extract sequence data from the input file
Args:
filename: string which contain a path to the input file
Supported Formats:
fasta, fastq
USAGE:
>>> import os, sys
>>> # Create fasta test file
>>> file_content = ('>head1 desc1\nthis_is_seq_1\n>head2 desc2\n'
'this_is_seq_2\n>head3 desc3\nthis_is_seq_3\n')
>>> with open_('test.fsa', 'w') as f: f.write(file_content)
>>> # Parse and print the fasta file
>>> for seq, name, desc in SeqsFromFile('test.fsa'):
... print ">%s %s\n%s"%(name, desc, seq)
...
>head1 desc1
this_is_seq_1
>head2 desc2
this_is_seq_2
>head3 desc3
this_is_seq_3
"""
# VALIDATE INPUT
if not isinstance(filename, str):
msg = 'Filename has to be a string.'
if exit_on_err:
sys.stderr.write('Error: %s\n'%msg)
sys.exit(1)
else: raise IOError(msg)
if not os.path.exists(filename):
msg = 'File "%s" does not exist.'%filename
if exit_on_err:
sys.stderr.write('Error: %s\n'%msg)
sys.exit(1)
else: raise IOError(msg)
# EXTRACT DATA
with open_(filename,"rt") as f:
query_seq_segments = []
seq, name, desc, qual = '', '', '', ''
add_segment = query_seq_segments.append
for l in f:
if len(l.strip()) == 0: continue
#sys.stderr.write("%s\n"%line)
fields=l.strip().split()
if l.startswith(">"):
# FASTA HEADER FOUND
if query_seq_segments != []:
# YIELD SEQUENCE AND RESET
seq = ''.join(query_seq_segments)
yield (seq, name, desc)
seq, name, desc = '', '', ''
del query_seq_segments[:]
name = fields[0][1:]
desc = ' '.join(fields[1:])
elif l.startswith("@"):
# FASTQ HEADER FOUND
name = fields[0][1:]
desc = ' '.join(fields[1:])
try:
# EXTRACT FASTQ SEQUENCE
seq = next(f).strip().split()[0]
# SKIP SECOND HEADER LINE AND QUALITY SCORES
l = next(f)
qual = next(f).strip() # Qualities
except:
break
else:
# YIELD SEQUENCE AND RESET
if return_qual:
yield (seq, qual, name, desc)
else:
yield (seq, name, desc)
seq, name, desc, qual = '', '', '', ''
elif len(fields[0])>0:
# EXTRACT FASTA SEQUENCE
add_segment(fields[0])
# CHECK FOR LAST FASTA SEQUENCE
if query_seq_segments != []:
# YIELD SEQUENCE
seq = ''.join(query_seq_segments)
yield (seq, name, desc)
|
python
|
def seqs_from_file(filename, exit_on_err=False, return_qual=False):
"""Extract sequences from a file
Name:
seqs_from_file
Author(s):
Martin C F Thomsen
Date:
18 Jul 2013
Description:
Iterator which extract sequence data from the input file
Args:
filename: string which contain a path to the input file
Supported Formats:
fasta, fastq
USAGE:
>>> import os, sys
>>> # Create fasta test file
>>> file_content = ('>head1 desc1\nthis_is_seq_1\n>head2 desc2\n'
'this_is_seq_2\n>head3 desc3\nthis_is_seq_3\n')
>>> with open_('test.fsa', 'w') as f: f.write(file_content)
>>> # Parse and print the fasta file
>>> for seq, name, desc in SeqsFromFile('test.fsa'):
... print ">%s %s\n%s"%(name, desc, seq)
...
>head1 desc1
this_is_seq_1
>head2 desc2
this_is_seq_2
>head3 desc3
this_is_seq_3
"""
# VALIDATE INPUT
if not isinstance(filename, str):
msg = 'Filename has to be a string.'
if exit_on_err:
sys.stderr.write('Error: %s\n'%msg)
sys.exit(1)
else: raise IOError(msg)
if not os.path.exists(filename):
msg = 'File "%s" does not exist.'%filename
if exit_on_err:
sys.stderr.write('Error: %s\n'%msg)
sys.exit(1)
else: raise IOError(msg)
# EXTRACT DATA
with open_(filename,"rt") as f:
query_seq_segments = []
seq, name, desc, qual = '', '', '', ''
add_segment = query_seq_segments.append
for l in f:
if len(l.strip()) == 0: continue
#sys.stderr.write("%s\n"%line)
fields=l.strip().split()
if l.startswith(">"):
# FASTA HEADER FOUND
if query_seq_segments != []:
# YIELD SEQUENCE AND RESET
seq = ''.join(query_seq_segments)
yield (seq, name, desc)
seq, name, desc = '', '', ''
del query_seq_segments[:]
name = fields[0][1:]
desc = ' '.join(fields[1:])
elif l.startswith("@"):
# FASTQ HEADER FOUND
name = fields[0][1:]
desc = ' '.join(fields[1:])
try:
# EXTRACT FASTQ SEQUENCE
seq = next(f).strip().split()[0]
# SKIP SECOND HEADER LINE AND QUALITY SCORES
l = next(f)
qual = next(f).strip() # Qualities
except:
break
else:
# YIELD SEQUENCE AND RESET
if return_qual:
yield (seq, qual, name, desc)
else:
yield (seq, name, desc)
seq, name, desc, qual = '', '', '', ''
elif len(fields[0])>0:
# EXTRACT FASTA SEQUENCE
add_segment(fields[0])
# CHECK FOR LAST FASTA SEQUENCE
if query_seq_segments != []:
# YIELD SEQUENCE
seq = ''.join(query_seq_segments)
yield (seq, name, desc)
|
[
"def",
"seqs_from_file",
"(",
"filename",
",",
"exit_on_err",
"=",
"False",
",",
"return_qual",
"=",
"False",
")",
":",
"# VALIDATE INPUT",
"if",
"not",
"isinstance",
"(",
"filename",
",",
"str",
")",
":",
"msg",
"=",
"'Filename has to be a string.'",
"if",
"exit_on_err",
":",
"sys",
".",
"stderr",
".",
"write",
"(",
"'Error: %s\\n'",
"%",
"msg",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"else",
":",
"raise",
"IOError",
"(",
"msg",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"filename",
")",
":",
"msg",
"=",
"'File \"%s\" does not exist.'",
"%",
"filename",
"if",
"exit_on_err",
":",
"sys",
".",
"stderr",
".",
"write",
"(",
"'Error: %s\\n'",
"%",
"msg",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"else",
":",
"raise",
"IOError",
"(",
"msg",
")",
"# EXTRACT DATA",
"with",
"open_",
"(",
"filename",
",",
"\"rt\"",
")",
"as",
"f",
":",
"query_seq_segments",
"=",
"[",
"]",
"seq",
",",
"name",
",",
"desc",
",",
"qual",
"=",
"''",
",",
"''",
",",
"''",
",",
"''",
"add_segment",
"=",
"query_seq_segments",
".",
"append",
"for",
"l",
"in",
"f",
":",
"if",
"len",
"(",
"l",
".",
"strip",
"(",
")",
")",
"==",
"0",
":",
"continue",
"#sys.stderr.write(\"%s\\n\"%line)",
"fields",
"=",
"l",
".",
"strip",
"(",
")",
".",
"split",
"(",
")",
"if",
"l",
".",
"startswith",
"(",
"\">\"",
")",
":",
"# FASTA HEADER FOUND",
"if",
"query_seq_segments",
"!=",
"[",
"]",
":",
"# YIELD SEQUENCE AND RESET",
"seq",
"=",
"''",
".",
"join",
"(",
"query_seq_segments",
")",
"yield",
"(",
"seq",
",",
"name",
",",
"desc",
")",
"seq",
",",
"name",
",",
"desc",
"=",
"''",
",",
"''",
",",
"''",
"del",
"query_seq_segments",
"[",
":",
"]",
"name",
"=",
"fields",
"[",
"0",
"]",
"[",
"1",
":",
"]",
"desc",
"=",
"' '",
".",
"join",
"(",
"fields",
"[",
"1",
":",
"]",
")",
"elif",
"l",
".",
"startswith",
"(",
"\"@\"",
")",
":",
"# FASTQ HEADER FOUND",
"name",
"=",
"fields",
"[",
"0",
"]",
"[",
"1",
":",
"]",
"desc",
"=",
"' '",
".",
"join",
"(",
"fields",
"[",
"1",
":",
"]",
")",
"try",
":",
"# EXTRACT FASTQ SEQUENCE",
"seq",
"=",
"next",
"(",
"f",
")",
".",
"strip",
"(",
")",
".",
"split",
"(",
")",
"[",
"0",
"]",
"# SKIP SECOND HEADER LINE AND QUALITY SCORES",
"l",
"=",
"next",
"(",
"f",
")",
"qual",
"=",
"next",
"(",
"f",
")",
".",
"strip",
"(",
")",
"# Qualities",
"except",
":",
"break",
"else",
":",
"# YIELD SEQUENCE AND RESET",
"if",
"return_qual",
":",
"yield",
"(",
"seq",
",",
"qual",
",",
"name",
",",
"desc",
")",
"else",
":",
"yield",
"(",
"seq",
",",
"name",
",",
"desc",
")",
"seq",
",",
"name",
",",
"desc",
",",
"qual",
"=",
"''",
",",
"''",
",",
"''",
",",
"''",
"elif",
"len",
"(",
"fields",
"[",
"0",
"]",
")",
">",
"0",
":",
"# EXTRACT FASTA SEQUENCE",
"add_segment",
"(",
"fields",
"[",
"0",
"]",
")",
"# CHECK FOR LAST FASTA SEQUENCE",
"if",
"query_seq_segments",
"!=",
"[",
"]",
":",
"# YIELD SEQUENCE",
"seq",
"=",
"''",
".",
"join",
"(",
"query_seq_segments",
")",
"yield",
"(",
"seq",
",",
"name",
",",
"desc",
")"
] |
Extract sequences from a file
Name:
seqs_from_file
Author(s):
Martin C F Thomsen
Date:
18 Jul 2013
Description:
Iterator which extract sequence data from the input file
Args:
filename: string which contain a path to the input file
Supported Formats:
fasta, fastq
USAGE:
>>> import os, sys
>>> # Create fasta test file
>>> file_content = ('>head1 desc1\nthis_is_seq_1\n>head2 desc2\n'
'this_is_seq_2\n>head3 desc3\nthis_is_seq_3\n')
>>> with open_('test.fsa', 'w') as f: f.write(file_content)
>>> # Parse and print the fasta file
>>> for seq, name, desc in SeqsFromFile('test.fsa'):
... print ">%s %s\n%s"%(name, desc, seq)
...
>head1 desc1
this_is_seq_1
>head2 desc2
this_is_seq_2
>head3 desc3
this_is_seq_3
|
[
"Extract",
"sequences",
"from",
"a",
"file",
"Name",
":",
"seqs_from_file",
"Author",
"(",
"s",
")",
":",
"Martin",
"C",
"F",
"Thomsen",
"Date",
":",
"18",
"Jul",
"2013",
"Description",
":",
"Iterator",
"which",
"extract",
"sequence",
"data",
"from",
"the",
"input",
"file",
"Args",
":",
"filename",
":",
"string",
"which",
"contain",
"a",
"path",
"to",
"the",
"input",
"file",
"Supported",
"Formats",
":",
"fasta",
"fastq",
"USAGE",
":",
">>>",
"import",
"os",
"sys",
">>>",
"#",
"Create",
"fasta",
"test",
"file",
">>>",
"file_content",
"=",
"(",
">",
"head1",
"desc1",
"\\",
"nthis_is_seq_1",
"\\",
"n",
">",
"head2",
"desc2",
"\\",
"n",
"this_is_seq_2",
"\\",
"n",
">",
"head3",
"desc3",
"\\",
"nthis_is_seq_3",
"\\",
"n",
")",
">>>",
"with",
"open_",
"(",
"test",
".",
"fsa",
"w",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"file_content",
")",
">>>",
"#",
"Parse",
"and",
"print",
"the",
"fasta",
"file",
">>>",
"for",
"seq",
"name",
"desc",
"in",
"SeqsFromFile",
"(",
"test",
".",
"fsa",
")",
":",
"...",
"print",
">",
"%s",
"%s",
"\\",
"n%s",
"%",
"(",
"name",
"desc",
"seq",
")",
"...",
">",
"head1",
"desc1",
"this_is_seq_1",
">",
"head2",
"desc2",
"this_is_seq_2",
">",
"head3",
"desc3",
"this_is_seq_3"
] |
train
|
https://github.com/OLC-Bioinformatics/sipprverse/blob/d4f10cdf8e1a39dac0953db61c21c97efc6006de/cgecore/utility.py#L223-L318
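The docstring above already demonstrates FASTA parsing; the same iterator also handles FASTQ, where return_qual=True adds the quality string to each yielded tuple. A short sketch with a placeholder file name (gzipped input also works, since the function reads through open_):
for seq, qual, name, desc in seqs_from_file('reads.fastq', return_qual=True):
    print(name, len(seq), len(qual))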
|
OLC-Bioinformatics/sipprverse
|
cgecore/utility.py
|
open_
|
def open_(filename, mode=None, compresslevel=9):
"""Switch for both open() and gzip.open().
Determines if the file is normal or gzipped by looking at the file
extension.
The filename argument is required; mode defaults to 'rb' for gzip and 'r'
for normal and compresslevel defaults to 9 for gzip.
>>> import gzip
>>> from contextlib import closing
>>> with open_(filename) as f:
... f.read()
"""
if filename[-3:] == '.gz':
if mode is None: mode = 'rt'
return closing(gzip.open(filename, mode, compresslevel))
else:
if mode is None: mode = 'r'
return open(filename, mode)
|
python
|
def open_(filename, mode=None, compresslevel=9):
"""Switch for both open() and gzip.open().
Determines if the file is normal or gzipped by looking at the file
extension.
The filename argument is required; mode defaults to 'rb' for gzip and 'r'
for normal and compresslevel defaults to 9 for gzip.
>>> import gzip
>>> from contextlib import closing
>>> with open_(filename) as f:
... f.read()
"""
if filename[-3:] == '.gz':
if mode is None: mode = 'rt'
return closing(gzip.open(filename, mode, compresslevel))
else:
if mode is None: mode = 'r'
return open(filename, mode)
|
[
"def",
"open_",
"(",
"filename",
",",
"mode",
"=",
"None",
",",
"compresslevel",
"=",
"9",
")",
":",
"if",
"filename",
"[",
"-",
"3",
":",
"]",
"==",
"'.gz'",
":",
"if",
"mode",
"is",
"None",
":",
"mode",
"=",
"'rt'",
"return",
"closing",
"(",
"gzip",
".",
"open",
"(",
"filename",
",",
"mode",
",",
"compresslevel",
")",
")",
"else",
":",
"if",
"mode",
"is",
"None",
":",
"mode",
"=",
"'r'",
"return",
"open",
"(",
"filename",
",",
"mode",
")"
] |
Switch for both open() and gzip.open().
Determines if the file is normal or gzipped by looking at the file
extension.
The filename argument is required; mode defaults to 'rb' for gzip and 'r'
for normal and compresslevel defaults to 9 for gzip.
>>> import gzip
>>> from contextlib import closing
>>> with open_(filename) as f:
... f.read()
|
[
"Switch",
"for",
"both",
"open",
"()",
"and",
"gzip",
".",
"open",
"()",
".",
"Determines",
"if",
"the",
"file",
"is",
"normal",
"or",
"gzipped",
"by",
"looking",
"at",
"the",
"file",
"extension",
".",
"The",
"filename",
"argument",
"is",
"required",
";",
"mode",
"defaults",
"to",
"rb",
"for",
"gzip",
"and",
"r",
"for",
"normal",
"and",
"compresslevel",
"defaults",
"to",
"9",
"for",
"gzip",
".",
">>>",
"import",
"gzip",
">>>",
"from",
"contextlib",
"import",
"closing",
">>>",
"with",
"open_",
"(",
"filename",
")",
"as",
"f",
":",
"...",
"f",
".",
"read",
"()"
] |
train
|
https://github.com/OLC-Bioinformatics/sipprverse/blob/d4f10cdf8e1a39dac0953db61c21c97efc6006de/cgecore/utility.py#L321-L340
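open_ dispatches purely on the '.gz' extension; note that the code shown defaults gzipped handles to mode 'rt' (wrapped in contextlib.closing), even though the docstring still says 'rb'. A usage sketch with placeholder file names:
# Plain text: behaves like the built-in open(filename, 'r').
with open_('notes.txt') as f:
    text = f.read()

# Gzipped: gzip.open(filename, 'rt', 9) under the hood, so reads return str rather than bytes.
with open_('reads.fastq.gz') as f:
    first_line = f.readline()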
|
OLC-Bioinformatics/sipprverse
|
cgecore/utility.py
|
load_json
|
def load_json(json_object):
''' Load json from file or file name '''
content = None
if isinstance(json_object, str) and os.path.exists(json_object):
with open_(json_object) as f:
try:
content = json.load(f)
except Exception as e:
debug.log("Warning: Content of '%s' file is not json."%f.name)
elif hasattr(json_object, 'read'):
try:
content = json.load(json_object)
except Exception as e:
debug.log("Warning: Content of '%s' file is not json."%json_object.name)
else:
debug.log("%s\nWarning: Object type invalid!"%json_object)
return content
|
python
|
def load_json(json_object):
''' Load json from file or file name '''
content = None
if isinstance(json_object, str) and os.path.exists(json_object):
with open_(json_object) as f:
try:
content = json.load(f)
except Exception as e:
debug.log("Warning: Content of '%s' file is not json."%f.name)
elif hasattr(json_object, 'read'):
try:
content = json.load(json_object)
except Exception as e:
debug.log("Warning: Content of '%s' file is not json."%json_object.name)
else:
debug.log("%s\nWarning: Object type invalid!"%json_object)
return content
|
[
"def",
"load_json",
"(",
"json_object",
")",
":",
"content",
"=",
"None",
"if",
"isinstance",
"(",
"json_object",
",",
"str",
")",
"and",
"os",
".",
"path",
".",
"exists",
"(",
"json_object",
")",
":",
"with",
"open_",
"(",
"json_object",
")",
"as",
"f",
":",
"try",
":",
"content",
"=",
"json",
".",
"load",
"(",
"f",
")",
"except",
"Exception",
"as",
"e",
":",
"debug",
".",
"log",
"(",
"\"Warning: Content of '%s' file is not json.\"",
"%",
"f",
".",
"name",
")",
"elif",
"hasattr",
"(",
"json_object",
",",
"'read'",
")",
":",
"try",
":",
"content",
"=",
"json",
".",
"load",
"(",
"json_object",
")",
"except",
"Exception",
"as",
"e",
":",
"debug",
".",
"log",
"(",
"\"Warning: Content of '%s' file is not json.\"",
"%",
"json_object",
".",
"name",
")",
"else",
":",
"debug",
".",
"log",
"(",
"\"%s\\nWarning: Object type invalid!\"",
"%",
"json_object",
")",
"return",
"content"
] |
Load json from file or file name
|
[
"Load",
"json",
"from",
"file",
"or",
"file",
"name"
] |
train
|
https://github.com/OLC-Bioinformatics/sipprverse/blob/d4f10cdf8e1a39dac0953db61c21c97efc6006de/cgecore/utility.py#L342-L358
|
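A hedged usage sketch for the load_json record above, assuming the same cgecore.utility import path; the file name and contents are illustrative. Both call styles supported by the function are shown: a path that exists on disk, and an already opened file object.

import json

from cgecore.utility import load_json  # assumed import path

# Create a small JSON file to load.
with open('settings.json', 'w') as handle:
    json.dump({'threads': 4, 'samples': ['A', 'B']}, handle)

# Load by file name (the os.path.exists branch) ...
by_name = load_json('settings.json')

# ... or by an open file object (the hasattr(json_object, 'read') branch).
with open('settings.json') as handle:
    by_handle = load_json(handle)

print(by_name == by_handle)  # True; invalid input only logs a warning and returns None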
OLC-Bioinformatics/sipprverse
|
cgecore/utility.py
|
sort2groups
|
def sort2groups(array, gpat=['_R1','_R2']):
""" Sort an array of strings to groups by patterns """
groups = [REGroup(gp) for gp in gpat]
unmatched = []
for item in array:
matched = False
for m in groups:
if m.match(item):
matched = True
break
if not matched: unmatched.append(item)
return [sorted(m.list) for m in groups], sorted(unmatched)
|
python
|
def sort2groups(array, gpat=['_R1','_R2']):
""" Sort an array of strings to groups by patterns """
groups = [REGroup(gp) for gp in gpat]
unmatched = []
for item in array:
matched = False
for m in groups:
if m.match(item):
matched = True
break
if not matched: unmatched.append(item)
return [sorted(m.list) for m in groups], sorted(unmatched)
|
[
"def",
"sort2groups",
"(",
"array",
",",
"gpat",
"=",
"[",
"'_R1'",
",",
"'_R2'",
"]",
")",
":",
"groups",
"=",
"[",
"REGroup",
"(",
"gp",
")",
"for",
"gp",
"in",
"gpat",
"]",
"unmatched",
"=",
"[",
"]",
"for",
"item",
"in",
"array",
":",
"matched",
"=",
"False",
"for",
"m",
"in",
"groups",
":",
"if",
"m",
".",
"match",
"(",
"item",
")",
":",
"matched",
"=",
"True",
"break",
"if",
"not",
"matched",
":",
"unmatched",
".",
"append",
"(",
"item",
")",
"return",
"[",
"sorted",
"(",
"m",
".",
"list",
")",
"for",
"m",
"in",
"groups",
"]",
",",
"sorted",
"(",
"unmatched",
")"
] |
Sort an array of strings to groups by patterns
|
[
"Sort",
"an",
"array",
"of",
"strings",
"to",
"groups",
"by",
"patterns"
] |
train
|
https://github.com/OLC-Bioinformatics/sipprverse/blob/d4f10cdf8e1a39dac0953db61c21c97efc6006de/cgecore/utility.py#L360-L371
|
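A usage sketch for the sort2groups record above, again assuming the cgecore.utility import path. The exact behaviour depends on the REGroup helper, which is not part of this record; based on the return statement, the sketch assumes each REGroup collects the items its pattern matches in a .list attribute. The file names are invented paired-end read names.

from cgecore.utility import sort2groups  # assumed import path

files = ['sampleB_R2.fq', 'sampleA_R1.fq', 'notes.txt',
         'sampleA_R2.fq', 'sampleB_R1.fq']

# The default patterns split paired-end reads into '_R1' and '_R2' groups;
# anything matching neither pattern ends up in the second return value.
groups, unmatched = sort2groups(files)
print(groups)     # expected: [['sampleA_R1.fq', 'sampleB_R1.fq'], ['sampleA_R2.fq', 'sampleB_R2.fq']]
print(unmatched)  # expected: ['notes.txt']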
OLC-Bioinformatics/sipprverse
|
cgecore/utility.py
|
sort_and_distribute
|
def sort_and_distribute(array, splits=2):
""" Sort an array of strings to groups by alphabetically continuous
distribution
"""
if not isinstance(array, (list,tuple)): raise TypeError("array must be a list")
if not isinstance(splits, int): raise TypeError("splits must be an integer")
remaining = sorted(array)
if sys.version_info < (3, 0):
myrange = xrange(splits)
else:
myrange = range(splits)
groups = [[] for i in myrange]
while len(remaining) > 0:
for i in myrange:
if len(remaining) > 0: groups[i].append(remaining.pop(0))
return groups
|
python
|
def sort_and_distribute(array, splits=2):
""" Sort an array of strings to groups by alphabetically continuous
distribution
"""
if not isinstance(array, (list,tuple)): raise TypeError("array must be a list")
if not isinstance(splits, int): raise TypeError("splits must be an integer")
remaining = sorted(array)
if sys.version_info < (3, 0):
myrange = xrange(splits)
else:
myrange = range(splits)
groups = [[] for i in myrange]
while len(remaining) > 0:
for i in myrange:
if len(remaining) > 0: groups[i].append(remaining.pop(0))
return groups
|
[
"def",
"sort_and_distribute",
"(",
"array",
",",
"splits",
"=",
"2",
")",
":",
"if",
"not",
"isinstance",
"(",
"array",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"raise",
"TypeError",
"(",
"\"array must be a list\"",
")",
"if",
"not",
"isinstance",
"(",
"splits",
",",
"int",
")",
":",
"raise",
"TypeError",
"(",
"\"splits must be an integer\"",
")",
"remaining",
"=",
"sorted",
"(",
"array",
")",
"if",
"sys",
".",
"version_info",
"<",
"(",
"3",
",",
"0",
")",
":",
"myrange",
"=",
"xrange",
"(",
"splits",
")",
"else",
":",
"myrange",
"=",
"range",
"(",
"splits",
")",
"groups",
"=",
"[",
"[",
"]",
"for",
"i",
"in",
"myrange",
"]",
"while",
"len",
"(",
"remaining",
")",
">",
"0",
":",
"for",
"i",
"in",
"myrange",
":",
"if",
"len",
"(",
"remaining",
")",
">",
"0",
":",
"groups",
"[",
"i",
"]",
".",
"append",
"(",
"remaining",
".",
"pop",
"(",
"0",
")",
")",
"return",
"groups"
] |
Sort an array of strings to groups by alphabetically continuous
distribution
|
[
"Sort",
"an",
"array",
"of",
"strings",
"to",
"groups",
"by",
"alphabetically",
"continuous",
"distribution"
] |
train
|
https://github.com/OLC-Bioinformatics/sipprverse/blob/d4f10cdf8e1a39dac0953db61c21c97efc6006de/cgecore/utility.py#L373-L388
|
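A usage sketch for the sort_and_distribute record above, assuming the cgecore.utility import path; the sample names are illustrative. The function sorts the input first and then deals items round-robin across the requested number of groups, so the expected output follows directly from the code in the record.

from cgecore.utility import sort_and_distribute  # assumed import path

samples = ['delta', 'alpha', 'echo', 'charlie', 'bravo']

# Sorted first, then dealt round-robin across `splits` groups.
groups = sort_and_distribute(samples, splits=2)
print(groups)  # [['alpha', 'charlie', 'echo'], ['bravo', 'delta']]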
OLC-Bioinformatics/sipprverse
|
cgecore/utility.py
|
mkpath
|
def mkpath(filepath, permissions=0o777):
""" This function executes a mkdir command for filepath and with permissions
(octal number with leading 0 or string only)
# eg. mkpath("path/to/file", "0o775")
"""
# Converting string of octal to integer, if string is given.
if isinstance(permissions, str):
permissions = sum([int(x)*8**i for i,x in enumerate(reversed(permissions))])
# Creating directory
if not os.path.exists(filepath):
debug.log("Creating Directory %s (permissions: %s)"%(
filepath, permissions))
os.makedirs(filepath, permissions)
else:
debug.log("Warning: The directory "+ filepath +" already exists")
return filepath
|
python
|
def mkpath(filepath, permissions=0o777):
""" This function executes a mkdir command for filepath and with permissions
(octal number with leading 0 or string only)
# eg. mkpath("path/to/file", "0o775")
"""
# Converting string of octal to integer, if string is given.
if isinstance(permissions, str):
permissions = sum([int(x)*8**i for i,x in enumerate(reversed(permissions))])
# Creating directory
if not os.path.exists(filepath):
debug.log("Creating Directory %s (permissions: %s)"%(
filepath, permissions))
os.makedirs(filepath, permissions)
else:
debug.log("Warning: The directory "+ filepath +" already exists")
return filepath
|
[
"def",
"mkpath",
"(",
"filepath",
",",
"permissions",
"=",
"0o777",
")",
":",
"# Converting string of octal to integer, if string is given.",
"if",
"isinstance",
"(",
"permissions",
",",
"str",
")",
":",
"permissions",
"=",
"sum",
"(",
"[",
"int",
"(",
"x",
")",
"*",
"8",
"**",
"i",
"for",
"i",
",",
"x",
"in",
"enumerate",
"(",
"reversed",
"(",
"permissions",
")",
")",
"]",
")",
"# Creating directory",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"filepath",
")",
":",
"debug",
".",
"log",
"(",
"\"Creating Directory %s (permissions: %s)\"",
"%",
"(",
"filepath",
",",
"permissions",
")",
")",
"os",
".",
"makedirs",
"(",
"filepath",
",",
"permissions",
")",
"else",
":",
"debug",
".",
"log",
"(",
"\"Warning: The directory \"",
"+",
"filepath",
"+",
"\" already exists\"",
")",
"return",
"filepath"
] |
This function executes a mkdir command for filepath and with permissions
(octal number with leading 0 or string only)
# eg. mkpath("path/to/file", "0o775")
|
[
"This",
"function",
"executes",
"a",
"mkdir",
"command",
"for",
"filepath",
"and",
"with",
"permissions",
"(",
"octal",
"number",
"with",
"leading",
"0",
"or",
"string",
"only",
")",
"#",
"eg",
".",
"mkpath",
"(",
"path",
"/",
"to",
"/",
"file",
"0o775",
")"
] |
train
|
https://github.com/OLC-Bioinformatics/sipprverse/blob/d4f10cdf8e1a39dac0953db61c21c97efc6006de/cgecore/utility.py#L390-L405
|
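A usage sketch for the mkpath record above, assuming the cgecore.utility import path; the directory names are illustrative. When permissions are given as a string, the conversion loop expects octal digits only (e.g. '775'); the '0o775' string shown in the record's own docstring would fail on int('o'). Note also that os.makedirs applies the process umask, so the effective mode can be more restrictive than requested.

from cgecore.utility import mkpath  # assumed import path

# Permissions as an integer octal literal ...
run_dir = mkpath('results/run_01', permissions=0o775)

# ... or as a digits-only octal string, which the function converts itself.
run_dir = mkpath('results/run_02', permissions='775')
print(run_dir)  # 'results/run_02'; an existing directory only triggers a log warning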