| partition (stringclasses, 3 values) | func_name (stringlengths 1–134) | docstring (stringlengths 1–46.9k) | path (stringlengths 4–223) | original_string (stringlengths 75–104k) | code (stringlengths 75–104k) | docstring_tokens (listlengths 1–1.97k) | repo (stringlengths 7–55) | language (stringclasses, 1 value) | url (stringlengths 87–315) | code_tokens (listlengths 19–28.4k) | sha (stringlengths 40–40) |
|---|---|---|---|---|---|---|---|---|---|---|---|
test
|
RequestBuilder.with_headers
|
Adds headers to the request
Args:
headers (dict): The headers to add to the request headers
Returns:
The request builder instance in order to chain calls
|
hbp_service_client/request/request_builder.py
|
def with_headers(self, headers):
'''Adds headers to the request
Args:
headers (dict): The headers to add to the request headers
Returns:
The request builder instance in order to chain calls
'''
copy = headers.copy()
copy.update(self._headers)
return self.__copy_and_set('headers', copy)
|
def with_headers(self, headers):
'''Adds headers to the request
Args:
headers (dict): The headers to add to the request headers
Returns:
The request builder instance in order to chain calls
'''
copy = headers.copy()
copy.update(self._headers)
return self.__copy_and_set('headers', copy)
|
[
"Adds",
"headers",
"to",
"the",
"request"
] |
HumanBrainProject/hbp-service-client
|
python
|
https://github.com/HumanBrainProject/hbp-service-client/blob/b338fb41a7f0e7b9d654ff28fcf13a56d03bff4d/hbp_service_client/request/request_builder.py#L115-L126
|
[
"def",
"with_headers",
"(",
"self",
",",
"headers",
")",
":",
"copy",
"=",
"headers",
".",
"copy",
"(",
")",
"copy",
".",
"update",
"(",
"self",
".",
"_headers",
")",
"return",
"self",
".",
"__copy_and_set",
"(",
"'headers'",
",",
"copy",
")"
] |
b338fb41a7f0e7b9d654ff28fcf13a56d03bff4d
|
test
|
RequestBuilder.with_params
|
Adds parameters to the request params
Args:
params (dict): The parameters to add to the request params
Returns:
The request builder instance in order to chain calls
|
hbp_service_client/request/request_builder.py
|
def with_params(self, params):
'''Adds parameters to the request params
Args:
params (dict): The parameters to add to the request params
Returns:
The request builder instance in order to chain calls
'''
copy = params.copy()
copy.update(self._params)
return self.__copy_and_set('params', copy)
|
def with_params(self, params):
'''Adds parameters to the request params
Args:
params (dict): The parameters to add to the request params
Returns:
The request builder instance in order to chain calls
'''
copy = params.copy()
copy.update(self._params)
return self.__copy_and_set('params', copy)
|
[
"Adds",
"parameters",
"to",
"the",
"request",
"params"
] |
HumanBrainProject/hbp-service-client
|
python
|
https://github.com/HumanBrainProject/hbp-service-client/blob/b338fb41a7f0e7b9d654ff28fcf13a56d03bff4d/hbp_service_client/request/request_builder.py#L139-L150
|
[
"def",
"with_params",
"(",
"self",
",",
"params",
")",
":",
"copy",
"=",
"params",
".",
"copy",
"(",
")",
"copy",
".",
"update",
"(",
"self",
".",
"_params",
")",
"return",
"self",
".",
"__copy_and_set",
"(",
"'params'",
",",
"copy",
")"
] |
b338fb41a7f0e7b9d654ff28fcf13a56d03bff4d
|
test
|
RequestBuilder.throw
|
Defines if an exception should be thrown after the request is sent
Args:
exception_class (class): The class of the exception to instantiate
should_throw (function): The predicate that should indicate if the exception
should be thrown. This function will be called with the response as a parameter
Returns:
The request builder instance in order to chain calls
|
hbp_service_client/request/request_builder.py
|
def throw(self, exception_class, should_throw):
'''Defines if an exception should be thrown after the request is sent
Args:
exception_class (class): The class of the exception to instantiate
should_throw (function): The predicate that should indicate if the exception
should be thrown. This function will be called with the response as a parameter
Returns:
The request builder instance in order to chain calls
'''
return self.__copy_and_set('throws', self._throws + [(exception_class, should_throw)])
|
def throw(self, exception_class, should_throw):
'''Defines if an exception should be thrown after the request is sent
Args:
exception_class (class): The class of the exception to instantiate
should_throw (function): The predicate that should indicate if the exception
should be thrown. This function will be called with the response as a parameter
Returns:
The request builder instance in order to chain calls
'''
return self.__copy_and_set('throws', self._throws + [(exception_class, should_throw)])
|
[
"Defines",
"if",
"the",
"an",
"exception",
"should",
"be",
"thrown",
"after",
"the",
"request",
"is",
"sent"
] |
HumanBrainProject/hbp-service-client
|
python
|
https://github.com/HumanBrainProject/hbp-service-client/blob/b338fb41a7f0e7b9d654ff28fcf13a56d03bff4d/hbp_service_client/request/request_builder.py#L191-L202
|
[
"def",
"throw",
"(",
"self",
",",
"exception_class",
",",
"should_throw",
")",
":",
"return",
"self",
".",
"__copy_and_set",
"(",
"'throws'",
",",
"self",
".",
"_throws",
"+",
"[",
"(",
"exception_class",
",",
"should_throw",
")",
"]",
")"
] |
b338fb41a7f0e7b9d654ff28fcf13a56d03bff4d
|
test
|
run_command
|
Run the command, piping stderr to stdout.
Sends output to stdout.
:param cmd: The list for args to pass to the process
|
docker_runner/application_runner.py
|
def run_command(cmd):
"""
Run the command, piping stderr to stdout.
Sends output to stdout.
:param cmd: The list for args to pass to the process
"""
try:
process = subprocess.Popen(
cmd,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
)
outdata, _ = process.communicate()
return outdata
except subprocess.CalledProcessError as e:
return e
|
def run_command(cmd):
"""
Run the command, piping stderr to stdout.
Sends output to stdout.
:param cmd: The list for args to pass to the process
"""
try:
process = subprocess.Popen(
cmd,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
)
outdata, _ = process.communicate()
return outdata
except subprocess.CalledProcessError as e:
return e
|
[
"Run",
"the",
"command",
"piping",
"stderr",
"to",
"stdout",
".",
"Sends",
"output",
"to",
"stdout",
".",
":",
"param",
"cmd",
":",
"The",
"list",
"for",
"args",
"to",
"pass",
"to",
"the",
"process"
] |
TDG-Platform/cloud-harness
|
python
|
https://github.com/TDG-Platform/cloud-harness/blob/1d8f972f861816b90785a484e9bec5bd4bc2f569/docker_runner/application_runner.py#L38-L54
|
[
"def",
"run_command",
"(",
"cmd",
")",
":",
"try",
":",
"process",
"=",
"subprocess",
".",
"Popen",
"(",
"cmd",
",",
"stdin",
"=",
"subprocess",
".",
"PIPE",
",",
"stdout",
"=",
"subprocess",
".",
"PIPE",
",",
"stderr",
"=",
"subprocess",
".",
"STDOUT",
",",
")",
"outdata",
",",
"_",
"=",
"process",
".",
"communicate",
"(",
")",
"return",
"outdata",
"except",
"subprocess",
".",
"CalledProcessError",
"as",
"e",
":",
"return",
"e"
] |
1d8f972f861816b90785a484e9bec5bd4bc2f569
|
test
|
extract_source
|
Extract the source bundle
:param bundle_path: path to the source bundle *.tar.gz
:param source_path: path to location where to extractall
|
docker_runner/application_runner.py
|
def extract_source(bundle_path, source_path):
"""
Extract the source bundle
:param bundle_path: path to the source bundle *.tar.gz
:param source_path: path to location where to extractall
"""
with tarfile.open(bundle_path, 'r:gz') as tf:
tf.extractall(path=source_path)
logger.debug("Archive Files: %s" % os.listdir(os.path.dirname(bundle_path)))
|
def extract_source(bundle_path, source_path):
"""
Extract the source bundle
:param bundle_path: path to the source bundle *.tar.gz
:param source_path: path to location where to extractall
"""
with tarfile.open(bundle_path, 'r:gz') as tf:
tf.extractall(path=source_path)
logger.debug("Archive Files: %s" % os.listdir(os.path.dirname(bundle_path)))
|
[
"Extract",
"the",
"source",
"bundle",
":",
"param",
"bundle_path",
":",
"path",
"to",
"the",
"aource",
"bundle",
"*",
".",
"tar",
".",
"gz",
":",
"param",
"source_path",
":",
"path",
"to",
"location",
"where",
"to",
"extractall"
] |
TDG-Platform/cloud-harness
|
python
|
https://github.com/TDG-Platform/cloud-harness/blob/1d8f972f861816b90785a484e9bec5bd4bc2f569/docker_runner/application_runner.py#L57-L65
|
[
"def",
"extract_source",
"(",
"bundle_path",
",",
"source_path",
")",
":",
"with",
"tarfile",
".",
"open",
"(",
"bundle_path",
",",
"'r:gz'",
")",
"as",
"tf",
":",
"tf",
".",
"extractall",
"(",
"path",
"=",
"source_path",
")",
"logger",
".",
"debug",
"(",
"\"Archive Files: %s\"",
"%",
"os",
".",
"listdir",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"bundle_path",
")",
")",
")"
] |
1d8f972f861816b90785a484e9bec5bd4bc2f569
|
test
|
printer
|
response is json or straight text.
:param data:
:return:
|
gbdx_cloud_harness/utils/printer.py
|
def printer(data):
"""
response is json or straight text.
:param data:
:return:
"""
data = str(data) # Get rid of unicode
if not isinstance(data, str):
output = json.dumps(
data,
sort_keys=True,
indent=4,
separators=(',', ': ')
)
elif hasattr(data, 'json'):
output = data.json()
else:
output = data
sys.stdout.write(output)
sys.stdout.write('\n')
sys.stdout.flush()
return
|
def printer(data):
"""
response is json or straight text.
:param data:
:return:
"""
data = str(data) # Get rid of unicode
if not isinstance(data, str):
output = json.dumps(
data,
sort_keys=True,
indent=4,
separators=(',', ': ')
)
elif hasattr(data, 'json'):
output = data.json()
else:
output = data
sys.stdout.write(output)
sys.stdout.write('\n')
sys.stdout.flush()
return
|
[
"response",
"is",
"json",
"or",
"straight",
"text",
".",
":",
"param",
"data",
":",
":",
"return",
":"
] |
TDG-Platform/cloud-harness
|
python
|
https://github.com/TDG-Platform/cloud-harness/blob/1d8f972f861816b90785a484e9bec5bd4bc2f569/gbdx_cloud_harness/utils/printer.py#L5-L27
|
[
"def",
"printer",
"(",
"data",
")",
":",
"data",
"=",
"str",
"(",
"data",
")",
"# Get rid of unicode",
"if",
"not",
"isinstance",
"(",
"data",
",",
"str",
")",
":",
"output",
"=",
"json",
".",
"dumps",
"(",
"data",
",",
"sort_keys",
"=",
"True",
",",
"indent",
"=",
"4",
",",
"separators",
"=",
"(",
"','",
",",
"': '",
")",
")",
"elif",
"hasattr",
"(",
"data",
",",
"'json'",
")",
":",
"output",
"=",
"data",
".",
"json",
"(",
")",
"else",
":",
"output",
"=",
"data",
"sys",
".",
"stdout",
".",
"write",
"(",
"output",
")",
"sys",
".",
"stdout",
".",
"write",
"(",
"'\\n'",
")",
"sys",
".",
"stdout",
".",
"flush",
"(",
")",
"return"
] |
1d8f972f861816b90785a484e9bec5bd4bc2f569
|
test
|
AdminBooleanMixin.get_list_display
|
Return a sequence containing the fields to be displayed on the
changelist.
|
boolean_switch/admin.py
|
def get_list_display(self, request):
"""
Return a sequence containing the fields to be displayed on the
changelist.
"""
list_display = []
for field_name in self.list_display:
try:
db_field = self.model._meta.get_field(field_name)
if isinstance(db_field, BooleanField):
field_name = boolean_switch_field(db_field)
except FieldDoesNotExist:
pass
list_display.append(field_name)
return list_display
|
def get_list_display(self, request):
"""
Return a sequence containing the fields to be displayed on the
changelist.
"""
list_display = []
for field_name in self.list_display:
try:
db_field = self.model._meta.get_field(field_name)
if isinstance(db_field, BooleanField):
field_name = boolean_switch_field(db_field)
except FieldDoesNotExist:
pass
list_display.append(field_name)
return list_display
|
[
"Return",
"a",
"sequence",
"containing",
"the",
"fields",
"to",
"be",
"displayed",
"on",
"the",
"changelist",
"."
] |
makeev/django-boolean-switch
|
python
|
https://github.com/makeev/django-boolean-switch/blob/ed740dbb56d0bb1ad20d4b1e124055283b0e932f/boolean_switch/admin.py#L44-L58
|
[
"def",
"get_list_display",
"(",
"self",
",",
"request",
")",
":",
"list_display",
"=",
"[",
"]",
"for",
"field_name",
"in",
"self",
".",
"list_display",
":",
"try",
":",
"db_field",
"=",
"self",
".",
"model",
".",
"_meta",
".",
"get_field",
"(",
"field_name",
")",
"if",
"isinstance",
"(",
"db_field",
",",
"BooleanField",
")",
":",
"field_name",
"=",
"boolean_switch_field",
"(",
"db_field",
")",
"except",
"FieldDoesNotExist",
":",
"pass",
"list_display",
".",
"append",
"(",
"field_name",
")",
"return",
"list_display"
] |
ed740dbb56d0bb1ad20d4b1e124055283b0e932f
|
test
|
map_job
|
Spawns a tree of jobs to avoid overloading the number of jobs spawned by a single parent.
This function is appropriate to use when batching samples greater than 1,000.
:param JobFunctionWrappingJob job: passed automatically by Toil
:param function func: Function to spawn dynamically, passes one sample as first argument
:param list inputs: Array of samples to be batched
:param list args: any arguments to be passed to the function
|
src/toil_lib/jobs.py
|
def map_job(job, func, inputs, *args):
"""
Spawns a tree of jobs to avoid overloading the number of jobs spawned by a single parent.
This function is appropriate to use when batching samples greater than 1,000.
:param JobFunctionWrappingJob job: passed automatically by Toil
:param function func: Function to spawn dynamically, passes one sample as first argument
:param list inputs: Array of samples to be batched
:param list args: any arguments to be passed to the function
"""
# num_partitions isn't exposed as an argument in order to be transparent to the user.
# The value for num_partitions is a tested value
num_partitions = 100
partition_size = len(inputs) / num_partitions
if partition_size > 1:
for partition in partitions(inputs, partition_size):
job.addChildJobFn(map_job, func, partition, *args)
else:
for sample in inputs:
job.addChildJobFn(func, sample, *args)
|
def map_job(job, func, inputs, *args):
"""
Spawns a tree of jobs to avoid overloading the number of jobs spawned by a single parent.
This function is appropriate to use when batching samples greater than 1,000.
:param JobFunctionWrappingJob job: passed automatically by Toil
:param function func: Function to spawn dynamically, passes one sample as first argument
:param list inputs: Array of samples to be batched
:param list args: any arguments to be passed to the function
"""
# num_partitions isn't exposed as an argument in order to be transparent to the user.
# The value for num_partitions is a tested value
num_partitions = 100
partition_size = len(inputs) / num_partitions
if partition_size > 1:
for partition in partitions(inputs, partition_size):
job.addChildJobFn(map_job, func, partition, *args)
else:
for sample in inputs:
job.addChildJobFn(func, sample, *args)
|
[
"Spawns",
"a",
"tree",
"of",
"jobs",
"to",
"avoid",
"overloading",
"the",
"number",
"of",
"jobs",
"spawned",
"by",
"a",
"single",
"parent",
".",
"This",
"function",
"is",
"appropriate",
"to",
"use",
"when",
"batching",
"samples",
"greater",
"than",
"1",
"000",
"."
] |
BD2KGenomics/toil-lib
|
python
|
https://github.com/BD2KGenomics/toil-lib/blob/022a615fc3dc98fc1aaa7bfd232409962ca44fbd/src/toil_lib/jobs.py#L4-L23
|
[
"def",
"map_job",
"(",
"job",
",",
"func",
",",
"inputs",
",",
"*",
"args",
")",
":",
"# num_partitions isn't exposed as an argument in order to be transparent to the user.",
"# The value for num_partitions is a tested value",
"num_partitions",
"=",
"100",
"partition_size",
"=",
"len",
"(",
"inputs",
")",
"/",
"num_partitions",
"if",
"partition_size",
">",
"1",
":",
"for",
"partition",
"in",
"partitions",
"(",
"inputs",
",",
"partition_size",
")",
":",
"job",
".",
"addChildJobFn",
"(",
"map_job",
",",
"func",
",",
"partition",
",",
"*",
"args",
")",
"else",
":",
"for",
"sample",
"in",
"inputs",
":",
"job",
".",
"addChildJobFn",
"(",
"func",
",",
"sample",
",",
"*",
"args",
")"
] |
022a615fc3dc98fc1aaa7bfd232409962ca44fbd
|
test
|
gatk_genotype_gvcfs
|
Runs GenotypeGVCFs on one or more gVCFs generated by HaplotypeCaller.
:param JobFunctionWrappingJob job: passed automatically by Toil
:param dict gvcfs: Dictionary of GVCF FileStoreIDs {sample identifier: FileStoreID}
:param str ref: FileStoreID for the reference genome fasta file
:param str fai: FileStoreID for the reference genome index file
:param str ref_dict: FileStoreID for the reference genome sequence dictionary
:param list[str] annotations: Optional list of GATK variant annotations. Default: None.
:param float emit_threshold: Minimum phred-scale confidence threshold for
a variant to be emitted. GATK default: 10.0
:param float call_threshold: Minimum phred-scale confidence threshold for
a variant to be called. GATK default: 30.0
:param bool unsafe_mode: If True, runs gatk UNSAFE mode: "-U ALLOW_SEQ_DICT_INCOMPATIBILITY"
:return: VCF FileStoreID
:rtype: str
|
src/toil_lib/tools/variant_annotation.py
|
def gatk_genotype_gvcfs(job,
gvcfs,
ref, fai, ref_dict,
annotations=None,
emit_threshold=10.0, call_threshold=30.0,
unsafe_mode=False):
"""
Runs GenotypeGVCFs on one or more gVCFs generated by HaplotypeCaller.
:param JobFunctionWrappingJob job: passed automatically by Toil
:param dict gvcfs: Dictionary of GVCF FileStoreIDs {sample identifier: FileStoreID}
:param str ref: FileStoreID for the reference genome fasta file
:param str fai: FileStoreID for the reference genome index file
:param str ref_dict: FileStoreID for the reference genome sequence dictionary
:param list[str] annotations: Optional list of GATK variant annotations. Default: None.
:param float emit_threshold: Minimum phred-scale confidence threshold for
a variant to be emitted. GATK default: 10.0
:param float call_threshold: Minimum phred-scale confidence threshold for
a variant to be called. GATK default: 30.0
:param bool unsafe_mode: If True, runs gatk UNSAFE mode: "-U ALLOW_SEQ_DICT_INCOMPATIBILITY"
:return: VCF FileStoreID
:rtype: str
"""
inputs = {'genome.fa': ref,
'genome.fa.fai': fai,
'genome.dict': ref_dict}
inputs.update(gvcfs)
work_dir = job.fileStore.getLocalTempDir()
for name, file_store_id in inputs.iteritems():
job.fileStore.readGlobalFile(file_store_id, os.path.join(work_dir, name))
command = ['-T', 'GenotypeGVCFs',
'-R', '/data/genome.fa',
'--out', 'genotyped.vcf',
'-stand_emit_conf', str(emit_threshold),
'-stand_call_conf', str(call_threshold)]
if annotations:
for annotation in annotations:
command.extend(['-A', annotation])
# Include all GVCFs for joint genotyping
for uuid in gvcfs.keys():
command.extend(['--variant', os.path.join('/data', uuid)])
if unsafe_mode:
command.extend(['-U', 'ALLOW_SEQ_DICT_INCOMPATIBILITY'])
job.fileStore.logToMaster('Running GATK GenotypeGVCFs\n'
'Emit threshold: {emit_threshold}\n'
'Call threshold: {call_threshold}\n\n'
'Annotations:\n{annotations}\n\n'
'Samples:\n{samples}\n'.format(emit_threshold=emit_threshold,
call_threshold=call_threshold,
annotations='\n'.join(annotations) if annotations else '',
samples='\n'.join(gvcfs.keys())))
docker_parameters = ['--rm', 'log-driver', 'none',
'-e', 'JAVA_OPTS=-Djava.io.tmpdir=/data/ -Xmx{}'.format(job.memory)]
dockerCall(job=job, workDir=work_dir,
parameters=command,
tool='quay.io/ucsc_cgl/gatk:3.5--dba6dae49156168a909c43330350c6161dc7ecc2',
dockerParameters=docker_parameters)
return job.fileStore.writeGlobalFile(os.path.join(work_dir, 'genotyped.vcf'))
|
def gatk_genotype_gvcfs(job,
gvcfs,
ref, fai, ref_dict,
annotations=None,
emit_threshold=10.0, call_threshold=30.0,
unsafe_mode=False):
"""
Runs GenotypeGVCFs on one or more gVCFs generated by HaplotypeCaller.
:param JobFunctionWrappingJob job: passed automatically by Toil
:param dict gvcfs: Dictionary of GVCF FileStoreIDs {sample identifier: FileStoreID}
:param str ref: FileStoreID for the reference genome fasta file
:param str fai: FileStoreID for the reference genome index file
:param str ref_dict: FileStoreID for the reference genome sequence dictionary
:param list[str] annotations: Optional list of GATK variant annotations. Default: None.
:param float emit_threshold: Minimum phred-scale confidence threshold for
a variant to be emitted. GATK default: 10.0
:param float call_threshold: Minimum phred-scale confidence threshold for
a variant to be called. GATK default: 30.0
:param bool unsafe_mode: If True, runs gatk UNSAFE mode: "-U ALLOW_SEQ_DICT_INCOMPATIBILITY"
:return: VCF FileStoreID
:rtype: str
"""
inputs = {'genome.fa': ref,
'genome.fa.fai': fai,
'genome.dict': ref_dict}
inputs.update(gvcfs)
work_dir = job.fileStore.getLocalTempDir()
for name, file_store_id in inputs.iteritems():
job.fileStore.readGlobalFile(file_store_id, os.path.join(work_dir, name))
command = ['-T', 'GenotypeGVCFs',
'-R', '/data/genome.fa',
'--out', 'genotyped.vcf',
'-stand_emit_conf', str(emit_threshold),
'-stand_call_conf', str(call_threshold)]
if annotations:
for annotation in annotations:
command.extend(['-A', annotation])
# Include all GVCFs for joint genotyping
for uuid in gvcfs.keys():
command.extend(['--variant', os.path.join('/data', uuid)])
if unsafe_mode:
command.extend(['-U', 'ALLOW_SEQ_DICT_INCOMPATIBILITY'])
job.fileStore.logToMaster('Running GATK GenotypeGVCFs\n'
'Emit threshold: {emit_threshold}\n'
'Call threshold: {call_threshold}\n\n'
'Annotations:\n{annotations}\n\n'
'Samples:\n{samples}\n'.format(emit_threshold=emit_threshold,
call_threshold=call_threshold,
annotations='\n'.join(annotations) if annotations else '',
samples='\n'.join(gvcfs.keys())))
docker_parameters = ['--rm', 'log-driver', 'none',
'-e', 'JAVA_OPTS=-Djava.io.tmpdir=/data/ -Xmx{}'.format(job.memory)]
dockerCall(job=job, workDir=work_dir,
parameters=command,
tool='quay.io/ucsc_cgl/gatk:3.5--dba6dae49156168a909c43330350c6161dc7ecc2',
dockerParameters=docker_parameters)
return job.fileStore.writeGlobalFile(os.path.join(work_dir, 'genotyped.vcf'))
|
[
"Runs",
"GenotypeGVCFs",
"on",
"one",
"or",
"more",
"gVCFs",
"generated",
"by",
"HaplotypeCaller",
"."
] |
BD2KGenomics/toil-lib
|
python
|
https://github.com/BD2KGenomics/toil-lib/blob/022a615fc3dc98fc1aaa7bfd232409962ca44fbd/src/toil_lib/tools/variant_annotation.py#L7-L72
|
[
"def",
"gatk_genotype_gvcfs",
"(",
"job",
",",
"gvcfs",
",",
"ref",
",",
"fai",
",",
"ref_dict",
",",
"annotations",
"=",
"None",
",",
"emit_threshold",
"=",
"10.0",
",",
"call_threshold",
"=",
"30.0",
",",
"unsafe_mode",
"=",
"False",
")",
":",
"inputs",
"=",
"{",
"'genome.fa'",
":",
"ref",
",",
"'genome.fa.fai'",
":",
"fai",
",",
"'genome.dict'",
":",
"ref_dict",
"}",
"inputs",
".",
"update",
"(",
"gvcfs",
")",
"work_dir",
"=",
"job",
".",
"fileStore",
".",
"getLocalTempDir",
"(",
")",
"for",
"name",
",",
"file_store_id",
"in",
"inputs",
".",
"iteritems",
"(",
")",
":",
"job",
".",
"fileStore",
".",
"readGlobalFile",
"(",
"file_store_id",
",",
"os",
".",
"path",
".",
"join",
"(",
"work_dir",
",",
"name",
")",
")",
"command",
"=",
"[",
"'-T'",
",",
"'GenotypeGVCFs'",
",",
"'-R'",
",",
"'/data/genome.fa'",
",",
"'--out'",
",",
"'genotyped.vcf'",
",",
"'-stand_emit_conf'",
",",
"str",
"(",
"emit_threshold",
")",
",",
"'-stand_call_conf'",
",",
"str",
"(",
"call_threshold",
")",
"]",
"if",
"annotations",
":",
"for",
"annotation",
"in",
"annotations",
":",
"command",
".",
"extend",
"(",
"[",
"'-A'",
",",
"annotation",
"]",
")",
"# Include all GVCFs for joint genotyping",
"for",
"uuid",
"in",
"gvcfs",
".",
"keys",
"(",
")",
":",
"command",
".",
"extend",
"(",
"[",
"'--variant'",
",",
"os",
".",
"path",
".",
"join",
"(",
"'/data'",
",",
"uuid",
")",
"]",
")",
"if",
"unsafe_mode",
":",
"command",
".",
"extend",
"(",
"[",
"'-U'",
",",
"'ALLOW_SEQ_DICT_INCOMPATIBILITY'",
"]",
")",
"job",
".",
"fileStore",
".",
"logToMaster",
"(",
"'Running GATK GenotypeGVCFs\\n'",
"'Emit threshold: {emit_threshold}\\n'",
"'Call threshold: {call_threshold}\\n\\n'",
"'Annotations:\\n{annotations}\\n\\n'",
"'Samples:\\n{samples}\\n'",
".",
"format",
"(",
"emit_threshold",
"=",
"emit_threshold",
",",
"call_threshold",
"=",
"call_threshold",
",",
"annotations",
"=",
"'\\n'",
".",
"join",
"(",
"annotations",
")",
"if",
"annotations",
"else",
"''",
",",
"samples",
"=",
"'\\n'",
".",
"join",
"(",
"gvcfs",
".",
"keys",
"(",
")",
")",
")",
")",
"docker_parameters",
"=",
"[",
"'--rm'",
",",
"'log-driver'",
",",
"'none'",
",",
"'-e'",
",",
"'JAVA_OPTS=-Djava.io.tmpdir=/data/ -Xmx{}'",
".",
"format",
"(",
"job",
".",
"memory",
")",
"]",
"dockerCall",
"(",
"job",
"=",
"job",
",",
"workDir",
"=",
"work_dir",
",",
"parameters",
"=",
"command",
",",
"tool",
"=",
"'quay.io/ucsc_cgl/gatk:3.5--dba6dae49156168a909c43330350c6161dc7ecc2'",
",",
"dockerParameters",
"=",
"docker_parameters",
")",
"return",
"job",
".",
"fileStore",
".",
"writeGlobalFile",
"(",
"os",
".",
"path",
".",
"join",
"(",
"work_dir",
",",
"'genotyped.vcf'",
")",
")"
] |
022a615fc3dc98fc1aaa7bfd232409962ca44fbd
|
test
|
run_oncotator
|
Uses Oncotator to add cancer relevant variant annotations to a VCF file. Oncotator can accept
other genome builds, but the output VCF is based on hg19.
:param JobFunctionWrappingJob job: passed automatically by Toil
:param str vcf_id: FileStoreID for VCF file
:param str oncotator_db: FileStoreID for Oncotator database
:return: Annotated VCF FileStoreID
:rtype: str
|
src/toil_lib/tools/variant_annotation.py
|
def run_oncotator(job, vcf_id, oncotator_db):
"""
Uses Oncotator to add cancer relevant variant annotations to a VCF file. Oncotator can accept
other genome builds, but the output VCF is based on hg19.
:param JobFunctionWrappingJob job: passed automatically by Toil
:param str vcf_id: FileStoreID for VCF file
:param str oncotator_db: FileStoreID for Oncotator database
:return: Annotated VCF FileStoreID
:rtype: str
"""
job.fileStore.logToMaster('Running Oncotator')
inputs = {'input.vcf': vcf_id,
'oncotator_db': oncotator_db}
work_dir = job.fileStore.getLocalTempDir()
for name, file_store_id in inputs.iteritems():
inputs[name] = job.fileStore.readGlobalFile(file_store_id, os.path.join(work_dir, name))
# The Oncotator database may be tar/gzipped
if tarfile.is_tarfile(inputs['oncotator_db']):
tar = tarfile.open(inputs['oncotator_db'])
tar.extractall(path=work_dir)
# Get the extracted database directory name
inputs['oncotator_db'] = tar.getmembers()[0].name
tar.close()
command = ['-i', 'VCF',
'-o', 'VCF',
'--db-dir', inputs['oncotator_db'],
'input.vcf',
'annotated.vcf',
'hg19'] # Oncotator annotations are based on hg19
docker_parameters = ['--rm', 'log-driver', 'none',
'-e', 'JAVA_OPTS=-Djava.io.tmpdir=/data/ -Xmx{}'.format(job.memory)]
dockerCall(job=job, workDir=work_dir,
parameters=command,
tool='jpfeil/oncotator:1.9--8fffc356981862d50cfacd711b753700b886b605',
dockerParameters=docker_parameters)
return job.fileStore.writeGlobalFile(os.path.join(work_dir, 'annotated.vcf'))
|
def run_oncotator(job, vcf_id, oncotator_db):
"""
Uses Oncotator to add cancer relevant variant annotations to a VCF file. Oncotator can accept
other genome builds, but the output VCF is based on hg19.
:param JobFunctionWrappingJob job: passed automatically by Toil
:param str vcf_id: FileStoreID for VCF file
:param str oncotator_db: FileStoreID for Oncotator database
:return: Annotated VCF FileStoreID
:rtype: str
"""
job.fileStore.logToMaster('Running Oncotator')
inputs = {'input.vcf': vcf_id,
'oncotator_db': oncotator_db}
work_dir = job.fileStore.getLocalTempDir()
for name, file_store_id in inputs.iteritems():
inputs[name] = job.fileStore.readGlobalFile(file_store_id, os.path.join(work_dir, name))
# The Oncotator database may be tar/gzipped
if tarfile.is_tarfile(inputs['oncotator_db']):
tar = tarfile.open(inputs['oncotator_db'])
tar.extractall(path=work_dir)
# Get the extracted database directory name
inputs['oncotator_db'] = tar.getmembers()[0].name
tar.close()
command = ['-i', 'VCF',
'-o', 'VCF',
'--db-dir', inputs['oncotator_db'],
'input.vcf',
'annotated.vcf',
'hg19'] # Oncotator annotations are based on hg19
docker_parameters = ['--rm', 'log-driver', 'none',
'-e', 'JAVA_OPTS=-Djava.io.tmpdir=/data/ -Xmx{}'.format(job.memory)]
dockerCall(job=job, workDir=work_dir,
parameters=command,
tool='jpfeil/oncotator:1.9--8fffc356981862d50cfacd711b753700b886b605',
dockerParameters=docker_parameters)
return job.fileStore.writeGlobalFile(os.path.join(work_dir, 'annotated.vcf'))
|
[
"Uses",
"Oncotator",
"to",
"add",
"cancer",
"relevant",
"variant",
"annotations",
"to",
"a",
"VCF",
"file",
".",
"Oncotator",
"can",
"accept",
"other",
"genome",
"builds",
"but",
"the",
"output",
"VCF",
"is",
"based",
"on",
"hg19",
"."
] |
BD2KGenomics/toil-lib
|
python
|
https://github.com/BD2KGenomics/toil-lib/blob/022a615fc3dc98fc1aaa7bfd232409962ca44fbd/src/toil_lib/tools/variant_annotation.py#L75-L117
|
[
"def",
"run_oncotator",
"(",
"job",
",",
"vcf_id",
",",
"oncotator_db",
")",
":",
"job",
".",
"fileStore",
".",
"logToMaster",
"(",
"'Running Oncotator'",
")",
"inputs",
"=",
"{",
"'input.vcf'",
":",
"vcf_id",
",",
"'oncotator_db'",
":",
"oncotator_db",
"}",
"work_dir",
"=",
"job",
".",
"fileStore",
".",
"getLocalTempDir",
"(",
")",
"for",
"name",
",",
"file_store_id",
"in",
"inputs",
".",
"iteritems",
"(",
")",
":",
"inputs",
"[",
"name",
"]",
"=",
"job",
".",
"fileStore",
".",
"readGlobalFile",
"(",
"file_store_id",
",",
"os",
".",
"path",
".",
"join",
"(",
"work_dir",
",",
"name",
")",
")",
"# The Oncotator database may be tar/gzipped",
"if",
"tarfile",
".",
"is_tarfile",
"(",
"inputs",
"[",
"'oncotator_db'",
"]",
")",
":",
"tar",
"=",
"tarfile",
".",
"open",
"(",
"inputs",
"[",
"'oncotator_db'",
"]",
")",
"tar",
".",
"extractall",
"(",
"path",
"=",
"work_dir",
")",
"# Get the extracted database directory name",
"inputs",
"[",
"'oncotator_db'",
"]",
"=",
"tar",
".",
"getmembers",
"(",
")",
"[",
"0",
"]",
".",
"name",
"tar",
".",
"close",
"(",
")",
"command",
"=",
"[",
"'-i'",
",",
"'VCF'",
",",
"'-o'",
",",
"'VCF'",
",",
"'--db-dir'",
",",
"inputs",
"[",
"'oncotator_db'",
"]",
",",
"'input.vcf'",
",",
"'annotated.vcf'",
",",
"'hg19'",
"]",
"# Oncotator annotations are based on hg19",
"docker_parameters",
"=",
"[",
"'--rm'",
",",
"'log-driver'",
",",
"'none'",
",",
"'-e'",
",",
"'JAVA_OPTS=-Djava.io.tmpdir=/data/ -Xmx{}'",
".",
"format",
"(",
"job",
".",
"memory",
")",
"]",
"dockerCall",
"(",
"job",
"=",
"job",
",",
"workDir",
"=",
"work_dir",
",",
"parameters",
"=",
"command",
",",
"tool",
"=",
"'jpfeil/oncotator:1.9--8fffc356981862d50cfacd711b753700b886b605'",
",",
"dockerParameters",
"=",
"docker_parameters",
")",
"return",
"job",
".",
"fileStore",
".",
"writeGlobalFile",
"(",
"os",
".",
"path",
".",
"join",
"(",
"work_dir",
",",
"'annotated.vcf'",
")",
")"
] |
022a615fc3dc98fc1aaa7bfd232409962ca44fbd
|
test
|
DatapointArray.sort
|
Sort here works by sorting by timestamp by default
|
connectordb/_datapointarray.py
|
def sort(self, f=lambda d: d["t"]):
"""Sort here works by sorting by timestamp by default"""
list.sort(self, key=f)
return self
|
def sort(self, f=lambda d: d["t"]):
"""Sort here works by sorting by timestamp by default"""
list.sort(self, key=f)
return self
|
[
"Sort",
"here",
"works",
"by",
"sorting",
"by",
"timestamp",
"by",
"default"
] |
connectordb/connectordb-python
|
python
|
https://github.com/connectordb/connectordb-python/blob/2092b0cb30898139a247176bcf433d5a4abde7cb/connectordb/_datapointarray.py#L41-L44
|
[
"def",
"sort",
"(",
"self",
",",
"f",
"=",
"lambda",
"d",
":",
"d",
"[",
"\"t\"",
"]",
")",
":",
"list",
".",
"sort",
"(",
"self",
",",
"key",
"=",
"f",
")",
"return",
"self"
] |
2092b0cb30898139a247176bcf433d5a4abde7cb
|
test
|
DatapointArray.t
|
Returns just the timestamp portion of the datapoints as a list.
The timestamps are in python datetime's date format.
|
connectordb/_datapointarray.py
|
def t(self):
"""Returns just the timestamp portion of the datapoints as a list.
The timestamps are in python datetime's date format."""
return list(map(lambda x: datetime.datetime.fromtimestamp(x["t"]), self.raw()))
|
def t(self):
"""Returns just the timestamp portion of the datapoints as a list.
The timestamps are in python datetime's date format."""
return list(map(lambda x: datetime.datetime.fromtimestamp(x["t"]), self.raw()))
|
[
"Returns",
"just",
"the",
"timestamp",
"portion",
"of",
"the",
"datapoints",
"as",
"a",
"list",
".",
"The",
"timestamps",
"are",
"in",
"python",
"datetime",
"s",
"date",
"format",
"."
] |
connectordb/connectordb-python
|
python
|
https://github.com/connectordb/connectordb-python/blob/2092b0cb30898139a247176bcf433d5a4abde7cb/connectordb/_datapointarray.py#L50-L53
|
[
"def",
"t",
"(",
"self",
")",
":",
"return",
"list",
"(",
"map",
"(",
"lambda",
"x",
":",
"datetime",
".",
"datetime",
".",
"fromtimestamp",
"(",
"x",
"[",
"\"t\"",
"]",
")",
",",
"self",
".",
"raw",
"(",
")",
")",
")"
] |
2092b0cb30898139a247176bcf433d5a4abde7cb
|
test
|
DatapointArray.writeJSON
|
Writes the data to the given file::
DatapointArray([{"t": unix timestamp, "d": data}]).writeJSON("myfile.json")
The data can later be loaded using loadJSON.
|
connectordb/_datapointarray.py
|
def writeJSON(self, filename):
"""Writes the data to the given file::
DatapointArray([{"t": unix timestamp, "d": data}]).writeJSON("myfile.json")
The data can later be loaded using loadJSON.
"""
with open(filename, "w") as f:
json.dump(self, f)
|
def writeJSON(self, filename):
"""Writes the data to the given file::
DatapointArray([{"t": unix timestamp, "d": data}]).writeJSON("myfile.json")
The data can later be loaded using loadJSON.
"""
with open(filename, "w") as f:
json.dump(self, f)
|
[
"Writes",
"the",
"data",
"to",
"the",
"given",
"file",
"::"
] |
connectordb/connectordb-python
|
python
|
https://github.com/connectordb/connectordb-python/blob/2092b0cb30898139a247176bcf433d5a4abde7cb/connectordb/_datapointarray.py#L76-L84
|
[
"def",
"writeJSON",
"(",
"self",
",",
"filename",
")",
":",
"with",
"open",
"(",
"filename",
",",
"\"w\"",
")",
"as",
"f",
":",
"json",
".",
"dump",
"(",
"self",
",",
"f",
")"
] |
2092b0cb30898139a247176bcf433d5a4abde7cb
|
test
|
DatapointArray.loadJSON
|
Adds the data from a JSON file. The file is expected to be in datapoint format::
d = DatapointArray().loadJSON("myfile.json")
|
connectordb/_datapointarray.py
|
def loadJSON(self, filename):
"""Adds the data from a JSON file. The file is expected to be in datapoint format::
d = DatapointArray().loadJSON("myfile.json")
"""
with open(filename, "r") as f:
self.merge(json.load(f))
return self
|
def loadJSON(self, filename):
"""Adds the data from a JSON file. The file is expected to be in datapoint format::
d = DatapointArray().loadJSON("myfile.json")
"""
with open(filename, "r") as f:
self.merge(json.load(f))
return self
|
[
"Adds",
"the",
"data",
"from",
"a",
"JSON",
"file",
".",
"The",
"file",
"is",
"expected",
"to",
"be",
"in",
"datapoint",
"format",
"::"
] |
connectordb/connectordb-python
|
python
|
https://github.com/connectordb/connectordb-python/blob/2092b0cb30898139a247176bcf433d5a4abde7cb/connectordb/_datapointarray.py#L86-L93
|
[
"def",
"loadJSON",
"(",
"self",
",",
"filename",
")",
":",
"with",
"open",
"(",
"filename",
",",
"\"r\"",
")",
"as",
"f",
":",
"self",
".",
"merge",
"(",
"json",
".",
"load",
"(",
"f",
")",
")",
"return",
"self"
] |
2092b0cb30898139a247176bcf433d5a4abde7cb
|
test
|
DatapointArray.loadExport
|
Adds the data from a ConnectorDB export. If it is a stream export, then the folder
is the location of the export. If it is a device export, then the folder is the export folder
with the stream name as a subdirectory
If it is a user export, you will use the path of the export folder, with the user/device/stream
appended to the end::
myuser.export("./exportdir")
DatapointArray().loadExport("./exportdir/username/devicename/streamname")
|
connectordb/_datapointarray.py
|
def loadExport(self, folder):
"""Adds the data from a ConnectorDB export. If it is a stream export, then the folder
is the location of the export. If it is a device export, then the folder is the export folder
with the stream name as a subdirectory
If it is a user export, you will use the path of the export folder, with the user/device/stream
appended to the end::
myuser.export("./exportdir")
DatapointArray().loadExport("./exportdir/username/devicename/streamname")
"""
self.loadJSON(os.path.join(folder, "data.json"))
return self
|
def loadExport(self, folder):
"""Adds the data from a ConnectorDB export. If it is a stream export, then the folder
is the location of the export. If it is a device export, then the folder is the export folder
with the stream name as a subdirectory
If it is a user export, you will use the path of the export folder, with the user/device/stream
appended to the end::
myuser.export("./exportdir")
DatapointArray().loadExport("./exportdir/username/devicename/streamname")
"""
self.loadJSON(os.path.join(folder, "data.json"))
return self
|
[
"Adds",
"the",
"data",
"from",
"a",
"ConnectorDB",
"export",
".",
"If",
"it",
"is",
"a",
"stream",
"export",
"then",
"the",
"folder",
"is",
"the",
"location",
"of",
"the",
"export",
".",
"If",
"it",
"is",
"a",
"device",
"export",
"then",
"the",
"folder",
"is",
"the",
"export",
"folder",
"with",
"the",
"stream",
"name",
"as",
"a",
"subdirectory"
] |
connectordb/connectordb-python
|
python
|
https://github.com/connectordb/connectordb-python/blob/2092b0cb30898139a247176bcf433d5a4abde7cb/connectordb/_datapointarray.py#L95-L107
|
[
"def",
"loadExport",
"(",
"self",
",",
"folder",
")",
":",
"self",
".",
"loadJSON",
"(",
"os",
".",
"path",
".",
"join",
"(",
"folder",
",",
"\"data.json\"",
")",
")",
"return",
"self"
] |
2092b0cb30898139a247176bcf433d5a4abde7cb
|
test
|
DatapointArray.tshift
|
Shifts all timestamps in the datapoint array by the given number of seconds.
It is the same as the 'tshift' pipescript transform.
Warning: The shift is performed in-place! This means that it modifies the underlying array::
d = DatapointArray([{"t":56,"d":1}])
d.tshift(20)
print(d) # [{"t":76,"d":1}]
|
connectordb/_datapointarray.py
|
def tshift(self, t):
"""Shifts all timestamps in the datapoint array by the given number of seconds.
It is the same as the 'tshift' pipescript transform.
Warning: The shift is performed in-place! This means that it modifies the underlying array::
d = DatapointArray([{"t":56,"d":1}])
d.tshift(20)
print(d) # [{"t":76,"d":1}]
"""
raw = self.raw()
for i in range(len(raw)):
raw[i]["t"] += t
return self
|
def tshift(self, t):
"""Shifts all timestamps in the datapoint array by the given number of seconds.
It is the same as the 'tshift' pipescript transform.
Warning: The shift is performed in-place! This means that it modifies the underlying array::
d = DatapointArray([{"t":56,"d":1}])
d.tshift(20)
print(d) # [{"t":76,"d":1}]
"""
raw = self.raw()
for i in range(len(raw)):
raw[i]["t"] += t
return self
|
[
"Shifts",
"all",
"timestamps",
"in",
"the",
"datapoint",
"array",
"by",
"the",
"given",
"number",
"of",
"seconds",
".",
"It",
"is",
"the",
"same",
"as",
"the",
"tshift",
"pipescript",
"transform",
"."
] |
connectordb/connectordb-python
|
python
|
https://github.com/connectordb/connectordb-python/blob/2092b0cb30898139a247176bcf433d5a4abde7cb/connectordb/_datapointarray.py#L109-L122
|
[
"def",
"tshift",
"(",
"self",
",",
"t",
")",
":",
"raw",
"=",
"self",
".",
"raw",
"(",
")",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"raw",
")",
")",
":",
"raw",
"[",
"i",
"]",
"[",
"\"t\"",
"]",
"+=",
"t",
"return",
"self"
] |
2092b0cb30898139a247176bcf433d5a4abde7cb
|
test
|
DatapointArray.sum
|
Gets the sum of the data portions of all datapoints within
|
connectordb/_datapointarray.py
|
def sum(self):
"""Gets the sum of the data portions of all datapoints within"""
raw = self.raw()
s = 0
for i in range(len(raw)):
s += raw[i]["d"]
return s
|
def sum(self):
"""Gets the sum of the data portions of all datapoints within"""
raw = self.raw()
s = 0
for i in range(len(raw)):
s += raw[i]["d"]
return s
|
[
"Gets",
"the",
"sum",
"of",
"the",
"data",
"portions",
"of",
"all",
"datapoints",
"within"
] |
connectordb/connectordb-python
|
python
|
https://github.com/connectordb/connectordb-python/blob/2092b0cb30898139a247176bcf433d5a4abde7cb/connectordb/_datapointarray.py#L124-L130
|
[
"def",
"sum",
"(",
"self",
")",
":",
"raw",
"=",
"self",
".",
"raw",
"(",
")",
"s",
"=",
"0",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"raw",
")",
")",
":",
"s",
"+=",
"raw",
"[",
"i",
"]",
"[",
"\"d\"",
"]",
"return",
"s"
] |
2092b0cb30898139a247176bcf433d5a4abde7cb
|
test
|
rfxcom
|
Start the event loop to collect data from the serial device.
|
home/__main__.py
|
def rfxcom(device):
"""Start the event loop to collect data from the serial device."""
# If the device isn't passed in, look for it in the config.
if device is None:
device = app.config.get('DEVICE')
# If the device is *still* none, error.
if device is None:
print("The serial device needs to be passed in as --device or "
"set in the config as DEVICE.")
return
rfxcom_collect(device)
|
def rfxcom(device):
"""Start the event loop to collect data from the serial device."""
# If the device isn't passed in, look for it in the config.
if device is None:
device = app.config.get('DEVICE')
# If the device is *still* none, error.
if device is None:
print("The serial device needs to be passed in as --device or "
"set in the config as DEVICE.")
return
rfxcom_collect(device)
|
[
"Start",
"the",
"event",
"loop",
"to",
"collect",
"data",
"from",
"the",
"serial",
"device",
"."
] |
d0ugal/home
|
python
|
https://github.com/d0ugal/home/blob/e984716ae6c74dc8e40346584668ac5cfeaaf520/home/__main__.py#L31-L44
|
[
"def",
"rfxcom",
"(",
"device",
")",
":",
"# If the device isn't passed in, look for it in the config.",
"if",
"device",
"is",
"None",
":",
"device",
"=",
"app",
".",
"config",
".",
"get",
"(",
"'DEVICE'",
")",
"# If the device is *still* none, error.",
"if",
"device",
"is",
"None",
":",
"print",
"(",
"\"The serial device needs to be passed in as --device or \"",
"\"set in the config as DEVICE.\"",
")",
"return",
"rfxcom_collect",
"(",
"device",
")"
] |
e984716ae6c74dc8e40346584668ac5cfeaaf520
|
test
|
create_user
|
Create a new user.
|
home/__main__.py
|
def create_user(username):
"Create a new user."
password = prompt_pass("Enter password")
user = User(username=username, password=password)
db.session.add(user)
db.session.commit()
|
def create_user(username):
"Create a new user."
password = prompt_pass("Enter password")
user = User(username=username, password=password)
db.session.add(user)
db.session.commit()
|
[
"Create",
"a",
"new",
"user",
"."
] |
d0ugal/home
|
python
|
https://github.com/d0ugal/home/blob/e984716ae6c74dc8e40346584668ac5cfeaaf520/home/__main__.py#L55-L60
|
[
"def",
"create_user",
"(",
"username",
")",
":",
"password",
"=",
"prompt_pass",
"(",
"\"Enter password\"",
")",
"user",
"=",
"User",
"(",
"username",
"=",
"username",
",",
"password",
"=",
"password",
")",
"db",
".",
"session",
".",
"add",
"(",
"user",
")",
"db",
".",
"session",
".",
"commit",
"(",
")"
] |
e984716ae6c74dc8e40346584668ac5cfeaaf520
|
test
|
to_iri
|
Safely quotes an IRI in a way that is resilient to unicode and incorrect
arguments (checks for RFC 3987 compliance and falls back to percent encoding)
|
iribaker/__init__.py
|
def to_iri(iri):
"""
Safely quotes an IRI in a way that is resilient to unicode and incorrect
arguments (checks for RFC 3987 compliance and falls back to percent encoding)
"""
# First decode the IRI if needed (python 2)
if sys.version_info[0] < 3:
if not isinstance(iri, unicode):
logger.debug("Converting IRI to unicode")
iri = iri.decode('utf-8')
try:
# If we can safely parse the URI, then we don't
# need to do anything special here
rfc3987.parse(iri, rule='IRI')
logger.debug("This is already a valid IRI, doing nothing...")
return iri
except:
# The URI is not valid, so we'll have to fix it.
logger.debug("The IRI is not valid, proceeding to quote...")
# First see whether we can actually parse it *as if* it is a URI
parts = urlparse.urlsplit(iri)
if not parts.scheme or not parts.netloc:
# If there is no scheme (e.g. http) nor a net location (e.g.
# example.com) then we cannot do anything
logger.error("The argument you provided does not comply with "
"RFC 3987 and is not parseable as a IRI"
"(there is no scheme or no net location part)")
logger.error(iri)
raise Exception("The argument you provided does not comply with"
"RFC 3987 and is not parseable as a IRI"
"(there is no scheme or no net location part)")
logger.debug("The IRI contains all necessary parts (scheme + net location)")
quoted_parts = {}
# We'll now convert the path, query and fragment parts of the URI
# Get the 'anti-pattern' for the valid characters (see rfc3987 package)
# This is roughly the ipchar pattern plus the '/' as we don't need to match
# the entire path, but merely the individual characters
no_invalid_characters = rfc3987.get_compiled_pattern("(?!%(iunreserved)s|%(pct_encoded)s|%(sub_delims)s|:|@|/)(.)")
# Replace the invalid characters with an underscore (no need to roundtrip)
quoted_parts['path'] = no_invalid_characters.sub(u'_', parts.path)
if parts.fragment:
quoted_parts['fragment'] = no_invalid_characters.sub(u'_', parts.fragment)
if parts.query:
quoted_parts['query'] = urllib.quote(parts.query.encode('utf-8'),safe="&=")
# Leave these untouched
quoted_parts['scheme'] = parts.scheme
quoted_parts['authority'] = parts.netloc
# Extra check to make sure we now have a valid IRI
quoted_iri = rfc3987.compose(**quoted_parts)
try:
rfc3987.parse(quoted_iri)
except:
# Unable to generate a valid quoted iri, using the straightforward
# urllib percent quoting (but this is ugly!)
logger.warning('Could not safely quote as IRI, falling back to '
'percent encoding')
quoted_iri = urllib.quote(iri.encode('utf-8'))
return quoted_iri
|
def to_iri(iri):
"""
Safely quotes an IRI in a way that is resilient to unicode and incorrect
arguments (checks for RFC 3987 compliance and falls back to percent encoding)
"""
# First decode the IRI if needed (python 2)
if sys.version_info[0] < 3:
if not isinstance(iri, unicode):
logger.debug("Converting IRI to unicode")
iri = iri.decode('utf-8')
try:
# If we can safely parse the URI, then we don't
# need to do anything special here
rfc3987.parse(iri, rule='IRI')
logger.debug("This is already a valid IRI, doing nothing...")
return iri
except:
# The URI is not valid, so we'll have to fix it.
logger.debug("The IRI is not valid, proceeding to quote...")
# First see whether we can actually parse it *as if* it is a URI
parts = urlparse.urlsplit(iri)
if not parts.scheme or not parts.netloc:
# If there is no scheme (e.g. http) nor a net location (e.g.
# example.com) then we cannot do anything
logger.error("The argument you provided does not comply with "
"RFC 3987 and is not parseable as a IRI"
"(there is no scheme or no net location part)")
logger.error(iri)
raise Exception("The argument you provided does not comply with"
"RFC 3987 and is not parseable as a IRI"
"(there is no scheme or no net location part)")
logger.debug("The IRI contains all necessary parts (scheme + net location)")
quoted_parts = {}
# We'll now convert the path, query and fragment parts of the URI
# Get the 'anti-pattern' for the valid characters (see rfc3987 package)
# This is roughly the ipchar pattern plus the '/' as we don't need to match
# the entire path, but merely the individual characters
no_invalid_characters = rfc3987.get_compiled_pattern("(?!%(iunreserved)s|%(pct_encoded)s|%(sub_delims)s|:|@|/)(.)")
# Replace the invalid characters with an underscore (no need to roundtrip)
quoted_parts['path'] = no_invalid_characters.sub(u'_', parts.path)
if parts.fragment:
quoted_parts['fragment'] = no_invalid_characters.sub(u'_', parts.fragment)
if parts.query:
quoted_parts['query'] = urllib.quote(parts.query.encode('utf-8'),safe="&=")
# Leave these untouched
quoted_parts['scheme'] = parts.scheme
quoted_parts['authority'] = parts.netloc
# Extra check to make sure we now have a valid IRI
quoted_iri = rfc3987.compose(**quoted_parts)
try:
rfc3987.parse(quoted_iri)
except:
# Unable to generate a valid quoted iri, using the straightforward
# urllib percent quoting (but this is ugly!)
logger.warning('Could not safely quote as IRI, falling back to '
'percent encoding')
quoted_iri = urllib.quote(iri.encode('utf-8'))
return quoted_iri
|
[
"Safely",
"quotes",
"an",
"IRI",
"in",
"a",
"way",
"that",
"is",
"resilient",
"to",
"unicode",
"and",
"incorrect",
"arguments",
"(",
"checks",
"for",
"RFC",
"3987",
"compliance",
"and",
"falls",
"back",
"to",
"percent",
"encoding",
")"
] |
CLARIAH/iribaker
|
python
|
https://github.com/CLARIAH/iribaker/blob/47d2e8e95472353769962fde7626881f53429379/iribaker/__init__.py#L17-L81
|
[
"def",
"to_iri",
"(",
"iri",
")",
":",
"# First decode the IRI if needed (python 2)",
"if",
"sys",
".",
"version_info",
"[",
"0",
"]",
"<",
"3",
":",
"if",
"not",
"isinstance",
"(",
"iri",
",",
"unicode",
")",
":",
"logger",
".",
"debug",
"(",
"\"Converting IRI to unicode\"",
")",
"iri",
"=",
"iri",
".",
"decode",
"(",
"'utf-8'",
")",
"try",
":",
"# If we can safely parse the URI, then we don't",
"# need to do anything special here",
"rfc3987",
".",
"parse",
"(",
"iri",
",",
"rule",
"=",
"'IRI'",
")",
"logger",
".",
"debug",
"(",
"\"This is already a valid IRI, doing nothing...\"",
")",
"return",
"iri",
"except",
":",
"# The URI is not valid, so we'll have to fix it.",
"logger",
".",
"debug",
"(",
"\"The IRI is not valid, proceeding to quote...\"",
")",
"# First see whether we can actually parse it *as if* it is a URI",
"parts",
"=",
"urlparse",
".",
"urlsplit",
"(",
"iri",
")",
"if",
"not",
"parts",
".",
"scheme",
"or",
"not",
"parts",
".",
"netloc",
":",
"# If there is no scheme (e.g. http) nor a net location (e.g.",
"# example.com) then we cannot do anything",
"logger",
".",
"error",
"(",
"\"The argument you provided does not comply with \"",
"\"RFC 3987 and is not parseable as a IRI\"",
"\"(there is no scheme or no net location part)\"",
")",
"logger",
".",
"error",
"(",
"iri",
")",
"raise",
"Exception",
"(",
"\"The argument you provided does not comply with\"",
"\"RFC 3987 and is not parseable as a IRI\"",
"\"(there is no scheme or no net location part)\"",
")",
"logger",
".",
"debug",
"(",
"\"The IRI contains all necessary parts (scheme + net location)\"",
")",
"quoted_parts",
"=",
"{",
"}",
"# We'll now convert the path, query and fragment parts of the URI",
"# Get the 'anti-pattern' for the valid characters (see rfc3987 package)",
"# This is roughly the ipchar pattern plus the '/' as we don't need to match",
"# the entire path, but merely the individual characters",
"no_invalid_characters",
"=",
"rfc3987",
".",
"get_compiled_pattern",
"(",
"\"(?!%(iunreserved)s|%(pct_encoded)s|%(sub_delims)s|:|@|/)(.)\"",
")",
"# Replace the invalid characters with an underscore (no need to roundtrip)",
"quoted_parts",
"[",
"'path'",
"]",
"=",
"no_invalid_characters",
".",
"sub",
"(",
"u'_'",
",",
"parts",
".",
"path",
")",
"if",
"parts",
".",
"fragment",
":",
"quoted_parts",
"[",
"'fragment'",
"]",
"=",
"no_invalid_characters",
".",
"sub",
"(",
"u'_'",
",",
"parts",
".",
"fragment",
")",
"if",
"parts",
".",
"query",
":",
"quoted_parts",
"[",
"'query'",
"]",
"=",
"urllib",
".",
"quote",
"(",
"parts",
".",
"query",
".",
"encode",
"(",
"'utf-8'",
")",
",",
"safe",
"=",
"\"&=\"",
")",
"# Leave these untouched",
"quoted_parts",
"[",
"'scheme'",
"]",
"=",
"parts",
".",
"scheme",
"quoted_parts",
"[",
"'authority'",
"]",
"=",
"parts",
".",
"netloc",
"# Extra check to make sure we now have a valid IRI",
"quoted_iri",
"=",
"rfc3987",
".",
"compose",
"(",
"*",
"*",
"quoted_parts",
")",
"try",
":",
"rfc3987",
".",
"parse",
"(",
"quoted_iri",
")",
"except",
":",
"# Unable to generate a valid quoted iri, using the straightforward",
"# urllib percent quoting (but this is ugly!)",
"logger",
".",
"warning",
"(",
"'Could not safely quote as IRI, falling back to '",
"'percent encoding'",
")",
"quoted_iri",
"=",
"urllib",
".",
"quote",
"(",
"iri",
".",
"encode",
"(",
"'utf-8'",
")",
")",
"return",
"quoted_iri"
] |
47d2e8e95472353769962fde7626881f53429379
|
test
|
parse_vn_results
|
Parse Visual Novel search pages.
:param soup: The BS4 class object
:return: A list of dictionaries containing a name and id.
|
Shosetsu/Parsing.py
|
async def parse_vn_results(soup):
"""
Parse Visual Novel search pages.
:param soup: The BS4 class object
:return: A list of dictionaries containing a name and id.
"""
soup = soup.find_all('td', class_='tc1')
vns = []
for item in soup[1:]:
vns.append({'name': item.string, 'id': item.a.get('href')[1:]})
return vns
|
async def parse_vn_results(soup):
"""
Parse Visual Novel search pages.
:param soup: The BS4 class object
:return: A list of dictionaries containing a name and id.
"""
soup = soup.find_all('td', class_='tc1')
vns = []
for item in soup[1:]:
vns.append({'name': item.string, 'id': item.a.get('href')[1:]})
return vns
|
[
"Parse",
"Visual",
"Novel",
"search",
"pages",
"."
] |
ccubed/Shosetsu
|
python
|
https://github.com/ccubed/Shosetsu/blob/eba01c058100ec8806129b11a2859f3126a1b101/Shosetsu/Parsing.py#L3-L14
|
[
"async",
"def",
"parse_vn_results",
"(",
"soup",
")",
":",
"soup",
"=",
"soup",
".",
"find_all",
"(",
"'td'",
",",
"class_",
"=",
"'tc1'",
")",
"vns",
"=",
"[",
"]",
"for",
"item",
"in",
"soup",
"[",
"1",
":",
"]",
":",
"vns",
".",
"append",
"(",
"{",
"'name'",
":",
"item",
".",
"string",
",",
"'id'",
":",
"item",
".",
"a",
".",
"get",
"(",
"'href'",
")",
"[",
"1",
":",
"]",
"}",
")",
"return",
"vns"
] |
eba01c058100ec8806129b11a2859f3126a1b101
|
test
|
parse_release_results
|
Parse Releases search pages.
:param soup: The BS4 class object
:return: A list of dictionaries containing a release dictionary. This is the same as the one returned in get_novel.
It contains a Date released, Platform, Ages group and Name.
|
Shosetsu/Parsing.py
|
async def parse_release_results(soup):
"""
Parse Releases search pages.
:param soup: The BS4 class object
:return: A list of dictionaries containing a release dictionary. This is the same as the one returned in get_novel.
It contains a Date released, Platform, Ages group and Name.
"""
soup = list(soup.find_all('table', class_='stripe')[0].children)[1:]
releases = []
for item in soup:
child = list(item.children)
temp_rel = {'date': None, 'ages': None, 'platform': None, 'name': None}
temp_rel['date'] = child[0].string
temp_rel['ages'] = child[1].string
temp_rel['platform'] = child[2].abbr.get('title')
temp_rel['name'] = child[3].a.string
releases.append(temp_rel)
del temp_rel
return releases
|
async def parse_release_results(soup):
"""
Parse Releases search pages.
:param soup: The BS4 class object
:return: A list of dictionaries containing a release dictionary. This is the same as the one returned in get_novel.
It contains a Date released, Platform, Ages group and Name.
"""
soup = list(soup.find_all('table', class_='stripe')[0].children)[1:]
releases = []
for item in soup:
child = list(item.children)
temp_rel = {'date': None, 'ages': None, 'platform': None, 'name': None}
temp_rel['date'] = child[0].string
temp_rel['ages'] = child[1].string
temp_rel['platform'] = child[2].abbr.get('title')
temp_rel['name'] = child[3].a.string
releases.append(temp_rel)
del temp_rel
return releases
|
[
"Parse",
"Releases",
"search",
"pages",
"."
] |
ccubed/Shosetsu
|
python
|
https://github.com/ccubed/Shosetsu/blob/eba01c058100ec8806129b11a2859f3126a1b101/Shosetsu/Parsing.py#L16-L35
|
[
"async",
"def",
"parse_release_results",
"(",
"soup",
")",
":",
"soup",
"=",
"list",
"(",
"soup",
".",
"find_all",
"(",
"'table'",
",",
"class_",
"=",
"'stripe'",
")",
"[",
"0",
"]",
".",
"children",
")",
"[",
"1",
":",
"]",
"releases",
"=",
"[",
"]",
"for",
"item",
"in",
"soup",
":",
"child",
"=",
"list",
"(",
"item",
".",
"children",
")",
"temp_rel",
"=",
"{",
"'date'",
":",
"None",
",",
"'ages'",
":",
"None",
",",
"'platform'",
":",
"None",
",",
"'name'",
":",
"None",
"}",
"temp_rel",
"[",
"'date'",
"]",
"=",
"child",
"[",
"0",
"]",
".",
"string",
"temp_rel",
"[",
"'ages'",
"]",
"=",
"child",
"[",
"1",
"]",
".",
"string",
"temp_rel",
"[",
"'platform'",
"]",
"=",
"child",
"[",
"2",
"]",
".",
"abbr",
".",
"get",
"(",
"'title'",
")",
"temp_rel",
"[",
"'name'",
"]",
"=",
"child",
"[",
"3",
"]",
".",
"a",
".",
"string",
"releases",
".",
"append",
"(",
"temp_rel",
")",
"del",
"temp_rel",
"return",
"releases"
] |
eba01c058100ec8806129b11a2859f3126a1b101
|
test
|
parse_prod_staff_results
|
Parse a page of producer or staff results
:param soup: The BS4 class object
:return: A list of dictionaries containing a name and nationality.
|
Shosetsu/Parsing.py
|
async def parse_prod_staff_results(soup):
"""
Parse a page of producer or staff results
:param soup: The BS4 class object
:return: A list of dictionaries containing a name and nationality.
"""
soup = soup.find_all('li')
producers = []
for item in soup:
producers.append({'nationality': item.abbr.get('title'), 'name': item.a.string})
return producers
|
async def parse_prod_staff_results(soup):
"""
Parse a page of producer or staff results
:param soup: The BS4 class object
:return: A list of dictionaries containing a name and nationality.
"""
soup = soup.find_all('li')
producers = []
for item in soup:
producers.append({'nationality': item.abbr.get('title'), 'name': item.a.string})
return producers
|
[
"Parse",
"a",
"page",
"of",
"producer",
"or",
"staff",
"results"
] |
ccubed/Shosetsu
|
python
|
https://github.com/ccubed/Shosetsu/blob/eba01c058100ec8806129b11a2859f3126a1b101/Shosetsu/Parsing.py#L37-L48
|
[
"async",
"def",
"parse_prod_staff_results",
"(",
"soup",
")",
":",
"soup",
"=",
"soup",
".",
"find_all",
"(",
"'li'",
")",
"producers",
"=",
"[",
"]",
"for",
"item",
"in",
"soup",
":",
"producers",
".",
"append",
"(",
"{",
"'nationality'",
":",
"item",
".",
"abbr",
".",
"get",
"(",
"'title'",
")",
",",
"'name'",
":",
"item",
".",
"a",
".",
"string",
"}",
")",
"return",
"producers"
] |
eba01c058100ec8806129b11a2859f3126a1b101
|
test
|
parse_character_results
|
Parse a page of character results.
:param soup: The BS4 class object
:return: Returns a list of dictionaries containing a name, gender and list of dictionaries containing a game name/id pair
for games they appeared in.
|
Shosetsu/Parsing.py
|
async def parse_character_results(soup):
"""
Parse a page of character results.
:param soup: The BS4 class object
:return: Returns a list of dictionaries containing a name, gender and list of dictionaries containing a game name/id pair
for games they appeared in.
"""
soup = list(soup.find_all('table', class_='stripe')[0].children)[1:]
characters = []
for item in soup:
temp_c = {'gender': None, 'name': None, 'games': {}}
temp_c['gender'] = item.abbr.get('title')
temp_c['name'] = list(item.children)[1].a.string
temp_c['games'] = []
for game in list(list(list(item.children)[1].children)[1].children):
if isinstance(game, NavigableString):
continue
temp_c['games'].append({'name': game.string, 'id': game.get('href').split('/')[1]})
characters.append(temp_c)
del temp_c
return characters
|
async def parse_character_results(soup):
"""
Parse a page of character results.
:param soup: The BS4 class object
:return: Returns a list of dictionaries containing a name, gender and list of dictionaries containing a game name/id pair
for games they appeared in.
"""
soup = list(soup.find_all('table', class_='stripe')[0].children)[1:]
characters = []
for item in soup:
temp_c = {'gender': None, 'name': None, 'games': {}}
temp_c['gender'] = item.abbr.get('title')
temp_c['name'] = list(item.children)[1].a.string
temp_c['games'] = []
for game in list(list(list(item.children)[1].children)[1].children):
if isinstance(game, NavigableString):
continue
temp_c['games'].append({'name': game.string, 'id': game.get('href').split('/')[1]})
characters.append(temp_c)
del temp_c
return characters
|
[
"Parse",
"a",
"page",
"of",
"character",
"results",
"."
] |
ccubed/Shosetsu
|
python
|
https://github.com/ccubed/Shosetsu/blob/eba01c058100ec8806129b11a2859f3126a1b101/Shosetsu/Parsing.py#L50-L71
|
[
"async",
"def",
"parse_character_results",
"(",
"soup",
")",
":",
"soup",
"=",
"list",
"(",
"soup",
".",
"find_all",
"(",
"'table'",
",",
"class_",
"=",
"'stripe'",
")",
"[",
"0",
"]",
".",
"children",
")",
"[",
"1",
":",
"]",
"characters",
"=",
"[",
"]",
"for",
"item",
"in",
"soup",
":",
"temp_c",
"=",
"{",
"'gender'",
":",
"None",
",",
"'name'",
":",
"None",
",",
"'games'",
":",
"{",
"}",
"}",
"temp_c",
"[",
"'gender'",
"]",
"=",
"item",
".",
"abbr",
".",
"get",
"(",
"'title'",
")",
"temp_c",
"[",
"'name'",
"]",
"=",
"list",
"(",
"item",
".",
"children",
")",
"[",
"1",
"]",
".",
"a",
".",
"string",
"temp_c",
"[",
"'games'",
"]",
"=",
"[",
"]",
"for",
"game",
"in",
"list",
"(",
"list",
"(",
"list",
"(",
"item",
".",
"children",
")",
"[",
"1",
"]",
".",
"children",
")",
"[",
"1",
"]",
".",
"children",
")",
":",
"if",
"isinstance",
"(",
"game",
",",
"NavigableString",
")",
":",
"continue",
"temp_c",
"[",
"'games'",
"]",
".",
"append",
"(",
"{",
"'name'",
":",
"game",
".",
"string",
",",
"'id'",
":",
"game",
".",
"get",
"(",
"'href'",
")",
".",
"split",
"(",
"'/'",
")",
"[",
"1",
"]",
"}",
")",
"characters",
".",
"append",
"(",
"temp_c",
")",
"del",
"temp_c",
"return",
"characters"
] |
eba01c058100ec8806129b11a2859f3126a1b101
|
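For orientation, a sketch of the structure parse_character_results returns; the values below are invented for illustration and are not real VNDB data.
characters = [
    {'gender': 'Female',                                   # read from the abbr title attribute
     'name': 'Example Character',                          # hypothetical character name
     'games': [{'name': 'Example Game', 'id': 'v17'}]},    # id is the path segment after the leading '/'
]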
test
|
parse_tag_results
|
Parse a page of tag or trait results. Same format.
:param soup: BS4 Class Object
:return: A list of tags; nothing else on the page is really useful.
|
Shosetsu/Parsing.py
|
async def parse_tag_results(soup):
"""
Parse a page of tag or trait results. Same format.
:param soup: BS4 Class Object
    :return: A list of tags; nothing else on the page is really useful.
"""
soup = soup.find_all('td', class_='tc3')
tags = []
for item in soup:
tags.append(item.a.string)
return tags
|
async def parse_tag_results(soup):
"""
Parse a page of tag or trait results. Same format.
:param soup: BS4 Class Object
    :return: A list of tags; nothing else on the page is really useful.
"""
soup = soup.find_all('td', class_='tc3')
tags = []
for item in soup:
tags.append(item.a.string)
return tags
|
[
"Parse",
"a",
"page",
"of",
"tag",
"or",
"trait",
"results",
".",
"Same",
"format",
"."
] |
ccubed/Shosetsu
|
python
|
https://github.com/ccubed/Shosetsu/blob/eba01c058100ec8806129b11a2859f3126a1b101/Shosetsu/Parsing.py#L73-L84
|
[
"async",
"def",
"parse_tag_results",
"(",
"soup",
")",
":",
"soup",
"=",
"soup",
".",
"find_all",
"(",
"'td'",
",",
"class_",
"=",
"'tc3'",
")",
"tags",
"=",
"[",
"]",
"for",
"item",
"in",
"soup",
":",
"tags",
".",
"append",
"(",
"item",
".",
"a",
".",
"string",
")",
"return",
"tags"
] |
eba01c058100ec8806129b11a2859f3126a1b101
|
test
|
parse_user_results
|
Parse a page of user results
:param soup: Bs4 Class object
:return: A list of dictionaries containing a name and join date
|
Shosetsu/Parsing.py
|
async def parse_user_results(soup):
"""
Parse a page of user results
:param soup: Bs4 Class object
:return: A list of dictionaries containing a name and join date
"""
soup = list(soup.find_all('table', class_='stripe')[0].children)[1:]
users = []
for item in soup:
t_u = {'name': None, 'joined': None}
t_u['name'] = list(item.children)[0].a.string
t_u['joined'] = list(item.children)[1].string
users.append(t_u)
del t_u
return users
|
async def parse_user_results(soup):
"""
Parse a page of user results
:param soup: Bs4 Class object
:return: A list of dictionaries containing a name and join date
"""
soup = list(soup.find_all('table', class_='stripe')[0].children)[1:]
users = []
for item in soup:
t_u = {'name': None, 'joined': None}
t_u['name'] = list(item.children)[0].a.string
t_u['joined'] = list(item.children)[1].string
users.append(t_u)
del t_u
return users
|
[
"Parse",
"a",
"page",
"of",
"user",
"results"
] |
ccubed/Shosetsu
|
python
|
https://github.com/ccubed/Shosetsu/blob/eba01c058100ec8806129b11a2859f3126a1b101/Shosetsu/Parsing.py#L86-L101
|
[
"async",
"def",
"parse_user_results",
"(",
"soup",
")",
":",
"soup",
"=",
"list",
"(",
"soup",
".",
"find_all",
"(",
"'table'",
",",
"class_",
"=",
"'stripe'",
")",
"[",
"0",
"]",
".",
"children",
")",
"[",
"1",
":",
"]",
"users",
"=",
"[",
"]",
"for",
"item",
"in",
"soup",
":",
"t_u",
"=",
"{",
"'name'",
":",
"None",
",",
"'joined'",
":",
"None",
"}",
"t_u",
"[",
"'name'",
"]",
"=",
"list",
"(",
"item",
".",
"children",
")",
"[",
"0",
"]",
".",
"a",
".",
"string",
"t_u",
"[",
"'joined'",
"]",
"=",
"list",
"(",
"item",
".",
"children",
")",
"[",
"1",
"]",
".",
"string",
"users",
".",
"append",
"(",
"t_u",
")",
"del",
"t_u",
"return",
"users"
] |
eba01c058100ec8806129b11a2859f3126a1b101
|
test
|
tarball_files
|
Creates a tarball from a group of files
:param str tar_name: Name of tarball
:param list[str] file_paths: Absolute file paths to include in the tarball
:param str output_dir: Output destination for tarball
:param str prefix: Optional prefix for files in tarball
|
src/toil_lib/files.py
|
def tarball_files(tar_name, file_paths, output_dir='.', prefix=''):
"""
Creates a tarball from a group of files
:param str tar_name: Name of tarball
:param list[str] file_paths: Absolute file paths to include in the tarball
:param str output_dir: Output destination for tarball
:param str prefix: Optional prefix for files in tarball
"""
with tarfile.open(os.path.join(output_dir, tar_name), 'w:gz') as f_out:
for file_path in file_paths:
if not file_path.startswith('/'):
raise ValueError('Path provided is relative not absolute.')
arcname = prefix + os.path.basename(file_path)
f_out.add(file_path, arcname=arcname)
|
def tarball_files(tar_name, file_paths, output_dir='.', prefix=''):
"""
Creates a tarball from a group of files
:param str tar_name: Name of tarball
:param list[str] file_paths: Absolute file paths to include in the tarball
:param str output_dir: Output destination for tarball
:param str prefix: Optional prefix for files in tarball
"""
with tarfile.open(os.path.join(output_dir, tar_name), 'w:gz') as f_out:
for file_path in file_paths:
if not file_path.startswith('/'):
raise ValueError('Path provided is relative not absolute.')
arcname = prefix + os.path.basename(file_path)
f_out.add(file_path, arcname=arcname)
|
[
"Creates",
"a",
"tarball",
"from",
"a",
"group",
"of",
"files"
] |
BD2KGenomics/toil-lib
|
python
|
https://github.com/BD2KGenomics/toil-lib/blob/022a615fc3dc98fc1aaa7bfd232409962ca44fbd/src/toil_lib/files.py#L9-L23
|
[
"def",
"tarball_files",
"(",
"tar_name",
",",
"file_paths",
",",
"output_dir",
"=",
"'.'",
",",
"prefix",
"=",
"''",
")",
":",
"with",
"tarfile",
".",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"output_dir",
",",
"tar_name",
")",
",",
"'w:gz'",
")",
"as",
"f_out",
":",
"for",
"file_path",
"in",
"file_paths",
":",
"if",
"not",
"file_path",
".",
"startswith",
"(",
"'/'",
")",
":",
"raise",
"ValueError",
"(",
"'Path provided is relative not absolute.'",
")",
"arcname",
"=",
"prefix",
"+",
"os",
".",
"path",
".",
"basename",
"(",
"file_path",
")",
"f_out",
".",
"add",
"(",
"file_path",
",",
"arcname",
"=",
"arcname",
")"
] |
022a615fc3dc98fc1aaa7bfd232409962ca44fbd
|
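A minimal usage sketch for tarball_files; the input files, output directory and prefix are hypothetical, and the paths must be absolute or the function raises ValueError.
import os

paths = [os.path.abspath('sample.vcf'), os.path.abspath('sample.log')]  # hypothetical existing files
tarball_files('outputs.tar.gz', file_paths=paths, output_dir='/tmp', prefix='run1_')
# /tmp/outputs.tar.gz should then contain run1_sample.vcf and run1_sample.log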
test
|
__forall_files
|
Applies a function to a set of files and an output directory.
:param str output_dir: Output directory
:param list[str] file_paths: Absolute file paths to move
|
src/toil_lib/files.py
|
def __forall_files(file_paths, output_dir, op):
"""
Applies a function to a set of files and an output directory.
:param str output_dir: Output directory
:param list[str] file_paths: Absolute file paths to move
"""
for file_path in file_paths:
if not file_path.startswith('/'):
raise ValueError('Path provided (%s) is relative not absolute.' % file_path)
dest = os.path.join(output_dir, os.path.basename(file_path))
op(file_path, dest)
|
def __forall_files(file_paths, output_dir, op):
"""
Applies a function to a set of files and an output directory.
:param str output_dir: Output directory
:param list[str] file_paths: Absolute file paths to move
"""
for file_path in file_paths:
if not file_path.startswith('/'):
raise ValueError('Path provided (%s) is relative not absolute.' % file_path)
dest = os.path.join(output_dir, os.path.basename(file_path))
op(file_path, dest)
|
[
"Applies",
"a",
"function",
"to",
"a",
"set",
"of",
"files",
"and",
"an",
"output",
"directory",
"."
] |
BD2KGenomics/toil-lib
|
python
|
https://github.com/BD2KGenomics/toil-lib/blob/022a615fc3dc98fc1aaa7bfd232409962ca44fbd/src/toil_lib/files.py#L26-L37
|
[
"def",
"__forall_files",
"(",
"file_paths",
",",
"output_dir",
",",
"op",
")",
":",
"for",
"file_path",
"in",
"file_paths",
":",
"if",
"not",
"file_path",
".",
"startswith",
"(",
"'/'",
")",
":",
"raise",
"ValueError",
"(",
"'Path provided (%s) is relative not absolute.'",
"%",
"file_path",
")",
"dest",
"=",
"os",
".",
"path",
".",
"join",
"(",
"output_dir",
",",
"os",
".",
"path",
".",
"basename",
"(",
"file_path",
")",
")",
"op",
"(",
"file_path",
",",
"dest",
")"
] |
022a615fc3dc98fc1aaa7bfd232409962ca44fbd
|
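Because __forall_files is a module-private helper, calling it directly is unlikely; the sketch below only illustrates the op(file_path, dest) pattern it applies, with shutil.copy standing in for the op argument and hypothetical paths.
import os
import shutil

output_dir = '/tmp/out'                                   # hypothetical destination directory
for file_path in ['/tmp/a.txt', '/tmp/b.txt']:            # hypothetical absolute input paths
    dest = os.path.join(output_dir, os.path.basename(file_path))
    shutil.copy(file_path, dest)                          # plays the role of op(file_path, dest)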
test
|
copy_file_job
|
Job version of move_files for one file
:param JobFunctionWrappingJob job: passed automatically by Toil
:param str name: Name of output file (including extension)
:param str file_id: FileStoreID of file
:param str output_dir: Location to place output file
|
src/toil_lib/files.py
|
def copy_file_job(job, name, file_id, output_dir):
"""
Job version of move_files for one file
:param JobFunctionWrappingJob job: passed automatically by Toil
:param str name: Name of output file (including extension)
:param str file_id: FileStoreID of file
:param str output_dir: Location to place output file
"""
work_dir = job.fileStore.getLocalTempDir()
fpath = job.fileStore.readGlobalFile(file_id, os.path.join(work_dir, name))
copy_files([fpath], output_dir)
|
def copy_file_job(job, name, file_id, output_dir):
"""
Job version of move_files for one file
:param JobFunctionWrappingJob job: passed automatically by Toil
:param str name: Name of output file (including extension)
:param str file_id: FileStoreID of file
:param str output_dir: Location to place output file
"""
work_dir = job.fileStore.getLocalTempDir()
fpath = job.fileStore.readGlobalFile(file_id, os.path.join(work_dir, name))
copy_files([fpath], output_dir)
|
[
"Job",
"version",
"of",
"move_files",
"for",
"one",
"file"
] |
BD2KGenomics/toil-lib
|
python
|
https://github.com/BD2KGenomics/toil-lib/blob/022a615fc3dc98fc1aaa7bfd232409962ca44fbd/src/toil_lib/files.py#L40-L51
|
[
"def",
"copy_file_job",
"(",
"job",
",",
"name",
",",
"file_id",
",",
"output_dir",
")",
":",
"work_dir",
"=",
"job",
".",
"fileStore",
".",
"getLocalTempDir",
"(",
")",
"fpath",
"=",
"job",
".",
"fileStore",
".",
"readGlobalFile",
"(",
"file_id",
",",
"os",
".",
"path",
".",
"join",
"(",
"work_dir",
",",
"name",
")",
")",
"copy_files",
"(",
"[",
"fpath",
"]",
",",
"output_dir",
")"
] |
022a615fc3dc98fc1aaa7bfd232409962ca44fbd
|
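A hedged sketch of scheduling copy_file_job from a parent Toil job; parent_job, file_id and the output directory are hypothetical placeholders.
# Inside some job function that already holds a FileStoreID in `file_id`:
parent_job.addChildJobFn(copy_file_job, 'sample.bam', file_id, '/mnt/output')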
test
|
consolidate_tarballs_job
|
Combine the contents of separate tarballs into one.
Subdirs within the tarball will be named the keys in **fname_to_id
:param JobFunctionWrappingJob job: passed automatically by Toil
:param dict[str,str] fname_to_id: Dictionary of the form: file-name-prefix=FileStoreID
:return: The file store ID of the generated tarball
:rtype: str
|
src/toil_lib/files.py
|
def consolidate_tarballs_job(job, fname_to_id):
"""
Combine the contents of separate tarballs into one.
Subdirs within the tarball will be named the keys in **fname_to_id
:param JobFunctionWrappingJob job: passed automatically by Toil
:param dict[str,str] fname_to_id: Dictionary of the form: file-name-prefix=FileStoreID
:return: The file store ID of the generated tarball
:rtype: str
"""
work_dir = job.fileStore.getLocalTempDir()
# Retrieve output file paths to consolidate
tar_paths = []
for fname, file_store_id in fname_to_id.iteritems():
p = job.fileStore.readGlobalFile(file_store_id, os.path.join(work_dir, fname + '.tar.gz'))
tar_paths.append((p, fname))
# I/O
# output_name is arbitrary as this job function returns a FileStoreId
output_name = 'foo.tar.gz'
out_tar = os.path.join(work_dir, output_name)
# Consolidate separate tarballs into one
with tarfile.open(os.path.join(work_dir, out_tar), 'w:gz') as f_out:
for tar, fname in tar_paths:
with tarfile.open(tar, 'r') as f_in:
for tarinfo in f_in:
with closing(f_in.extractfile(tarinfo)) as f_in_file:
tarinfo.name = os.path.join(output_name, fname, os.path.basename(tarinfo.name))
f_out.addfile(tarinfo, fileobj=f_in_file)
return job.fileStore.writeGlobalFile(out_tar)
|
def consolidate_tarballs_job(job, fname_to_id):
"""
Combine the contents of separate tarballs into one.
Subdirs within the tarball will be named the keys in **fname_to_id
:param JobFunctionWrappingJob job: passed automatically by Toil
:param dict[str,str] fname_to_id: Dictionary of the form: file-name-prefix=FileStoreID
:return: The file store ID of the generated tarball
:rtype: str
"""
work_dir = job.fileStore.getLocalTempDir()
# Retrieve output file paths to consolidate
tar_paths = []
for fname, file_store_id in fname_to_id.iteritems():
p = job.fileStore.readGlobalFile(file_store_id, os.path.join(work_dir, fname + '.tar.gz'))
tar_paths.append((p, fname))
# I/O
# output_name is arbitrary as this job function returns a FileStoreId
output_name = 'foo.tar.gz'
out_tar = os.path.join(work_dir, output_name)
# Consolidate separate tarballs into one
with tarfile.open(os.path.join(work_dir, out_tar), 'w:gz') as f_out:
for tar, fname in tar_paths:
with tarfile.open(tar, 'r') as f_in:
for tarinfo in f_in:
with closing(f_in.extractfile(tarinfo)) as f_in_file:
tarinfo.name = os.path.join(output_name, fname, os.path.basename(tarinfo.name))
f_out.addfile(tarinfo, fileobj=f_in_file)
return job.fileStore.writeGlobalFile(out_tar)
|
[
"Combine",
"the",
"contents",
"of",
"separate",
"tarballs",
"into",
"one",
".",
"Subdirs",
"within",
"the",
"tarball",
"will",
"be",
"named",
"the",
"keys",
"in",
"**",
"fname_to_id"
] |
BD2KGenomics/toil-lib
|
python
|
https://github.com/BD2KGenomics/toil-lib/blob/022a615fc3dc98fc1aaa7bfd232409962ca44fbd/src/toil_lib/files.py#L81-L109
|
[
"def",
"consolidate_tarballs_job",
"(",
"job",
",",
"fname_to_id",
")",
":",
"work_dir",
"=",
"job",
".",
"fileStore",
".",
"getLocalTempDir",
"(",
")",
"# Retrieve output file paths to consolidate",
"tar_paths",
"=",
"[",
"]",
"for",
"fname",
",",
"file_store_id",
"in",
"fname_to_id",
".",
"iteritems",
"(",
")",
":",
"p",
"=",
"job",
".",
"fileStore",
".",
"readGlobalFile",
"(",
"file_store_id",
",",
"os",
".",
"path",
".",
"join",
"(",
"work_dir",
",",
"fname",
"+",
"'.tar.gz'",
")",
")",
"tar_paths",
".",
"append",
"(",
"(",
"p",
",",
"fname",
")",
")",
"# I/O",
"# output_name is arbitrary as this job function returns a FileStoreId",
"output_name",
"=",
"'foo.tar.gz'",
"out_tar",
"=",
"os",
".",
"path",
".",
"join",
"(",
"work_dir",
",",
"output_name",
")",
"# Consolidate separate tarballs into one",
"with",
"tarfile",
".",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"work_dir",
",",
"out_tar",
")",
",",
"'w:gz'",
")",
"as",
"f_out",
":",
"for",
"tar",
",",
"fname",
"in",
"tar_paths",
":",
"with",
"tarfile",
".",
"open",
"(",
"tar",
",",
"'r'",
")",
"as",
"f_in",
":",
"for",
"tarinfo",
"in",
"f_in",
":",
"with",
"closing",
"(",
"f_in",
".",
"extractfile",
"(",
"tarinfo",
")",
")",
"as",
"f_in_file",
":",
"tarinfo",
".",
"name",
"=",
"os",
".",
"path",
".",
"join",
"(",
"output_name",
",",
"fname",
",",
"os",
".",
"path",
".",
"basename",
"(",
"tarinfo",
".",
"name",
")",
")",
"f_out",
".",
"addfile",
"(",
"tarinfo",
",",
"fileobj",
"=",
"f_in_file",
")",
"return",
"job",
".",
"fileStore",
".",
"writeGlobalFile",
"(",
"out_tar",
")"
] |
022a615fc3dc98fc1aaa7bfd232409962ca44fbd
|
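A sketch of the fname_to_id mapping consolidate_tarballs_job expects and one hypothetical way to schedule it; the FileStoreIDs are placeholders. Note that the use of dict.iteritems ties the function to Python 2.
fname_to_id = {'mutect': mutect_tarball_id,               # hypothetical FileStoreIDs of .tar.gz files
               'pindel': pindel_tarball_id}
consolidated_id = parent_job.addChildJobFn(consolidate_tarballs_job, fname_to_id).rv()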
test
|
_make_parameters
|
Makes a Spark Submit style job submission line.
    :param master_ip: The Spark leader IP address.
:param default_parameters: Application specific Spark configuration parameters.
:param memory: The memory to allocate to each Spark driver and executor.
:param arguments: Arguments to pass to the submitted job.
:param override_parameters: Parameters passed by the user, that override our defaults.
    :type master_ip: MasterAddress
:type default_parameters: list of string
:type arguments: list of string
:type memory: int or None
:type override_parameters: list of string or None
|
src/toil_lib/tools/spark_tools.py
|
def _make_parameters(master_ip, default_parameters, memory, arguments, override_parameters):
"""
Makes a Spark Submit style job submission line.
    :param master_ip: The Spark leader IP address.
:param default_parameters: Application specific Spark configuration parameters.
:param memory: The memory to allocate to each Spark driver and executor.
:param arguments: Arguments to pass to the submitted job.
:param override_parameters: Parameters passed by the user, that override our defaults.
    :type master_ip: MasterAddress
:type default_parameters: list of string
:type arguments: list of string
:type memory: int or None
:type override_parameters: list of string or None
"""
# python doesn't support logical xor?
# anywho, exactly one of memory or override_parameters must be defined
require((override_parameters is not None or memory is not None) and
(override_parameters is None or memory is None),
"Either the memory setting must be defined or you must provide Spark configuration parameters.")
# if the user hasn't provided overrides, set our defaults
parameters = []
if memory is not None:
parameters = ["--master", "spark://%s:%s" % (master_ip, SPARK_MASTER_PORT),
"--conf", "spark.driver.memory=%sg" % memory,
"--conf", "spark.executor.memory=%sg" % memory,
"--conf", ("spark.hadoop.fs.default.name=hdfs://%s:%s" % (master_ip, HDFS_MASTER_PORT))]
else:
parameters.extend(override_parameters)
# add the tool specific spark parameters
parameters.extend(default_parameters)
# spark submit expects a '--' to split the spark conf arguments from tool arguments
parameters.append('--')
# now add the tool arguments and return
parameters.extend(arguments)
return parameters
|
def _make_parameters(master_ip, default_parameters, memory, arguments, override_parameters):
"""
Makes a Spark Submit style job submission line.
    :param master_ip: The Spark leader IP address.
:param default_parameters: Application specific Spark configuration parameters.
:param memory: The memory to allocate to each Spark driver and executor.
:param arguments: Arguments to pass to the submitted job.
:param override_parameters: Parameters passed by the user, that override our defaults.
    :type master_ip: MasterAddress
:type default_parameters: list of string
:type arguments: list of string
:type memory: int or None
:type override_parameters: list of string or None
"""
# python doesn't support logical xor?
# anywho, exactly one of memory or override_parameters must be defined
require((override_parameters is not None or memory is not None) and
(override_parameters is None or memory is None),
"Either the memory setting must be defined or you must provide Spark configuration parameters.")
# if the user hasn't provided overrides, set our defaults
parameters = []
if memory is not None:
parameters = ["--master", "spark://%s:%s" % (master_ip, SPARK_MASTER_PORT),
"--conf", "spark.driver.memory=%sg" % memory,
"--conf", "spark.executor.memory=%sg" % memory,
"--conf", ("spark.hadoop.fs.default.name=hdfs://%s:%s" % (master_ip, HDFS_MASTER_PORT))]
else:
parameters.extend(override_parameters)
# add the tool specific spark parameters
parameters.extend(default_parameters)
# spark submit expects a '--' to split the spark conf arguments from tool arguments
parameters.append('--')
# now add the tool arguments and return
parameters.extend(arguments)
return parameters
|
[
"Makes",
"a",
"Spark",
"Submit",
"style",
"job",
"submission",
"line",
"."
] |
BD2KGenomics/toil-lib
|
python
|
https://github.com/BD2KGenomics/toil-lib/blob/022a615fc3dc98fc1aaa7bfd232409962ca44fbd/src/toil_lib/tools/spark_tools.py#L53-L95
|
[
"def",
"_make_parameters",
"(",
"master_ip",
",",
"default_parameters",
",",
"memory",
",",
"arguments",
",",
"override_parameters",
")",
":",
"# python doesn't support logical xor?",
"# anywho, exactly one of memory or override_parameters must be defined",
"require",
"(",
"(",
"override_parameters",
"is",
"not",
"None",
"or",
"memory",
"is",
"not",
"None",
")",
"and",
"(",
"override_parameters",
"is",
"None",
"or",
"memory",
"is",
"None",
")",
",",
"\"Either the memory setting must be defined or you must provide Spark configuration parameters.\"",
")",
"# if the user hasn't provided overrides, set our defaults",
"parameters",
"=",
"[",
"]",
"if",
"memory",
"is",
"not",
"None",
":",
"parameters",
"=",
"[",
"\"--master\"",
",",
"\"spark://%s:%s\"",
"%",
"(",
"master_ip",
",",
"SPARK_MASTER_PORT",
")",
",",
"\"--conf\"",
",",
"\"spark.driver.memory=%sg\"",
"%",
"memory",
",",
"\"--conf\"",
",",
"\"spark.executor.memory=%sg\"",
"%",
"memory",
",",
"\"--conf\"",
",",
"(",
"\"spark.hadoop.fs.default.name=hdfs://%s:%s\"",
"%",
"(",
"master_ip",
",",
"HDFS_MASTER_PORT",
")",
")",
"]",
"else",
":",
"parameters",
".",
"extend",
"(",
"override_parameters",
")",
"# add the tool specific spark parameters",
"parameters",
".",
"extend",
"(",
"default_parameters",
")",
"# spark submit expects a '--' to split the spark conf arguments from tool arguments",
"parameters",
".",
"append",
"(",
"'--'",
")",
"# now add the tool arguments and return",
"parameters",
".",
"extend",
"(",
"arguments",
")",
"return",
"parameters"
] |
022a615fc3dc98fc1aaa7bfd232409962ca44fbd
|
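A sketch of what _make_parameters returns when memory is given (override_parameters must then be None); master_ip is shown as a plain string here, and the actual ports come from the module's SPARK_MASTER_PORT and HDFS_MASTER_PORT constants.
params = _make_parameters('10.0.0.1', [], 8, ['transform', 'in.adam', 'out.adam'], None)
# -> ['--master', 'spark://10.0.0.1:<SPARK_MASTER_PORT>',
#     '--conf', 'spark.driver.memory=8g',
#     '--conf', 'spark.executor.memory=8g',
#     '--conf', 'spark.hadoop.fs.default.name=hdfs://10.0.0.1:<HDFS_MASTER_PORT>',
#     '--', 'transform', 'in.adam', 'out.adam']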
test
|
call_conductor
|
Invokes the Conductor container to copy files between S3 and HDFS and vice versa.
Find Conductor at https://github.com/BD2KGenomics/conductor.
:param toil.Job.job job: The Toil Job calling this function
    :param master_ip: The Spark leader IP address.
:param src: URL of file to copy.
    :param dst: URL of the location to copy the file to.
:param memory: Gigabytes of memory to provision for Spark driver/worker.
:param override_parameters: Parameters passed by the user, that override our defaults.
    :type master_ip: MasterAddress
:type src: string
:type dst: string
:type memory: int or None
:type override_parameters: list of string or None
|
src/toil_lib/tools/spark_tools.py
|
def call_conductor(job, master_ip, src, dst, memory=None, override_parameters=None):
"""
Invokes the Conductor container to copy files between S3 and HDFS and vice versa.
Find Conductor at https://github.com/BD2KGenomics/conductor.
:param toil.Job.job job: The Toil Job calling this function
    :param master_ip: The Spark leader IP address.
:param src: URL of file to copy.
    :param dst: URL of the location to copy the file to.
:param memory: Gigabytes of memory to provision for Spark driver/worker.
:param override_parameters: Parameters passed by the user, that override our defaults.
    :type master_ip: MasterAddress
:type src: string
:type dst: string
:type memory: int or None
:type override_parameters: list of string or None
"""
arguments = ["-C", src, dst]
docker_parameters = ['--log-driver', 'none', master_ip.docker_parameters(["--net=host"])]
dockerCall(job=job,
tool="quay.io/ucsc_cgl/conductor",
parameters=_make_parameters(master_ip,
[], # no conductor specific spark configuration
memory,
arguments,
override_parameters),
dockerParameters=docker_parameters)
|
def call_conductor(job, master_ip, src, dst, memory=None, override_parameters=None):
"""
Invokes the Conductor container to copy files between S3 and HDFS and vice versa.
Find Conductor at https://github.com/BD2KGenomics/conductor.
:param toil.Job.job job: The Toil Job calling this function
    :param master_ip: The Spark leader IP address.
:param src: URL of file to copy.
    :param dst: URL of the location to copy the file to.
:param memory: Gigabytes of memory to provision for Spark driver/worker.
:param override_parameters: Parameters passed by the user, that override our defaults.
    :type master_ip: MasterAddress
:type src: string
:type dst: string
:type memory: int or None
:type override_parameters: list of string or None
"""
arguments = ["-C", src, dst]
docker_parameters = ['--log-driver', 'none', master_ip.docker_parameters(["--net=host"])]
dockerCall(job=job,
tool="quay.io/ucsc_cgl/conductor",
parameters=_make_parameters(master_ip,
[], # no conductor specific spark configuration
memory,
arguments,
override_parameters),
dockerParameters=docker_parameters)
|
[
"Invokes",
"the",
"Conductor",
"container",
"to",
"copy",
"files",
"between",
"S3",
"and",
"HDFS",
"and",
"vice",
"versa",
".",
"Find",
"Conductor",
"at",
"https",
":",
"//",
"github",
".",
"com",
"/",
"BD2KGenomics",
"/",
"conductor",
"."
] |
BD2KGenomics/toil-lib
|
python
|
https://github.com/BD2KGenomics/toil-lib/blob/022a615fc3dc98fc1aaa7bfd232409962ca44fbd/src/toil_lib/tools/spark_tools.py#L98-L127
|
[
"def",
"call_conductor",
"(",
"job",
",",
"master_ip",
",",
"src",
",",
"dst",
",",
"memory",
"=",
"None",
",",
"override_parameters",
"=",
"None",
")",
":",
"arguments",
"=",
"[",
"\"-C\"",
",",
"src",
",",
"dst",
"]",
"docker_parameters",
"=",
"[",
"'--log-driver'",
",",
"'none'",
",",
"master_ip",
".",
"docker_parameters",
"(",
"[",
"\"--net=host\"",
"]",
")",
"]",
"dockerCall",
"(",
"job",
"=",
"job",
",",
"tool",
"=",
"\"quay.io/ucsc_cgl/conductor\"",
",",
"parameters",
"=",
"_make_parameters",
"(",
"master_ip",
",",
"[",
"]",
",",
"# no conductor specific spark configuration",
"memory",
",",
"arguments",
",",
"override_parameters",
")",
",",
"dockerParameters",
"=",
"docker_parameters",
")"
] |
022a615fc3dc98fc1aaa7bfd232409962ca44fbd
|
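A hedged sketch of using call_conductor inside a Toil job to stage a file from S3 into HDFS; the job handle, master_ip (a MasterAddress) and both URLs are hypothetical.
call_conductor(job, master_ip,
               src='s3://example-bucket/reads.bam',       # hypothetical source URL
               dst='hdfs://spark-master/reads.bam',       # hypothetical destination URL
               memory=8)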
test
|
call_adam
|
Invokes the ADAM container. Find ADAM at https://github.com/bigdatagenomics/adam.
:param toil.Job.job job: The Toil Job calling this function
    :param master_ip: The Spark leader IP address.
:param arguments: Arguments to pass to ADAM.
:param memory: Gigabytes of memory to provision for Spark driver/worker.
:param override_parameters: Parameters passed by the user, that override our defaults.
:param native_adam_path: Path to ADAM executable. If not provided, Docker is used.
:param run_local: If true, runs Spark with the --master local[*] setting, which uses
all cores on the local machine. The master_ip will be disregarded.
    :type master_ip: MasterAddress
:type arguments: list of string
:type memory: int or None
:type override_parameters: list of string or None
:type native_adam_path: string or None
:type run_local: boolean
|
src/toil_lib/tools/spark_tools.py
|
def call_adam(job, master_ip, arguments,
memory=None,
override_parameters=None,
run_local=False,
native_adam_path=None):
"""
Invokes the ADAM container. Find ADAM at https://github.com/bigdatagenomics/adam.
:param toil.Job.job job: The Toil Job calling this function
    :param master_ip: The Spark leader IP address.
:param arguments: Arguments to pass to ADAM.
:param memory: Gigabytes of memory to provision for Spark driver/worker.
:param override_parameters: Parameters passed by the user, that override our defaults.
:param native_adam_path: Path to ADAM executable. If not provided, Docker is used.
:param run_local: If true, runs Spark with the --master local[*] setting, which uses
all cores on the local machine. The master_ip will be disregarded.
    :type master_ip: MasterAddress
:type arguments: list of string
:type memory: int or None
:type override_parameters: list of string or None
:type native_adam_path: string or None
:type run_local: boolean
"""
if run_local:
master = ["--master", "local[*]"]
else:
master = ["--master",
("spark://%s:%s" % (master_ip, SPARK_MASTER_PORT)),
"--conf", ("spark.hadoop.fs.default.name=hdfs://%s:%s" % (master_ip, HDFS_MASTER_PORT)),]
default_params = (master + [
# set max result size to unlimited, see #177
"--conf", "spark.driver.maxResultSize=0",
# these memory tuning parameters were derived in the course of running the
# experiments for the ADAM sigmod paper:
#
# Nothaft, Frank Austin, et al. "Rethinking data-intensive science using scalable
# analytics systems." Proceedings of the 2015 ACM SIGMOD International Conference
# on Management of Data. ACM, 2015.
#
# the memory tunings reduce the amount of memory dedicated to caching, which we don't
# take advantage of, and the network timeout flag reduces the number of job failures
# caused by heavy gc load
"--conf", "spark.storage.memoryFraction=0.3",
"--conf", "spark.storage.unrollFraction=0.1",
"--conf", "spark.network.timeout=300s"])
# are we running adam via docker, or do we have a native path?
if native_adam_path is None:
docker_parameters = ['--log-driver', 'none', master_ip.docker_parameters(["--net=host"])]
dockerCall(job=job,
tool="quay.io/ucsc_cgl/adam:962-ehf--6e7085f8cac4b9a927dc9fb06b48007957256b80",
dockerParameters=docker_parameters,
parameters=_make_parameters(master_ip,
default_params,
memory,
arguments,
override_parameters))
else:
check_call([os.path.join(native_adam_path, "bin/adam-submit")] +
default_params +
arguments)
|
def call_adam(job, master_ip, arguments,
memory=None,
override_parameters=None,
run_local=False,
native_adam_path=None):
"""
Invokes the ADAM container. Find ADAM at https://github.com/bigdatagenomics/adam.
:param toil.Job.job job: The Toil Job calling this function
    :param master_ip: The Spark leader IP address.
:param arguments: Arguments to pass to ADAM.
:param memory: Gigabytes of memory to provision for Spark driver/worker.
:param override_parameters: Parameters passed by the user, that override our defaults.
:param native_adam_path: Path to ADAM executable. If not provided, Docker is used.
:param run_local: If true, runs Spark with the --master local[*] setting, which uses
all cores on the local machine. The master_ip will be disregarded.
    :type master_ip: MasterAddress
:type arguments: list of string
:type memory: int or None
:type override_parameters: list of string or None
:type native_adam_path: string or None
:type run_local: boolean
"""
if run_local:
master = ["--master", "local[*]"]
else:
master = ["--master",
("spark://%s:%s" % (master_ip, SPARK_MASTER_PORT)),
"--conf", ("spark.hadoop.fs.default.name=hdfs://%s:%s" % (master_ip, HDFS_MASTER_PORT)),]
default_params = (master + [
# set max result size to unlimited, see #177
"--conf", "spark.driver.maxResultSize=0",
# these memory tuning parameters were derived in the course of running the
# experiments for the ADAM sigmod paper:
#
# Nothaft, Frank Austin, et al. "Rethinking data-intensive science using scalable
# analytics systems." Proceedings of the 2015 ACM SIGMOD International Conference
# on Management of Data. ACM, 2015.
#
# the memory tunings reduce the amount of memory dedicated to caching, which we don't
# take advantage of, and the network timeout flag reduces the number of job failures
# caused by heavy gc load
"--conf", "spark.storage.memoryFraction=0.3",
"--conf", "spark.storage.unrollFraction=0.1",
"--conf", "spark.network.timeout=300s"])
# are we running adam via docker, or do we have a native path?
if native_adam_path is None:
docker_parameters = ['--log-driver', 'none', master_ip.docker_parameters(["--net=host"])]
dockerCall(job=job,
tool="quay.io/ucsc_cgl/adam:962-ehf--6e7085f8cac4b9a927dc9fb06b48007957256b80",
dockerParameters=docker_parameters,
parameters=_make_parameters(master_ip,
default_params,
memory,
arguments,
override_parameters))
else:
check_call([os.path.join(native_adam_path, "bin/adam-submit")] +
default_params +
arguments)
|
[
"Invokes",
"the",
"ADAM",
"container",
".",
"Find",
"ADAM",
"at",
"https",
":",
"//",
"github",
".",
"com",
"/",
"bigdatagenomics",
"/",
"adam",
"."
] |
BD2KGenomics/toil-lib
|
python
|
https://github.com/BD2KGenomics/toil-lib/blob/022a615fc3dc98fc1aaa7bfd232409962ca44fbd/src/toil_lib/tools/spark_tools.py#L130-L192
|
[
"def",
"call_adam",
"(",
"job",
",",
"master_ip",
",",
"arguments",
",",
"memory",
"=",
"None",
",",
"override_parameters",
"=",
"None",
",",
"run_local",
"=",
"False",
",",
"native_adam_path",
"=",
"None",
")",
":",
"if",
"run_local",
":",
"master",
"=",
"[",
"\"--master\"",
",",
"\"local[*]\"",
"]",
"else",
":",
"master",
"=",
"[",
"\"--master\"",
",",
"(",
"\"spark://%s:%s\"",
"%",
"(",
"master_ip",
",",
"SPARK_MASTER_PORT",
")",
")",
",",
"\"--conf\"",
",",
"(",
"\"spark.hadoop.fs.default.name=hdfs://%s:%s\"",
"%",
"(",
"master_ip",
",",
"HDFS_MASTER_PORT",
")",
")",
",",
"]",
"default_params",
"=",
"(",
"master",
"+",
"[",
"# set max result size to unlimited, see #177",
"\"--conf\"",
",",
"\"spark.driver.maxResultSize=0\"",
",",
"# these memory tuning parameters were derived in the course of running the",
"# experiments for the ADAM sigmod paper:",
"#",
"# Nothaft, Frank Austin, et al. \"Rethinking data-intensive science using scalable",
"# analytics systems.\" Proceedings of the 2015 ACM SIGMOD International Conference",
"# on Management of Data. ACM, 2015.",
"#",
"# the memory tunings reduce the amount of memory dedicated to caching, which we don't",
"# take advantage of, and the network timeout flag reduces the number of job failures",
"# caused by heavy gc load",
"\"--conf\"",
",",
"\"spark.storage.memoryFraction=0.3\"",
",",
"\"--conf\"",
",",
"\"spark.storage.unrollFraction=0.1\"",
",",
"\"--conf\"",
",",
"\"spark.network.timeout=300s\"",
"]",
")",
"# are we running adam via docker, or do we have a native path?",
"if",
"native_adam_path",
"is",
"None",
":",
"docker_parameters",
"=",
"[",
"'--log-driver'",
",",
"'none'",
",",
"master_ip",
".",
"docker_parameters",
"(",
"[",
"\"--net=host\"",
"]",
")",
"]",
"dockerCall",
"(",
"job",
"=",
"job",
",",
"tool",
"=",
"\"quay.io/ucsc_cgl/adam:962-ehf--6e7085f8cac4b9a927dc9fb06b48007957256b80\"",
",",
"dockerParameters",
"=",
"docker_parameters",
",",
"parameters",
"=",
"_make_parameters",
"(",
"master_ip",
",",
"default_params",
",",
"memory",
",",
"arguments",
",",
"override_parameters",
")",
")",
"else",
":",
"check_call",
"(",
"[",
"os",
".",
"path",
".",
"join",
"(",
"native_adam_path",
",",
"\"bin/adam-submit\"",
")",
"]",
"+",
"default_params",
"+",
"arguments",
")"
] |
022a615fc3dc98fc1aaa7bfd232409962ca44fbd
|
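A sketch of invoking call_adam for a transform step; the job handle, master_ip and HDFS paths are hypothetical, memory is in gigabytes as documented, and the exact ADAM subcommand name may differ between ADAM versions.
call_adam(job, master_ip,
          ['transform', 'hdfs://spark-master/reads.bam', 'hdfs://spark-master/reads.adam'],
          memory=8)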
test
|
MasterAddress.docker_parameters
|
Augment a list of "docker run" arguments with those needed to map the notional Spark master address to the
real one, if they are different.
|
src/toil_lib/tools/spark_tools.py
|
def docker_parameters(self, docker_parameters=None):
"""
Augment a list of "docker run" arguments with those needed to map the notional Spark master address to the
real one, if they are different.
"""
if self != self.actual:
add_host_option = '--add-host=spark-master:' + self.actual
if docker_parameters is None:
docker_parameters = [add_host_option]
else:
docker_parameters.append(add_host_option)
return docker_parameters
|
def docker_parameters(self, docker_parameters=None):
"""
Augment a list of "docker run" arguments with those needed to map the notional Spark master address to the
real one, if they are different.
"""
if self != self.actual:
add_host_option = '--add-host=spark-master:' + self.actual
if docker_parameters is None:
docker_parameters = [add_host_option]
else:
docker_parameters.append(add_host_option)
return docker_parameters
|
[
"Augment",
"a",
"list",
"of",
"docker",
"run",
"arguments",
"with",
"those",
"needed",
"to",
"map",
"the",
"notional",
"Spark",
"master",
"address",
"to",
"the",
"real",
"one",
"if",
"they",
"are",
"different",
"."
] |
BD2KGenomics/toil-lib
|
python
|
https://github.com/BD2KGenomics/toil-lib/blob/022a615fc3dc98fc1aaa7bfd232409962ca44fbd/src/toil_lib/tools/spark_tools.py#L40-L51
|
[
"def",
"docker_parameters",
"(",
"self",
",",
"docker_parameters",
"=",
"None",
")",
":",
"if",
"self",
"!=",
"self",
".",
"actual",
":",
"add_host_option",
"=",
"'--add-host=spark-master:'",
"+",
"self",
".",
"actual",
"if",
"docker_parameters",
"is",
"None",
":",
"docker_parameters",
"=",
"[",
"add_host_option",
"]",
"else",
":",
"docker_parameters",
".",
"append",
"(",
"add_host_option",
")",
"return",
"docker_parameters"
] |
022a615fc3dc98fc1aaa7bfd232409962ca44fbd
|
test
|
ConnectorObject.refresh
|
Refresh reloads data from the server. It raises an error if it fails to get the object's metadata
|
connectordb/_connectorobject.py
|
def refresh(self):
"""Refresh reloads data from the server. It raises an error if it fails to get the object's metadata"""
self.metadata = self.db.read(self.path).json()
|
def refresh(self):
"""Refresh reloads data from the server. It raises an error if it fails to get the object's metadata"""
self.metadata = self.db.read(self.path).json()
|
[
"Refresh",
"reloads",
"data",
"from",
"the",
"server",
".",
"It",
"raises",
"an",
"error",
"if",
"it",
"fails",
"to",
"get",
"the",
"object",
"s",
"metadata"
] |
connectordb/connectordb-python
|
python
|
https://github.com/connectordb/connectordb-python/blob/2092b0cb30898139a247176bcf433d5a4abde7cb/connectordb/_connectorobject.py#L16-L18
|
[
"def",
"refresh",
"(",
"self",
")",
":",
"self",
".",
"metadata",
"=",
"self",
".",
"db",
".",
"read",
"(",
"self",
".",
"path",
")",
".",
"json",
"(",
")"
] |
2092b0cb30898139a247176bcf433d5a4abde7cb
|
test
|
ConnectorObject.set
|
Attempts to set the given properties of the object.
An example of this is setting the nickname of the object::
cdb.set({"nickname": "My new nickname"})
note that there is a convenience property `cdb.nickname` that allows you to get/set the nickname directly.
|
connectordb/_connectorobject.py
|
def set(self, property_dict):
"""Attempts to set the given properties of the object.
An example of this is setting the nickname of the object::
cdb.set({"nickname": "My new nickname"})
note that there is a convenience property `cdb.nickname` that allows you to get/set the nickname directly.
"""
self.metadata = self.db.update(self.path, property_dict).json()
|
def set(self, property_dict):
"""Attempts to set the given properties of the object.
An example of this is setting the nickname of the object::
cdb.set({"nickname": "My new nickname"})
note that there is a convenience property `cdb.nickname` that allows you to get/set the nickname directly.
"""
self.metadata = self.db.update(self.path, property_dict).json()
|
[
"Attempts",
"to",
"set",
"the",
"given",
"properties",
"of",
"the",
"object",
".",
"An",
"example",
"of",
"this",
"is",
"setting",
"the",
"nickname",
"of",
"the",
"object",
"::",
"cdb",
".",
"set",
"(",
"{",
"nickname",
":",
"My",
"new",
"nickname",
"}",
")",
"note",
"that",
"there",
"is",
"a",
"convenience",
"property",
"cdb",
".",
"nickname",
"that",
"allows",
"you",
"to",
"get",
"/",
"set",
"the",
"nickname",
"directly",
"."
] |
connectordb/connectordb-python
|
python
|
https://github.com/connectordb/connectordb-python/blob/2092b0cb30898139a247176bcf433d5a4abde7cb/connectordb/_connectorobject.py#L49-L57
|
[
"def",
"set",
"(",
"self",
",",
"property_dict",
")",
":",
"self",
".",
"metadata",
"=",
"self",
".",
"db",
".",
"update",
"(",
"self",
".",
"path",
",",
"property_dict",
")",
".",
"json",
"(",
")"
] |
2092b0cb30898139a247176bcf433d5a4abde7cb
|
test
|
run_mutect
|
Calls MuTect to perform variant analysis
:param JobFunctionWrappingJob job: passed automatically by Toil
:param str normal_bam: Normal BAM FileStoreID
:param str normal_bai: Normal BAM index FileStoreID
:param str tumor_bam: Tumor BAM FileStoreID
:param str tumor_bai: Tumor BAM Index FileStoreID
:param str ref: Reference genome FileStoreID
:param str ref_dict: Reference dictionary FileStoreID
:param str fai: Reference index FileStoreID
:param str cosmic: Cosmic VCF FileStoreID
:param str dbsnp: DBSNP VCF FileStoreID
:return: MuTect output (tarball) FileStoreID
:rtype: str
|
src/toil_lib/tools/mutation_callers.py
|
def run_mutect(job, normal_bam, normal_bai, tumor_bam, tumor_bai, ref, ref_dict, fai, cosmic, dbsnp):
"""
Calls MuTect to perform variant analysis
:param JobFunctionWrappingJob job: passed automatically by Toil
:param str normal_bam: Normal BAM FileStoreID
:param str normal_bai: Normal BAM index FileStoreID
:param str tumor_bam: Tumor BAM FileStoreID
:param str tumor_bai: Tumor BAM Index FileStoreID
:param str ref: Reference genome FileStoreID
:param str ref_dict: Reference dictionary FileStoreID
:param str fai: Reference index FileStoreID
:param str cosmic: Cosmic VCF FileStoreID
:param str dbsnp: DBSNP VCF FileStoreID
:return: MuTect output (tarball) FileStoreID
:rtype: str
"""
work_dir = job.fileStore.getLocalTempDir()
file_ids = [normal_bam, normal_bai, tumor_bam, tumor_bai, ref, fai, ref_dict, cosmic, dbsnp]
file_names = ['normal.bam', 'normal.bai', 'tumor.bam', 'tumor.bai', 'ref.fasta',
'ref.fasta.fai', 'ref.dict', 'cosmic.vcf', 'dbsnp.vcf']
for file_store_id, name in zip(file_ids, file_names):
job.fileStore.readGlobalFile(file_store_id, os.path.join(work_dir, name))
# Call: MuTect
parameters = ['--analysis_type', 'MuTect',
'--reference_sequence', 'ref.fasta',
'--cosmic', '/data/cosmic.vcf',
'--dbsnp', '/data/dbsnp.vcf',
'--input_file:normal', '/data/normal.bam',
'--input_file:tumor', '/data/tumor.bam',
'--tumor_lod', str(10), # Taken from MC3 pipeline
'--initial_tumor_lod', str(4.0), # Taken from MC3 pipeline
'--out', 'mutect.out',
'--coverage_file', 'mutect.cov',
'--vcf', 'mutect.vcf']
dockerCall(job=job, workDir=work_dir, parameters=parameters,
tool='quay.io/ucsc_cgl/mutect:1.1.7--e8bf09459cf0aecb9f55ee689c2b2d194754cbd3')
# Write output to file store
output_file_names = ['mutect.vcf', 'mutect.cov', 'mutect.out']
output_file_paths = [os.path.join(work_dir, x) for x in output_file_names]
tarball_files('mutect.tar.gz', file_paths=output_file_paths, output_dir=work_dir)
return job.fileStore.writeGlobalFile(os.path.join(work_dir, 'mutect.tar.gz'))
|
def run_mutect(job, normal_bam, normal_bai, tumor_bam, tumor_bai, ref, ref_dict, fai, cosmic, dbsnp):
"""
Calls MuTect to perform variant analysis
:param JobFunctionWrappingJob job: passed automatically by Toil
:param str normal_bam: Normal BAM FileStoreID
:param str normal_bai: Normal BAM index FileStoreID
:param str tumor_bam: Tumor BAM FileStoreID
:param str tumor_bai: Tumor BAM Index FileStoreID
:param str ref: Reference genome FileStoreID
:param str ref_dict: Reference dictionary FileStoreID
:param str fai: Reference index FileStoreID
:param str cosmic: Cosmic VCF FileStoreID
:param str dbsnp: DBSNP VCF FileStoreID
:return: MuTect output (tarball) FileStoreID
:rtype: str
"""
work_dir = job.fileStore.getLocalTempDir()
file_ids = [normal_bam, normal_bai, tumor_bam, tumor_bai, ref, fai, ref_dict, cosmic, dbsnp]
file_names = ['normal.bam', 'normal.bai', 'tumor.bam', 'tumor.bai', 'ref.fasta',
'ref.fasta.fai', 'ref.dict', 'cosmic.vcf', 'dbsnp.vcf']
for file_store_id, name in zip(file_ids, file_names):
job.fileStore.readGlobalFile(file_store_id, os.path.join(work_dir, name))
# Call: MuTect
parameters = ['--analysis_type', 'MuTect',
'--reference_sequence', 'ref.fasta',
'--cosmic', '/data/cosmic.vcf',
'--dbsnp', '/data/dbsnp.vcf',
'--input_file:normal', '/data/normal.bam',
'--input_file:tumor', '/data/tumor.bam',
'--tumor_lod', str(10), # Taken from MC3 pipeline
'--initial_tumor_lod', str(4.0), # Taken from MC3 pipeline
'--out', 'mutect.out',
'--coverage_file', 'mutect.cov',
'--vcf', 'mutect.vcf']
dockerCall(job=job, workDir=work_dir, parameters=parameters,
tool='quay.io/ucsc_cgl/mutect:1.1.7--e8bf09459cf0aecb9f55ee689c2b2d194754cbd3')
# Write output to file store
output_file_names = ['mutect.vcf', 'mutect.cov', 'mutect.out']
output_file_paths = [os.path.join(work_dir, x) for x in output_file_names]
tarball_files('mutect.tar.gz', file_paths=output_file_paths, output_dir=work_dir)
return job.fileStore.writeGlobalFile(os.path.join(work_dir, 'mutect.tar.gz'))
|
[
"Calls",
"MuTect",
"to",
"perform",
"variant",
"analysis"
] |
BD2KGenomics/toil-lib
|
python
|
https://github.com/BD2KGenomics/toil-lib/blob/022a615fc3dc98fc1aaa7bfd232409962ca44fbd/src/toil_lib/tools/mutation_callers.py#L9-L50
|
[
"def",
"run_mutect",
"(",
"job",
",",
"normal_bam",
",",
"normal_bai",
",",
"tumor_bam",
",",
"tumor_bai",
",",
"ref",
",",
"ref_dict",
",",
"fai",
",",
"cosmic",
",",
"dbsnp",
")",
":",
"work_dir",
"=",
"job",
".",
"fileStore",
".",
"getLocalTempDir",
"(",
")",
"file_ids",
"=",
"[",
"normal_bam",
",",
"normal_bai",
",",
"tumor_bam",
",",
"tumor_bai",
",",
"ref",
",",
"fai",
",",
"ref_dict",
",",
"cosmic",
",",
"dbsnp",
"]",
"file_names",
"=",
"[",
"'normal.bam'",
",",
"'normal.bai'",
",",
"'tumor.bam'",
",",
"'tumor.bai'",
",",
"'ref.fasta'",
",",
"'ref.fasta.fai'",
",",
"'ref.dict'",
",",
"'cosmic.vcf'",
",",
"'dbsnp.vcf'",
"]",
"for",
"file_store_id",
",",
"name",
"in",
"zip",
"(",
"file_ids",
",",
"file_names",
")",
":",
"job",
".",
"fileStore",
".",
"readGlobalFile",
"(",
"file_store_id",
",",
"os",
".",
"path",
".",
"join",
"(",
"work_dir",
",",
"name",
")",
")",
"# Call: MuTect",
"parameters",
"=",
"[",
"'--analysis_type'",
",",
"'MuTect'",
",",
"'--reference_sequence'",
",",
"'ref.fasta'",
",",
"'--cosmic'",
",",
"'/data/cosmic.vcf'",
",",
"'--dbsnp'",
",",
"'/data/dbsnp.vcf'",
",",
"'--input_file:normal'",
",",
"'/data/normal.bam'",
",",
"'--input_file:tumor'",
",",
"'/data/tumor.bam'",
",",
"'--tumor_lod'",
",",
"str",
"(",
"10",
")",
",",
"# Taken from MC3 pipeline",
"'--initial_tumor_lod'",
",",
"str",
"(",
"4.0",
")",
",",
"# Taken from MC3 pipeline",
"'--out'",
",",
"'mutect.out'",
",",
"'--coverage_file'",
",",
"'mutect.cov'",
",",
"'--vcf'",
",",
"'mutect.vcf'",
"]",
"dockerCall",
"(",
"job",
"=",
"job",
",",
"workDir",
"=",
"work_dir",
",",
"parameters",
"=",
"parameters",
",",
"tool",
"=",
"'quay.io/ucsc_cgl/mutect:1.1.7--e8bf09459cf0aecb9f55ee689c2b2d194754cbd3'",
")",
"# Write output to file store",
"output_file_names",
"=",
"[",
"'mutect.vcf'",
",",
"'mutect.cov'",
",",
"'mutect.out'",
"]",
"output_file_paths",
"=",
"[",
"os",
".",
"path",
".",
"join",
"(",
"work_dir",
",",
"x",
")",
"for",
"x",
"in",
"output_file_names",
"]",
"tarball_files",
"(",
"'mutect.tar.gz'",
",",
"file_paths",
"=",
"output_file_paths",
",",
"output_dir",
"=",
"work_dir",
")",
"return",
"job",
".",
"fileStore",
".",
"writeGlobalFile",
"(",
"os",
".",
"path",
".",
"join",
"(",
"work_dir",
",",
"'mutect.tar.gz'",
")",
")"
] |
022a615fc3dc98fc1aaa7bfd232409962ca44fbd
|
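A hedged end-to-end sketch of running run_mutect as the root job of a Toil workflow; the local file:// URLs are hypothetical, and importFile is used to obtain the FileStoreIDs the function expects, in the order of its signature.
from toil.common import Toil
from toil.job import Job

options = Job.Runner.getDefaultOptions('./jobstore')      # hypothetical job store location
with Toil(options) as toil:
    names = ('normal.bam', 'normal.bai', 'tumor.bam', 'tumor.bai',
             'ref.fasta', 'ref.dict', 'ref.fasta.fai', 'cosmic.vcf', 'dbsnp.vcf')
    inputs = [toil.importFile('file:///data/' + n) for n in names]  # hypothetical local inputs
    root = Job.wrapJobFn(run_mutect, *inputs, cores=2, memory='8G', disk='20G')
    mutect_tarball_id = toil.start(root)                  # FileStoreID of mutect.tar.gz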
test
|
run_pindel
|
Calls Pindel to compute indels / deletions
:param JobFunctionWrappingJob job: Passed automatically by Toil
:param str normal_bam: Normal BAM FileStoreID
:param str normal_bai: Normal BAM index FileStoreID
:param str tumor_bam: Tumor BAM FileStoreID
:param str tumor_bai: Tumor BAM Index FileStoreID
:param str ref: Reference genome FileStoreID
:param str fai: Reference index FileStoreID
:return: Pindel output (tarball) FileStoreID
:rtype: str
|
src/toil_lib/tools/mutation_callers.py
|
def run_pindel(job, normal_bam, normal_bai, tumor_bam, tumor_bai, ref, fai):
"""
Calls Pindel to compute indels / deletions
:param JobFunctionWrappingJob job: Passed automatically by Toil
:param str normal_bam: Normal BAM FileStoreID
:param str normal_bai: Normal BAM index FileStoreID
:param str tumor_bam: Tumor BAM FileStoreID
:param str tumor_bai: Tumor BAM Index FileStoreID
:param str ref: Reference genome FileStoreID
:param str fai: Reference index FileStoreID
:return: Pindel output (tarball) FileStoreID
:rtype: str
"""
work_dir = job.fileStore.getLocalTempDir()
file_ids = [normal_bam, normal_bai, tumor_bam, tumor_bai, ref, fai]
file_names = ['normal.bam', 'normal.bai', 'tumor.bam', 'tumor.bai', 'ref.fasta', 'ref.fasta.fai']
for file_store_id, name in zip(file_ids, file_names):
job.fileStore.readGlobalFile(file_store_id, os.path.join(work_dir, name))
# Create Pindel config
with open(os.path.join(work_dir, 'pindel-config.txt'), 'w') as f:
for bam in ['normal', 'tumor']:
f.write('/data/{} {} {}\n'.format(bam + '.bam', get_mean_insert_size(work_dir, bam + '.bam'), bam))
# Call: Pindel
parameters = ['-f', '/data/ref.fasta',
'-i', '/data/pindel-config.txt',
'--number_of_threads', str(job.cores),
'--minimum_support_for_event', '3',
'--report_long_insertions', 'true',
'--report_breakpoints', 'true',
'-o', 'pindel']
dockerCall(job=job, tool='quay.io/ucsc_cgl/pindel:0.2.5b6--4e8d1b31d4028f464b3409c6558fb9dfcad73f88',
workDir=work_dir, parameters=parameters)
# Collect output files and write to file store
output_files = glob(os.path.join(work_dir, 'pindel*'))
tarball_files('pindel.tar.gz', file_paths=output_files, output_dir=work_dir)
return job.fileStore.writeGlobalFile(os.path.join(work_dir, 'pindel.tar.gz'))
|
def run_pindel(job, normal_bam, normal_bai, tumor_bam, tumor_bai, ref, fai):
"""
Calls Pindel to compute indels / deletions
:param JobFunctionWrappingJob job: Passed automatically by Toil
:param str normal_bam: Normal BAM FileStoreID
:param str normal_bai: Normal BAM index FileStoreID
:param str tumor_bam: Tumor BAM FileStoreID
:param str tumor_bai: Tumor BAM Index FileStoreID
:param str ref: Reference genome FileStoreID
:param str fai: Reference index FileStoreID
:return: Pindel output (tarball) FileStoreID
:rtype: str
"""
work_dir = job.fileStore.getLocalTempDir()
file_ids = [normal_bam, normal_bai, tumor_bam, tumor_bai, ref, fai]
file_names = ['normal.bam', 'normal.bai', 'tumor.bam', 'tumor.bai', 'ref.fasta', 'ref.fasta.fai']
for file_store_id, name in zip(file_ids, file_names):
job.fileStore.readGlobalFile(file_store_id, os.path.join(work_dir, name))
# Create Pindel config
with open(os.path.join(work_dir, 'pindel-config.txt'), 'w') as f:
for bam in ['normal', 'tumor']:
f.write('/data/{} {} {}\n'.format(bam + '.bam', get_mean_insert_size(work_dir, bam + '.bam'), bam))
# Call: Pindel
parameters = ['-f', '/data/ref.fasta',
'-i', '/data/pindel-config.txt',
'--number_of_threads', str(job.cores),
'--minimum_support_for_event', '3',
'--report_long_insertions', 'true',
'--report_breakpoints', 'true',
'-o', 'pindel']
dockerCall(job=job, tool='quay.io/ucsc_cgl/pindel:0.2.5b6--4e8d1b31d4028f464b3409c6558fb9dfcad73f88',
workDir=work_dir, parameters=parameters)
# Collect output files and write to file store
output_files = glob(os.path.join(work_dir, 'pindel*'))
tarball_files('pindel.tar.gz', file_paths=output_files, output_dir=work_dir)
return job.fileStore.writeGlobalFile(os.path.join(work_dir, 'pindel.tar.gz'))
|
[
"Calls",
"Pindel",
"to",
"compute",
"indels",
"/",
"deletions"
] |
BD2KGenomics/toil-lib
|
python
|
https://github.com/BD2KGenomics/toil-lib/blob/022a615fc3dc98fc1aaa7bfd232409962ca44fbd/src/toil_lib/tools/mutation_callers.py#L93-L129
|
[
"def",
"run_pindel",
"(",
"job",
",",
"normal_bam",
",",
"normal_bai",
",",
"tumor_bam",
",",
"tumor_bai",
",",
"ref",
",",
"fai",
")",
":",
"work_dir",
"=",
"job",
".",
"fileStore",
".",
"getLocalTempDir",
"(",
")",
"file_ids",
"=",
"[",
"normal_bam",
",",
"normal_bai",
",",
"tumor_bam",
",",
"tumor_bai",
",",
"ref",
",",
"fai",
"]",
"file_names",
"=",
"[",
"'normal.bam'",
",",
"'normal.bai'",
",",
"'tumor.bam'",
",",
"'tumor.bai'",
",",
"'ref.fasta'",
",",
"'ref.fasta.fai'",
"]",
"for",
"file_store_id",
",",
"name",
"in",
"zip",
"(",
"file_ids",
",",
"file_names",
")",
":",
"job",
".",
"fileStore",
".",
"readGlobalFile",
"(",
"file_store_id",
",",
"os",
".",
"path",
".",
"join",
"(",
"work_dir",
",",
"name",
")",
")",
"# Create Pindel config",
"with",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"work_dir",
",",
"'pindel-config.txt'",
")",
",",
"'w'",
")",
"as",
"f",
":",
"for",
"bam",
"in",
"[",
"'normal'",
",",
"'tumor'",
"]",
":",
"f",
".",
"write",
"(",
"'/data/{} {} {}\\n'",
".",
"format",
"(",
"bam",
"+",
"'.bam'",
",",
"get_mean_insert_size",
"(",
"work_dir",
",",
"bam",
"+",
"'.bam'",
")",
",",
"bam",
")",
")",
"# Call: Pindel",
"parameters",
"=",
"[",
"'-f'",
",",
"'/data/ref.fasta'",
",",
"'-i'",
",",
"'/data/pindel-config.txt'",
",",
"'--number_of_threads'",
",",
"str",
"(",
"job",
".",
"cores",
")",
",",
"'--minimum_support_for_event'",
",",
"'3'",
",",
"'--report_long_insertions'",
",",
"'true'",
",",
"'--report_breakpoints'",
",",
"'true'",
",",
"'-o'",
",",
"'pindel'",
"]",
"dockerCall",
"(",
"job",
"=",
"job",
",",
"tool",
"=",
"'quay.io/ucsc_cgl/pindel:0.2.5b6--4e8d1b31d4028f464b3409c6558fb9dfcad73f88'",
",",
"workDir",
"=",
"work_dir",
",",
"parameters",
"=",
"parameters",
")",
"# Collect output files and write to file store",
"output_files",
"=",
"glob",
"(",
"os",
".",
"path",
".",
"join",
"(",
"work_dir",
",",
"'pindel*'",
")",
")",
"tarball_files",
"(",
"'pindel.tar.gz'",
",",
"file_paths",
"=",
"output_files",
",",
"output_dir",
"=",
"work_dir",
")",
"return",
"job",
".",
"fileStore",
".",
"writeGlobalFile",
"(",
"os",
".",
"path",
".",
"join",
"(",
"work_dir",
",",
"'pindel.tar.gz'",
")",
")"
] |
022a615fc3dc98fc1aaa7bfd232409962ca44fbd
|
test
|
Device.create
|
Creates the device. Attempts to create private devices by default,
but if public is set to true, creates public devices.
You can also set other default properties by passing in the relevant information.
For example, setting a device with the given nickname and description::
dev.create(nickname="mydevice", description="This is an example")
Furthermore, ConnectorDB supports creation of a device's streams immediately,
which can considerably speed up device setup::
dev.create(streams={
"stream1": {"schema": '{\"type\":\"number\"}'}
})
Note that the schema must be encoded as a string when creating in this format.
|
connectordb/_device.py
|
def create(self, public=False, **kwargs):
"""Creates the device. Attempts to create private devices by default,
but if public is set to true, creates public devices.
You can also set other default properties by passing in the relevant information.
For example, setting a device with the given nickname and description::
dev.create(nickname="mydevice", description="This is an example")
Furthermore, ConnectorDB supports creation of a device's streams immediately,
which can considerably speed up device setup::
dev.create(streams={
"stream1": {"schema": '{\"type\":\"number\"}'}
})
Note that the schema must be encoded as a string when creating in this format.
"""
kwargs["public"] = public
self.metadata = self.db.create(self.path, kwargs).json()
|
def create(self, public=False, **kwargs):
    """Creates the device. Attempts to create private devices by default,
    but if public is set to true, creates public devices.
    You can also set other default properties by passing in the relevant information.
    For example, setting a device with the given nickname and description::
        dev.create(nickname="mydevice", description="This is an example")
    Furthermore, ConnectorDB supports creation of a device's streams immediately,
    which can considerably speed up device setup::
        dev.create(streams={
            "stream1": {"schema": '{\"type\":\"number\"}'}
        })
    Note that the schema must be encoded as a string when creating in this format.
    """
    kwargs["public"] = public
    self.metadata = self.db.create(self.path, kwargs).json()
|
[
"Creates",
"the",
"device",
".",
"Attempts",
"to",
"create",
"private",
"devices",
"by",
"default",
"but",
"if",
"public",
"is",
"set",
"to",
"true",
"creates",
"public",
"devices",
"."
] |
connectordb/connectordb-python
|
python
|
https://github.com/connectordb/connectordb-python/blob/2092b0cb30898139a247176bcf433d5a4abde7cb/connectordb/_device.py#L12-L31
|
[
"def",
"create",
"(",
"self",
",",
"public",
"=",
"False",
",",
"*",
"*",
"kwargs",
")",
":",
"kwargs",
"[",
"\"public\"",
"]",
"=",
"public",
"self",
".",
"metadata",
"=",
"self",
".",
"db",
".",
"create",
"(",
"self",
".",
"path",
",",
"kwargs",
")",
".",
"json",
"(",
")"
] |
2092b0cb30898139a247176bcf433d5a4abde7cb
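A hedged usage sketch for the create() call recorded above; `dev` stands in for a connectordb Device handle obtained elsewhere, and the nickname, description, and stream name below are placeholders, not part of the original record.

# Sketch only: `dev` is assumed to be an existing connectordb Device handle.
dev.create(
    public=False,                      # private device (the default)
    nickname="sensorbox",              # placeholder
    description="Example device",      # placeholder
    streams={
        # per the docstring, stream schemas are passed as JSON-encoded strings here
        "temperature": {"schema": '{"type": "number"}'},
    },
)
print(dev.metadata)  # create() caches the server's response on the device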
|
test
|
Device.streams
|
Returns the list of streams that belong to the device
|
connectordb/_device.py
|
def streams(self):
    """Returns the list of streams that belong to the device"""
    result = self.db.read(self.path, {"q": "ls"})
    if result is None or result.json() is None:
        return []
    streams = []
    for s in result.json():
        strm = self[s["name"]]
        strm.metadata = s
        streams.append(strm)
    return streams
|
def streams(self):
    """Returns the list of streams that belong to the device"""
    result = self.db.read(self.path, {"q": "ls"})
    if result is None or result.json() is None:
        return []
    streams = []
    for s in result.json():
        strm = self[s["name"]]
        strm.metadata = s
        streams.append(strm)
    return streams
|
[
"Returns",
"the",
"list",
"of",
"streams",
"that",
"belong",
"to",
"the",
"device"
] |
connectordb/connectordb-python
|
python
|
https://github.com/connectordb/connectordb-python/blob/2092b0cb30898139a247176bcf433d5a4abde7cb/connectordb/_device.py#L33-L44
|
[
"def",
"streams",
"(",
"self",
")",
":",
"result",
"=",
"self",
".",
"db",
".",
"read",
"(",
"self",
".",
"path",
",",
"{",
"\"q\"",
":",
"\"ls\"",
"}",
")",
"if",
"result",
"is",
"None",
"or",
"result",
".",
"json",
"(",
")",
"is",
"None",
":",
"return",
"[",
"]",
"streams",
"=",
"[",
"]",
"for",
"s",
"in",
"result",
".",
"json",
"(",
")",
":",
"strm",
"=",
"self",
"[",
"s",
"[",
"\"name\"",
"]",
"]",
"strm",
".",
"metadata",
"=",
"s",
"streams",
".",
"append",
"(",
"strm",
")",
"return",
"streams"
] |
2092b0cb30898139a247176bcf433d5a4abde7cb
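A short, hedged sketch of iterating the streams() listing above; `dev` is again an assumed Device handle rather than something defined in this record.

# Sketch only: `dev` is assumed to be an existing connectordb Device handle.
for strm in dev.streams():
    # each element is a Stream whose cached metadata came from the "ls" query above
    print(strm.metadata["name"])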
|
test
|
Device.export
|
Exports the device to the given directory. The directory can't exist.
You can later import this device by running import_device on a user.
|
connectordb/_device.py
|
def export(self, directory):
    """Exports the device to the given directory. The directory can't exist.
    You can later import this device by running import_device on a user.
    """
    if os.path.exists(directory):
        raise FileExistsError(
            "The device export directory already exists")
    os.mkdir(directory)
    # Write the device's info
    with open(os.path.join(directory, "device.json"), "w") as f:
        json.dump(self.data, f)
    # Now export the streams one by one
    for s in self.streams():
        s.export(os.path.join(directory, s.name))
|
def export(self, directory):
    """Exports the device to the given directory. The directory can't exist.
    You can later import this device by running import_device on a user.
    """
    if os.path.exists(directory):
        raise FileExistsError(
            "The device export directory already exists")
    os.mkdir(directory)
    # Write the device's info
    with open(os.path.join(directory, "device.json"), "w") as f:
        json.dump(self.data, f)
    # Now export the streams one by one
    for s in self.streams():
        s.export(os.path.join(directory, s.name))
|
[
"Exports",
"the",
"device",
"to",
"the",
"given",
"directory",
".",
"The",
"directory",
"can",
"t",
"exist",
".",
"You",
"can",
"later",
"import",
"this",
"device",
"by",
"running",
"import_device",
"on",
"a",
"user",
"."
] |
connectordb/connectordb-python
|
python
|
https://github.com/connectordb/connectordb-python/blob/2092b0cb30898139a247176bcf433d5a4abde7cb/connectordb/_device.py#L54-L70
|
[
"def",
"export",
"(",
"self",
",",
"directory",
")",
":",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"directory",
")",
":",
"raise",
"FileExistsError",
"(",
"\"The device export directory already exists\"",
")",
"os",
".",
"mkdir",
"(",
"directory",
")",
"# Write the device's info",
"with",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"directory",
",",
"\"device.json\"",
")",
",",
"\"w\"",
")",
"as",
"f",
":",
"json",
".",
"dump",
"(",
"self",
".",
"data",
",",
"f",
")",
"# Now export the streams one by one",
"for",
"s",
"in",
"self",
".",
"streams",
"(",
")",
":",
"s",
".",
"export",
"(",
"os",
".",
"path",
".",
"join",
"(",
"directory",
",",
"s",
".",
"name",
")",
")"
] |
2092b0cb30898139a247176bcf433d5a4abde7cb
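A hedged sketch of export(); the backup path is a placeholder and, as the code above shows, the target directory must not exist yet.

import os

# Sketch only: `dev` is an assumed Device handle, the path is a placeholder.
backup_dir = "./device_backup"
if os.path.exists(backup_dir):
    raise SystemExit("pick a directory that does not exist yet")  # export() would raise FileExistsError
dev.export(backup_dir)  # writes device.json plus one sub-directory per stream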
|
test
|
Device.import_stream
|
Imports a stream from the given directory. You export the Stream
by using stream.export()
|
connectordb/_device.py
|
def import_stream(self, directory):
    """Imports a stream from the given directory. You export the Stream
    by using stream.export()"""
    # read the stream's info
    with open(os.path.join(directory, "stream.json"), "r") as f:
        sdata = json.load(f)
    s = self[sdata["name"]]
    if s.exists():
        raise ValueError("The stream " + s.name + " already exists")
    # Create the stream empty first, so we can insert all the data without
    # worrying about schema violations or downlinks
    s.create()
    # Now, in order to insert data into this stream, we must be logged in as
    # the owning device
    ddb = DatabaseConnection(self.apikey, url=self.db.baseurl)
    d = Device(ddb, self.path)
    # Set up the owning device
    sown = d[s.name]
    # read the stream's info
    sown.insert_array(DatapointArray().loadExport(directory))
    # Now we MIGHT be able to recover the downlink data,
    # only if we are not logged in as the device that the stream is being inserted into
    # So we check. When downlink is true, data is inserted into the
    # downlink stream
    if (sdata["downlink"] and self.db.path != self.path):
        s.downlink = True
        with open(os.path.join(directory, "downlink.json"), "r") as f:
            s.insert_array(json.load(f))
    # And finally, update the device
    del sdata["name"]
    s.set(sdata)
|
def import_stream(self, directory):
    """Imports a stream from the given directory. You export the Stream
    by using stream.export()"""
    # read the stream's info
    with open(os.path.join(directory, "stream.json"), "r") as f:
        sdata = json.load(f)
    s = self[sdata["name"]]
    if s.exists():
        raise ValueError("The stream " + s.name + " already exists")
    # Create the stream empty first, so we can insert all the data without
    # worrying about schema violations or downlinks
    s.create()
    # Now, in order to insert data into this stream, we must be logged in as
    # the owning device
    ddb = DatabaseConnection(self.apikey, url=self.db.baseurl)
    d = Device(ddb, self.path)
    # Set up the owning device
    sown = d[s.name]
    # read the stream's info
    sown.insert_array(DatapointArray().loadExport(directory))
    # Now we MIGHT be able to recover the downlink data,
    # only if we are not logged in as the device that the stream is being inserted into
    # So we check. When downlink is true, data is inserted into the
    # downlink stream
    if (sdata["downlink"] and self.db.path != self.path):
        s.downlink = True
        with open(os.path.join(directory, "downlink.json"), "r") as f:
            s.insert_array(json.load(f))
    # And finally, update the device
    del sdata["name"]
    s.set(sdata)
|
[
"Imports",
"a",
"stream",
"from",
"the",
"given",
"directory",
".",
"You",
"export",
"the",
"Stream",
"by",
"using",
"stream",
".",
"export",
"()"
] |
connectordb/connectordb-python
|
python
|
https://github.com/connectordb/connectordb-python/blob/2092b0cb30898139a247176bcf433d5a4abde7cb/connectordb/_device.py#L72-L110
|
[
"def",
"import_stream",
"(",
"self",
",",
"directory",
")",
":",
"# read the stream's info",
"with",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"directory",
",",
"\"stream.json\"",
")",
",",
"\"r\"",
")",
"as",
"f",
":",
"sdata",
"=",
"json",
".",
"load",
"(",
"f",
")",
"s",
"=",
"self",
"[",
"sdata",
"[",
"\"name\"",
"]",
"]",
"if",
"s",
".",
"exists",
"(",
")",
":",
"raise",
"ValueError",
"(",
"\"The stream \"",
"+",
"s",
".",
"name",
"+",
"\" already exists\"",
")",
"# Create the stream empty first, so we can insert all the data without",
"# worrying about schema violations or downlinks",
"s",
".",
"create",
"(",
")",
"# Now, in order to insert data into this stream, we must be logged in as",
"# the owning device",
"ddb",
"=",
"DatabaseConnection",
"(",
"self",
".",
"apikey",
",",
"url",
"=",
"self",
".",
"db",
".",
"baseurl",
")",
"d",
"=",
"Device",
"(",
"ddb",
",",
"self",
".",
"path",
")",
"# Set up the owning device",
"sown",
"=",
"d",
"[",
"s",
".",
"name",
"]",
"# read the stream's info",
"sown",
".",
"insert_array",
"(",
"DatapointArray",
"(",
")",
".",
"loadExport",
"(",
"directory",
")",
")",
"# Now we MIGHT be able to recover the downlink data,",
"# only if we are not logged in as the device that the stream is being inserted into",
"# So we check. When downlink is true, data is inserted into the",
"# downlink stream",
"if",
"(",
"sdata",
"[",
"\"downlink\"",
"]",
"and",
"self",
".",
"db",
".",
"path",
"!=",
"self",
".",
"path",
")",
":",
"s",
".",
"downlink",
"=",
"True",
"with",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"directory",
",",
"\"downlink.json\"",
")",
",",
"\"r\"",
")",
"as",
"f",
":",
"s",
".",
"insert_array",
"(",
"json",
".",
"load",
"(",
"f",
")",
")",
"# And finally, update the device",
"del",
"sdata",
"[",
"\"name\"",
"]",
"s",
".",
"set",
"(",
"sdata",
")"
] |
2092b0cb30898139a247176bcf433d5a4abde7cb
|
test
|
Shosetsu.search_vndb
|
Search vndb.org for a term and return matching results from type.
:param stype: type to search for.
Type should be one of:
v - Visual Novels
r - Releases
p - Producers
s - Staff
c - Characters
g - Tags
i - traits
u - Users
:param term: string to search for
:return: Results. Result format depends on what you searched for. See the Parsing.py module for more specific documentation.
Exceptions:
aiohttp.HttpBadRequest - On 404s
VNDBOneResult - When you search for something but it instead redirects us to a direct content page
VNDBNoResults - When nothing was found for that search
VNDBBadStype - Raised when an incorrect search type is passed
|
Shosetsu/VNDB.py
|
async def search_vndb(self, stype, term):
    """
    Search vndb.org for a term and return matching results from type.
    :param stype: type to search for.
        Type should be one of:
            v - Visual Novels
            r - Releases
            p - Producers
            s - Staff
            c - Characters
            g - Tags
            i - traits
            u - Users
    :param term: string to search for
    :return: Results. Result format depends on what you searched for. See the Parsing.py module for more specific documentation.
    Exceptions:
        aiohttp.HttpBadRequest - On 404s
        VNDBOneResult - When you search for something but it instead redirects us to a direct content page
        VNDBNoResults - When nothing was found for that search
        VNDBBadStype - Raised when an incorrect search type is passed
    """
    fstype = ""
    if stype not in ['v', 'r', 'p', 's', 'c', 'g', 'i', 'u']:
        raise VNDBBadStype(stype)
    else:
        if stype in ['v', 'p', 's', 'c', 'u']:
            fstype = '/{}/all'.format(stype)
        elif stype in ['g', 'i']:
            fstype = '/{}/list'.format(stype)
        elif stype == 'r':
            fstype = '/r'
    async with self.session.get(self.base_url + "{}".format(fstype), params={"q": term}, headers=self.headers) as response:
        if response.status == 404:
            raise aiohttp.HttpBadRequest("VN Not Found")
        elif 'q=' not in response.url:
            raise VNDBOneResult(term, response.url.rsplit('/', 1)[1])
        text = await response.text()
        if 'No Results' in text:
            raise VNDBNoResults(term)
        soup = BeautifulSoup(text, 'lxml')
        resp = await self.parse_search(stype, soup)
        if resp == []:
            raise VNDBNoResults(term)
        return resp
|
async def search_vndb(self, stype, term):
    """
    Search vndb.org for a term and return matching results from type.
    :param stype: type to search for.
        Type should be one of:
            v - Visual Novels
            r - Releases
            p - Producers
            s - Staff
            c - Characters
            g - Tags
            i - traits
            u - Users
    :param term: string to search for
    :return: Results. Result format depends on what you searched for. See the Parsing.py module for more specific documentation.
    Exceptions:
        aiohttp.HttpBadRequest - On 404s
        VNDBOneResult - When you search for something but it instead redirects us to a direct content page
        VNDBNoResults - When nothing was found for that search
        VNDBBadStype - Raised when an incorrect search type is passed
    """
    fstype = ""
    if stype not in ['v', 'r', 'p', 's', 'c', 'g', 'i', 'u']:
        raise VNDBBadStype(stype)
    else:
        if stype in ['v', 'p', 's', 'c', 'u']:
            fstype = '/{}/all'.format(stype)
        elif stype in ['g', 'i']:
            fstype = '/{}/list'.format(stype)
        elif stype == 'r':
            fstype = '/r'
    async with self.session.get(self.base_url + "{}".format(fstype), params={"q": term}, headers=self.headers) as response:
        if response.status == 404:
            raise aiohttp.HttpBadRequest("VN Not Found")
        elif 'q=' not in response.url:
            raise VNDBOneResult(term, response.url.rsplit('/', 1)[1])
        text = await response.text()
        if 'No Results' in text:
            raise VNDBNoResults(term)
        soup = BeautifulSoup(text, 'lxml')
        resp = await self.parse_search(stype, soup)
        if resp == []:
            raise VNDBNoResults(term)
        return resp
|
[
"Search",
"vndb",
".",
"org",
"for",
"a",
"term",
"and",
"return",
"matching",
"results",
"from",
"type",
"."
] |
ccubed/Shosetsu
|
python
|
https://github.com/ccubed/Shosetsu/blob/eba01c058100ec8806129b11a2859f3126a1b101/Shosetsu/VNDB.py#L17-L62
|
[
"async",
"def",
"search_vndb",
"(",
"self",
",",
"stype",
",",
"term",
")",
":",
"fstype",
"=",
"\"\"",
"if",
"stype",
"not",
"in",
"[",
"'v'",
",",
"'r'",
",",
"'p'",
",",
"'s'",
",",
"'c'",
",",
"'g'",
",",
"'i'",
",",
"'u'",
"]",
":",
"raise",
"VNDBBadStype",
"(",
"stype",
")",
"else",
":",
"if",
"stype",
"in",
"[",
"'v'",
",",
"'p'",
",",
"'s'",
",",
"'c'",
",",
"'u'",
"]",
":",
"fstype",
"=",
"'/{}/all'",
".",
"format",
"(",
"stype",
")",
"elif",
"stype",
"in",
"[",
"'g'",
",",
"'i'",
"]",
":",
"fstype",
"=",
"'/{}/list'",
".",
"format",
"(",
"stype",
")",
"elif",
"stype",
"==",
"'r'",
":",
"fstype",
"=",
"'/r'",
"async",
"with",
"self",
".",
"session",
".",
"get",
"(",
"self",
".",
"base_url",
"+",
"\"{}\"",
".",
"format",
"(",
"fstype",
")",
",",
"params",
"=",
"{",
"\"q\"",
":",
"term",
"}",
",",
"headers",
"=",
"self",
".",
"headers",
")",
"as",
"response",
":",
"if",
"response",
".",
"status",
"==",
"404",
":",
"raise",
"aiohttp",
".",
"HttpBadRequest",
"(",
"\"VN Not Found\"",
")",
"elif",
"'q='",
"not",
"in",
"response",
".",
"url",
":",
"raise",
"VNDBOneResult",
"(",
"term",
",",
"response",
".",
"url",
".",
"rsplit",
"(",
"'/'",
",",
"1",
")",
"[",
"1",
"]",
")",
"text",
"=",
"await",
"response",
".",
"text",
"(",
")",
"if",
"'No Results'",
"in",
"text",
":",
"raise",
"VNDBNoResults",
"(",
"term",
")",
"soup",
"=",
"BeautifulSoup",
"(",
"text",
",",
"'lxml'",
")",
"resp",
"=",
"await",
"self",
".",
"parse_search",
"(",
"stype",
",",
"soup",
")",
"if",
"resp",
"==",
"[",
"]",
":",
"raise",
"VNDBNoResults",
"(",
"term",
")",
"return",
"resp"
] |
eba01c058100ec8806129b11a2859f3126a1b101
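A hedged sketch of driving the coroutine above from asyncio; the Shosetsu import path and constructor arguments are assumptions, so check the package before relying on them, and the search term is a placeholder.

import asyncio

from Shosetsu import Shosetsu  # import path and constructor are assumptions

async def main():
    client = Shosetsu()
    # 'v' searches visual novels; the other one-letter codes are listed in the docstring above
    results = await client.search_vndb('v', 'Clannad')
    for hit in results[:5]:
        print(hit)

asyncio.run(main())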
|
test
|
Shosetsu.get_novel
|
If term is an ID will return that specific ID. If it's a string, it will return the details of the first search result for that term.
Returned Dictionary Has the following structure:
Please note, if it says list or dict, it means the python types.
Indentation indicates level. So English is ['Titles']['English']
'Titles' - Contains all the titles found for the anime
'English' - English title of the novel
'Alt' - Alternative title (Usually the Japanese one, but other languages exist)
'Aliases' - A list of str that define the aliases as given in VNDB.
'Img' - Link to the Image shown on VNDB for that Visual Novel
'Length' - Length given by VNDB
'Developers' - A list containing the Developers of the VN.
'Publishers' - A list containing the Publishers of the VN.
'Tags' - Contains 3 lists of different tag categories
'Content' - List of tags that have to do with the story's content as defined by VNDB. Ex: Edo Era
'Technology' - List of tags that have to do with the VN's technology. Ex: Protagonist with a Face (Wew Lad, 21st century)
'Erotic' - List of tags that have to do with the VN's sexual content. Ex: Tentacles
'Releases' - A list of dictionaries. They have the following format.
'Date' - Date VNDB lists for release
'Ages' - Age group appropriate for as determined on VNDB
'Platform' - Release Platform
'Name' - The name for this particular Release
'ID' - The id for this release, also doubles as the link if you append https://vndb.org/ to it
'Description' - Contains novel description text if there is any.
'ID' - The id for this novel, also doubles as the link if you append https://vndb.org/ to it
:param term: id or name to get details of.
:param hide_nsfw: bool if 'Img' should filter links flagged as NSFW or not. (no reason to be kwargs...yet)
:return dict: Dictionary with the parsed results of a novel
|
Shosetsu/VNDB.py
|
async def get_novel(self, term, hide_nsfw=False):
    """
    If term is an ID will return that specific ID. If it's a string, it will return the details of the first search result for that term.
    Returned Dictionary Has the following structure:
    Please note, if it says list or dict, it means the python types.
    Indentation indicates level. So English is ['Titles']['English']
    'Titles' - Contains all the titles found for the anime
        'English' - English title of the novel
        'Alt' - Alternative title (Usually the Japanese one, but other languages exist)
        'Aliases' - A list of str that define the aliases as given in VNDB.
    'Img' - Link to the Image shown on VNDB for that Visual Novel
    'Length' - Length given by VNDB
    'Developers' - A list containing the Developers of the VN.
    'Publishers' - A list containing the Publishers of the VN.
    'Tags' - Contains 3 lists of different tag categories
        'Content' - List of tags that have to do with the story's content as defined by VNDB. Ex: Edo Era
        'Technology' - List of tags that have to do with the VN's technology. Ex: Protagonist with a Face (Wew Lad, 21st century)
        'Erotic' - List of tags that have to do with the VN's sexual content. Ex: Tentacles
    'Releases' - A list of dictionaries. They have the following format.
        'Date' - Date VNDB lists for release
        'Ages' - Age group appropriate for as determined on VNDB
        'Platform' - Release Platform
        'Name' - The name for this particular Release
        'ID' - The id for this release, also doubles as the link if you append https://vndb.org/ to it
    'Description' - Contains novel description text if there is any.
    'ID' - The id for this novel, also doubles as the link if you append https://vndb.org/ to it
    :param term: id or name to get details of.
    :param hide_nsfw: bool if 'Img' should filter links flagged as NSFW or not. (no reason to be kwargs...yet)
    :return dict: Dictionary with the parsed results of a novel
    """
    if not term.isdigit() and not term.startswith('v'):
        try:
            vnid = await self.search_vndb('v', term)
            vnid = vnid[0]['id']
        except VNDBOneResult as e:
            vnid = e.vnid
    else:
        vnid = str(term)
    if not vnid.startswith('v'):
        vnid = 'v' + vnid
    async with self.session.get(self.base_url + "/{}".format(vnid), headers=self.headers) as response:
        if response.status == 404:
            raise aiohttp.HttpBadRequest("VNDB reported that there is no data for ID {}".format(vnid))
        text = await response.text()
        soup = BeautifulSoup(text, 'lxml')
        data = {'titles': {'english': [], 'alt': [], 'aliases': []}, 'img': None, 'length': None, 'developers': [], 'publishers': [], 'tags': {}, 'releases': {}, 'id': vnid}
        data['titles']['english'] = soup.find_all('div', class_='mainbox')[0].h1.string
        try:
            data['titles']['alt'] = soup.find_all('h2', class_='alttitle')[0].string
        except IndexError:
            data['titles']['alt'] = None
        try:
            imgdiv = soup.find_all('div', class_='vnimg')[0]
            if not (hide_nsfw and 'class' in imgdiv.p.attrs):
                data['img'] = 'https:' + imgdiv.img.get('src')
        except AttributeError:
            pass
        for item in soup.find_all('tr'):
            if 'class' in item.attrs or len(list(item.children)) == 1:
                continue
            if item.td.string == 'Aliases':
                tlist = []
                for alias in list(item.children)[1:]:
                    tlist.append(alias.string)
                data['titles']['aliases'] = tlist
            elif item.td.string == 'Length':
                data['length'] = list(item.children)[1].string
            elif item.td.string == 'Developer':
                tl = []
                for item in list(list(item.children)[1].children):
                    if isinstance(item, NavigableString):
                        continue
                    if 'href' in item.attrs:
                        tl.append(item.string)
                data['developers'] = tl
                del tl
            elif item.td.string == 'Publishers':
                tl = []
                for item in list(list(item.children)[1].children):
                    if isinstance(item, NavigableString):
                        continue
                    if 'href' in item.attrs:
                        tl.append(item.string)
                data['publishers'] = tl
        conttags = []
        techtags = []
        erotags = []
        test = soup.find('div', attrs={'id': 'vntags'})
        if test:
            for item in list(test.children):
                if isinstance(item, NavigableString):
                    continue
                if 'class' not in item.attrs:
                    continue
                if 'cont' in " ".join(item.get('class')):
                    conttags.append(item.a.string)
                if 'tech' in " ".join(item.get('class')):
                    techtags.append(item.a.string)
                if 'ero' in " ".join(item.get('class')):
                    erotags.append(item.a.string)
        data['tags']['content'] = conttags if len(conttags) else None
        data['tags']['technology'] = techtags if len(techtags) else None
        data['tags']['erotic'] = erotags if len(erotags) else None
        del conttags
        del techtags
        del erotags
        releases = []
        cur_lang = None
        for item in list(soup.find('div', class_='mainbox releases').table.children):
            if isinstance(item, NavigableString):
                continue
            if 'class' in item.attrs:
                if cur_lang is None:
                    cur_lang = item.td.abbr.get('title')
                else:
                    data['releases'][cur_lang] = releases
                    releases = []
                    cur_lang = item.td.abbr.get('title')
            else:
                temp_rel = {'date': 0, 'ages': 0, 'platform': 0, 'name': 0, 'id': 0}
                children = list(item.children)
                temp_rel['date'] = children[0].string
                temp_rel['ages'] = children[1].string
                temp_rel['platform'] = children[2].abbr.get('title')
                temp_rel['name'] = children[3].a.string
                temp_rel['id'] = children[3].a.get('href')[1:]
                del children
                releases.append(temp_rel)
                del temp_rel
        if len(releases) > 0 and cur_lang is not None:
            data['releases'][cur_lang] = releases
        del releases
        del cur_lang
        desc = ""
        for item in list(soup.find_all('td', class_='vndesc')[0].children)[1].contents:
            if not isinstance(item, NavigableString):
                continue
            if item.startswith('['):
                continue
            if item.endswith(']'):
                continue
            desc += item.string + "\n"
        data['description'] = desc
        return data
|
async def get_novel(self, term, hide_nsfw=False):
    """
    If term is an ID will return that specific ID. If it's a string, it will return the details of the first search result for that term.
    Returned Dictionary Has the following structure:
    Please note, if it says list or dict, it means the python types.
    Indentation indicates level. So English is ['Titles']['English']
    'Titles' - Contains all the titles found for the anime
        'English' - English title of the novel
        'Alt' - Alternative title (Usually the Japanese one, but other languages exist)
        'Aliases' - A list of str that define the aliases as given in VNDB.
    'Img' - Link to the Image shown on VNDB for that Visual Novel
    'Length' - Length given by VNDB
    'Developers' - A list containing the Developers of the VN.
    'Publishers' - A list containing the Publishers of the VN.
    'Tags' - Contains 3 lists of different tag categories
        'Content' - List of tags that have to do with the story's content as defined by VNDB. Ex: Edo Era
        'Technology' - List of tags that have to do with the VN's technology. Ex: Protagonist with a Face (Wew Lad, 21st century)
        'Erotic' - List of tags that have to do with the VN's sexual content. Ex: Tentacles
    'Releases' - A list of dictionaries. They have the following format.
        'Date' - Date VNDB lists for release
        'Ages' - Age group appropriate for as determined on VNDB
        'Platform' - Release Platform
        'Name' - The name for this particular Release
        'ID' - The id for this release, also doubles as the link if you append https://vndb.org/ to it
    'Description' - Contains novel description text if there is any.
    'ID' - The id for this novel, also doubles as the link if you append https://vndb.org/ to it
    :param term: id or name to get details of.
    :param hide_nsfw: bool if 'Img' should filter links flagged as NSFW or not. (no reason to be kwargs...yet)
    :return dict: Dictionary with the parsed results of a novel
    """
    if not term.isdigit() and not term.startswith('v'):
        try:
            vnid = await self.search_vndb('v', term)
            vnid = vnid[0]['id']
        except VNDBOneResult as e:
            vnid = e.vnid
    else:
        vnid = str(term)
    if not vnid.startswith('v'):
        vnid = 'v' + vnid
    async with self.session.get(self.base_url + "/{}".format(vnid), headers=self.headers) as response:
        if response.status == 404:
            raise aiohttp.HttpBadRequest("VNDB reported that there is no data for ID {}".format(vnid))
        text = await response.text()
        soup = BeautifulSoup(text, 'lxml')
        data = {'titles': {'english': [], 'alt': [], 'aliases': []}, 'img': None, 'length': None, 'developers': [], 'publishers': [], 'tags': {}, 'releases': {}, 'id': vnid}
        data['titles']['english'] = soup.find_all('div', class_='mainbox')[0].h1.string
        try:
            data['titles']['alt'] = soup.find_all('h2', class_='alttitle')[0].string
        except IndexError:
            data['titles']['alt'] = None
        try:
            imgdiv = soup.find_all('div', class_='vnimg')[0]
            if not (hide_nsfw and 'class' in imgdiv.p.attrs):
                data['img'] = 'https:' + imgdiv.img.get('src')
        except AttributeError:
            pass
        for item in soup.find_all('tr'):
            if 'class' in item.attrs or len(list(item.children)) == 1:
                continue
            if item.td.string == 'Aliases':
                tlist = []
                for alias in list(item.children)[1:]:
                    tlist.append(alias.string)
                data['titles']['aliases'] = tlist
            elif item.td.string == 'Length':
                data['length'] = list(item.children)[1].string
            elif item.td.string == 'Developer':
                tl = []
                for item in list(list(item.children)[1].children):
                    if isinstance(item, NavigableString):
                        continue
                    if 'href' in item.attrs:
                        tl.append(item.string)
                data['developers'] = tl
                del tl
            elif item.td.string == 'Publishers':
                tl = []
                for item in list(list(item.children)[1].children):
                    if isinstance(item, NavigableString):
                        continue
                    if 'href' in item.attrs:
                        tl.append(item.string)
                data['publishers'] = tl
        conttags = []
        techtags = []
        erotags = []
        test = soup.find('div', attrs={'id': 'vntags'})
        if test:
            for item in list(test.children):
                if isinstance(item, NavigableString):
                    continue
                if 'class' not in item.attrs:
                    continue
                if 'cont' in " ".join(item.get('class')):
                    conttags.append(item.a.string)
                if 'tech' in " ".join(item.get('class')):
                    techtags.append(item.a.string)
                if 'ero' in " ".join(item.get('class')):
                    erotags.append(item.a.string)
        data['tags']['content'] = conttags if len(conttags) else None
        data['tags']['technology'] = techtags if len(techtags) else None
        data['tags']['erotic'] = erotags if len(erotags) else None
        del conttags
        del techtags
        del erotags
        releases = []
        cur_lang = None
        for item in list(soup.find('div', class_='mainbox releases').table.children):
            if isinstance(item, NavigableString):
                continue
            if 'class' in item.attrs:
                if cur_lang is None:
                    cur_lang = item.td.abbr.get('title')
                else:
                    data['releases'][cur_lang] = releases
                    releases = []
                    cur_lang = item.td.abbr.get('title')
            else:
                temp_rel = {'date': 0, 'ages': 0, 'platform': 0, 'name': 0, 'id': 0}
                children = list(item.children)
                temp_rel['date'] = children[0].string
                temp_rel['ages'] = children[1].string
                temp_rel['platform'] = children[2].abbr.get('title')
                temp_rel['name'] = children[3].a.string
                temp_rel['id'] = children[3].a.get('href')[1:]
                del children
                releases.append(temp_rel)
                del temp_rel
        if len(releases) > 0 and cur_lang is not None:
            data['releases'][cur_lang] = releases
        del releases
        del cur_lang
        desc = ""
        for item in list(soup.find_all('td', class_='vndesc')[0].children)[1].contents:
            if not isinstance(item, NavigableString):
                continue
            if item.startswith('['):
                continue
            if item.endswith(']'):
                continue
            desc += item.string + "\n"
        data['description'] = desc
        return data
|
[
"If",
"term",
"is",
"an",
"ID",
"will",
"return",
"that",
"specific",
"ID",
".",
"If",
"it",
"s",
"a",
"string",
"it",
"will",
"return",
"the",
"details",
"of",
"the",
"first",
"search",
"result",
"for",
"that",
"term",
".",
"Returned",
"Dictionary",
"Has",
"the",
"following",
"structure",
":",
"Please",
"note",
"if",
"it",
"says",
"list",
"or",
"dict",
"it",
"means",
"the",
"python",
"types",
".",
"Indentation",
"indicates",
"level",
".",
"So",
"English",
"is",
"[",
"Titles",
"]",
"[",
"English",
"]"
] |
ccubed/Shosetsu
|
python
|
https://github.com/ccubed/Shosetsu/blob/eba01c058100ec8806129b11a2859f3126a1b101/Shosetsu/VNDB.py#L64-L209
|
[
"async",
"def",
"get_novel",
"(",
"self",
",",
"term",
",",
"hide_nsfw",
"=",
"False",
")",
":",
"if",
"not",
"term",
".",
"isdigit",
"(",
")",
"and",
"not",
"term",
".",
"startswith",
"(",
"'v'",
")",
":",
"try",
":",
"vnid",
"=",
"await",
"self",
".",
"search_vndb",
"(",
"'v'",
",",
"term",
")",
"vnid",
"=",
"vnid",
"[",
"0",
"]",
"[",
"'id'",
"]",
"except",
"VNDBOneResult",
"as",
"e",
":",
"vnid",
"=",
"e",
".",
"vnid",
"else",
":",
"vnid",
"=",
"str",
"(",
"term",
")",
"if",
"not",
"vnid",
".",
"startswith",
"(",
"'v'",
")",
":",
"vnid",
"=",
"'v'",
"+",
"vnid",
"async",
"with",
"self",
".",
"session",
".",
"get",
"(",
"self",
".",
"base_url",
"+",
"\"/{}\"",
".",
"format",
"(",
"vnid",
")",
",",
"headers",
"=",
"self",
".",
"headers",
")",
"as",
"response",
":",
"if",
"response",
".",
"status",
"==",
"404",
":",
"raise",
"aiohttp",
".",
"HttpBadRequest",
"(",
"\"VNDB reported that there is no data for ID {}\"",
".",
"format",
"(",
"vnid",
")",
")",
"text",
"=",
"await",
"response",
".",
"text",
"(",
")",
"soup",
"=",
"BeautifulSoup",
"(",
"text",
",",
"'lxml'",
")",
"data",
"=",
"{",
"'titles'",
":",
"{",
"'english'",
":",
"[",
"]",
",",
"'alt'",
":",
"[",
"]",
",",
"'aliases'",
":",
"[",
"]",
"}",
",",
"'img'",
":",
"None",
",",
"'length'",
":",
"None",
",",
"'developers'",
":",
"[",
"]",
",",
"'publishers'",
":",
"[",
"]",
",",
"'tags'",
":",
"{",
"}",
",",
"'releases'",
":",
"{",
"}",
",",
"'id'",
":",
"vnid",
"}",
"data",
"[",
"'titles'",
"]",
"[",
"'english'",
"]",
"=",
"soup",
".",
"find_all",
"(",
"'div'",
",",
"class_",
"=",
"'mainbox'",
")",
"[",
"0",
"]",
".",
"h1",
".",
"string",
"try",
":",
"data",
"[",
"'titles'",
"]",
"[",
"'alt'",
"]",
"=",
"soup",
".",
"find_all",
"(",
"'h2'",
",",
"class_",
"=",
"'alttitle'",
")",
"[",
"0",
"]",
".",
"string",
"except",
"IndexError",
":",
"data",
"[",
"'titles'",
"]",
"[",
"'alt'",
"]",
"=",
"None",
"try",
":",
"imgdiv",
"=",
"soup",
".",
"find_all",
"(",
"'div'",
",",
"class_",
"=",
"'vnimg'",
")",
"[",
"0",
"]",
"if",
"not",
"(",
"hide_nsfw",
"and",
"'class'",
"in",
"imgdiv",
".",
"p",
".",
"attrs",
")",
":",
"data",
"[",
"'img'",
"]",
"=",
"'https:'",
"+",
"imgdiv",
".",
"img",
".",
"get",
"(",
"'src'",
")",
"except",
"AttributeError",
":",
"pass",
"for",
"item",
"in",
"soup",
".",
"find_all",
"(",
"'tr'",
")",
":",
"if",
"'class'",
"in",
"item",
".",
"attrs",
"or",
"len",
"(",
"list",
"(",
"item",
".",
"children",
")",
")",
"==",
"1",
":",
"continue",
"if",
"item",
".",
"td",
".",
"string",
"==",
"'Aliases'",
":",
"tlist",
"=",
"[",
"]",
"for",
"alias",
"in",
"list",
"(",
"item",
".",
"children",
")",
"[",
"1",
":",
"]",
":",
"tlist",
".",
"append",
"(",
"alias",
".",
"string",
")",
"data",
"[",
"'titles'",
"]",
"[",
"'aliases'",
"]",
"=",
"tlist",
"elif",
"item",
".",
"td",
".",
"string",
"==",
"'Length'",
":",
"data",
"[",
"'length'",
"]",
"=",
"list",
"(",
"item",
".",
"children",
")",
"[",
"1",
"]",
".",
"string",
"elif",
"item",
".",
"td",
".",
"string",
"==",
"'Developer'",
":",
"tl",
"=",
"[",
"]",
"for",
"item",
"in",
"list",
"(",
"list",
"(",
"item",
".",
"children",
")",
"[",
"1",
"]",
".",
"children",
")",
":",
"if",
"isinstance",
"(",
"item",
",",
"NavigableString",
")",
":",
"continue",
"if",
"'href'",
"in",
"item",
".",
"attrs",
":",
"tl",
".",
"append",
"(",
"item",
".",
"string",
")",
"data",
"[",
"'developers'",
"]",
"=",
"tl",
"del",
"tl",
"elif",
"item",
".",
"td",
".",
"string",
"==",
"'Publishers'",
":",
"tl",
"=",
"[",
"]",
"for",
"item",
"in",
"list",
"(",
"list",
"(",
"item",
".",
"children",
")",
"[",
"1",
"]",
".",
"children",
")",
":",
"if",
"isinstance",
"(",
"item",
",",
"NavigableString",
")",
":",
"continue",
"if",
"'href'",
"in",
"item",
".",
"attrs",
":",
"tl",
".",
"append",
"(",
"item",
".",
"string",
")",
"data",
"[",
"'publishers'",
"]",
"=",
"tl",
"conttags",
"=",
"[",
"]",
"techtags",
"=",
"[",
"]",
"erotags",
"=",
"[",
"]",
"test",
"=",
"soup",
".",
"find",
"(",
"'div'",
",",
"attrs",
"=",
"{",
"'id'",
":",
"'vntags'",
"}",
")",
"if",
"test",
":",
"for",
"item",
"in",
"list",
"(",
"test",
".",
"children",
")",
":",
"if",
"isinstance",
"(",
"item",
",",
"NavigableString",
")",
":",
"continue",
"if",
"'class'",
"not",
"in",
"item",
".",
"attrs",
":",
"continue",
"if",
"'cont'",
"in",
"\" \"",
".",
"join",
"(",
"item",
".",
"get",
"(",
"'class'",
")",
")",
":",
"conttags",
".",
"append",
"(",
"item",
".",
"a",
".",
"string",
")",
"if",
"'tech'",
"in",
"\" \"",
".",
"join",
"(",
"item",
".",
"get",
"(",
"'class'",
")",
")",
":",
"techtags",
".",
"append",
"(",
"item",
".",
"a",
".",
"string",
")",
"if",
"'ero'",
"in",
"\" \"",
".",
"join",
"(",
"item",
".",
"get",
"(",
"'class'",
")",
")",
":",
"erotags",
".",
"append",
"(",
"item",
".",
"a",
".",
"string",
")",
"data",
"[",
"'tags'",
"]",
"[",
"'content'",
"]",
"=",
"conttags",
"if",
"len",
"(",
"conttags",
")",
"else",
"None",
"data",
"[",
"'tags'",
"]",
"[",
"'technology'",
"]",
"=",
"techtags",
"if",
"len",
"(",
"techtags",
")",
"else",
"None",
"data",
"[",
"'tags'",
"]",
"[",
"'erotic'",
"]",
"=",
"erotags",
"if",
"len",
"(",
"erotags",
")",
"else",
"None",
"del",
"conttags",
"del",
"techtags",
"del",
"erotags",
"releases",
"=",
"[",
"]",
"cur_lang",
"=",
"None",
"for",
"item",
"in",
"list",
"(",
"soup",
".",
"find",
"(",
"'div'",
",",
"class_",
"=",
"'mainbox releases'",
")",
".",
"table",
".",
"children",
")",
":",
"if",
"isinstance",
"(",
"item",
",",
"NavigableString",
")",
":",
"continue",
"if",
"'class'",
"in",
"item",
".",
"attrs",
":",
"if",
"cur_lang",
"is",
"None",
":",
"cur_lang",
"=",
"item",
".",
"td",
".",
"abbr",
".",
"get",
"(",
"'title'",
")",
"else",
":",
"data",
"[",
"'releases'",
"]",
"[",
"cur_lang",
"]",
"=",
"releases",
"releases",
"=",
"[",
"]",
"cur_lang",
"=",
"item",
".",
"td",
".",
"abbr",
".",
"get",
"(",
"'title'",
")",
"else",
":",
"temp_rel",
"=",
"{",
"'date'",
":",
"0",
",",
"'ages'",
":",
"0",
",",
"'platform'",
":",
"0",
",",
"'name'",
":",
"0",
",",
"'id'",
":",
"0",
"}",
"children",
"=",
"list",
"(",
"item",
".",
"children",
")",
"temp_rel",
"[",
"'date'",
"]",
"=",
"children",
"[",
"0",
"]",
".",
"string",
"temp_rel",
"[",
"'ages'",
"]",
"=",
"children",
"[",
"1",
"]",
".",
"string",
"temp_rel",
"[",
"'platform'",
"]",
"=",
"children",
"[",
"2",
"]",
".",
"abbr",
".",
"get",
"(",
"'title'",
")",
"temp_rel",
"[",
"'name'",
"]",
"=",
"children",
"[",
"3",
"]",
".",
"a",
".",
"string",
"temp_rel",
"[",
"'id'",
"]",
"=",
"children",
"[",
"3",
"]",
".",
"a",
".",
"get",
"(",
"'href'",
")",
"[",
"1",
":",
"]",
"del",
"children",
"releases",
".",
"append",
"(",
"temp_rel",
")",
"del",
"temp_rel",
"if",
"len",
"(",
"releases",
")",
">",
"0",
"and",
"cur_lang",
"is",
"not",
"None",
":",
"data",
"[",
"'releases'",
"]",
"[",
"cur_lang",
"]",
"=",
"releases",
"del",
"releases",
"del",
"cur_lang",
"desc",
"=",
"\"\"",
"for",
"item",
"in",
"list",
"(",
"soup",
".",
"find_all",
"(",
"'td'",
",",
"class_",
"=",
"'vndesc'",
")",
"[",
"0",
"]",
".",
"children",
")",
"[",
"1",
"]",
".",
"contents",
":",
"if",
"not",
"isinstance",
"(",
"item",
",",
"NavigableString",
")",
":",
"continue",
"if",
"item",
".",
"startswith",
"(",
"'['",
")",
":",
"continue",
"if",
"item",
".",
"endswith",
"(",
"']'",
")",
":",
"continue",
"desc",
"+=",
"item",
".",
"string",
"+",
"\"\\n\"",
"data",
"[",
"'description'",
"]",
"=",
"desc",
"return",
"data"
] |
eba01c058100ec8806129b11a2859f3126a1b101
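A hedged sketch of get_novel(); the client construction is an assumption (as noted for search_vndb) and the title is a placeholder. The dictionary keys used below are the lowercase keys the parser actually builds.

import asyncio

from Shosetsu import Shosetsu  # import path and constructor are assumptions

async def main():
    client = Shosetsu()
    novel = await client.get_novel("Steins;Gate", hide_nsfw=True)
    print(novel['titles']['english'], '-', novel['length'])
    for lang, rels in novel['releases'].items():
        print(lang, len(rels), "releases")

asyncio.run(main())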
|
test
|
Shosetsu.parse_search
|
This is our parsing dispatcher
:param stype: Search type category
:param soup: The beautifulsoup object that contains the parsed html
|
Shosetsu/VNDB.py
|
async def parse_search(self, stype, soup):
    """
    This is our parsing dispatcher
    :param stype: Search type category
    :param soup: The beautifulsoup object that contains the parsed html
    """
    if stype == 'v':
        return await parse_vn_results(soup)
    elif stype == 'r':
        return await parse_release_results(soup)
    elif stype == 'p':
        return await parse_prod_staff_results(soup)
    elif stype == 's':
        return await parse_prod_staff_results(soup)
    elif stype == 'c':
        return await parse_character_results(soup)
    elif stype == 'g':
        return await parse_tag_results(soup)
    elif stype == 'i':
        return await parse_tag_results(soup)
    elif stype == 'u':
        return await parse_user_results(soup)
|
async def parse_search(self, stype, soup):
    """
    This is our parsing dispatcher
    :param stype: Search type category
    :param soup: The beautifulsoup object that contains the parsed html
    """
    if stype == 'v':
        return await parse_vn_results(soup)
    elif stype == 'r':
        return await parse_release_results(soup)
    elif stype == 'p':
        return await parse_prod_staff_results(soup)
    elif stype == 's':
        return await parse_prod_staff_results(soup)
    elif stype == 'c':
        return await parse_character_results(soup)
    elif stype == 'g':
        return await parse_tag_results(soup)
    elif stype == 'i':
        return await parse_tag_results(soup)
    elif stype == 'u':
        return await parse_user_results(soup)
|
[
"This",
"is",
"our",
"parsing",
"dispatcher"
] |
ccubed/Shosetsu
|
python
|
https://github.com/ccubed/Shosetsu/blob/eba01c058100ec8806129b11a2859f3126a1b101/Shosetsu/VNDB.py#L211-L233
|
[
"async",
"def",
"parse_search",
"(",
"self",
",",
"stype",
",",
"soup",
")",
":",
"if",
"stype",
"==",
"'v'",
":",
"return",
"await",
"parse_vn_results",
"(",
"soup",
")",
"elif",
"stype",
"==",
"'r'",
":",
"return",
"await",
"parse_release_results",
"(",
"soup",
")",
"elif",
"stype",
"==",
"'p'",
":",
"return",
"await",
"parse_prod_staff_results",
"(",
"soup",
")",
"elif",
"stype",
"==",
"'s'",
":",
"return",
"await",
"parse_prod_staff_results",
"(",
"soup",
")",
"elif",
"stype",
"==",
"'c'",
":",
"return",
"await",
"parse_character_results",
"(",
"soup",
")",
"elif",
"stype",
"==",
"'g'",
":",
"return",
"await",
"parse_tag_results",
"(",
"soup",
")",
"elif",
"stype",
"==",
"'i'",
":",
"return",
"await",
"parse_tag_results",
"(",
"soup",
")",
"elif",
"stype",
"==",
"'u'",
":",
"return",
"await",
"parse_user_results",
"(",
"soup",
")"
] |
eba01c058100ec8806129b11a2859f3126a1b101
|
test
|
Dataset.addStream
|
Adds the given stream to the query construction. Additionally, you can choose the interpolator to use for this stream, as well as a special name
for the column in the returned dataset. If no column name is given, the full stream path will be used.
addStream also supports Merge queries. You can insert a merge query instead of a stream, but be sure to name the column::
d = Dataset(cdb, t1=time.time()-1000,t2=time.time(),dt=10.)
d.addStream("temperature","average")
d.addStream("steps","sum")
m = Merge(cdb)
m.addStream("mystream")
m.addStream("mystream2")
d.addStream(m,colname="mycolumn")
result = d.run()
|
connectordb/query/dataset.py
|
def addStream(self, stream, interpolator="closest", t1=None, t2=None, dt=None, limit=None, i1=None, i2=None, transform=None, colname=None):
    """Adds the given stream to the query construction. Additionally, you can choose the interpolator to use for this stream, as well as a special name
    for the column in the returned dataset. If no column name is given, the full stream path will be used.
    addStream also supports Merge queries. You can insert a merge query instead of a stream, but be sure to name the column::
        d = Dataset(cdb, t1=time.time()-1000,t2=time.time(),dt=10.)
        d.addStream("temperature","average")
        d.addStream("steps","sum")
        m = Merge(cdb)
        m.addStream("mystream")
        m.addStream("mystream2")
        d.addStream(m,colname="mycolumn")
        result = d.run()
    """
    streamquery = query_maker(t1, t2, limit, i1, i2, transform)
    param_stream(self.cdb, streamquery, stream)
    streamquery["interpolator"] = interpolator
    if colname is None:
        # What do we call this column?
        if isinstance(stream, six.string_types):
            colname = stream
        elif isinstance(stream, Stream):
            colname = stream.path
        else:
            raise Exception(
                "Could not find a name for the column! use the 'colname' parameter.")
    if colname in self.query["dataset"] or colname is "x":
        raise Exception(
            "The column name either exists, or is labeled 'x'. Use the colname parameter to change the column name.")
    self.query["dataset"][colname] = streamquery
|
def addStream(self, stream, interpolator="closest", t1=None, t2=None, dt=None, limit=None, i1=None, i2=None, transform=None, colname=None):
    """Adds the given stream to the query construction. Additionally, you can choose the interpolator to use for this stream, as well as a special name
    for the column in the returned dataset. If no column name is given, the full stream path will be used.
    addStream also supports Merge queries. You can insert a merge query instead of a stream, but be sure to name the column::
        d = Dataset(cdb, t1=time.time()-1000,t2=time.time(),dt=10.)
        d.addStream("temperature","average")
        d.addStream("steps","sum")
        m = Merge(cdb)
        m.addStream("mystream")
        m.addStream("mystream2")
        d.addStream(m,colname="mycolumn")
        result = d.run()
    """
    streamquery = query_maker(t1, t2, limit, i1, i2, transform)
    param_stream(self.cdb, streamquery, stream)
    streamquery["interpolator"] = interpolator
    if colname is None:
        # What do we call this column?
        if isinstance(stream, six.string_types):
            colname = stream
        elif isinstance(stream, Stream):
            colname = stream.path
        else:
            raise Exception(
                "Could not find a name for the column! use the 'colname' parameter.")
    if colname in self.query["dataset"] or colname is "x":
        raise Exception(
            "The column name either exists, or is labeled 'x'. Use the colname parameter to change the column name.")
    self.query["dataset"][colname] = streamquery
|
[
"Adds",
"the",
"given",
"stream",
"to",
"the",
"query",
"construction",
".",
"Additionally",
"you",
"can",
"choose",
"the",
"interpolator",
"to",
"use",
"for",
"this",
"stream",
"as",
"well",
"as",
"a",
"special",
"name",
"for",
"the",
"column",
"in",
"the",
"returned",
"dataset",
".",
"If",
"no",
"column",
"name",
"is",
"given",
"the",
"full",
"stream",
"path",
"will",
"be",
"used",
"."
] |
connectordb/connectordb-python
|
python
|
https://github.com/connectordb/connectordb-python/blob/2092b0cb30898139a247176bcf433d5a4abde7cb/connectordb/query/dataset.py#L165-L202
|
[
"def",
"addStream",
"(",
"self",
",",
"stream",
",",
"interpolator",
"=",
"\"closest\"",
",",
"t1",
"=",
"None",
",",
"t2",
"=",
"None",
",",
"dt",
"=",
"None",
",",
"limit",
"=",
"None",
",",
"i1",
"=",
"None",
",",
"i2",
"=",
"None",
",",
"transform",
"=",
"None",
",",
"colname",
"=",
"None",
")",
":",
"streamquery",
"=",
"query_maker",
"(",
"t1",
",",
"t2",
",",
"limit",
",",
"i1",
",",
"i2",
",",
"transform",
")",
"param_stream",
"(",
"self",
".",
"cdb",
",",
"streamquery",
",",
"stream",
")",
"streamquery",
"[",
"\"interpolator\"",
"]",
"=",
"interpolator",
"if",
"colname",
"is",
"None",
":",
"# What do we call this column?",
"if",
"isinstance",
"(",
"stream",
",",
"six",
".",
"string_types",
")",
":",
"colname",
"=",
"stream",
"elif",
"isinstance",
"(",
"stream",
",",
"Stream",
")",
":",
"colname",
"=",
"stream",
".",
"path",
"else",
":",
"raise",
"Exception",
"(",
"\"Could not find a name for the column! use the 'colname' parameter.\"",
")",
"if",
"colname",
"in",
"self",
".",
"query",
"[",
"\"dataset\"",
"]",
"or",
"colname",
"is",
"\"x\"",
":",
"raise",
"Exception",
"(",
"\"The column name either exists, or is labeled 'x'. Use the colname parameter to change the column name.\"",
")",
"self",
".",
"query",
"[",
"\"dataset\"",
"]",
"[",
"colname",
"]",
"=",
"streamquery"
] |
2092b0cb30898139a247176bcf433d5a4abde7cb
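A hedged, end-to-end sketch of the workflow described in the docstring above; the Dataset import path, API key, server URL, and stream paths are placeholders/assumptions rather than values from this record.

import time

import connectordb
from connectordb.query.dataset import Dataset  # import path is an assumption

cdb = connectordb.ConnectorDB("YOUR_APIKEY", url="https://cdb.example.com")  # placeholders
d = Dataset(cdb, t1=time.time() - 3600, t2=time.time(), dt=10.)
d.addStream("myuser/mydevice/temperature", interpolator="average", colname="temp")
d.addStream("myuser/mydevice/steps", interpolator="sum")  # column name defaults to the stream path
result = d.run()
print(len(result), "rows")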
|
test
|
ConnectorDB.reset_apikey
|
invalidates the device's current api key, and generates a new one. Resets current auth to use the new apikey,
since the change would have future queries fail if they use the old api key.
|
connectordb/_connectordb.py
|
def reset_apikey(self):
    """invalidates the device's current api key, and generates a new one. Resets current auth to use the new apikey,
    since the change would have future queries fail if they use the old api key."""
    apikey = Device.reset_apikey(self)
    self.db.setauth(apikey)
    return apikey
|
def reset_apikey(self):
    """invalidates the device's current api key, and generates a new one. Resets current auth to use the new apikey,
    since the change would have future queries fail if they use the old api key."""
    apikey = Device.reset_apikey(self)
    self.db.setauth(apikey)
    return apikey
|
[
"invalidates",
"the",
"device",
"s",
"current",
"api",
"key",
"and",
"generates",
"a",
"new",
"one",
".",
"Resets",
"current",
"auth",
"to",
"use",
"the",
"new",
"apikey",
"since",
"the",
"change",
"would",
"have",
"future",
"queries",
"fail",
"if",
"they",
"use",
"the",
"old",
"api",
"key",
"."
] |
connectordb/connectordb-python
|
python
|
https://github.com/connectordb/connectordb-python/blob/2092b0cb30898139a247176bcf433d5a4abde7cb/connectordb/_connectordb.py#L70-L75
|
[
"def",
"reset_apikey",
"(",
"self",
")",
":",
"apikey",
"=",
"Device",
".",
"reset_apikey",
"(",
"self",
")",
"self",
".",
"db",
".",
"setauth",
"(",
"apikey",
")",
"return",
"apikey"
] |
2092b0cb30898139a247176bcf433d5a4abde7cb
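A hedged sketch of key rotation with reset_apikey(); storing the new key is application-specific, so the file write below is only a placeholder.

# Sketch only: `cdb` is an assumed ConnectorDB connection.
new_key = cdb.reset_apikey()  # the old key stops working; cdb re-authenticates with the new one
with open("apikey.txt", "w") as f:  # placeholder persistence - keep the key somewhere safe
    f.write(new_key)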
|
test
|
ConnectorDB.info
|
returns a dictionary of information about the database, including the database version, the transforms
and the interpolators supported::
>>>cdb = connectordb.ConnectorDB(apikey)
>>>cdb.info()
{
"version": "0.3.0",
"transforms": {
"sum": {"description": "Returns the sum of all the datapoints that go through the transform"}
...
},
"interpolators": {
"closest": {"description": "Uses the datapoint closest to the interpolation timestamp"}
...
}
}
|
connectordb/_connectordb.py
|
def info(self):
    """returns a dictionary of information about the database, including the database version, the transforms
    and the interpolators supported::
        >>>cdb = connectordb.ConnectorDB(apikey)
        >>>cdb.info()
        {
            "version": "0.3.0",
            "transforms": {
                "sum": {"description": "Returns the sum of all the datapoints that go through the transform"}
                ...
            },
            "interpolators": {
                "closest": {"description": "Uses the datapoint closest to the interpolation timestamp"}
                ...
            }
        }
    """
    return {
        "version": self.db.get("meta/version").text,
        "transforms": self.db.get("meta/transforms").json(),
        "interpolators": self.db.get("meta/interpolators").json()
    }
|
def info(self):
    """returns a dictionary of information about the database, including the database version, the transforms
    and the interpolators supported::
        >>>cdb = connectordb.ConnectorDB(apikey)
        >>>cdb.info()
        {
            "version": "0.3.0",
            "transforms": {
                "sum": {"description": "Returns the sum of all the datapoints that go through the transform"}
                ...
            },
            "interpolators": {
                "closest": {"description": "Uses the datapoint closest to the interpolation timestamp"}
                ...
            }
        }
    """
    return {
        "version": self.db.get("meta/version").text,
        "transforms": self.db.get("meta/transforms").json(),
        "interpolators": self.db.get("meta/interpolators").json()
    }
|
[
"returns",
"a",
"dictionary",
"of",
"information",
"about",
"the",
"database",
"including",
"the",
"database",
"version",
"the",
"transforms",
"and",
"the",
"interpolators",
"supported",
"::"
] |
connectordb/connectordb-python
|
python
|
https://github.com/connectordb/connectordb-python/blob/2092b0cb30898139a247176bcf433d5a4abde7cb/connectordb/_connectordb.py#L89-L112
|
[
"def",
"info",
"(",
"self",
")",
":",
"return",
"{",
"\"version\"",
":",
"self",
".",
"db",
".",
"get",
"(",
"\"meta/version\"",
")",
".",
"text",
",",
"\"transforms\"",
":",
"self",
".",
"db",
".",
"get",
"(",
"\"meta/transforms\"",
")",
".",
"json",
"(",
")",
",",
"\"interpolators\"",
":",
"self",
".",
"db",
".",
"get",
"(",
"\"meta/interpolators\"",
")",
".",
"json",
"(",
")",
"}"
] |
2092b0cb30898139a247176bcf433d5a4abde7cb
|
test
|
ConnectorDB.users
|
Returns the list of users in the database
|
connectordb/_connectordb.py
|
def users(self):
    """Returns the list of users in the database"""
    result = self.db.read("", {"q": "ls"})
    if result is None or result.json() is None:
        return []
    users = []
    for u in result.json():
        usr = self(u["name"])
        usr.metadata = u
        users.append(usr)
    return users
|
def users(self):
    """Returns the list of users in the database"""
    result = self.db.read("", {"q": "ls"})
    if result is None or result.json() is None:
        return []
    users = []
    for u in result.json():
        usr = self(u["name"])
        usr.metadata = u
        users.append(usr)
    return users
|
[
"Returns",
"the",
"list",
"of",
"users",
"in",
"the",
"database"
] |
connectordb/connectordb-python
|
python
|
https://github.com/connectordb/connectordb-python/blob/2092b0cb30898139a247176bcf433d5a4abde7cb/connectordb/_connectordb.py#L117-L128
|
[
"def",
"users",
"(",
"self",
")",
":",
"result",
"=",
"self",
".",
"db",
".",
"read",
"(",
"\"\"",
",",
"{",
"\"q\"",
":",
"\"ls\"",
"}",
")",
"if",
"result",
"is",
"None",
"or",
"result",
".",
"json",
"(",
")",
"is",
"None",
":",
"return",
"[",
"]",
"users",
"=",
"[",
"]",
"for",
"u",
"in",
"result",
".",
"json",
"(",
")",
":",
"usr",
"=",
"self",
"(",
"u",
"[",
"\"name\"",
"]",
")",
"usr",
".",
"metadata",
"=",
"u",
"users",
".",
"append",
"(",
"usr",
")",
"return",
"users"
] |
2092b0cb30898139a247176bcf433d5a4abde7cb
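A hedged sketch of walking the user listing above; it assumes `cdb` was created with credentials that are allowed to list users.

# Sketch only: `cdb` is an assumed ConnectorDB connection with sufficient permissions.
for usr in cdb.users():
    # metadata was cached from the "ls" listing, so no extra request per user is needed here
    print(usr.metadata["name"])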
|
test
|
ConnectorDB.import_users
|
Imports version 1 of ConnectorDB export. These exports can be generated
by running user.export(dir), possibly on multiple users.
|
connectordb/_connectordb.py
|
def import_users(self, directory):
    """Imports version 1 of ConnectorDB export. These exports can be generated
    by running user.export(dir), possibly on multiple users.
    """
    exportInfoFile = os.path.join(directory, "connectordb.json")
    with open(exportInfoFile) as f:
        exportInfo = json.load(f)
    if exportInfo["Version"] != 1:
        raise ValueError("Not able to read this import version")
    # Now we list all the user directories
    for name in os.listdir(directory):
        udir = os.path.join(directory, name)
        if os.path.isdir(udir):
            # Let's read in the user
            with open(os.path.join(udir, "user.json")) as f:
                usrdata = json.load(f)
            u = self(usrdata["name"])
            if u.exists():
                raise ValueError("The user " + name + " already exists")
            del usrdata["name"]
            u.create(password=name, **usrdata)
            # Now read all of the user's devices
            for dname in os.listdir(udir):
                ddir = os.path.join(udir, dname)
                if os.path.isdir(ddir):
                    u.import_device(ddir)
|
def import_users(self, directory):
    """Imports version 1 of ConnectorDB export. These exports can be generated
    by running user.export(dir), possibly on multiple users.
    """
    exportInfoFile = os.path.join(directory, "connectordb.json")
    with open(exportInfoFile) as f:
        exportInfo = json.load(f)
    if exportInfo["Version"] != 1:
        raise ValueError("Not able to read this import version")
    # Now we list all the user directories
    for name in os.listdir(directory):
        udir = os.path.join(directory, name)
        if os.path.isdir(udir):
            # Let's read in the user
            with open(os.path.join(udir, "user.json")) as f:
                usrdata = json.load(f)
            u = self(usrdata["name"])
            if u.exists():
                raise ValueError("The user " + name + " already exists")
            del usrdata["name"]
            u.create(password=name, **usrdata)
            # Now read all of the user's devices
            for dname in os.listdir(udir):
                ddir = os.path.join(udir, dname)
                if os.path.isdir(ddir):
                    u.import_device(ddir)
|
[
"Imports",
"version",
"1",
"of",
"ConnectorDB",
"export",
".",
"These",
"exports",
"can",
"be",
"generated",
"by",
"running",
"user",
".",
"export",
"(",
"dir",
")",
"possibly",
"on",
"multiple",
"users",
"."
] |
connectordb/connectordb-python
|
python
|
https://github.com/connectordb/connectordb-python/blob/2092b0cb30898139a247176bcf433d5a4abde7cb/connectordb/_connectordb.py#L134-L163
|
[
"def",
"import_users",
"(",
"self",
",",
"directory",
")",
":",
"exportInfoFile",
"=",
"os",
".",
"path",
".",
"join",
"(",
"directory",
",",
"\"connectordb.json\"",
")",
"with",
"open",
"(",
"exportInfoFile",
")",
"as",
"f",
":",
"exportInfo",
"=",
"json",
".",
"load",
"(",
"f",
")",
"if",
"exportInfo",
"[",
"\"Version\"",
"]",
"!=",
"1",
":",
"raise",
"ValueError",
"(",
"\"Not able to read this import version\"",
")",
"# Now we list all the user directories",
"for",
"name",
"in",
"os",
".",
"listdir",
"(",
"directory",
")",
":",
"udir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"directory",
",",
"name",
")",
"if",
"os",
".",
"path",
".",
"isdir",
"(",
"udir",
")",
":",
"# Let's read in the user",
"with",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"udir",
",",
"\"user.json\"",
")",
")",
"as",
"f",
":",
"usrdata",
"=",
"json",
".",
"load",
"(",
"f",
")",
"u",
"=",
"self",
"(",
"usrdata",
"[",
"\"name\"",
"]",
")",
"if",
"u",
".",
"exists",
"(",
")",
":",
"raise",
"ValueError",
"(",
"\"The user \"",
"+",
"name",
"+",
"\" already exists\"",
")",
"del",
"usrdata",
"[",
"\"name\"",
"]",
"u",
".",
"create",
"(",
"password",
"=",
"name",
",",
"*",
"*",
"usrdata",
")",
"# Now read all of the user's devices",
"for",
"dname",
"in",
"os",
".",
"listdir",
"(",
"udir",
")",
":",
"ddir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"udir",
",",
"dname",
")",
"if",
"os",
".",
"path",
".",
"isdir",
"(",
"ddir",
")",
":",
"u",
".",
"import_device",
"(",
"ddir",
")"
] |
2092b0cb30898139a247176bcf433d5a4abde7cb
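A hedged sketch of restoring a version-1 export tree with import_users(); the backup path is a placeholder and must contain connectordb.json at its top level with one folder per user below it.

# Sketch only: `cdb` is an assumed ConnectorDB connection; the path is a placeholder.
cdb.import_users("./connectordb_backup")
# Per the code above, each imported user is recreated with password == user name,
# so rotate those passwords afterwards.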
|
test
|
run_bwa_index
|
Use BWA to create reference index files
:param JobFunctionWrappingJob job: passed automatically by Toil
:param str ref_id: FileStoreID for the reference genome
:return: FileStoreIDs for BWA index files
:rtype: tuple(str, str, str, str, str)
|
src/toil_lib/tools/indexing.py
|
def run_bwa_index(job, ref_id):
    """
    Use BWA to create reference index files
    :param JobFunctionWrappingJob job: passed automatically by Toil
    :param str ref_id: FileStoreID for the reference genome
    :return: FileStoreIDs for BWA index files
    :rtype: tuple(str, str, str, str, str)
    """
    job.fileStore.logToMaster('Created BWA index files')
    work_dir = job.fileStore.getLocalTempDir()
    job.fileStore.readGlobalFile(ref_id, os.path.join(work_dir, 'ref.fa'))
    command = ['index', '/data/ref.fa']
    dockerCall(job=job, workDir=work_dir, parameters=command,
               tool='quay.io/ucsc_cgl/bwa:0.7.12--256539928ea162949d8a65ca5c79a72ef557ce7c')
    ids = {}
    for output in ['ref.fa.amb', 'ref.fa.ann', 'ref.fa.bwt', 'ref.fa.pac', 'ref.fa.sa']:
        ids[output.split('.')[-1]] = (job.fileStore.writeGlobalFile(os.path.join(work_dir, output)))
    return ids['amb'], ids['ann'], ids['bwt'], ids['pac'], ids['sa']
|
def run_bwa_index(job, ref_id):
    """
    Use BWA to create reference index files
    :param JobFunctionWrappingJob job: passed automatically by Toil
    :param str ref_id: FileStoreID for the reference genome
    :return: FileStoreIDs for BWA index files
    :rtype: tuple(str, str, str, str, str)
    """
    job.fileStore.logToMaster('Created BWA index files')
    work_dir = job.fileStore.getLocalTempDir()
    job.fileStore.readGlobalFile(ref_id, os.path.join(work_dir, 'ref.fa'))
    command = ['index', '/data/ref.fa']
    dockerCall(job=job, workDir=work_dir, parameters=command,
               tool='quay.io/ucsc_cgl/bwa:0.7.12--256539928ea162949d8a65ca5c79a72ef557ce7c')
    ids = {}
    for output in ['ref.fa.amb', 'ref.fa.ann', 'ref.fa.bwt', 'ref.fa.pac', 'ref.fa.sa']:
        ids[output.split('.')[-1]] = (job.fileStore.writeGlobalFile(os.path.join(work_dir, output)))
    return ids['amb'], ids['ann'], ids['bwt'], ids['pac'], ids['sa']
|
[
"Use",
"BWA",
"to",
"create",
"reference",
"index",
"files"
] |
BD2KGenomics/toil-lib
|
python
|
https://github.com/BD2KGenomics/toil-lib/blob/022a615fc3dc98fc1aaa7bfd232409962ca44fbd/src/toil_lib/tools/indexing.py#L6-L24
|
[
"def",
"run_bwa_index",
"(",
"job",
",",
"ref_id",
")",
":",
"job",
".",
"fileStore",
".",
"logToMaster",
"(",
"'Created BWA index files'",
")",
"work_dir",
"=",
"job",
".",
"fileStore",
".",
"getLocalTempDir",
"(",
")",
"job",
".",
"fileStore",
".",
"readGlobalFile",
"(",
"ref_id",
",",
"os",
".",
"path",
".",
"join",
"(",
"work_dir",
",",
"'ref.fa'",
")",
")",
"command",
"=",
"[",
"'index'",
",",
"'/data/ref.fa'",
"]",
"dockerCall",
"(",
"job",
"=",
"job",
",",
"workDir",
"=",
"work_dir",
",",
"parameters",
"=",
"command",
",",
"tool",
"=",
"'quay.io/ucsc_cgl/bwa:0.7.12--256539928ea162949d8a65ca5c79a72ef557ce7c'",
")",
"ids",
"=",
"{",
"}",
"for",
"output",
"in",
"[",
"'ref.fa.amb'",
",",
"'ref.fa.ann'",
",",
"'ref.fa.bwt'",
",",
"'ref.fa.pac'",
",",
"'ref.fa.sa'",
"]",
":",
"ids",
"[",
"output",
".",
"split",
"(",
"'.'",
")",
"[",
"-",
"1",
"]",
"]",
"=",
"(",
"job",
".",
"fileStore",
".",
"writeGlobalFile",
"(",
"os",
".",
"path",
".",
"join",
"(",
"work_dir",
",",
"output",
")",
")",
")",
"return",
"ids",
"[",
"'amb'",
"]",
",",
"ids",
"[",
"'ann'",
"]",
",",
"ids",
"[",
"'bwt'",
"]",
",",
"ids",
"[",
"'pac'",
"]",
",",
"ids",
"[",
"'sa'",
"]"
] |
022a615fc3dc98fc1aaa7bfd232409962ca44fbd
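A hedged sketch of launching this job on its own: it follows the usual Toil wrapJobFn/start pattern for the Toil releases toil-lib targeted, but the exact entry-point names vary between Toil versions, and the job-store and reference paths are placeholders.

from toil.common import Toil
from toil.job import Job

from toil_lib.tools.indexing import run_bwa_index

options = Job.Runner.getDefaultOptions("./jobstore")  # local job store, placeholder path
with Toil(options) as toil:
    ref_id = toil.importFile("file:///data/ref.fa")   # placeholder reference FASTA
    amb, ann, bwt, pac, sa = toil.start(Job.wrapJobFn(run_bwa_index, ref_id))
    print(amb, ann, bwt, pac, sa)                     # FileStoreIDs for the five index files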
|
test
|
Logger.connectordb
|
Returns the ConnectorDB object that the logger uses. Raises an error if Logger isn't able to connect
|
connectordb/logger.py
|
def connectordb(self):
"""Returns the ConnectorDB object that the logger uses. Raises an error if Logger isn't able to connect"""
if self.__cdb is None:
logging.debug("Logger: Connecting to " + self.serverurl)
self.__cdb = ConnectorDB(self.apikey, url=self.serverurl)
return self.__cdb
|
def connectordb(self):
"""Returns the ConnectorDB object that the logger uses. Raises an error if Logger isn't able to connect"""
if self.__cdb is None:
logging.debug("Logger: Connecting to " + self.serverurl)
self.__cdb = ConnectorDB(self.apikey, url=self.serverurl)
return self.__cdb
|
[
"Returns",
"the",
"ConnectorDB",
"object",
"that",
"the",
"logger",
"uses",
".",
"Raises",
"an",
"error",
"if",
"Logger",
"isn",
"t",
"able",
"to",
"connect"
] |
connectordb/connectordb-python
|
python
|
https://github.com/connectordb/connectordb-python/blob/2092b0cb30898139a247176bcf433d5a4abde7cb/connectordb/logger.py#L96-L101
|
[
"def",
"connectordb",
"(",
"self",
")",
":",
"if",
"self",
".",
"__cdb",
"is",
"None",
":",
"logging",
".",
"debug",
"(",
"\"Logger: Connecting to \"",
"+",
"self",
".",
"serverurl",
")",
"self",
".",
"__cdb",
"=",
"ConnectorDB",
"(",
"self",
".",
"apikey",
",",
"url",
"=",
"self",
".",
"serverurl",
")",
"return",
"self",
".",
"__cdb"
] |
2092b0cb30898139a247176bcf433d5a4abde7cb
|
test
|
Logger.addStream
|
Adds the given stream to the logger. Requires an active connection to the ConnectorDB database.
If a schema is not specified, loads the stream from the database. If a schema is specified, and the stream
does not exist, creates the stream. You can also add stream properties such as description or nickname to be added
during creation.
|
connectordb/logger.py
|
def addStream(self, streamname, schema=None, **kwargs):
"""Adds the given stream to the logger. Requires an active connection to the ConnectorDB database.
If a schema is not specified, loads the stream from the database. If a schema is specified, and the stream
does not exist, creates the stream. You can also add stream properties such as description or nickname to be added
during creation."""
stream = self.connectordb[streamname]
if not stream.exists():
if schema is not None:
stream.create(schema, **kwargs)
else:
raise Exception(
"The stream '%s' was not found" % (streamname, ))
self.addStream_force(streamname, stream.schema)
|
def addStream(self, streamname, schema=None, **kwargs):
"""Adds the given stream to the logger. Requires an active connection to the ConnectorDB database.
If a schema is not specified, loads the stream from the database. If a schema is specified, and the stream
does not exist, creates the stream. You can also add stream properties such as description or nickname to be added
during creation."""
stream = self.connectordb[streamname]
if not stream.exists():
if schema is not None:
stream.create(schema, **kwargs)
else:
raise Exception(
"The stream '%s' was not found" % (streamname, ))
self.addStream_force(streamname, stream.schema)
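
An illustrative sketch of the two code paths described in the docstring, assuming a Logger constructed as in the data-property example elsewhere in this file; the stream names, schema, and description keyword are placeholders.

# Hypothetical usage; stream names and schema are placeholders.
from connectordb.logger import Logger

l = Logger("log.db")  # connection details (API key, server URL) omitted/assumed here

# Stream missing on the server: a JSON-Schema dict creates it, and extra
# keyword arguments such as description are forwarded to stream.create().
l.addStream("mydevice/temperature", {"type": "number"}, description="room temperature")

# Stream already exists on the server: no schema needed, it is loaded remotely.
l.addStream("mydevice/humidity")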
|
[
"Adds",
"the",
"given",
"stream",
"to",
"the",
"logger",
".",
"Requires",
"an",
"active",
"connection",
"to",
"the",
"ConnectorDB",
"database",
"."
] |
connectordb/connectordb-python
|
python
|
https://github.com/connectordb/connectordb-python/blob/2092b0cb30898139a247176bcf433d5a4abde7cb/connectordb/logger.py#L118-L134
|
[
"def",
"addStream",
"(",
"self",
",",
"streamname",
",",
"schema",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"stream",
"=",
"self",
".",
"connectordb",
"[",
"streamname",
"]",
"if",
"not",
"stream",
".",
"exists",
"(",
")",
":",
"if",
"schema",
"is",
"not",
"None",
":",
"stream",
".",
"create",
"(",
"schema",
",",
"*",
"*",
"kwargs",
")",
"else",
":",
"raise",
"Exception",
"(",
"\"The stream '%s' was not found\"",
"%",
"(",
"streamname",
",",
")",
")",
"self",
".",
"addStream_force",
"(",
"streamname",
",",
"stream",
".",
"schema",
")"
] |
2092b0cb30898139a247176bcf433d5a4abde7cb
|
test
|
Logger.addStream_force
|
This function adds the given stream to the logger, but does not check with a ConnectorDB database
to make sure that the stream exists. Use at your own risk.
|
connectordb/logger.py
|
def addStream_force(self, streamname, schema=None):
"""This function adds the given stream to the logger, but does not check with a ConnectorDB database
to make sure that the stream exists. Use at your own risk."""
c = self.database.cursor()
c.execute("INSERT OR REPLACE INTO streams VALUES (?,?);",
(streamname, json.dumps(schema)))
self.streams[streamname] = schema
|
def addStream_force(self, streamname, schema=None):
"""This function adds the given stream to the logger, but does not check with a ConnectorDB database
to make sure that the stream exists. Use at your own risk."""
c = self.database.cursor()
c.execute("INSERT OR REPLACE INTO streams VALUES (?,?);",
(streamname, json.dumps(schema)))
self.streams[streamname] = schema
|
[
"This",
"function",
"adds",
"the",
"given",
"stream",
"to",
"the",
"logger",
"but",
"does",
"not",
"check",
"with",
"a",
"ConnectorDB",
"database",
"to",
"make",
"sure",
"that",
"the",
"stream",
"exists",
".",
"Use",
"at",
"your",
"own",
"risk",
"."
] |
connectordb/connectordb-python
|
python
|
https://github.com/connectordb/connectordb-python/blob/2092b0cb30898139a247176bcf433d5a4abde7cb/connectordb/logger.py#L136-L144
|
[
"def",
"addStream_force",
"(",
"self",
",",
"streamname",
",",
"schema",
"=",
"None",
")",
":",
"c",
"=",
"self",
".",
"database",
".",
"cursor",
"(",
")",
"c",
".",
"execute",
"(",
"\"INSERT OR REPLACE INTO streams VALUES (?,?);\"",
",",
"(",
"streamname",
",",
"json",
".",
"dumps",
"(",
"schema",
")",
")",
")",
"self",
".",
"streams",
"[",
"streamname",
"]",
"=",
"schema"
] |
2092b0cb30898139a247176bcf433d5a4abde7cb
|
test
|
Logger.insert
|
Insert the datapoint into the logger for the given stream name. The logger caches the datapoint
and eventually synchronizes it with ConnectorDB
|
connectordb/logger.py
|
def insert(self, streamname, value):
"""Insert the datapoint into the logger for the given stream name. The logger caches the datapoint
and eventually synchronizes it with ConnectorDB"""
if streamname not in self.streams:
raise Exception("The stream '%s' was not found" % (streamname, ))
# Validate the schema
validate(value, self.streams[streamname])
# Insert the datapoint - it fits the schema
value = json.dumps(value)
logging.debug("Logger: %s <= %s" % (streamname, value))
c = self.database.cursor()
c.execute("INSERT INTO cache VALUES (?,?,?);",
(streamname, time.time(), value))
|
def insert(self, streamname, value):
"""Insert the datapoint into the logger for the given stream name. The logger caches the datapoint
and eventually synchronizes it with ConnectorDB"""
if streamname not in self.streams:
raise Exception("The stream '%s' was not found" % (streamname, ))
# Validate the schema
validate(value, self.streams[streamname])
# Insert the datapoint - it fits the schema
value = json.dumps(value)
logging.debug("Logger: %s <= %s" % (streamname, value))
c = self.database.cursor()
c.execute("INSERT INTO cache VALUES (?,?,?);",
(streamname, time.time(), value))
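
A short sketch of the caching behaviour, continuing the Logger 'l' and placeholder stream from the previous sketch; the inserted value only has to validate against that stream's JSON schema.

# Hypothetical usage; 'l' and the stream name come from the previous sketch.
l.insert("mydevice/temperature", 21.5)   # validated, then cached in SQLite until sync
# l.insert("unregistered/stream", 1)     # would raise: stream was never added
# l.insert("mydevice/temperature", "x")  # would fail JSON-schema validation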
|
[
"Insert",
"the",
"datapoint",
"into",
"the",
"logger",
"for",
"the",
"given",
"stream",
"name",
".",
"The",
"logger",
"caches",
"the",
"datapoint",
"and",
"eventually",
"synchronizes",
"it",
"with",
"ConnectorDB"
] |
connectordb/connectordb-python
|
python
|
https://github.com/connectordb/connectordb-python/blob/2092b0cb30898139a247176bcf433d5a4abde7cb/connectordb/logger.py#L146-L160
|
[
"def",
"insert",
"(",
"self",
",",
"streamname",
",",
"value",
")",
":",
"if",
"streamname",
"not",
"in",
"self",
".",
"streams",
":",
"raise",
"Exception",
"(",
"\"The stream '%s' was not found\"",
"%",
"(",
"streamname",
",",
")",
")",
"# Validate the schema",
"validate",
"(",
"value",
",",
"self",
".",
"streams",
"[",
"streamname",
"]",
")",
"# Insert the datapoint - it fits the schema",
"value",
"=",
"json",
".",
"dumps",
"(",
"value",
")",
"logging",
".",
"debug",
"(",
"\"Logger: %s <= %s\"",
"%",
"(",
"streamname",
",",
"value",
")",
")",
"c",
"=",
"self",
".",
"database",
".",
"cursor",
"(",
")",
"c",
".",
"execute",
"(",
"\"INSERT INTO cache VALUES (?,?,?);\"",
",",
"(",
"streamname",
",",
"time",
".",
"time",
"(",
")",
",",
"value",
")",
")"
] |
2092b0cb30898139a247176bcf433d5a4abde7cb
|
test
|
Logger.insert_many
|
Inserts data into the cache, if the data is a dict of the form {streamname: [{"t": timestamp, "d": data}, ...]}
|
connectordb/logger.py
|
def insert_many(self, data_dict):
    """ Inserts data into the cache, if the data is a dict of the form {streamname: [{"t": timestamp, "d": data}, ...]}"""
c = self.database.cursor()
c.execute("BEGIN TRANSACTION;")
try:
for streamname in data_dict:
if streamname not in self.streams:
raise Exception(
"The stream '%s' was not found" % (streamname, ))
for dp in data_dict[streamname]:
validate(dp["d"], self.streams[streamname])
c.execute("INSERT INTO cache VALUES (?,?,?);",
(streamname, dp["t"], dp["d"]))
except:
c.execute("ROLLBACK;")
raise
    c.execute("COMMIT;")
|
def insert_many(self, data_dict):
    """ Inserts data into the cache, if the data is a dict of the form {streamname: [{"t": timestamp, "d": data}, ...]}"""
c = self.database.cursor()
c.execute("BEGIN TRANSACTION;")
try:
for streamname in data_dict:
if streamname not in self.streams:
raise Exception(
"The stream '%s' was not found" % (streamname, ))
for dp in data_dict[streamname]:
validate(dp["d"], self.streams[streamname])
c.execute("INSERT INTO cache VALUES (?,?,?);",
(streamname, dp["t"], dp["d"]))
except:
c.execute("ROLLBACK;")
raise
    c.execute("COMMIT;")
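
The expected shape of data_dict, as described in the docstring: a mapping from stream name to a list of {"t": timestamp, "d": data} points. The stream names and timestamps below are placeholders, and every "d" value must validate against its stream's schema.

# Hypothetical payload; stream names and timestamps are placeholders.
import time

payload = {
    "mydevice/temperature": [
        {"t": time.time() - 10, "d": 21.5},
        {"t": time.time(), "d": 21.7},
    ],
    "mydevice/humidity": [
        {"t": time.time(), "d": 40},
    ],
}
l.insert_many(payload)  # 'l' is a Logger with both streams already added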
|
[
"Inserts",
"data",
"into",
"the",
"cache",
"if",
"the",
"data",
"is",
"a",
"dict",
"of",
"the",
"form",
"{",
"streamname",
":",
"[",
"{",
"t",
":",
"timestamp",
"d",
":",
"data",
"...",
"]",
"}"
] |
connectordb/connectordb-python
|
python
|
https://github.com/connectordb/connectordb-python/blob/2092b0cb30898139a247176bcf433d5a4abde7cb/connectordb/logger.py#L162-L178
|
[
"def",
"insert_many",
"(",
"self",
",",
"data_dict",
")",
":",
"c",
"=",
"self",
".",
"database",
".",
"cursor",
"(",
")",
"c",
".",
"execute",
"(",
"\"BEGIN TRANSACTION;\"",
")",
"try",
":",
"for",
"streamname",
"in",
"data_dict",
":",
"if",
"streamname",
"not",
"in",
"self",
".",
"streams",
":",
"raise",
"Exception",
"(",
"\"The stream '%s' was not found\"",
"%",
"(",
"streamname",
",",
")",
")",
"for",
"dp",
"in",
"data_dict",
"[",
"streamname",
"]",
":",
"validate",
"(",
"dp",
"[",
"\"d\"",
"]",
",",
"self",
".",
"streams",
"[",
"streamname",
"]",
")",
"c",
".",
"execute",
"(",
"\"INSERT INTO cache VALUES (?,?,?);\"",
",",
"(",
"streamname",
",",
"dp",
"[",
"\"t\"",
"]",
",",
"dp",
"[",
"\"d\"",
"]",
")",
")",
"except",
":",
"c",
".",
"execute",
"(",
"\"ROLLBACK;\"",
")",
"raise",
"c",
".",
"exectute",
"(",
"\"COMMIT;\"",
")"
] |
2092b0cb30898139a247176bcf433d5a4abde7cb
|
test
|
Logger.sync
|
Attempt to sync with the ConnectorDB server
|
connectordb/logger.py
|
def sync(self):
"""Attempt to sync with the ConnectorDB server"""
logging.debug("Logger: Syncing...")
failed = False
try:
# Get the connectordb object
cdb = self.connectordb
# Ping the database - most connection errors will happen here
cdb.ping()
with self.synclock:
c = self.database.cursor()
for stream in self.streams:
s = cdb[stream]
c.execute(
"SELECT * FROM cache WHERE stream=? ORDER BY timestamp ASC;",
(stream, ))
datapointArray = []
for dp in c.fetchall():
datapointArray.append(
{"t": dp[1],
"d": json.loads(dp[2])})
# First, check if the data already inserted has newer timestamps,
# and in that case, assume that there was an error, and remove the datapoints
# with an older timestamp, so that we don't have an error when syncing
if len(s) > 0:
newtime = s[-1]["t"]
while (len(datapointArray) > 0 and datapointArray[0]["t"] < newtime):
logging.debug("Datapoint exists with older timestamp. Removing the datapoint.")
datapointArray = datapointArray[1:]
if len(datapointArray) > 0:
logging.debug("%s: syncing %i datapoints" %
(stream, len(datapointArray)))
while (len(datapointArray) > DATAPOINT_INSERT_LIMIT):
# We insert datapoints in chunks of a couple
# thousand so that they fit in the insert size
# limit of ConnectorDB
s.insert_array(
datapointArray[:DATAPOINT_INSERT_LIMIT])
# Clear the written datapoints
datapointArray = datapointArray[
DATAPOINT_INSERT_LIMIT:]
# If there was no error inserting, delete the
# datapoints from the cache
c.execute(
"DELETE FROM cache WHERE stream=? AND timestamp <?",
(stream, datapointArray[0]["t"]))
s.insert_array(datapointArray)
# If there was no error inserting, delete the
# datapoints from the cache
c.execute(
"DELETE FROM cache WHERE stream=? AND timestamp <=?",
(stream, datapointArray[-1]["t"]))
self.lastsynctime = time.time()
if self.onsync is not None:
self.onsync()
except Exception as e:
# Handle the sync failure callback
        failed = True
reraise = self.syncraise
if self.onsyncfail is not None:
reraise = self.onsyncfail(e)
if reraise:
raise
|
def sync(self):
"""Attempt to sync with the ConnectorDB server"""
logging.debug("Logger: Syncing...")
failed = False
try:
# Get the connectordb object
cdb = self.connectordb
# Ping the database - most connection errors will happen here
cdb.ping()
with self.synclock:
c = self.database.cursor()
for stream in self.streams:
s = cdb[stream]
c.execute(
"SELECT * FROM cache WHERE stream=? ORDER BY timestamp ASC;",
(stream, ))
datapointArray = []
for dp in c.fetchall():
datapointArray.append(
{"t": dp[1],
"d": json.loads(dp[2])})
# First, check if the data already inserted has newer timestamps,
# and in that case, assume that there was an error, and remove the datapoints
# with an older timestamp, so that we don't have an error when syncing
if len(s) > 0:
newtime = s[-1]["t"]
while (len(datapointArray) > 0 and datapointArray[0]["t"] < newtime):
logging.debug("Datapoint exists with older timestamp. Removing the datapoint.")
datapointArray = datapointArray[1:]
if len(datapointArray) > 0:
logging.debug("%s: syncing %i datapoints" %
(stream, len(datapointArray)))
while (len(datapointArray) > DATAPOINT_INSERT_LIMIT):
# We insert datapoints in chunks of a couple
# thousand so that they fit in the insert size
# limit of ConnectorDB
s.insert_array(
datapointArray[:DATAPOINT_INSERT_LIMIT])
# Clear the written datapoints
datapointArray = datapointArray[
DATAPOINT_INSERT_LIMIT:]
# If there was no error inserting, delete the
# datapoints from the cache
c.execute(
"DELETE FROM cache WHERE stream=? AND timestamp <?",
(stream, datapointArray[0]["t"]))
s.insert_array(datapointArray)
# If there was no error inserting, delete the
# datapoints from the cache
c.execute(
"DELETE FROM cache WHERE stream=? AND timestamp <=?",
(stream, datapointArray[-1]["t"]))
self.lastsynctime = time.time()
if self.onsync is not None:
self.onsync()
except Exception as e:
# Handle the sync failure callback
        failed = True
reraise = self.syncraise
if self.onsyncfail is not None:
reraise = self.onsyncfail(e)
if reraise:
raise
|
[
"Attempt",
"to",
"sync",
"with",
"the",
"ConnectorDB",
"server"
] |
connectordb/connectordb-python
|
python
|
https://github.com/connectordb/connectordb-python/blob/2092b0cb30898139a247176bcf433d5a4abde7cb/connectordb/logger.py#L180-L253
|
[
"def",
"sync",
"(",
"self",
")",
":",
"logging",
".",
"debug",
"(",
"\"Logger: Syncing...\"",
")",
"failed",
"=",
"False",
"try",
":",
"# Get the connectordb object",
"cdb",
"=",
"self",
".",
"connectordb",
"# Ping the database - most connection errors will happen here",
"cdb",
".",
"ping",
"(",
")",
"with",
"self",
".",
"synclock",
":",
"c",
"=",
"self",
".",
"database",
".",
"cursor",
"(",
")",
"for",
"stream",
"in",
"self",
".",
"streams",
":",
"s",
"=",
"cdb",
"[",
"stream",
"]",
"c",
".",
"execute",
"(",
"\"SELECT * FROM cache WHERE stream=? ORDER BY timestamp ASC;\"",
",",
"(",
"stream",
",",
")",
")",
"datapointArray",
"=",
"[",
"]",
"for",
"dp",
"in",
"c",
".",
"fetchall",
"(",
")",
":",
"datapointArray",
".",
"append",
"(",
"{",
"\"t\"",
":",
"dp",
"[",
"1",
"]",
",",
"\"d\"",
":",
"json",
".",
"loads",
"(",
"dp",
"[",
"2",
"]",
")",
"}",
")",
"# First, check if the data already inserted has newer timestamps,",
"# and in that case, assume that there was an error, and remove the datapoints",
"# with an older timestamp, so that we don't have an error when syncing",
"if",
"len",
"(",
"s",
")",
">",
"0",
":",
"newtime",
"=",
"s",
"[",
"-",
"1",
"]",
"[",
"\"t\"",
"]",
"while",
"(",
"len",
"(",
"datapointArray",
")",
">",
"0",
"and",
"datapointArray",
"[",
"0",
"]",
"[",
"\"t\"",
"]",
"<",
"newtime",
")",
":",
"logging",
".",
"debug",
"(",
"\"Datapoint exists with older timestamp. Removing the datapoint.\"",
")",
"datapointArray",
"=",
"datapointArray",
"[",
"1",
":",
"]",
"if",
"len",
"(",
"datapointArray",
")",
">",
"0",
":",
"logging",
".",
"debug",
"(",
"\"%s: syncing %i datapoints\"",
"%",
"(",
"stream",
",",
"len",
"(",
"datapointArray",
")",
")",
")",
"while",
"(",
"len",
"(",
"datapointArray",
")",
">",
"DATAPOINT_INSERT_LIMIT",
")",
":",
"# We insert datapoints in chunks of a couple",
"# thousand so that they fit in the insert size",
"# limit of ConnectorDB",
"s",
".",
"insert_array",
"(",
"datapointArray",
"[",
":",
"DATAPOINT_INSERT_LIMIT",
"]",
")",
"# Clear the written datapoints",
"datapointArray",
"=",
"datapointArray",
"[",
"DATAPOINT_INSERT_LIMIT",
":",
"]",
"# If there was no error inserting, delete the",
"# datapoints from the cache",
"c",
".",
"execute",
"(",
"\"DELETE FROM cache WHERE stream=? AND timestamp <?\"",
",",
"(",
"stream",
",",
"datapointArray",
"[",
"0",
"]",
"[",
"\"t\"",
"]",
")",
")",
"s",
".",
"insert_array",
"(",
"datapointArray",
")",
"# If there was no error inserting, delete the",
"# datapoints from the cache",
"c",
".",
"execute",
"(",
"\"DELETE FROM cache WHERE stream=? AND timestamp <=?\"",
",",
"(",
"stream",
",",
"datapointArray",
"[",
"-",
"1",
"]",
"[",
"\"t\"",
"]",
")",
")",
"self",
".",
"lastsynctime",
"=",
"time",
".",
"time",
"(",
")",
"if",
"self",
".",
"onsync",
"is",
"not",
"None",
":",
"self",
".",
"onsync",
"(",
")",
"except",
"Exception",
"as",
"e",
":",
"# Handle the sync failure callback",
"falied",
"=",
"True",
"reraise",
"=",
"self",
".",
"syncraise",
"if",
"self",
".",
"onsyncfail",
"is",
"not",
"None",
":",
"reraise",
"=",
"self",
".",
"onsyncfail",
"(",
"e",
")",
"if",
"reraise",
":",
"raise"
] |
2092b0cb30898139a247176bcf433d5a4abde7cb
|
test
|
Logger.start
|
Start the logger background synchronization service. This allows you to not need to
worry about syncing with ConnectorDB - you just insert into the Logger, and the Logger
will be synced every syncperiod.
|
connectordb/logger.py
|
def start(self):
"""Start the logger background synchronization service. This allows you to not need to
worry about syncing with ConnectorDB - you just insert into the Logger, and the Logger
    will be synced every syncperiod."""
with self.synclock:
if self.syncthread is not None:
logging.warn(
"Logger: Start called on a syncer that is already running")
return
self.sync() # Attempt a sync right away
self.__setsync()
|
def start(self):
"""Start the logger background synchronization service. This allows you to not need to
worry about syncing with ConnectorDB - you just insert into the Logger, and the Logger
    will be synced every syncperiod."""
with self.synclock:
if self.syncthread is not None:
logging.warn(
"Logger: Start called on a syncer that is already running")
return
self.sync() # Attempt a sync right away
self.__setsync()
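
A sketch of running the background syncer, assuming the Logger 'l' from the earlier sketches; how the sync interval (syncperiod) is configured is not shown in this file.

# Hypothetical usage; 'l' is a Logger with its streams already added.
l.start()        # syncs immediately, then keeps syncing in the background
try:
    l.insert("mydevice/temperature", 22.0)
    # ... keep inserting; the background thread flushes the cache periodically ...
finally:
    l.stop()     # cancel the background synchronization thread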
|
[
"Start",
"the",
"logger",
"background",
"synchronization",
"service",
".",
"This",
"allows",
"you",
"to",
"not",
"need",
"to",
"worry",
"about",
"syncing",
"with",
"ConnectorDB",
"-",
"you",
"just",
"insert",
"into",
"the",
"Logger",
"and",
"the",
"Logger",
"will",
"by",
"synced",
"every",
"syncperiod",
"."
] |
connectordb/connectordb-python
|
python
|
https://github.com/connectordb/connectordb-python/blob/2092b0cb30898139a247176bcf433d5a4abde7cb/connectordb/logger.py#L272-L284
|
[
"def",
"start",
"(",
"self",
")",
":",
"with",
"self",
".",
"synclock",
":",
"if",
"self",
".",
"syncthread",
"is",
"not",
"None",
":",
"logging",
".",
"warn",
"(",
"\"Logger: Start called on a syncer that is already running\"",
")",
"return",
"self",
".",
"sync",
"(",
")",
"# Attempt a sync right away",
"self",
".",
"__setsync",
"(",
")"
] |
2092b0cb30898139a247176bcf433d5a4abde7cb
|
test
|
Logger.stop
|
Stops the background synchronization thread
|
connectordb/logger.py
|
def stop(self):
"""Stops the background synchronization thread"""
with self.synclock:
if self.syncthread is not None:
self.syncthread.cancel()
self.syncthread = None
|
def stop(self):
"""Stops the background synchronization thread"""
with self.synclock:
if self.syncthread is not None:
self.syncthread.cancel()
self.syncthread = None
|
[
"Stops",
"the",
"background",
"synchronization",
"thread"
] |
connectordb/connectordb-python
|
python
|
https://github.com/connectordb/connectordb-python/blob/2092b0cb30898139a247176bcf433d5a4abde7cb/connectordb/logger.py#L286-L291
|
[
"def",
"stop",
"(",
"self",
")",
":",
"with",
"self",
".",
"synclock",
":",
"if",
"self",
".",
"syncthread",
"is",
"not",
"None",
":",
"self",
".",
"syncthread",
".",
"cancel",
"(",
")",
"self",
".",
"syncthread",
"=",
"None"
] |
2092b0cb30898139a247176bcf433d5a4abde7cb
|
test
|
Logger.data
|
The data property allows the user to save settings/data in the database, so that
there does not need to be extra code messing around with settings.
Use this property to save things that can be converted to JSON inside the logger database,
so that you don't have to mess with configuration files or saving settings otherwise::
from connectordb.logger import Logger
l = Logger("log.db")
l.data = {"hi": 56}
# prints the data dictionary
print l.data
|
connectordb/logger.py
|
def data(self):
"""The data property allows the user to save settings/data in the database, so that
there does not need to be extra code messing around with settings.
Use this property to save things that can be converted to JSON inside the logger database,
    so that you don't have to mess with configuration files or saving settings otherwise::
from connectordb.logger import Logger
l = Logger("log.db")
l.data = {"hi": 56}
# prints the data dictionary
print l.data
"""
c = self.database.cursor()
c.execute("SELECT userdatajson FROM metadata;")
return json.loads(next(c)[0])
|
def data(self):
"""The data property allows the user to save settings/data in the database, so that
there does not need to be extra code messing around with settings.
Use this property to save things that can be converted to JSON inside the logger database,
    so that you don't have to mess with configuration files or saving settings otherwise::
from connectordb.logger import Logger
l = Logger("log.db")
l.data = {"hi": 56}
# prints the data dictionary
print l.data
"""
c = self.database.cursor()
c.execute("SELECT userdatajson FROM metadata;")
return json.loads(next(c)[0])
|
[
"The",
"data",
"property",
"allows",
"the",
"user",
"to",
"save",
"settings",
"/",
"data",
"in",
"the",
"database",
"so",
"that",
"there",
"does",
"not",
"need",
"to",
"be",
"extra",
"code",
"messing",
"around",
"with",
"settings",
"."
] |
connectordb/connectordb-python
|
python
|
https://github.com/connectordb/connectordb-python/blob/2092b0cb30898139a247176bcf433d5a4abde7cb/connectordb/logger.py#L361-L379
|
[
"def",
"data",
"(",
"self",
")",
":",
"c",
"=",
"self",
".",
"database",
".",
"cursor",
"(",
")",
"c",
".",
"execute",
"(",
"\"SELECT userdatajson FROM metadata;\"",
")",
"return",
"json",
".",
"loads",
"(",
"next",
"(",
"c",
")",
"[",
"0",
"]",
")"
] |
2092b0cb30898139a247176bcf433d5a4abde7cb
|
test
|
read
|
Build a file path from *paths* and return the contents.
|
setup.py
|
def read(*paths):
"""Build a file path from *paths* and return the contents."""
filename = os.path.join(*paths)
with codecs.open(filename, mode='r', encoding='utf-8') as handle:
return handle.read()
|
def read(*paths):
"""Build a file path from *paths* and return the contents."""
filename = os.path.join(*paths)
with codecs.open(filename, mode='r', encoding='utf-8') as handle:
return handle.read()
|
[
"Build",
"a",
"file",
"path",
"from",
"*",
"paths",
"*",
"and",
"return",
"the",
"contents",
"."
] |
omaciel/pytest-fauxfactory
|
python
|
https://github.com/omaciel/pytest-fauxfactory/blob/4365f521e7d8a6db00bdc9a02743467aa5bd1d72/setup.py#L9-L13
|
[
"def",
"read",
"(",
"*",
"paths",
")",
":",
"filename",
"=",
"os",
".",
"path",
".",
"join",
"(",
"*",
"paths",
")",
"with",
"codecs",
".",
"open",
"(",
"filename",
",",
"mode",
"=",
"'r'",
",",
"encoding",
"=",
"'utf-8'",
")",
"as",
"handle",
":",
"return",
"handle",
".",
"read",
"(",
")"
] |
4365f521e7d8a6db00bdc9a02743467aa5bd1d72
|
test
|
download_url
|
Downloads URL, can pass in file://, http://, s3://, ftp://, gnos://cghub/analysisID, or gnos:///analysisID
If downloading S3 URLs, the S3AM binary must be on the PATH
:param toil.job.Job job: Toil job that is calling this function
:param str url: URL to download from
:param str work_dir: Directory to download file to
:param str name: Name of output file, if None, basename of URL is used
:param str s3_key_path: Path to 32-byte encryption key if url points to S3 file that uses SSE-C
:param str cghub_key_path: Path to cghub key used to download from CGHub.
:return: Path to the downloaded file
:rtype: str
|
src/toil_lib/urls.py
|
def download_url(job, url, work_dir='.', name=None, s3_key_path=None, cghub_key_path=None):
"""
    Downloads URL, can pass in file://, http://, s3://, ftp://, gnos://cghub/analysisID, or gnos:///analysisID
If downloading S3 URLs, the S3AM binary must be on the PATH
:param toil.job.Job job: Toil job that is calling this function
:param str url: URL to download from
:param str work_dir: Directory to download file to
:param str name: Name of output file, if None, basename of URL is used
:param str s3_key_path: Path to 32-byte encryption key if url points to S3 file that uses SSE-C
:param str cghub_key_path: Path to cghub key used to download from CGHub.
:return: Path to the downloaded file
:rtype: str
"""
file_path = os.path.join(work_dir, name) if name else os.path.join(work_dir, os.path.basename(url))
if cghub_key_path:
_download_with_genetorrent(job, url, file_path, cghub_key_path)
elif urlparse(url).scheme == 's3':
_s3am_with_retry(job, num_cores=1, file_path=file_path, s3_url=url, mode='download', s3_key_path=s3_key_path)
elif urlparse(url).scheme == 'file':
shutil.copy(urlparse(url).path, file_path)
else:
subprocess.check_call(['curl', '-fs', '--retry', '5', '--create-dir', url, '-o', file_path])
assert os.path.exists(file_path)
return file_path
|
def download_url(job, url, work_dir='.', name=None, s3_key_path=None, cghub_key_path=None):
"""
    Downloads URL, can pass in file://, http://, s3://, ftp://, gnos://cghub/analysisID, or gnos:///analysisID
If downloading S3 URLs, the S3AM binary must be on the PATH
:param toil.job.Job job: Toil job that is calling this function
:param str url: URL to download from
:param str work_dir: Directory to download file to
:param str name: Name of output file, if None, basename of URL is used
:param str s3_key_path: Path to 32-byte encryption key if url points to S3 file that uses SSE-C
:param str cghub_key_path: Path to cghub key used to download from CGHub.
:return: Path to the downloaded file
:rtype: str
"""
file_path = os.path.join(work_dir, name) if name else os.path.join(work_dir, os.path.basename(url))
if cghub_key_path:
_download_with_genetorrent(job, url, file_path, cghub_key_path)
elif urlparse(url).scheme == 's3':
_s3am_with_retry(job, num_cores=1, file_path=file_path, s3_url=url, mode='download', s3_key_path=s3_key_path)
elif urlparse(url).scheme == 'file':
shutil.copy(urlparse(url).path, file_path)
else:
subprocess.check_call(['curl', '-fs', '--retry', '5', '--create-dir', url, '-o', file_path])
assert os.path.exists(file_path)
return file_path
|
[
"Downloads",
"URL",
"can",
"pass",
"in",
"file",
":",
"//",
"http",
":",
"//",
"s3",
":",
"//",
"or",
"ftp",
":",
"//",
"gnos",
":",
"//",
"cghub",
"/",
"analysisID",
"or",
"gnos",
":",
"///",
"analysisID",
"If",
"downloading",
"S3",
"URLs",
"the",
"S3AM",
"binary",
"must",
"be",
"on",
"the",
"PATH"
] |
BD2KGenomics/toil-lib
|
python
|
https://github.com/BD2KGenomics/toil-lib/blob/022a615fc3dc98fc1aaa7bfd232409962ca44fbd/src/toil_lib/urls.py#L15-L39
|
[
"def",
"download_url",
"(",
"job",
",",
"url",
",",
"work_dir",
"=",
"'.'",
",",
"name",
"=",
"None",
",",
"s3_key_path",
"=",
"None",
",",
"cghub_key_path",
"=",
"None",
")",
":",
"file_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"work_dir",
",",
"name",
")",
"if",
"name",
"else",
"os",
".",
"path",
".",
"join",
"(",
"work_dir",
",",
"os",
".",
"path",
".",
"basename",
"(",
"url",
")",
")",
"if",
"cghub_key_path",
":",
"_download_with_genetorrent",
"(",
"job",
",",
"url",
",",
"file_path",
",",
"cghub_key_path",
")",
"elif",
"urlparse",
"(",
"url",
")",
".",
"scheme",
"==",
"'s3'",
":",
"_s3am_with_retry",
"(",
"job",
",",
"num_cores",
"=",
"1",
",",
"file_path",
"=",
"file_path",
",",
"s3_url",
"=",
"url",
",",
"mode",
"=",
"'download'",
",",
"s3_key_path",
"=",
"s3_key_path",
")",
"elif",
"urlparse",
"(",
"url",
")",
".",
"scheme",
"==",
"'file'",
":",
"shutil",
".",
"copy",
"(",
"urlparse",
"(",
"url",
")",
".",
"path",
",",
"file_path",
")",
"else",
":",
"subprocess",
".",
"check_call",
"(",
"[",
"'curl'",
",",
"'-fs'",
",",
"'--retry'",
",",
"'5'",
",",
"'--create-dir'",
",",
"url",
",",
"'-o'",
",",
"file_path",
"]",
")",
"assert",
"os",
".",
"path",
".",
"exists",
"(",
"file_path",
")",
"return",
"file_path"
] |
022a615fc3dc98fc1aaa7bfd232409962ca44fbd
|
test
|
download_url_job
|
Job version of `download_url`
|
src/toil_lib/urls.py
|
def download_url_job(job, url, name=None, s3_key_path=None, cghub_key_path=None):
"""Job version of `download_url`"""
work_dir = job.fileStore.getLocalTempDir()
fpath = download_url(job=job, url=url, work_dir=work_dir, name=name,
s3_key_path=s3_key_path, cghub_key_path=cghub_key_path)
return job.fileStore.writeGlobalFile(fpath)
|
def download_url_job(job, url, name=None, s3_key_path=None, cghub_key_path=None):
"""Job version of `download_url`"""
work_dir = job.fileStore.getLocalTempDir()
fpath = download_url(job=job, url=url, work_dir=work_dir, name=name,
s3_key_path=s3_key_path, cghub_key_path=cghub_key_path)
return job.fileStore.writeGlobalFile(fpath)
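
A sketch of chaining this job with a downstream job through a Toil promise; the URL and job-store path are placeholders, and Job.wrapJobFn/addChildJobFn/rv() are assumed from Toil's standard API.

# Hypothetical workflow wiring; URL and job store are placeholders.
from toil.common import Toil
from toil.job import Job

from toil_lib.urls import download_url_job
from toil_lib.tools.indexing import run_bwa_index

options = Job.Runner.getDefaultOptions('./jobstore')
download = Job.wrapJobFn(download_url_job, url='http://example.com/ref.fa')
download.addChildJobFn(run_bwa_index, download.rv())  # rv() resolves to the FileStoreID

with Toil(options) as toil:
    toil.start(download)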
|
[
"Job",
"version",
"of",
"download_url"
] |
BD2KGenomics/toil-lib
|
python
|
https://github.com/BD2KGenomics/toil-lib/blob/022a615fc3dc98fc1aaa7bfd232409962ca44fbd/src/toil_lib/urls.py#L42-L47
|
[
"def",
"download_url_job",
"(",
"job",
",",
"url",
",",
"name",
"=",
"None",
",",
"s3_key_path",
"=",
"None",
",",
"cghub_key_path",
"=",
"None",
")",
":",
"work_dir",
"=",
"job",
".",
"fileStore",
".",
"getLocalTempDir",
"(",
")",
"fpath",
"=",
"download_url",
"(",
"job",
"=",
"job",
",",
"url",
"=",
"url",
",",
"work_dir",
"=",
"work_dir",
",",
"name",
"=",
"name",
",",
"s3_key_path",
"=",
"s3_key_path",
",",
"cghub_key_path",
"=",
"cghub_key_path",
")",
"return",
"job",
".",
"fileStore",
".",
"writeGlobalFile",
"(",
"fpath",
")"
] |
022a615fc3dc98fc1aaa7bfd232409962ca44fbd
|
test
|
s3am_upload
|
Uploads a file to s3 via S3AM
S3AM binary must be on the PATH to use this function
For SSE-C encryption: provide a path to a 32-byte file
:param toil.job.Job job: Toil job that is calling this function
:param str fpath: Path to file to upload
:param str s3_dir: Output S3 path. Format: s3://bucket/[directory]
:param int num_cores: Number of cores to use for up/download with S3AM
:param str s3_key_path: (OPTIONAL) Path to 32-byte key to be used for SSE-C encryption
|
src/toil_lib/urls.py
|
def s3am_upload(job, fpath, s3_dir, num_cores=1, s3_key_path=None):
"""
Uploads a file to s3 via S3AM
S3AM binary must be on the PATH to use this function
For SSE-C encryption: provide a path to a 32-byte file
:param toil.job.Job job: Toil job that is calling this function
:param str fpath: Path to file to upload
    :param str s3_dir: Output S3 path. Format: s3://bucket/[directory]
:param int num_cores: Number of cores to use for up/download with S3AM
:param str s3_key_path: (OPTIONAL) Path to 32-byte key to be used for SSE-C encryption
"""
require(s3_dir.startswith('s3://'), 'Format of s3_dir (s3://) is incorrect: %s', s3_dir)
s3_dir = os.path.join(s3_dir, os.path.basename(fpath))
_s3am_with_retry(job=job, num_cores=num_cores, file_path=fpath,
s3_url=s3_dir, mode='upload', s3_key_path=s3_key_path)
|
def s3am_upload(job, fpath, s3_dir, num_cores=1, s3_key_path=None):
"""
Uploads a file to s3 via S3AM
S3AM binary must be on the PATH to use this function
For SSE-C encryption: provide a path to a 32-byte file
:param toil.job.Job job: Toil job that is calling this function
:param str fpath: Path to file to upload
    :param str s3_dir: Output S3 path. Format: s3://bucket/[directory]
:param int num_cores: Number of cores to use for up/download with S3AM
:param str s3_key_path: (OPTIONAL) Path to 32-byte key to be used for SSE-C encryption
"""
require(s3_dir.startswith('s3://'), 'Format of s3_dir (s3://) is incorrect: %s', s3_dir)
s3_dir = os.path.join(s3_dir, os.path.basename(fpath))
_s3am_with_retry(job=job, num_cores=num_cores, file_path=fpath,
s3_url=s3_dir, mode='upload', s3_key_path=s3_key_path)
|
[
"Uploads",
"a",
"file",
"to",
"s3",
"via",
"S3AM",
"S3AM",
"binary",
"must",
"be",
"on",
"the",
"PATH",
"to",
"use",
"this",
"function",
"For",
"SSE",
"-",
"C",
"encryption",
":",
"provide",
"a",
"path",
"to",
"a",
"32",
"-",
"byte",
"file"
] |
BD2KGenomics/toil-lib
|
python
|
https://github.com/BD2KGenomics/toil-lib/blob/022a615fc3dc98fc1aaa7bfd232409962ca44fbd/src/toil_lib/urls.py#L63-L78
|
[
"def",
"s3am_upload",
"(",
"job",
",",
"fpath",
",",
"s3_dir",
",",
"num_cores",
"=",
"1",
",",
"s3_key_path",
"=",
"None",
")",
":",
"require",
"(",
"s3_dir",
".",
"startswith",
"(",
"'s3://'",
")",
",",
"'Format of s3_dir (s3://) is incorrect: %s'",
",",
"s3_dir",
")",
"s3_dir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"s3_dir",
",",
"os",
".",
"path",
".",
"basename",
"(",
"fpath",
")",
")",
"_s3am_with_retry",
"(",
"job",
"=",
"job",
",",
"num_cores",
"=",
"num_cores",
",",
"file_path",
"=",
"fpath",
",",
"s3_url",
"=",
"s3_dir",
",",
"mode",
"=",
"'upload'",
",",
"s3_key_path",
"=",
"s3_key_path",
")"
] |
022a615fc3dc98fc1aaa7bfd232409962ca44fbd
|
test
|
s3am_upload_job
|
Job version of s3am_upload
|
src/toil_lib/urls.py
|
def s3am_upload_job(job, file_id, file_name, s3_dir, s3_key_path=None):
"""Job version of s3am_upload"""
work_dir = job.fileStore.getLocalTempDir()
fpath = job.fileStore.readGlobalFile(file_id, os.path.join(work_dir, file_name))
s3am_upload(job=job, fpath=fpath, s3_dir=s3_dir, num_cores=job.cores, s3_key_path=s3_key_path)
|
def s3am_upload_job(job, file_id, file_name, s3_dir, s3_key_path=None):
"""Job version of s3am_upload"""
work_dir = job.fileStore.getLocalTempDir()
fpath = job.fileStore.readGlobalFile(file_id, os.path.join(work_dir, file_name))
s3am_upload(job=job, fpath=fpath, s3_dir=s3_dir, num_cores=job.cores, s3_key_path=s3_key_path)
|
[
"Job",
"version",
"of",
"s3am_upload"
] |
BD2KGenomics/toil-lib
|
python
|
https://github.com/BD2KGenomics/toil-lib/blob/022a615fc3dc98fc1aaa7bfd232409962ca44fbd/src/toil_lib/urls.py#L81-L85
|
[
"def",
"s3am_upload_job",
"(",
"job",
",",
"file_id",
",",
"file_name",
",",
"s3_dir",
",",
"s3_key_path",
"=",
"None",
")",
":",
"work_dir",
"=",
"job",
".",
"fileStore",
".",
"getLocalTempDir",
"(",
")",
"fpath",
"=",
"job",
".",
"fileStore",
".",
"readGlobalFile",
"(",
"file_id",
",",
"os",
".",
"path",
".",
"join",
"(",
"work_dir",
",",
"file_name",
")",
")",
"s3am_upload",
"(",
"job",
"=",
"job",
",",
"fpath",
"=",
"fpath",
",",
"s3_dir",
"=",
"s3_dir",
",",
"num_cores",
"=",
"job",
".",
"cores",
",",
"s3_key_path",
"=",
"s3_key_path",
")"
] |
022a615fc3dc98fc1aaa7bfd232409962ca44fbd
|
test
|
_s3am_with_retry
|
Run s3am with 3 retries
:param toil.job.Job job: Toil job that is calling this function
:param int num_cores: Number of cores to pass to upload/download slots
:param str file_path: Full path to the file
:param str s3_url: S3 URL
:param str mode: Mode to run s3am in. Either "upload" or "download"
:param str s3_key_path: Path to the SSE-C key if using encryption
|
src/toil_lib/urls.py
|
def _s3am_with_retry(job, num_cores, file_path, s3_url, mode='upload', s3_key_path=None):
"""
Run s3am with 3 retries
:param toil.job.Job job: Toil job that is calling this function
:param int num_cores: Number of cores to pass to upload/download slots
:param str file_path: Full path to the file
:param str s3_url: S3 URL
:param str mode: Mode to run s3am in. Either "upload" or "download"
:param str s3_key_path: Path to the SSE-C key if using encryption
"""
container_key_file = None
# try to find suitable credentials
base_boto = '.boto'
base_aws = '.aws/credentials'
docker_home_dir = '/root'
# map existing credential paths to their mount point within the container
credentials_to_mount = {os.path.join(os.path.expanduser("~"), path): os.path.join(docker_home_dir, path)
for path in [base_aws, base_boto]
if os.path.exists(os.path.join(os.path.expanduser("~"), path))}
require(os.path.isabs(file_path), "'file_path' parameter must be an absolute path")
dir_path, file_name = file_path.rsplit('/', 1)
# Mirror user specified paths to simplify debugging
container_dir_path = '/data' + dir_path
container_file = os.path.join(container_dir_path, file_name)
mounts = {dir_path: container_dir_path}
if s3_key_path:
require(os.path.isabs(s3_key_path), "'s3_key_path' parameter must be an absolute path")
key_dir_path, key_name = s3_key_path.rsplit('/', 1)
container_key_dir_path = '/data' + key_dir_path
container_key_file = os.path.join(container_key_dir_path, key_name)
# if the key directory is identical to the file directory this assignment is idempotent
mounts[key_dir_path] = container_key_dir_path
for k, v in credentials_to_mount.iteritems():
mounts[k] = v
arguments = []
url_arguments = []
if mode == 'upload':
arguments.extend(['upload', '--force', '--upload-slots=%s' % num_cores, '--exists=overwrite'])
url_arguments.extend(['file://' + container_file, s3_url])
elif mode == 'download':
arguments.extend(['download', '--file-exists=overwrite', '--download-exists=discard'])
url_arguments.extend([s3_url, 'file://' + container_file])
else:
raise ValueError('Improper mode specified. mode must be equal to "upload" or "download".')
if s3_key_path:
arguments.extend(['--sse-key-is-master', '--sse-key-file', container_key_file])
arguments.extend(['--part-size=50M', '--download-slots=%s' % num_cores])
# finally, add the url path arguments after all the tool parameters are set
arguments.extend(url_arguments)
# Pass credential-related environment variables into container
env = {}
if 'AWS_PROFILE' in os.environ:
env['AWS_PROFILE'] = os.environ['AWS_PROFILE']
# Create parameters to pass to Docker
docker_parameters = ['--rm', '--log-driver', 'none']
if mounts:
for k, v in mounts.iteritems():
docker_parameters.extend(['-v', k + ':' + v])
if env:
for e, v in env.iteritems():
docker_parameters.extend(['-e', '{}={}'.format(e, v)])
# Run s3am with retries
retry_count = 3
for i in xrange(retry_count):
try:
dockerCall(job=job, tool='quay.io/ucsc_cgl/s3am:2.0--fed932897e7fd40f4ec878362e5dd6afe15caaf0',
parameters=arguments, dockerParameters=docker_parameters)
except subprocess.CalledProcessError:
_log.debug('S3AM %s failed', mode, exc_info=True)
else:
_log.debug('S3AM %s succeeded', mode)
return
raise RuntimeError("S3AM failed to %s after %i retries with arguments %s. Enable 'debug' "
"level logging to see more information about the failed attempts." %
(mode, retry_count, arguments))
|
def _s3am_with_retry(job, num_cores, file_path, s3_url, mode='upload', s3_key_path=None):
"""
Run s3am with 3 retries
:param toil.job.Job job: Toil job that is calling this function
:param int num_cores: Number of cores to pass to upload/download slots
:param str file_path: Full path to the file
:param str s3_url: S3 URL
:param str mode: Mode to run s3am in. Either "upload" or "download"
:param str s3_key_path: Path to the SSE-C key if using encryption
"""
container_key_file = None
# try to find suitable credentials
base_boto = '.boto'
base_aws = '.aws/credentials'
docker_home_dir = '/root'
# map existing credential paths to their mount point within the container
credentials_to_mount = {os.path.join(os.path.expanduser("~"), path): os.path.join(docker_home_dir, path)
for path in [base_aws, base_boto]
if os.path.exists(os.path.join(os.path.expanduser("~"), path))}
require(os.path.isabs(file_path), "'file_path' parameter must be an absolute path")
dir_path, file_name = file_path.rsplit('/', 1)
# Mirror user specified paths to simplify debugging
container_dir_path = '/data' + dir_path
container_file = os.path.join(container_dir_path, file_name)
mounts = {dir_path: container_dir_path}
if s3_key_path:
require(os.path.isabs(s3_key_path), "'s3_key_path' parameter must be an absolute path")
key_dir_path, key_name = s3_key_path.rsplit('/', 1)
container_key_dir_path = '/data' + key_dir_path
container_key_file = os.path.join(container_key_dir_path, key_name)
# if the key directory is identical to the file directory this assignment is idempotent
mounts[key_dir_path] = container_key_dir_path
for k, v in credentials_to_mount.iteritems():
mounts[k] = v
arguments = []
url_arguments = []
if mode == 'upload':
arguments.extend(['upload', '--force', '--upload-slots=%s' % num_cores, '--exists=overwrite'])
url_arguments.extend(['file://' + container_file, s3_url])
elif mode == 'download':
arguments.extend(['download', '--file-exists=overwrite', '--download-exists=discard'])
url_arguments.extend([s3_url, 'file://' + container_file])
else:
raise ValueError('Improper mode specified. mode must be equal to "upload" or "download".')
if s3_key_path:
arguments.extend(['--sse-key-is-master', '--sse-key-file', container_key_file])
arguments.extend(['--part-size=50M', '--download-slots=%s' % num_cores])
# finally, add the url path arguments after all the tool parameters are set
arguments.extend(url_arguments)
# Pass credential-related environment variables into container
env = {}
if 'AWS_PROFILE' in os.environ:
env['AWS_PROFILE'] = os.environ['AWS_PROFILE']
# Create parameters to pass to Docker
docker_parameters = ['--rm', '--log-driver', 'none']
if mounts:
for k, v in mounts.iteritems():
docker_parameters.extend(['-v', k + ':' + v])
if env:
for e, v in env.iteritems():
docker_parameters.extend(['-e', '{}={}'.format(e, v)])
# Run s3am with retries
retry_count = 3
for i in xrange(retry_count):
try:
dockerCall(job=job, tool='quay.io/ucsc_cgl/s3am:2.0--fed932897e7fd40f4ec878362e5dd6afe15caaf0',
parameters=arguments, dockerParameters=docker_parameters)
except subprocess.CalledProcessError:
_log.debug('S3AM %s failed', mode, exc_info=True)
else:
_log.debug('S3AM %s succeeded', mode)
return
raise RuntimeError("S3AM failed to %s after %i retries with arguments %s. Enable 'debug' "
"level logging to see more information about the failed attempts." %
(mode, retry_count, arguments))
|
[
"Run",
"s3am",
"with",
"3",
"retries"
] |
BD2KGenomics/toil-lib
|
python
|
https://github.com/BD2KGenomics/toil-lib/blob/022a615fc3dc98fc1aaa7bfd232409962ca44fbd/src/toil_lib/urls.py#L88-L163
|
[
"def",
"_s3am_with_retry",
"(",
"job",
",",
"num_cores",
",",
"file_path",
",",
"s3_url",
",",
"mode",
"=",
"'upload'",
",",
"s3_key_path",
"=",
"None",
")",
":",
"container_key_file",
"=",
"None",
"# try to find suitable credentials",
"base_boto",
"=",
"'.boto'",
"base_aws",
"=",
"'.aws/credentials'",
"docker_home_dir",
"=",
"'/root'",
"# map existing credential paths to their mount point within the container",
"credentials_to_mount",
"=",
"{",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"path",
".",
"expanduser",
"(",
"\"~\"",
")",
",",
"path",
")",
":",
"os",
".",
"path",
".",
"join",
"(",
"docker_home_dir",
",",
"path",
")",
"for",
"path",
"in",
"[",
"base_aws",
",",
"base_boto",
"]",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"path",
".",
"expanduser",
"(",
"\"~\"",
")",
",",
"path",
")",
")",
"}",
"require",
"(",
"os",
".",
"path",
".",
"isabs",
"(",
"file_path",
")",
",",
"\"'file_path' parameter must be an absolute path\"",
")",
"dir_path",
",",
"file_name",
"=",
"file_path",
".",
"rsplit",
"(",
"'/'",
",",
"1",
")",
"# Mirror user specified paths to simplify debugging",
"container_dir_path",
"=",
"'/data'",
"+",
"dir_path",
"container_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"container_dir_path",
",",
"file_name",
")",
"mounts",
"=",
"{",
"dir_path",
":",
"container_dir_path",
"}",
"if",
"s3_key_path",
":",
"require",
"(",
"os",
".",
"path",
".",
"isabs",
"(",
"s3_key_path",
")",
",",
"\"'s3_key_path' parameter must be an absolute path\"",
")",
"key_dir_path",
",",
"key_name",
"=",
"s3_key_path",
".",
"rsplit",
"(",
"'/'",
",",
"1",
")",
"container_key_dir_path",
"=",
"'/data'",
"+",
"key_dir_path",
"container_key_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"container_key_dir_path",
",",
"key_name",
")",
"# if the key directory is identical to the file directory this assignment is idempotent",
"mounts",
"[",
"key_dir_path",
"]",
"=",
"container_key_dir_path",
"for",
"k",
",",
"v",
"in",
"credentials_to_mount",
".",
"iteritems",
"(",
")",
":",
"mounts",
"[",
"k",
"]",
"=",
"v",
"arguments",
"=",
"[",
"]",
"url_arguments",
"=",
"[",
"]",
"if",
"mode",
"==",
"'upload'",
":",
"arguments",
".",
"extend",
"(",
"[",
"'upload'",
",",
"'--force'",
",",
"'--upload-slots=%s'",
"%",
"num_cores",
",",
"'--exists=overwrite'",
"]",
")",
"url_arguments",
".",
"extend",
"(",
"[",
"'file://'",
"+",
"container_file",
",",
"s3_url",
"]",
")",
"elif",
"mode",
"==",
"'download'",
":",
"arguments",
".",
"extend",
"(",
"[",
"'download'",
",",
"'--file-exists=overwrite'",
",",
"'--download-exists=discard'",
"]",
")",
"url_arguments",
".",
"extend",
"(",
"[",
"s3_url",
",",
"'file://'",
"+",
"container_file",
"]",
")",
"else",
":",
"raise",
"ValueError",
"(",
"'Improper mode specified. mode must be equal to \"upload\" or \"download\".'",
")",
"if",
"s3_key_path",
":",
"arguments",
".",
"extend",
"(",
"[",
"'--sse-key-is-master'",
",",
"'--sse-key-file'",
",",
"container_key_file",
"]",
")",
"arguments",
".",
"extend",
"(",
"[",
"'--part-size=50M'",
",",
"'--download-slots=%s'",
"%",
"num_cores",
"]",
")",
"# finally, add the url path arguments after all the tool parameters are set",
"arguments",
".",
"extend",
"(",
"url_arguments",
")",
"# Pass credential-related environment variables into container",
"env",
"=",
"{",
"}",
"if",
"'AWS_PROFILE'",
"in",
"os",
".",
"environ",
":",
"env",
"[",
"'AWS_PROFILE'",
"]",
"=",
"os",
".",
"environ",
"[",
"'AWS_PROFILE'",
"]",
"# Create parameters to pass to Docker",
"docker_parameters",
"=",
"[",
"'--rm'",
",",
"'--log-driver'",
",",
"'none'",
"]",
"if",
"mounts",
":",
"for",
"k",
",",
"v",
"in",
"mounts",
".",
"iteritems",
"(",
")",
":",
"docker_parameters",
".",
"extend",
"(",
"[",
"'-v'",
",",
"k",
"+",
"':'",
"+",
"v",
"]",
")",
"if",
"env",
":",
"for",
"e",
",",
"v",
"in",
"env",
".",
"iteritems",
"(",
")",
":",
"docker_parameters",
".",
"extend",
"(",
"[",
"'-e'",
",",
"'{}={}'",
".",
"format",
"(",
"e",
",",
"v",
")",
"]",
")",
"# Run s3am with retries",
"retry_count",
"=",
"3",
"for",
"i",
"in",
"xrange",
"(",
"retry_count",
")",
":",
"try",
":",
"dockerCall",
"(",
"job",
"=",
"job",
",",
"tool",
"=",
"'quay.io/ucsc_cgl/s3am:2.0--fed932897e7fd40f4ec878362e5dd6afe15caaf0'",
",",
"parameters",
"=",
"arguments",
",",
"dockerParameters",
"=",
"docker_parameters",
")",
"except",
"subprocess",
".",
"CalledProcessError",
":",
"_log",
".",
"debug",
"(",
"'S3AM %s failed'",
",",
"mode",
",",
"exc_info",
"=",
"True",
")",
"else",
":",
"_log",
".",
"debug",
"(",
"'S3AM %s succeeded'",
",",
"mode",
")",
"return",
"raise",
"RuntimeError",
"(",
"\"S3AM failed to %s after %i retries with arguments %s. Enable 'debug' \"",
"\"level logging to see more information about the failed attempts.\"",
"%",
"(",
"mode",
",",
"retry_count",
",",
"arguments",
")",
")"
] |
022a615fc3dc98fc1aaa7bfd232409962ca44fbd
|
test
|
labels
|
Output the names to the given file
|
src/ols_client/cli.py
|
def labels(ontology, output, ols_base):
"""Output the names to the given file"""
for label in get_labels(ontology=ontology, ols_base=ols_base):
click.echo(label, file=output)
|
def labels(ontology, output, ols_base):
"""Output the names to the given file"""
for label in get_labels(ontology=ontology, ols_base=ols_base):
click.echo(label, file=output)
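
This click command is a thin wrapper around get_labels; a sketch of calling that client function directly is below. The import path, the 'go' ontology name, and the EBI OLS base URL are assumptions, not taken from this file.

# Hypothetical direct use of the wrapped client call; names and URL are assumptions.
from ols_client import get_labels

for label in get_labels(ontology='go', ols_base='https://www.ebi.ac.uk/ols/api'):
    print(label)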
|
[
"Output",
"the",
"names",
"to",
"the",
"given",
"file"
] |
cthoyt/ols-client
|
python
|
https://github.com/cthoyt/ols-client/blob/8c6bb54888675652d25324184967392d00d128fc/src/ols_client/cli.py#L19-L22
|
[
"def",
"labels",
"(",
"ontology",
",",
"output",
",",
"ols_base",
")",
":",
"for",
"label",
"in",
"get_labels",
"(",
"ontology",
"=",
"ontology",
",",
"ols_base",
"=",
"ols_base",
")",
":",
"click",
".",
"echo",
"(",
"label",
",",
"file",
"=",
"output",
")"
] |
8c6bb54888675652d25324184967392d00d128fc
|
test
|
tree
|
Output the parent-child relations to the given file
|
src/ols_client/cli.py
|
def tree(ontology, output, ols_base):
"""Output the parent-child relations to the given file"""
for parent, child in get_hierarchy(ontology=ontology, ols_base=ols_base):
click.echo('{}\t{}'.format(parent, child), file=output)
|
def tree(ontology, output, ols_base):
"""Output the parent-child relations to the given file"""
for parent, child in get_hierarchy(ontology=ontology, ols_base=ols_base):
click.echo('{}\t{}'.format(parent, child), file=output)
|
[
"Output",
"the",
"parent",
"-",
"child",
"relations",
"to",
"the",
"given",
"file"
] |
cthoyt/ols-client
|
python
|
https://github.com/cthoyt/ols-client/blob/8c6bb54888675652d25324184967392d00d128fc/src/ols_client/cli.py#L29-L32
|
[
"def",
"tree",
"(",
"ontology",
",",
"output",
",",
"ols_base",
")",
":",
"for",
"parent",
",",
"child",
"in",
"get_hierarchy",
"(",
"ontology",
"=",
"ontology",
",",
"ols_base",
"=",
"ols_base",
")",
":",
"click",
".",
"echo",
"(",
"'{}\\t{}'",
".",
"format",
"(",
"parent",
",",
"child",
")",
",",
"file",
"=",
"output",
")"
] |
8c6bb54888675652d25324184967392d00d128fc
|
test
|
get_mean_insert_size
|
Function taken from MC3 Pipeline
|
src/toil_lib/tools/__init__.py
|
def get_mean_insert_size(work_dir, bam_name):
"""Function taken from MC3 Pipeline"""
cmd = "docker run --log-driver=none --rm -v {}:/data quay.io/ucsc_cgl/samtools " \
"view -f66 {}".format(work_dir, os.path.join(work_dir, bam_name))
process = subprocess.Popen(args=cmd, shell=True, stdout=subprocess.PIPE)
b_sum = 0.0
b_count = 0.0
while True:
line = process.stdout.readline()
if not line:
break
tmp = line.split("\t")
if abs(long(tmp[8])) < 10000:
b_sum += abs(long(tmp[8]))
b_count += 1
process.wait()
try:
mean = b_sum / b_count
except ZeroDivisionError:
mean = 150
print "Using insert size: %d" % mean
return int(mean)
|
def get_mean_insert_size(work_dir, bam_name):
"""Function taken from MC3 Pipeline"""
cmd = "docker run --log-driver=none --rm -v {}:/data quay.io/ucsc_cgl/samtools " \
"view -f66 {}".format(work_dir, os.path.join(work_dir, bam_name))
process = subprocess.Popen(args=cmd, shell=True, stdout=subprocess.PIPE)
b_sum = 0.0
b_count = 0.0
while True:
line = process.stdout.readline()
if not line:
break
tmp = line.split("\t")
if abs(long(tmp[8])) < 10000:
b_sum += abs(long(tmp[8]))
b_count += 1
process.wait()
try:
mean = b_sum / b_count
except ZeroDivisionError:
mean = 150
print "Using insert size: %d" % mean
return int(mean)
|
[
"Function",
"taken",
"from",
"MC3",
"Pipeline"
] |
BD2KGenomics/toil-lib
|
python
|
https://github.com/BD2KGenomics/toil-lib/blob/022a615fc3dc98fc1aaa7bfd232409962ca44fbd/src/toil_lib/tools/__init__.py#L5-L26
|
[
"def",
"get_mean_insert_size",
"(",
"work_dir",
",",
"bam_name",
")",
":",
"cmd",
"=",
"\"docker run --log-driver=none --rm -v {}:/data quay.io/ucsc_cgl/samtools \"",
"\"view -f66 {}\"",
".",
"format",
"(",
"work_dir",
",",
"os",
".",
"path",
".",
"join",
"(",
"work_dir",
",",
"bam_name",
")",
")",
"process",
"=",
"subprocess",
".",
"Popen",
"(",
"args",
"=",
"cmd",
",",
"shell",
"=",
"True",
",",
"stdout",
"=",
"subprocess",
".",
"PIPE",
")",
"b_sum",
"=",
"0.0",
"b_count",
"=",
"0.0",
"while",
"True",
":",
"line",
"=",
"process",
".",
"stdout",
".",
"readline",
"(",
")",
"if",
"not",
"line",
":",
"break",
"tmp",
"=",
"line",
".",
"split",
"(",
"\"\\t\"",
")",
"if",
"abs",
"(",
"long",
"(",
"tmp",
"[",
"8",
"]",
")",
")",
"<",
"10000",
":",
"b_sum",
"+=",
"abs",
"(",
"long",
"(",
"tmp",
"[",
"8",
"]",
")",
")",
"b_count",
"+=",
"1",
"process",
".",
"wait",
"(",
")",
"try",
":",
"mean",
"=",
"b_sum",
"/",
"b_count",
"except",
"ZeroDivisionError",
":",
"mean",
"=",
"150",
"print",
"\"Using insert size: %d\"",
"%",
"mean",
"return",
"int",
"(",
"mean",
")"
] |
022a615fc3dc98fc1aaa7bfd232409962ca44fbd
|
test
|
partitions
|
>>> list(partitions([], 10))
[]
>>> list(partitions([1,2,3,4,5], 1))
[[1], [2], [3], [4], [5]]
>>> list(partitions([1,2,3,4,5], 2))
[[1, 2], [3, 4], [5]]
>>> list(partitions([1,2,3,4,5], 5))
[[1, 2, 3, 4, 5]]
:param list l: List to be partitioned
:param int partition_size: Size of partitions
|
src/toil_lib/__init__.py
|
def partitions(l, partition_size):
"""
>>> list(partitions([], 10))
[]
>>> list(partitions([1,2,3,4,5], 1))
[[1], [2], [3], [4], [5]]
>>> list(partitions([1,2,3,4,5], 2))
[[1, 2], [3, 4], [5]]
>>> list(partitions([1,2,3,4,5], 5))
[[1, 2, 3, 4, 5]]
:param list l: List to be partitioned
:param int partition_size: Size of partitions
"""
for i in xrange(0, len(l), partition_size):
yield l[i:i + partition_size]
|
def partitions(l, partition_size):
"""
>>> list(partitions([], 10))
[]
>>> list(partitions([1,2,3,4,5], 1))
[[1], [2], [3], [4], [5]]
>>> list(partitions([1,2,3,4,5], 2))
[[1, 2], [3, 4], [5]]
>>> list(partitions([1,2,3,4,5], 5))
[[1, 2, 3, 4, 5]]
:param list l: List to be partitioned
:param int partition_size: Size of partitions
"""
for i in xrange(0, len(l), partition_size):
yield l[i:i + partition_size]
|
[
">>>",
"list",
"(",
"partitions",
"(",
"[]",
"10",
"))",
"[]",
">>>",
"list",
"(",
"partitions",
"(",
"[",
"1",
"2",
"3",
"4",
"5",
"]",
"1",
"))",
"[[",
"1",
"]",
"[",
"2",
"]",
"[",
"3",
"]",
"[",
"4",
"]",
"[",
"5",
"]]",
">>>",
"list",
"(",
"partitions",
"(",
"[",
"1",
"2",
"3",
"4",
"5",
"]",
"2",
"))",
"[[",
"1",
"2",
"]",
"[",
"3",
"4",
"]",
"[",
"5",
"]]",
">>>",
"list",
"(",
"partitions",
"(",
"[",
"1",
"2",
"3",
"4",
"5",
"]",
"5",
"))",
"[[",
"1",
"2",
"3",
"4",
"5",
"]]"
] |
BD2KGenomics/toil-lib
|
python
|
https://github.com/BD2KGenomics/toil-lib/blob/022a615fc3dc98fc1aaa7bfd232409962ca44fbd/src/toil_lib/__init__.py#L25-L40
|
[
"def",
"partitions",
"(",
"l",
",",
"partition_size",
")",
":",
"for",
"i",
"in",
"xrange",
"(",
"0",
",",
"len",
"(",
"l",
")",
",",
"partition_size",
")",
":",
"yield",
"l",
"[",
"i",
":",
"i",
"+",
"partition_size",
"]"
] |
022a615fc3dc98fc1aaa7bfd232409962ca44fbd
|
test
|
required_length
|
For use with argparse's action argument. Allows setting a range for nargs.
Example: nargs='+', action=required_length(2, 3)
:param int nmin: Minimum number of arguments
:param int nmax: Maximum number of arguments
:return: RequiredLength object
|
src/toil_lib/__init__.py
|
def required_length(nmin, nmax):
"""
For use with argparse's action argument. Allows setting a range for nargs.
Example: nargs='+', action=required_length(2, 3)
:param int nmin: Minimum number of arguments
:param int nmax: Maximum number of arguments
:return: RequiredLength object
"""
class RequiredLength(argparse.Action):
def __call__(self, parser, args, values, option_string=None):
if not nmin <= len(values) <= nmax:
msg = 'argument "{f}" requires between {nmin} and {nmax} arguments'.format(
f=self.dest, nmin=nmin, nmax=nmax)
raise argparse.ArgumentTypeError(msg)
setattr(args, self.dest, values)
return RequiredLength
|
def required_length(nmin, nmax):
"""
For use with argparse's action argument. Allows setting a range for nargs.
Example: nargs='+', action=required_length(2, 3)
:param int nmin: Minimum number of arguments
:param int nmax: Maximum number of arguments
:return: RequiredLength object
"""
class RequiredLength(argparse.Action):
def __call__(self, parser, args, values, option_string=None):
if not nmin <= len(values) <= nmax:
msg = 'argument "{f}" requires between {nmin} and {nmax} arguments'.format(
f=self.dest, nmin=nmin, nmax=nmax)
raise argparse.ArgumentTypeError(msg)
setattr(args, self.dest, values)
return RequiredLength
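
A runnable sketch of the argparse pattern spelled out in the docstring; the option name and sample values are placeholders.

# Hypothetical parser using the factory above; option name is a placeholder.
import argparse

from toil_lib import required_length

parser = argparse.ArgumentParser()
parser.add_argument('--samples', nargs='+', action=required_length(2, 3))

parser.parse_args(['--samples', 'a', 'b'])      # accepted: 2 values
# parser.parse_args(['--samples', 'a'])         # raises ArgumentTypeError: needs 2-3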
|
[
"For",
"use",
"with",
"argparse",
"s",
"action",
"argument",
".",
"Allows",
"setting",
"a",
"range",
"for",
"nargs",
".",
"Example",
":",
"nargs",
"=",
"+",
"action",
"=",
"required_length",
"(",
"2",
"3",
")"
] |
BD2KGenomics/toil-lib
|
python
|
https://github.com/BD2KGenomics/toil-lib/blob/022a615fc3dc98fc1aaa7bfd232409962ca44fbd/src/toil_lib/__init__.py#L54-L70
|
[
"def",
"required_length",
"(",
"nmin",
",",
"nmax",
")",
":",
"class",
"RequiredLength",
"(",
"argparse",
".",
"Action",
")",
":",
"def",
"__call__",
"(",
"self",
",",
"parser",
",",
"args",
",",
"values",
",",
"option_string",
"=",
"None",
")",
":",
"if",
"not",
"nmin",
"<=",
"len",
"(",
"values",
")",
"<=",
"nmax",
":",
"msg",
"=",
"'argument \"{f}\" requires between {nmin} and {nmax} arguments'",
".",
"format",
"(",
"f",
"=",
"self",
".",
"dest",
",",
"nmin",
"=",
"nmin",
",",
"nmax",
"=",
"nmax",
")",
"raise",
"argparse",
".",
"ArgumentTypeError",
"(",
"msg",
")",
"setattr",
"(",
"args",
",",
"self",
".",
"dest",
",",
"values",
")",
"return",
"RequiredLength"
] |
022a615fc3dc98fc1aaa7bfd232409962ca44fbd
|
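A usage sketch for required_length, following the docstring's own example; the parser and the --samples argument name are illustrative assumptions, not part of toil-lib:

import argparse
from toil_lib import required_length  # assumes the package exposes it at this path

parser = argparse.ArgumentParser()
# Accept between 2 and 3 values for --samples; more or fewer raises ArgumentTypeError.
parser.add_argument('--samples', nargs='+', action=required_length(2, 3))
args = parser.parse_args(['--samples', 'a', 'b', 'c'])
print(args.samples)  # ['a', 'b', 'c']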
test
|
current_docker_container_id
|
Returns a string that represents the container ID of the current Docker container. If this
function is invoked outside of a container a NotInsideContainerError is raised.
>>> import subprocess
>>> import sys
>>> a = subprocess.check_output(['docker', 'run', '-v',
... sys.modules[__name__].__file__ + ':/foo.py',
... 'python:2.7.12','python', '-c',
... 'from foo import current_docker_container_id;\\
... print current_docker_container_id()'])
The int call will fail if a is not a valid hex string.
>>> int(a, 16) > 0
True
|
src/toil_lib/__init__.py
|
def current_docker_container_id():
"""
Returns a string that represents the container ID of the current Docker container. If this
function is invoked outside of a container a NotInsideContainerError is raised.
>>> import subprocess
>>> import sys
>>> a = subprocess.check_output(['docker', 'run', '-v',
... sys.modules[__name__].__file__ + ':/foo.py',
... 'python:2.7.12','python', '-c',
... 'from foo import current_docker_container_id;\\
... print current_docker_container_id()'])
    The int call will fail if a is not a valid hex string.
>>> int(a, 16) > 0
True
"""
try:
with open('/proc/1/cgroup', 'r') as readable:
raw = readable.read()
ids = set(re.compile('[0-9a-f]{12,}').findall(raw))
assert len(ids) == 1
return ids.pop()
except:
logging.exception('Failed to obtain current container ID')
raise NotInsideContainerError()
|
def current_docker_container_id():
"""
Returns a string that represents the container ID of the current Docker container. If this
function is invoked outside of a container a NotInsideContainerError is raised.
>>> import subprocess
>>> import sys
>>> a = subprocess.check_output(['docker', 'run', '-v',
... sys.modules[__name__].__file__ + ':/foo.py',
... 'python:2.7.12','python', '-c',
... 'from foo import current_docker_container_id;\\
... print current_docker_container_id()'])
    The int call will fail if a is not a valid hex string.
>>> int(a, 16) > 0
True
"""
try:
with open('/proc/1/cgroup', 'r') as readable:
raw = readable.read()
ids = set(re.compile('[0-9a-f]{12,}').findall(raw))
assert len(ids) == 1
return ids.pop()
except:
logging.exception('Failed to obtain current container ID')
raise NotInsideContainerError()
|
[
"Returns",
"a",
"string",
"that",
"represents",
"the",
"container",
"ID",
"of",
"the",
"current",
"Docker",
"container",
".",
"If",
"this",
"function",
"is",
"invoked",
"outside",
"of",
"a",
"container",
"a",
"NotInsideContainerError",
"is",
"raised",
"."
] |
BD2KGenomics/toil-lib
|
python
|
https://github.com/BD2KGenomics/toil-lib/blob/022a615fc3dc98fc1aaa7bfd232409962ca44fbd/src/toil_lib/__init__.py#L98-L122
|
[
"def",
"current_docker_container_id",
"(",
")",
":",
"try",
":",
"with",
"open",
"(",
"'/proc/1/cgroup'",
",",
"'r'",
")",
"as",
"readable",
":",
"raw",
"=",
"readable",
".",
"read",
"(",
")",
"ids",
"=",
"set",
"(",
"re",
".",
"compile",
"(",
"'[0-9a-f]{12,}'",
")",
".",
"findall",
"(",
"raw",
")",
")",
"assert",
"len",
"(",
"ids",
")",
"==",
"1",
"return",
"ids",
".",
"pop",
"(",
")",
"except",
":",
"logging",
".",
"exception",
"(",
"'Failed to obtain current container ID'",
")",
"raise",
"NotInsideContainerError",
"(",
")"
] |
022a615fc3dc98fc1aaa7bfd232409962ca44fbd
|
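A small, hedged calling sketch; it assumes current_docker_container_id and NotInsideContainerError are both importable from toil_lib, as the extract's location (src/toil_lib/__init__.py) suggests:

from toil_lib import current_docker_container_id, NotInsideContainerError

try:
    container_id = current_docker_container_id()
    print('Running inside container %s' % container_id)
except NotInsideContainerError:
    print('Not running inside a Docker container')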
test
|
run_star
|
Performs alignment of fastqs to bam via STAR
--limitBAMsortRAM step added to deal with memory explosion when sorting certain samples.
The value was chosen to complement the recommended amount of memory to have when running STAR (60G)
:param JobFunctionWrappingJob job: passed automatically by Toil
:param str r1_id: FileStoreID of fastq (pair 1)
:param str r2_id: FileStoreID of fastq (pair 2 if applicable, else pass None)
:param str star_index_url: STAR index tarball
:param bool wiggle: If True, will output a wiggle file and return it
:return: FileStoreIDs of the STAR outputs (transcriptome BAM, aligned BAM, wiggle file if requested, log, and splice junctions)
:rtype: tuple(str)
|
src/toil_lib/tools/aligners.py
|
def run_star(job, r1_id, r2_id, star_index_url, wiggle=False, sort=True):
"""
Performs alignment of fastqs to bam via STAR
--limitBAMsortRAM step added to deal with memory explosion when sorting certain samples.
The value was chosen to complement the recommended amount of memory to have when running STAR (60G)
:param JobFunctionWrappingJob job: passed automatically by Toil
:param str r1_id: FileStoreID of fastq (pair 1)
:param str r2_id: FileStoreID of fastq (pair 2 if applicable, else pass None)
:param str star_index_url: STAR index tarball
:param bool wiggle: If True, will output a wiggle file and return it
    :return: FileStoreIDs of the STAR outputs (transcriptome BAM, aligned BAM, wiggle file if requested, log, and splice junctions)
    :rtype: tuple(str)
"""
work_dir = job.fileStore.getLocalTempDir()
download_url(job, url=star_index_url, name='starIndex.tar.gz', work_dir=work_dir)
subprocess.check_call(['tar', '-xvf', os.path.join(work_dir, 'starIndex.tar.gz'), '-C', work_dir])
os.remove(os.path.join(work_dir, 'starIndex.tar.gz'))
    # Determine tarball structure - star index contents are either in a subdir or in the tarball itself
star_index = os.path.join('/data', os.listdir(work_dir)[0]) if len(os.listdir(work_dir)) == 1 else '/data'
# Parameter handling for paired / single-end data
parameters = ['--runThreadN', str(job.cores),
'--genomeDir', star_index,
'--outFileNamePrefix', 'rna',
'--outSAMunmapped', 'Within',
'--quantMode', 'TranscriptomeSAM',
'--outSAMattributes', 'NH', 'HI', 'AS', 'NM', 'MD',
'--outFilterType', 'BySJout',
'--outFilterMultimapNmax', '20',
'--outFilterMismatchNmax', '999',
'--outFilterMismatchNoverReadLmax', '0.04',
'--alignIntronMin', '20',
'--alignIntronMax', '1000000',
'--alignMatesGapMax', '1000000',
'--alignSJoverhangMin', '8',
'--alignSJDBoverhangMin', '1',
'--sjdbScore', '1',
'--limitBAMsortRAM', '49268954168']
    # Modify parameters based on function arguments
if sort:
parameters.extend(['--outSAMtype', 'BAM', 'SortedByCoordinate'])
aligned_bam = 'rnaAligned.sortedByCoord.out.bam'
else:
parameters.extend(['--outSAMtype', 'BAM', 'Unsorted'])
aligned_bam = 'rnaAligned.out.bam'
if wiggle:
parameters.extend(['--outWigType', 'bedGraph',
'--outWigStrand', 'Unstranded',
'--outWigReferencesPrefix', 'chr'])
if r1_id and r2_id:
job.fileStore.readGlobalFile(r1_id, os.path.join(work_dir, 'R1.fastq'))
job.fileStore.readGlobalFile(r2_id, os.path.join(work_dir, 'R2.fastq'))
parameters.extend(['--readFilesIn', '/data/R1.fastq', '/data/R2.fastq'])
else:
job.fileStore.readGlobalFile(r1_id, os.path.join(work_dir, 'R1.fastq'))
parameters.extend(['--readFilesIn', '/data/R1.fastq'])
# Call: STAR Mapping
dockerCall(job=job, tool='quay.io/ucsc_cgl/star:2.4.2a--bcbd5122b69ff6ac4ef61958e47bde94001cfe80',
workDir=work_dir, parameters=parameters)
    # Check output bam isn't size zero if sorted
aligned_bam_path = os.path.join(work_dir, aligned_bam)
if sort:
        assert os.stat(aligned_bam_path).st_size > 0, 'Aligned bam failed to sort. Ensure sufficient memory is free.'
# Write to fileStore
aligned_id = job.fileStore.writeGlobalFile(aligned_bam_path)
transcriptome_id = job.fileStore.writeGlobalFile(os.path.join(work_dir, 'rnaAligned.toTranscriptome.out.bam'))
log_id = job.fileStore.writeGlobalFile(os.path.join(work_dir, 'rnaLog.final.out'))
sj_id = job.fileStore.writeGlobalFile(os.path.join(work_dir, 'rnaSJ.out.tab'))
if wiggle:
wiggle_id = job.fileStore.writeGlobalFile(os.path.join(work_dir, 'rnaSignal.UniqueMultiple.str1.out.bg'))
return transcriptome_id, aligned_id, wiggle_id, log_id, sj_id
else:
return transcriptome_id, aligned_id, log_id, sj_id
|
def run_star(job, r1_id, r2_id, star_index_url, wiggle=False, sort=True):
"""
Performs alignment of fastqs to bam via STAR
--limitBAMsortRAM step added to deal with memory explosion when sorting certain samples.
The value was chosen to complement the recommended amount of memory to have when running STAR (60G)
:param JobFunctionWrappingJob job: passed automatically by Toil
:param str r1_id: FileStoreID of fastq (pair 1)
:param str r2_id: FileStoreID of fastq (pair 2 if applicable, else pass None)
:param str star_index_url: STAR index tarball
:param bool wiggle: If True, will output a wiggle file and return it
    :return: FileStoreIDs of the STAR outputs (transcriptome BAM, aligned BAM, wiggle file if requested, log, and splice junctions)
    :rtype: tuple(str)
"""
work_dir = job.fileStore.getLocalTempDir()
download_url(job, url=star_index_url, name='starIndex.tar.gz', work_dir=work_dir)
subprocess.check_call(['tar', '-xvf', os.path.join(work_dir, 'starIndex.tar.gz'), '-C', work_dir])
os.remove(os.path.join(work_dir, 'starIndex.tar.gz'))
    # Determine tarball structure - star index contents are either in a subdir or in the tarball itself
star_index = os.path.join('/data', os.listdir(work_dir)[0]) if len(os.listdir(work_dir)) == 1 else '/data'
# Parameter handling for paired / single-end data
parameters = ['--runThreadN', str(job.cores),
'--genomeDir', star_index,
'--outFileNamePrefix', 'rna',
'--outSAMunmapped', 'Within',
'--quantMode', 'TranscriptomeSAM',
'--outSAMattributes', 'NH', 'HI', 'AS', 'NM', 'MD',
'--outFilterType', 'BySJout',
'--outFilterMultimapNmax', '20',
'--outFilterMismatchNmax', '999',
'--outFilterMismatchNoverReadLmax', '0.04',
'--alignIntronMin', '20',
'--alignIntronMax', '1000000',
'--alignMatesGapMax', '1000000',
'--alignSJoverhangMin', '8',
'--alignSJDBoverhangMin', '1',
'--sjdbScore', '1',
'--limitBAMsortRAM', '49268954168']
    # Modify parameters based on function arguments
if sort:
parameters.extend(['--outSAMtype', 'BAM', 'SortedByCoordinate'])
aligned_bam = 'rnaAligned.sortedByCoord.out.bam'
else:
parameters.extend(['--outSAMtype', 'BAM', 'Unsorted'])
aligned_bam = 'rnaAligned.out.bam'
if wiggle:
parameters.extend(['--outWigType', 'bedGraph',
'--outWigStrand', 'Unstranded',
'--outWigReferencesPrefix', 'chr'])
if r1_id and r2_id:
job.fileStore.readGlobalFile(r1_id, os.path.join(work_dir, 'R1.fastq'))
job.fileStore.readGlobalFile(r2_id, os.path.join(work_dir, 'R2.fastq'))
parameters.extend(['--readFilesIn', '/data/R1.fastq', '/data/R2.fastq'])
else:
job.fileStore.readGlobalFile(r1_id, os.path.join(work_dir, 'R1.fastq'))
parameters.extend(['--readFilesIn', '/data/R1.fastq'])
# Call: STAR Mapping
dockerCall(job=job, tool='quay.io/ucsc_cgl/star:2.4.2a--bcbd5122b69ff6ac4ef61958e47bde94001cfe80',
workDir=work_dir, parameters=parameters)
    # Check output bam isn't size zero if sorted
aligned_bam_path = os.path.join(work_dir, aligned_bam)
if sort:
        assert os.stat(aligned_bam_path).st_size > 0, 'Aligned bam failed to sort. Ensure sufficient memory is free.'
# Write to fileStore
aligned_id = job.fileStore.writeGlobalFile(aligned_bam_path)
transcriptome_id = job.fileStore.writeGlobalFile(os.path.join(work_dir, 'rnaAligned.toTranscriptome.out.bam'))
log_id = job.fileStore.writeGlobalFile(os.path.join(work_dir, 'rnaLog.final.out'))
sj_id = job.fileStore.writeGlobalFile(os.path.join(work_dir, 'rnaSJ.out.tab'))
if wiggle:
wiggle_id = job.fileStore.writeGlobalFile(os.path.join(work_dir, 'rnaSignal.UniqueMultiple.str1.out.bg'))
return transcriptome_id, aligned_id, wiggle_id, log_id, sj_id
else:
return transcriptome_id, aligned_id, log_id, sj_id
|
[
"Performs",
"alignment",
"of",
"fastqs",
"to",
"bam",
"via",
"STAR"
] |
BD2KGenomics/toil-lib
|
python
|
https://github.com/BD2KGenomics/toil-lib/blob/022a615fc3dc98fc1aaa7bfd232409962ca44fbd/src/toil_lib/tools/aligners.py#L9-L82
|
[
"def",
"run_star",
"(",
"job",
",",
"r1_id",
",",
"r2_id",
",",
"star_index_url",
",",
"wiggle",
"=",
"False",
",",
"sort",
"=",
"True",
")",
":",
"work_dir",
"=",
"job",
".",
"fileStore",
".",
"getLocalTempDir",
"(",
")",
"download_url",
"(",
"job",
",",
"url",
"=",
"star_index_url",
",",
"name",
"=",
"'starIndex.tar.gz'",
",",
"work_dir",
"=",
"work_dir",
")",
"subprocess",
".",
"check_call",
"(",
"[",
"'tar'",
",",
"'-xvf'",
",",
"os",
".",
"path",
".",
"join",
"(",
"work_dir",
",",
"'starIndex.tar.gz'",
")",
",",
"'-C'",
",",
"work_dir",
"]",
")",
"os",
".",
"remove",
"(",
"os",
".",
"path",
".",
"join",
"(",
"work_dir",
",",
"'starIndex.tar.gz'",
")",
")",
"# Determine tarball structure - star index contains are either in a subdir or in the tarball itself",
"star_index",
"=",
"os",
".",
"path",
".",
"join",
"(",
"'/data'",
",",
"os",
".",
"listdir",
"(",
"work_dir",
")",
"[",
"0",
"]",
")",
"if",
"len",
"(",
"os",
".",
"listdir",
"(",
"work_dir",
")",
")",
"==",
"1",
"else",
"'/data'",
"# Parameter handling for paired / single-end data",
"parameters",
"=",
"[",
"'--runThreadN'",
",",
"str",
"(",
"job",
".",
"cores",
")",
",",
"'--genomeDir'",
",",
"star_index",
",",
"'--outFileNamePrefix'",
",",
"'rna'",
",",
"'--outSAMunmapped'",
",",
"'Within'",
",",
"'--quantMode'",
",",
"'TranscriptomeSAM'",
",",
"'--outSAMattributes'",
",",
"'NH'",
",",
"'HI'",
",",
"'AS'",
",",
"'NM'",
",",
"'MD'",
",",
"'--outFilterType'",
",",
"'BySJout'",
",",
"'--outFilterMultimapNmax'",
",",
"'20'",
",",
"'--outFilterMismatchNmax'",
",",
"'999'",
",",
"'--outFilterMismatchNoverReadLmax'",
",",
"'0.04'",
",",
"'--alignIntronMin'",
",",
"'20'",
",",
"'--alignIntronMax'",
",",
"'1000000'",
",",
"'--alignMatesGapMax'",
",",
"'1000000'",
",",
"'--alignSJoverhangMin'",
",",
"'8'",
",",
"'--alignSJDBoverhangMin'",
",",
"'1'",
",",
"'--sjdbScore'",
",",
"'1'",
",",
"'--limitBAMsortRAM'",
",",
"'49268954168'",
"]",
"# Modify paramaters based on function arguments",
"if",
"sort",
":",
"parameters",
".",
"extend",
"(",
"[",
"'--outSAMtype'",
",",
"'BAM'",
",",
"'SortedByCoordinate'",
"]",
")",
"aligned_bam",
"=",
"'rnaAligned.sortedByCoord.out.bam'",
"else",
":",
"parameters",
".",
"extend",
"(",
"[",
"'--outSAMtype'",
",",
"'BAM'",
",",
"'Unsorted'",
"]",
")",
"aligned_bam",
"=",
"'rnaAligned.out.bam'",
"if",
"wiggle",
":",
"parameters",
".",
"extend",
"(",
"[",
"'--outWigType'",
",",
"'bedGraph'",
",",
"'--outWigStrand'",
",",
"'Unstranded'",
",",
"'--outWigReferencesPrefix'",
",",
"'chr'",
"]",
")",
"if",
"r1_id",
"and",
"r2_id",
":",
"job",
".",
"fileStore",
".",
"readGlobalFile",
"(",
"r1_id",
",",
"os",
".",
"path",
".",
"join",
"(",
"work_dir",
",",
"'R1.fastq'",
")",
")",
"job",
".",
"fileStore",
".",
"readGlobalFile",
"(",
"r2_id",
",",
"os",
".",
"path",
".",
"join",
"(",
"work_dir",
",",
"'R2.fastq'",
")",
")",
"parameters",
".",
"extend",
"(",
"[",
"'--readFilesIn'",
",",
"'/data/R1.fastq'",
",",
"'/data/R2.fastq'",
"]",
")",
"else",
":",
"job",
".",
"fileStore",
".",
"readGlobalFile",
"(",
"r1_id",
",",
"os",
".",
"path",
".",
"join",
"(",
"work_dir",
",",
"'R1.fastq'",
")",
")",
"parameters",
".",
"extend",
"(",
"[",
"'--readFilesIn'",
",",
"'/data/R1.fastq'",
"]",
")",
"# Call: STAR Mapping",
"dockerCall",
"(",
"job",
"=",
"job",
",",
"tool",
"=",
"'quay.io/ucsc_cgl/star:2.4.2a--bcbd5122b69ff6ac4ef61958e47bde94001cfe80'",
",",
"workDir",
"=",
"work_dir",
",",
"parameters",
"=",
"parameters",
")",
"# Check output bam isnt size zero if sorted",
"aligned_bam_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"work_dir",
",",
"aligned_bam",
")",
"if",
"sort",
":",
"assert",
"(",
"os",
".",
"stat",
"(",
"aligned_bam_path",
")",
".",
"st_size",
">",
"0",
",",
"'Aligned bam failed to sort. Ensure sufficient memory is free.'",
")",
"# Write to fileStore",
"aligned_id",
"=",
"job",
".",
"fileStore",
".",
"writeGlobalFile",
"(",
"aligned_bam_path",
")",
"transcriptome_id",
"=",
"job",
".",
"fileStore",
".",
"writeGlobalFile",
"(",
"os",
".",
"path",
".",
"join",
"(",
"work_dir",
",",
"'rnaAligned.toTranscriptome.out.bam'",
")",
")",
"log_id",
"=",
"job",
".",
"fileStore",
".",
"writeGlobalFile",
"(",
"os",
".",
"path",
".",
"join",
"(",
"work_dir",
",",
"'rnaLog.final.out'",
")",
")",
"sj_id",
"=",
"job",
".",
"fileStore",
".",
"writeGlobalFile",
"(",
"os",
".",
"path",
".",
"join",
"(",
"work_dir",
",",
"'rnaSJ.out.tab'",
")",
")",
"if",
"wiggle",
":",
"wiggle_id",
"=",
"job",
".",
"fileStore",
".",
"writeGlobalFile",
"(",
"os",
".",
"path",
".",
"join",
"(",
"work_dir",
",",
"'rnaSignal.UniqueMultiple.str1.out.bg'",
")",
")",
"return",
"transcriptome_id",
",",
"aligned_id",
",",
"wiggle_id",
",",
"log_id",
",",
"sj_id",
"else",
":",
"return",
"transcriptome_id",
",",
"aligned_id",
",",
"log_id",
",",
"sj_id"
] |
022a615fc3dc98fc1aaa7bfd232409962ca44fbd
|
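A sketch of how run_star might be wired into a Toil workflow as a child job; the wrapper function, file IDs, and resource hints below are illustrative assumptions rather than part of toil-lib:

from toil_lib.tools.aligners import run_star

def align_rna(job, r1_id, r2_id, star_index_url):
    # Schedule STAR as a child job; the 60G memory hint matches the note in the docstring.
    star = job.addChildJobFn(run_star, r1_id, r2_id, star_index_url,
                             wiggle=False, sort=True,
                             cores=8, memory='60G', disk='100G')
    # With wiggle=False the promise resolves to
    # (transcriptome_id, aligned_id, log_id, sj_id); with wiggle=True a
    # wiggle_id is also returned as the third element.
    return star.rv()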
test
|
run_bwakit
|
Runs BWA-Kit to align single or paired-end fastq files or realign SAM/BAM files.
:param JobFunctionWrappingJob job: Passed by Toil automatically
:param Namespace config: A configuration object that holds strings as attributes.
The attributes must be accessible via the dot operator.
The config must have:
config.r1 FileStoreID for FASTQ file, or None if realigning SAM/BAM
config.r2 FileStoreID for paired FASTQ file, or None if single-ended
config.bam FileStoreID for BAM file to be realigned, or None if aligning fastq
config.sam FileStoreID for SAM file to be realigned, or None if aligning fastq
config.ref FileStoreID for the reference genome
config.fai FileStoreID for the reference index file
config.amb FileStoreID for the reference amb file
config.ann FileStoreID for the reference ann file
config.bwt FileStoreID for the reference bwt file
config.pac FileStoreID for the reference pac file
config.sa FileStoreID for the reference sa file
config.alt FileStoreID for the reference alt (or None)
config.rg_line The read group value to use (or None -- see below)
config.library Read group attribute: library
config.platform Read group attribute: platform
config.program_unit Read group attribute: program unit
config.uuid Read group attribute: sample ID
If specifying config.rg_line, use the following format:
BAM read group header line (@RG), as defined on page 3 of the SAM spec.
Tabs should be escaped, e.g., @RG\\tID:foo\\tLB:bar...
for the read group "foo" from sequencing library "bar".
Multiple @RG lines can be defined, but should be split by an escaped newline \\n,
e.g., @RG\\tID:foo\\tLB:bar\\n@RG\\tID:santa\\tLB:cruz.
:param bool sort: If True, sorts the BAM
:param bool trim: If True, performs adapter trimming
:param bool mark_secondary: If True, mark shorter split reads as secondary
:return: FileStoreID of BAM
:rtype: str
|
src/toil_lib/tools/aligners.py
|
def run_bwakit(job, config, sort=True, trim=False, mark_secondary=False):
"""
Runs BWA-Kit to align single or paired-end fastq files or realign SAM/BAM files.
:param JobFunctionWrappingJob job: Passed by Toil automatically
:param Namespace config: A configuration object that holds strings as attributes.
The attributes must be accessible via the dot operator.
The config must have:
config.r1 FileStoreID for FASTQ file, or None if realigning SAM/BAM
config.r2 FileStoreID for paired FASTQ file, or None if single-ended
config.bam FileStoreID for BAM file to be realigned, or None if aligning fastq
config.sam FileStoreID for SAM file to be realigned, or None if aligning fastq
config.ref FileStoreID for the reference genome
config.fai FileStoreID for the reference index file
config.amb FileStoreID for the reference amb file
config.ann FileStoreID for the reference ann file
config.bwt FileStoreID for the reference bwt file
config.pac FileStoreID for the reference pac file
config.sa FileStoreID for the reference sa file
config.alt FileStoreID for the reference alt (or None)
config.rg_line The read group value to use (or None -- see below)
config.library Read group attribute: library
config.platform Read group attribute: platform
config.program_unit Read group attribute: program unit
config.uuid Read group attribute: sample ID
If specifying config.rg_line, use the following format:
BAM read group header line (@RG), as defined on page 3 of the SAM spec.
Tabs should be escaped, e.g., @RG\\tID:foo\\tLB:bar...
for the read group "foo" from sequencing library "bar".
Multiple @RG lines can be defined, but should be split by an escaped newline \\n,
    e.g., @RG\\tID:foo\\tLB:bar\\n@RG\\tID:santa\\tLB:cruz.
:param bool sort: If True, sorts the BAM
:param bool trim: If True, performs adapter trimming
:param bool mark_secondary: If True, mark shorter split reads as secondary
:return: FileStoreID of BAM
:rtype: str
"""
work_dir = job.fileStore.getLocalTempDir()
rg = None
inputs = {'ref.fa': config.ref,
'ref.fa.fai': config.fai,
'ref.fa.amb': config.amb,
'ref.fa.ann': config.ann,
'ref.fa.bwt': config.bwt,
'ref.fa.pac': config.pac,
'ref.fa.sa': config.sa}
samples = []
realignment = False
# If a fastq pair was provided
if getattr(config, 'r1', None):
inputs['input.1.fq.gz'] = config.r1
samples.append('input.1.fq.gz')
if getattr(config, 'r2', None):
inputs['input.2.fq.gz'] = config.r2
samples.append('input.2.fq.gz')
if getattr(config, 'bam', None):
inputs['input.bam'] = config.bam
samples.append('input.bam')
realignment = True
if getattr(config, 'sam', None):
inputs['input.sam'] = config.sam
samples.append('input.sam')
realignment = True
# If an alt file was provided
if getattr(config, 'alt', None):
inputs['ref.fa.alt'] = config.alt
for name, fileStoreID in inputs.iteritems():
job.fileStore.readGlobalFile(fileStoreID, os.path.join(work_dir, name))
# If a read group line was provided
if getattr(config, 'rg_line', None):
rg = config.rg_line
# Otherwise, generate a read group line to place in the BAM.
elif all(getattr(config, elem, None) for elem in ['library', 'platform', 'program_unit', 'uuid']):
rg = "@RG\\tID:{0}".format(config.uuid) # '\' character is escaped so bwakit gets passed '\t' properly
rg_attributes = [config.library, config.platform, config.program_unit, config.uuid]
for tag, info in zip(['LB', 'PL', 'PU', 'SM'], rg_attributes):
rg += '\\t{0}:{1}'.format(tag, info)
# If realigning, then bwakit can use pre-existing read group data
elif realignment:
rg = None
# BWA Options
opt_args = []
if sort:
opt_args.append('-s')
if trim:
opt_args.append('-a')
if mark_secondary:
opt_args.append('-M')
# Call: bwakit
parameters = ['-t', str(job.cores)] + opt_args + ['-o', '/data/aligned', '/data/ref.fa']
if rg is not None:
parameters = ['-R', rg] + parameters
for sample in samples:
parameters.append('/data/{}'.format(sample))
dockerCall(job=job, tool='quay.io/ucsc_cgl/bwakit:0.7.12--c85ccff267d5021b75bb1c9ccf5f4b79f91835cc',
parameters=parameters, workDir=work_dir)
# Either write file to local output directory or upload to S3 cloud storage
job.fileStore.logToMaster('Aligned sample: {}'.format(config.uuid))
return job.fileStore.writeGlobalFile(os.path.join(work_dir, 'aligned.aln.bam'))
|
def run_bwakit(job, config, sort=True, trim=False, mark_secondary=False):
"""
Runs BWA-Kit to align single or paired-end fastq files or realign SAM/BAM files.
:param JobFunctionWrappingJob job: Passed by Toil automatically
:param Namespace config: A configuration object that holds strings as attributes.
The attributes must be accessible via the dot operator.
The config must have:
config.r1 FileStoreID for FASTQ file, or None if realigning SAM/BAM
config.r2 FileStoreID for paired FASTQ file, or None if single-ended
config.bam FileStoreID for BAM file to be realigned, or None if aligning fastq
config.sam FileStoreID for SAM file to be realigned, or None if aligning fastq
config.ref FileStoreID for the reference genome
config.fai FileStoreID for the reference index file
config.amb FileStoreID for the reference amb file
config.ann FileStoreID for the reference ann file
config.bwt FileStoreID for the reference bwt file
config.pac FileStoreID for the reference pac file
config.sa FileStoreID for the reference sa file
config.alt FileStoreID for the reference alt (or None)
config.rg_line The read group value to use (or None -- see below)
config.library Read group attribute: library
config.platform Read group attribute: platform
config.program_unit Read group attribute: program unit
config.uuid Read group attribute: sample ID
If specifying config.rg_line, use the following format:
BAM read group header line (@RG), as defined on page 3 of the SAM spec.
Tabs should be escaped, e.g., @RG\\tID:foo\\tLB:bar...
for the read group "foo" from sequencing library "bar".
Multiple @RG lines can be defined, but should be split by an escaped newline \\n,
    e.g., @RG\\tID:foo\\tLB:bar\\n@RG\\tID:santa\\tLB:cruz.
:param bool sort: If True, sorts the BAM
:param bool trim: If True, performs adapter trimming
:param bool mark_secondary: If True, mark shorter split reads as secondary
:return: FileStoreID of BAM
:rtype: str
"""
work_dir = job.fileStore.getLocalTempDir()
rg = None
inputs = {'ref.fa': config.ref,
'ref.fa.fai': config.fai,
'ref.fa.amb': config.amb,
'ref.fa.ann': config.ann,
'ref.fa.bwt': config.bwt,
'ref.fa.pac': config.pac,
'ref.fa.sa': config.sa}
samples = []
realignment = False
# If a fastq pair was provided
if getattr(config, 'r1', None):
inputs['input.1.fq.gz'] = config.r1
samples.append('input.1.fq.gz')
if getattr(config, 'r2', None):
inputs['input.2.fq.gz'] = config.r2
samples.append('input.2.fq.gz')
if getattr(config, 'bam', None):
inputs['input.bam'] = config.bam
samples.append('input.bam')
realignment = True
if getattr(config, 'sam', None):
inputs['input.sam'] = config.sam
samples.append('input.sam')
realignment = True
# If an alt file was provided
if getattr(config, 'alt', None):
inputs['ref.fa.alt'] = config.alt
for name, fileStoreID in inputs.iteritems():
job.fileStore.readGlobalFile(fileStoreID, os.path.join(work_dir, name))
# If a read group line was provided
if getattr(config, 'rg_line', None):
rg = config.rg_line
# Otherwise, generate a read group line to place in the BAM.
elif all(getattr(config, elem, None) for elem in ['library', 'platform', 'program_unit', 'uuid']):
rg = "@RG\\tID:{0}".format(config.uuid) # '\' character is escaped so bwakit gets passed '\t' properly
rg_attributes = [config.library, config.platform, config.program_unit, config.uuid]
for tag, info in zip(['LB', 'PL', 'PU', 'SM'], rg_attributes):
rg += '\\t{0}:{1}'.format(tag, info)
# If realigning, then bwakit can use pre-existing read group data
elif realignment:
rg = None
# BWA Options
opt_args = []
if sort:
opt_args.append('-s')
if trim:
opt_args.append('-a')
if mark_secondary:
opt_args.append('-M')
# Call: bwakit
parameters = ['-t', str(job.cores)] + opt_args + ['-o', '/data/aligned', '/data/ref.fa']
if rg is not None:
parameters = ['-R', rg] + parameters
for sample in samples:
parameters.append('/data/{}'.format(sample))
dockerCall(job=job, tool='quay.io/ucsc_cgl/bwakit:0.7.12--c85ccff267d5021b75bb1c9ccf5f4b79f91835cc',
parameters=parameters, workDir=work_dir)
# Either write file to local output directory or upload to S3 cloud storage
job.fileStore.logToMaster('Aligned sample: {}'.format(config.uuid))
return job.fileStore.writeGlobalFile(os.path.join(work_dir, 'aligned.aln.bam'))
|
[
"Runs",
"BWA",
"-",
"Kit",
"to",
"align",
"single",
"or",
"paired",
"-",
"end",
"fastq",
"files",
"or",
"realign",
"SAM",
"/",
"BAM",
"files",
"."
] |
BD2KGenomics/toil-lib
|
python
|
https://github.com/BD2KGenomics/toil-lib/blob/022a615fc3dc98fc1aaa7bfd232409962ca44fbd/src/toil_lib/tools/aligners.py#L85-L188
|
[
"def",
"run_bwakit",
"(",
"job",
",",
"config",
",",
"sort",
"=",
"True",
",",
"trim",
"=",
"False",
",",
"mark_secondary",
"=",
"False",
")",
":",
"work_dir",
"=",
"job",
".",
"fileStore",
".",
"getLocalTempDir",
"(",
")",
"rg",
"=",
"None",
"inputs",
"=",
"{",
"'ref.fa'",
":",
"config",
".",
"ref",
",",
"'ref.fa.fai'",
":",
"config",
".",
"fai",
",",
"'ref.fa.amb'",
":",
"config",
".",
"amb",
",",
"'ref.fa.ann'",
":",
"config",
".",
"ann",
",",
"'ref.fa.bwt'",
":",
"config",
".",
"bwt",
",",
"'ref.fa.pac'",
":",
"config",
".",
"pac",
",",
"'ref.fa.sa'",
":",
"config",
".",
"sa",
"}",
"samples",
"=",
"[",
"]",
"realignment",
"=",
"False",
"# If a fastq pair was provided",
"if",
"getattr",
"(",
"config",
",",
"'r1'",
",",
"None",
")",
":",
"inputs",
"[",
"'input.1.fq.gz'",
"]",
"=",
"config",
".",
"r1",
"samples",
".",
"append",
"(",
"'input.1.fq.gz'",
")",
"if",
"getattr",
"(",
"config",
",",
"'r2'",
",",
"None",
")",
":",
"inputs",
"[",
"'input.2.fq.gz'",
"]",
"=",
"config",
".",
"r2",
"samples",
".",
"append",
"(",
"'input.2.fq.gz'",
")",
"if",
"getattr",
"(",
"config",
",",
"'bam'",
",",
"None",
")",
":",
"inputs",
"[",
"'input.bam'",
"]",
"=",
"config",
".",
"bam",
"samples",
".",
"append",
"(",
"'input.bam'",
")",
"realignment",
"=",
"True",
"if",
"getattr",
"(",
"config",
",",
"'sam'",
",",
"None",
")",
":",
"inputs",
"[",
"'input.sam'",
"]",
"=",
"config",
".",
"sam",
"samples",
".",
"append",
"(",
"'input.sam'",
")",
"realignment",
"=",
"True",
"# If an alt file was provided",
"if",
"getattr",
"(",
"config",
",",
"'alt'",
",",
"None",
")",
":",
"inputs",
"[",
"'ref.fa.alt'",
"]",
"=",
"config",
".",
"alt",
"for",
"name",
",",
"fileStoreID",
"in",
"inputs",
".",
"iteritems",
"(",
")",
":",
"job",
".",
"fileStore",
".",
"readGlobalFile",
"(",
"fileStoreID",
",",
"os",
".",
"path",
".",
"join",
"(",
"work_dir",
",",
"name",
")",
")",
"# If a read group line was provided",
"if",
"getattr",
"(",
"config",
",",
"'rg_line'",
",",
"None",
")",
":",
"rg",
"=",
"config",
".",
"rg_line",
"# Otherwise, generate a read group line to place in the BAM.",
"elif",
"all",
"(",
"getattr",
"(",
"config",
",",
"elem",
",",
"None",
")",
"for",
"elem",
"in",
"[",
"'library'",
",",
"'platform'",
",",
"'program_unit'",
",",
"'uuid'",
"]",
")",
":",
"rg",
"=",
"\"@RG\\\\tID:{0}\"",
".",
"format",
"(",
"config",
".",
"uuid",
")",
"# '\\' character is escaped so bwakit gets passed '\\t' properly",
"rg_attributes",
"=",
"[",
"config",
".",
"library",
",",
"config",
".",
"platform",
",",
"config",
".",
"program_unit",
",",
"config",
".",
"uuid",
"]",
"for",
"tag",
",",
"info",
"in",
"zip",
"(",
"[",
"'LB'",
",",
"'PL'",
",",
"'PU'",
",",
"'SM'",
"]",
",",
"rg_attributes",
")",
":",
"rg",
"+=",
"'\\\\t{0}:{1}'",
".",
"format",
"(",
"tag",
",",
"info",
")",
"# If realigning, then bwakit can use pre-existing read group data",
"elif",
"realignment",
":",
"rg",
"=",
"None",
"# BWA Options",
"opt_args",
"=",
"[",
"]",
"if",
"sort",
":",
"opt_args",
".",
"append",
"(",
"'-s'",
")",
"if",
"trim",
":",
"opt_args",
".",
"append",
"(",
"'-a'",
")",
"if",
"mark_secondary",
":",
"opt_args",
".",
"append",
"(",
"'-M'",
")",
"# Call: bwakit",
"parameters",
"=",
"[",
"'-t'",
",",
"str",
"(",
"job",
".",
"cores",
")",
"]",
"+",
"opt_args",
"+",
"[",
"'-o'",
",",
"'/data/aligned'",
",",
"'/data/ref.fa'",
"]",
"if",
"rg",
"is",
"not",
"None",
":",
"parameters",
"=",
"[",
"'-R'",
",",
"rg",
"]",
"+",
"parameters",
"for",
"sample",
"in",
"samples",
":",
"parameters",
".",
"append",
"(",
"'/data/{}'",
".",
"format",
"(",
"sample",
")",
")",
"dockerCall",
"(",
"job",
"=",
"job",
",",
"tool",
"=",
"'quay.io/ucsc_cgl/bwakit:0.7.12--c85ccff267d5021b75bb1c9ccf5f4b79f91835cc'",
",",
"parameters",
"=",
"parameters",
",",
"workDir",
"=",
"work_dir",
")",
"# Either write file to local output directory or upload to S3 cloud storage",
"job",
".",
"fileStore",
".",
"logToMaster",
"(",
"'Aligned sample: {}'",
".",
"format",
"(",
"config",
".",
"uuid",
")",
")",
"return",
"job",
".",
"fileStore",
".",
"writeGlobalFile",
"(",
"os",
".",
"path",
".",
"join",
"(",
"work_dir",
",",
"'aligned.aln.bam'",
")",
")"
] |
022a615fc3dc98fc1aaa7bfd232409962ca44fbd
|
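A hedged sketch of assembling the config namespace that run_bwakit expects; every FileStoreID variable below (ref_id, fai_id, and so on) is a placeholder assumption standing in for IDs produced elsewhere in a Toil workflow:

from argparse import Namespace
from toil_lib.tools.aligners import run_bwakit

def align_dna(job, r1_id, r2_id, ref_id, fai_id, amb_id, ann_id, bwt_id, pac_id, sa_id):
    config = Namespace(
        r1=r1_id, r2=r2_id, bam=None, sam=None,
        ref=ref_id, fai=fai_id, amb=amb_id, ann=ann_id,
        bwt=bwt_id, pac=pac_id, sa=sa_id, alt=None,
        rg_line=None,                       # let run_bwakit build the @RG line
        library='lib1', platform='ILLUMINA',
        program_unit='unit1', uuid='sample-1')
    # Schedule bwakit as a child job and return a promise for the aligned BAM's FileStoreID.
    return job.addChildJobFn(run_bwakit, config, sort=True).rv()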
test
|
query_maker
|
query_maker takes the optional arguments and constructs a json query for a stream's
datapoints using it::
#{"t1": 5, "transform": "if $ > 5"}
print query_maker(t1=5,transform="if $ > 5")
|
connectordb/_stream.py
|
def query_maker(t1=None, t2=None, limit=None, i1=None, i2=None, transform=None, downlink=False):
"""query_maker takes the optional arguments and constructs a json query for a stream's
datapoints using it::
#{"t1": 5, "transform": "if $ > 5"}
print query_maker(t1=5,transform="if $ > 5")
"""
params = {}
if t1 is not None:
params["t1"] = t1
if t2 is not None:
params["t2"] = t2
if limit is not None:
params["limit"] = limit
if i1 is not None or i2 is not None:
if len(params) > 0:
raise AssertionError(
"Stream cannot be accessed both by index and by timestamp at the same time.")
if i1 is not None:
params["i1"] = i1
if i2 is not None:
params["i2"] = i2
# If no range is given, query whole stream
if len(params) == 0:
params["i1"] = 0
params["i2"] = 0
if transform is not None:
params["transform"] = transform
if downlink:
params["downlink"] = True
return params
|
def query_maker(t1=None, t2=None, limit=None, i1=None, i2=None, transform=None, downlink=False):
"""query_maker takes the optional arguments and constructs a json query for a stream's
datapoints using it::
#{"t1": 5, "transform": "if $ > 5"}
print query_maker(t1=5,transform="if $ > 5")
"""
params = {}
if t1 is not None:
params["t1"] = t1
if t2 is not None:
params["t2"] = t2
if limit is not None:
params["limit"] = limit
if i1 is not None or i2 is not None:
if len(params) > 0:
raise AssertionError(
"Stream cannot be accessed both by index and by timestamp at the same time.")
if i1 is not None:
params["i1"] = i1
if i2 is not None:
params["i2"] = i2
# If no range is given, query whole stream
if len(params) == 0:
params["i1"] = 0
params["i2"] = 0
if transform is not None:
params["transform"] = transform
if downlink:
params["downlink"] = True
return params
|
[
"query_maker",
"takes",
"the",
"optional",
"arguments",
"and",
"constructs",
"a",
"json",
"query",
"for",
"a",
"stream",
"s",
"datapoints",
"using",
"it",
"::",
"#",
"{",
"t1",
":",
"5",
"transform",
":",
"if",
"$",
">",
"5",
"}",
"print",
"query_maker",
"(",
"t1",
"=",
"5",
"transform",
"=",
"if",
"$",
">",
"5",
")"
] |
connectordb/connectordb-python
|
python
|
https://github.com/connectordb/connectordb-python/blob/2092b0cb30898139a247176bcf433d5a4abde7cb/connectordb/_stream.py#L21-L53
|
[
"def",
"query_maker",
"(",
"t1",
"=",
"None",
",",
"t2",
"=",
"None",
",",
"limit",
"=",
"None",
",",
"i1",
"=",
"None",
",",
"i2",
"=",
"None",
",",
"transform",
"=",
"None",
",",
"downlink",
"=",
"False",
")",
":",
"params",
"=",
"{",
"}",
"if",
"t1",
"is",
"not",
"None",
":",
"params",
"[",
"\"t1\"",
"]",
"=",
"t1",
"if",
"t2",
"is",
"not",
"None",
":",
"params",
"[",
"\"t2\"",
"]",
"=",
"t2",
"if",
"limit",
"is",
"not",
"None",
":",
"params",
"[",
"\"limit\"",
"]",
"=",
"limit",
"if",
"i1",
"is",
"not",
"None",
"or",
"i2",
"is",
"not",
"None",
":",
"if",
"len",
"(",
"params",
")",
">",
"0",
":",
"raise",
"AssertionError",
"(",
"\"Stream cannot be accessed both by index and by timestamp at the same time.\"",
")",
"if",
"i1",
"is",
"not",
"None",
":",
"params",
"[",
"\"i1\"",
"]",
"=",
"i1",
"if",
"i2",
"is",
"not",
"None",
":",
"params",
"[",
"\"i2\"",
"]",
"=",
"i2",
"# If no range is given, query whole stream",
"if",
"len",
"(",
"params",
")",
"==",
"0",
":",
"params",
"[",
"\"i1\"",
"]",
"=",
"0",
"params",
"[",
"\"i2\"",
"]",
"=",
"0",
"if",
"transform",
"is",
"not",
"None",
":",
"params",
"[",
"\"transform\"",
"]",
"=",
"transform",
"if",
"downlink",
":",
"params",
"[",
"\"downlink\"",
"]",
"=",
"True",
"return",
"params"
] |
2092b0cb30898139a247176bcf433d5a4abde7cb
|
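A few concrete calls and the dictionaries they produce, derived directly from the branches in query_maker; the import path mirrors the extract's module location (connectordb/_stream.py):

from connectordb._stream import query_maker

query_maker(t1=5, transform="if $ > 5")    # {'t1': 5, 'transform': 'if $ > 5'}
query_maker()                              # {'i1': 0, 'i2': 0} - whole stream
query_maker(i1=10, i2=20, downlink=True)   # {'i1': 10, 'i2': 20, 'downlink': True}
query_maker(t1=0, i1=5)                    # raises AssertionError: mixed index and time range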
test
|
Stream.create
|
Creates a stream given an optional JSON schema encoded as a python dict. You can also add other properties
of the stream, such as the icon, datatype or description. Create accepts both a string schema and
a dict-encoded schema.
|
connectordb/_stream.py
|
def create(self, schema="{}", **kwargs):
"""Creates a stream given an optional JSON schema encoded as a python dict. You can also add other properties
of the stream, such as the icon, datatype or description. Create accepts both a string schema and
a dict-encoded schema."""
if isinstance(schema, basestring):
strschema = schema
schema = json.loads(schema)
else:
strschema = json.dumps(schema)
Draft4Validator.check_schema(schema)
kwargs["schema"] = strschema
self.metadata = self.db.create(self.path, kwargs).json()
|
def create(self, schema="{}", **kwargs):
"""Creates a stream given an optional JSON schema encoded as a python dict. You can also add other properties
of the stream, such as the icon, datatype or description. Create accepts both a string schema and
a dict-encoded schema."""
if isinstance(schema, basestring):
strschema = schema
schema = json.loads(schema)
else:
strschema = json.dumps(schema)
Draft4Validator.check_schema(schema)
kwargs["schema"] = strschema
self.metadata = self.db.create(self.path, kwargs).json()
|
[
"Creates",
"a",
"stream",
"given",
"an",
"optional",
"JSON",
"schema",
"encoded",
"as",
"a",
"python",
"dict",
".",
"You",
"can",
"also",
"add",
"other",
"properties",
"of",
"the",
"stream",
"such",
"as",
"the",
"icon",
"datatype",
"or",
"description",
".",
"Create",
"accepts",
"both",
"a",
"string",
"schema",
"and",
"a",
"dict",
"-",
"encoded",
"schema",
"."
] |
connectordb/connectordb-python
|
python
|
https://github.com/connectordb/connectordb-python/blob/2092b0cb30898139a247176bcf433d5a4abde7cb/connectordb/_stream.py#L58-L69
|
[
"def",
"create",
"(",
"self",
",",
"schema",
"=",
"\"{}\"",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"isinstance",
"(",
"schema",
",",
"basestring",
")",
":",
"strschema",
"=",
"schema",
"schema",
"=",
"json",
".",
"loads",
"(",
"schema",
")",
"else",
":",
"strschema",
"=",
"json",
".",
"dumps",
"(",
"schema",
")",
"Draft4Validator",
".",
"check_schema",
"(",
"schema",
")",
"kwargs",
"[",
"\"schema\"",
"]",
"=",
"strschema",
"self",
".",
"metadata",
"=",
"self",
".",
"db",
".",
"create",
"(",
"self",
".",
"path",
",",
"kwargs",
")",
".",
"json",
"(",
")"
] |
2092b0cb30898139a247176bcf433d5a4abde7cb
|
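A brief usage sketch in the style of the surrounding docstrings; cdb stands for an already-connected ConnectorDB handle, as in the examples above, and the description keyword is one of the extra stream properties the docstring says create accepts:

s = cdb["mystream"]                        # cdb: an existing ConnectorDB connection
s.create({"type": "number"},               # dict schema; a JSON string also works
         description="temperature readings")
s.insert(22.5)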
test
|
Stream.insert_array
|
given an array of datapoints, inserts them to the stream. This is different from insert(),
because it requires an array of valid datapoints, whereas insert only requires the data portion
of the datapoint, and fills out the rest::
s = cdb["mystream"]
s.create({"type": "number"})
s.insert_array([{"d": 4, "t": time.time()},{"d": 5, "t": time.time()}], restamp=False)
The optional `restamp` parameter specifies whether or not the database should rewrite the timestamps
of datapoints which have a timestamp that is less than one that already exists in the database.
That is, if restamp is False, and a datapoint has a timestamp less than a datapoint that already
exists in the database, then the insert will fail. If restamp is True, then all datapoints
with timestamps below the datapoints already in the database will have their timestamps overwritten
to the same timestamp as the most recent datapoint that already exists in the database, and the insert will
succeed.
|
connectordb/_stream.py
|
def insert_array(self, datapoint_array, restamp=False):
"""given an array of datapoints, inserts them to the stream. This is different from insert(),
because it requires an array of valid datapoints, whereas insert only requires the data portion
of the datapoint, and fills out the rest::
s = cdb["mystream"]
s.create({"type": "number"})
s.insert_array([{"d": 4, "t": time.time()},{"d": 5, "t": time.time()}], restamp=False)
The optional `restamp` parameter specifies whether or not the database should rewrite the timestamps
of datapoints which have a timestamp that is less than one that already exists in the database.
That is, if restamp is False, and a datapoint has a timestamp less than a datapoint that already
exists in the database, then the insert will fail. If restamp is True, then all datapoints
with timestamps below the datapoints already in the database will have their timestamps overwritten
    to the same timestamp as the most recent datapoint that already exists in the database, and the insert will
succeed.
"""
# To be safe, we split into chunks
while (len(datapoint_array) > DATAPOINT_INSERT_LIMIT):
# We insert datapoints in chunks of a couple thousand so that they
# fit in the insert size limit of ConnectorDB
a = datapoint_array[:DATAPOINT_INSERT_LIMIT]
if restamp:
self.db.update(self.path + "/data", a)
else:
self.db.create(self.path + "/data", a)
# Clear the written datapoints
datapoint_array = datapoint_array[DATAPOINT_INSERT_LIMIT:]
if restamp:
self.db.update(self.path + "/data", datapoint_array)
else:
self.db.create(self.path + "/data", datapoint_array)
|
def insert_array(self, datapoint_array, restamp=False):
"""given an array of datapoints, inserts them to the stream. This is different from insert(),
because it requires an array of valid datapoints, whereas insert only requires the data portion
of the datapoint, and fills out the rest::
s = cdb["mystream"]
s.create({"type": "number"})
s.insert_array([{"d": 4, "t": time.time()},{"d": 5, "t": time.time()}], restamp=False)
The optional `restamp` parameter specifies whether or not the database should rewrite the timestamps
of datapoints which have a timestamp that is less than one that already exists in the database.
That is, if restamp is False, and a datapoint has a timestamp less than a datapoint that already
exists in the database, then the insert will fail. If restamp is True, then all datapoints
with timestamps below the datapoints already in the database will have their timestamps overwritten
    to the same timestamp as the most recent datapoint that already exists in the database, and the insert will
succeed.
"""
# To be safe, we split into chunks
while (len(datapoint_array) > DATAPOINT_INSERT_LIMIT):
# We insert datapoints in chunks of a couple thousand so that they
# fit in the insert size limit of ConnectorDB
a = datapoint_array[:DATAPOINT_INSERT_LIMIT]
if restamp:
self.db.update(self.path + "/data", a)
else:
self.db.create(self.path + "/data", a)
# Clear the written datapoints
datapoint_array = datapoint_array[DATAPOINT_INSERT_LIMIT:]
if restamp:
self.db.update(self.path + "/data", datapoint_array)
else:
self.db.create(self.path + "/data", datapoint_array)
|
[
"given",
"an",
"array",
"of",
"datapoints",
"inserts",
"them",
"to",
"the",
"stream",
".",
"This",
"is",
"different",
"from",
"insert",
"()",
"because",
"it",
"requires",
"an",
"array",
"of",
"valid",
"datapoints",
"whereas",
"insert",
"only",
"requires",
"the",
"data",
"portion",
"of",
"the",
"datapoint",
"and",
"fills",
"out",
"the",
"rest",
"::"
] |
connectordb/connectordb-python
|
python
|
https://github.com/connectordb/connectordb-python/blob/2092b0cb30898139a247176bcf433d5a4abde7cb/connectordb/_stream.py#L71-L108
|
[
"def",
"insert_array",
"(",
"self",
",",
"datapoint_array",
",",
"restamp",
"=",
"False",
")",
":",
"# To be safe, we split into chunks",
"while",
"(",
"len",
"(",
"datapoint_array",
")",
">",
"DATAPOINT_INSERT_LIMIT",
")",
":",
"# We insert datapoints in chunks of a couple thousand so that they",
"# fit in the insert size limit of ConnectorDB",
"a",
"=",
"datapoint_array",
"[",
":",
"DATAPOINT_INSERT_LIMIT",
"]",
"if",
"restamp",
":",
"self",
".",
"db",
".",
"update",
"(",
"self",
".",
"path",
"+",
"\"/data\"",
",",
"a",
")",
"else",
":",
"self",
".",
"db",
".",
"create",
"(",
"self",
".",
"path",
"+",
"\"/data\"",
",",
"a",
")",
"# Clear the written datapoints",
"datapoint_array",
"=",
"datapoint_array",
"[",
"DATAPOINT_INSERT_LIMIT",
":",
"]",
"if",
"restamp",
":",
"self",
".",
"db",
".",
"update",
"(",
"self",
".",
"path",
"+",
"\"/data\"",
",",
"datapoint_array",
")",
"else",
":",
"self",
".",
"db",
".",
"create",
"(",
"self",
".",
"path",
"+",
"\"/data\"",
",",
"datapoint_array",
")"
] |
2092b0cb30898139a247176bcf433d5a4abde7cb
|
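A short sketch of bulk insertion with and without restamping, following the docstring's example; cdb and the pre-created stream are assumptions carried over from the earlier examples:

import time

s = cdb["mystream"]                         # stream already created with a number schema
now = time.time()
points = [{"d": i, "t": now + i} for i in range(5)]

# Strict insert: fails if any timestamp precedes data already in the stream.
s.insert_array(points)

# Lenient insert: out-of-date timestamps are rewritten to the most recent one.
s.insert_array(points, restamp=True)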
test
|
Stream.insert
|
insert inserts one datapoint with the given data, and appends it to
the end of the stream::
s = cdb["mystream"]
s.create({"type": "string"})
s.insert("Hello World!")
|
connectordb/_stream.py
|
def insert(self, data):
"""insert inserts one datapoint with the given data, and appends it to
the end of the stream::
s = cdb["mystream"]
s.create({"type": "string"})
s.insert("Hello World!")
"""
self.insert_array([{"d": data, "t": time.time()}], restamp=True)
|
def insert(self, data):
"""insert inserts one datapoint with the given data, and appends it to
the end of the stream::
s = cdb["mystream"]
s.create({"type": "string"})
s.insert("Hello World!")
"""
self.insert_array([{"d": data, "t": time.time()}], restamp=True)
|
[
"insert",
"inserts",
"one",
"datapoint",
"with",
"the",
"given",
"data",
"and",
"appends",
"it",
"to",
"the",
"end",
"of",
"the",
"stream",
"::"
] |
connectordb/connectordb-python
|
python
|
https://github.com/connectordb/connectordb-python/blob/2092b0cb30898139a247176bcf433d5a4abde7cb/connectordb/_stream.py#L110-L121
|
[
"def",
"insert",
"(",
"self",
",",
"data",
")",
":",
"self",
".",
"insert_array",
"(",
"[",
"{",
"\"d\"",
":",
"data",
",",
"\"t\"",
":",
"time",
".",
"time",
"(",
")",
"}",
"]",
",",
"restamp",
"=",
"True",
")"
] |
2092b0cb30898139a247176bcf433d5a4abde7cb
|
test
|
Stream.subscribe
|
Subscribes to the stream, running the callback function each time datapoints are inserted into
the given stream. There is an optional transform to the datapoints, and a downlink parameter.::
s = cdb["mystream"]
def subscription_callback(stream,data):
print stream, data
s.subscribe(subscription_callback)
The downlink parameter is for downlink streams - it allows you to subscribe to the downlink substream,
before it is acknowledged. This is especially useful for something like lights - have lights be
a boolean downlink stream, and the light itself be subscribed to the downlink, so that other
devices can write to the light, turning it on and off::
def light_control(stream,data):
light_boolean = data[0]["d"]
print "Setting light to", light_boolean
set_light(light_boolean)
#Acknowledge the write
return True
# We don't care about intermediate values, we only want the most recent setting
# of the light, meaning we want the "if last" transform
s.subscribe(light_control, downlink=True, transform="if last")
|
connectordb/_stream.py
|
def subscribe(self, callback, transform="", downlink=False):
"""Subscribes to the stream, running the callback function each time datapoints are inserted into
the given stream. There is an optional transform to the datapoints, and a downlink parameter.::
s = cdb["mystream"]
def subscription_callback(stream,data):
print stream, data
s.subscribe(subscription_callback)
    The downlink parameter is for downlink streams - it allows you to subscribe to the downlink substream,
before it is acknowledged. This is especially useful for something like lights - have lights be
a boolean downlink stream, and the light itself be subscribed to the downlink, so that other
devices can write to the light, turning it on and off::
def light_control(stream,data):
light_boolean = data[0]["d"]
print "Setting light to", light_boolean
set_light(light_boolean)
#Acknowledge the write
return True
# We don't care about intermediate values, we only want the most recent setting
# of the light, meaning we want the "if last" transform
s.subscribe(light_control, downlink=True, transform="if last")
"""
streampath = self.path
if downlink:
streampath += "/downlink"
return self.db.subscribe(streampath, callback, transform)
|
def subscribe(self, callback, transform="", downlink=False):
"""Subscribes to the stream, running the callback function each time datapoints are inserted into
the given stream. There is an optional transform to the datapoints, and a downlink parameter.::
s = cdb["mystream"]
def subscription_callback(stream,data):
print stream, data
s.subscribe(subscription_callback)
    The downlink parameter is for downlink streams - it allows you to subscribe to the downlink substream,
before it is acknowledged. This is especially useful for something like lights - have lights be
a boolean downlink stream, and the light itself be subscribed to the downlink, so that other
devices can write to the light, turning it on and off::
def light_control(stream,data):
light_boolean = data[0]["d"]
print "Setting light to", light_boolean
set_light(light_boolean)
#Acknowledge the write
return True
# We don't care about intermediate values, we only want the most recent setting
# of the light, meaning we want the "if last" transform
s.subscribe(light_control, downlink=True, transform="if last")
"""
streampath = self.path
if downlink:
streampath += "/downlink"
return self.db.subscribe(streampath, callback, transform)
|
[
"Subscribes",
"to",
"the",
"stream",
"running",
"the",
"callback",
"function",
"each",
"time",
"datapoints",
"are",
"inserted",
"into",
"the",
"given",
"stream",
".",
"There",
"is",
"an",
"optional",
"transform",
"to",
"the",
"datapoints",
"and",
"a",
"downlink",
"parameter",
".",
"::"
] |
connectordb/connectordb-python
|
python
|
https://github.com/connectordb/connectordb-python/blob/2092b0cb30898139a247176bcf433d5a4abde7cb/connectordb/_stream.py#L127-L160
|
[
"def",
"subscribe",
"(",
"self",
",",
"callback",
",",
"transform",
"=",
"\"\"",
",",
"downlink",
"=",
"False",
")",
":",
"streampath",
"=",
"self",
".",
"path",
"if",
"downlink",
":",
"streampath",
"+=",
"\"/downlink\"",
"return",
"self",
".",
"db",
".",
"subscribe",
"(",
"streampath",
",",
"callback",
",",
"transform",
")"
] |
2092b0cb30898139a247176bcf433d5a4abde7cb
|
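A minimal subscribe lifecycle sketch based on the docstring above, paired with the matching unsubscribe call documented in the next record; the callback body is illustrative, and the transform argument must match between the two calls:

def on_data(stream, data):
    print(stream, data)
    return True                            # acknowledge writes on downlink subscriptions

s = cdb["mystream"]
s.subscribe(on_data, transform="if last")
# ... later, tear the subscription down with the same arguments:
s.unsubscribe(transform="if last")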
test
|
Stream.unsubscribe
|
Unsubscribes from a previously subscribed stream. Note that the same values of transform
and downlink must be passed in order to do the correct unsubscribe::
s.subscribe(callback,transform="if last")
s.unsubscribe(transform="if last")
|
connectordb/_stream.py
|
def unsubscribe(self, transform="", downlink=False):
"""Unsubscribes from a previously subscribed stream. Note that the same values of transform
and downlink must be passed in order to do the correct unsubscribe::
s.subscribe(callback,transform="if last")
s.unsubscribe(transform="if last")
"""
streampath = self.path
if downlink:
streampath += "/downlink"
return self.db.unsubscribe(streampath, transform)
|
def unsubscribe(self, transform="", downlink=False):
"""Unsubscribes from a previously subscribed stream. Note that the same values of transform
and downlink must be passed in order to do the correct unsubscribe::
s.subscribe(callback,transform="if last")
s.unsubscribe(transform="if last")
"""
streampath = self.path
if downlink:
streampath += "/downlink"
return self.db.unsubscribe(streampath, transform)
|
[
"Unsubscribes",
"from",
"a",
"previously",
"subscribed",
"stream",
".",
"Note",
"that",
"the",
"same",
"values",
"of",
"transform",
"and",
"downlink",
"must",
"be",
"passed",
"in",
"order",
"to",
"do",
"the",
"correct",
"unsubscribe",
"::"
] |
connectordb/connectordb-python
|
python
|
https://github.com/connectordb/connectordb-python/blob/2092b0cb30898139a247176bcf433d5a4abde7cb/connectordb/_stream.py#L162-L173
|
[
"def",
"unsubscribe",
"(",
"self",
",",
"transform",
"=",
"\"\"",
",",
"downlink",
"=",
"False",
")",
":",
"streampath",
"=",
"self",
".",
"path",
"if",
"downlink",
":",
"streampath",
"+=",
"\"/downlink\"",
"return",
"self",
".",
"db",
".",
"unsubscribe",
"(",
"streampath",
",",
"transform",
")"
] |
2092b0cb30898139a247176bcf433d5a4abde7cb
|
test
|
Stream.export
|
Exports the stream to the given directory. The directory must not already exist.
You can later import this stream by running import_stream on a device.
|
connectordb/_stream.py
|
def export(self, directory):
"""Exports the stream to the given directory. The directory can't exist.
You can later import this device by running import_stream on a device.
"""
if os.path.exists(directory):
raise FileExistsError(
"The stream export directory already exists")
os.mkdir(directory)
# Write the stream's info
with open(os.path.join(directory, "stream.json"), "w") as f:
json.dump(self.data, f)
# Now write the stream's data
# We sort it first, since older versions of ConnectorDB had a bug
# where sometimes datapoints would be returned out of order.
self[:].sort().writeJSON(os.path.join(directory, "data.json"))
# And if the stream is a downlink, write the downlink data
if self.downlink:
self(i1=0, i2=0, downlink=True).sort().writeJSON(os.path.join(directory, "downlink.json"))
|
def export(self, directory):
"""Exports the stream to the given directory. The directory can't exist.
You can later import this device by running import_stream on a device.
"""
if os.path.exists(directory):
raise FileExistsError(
"The stream export directory already exists")
os.mkdir(directory)
# Write the stream's info
with open(os.path.join(directory, "stream.json"), "w") as f:
json.dump(self.data, f)
# Now write the stream's data
# We sort it first, since older versions of ConnectorDB had a bug
# where sometimes datapoints would be returned out of order.
self[:].sort().writeJSON(os.path.join(directory, "data.json"))
# And if the stream is a downlink, write the downlink data
if self.downlink:
self(i1=0, i2=0, downlink=True).sort().writeJSON(os.path.join(directory, "downlink.json"))
|
[
"Exports",
"the",
"stream",
"to",
"the",
"given",
"directory",
".",
"The",
"directory",
"can",
"t",
"exist",
".",
"You",
"can",
"later",
"import",
"this",
"device",
"by",
"running",
"import_stream",
"on",
"a",
"device",
"."
] |
connectordb/connectordb-python
|
python
|
https://github.com/connectordb/connectordb-python/blob/2092b0cb30898139a247176bcf433d5a4abde7cb/connectordb/_stream.py#L228-L249
|
[
"def",
"export",
"(",
"self",
",",
"directory",
")",
":",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"directory",
")",
":",
"raise",
"FileExistsError",
"(",
"\"The stream export directory already exists\"",
")",
"os",
".",
"mkdir",
"(",
"directory",
")",
"# Write the stream's info",
"with",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"directory",
",",
"\"stream.json\"",
")",
",",
"\"w\"",
")",
"as",
"f",
":",
"json",
".",
"dump",
"(",
"self",
".",
"data",
",",
"f",
")",
"# Now write the stream's data",
"# We sort it first, since older versions of ConnectorDB had a bug",
"# where sometimes datapoints would be returned out of order.",
"self",
"[",
":",
"]",
".",
"sort",
"(",
")",
".",
"writeJSON",
"(",
"os",
".",
"path",
".",
"join",
"(",
"directory",
",",
"\"data.json\"",
")",
")",
"# And if the stream is a downlink, write the downlink data",
"if",
"self",
".",
"downlink",
":",
"self",
"(",
"i1",
"=",
"0",
",",
"i2",
"=",
"0",
",",
"downlink",
"=",
"True",
")",
".",
"sort",
"(",
")",
".",
"writeJSON",
"(",
"os",
".",
"path",
".",
"join",
"(",
"directory",
",",
"\"downlink.json\"",
")",
")"
] |
2092b0cb30898139a247176bcf433d5a4abde7cb
|
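A hedged sketch of exporting a stream to disk; the target directory is an arbitrary example path, and import_stream is the device-level counterpart named in the docstring:

s = cdb["mystream"]
s.export("./backup/mystream")              # the directory must not already exist
# Restoring later would go through the owning device, e.g.:
# device.import_stream("./backup/mystream")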
test
|
Stream.schema
|
sets the stream's schema. An empty schema is "{}". The schemas allow you to set a specific data type.
Both python dicts and strings are accepted.
|
connectordb/_stream.py
|
def schema(self, schema):
"""sets the stream's schema. An empty schema is "{}". The schemas allow you to set a specific data type.
Both python dicts and strings are accepted."""
if isinstance(schema, basestring):
strschema = schema
schema = json.loads(schema)
else:
strschema = json.dumps(schema)
Draft4Validator.check_schema(schema)
self.set({"schema": strschema})
|
def schema(self, schema):
"""sets the stream's schema. An empty schema is "{}". The schemas allow you to set a specific data type.
Both python dicts and strings are accepted."""
if isinstance(schema, basestring):
strschema = schema
schema = json.loads(schema)
else:
strschema = json.dumps(schema)
Draft4Validator.check_schema(schema)
self.set({"schema": strschema})
|
[
"sets",
"the",
"stream",
"s",
"schema",
".",
"An",
"empty",
"schema",
"is",
"{}",
".",
"The",
"schemas",
"allow",
"you",
"to",
"set",
"a",
"specific",
"data",
"type",
".",
"Both",
"python",
"dicts",
"and",
"strings",
"are",
"accepted",
"."
] |
connectordb/connectordb-python
|
python
|
https://github.com/connectordb/connectordb-python/blob/2092b0cb30898139a247176bcf433d5a4abde7cb/connectordb/_stream.py#L305-L314
|
[
"def",
"schema",
"(",
"self",
",",
"schema",
")",
":",
"if",
"isinstance",
"(",
"schema",
",",
"basestring",
")",
":",
"strschema",
"=",
"schema",
"schema",
"=",
"json",
".",
"loads",
"(",
"schema",
")",
"else",
":",
"strschema",
"=",
"json",
".",
"dumps",
"(",
"schema",
")",
"Draft4Validator",
".",
"check_schema",
"(",
"schema",
")",
"self",
".",
"set",
"(",
"{",
"\"schema\"",
":",
"strschema",
"}",
")"
] |
2092b0cb30898139a247176bcf433d5a4abde7cb
|
test
|
Stream.device
|
returns the device which owns the given stream
|
connectordb/_stream.py
|
def device(self):
"""returns the device which owns the given stream"""
splitted_path = self.path.split("/")
return Device(self.db,
splitted_path[0] + "/" + splitted_path[1])
|
def device(self):
"""returns the device which owns the given stream"""
splitted_path = self.path.split("/")
return Device(self.db,
splitted_path[0] + "/" + splitted_path[1])
|
[
"returns",
"the",
"device",
"which",
"owns",
"the",
"given",
"stream"
] |
connectordb/connectordb-python
|
python
|
https://github.com/connectordb/connectordb-python/blob/2092b0cb30898139a247176bcf433d5a4abde7cb/connectordb/_stream.py#L322-L327
|
[
"def",
"device",
"(",
"self",
")",
":",
"splitted_path",
"=",
"self",
".",
"path",
".",
"split",
"(",
"\"/\"",
")",
"return",
"Device",
"(",
"self",
".",
"db",
",",
"splitted_path",
"[",
"0",
"]",
"+",
"\"/\"",
"+",
"splitted_path",
"[",
"1",
"]",
")"
] |
2092b0cb30898139a247176bcf433d5a4abde7cb
|
test
|
get_labels
|
Iterates over the labels of terms in the ontology
:param str ontology: The name of the ontology
:param str ols_base: An optional, custom OLS base url
:rtype: iter[str]
|
src/ols_client/api.py
|
def get_labels(ontology, ols_base=None):
"""Iterates over the labels of terms in the ontology
:param str ontology: The name of the ontology
:param str ols_base: An optional, custom OLS base url
:rtype: iter[str]
"""
client = OlsClient(ols_base=ols_base)
return client.iter_labels(ontology)
|
def get_labels(ontology, ols_base=None):
"""Iterates over the labels of terms in the ontology
:param str ontology: The name of the ontology
:param str ols_base: An optional, custom OLS base url
:rtype: iter[str]
"""
client = OlsClient(ols_base=ols_base)
return client.iter_labels(ontology)
|
[
"Iterates",
"over",
"the",
"labels",
"of",
"terms",
"in",
"the",
"ontology"
] |
cthoyt/ols-client
|
python
|
https://github.com/cthoyt/ols-client/blob/8c6bb54888675652d25324184967392d00d128fc/src/ols_client/api.py#L16-L24
|
[
"def",
"get_labels",
"(",
"ontology",
",",
"ols_base",
"=",
"None",
")",
":",
"client",
"=",
"OlsClient",
"(",
"ols_base",
"=",
"ols_base",
")",
"return",
"client",
".",
"iter_labels",
"(",
"ontology",
")"
] |
8c6bb54888675652d25324184967392d00d128fc
|
test
|
get_metadata
|
Gets the metadata for a given ontology
:param str ontology: The name of the ontology
:param str ols_base: An optional, custom OLS base url
:return: The dictionary representing the JSON from the OLS
:rtype: dict
|
src/ols_client/api.py
|
def get_metadata(ontology, ols_base=None):
"""Gets the metadata for a given ontology
:param str ontology: The name of the ontology
:param str ols_base: An optional, custom OLS base url
:return: The dictionary representing the JSON from the OLS
:rtype: dict
"""
client = OlsClient(ols_base=ols_base)
return client.get_ontology(ontology)
|
def get_metadata(ontology, ols_base=None):
"""Gets the metadata for a given ontology
:param str ontology: The name of the ontology
:param str ols_base: An optional, custom OLS base url
:return: The dictionary representing the JSON from the OLS
:rtype: dict
"""
client = OlsClient(ols_base=ols_base)
return client.get_ontology(ontology)
|
[
"Gets",
"the",
"metadata",
"for",
"a",
"given",
"ontology"
] |
cthoyt/ols-client
|
python
|
https://github.com/cthoyt/ols-client/blob/8c6bb54888675652d25324184967392d00d128fc/src/ols_client/api.py#L27-L36
|
[
"def",
"get_metadata",
"(",
"ontology",
",",
"ols_base",
"=",
"None",
")",
":",
"client",
"=",
"OlsClient",
"(",
"ols_base",
"=",
"ols_base",
")",
"return",
"client",
".",
"get_ontology",
"(",
"ontology",
")"
] |
8c6bb54888675652d25324184967392d00d128fc
|
test
|
get_hierarchy
|
Iterates over the parent-child relationships in an ontology
:param str ontology: The name of the ontology
:param str ols_base: An optional, custom OLS base url
:rtype: iter[tuple[str,str]]
|
src/ols_client/api.py
|
def get_hierarchy(ontology, ols_base=None):
"""Iterates over the parent-child relationships in an ontolog
:param str ontology: The name of the ontology
:param str ols_base: An optional, custom OLS base url
:rtype: iter[tuple[str,str]]
"""
client = OlsClient(ols_base=ols_base)
return client.iter_hierarchy(ontology)
|
def get_hierarchy(ontology, ols_base=None):
"""Iterates over the parent-child relationships in an ontolog
:param str ontology: The name of the ontology
:param str ols_base: An optional, custom OLS base url
:rtype: iter[tuple[str,str]]
"""
client = OlsClient(ols_base=ols_base)
return client.iter_hierarchy(ontology)
|
[
"Iterates",
"over",
"the",
"parent",
"-",
"child",
"relationships",
"in",
"an",
"ontolog"
] |
cthoyt/ols-client
|
python
|
https://github.com/cthoyt/ols-client/blob/8c6bb54888675652d25324184967392d00d128fc/src/ols_client/api.py#L39-L47
|
[
"def",
"get_hierarchy",
"(",
"ontology",
",",
"ols_base",
"=",
"None",
")",
":",
"client",
"=",
"OlsClient",
"(",
"ols_base",
"=",
"ols_base",
")",
"return",
"client",
".",
"iter_hierarchy",
"(",
"ontology",
")"
] |
8c6bb54888675652d25324184967392d00d128fc
|
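The three ols_client helpers above (get_labels, get_metadata, get_hierarchy) are thin wrappers around OlsClient. A hypothetical usage sketch, assuming the package is installed as ols_client and the default OLS instance is reachable; 'go' is just an example ontology name:

from ols_client.api import get_labels, get_metadata

metadata = get_metadata('go')          # raw JSON dict describing the ontology
print(sorted(metadata.keys()))

for label in get_labels('go'):         # lazily yields term labels
    print(label)
    break                              # show only the first label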
test
|
AbstractPipelineWrapper.run
|
Prepares and runs the pipeline. Note this method must be invoked both from inside a
Docker container and while the docker daemon is reachable.
:param str name: The name of the command to start the workflow.
:param str desc: The description of the workflow.
|
src/toil_lib/abstractPipelineWrapper.py
|
def run(cls, name, desc):
"""
Prepares and runs the pipeline. Note this method must be invoked both from inside a
Docker container and while the docker daemon is reachable.
:param str name: The name of the command to start the workflow.
:param str desc: The description of the workflow.
"""
wrapper = cls(name, desc)
mount_path = wrapper._get_mount_path()
# prepare parser
arg_parser = wrapper._create_argument_parser()
wrapper._extend_argument_parser(arg_parser)
# prepare config file
empty_config = wrapper.__get_empty_config()
config_yaml = ruamel.yaml.load(empty_config)
wrapper.__populate_parser_from_config(arg_parser, config_yaml)
args = arg_parser.parse_args()
for k,v in vars(args).items():
k = k.replace('_', '-')
if k in config_yaml:
config_yaml[k] = v
config_path = wrapper._get_config_path()
with open(config_path, 'w') as writable:
ruamel.yaml.dump(config_yaml, stream=writable)
# prepare workdir
workdir_path = os.path.join(mount_path, 'Toil-' + wrapper._name)
if os.path.exists(workdir_path):
if args.restart:
log.info('Reusing temporary directory: %s', workdir_path)
else:
raise UserError('Temporary directory {} already exists. Run with --restart '
'option or remove directory.'.format(workdir_path))
else:
os.makedirs(workdir_path)
log.info('Temporary directory created: %s', workdir_path)
command = wrapper._create_pipeline_command(args, workdir_path, config_path)
wrapper._extend_pipeline_command(command, args)
# run command
try:
subprocess.check_call(command)
except subprocess.CalledProcessError as e:
print(e, file=sys.stderr)
finally:
stat = os.stat(mount_path)
log.info('Pipeline terminated, changing ownership of output files in %s from root to '
'uid %s and gid %s.', mount_path, stat.st_uid, stat.st_gid)
chown_command = ['chown', '-R', '%s:%s' % (stat.st_uid, stat.st_gid), mount_path]
subprocess.check_call(chown_command)
if args.no_clean:
log.info('Flag "--no-clean" was used, therefore %s was not deleted.', workdir_path)
else:
log.info('Cleaning up temporary directory: %s', workdir_path)
shutil.rmtree(workdir_path)
|
def run(cls, name, desc):
"""
Prepares and runs the pipeline. Note this method must be invoked both from inside a
Docker container and while the docker daemon is reachable.
:param str name: The name of the command to start the workflow.
:param str desc: The description of the workflow.
"""
wrapper = cls(name, desc)
mount_path = wrapper._get_mount_path()
# prepare parser
arg_parser = wrapper._create_argument_parser()
wrapper._extend_argument_parser(arg_parser)
# prepare config file
empty_config = wrapper.__get_empty_config()
config_yaml = ruamel.yaml.load(empty_config)
wrapper.__populate_parser_from_config(arg_parser, config_yaml)
args = arg_parser.parse_args()
for k,v in vars(args).items():
k = k.replace('_', '-')
if k in config_yaml:
config_yaml[k] = v
config_path = wrapper._get_config_path()
with open(config_path, 'w') as writable:
ruamel.yaml.dump(config_yaml, stream=writable)
# prepare workdir
workdir_path = os.path.join(mount_path, 'Toil-' + wrapper._name)
if os.path.exists(workdir_path):
if args.restart:
log.info('Reusing temporary directory: %s', workdir_path)
else:
raise UserError('Temporary directory {} already exists. Run with --restart '
'option or remove directory.'.format(workdir_path))
else:
os.makedirs(workdir_path)
log.info('Temporary directory created: %s', workdir_path)
command = wrapper._create_pipeline_command(args, workdir_path, config_path)
wrapper._extend_pipeline_command(command, args)
# run command
try:
subprocess.check_call(command)
except subprocess.CalledProcessError as e:
print(e, file=sys.stderr)
finally:
stat = os.stat(mount_path)
log.info('Pipeline terminated, changing ownership of output files in %s from root to '
'uid %s and gid %s.', mount_path, stat.st_uid, stat.st_gid)
chown_command = ['chown', '-R', '%s:%s' % (stat.st_uid, stat.st_gid), mount_path]
subprocess.check_call(chown_command)
if args.no_clean:
log.info('Flag "--no-clean" was used, therefore %s was not deleted.', workdir_path)
else:
log.info('Cleaning up temporary directory: %s', workdir_path)
shutil.rmtree(workdir_path)
|
[
"Prepares",
"and",
"runs",
"the",
"pipeline",
".",
"Note",
"this",
"method",
"must",
"be",
"invoked",
"both",
"from",
"inside",
"a",
"Docker",
"container",
"and",
"while",
"the",
"docker",
"daemon",
"is",
"reachable",
"."
] |
BD2KGenomics/toil-lib
|
python
|
https://github.com/BD2KGenomics/toil-lib/blob/022a615fc3dc98fc1aaa7bfd232409962ca44fbd/src/toil_lib/abstractPipelineWrapper.py#L32-L86
|
[
"def",
"run",
"(",
"cls",
",",
"name",
",",
"desc",
")",
":",
"wrapper",
"=",
"cls",
"(",
"name",
",",
"desc",
")",
"mount_path",
"=",
"wrapper",
".",
"_get_mount_path",
"(",
")",
"# prepare parser",
"arg_parser",
"=",
"wrapper",
".",
"_create_argument_parser",
"(",
")",
"wrapper",
".",
"_extend_argument_parser",
"(",
"arg_parser",
")",
"# prepare config file",
"empty_config",
"=",
"wrapper",
".",
"__get_empty_config",
"(",
")",
"config_yaml",
"=",
"ruamel",
".",
"yaml",
".",
"load",
"(",
"empty_config",
")",
"wrapper",
".",
"__populate_parser_from_config",
"(",
"arg_parser",
",",
"config_yaml",
")",
"args",
"=",
"arg_parser",
".",
"parse_args",
"(",
")",
"for",
"k",
",",
"v",
"in",
"vars",
"(",
"args",
")",
".",
"items",
"(",
")",
":",
"k",
"=",
"k",
".",
"replace",
"(",
"'_'",
",",
"'-'",
")",
"if",
"k",
"in",
"config_yaml",
":",
"config_yaml",
"[",
"k",
"]",
"=",
"v",
"config_path",
"=",
"wrapper",
".",
"_get_config_path",
"(",
")",
"with",
"open",
"(",
"config_path",
",",
"'w'",
")",
"as",
"writable",
":",
"ruamel",
".",
"yaml",
".",
"dump",
"(",
"config_yaml",
",",
"stream",
"=",
"writable",
")",
"# prepare workdir",
"workdir_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"mount_path",
",",
"'Toil-'",
"+",
"wrapper",
".",
"_name",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"workdir_path",
")",
":",
"if",
"args",
".",
"restart",
":",
"log",
".",
"info",
"(",
"'Reusing temporary directory: %s'",
",",
"workdir_path",
")",
"else",
":",
"raise",
"UserError",
"(",
"'Temporary directory {} already exists. Run with --restart '",
"'option or remove directory.'",
".",
"format",
"(",
"workdir_path",
")",
")",
"else",
":",
"os",
".",
"makedirs",
"(",
"workdir_path",
")",
"log",
".",
"info",
"(",
"'Temporary directory created: %s'",
",",
"workdir_path",
")",
"command",
"=",
"wrapper",
".",
"_create_pipeline_command",
"(",
"args",
",",
"workdir_path",
",",
"config_path",
")",
"wrapper",
".",
"_extend_pipeline_command",
"(",
"command",
",",
"args",
")",
"# run command",
"try",
":",
"subprocess",
".",
"check_call",
"(",
"command",
")",
"except",
"subprocess",
".",
"CalledProcessError",
"as",
"e",
":",
"print",
"(",
"e",
",",
"file",
"=",
"sys",
".",
"stderr",
")",
"finally",
":",
"stat",
"=",
"os",
".",
"stat",
"(",
"mount_path",
")",
"log",
".",
"info",
"(",
"'Pipeline terminated, changing ownership of output files in %s from root to '",
"'uid %s and gid %s.'",
",",
"mount_path",
",",
"stat",
".",
"st_uid",
",",
"stat",
".",
"st_gid",
")",
"chown_command",
"=",
"[",
"'chown'",
",",
"'-R'",
",",
"'%s:%s'",
"%",
"(",
"stat",
".",
"st_uid",
",",
"stat",
".",
"st_gid",
")",
",",
"mount_path",
"]",
"subprocess",
".",
"check_call",
"(",
"chown_command",
")",
"if",
"args",
".",
"no_clean",
":",
"log",
".",
"info",
"(",
"'Flag \"--no-clean\" was used, therefore %s was not deleted.'",
",",
"workdir_path",
")",
"else",
":",
"log",
".",
"info",
"(",
"'Cleaning up temporary directory: %s'",
",",
"workdir_path",
")",
"shutil",
".",
"rmtree",
"(",
"workdir_path",
")"
] |
022a615fc3dc98fc1aaa7bfd232409962ca44fbd
|
test
|
AbstractPipelineWrapper.__populate_parser_from_config
|
Populates an ArgumentParser object with arguments where each argument is a key from the
given config_data dictionary.
:param str prefix: Prepends the key with this prefix delimited by a single '.' character.
:param argparse.ArgumentParser arg_parser:
:param dict config_data: The parsed yaml data from the config.
>>> pw = AbstractPipelineWrapper('test', 'this is a test')
>>> parser = argparse.ArgumentParser()
>>> pw._PipelineWrapperBuilder__populate_parser_from_config(parser, {'a':None, 'b':2})
>>> vars(parser.parse_args(['--a', '1']))
{'a': '1', 'b': 2}
>>> vars(parser.parse_args(['--b', '3']))
{'a': None, 'b': '3'}
>>> parser = argparse.ArgumentParser()
>>> pw._PipelineWrapperBuilder__populate_parser_from_config(parser, {})
>>> vars(parser.parse_args([]))
{}
>>> parser = argparse.ArgumentParser()
>>> pw._PipelineWrapperBuilder__populate_parser_from_config(parser,
... dict(a={'a':'b', 'c':{'d':'e'}},
... f='g', h={}))
>>> vars(parser.parse_args([]))
{'f': 'g', 'a.a': 'b', 'a.c.d': 'e'}
|
src/toil_lib/abstractPipelineWrapper.py
|
def __populate_parser_from_config(self, arg_parser, config_data, prefix=''):
"""
Populates an ArgumentParser object with arguments where each argument is a key from the
given config_data dictionary.
:param str prefix: Prepends the key with this prefix delimited by a single '.' character.
:param argparse.ArgumentParser arg_parser:
:param dict config_data: The parsed yaml data from the config.
>>> pw = AbstractPipelineWrapper('test', 'this is a test')
>>> parser = argparse.ArgumentParser()
>>> pw._PipelineWrapperBuilder__populate_parser_from_config(parser, {'a':None, 'b':2})
>>> vars(parser.parse_args(['--a', '1']))
{'a': '1', 'b': 2}
>>> vars(parser.parse_args(['--b', '3']))
{'a': None, 'b': '3'}
>>> parser = argparse.ArgumentParser()
>>> pw._PipelineWrapperBuilder__populate_parser_from_config(parser, {})
>>> vars(parser.parse_args([]))
{}
>>> parser = argparse.ArgumentParser()
>>> pw._PipelineWrapperBuilder__populate_parser_from_config(parser,
... dict(a={'a':'b', 'c':{'d':'e'}},
... f='g', h={}))
>>> vars(parser.parse_args([]))
{'f': 'g', 'a.a': 'b', 'a.c.d': 'e'}
"""
for k,v in config_data.items():
k = prefix + '.' + k if prefix else k
if isinstance(v, dict):
self.__populate_parser_from_config(arg_parser, v, prefix=k)
else:
self._add_option(arg_parser, name=k, default=v)
|
def __populate_parser_from_config(self, arg_parser, config_data, prefix=''):
"""
Populates an ArgumentParser object with arguments where each argument is a key from the
given config_data dictionary.
:param str prefix: Prepends the key with this prefix delimited by a single '.' character.
:param argparse.ArgumentParser arg_parser:
:param dict config_data: The parsed yaml data from the config.
>>> pw = AbstractPipelineWrapper('test', 'this is a test')
>>> parser = argparse.ArgumentParser()
>>> pw._PipelineWrapperBuilder__populate_parser_from_config(parser, {'a':None, 'b':2})
>>> vars(parser.parse_args(['--a', '1']))
{'a': '1', 'b': 2}
>>> vars(parser.parse_args(['--b', '3']))
{'a': None, 'b': '3'}
>>> parser = argparse.ArgumentParser()
>>> pw._PipelineWrapperBuilder__populate_parser_from_config(parser, {})
>>> vars(parser.parse_args([]))
{}
>>> parser = argparse.ArgumentParser()
>>> pw._PipelineWrapperBuilder__populate_parser_from_config(parser,
... dict(a={'a':'b', 'c':{'d':'e'}},
... f='g', h={}))
>>> vars(parser.parse_args([]))
{'f': 'g', 'a.a': 'b', 'a.c.d': 'e'}
"""
for k,v in config_data.items():
k = prefix + '.' + k if prefix else k
if isinstance(v, dict):
self.__populate_parser_from_config(arg_parser, v, prefix=k)
else:
self._add_option(arg_parser, name=k, default=v)
|
[
"Populates",
"an",
"ArgumentParser",
"object",
"with",
"arguments",
"where",
"each",
"argument",
"is",
"a",
"key",
"from",
"the",
"given",
"config_data",
"dictionary",
"."
] |
BD2KGenomics/toil-lib
|
python
|
https://github.com/BD2KGenomics/toil-lib/blob/022a615fc3dc98fc1aaa7bfd232409962ca44fbd/src/toil_lib/abstractPipelineWrapper.py#L88-L121
|
[
"def",
"__populate_parser_from_config",
"(",
"self",
",",
"arg_parser",
",",
"config_data",
",",
"prefix",
"=",
"''",
")",
":",
"for",
"k",
",",
"v",
"in",
"config_data",
".",
"items",
"(",
")",
":",
"k",
"=",
"prefix",
"+",
"'.'",
"+",
"k",
"if",
"prefix",
"else",
"k",
"if",
"isinstance",
"(",
"v",
",",
"dict",
")",
":",
"self",
".",
"__populate_parser_from_config",
"(",
"arg_parser",
",",
"v",
",",
"prefix",
"=",
"k",
")",
"else",
":",
"self",
".",
"_add_option",
"(",
"arg_parser",
",",
"name",
"=",
"k",
",",
"default",
"=",
"v",
")"
] |
022a615fc3dc98fc1aaa7bfd232409962ca44fbd
|
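The private helper above flattens a nested config dict into dotted argparse options. A standalone sketch of the same idea (not the class method itself), runnable on its own:

import argparse

def add_config_options(parser, config, prefix=''):
    # Nested keys become dotted option names; config values become defaults.
    for key, value in config.items():
        name = prefix + '.' + key if prefix else key
        if isinstance(value, dict):
            add_config_options(parser, value, prefix=name)
        else:
            parser.add_argument('--' + name, default=value)

parser = argparse.ArgumentParser()
add_config_options(parser, {'a': {'b': 1}, 'c': 'x'})
print(vars(parser.parse_args(['--a.b', '2'])))   # -> {'a.b': '2', 'c': 'x'}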
test
|
AbstractPipelineWrapper.__get_empty_config
|
Returns the config file contents as a string. The config file is generated and then deleted.
|
src/toil_lib/abstractPipelineWrapper.py
|
def __get_empty_config(self):
"""
Returns the config file contents as a string. The config file is generated and then deleted.
"""
self._generate_config()
path = self._get_config_path()
with open(path, 'r') as readable:
contents = readable.read()
os.remove(path)
return contents
|
def __get_empty_config(self):
"""
Returns the config file contents as a string. The config file is generated and then deleted.
"""
self._generate_config()
path = self._get_config_path()
with open(path, 'r') as readable:
contents = readable.read()
os.remove(path)
return contents
|
[
"Returns",
"the",
"config",
"file",
"contents",
"as",
"a",
"string",
".",
"The",
"config",
"file",
"is",
"generated",
"and",
"then",
"deleted",
"."
] |
BD2KGenomics/toil-lib
|
python
|
https://github.com/BD2KGenomics/toil-lib/blob/022a615fc3dc98fc1aaa7bfd232409962ca44fbd/src/toil_lib/abstractPipelineWrapper.py#L123-L132
|
[
"def",
"__get_empty_config",
"(",
"self",
")",
":",
"self",
".",
"_generate_config",
"(",
")",
"path",
"=",
"self",
".",
"_get_config_path",
"(",
")",
"with",
"open",
"(",
"path",
",",
"'r'",
")",
"as",
"readable",
":",
"contents",
"=",
"readable",
".",
"read",
"(",
")",
"os",
".",
"remove",
"(",
"path",
")",
"return",
"contents"
] |
022a615fc3dc98fc1aaa7bfd232409962ca44fbd
|
test
|
AbstractPipelineWrapper._get_mount_path
|
Returns the path of the mount point of the current container. If this method is invoked
outside of a Docker container a NotInsideContainerError is raised. Likewise if the docker
daemon is unreachable from inside the container a UserError is raised. This method is
idempotent.
|
src/toil_lib/abstractPipelineWrapper.py
|
def _get_mount_path(self):
"""
Returns the path of the mount point of the current container. If this method is invoked
outside of a Docker container a NotInsideContainerError is raised. Likewise if the docker
daemon is unreachable from inside the container a UserError is raised. This method is
idempotent.
"""
if self._mount_path is None:
name = current_docker_container_id()
if dockerd_is_reachable():
# Get name of mounted volume
blob = json.loads(subprocess.check_output(['docker', 'inspect', name]))
mounts = blob[0]['Mounts']
# Ensure docker.sock is mounted correctly
sock_mnt = [x['Source'] == x['Destination']
for x in mounts if 'docker.sock' in x['Source']]
require(len(sock_mnt) == 1,
'Missing socket mount. Requires the following: '
'docker run -v /var/run/docker.sock:/var/run/docker.sock')
# Ensure formatting of command for 2 mount points
if len(mounts) == 2:
require(all(x['Source'] == x['Destination'] for x in mounts),
'Docker Src/Dst mount points, invoked with the -v argument, '
'must be the same if only using one mount point aside from the docker '
'socket.')
work_mount = [x['Source'] for x in mounts if 'docker.sock' not in x['Source']]
else:
# Ensure only one mirror mount exists aside from docker.sock
mirror_mounts = [x['Source'] for x in mounts if x['Source'] == x['Destination']]
work_mount = [x for x in mirror_mounts if 'docker.sock' not in x]
require(len(work_mount) == 1, 'Wrong number of mirror mounts provided, see '
'documentation.')
self._mount_path = work_mount[0]
log.info('The work mount is: %s', self._mount_path)
else:
raise UserError('Docker daemon is not reachable, ensure Docker is being run with: '
'"-v /var/run/docker.sock:/var/run/docker.sock" as an argument.')
return self._mount_path
|
def _get_mount_path(self):
"""
Returns the path of the mount point of the current container. If this method is invoked
outside of a Docker container a NotInsideContainerError is raised. Likewise if the docker
daemon is unreachable from inside the container a UserError is raised. This method is
idempotent.
"""
if self._mount_path is None:
name = current_docker_container_id()
if dockerd_is_reachable():
# Get name of mounted volume
blob = json.loads(subprocess.check_output(['docker', 'inspect', name]))
mounts = blob[0]['Mounts']
# Ensure docker.sock is mounted correctly
sock_mnt = [x['Source'] == x['Destination']
for x in mounts if 'docker.sock' in x['Source']]
require(len(sock_mnt) == 1,
'Missing socket mount. Requires the following: '
'docker run -v /var/run/docker.sock:/var/run/docker.sock')
# Ensure formatting of command for 2 mount points
if len(mounts) == 2:
require(all(x['Source'] == x['Destination'] for x in mounts),
'Docker Src/Dst mount points, invoked with the -v argument, '
'must be the same if only using one mount point aside from the docker '
'socket.')
work_mount = [x['Source'] for x in mounts if 'docker.sock' not in x['Source']]
else:
# Ensure only one mirror mount exists aside from docker.sock
mirror_mounts = [x['Source'] for x in mounts if x['Source'] == x['Destination']]
work_mount = [x for x in mirror_mounts if 'docker.sock' not in x]
require(len(work_mount) == 1, 'Wrong number of mirror mounts provided, see '
'documentation.')
self._mount_path = work_mount[0]
log.info('The work mount is: %s', self._mount_path)
else:
raise UserError('Docker daemon is not reachable, ensure Docker is being run with: '
'"-v /var/run/docker.sock:/var/run/docker.sock" as an argument.')
return self._mount_path
|
[
"Returns",
"the",
"path",
"of",
"the",
"mount",
"point",
"of",
"the",
"current",
"container",
".",
"If",
"this",
"method",
"is",
"invoked",
"outside",
"of",
"a",
"Docker",
"container",
"a",
"NotInsideContainerError",
"is",
"raised",
".",
"Likewise",
"if",
"the",
"docker",
"daemon",
"is",
"unreachable",
"from",
"inside",
"the",
"container",
"a",
"UserError",
"is",
"raised",
".",
"This",
"method",
"is",
"idempotent",
"."
] |
BD2KGenomics/toil-lib
|
python
|
https://github.com/BD2KGenomics/toil-lib/blob/022a615fc3dc98fc1aaa7bfd232409962ca44fbd/src/toil_lib/abstractPipelineWrapper.py#L134-L171
|
[
"def",
"_get_mount_path",
"(",
"self",
")",
":",
"if",
"self",
".",
"_mount_path",
"is",
"None",
":",
"name",
"=",
"current_docker_container_id",
"(",
")",
"if",
"dockerd_is_reachable",
"(",
")",
":",
"# Get name of mounted volume",
"blob",
"=",
"json",
".",
"loads",
"(",
"subprocess",
".",
"check_output",
"(",
"[",
"'docker'",
",",
"'inspect'",
",",
"name",
"]",
")",
")",
"mounts",
"=",
"blob",
"[",
"0",
"]",
"[",
"'Mounts'",
"]",
"# Ensure docker.sock is mounted correctly",
"sock_mnt",
"=",
"[",
"x",
"[",
"'Source'",
"]",
"==",
"x",
"[",
"'Destination'",
"]",
"for",
"x",
"in",
"mounts",
"if",
"'docker.sock'",
"in",
"x",
"[",
"'Source'",
"]",
"]",
"require",
"(",
"len",
"(",
"sock_mnt",
")",
"==",
"1",
",",
"'Missing socket mount. Requires the following: '",
"'docker run -v /var/run/docker.sock:/var/run/docker.sock'",
")",
"# Ensure formatting of command for 2 mount points",
"if",
"len",
"(",
"mounts",
")",
"==",
"2",
":",
"require",
"(",
"all",
"(",
"x",
"[",
"'Source'",
"]",
"==",
"x",
"[",
"'Destination'",
"]",
"for",
"x",
"in",
"mounts",
")",
",",
"'Docker Src/Dst mount points, invoked with the -v argument, '",
"'must be the same if only using one mount point aside from the docker '",
"'socket.'",
")",
"work_mount",
"=",
"[",
"x",
"[",
"'Source'",
"]",
"for",
"x",
"in",
"mounts",
"if",
"'docker.sock'",
"not",
"in",
"x",
"[",
"'Source'",
"]",
"]",
"else",
":",
"# Ensure only one mirror mount exists aside from docker.sock",
"mirror_mounts",
"=",
"[",
"x",
"[",
"'Source'",
"]",
"for",
"x",
"in",
"mounts",
"if",
"x",
"[",
"'Source'",
"]",
"==",
"x",
"[",
"'Destination'",
"]",
"]",
"work_mount",
"=",
"[",
"x",
"for",
"x",
"in",
"mirror_mounts",
"if",
"'docker.sock'",
"not",
"in",
"x",
"]",
"require",
"(",
"len",
"(",
"work_mount",
")",
"==",
"1",
",",
"'Wrong number of mirror mounts provided, see '",
"'documentation.'",
")",
"self",
".",
"_mount_path",
"=",
"work_mount",
"[",
"0",
"]",
"log",
".",
"info",
"(",
"'The work mount is: %s'",
",",
"self",
".",
"_mount_path",
")",
"else",
":",
"raise",
"UserError",
"(",
"'Docker daemon is not reachable, ensure Docker is being run with: '",
"'\"-v /var/run/docker.sock:/var/run/docker.sock\" as an argument.'",
")",
"return",
"self",
".",
"_mount_path"
] |
022a615fc3dc98fc1aaa7bfd232409962ca44fbd
|
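The mount-selection logic in _get_mount_path above boils down to picking the single mirror mount that is not the Docker socket. A standalone sketch of that selection, run against a hard-coded sample of docker inspect output rather than a live daemon:

sample_mounts = [
    {'Source': '/var/run/docker.sock', 'Destination': '/var/run/docker.sock'},
    {'Source': '/data', 'Destination': '/data'},
]
mirror = [m['Source'] for m in sample_mounts
          if m['Source'] == m['Destination'] and 'docker.sock' not in m['Source']]
assert len(mirror) == 1, 'expected exactly one mirror mount besides docker.sock'
print('work mount:', mirror[0])   # work mount: /data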
test
|
AbstractPipelineWrapper._add_option
|
Add an argument to the given arg_parser with the given name.
:param argparse.ArgumentParser arg_parser:
:param str name: The name of the option.
|
src/toil_lib/abstractPipelineWrapper.py
|
def _add_option(self, arg_parser, name, *args, **kwargs):
"""
Add an argument to the given arg_parser with the given name.
:param argparse.ArgumentParser arg_parser:
:param str name: The name of the option.
"""
arg_parser.add_argument('--' + name, *args, **kwargs)
|
def _add_option(self, arg_parser, name, *args, **kwargs):
"""
Add an argument to the given arg_parser with the given name.
:param argparse.ArgumentParser arg_parser:
:param str name: The name of the option.
"""
arg_parser.add_argument('--' + name, *args, **kwargs)
|
[
"Add",
"an",
"argument",
"to",
"the",
"given",
"arg_parser",
"with",
"the",
"given",
"name",
"."
] |
BD2KGenomics/toil-lib
|
python
|
https://github.com/BD2KGenomics/toil-lib/blob/022a615fc3dc98fc1aaa7bfd232409962ca44fbd/src/toil_lib/abstractPipelineWrapper.py#L185-L192
|
[
"def",
"_add_option",
"(",
"self",
",",
"arg_parser",
",",
"name",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"arg_parser",
".",
"add_argument",
"(",
"'--'",
"+",
"name",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] |
022a615fc3dc98fc1aaa7bfd232409962ca44fbd
|
test
|
AbstractPipelineWrapper._create_argument_parser
|
Creates and returns an ArgumentParser object prepopulated with 'no clean', 'cores' and
'restart' arguments.
|
src/toil_lib/abstractPipelineWrapper.py
|
def _create_argument_parser(self):
"""
Creates and returns an ArgumentParser object prepopulated with 'no clean', 'cores' and
'restart' arguments.
"""
parser = argparse.ArgumentParser(description=self._desc,
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('--no-clean', action='store_true',
help='If this flag is used, temporary work directory is not cleaned.')
parser.add_argument('--restart', action='store_true',
help='If this flag is used, a previously uncleaned workflow in the same'
' directory will be resumed')
parser.add_argument('--cores', type=int, default=None,
help='Will set a cap on number of cores to use, default is all '
'available cores.')
return parser
|
def _create_argument_parser(self):
"""
Creates and returns an ArgumentParser object prepopulated with 'no clean', 'cores' and
'restart' arguments.
"""
parser = argparse.ArgumentParser(description=self._desc,
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('--no-clean', action='store_true',
help='If this flag is used, temporary work directory is not cleaned.')
parser.add_argument('--restart', action='store_true',
help='If this flag is used, a previously uncleaned workflow in the same'
' directory will be resumed')
parser.add_argument('--cores', type=int, default=None,
help='Will set a cap on number of cores to use, default is all '
'available cores.')
return parser
|
[
"Creates",
"and",
"returns",
"an",
"ArgumentParser",
"object",
"prepopulated",
"with",
"no",
"clean",
"cores",
"and",
"restart",
"arguments",
"."
] |
BD2KGenomics/toil-lib
|
python
|
https://github.com/BD2KGenomics/toil-lib/blob/022a615fc3dc98fc1aaa7bfd232409962ca44fbd/src/toil_lib/abstractPipelineWrapper.py#L194-L209
|
[
"def",
"_create_argument_parser",
"(",
"self",
")",
":",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
"description",
"=",
"self",
".",
"_desc",
",",
"formatter_class",
"=",
"argparse",
".",
"RawTextHelpFormatter",
")",
"parser",
".",
"add_argument",
"(",
"'--no-clean'",
",",
"action",
"=",
"'store_true'",
",",
"help",
"=",
"'If this flag is used, temporary work directory is not cleaned.'",
")",
"parser",
".",
"add_argument",
"(",
"'--restart'",
",",
"action",
"=",
"'store_true'",
",",
"help",
"=",
"'If this flag is used, a previously uncleaned workflow in the same'",
"' directory will be resumed'",
")",
"parser",
".",
"add_argument",
"(",
"'--cores'",
",",
"type",
"=",
"int",
",",
"default",
"=",
"None",
",",
"help",
"=",
"'Will set a cap on number of cores to use, default is all '",
"'available cores.'",
")",
"return",
"parser"
] |
022a615fc3dc98fc1aaa7bfd232409962ca44fbd
|
test
|
AbstractPipelineWrapper._create_pipeline_command
|
Creates and returns a list that represents a command for running the pipeline.
|
src/toil_lib/abstractPipelineWrapper.py
|
def _create_pipeline_command(self, args, workdir_path, config_path):
"""
Creates and returns a list that represents a command for running the pipeline.
"""
return ([self._name, 'run', os.path.join(workdir_path, 'jobStore'),
'--config', config_path,
'--workDir', workdir_path, '--retryCount', '1']
+ (['--restart'] if args.restart else []))
|
def _create_pipeline_command(self, args, workdir_path, config_path):
"""
Creates and returns a list that represents a command for running the pipeline.
"""
return ([self._name, 'run', os.path.join(workdir_path, 'jobStore'),
'--config', config_path,
'--workDir', workdir_path, '--retryCount', '1']
+ (['--restart'] if args.restart else []))
|
[
"Creates",
"and",
"returns",
"a",
"list",
"that",
"represents",
"a",
"command",
"for",
"running",
"the",
"pipeline",
"."
] |
BD2KGenomics/toil-lib
|
python
|
https://github.com/BD2KGenomics/toil-lib/blob/022a615fc3dc98fc1aaa7bfd232409962ca44fbd/src/toil_lib/abstractPipelineWrapper.py#L211-L218
|
[
"def",
"_create_pipeline_command",
"(",
"self",
",",
"args",
",",
"workdir_path",
",",
"config_path",
")",
":",
"return",
"(",
"[",
"self",
".",
"_name",
",",
"'run'",
",",
"os",
".",
"path",
".",
"join",
"(",
"workdir_path",
",",
"'jobStore'",
")",
",",
"'--config'",
",",
"config_path",
",",
"'--workDir'",
",",
"workdir_path",
",",
"'--retryCount'",
",",
"'1'",
"]",
"+",
"(",
"[",
"'--restart'",
"]",
"if",
"args",
".",
"restart",
"else",
"[",
"]",
")",
")"
] |
022a615fc3dc98fc1aaa7bfd232409962ca44fbd
|
test
|
DatabaseConnection.setauth
|
setauth sets the authentication header for use in the session.
It is for use when apikey is updated or something of the sort, such that
there is a seamless experience.
|
connectordb/_connection.py
|
def setauth(self, user_or_apikey=None, user_password=None):
""" setauth sets the authentication header for use in the session.
It is for use when apikey is updated or something of the sort, such that
there is a seamless experience. """
auth = None
if user_or_apikey is not None:
# ConnectorDB allows login using both basic auth or an apikey url param.
# The python client uses basic auth for all logins
if user_password is None:
# Login by api key - the basic auth login uses "" user and
# apikey as password
user_password = user_or_apikey
user_or_apikey = ""
auth = HTTPBasicAuth(user_or_apikey, user_password)
self.r.auth = auth
# Set the websocket's authentication
self.ws.setauth(auth)
|
def setauth(self, user_or_apikey=None, user_password=None):
""" setauth sets the authentication header for use in the session.
It is for use when apikey is updated or something of the sort, such that
there is a seamless experience. """
auth = None
if user_or_apikey is not None:
# ConnectorDB allows login using both basic auth or an apikey url param.
# The python client uses basic auth for all logins
if user_password is None:
# Login by api key - the basic auth login uses "" user and
# apikey as password
user_password = user_or_apikey
user_or_apikey = ""
auth = HTTPBasicAuth(user_or_apikey, user_password)
self.r.auth = auth
# Set the websocket's authentication
self.ws.setauth(auth)
|
[
"setauth",
"sets",
"the",
"authentication",
"header",
"for",
"use",
"in",
"the",
"session",
".",
"It",
"is",
"for",
"use",
"when",
"apikey",
"is",
"updated",
"or",
"something",
"of",
"the",
"sort",
"such",
"that",
"there",
"is",
"a",
"seamless",
"experience",
"."
] |
connectordb/connectordb-python
|
python
|
https://github.com/connectordb/connectordb-python/blob/2092b0cb30898139a247176bcf433d5a4abde7cb/connectordb/_connection.py#L68-L85
|
[
"def",
"setauth",
"(",
"self",
",",
"user_or_apikey",
"=",
"None",
",",
"user_password",
"=",
"None",
")",
":",
"auth",
"=",
"None",
"if",
"user_or_apikey",
"is",
"not",
"None",
":",
"# ConnectorDB allows login using both basic auth or an apikey url param.",
"# The python client uses basic auth for all logins",
"if",
"user_password",
"is",
"None",
":",
"# Login by api key - the basic auth login uses \"\" user and",
"# apikey as password",
"user_password",
"=",
"user_or_apikey",
"user_or_apikey",
"=",
"\"\"",
"auth",
"=",
"HTTPBasicAuth",
"(",
"user_or_apikey",
",",
"user_password",
")",
"self",
".",
"r",
".",
"auth",
"=",
"auth",
"# Set the websocket's authentication",
"self",
".",
"ws",
".",
"setauth",
"(",
"auth",
")"
] |
2092b0cb30898139a247176bcf433d5a4abde7cb
|
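The login convention in setauth above sends an API key as HTTP basic auth with an empty username. A minimal sketch of the same convention using a plain requests session; the key below is a placeholder:

import requests
from requests.auth import HTTPBasicAuth

session = requests.Session()
session.auth = HTTPBasicAuth('', 'my-api-key')   # empty user, apikey as the password
# Every request made through `session` now carries the Authorization header.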
test
|
DatabaseConnection.handleresult
|
Handles HTTP error codes for the given request
Raises:
AuthenticationError on the appropriate 4** errors
ServerError if the response is not an ok (2**)
Arguments:
r -- The request result
|
connectordb/_connection.py
|
def handleresult(self, r):
"""Handles HTTP error codes for the given request
Raises:
AuthenticationError on the appropriate 4** errors
ServerError if the response is not an ok (2**)
Arguments:
r -- The request result
"""
if r.status_code >= 400 and r.status_code < 500:
msg = r.json()
raise AuthenticationError(str(msg["code"]) + ": " + msg["msg"] +
" (" + msg["ref"] + ")")
elif r.status_code > 300:
err = None
try:
msg = r.json()
err = ServerError(str(msg["code"]) + ": " + msg["msg"] + " (" +
msg["ref"] + ")")
except:
raise ServerError(
"Server returned error, but did not give a valid error message")
raise err
return r
|
def handleresult(self, r):
"""Handles HTTP error codes for the given request
Raises:
AuthenticationError on the appropriate 4** errors
ServerError if the response is not an ok (2**)
Arguments:
r -- The request result
"""
if r.status_code >= 400 and r.status_code < 500:
msg = r.json()
raise AuthenticationError(str(msg["code"]) + ": " + msg["msg"] +
" (" + msg["ref"] + ")")
elif r.status_code > 300:
err = None
try:
msg = r.json()
err = ServerError(str(msg["code"]) + ": " + msg["msg"] + " (" +
msg["ref"] + ")")
except:
raise ServerError(
"Server returned error, but did not give a valid error message")
raise err
return r
|
[
"Handles",
"HTTP",
"error",
"codes",
"for",
"the",
"given",
"request"
] |
connectordb/connectordb-python
|
python
|
https://github.com/connectordb/connectordb-python/blob/2092b0cb30898139a247176bcf433d5a4abde7cb/connectordb/_connection.py#L91-L115
|
[
"def",
"handleresult",
"(",
"self",
",",
"r",
")",
":",
"if",
"r",
".",
"status_code",
">=",
"400",
"and",
"r",
".",
"status_code",
"<",
"500",
":",
"msg",
"=",
"r",
".",
"json",
"(",
")",
"raise",
"AuthenticationError",
"(",
"str",
"(",
"msg",
"[",
"\"code\"",
"]",
")",
"+",
"\": \"",
"+",
"msg",
"[",
"\"msg\"",
"]",
"+",
"\" (\"",
"+",
"msg",
"[",
"\"ref\"",
"]",
"+",
"\")\"",
")",
"elif",
"r",
".",
"status_code",
">",
"300",
":",
"err",
"=",
"None",
"try",
":",
"msg",
"=",
"r",
".",
"json",
"(",
")",
"err",
"=",
"ServerError",
"(",
"str",
"(",
"msg",
"[",
"\"code\"",
"]",
")",
"+",
"\": \"",
"+",
"msg",
"[",
"\"msg\"",
"]",
"+",
"\" (\"",
"+",
"msg",
"[",
"\"ref\"",
"]",
"+",
"\")\"",
")",
"except",
":",
"raise",
"ServerError",
"(",
"\"Server returned error, but did not give a valid error message\"",
")",
"raise",
"err",
"return",
"r"
] |
2092b0cb30898139a247176bcf433d5a4abde7cb
|
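handleresult above maps 4xx responses to AuthenticationError and other non-2xx responses to ServerError. A standalone sketch of that mapping with a stub in place of a real requests.Response object (the ServerError branch is simplified here):

class AuthenticationError(Exception):
    pass

class ServerError(Exception):
    pass

class StubResponse:
    def __init__(self, status_code, payload):
        self.status_code = status_code
        self._payload = payload
    def json(self):
        return self._payload

def handleresult(r):
    if 400 <= r.status_code < 500:
        msg = r.json()
        raise AuthenticationError('%s: %s (%s)' % (msg['code'], msg['msg'], msg['ref']))
    if r.status_code > 300:
        raise ServerError('Server returned status %s' % r.status_code)
    return r

try:
    handleresult(StubResponse(401, {'code': 401, 'msg': 'unauthorized', 'ref': 'abc'}))
except AuthenticationError as exc:
    print(exc)   # 401: unauthorized (abc)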
test
|
DatabaseConnection.ping
|
Attempts to ping the server using current credentials, and responds with the path of the currently
authenticated device
|
connectordb/_connection.py
|
def ping(self):
"""Attempts to ping the server using current credentials, and responds with the path of the currently
authenticated device"""
return self.handleresult(self.r.get(self.url,
params={"q": "this"})).text
|
def ping(self):
"""Attempts to ping the server using current credentials, and responds with the path of the currently
authenticated device"""
return self.handleresult(self.r.get(self.url,
params={"q": "this"})).text
|
[
"Attempts",
"to",
"ping",
"the",
"server",
"using",
"current",
"credentials",
"and",
"responds",
"with",
"the",
"path",
"of",
"the",
"currently",
"authenticated",
"device"
] |
connectordb/connectordb-python
|
python
|
https://github.com/connectordb/connectordb-python/blob/2092b0cb30898139a247176bcf433d5a4abde7cb/connectordb/_connection.py#L117-L121
|
[
"def",
"ping",
"(",
"self",
")",
":",
"return",
"self",
".",
"handleresult",
"(",
"self",
".",
"r",
".",
"get",
"(",
"self",
".",
"url",
",",
"params",
"=",
"{",
"\"q\"",
":",
"\"this\"",
"}",
")",
")",
".",
"text"
] |
2092b0cb30898139a247176bcf433d5a4abde7cb
|
test
|
DatabaseConnection.query
|
Run the given query on the connection (POST request to /query)
|
connectordb/_connection.py
|
def query(self, query_type, query=None):
"""Run the given query on the connection (POST request to /query)"""
return self.handleresult(self.r.post(urljoin(self.url + "query/",
query_type),
data=json.dumps(query))).json()
|
def query(self, query_type, query=None):
"""Run the given query on the connection (POST request to /query)"""
return self.handleresult(self.r.post(urljoin(self.url + "query/",
query_type),
data=json.dumps(query))).json()
|
[
"Run",
"the",
"given",
"query",
"on",
"the",
"connection",
"(",
"POST",
"request",
"to",
"/",
"query",
")"
] |
connectordb/connectordb-python
|
python
|
https://github.com/connectordb/connectordb-python/blob/2092b0cb30898139a247176bcf433d5a4abde7cb/connectordb/_connection.py#L123-L127
|
[
"def",
"query",
"(",
"self",
",",
"query_type",
",",
"query",
"=",
"None",
")",
":",
"return",
"self",
".",
"handleresult",
"(",
"self",
".",
"r",
".",
"post",
"(",
"urljoin",
"(",
"self",
".",
"url",
"+",
"\"query/\"",
",",
"query_type",
")",
",",
"data",
"=",
"json",
".",
"dumps",
"(",
"query",
")",
")",
")",
".",
"json",
"(",
")"
] |
2092b0cb30898139a247176bcf433d5a4abde7cb
|
test
|
DatabaseConnection.create
|
Send a POST CRUD API request to the given path using the given data which will be converted
to json
|
connectordb/_connection.py
|
def create(self, path, data=None):
"""Send a POST CRUD API request to the given path using the given data which will be converted
to json"""
return self.handleresult(self.r.post(urljoin(self.url + CRUD_PATH,
path),
data=json.dumps(data)))
|
def create(self, path, data=None):
"""Send a POST CRUD API request to the given path using the given data which will be converted
to json"""
return self.handleresult(self.r.post(urljoin(self.url + CRUD_PATH,
path),
data=json.dumps(data)))
|
[
"Send",
"a",
"POST",
"CRUD",
"API",
"request",
"to",
"the",
"given",
"path",
"using",
"the",
"given",
"data",
"which",
"will",
"be",
"converted",
"to",
"json"
] |
connectordb/connectordb-python
|
python
|
https://github.com/connectordb/connectordb-python/blob/2092b0cb30898139a247176bcf433d5a4abde7cb/connectordb/_connection.py#L129-L134
|
[
"def",
"create",
"(",
"self",
",",
"path",
",",
"data",
"=",
"None",
")",
":",
"return",
"self",
".",
"handleresult",
"(",
"self",
".",
"r",
".",
"post",
"(",
"urljoin",
"(",
"self",
".",
"url",
"+",
"CRUD_PATH",
",",
"path",
")",
",",
"data",
"=",
"json",
".",
"dumps",
"(",
"data",
")",
")",
")"
] |
2092b0cb30898139a247176bcf433d5a4abde7cb
|
test
|
DatabaseConnection.read
|
Read the result at the given path (GET) from the CRUD API, using the optional params dictionary
as url parameters.
|
connectordb/_connection.py
|
def read(self, path, params=None):
"""Read the result at the given path (GET) from the CRUD API, using the optional params dictionary
as url parameters."""
return self.handleresult(self.r.get(urljoin(self.url + CRUD_PATH,
path),
params=params))
|
def read(self, path, params=None):
"""Read the result at the given path (GET) from the CRUD API, using the optional params dictionary
as url parameters."""
return self.handleresult(self.r.get(urljoin(self.url + CRUD_PATH,
path),
params=params))
|
[
"Read",
"the",
"result",
"at",
"the",
"given",
"path",
"(",
"GET",
")",
"from",
"the",
"CRUD",
"API",
"using",
"the",
"optional",
"params",
"dictionary",
"as",
"url",
"parameters",
"."
] |
connectordb/connectordb-python
|
python
|
https://github.com/connectordb/connectordb-python/blob/2092b0cb30898139a247176bcf433d5a4abde7cb/connectordb/_connection.py#L136-L141
|
[
"def",
"read",
"(",
"self",
",",
"path",
",",
"params",
"=",
"None",
")",
":",
"return",
"self",
".",
"handleresult",
"(",
"self",
".",
"r",
".",
"get",
"(",
"urljoin",
"(",
"self",
".",
"url",
"+",
"CRUD_PATH",
",",
"path",
")",
",",
"params",
"=",
"params",
")",
")"
] |
2092b0cb30898139a247176bcf433d5a4abde7cb
|
test
|
DatabaseConnection.update
|
Send an update request to the given path of the CRUD API, with the given data dict, which will be converted
into json
|
connectordb/_connection.py
|
def update(self, path, data=None):
"""Send an update request to the given path of the CRUD API, with the given data dict, which will be converted
into json"""
return self.handleresult(self.r.put(urljoin(self.url + CRUD_PATH,
path),
data=json.dumps(data)))
|
def update(self, path, data=None):
"""Send an update request to the given path of the CRUD API, with the given data dict, which will be converted
into json"""
return self.handleresult(self.r.put(urljoin(self.url + CRUD_PATH,
path),
data=json.dumps(data)))
|
[
"Send",
"an",
"update",
"request",
"to",
"the",
"given",
"path",
"of",
"the",
"CRUD",
"API",
"with",
"the",
"given",
"data",
"dict",
"which",
"will",
"be",
"converted",
"into",
"json"
] |
connectordb/connectordb-python
|
python
|
https://github.com/connectordb/connectordb-python/blob/2092b0cb30898139a247176bcf433d5a4abde7cb/connectordb/_connection.py#L143-L148
|
[
"def",
"update",
"(",
"self",
",",
"path",
",",
"data",
"=",
"None",
")",
":",
"return",
"self",
".",
"handleresult",
"(",
"self",
".",
"r",
".",
"put",
"(",
"urljoin",
"(",
"self",
".",
"url",
"+",
"CRUD_PATH",
",",
"path",
")",
",",
"data",
"=",
"json",
".",
"dumps",
"(",
"data",
")",
")",
")"
] |
2092b0cb30898139a247176bcf433d5a4abde7cb
|
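The CRUD helpers above (create, read, update) all build their target URL with urljoin against the connection's CRUD base path. A quick illustration of how those URLs are assembled, using Python 3's urllib.parse and made-up example values for the base URL and path:

from urllib.parse import urljoin

base = 'https://example.com/api/v1/crud/'   # stands in for self.url + CRUD_PATH
print(urljoin(base, 'myuser/mydevice/mystream'))
# https://example.com/api/v1/crud/myuser/mydevice/mystream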