Dataset columns:

| column | dtype | range |
|---|---|---|
| partition | string | 3 classes |
| func_name | string | 1-134 chars |
| docstring | string | 1-46.9k chars |
| path | string | 4-223 chars |
| original_string | string | 75-104k chars |
| code | string | 75-104k chars |
| docstring_tokens | list | 1-1.97k items |
| repo | string | 7-55 chars |
| language | string | 1 class |
| url | string | 87-315 chars |
| code_tokens | list | 19-28.4k items |
| sha | string | 40 chars |

partition: test
func_name: DatabaseConnection.delete
docstring: Send a delete request to the given path of the CRUD API. This deletes the object. Or at least tries to.
path: connectordb/_connection.py
original_string:
def delete(self, path):
"""Send a delete request to the given path of the CRUD API. This deletes the object. Or at least tries to."""
return self.handleresult(self.r.delete(urljoin(self.url + CRUD_PATH,
path)))
code:
def delete(self, path):
"""Send a delete request to the given path of the CRUD API. This deletes the object. Or at least tries to."""
return self.handleresult(self.r.delete(urljoin(self.url + CRUD_PATH,
path)))
docstring_tokens:
[
"Send",
"a",
"delete",
"request",
"to",
"the",
"given",
"path",
"of",
"the",
"CRUD",
"API",
".",
"This",
"deletes",
"the",
"object",
".",
"Or",
"at",
"least",
"tries",
"to",
"."
]
repo: connectordb/connectordb-python
language: python
url: https://github.com/connectordb/connectordb-python/blob/2092b0cb30898139a247176bcf433d5a4abde7cb/connectordb/_connection.py#L150-L153
code_tokens:
[
"def",
"delete",
"(",
"self",
",",
"path",
")",
":",
"return",
"self",
".",
"handleresult",
"(",
"self",
".",
"r",
".",
"delete",
"(",
"urljoin",
"(",
"self",
".",
"url",
"+",
"CRUD_PATH",
",",
"path",
")",
")",
")"
]
sha: 2092b0cb30898139a247176bcf433d5a4abde7cb

partition: test
func_name: DatabaseConnection.subscribe
docstring: Subscribe to the given stream with the callback
path: connectordb/_connection.py
original_string:
def subscribe(self, stream, callback, transform=""):
"""Subscribe to the given stream with the callback"""
return self.ws.subscribe(stream, callback, transform)
code:
def subscribe(self, stream, callback, transform=""):
"""Subscribe to the given stream with the callback"""
return self.ws.subscribe(stream, callback, transform)
docstring_tokens:
[
"Subscribe",
"to",
"the",
"given",
"stream",
"with",
"the",
"callback"
]
repo: connectordb/connectordb-python
language: python
url: https://github.com/connectordb/connectordb-python/blob/2092b0cb30898139a247176bcf433d5a4abde7cb/connectordb/_connection.py#L160-L162
code_tokens:
[
"def",
"subscribe",
"(",
"self",
",",
"stream",
",",
"callback",
",",
"transform",
"=",
"\"\"",
")",
":",
"return",
"self",
".",
"ws",
".",
"subscribe",
"(",
"stream",
",",
"callback",
",",
"transform",
")"
]
sha: 2092b0cb30898139a247176bcf433d5a4abde7cb

partition: test
func_name: User.create
docstring:
Creates the given user - using the passed in email and password.
You can also set other default properties by passing in the relevant information::
usr.create("my@email","mypass",description="I like trains.")
Furthermore, ConnectorDB permits immediate initialization of an entire user tree,
so that you can create all relevant devices and streams in one go::
usr.create("my@email","mypass",devices={
"device1": {
"nickname": "My train",
"streams": {
"stream1": {
"schema": "{\"type\":\"string\"}",
"datatype": "train.choochoo"
}
},
}
})
The user and meta devices are created by default. If you want to add streams to the user device,
use the "streams" option in place of devices in create.
path: connectordb/_user.py
original_string:
def create(self, email, password, role="user", public=True, **kwargs):
"""Creates the given user - using the passed in email and password.
You can also set other default properties by passing in the relevant information::
usr.create("my@email","mypass",description="I like trains.")
Furthermore, ConnectorDB permits immediate initialization of an entire user tree,
so that you can create all relevant devices and streams in one go::
usr.create("my@email","mypass",devices={
"device1": {
"nickname": "My train",
"streams": {
"stream1": {
"schema": "{\"type\":\"string\"}",
"datatype": "train.choochoo"
}
},
}
})
The user and meta devices are created by default. If you want to add streams to the user device,
use the "streams" option in place of devices in create.
"""
kwargs["email"] = email
kwargs["password"] = password
kwargs["role"] = role
kwargs["public"] = public
self.metadata = self.db.create(
self.path, kwargs).json()
code:
def create(self, email, password, role="user", public=True, **kwargs):
"""Creates the given user - using the passed in email and password.
You can also set other default properties by passing in the relevant information::
usr.create("my@email","mypass",description="I like trains.")
Furthermore, ConnectorDB permits immediate initialization of an entire user tree,
so that you can create all relevant devices and streams in one go::
usr.create("my@email","mypass",devices={
"device1": {
"nickname": "My train",
"streams": {
"stream1": {
"schema": "{\"type\":\"string\"}",
"datatype": "train.choochoo"
}
},
}
})
The user and meta devices are created by default. If you want to add streams to the user device,
use the "streams" option in place of devices in create.
"""
kwargs["email"] = email
kwargs["password"] = password
kwargs["role"] = role
kwargs["public"] = public
self.metadata = self.db.create(
self.path, kwargs).json()
docstring_tokens:
[
"Creates",
"the",
"given",
"user",
"-",
"using",
"the",
"passed",
"in",
"email",
"and",
"password",
"."
]
repo: connectordb/connectordb-python
language: python
url: https://github.com/connectordb/connectordb-python/blob/2092b0cb30898139a247176bcf433d5a4abde7cb/connectordb/_user.py#L10-L40
code_tokens:
[
"def",
"create",
"(",
"self",
",",
"email",
",",
"password",
",",
"role",
"=",
"\"user\"",
",",
"public",
"=",
"True",
",",
"*",
"*",
"kwargs",
")",
":",
"kwargs",
"[",
"\"email\"",
"]",
"=",
"email",
"kwargs",
"[",
"\"password\"",
"]",
"=",
"password",
"kwargs",
"[",
"\"role\"",
"]",
"=",
"role",
"kwargs",
"[",
"\"public\"",
"]",
"=",
"public",
"self",
".",
"metadata",
"=",
"self",
".",
"db",
".",
"create",
"(",
"self",
".",
"path",
",",
"kwargs",
")",
".",
"json",
"(",
")"
]
sha: 2092b0cb30898139a247176bcf433d5a4abde7cb

partition: test
func_name: User.devices
docstring: Returns the list of devices that belong to the user
path: connectordb/_user.py
original_string:
def devices(self):
"""Returns the list of devices that belong to the user"""
result = self.db.read(self.path, {"q": "ls"})
if result is None or result.json() is None:
return []
devices = []
for d in result.json():
dev = self[d["name"]]
dev.metadata = d
devices.append(dev)
return devices
code:
def devices(self):
"""Returns the list of devices that belong to the user"""
result = self.db.read(self.path, {"q": "ls"})
if result is None or result.json() is None:
return []
devices = []
for d in result.json():
dev = self[d["name"]]
dev.metadata = d
devices.append(dev)
return devices
docstring_tokens:
[
"Returns",
"the",
"list",
"of",
"devices",
"that",
"belong",
"to",
"the",
"user"
]
repo: connectordb/connectordb-python
language: python
url: https://github.com/connectordb/connectordb-python/blob/2092b0cb30898139a247176bcf433d5a4abde7cb/connectordb/_user.py#L46-L57
code_tokens:
[
"def",
"devices",
"(",
"self",
")",
":",
"result",
"=",
"self",
".",
"db",
".",
"read",
"(",
"self",
".",
"path",
",",
"{",
"\"q\"",
":",
"\"ls\"",
"}",
")",
"if",
"result",
"is",
"None",
"or",
"result",
".",
"json",
"(",
")",
"is",
"None",
":",
"return",
"[",
"]",
"devices",
"=",
"[",
"]",
"for",
"d",
"in",
"result",
".",
"json",
"(",
")",
":",
"dev",
"=",
"self",
"[",
"d",
"[",
"\"name\"",
"]",
"]",
"dev",
".",
"metadata",
"=",
"d",
"devices",
".",
"append",
"(",
"dev",
")",
"return",
"devices"
]
sha: 2092b0cb30898139a247176bcf433d5a4abde7cb

partition: test
func_name: User.streams
docstring:
Returns the list of streams that belong to the user.
The list can optionally be filtered in 3 ways:
- public: when True, returns only streams belonging to public devices
- downlink: If True, returns only downlink streams
- visible: If True (default), returns only streams of visible devices
path: connectordb/_user.py
original_string:
def streams(self, public=False, downlink=False, visible=True):
"""Returns the list of streams that belong to the user.
The list can optionally be filtered in 3 ways:
- public: when True, returns only streams belonging to public devices
- downlink: If True, returns only downlink streams
- visible: If True (default), returns only streams of visible devices
"""
result = self.db.read(self.path, {"q": "streams",
"public": str(public).lower(),
"downlink": str(downlink).lower(),
"visible": str(visible).lower()})
if result is None or result.json() is None:
return []
streams = []
for d in result.json():
s = self[d["device"]][d["name"]]
s.metadata = d
streams.append(s)
return streams
code:
def streams(self, public=False, downlink=False, visible=True):
"""Returns the list of streams that belong to the user.
The list can optionally be filtered in 3 ways:
- public: when True, returns only streams belonging to public devices
- downlink: If True, returns only downlink streams
- visible: If True (default), returns only streams of visible devices
"""
result = self.db.read(self.path, {"q": "streams",
"public": str(public).lower(),
"downlink": str(downlink).lower(),
"visible": str(visible).lower()})
if result is None or result.json() is None:
return []
streams = []
for d in result.json():
s = self[d["device"]][d["name"]]
s.metadata = d
streams.append(s)
return streams
docstring_tokens:
[
"Returns",
"the",
"list",
"of",
"streams",
"that",
"belong",
"to",
"the",
"user",
".",
"The",
"list",
"can",
"optionally",
"be",
"filtered",
"in",
"3",
"ways",
":",
"-",
"public",
":",
"when",
"True",
"returns",
"only",
"streams",
"belonging",
"to",
"public",
"devices",
"-",
"downlink",
":",
"If",
"True",
"returns",
"only",
"downlink",
"streams",
"-",
"visible",
":",
"If",
"True",
"(",
"default",
")",
"returns",
"only",
"streams",
"of",
"visible",
"devices"
]
repo: connectordb/connectordb-python
language: python
url: https://github.com/connectordb/connectordb-python/blob/2092b0cb30898139a247176bcf433d5a4abde7cb/connectordb/_user.py#L59-L78
code_tokens:
[
"def",
"streams",
"(",
"self",
",",
"public",
"=",
"False",
",",
"downlink",
"=",
"False",
",",
"visible",
"=",
"True",
")",
":",
"result",
"=",
"self",
".",
"db",
".",
"read",
"(",
"self",
".",
"path",
",",
"{",
"\"q\"",
":",
"\"streams\"",
",",
"\"public\"",
":",
"str",
"(",
"public",
")",
".",
"lower",
"(",
")",
",",
"\"downlink\"",
":",
"str",
"(",
"downlink",
")",
".",
"lower",
"(",
")",
",",
"\"visible\"",
":",
"str",
"(",
"visible",
")",
".",
"lower",
"(",
")",
"}",
")",
"if",
"result",
"is",
"None",
"or",
"result",
".",
"json",
"(",
")",
"is",
"None",
":",
"return",
"[",
"]",
"streams",
"=",
"[",
"]",
"for",
"d",
"in",
"result",
".",
"json",
"(",
")",
":",
"s",
"=",
"self",
"[",
"d",
"[",
"\"device\"",
"]",
"]",
"[",
"d",
"[",
"\"name\"",
"]",
"]",
"s",
".",
"metadata",
"=",
"d",
"streams",
".",
"append",
"(",
"s",
")",
"return",
"streams"
]
sha: 2092b0cb30898139a247176bcf433d5a4abde7cb

partition: test
func_name: User.export
docstring:
Exports the ConnectorDB user into the given directory.
The resulting export can be imported by using the import command(cdb.import(directory)),
Note that Python cannot export passwords, since the REST API does
not expose password hashes. Therefore, the imported user will have
password same as username.
The user export function is different than device and stream exports because
it outputs a format compatible directly with connectorDB's import functionality:
connectordb import < mydatabase > <directory >
This also means that you can export multiple users into the same directory without issue
path: connectordb/_user.py
original_string:
def export(self, directory):
"""Exports the ConnectorDB user into the given directory.
The resulting export can be imported by using the import command(cdb.import(directory)),
Note that Python cannot export passwords, since the REST API does
not expose password hashes. Therefore, the imported user will have
password same as username.
The user export function is different than device and stream exports because
it outputs a format compatible directly with connectorDB's import functionality:
connectordb import < mydatabase > <directory >
This also means that you can export multiple users into the same directory without issue
"""
exportInfoFile = os.path.join(directory, "connectordb.json")
if os.path.exists(directory):
# Ensure that there is an export there already, and it is version 1
if not os.path.exists(exportInfoFile):
raise FileExistsError(
"The export directory already exsits, and is not a ConnectorDB export.")
with open(exportInfoFile) as f:
exportInfo = json.load(f)
if exportInfo["Version"] != 1:
raise ValueError(
"Could not export to directory: incompatible export versions.")
else:
# The folder doesn't exist. Make it.
os.mkdir(directory)
with open(exportInfoFile, "w") as f:
json.dump(
{"Version": 1, "ConnectorDB": self.db.get("meta/version").text}, f)
# Now we create the user directory
udir = os.path.join(directory, self.name)
os.mkdir(udir)
# Write the user's info
with open(os.path.join(udir, "user.json"), "w") as f:
json.dump(self.data, f)
# Now export the devices one by one
for d in self.devices():
d.export(os.path.join(udir, d.name))
code:
def export(self, directory):
"""Exports the ConnectorDB user into the given directory.
The resulting export can be imported by using the import command(cdb.import(directory)),
Note that Python cannot export passwords, since the REST API does
not expose password hashes. Therefore, the imported user will have
password same as username.
The user export function is different than device and stream exports because
it outputs a format compatible directly with connectorDB's import functionality:
connectordb import < mydatabase > <directory >
This also means that you can export multiple users into the same directory without issue
"""
exportInfoFile = os.path.join(directory, "connectordb.json")
if os.path.exists(directory):
# Ensure that there is an export there already, and it is version 1
if not os.path.exists(exportInfoFile):
raise FileExistsError(
"The export directory already exsits, and is not a ConnectorDB export.")
with open(exportInfoFile) as f:
exportInfo = json.load(f)
if exportInfo["Version"] != 1:
raise ValueError(
"Could not export to directory: incompatible export versions.")
else:
# The folder doesn't exist. Make it.
os.mkdir(directory)
with open(exportInfoFile, "w") as f:
json.dump(
{"Version": 1, "ConnectorDB": self.db.get("meta/version").text}, f)
# Now we create the user directory
udir = os.path.join(directory, self.name)
os.mkdir(udir)
# Write the user's info
with open(os.path.join(udir, "user.json"), "w") as f:
json.dump(self.data, f)
# Now export the devices one by one
for d in self.devices():
d.export(os.path.join(udir, d.name))
docstring_tokens:
[
"Exports",
"the",
"ConnectorDB",
"user",
"into",
"the",
"given",
"directory",
".",
"The",
"resulting",
"export",
"can",
"be",
"imported",
"by",
"using",
"the",
"import",
"command",
"(",
"cdb",
".",
"import",
"(",
"directory",
"))"
]
repo: connectordb/connectordb-python
language: python
url: https://github.com/connectordb/connectordb-python/blob/2092b0cb30898139a247176bcf433d5a4abde7cb/connectordb/_user.py#L88-L133
code_tokens:
[
"def",
"export",
"(",
"self",
",",
"directory",
")",
":",
"exportInfoFile",
"=",
"os",
".",
"path",
".",
"join",
"(",
"directory",
",",
"\"connectordb.json\"",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"directory",
")",
":",
"# Ensure that there is an export there already, and it is version 1",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"exportInfoFile",
")",
":",
"raise",
"FileExistsError",
"(",
"\"The export directory already exsits, and is not a ConnectorDB export.\"",
")",
"with",
"open",
"(",
"exportInfoFile",
")",
"as",
"f",
":",
"exportInfo",
"=",
"json",
".",
"load",
"(",
"f",
")",
"if",
"exportInfo",
"[",
"\"Version\"",
"]",
"!=",
"1",
":",
"raise",
"ValueError",
"(",
"\"Could not export to directory: incompatible export versions.\"",
")",
"else",
":",
"# The folder doesn't exist. Make it.",
"os",
".",
"mkdir",
"(",
"directory",
")",
"with",
"open",
"(",
"exportInfoFile",
",",
"\"w\"",
")",
"as",
"f",
":",
"json",
".",
"dump",
"(",
"{",
"\"Version\"",
":",
"1",
",",
"\"ConnectorDB\"",
":",
"self",
".",
"db",
".",
"get",
"(",
"\"meta/version\"",
")",
".",
"text",
"}",
",",
"f",
")",
"# Now we create the user directory",
"udir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"directory",
",",
"self",
".",
"name",
")",
"os",
".",
"mkdir",
"(",
"udir",
")",
"# Write the user's info",
"with",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"udir",
",",
"\"user.json\"",
")",
",",
"\"w\"",
")",
"as",
"f",
":",
"json",
".",
"dump",
"(",
"self",
".",
"data",
",",
"f",
")",
"# Now export the devices one by one",
"for",
"d",
"in",
"self",
".",
"devices",
"(",
")",
":",
"d",
".",
"export",
"(",
"os",
".",
"path",
".",
"join",
"(",
"udir",
",",
"d",
".",
"name",
")",
")"
]
sha: 2092b0cb30898139a247176bcf433d5a4abde7cb

partition: test
func_name: User.import_device
docstring:
Imports a device from the given directory. You export the device
by using device.export()
There are two special cases: user and meta devices.
If the device name is meta, import_device will not do anything.
If the device name is "user", import_device will overwrite the user device
even if it exists already.
path: connectordb/_user.py
original_string:
def import_device(self, directory):
"""Imports a device from the given directory. You export the device
by using device.export()
There are two special cases: user and meta devices.
If the device name is meta, import_device will not do anything.
If the device name is "user", import_device will overwrite the user device
even if it exists already.
"""
# read the device's info
with open(os.path.join(directory, "device.json"), "r") as f:
ddata = json.load(f)
d = self[ddata["name"]]
dname = ddata["name"]
del ddata["name"]
if dname == "meta":
return
elif dname == "user":
d.set(ddata)
elif d.exists():
raise ValueError("The device " + d.name + " already exists")
else:
d.create(**ddata)
# Now import all of the streams
for name in os.listdir(directory):
sdir = os.path.join(directory, name)
if os.path.isdir(sdir):
d.import_stream(sdir)
code:
def import_device(self, directory):
"""Imports a device from the given directory. You export the device
by using device.export()
There are two special cases: user and meta devices.
If the device name is meta, import_device will not do anything.
If the device name is "user", import_device will overwrite the user device
even if it exists already.
"""
# read the device's info
with open(os.path.join(directory, "device.json"), "r") as f:
ddata = json.load(f)
d = self[ddata["name"]]
dname = ddata["name"]
del ddata["name"]
if dname == "meta":
return
elif dname == "user":
d.set(ddata)
elif d.exists():
raise ValueError("The device " + d.name + " already exists")
else:
d.create(**ddata)
# Now import all of the streams
for name in os.listdir(directory):
sdir = os.path.join(directory, name)
if os.path.isdir(sdir):
d.import_stream(sdir)
docstring_tokens:
[
"Imports",
"a",
"device",
"from",
"the",
"given",
"directory",
".",
"You",
"export",
"the",
"device",
"by",
"using",
"device",
".",
"export",
"()"
]
repo: connectordb/connectordb-python
language: python
url: https://github.com/connectordb/connectordb-python/blob/2092b0cb30898139a247176bcf433d5a4abde7cb/connectordb/_user.py#L135-L167
code_tokens:
[
"def",
"import_device",
"(",
"self",
",",
"directory",
")",
":",
"# read the device's info",
"with",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"directory",
",",
"\"device.json\"",
")",
",",
"\"r\"",
")",
"as",
"f",
":",
"ddata",
"=",
"json",
".",
"load",
"(",
"f",
")",
"d",
"=",
"self",
"[",
"ddata",
"[",
"\"name\"",
"]",
"]",
"dname",
"=",
"ddata",
"[",
"\"name\"",
"]",
"del",
"ddata",
"[",
"\"name\"",
"]",
"if",
"dname",
"==",
"\"meta\"",
":",
"return",
"elif",
"dname",
"==",
"\"user\"",
":",
"d",
".",
"set",
"(",
"ddata",
")",
"elif",
"d",
".",
"exists",
"(",
")",
":",
"raise",
"ValueError",
"(",
"\"The device \"",
"+",
"d",
".",
"name",
"+",
"\" already exists\"",
")",
"else",
":",
"d",
".",
"create",
"(",
"*",
"*",
"ddata",
")",
"# Now import all of the streams",
"for",
"name",
"in",
"os",
".",
"listdir",
"(",
"directory",
")",
":",
"sdir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"directory",
",",
"name",
")",
"if",
"os",
".",
"path",
".",
"isdir",
"(",
"sdir",
")",
":",
"d",
".",
"import_stream",
"(",
"sdir",
")"
]
sha: 2092b0cb30898139a247176bcf433d5a4abde7cb

partition: test
func_name: run_cutadapt
docstring:
Adapter trimming for RNA-seq data
:param JobFunctionWrappingJob job: passed automatically by Toil
:param str r1_id: FileStoreID of fastq read 1
:param str r2_id: FileStoreID of fastq read 2 (if paired data)
:param str fwd_3pr_adapter: Adapter sequence for the forward 3' adapter
:param str rev_3pr_adapter: Adapter sequence for the reverse 3' adapter (second fastq pair)
:return: R1 and R2 FileStoreIDs
:rtype: tuple
path: src/toil_lib/tools/preprocessing.py
original_string:
def run_cutadapt(job, r1_id, r2_id, fwd_3pr_adapter, rev_3pr_adapter):
"""
Adapter trimming for RNA-seq data
:param JobFunctionWrappingJob job: passed automatically by Toil
:param str r1_id: FileStoreID of fastq read 1
:param str r2_id: FileStoreID of fastq read 2 (if paired data)
:param str fwd_3pr_adapter: Adapter sequence for the forward 3' adapter
:param str rev_3pr_adapter: Adapter sequence for the reverse 3' adapter (second fastq pair)
:return: R1 and R2 FileStoreIDs
:rtype: tuple
"""
work_dir = job.fileStore.getLocalTempDir()
if r2_id:
require(rev_3pr_adapter, "Paired end data requires a reverse 3' adapter sequence.")
# Retrieve files
parameters = ['-a', fwd_3pr_adapter,
'-m', '35']
if r1_id and r2_id:
job.fileStore.readGlobalFile(r1_id, os.path.join(work_dir, 'R1.fastq'))
job.fileStore.readGlobalFile(r2_id, os.path.join(work_dir, 'R2.fastq'))
parameters.extend(['-A', rev_3pr_adapter,
'-o', '/data/R1_cutadapt.fastq',
'-p', '/data/R2_cutadapt.fastq',
'/data/R1.fastq', '/data/R2.fastq'])
else:
job.fileStore.readGlobalFile(r1_id, os.path.join(work_dir, 'R1.fastq'))
parameters.extend(['-o', '/data/R1_cutadapt.fastq', '/data/R1.fastq'])
# Call: CutAdapt
dockerCall(job=job, tool='quay.io/ucsc_cgl/cutadapt:1.9--6bd44edd2b8f8f17e25c5a268fedaab65fa851d2',
workDir=work_dir, parameters=parameters)
# Write to fileStore
if r1_id and r2_id:
r1_cut_id = job.fileStore.writeGlobalFile(os.path.join(work_dir, 'R1_cutadapt.fastq'))
r2_cut_id = job.fileStore.writeGlobalFile(os.path.join(work_dir, 'R2_cutadapt.fastq'))
else:
r1_cut_id = job.fileStore.writeGlobalFile(os.path.join(work_dir, 'R1_cutadapt.fastq'))
r2_cut_id = None
return r1_cut_id, r2_cut_id
code:
def run_cutadapt(job, r1_id, r2_id, fwd_3pr_adapter, rev_3pr_adapter):
"""
Adapter trimming for RNA-seq data
:param JobFunctionWrappingJob job: passed automatically by Toil
:param str r1_id: FileStoreID of fastq read 1
:param str r2_id: FileStoreID of fastq read 2 (if paired data)
:param str fwd_3pr_adapter: Adapter sequence for the forward 3' adapter
:param str rev_3pr_adapter: Adapter sequence for the reverse 3' adapter (second fastq pair)
:return: R1 and R2 FileStoreIDs
:rtype: tuple
"""
work_dir = job.fileStore.getLocalTempDir()
if r2_id:
require(rev_3pr_adapter, "Paired end data requires a reverse 3' adapter sequence.")
# Retrieve files
parameters = ['-a', fwd_3pr_adapter,
'-m', '35']
if r1_id and r2_id:
job.fileStore.readGlobalFile(r1_id, os.path.join(work_dir, 'R1.fastq'))
job.fileStore.readGlobalFile(r2_id, os.path.join(work_dir, 'R2.fastq'))
parameters.extend(['-A', rev_3pr_adapter,
'-o', '/data/R1_cutadapt.fastq',
'-p', '/data/R2_cutadapt.fastq',
'/data/R1.fastq', '/data/R2.fastq'])
else:
job.fileStore.readGlobalFile(r1_id, os.path.join(work_dir, 'R1.fastq'))
parameters.extend(['-o', '/data/R1_cutadapt.fastq', '/data/R1.fastq'])
# Call: CutAdapt
dockerCall(job=job, tool='quay.io/ucsc_cgl/cutadapt:1.9--6bd44edd2b8f8f17e25c5a268fedaab65fa851d2',
workDir=work_dir, parameters=parameters)
# Write to fileStore
if r1_id and r2_id:
r1_cut_id = job.fileStore.writeGlobalFile(os.path.join(work_dir, 'R1_cutadapt.fastq'))
r2_cut_id = job.fileStore.writeGlobalFile(os.path.join(work_dir, 'R2_cutadapt.fastq'))
else:
r1_cut_id = job.fileStore.writeGlobalFile(os.path.join(work_dir, 'R1_cutadapt.fastq'))
r2_cut_id = None
return r1_cut_id, r2_cut_id
docstring_tokens:
[
"Adapter",
"trimming",
"for",
"RNA",
"-",
"seq",
"data"
]
repo: BD2KGenomics/toil-lib
language: python
url: https://github.com/BD2KGenomics/toil-lib/blob/022a615fc3dc98fc1aaa7bfd232409962ca44fbd/src/toil_lib/tools/preprocessing.py#L10-L48
code_tokens:
[
"def",
"run_cutadapt",
"(",
"job",
",",
"r1_id",
",",
"r2_id",
",",
"fwd_3pr_adapter",
",",
"rev_3pr_adapter",
")",
":",
"work_dir",
"=",
"job",
".",
"fileStore",
".",
"getLocalTempDir",
"(",
")",
"if",
"r2_id",
":",
"require",
"(",
"rev_3pr_adapter",
",",
"\"Paired end data requires a reverse 3' adapter sequence.\"",
")",
"# Retrieve files",
"parameters",
"=",
"[",
"'-a'",
",",
"fwd_3pr_adapter",
",",
"'-m'",
",",
"'35'",
"]",
"if",
"r1_id",
"and",
"r2_id",
":",
"job",
".",
"fileStore",
".",
"readGlobalFile",
"(",
"r1_id",
",",
"os",
".",
"path",
".",
"join",
"(",
"work_dir",
",",
"'R1.fastq'",
")",
")",
"job",
".",
"fileStore",
".",
"readGlobalFile",
"(",
"r2_id",
",",
"os",
".",
"path",
".",
"join",
"(",
"work_dir",
",",
"'R2.fastq'",
")",
")",
"parameters",
".",
"extend",
"(",
"[",
"'-A'",
",",
"rev_3pr_adapter",
",",
"'-o'",
",",
"'/data/R1_cutadapt.fastq'",
",",
"'-p'",
",",
"'/data/R2_cutadapt.fastq'",
",",
"'/data/R1.fastq'",
",",
"'/data/R2.fastq'",
"]",
")",
"else",
":",
"job",
".",
"fileStore",
".",
"readGlobalFile",
"(",
"r1_id",
",",
"os",
".",
"path",
".",
"join",
"(",
"work_dir",
",",
"'R1.fastq'",
")",
")",
"parameters",
".",
"extend",
"(",
"[",
"'-o'",
",",
"'/data/R1_cutadapt.fastq'",
",",
"'/data/R1.fastq'",
"]",
")",
"# Call: CutAdapt",
"dockerCall",
"(",
"job",
"=",
"job",
",",
"tool",
"=",
"'quay.io/ucsc_cgl/cutadapt:1.9--6bd44edd2b8f8f17e25c5a268fedaab65fa851d2'",
",",
"workDir",
"=",
"work_dir",
",",
"parameters",
"=",
"parameters",
")",
"# Write to fileStore",
"if",
"r1_id",
"and",
"r2_id",
":",
"r1_cut_id",
"=",
"job",
".",
"fileStore",
".",
"writeGlobalFile",
"(",
"os",
".",
"path",
".",
"join",
"(",
"work_dir",
",",
"'R1_cutadapt.fastq'",
")",
")",
"r2_cut_id",
"=",
"job",
".",
"fileStore",
".",
"writeGlobalFile",
"(",
"os",
".",
"path",
".",
"join",
"(",
"work_dir",
",",
"'R2_cutadapt.fastq'",
")",
")",
"else",
":",
"r1_cut_id",
"=",
"job",
".",
"fileStore",
".",
"writeGlobalFile",
"(",
"os",
".",
"path",
".",
"join",
"(",
"work_dir",
",",
"'R1_cutadapt.fastq'",
")",
")",
"r2_cut_id",
"=",
"None",
"return",
"r1_cut_id",
",",
"r2_cut_id"
]
sha: 022a615fc3dc98fc1aaa7bfd232409962ca44fbd

partition: test
func_name: run_samtools_faidx
docstring:
Use SAMtools to create reference index file
:param JobFunctionWrappingJob job: passed automatically by Toil
:param str ref_id: FileStoreID for the reference genome
:return: FileStoreID for reference index
:rtype: str
path: src/toil_lib/tools/preprocessing.py
original_string:
def run_samtools_faidx(job, ref_id):
"""
Use SAMtools to create reference index file
:param JobFunctionWrappingJob job: passed automatically by Toil
:param str ref_id: FileStoreID for the reference genome
:return: FileStoreID for reference index
:rtype: str
"""
job.fileStore.logToMaster('Created reference index')
work_dir = job.fileStore.getLocalTempDir()
job.fileStore.readGlobalFile(ref_id, os.path.join(work_dir, 'ref.fasta'))
command = ['faidx', 'ref.fasta']
dockerCall(job=job, workDir=work_dir, parameters=command,
tool='quay.io/ucsc_cgl/samtools:0.1.19--dd5ac549b95eb3e5d166a5e310417ef13651994e')
return job.fileStore.writeGlobalFile(os.path.join(work_dir, 'ref.fasta.fai'))
code:
def run_samtools_faidx(job, ref_id):
"""
Use SAMtools to create reference index file
:param JobFunctionWrappingJob job: passed automatically by Toil
:param str ref_id: FileStoreID for the reference genome
:return: FileStoreID for reference index
:rtype: str
"""
job.fileStore.logToMaster('Created reference index')
work_dir = job.fileStore.getLocalTempDir()
job.fileStore.readGlobalFile(ref_id, os.path.join(work_dir, 'ref.fasta'))
command = ['faidx', 'ref.fasta']
dockerCall(job=job, workDir=work_dir, parameters=command,
tool='quay.io/ucsc_cgl/samtools:0.1.19--dd5ac549b95eb3e5d166a5e310417ef13651994e')
return job.fileStore.writeGlobalFile(os.path.join(work_dir, 'ref.fasta.fai'))
docstring_tokens:
[
"Use",
"SAMtools",
"to",
"create",
"reference",
"index",
"file"
]
repo: BD2KGenomics/toil-lib
language: python
url: https://github.com/BD2KGenomics/toil-lib/blob/022a615fc3dc98fc1aaa7bfd232409962ca44fbd/src/toil_lib/tools/preprocessing.py#L51-L66
code_tokens:
[
"def",
"run_samtools_faidx",
"(",
"job",
",",
"ref_id",
")",
":",
"job",
".",
"fileStore",
".",
"logToMaster",
"(",
"'Created reference index'",
")",
"work_dir",
"=",
"job",
".",
"fileStore",
".",
"getLocalTempDir",
"(",
")",
"job",
".",
"fileStore",
".",
"readGlobalFile",
"(",
"ref_id",
",",
"os",
".",
"path",
".",
"join",
"(",
"work_dir",
",",
"'ref.fasta'",
")",
")",
"command",
"=",
"[",
"'faidx'",
",",
"'ref.fasta'",
"]",
"dockerCall",
"(",
"job",
"=",
"job",
",",
"workDir",
"=",
"work_dir",
",",
"parameters",
"=",
"command",
",",
"tool",
"=",
"'quay.io/ucsc_cgl/samtools:0.1.19--dd5ac549b95eb3e5d166a5e310417ef13651994e'",
")",
"return",
"job",
".",
"fileStore",
".",
"writeGlobalFile",
"(",
"os",
".",
"path",
".",
"join",
"(",
"work_dir",
",",
"'ref.fasta.fai'",
")",
")"
]
sha: 022a615fc3dc98fc1aaa7bfd232409962ca44fbd

partition: test
func_name: run_samtools_index
docstring:
Runs SAMtools index to create a BAM index file
:param JobFunctionWrappingJob job: passed automatically by Toil
:param str bam: FileStoreID of the BAM file
:return: FileStoreID for BAM index file
:rtype: str
path: src/toil_lib/tools/preprocessing.py
original_string:
def run_samtools_index(job, bam):
"""
Runs SAMtools index to create a BAM index file
:param JobFunctionWrappingJob job: passed automatically by Toil
:param str bam: FileStoreID of the BAM file
:return: FileStoreID for BAM index file
:rtype: str
"""
work_dir = job.fileStore.getLocalTempDir()
job.fileStore.readGlobalFile(bam, os.path.join(work_dir, 'sample.bam'))
# Call: index the bam
parameters = ['index', '/data/sample.bam']
dockerCall(job=job, workDir=work_dir, parameters=parameters,
tool='quay.io/ucsc_cgl/samtools:0.1.19--dd5ac549b95eb3e5d166a5e310417ef13651994e')
# Write to fileStore
return job.fileStore.writeGlobalFile(os.path.join(work_dir, 'sample.bam.bai'))
code:
def run_samtools_index(job, bam):
"""
Runs SAMtools index to create a BAM index file
:param JobFunctionWrappingJob job: passed automatically by Toil
:param str bam: FileStoreID of the BAM file
:return: FileStoreID for BAM index file
:rtype: str
"""
work_dir = job.fileStore.getLocalTempDir()
job.fileStore.readGlobalFile(bam, os.path.join(work_dir, 'sample.bam'))
# Call: index the bam
parameters = ['index', '/data/sample.bam']
dockerCall(job=job, workDir=work_dir, parameters=parameters,
tool='quay.io/ucsc_cgl/samtools:0.1.19--dd5ac549b95eb3e5d166a5e310417ef13651994e')
# Write to fileStore
return job.fileStore.writeGlobalFile(os.path.join(work_dir, 'sample.bam.bai'))
docstring_tokens:
[
"Runs",
"SAMtools",
"index",
"to",
"create",
"a",
"BAM",
"index",
"file"
]
repo: BD2KGenomics/toil-lib
language: python
url: https://github.com/BD2KGenomics/toil-lib/blob/022a615fc3dc98fc1aaa7bfd232409962ca44fbd/src/toil_lib/tools/preprocessing.py#L69-L85
code_tokens:
[
"def",
"run_samtools_index",
"(",
"job",
",",
"bam",
")",
":",
"work_dir",
"=",
"job",
".",
"fileStore",
".",
"getLocalTempDir",
"(",
")",
"job",
".",
"fileStore",
".",
"readGlobalFile",
"(",
"bam",
",",
"os",
".",
"path",
".",
"join",
"(",
"work_dir",
",",
"'sample.bam'",
")",
")",
"# Call: index the bam",
"parameters",
"=",
"[",
"'index'",
",",
"'/data/sample.bam'",
"]",
"dockerCall",
"(",
"job",
"=",
"job",
",",
"workDir",
"=",
"work_dir",
",",
"parameters",
"=",
"parameters",
",",
"tool",
"=",
"'quay.io/ucsc_cgl/samtools:0.1.19--dd5ac549b95eb3e5d166a5e310417ef13651994e'",
")",
"# Write to fileStore",
"return",
"job",
".",
"fileStore",
".",
"writeGlobalFile",
"(",
"os",
".",
"path",
".",
"join",
"(",
"work_dir",
",",
"'sample.bam.bai'",
")",
")"
]
sha: 022a615fc3dc98fc1aaa7bfd232409962ca44fbd

partition: test
func_name: run_sambamba_markdup
docstring:
Marks reads as PCR duplicates using Sambamba
:param JobFunctionWrappingJob job: passed automatically by Toil
:param str bam: FileStoreID for BAM file
:return: FileStoreID for sorted BAM file
:rtype: str
path: src/toil_lib/tools/preprocessing.py
original_string:
def run_sambamba_markdup(job, bam):
"""
Marks reads as PCR duplicates using Sambamba
:param JobFunctionWrappingJob job: passed automatically by Toil
:param str bam: FileStoreID for BAM file
:return: FileStoreID for sorted BAM file
:rtype: str
"""
work_dir = job.fileStore.getLocalTempDir()
job.fileStore.readGlobalFile(bam, os.path.join(work_dir, 'input.bam'))
command = ['/usr/local/bin/sambamba',
'markdup',
'-t', str(int(job.cores)),
'/data/input.bam',
'/data/output.bam']
start_time = time.time()
dockerCall(job=job, workDir=work_dir,
parameters=command,
tool='quay.io/biocontainers/sambamba:0.6.6--0')
end_time = time.time()
_log_runtime(job, start_time, end_time, "sambamba mkdup")
return job.fileStore.writeGlobalFile(os.path.join(work_dir, 'output.bam'))
code:
def run_sambamba_markdup(job, bam):
"""
Marks reads as PCR duplicates using Sambamba
:param JobFunctionWrappingJob job: passed automatically by Toil
:param str bam: FileStoreID for BAM file
:return: FileStoreID for sorted BAM file
:rtype: str
"""
work_dir = job.fileStore.getLocalTempDir()
job.fileStore.readGlobalFile(bam, os.path.join(work_dir, 'input.bam'))
command = ['/usr/local/bin/sambamba',
'markdup',
'-t', str(int(job.cores)),
'/data/input.bam',
'/data/output.bam']
start_time = time.time()
dockerCall(job=job, workDir=work_dir,
parameters=command,
tool='quay.io/biocontainers/sambamba:0.6.6--0')
end_time = time.time()
_log_runtime(job, start_time, end_time, "sambamba mkdup")
return job.fileStore.writeGlobalFile(os.path.join(work_dir, 'output.bam'))
docstring_tokens:
[
"Marks",
"reads",
"as",
"PCR",
"duplicates",
"using",
"Sambamba"
]
repo: BD2KGenomics/toil-lib
language: python
url: https://github.com/BD2KGenomics/toil-lib/blob/022a615fc3dc98fc1aaa7bfd232409962ca44fbd/src/toil_lib/tools/preprocessing.py#L227-L250
code_tokens:
[
"def",
"run_sambamba_markdup",
"(",
"job",
",",
"bam",
")",
":",
"work_dir",
"=",
"job",
".",
"fileStore",
".",
"getLocalTempDir",
"(",
")",
"job",
".",
"fileStore",
".",
"readGlobalFile",
"(",
"bam",
",",
"os",
".",
"path",
".",
"join",
"(",
"work_dir",
",",
"'input.bam'",
")",
")",
"command",
"=",
"[",
"'/usr/local/bin/sambamba'",
",",
"'markdup'",
",",
"'-t'",
",",
"str",
"(",
"int",
"(",
"job",
".",
"cores",
")",
")",
",",
"'/data/input.bam'",
",",
"'/data/output.bam'",
"]",
"start_time",
"=",
"time",
".",
"time",
"(",
")",
"dockerCall",
"(",
"job",
"=",
"job",
",",
"workDir",
"=",
"work_dir",
",",
"parameters",
"=",
"command",
",",
"tool",
"=",
"'quay.io/biocontainers/sambamba:0.6.6--0'",
")",
"end_time",
"=",
"time",
".",
"time",
"(",
")",
"_log_runtime",
"(",
"job",
",",
"start_time",
",",
"end_time",
",",
"\"sambamba mkdup\"",
")",
"return",
"job",
".",
"fileStore",
".",
"writeGlobalFile",
"(",
"os",
".",
"path",
".",
"join",
"(",
"work_dir",
",",
"'output.bam'",
")",
")"
]
sha: 022a615fc3dc98fc1aaa7bfd232409962ca44fbd

partition: test
func_name: run_samblaster
docstring:
Marks reads as PCR duplicates using SAMBLASTER
:param JobFunctionWrappingJob job: passed automatically by Toil
:param str sam: FileStoreID for SAM file
:return: FileStoreID for deduped SAM file
:rtype: str
path: src/toil_lib/tools/preprocessing.py
original_string:
def run_samblaster(job, sam):
"""
Marks reads as PCR duplicates using SAMBLASTER
:param JobFunctionWrappingJob job: passed automatically by Toil
:param str sam: FileStoreID for SAM file
:return: FileStoreID for deduped SAM file
:rtype: str
"""
work_dir = job.fileStore.getLocalTempDir()
job.fileStore.readGlobalFile(sam, os.path.join(work_dir, 'input.sam'))
command = ['/usr/local/bin/samblaster',
'-i', '/data/input.sam',
'-o', '/data/output.sam',
'--ignoreUnmated']
start_time = time.time()
dockerCall(job=job, workDir=work_dir,
parameters=command,
tool='quay.io/biocontainers/samblaster:0.1.24--0')
end_time = time.time()
_log_runtime(job, start_time, end_time, "SAMBLASTER")
return job.fileStore.writeGlobalFile(os.path.join(work_dir, 'output.sam'))
code:
def run_samblaster(job, sam):
"""
Marks reads as PCR duplicates using SAMBLASTER
:param JobFunctionWrappingJob job: passed automatically by Toil
:param str sam: FileStoreID for SAM file
:return: FileStoreID for deduped SAM file
:rtype: str
"""
work_dir = job.fileStore.getLocalTempDir()
job.fileStore.readGlobalFile(sam, os.path.join(work_dir, 'input.sam'))
command = ['/usr/local/bin/samblaster',
'-i', '/data/input.sam',
'-o', '/data/output.sam',
'--ignoreUnmated']
start_time = time.time()
dockerCall(job=job, workDir=work_dir,
parameters=command,
tool='quay.io/biocontainers/samblaster:0.1.24--0')
end_time = time.time()
_log_runtime(job, start_time, end_time, "SAMBLASTER")
return job.fileStore.writeGlobalFile(os.path.join(work_dir, 'output.sam'))
docstring_tokens:
[
"Marks",
"reads",
"as",
"PCR",
"duplicates",
"using",
"SAMBLASTER"
]
repo: BD2KGenomics/toil-lib
language: python
url: https://github.com/BD2KGenomics/toil-lib/blob/022a615fc3dc98fc1aaa7bfd232409962ca44fbd/src/toil_lib/tools/preprocessing.py#L253-L275
code_tokens:
[
"def",
"run_samblaster",
"(",
"job",
",",
"sam",
")",
":",
"work_dir",
"=",
"job",
".",
"fileStore",
".",
"getLocalTempDir",
"(",
")",
"job",
".",
"fileStore",
".",
"readGlobalFile",
"(",
"sam",
",",
"os",
".",
"path",
".",
"join",
"(",
"work_dir",
",",
"'input.sam'",
")",
")",
"command",
"=",
"[",
"'/usr/local/bin/samblaster'",
",",
"'-i'",
",",
"'/data/input.sam'",
",",
"'-o'",
",",
"'/data/output.sam'",
",",
"'--ignoreUnmated'",
"]",
"start_time",
"=",
"time",
".",
"time",
"(",
")",
"dockerCall",
"(",
"job",
"=",
"job",
",",
"workDir",
"=",
"work_dir",
",",
"parameters",
"=",
"command",
",",
"tool",
"=",
"'quay.io/biocontainers/samblaster:0.1.24--0'",
")",
"end_time",
"=",
"time",
".",
"time",
"(",
")",
"_log_runtime",
"(",
"job",
",",
"start_time",
",",
"end_time",
",",
"\"SAMBLASTER\"",
")",
"return",
"job",
".",
"fileStore",
".",
"writeGlobalFile",
"(",
"os",
".",
"path",
".",
"join",
"(",
"work_dir",
",",
"'output.sam'",
")",
")"
]
sha: 022a615fc3dc98fc1aaa7bfd232409962ca44fbd

partition: test
func_name: picard_mark_duplicates
docstring:
Runs Picard MarkDuplicates on a BAM file. Requires that the BAM file be coordinate sorted.
:param JobFunctionWrappingJob job: passed automatically by Toil
:param str bam: FileStoreID for BAM file
:param str bai: FileStoreID for BAM index file
:param str validation_stringency: BAM file validation stringency, default is LENIENT
:return: FileStoreIDs for BAM and BAI files
:rtype: tuple
path: src/toil_lib/tools/preprocessing.py
original_string:
def picard_mark_duplicates(job, bam, bai, validation_stringency='LENIENT'):
"""
Runs Picard MarkDuplicates on a BAM file. Requires that the BAM file be coordinate sorted.
:param JobFunctionWrappingJob job: passed automatically by Toil
:param str bam: FileStoreID for BAM file
:param str bai: FileStoreID for BAM index file
:param str validation_stringency: BAM file validation stringency, default is LENIENT
:return: FileStoreIDs for BAM and BAI files
:rtype: tuple
"""
work_dir = job.fileStore.getLocalTempDir()
# Retrieve file path
job.fileStore.readGlobalFile(bam, os.path.join(work_dir, 'sorted.bam'))
job.fileStore.readGlobalFile(bai, os.path.join(work_dir, 'sorted.bai'))
# Call: picardtools
command = ['MarkDuplicates',
'INPUT=sorted.bam',
'OUTPUT=mkdups.bam',
'METRICS_FILE=metrics.txt',
'ASSUME_SORTED=true',
'CREATE_INDEX=true',
'VALIDATION_STRINGENCY=%s' % validation_stringency.upper()]
# picard-tools container doesn't have JAVA_OPTS variable
# Set TMPDIR to /data to prevent writing temporary files to /tmp
docker_parameters = ['--rm',
'--log-driver', 'none',
'-e', 'JAVA_OPTIONS=-Djava.io.tmpdir=/data/ -Xmx{}'.format(job.memory),
'-v', '{}:/data'.format(work_dir)]
start_time = time.time()
dockerCall(job=job, workDir=work_dir,
parameters=command,
tool='quay.io/ucsc_cgl/picardtools:1.95--dd5ac549b95eb3e5d166a5e310417ef13651994e',
dockerParameters=docker_parameters)
end_time = time.time()
_log_runtime(job, start_time, end_time, "Picard MarkDuplicates")
bam = job.fileStore.writeGlobalFile(os.path.join(work_dir, 'mkdups.bam'))
bai = job.fileStore.writeGlobalFile(os.path.join(work_dir, 'mkdups.bai'))
return bam, bai
code:
def picard_mark_duplicates(job, bam, bai, validation_stringency='LENIENT'):
"""
Runs Picard MarkDuplicates on a BAM file. Requires that the BAM file be coordinate sorted.
:param JobFunctionWrappingJob job: passed automatically by Toil
:param str bam: FileStoreID for BAM file
:param str bai: FileStoreID for BAM index file
:param str validation_stringency: BAM file validation stringency, default is LENIENT
:return: FileStoreIDs for BAM and BAI files
:rtype: tuple
"""
work_dir = job.fileStore.getLocalTempDir()
# Retrieve file path
job.fileStore.readGlobalFile(bam, os.path.join(work_dir, 'sorted.bam'))
job.fileStore.readGlobalFile(bai, os.path.join(work_dir, 'sorted.bai'))
# Call: picardtools
command = ['MarkDuplicates',
'INPUT=sorted.bam',
'OUTPUT=mkdups.bam',
'METRICS_FILE=metrics.txt',
'ASSUME_SORTED=true',
'CREATE_INDEX=true',
'VALIDATION_STRINGENCY=%s' % validation_stringency.upper()]
# picard-tools container doesn't have JAVA_OPTS variable
# Set TMPDIR to /data to prevent writing temporary files to /tmp
docker_parameters = ['--rm',
'--log-driver', 'none',
'-e', 'JAVA_OPTIONS=-Djava.io.tmpdir=/data/ -Xmx{}'.format(job.memory),
'-v', '{}:/data'.format(work_dir)]
start_time = time.time()
dockerCall(job=job, workDir=work_dir,
parameters=command,
tool='quay.io/ucsc_cgl/picardtools:1.95--dd5ac549b95eb3e5d166a5e310417ef13651994e',
dockerParameters=docker_parameters)
end_time = time.time()
_log_runtime(job, start_time, end_time, "Picard MarkDuplicates")
bam = job.fileStore.writeGlobalFile(os.path.join(work_dir, 'mkdups.bam'))
bai = job.fileStore.writeGlobalFile(os.path.join(work_dir, 'mkdups.bai'))
return bam, bai
docstring_tokens:
[
"Runs",
"Picard",
"MarkDuplicates",
"on",
"a",
"BAM",
"file",
".",
"Requires",
"that",
"the",
"BAM",
"file",
"be",
"coordinate",
"sorted",
"."
]
repo: BD2KGenomics/toil-lib
language: python
url: https://github.com/BD2KGenomics/toil-lib/blob/022a615fc3dc98fc1aaa7bfd232409962ca44fbd/src/toil_lib/tools/preprocessing.py#L297-L340
code_tokens:
[
"def",
"picard_mark_duplicates",
"(",
"job",
",",
"bam",
",",
"bai",
",",
"validation_stringency",
"=",
"'LENIENT'",
")",
":",
"work_dir",
"=",
"job",
".",
"fileStore",
".",
"getLocalTempDir",
"(",
")",
"# Retrieve file path",
"job",
".",
"fileStore",
".",
"readGlobalFile",
"(",
"bam",
",",
"os",
".",
"path",
".",
"join",
"(",
"work_dir",
",",
"'sorted.bam'",
")",
")",
"job",
".",
"fileStore",
".",
"readGlobalFile",
"(",
"bai",
",",
"os",
".",
"path",
".",
"join",
"(",
"work_dir",
",",
"'sorted.bai'",
")",
")",
"# Call: picardtools",
"command",
"=",
"[",
"'MarkDuplicates'",
",",
"'INPUT=sorted.bam'",
",",
"'OUTPUT=mkdups.bam'",
",",
"'METRICS_FILE=metrics.txt'",
",",
"'ASSUME_SORTED=true'",
",",
"'CREATE_INDEX=true'",
",",
"'VALIDATION_STRINGENCY=%s'",
"%",
"validation_stringency",
".",
"upper",
"(",
")",
"]",
"# picard-tools container doesn't have JAVA_OPTS variable",
"# Set TMPDIR to /data to prevent writing temporary files to /tmp",
"docker_parameters",
"=",
"[",
"'--rm'",
",",
"'--log-driver'",
",",
"'none'",
",",
"'-e'",
",",
"'JAVA_OPTIONS=-Djava.io.tmpdir=/data/ -Xmx{}'",
".",
"format",
"(",
"job",
".",
"memory",
")",
",",
"'-v'",
",",
"'{}:/data'",
".",
"format",
"(",
"work_dir",
")",
"]",
"start_time",
"=",
"time",
".",
"time",
"(",
")",
"dockerCall",
"(",
"job",
"=",
"job",
",",
"workDir",
"=",
"work_dir",
",",
"parameters",
"=",
"command",
",",
"tool",
"=",
"'quay.io/ucsc_cgl/picardtools:1.95--dd5ac549b95eb3e5d166a5e310417ef13651994e'",
",",
"dockerParameters",
"=",
"docker_parameters",
")",
"end_time",
"=",
"time",
".",
"time",
"(",
")",
"_log_runtime",
"(",
"job",
",",
"start_time",
",",
"end_time",
",",
"\"Picard MarkDuplicates\"",
")",
"bam",
"=",
"job",
".",
"fileStore",
".",
"writeGlobalFile",
"(",
"os",
".",
"path",
".",
"join",
"(",
"work_dir",
",",
"'mkdups.bam'",
")",
")",
"bai",
"=",
"job",
".",
"fileStore",
".",
"writeGlobalFile",
"(",
"os",
".",
"path",
".",
"join",
"(",
"work_dir",
",",
"'mkdups.bai'",
")",
")",
"return",
"bam",
",",
"bai"
]
sha: 022a615fc3dc98fc1aaa7bfd232409962ca44fbd

partition: test
func_name: run_picard_sort
docstring:
Sorts BAM file using Picard SortSam
:param JobFunctionWrappingJob job: passed automatically by Toil
:param str bam: FileStoreID for BAM file
:param boolean sort_by_name: If true, sorts by read name instead of coordinate.
:return: FileStoreID for sorted BAM file
:rtype: str
path: src/toil_lib/tools/preprocessing.py
original_string:
def run_picard_sort(job, bam, sort_by_name=False):
"""
Sorts BAM file using Picard SortSam
:param JobFunctionWrappingJob job: passed automatically by Toil
:param str bam: FileStoreID for BAM file
:param boolean sort_by_name: If true, sorts by read name instead of coordinate.
:return: FileStoreID for sorted BAM file
:rtype: str
"""
work_dir = job.fileStore.getLocalTempDir()
job.fileStore.readGlobalFile(bam, os.path.join(work_dir, 'input.bam'))
command = ['SortSam',
'O=/data/output.bam',
'I=/data/input.bam']
# picard-tools container doesn't have JAVA_OPTS variable
# Set TMPDIR to /data to prevent writing temporary files to /tmp
docker_parameters = ['--rm',
'--log-driver', 'none',
'-e', 'JAVA_OPTIONS=-Djava.io.tmpdir=/data/ -Xmx{}'.format(job.memory),
'-v', '{}:/data'.format(work_dir)]
if sort_by_name:
command.append('SO=queryname')
else:
command.append('SO=coordinate')
start_time = time.time()
dockerCall(job=job, workDir=work_dir,
parameters=command,
tool='quay.io/ucsc_cgl/picardtools:1.95--dd5ac549b95eb3e5d166a5e310417ef13651994e',
dockerParameters=docker_parameters)
end_time = time.time()
_log_runtime(job, start_time, end_time, "Picard SortSam")
return job.fileStore.writeGlobalFile(os.path.join(work_dir, 'output.bam'))
code:
def run_picard_sort(job, bam, sort_by_name=False):
"""
Sorts BAM file using Picard SortSam
:param JobFunctionWrappingJob job: passed automatically by Toil
:param str bam: FileStoreID for BAM file
:param boolean sort_by_name: If true, sorts by read name instead of coordinate.
:return: FileStoreID for sorted BAM file
:rtype: str
"""
work_dir = job.fileStore.getLocalTempDir()
job.fileStore.readGlobalFile(bam, os.path.join(work_dir, 'input.bam'))
command = ['SortSam',
'O=/data/output.bam',
'I=/data/input.bam']
# picard-tools container doesn't have JAVA_OPTS variable
# Set TMPDIR to /data to prevent writing temporary files to /tmp
docker_parameters = ['--rm',
'--log-driver', 'none',
'-e', 'JAVA_OPTIONS=-Djava.io.tmpdir=/data/ -Xmx{}'.format(job.memory),
'-v', '{}:/data'.format(work_dir)]
if sort_by_name:
command.append('SO=queryname')
else:
command.append('SO=coordinate')
start_time = time.time()
dockerCall(job=job, workDir=work_dir,
parameters=command,
tool='quay.io/ucsc_cgl/picardtools:1.95--dd5ac549b95eb3e5d166a5e310417ef13651994e',
dockerParameters=docker_parameters)
end_time = time.time()
_log_runtime(job, start_time, end_time, "Picard SortSam")
return job.fileStore.writeGlobalFile(os.path.join(work_dir, 'output.bam'))
docstring_tokens:
[
"Sorts",
"BAM",
"file",
"using",
"Picard",
"SortSam"
]
repo: BD2KGenomics/toil-lib
language: python
url: https://github.com/BD2KGenomics/toil-lib/blob/022a615fc3dc98fc1aaa7bfd232409962ca44fbd/src/toil_lib/tools/preprocessing.py#L343-L378
code_tokens:
[
"def",
"run_picard_sort",
"(",
"job",
",",
"bam",
",",
"sort_by_name",
"=",
"False",
")",
":",
"work_dir",
"=",
"job",
".",
"fileStore",
".",
"getLocalTempDir",
"(",
")",
"job",
".",
"fileStore",
".",
"readGlobalFile",
"(",
"bam",
",",
"os",
".",
"path",
".",
"join",
"(",
"work_dir",
",",
"'input.bam'",
")",
")",
"command",
"=",
"[",
"'SortSam'",
",",
"'O=/data/output.bam'",
",",
"'I=/data/input.bam'",
"]",
"# picard-tools container doesn't have JAVA_OPTS variable",
"# Set TMPDIR to /data to prevent writing temporary files to /tmp",
"docker_parameters",
"=",
"[",
"'--rm'",
",",
"'--log-driver'",
",",
"'none'",
",",
"'-e'",
",",
"'JAVA_OPTIONS=-Djava.io.tmpdir=/data/ -Xmx{}'",
".",
"format",
"(",
"job",
".",
"memory",
")",
",",
"'-v'",
",",
"'{}:/data'",
".",
"format",
"(",
"work_dir",
")",
"]",
"if",
"sort_by_name",
":",
"command",
".",
"append",
"(",
"'SO=queryname'",
")",
"else",
":",
"command",
".",
"append",
"(",
"'SO=coordinate'",
")",
"start_time",
"=",
"time",
".",
"time",
"(",
")",
"dockerCall",
"(",
"job",
"=",
"job",
",",
"workDir",
"=",
"work_dir",
",",
"parameters",
"=",
"command",
",",
"tool",
"=",
"'quay.io/ucsc_cgl/picardtools:1.95--dd5ac549b95eb3e5d166a5e310417ef13651994e'",
",",
"dockerParameters",
"=",
"docker_parameters",
")",
"end_time",
"=",
"time",
".",
"time",
"(",
")",
"_log_runtime",
"(",
"job",
",",
"start_time",
",",
"end_time",
",",
"\"Picard SortSam\"",
")",
"return",
"job",
".",
"fileStore",
".",
"writeGlobalFile",
"(",
"os",
".",
"path",
".",
"join",
"(",
"work_dir",
",",
"'output.bam'",
")",
")"
]
sha: 022a615fc3dc98fc1aaa7bfd232409962ca44fbd

partition: test
func_name: run_gatk_preprocessing
docstring:
GATK Preprocessing Pipeline
0: Mark duplicates
1: Create INDEL realignment intervals
2: Realign INDELs
3: Recalibrate base quality scores
4: Apply base score recalibration
:param JobFunctionWrappingJob job: passed automatically by Toil
:param str bam: FileStoreID for BAM file
:param str bai: FileStoreID for BAM index file
:param str ref: FileStoreID for reference genome fasta file
:param str ref_dict: FileStoreID for reference sequence dictionary file
:param str fai: FileStoreID for reference fasta index file
:param str g1k: FileStoreID for 1000 Genomes VCF file
:param str mills: FileStoreID for Mills VCF file
:param str dbsnp: FileStoreID for dbSNP VCF file
:param bool realign: If True, then runs GATK INDEL realignment"
:param bool unsafe: If True, runs GATK tools in UNSAFE mode: "-U ALLOW_SEQ_DICT_INCOMPATIBILITY"
:return: FileStoreIDs for BAM and BAI files
:rtype: tuple(str, str)
path: src/toil_lib/tools/preprocessing.py
original_string:
def run_gatk_preprocessing(job, bam, bai, ref, ref_dict, fai, g1k, mills, dbsnp, realign=False, unsafe=False):
"""
GATK Preprocessing Pipeline
0: Mark duplicates
1: Create INDEL realignment intervals
2: Realign INDELs
3: Recalibrate base quality scores
4: Apply base score recalibration
:param JobFunctionWrappingJob job: passed automatically by Toil
:param str bam: FileStoreID for BAM file
:param str bai: FileStoreID for BAM index file
:param str ref: FileStoreID for reference genome fasta file
:param str ref_dict: FileStoreID for reference sequence dictionary file
:param str fai: FileStoreID for reference fasta index file
:param str g1k: FileStoreID for 1000 Genomes VCF file
:param str mills: FileStoreID for Mills VCF file
:param str dbsnp: FileStoreID for dbSNP VCF file
:param bool realign: If True, then runs GATK INDEL realignment"
:param bool unsafe: If True, runs GATK tools in UNSAFE mode: "-U ALLOW_SEQ_DICT_INCOMPATIBILITY"
:return: FileStoreIDs for BAM and BAI files
:rtype: tuple(str, str)
"""
# The MarkDuplicates disk requirement depends on the input BAM and BAI files and the output
# BAM and BAI files. The output BAM file is approximately the same size as the input BAM file.
mdups_disk = PromisedRequirement(lambda bam_, bai_: 2 * (bam_.size + bai_.size), bam, bai)
mdups = job.wrapJobFn(picard_mark_duplicates,
bam,
bai,
cores=job.cores,
disk=mdups_disk,
memory=job.memory)
# Store input for BQSR
bqsr_input_bam = mdups.rv(0)
bqsr_input_bai = mdups.rv(1)
# Get genome reference file sizes for calculating disk requirements
genome_ref_size = ref.size + ref_dict.size + fai.size
if realign:
# Get INDEL resource file sizes and genome reference file sizes
indel_ref_size = mills.size + g1k.size + genome_ref_size
# The RealignerTargetCreator disk requirement depends on the input BAM/BAI files, the genome reference files,
# and the output intervals file. The intervals file size is less than the reference file size, so estimate the
# interval file size as the reference file size.
realigner_target_disk = PromisedRequirement(lambda bam_, bai_, ref_size:
bam_.size + bai_.size + 2 * ref_size,
mdups.rv(0),
mdups.rv(1),
indel_ref_size)
realigner_target = job.wrapJobFn(run_realigner_target_creator,
mdups.rv(0),
mdups.rv(1),
ref, ref_dict, fai,
g1k, mills,
unsafe=unsafe,
cores=1, # RealignerTargetCreator is single threaded
disk=realigner_target_disk,
memory=job.memory)
# The INDEL realignment disk requirement depends on the input BAM and BAI files, the intervals
# file, the variant resource files, and the output BAM and BAI files. Here, we assume the
# output BAM and BAI files are approximately the same size as the input BAM and BAI files.
indel_realign_disk = PromisedRequirement(lambda bam_, bai_, intervals, ref_size:
2 * (bam_.size + bai_.size) + intervals.size + ref_size,
mdups.rv(0),
mdups.rv(1),
realigner_target.rv(),
indel_ref_size)
indel_realign = job.wrapJobFn(run_indel_realignment,
realigner_target.rv(),
mdups.rv(0),
mdups.rv(1),
ref, ref_dict, fai,
g1k, mills,
unsafe=unsafe,
cores=1, # IndelRealigner is single threaded
disk=indel_realign_disk,
memory=job.memory)
mdups.addChild(realigner_target)
realigner_target.addChild(indel_realign)
# Update input for BQSR using the realigned BAM files
bqsr_input_bam = indel_realign.rv(0)
bqsr_input_bai = indel_realign.rv(1)
# Get size of BQSR databases and genome reference files
bqsr_ref_size = dbsnp.size + mills.size + genome_ref_size
# The BQSR disk requirement depends on the input BAM and BAI files, the reference files, and the output
# recalibration table file. The recalibration table file size is less than the reference file sizes, so use
# the reference file sizes to estimate the recalibration table file size.
base_recal_disk = PromisedRequirement(lambda bam_, bai_, ref_size:
bam_.size + bai_.size + 2 * ref_size,
bqsr_input_bam,
bqsr_input_bai,
bqsr_ref_size)
base_recal = job.wrapJobFn(run_base_recalibration,
bqsr_input_bam,
bqsr_input_bai,
ref, ref_dict, fai,
dbsnp, mills,
unsafe=unsafe,
cores=job.cores,
disk=base_recal_disk,
memory=job.memory)
# The PrintReads disk requirement depends on the input BAM and BAI files, the recalibration table file, the
# genome reference files, and the output BAM and BAI files. The output BAM and BAI files are approximately the
# same size as the input BAM and BAI files.
recalibrate_reads_disk = PromisedRequirement(lambda bam_, bai_, recal, ref_size:
2 * (bam_.size + bai_.size) + recal.size + ref_size,
bqsr_input_bam,
bqsr_input_bai,
base_recal.rv(),
genome_ref_size)
recalibrate_reads = job.wrapJobFn(apply_bqsr_recalibration,
base_recal.rv(),
bqsr_input_bam,
bqsr_input_bai,
ref, ref_dict, fai,
unsafe=unsafe,
cores=job.cores,
disk=recalibrate_reads_disk,
memory=job.memory)
job.addChild(mdups)
mdups.addFollowOn(base_recal)
base_recal.addChild(recalibrate_reads)
return recalibrate_reads.rv(0), recalibrate_reads.rv(1)
code:
def run_gatk_preprocessing(job, bam, bai, ref, ref_dict, fai, g1k, mills, dbsnp, realign=False, unsafe=False):
"""
GATK Preprocessing Pipeline
0: Mark duplicates
1: Create INDEL realignment intervals
2: Realign INDELs
3: Recalibrate base quality scores
4: Apply base score recalibration
:param JobFunctionWrappingJob job: passed automatically by Toil
:param str bam: FileStoreID for BAM file
:param str bai: FileStoreID for BAM index file
:param str ref: FileStoreID for reference genome fasta file
:param str ref_dict: FileStoreID for reference sequence dictionary file
:param str fai: FileStoreID for reference fasta index file
:param str g1k: FileStoreID for 1000 Genomes VCF file
:param str mills: FileStoreID for Mills VCF file
:param str dbsnp: FileStoreID for dbSNP VCF file
:param bool realign: If True, then runs GATK INDEL realignment"
:param bool unsafe: If True, runs GATK tools in UNSAFE mode: "-U ALLOW_SEQ_DICT_INCOMPATIBILITY"
:return: FileStoreIDs for BAM and BAI files
:rtype: tuple(str, str)
"""
# The MarkDuplicates disk requirement depends on the input BAM and BAI files and the output
# BAM and BAI files. The output BAM file is approximately the same size as the input BAM file.
mdups_disk = PromisedRequirement(lambda bam_, bai_: 2 * (bam_.size + bai_.size), bam, bai)
mdups = job.wrapJobFn(picard_mark_duplicates,
bam,
bai,
cores=job.cores,
disk=mdups_disk,
memory=job.memory)
# Store input for BQSR
bqsr_input_bam = mdups.rv(0)
bqsr_input_bai = mdups.rv(1)
# Get genome reference file sizes for calculating disk requirements
genome_ref_size = ref.size + ref_dict.size + fai.size
if realign:
# Get INDEL resource file sizes and genome reference file sizes
indel_ref_size = mills.size + g1k.size + genome_ref_size
# The RealignerTargetCreator disk requirement depends on the input BAM/BAI files, the genome reference files,
# and the output intervals file. The intervals file size is less than the reference file size, so estimate the
# interval file size as the reference file size.
realigner_target_disk = PromisedRequirement(lambda bam_, bai_, ref_size:
bam_.size + bai_.size + 2 * ref_size,
mdups.rv(0),
mdups.rv(1),
indel_ref_size)
realigner_target = job.wrapJobFn(run_realigner_target_creator,
mdups.rv(0),
mdups.rv(1),
ref, ref_dict, fai,
g1k, mills,
unsafe=unsafe,
cores=1, # RealignerTargetCreator is single threaded
disk=realigner_target_disk,
memory=job.memory)
# The INDEL realignment disk requirement depends on the input BAM and BAI files, the intervals
# file, the variant resource files, and the output BAM and BAI files. Here, we assume the
# output BAM and BAI files are approximately the same size as the input BAM and BAI files.
indel_realign_disk = PromisedRequirement(lambda bam_, bai_, intervals, ref_size:
2 * (bam_.size + bai_.size) + intervals.size + ref_size,
mdups.rv(0),
mdups.rv(1),
realigner_target.rv(),
indel_ref_size)
indel_realign = job.wrapJobFn(run_indel_realignment,
realigner_target.rv(),
mdups.rv(0),
mdups.rv(1),
ref, ref_dict, fai,
g1k, mills,
unsafe=unsafe,
cores=1, # IndelRealigner is single threaded
disk=indel_realign_disk,
memory=job.memory)
mdups.addChild(realigner_target)
realigner_target.addChild(indel_realign)
# Update input for BQSR using the realigned BAM files
bqsr_input_bam = indel_realign.rv(0)
bqsr_input_bai = indel_realign.rv(1)
# Get size of BQSR databases and genome reference files
bqsr_ref_size = dbsnp.size + mills.size + genome_ref_size
# The BQSR disk requirement depends on the input BAM and BAI files, the reference files, and the output
# recalibration table file. The recalibration table file size is less than the reference file sizes, so use
# the reference file sizes to estimate the recalibration table file size.
base_recal_disk = PromisedRequirement(lambda bam_, bai_, ref_size:
bam_.size + bai_.size + 2 * ref_size,
bqsr_input_bam,
bqsr_input_bai,
bqsr_ref_size)
base_recal = job.wrapJobFn(run_base_recalibration,
bqsr_input_bam,
bqsr_input_bai,
ref, ref_dict, fai,
dbsnp, mills,
unsafe=unsafe,
cores=job.cores,
disk=base_recal_disk,
memory=job.memory)
# The PrintReads disk requirement depends on the input BAM and BAI files, the recalibration table file, the
# genome reference files, and the output BAM and BAI files. The output BAM and BAI files are approximately the
# same size as the input BAM and BAI files.
recalibrate_reads_disk = PromisedRequirement(lambda bam_, bai_, recal, ref_size:
2 * (bam_.size + bai_.size) + recal.size + ref_size,
bqsr_input_bam,
bqsr_input_bai,
base_recal.rv(),
genome_ref_size)
recalibrate_reads = job.wrapJobFn(apply_bqsr_recalibration,
base_recal.rv(),
bqsr_input_bam,
bqsr_input_bai,
ref, ref_dict, fai,
unsafe=unsafe,
cores=job.cores,
disk=recalibrate_reads_disk,
memory=job.memory)
job.addChild(mdups)
mdups.addFollowOn(base_recal)
base_recal.addChild(recalibrate_reads)
return recalibrate_reads.rv(0), recalibrate_reads.rv(1)
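
The ordering of the five steps above is carried entirely by the child/follow-on edges and the rv() promises. Below is a minimal, self-contained Toil sketch of that wiring pattern; it is an illustration only (the step names and the ./jobstore path are made up, and it is independent of toil-lib):

# Minimal sketch of the addChild/addFollowOn/rv() pattern used above (hypothetical steps).
from toil.common import Toil
from toil.job import Job


def step(job, name, upstream=None):
    # Each step just reports the promised value handed to it by the step it depends on.
    return "%s (after %s)" % (name, upstream)


if __name__ == "__main__":
    options = Job.Runner.getDefaultOptions("./jobstore")
    mdups = Job.wrapJobFn(step, "mark_duplicates")
    target = Job.wrapJobFn(step, "realigner_target", upstream=mdups.rv())
    bqsr = Job.wrapJobFn(step, "base_recalibration", upstream=mdups.rv())
    mdups.addChild(target)    # runs after mark_duplicates
    mdups.addFollowOn(bqsr)   # runs after mark_duplicates and all of its children (i.e. after target)
    with Toil(options) as workflow:
        print(workflow.start(mdups))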
|
[
"GATK",
"Preprocessing",
"Pipeline",
"0",
":",
"Mark",
"duplicates",
"1",
":",
"Create",
"INDEL",
"realignment",
"intervals",
"2",
":",
"Realign",
"INDELs",
"3",
":",
"Recalibrate",
"base",
"quality",
"scores",
"4",
":",
"Apply",
"base",
"score",
"recalibration"
] |
BD2KGenomics/toil-lib
|
python
|
https://github.com/BD2KGenomics/toil-lib/blob/022a615fc3dc98fc1aaa7bfd232409962ca44fbd/src/toil_lib/tools/preprocessing.py#L381-L518
|
[
"def",
"run_gatk_preprocessing",
"(",
"job",
",",
"bam",
",",
"bai",
",",
"ref",
",",
"ref_dict",
",",
"fai",
",",
"g1k",
",",
"mills",
",",
"dbsnp",
",",
"realign",
"=",
"False",
",",
"unsafe",
"=",
"False",
")",
":",
"# The MarkDuplicates disk requirement depends on the input BAM and BAI files and the output",
"# BAM and BAI files. The output BAM file is approximately the same size as the input BAM file.",
"mdups_disk",
"=",
"PromisedRequirement",
"(",
"lambda",
"bam_",
",",
"bai_",
":",
"2",
"*",
"(",
"bam_",
".",
"size",
"+",
"bai_",
".",
"size",
")",
",",
"bam",
",",
"bai",
")",
"mdups",
"=",
"job",
".",
"wrapJobFn",
"(",
"picard_mark_duplicates",
",",
"bam",
",",
"bai",
",",
"cores",
"=",
"job",
".",
"cores",
",",
"disk",
"=",
"mdups_disk",
",",
"memory",
"=",
"job",
".",
"memory",
")",
"# Store input for BQSR",
"bqsr_input_bam",
"=",
"mdups",
".",
"rv",
"(",
"0",
")",
"bqsr_input_bai",
"=",
"mdups",
".",
"rv",
"(",
"1",
")",
"# Get genome reference file sizes for calculating disk requirements",
"genome_ref_size",
"=",
"ref",
".",
"size",
"+",
"ref_dict",
".",
"size",
"+",
"fai",
".",
"size",
"if",
"realign",
":",
"# Get INDEL resource file sizes and genome reference file sizes",
"indel_ref_size",
"=",
"mills",
".",
"size",
"+",
"g1k",
".",
"size",
"+",
"genome_ref_size",
"# The RealignerTargetCreator disk requirement depends on the input BAM/BAI files, the genome reference files,",
"# and the output intervals file. The intervals file size is less than the reference file size, so estimate the",
"# interval file size as the reference file size.",
"realigner_target_disk",
"=",
"PromisedRequirement",
"(",
"lambda",
"bam_",
",",
"bai_",
",",
"ref_size",
":",
"bam_",
".",
"size",
"+",
"bai_",
".",
"size",
"+",
"2",
"*",
"ref_size",
",",
"mdups",
".",
"rv",
"(",
"0",
")",
",",
"mdups",
".",
"rv",
"(",
"1",
")",
",",
"indel_ref_size",
")",
"realigner_target",
"=",
"job",
".",
"wrapJobFn",
"(",
"run_realigner_target_creator",
",",
"mdups",
".",
"rv",
"(",
"0",
")",
",",
"mdups",
".",
"rv",
"(",
"1",
")",
",",
"ref",
",",
"ref_dict",
",",
"fai",
",",
"g1k",
",",
"mills",
",",
"unsafe",
"=",
"unsafe",
",",
"cores",
"=",
"1",
",",
"# RealignerTargetCreator is single threaded",
"disk",
"=",
"realigner_target_disk",
",",
"memory",
"=",
"job",
".",
"memory",
")",
"# The INDEL realignment disk requirement depends on the input BAM and BAI files, the intervals",
"# file, the variant resource files, and the output BAM and BAI files. Here, we assume the",
"# output BAM and BAI files are approximately the same size as the input BAM and BAI files.",
"indel_realign_disk",
"=",
"PromisedRequirement",
"(",
"lambda",
"bam_",
",",
"bai_",
",",
"intervals",
",",
"ref_size",
":",
"2",
"*",
"(",
"bam_",
".",
"size",
"+",
"bai_",
".",
"size",
")",
"+",
"intervals",
".",
"size",
"+",
"ref_size",
",",
"mdups",
".",
"rv",
"(",
"0",
")",
",",
"mdups",
".",
"rv",
"(",
"1",
")",
",",
"realigner_target",
".",
"rv",
"(",
")",
",",
"indel_ref_size",
")",
"indel_realign",
"=",
"job",
".",
"wrapJobFn",
"(",
"run_indel_realignment",
",",
"realigner_target",
".",
"rv",
"(",
")",
",",
"mdups",
".",
"rv",
"(",
"0",
")",
",",
"mdups",
".",
"rv",
"(",
"1",
")",
",",
"ref",
",",
"ref_dict",
",",
"fai",
",",
"g1k",
",",
"mills",
",",
"unsafe",
"=",
"unsafe",
",",
"cores",
"=",
"1",
",",
"# IndelRealigner is single threaded",
"disk",
"=",
"indel_realign_disk",
",",
"memory",
"=",
"job",
".",
"memory",
")",
"mdups",
".",
"addChild",
"(",
"realigner_target",
")",
"realigner_target",
".",
"addChild",
"(",
"indel_realign",
")",
"# Update input for BQSR using the realigned BAM files",
"bqsr_input_bam",
"=",
"indel_realign",
".",
"rv",
"(",
"0",
")",
"bqsr_input_bai",
"=",
"indel_realign",
".",
"rv",
"(",
"1",
")",
"# Get size of BQSR databases and genome reference files",
"bqsr_ref_size",
"=",
"dbsnp",
".",
"size",
"+",
"mills",
".",
"size",
"+",
"genome_ref_size",
"# The BQSR disk requirement depends on the input BAM and BAI files, the reference files, and the output",
"# recalibration table file. The recalibration table file size is less than the reference file sizes, so use",
"# the reference file sizes to estimate the recalibration table file size.",
"base_recal_disk",
"=",
"PromisedRequirement",
"(",
"lambda",
"bam_",
",",
"bai_",
",",
"ref_size",
":",
"bam_",
".",
"size",
"+",
"bai_",
".",
"size",
"+",
"2",
"*",
"ref_size",
",",
"bqsr_input_bam",
",",
"bqsr_input_bai",
",",
"bqsr_ref_size",
")",
"base_recal",
"=",
"job",
".",
"wrapJobFn",
"(",
"run_base_recalibration",
",",
"bqsr_input_bam",
",",
"bqsr_input_bai",
",",
"ref",
",",
"ref_dict",
",",
"fai",
",",
"dbsnp",
",",
"mills",
",",
"unsafe",
"=",
"unsafe",
",",
"cores",
"=",
"job",
".",
"cores",
",",
"disk",
"=",
"base_recal_disk",
",",
"memory",
"=",
"job",
".",
"memory",
")",
"# The PrintReads disk requirement depends on the input BAM and BAI files, the recalibration table file, the",
"# genome reference files, and the output BAM and BAI files. The output BAM and BAI files are approximately the",
"# same size as the input BAM and BAI files.",
"recalibrate_reads_disk",
"=",
"PromisedRequirement",
"(",
"lambda",
"bam_",
",",
"bai_",
",",
"recal",
",",
"ref_size",
":",
"2",
"*",
"(",
"bam_",
".",
"size",
"+",
"bai_",
".",
"size",
")",
"+",
"recal",
".",
"size",
"+",
"ref_size",
",",
"bqsr_input_bam",
",",
"bqsr_input_bai",
",",
"base_recal",
".",
"rv",
"(",
")",
",",
"genome_ref_size",
")",
"recalibrate_reads",
"=",
"job",
".",
"wrapJobFn",
"(",
"apply_bqsr_recalibration",
",",
"base_recal",
".",
"rv",
"(",
")",
",",
"bqsr_input_bam",
",",
"bqsr_input_bai",
",",
"ref",
",",
"ref_dict",
",",
"fai",
",",
"unsafe",
"=",
"unsafe",
",",
"cores",
"=",
"job",
".",
"cores",
",",
"disk",
"=",
"recalibrate_reads_disk",
",",
"memory",
"=",
"job",
".",
"memory",
")",
"job",
".",
"addChild",
"(",
"mdups",
")",
"mdups",
".",
"addFollowOn",
"(",
"base_recal",
")",
"base_recal",
".",
"addChild",
"(",
"recalibrate_reads",
")",
"return",
"recalibrate_reads",
".",
"rv",
"(",
"0",
")",
",",
"recalibrate_reads",
".",
"rv",
"(",
"1",
")"
] |
022a615fc3dc98fc1aaa7bfd232409962ca44fbd
|
test
|
run_base_recalibration
|
Creates recalibration table for Base Quality Score Recalibration
:param JobFunctionWrappingJob job: passed automatically by Toil
:param str bam: FileStoreID for BAM file
:param str bai: FileStoreID for BAM index file
:param str ref: FileStoreID for reference genome fasta file
:param str ref_dict: FileStoreID for reference genome sequence dictionary file
:param str fai: FileStoreID for reference genome fasta index file
:param str dbsnp: FileStoreID for dbSNP VCF file
:param str mills: FileStoreID for Mills VCF file
:param bool unsafe: If True, runs GATK in UNSAFE mode: "-U ALLOW_SEQ_DICT_INCOMPATIBILITY"
:return: FileStoreID for the recalibration table file
:rtype: str
|
src/toil_lib/tools/preprocessing.py
|
def run_base_recalibration(job, bam, bai, ref, ref_dict, fai, dbsnp, mills, unsafe=False):
"""
Creates recalibration table for Base Quality Score Recalibration
:param JobFunctionWrappingJob job: passed automatically by Toil
:param str bam: FileStoreID for BAM file
:param str bai: FileStoreID for BAM index file
:param str ref: FileStoreID for reference genome fasta file
:param str ref_dict: FileStoreID for reference genome sequence dictionary file
:param str fai: FileStoreID for reference genome fasta index file
:param str dbsnp: FileStoreID for dbSNP VCF file
:param str mills: FileStoreID for Mills VCF file
:param bool unsafe: If True, runs GATK in UNSAFE mode: "-U ALLOW_SEQ_DICT_INCOMPATIBILITY"
:return: FileStoreID for the recalibration table file
:rtype: str
"""
inputs = {'ref.fasta': ref,
'ref.fasta.fai': fai,
'ref.dict': ref_dict,
'input.bam': bam,
'input.bai': bai,
'dbsnp.vcf': dbsnp,
'mills.vcf': mills}
work_dir = job.fileStore.getLocalTempDir()
for name, file_store_id in inputs.iteritems():
job.fileStore.readGlobalFile(file_store_id, os.path.join(work_dir, name))
# Call: GATK -- BaseRecalibrator
parameters = ['-T', 'BaseRecalibrator',
'-nct', str(int(job.cores)),
'-R', '/data/ref.fasta',
'-I', '/data/input.bam',
# Recommended known sites:
# https://software.broadinstitute.org/gatk/guide/article?id=1247
'-knownSites', '/data/dbsnp.vcf',
'-knownSites', '/data/mills.vcf',
'-o', '/data/recal_data.table']
if unsafe:
parameters.extend(['-U', 'ALLOW_SEQ_DICT_INCOMPATIBILITY'])
# Set TMPDIR to /data to prevent writing temporary files to /tmp
docker_parameters = ['--rm',
'--log-driver', 'none',
'-e', 'JAVA_OPTS=-Djava.io.tmpdir=/data/ -Xmx{}'.format(job.memory),
'-v', '{}:/data'.format(work_dir)]
start_time = time.time()
dockerCall(job=job, tool='quay.io/ucsc_cgl/gatk:3.5--dba6dae49156168a909c43330350c6161dc7ecc2',
workDir=work_dir,
parameters=parameters,
dockerParameters=docker_parameters)
end_time = time.time()
_log_runtime(job, start_time, end_time, "GATK3 BaseRecalibrator")
return job.fileStore.writeGlobalFile(os.path.join(work_dir, 'recal_data.table'))
|
def run_base_recalibration(job, bam, bai, ref, ref_dict, fai, dbsnp, mills, unsafe=False):
"""
Creates recalibration table for Base Quality Score Recalibration
:param JobFunctionWrappingJob job: passed automatically by Toil
:param str bam: FileStoreID for BAM file
:param str bai: FileStoreID for BAM index file
:param str ref: FileStoreID for reference genome fasta file
:param str ref_dict: FileStoreID for reference genome sequence dictionary file
:param str fai: FileStoreID for reference genome fasta index file
:param str dbsnp: FileStoreID for dbSNP VCF file
:param str mills: FileStoreID for Mills VCF file
:param bool unsafe: If True, runs GATK in UNSAFE mode: "-U ALLOW_SEQ_DICT_INCOMPATIBILITY"
:return: FileStoreID for the recalibration table file
:rtype: str
"""
inputs = {'ref.fasta': ref,
'ref.fasta.fai': fai,
'ref.dict': ref_dict,
'input.bam': bam,
'input.bai': bai,
'dbsnp.vcf': dbsnp,
'mills.vcf': mills}
work_dir = job.fileStore.getLocalTempDir()
for name, file_store_id in inputs.iteritems():
job.fileStore.readGlobalFile(file_store_id, os.path.join(work_dir, name))
# Call: GATK -- BaseRecalibrator
parameters = ['-T', 'BaseRecalibrator',
'-nct', str(int(job.cores)),
'-R', '/data/ref.fasta',
'-I', '/data/input.bam',
# Recommended known sites:
# https://software.broadinstitute.org/gatk/guide/article?id=1247
'-knownSites', '/data/dbsnp.vcf',
'-knownSites', '/data/mills.vcf',
'-o', '/data/recal_data.table']
if unsafe:
parameters.extend(['-U', 'ALLOW_SEQ_DICT_INCOMPATIBILITY'])
# Set TMPDIR to /data to prevent writing temporary files to /tmp
docker_parameters = ['--rm',
'--log-driver', 'none',
'-e', 'JAVA_OPTS=-Djava.io.tmpdir=/data/ -Xmx{}'.format(job.memory),
'-v', '{}:/data'.format(work_dir)]
start_time = time.time()
dockerCall(job=job, tool='quay.io/ucsc_cgl/gatk:3.5--dba6dae49156168a909c43330350c6161dc7ecc2',
workDir=work_dir,
parameters=parameters,
dockerParameters=docker_parameters)
end_time = time.time()
_log_runtime(job, start_time, end_time, "GATK3 BaseRecalibrator")
return job.fileStore.writeGlobalFile(os.path.join(work_dir, 'recal_data.table'))
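
For readers who want to see the flat command this builds, the snippet below expands the same parameter list into a single BaseRecalibrator invocation. It is illustration only: the leading 'gatk' token is just a placeholder for the container's GATK 3.5 entrypoint, and 8 cores is an arbitrary example value.

# Illustration only: expand the BaseRecalibrator argument list above into one command line.
cores = 8
unsafe = True
parameters = ['-T', 'BaseRecalibrator',
              '-nct', str(cores),
              '-R', '/data/ref.fasta',
              '-I', '/data/input.bam',
              '-knownSites', '/data/dbsnp.vcf',
              '-knownSites', '/data/mills.vcf',
              '-o', '/data/recal_data.table']
if unsafe:
    parameters.extend(['-U', 'ALLOW_SEQ_DICT_INCOMPATIBILITY'])
print(' '.join(['gatk'] + parameters))
# gatk -T BaseRecalibrator -nct 8 -R /data/ref.fasta -I /data/input.bam \
#   -knownSites /data/dbsnp.vcf -knownSites /data/mills.vcf -o /data/recal_data.table \
#   -U ALLOW_SEQ_DICT_INCOMPATIBILITY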
|
[
"Creates",
"recalibration",
"table",
"for",
"Base",
"Quality",
"Score",
"Recalibration"
] |
BD2KGenomics/toil-lib
|
python
|
https://github.com/BD2KGenomics/toil-lib/blob/022a615fc3dc98fc1aaa7bfd232409962ca44fbd/src/toil_lib/tools/preprocessing.py#L649-L704
|
[
"def",
"run_base_recalibration",
"(",
"job",
",",
"bam",
",",
"bai",
",",
"ref",
",",
"ref_dict",
",",
"fai",
",",
"dbsnp",
",",
"mills",
",",
"unsafe",
"=",
"False",
")",
":",
"inputs",
"=",
"{",
"'ref.fasta'",
":",
"ref",
",",
"'ref.fasta.fai'",
":",
"fai",
",",
"'ref.dict'",
":",
"ref_dict",
",",
"'input.bam'",
":",
"bam",
",",
"'input.bai'",
":",
"bai",
",",
"'dbsnp.vcf'",
":",
"dbsnp",
",",
"'mills.vcf'",
":",
"mills",
"}",
"work_dir",
"=",
"job",
".",
"fileStore",
".",
"getLocalTempDir",
"(",
")",
"for",
"name",
",",
"file_store_id",
"in",
"inputs",
".",
"iteritems",
"(",
")",
":",
"job",
".",
"fileStore",
".",
"readGlobalFile",
"(",
"file_store_id",
",",
"os",
".",
"path",
".",
"join",
"(",
"work_dir",
",",
"name",
")",
")",
"# Call: GATK -- BaseRecalibrator",
"parameters",
"=",
"[",
"'-T'",
",",
"'BaseRecalibrator'",
",",
"'-nct'",
",",
"str",
"(",
"int",
"(",
"job",
".",
"cores",
")",
")",
",",
"'-R'",
",",
"'/data/ref.fasta'",
",",
"'-I'",
",",
"'/data/input.bam'",
",",
"# Recommended known sites:",
"# https://software.broadinstitute.org/gatk/guide/article?id=1247",
"'-knownSites'",
",",
"'/data/dbsnp.vcf'",
",",
"'-knownSites'",
",",
"'/data/mills.vcf'",
",",
"'-o'",
",",
"'/data/recal_data.table'",
"]",
"if",
"unsafe",
":",
"parameters",
".",
"extend",
"(",
"[",
"'-U'",
",",
"'ALLOW_SEQ_DICT_INCOMPATIBILITY'",
"]",
")",
"# Set TMPDIR to /data to prevent writing temporary files to /tmp",
"docker_parameters",
"=",
"[",
"'--rm'",
",",
"'--log-driver'",
",",
"'none'",
",",
"'-e'",
",",
"'JAVA_OPTS=-Djava.io.tmpdir=/data/ -Xmx{}'",
".",
"format",
"(",
"job",
".",
"memory",
")",
",",
"'-v'",
",",
"'{}:/data'",
".",
"format",
"(",
"work_dir",
")",
"]",
"start_time",
"=",
"time",
".",
"time",
"(",
")",
"dockerCall",
"(",
"job",
"=",
"job",
",",
"tool",
"=",
"'quay.io/ucsc_cgl/gatk:3.5--dba6dae49156168a909c43330350c6161dc7ecc2'",
",",
"workDir",
"=",
"work_dir",
",",
"parameters",
"=",
"parameters",
",",
"dockerParameters",
"=",
"docker_parameters",
")",
"end_time",
"=",
"time",
".",
"time",
"(",
")",
"_log_runtime",
"(",
"job",
",",
"start_time",
",",
"end_time",
",",
"\"GATK3 BaseRecalibrator\"",
")",
"return",
"job",
".",
"fileStore",
".",
"writeGlobalFile",
"(",
"os",
".",
"path",
".",
"join",
"(",
"work_dir",
",",
"'recal_data.table'",
")",
")"
] |
022a615fc3dc98fc1aaa7bfd232409962ca44fbd
|
test
|
run_kallisto
|
RNA quantification via Kallisto
:param JobFunctionWrappingJob job: passed automatically by Toil
:param str r1_id: FileStoreID of fastq (pair 1)
:param str r2_id: FileStoreID of fastq (pair 2 if applicable, otherwise pass None for single-end)
    :param str kallisto_index_url: URL for Kallisto index file
:return: FileStoreID from Kallisto output
:rtype: str
|
src/toil_lib/tools/quantifiers.py
|
def run_kallisto(job, r1_id, r2_id, kallisto_index_url):
"""
RNA quantification via Kallisto
:param JobFunctionWrappingJob job: passed automatically by Toil
:param str r1_id: FileStoreID of fastq (pair 1)
:param str r2_id: FileStoreID of fastq (pair 2 if applicable, otherwise pass None for single-end)
    :param str kallisto_index_url: URL for Kallisto index file
:return: FileStoreID from Kallisto output
:rtype: str
"""
work_dir = job.fileStore.getLocalTempDir()
download_url(job, url=kallisto_index_url, name='kallisto_hg38.idx', work_dir=work_dir)
# Retrieve files
parameters = ['quant',
'-i', '/data/kallisto_hg38.idx',
'-t', str(job.cores),
'-o', '/data/',
'-b', '100',
'--fusion']
if r1_id and r2_id:
job.fileStore.readGlobalFile(r1_id, os.path.join(work_dir, 'R1.fastq'))
job.fileStore.readGlobalFile(r2_id, os.path.join(work_dir, 'R2.fastq'))
parameters.extend(['/data/R1.fastq', '/data/R2.fastq'])
else:
job.fileStore.readGlobalFile(r1_id, os.path.join(work_dir, 'R1.fastq'))
parameters.extend(['--single', '-l', '200', '-s', '15', '/data/R1.fastq'])
# Call: Kallisto
dockerCall(job=job, tool='quay.io/ucsc_cgl/kallisto:0.42.4--35ac87df5b21a8e8e8d159f26864ac1e1db8cf86',
workDir=work_dir, parameters=parameters)
# Tar output files together and store in fileStore
output_files = [os.path.join(work_dir, x) for x in ['run_info.json', 'abundance.tsv', 'abundance.h5', 'fusion.txt']]
tarball_files(tar_name='kallisto.tar.gz', file_paths=output_files, output_dir=work_dir)
return job.fileStore.writeGlobalFile(os.path.join(work_dir, 'kallisto.tar.gz'))
|
def run_kallisto(job, r1_id, r2_id, kallisto_index_url):
"""
RNA quantification via Kallisto
:param JobFunctionWrappingJob job: passed automatically by Toil
:param str r1_id: FileStoreID of fastq (pair 1)
:param str r2_id: FileStoreID of fastq (pair 2 if applicable, otherwise pass None for single-end)
    :param str kallisto_index_url: URL for Kallisto index file
:return: FileStoreID from Kallisto output
:rtype: str
"""
work_dir = job.fileStore.getLocalTempDir()
download_url(job, url=kallisto_index_url, name='kallisto_hg38.idx', work_dir=work_dir)
# Retrieve files
parameters = ['quant',
'-i', '/data/kallisto_hg38.idx',
'-t', str(job.cores),
'-o', '/data/',
'-b', '100',
'--fusion']
if r1_id and r2_id:
job.fileStore.readGlobalFile(r1_id, os.path.join(work_dir, 'R1.fastq'))
job.fileStore.readGlobalFile(r2_id, os.path.join(work_dir, 'R2.fastq'))
parameters.extend(['/data/R1.fastq', '/data/R2.fastq'])
else:
job.fileStore.readGlobalFile(r1_id, os.path.join(work_dir, 'R1.fastq'))
parameters.extend(['--single', '-l', '200', '-s', '15', '/data/R1.fastq'])
# Call: Kallisto
dockerCall(job=job, tool='quay.io/ucsc_cgl/kallisto:0.42.4--35ac87df5b21a8e8e8d159f26864ac1e1db8cf86',
workDir=work_dir, parameters=parameters)
# Tar output files together and store in fileStore
output_files = [os.path.join(work_dir, x) for x in ['run_info.json', 'abundance.tsv', 'abundance.h5', 'fusion.txt']]
tarball_files(tar_name='kallisto.tar.gz', file_paths=output_files, output_dir=work_dir)
return job.fileStore.writeGlobalFile(os.path.join(work_dir, 'kallisto.tar.gz'))
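
The paired-end and single-end branches above differ only in the tail of the argument list. A standalone illustration follows (4 threads is an arbitrary example; the /data paths mirror the container layout used above, and nothing here is new toil-lib API):

# Illustration only: how the kallisto argument list above differs between the two modes.
def kallisto_args(paired):
    parameters = ['quant',
                  '-i', '/data/kallisto_hg38.idx',
                  '-t', '4',
                  '-o', '/data/',
                  '-b', '100',
                  '--fusion']
    if paired:
        parameters.extend(['/data/R1.fastq', '/data/R2.fastq'])
    else:
        parameters.extend(['--single', '-l', '200', '-s', '15', '/data/R1.fastq'])
    return parameters

print(' '.join(['kallisto'] + kallisto_args(paired=True)))
print(' '.join(['kallisto'] + kallisto_args(paired=False)))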
|
[
"RNA",
"quantification",
"via",
"Kallisto"
] |
BD2KGenomics/toil-lib
|
python
|
https://github.com/BD2KGenomics/toil-lib/blob/022a615fc3dc98fc1aaa7bfd232409962ca44fbd/src/toil_lib/tools/quantifiers.py#L10-L45
|
[
"def",
"run_kallisto",
"(",
"job",
",",
"r1_id",
",",
"r2_id",
",",
"kallisto_index_url",
")",
":",
"work_dir",
"=",
"job",
".",
"fileStore",
".",
"getLocalTempDir",
"(",
")",
"download_url",
"(",
"job",
",",
"url",
"=",
"kallisto_index_url",
",",
"name",
"=",
"'kallisto_hg38.idx'",
",",
"work_dir",
"=",
"work_dir",
")",
"# Retrieve files",
"parameters",
"=",
"[",
"'quant'",
",",
"'-i'",
",",
"'/data/kallisto_hg38.idx'",
",",
"'-t'",
",",
"str",
"(",
"job",
".",
"cores",
")",
",",
"'-o'",
",",
"'/data/'",
",",
"'-b'",
",",
"'100'",
",",
"'--fusion'",
"]",
"if",
"r1_id",
"and",
"r2_id",
":",
"job",
".",
"fileStore",
".",
"readGlobalFile",
"(",
"r1_id",
",",
"os",
".",
"path",
".",
"join",
"(",
"work_dir",
",",
"'R1.fastq'",
")",
")",
"job",
".",
"fileStore",
".",
"readGlobalFile",
"(",
"r2_id",
",",
"os",
".",
"path",
".",
"join",
"(",
"work_dir",
",",
"'R2.fastq'",
")",
")",
"parameters",
".",
"extend",
"(",
"[",
"'/data/R1.fastq'",
",",
"'/data/R2.fastq'",
"]",
")",
"else",
":",
"job",
".",
"fileStore",
".",
"readGlobalFile",
"(",
"r1_id",
",",
"os",
".",
"path",
".",
"join",
"(",
"work_dir",
",",
"'R1.fastq'",
")",
")",
"parameters",
".",
"extend",
"(",
"[",
"'--single'",
",",
"'-l'",
",",
"'200'",
",",
"'-s'",
",",
"'15'",
",",
"'/data/R1.fastq'",
"]",
")",
"# Call: Kallisto",
"dockerCall",
"(",
"job",
"=",
"job",
",",
"tool",
"=",
"'quay.io/ucsc_cgl/kallisto:0.42.4--35ac87df5b21a8e8e8d159f26864ac1e1db8cf86'",
",",
"workDir",
"=",
"work_dir",
",",
"parameters",
"=",
"parameters",
")",
"# Tar output files together and store in fileStore",
"output_files",
"=",
"[",
"os",
".",
"path",
".",
"join",
"(",
"work_dir",
",",
"x",
")",
"for",
"x",
"in",
"[",
"'run_info.json'",
",",
"'abundance.tsv'",
",",
"'abundance.h5'",
",",
"'fusion.txt'",
"]",
"]",
"tarball_files",
"(",
"tar_name",
"=",
"'kallisto.tar.gz'",
",",
"file_paths",
"=",
"output_files",
",",
"output_dir",
"=",
"work_dir",
")",
"return",
"job",
".",
"fileStore",
".",
"writeGlobalFile",
"(",
"os",
".",
"path",
".",
"join",
"(",
"work_dir",
",",
"'kallisto.tar.gz'",
")",
")"
] |
022a615fc3dc98fc1aaa7bfd232409962ca44fbd
|
test
|
run_rsem
|
RNA quantification with RSEM
:param JobFunctionWrappingJob job: Passed automatically by Toil
:param str bam_id: FileStoreID of transcriptome bam for quantification
:param str rsem_ref_url: URL of RSEM reference (tarball)
:param bool paired: If True, uses parameters for paired end data
:return: FileStoreIDs for RSEM's gene and isoform output
:rtype: str
|
src/toil_lib/tools/quantifiers.py
|
def run_rsem(job, bam_id, rsem_ref_url, paired=True):
"""
RNA quantification with RSEM
:param JobFunctionWrappingJob job: Passed automatically by Toil
:param str bam_id: FileStoreID of transcriptome bam for quantification
:param str rsem_ref_url: URL of RSEM reference (tarball)
:param bool paired: If True, uses parameters for paired end data
:return: FileStoreIDs for RSEM's gene and isoform output
:rtype: str
"""
work_dir = job.fileStore.getLocalTempDir()
download_url(job, url=rsem_ref_url, name='rsem_ref.tar.gz', work_dir=work_dir)
subprocess.check_call(['tar', '-xvf', os.path.join(work_dir, 'rsem_ref.tar.gz'), '-C', work_dir])
os.remove(os.path.join(work_dir, 'rsem_ref.tar.gz'))
# Determine tarball structure - based on it, ascertain folder name and rsem reference prefix
rsem_files = []
for root, directories, files in os.walk(work_dir):
rsem_files.extend([os.path.join(root, x) for x in files])
# "grp" is a required RSEM extension that should exist in the RSEM reference
ref_prefix = [os.path.basename(os.path.splitext(x)[0]) for x in rsem_files if 'grp' in x][0]
ref_folder = os.path.join('/data', os.listdir(work_dir)[0]) if len(os.listdir(work_dir)) == 1 else '/data'
# I/O
job.fileStore.readGlobalFile(bam_id, os.path.join(work_dir, 'transcriptome.bam'))
output_prefix = 'rsem'
# Call: RSEM
parameters = ['--quiet',
'--no-qualities',
'-p', str(job.cores),
'--forward-prob', '0.5',
'--seed-length', '25',
'--fragment-length-mean', '-1.0',
'--bam', '/data/transcriptome.bam',
os.path.join(ref_folder, ref_prefix),
output_prefix]
if paired:
parameters = ['--paired-end'] + parameters
dockerCall(job=job, tool='quay.io/ucsc_cgl/rsem:1.2.25--d4275175cc8df36967db460b06337a14f40d2f21',
parameters=parameters, workDir=work_dir)
# Write to FileStore
gene_id = job.fileStore.writeGlobalFile(os.path.join(work_dir, output_prefix + '.genes.results'))
isoform_id = job.fileStore.writeGlobalFile(os.path.join(work_dir, output_prefix + '.isoforms.results'))
return gene_id, isoform_id
|
def run_rsem(job, bam_id, rsem_ref_url, paired=True):
"""
RNA quantification with RSEM
:param JobFunctionWrappingJob job: Passed automatically by Toil
:param str bam_id: FileStoreID of transcriptome bam for quantification
:param str rsem_ref_url: URL of RSEM reference (tarball)
:param bool paired: If True, uses parameters for paired end data
:return: FileStoreIDs for RSEM's gene and isoform output
:rtype: str
"""
work_dir = job.fileStore.getLocalTempDir()
download_url(job, url=rsem_ref_url, name='rsem_ref.tar.gz', work_dir=work_dir)
subprocess.check_call(['tar', '-xvf', os.path.join(work_dir, 'rsem_ref.tar.gz'), '-C', work_dir])
os.remove(os.path.join(work_dir, 'rsem_ref.tar.gz'))
# Determine tarball structure - based on it, ascertain folder name and rsem reference prefix
rsem_files = []
for root, directories, files in os.walk(work_dir):
rsem_files.extend([os.path.join(root, x) for x in files])
# "grp" is a required RSEM extension that should exist in the RSEM reference
ref_prefix = [os.path.basename(os.path.splitext(x)[0]) for x in rsem_files if 'grp' in x][0]
ref_folder = os.path.join('/data', os.listdir(work_dir)[0]) if len(os.listdir(work_dir)) == 1 else '/data'
# I/O
job.fileStore.readGlobalFile(bam_id, os.path.join(work_dir, 'transcriptome.bam'))
output_prefix = 'rsem'
# Call: RSEM
parameters = ['--quiet',
'--no-qualities',
'-p', str(job.cores),
'--forward-prob', '0.5',
'--seed-length', '25',
'--fragment-length-mean', '-1.0',
'--bam', '/data/transcriptome.bam',
os.path.join(ref_folder, ref_prefix),
output_prefix]
if paired:
parameters = ['--paired-end'] + parameters
dockerCall(job=job, tool='quay.io/ucsc_cgl/rsem:1.2.25--d4275175cc8df36967db460b06337a14f40d2f21',
parameters=parameters, workDir=work_dir)
# Write to FileStore
gene_id = job.fileStore.writeGlobalFile(os.path.join(work_dir, output_prefix + '.genes.results'))
isoform_id = job.fileStore.writeGlobalFile(os.path.join(work_dir, output_prefix + '.isoforms.results'))
return gene_id, isoform_id
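
The prefix/folder detection above keys on the '.grp' file inside the extracted reference tarball. The snippet below re-runs that exact logic against a hypothetical tarball layout (the directory name rsem_ref_hg38 and the prefix hg38 are made up for illustration):

# Re-running the reference-detection logic from run_rsem on a made-up tarball layout.
import os
import tempfile

work_dir = tempfile.mkdtemp()
os.makedirs(os.path.join(work_dir, 'rsem_ref_hg38'))
for ext in ('.grp', '.ti', '.seq', '.chrlist'):
    open(os.path.join(work_dir, 'rsem_ref_hg38', 'hg38' + ext), 'w').close()

rsem_files = []
for root, directories, files in os.walk(work_dir):
    rsem_files.extend([os.path.join(root, x) for x in files])
# "grp" is the extension the original code keys on
ref_prefix = [os.path.basename(os.path.splitext(x)[0]) for x in rsem_files if 'grp' in x][0]
ref_folder = os.path.join('/data', os.listdir(work_dir)[0]) if len(os.listdir(work_dir)) == 1 else '/data'
print('%s %s' % (ref_folder, ref_prefix))  # /data/rsem_ref_hg38 hg38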
|
[
"RNA",
"quantification",
"with",
"RSEM"
] |
BD2KGenomics/toil-lib
|
python
|
https://github.com/BD2KGenomics/toil-lib/blob/022a615fc3dc98fc1aaa7bfd232409962ca44fbd/src/toil_lib/tools/quantifiers.py#L48-L90
|
[
"def",
"run_rsem",
"(",
"job",
",",
"bam_id",
",",
"rsem_ref_url",
",",
"paired",
"=",
"True",
")",
":",
"work_dir",
"=",
"job",
".",
"fileStore",
".",
"getLocalTempDir",
"(",
")",
"download_url",
"(",
"job",
",",
"url",
"=",
"rsem_ref_url",
",",
"name",
"=",
"'rsem_ref.tar.gz'",
",",
"work_dir",
"=",
"work_dir",
")",
"subprocess",
".",
"check_call",
"(",
"[",
"'tar'",
",",
"'-xvf'",
",",
"os",
".",
"path",
".",
"join",
"(",
"work_dir",
",",
"'rsem_ref.tar.gz'",
")",
",",
"'-C'",
",",
"work_dir",
"]",
")",
"os",
".",
"remove",
"(",
"os",
".",
"path",
".",
"join",
"(",
"work_dir",
",",
"'rsem_ref.tar.gz'",
")",
")",
"# Determine tarball structure - based on it, ascertain folder name and rsem reference prefix",
"rsem_files",
"=",
"[",
"]",
"for",
"root",
",",
"directories",
",",
"files",
"in",
"os",
".",
"walk",
"(",
"work_dir",
")",
":",
"rsem_files",
".",
"extend",
"(",
"[",
"os",
".",
"path",
".",
"join",
"(",
"root",
",",
"x",
")",
"for",
"x",
"in",
"files",
"]",
")",
"# \"grp\" is a required RSEM extension that should exist in the RSEM reference",
"ref_prefix",
"=",
"[",
"os",
".",
"path",
".",
"basename",
"(",
"os",
".",
"path",
".",
"splitext",
"(",
"x",
")",
"[",
"0",
"]",
")",
"for",
"x",
"in",
"rsem_files",
"if",
"'grp'",
"in",
"x",
"]",
"[",
"0",
"]",
"ref_folder",
"=",
"os",
".",
"path",
".",
"join",
"(",
"'/data'",
",",
"os",
".",
"listdir",
"(",
"work_dir",
")",
"[",
"0",
"]",
")",
"if",
"len",
"(",
"os",
".",
"listdir",
"(",
"work_dir",
")",
")",
"==",
"1",
"else",
"'/data'",
"# I/O",
"job",
".",
"fileStore",
".",
"readGlobalFile",
"(",
"bam_id",
",",
"os",
".",
"path",
".",
"join",
"(",
"work_dir",
",",
"'transcriptome.bam'",
")",
")",
"output_prefix",
"=",
"'rsem'",
"# Call: RSEM",
"parameters",
"=",
"[",
"'--quiet'",
",",
"'--no-qualities'",
",",
"'-p'",
",",
"str",
"(",
"job",
".",
"cores",
")",
",",
"'--forward-prob'",
",",
"'0.5'",
",",
"'--seed-length'",
",",
"'25'",
",",
"'--fragment-length-mean'",
",",
"'-1.0'",
",",
"'--bam'",
",",
"'/data/transcriptome.bam'",
",",
"os",
".",
"path",
".",
"join",
"(",
"ref_folder",
",",
"ref_prefix",
")",
",",
"output_prefix",
"]",
"if",
"paired",
":",
"parameters",
"=",
"[",
"'--paired-end'",
"]",
"+",
"parameters",
"dockerCall",
"(",
"job",
"=",
"job",
",",
"tool",
"=",
"'quay.io/ucsc_cgl/rsem:1.2.25--d4275175cc8df36967db460b06337a14f40d2f21'",
",",
"parameters",
"=",
"parameters",
",",
"workDir",
"=",
"work_dir",
")",
"# Write to FileStore",
"gene_id",
"=",
"job",
".",
"fileStore",
".",
"writeGlobalFile",
"(",
"os",
".",
"path",
".",
"join",
"(",
"work_dir",
",",
"output_prefix",
"+",
"'.genes.results'",
")",
")",
"isoform_id",
"=",
"job",
".",
"fileStore",
".",
"writeGlobalFile",
"(",
"os",
".",
"path",
".",
"join",
"(",
"work_dir",
",",
"output_prefix",
"+",
"'.isoforms.results'",
")",
")",
"return",
"gene_id",
",",
"isoform_id"
] |
022a615fc3dc98fc1aaa7bfd232409962ca44fbd
|
test
|
run_rsem_postprocess
|
Parses RSEMs output to produce the separate .tab files (TPM, FPKM, counts) for both gene and isoform.
These are two-column files: Genes and Quantifications.
    HUGO files are also provided that have been mapped from Gencode/ENSEMBL names.
:param JobFunctionWrappingJob job: passed automatically by Toil
:param str rsem_gene_id: FileStoreID of rsem_gene_ids
:param str rsem_isoform_id: FileStoreID of rsem_isoform_ids
:return: FileStoreID from RSEM post process tarball
    :rtype: str
|
src/toil_lib/tools/quantifiers.py
|
def run_rsem_postprocess(job, rsem_gene_id, rsem_isoform_id):
"""
Parses RSEMs output to produce the separate .tab files (TPM, FPKM, counts) for both gene and isoform.
These are two-column files: Genes and Quantifications.
    HUGO files are also provided that have been mapped from Gencode/ENSEMBL names.
:param JobFunctionWrappingJob job: passed automatically by Toil
:param str rsem_gene_id: FileStoreID of rsem_gene_ids
:param str rsem_isoform_id: FileStoreID of rsem_isoform_ids
:return: FileStoreID from RSEM post process tarball
    :rtype: str
"""
work_dir = job.fileStore.getLocalTempDir()
# I/O
genes = job.fileStore.readGlobalFile(rsem_gene_id, os.path.join(work_dir, 'rsem_genes.results'))
iso = job.fileStore.readGlobalFile(rsem_isoform_id, os.path.join(work_dir, 'rsem_isoforms.results'))
# Perform HUGO gene / isoform name mapping
command = ['-g', 'rsem_genes.results', '-i', 'rsem_isoforms.results']
dockerCall(job=job, tool='quay.io/ucsc_cgl/gencode_hugo_mapping:1.0--cb4865d02f9199462e66410f515c4dabbd061e4d',
parameters=command, workDir=work_dir)
hugo_files = [os.path.join(work_dir, x) for x in ['rsem_genes.hugo.results', 'rsem_isoforms.hugo.results']]
# Create tarballs for outputs
tarball_files('rsem.tar.gz', file_paths=[os.path.join(work_dir, x) for x in [genes, iso]], output_dir=work_dir)
tarball_files('rsem_hugo.tar.gz', file_paths=[os.path.join(work_dir, x) for x in hugo_files], output_dir=work_dir)
rsem_id = job.fileStore.writeGlobalFile(os.path.join(work_dir, 'rsem.tar.gz'))
hugo_id = job.fileStore.writeGlobalFile(os.path.join(work_dir, 'rsem_hugo.tar.gz'))
return rsem_id, hugo_id
|
def run_rsem_postprocess(job, rsem_gene_id, rsem_isoform_id):
"""
Parses RSEMs output to produce the separate .tab files (TPM, FPKM, counts) for both gene and isoform.
These are two-column files: Genes and Quantifications.
    HUGO files are also provided that have been mapped from Gencode/ENSEMBL names.
:param JobFunctionWrappingJob job: passed automatically by Toil
:param str rsem_gene_id: FileStoreID of rsem_gene_ids
:param str rsem_isoform_id: FileStoreID of rsem_isoform_ids
:return: FileStoreID from RSEM post process tarball
    :rtype: str
"""
work_dir = job.fileStore.getLocalTempDir()
# I/O
genes = job.fileStore.readGlobalFile(rsem_gene_id, os.path.join(work_dir, 'rsem_genes.results'))
iso = job.fileStore.readGlobalFile(rsem_isoform_id, os.path.join(work_dir, 'rsem_isoforms.results'))
# Perform HUGO gene / isoform name mapping
command = ['-g', 'rsem_genes.results', '-i', 'rsem_isoforms.results']
dockerCall(job=job, tool='quay.io/ucsc_cgl/gencode_hugo_mapping:1.0--cb4865d02f9199462e66410f515c4dabbd061e4d',
parameters=command, workDir=work_dir)
hugo_files = [os.path.join(work_dir, x) for x in ['rsem_genes.hugo.results', 'rsem_isoforms.hugo.results']]
# Create tarballs for outputs
tarball_files('rsem.tar.gz', file_paths=[os.path.join(work_dir, x) for x in [genes, iso]], output_dir=work_dir)
tarball_files('rsem_hugo.tar.gz', file_paths=[os.path.join(work_dir, x) for x in hugo_files], output_dir=work_dir)
rsem_id = job.fileStore.writeGlobalFile(os.path.join(work_dir, 'rsem.tar.gz'))
hugo_id = job.fileStore.writeGlobalFile(os.path.join(work_dir, 'rsem_hugo.tar.gz'))
return rsem_id, hugo_id
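
A small aside on the first tarball_files call above: genes and iso are already absolute paths returned by readGlobalFile, and os.path.join discards its earlier components when a later one is absolute, so joining them with work_dir is effectively a no-op. A quick demonstration with hypothetical paths:

# Demonstration only: os.path.join ignores work_dir when the second argument is absolute.
import os

work_dir = '/tmp/toil-workdir'
genes = '/tmp/toil-workdir/rsem_genes.results'   # shape of a path readGlobalFile would return
print(os.path.join(work_dir, genes))             # /tmp/toil-workdir/rsem_genes.results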
|
[
"Parses",
"RSEMs",
"output",
"to",
"produce",
"the",
"separate",
".",
"tab",
"files",
"(",
"TPM",
"FPKM",
"counts",
")",
"for",
"both",
"gene",
"and",
"isoform",
".",
"These",
"are",
"two",
"-",
"column",
"files",
":",
"Genes",
"and",
"Quantifications",
".",
"HUGO",
"files",
"are",
"also",
"provided",
"that",
"have",
"been",
"mapped",
"from",
"Gencode",
"/",
"ENSEMBLE",
"names",
"."
] |
BD2KGenomics/toil-lib
|
python
|
https://github.com/BD2KGenomics/toil-lib/blob/022a615fc3dc98fc1aaa7bfd232409962ca44fbd/src/toil_lib/tools/quantifiers.py#L93-L119
|
[
"def",
"run_rsem_postprocess",
"(",
"job",
",",
"rsem_gene_id",
",",
"rsem_isoform_id",
")",
":",
"work_dir",
"=",
"job",
".",
"fileStore",
".",
"getLocalTempDir",
"(",
")",
"# I/O",
"genes",
"=",
"job",
".",
"fileStore",
".",
"readGlobalFile",
"(",
"rsem_gene_id",
",",
"os",
".",
"path",
".",
"join",
"(",
"work_dir",
",",
"'rsem_genes.results'",
")",
")",
"iso",
"=",
"job",
".",
"fileStore",
".",
"readGlobalFile",
"(",
"rsem_isoform_id",
",",
"os",
".",
"path",
".",
"join",
"(",
"work_dir",
",",
"'rsem_isoforms.results'",
")",
")",
"# Perform HUGO gene / isoform name mapping",
"command",
"=",
"[",
"'-g'",
",",
"'rsem_genes.results'",
",",
"'-i'",
",",
"'rsem_isoforms.results'",
"]",
"dockerCall",
"(",
"job",
"=",
"job",
",",
"tool",
"=",
"'quay.io/ucsc_cgl/gencode_hugo_mapping:1.0--cb4865d02f9199462e66410f515c4dabbd061e4d'",
",",
"parameters",
"=",
"command",
",",
"workDir",
"=",
"work_dir",
")",
"hugo_files",
"=",
"[",
"os",
".",
"path",
".",
"join",
"(",
"work_dir",
",",
"x",
")",
"for",
"x",
"in",
"[",
"'rsem_genes.hugo.results'",
",",
"'rsem_isoforms.hugo.results'",
"]",
"]",
"# Create tarballs for outputs",
"tarball_files",
"(",
"'rsem.tar.gz'",
",",
"file_paths",
"=",
"[",
"os",
".",
"path",
".",
"join",
"(",
"work_dir",
",",
"x",
")",
"for",
"x",
"in",
"[",
"genes",
",",
"iso",
"]",
"]",
",",
"output_dir",
"=",
"work_dir",
")",
"tarball_files",
"(",
"'rsem_hugo.tar.gz'",
",",
"file_paths",
"=",
"[",
"os",
".",
"path",
".",
"join",
"(",
"work_dir",
",",
"x",
")",
"for",
"x",
"in",
"hugo_files",
"]",
",",
"output_dir",
"=",
"work_dir",
")",
"rsem_id",
"=",
"job",
".",
"fileStore",
".",
"writeGlobalFile",
"(",
"os",
".",
"path",
".",
"join",
"(",
"work_dir",
",",
"'rsem.tar.gz'",
")",
")",
"hugo_id",
"=",
"job",
".",
"fileStore",
".",
"writeGlobalFile",
"(",
"os",
".",
"path",
".",
"join",
"(",
"work_dir",
",",
"'rsem_hugo.tar.gz'",
")",
")",
"return",
"rsem_id",
",",
"hugo_id"
] |
022a615fc3dc98fc1aaa7bfd232409962ca44fbd
|
test
|
switch
|
Set/clear boolean field value for model object
|
boolean_switch/views.py
|
def switch(request, url):
"""
Set/clear boolean field value for model object
"""
app_label, model_name, object_id, field = url.split('/')
try:
# django >= 1.7
from django.apps import apps
model = apps.get_model(app_label, model_name)
except ImportError:
# django < 1.7
from django.db.models import get_model
model = get_model(app_label, model_name)
object = get_object_or_404(model, pk=object_id)
perm_str = '%s.change_%s' % (app_label, model.__name__)
# check only model
if not request.user.has_perm(perm_str.lower()):
raise PermissionDenied
setattr(object, field, getattr(object, field) == 0)
object.save()
if request.is_ajax():
return JsonResponse({'object_id': object.pk, 'field': field, 'value': getattr(object, field)})
else:
msg = _(u'flag %(field)s was changed for %(object)s') % {'field': field, 'object': object}
messages.success(request, msg)
return HttpResponseRedirect(request.META.get('HTTP_REFERER', '/'))
|
def switch(request, url):
"""
Set/clear boolean field value for model object
"""
app_label, model_name, object_id, field = url.split('/')
try:
# django >= 1.7
from django.apps import apps
model = apps.get_model(app_label, model_name)
except ImportError:
# django < 1.7
from django.db.models import get_model
model = get_model(app_label, model_name)
object = get_object_or_404(model, pk=object_id)
perm_str = '%s.change_%s' % (app_label, model.__name__)
# check only model
if not request.user.has_perm(perm_str.lower()):
raise PermissionDenied
setattr(object, field, getattr(object, field) == 0)
object.save()
if request.is_ajax():
return JsonResponse({'object_id': object.pk, 'field': field, 'value': getattr(object, field)})
else:
msg = _(u'flag %(field)s was changed for %(object)s') % {'field': field, 'object': object}
messages.success(request, msg)
return HttpResponseRedirect(request.META.get('HTTP_REFERER', '/'))
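
The view derives everything from the URL tail, which it expects in the form app_label/model_name/object_id/field, and it flips the flag by comparing the current value with 0 (False == 0, True == 1 in Python). A tiny illustration with made-up values:

# Made-up example of the URL tail this view unpacks, plus the truth-value flip it relies on.
url = 'blog/post/42/is_published'
app_label, model_name, object_id, field = url.split('/')
print('%s %s %s %s' % (app_label, model_name, object_id, field))  # blog post 42 is_published

for current in (True, False):
    print('%s -> %s' % (current, current == 0))  # True -> False, False -> True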
|
[
"Set",
"/",
"clear",
"boolean",
"field",
"value",
"for",
"model",
"object"
] |
makeev/django-boolean-switch
|
python
|
https://github.com/makeev/django-boolean-switch/blob/ed740dbb56d0bb1ad20d4b1e124055283b0e932f/boolean_switch/views.py#L9-L37
|
[
"def",
"switch",
"(",
"request",
",",
"url",
")",
":",
"app_label",
",",
"model_name",
",",
"object_id",
",",
"field",
"=",
"url",
".",
"split",
"(",
"'/'",
")",
"try",
":",
"# django >= 1.7",
"from",
"django",
".",
"apps",
"import",
"apps",
"model",
"=",
"apps",
".",
"get_model",
"(",
"app_label",
",",
"model_name",
")",
"except",
"ImportError",
":",
"# django < 1.7",
"from",
"django",
".",
"db",
".",
"models",
"import",
"get_model",
"model",
"=",
"get_model",
"(",
"app_label",
",",
"model_name",
")",
"object",
"=",
"get_object_or_404",
"(",
"model",
",",
"pk",
"=",
"object_id",
")",
"perm_str",
"=",
"'%s.change_%s'",
"%",
"(",
"app_label",
",",
"model",
".",
"__name__",
")",
"# check only model",
"if",
"not",
"request",
".",
"user",
".",
"has_perm",
"(",
"perm_str",
".",
"lower",
"(",
")",
")",
":",
"raise",
"PermissionDenied",
"setattr",
"(",
"object",
",",
"field",
",",
"getattr",
"(",
"object",
",",
"field",
")",
"==",
"0",
")",
"object",
".",
"save",
"(",
")",
"if",
"request",
".",
"is_ajax",
"(",
")",
":",
"return",
"JsonResponse",
"(",
"{",
"'object_id'",
":",
"object",
".",
"pk",
",",
"'field'",
":",
"field",
",",
"'value'",
":",
"getattr",
"(",
"object",
",",
"field",
")",
"}",
")",
"else",
":",
"msg",
"=",
"_",
"(",
"u'flag %(field)s was changed for %(object)s'",
")",
"%",
"{",
"'field'",
":",
"field",
",",
"'object'",
":",
"object",
"}",
"messages",
".",
"success",
"(",
"request",
",",
"msg",
")",
"return",
"HttpResponseRedirect",
"(",
"request",
".",
"META",
".",
"get",
"(",
"'HTTP_REFERER'",
",",
"'/'",
")",
")"
] |
ed740dbb56d0bb1ad20d4b1e124055283b0e932f
|
test
|
SARPlus.fit
|
Main fit method for SAR. Expects the dataframes to have row_id, col_id columns which are indexes,
i.e. contain the sequential integer index of the original alphanumeric user and item IDs.
Dataframe also contains rating and timestamp as floats; timestamp is in seconds since Epoch by default.
Arguments:
df (pySpark.DataFrame): input dataframe which contains the index of users and items.
|
python/pysarplus/SARPlus.py
|
def fit(
self,
df,
similarity_type="jaccard",
time_decay_coefficient=30,
time_now=None,
timedecay_formula=False,
threshold=1,
):
"""Main fit method for SAR. Expects the dataframes to have row_id, col_id columns which are indexes,
i.e. contain the sequential integer index of the original alphanumeric user and item IDs.
Dataframe also contains rating and timestamp as floats; timestamp is in seconds since Epoch by default.
Arguments:
df (pySpark.DataFrame): input dataframe which contains the index of users and items. """
        # threshold - items below this number get set to zero in cooccurrence counts
assert threshold > 0
df.createOrReplaceTempView("{prefix}df_train_input".format(**self.header))
if timedecay_formula:
# WARNING: previously we would take the last value in training dataframe and set it
# as a matrix U element
# for each user-item pair. Now with time decay, we compute a sum over ratings given
# by a user in the case
# when T=np.inf, so user gets a cumulative sum of ratings for a particular item and
# not the last rating.
# Time Decay
# do a group by on user item pairs and apply the formula for time decay there
# Time T parameter is in days and input time is in seconds
# so we do dt/60/(T*24*60)=dt/(T*24*3600)
            # the following is the query which we want to run
query = self.f(
"""
SELECT
{col_user}, {col_item},
SUM({col_rating} * EXP(-log(2) * (latest_timestamp - CAST({col_timestamp} AS long)) / ({time_decay_coefficient} * 3600 * 24))) as {col_rating}
FROM {prefix}df_train_input,
(SELECT CAST(MAX({col_timestamp}) AS long) latest_timestamp FROM {prefix}df_train_input)
GROUP BY {col_user}, {col_item}
CLUSTER BY {col_user}
""",
time_now=time_now,
time_decay_coefficient=time_decay_coefficient,
)
# replace with timedecayed version
df = self.spark.sql(query)
else:
            # since SQL is case-insensitive, this check needs to be performed case-insensitively as well
if self.header['col_timestamp'].lower() in [s.name.lower() for s in df.schema]:
# we need to de-duplicate items by using the latest item
query = self.f(
"""
SELECT {col_user}, {col_item}, {col_rating}
FROM
(
SELECT
{col_user}, {col_item}, {col_rating},
ROW_NUMBER() OVER (PARTITION BY {col_user}, {col_item} ORDER BY {col_timestamp} DESC) latest
FROM {prefix}df_train_input
)
WHERE latest = 1
"""
)
df = self.spark.sql(query)
df.createOrReplaceTempView(self.f("{prefix}df_train"))
log.info("sarplus.fit 1/2: compute item cooccurences...")
# compute cooccurrence above minimum threshold
query = self.f(
"""
SELECT A.{col_item} i1, B.{col_item} i2, COUNT(*) value
FROM {prefix}df_train A INNER JOIN {prefix}df_train B
ON A.{col_user} = B.{col_user} AND A.{col_item} <= b.{col_item}
GROUP BY A.{col_item}, B.{col_item}
HAVING COUNT(*) >= {threshold}
CLUSTER BY i1, i2
""",
threshold=threshold,
)
item_cooccurrence = self.spark.sql(query)
item_cooccurrence.write.mode("overwrite").saveAsTable(
self.f("{prefix}item_cooccurrence")
)
# compute the diagonal used later for Jaccard and Lift
if similarity_type == SIM_LIFT or similarity_type == SIM_JACCARD:
item_marginal = self.spark.sql(
self.f(
"SELECT i1 i, value AS margin FROM {prefix}item_cooccurrence WHERE i1 = i2"
)
)
item_marginal.createOrReplaceTempView(self.f("{prefix}item_marginal"))
if similarity_type == SIM_COOCCUR:
self.item_similarity = item_cooccurrence
elif similarity_type == SIM_JACCARD:
query = self.f(
"""
SELECT i1, i2, value / (M1.margin + M2.margin - value) AS value
FROM {prefix}item_cooccurrence A
INNER JOIN {prefix}item_marginal M1 ON A.i1 = M1.i
INNER JOIN {prefix}item_marginal M2 ON A.i2 = M2.i
CLUSTER BY i1, i2
"""
)
self.item_similarity = self.spark.sql(query)
elif similarity_type == SIM_LIFT:
query = self.f(
"""
SELECT i1, i2, value / (M1.margin * M2.margin) AS value
FROM {prefix}item_cooccurrence A
INNER JOIN {prefix}item_marginal M1 ON A.i1 = M1.i
INNER JOIN {prefix}item_marginal M2 ON A.i2 = M2.i
CLUSTER BY i1, i2
"""
)
self.item_similarity = self.spark.sql(query)
else:
raise ValueError("Unknown similarity type: {0}".format(similarity_type))
# store upper triangular
log.info("sarplus.fit 2/2: compute similiarity metric %s..." % similarity_type)
self.item_similarity.write.mode("overwrite").saveAsTable(
self.f("{prefix}item_similarity_upper")
)
# expand upper triangular to full matrix
query = self.f(
"""
SELECT i1, i2, value
FROM
(
(SELECT i1, i2, value FROM {prefix}item_similarity_upper)
UNION ALL
(SELECT i2 i1, i1 i2, value FROM {prefix}item_similarity_upper WHERE i1 <> i2)
)
CLUSTER BY i1
"""
)
self.item_similarity = self.spark.sql(query)
self.item_similarity.write.mode("overwrite").saveAsTable(
self.f("{prefix}item_similarity")
)
# free space
self.spark.sql(self.f("DROP TABLE {prefix}item_cooccurrence"))
self.spark.sql(self.f("DROP TABLE {prefix}item_similarity_upper"))
self.item_similarity = self.spark.table(self.f("{prefix}item_similarity"))
|
def fit(
self,
df,
similarity_type="jaccard",
time_decay_coefficient=30,
time_now=None,
timedecay_formula=False,
threshold=1,
):
"""Main fit method for SAR. Expects the dataframes to have row_id, col_id columns which are indexes,
i.e. contain the sequential integer index of the original alphanumeric user and item IDs.
Dataframe also contains rating and timestamp as floats; timestamp is in seconds since Epoch by default.
Arguments:
df (pySpark.DataFrame): input dataframe which contains the index of users and items. """
        # threshold - items below this number get set to zero in cooccurrence counts
assert threshold > 0
df.createOrReplaceTempView("{prefix}df_train_input".format(**self.header))
if timedecay_formula:
# WARNING: previously we would take the last value in training dataframe and set it
# as a matrix U element
# for each user-item pair. Now with time decay, we compute a sum over ratings given
# by a user in the case
# when T=np.inf, so user gets a cumulative sum of ratings for a particular item and
# not the last rating.
# Time Decay
# do a group by on user item pairs and apply the formula for time decay there
# Time T parameter is in days and input time is in seconds
# so we do dt/60/(T*24*60)=dt/(T*24*3600)
            # the following is the query which we want to run
query = self.f(
"""
SELECT
{col_user}, {col_item},
SUM({col_rating} * EXP(-log(2) * (latest_timestamp - CAST({col_timestamp} AS long)) / ({time_decay_coefficient} * 3600 * 24))) as {col_rating}
FROM {prefix}df_train_input,
(SELECT CAST(MAX({col_timestamp}) AS long) latest_timestamp FROM {prefix}df_train_input)
GROUP BY {col_user}, {col_item}
CLUSTER BY {col_user}
""",
time_now=time_now,
time_decay_coefficient=time_decay_coefficient,
)
# replace with timedecayed version
df = self.spark.sql(query)
else:
            # since SQL is case-insensitive, this check needs to be performed case-insensitively as well
if self.header['col_timestamp'].lower() in [s.name.lower() for s in df.schema]:
# we need to de-duplicate items by using the latest item
query = self.f(
"""
SELECT {col_user}, {col_item}, {col_rating}
FROM
(
SELECT
{col_user}, {col_item}, {col_rating},
ROW_NUMBER() OVER (PARTITION BY {col_user}, {col_item} ORDER BY {col_timestamp} DESC) latest
FROM {prefix}df_train_input
)
WHERE latest = 1
"""
)
df = self.spark.sql(query)
df.createOrReplaceTempView(self.f("{prefix}df_train"))
log.info("sarplus.fit 1/2: compute item cooccurences...")
# compute cooccurrence above minimum threshold
query = self.f(
"""
SELECT A.{col_item} i1, B.{col_item} i2, COUNT(*) value
FROM {prefix}df_train A INNER JOIN {prefix}df_train B
ON A.{col_user} = B.{col_user} AND A.{col_item} <= b.{col_item}
GROUP BY A.{col_item}, B.{col_item}
HAVING COUNT(*) >= {threshold}
CLUSTER BY i1, i2
""",
threshold=threshold,
)
item_cooccurrence = self.spark.sql(query)
item_cooccurrence.write.mode("overwrite").saveAsTable(
self.f("{prefix}item_cooccurrence")
)
# compute the diagonal used later for Jaccard and Lift
if similarity_type == SIM_LIFT or similarity_type == SIM_JACCARD:
item_marginal = self.spark.sql(
self.f(
"SELECT i1 i, value AS margin FROM {prefix}item_cooccurrence WHERE i1 = i2"
)
)
item_marginal.createOrReplaceTempView(self.f("{prefix}item_marginal"))
if similarity_type == SIM_COOCCUR:
self.item_similarity = item_cooccurrence
elif similarity_type == SIM_JACCARD:
query = self.f(
"""
SELECT i1, i2, value / (M1.margin + M2.margin - value) AS value
FROM {prefix}item_cooccurrence A
INNER JOIN {prefix}item_marginal M1 ON A.i1 = M1.i
INNER JOIN {prefix}item_marginal M2 ON A.i2 = M2.i
CLUSTER BY i1, i2
"""
)
self.item_similarity = self.spark.sql(query)
elif similarity_type == SIM_LIFT:
query = self.f(
"""
SELECT i1, i2, value / (M1.margin * M2.margin) AS value
FROM {prefix}item_cooccurrence A
INNER JOIN {prefix}item_marginal M1 ON A.i1 = M1.i
INNER JOIN {prefix}item_marginal M2 ON A.i2 = M2.i
CLUSTER BY i1, i2
"""
)
self.item_similarity = self.spark.sql(query)
else:
raise ValueError("Unknown similarity type: {0}".format(similarity_type))
# store upper triangular
log.info("sarplus.fit 2/2: compute similiarity metric %s..." % similarity_type)
self.item_similarity.write.mode("overwrite").saveAsTable(
self.f("{prefix}item_similarity_upper")
)
# expand upper triangular to full matrix
query = self.f(
"""
SELECT i1, i2, value
FROM
(
(SELECT i1, i2, value FROM {prefix}item_similarity_upper)
UNION ALL
(SELECT i2 i1, i1 i2, value FROM {prefix}item_similarity_upper WHERE i1 <> i2)
)
CLUSTER BY i1
"""
)
self.item_similarity = self.spark.sql(query)
self.item_similarity.write.mode("overwrite").saveAsTable(
self.f("{prefix}item_similarity")
)
# free space
self.spark.sql(self.f("DROP TABLE {prefix}item_cooccurrence"))
self.spark.sql(self.f("DROP TABLE {prefix}item_similarity_upper"))
self.item_similarity = self.spark.table(self.f("{prefix}item_similarity"))
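
The SUM(... * EXP(-log(2) * age / (T * 3600 * 24))) term in the time-decay query above is a half-life weighting: an event exactly time_decay_coefficient days older than the newest timestamp contributes half as much as a fresh one. A standalone numeric check (not part of sarplus):

# Standalone numeric check of the half-life weighting used in the time-decay query above.
import math

def decay_weight(age_seconds, half_life_days=30):
    # Mirrors EXP(-log(2) * age / (half_life_days * 3600 * 24)) from the SQL.
    return math.exp(-math.log(2) * age_seconds / (half_life_days * 3600 * 24))

for days in (0, 30, 60, 90):
    print('%d days -> %.3f' % (days, decay_weight(days * 24 * 3600)))
# 0 days -> 1.000, 30 days -> 0.500, 60 days -> 0.250, 90 days -> 0.125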
|
[
"Main",
"fit",
"method",
"for",
"SAR",
".",
"Expects",
"the",
"dataframes",
"to",
"have",
"row_id",
"col_id",
"columns",
"which",
"are",
"indexes",
"i",
".",
"e",
".",
"contain",
"the",
"sequential",
"integer",
"index",
"of",
"the",
"original",
"alphanumeric",
"user",
"and",
"item",
"IDs",
".",
"Dataframe",
"also",
"contains",
"rating",
"and",
"timestamp",
"as",
"floats",
";",
"timestamp",
"is",
"in",
"seconds",
"since",
"Epoch",
"by",
"default",
"."
] |
eisber/sarplus
|
python
|
https://github.com/eisber/sarplus/blob/945a1182e00a8bf70414fc3600086316701777f9/python/pysarplus/SARPlus.py#L48-L207
|
[
"def",
"fit",
"(",
"self",
",",
"df",
",",
"similarity_type",
"=",
"\"jaccard\"",
",",
"time_decay_coefficient",
"=",
"30",
",",
"time_now",
"=",
"None",
",",
"timedecay_formula",
"=",
"False",
",",
"threshold",
"=",
"1",
",",
")",
":",
"# threshold - items below this number get set to zero in coocurrence counts",
"assert",
"threshold",
">",
"0",
"df",
".",
"createOrReplaceTempView",
"(",
"\"{prefix}df_train_input\"",
".",
"format",
"(",
"*",
"*",
"self",
".",
"header",
")",
")",
"if",
"timedecay_formula",
":",
"# WARNING: previously we would take the last value in training dataframe and set it",
"# as a matrix U element",
"# for each user-item pair. Now with time decay, we compute a sum over ratings given",
"# by a user in the case",
"# when T=np.inf, so user gets a cumulative sum of ratings for a particular item and",
"# not the last rating.",
"# Time Decay",
"# do a group by on user item pairs and apply the formula for time decay there",
"# Time T parameter is in days and input time is in seconds",
"# so we do dt/60/(T*24*60)=dt/(T*24*3600)",
"# the folling is the query which we want to run",
"query",
"=",
"self",
".",
"f",
"(",
"\"\"\"\n SELECT\n {col_user}, {col_item}, \n SUM({col_rating} * EXP(-log(2) * (latest_timestamp - CAST({col_timestamp} AS long)) / ({time_decay_coefficient} * 3600 * 24))) as {col_rating}\n FROM {prefix}df_train_input,\n (SELECT CAST(MAX({col_timestamp}) AS long) latest_timestamp FROM {prefix}df_train_input)\n GROUP BY {col_user}, {col_item} \n CLUSTER BY {col_user} \n \"\"\"",
",",
"time_now",
"=",
"time_now",
",",
"time_decay_coefficient",
"=",
"time_decay_coefficient",
",",
")",
"# replace with timedecayed version",
"df",
"=",
"self",
".",
"spark",
".",
"sql",
"(",
"query",
")",
"else",
":",
"# since SQL is case insensitive, this check needs to be performed similar",
"if",
"self",
".",
"header",
"[",
"'col_timestamp'",
"]",
".",
"lower",
"(",
")",
"in",
"[",
"s",
".",
"name",
".",
"lower",
"(",
")",
"for",
"s",
"in",
"df",
".",
"schema",
"]",
":",
"# we need to de-duplicate items by using the latest item",
"query",
"=",
"self",
".",
"f",
"(",
"\"\"\"\n SELECT {col_user}, {col_item}, {col_rating}\n FROM\n (\n SELECT\n {col_user}, {col_item}, {col_rating}, \n ROW_NUMBER() OVER (PARTITION BY {col_user}, {col_item} ORDER BY {col_timestamp} DESC) latest\n FROM {prefix}df_train_input\n )\n WHERE latest = 1\n \"\"\"",
")",
"df",
"=",
"self",
".",
"spark",
".",
"sql",
"(",
"query",
")",
"df",
".",
"createOrReplaceTempView",
"(",
"self",
".",
"f",
"(",
"\"{prefix}df_train\"",
")",
")",
"log",
".",
"info",
"(",
"\"sarplus.fit 1/2: compute item cooccurences...\"",
")",
"# compute cooccurrence above minimum threshold",
"query",
"=",
"self",
".",
"f",
"(",
"\"\"\"\n SELECT A.{col_item} i1, B.{col_item} i2, COUNT(*) value\n FROM {prefix}df_train A INNER JOIN {prefix}df_train B\n ON A.{col_user} = B.{col_user} AND A.{col_item} <= b.{col_item} \n GROUP BY A.{col_item}, B.{col_item}\n HAVING COUNT(*) >= {threshold}\n CLUSTER BY i1, i2\n \"\"\"",
",",
"threshold",
"=",
"threshold",
",",
")",
"item_cooccurrence",
"=",
"self",
".",
"spark",
".",
"sql",
"(",
"query",
")",
"item_cooccurrence",
".",
"write",
".",
"mode",
"(",
"\"overwrite\"",
")",
".",
"saveAsTable",
"(",
"self",
".",
"f",
"(",
"\"{prefix}item_cooccurrence\"",
")",
")",
"# compute the diagonal used later for Jaccard and Lift",
"if",
"similarity_type",
"==",
"SIM_LIFT",
"or",
"similarity_type",
"==",
"SIM_JACCARD",
":",
"item_marginal",
"=",
"self",
".",
"spark",
".",
"sql",
"(",
"self",
".",
"f",
"(",
"\"SELECT i1 i, value AS margin FROM {prefix}item_cooccurrence WHERE i1 = i2\"",
")",
")",
"item_marginal",
".",
"createOrReplaceTempView",
"(",
"self",
".",
"f",
"(",
"\"{prefix}item_marginal\"",
")",
")",
"if",
"similarity_type",
"==",
"SIM_COOCCUR",
":",
"self",
".",
"item_similarity",
"=",
"item_cooccurrence",
"elif",
"similarity_type",
"==",
"SIM_JACCARD",
":",
"query",
"=",
"self",
".",
"f",
"(",
"\"\"\"\n SELECT i1, i2, value / (M1.margin + M2.margin - value) AS value\n FROM {prefix}item_cooccurrence A \n INNER JOIN {prefix}item_marginal M1 ON A.i1 = M1.i \n INNER JOIN {prefix}item_marginal M2 ON A.i2 = M2.i\n CLUSTER BY i1, i2\n \"\"\"",
")",
"self",
".",
"item_similarity",
"=",
"self",
".",
"spark",
".",
"sql",
"(",
"query",
")",
"elif",
"similarity_type",
"==",
"SIM_LIFT",
":",
"query",
"=",
"self",
".",
"f",
"(",
"\"\"\"\n SELECT i1, i2, value / (M1.margin * M2.margin) AS value\n FROM {prefix}item_cooccurrence A \n INNER JOIN {prefix}item_marginal M1 ON A.i1 = M1.i \n INNER JOIN {prefix}item_marginal M2 ON A.i2 = M2.i\n CLUSTER BY i1, i2\n \"\"\"",
")",
"self",
".",
"item_similarity",
"=",
"self",
".",
"spark",
".",
"sql",
"(",
"query",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"Unknown similarity type: {0}\"",
".",
"format",
"(",
"similarity_type",
")",
")",
"# store upper triangular",
"log",
".",
"info",
"(",
"\"sarplus.fit 2/2: compute similiarity metric %s...\"",
"%",
"similarity_type",
")",
"self",
".",
"item_similarity",
".",
"write",
".",
"mode",
"(",
"\"overwrite\"",
")",
".",
"saveAsTable",
"(",
"self",
".",
"f",
"(",
"\"{prefix}item_similarity_upper\"",
")",
")",
"# expand upper triangular to full matrix",
"query",
"=",
"self",
".",
"f",
"(",
"\"\"\"\n SELECT i1, i2, value\n FROM\n (\n (SELECT i1, i2, value FROM {prefix}item_similarity_upper)\n UNION ALL\n (SELECT i2 i1, i1 i2, value FROM {prefix}item_similarity_upper WHERE i1 <> i2)\n )\n CLUSTER BY i1\n \"\"\"",
")",
"self",
".",
"item_similarity",
"=",
"self",
".",
"spark",
".",
"sql",
"(",
"query",
")",
"self",
".",
"item_similarity",
".",
"write",
".",
"mode",
"(",
"\"overwrite\"",
")",
".",
"saveAsTable",
"(",
"self",
".",
"f",
"(",
"\"{prefix}item_similarity\"",
")",
")",
"# free space",
"self",
".",
"spark",
".",
"sql",
"(",
"self",
".",
"f",
"(",
"\"DROP TABLE {prefix}item_cooccurrence\"",
")",
")",
"self",
".",
"spark",
".",
"sql",
"(",
"self",
".",
"f",
"(",
"\"DROP TABLE {prefix}item_similarity_upper\"",
")",
")",
"self",
".",
"item_similarity",
"=",
"self",
".",
"spark",
".",
"table",
"(",
"self",
".",
"f",
"(",
"\"{prefix}item_similarity\"",
")",
")"
] |
945a1182e00a8bf70414fc3600086316701777f9
|
test
|
SARPlus.get_user_affinity
|
Prepare test set for C++ SAR prediction code.
Find all items the test users have seen in the past.
Arguments:
test (pySpark.DataFrame): input dataframe which contains test users.
|
python/pysarplus/SARPlus.py
|
def get_user_affinity(self, test):
"""Prepare test set for C++ SAR prediction code.
Find all items the test users have seen in the past.
Arguments:
test (pySpark.DataFrame): input dataframe which contains test users.
"""
test.createOrReplaceTempView(self.f("{prefix}df_test"))
query = self.f(
"SELECT DISTINCT {col_user} FROM {prefix}df_test CLUSTER BY {col_user}"
)
df_test_users = self.spark.sql(query)
df_test_users.write.mode("overwrite").saveAsTable(
self.f("{prefix}df_test_users")
)
query = self.f(
"""
SELECT a.{col_user}, a.{col_item}, CAST(a.{col_rating} AS double) {col_rating}
FROM {prefix}df_train a INNER JOIN {prefix}df_test_users b ON a.{col_user} = b.{col_user}
DISTRIBUTE BY {col_user}
SORT BY {col_user}, {col_item}
"""
)
return self.spark.sql(query)
|
def get_user_affinity(self, test):
"""Prepare test set for C++ SAR prediction code.
Find all items the test users have seen in the past.
Arguments:
test (pySpark.DataFrame): input dataframe which contains test users.
"""
test.createOrReplaceTempView(self.f("{prefix}df_test"))
query = self.f(
"SELECT DISTINCT {col_user} FROM {prefix}df_test CLUSTER BY {col_user}"
)
df_test_users = self.spark.sql(query)
df_test_users.write.mode("overwrite").saveAsTable(
self.f("{prefix}df_test_users")
)
query = self.f(
"""
SELECT a.{col_user}, a.{col_item}, CAST(a.{col_rating} AS double) {col_rating}
FROM {prefix}df_train a INNER JOIN {prefix}df_test_users b ON a.{col_user} = b.{col_user}
DISTRIBUTE BY {col_user}
SORT BY {col_user}, {col_item}
"""
)
return self.spark.sql(query)
|
[
"Prepare",
"test",
"set",
"for",
"C",
"++",
"SAR",
"prediction",
"code",
".",
"Find",
"all",
"items",
"the",
"test",
"users",
"have",
"seen",
"in",
"the",
"past",
"."
] |
eisber/sarplus
|
python
|
https://github.com/eisber/sarplus/blob/945a1182e00a8bf70414fc3600086316701777f9/python/pysarplus/SARPlus.py#L209-L236
|
[
"def",
"get_user_affinity",
"(",
"self",
",",
"test",
")",
":",
"test",
".",
"createOrReplaceTempView",
"(",
"self",
".",
"f",
"(",
"\"{prefix}df_test\"",
")",
")",
"query",
"=",
"self",
".",
"f",
"(",
"\"SELECT DISTINCT {col_user} FROM {prefix}df_test CLUSTER BY {col_user}\"",
")",
"df_test_users",
"=",
"self",
".",
"spark",
".",
"sql",
"(",
"query",
")",
"df_test_users",
".",
"write",
".",
"mode",
"(",
"\"overwrite\"",
")",
".",
"saveAsTable",
"(",
"self",
".",
"f",
"(",
"\"{prefix}df_test_users\"",
")",
")",
"query",
"=",
"self",
".",
"f",
"(",
"\"\"\"\n SELECT a.{col_user}, a.{col_item}, CAST(a.{col_rating} AS double) {col_rating}\n FROM {prefix}df_train a INNER JOIN {prefix}df_test_users b ON a.{col_user} = b.{col_user} \n DISTRIBUTE BY {col_user}\n SORT BY {col_user}, {col_item} \n \"\"\"",
")",
"return",
"self",
".",
"spark",
".",
"sql",
"(",
"query",
")"
] |
945a1182e00a8bf70414fc3600086316701777f9
|
test
|
SARPlus.recommend_k_items_slow
|
Recommend top K items for all users which are in the test set.
Args:
test: test Spark dataframe
top_k: top n items to return
remove_seen: remove items test users have already seen in the past from the recommended set.
|
python/pysarplus/SARPlus.py
|
def recommend_k_items_slow(self, test, top_k=10, remove_seen=True):
"""Recommend top K items for all users which are in the test set.
Args:
test: test Spark dataframe
top_k: top n items to return
remove_seen: remove items test users have already seen in the past from the recommended set.
"""
# TODO: remove seen
if remove_seen:
raise ValueError("Not implemented")
self.get_user_affinity(test)\
.write.mode("overwrite")\
.saveAsTable(self.f("{prefix}user_affinity"))
# user_affinity * item_similarity
# filter top-k
query = self.f(
"""
SELECT {col_user}, {col_item}, score
FROM
(
SELECT df.{col_user},
S.i2 {col_item},
SUM(df.{col_rating} * S.value) AS score,
row_number() OVER(PARTITION BY {col_user} ORDER BY SUM(df.{col_rating} * S.value) DESC) rank
FROM
{prefix}user_affinity df,
{prefix}item_similarity S
WHERE df.{col_item} = S.i1
GROUP BY df.{col_user}, S.i2
)
WHERE rank <= {top_k}
""",
top_k=top_k,
)
return self.spark.sql(query)
|
def recommend_k_items_slow(self, test, top_k=10, remove_seen=True):
"""Recommend top K items for all users which are in the test set.
Args:
test: test Spark dataframe
top_k: top n items to return
remove_seen: remove items test users have already seen in the past from the recommended set.
"""
# TODO: remove seen
if remove_seen:
raise ValueError("Not implemented")
self.get_user_affinity(test)\
.write.mode("overwrite")\
.saveAsTable(self.f("{prefix}user_affinity"))
# user_affinity * item_similarity
# filter top-k
query = self.f(
"""
SELECT {col_user}, {col_item}, score
FROM
(
SELECT df.{col_user},
S.i2 {col_item},
SUM(df.{col_rating} * S.value) AS score,
row_number() OVER(PARTITION BY {col_user} ORDER BY SUM(df.{col_rating} * S.value) DESC) rank
FROM
{prefix}user_affinity df,
{prefix}item_similarity S
WHERE df.{col_item} = S.i1
GROUP BY df.{col_user}, S.i2
)
WHERE rank <= {top_k}
""",
top_k=top_k,
)
return self.spark.sql(query)
|
[
"Recommend",
"top",
"K",
"items",
"for",
"all",
"users",
"which",
"are",
"in",
"the",
"test",
"set",
"."
] |
eisber/sarplus
|
python
|
https://github.com/eisber/sarplus/blob/945a1182e00a8bf70414fc3600086316701777f9/python/pysarplus/SARPlus.py#L323-L362
|
[
"def",
"recommend_k_items_slow",
"(",
"self",
",",
"test",
",",
"top_k",
"=",
"10",
",",
"remove_seen",
"=",
"True",
")",
":",
"# TODO: remove seen",
"if",
"remove_seen",
":",
"raise",
"ValueError",
"(",
"\"Not implemented\"",
")",
"self",
".",
"get_user_affinity",
"(",
"test",
")",
".",
"write",
".",
"mode",
"(",
"\"overwrite\"",
")",
".",
"saveAsTable",
"(",
"self",
".",
"f",
"(",
"\"{prefix}user_affinity\"",
")",
")",
"# user_affinity * item_similarity",
"# filter top-k",
"query",
"=",
"self",
".",
"f",
"(",
"\"\"\"\n SELECT {col_user}, {col_item}, score\n FROM\n (\n SELECT df.{col_user},\n S.i2 {col_item},\n SUM(df.{col_rating} * S.value) AS score,\n row_number() OVER(PARTITION BY {col_user} ORDER BY SUM(df.{col_rating} * S.value) DESC) rank\n FROM \n {prefix}user_affinity df, \n {prefix}item_similarity S\n WHERE df.{col_item} = S.i1\n GROUP BY df.{col_user}, S.i2\n )\n WHERE rank <= {top_k} \n \"\"\"",
",",
"top_k",
"=",
"top_k",
",",
")",
"return",
"self",
".",
"spark",
".",
"sql",
"(",
"query",
")"
] |
945a1182e00a8bf70414fc3600086316701777f9
|
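The scoring in recommend_k_items_slow above is a join of user affinity with item similarity, a sum per (user, item) pair, and a top-k cut via a window function. The sketch below reproduces that computation with the DataFrame API on tiny hypothetical inputs; the column names and values are assumed for illustration and are not taken from the records.

from pyspark.sql import SparkSession, Window, functions as F

spark = SparkSession.builder.master("local[1]").appName("sar-topk-sketch").getOrCreate()

user_affinity = spark.createDataFrame(
    [("u1", "i1", 3.0), ("u1", "i2", 1.0)], ["user", "item", "rating"]
)
item_similarity = spark.createDataFrame(
    [("i1", "i1", 1.0), ("i1", "i3", 0.5), ("i2", "i3", 0.2)], ["i1", "i2", "value"]
)

# score(user, i2) = sum(rating * similarity) over the items the user has seen
scores = (
    user_affinity.join(item_similarity, user_affinity.item == item_similarity.i1)
    .groupBy("user", "i2")
    .agg(F.sum(F.col("rating") * F.col("value")).alias("score"))
)

# keep the top_k rows per user, mirroring the row_number() window in the SQL
top_k = 10
ranked = scores.withColumn(
    "rank", F.row_number().over(Window.partitionBy("user").orderBy(F.desc("score")))
).where(F.col("rank") <= top_k)
ranked.show()
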
test
|
WebsocketHandler.setauth
|
setauth can be used during runtime to make sure that authentication is reset.
it can be used when changing passwords/apikeys to make sure reconnects succeed
|
connectordb/_websocket.py
|
def setauth(self,basic_auth):
""" setauth can be used during runtime to make sure that authentication is reset.
it can be used when changing passwords/apikeys to make sure reconnects succeed """
self.headers = []
# If we have auth
if basic_auth is not None:
# we use a cheap hack to get the basic auth header out of the auth object.
# This snippet ends up with us having an array of the necessary headers
# to perform authentication.
class auth_extractor():
def __init__(self):
self.headers = {}
extractor = auth_extractor()
basic_auth(extractor)
for header in extractor.headers:
self.headers.append("%s: %s" % (header, extractor.headers[header]))
|
def setauth(self,basic_auth):
""" setauth can be used during runtime to make sure that authentication is reset.
it can be used when changing passwords/apikeys to make sure reconnects succeed """
self.headers = []
# If we have auth
if basic_auth is not None:
# we use a cheap hack to get the basic auth header out of the auth object.
# This snippet ends up with us having an array of the necessary headers
# to perform authentication.
class auth_extractor():
def __init__(self):
self.headers = {}
extractor = auth_extractor()
basic_auth(extractor)
for header in extractor.headers:
self.headers.append("%s: %s" % (header, extractor.headers[header]))
|
[
"setauth",
"can",
"be",
"used",
"during",
"runtime",
"to",
"make",
"sure",
"that",
"authentication",
"is",
"reset",
".",
"it",
"can",
"be",
"used",
"when",
"changing",
"passwords",
"/",
"apikeys",
"to",
"make",
"sure",
"reconnects",
"succeed"
] |
connectordb/connectordb-python
|
python
|
https://github.com/connectordb/connectordb-python/blob/2092b0cb30898139a247176bcf433d5a4abde7cb/connectordb/_websocket.py#L69-L86
|
[
"def",
"setauth",
"(",
"self",
",",
"basic_auth",
")",
":",
"self",
".",
"headers",
"=",
"[",
"]",
"# If we have auth",
"if",
"basic_auth",
"is",
"not",
"None",
":",
"# we use a cheap hack to get the basic auth header out of the auth object.",
"# This snippet ends up with us having an array of the necessary headers",
"# to perform authentication.",
"class",
"auth_extractor",
"(",
")",
":",
"def",
"__init__",
"(",
"self",
")",
":",
"self",
".",
"headers",
"=",
"{",
"}",
"extractor",
"=",
"auth_extractor",
"(",
")",
"basic_auth",
"(",
"extractor",
")",
"for",
"header",
"in",
"extractor",
".",
"headers",
":",
"self",
".",
"headers",
".",
"append",
"(",
"\"%s: %s\"",
"%",
"(",
"header",
",",
"extractor",
".",
"headers",
"[",
"header",
"]",
")",
")"
] |
2092b0cb30898139a247176bcf433d5a4abde7cb
|
test
|
WebsocketHandler.send
|
Send the given command thru the websocket
|
connectordb/_websocket.py
|
def send(self, cmd):
"""Send the given command thru the websocket"""
with self.ws_sendlock:
self.ws.send(json.dumps(cmd))
|
def send(self, cmd):
"""Send the given command thru the websocket"""
with self.ws_sendlock:
self.ws.send(json.dumps(cmd))
|
[
"Send",
"the",
"given",
"command",
"thru",
"the",
"websocket"
] |
connectordb/connectordb-python
|
python
|
https://github.com/connectordb/connectordb-python/blob/2092b0cb30898139a247176bcf433d5a4abde7cb/connectordb/_websocket.py#L101-L104
|
[
"def",
"send",
"(",
"self",
",",
"cmd",
")",
":",
"with",
"self",
".",
"ws_sendlock",
":",
"self",
".",
"ws",
".",
"send",
"(",
"json",
".",
"dumps",
"(",
"cmd",
")",
")"
] |
2092b0cb30898139a247176bcf433d5a4abde7cb
|
test
|
WebsocketHandler.subscribe
|
Given a stream, a callback and an optional transform, sets up the subscription
|
connectordb/_websocket.py
|
def subscribe(self, stream, callback, transform=""):
"""Given a stream, a callback and an optional transform, sets up the subscription"""
if self.status == "disconnected" or self.status == "disconnecting" or self.status == "connecting":
self.connect()
if self.status is not "connected":
return False
logging.debug("Subscribing to %s", stream)
self.send({"cmd": "subscribe", "arg": stream, "transform": transform})
with self.subscription_lock:
self.subscriptions[stream + ":" + transform] = callback
return True
|
def subscribe(self, stream, callback, transform=""):
"""Given a stream, a callback and an optional transform, sets up the subscription"""
if self.status == "disconnected" or self.status == "disconnecting" or self.status == "connecting":
self.connect()
if self.status is not "connected":
return False
logging.debug("Subscribing to %s", stream)
self.send({"cmd": "subscribe", "arg": stream, "transform": transform})
with self.subscription_lock:
self.subscriptions[stream + ":" + transform] = callback
return True
|
[
"Given",
"a",
"stream",
"a",
"callback",
"and",
"an",
"optional",
"transform",
"sets",
"up",
"the",
"subscription"
] |
connectordb/connectordb-python
|
python
|
https://github.com/connectordb/connectordb-python/blob/2092b0cb30898139a247176bcf433d5a4abde7cb/connectordb/_websocket.py#L110-L121
|
[
"def",
"subscribe",
"(",
"self",
",",
"stream",
",",
"callback",
",",
"transform",
"=",
"\"\"",
")",
":",
"if",
"self",
".",
"status",
"==",
"\"disconnected\"",
"or",
"self",
".",
"status",
"==",
"\"disconnecting\"",
"or",
"self",
".",
"status",
"==",
"\"connecting\"",
":",
"self",
".",
"connect",
"(",
")",
"if",
"self",
".",
"status",
"is",
"not",
"\"connected\"",
":",
"return",
"False",
"logging",
".",
"debug",
"(",
"\"Subscribing to %s\"",
",",
"stream",
")",
"self",
".",
"send",
"(",
"{",
"\"cmd\"",
":",
"\"subscribe\"",
",",
"\"arg\"",
":",
"stream",
",",
"\"transform\"",
":",
"transform",
"}",
")",
"with",
"self",
".",
"subscription_lock",
":",
"self",
".",
"subscriptions",
"[",
"stream",
"+",
"\":\"",
"+",
"transform",
"]",
"=",
"callback",
"return",
"True"
] |
2092b0cb30898139a247176bcf433d5a4abde7cb
|
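subscribe above stores each callback under a "stream:transform" key, and __on_message later rebuilds the same key to find the callback. Stripped of the networking, the bookkeeping amounts to a dict; the stream path and datapoint below are made up for illustration.

subscriptions = {}

def subscribe(stream, callback, transform=""):
    # same key format as WebsocketHandler.subscribe
    subscriptions[stream + ":" + transform] = callback

def dispatch(msg):
    # same key reconstruction as __on_message
    key = msg["stream"] + ":" + msg.get("transform", "")
    callback = subscriptions.get(key)
    if callback is None:
        print("not subscribed: " + key)
        return None
    return callback(msg["stream"], msg["data"])

subscribe("myuser/mydevice/mystream", lambda stream, data: print(stream, data))
dispatch({"stream": "myuser/mydevice/mystream", "data": [{"t": 1, "d": 42}]})
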
test
|
WebsocketHandler.unsubscribe
|
Unsubscribe from the given stream (with the optional transform)
|
connectordb/_websocket.py
|
def unsubscribe(self, stream, transform=""):
"""Unsubscribe from the given stream (with the optional transform)"""
if self.status is not "connected":
return False
logging.debug("Unsubscribing from %s", stream)
self.send(
{"cmd": "unsubscribe",
"arg": stream,
"transform": transform})
self.subscription_lock.acquire()
del self.subscriptions[stream + ":" + transform]
if len(self.subscriptions) is 0:
self.subscription_lock.release()
self.disconnect()
else:
self.subscription_lock.release()
|
def unsubscribe(self, stream, transform=""):
"""Unsubscribe from the given stream (with the optional transform)"""
if self.status is not "connected":
return False
logging.debug("Unsubscribing from %s", stream)
self.send(
{"cmd": "unsubscribe",
"arg": stream,
"transform": transform})
self.subscription_lock.acquire()
del self.subscriptions[stream + ":" + transform]
if len(self.subscriptions) is 0:
self.subscription_lock.release()
self.disconnect()
else:
self.subscription_lock.release()
|
[
"Unsubscribe",
"from",
"the",
"given",
"stream",
"(",
"with",
"the",
"optional",
"transform",
")"
] |
connectordb/connectordb-python
|
python
|
https://github.com/connectordb/connectordb-python/blob/2092b0cb30898139a247176bcf433d5a4abde7cb/connectordb/_websocket.py#L123-L139
|
[
"def",
"unsubscribe",
"(",
"self",
",",
"stream",
",",
"transform",
"=",
"\"\"",
")",
":",
"if",
"self",
".",
"status",
"is",
"not",
"\"connected\"",
":",
"return",
"False",
"logging",
".",
"debug",
"(",
"\"Unsubscribing from %s\"",
",",
"stream",
")",
"self",
".",
"send",
"(",
"{",
"\"cmd\"",
":",
"\"unsubscribe\"",
",",
"\"arg\"",
":",
"stream",
",",
"\"transform\"",
":",
"transform",
"}",
")",
"self",
".",
"subscription_lock",
".",
"acquire",
"(",
")",
"del",
"self",
".",
"subscriptions",
"[",
"stream",
"+",
"\":\"",
"+",
"transform",
"]",
"if",
"len",
"(",
"self",
".",
"subscriptions",
")",
"is",
"0",
":",
"self",
".",
"subscription_lock",
".",
"release",
"(",
")",
"self",
".",
"disconnect",
"(",
")",
"else",
":",
"self",
".",
"subscription_lock",
".",
"release",
"(",
")"
] |
2092b0cb30898139a247176bcf433d5a4abde7cb
|
test
|
WebsocketHandler.connect
|
Attempt to connect to the websocket - and returns either True or False depending on if
the connection was successful or not
|
connectordb/_websocket.py
|
def connect(self):
"""Attempt to connect to the websocket - and returns either True or False depending on if
the connection was successful or not"""
# Wait for the lock to be available (ie, the websocket is not being used (yet))
self.ws_openlock.acquire()
self.ws_openlock.release()
if self.status == "connected":
return True # Already connected
if self.status == "disconnecting":
# If currently disconnecting, wait a moment, and retry connect
time.sleep(0.1)
return self.connect()
if self.status == "disconnected" or self.status == "reconnecting":
self.ws = websocket.WebSocketApp(self.ws_url,
header=self.headers,
on_message=self.__on_message,
on_ping=self.__on_ping,
on_open=self.__on_open,
on_close=self.__on_close,
on_error=self.__on_error)
self.ws_thread = threading.Thread(target=self.ws.run_forever)
self.ws_thread.daemon = True
self.status = "connecting"
self.ws_openlock.acquire()
self.ws_thread.start()
self.ws_openlock.acquire()
self.ws_openlock.release()
return self.status == "connected"
|
def connect(self):
"""Attempt to connect to the websocket - and returns either True or False depending on if
the connection was successful or not"""
# Wait for the lock to be available (ie, the websocket is not being used (yet))
self.ws_openlock.acquire()
self.ws_openlock.release()
if self.status == "connected":
return True # Already connected
if self.status == "disconnecting":
# If currently disconnecting, wait a moment, and retry connect
time.sleep(0.1)
return self.connect()
if self.status == "disconnected" or self.status == "reconnecting":
self.ws = websocket.WebSocketApp(self.ws_url,
header=self.headers,
on_message=self.__on_message,
on_ping=self.__on_ping,
on_open=self.__on_open,
on_close=self.__on_close,
on_error=self.__on_error)
self.ws_thread = threading.Thread(target=self.ws.run_forever)
self.ws_thread.daemon = True
self.status = "connecting"
self.ws_openlock.acquire()
self.ws_thread.start()
self.ws_openlock.acquire()
self.ws_openlock.release()
return self.status == "connected"
|
[
"Attempt",
"to",
"connect",
"to",
"the",
"websocket",
"-",
"and",
"returns",
"either",
"True",
"or",
"False",
"depending",
"on",
"if",
"the",
"connection",
"was",
"successful",
"or",
"not"
] |
connectordb/connectordb-python
|
python
|
https://github.com/connectordb/connectordb-python/blob/2092b0cb30898139a247176bcf433d5a4abde7cb/connectordb/_websocket.py#L141-L173
|
[
"def",
"connect",
"(",
"self",
")",
":",
"# Wait for the lock to be available (ie, the websocket is not being used (yet))",
"self",
".",
"ws_openlock",
".",
"acquire",
"(",
")",
"self",
".",
"ws_openlock",
".",
"release",
"(",
")",
"if",
"self",
".",
"status",
"==",
"\"connected\"",
":",
"return",
"True",
"# Already connected",
"if",
"self",
".",
"status",
"==",
"\"disconnecting\"",
":",
"# If currently disconnecting, wait a moment, and retry connect",
"time",
".",
"sleep",
"(",
"0.1",
")",
"return",
"self",
".",
"connect",
"(",
")",
"if",
"self",
".",
"status",
"==",
"\"disconnected\"",
"or",
"self",
".",
"status",
"==",
"\"reconnecting\"",
":",
"self",
".",
"ws",
"=",
"websocket",
".",
"WebSocketApp",
"(",
"self",
".",
"ws_url",
",",
"header",
"=",
"self",
".",
"headers",
",",
"on_message",
"=",
"self",
".",
"__on_message",
",",
"on_ping",
"=",
"self",
".",
"__on_ping",
",",
"on_open",
"=",
"self",
".",
"__on_open",
",",
"on_close",
"=",
"self",
".",
"__on_close",
",",
"on_error",
"=",
"self",
".",
"__on_error",
")",
"self",
".",
"ws_thread",
"=",
"threading",
".",
"Thread",
"(",
"target",
"=",
"self",
".",
"ws",
".",
"run_forever",
")",
"self",
".",
"ws_thread",
".",
"daemon",
"=",
"True",
"self",
".",
"status",
"=",
"\"connecting\"",
"self",
".",
"ws_openlock",
".",
"acquire",
"(",
")",
"self",
".",
"ws_thread",
".",
"start",
"(",
")",
"self",
".",
"ws_openlock",
".",
"acquire",
"(",
")",
"self",
".",
"ws_openlock",
".",
"release",
"(",
")",
"return",
"self",
".",
"status",
"==",
"\"connected\""
] |
2092b0cb30898139a247176bcf433d5a4abde7cb
|
test
|
WebsocketHandler.__reconnect
|
This is called when a connection is lost - it attempts to reconnect to the server
|
connectordb/_websocket.py
|
def __reconnect(self):
"""This is called when a connection is lost - it attempts to reconnect to the server"""
self.status = "reconnecting"
# Reset the disconnect time after 15 minutes
if self.disconnected_time - self.connected_time > 15 * 60:
self.reconnect_time = self.reconnect_time_starting_seconds
else:
self.reconnect_time *= self.reconnect_time_backoff_multiplier
if self.reconnect_time > self.reconnect_time_max_seconds:
self.reconnect_time = self.reconnect_time_max_seconds
# We want to add some randomness to the reconnect rate - necessary so that we don't pound the server
# if it goes down
self.reconnect_time *= 1 + random.uniform(-0.2, 0.2)
if self.reconnect_time < self.reconnect_time_starting_seconds:
self.reconnect_time = self.reconnect_time_starting_seconds
logging.warn("ConnectorDB:WS: Attempting to reconnect in %fs",
self.reconnect_time)
self.reconnector = threading.Timer(self.reconnect_time,
self.__reconnect_fnc)
self.reconnector.daemon = True
self.reconnector.start()
|
def __reconnect(self):
"""This is called when a connection is lost - it attempts to reconnect to the server"""
self.status = "reconnecting"
# Reset the disconnect time after 15 minutes
if self.disconnected_time - self.connected_time > 15 * 60:
self.reconnect_time = self.reconnect_time_starting_seconds
else:
self.reconnect_time *= self.reconnect_time_backoff_multiplier
if self.reconnect_time > self.reconnect_time_max_seconds:
self.reconnect_time = self.reconnect_time_max_seconds
# We want to add some randomness to the reconnect rate - necessary so that we don't pound the server
# if it goes down
self.reconnect_time *= 1 + random.uniform(-0.2, 0.2)
if self.reconnect_time < self.reconnect_time_starting_seconds:
self.reconnect_time = self.reconnect_time_starting_seconds
logging.warn("ConnectorDB:WS: Attempting to reconnect in %fs",
self.reconnect_time)
self.reconnector = threading.Timer(self.reconnect_time,
self.__reconnect_fnc)
self.reconnector.daemon = True
self.reconnector.start()
|
[
"This",
"is",
"called",
"when",
"a",
"connection",
"is",
"lost",
"-",
"it",
"attempts",
"to",
"reconnect",
"to",
"the",
"server"
] |
connectordb/connectordb-python
|
python
|
https://github.com/connectordb/connectordb-python/blob/2092b0cb30898139a247176bcf433d5a4abde7cb/connectordb/_websocket.py#L184-L210
|
[
"def",
"__reconnect",
"(",
"self",
")",
":",
"self",
".",
"status",
"=",
"\"reconnecting\"",
"# Reset the disconnect time after 15 minutes",
"if",
"self",
".",
"disconnected_time",
"-",
"self",
".",
"connected_time",
">",
"15",
"*",
"60",
":",
"self",
".",
"reconnect_time",
"=",
"self",
".",
"reconnect_time_starting_seconds",
"else",
":",
"self",
".",
"reconnect_time",
"*=",
"self",
".",
"reconnect_time_backoff_multiplier",
"if",
"self",
".",
"reconnect_time",
">",
"self",
".",
"reconnect_time_max_seconds",
":",
"self",
".",
"reconnect_time",
"=",
"self",
".",
"reconnect_time_max_seconds",
"# We want to add some randomness to the reconnect rate - necessary so that we don't pound the server",
"# if it goes down",
"self",
".",
"reconnect_time",
"*=",
"1",
"+",
"random",
".",
"uniform",
"(",
"-",
"0.2",
",",
"0.2",
")",
"if",
"self",
".",
"reconnect_time",
"<",
"self",
".",
"reconnect_time_starting_seconds",
":",
"self",
".",
"reconnect_time",
"=",
"self",
".",
"reconnect_time_starting_seconds",
"logging",
".",
"warn",
"(",
"\"ConnectorDB:WS: Attempting to reconnect in %fs\"",
",",
"self",
".",
"reconnect_time",
")",
"self",
".",
"reconnector",
"=",
"threading",
".",
"Timer",
"(",
"self",
".",
"reconnect_time",
",",
"self",
".",
"__reconnect_fnc",
")",
"self",
".",
"reconnector",
".",
"daemon",
"=",
"True",
"self",
".",
"reconnector",
".",
"start",
"(",
")"
] |
2092b0cb30898139a247176bcf433d5a4abde7cb
|
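The reconnect policy in __reconnect is exponential backoff with a ceiling, a floor, and roughly +/-20% jitter. The same schedule in isolation looks like the sketch below; the numeric constants are placeholders, not the handler's actual defaults.

import random

START, MULTIPLIER, MAXIMUM = 1.0, 2.0, 60.0  # placeholder constants

def next_delay(previous):
    delay = min(previous * MULTIPLIER, MAXIMUM)  # exponential growth with a ceiling
    delay *= 1 + random.uniform(-0.2, 0.2)       # +/-20% jitter, as in __reconnect
    return max(delay, START)                     # never drop below the starting delay

delay = START
for attempt in range(5):
    print("attempt %d: retrying in %.2fs" % (attempt, delay))
    delay = next_delay(delay)
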
test
|
WebsocketHandler.__resubscribe
|
Send subscribe command for all existing subscriptions. This allows resuming a connection
that was closed
|
connectordb/_websocket.py
|
def __resubscribe(self):
        """Send subscribe command for all existing subscriptions. This allows resuming a connection
that was closed"""
with self.subscription_lock:
for sub in self.subscriptions:
logging.debug("Resubscribing to %s", sub)
stream_transform = sub.split(":", 1)
self.send({
"cmd": "subscribe",
"arg": stream_transform[0],
"transform": stream_transform[1]
})
|
def __resubscribe(self):
        """Send subscribe command for all existing subscriptions. This allows resuming a connection
that was closed"""
with self.subscription_lock:
for sub in self.subscriptions:
logging.debug("Resubscribing to %s", sub)
stream_transform = sub.split(":", 1)
self.send({
"cmd": "subscribe",
"arg": stream_transform[0],
"transform": stream_transform[1]
})
|
[
"Send",
"subscribe",
"command",
"for",
"all",
"existing",
"subscriptions",
".",
"This",
"allows",
"to",
"resume",
"a",
"connection",
"that",
"was",
"closed"
] |
connectordb/connectordb-python
|
python
|
https://github.com/connectordb/connectordb-python/blob/2092b0cb30898139a247176bcf433d5a4abde7cb/connectordb/_websocket.py#L219-L230
|
[
"def",
"__resubscribe",
"(",
"self",
")",
":",
"with",
"self",
".",
"subscription_lock",
":",
"for",
"sub",
"in",
"self",
".",
"subscriptions",
":",
"logging",
".",
"debug",
"(",
"\"Resubscribing to %s\"",
",",
"sub",
")",
"stream_transform",
"=",
"sub",
".",
"split",
"(",
"\":\"",
",",
"1",
")",
"self",
".",
"send",
"(",
"{",
"\"cmd\"",
":",
"\"subscribe\"",
",",
"\"arg\"",
":",
"stream_transform",
"[",
"0",
"]",
",",
"\"transform\"",
":",
"stream_transform",
"[",
"1",
"]",
"}",
")"
] |
2092b0cb30898139a247176bcf433d5a4abde7cb
|
test
|
WebsocketHandler.__on_open
|
Called when the websocket is opened
|
connectordb/_websocket.py
|
def __on_open(self, ws):
"""Called when the websocket is opened"""
logging.debug("ConnectorDB: Websocket opened")
# Connection success - decrease the wait time for next connection
self.reconnect_time /= self.reconnect_time_backoff_multiplier
self.status = "connected"
self.lastpingtime = time.time()
self.__ensure_ping()
self.connected_time = time.time()
# Release the lock that connect called
self.ws_openlock.release()
|
def __on_open(self, ws):
"""Called when the websocket is opened"""
logging.debug("ConnectorDB: Websocket opened")
# Connection success - decrease the wait time for next connection
self.reconnect_time /= self.reconnect_time_backoff_multiplier
self.status = "connected"
self.lastpingtime = time.time()
self.__ensure_ping()
self.connected_time = time.time()
# Release the lock that connect called
self.ws_openlock.release()
|
[
"Called",
"when",
"the",
"websocket",
"is",
"opened"
] |
connectordb/connectordb-python
|
python
|
https://github.com/connectordb/connectordb-python/blob/2092b0cb30898139a247176bcf433d5a4abde7cb/connectordb/_websocket.py#L232-L247
|
[
"def",
"__on_open",
"(",
"self",
",",
"ws",
")",
":",
"logging",
".",
"debug",
"(",
"\"ConnectorDB: Websocket opened\"",
")",
"# Connection success - decrease the wait time for next connection",
"self",
".",
"reconnect_time",
"/=",
"self",
".",
"reconnect_time_backoff_multiplier",
"self",
".",
"status",
"=",
"\"connected\"",
"self",
".",
"lastpingtime",
"=",
"time",
".",
"time",
"(",
")",
"self",
".",
"__ensure_ping",
"(",
")",
"self",
".",
"connected_time",
"=",
"time",
".",
"time",
"(",
")",
"# Release the lock that connect called",
"self",
".",
"ws_openlock",
".",
"release",
"(",
")"
] |
2092b0cb30898139a247176bcf433d5a4abde7cb
|
test
|
WebsocketHandler.__on_close
|
Called when the websocket is closed
|
connectordb/_websocket.py
|
def __on_close(self, ws):
"""Called when the websocket is closed"""
if self.status == "disconnected":
return # This can be double-called on disconnect
logging.debug("ConnectorDB:WS: Websocket closed")
# Turn off the ping timer
if self.pingtimer is not None:
self.pingtimer.cancel()
self.disconnected_time = time.time()
if self.status == "disconnecting":
self.status = "disconnected"
elif self.status == "connected":
self.__reconnect()
|
def __on_close(self, ws):
"""Called when the websocket is closed"""
if self.status == "disconnected":
return # This can be double-called on disconnect
logging.debug("ConnectorDB:WS: Websocket closed")
# Turn off the ping timer
if self.pingtimer is not None:
self.pingtimer.cancel()
self.disconnected_time = time.time()
if self.status == "disconnecting":
self.status = "disconnected"
elif self.status == "connected":
self.__reconnect()
|
[
"Called",
"when",
"the",
"websocket",
"is",
"closed"
] |
connectordb/connectordb-python
|
python
|
https://github.com/connectordb/connectordb-python/blob/2092b0cb30898139a247176bcf433d5a4abde7cb/connectordb/_websocket.py#L249-L263
|
[
"def",
"__on_close",
"(",
"self",
",",
"ws",
")",
":",
"if",
"self",
".",
"status",
"==",
"\"disconnected\"",
":",
"return",
"# This can be double-called on disconnect",
"logging",
".",
"debug",
"(",
"\"ConnectorDB:WS: Websocket closed\"",
")",
"# Turn off the ping timer",
"if",
"self",
".",
"pingtimer",
"is",
"not",
"None",
":",
"self",
".",
"pingtimer",
".",
"cancel",
"(",
")",
"self",
".",
"disconnected_time",
"=",
"time",
".",
"time",
"(",
")",
"if",
"self",
".",
"status",
"==",
"\"disconnecting\"",
":",
"self",
".",
"status",
"=",
"\"disconnected\"",
"elif",
"self",
".",
"status",
"==",
"\"connected\"",
":",
"self",
".",
"__reconnect",
"(",
")"
] |
2092b0cb30898139a247176bcf433d5a4abde7cb
|
test
|
WebsocketHandler.__on_error
|
Called when there is an error in the websocket
|
connectordb/_websocket.py
|
def __on_error(self, ws, err):
"""Called when there is an error in the websocket"""
logging.debug("ConnectorDB:WS: Connection Error")
if self.status == "connecting":
self.status = "errored"
self.ws_openlock.release()
|
def __on_error(self, ws, err):
"""Called when there is an error in the websocket"""
logging.debug("ConnectorDB:WS: Connection Error")
if self.status == "connecting":
self.status = "errored"
self.ws_openlock.release()
|
[
"Called",
"when",
"there",
"is",
"an",
"error",
"in",
"the",
"websocket"
] |
connectordb/connectordb-python
|
python
|
https://github.com/connectordb/connectordb-python/blob/2092b0cb30898139a247176bcf433d5a4abde7cb/connectordb/_websocket.py#L265-L271
|
[
"def",
"__on_error",
"(",
"self",
",",
"ws",
",",
"err",
")",
":",
"logging",
".",
"debug",
"(",
"\"ConnectorDB:WS: Connection Error\"",
")",
"if",
"self",
".",
"status",
"==",
"\"connecting\"",
":",
"self",
".",
"status",
"=",
"\"errored\"",
"self",
".",
"ws_openlock",
".",
"release",
"(",
")"
] |
2092b0cb30898139a247176bcf433d5a4abde7cb
|
test
|
WebsocketHandler.__on_message
|
This function is called whenever there is a message received from the server
|
connectordb/_websocket.py
|
def __on_message(self, ws, msg):
"""This function is called whenever there is a message received from the server"""
msg = json.loads(msg)
logging.debug("ConnectorDB:WS: Msg '%s'", msg["stream"])
        # Build the subscription key
stream_key = msg["stream"] + ":"
if "transform" in msg:
stream_key += msg["transform"]
self.subscription_lock.acquire()
if stream_key in self.subscriptions:
subscription_function = self.subscriptions[stream_key]
self.subscription_lock.release()
fresult = subscription_function(msg["stream"], msg["data"])
if fresult is True:
# This is a special result - if the subscription function of a downlink returns True,
# then the datapoint is acknowledged automatically (ie, reinserted in non-downlink stream)
fresult = msg["data"]
if fresult is not False and fresult is not None and msg["stream"].endswith(
"/downlink") and msg["stream"].count("/") == 3:
# If the above conditions are true, it means that the datapoints were from a downlink,
# and the subscriber function chooses to acknowledge them, so we reinsert them.
self.insert(msg["stream"][:-9], fresult)
else:
self.subscription_lock.release()
logging.warn(
"ConnectorDB:WS: Msg '%s' not subscribed! Subscriptions: %s",
msg["stream"], list(self.subscriptions.keys()))
|
def __on_message(self, ws, msg):
"""This function is called whenever there is a message received from the server"""
msg = json.loads(msg)
logging.debug("ConnectorDB:WS: Msg '%s'", msg["stream"])
        # Build the subscription key
stream_key = msg["stream"] + ":"
if "transform" in msg:
stream_key += msg["transform"]
self.subscription_lock.acquire()
if stream_key in self.subscriptions:
subscription_function = self.subscriptions[stream_key]
self.subscription_lock.release()
fresult = subscription_function(msg["stream"], msg["data"])
if fresult is True:
# This is a special result - if the subscription function of a downlink returns True,
# then the datapoint is acknowledged automatically (ie, reinserted in non-downlink stream)
fresult = msg["data"]
if fresult is not False and fresult is not None and msg["stream"].endswith(
"/downlink") and msg["stream"].count("/") == 3:
# If the above conditions are true, it means that the datapoints were from a downlink,
# and the subscriber function chooses to acknowledge them, so we reinsert them.
self.insert(msg["stream"][:-9], fresult)
else:
self.subscription_lock.release()
logging.warn(
"ConnectorDB:WS: Msg '%s' not subscribed! Subscriptions: %s",
msg["stream"], list(self.subscriptions.keys()))
|
[
"This",
"function",
"is",
"called",
"whenever",
"there",
"is",
"a",
"message",
"received",
"from",
"the",
"server"
] |
connectordb/connectordb-python
|
python
|
https://github.com/connectordb/connectordb-python/blob/2092b0cb30898139a247176bcf433d5a4abde7cb/connectordb/_websocket.py#L273-L304
|
[
"def",
"__on_message",
"(",
"self",
",",
"ws",
",",
"msg",
")",
":",
"msg",
"=",
"json",
".",
"loads",
"(",
"msg",
")",
"logging",
".",
"debug",
"(",
"\"ConnectorDB:WS: Msg '%s'\"",
",",
"msg",
"[",
"\"stream\"",
"]",
")",
"# Build the subcription key",
"stream_key",
"=",
"msg",
"[",
"\"stream\"",
"]",
"+",
"\":\"",
"if",
"\"transform\"",
"in",
"msg",
":",
"stream_key",
"+=",
"msg",
"[",
"\"transform\"",
"]",
"self",
".",
"subscription_lock",
".",
"acquire",
"(",
")",
"if",
"stream_key",
"in",
"self",
".",
"subscriptions",
":",
"subscription_function",
"=",
"self",
".",
"subscriptions",
"[",
"stream_key",
"]",
"self",
".",
"subscription_lock",
".",
"release",
"(",
")",
"fresult",
"=",
"subscription_function",
"(",
"msg",
"[",
"\"stream\"",
"]",
",",
"msg",
"[",
"\"data\"",
"]",
")",
"if",
"fresult",
"is",
"True",
":",
"# This is a special result - if the subscription function of a downlink returns True,",
"# then the datapoint is acknowledged automatically (ie, reinserted in non-downlink stream)",
"fresult",
"=",
"msg",
"[",
"\"data\"",
"]",
"if",
"fresult",
"is",
"not",
"False",
"and",
"fresult",
"is",
"not",
"None",
"and",
"msg",
"[",
"\"stream\"",
"]",
".",
"endswith",
"(",
"\"/downlink\"",
")",
"and",
"msg",
"[",
"\"stream\"",
"]",
".",
"count",
"(",
"\"/\"",
")",
"==",
"3",
":",
"# If the above conditions are true, it means that the datapoints were from a downlink,",
"# and the subscriber function chooses to acknowledge them, so we reinsert them.",
"self",
".",
"insert",
"(",
"msg",
"[",
"\"stream\"",
"]",
"[",
":",
"-",
"9",
"]",
",",
"fresult",
")",
"else",
":",
"self",
".",
"subscription_lock",
".",
"release",
"(",
")",
"logging",
".",
"warn",
"(",
"\"ConnectorDB:WS: Msg '%s' not subscribed! Subscriptions: %s\"",
",",
"msg",
"[",
"\"stream\"",
"]",
",",
"list",
"(",
"self",
".",
"subscriptions",
".",
"keys",
"(",
")",
")",
")"
] |
2092b0cb30898139a247176bcf433d5a4abde7cb
|
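The downlink handling at the end of __on_message follows one rule: a callback that returns True acknowledges the datapoints unchanged, and any non-False/None result for a */downlink stream is re-inserted into the corresponding non-downlink stream. Detached from the websocket machinery (and with the insert() call replaced by a print), the rule reads as follows; the stream path is invented for the example.

def handle(stream, data, callback):
    result = callback(stream, data)
    if result is True:
        # True means "acknowledge the datapoints unchanged"
        result = data
    if (result is not False and result is not None
            and stream.endswith("/downlink") and stream.count("/") == 3):
        acked_stream = stream[:-len("/downlink")]
        print("re-insert into %s: %s" % (acked_stream, result))

handle("myuser/mydevice/led/downlink", [{"t": 1, "d": True}], lambda s, d: True)
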
test
|
WebsocketHandler.__on_ping
|
The server periodically sends us websocket ping messages to keep the connection alive. To
ensure that the connection to the server is still active, we memorize the most recent ping's time
and we periodically ensure that a ping was received in __ensure_ping
|
connectordb/_websocket.py
|
def __on_ping(self, ws, data):
"""The server periodically sends us websocket ping messages to keep the connection alive. To
ensure that the connection to the server is still active, we memorize the most recent ping's time
and we periodically ensure that a ping was received in __ensure_ping"""
logging.debug("ConnectorDB:WS: ping")
self.lastpingtime = time.time()
|
def __on_ping(self, ws, data):
"""The server periodically sends us websocket ping messages to keep the connection alive. To
ensure that the connection to the server is still active, we memorize the most recent ping's time
and we periodically ensure that a ping was received in __ensure_ping"""
logging.debug("ConnectorDB:WS: ping")
self.lastpingtime = time.time()
|
[
"The",
"server",
"periodically",
"sends",
"us",
"websocket",
"ping",
"messages",
"to",
"keep",
"the",
"connection",
"alive",
".",
"To",
"ensure",
"that",
"the",
"connection",
"to",
"the",
"server",
"is",
"still",
"active",
"we",
"memorize",
"the",
"most",
"recent",
"ping",
"s",
"time",
"and",
"we",
"periodically",
"ensure",
"that",
"a",
"ping",
"was",
"received",
"in",
"__ensure_ping"
] |
connectordb/connectordb-python
|
python
|
https://github.com/connectordb/connectordb-python/blob/2092b0cb30898139a247176bcf433d5a4abde7cb/connectordb/_websocket.py#L306-L311
|
[
"def",
"__on_ping",
"(",
"self",
",",
"ws",
",",
"data",
")",
":",
"logging",
".",
"debug",
"(",
"\"ConnectorDB:WS: ping\"",
")",
"self",
".",
"lastpingtime",
"=",
"time",
".",
"time",
"(",
")"
] |
2092b0cb30898139a247176bcf433d5a4abde7cb
|
test
|
WebsocketHandler.__ensure_ping
|
Each time the server sends a ping message, we record the timestamp. If we haven't received a ping
within the given interval, then we assume that the connection was lost, close the websocket and
attempt to reconnect
|
connectordb/_websocket.py
|
def __ensure_ping(self):
"""Each time the server sends a ping message, we record the timestamp. If we haven't received a ping
within the given interval, then we assume that the connection was lost, close the websocket and
attempt to reconnect"""
logging.debug("ConnectorDB:WS: pingcheck")
if (time.time() - self.lastpingtime > self.connection_ping_timeout):
logging.warn("ConnectorDB:WS: Websocket ping timed out!")
if self.ws is not None:
self.ws.close()
self.__on_close(self.ws)
else:
# reset the ping timer
self.pingtimer = threading.Timer(self.connection_ping_timeout,
self.__ensure_ping)
self.pingtimer.daemon = True
self.pingtimer.start()
|
def __ensure_ping(self):
"""Each time the server sends a ping message, we record the timestamp. If we haven't received a ping
within the given interval, then we assume that the connection was lost, close the websocket and
attempt to reconnect"""
logging.debug("ConnectorDB:WS: pingcheck")
if (time.time() - self.lastpingtime > self.connection_ping_timeout):
logging.warn("ConnectorDB:WS: Websocket ping timed out!")
if self.ws is not None:
self.ws.close()
self.__on_close(self.ws)
else:
# reset the ping timer
self.pingtimer = threading.Timer(self.connection_ping_timeout,
self.__ensure_ping)
self.pingtimer.daemon = True
self.pingtimer.start()
|
[
"Each",
"time",
"the",
"server",
"sends",
"a",
"ping",
"message",
"we",
"record",
"the",
"timestamp",
".",
"If",
"we",
"haven",
"t",
"received",
"a",
"ping",
"within",
"the",
"given",
"interval",
"then",
"we",
"assume",
"that",
"the",
"connection",
"was",
"lost",
"close",
"the",
"websocket",
"and",
"attempt",
"to",
"reconnect"
] |
connectordb/connectordb-python
|
python
|
https://github.com/connectordb/connectordb-python/blob/2092b0cb30898139a247176bcf433d5a4abde7cb/connectordb/_websocket.py#L313-L329
|
[
"def",
"__ensure_ping",
"(",
"self",
")",
":",
"logging",
".",
"debug",
"(",
"\"ConnectorDB:WS: pingcheck\"",
")",
"if",
"(",
"time",
".",
"time",
"(",
")",
"-",
"self",
".",
"lastpingtime",
">",
"self",
".",
"connection_ping_timeout",
")",
":",
"logging",
".",
"warn",
"(",
"\"ConnectorDB:WS: Websocket ping timed out!\"",
")",
"if",
"self",
".",
"ws",
"is",
"not",
"None",
":",
"self",
".",
"ws",
".",
"close",
"(",
")",
"self",
".",
"__on_close",
"(",
"self",
".",
"ws",
")",
"else",
":",
"# reset the ping timer",
"self",
".",
"pingtimer",
"=",
"threading",
".",
"Timer",
"(",
"self",
".",
"connection_ping_timeout",
",",
"self",
".",
"__ensure_ping",
")",
"self",
".",
"pingtimer",
".",
"daemon",
"=",
"True",
"self",
".",
"pingtimer",
".",
"start",
"(",
")"
] |
2092b0cb30898139a247176bcf433d5a4abde7cb
|
test
|
gatk_select_variants
|
Isolates a particular variant type from a VCF file using GATK SelectVariants
:param JobFunctionWrappingJob job: passed automatically by Toil
:param str mode: variant type (i.e. SNP or INDEL)
:param str vcf_id: FileStoreID for input VCF file
:param str ref_fasta: FileStoreID for reference genome fasta
:param str ref_fai: FileStoreID for reference genome index file
:param str ref_dict: FileStoreID for reference genome sequence dictionary file
:return: FileStoreID for filtered VCF
:rtype: str
|
src/toil_lib/tools/variant_manipulation.py
|
def gatk_select_variants(job, mode, vcf_id, ref_fasta, ref_fai, ref_dict):
"""
Isolates a particular variant type from a VCF file using GATK SelectVariants
:param JobFunctionWrappingJob job: passed automatically by Toil
:param str mode: variant type (i.e. SNP or INDEL)
:param str vcf_id: FileStoreID for input VCF file
:param str ref_fasta: FileStoreID for reference genome fasta
:param str ref_fai: FileStoreID for reference genome index file
:param str ref_dict: FileStoreID for reference genome sequence dictionary file
:return: FileStoreID for filtered VCF
:rtype: str
"""
job.fileStore.logToMaster('Running GATK SelectVariants to select %ss' % mode)
inputs = {'genome.fa': ref_fasta,
'genome.fa.fai': ref_fai,
'genome.dict': ref_dict,
'input.vcf': vcf_id}
work_dir = job.fileStore.getLocalTempDir()
for name, file_store_id in inputs.iteritems():
job.fileStore.readGlobalFile(file_store_id, os.path.join(work_dir, name))
command = ['-T', 'SelectVariants',
'-R', 'genome.fa',
'-V', 'input.vcf',
'-o', 'output.vcf',
'-selectType', mode]
docker_parameters = ['--rm', 'log-driver', 'none',
'-e', 'JAVA_OPTS=-Djava.io.tmpdir=/data/ -Xmx{}'.format(job.memory)]
dockerCall(job=job, workDir=work_dir,
parameters=command,
tool='quay.io/ucsc_cgl/gatk:3.5--dba6dae49156168a909c43330350c6161dc7ecc2',
dockerParameters=docker_parameters)
return job.fileStore.writeGlobalFile(os.path.join(work_dir, 'output.vcf'))
|
def gatk_select_variants(job, mode, vcf_id, ref_fasta, ref_fai, ref_dict):
"""
Isolates a particular variant type from a VCF file using GATK SelectVariants
:param JobFunctionWrappingJob job: passed automatically by Toil
:param str mode: variant type (i.e. SNP or INDEL)
:param str vcf_id: FileStoreID for input VCF file
:param str ref_fasta: FileStoreID for reference genome fasta
:param str ref_fai: FileStoreID for reference genome index file
:param str ref_dict: FileStoreID for reference genome sequence dictionary file
:return: FileStoreID for filtered VCF
:rtype: str
"""
job.fileStore.logToMaster('Running GATK SelectVariants to select %ss' % mode)
inputs = {'genome.fa': ref_fasta,
'genome.fa.fai': ref_fai,
'genome.dict': ref_dict,
'input.vcf': vcf_id}
work_dir = job.fileStore.getLocalTempDir()
for name, file_store_id in inputs.iteritems():
job.fileStore.readGlobalFile(file_store_id, os.path.join(work_dir, name))
command = ['-T', 'SelectVariants',
'-R', 'genome.fa',
'-V', 'input.vcf',
'-o', 'output.vcf',
'-selectType', mode]
docker_parameters = ['--rm', 'log-driver', 'none',
'-e', 'JAVA_OPTS=-Djava.io.tmpdir=/data/ -Xmx{}'.format(job.memory)]
dockerCall(job=job, workDir=work_dir,
parameters=command,
tool='quay.io/ucsc_cgl/gatk:3.5--dba6dae49156168a909c43330350c6161dc7ecc2',
dockerParameters=docker_parameters)
return job.fileStore.writeGlobalFile(os.path.join(work_dir, 'output.vcf'))
|
[
"Isolates",
"a",
"particular",
"variant",
"type",
"from",
"a",
"VCF",
"file",
"using",
"GATK",
"SelectVariants"
] |
BD2KGenomics/toil-lib
|
python
|
https://github.com/BD2KGenomics/toil-lib/blob/022a615fc3dc98fc1aaa7bfd232409962ca44fbd/src/toil_lib/tools/variant_manipulation.py#L7-L44
|
[
"def",
"gatk_select_variants",
"(",
"job",
",",
"mode",
",",
"vcf_id",
",",
"ref_fasta",
",",
"ref_fai",
",",
"ref_dict",
")",
":",
"job",
".",
"fileStore",
".",
"logToMaster",
"(",
"'Running GATK SelectVariants to select %ss'",
"%",
"mode",
")",
"inputs",
"=",
"{",
"'genome.fa'",
":",
"ref_fasta",
",",
"'genome.fa.fai'",
":",
"ref_fai",
",",
"'genome.dict'",
":",
"ref_dict",
",",
"'input.vcf'",
":",
"vcf_id",
"}",
"work_dir",
"=",
"job",
".",
"fileStore",
".",
"getLocalTempDir",
"(",
")",
"for",
"name",
",",
"file_store_id",
"in",
"inputs",
".",
"iteritems",
"(",
")",
":",
"job",
".",
"fileStore",
".",
"readGlobalFile",
"(",
"file_store_id",
",",
"os",
".",
"path",
".",
"join",
"(",
"work_dir",
",",
"name",
")",
")",
"command",
"=",
"[",
"'-T'",
",",
"'SelectVariants'",
",",
"'-R'",
",",
"'genome.fa'",
",",
"'-V'",
",",
"'input.vcf'",
",",
"'-o'",
",",
"'output.vcf'",
",",
"'-selectType'",
",",
"mode",
"]",
"docker_parameters",
"=",
"[",
"'--rm'",
",",
"'log-driver'",
",",
"'none'",
",",
"'-e'",
",",
"'JAVA_OPTS=-Djava.io.tmpdir=/data/ -Xmx{}'",
".",
"format",
"(",
"job",
".",
"memory",
")",
"]",
"dockerCall",
"(",
"job",
"=",
"job",
",",
"workDir",
"=",
"work_dir",
",",
"parameters",
"=",
"command",
",",
"tool",
"=",
"'quay.io/ucsc_cgl/gatk:3.5--dba6dae49156168a909c43330350c6161dc7ecc2'",
",",
"dockerParameters",
"=",
"docker_parameters",
")",
"return",
"job",
".",
"fileStore",
".",
"writeGlobalFile",
"(",
"os",
".",
"path",
".",
"join",
"(",
"work_dir",
",",
"'output.vcf'",
")",
")"
] |
022a615fc3dc98fc1aaa7bfd232409962ca44fbd
|
test
|
gatk_variant_filtration
|
Filters VCF file using GATK VariantFiltration. Fixes extra pair of quotation marks in VCF header that
may interfere with other VCF tools.
:param JobFunctionWrappingJob job: passed automatically by Toil
:param str vcf_id: FileStoreID for input VCF file
:param str filter_name: Name of filter for VCF header
:param str filter_expression: JEXL filter expression
:param str ref_fasta: FileStoreID for reference genome fasta
:param str ref_fai: FileStoreID for reference genome index file
:param str ref_dict: FileStoreID for reference genome sequence dictionary file
:return: FileStoreID for filtered VCF file
:rtype: str
|
src/toil_lib/tools/variant_manipulation.py
|
def gatk_variant_filtration(job, vcf_id, filter_name, filter_expression, ref_fasta, ref_fai, ref_dict):
"""
Filters VCF file using GATK VariantFiltration. Fixes extra pair of quotation marks in VCF header that
may interfere with other VCF tools.
:param JobFunctionWrappingJob job: passed automatically by Toil
:param str vcf_id: FileStoreID for input VCF file
:param str filter_name: Name of filter for VCF header
:param str filter_expression: JEXL filter expression
:param str ref_fasta: FileStoreID for reference genome fasta
:param str ref_fai: FileStoreID for reference genome index file
:param str ref_dict: FileStoreID for reference genome sequence dictionary file
:return: FileStoreID for filtered VCF file
:rtype: str
"""
inputs = {'genome.fa': ref_fasta,
'genome.fa.fai': ref_fai,
'genome.dict': ref_dict,
'input.vcf': vcf_id}
work_dir = job.fileStore.getLocalTempDir()
for name, file_store_id in inputs.iteritems():
job.fileStore.readGlobalFile(file_store_id, os.path.join(work_dir, name))
command = ['-T', 'VariantFiltration',
'-R', 'genome.fa',
'-V', 'input.vcf',
'--filterName', filter_name, # Documents filter name in header
'--filterExpression', filter_expression,
'-o', 'filtered_variants.vcf']
job.fileStore.logToMaster('Running GATK VariantFiltration using {name}: '
'{expression}'.format(name=filter_name, expression=filter_expression))
docker_parameters = ['--rm', 'log-driver', 'none',
'-e', 'JAVA_OPTS=-Djava.io.tmpdir=/data/ -Xmx{}'.format(job.memory)]
dockerCall(job=job, workDir=work_dir,
parameters=command,
tool='quay.io/ucsc_cgl/gatk:3.5--dba6dae49156168a909c43330350c6161dc7ecc2',
dockerParameters=docker_parameters)
# Remove extra quotation marks around filter expression.
malformed_header = os.path.join(work_dir, 'filtered_variants.vcf')
fixed_header = os.path.join(work_dir, 'fixed_header.vcf')
filter_regex = re.escape('"%s"' % filter_expression)
with open(malformed_header, 'r') as f, open(fixed_header, 'w') as g:
for line in f:
g.write(re.sub(filter_regex, filter_expression, line))
return job.fileStore.writeGlobalFile(fixed_header)
|
def gatk_variant_filtration(job, vcf_id, filter_name, filter_expression, ref_fasta, ref_fai, ref_dict):
"""
Filters VCF file using GATK VariantFiltration. Fixes extra pair of quotation marks in VCF header that
may interfere with other VCF tools.
:param JobFunctionWrappingJob job: passed automatically by Toil
:param str vcf_id: FileStoreID for input VCF file
:param str filter_name: Name of filter for VCF header
:param str filter_expression: JEXL filter expression
:param str ref_fasta: FileStoreID for reference genome fasta
:param str ref_fai: FileStoreID for reference genome index file
:param str ref_dict: FileStoreID for reference genome sequence dictionary file
:return: FileStoreID for filtered VCF file
:rtype: str
"""
inputs = {'genome.fa': ref_fasta,
'genome.fa.fai': ref_fai,
'genome.dict': ref_dict,
'input.vcf': vcf_id}
work_dir = job.fileStore.getLocalTempDir()
for name, file_store_id in inputs.iteritems():
job.fileStore.readGlobalFile(file_store_id, os.path.join(work_dir, name))
command = ['-T', 'VariantFiltration',
'-R', 'genome.fa',
'-V', 'input.vcf',
'--filterName', filter_name, # Documents filter name in header
'--filterExpression', filter_expression,
'-o', 'filtered_variants.vcf']
job.fileStore.logToMaster('Running GATK VariantFiltration using {name}: '
'{expression}'.format(name=filter_name, expression=filter_expression))
docker_parameters = ['--rm', 'log-driver', 'none',
'-e', 'JAVA_OPTS=-Djava.io.tmpdir=/data/ -Xmx{}'.format(job.memory)]
dockerCall(job=job, workDir=work_dir,
parameters=command,
tool='quay.io/ucsc_cgl/gatk:3.5--dba6dae49156168a909c43330350c6161dc7ecc2',
dockerParameters=docker_parameters)
# Remove extra quotation marks around filter expression.
malformed_header = os.path.join(work_dir, 'filtered_variants.vcf')
fixed_header = os.path.join(work_dir, 'fixed_header.vcf')
filter_regex = re.escape('"%s"' % filter_expression)
with open(malformed_header, 'r') as f, open(fixed_header, 'w') as g:
for line in f:
g.write(re.sub(filter_regex, filter_expression, line))
return job.fileStore.writeGlobalFile(fixed_header)
|
[
"Filters",
"VCF",
"file",
"using",
"GATK",
"VariantFiltration",
".",
"Fixes",
"extra",
"pair",
"of",
"quotation",
"marks",
"in",
"VCF",
"header",
"that",
"may",
"interfere",
"with",
"other",
"VCF",
"tools",
"."
] |
BD2KGenomics/toil-lib
|
python
|
https://github.com/BD2KGenomics/toil-lib/blob/022a615fc3dc98fc1aaa7bfd232409962ca44fbd/src/toil_lib/tools/variant_manipulation.py#L47-L96
|
[
"def",
"gatk_variant_filtration",
"(",
"job",
",",
"vcf_id",
",",
"filter_name",
",",
"filter_expression",
",",
"ref_fasta",
",",
"ref_fai",
",",
"ref_dict",
")",
":",
"inputs",
"=",
"{",
"'genome.fa'",
":",
"ref_fasta",
",",
"'genome.fa.fai'",
":",
"ref_fai",
",",
"'genome.dict'",
":",
"ref_dict",
",",
"'input.vcf'",
":",
"vcf_id",
"}",
"work_dir",
"=",
"job",
".",
"fileStore",
".",
"getLocalTempDir",
"(",
")",
"for",
"name",
",",
"file_store_id",
"in",
"inputs",
".",
"iteritems",
"(",
")",
":",
"job",
".",
"fileStore",
".",
"readGlobalFile",
"(",
"file_store_id",
",",
"os",
".",
"path",
".",
"join",
"(",
"work_dir",
",",
"name",
")",
")",
"command",
"=",
"[",
"'-T'",
",",
"'VariantFiltration'",
",",
"'-R'",
",",
"'genome.fa'",
",",
"'-V'",
",",
"'input.vcf'",
",",
"'--filterName'",
",",
"filter_name",
",",
"# Documents filter name in header",
"'--filterExpression'",
",",
"filter_expression",
",",
"'-o'",
",",
"'filtered_variants.vcf'",
"]",
"job",
".",
"fileStore",
".",
"logToMaster",
"(",
"'Running GATK VariantFiltration using {name}: '",
"'{expression}'",
".",
"format",
"(",
"name",
"=",
"filter_name",
",",
"expression",
"=",
"filter_expression",
")",
")",
"docker_parameters",
"=",
"[",
"'--rm'",
",",
"'log-driver'",
",",
"'none'",
",",
"'-e'",
",",
"'JAVA_OPTS=-Djava.io.tmpdir=/data/ -Xmx{}'",
".",
"format",
"(",
"job",
".",
"memory",
")",
"]",
"dockerCall",
"(",
"job",
"=",
"job",
",",
"workDir",
"=",
"work_dir",
",",
"parameters",
"=",
"command",
",",
"tool",
"=",
"'quay.io/ucsc_cgl/gatk:3.5--dba6dae49156168a909c43330350c6161dc7ecc2'",
",",
"dockerParameters",
"=",
"docker_parameters",
")",
"# Remove extra quotation marks around filter expression.",
"malformed_header",
"=",
"os",
".",
"path",
".",
"join",
"(",
"work_dir",
",",
"'filtered_variants.vcf'",
")",
"fixed_header",
"=",
"os",
".",
"path",
".",
"join",
"(",
"work_dir",
",",
"'fixed_header.vcf'",
")",
"filter_regex",
"=",
"re",
".",
"escape",
"(",
"'\"%s\"'",
"%",
"filter_expression",
")",
"with",
"open",
"(",
"malformed_header",
",",
"'r'",
")",
"as",
"f",
",",
"open",
"(",
"fixed_header",
",",
"'w'",
")",
"as",
"g",
":",
"for",
"line",
"in",
"f",
":",
"g",
".",
"write",
"(",
"re",
".",
"sub",
"(",
"filter_regex",
",",
"filter_expression",
",",
"line",
")",
")",
"return",
"job",
".",
"fileStore",
".",
"writeGlobalFile",
"(",
"fixed_header",
")"
] |
022a615fc3dc98fc1aaa7bfd232409962ca44fbd
|
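The header clean-up at the end of gatk_variant_filtration is an ordinary re.escape/re.sub substitution. In isolation, with a made-up header line that mimics the doubled quotes the docstring describes:

import re

filter_expression = 'QD < 2.0 || FS > 60.0'                         # assumed JEXL expression
line = 'FILTER=<Description="%s">' % ('"%s"' % filter_expression)   # doubled quotes
fixed = re.sub(re.escape('"%s"' % filter_expression), filter_expression, line)
print(fixed)  # FILTER=<Description="QD < 2.0 || FS > 60.0">
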
test
|
gatk_variant_recalibrator
|
Runs either SNP or INDEL variant quality score recalibration using GATK VariantRecalibrator. Because the VQSR method
models SNPs and INDELs differently, VQSR must be run separately for these variant types.
:param JobFunctionWrappingJob job: passed automatically by Toil
:param str mode: Determines variant recalibration mode (SNP or INDEL)
:param str vcf: FileStoreID for input VCF file
:param str ref_fasta: FileStoreID for reference genome fasta
:param str ref_fai: FileStoreID for reference genome index file
:param str ref_dict: FileStoreID for reference genome sequence dictionary file
:param list[str] annotations: List of GATK variant annotations to filter on
:param str hapmap: FileStoreID for HapMap resource file, required for SNP VQSR
:param str omni: FileStoreID for Omni resource file, required for SNP VQSR
:param str phase: FileStoreID for 1000G resource file, required for SNP VQSR
    :param str dbsnp: FileStoreID for dbSNP resource file, required for SNP and INDEL VQSR
:param str mills: FileStoreID for Mills resource file, required for INDEL VQSR
:param int max_gaussians: Number of Gaussians used during training, default is 4
:param bool unsafe_mode: If True, runs gatk UNSAFE mode: "-U ALLOW_SEQ_DICT_INCOMPATIBILITY"
:return: FileStoreID for the variant recalibration table, tranche file, and plots file
:rtype: tuple
|
src/toil_lib/tools/variant_manipulation.py
|
def gatk_variant_recalibrator(job,
mode,
vcf,
ref_fasta, ref_fai, ref_dict,
annotations,
hapmap=None, omni=None, phase=None, dbsnp=None, mills=None,
max_gaussians=4,
unsafe_mode=False):
"""
Runs either SNP or INDEL variant quality score recalibration using GATK VariantRecalibrator. Because the VQSR method
models SNPs and INDELs differently, VQSR must be run separately for these variant types.
:param JobFunctionWrappingJob job: passed automatically by Toil
:param str mode: Determines variant recalibration mode (SNP or INDEL)
:param str vcf: FileStoreID for input VCF file
:param str ref_fasta: FileStoreID for reference genome fasta
:param str ref_fai: FileStoreID for reference genome index file
:param str ref_dict: FileStoreID for reference genome sequence dictionary file
:param list[str] annotations: List of GATK variant annotations to filter on
:param str hapmap: FileStoreID for HapMap resource file, required for SNP VQSR
:param str omni: FileStoreID for Omni resource file, required for SNP VQSR
:param str phase: FileStoreID for 1000G resource file, required for SNP VQSR
    :param str dbsnp: FileStoreID for dbSNP resource file, required for SNP and INDEL VQSR
:param str mills: FileStoreID for Mills resource file, required for INDEL VQSR
:param int max_gaussians: Number of Gaussians used during training, default is 4
:param bool unsafe_mode: If True, runs gatk UNSAFE mode: "-U ALLOW_SEQ_DICT_INCOMPATIBILITY"
:return: FileStoreID for the variant recalibration table, tranche file, and plots file
:rtype: tuple
"""
mode = mode.upper()
inputs = {'genome.fa': ref_fasta,
'genome.fa.fai': ref_fai,
'genome.dict': ref_dict,
'input.vcf': vcf}
# Refer to GATK documentation for description of recommended parameters:
# https://software.broadinstitute.org/gatk/documentation/article?id=1259
# https://software.broadinstitute.org/gatk/documentation/article?id=2805
# This base command includes parameters for both INDEL and SNP VQSR.
command = ['-T', 'VariantRecalibrator',
'-R', 'genome.fa',
'-input', 'input.vcf',
'-tranche', '100.0',
'-tranche', '99.9',
'-tranche', '99.0',
'-tranche', '90.0',
'--maxGaussians', str(max_gaussians),
'-recalFile', 'output.recal',
'-tranchesFile', 'output.tranches',
'-rscriptFile', 'output.plots.R']
# Parameters and resource files for SNP VQSR.
if mode == 'SNP':
command.extend(
['-resource:hapmap,known=false,training=true,truth=true,prior=15.0', 'hapmap.vcf',
'-resource:omni,known=false,training=true,truth=true,prior=12.0', 'omni.vcf',
'-resource:dbsnp,known=true,training=false,truth=false,prior=2.0', 'dbsnp.vcf',
'-resource:1000G,known=false,training=true,truth=false,prior=10.0', '1000G.vcf',
'-mode', 'SNP'])
inputs['hapmap.vcf'] = hapmap
inputs['omni.vcf'] = omni
inputs['dbsnp.vcf'] = dbsnp
inputs['1000G.vcf'] = phase
# Parameters and resource files for INDEL VQSR
elif mode == 'INDEL':
command.extend(
['-resource:mills,known=false,training=true,truth=true,prior=12.0', 'mills.vcf',
'-resource:dbsnp,known=true,training=false,truth=false,prior=2.0', 'dbsnp.vcf',
'-mode', 'INDEL'])
inputs['mills.vcf'] = mills
inputs['dbsnp.vcf'] = dbsnp
else:
raise ValueError('Variant filter modes can be SNP or INDEL, got %s' % mode)
for annotation in annotations:
command.extend(['-an', annotation])
if unsafe_mode:
command.extend(['-U', 'ALLOW_SEQ_DICT_INCOMPATIBILITY'])
# Delay reading in files until function is configured
work_dir = job.fileStore.getLocalTempDir()
for name, file_store_id in inputs.iteritems():
job.fileStore.readGlobalFile(file_store_id, os.path.join(work_dir, name))
job.fileStore.logToMaster('Running GATK VariantRecalibrator on {mode}s using the following annotations:\n'
'{annotations}'.format(mode=mode, annotations='\n'.join(annotations)))
docker_parameters = ['--rm', 'log-driver', 'none',
'-e', 'JAVA_OPTS=-Djava.io.tmpdir=/data/ -Xmx{}'.format(job.memory)]
dockerCall(job=job, workDir=work_dir,
parameters=command,
tool='quay.io/ucsc_cgl/gatk:3.5--dba6dae49156168a909c43330350c6161dc7ecc2',
dockerParameters=docker_parameters)
recal_id = job.fileStore.writeGlobalFile(os.path.join(work_dir, 'output.recal'))
tranches_id = job.fileStore.writeGlobalFile(os.path.join(work_dir, 'output.tranches'))
plots_id = job.fileStore.writeGlobalFile(os.path.join(work_dir, 'output.plots.R'))
return recal_id, tranches_id, plots_id
|
def gatk_variant_recalibrator(job,
mode,
vcf,
ref_fasta, ref_fai, ref_dict,
annotations,
hapmap=None, omni=None, phase=None, dbsnp=None, mills=None,
max_gaussians=4,
unsafe_mode=False):
"""
Runs either SNP or INDEL variant quality score recalibration using GATK VariantRecalibrator. Because the VQSR method
models SNPs and INDELs differently, VQSR must be run separately for these variant types.
:param JobFunctionWrappingJob job: passed automatically by Toil
:param str mode: Determines variant recalibration mode (SNP or INDEL)
:param str vcf: FileStoreID for input VCF file
:param str ref_fasta: FileStoreID for reference genome fasta
:param str ref_fai: FileStoreID for reference genome index file
:param str ref_dict: FileStoreID for reference genome sequence dictionary file
:param list[str] annotations: List of GATK variant annotations to filter on
:param str hapmap: FileStoreID for HapMap resource file, required for SNP VQSR
:param str omni: FileStoreID for Omni resource file, required for SNP VQSR
:param str phase: FileStoreID for 1000G resource file, required for SNP VQSR
    :param str dbsnp: FileStoreID for dbSNP resource file, required for SNP and INDEL VQSR
:param str mills: FileStoreID for Mills resource file, required for INDEL VQSR
:param int max_gaussians: Number of Gaussians used during training, default is 4
:param bool unsafe_mode: If True, runs gatk UNSAFE mode: "-U ALLOW_SEQ_DICT_INCOMPATIBILITY"
:return: FileStoreID for the variant recalibration table, tranche file, and plots file
:rtype: tuple
"""
mode = mode.upper()
inputs = {'genome.fa': ref_fasta,
'genome.fa.fai': ref_fai,
'genome.dict': ref_dict,
'input.vcf': vcf}
# Refer to GATK documentation for description of recommended parameters:
# https://software.broadinstitute.org/gatk/documentation/article?id=1259
# https://software.broadinstitute.org/gatk/documentation/article?id=2805
# This base command includes parameters for both INDEL and SNP VQSR.
command = ['-T', 'VariantRecalibrator',
'-R', 'genome.fa',
'-input', 'input.vcf',
'-tranche', '100.0',
'-tranche', '99.9',
'-tranche', '99.0',
'-tranche', '90.0',
'--maxGaussians', str(max_gaussians),
'-recalFile', 'output.recal',
'-tranchesFile', 'output.tranches',
'-rscriptFile', 'output.plots.R']
# Parameters and resource files for SNP VQSR.
if mode == 'SNP':
command.extend(
['-resource:hapmap,known=false,training=true,truth=true,prior=15.0', 'hapmap.vcf',
'-resource:omni,known=false,training=true,truth=true,prior=12.0', 'omni.vcf',
'-resource:dbsnp,known=true,training=false,truth=false,prior=2.0', 'dbsnp.vcf',
'-resource:1000G,known=false,training=true,truth=false,prior=10.0', '1000G.vcf',
'-mode', 'SNP'])
inputs['hapmap.vcf'] = hapmap
inputs['omni.vcf'] = omni
inputs['dbsnp.vcf'] = dbsnp
inputs['1000G.vcf'] = phase
# Parameters and resource files for INDEL VQSR
elif mode == 'INDEL':
command.extend(
['-resource:mills,known=false,training=true,truth=true,prior=12.0', 'mills.vcf',
'-resource:dbsnp,known=true,training=false,truth=false,prior=2.0', 'dbsnp.vcf',
'-mode', 'INDEL'])
inputs['mills.vcf'] = mills
inputs['dbsnp.vcf'] = dbsnp
else:
raise ValueError('Variant filter modes can be SNP or INDEL, got %s' % mode)
for annotation in annotations:
command.extend(['-an', annotation])
if unsafe_mode:
command.extend(['-U', 'ALLOW_SEQ_DICT_INCOMPATIBILITY'])
# Delay reading in files until function is configured
work_dir = job.fileStore.getLocalTempDir()
for name, file_store_id in inputs.iteritems():
job.fileStore.readGlobalFile(file_store_id, os.path.join(work_dir, name))
job.fileStore.logToMaster('Running GATK VariantRecalibrator on {mode}s using the following annotations:\n'
'{annotations}'.format(mode=mode, annotations='\n'.join(annotations)))
    docker_parameters = ['--rm', '--log-driver', 'none',
'-e', 'JAVA_OPTS=-Djava.io.tmpdir=/data/ -Xmx{}'.format(job.memory)]
dockerCall(job=job, workDir=work_dir,
parameters=command,
tool='quay.io/ucsc_cgl/gatk:3.5--dba6dae49156168a909c43330350c6161dc7ecc2',
dockerParameters=docker_parameters)
recal_id = job.fileStore.writeGlobalFile(os.path.join(work_dir, 'output.recal'))
tranches_id = job.fileStore.writeGlobalFile(os.path.join(work_dir, 'output.tranches'))
plots_id = job.fileStore.writeGlobalFile(os.path.join(work_dir, 'output.plots.R'))
return recal_id, tranches_id, plots_id
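# Usage sketch (not from the repository): a minimal example of scheduling this job
# function from a parent Toil job. Every FileStoreID pulled out of `inputs`, the
# annotation list and the resource figures are hypothetical placeholders; adjust them
# to the data actually present in the job store.
from toil_lib.tools.variant_manipulation import gatk_variant_recalibrator

def build_snp_vqsr_model(job, inputs):
    # A typical SNP annotation set; trim it to the annotations present in the VCF.
    snp_annotations = ['QD', 'FS', 'MQ', 'MQRankSum', 'ReadPosRankSum', 'SOR']
    vqsr = job.addChildJobFn(gatk_variant_recalibrator,
                             'SNP',
                             inputs['vcf'],
                             inputs['ref_fasta'], inputs['ref_fai'], inputs['ref_dict'],
                             snp_annotations,
                             hapmap=inputs['hapmap'], omni=inputs['omni'],
                             phase=inputs['phase'], dbsnp=inputs['dbsnp'],
                             cores=1, memory='10G', disk='20G')
    # vqsr.rv() resolves to the (recal_id, tranches_id, plots_id) tuple once the child runs.
    return vqsr.rv()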
|
[
"Runs",
"either",
"SNP",
"or",
"INDEL",
"variant",
"quality",
"score",
"recalibration",
"using",
"GATK",
"VariantRecalibrator",
".",
"Because",
"the",
"VQSR",
"method",
"models",
"SNPs",
"and",
"INDELs",
"differently",
"VQSR",
"must",
"be",
"run",
"separately",
"for",
"these",
"variant",
"types",
"."
] |
BD2KGenomics/toil-lib
|
python
|
https://github.com/BD2KGenomics/toil-lib/blob/022a615fc3dc98fc1aaa7bfd232409962ca44fbd/src/toil_lib/tools/variant_manipulation.py#L99-L203
|
[
"def",
"gatk_variant_recalibrator",
"(",
"job",
",",
"mode",
",",
"vcf",
",",
"ref_fasta",
",",
"ref_fai",
",",
"ref_dict",
",",
"annotations",
",",
"hapmap",
"=",
"None",
",",
"omni",
"=",
"None",
",",
"phase",
"=",
"None",
",",
"dbsnp",
"=",
"None",
",",
"mills",
"=",
"None",
",",
"max_gaussians",
"=",
"4",
",",
"unsafe_mode",
"=",
"False",
")",
":",
"mode",
"=",
"mode",
".",
"upper",
"(",
")",
"inputs",
"=",
"{",
"'genome.fa'",
":",
"ref_fasta",
",",
"'genome.fa.fai'",
":",
"ref_fai",
",",
"'genome.dict'",
":",
"ref_dict",
",",
"'input.vcf'",
":",
"vcf",
"}",
"# Refer to GATK documentation for description of recommended parameters:",
"# https://software.broadinstitute.org/gatk/documentation/article?id=1259",
"# https://software.broadinstitute.org/gatk/documentation/article?id=2805",
"# This base command includes parameters for both INDEL and SNP VQSR.",
"command",
"=",
"[",
"'-T'",
",",
"'VariantRecalibrator'",
",",
"'-R'",
",",
"'genome.fa'",
",",
"'-input'",
",",
"'input.vcf'",
",",
"'-tranche'",
",",
"'100.0'",
",",
"'-tranche'",
",",
"'99.9'",
",",
"'-tranche'",
",",
"'99.0'",
",",
"'-tranche'",
",",
"'90.0'",
",",
"'--maxGaussians'",
",",
"str",
"(",
"max_gaussians",
")",
",",
"'-recalFile'",
",",
"'output.recal'",
",",
"'-tranchesFile'",
",",
"'output.tranches'",
",",
"'-rscriptFile'",
",",
"'output.plots.R'",
"]",
"# Parameters and resource files for SNP VQSR.",
"if",
"mode",
"==",
"'SNP'",
":",
"command",
".",
"extend",
"(",
"[",
"'-resource:hapmap,known=false,training=true,truth=true,prior=15.0'",
",",
"'hapmap.vcf'",
",",
"'-resource:omni,known=false,training=true,truth=true,prior=12.0'",
",",
"'omni.vcf'",
",",
"'-resource:dbsnp,known=true,training=false,truth=false,prior=2.0'",
",",
"'dbsnp.vcf'",
",",
"'-resource:1000G,known=false,training=true,truth=false,prior=10.0'",
",",
"'1000G.vcf'",
",",
"'-mode'",
",",
"'SNP'",
"]",
")",
"inputs",
"[",
"'hapmap.vcf'",
"]",
"=",
"hapmap",
"inputs",
"[",
"'omni.vcf'",
"]",
"=",
"omni",
"inputs",
"[",
"'dbsnp.vcf'",
"]",
"=",
"dbsnp",
"inputs",
"[",
"'1000G.vcf'",
"]",
"=",
"phase",
"# Parameters and resource files for INDEL VQSR",
"elif",
"mode",
"==",
"'INDEL'",
":",
"command",
".",
"extend",
"(",
"[",
"'-resource:mills,known=false,training=true,truth=true,prior=12.0'",
",",
"'mills.vcf'",
",",
"'-resource:dbsnp,known=true,training=false,truth=false,prior=2.0'",
",",
"'dbsnp.vcf'",
",",
"'-mode'",
",",
"'INDEL'",
"]",
")",
"inputs",
"[",
"'mills.vcf'",
"]",
"=",
"mills",
"inputs",
"[",
"'dbsnp.vcf'",
"]",
"=",
"dbsnp",
"else",
":",
"raise",
"ValueError",
"(",
"'Variant filter modes can be SNP or INDEL, got %s'",
"%",
"mode",
")",
"for",
"annotation",
"in",
"annotations",
":",
"command",
".",
"extend",
"(",
"[",
"'-an'",
",",
"annotation",
"]",
")",
"if",
"unsafe_mode",
":",
"command",
".",
"extend",
"(",
"[",
"'-U'",
",",
"'ALLOW_SEQ_DICT_INCOMPATIBILITY'",
"]",
")",
"# Delay reading in files until function is configured",
"work_dir",
"=",
"job",
".",
"fileStore",
".",
"getLocalTempDir",
"(",
")",
"for",
"name",
",",
"file_store_id",
"in",
"inputs",
".",
"iteritems",
"(",
")",
":",
"job",
".",
"fileStore",
".",
"readGlobalFile",
"(",
"file_store_id",
",",
"os",
".",
"path",
".",
"join",
"(",
"work_dir",
",",
"name",
")",
")",
"job",
".",
"fileStore",
".",
"logToMaster",
"(",
"'Running GATK VariantRecalibrator on {mode}s using the following annotations:\\n'",
"'{annotations}'",
".",
"format",
"(",
"mode",
"=",
"mode",
",",
"annotations",
"=",
"'\\n'",
".",
"join",
"(",
"annotations",
")",
")",
")",
"docker_parameters",
"=",
"[",
"'--rm'",
",",
"'log-driver'",
",",
"'none'",
",",
"'-e'",
",",
"'JAVA_OPTS=-Djava.io.tmpdir=/data/ -Xmx{}'",
".",
"format",
"(",
"job",
".",
"memory",
")",
"]",
"dockerCall",
"(",
"job",
"=",
"job",
",",
"workDir",
"=",
"work_dir",
",",
"parameters",
"=",
"command",
",",
"tool",
"=",
"'quay.io/ucsc_cgl/gatk:3.5--dba6dae49156168a909c43330350c6161dc7ecc2'",
",",
"dockerParameters",
"=",
"docker_parameters",
")",
"recal_id",
"=",
"job",
".",
"fileStore",
".",
"writeGlobalFile",
"(",
"os",
".",
"path",
".",
"join",
"(",
"work_dir",
",",
"'output.recal'",
")",
")",
"tranches_id",
"=",
"job",
".",
"fileStore",
".",
"writeGlobalFile",
"(",
"os",
".",
"path",
".",
"join",
"(",
"work_dir",
",",
"'output.tranches'",
")",
")",
"plots_id",
"=",
"job",
".",
"fileStore",
".",
"writeGlobalFile",
"(",
"os",
".",
"path",
".",
"join",
"(",
"work_dir",
",",
"'output.plots.R'",
")",
")",
"return",
"recal_id",
",",
"tranches_id",
",",
"plots_id"
] |
022a615fc3dc98fc1aaa7bfd232409962ca44fbd
|
test
|
gatk_apply_variant_recalibration
|
Applies variant quality score recalibration to VCF file using GATK ApplyRecalibration
:param JobFunctionWrappingJob job: passed automatically by Toil
:param str mode: Determines variant recalibration mode (SNP or INDEL)
:param str vcf: FileStoreID for input VCF file
:param str recal_table: FileStoreID for recalibration table file
:param str tranches: FileStoreID for tranches file
:param str ref_fasta: FileStoreID for reference genome fasta
:param str ref_fai: FileStoreID for reference genome index file
:param str ref_dict: FileStoreID for reference genome sequence dictionary file
:param float ts_filter_level: Sensitivity expressed as a percentage, default is 99.0
:param bool unsafe_mode: If True, runs gatk UNSAFE mode: "-U ALLOW_SEQ_DICT_INCOMPATIBILITY"
:return: FileStoreID for recalibrated VCF file
:rtype: str
|
src/toil_lib/tools/variant_manipulation.py
|
def gatk_apply_variant_recalibration(job,
mode,
vcf,
recal_table, tranches,
ref_fasta, ref_fai, ref_dict,
ts_filter_level=99.0,
unsafe_mode=False):
"""
Applies variant quality score recalibration to VCF file using GATK ApplyRecalibration
:param JobFunctionWrappingJob job: passed automatically by Toil
:param str mode: Determines variant recalibration mode (SNP or INDEL)
:param str vcf: FileStoreID for input VCF file
:param str recal_table: FileStoreID for recalibration table file
:param str tranches: FileStoreID for tranches file
:param str ref_fasta: FileStoreID for reference genome fasta
:param str ref_fai: FileStoreID for reference genome index file
:param str ref_dict: FileStoreID for reference genome sequence dictionary file
:param float ts_filter_level: Sensitivity expressed as a percentage, default is 99.0
:param bool unsafe_mode: If True, runs gatk UNSAFE mode: "-U ALLOW_SEQ_DICT_INCOMPATIBILITY"
:return: FileStoreID for recalibrated VCF file
:rtype: str
"""
inputs = {'genome.fa': ref_fasta,
'genome.fa.fai': ref_fai,
'genome.dict': ref_dict,
'input.vcf': vcf,
'recal': recal_table,
'tranches': tranches}
work_dir = job.fileStore.getLocalTempDir()
for name, file_store_id in inputs.iteritems():
job.fileStore.readGlobalFile(file_store_id, os.path.join(work_dir, name))
mode = mode.upper()
# GATK recommended parameters:
# https://software.broadinstitute.org/gatk/documentation/article?id=2805
command = ['-T', 'ApplyRecalibration',
'-mode', mode,
'-R', 'genome.fa',
'-input', 'input.vcf',
'-o', 'vqsr.vcf',
'-ts_filter_level', str(ts_filter_level),
'-recalFile', 'recal',
'-tranchesFile', 'tranches']
if unsafe_mode:
command.extend(['-U', 'ALLOW_SEQ_DICT_INCOMPATIBILITY'])
job.fileStore.logToMaster('Running GATK ApplyRecalibration on {mode}s '
'with a sensitivity of {sensitivity}%'.format(mode=mode,
sensitivity=ts_filter_level))
    docker_parameters = ['--rm', '--log-driver', 'none',
'-e', 'JAVA_OPTS=-Djava.io.tmpdir=/data/ -Xmx{}'.format(job.memory)]
dockerCall(job=job, workDir=work_dir,
parameters=command,
tool='quay.io/ucsc_cgl/gatk:3.5--dba6dae49156168a909c43330350c6161dc7ecc2',
dockerParameters=docker_parameters)
return job.fileStore.writeGlobalFile(os.path.join(work_dir, 'vqsr.vcf'))
|
def gatk_apply_variant_recalibration(job,
mode,
vcf,
recal_table, tranches,
ref_fasta, ref_fai, ref_dict,
ts_filter_level=99.0,
unsafe_mode=False):
"""
Applies variant quality score recalibration to VCF file using GATK ApplyRecalibration
:param JobFunctionWrappingJob job: passed automatically by Toil
:param str mode: Determines variant recalibration mode (SNP or INDEL)
:param str vcf: FileStoreID for input VCF file
:param str recal_table: FileStoreID for recalibration table file
:param str tranches: FileStoreID for tranches file
:param str ref_fasta: FileStoreID for reference genome fasta
:param str ref_fai: FileStoreID for reference genome index file
:param str ref_dict: FileStoreID for reference genome sequence dictionary file
:param float ts_filter_level: Sensitivity expressed as a percentage, default is 99.0
:param bool unsafe_mode: If True, runs gatk UNSAFE mode: "-U ALLOW_SEQ_DICT_INCOMPATIBILITY"
:return: FileStoreID for recalibrated VCF file
:rtype: str
"""
inputs = {'genome.fa': ref_fasta,
'genome.fa.fai': ref_fai,
'genome.dict': ref_dict,
'input.vcf': vcf,
'recal': recal_table,
'tranches': tranches}
work_dir = job.fileStore.getLocalTempDir()
for name, file_store_id in inputs.iteritems():
job.fileStore.readGlobalFile(file_store_id, os.path.join(work_dir, name))
mode = mode.upper()
# GATK recommended parameters:
# https://software.broadinstitute.org/gatk/documentation/article?id=2805
command = ['-T', 'ApplyRecalibration',
'-mode', mode,
'-R', 'genome.fa',
'-input', 'input.vcf',
'-o', 'vqsr.vcf',
'-ts_filter_level', str(ts_filter_level),
'-recalFile', 'recal',
'-tranchesFile', 'tranches']
if unsafe_mode:
command.extend(['-U', 'ALLOW_SEQ_DICT_INCOMPATIBILITY'])
job.fileStore.logToMaster('Running GATK ApplyRecalibration on {mode}s '
'with a sensitivity of {sensitivity}%'.format(mode=mode,
sensitivity=ts_filter_level))
    docker_parameters = ['--rm', '--log-driver', 'none',
'-e', 'JAVA_OPTS=-Djava.io.tmpdir=/data/ -Xmx{}'.format(job.memory)]
dockerCall(job=job, workDir=work_dir,
parameters=command,
tool='quay.io/ucsc_cgl/gatk:3.5--dba6dae49156168a909c43330350c6161dc7ecc2',
dockerParameters=docker_parameters)
return job.fileStore.writeGlobalFile(os.path.join(work_dir, 'vqsr.vcf'))
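# Usage sketch (not from the repository): the usual pairing with the model-building job
# above. `inputs` is a hypothetical dict of FileStoreIDs; rv(0)/rv(1) index into the
# (recal, tranches, plots) tuple promised by gatk_variant_recalibrator.
from toil_lib.tools.variant_manipulation import (gatk_variant_recalibrator,
                                                 gatk_apply_variant_recalibration)

def snp_vqsr(job, inputs, annotations):
    recal_job = job.addChildJobFn(gatk_variant_recalibrator,
                                  'SNP', inputs['vcf'],
                                  inputs['ref_fasta'], inputs['ref_fai'], inputs['ref_dict'],
                                  annotations,
                                  hapmap=inputs['hapmap'], omni=inputs['omni'],
                                  phase=inputs['phase'], dbsnp=inputs['dbsnp'],
                                  memory='10G')
    # Apply the model as a follow-on so it only runs after the recalibration table exists.
    apply_job = recal_job.addFollowOnJobFn(gatk_apply_variant_recalibration,
                                           'SNP', inputs['vcf'],
                                           recal_job.rv(0), recal_job.rv(1),
                                           inputs['ref_fasta'], inputs['ref_fai'], inputs['ref_dict'],
                                           ts_filter_level=99.0,
                                           memory='10G')
    return apply_job.rv()  # FileStoreID of the recalibrated VCF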
|
[
"Applies",
"variant",
"quality",
"score",
"recalibration",
"to",
"VCF",
"file",
"using",
"GATK",
"ApplyRecalibration"
] |
BD2KGenomics/toil-lib
|
python
|
https://github.com/BD2KGenomics/toil-lib/blob/022a615fc3dc98fc1aaa7bfd232409962ca44fbd/src/toil_lib/tools/variant_manipulation.py#L206-L266
|
[
"def",
"gatk_apply_variant_recalibration",
"(",
"job",
",",
"mode",
",",
"vcf",
",",
"recal_table",
",",
"tranches",
",",
"ref_fasta",
",",
"ref_fai",
",",
"ref_dict",
",",
"ts_filter_level",
"=",
"99.0",
",",
"unsafe_mode",
"=",
"False",
")",
":",
"inputs",
"=",
"{",
"'genome.fa'",
":",
"ref_fasta",
",",
"'genome.fa.fai'",
":",
"ref_fai",
",",
"'genome.dict'",
":",
"ref_dict",
",",
"'input.vcf'",
":",
"vcf",
",",
"'recal'",
":",
"recal_table",
",",
"'tranches'",
":",
"tranches",
"}",
"work_dir",
"=",
"job",
".",
"fileStore",
".",
"getLocalTempDir",
"(",
")",
"for",
"name",
",",
"file_store_id",
"in",
"inputs",
".",
"iteritems",
"(",
")",
":",
"job",
".",
"fileStore",
".",
"readGlobalFile",
"(",
"file_store_id",
",",
"os",
".",
"path",
".",
"join",
"(",
"work_dir",
",",
"name",
")",
")",
"mode",
"=",
"mode",
".",
"upper",
"(",
")",
"# GATK recommended parameters:",
"# https://software.broadinstitute.org/gatk/documentation/article?id=2805",
"command",
"=",
"[",
"'-T'",
",",
"'ApplyRecalibration'",
",",
"'-mode'",
",",
"mode",
",",
"'-R'",
",",
"'genome.fa'",
",",
"'-input'",
",",
"'input.vcf'",
",",
"'-o'",
",",
"'vqsr.vcf'",
",",
"'-ts_filter_level'",
",",
"str",
"(",
"ts_filter_level",
")",
",",
"'-recalFile'",
",",
"'recal'",
",",
"'-tranchesFile'",
",",
"'tranches'",
"]",
"if",
"unsafe_mode",
":",
"command",
".",
"extend",
"(",
"[",
"'-U'",
",",
"'ALLOW_SEQ_DICT_INCOMPATIBILITY'",
"]",
")",
"job",
".",
"fileStore",
".",
"logToMaster",
"(",
"'Running GATK ApplyRecalibration on {mode}s '",
"'with a sensitivity of {sensitivity}%'",
".",
"format",
"(",
"mode",
"=",
"mode",
",",
"sensitivity",
"=",
"ts_filter_level",
")",
")",
"docker_parameters",
"=",
"[",
"'--rm'",
",",
"'log-driver'",
",",
"'none'",
",",
"'-e'",
",",
"'JAVA_OPTS=-Djava.io.tmpdir=/data/ -Xmx{}'",
".",
"format",
"(",
"job",
".",
"memory",
")",
"]",
"dockerCall",
"(",
"job",
"=",
"job",
",",
"workDir",
"=",
"work_dir",
",",
"parameters",
"=",
"command",
",",
"tool",
"=",
"'quay.io/ucsc_cgl/gatk:3.5--dba6dae49156168a909c43330350c6161dc7ecc2'",
",",
"dockerParameters",
"=",
"docker_parameters",
")",
"return",
"job",
".",
"fileStore",
".",
"writeGlobalFile",
"(",
"os",
".",
"path",
".",
"join",
"(",
"work_dir",
",",
"'vqsr.vcf'",
")",
")"
] |
022a615fc3dc98fc1aaa7bfd232409962ca44fbd
|
test
|
gatk_combine_variants
|
Merges VCF files using GATK CombineVariants
:param JobFunctionWrappingJob job: Toil Job instance
:param dict vcfs: Dictionary of VCF FileStoreIDs {sample identifier: FileStoreID}
:param str ref_fasta: FileStoreID for reference genome fasta
:param str ref_fai: FileStoreID for reference genome index file
:param str ref_dict: FileStoreID for reference genome sequence dictionary file
:param str merge_option: Value for --genotypemergeoption flag (Default: 'UNIQUIFY')
'UNIQUIFY': Multiple variants at a single site are merged into a
single variant record.
'UNSORTED': Used to merge VCFs from the same sample
:return: FileStoreID for merged VCF file
:rtype: str
|
src/toil_lib/tools/variant_manipulation.py
|
def gatk_combine_variants(job, vcfs, ref_fasta, ref_fai, ref_dict, merge_option='UNIQUIFY'):
"""
Merges VCF files using GATK CombineVariants
:param JobFunctionWrappingJob job: Toil Job instance
:param dict vcfs: Dictionary of VCF FileStoreIDs {sample identifier: FileStoreID}
:param str ref_fasta: FileStoreID for reference genome fasta
:param str ref_fai: FileStoreID for reference genome index file
:param str ref_dict: FileStoreID for reference genome sequence dictionary file
:param str merge_option: Value for --genotypemergeoption flag (Default: 'UNIQUIFY')
'UNIQUIFY': Multiple variants at a single site are merged into a
single variant record.
'UNSORTED': Used to merge VCFs from the same sample
:return: FileStoreID for merged VCF file
:rtype: str
"""
job.fileStore.logToMaster('Running GATK CombineVariants')
inputs = {'genome.fa': ref_fasta,
'genome.fa.fai': ref_fai,
'genome.dict': ref_dict}
inputs.update(vcfs)
work_dir = job.fileStore.getLocalTempDir()
for name, file_store_id in inputs.iteritems():
job.fileStore.readGlobalFile(file_store_id, os.path.join(work_dir, name))
command = ['-T', 'CombineVariants',
'-R', '/data/genome.fa',
'-o', '/data/merged.vcf',
'--genotypemergeoption', merge_option]
for uuid, vcf_id in vcfs.iteritems():
command.extend(['--variant', os.path.join('/data', uuid)])
    docker_parameters = ['--rm', '--log-driver', 'none',
'-e', 'JAVA_OPTS=-Djava.io.tmpdir=/data/ -Xmx{}'.format(job.memory)]
dockerCall(job=job, workDir=work_dir,
parameters=command,
tool='quay.io/ucsc_cgl/gatk:3.5--dba6dae49156168a909c43330350c6161dc7ecc2',
dockerParameters=docker_parameters)
return job.fileStore.writeGlobalFile(os.path.join(work_dir, 'merged.vcf'))
|
def gatk_combine_variants(job, vcfs, ref_fasta, ref_fai, ref_dict, merge_option='UNIQUIFY'):
"""
Merges VCF files using GATK CombineVariants
:param JobFunctionWrappingJob job: Toil Job instance
:param dict vcfs: Dictionary of VCF FileStoreIDs {sample identifier: FileStoreID}
:param str ref_fasta: FileStoreID for reference genome fasta
:param str ref_fai: FileStoreID for reference genome index file
:param str ref_dict: FileStoreID for reference genome sequence dictionary file
:param str merge_option: Value for --genotypemergeoption flag (Default: 'UNIQUIFY')
'UNIQUIFY': Multiple variants at a single site are merged into a
single variant record.
'UNSORTED': Used to merge VCFs from the same sample
:return: FileStoreID for merged VCF file
:rtype: str
"""
job.fileStore.logToMaster('Running GATK CombineVariants')
inputs = {'genome.fa': ref_fasta,
'genome.fa.fai': ref_fai,
'genome.dict': ref_dict}
inputs.update(vcfs)
work_dir = job.fileStore.getLocalTempDir()
for name, file_store_id in inputs.iteritems():
job.fileStore.readGlobalFile(file_store_id, os.path.join(work_dir, name))
command = ['-T', 'CombineVariants',
'-R', '/data/genome.fa',
'-o', '/data/merged.vcf',
'--genotypemergeoption', merge_option]
for uuid, vcf_id in vcfs.iteritems():
command.extend(['--variant', os.path.join('/data', uuid)])
    docker_parameters = ['--rm', '--log-driver', 'none',
'-e', 'JAVA_OPTS=-Djava.io.tmpdir=/data/ -Xmx{}'.format(job.memory)]
dockerCall(job=job, workDir=work_dir,
parameters=command,
tool='quay.io/ucsc_cgl/gatk:3.5--dba6dae49156168a909c43330350c6161dc7ecc2',
dockerParameters=docker_parameters)
return job.fileStore.writeGlobalFile(os.path.join(work_dir, 'merged.vcf'))
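# Usage sketch (not from the repository): merging per-sample VCFs inside a parent job.
# `sample_vcf_ids` is a hypothetical {sample name: FileStoreID} mapping; the dictionary
# keys become both the staged file name and the '--variant /data/<key>' argument, so
# they are given a '.vcf' suffix here.
from toil_lib.tools.variant_manipulation import gatk_combine_variants

def merge_sample_vcfs(job, ref_fasta, ref_fai, ref_dict, sample_vcf_ids):
    vcfs = {name + '.vcf': vcf_id for name, vcf_id in sample_vcf_ids.items()}
    merged = job.addChildJobFn(gatk_combine_variants,
                               vcfs, ref_fasta, ref_fai, ref_dict,
                               merge_option='UNIQUIFY',
                               memory='10G')
    return merged.rv()  # FileStoreID of merged.vcf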
|
[
"Merges",
"VCF",
"files",
"using",
"GATK",
"CombineVariants"
] |
BD2KGenomics/toil-lib
|
python
|
https://github.com/BD2KGenomics/toil-lib/blob/022a615fc3dc98fc1aaa7bfd232409962ca44fbd/src/toil_lib/tools/variant_manipulation.py#L269-L311
|
[
"def",
"gatk_combine_variants",
"(",
"job",
",",
"vcfs",
",",
"ref_fasta",
",",
"ref_fai",
",",
"ref_dict",
",",
"merge_option",
"=",
"'UNIQUIFY'",
")",
":",
"job",
".",
"fileStore",
".",
"logToMaster",
"(",
"'Running GATK CombineVariants'",
")",
"inputs",
"=",
"{",
"'genome.fa'",
":",
"ref_fasta",
",",
"'genome.fa.fai'",
":",
"ref_fai",
",",
"'genome.dict'",
":",
"ref_dict",
"}",
"inputs",
".",
"update",
"(",
"vcfs",
")",
"work_dir",
"=",
"job",
".",
"fileStore",
".",
"getLocalTempDir",
"(",
")",
"for",
"name",
",",
"file_store_id",
"in",
"inputs",
".",
"iteritems",
"(",
")",
":",
"job",
".",
"fileStore",
".",
"readGlobalFile",
"(",
"file_store_id",
",",
"os",
".",
"path",
".",
"join",
"(",
"work_dir",
",",
"name",
")",
")",
"command",
"=",
"[",
"'-T'",
",",
"'CombineVariants'",
",",
"'-R'",
",",
"'/data/genome.fa'",
",",
"'-o'",
",",
"'/data/merged.vcf'",
",",
"'--genotypemergeoption'",
",",
"merge_option",
"]",
"for",
"uuid",
",",
"vcf_id",
"in",
"vcfs",
".",
"iteritems",
"(",
")",
":",
"command",
".",
"extend",
"(",
"[",
"'--variant'",
",",
"os",
".",
"path",
".",
"join",
"(",
"'/data'",
",",
"uuid",
")",
"]",
")",
"docker_parameters",
"=",
"[",
"'--rm'",
",",
"'log-driver'",
",",
"'none'",
",",
"'-e'",
",",
"'JAVA_OPTS=-Djava.io.tmpdir=/data/ -Xmx{}'",
".",
"format",
"(",
"job",
".",
"memory",
")",
"]",
"dockerCall",
"(",
"job",
"=",
"job",
",",
"workDir",
"=",
"work_dir",
",",
"parameters",
"=",
"command",
",",
"tool",
"=",
"'quay.io/ucsc_cgl/gatk:3.5--dba6dae49156168a909c43330350c6161dc7ecc2'",
",",
"dockerParameters",
"=",
"docker_parameters",
")",
"return",
"job",
".",
"fileStore",
".",
"writeGlobalFile",
"(",
"os",
".",
"path",
".",
"join",
"(",
"work_dir",
",",
"'merged.vcf'",
")",
")"
] |
022a615fc3dc98fc1aaa7bfd232409962ca44fbd
|
test
|
bam_quickcheck
|
Perform a quick check on a BAM via `samtools quickcheck`.
This will detect obvious BAM errors such as truncation.
    :param str bam_path: path to BAM file to be checked
    :rtype: boolean
    :return: True if the BAM is valid, False if the BAM is invalid or something related to the call went wrong
|
src/toil_lib/validators.py
|
def bam_quickcheck(bam_path):
"""
Perform a quick check on a BAM via `samtools quickcheck`.
This will detect obvious BAM errors such as truncation.
    :param str bam_path: path to BAM file to be checked
    :rtype: boolean
    :return: True if the BAM is valid, False if the BAM is invalid or something related to the call went wrong
"""
directory, bam_name = os.path.split(bam_path)
exit_code = subprocess.call(['docker', 'run', '-v', directory + ':/data',
'quay.io/ucsc_cgl/samtools:1.3--256539928ea162949d8a65ca5c79a72ef557ce7c',
'quickcheck', '-vv', '/data/' + bam_name])
if exit_code != 0:
return False
return True
|
def bam_quickcheck(bam_path):
"""
Perform a quick check on a BAM via `samtools quickcheck`.
This will detect obvious BAM errors such as truncation.
    :param str bam_path: path to BAM file to be checked
    :rtype: boolean
    :return: True if the BAM is valid, False if the BAM is invalid or something related to the call went wrong
"""
directory, bam_name = os.path.split(bam_path)
exit_code = subprocess.call(['docker', 'run', '-v', directory + ':/data',
'quay.io/ucsc_cgl/samtools:1.3--256539928ea162949d8a65ca5c79a72ef557ce7c',
'quickcheck', '-vv', '/data/' + bam_name])
if exit_code != 0:
return False
return True
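# Usage sketch (not from the repository): gate a pipeline on BAM integrity before any
# expensive downstream step. The path is hypothetical; Docker must be available on the
# host because the check runs inside the samtools container.
from toil_lib.validators import bam_quickcheck

bam_path = '/mnt/data/sample.bam'
if not bam_quickcheck(bam_path):
    raise RuntimeError('%s failed samtools quickcheck (truncated or malformed?)' % bam_path)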
|
[
"Perform",
"a",
"quick",
"check",
"on",
"a",
"BAM",
"via",
"samtools",
"quickcheck",
".",
"This",
"will",
"detect",
"obvious",
"BAM",
"errors",
"such",
"as",
"truncation",
"."
] |
BD2KGenomics/toil-lib
|
python
|
https://github.com/BD2KGenomics/toil-lib/blob/022a615fc3dc98fc1aaa7bfd232409962ca44fbd/src/toil_lib/validators.py#L8-L24
|
[
"def",
"bam_quickcheck",
"(",
"bam_path",
")",
":",
"directory",
",",
"bam_name",
"=",
"os",
".",
"path",
".",
"split",
"(",
"bam_path",
")",
"exit_code",
"=",
"subprocess",
".",
"call",
"(",
"[",
"'docker'",
",",
"'run'",
",",
"'-v'",
",",
"directory",
"+",
"':/data'",
",",
"'quay.io/ucsc_cgl/samtools:1.3--256539928ea162949d8a65ca5c79a72ef557ce7c'",
",",
"'quickcheck'",
",",
"'-vv'",
",",
"'/data/'",
"+",
"bam_name",
"]",
")",
"if",
"exit_code",
"!=",
"0",
":",
"return",
"False",
"return",
"True"
] |
022a615fc3dc98fc1aaa7bfd232409962ca44fbd
|
test
|
load_handlers
|
Given a dictionary mapping which looks like the following, import the
objects based on the dotted path and yield the packet type and handler as
pairs.
If the special string '*' is passed, don't process that, pass it on as it
is a wildcard.
    If a non-string object is given for either packet or handler (key or
value) assume these are the objects to use and yield them.
::
{
'rfxcom.protocol.Status': 'home.collect.logging_handler',
'rfxcom.protocol.Elec': 'home.collect.elec_handler',
'rfxcom.protocol.TempHumidity': 'home.collect.temp_humidity_handler',
'*': 'home.collect.logging_handler'
}
|
home/collect/handlers.py
|
def load_handlers(handler_mapping):
"""
Given a dictionary mapping which looks like the following, import the
objects based on the dotted path and yield the packet type and handler as
pairs.
If the special string '*' is passed, don't process that, pass it on as it
is a wildcard.
    If a non-string object is given for either packet or handler (key or
value) assume these are the objects to use and yield them.
::
{
'rfxcom.protocol.Status': 'home.collect.logging_handler',
'rfxcom.protocol.Elec': 'home.collect.elec_handler',
'rfxcom.protocol.TempHumidity': 'home.collect.temp_humidity_handler',
'*': 'home.collect.logging_handler'
}
"""
handlers = {}
for packet_type, handler in handler_mapping.items():
if packet_type == '*':
Packet = packet_type
elif isinstance(packet_type, str):
Packet = importer(packet_type)
else:
Packet = packet_type
if isinstance(handler, str):
Handler = importer(handler)
else:
Handler = handler
if Packet in handlers:
raise HandlerConfigError(
"Handler already provided for packet %s" % Packet)
handlers[Packet] = Handler
return handlers
|
def load_handlers(handler_mapping):
"""
Given a dictionary mapping which looks like the following, import the
objects based on the dotted path and yield the packet type and handler as
pairs.
If the special string '*' is passed, don't process that, pass it on as it
is a wildcard.
    If a non-string object is given for either packet or handler (key or
value) assume these are the objects to use and yield them.
::
{
'rfxcom.protocol.Status': 'home.collect.logging_handler',
'rfxcom.protocol.Elec': 'home.collect.elec_handler',
'rfxcom.protocol.TempHumidity': 'home.collect.temp_humidity_handler',
'*': 'home.collect.logging_handler'
}
"""
handlers = {}
for packet_type, handler in handler_mapping.items():
if packet_type == '*':
Packet = packet_type
elif isinstance(packet_type, str):
Packet = importer(packet_type)
else:
Packet = packet_type
if isinstance(handler, str):
Handler = importer(handler)
else:
Handler = handler
if Packet in handlers:
raise HandlerConfigError(
"Handler already provided for packet %s" % Packet)
handlers[Packet] = Handler
return handlers
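# Usage sketch (not from the repository): resolving a handler mapping once at startup.
# The dotted paths below are placeholders and only need to be importable in the running
# project.
from home.collect.handlers import load_handlers

HANDLER_MAPPING = {
    'rfxcom.protocol.Elec': 'home.collect.elec_handler',
    'rfxcom.protocol.TempHumidity': 'home.collect.temp_humidity_handler',
    '*': 'home.collect.logging_handler',
}

# Keys resolve to packet classes (the '*' wildcard passes through untouched) and values
# to callables; configuring the same packet type twice raises HandlerConfigError.
handlers = load_handlers(HANDLER_MAPPING)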
|
[
"Given",
"a",
"dictionary",
"mapping",
"which",
"looks",
"like",
"the",
"following",
"import",
"the",
"objects",
"based",
"on",
"the",
"dotted",
"path",
"and",
"yield",
"the",
"packet",
"type",
"and",
"handler",
"as",
"pairs",
"."
] |
d0ugal/home
|
python
|
https://github.com/d0ugal/home/blob/e984716ae6c74dc8e40346584668ac5cfeaaf520/home/collect/handlers.py#L26-L69
|
[
"def",
"load_handlers",
"(",
"handler_mapping",
")",
":",
"handlers",
"=",
"{",
"}",
"for",
"packet_type",
",",
"handler",
"in",
"handler_mapping",
".",
"items",
"(",
")",
":",
"if",
"packet_type",
"==",
"'*'",
":",
"Packet",
"=",
"packet_type",
"elif",
"isinstance",
"(",
"packet_type",
",",
"str",
")",
":",
"Packet",
"=",
"importer",
"(",
"packet_type",
")",
"else",
":",
"Packet",
"=",
"packet_type",
"if",
"isinstance",
"(",
"handler",
",",
"str",
")",
":",
"Handler",
"=",
"importer",
"(",
"handler",
")",
"else",
":",
"Handler",
"=",
"handler",
"if",
"Packet",
"in",
"handlers",
":",
"raise",
"HandlerConfigError",
"(",
"\"Handler already provided for packet %s\"",
"%",
"Packet",
")",
"handlers",
"[",
"Packet",
"]",
"=",
"Handler",
"return",
"handlers"
] |
e984716ae6c74dc8e40346584668ac5cfeaaf520
|
test
|
write_config
|
Helper to write the JSON configuration to a file
|
src/ols_client/constants.py
|
def write_config(configuration):
"""Helper to write the JSON configuration to a file"""
with open(CONFIG_PATH, 'w') as f:
json.dump(configuration, f, indent=2, sort_keys=True)
|
def write_config(configuration):
"""Helper to write the JSON configuration to a file"""
with open(CONFIG_PATH, 'w') as f:
json.dump(configuration, f, indent=2, sort_keys=True)
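# Usage sketch (not from the repository): persist a small settings dictionary. The key
# name is an arbitrary example; CONFIG_PATH is defined in the same module.
from ols_client.constants import write_config

write_config({'FAVOURITE_ONTOLOGY': 'go'})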
|
[
"Helper",
"to",
"write",
"the",
"JSON",
"configuration",
"to",
"a",
"file"
] |
cthoyt/ols-client
|
python
|
https://github.com/cthoyt/ols-client/blob/8c6bb54888675652d25324184967392d00d128fc/src/ols_client/constants.py#L21-L24
|
[
"def",
"write_config",
"(",
"configuration",
")",
":",
"with",
"open",
"(",
"CONFIG_PATH",
",",
"'w'",
")",
"as",
"f",
":",
"json",
".",
"dump",
"(",
"configuration",
",",
"f",
",",
"indent",
"=",
"2",
",",
"sort_keys",
"=",
"True",
")"
] |
8c6bb54888675652d25324184967392d00d128fc
|
test
|
get_config
|
Gets the configuration for this project from the default JSON file, or writes one if it doesn't exist
:rtype: dict
|
src/ols_client/constants.py
|
def get_config():
"""Gets the configuration for this project from the default JSON file, or writes one if it doesn't exist
:rtype: dict
"""
if not os.path.exists(CONFIG_PATH):
write_config({})
with open(CONFIG_PATH) as f:
return json.load(f)
|
def get_config():
"""Gets the configuration for this project from the default JSON file, or writes one if it doesn't exist
:rtype: dict
"""
if not os.path.exists(CONFIG_PATH):
write_config({})
with open(CONFIG_PATH) as f:
return json.load(f)
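# Usage sketch (not from the repository): the two helpers pair into a read-modify-write
# cycle. The setting name is hypothetical.
from ols_client.constants import get_config, write_config

config = get_config()                # writes an empty {} config file on first use
config['FAVOURITE_ONTOLOGY'] = 'go'
write_config(config)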
|
[
"Gets",
"the",
"configuration",
"for",
"this",
"project",
"from",
"the",
"default",
"JSON",
"file",
"or",
"writes",
"one",
"if",
"it",
"doesn",
"t",
"exist"
] |
cthoyt/ols-client
|
python
|
https://github.com/cthoyt/ols-client/blob/8c6bb54888675652d25324184967392d00d128fc/src/ols_client/constants.py#L27-L36
|
[
"def",
"get_config",
"(",
")",
":",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"CONFIG_PATH",
")",
":",
"write_config",
"(",
"{",
"}",
")",
"with",
"open",
"(",
"CONFIG_PATH",
")",
"as",
"f",
":",
"return",
"json",
".",
"load",
"(",
"f",
")"
] |
8c6bb54888675652d25324184967392d00d128fc
|
test
|
OlsClient.get_ontology
|
Gets the metadata for a given ontology
:param str ontology: The name of the ontology
:return: The dictionary representing the JSON from the OLS
:rtype: dict
|
src/ols_client/client.py
|
def get_ontology(self, ontology):
"""Gets the metadata for a given ontology
:param str ontology: The name of the ontology
:return: The dictionary representing the JSON from the OLS
:rtype: dict
"""
url = self.ontology_metadata_fmt.format(ontology=ontology)
response = requests.get(url)
return response.json()
|
def get_ontology(self, ontology):
"""Gets the metadata for a given ontology
:param str ontology: The name of the ontology
:return: The dictionary representing the JSON from the OLS
:rtype: dict
"""
url = self.ontology_metadata_fmt.format(ontology=ontology)
response = requests.get(url)
return response.json()
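# Usage sketch (not from the repository). OlsClient is assumed to be re-exported at the
# package root and to default to the public EBI OLS instance; the keys read from the
# response are typical of that API rather than guaranteed by this client.
from ols_client import OlsClient

client = OlsClient()
metadata = client.get_ontology('go')
print(metadata.get('config', {}).get('title'))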
|
[
"Gets",
"the",
"metadata",
"for",
"a",
"given",
"ontology"
] |
cthoyt/ols-client
|
python
|
https://github.com/cthoyt/ols-client/blob/8c6bb54888675652d25324184967392d00d128fc/src/ols_client/client.py#L56-L65
|
[
"def",
"get_ontology",
"(",
"self",
",",
"ontology",
")",
":",
"url",
"=",
"self",
".",
"ontology_metadata_fmt",
".",
"format",
"(",
"ontology",
"=",
"ontology",
")",
"response",
"=",
"requests",
".",
"get",
"(",
"url",
")",
"return",
"response",
".",
"json",
"(",
")"
] |
8c6bb54888675652d25324184967392d00d128fc
|
test
|
OlsClient.get_term
|
Gets the data for a given term
:param str ontology: The name of the ontology
:param str iri: The IRI of a term
:rtype: dict
|
src/ols_client/client.py
|
def get_term(self, ontology, iri):
"""Gets the data for a given term
:param str ontology: The name of the ontology
:param str iri: The IRI of a term
:rtype: dict
"""
url = self.ontology_term_fmt.format(ontology, iri)
response = requests.get(url)
return response.json()
|
def get_term(self, ontology, iri):
"""Gets the data for a given term
:param str ontology: The name of the ontology
:param str iri: The IRI of a term
:rtype: dict
"""
url = self.ontology_term_fmt.format(ontology, iri)
response = requests.get(url)
return response.json()
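# Usage sketch (not from the repository): note that the second argument is the full term
# IRI, not a CURIE. The client construction follows the same assumptions as above.
from ols_client import OlsClient

client = OlsClient()
iri = 'http://purl.obolibrary.org/obo/GO_0008150'  # GO "biological_process", as an example
term = client.get_term('go', iri)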
|
[
"Gets",
"the",
"data",
"for",
"a",
"given",
"term"
] |
cthoyt/ols-client
|
python
|
https://github.com/cthoyt/ols-client/blob/8c6bb54888675652d25324184967392d00d128fc/src/ols_client/client.py#L67-L77
|
[
"def",
"get_term",
"(",
"self",
",",
"ontology",
",",
"iri",
")",
":",
"url",
"=",
"self",
".",
"ontology_term_fmt",
".",
"format",
"(",
"ontology",
",",
"iri",
")",
"response",
"=",
"requests",
".",
"get",
"(",
"url",
")",
"return",
"response",
".",
"json",
"(",
")"
] |
8c6bb54888675652d25324184967392d00d128fc
|
test
|
OlsClient.search
|
Searches the OLS with the given term
:param str name:
:param list[str] query_fields: Fields to query
:return: dict
|
src/ols_client/client.py
|
def search(self, name, query_fields=None):
"""Searches the OLS with the given term
:param str name:
:param list[str] query_fields: Fields to query
:return: dict
"""
params = {'q': name}
if query_fields is not None:
params['queryFields'] = '{{{}}}'.format(','.join(query_fields))
response = requests.get(self.ontology_search, params=params)
return response.json()
|
def search(self, name, query_fields=None):
"""Searches the OLS with the given term
:param str name:
:param list[str] query_fields: Fields to query
:return: dict
"""
params = {'q': name}
if query_fields is not None:
params['queryFields'] = '{{{}}}'.format(','.join(query_fields))
response = requests.get(self.ontology_search, params=params)
return response.json()
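# Usage sketch (not from the repository): restrict the search to label and synonym
# matches. The response layout comes straight from the OLS search endpoint; it is not
# reshaped by the client.
from ols_client import OlsClient

client = OlsClient()
results = client.search('apoptosis', query_fields=['label', 'synonym'])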
|
[
"Searches",
"the",
"OLS",
"with",
"the",
"given",
"term"
] |
cthoyt/ols-client
|
python
|
https://github.com/cthoyt/ols-client/blob/8c6bb54888675652d25324184967392d00d128fc/src/ols_client/client.py#L79-L91
|
[
"def",
"search",
"(",
"self",
",",
"name",
",",
"query_fields",
"=",
"None",
")",
":",
"params",
"=",
"{",
"'q'",
":",
"name",
"}",
"if",
"query_fields",
"is",
"not",
"None",
":",
"params",
"[",
"'queryFields'",
"]",
"=",
"'{{{}}}'",
".",
"format",
"(",
"','",
".",
"join",
"(",
"query_fields",
")",
")",
"response",
"=",
"requests",
".",
"get",
"(",
"self",
".",
"ontology_search",
",",
"params",
"=",
"params",
")",
"return",
"response",
".",
"json",
"(",
")"
] |
8c6bb54888675652d25324184967392d00d128fc
|
test
|
OlsClient.suggest
|
Suggest terms from an optional list of ontologies
:param str name:
:param list[str] ontology:
:rtype: dict
.. seealso:: https://www.ebi.ac.uk/ols/docs/api#_suggest_term
|
src/ols_client/client.py
|
def suggest(self, name, ontology=None):
"""Suggest terms from an optional list of ontologies
:param str name:
:param list[str] ontology:
:rtype: dict
.. seealso:: https://www.ebi.ac.uk/ols/docs/api#_suggest_term
"""
params = {'q': name}
if ontology:
params['ontology'] = ','.join(ontology)
response = requests.get(self.ontology_suggest, params=params)
return response.json()
|
def suggest(self, name, ontology=None):
"""Suggest terms from an optional list of ontologies
:param str name:
:param list[str] ontology:
:rtype: dict
.. seealso:: https://www.ebi.ac.uk/ols/docs/api#_suggest_term
"""
params = {'q': name}
if ontology:
params['ontology'] = ','.join(ontology)
response = requests.get(self.ontology_suggest, params=params)
return response.json()
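# Usage sketch (not from the repository): limit suggestions to two ontologies; the list
# is joined into a single comma-separated 'ontology' query parameter.
from ols_client import OlsClient

client = OlsClient()
suggestions = client.suggest('apopto', ontology=['go', 'efo'])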
|
[
"Suggest",
"terms",
"from",
"an",
"optional",
"list",
"of",
"ontologies"
] |
cthoyt/ols-client
|
python
|
https://github.com/cthoyt/ols-client/blob/8c6bb54888675652d25324184967392d00d128fc/src/ols_client/client.py#L93-L107
|
[
"def",
"suggest",
"(",
"self",
",",
"name",
",",
"ontology",
"=",
"None",
")",
":",
"params",
"=",
"{",
"'q'",
":",
"name",
"}",
"if",
"ontology",
":",
"params",
"[",
"'ontology'",
"]",
"=",
"','",
".",
"join",
"(",
"ontology",
")",
"response",
"=",
"requests",
".",
"get",
"(",
"self",
".",
"ontology_suggest",
",",
"params",
"=",
"params",
")",
"return",
"response",
".",
"json",
"(",
")"
] |
8c6bb54888675652d25324184967392d00d128fc
|
test
|
OlsClient._iter_terms_helper
|
Iterates over all terms, lazily with paging
:param str url: The url to query
:param int size: The size of each page. Defaults to 500, which is the maximum allowed by the EBI.
:param int sleep: The amount of time to sleep between pages. Defaults to none.
:rtype: iter[dict]
|
src/ols_client/client.py
|
def _iter_terms_helper(url, size=None, sleep=None):
"""Iterates over all terms, lazily with paging
:param str url: The url to query
:param int size: The size of each page. Defaults to 500, which is the maximum allowed by the EBI.
:param int sleep: The amount of time to sleep between pages. Defaults to none.
:rtype: iter[dict]
"""
if size is None:
size = 500
elif size > 500:
raise ValueError('Maximum size is 500. Given: {}'.format(size))
t = time.time()
response = requests.get(url, params={'size': size}).json()
links = response['_links']
for response_term in _iterate_response_terms(response):
yield response_term
t = time.time() - t
log.info(
'Page %s/%s done in %.2f seconds',
response['page']['number'] + 1,
response['page']['totalPages'],
t
)
log.info('Estimated time until done: %.2f minutes', t * response['page']['totalPages'] / 60)
while 'next' in links:
if sleep:
time.sleep(sleep)
t = time.time()
response = requests.get(links['next']['href'], params={'size': size}).json()
links = response['_links']
for response_term in _iterate_response_terms(response):
yield response_term
log.info(
'Page %s/%s done in %.2f seconds',
response['page']['number'],
response['page']['totalPages'],
time.time() - t
)
|
def _iter_terms_helper(url, size=None, sleep=None):
"""Iterates over all terms, lazily with paging
:param str url: The url to query
:param int size: The size of each page. Defaults to 500, which is the maximum allowed by the EBI.
:param int sleep: The amount of time to sleep between pages. Defaults to none.
:rtype: iter[dict]
"""
if size is None:
size = 500
elif size > 500:
raise ValueError('Maximum size is 500. Given: {}'.format(size))
t = time.time()
response = requests.get(url, params={'size': size}).json()
links = response['_links']
for response_term in _iterate_response_terms(response):
yield response_term
t = time.time() - t
log.info(
'Page %s/%s done in %.2f seconds',
response['page']['number'] + 1,
response['page']['totalPages'],
t
)
log.info('Estimated time until done: %.2f minutes', t * response['page']['totalPages'] / 60)
while 'next' in links:
if sleep:
time.sleep(sleep)
t = time.time()
response = requests.get(links['next']['href'], params={'size': size}).json()
links = response['_links']
for response_term in _iterate_response_terms(response):
yield response_term
log.info(
'Page %s/%s done in %.2f seconds',
response['page']['number'],
response['page']['totalPages'],
time.time() - t
)
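# Usage sketch (not from the repository): the helper is private, but its paging contract
# (page size capped at 500) is visible through the public wrappers. Because both the
# wrapper and the helper are generators, the cap can be exercised without any network
# traffic.
from ols_client import OlsClient

client = OlsClient()
pages = client.iter_terms('go', size=1000)  # nothing is fetched until iteration starts
try:
    next(pages)
except ValueError as exc:
    print(exc)  # "Maximum size is 500. Given: 1000"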
|
[
"Iterates",
"over",
"all",
"terms",
"lazily",
"with",
"paging"
] |
cthoyt/ols-client
|
python
|
https://github.com/cthoyt/ols-client/blob/8c6bb54888675652d25324184967392d00d128fc/src/ols_client/client.py#L110-L157
|
[
"def",
"_iter_terms_helper",
"(",
"url",
",",
"size",
"=",
"None",
",",
"sleep",
"=",
"None",
")",
":",
"if",
"size",
"is",
"None",
":",
"size",
"=",
"500",
"elif",
"size",
">",
"500",
":",
"raise",
"ValueError",
"(",
"'Maximum size is 500. Given: {}'",
".",
"format",
"(",
"size",
")",
")",
"t",
"=",
"time",
".",
"time",
"(",
")",
"response",
"=",
"requests",
".",
"get",
"(",
"url",
",",
"params",
"=",
"{",
"'size'",
":",
"size",
"}",
")",
".",
"json",
"(",
")",
"links",
"=",
"response",
"[",
"'_links'",
"]",
"for",
"response_term",
"in",
"_iterate_response_terms",
"(",
"response",
")",
":",
"yield",
"response_term",
"t",
"=",
"time",
".",
"time",
"(",
")",
"-",
"t",
"log",
".",
"info",
"(",
"'Page %s/%s done in %.2f seconds'",
",",
"response",
"[",
"'page'",
"]",
"[",
"'number'",
"]",
"+",
"1",
",",
"response",
"[",
"'page'",
"]",
"[",
"'totalPages'",
"]",
",",
"t",
")",
"log",
".",
"info",
"(",
"'Estimated time until done: %.2f minutes'",
",",
"t",
"*",
"response",
"[",
"'page'",
"]",
"[",
"'totalPages'",
"]",
"/",
"60",
")",
"while",
"'next'",
"in",
"links",
":",
"if",
"sleep",
":",
"time",
".",
"sleep",
"(",
"sleep",
")",
"t",
"=",
"time",
".",
"time",
"(",
")",
"response",
"=",
"requests",
".",
"get",
"(",
"links",
"[",
"'next'",
"]",
"[",
"'href'",
"]",
",",
"params",
"=",
"{",
"'size'",
":",
"size",
"}",
")",
".",
"json",
"(",
")",
"links",
"=",
"response",
"[",
"'_links'",
"]",
"for",
"response_term",
"in",
"_iterate_response_terms",
"(",
"response",
")",
":",
"yield",
"response_term",
"log",
".",
"info",
"(",
"'Page %s/%s done in %.2f seconds'",
",",
"response",
"[",
"'page'",
"]",
"[",
"'number'",
"]",
",",
"response",
"[",
"'page'",
"]",
"[",
"'totalPages'",
"]",
",",
"time",
".",
"time",
"(",
")",
"-",
"t",
")"
] |
8c6bb54888675652d25324184967392d00d128fc
|
test
|
OlsClient.iter_terms
|
Iterates over all terms, lazily with paging
:param str ontology: The name of the ontology
:param int size: The size of each page. Defaults to 500, which is the maximum allowed by the EBI.
:param int sleep: The amount of time to sleep between pages. Defaults to 0 seconds.
:rtype: iter[dict]
|
src/ols_client/client.py
|
def iter_terms(self, ontology, size=None, sleep=None):
"""Iterates over all terms, lazily with paging
:param str ontology: The name of the ontology
:param int size: The size of each page. Defaults to 500, which is the maximum allowed by the EBI.
:param int sleep: The amount of time to sleep between pages. Defaults to 0 seconds.
:rtype: iter[dict]
"""
url = self.ontology_terms_fmt.format(ontology=ontology)
for term in self._iter_terms_helper(url, size=size, sleep=sleep):
yield term
|
def iter_terms(self, ontology, size=None, sleep=None):
"""Iterates over all terms, lazily with paging
:param str ontology: The name of the ontology
:param int size: The size of each page. Defaults to 500, which is the maximum allowed by the EBI.
:param int sleep: The amount of time to sleep between pages. Defaults to 0 seconds.
:rtype: iter[dict]
"""
url = self.ontology_terms_fmt.format(ontology=ontology)
for term in self._iter_terms_helper(url, size=size, sleep=sleep):
yield term
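# Usage sketch (not from the repository): stream the first few terms without walking
# every page. 'iri' and 'label' are typical OLS term keys, not guaranteed by the client.
from itertools import islice
from ols_client import OlsClient

client = OlsClient()
for term in islice(client.iter_terms('go', size=500, sleep=1), 10):
    print(term.get('iri'), term.get('label'))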
|
[
"Iterates",
"over",
"all",
"terms",
"lazily",
"with",
"paging"
] |
cthoyt/ols-client
|
python
|
https://github.com/cthoyt/ols-client/blob/8c6bb54888675652d25324184967392d00d128fc/src/ols_client/client.py#L159-L169
|
[
"def",
"iter_terms",
"(",
"self",
",",
"ontology",
",",
"size",
"=",
"None",
",",
"sleep",
"=",
"None",
")",
":",
"url",
"=",
"self",
".",
"ontology_terms_fmt",
".",
"format",
"(",
"ontology",
"=",
"ontology",
")",
"for",
"term",
"in",
"self",
".",
"_iter_terms_helper",
"(",
"url",
",",
"size",
"=",
"size",
",",
"sleep",
"=",
"sleep",
")",
":",
"yield",
"term"
] |
8c6bb54888675652d25324184967392d00d128fc
|
test
|
OlsClient.iter_descendants
|
Iterates over the descendants of a given term
:param str ontology: The name of the ontology
:param str iri: The IRI of a term
:param int size: The size of each page. Defaults to 500, which is the maximum allowed by the EBI.
:param int sleep: The amount of time to sleep between pages. Defaults to 0 seconds.
:rtype: iter[dict]
|
src/ols_client/client.py
|
def iter_descendants(self, ontology, iri, size=None, sleep=None):
"""Iterates over the descendants of a given term
:param str ontology: The name of the ontology
:param str iri: The IRI of a term
:param int size: The size of each page. Defaults to 500, which is the maximum allowed by the EBI.
:param int sleep: The amount of time to sleep between pages. Defaults to 0 seconds.
:rtype: iter[dict]
"""
url = self.ontology_term_descendants_fmt.format(ontology=ontology, iri=iri)
log.info('getting %s', url)
for term in self._iter_terms_helper(url, size=size, sleep=sleep):
yield term
|
def iter_descendants(self, ontology, iri, size=None, sleep=None):
"""Iterates over the descendants of a given term
:param str ontology: The name of the ontology
:param str iri: The IRI of a term
:param int size: The size of each page. Defaults to 500, which is the maximum allowed by the EBI.
:param int sleep: The amount of time to sleep between pages. Defaults to 0 seconds.
:rtype: iter[dict]
"""
url = self.ontology_term_descendants_fmt.format(ontology=ontology, iri=iri)
log.info('getting %s', url)
for term in self._iter_terms_helper(url, size=size, sleep=sleep):
yield term
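# Usage sketch (not from the repository): count the descendants of an example term.
# This walks every page, so the sleep keeps the request rate polite.
from ols_client import OlsClient

client = OlsClient()
iri = 'http://purl.obolibrary.org/obo/GO_0008150'  # GO "biological_process"
n_descendants = sum(1 for _ in client.iter_descendants('go', iri, size=500, sleep=1))
print(n_descendants)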
|
[
"Iterates",
"over",
"the",
"descendants",
"of",
"a",
"given",
"term"
] |
cthoyt/ols-client
|
python
|
https://github.com/cthoyt/ols-client/blob/8c6bb54888675652d25324184967392d00d128fc/src/ols_client/client.py#L171-L183
|
[
"def",
"iter_descendants",
"(",
"self",
",",
"ontology",
",",
"iri",
",",
"size",
"=",
"None",
",",
"sleep",
"=",
"None",
")",
":",
"url",
"=",
"self",
".",
"ontology_term_descendants_fmt",
".",
"format",
"(",
"ontology",
"=",
"ontology",
",",
"iri",
"=",
"iri",
")",
"log",
".",
"info",
"(",
"'getting %s'",
",",
"url",
")",
"for",
"term",
"in",
"self",
".",
"_iter_terms_helper",
"(",
"url",
",",
"size",
"=",
"size",
",",
"sleep",
"=",
"sleep",
")",
":",
"yield",
"term"
] |
8c6bb54888675652d25324184967392d00d128fc
|
test
|
OlsClient.iter_descendants_labels
|
Iterates over the labels for the descendants of a given term
:param str ontology: The name of the ontology
:param str iri: The IRI of a term
:param int size: The size of each page. Defaults to 500, which is the maximum allowed by the EBI.
:param int sleep: The amount of time to sleep between pages. Defaults to 0 seconds.
:rtype: iter[str]
|
src/ols_client/client.py
|
def iter_descendants_labels(self, ontology, iri, size=None, sleep=None):
"""Iterates over the labels for the descendants of a given term
:param str ontology: The name of the ontology
:param str iri: The IRI of a term
:param int size: The size of each page. Defaults to 500, which is the maximum allowed by the EBI.
:param int sleep: The amount of time to sleep between pages. Defaults to 0 seconds.
:rtype: iter[str]
"""
for label in _help_iterate_labels(self.iter_descendants(ontology, iri, size=size, sleep=sleep)):
yield label
|
def iter_descendants_labels(self, ontology, iri, size=None, sleep=None):
"""Iterates over the labels for the descendants of a given term
:param str ontology: The name of the ontology
:param str iri: The IRI of a term
:param int size: The size of each page. Defaults to 500, which is the maximum allowed by the EBI.
:param int sleep: The amount of time to sleep between pages. Defaults to 0 seconds.
:rtype: iter[str]
"""
for label in _help_iterate_labels(self.iter_descendants(ontology, iri, size=size, sleep=sleep)):
yield label
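# Usage sketch (not from the repository): the label variant is convenient for building a
# lookup set in one line. The IRI is a hypothetical example term.
from ols_client import OlsClient

client = OlsClient()
iri = 'http://purl.obolibrary.org/obo/GO_0008150'
descendant_labels = set(client.iter_descendants_labels('go', iri, size=500, sleep=1))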
|
[
"Iterates",
"over",
"the",
"labels",
"for",
"the",
"descendants",
"of",
"a",
"given",
"term"
] |
cthoyt/ols-client
|
python
|
https://github.com/cthoyt/ols-client/blob/8c6bb54888675652d25324184967392d00d128fc/src/ols_client/client.py#L185-L195
|
[
"def",
"iter_descendants_labels",
"(",
"self",
",",
"ontology",
",",
"iri",
",",
"size",
"=",
"None",
",",
"sleep",
"=",
"None",
")",
":",
"for",
"label",
"in",
"_help_iterate_labels",
"(",
"self",
".",
"iter_descendants",
"(",
"ontology",
",",
"iri",
",",
"size",
"=",
"size",
",",
"sleep",
"=",
"sleep",
")",
")",
":",
"yield",
"label"
] |
8c6bb54888675652d25324184967392d00d128fc
|
test
|
OlsClient.iter_labels
|
Iterates over the labels of terms in the ontology. Automatically wraps the pager returned by the OLS.
:param str ontology: The name of the ontology
:param int size: The size of each page. Defaults to 500, which is the maximum allowed by the EBI.
:param int sleep: The amount of time to sleep between pages. Defaults to 0 seconds.
:rtype: iter[str]
|
src/ols_client/client.py
|
def iter_labels(self, ontology, size=None, sleep=None):
"""Iterates over the labels of terms in the ontology. Automatically wraps the pager returned by the OLS.
:param str ontology: The name of the ontology
:param int size: The size of each page. Defaults to 500, which is the maximum allowed by the EBI.
:param int sleep: The amount of time to sleep between pages. Defaults to 0 seconds.
:rtype: iter[str]
"""
for label in _help_iterate_labels(self.iter_terms(ontology=ontology, size=size, sleep=sleep)):
yield label
|
def iter_labels(self, ontology, size=None, sleep=None):
"""Iterates over the labels of terms in the ontology. Automatically wraps the pager returned by the OLS.
:param str ontology: The name of the ontology
:param int size: The size of each page. Defaults to 500, which is the maximum allowed by the EBI.
:param int sleep: The amount of time to sleep between pages. Defaults to 0 seconds.
:rtype: iter[str]
"""
for label in _help_iterate_labels(self.iter_terms(ontology=ontology, size=size, sleep=sleep)):
yield label
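# Usage sketch (not from the repository): materialise every label into a set for fast
# membership tests. 'hp' is an arbitrary example ontology; very large ontologies may be
# better handled by streaming than by collecting.
from ols_client import OlsClient

client = OlsClient()
labels = set(client.iter_labels('hp', size=500, sleep=1))
print('Phenotypic abnormality' in labels)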
|
[
"Iterates",
"over",
"the",
"labels",
"of",
"terms",
"in",
"the",
"ontology",
".",
"Automatically",
"wraps",
"the",
"pager",
"returned",
"by",
"the",
"OLS",
"."
] |
cthoyt/ols-client
|
python
|
https://github.com/cthoyt/ols-client/blob/8c6bb54888675652d25324184967392d00d128fc/src/ols_client/client.py#L197-L206
|
[
"def",
"iter_labels",
"(",
"self",
",",
"ontology",
",",
"size",
"=",
"None",
",",
"sleep",
"=",
"None",
")",
":",
"for",
"label",
"in",
"_help_iterate_labels",
"(",
"self",
".",
"iter_terms",
"(",
"ontology",
"=",
"ontology",
",",
"size",
"=",
"size",
",",
"sleep",
"=",
"sleep",
")",
")",
":",
"yield",
"label"
] |
8c6bb54888675652d25324184967392d00d128fc
|
test
|
OlsClient.iter_hierarchy
|
Iterates over parent-child relations
:param str ontology: The name of the ontology
:param int size: The size of each page. Defaults to 500, which is the maximum allowed by the EBI.
:param int sleep: The amount of time to sleep between pages. Defaults to 0 seconds.
:rtype: iter[tuple[str,str]]
|
src/ols_client/client.py
|
def iter_hierarchy(self, ontology, size=None, sleep=None):
"""Iterates over parent-child relations
:param str ontology: The name of the ontology
:param int size: The size of each page. Defaults to 500, which is the maximum allowed by the EBI.
:param int sleep: The amount of time to sleep between pages. Defaults to 0 seconds.
:rtype: iter[tuple[str,str]]
"""
for term in self.iter_terms(ontology=ontology, size=size, sleep=sleep):
try:
hierarchy_children_link = term['_links'][HIERARCHICAL_CHILDREN]['href']
except KeyError: # there's no children for this one
continue
response = requests.get(hierarchy_children_link).json()
for child_term in response['_embedded']['terms']:
yield term['label'], child_term['label']
|
def iter_hierarchy(self, ontology, size=None, sleep=None):
"""Iterates over parent-child relations
:param str ontology: The name of the ontology
:param int size: The size of each page. Defaults to 500, which is the maximum allowed by the EBI.
:param int sleep: The amount of time to sleep between pages. Defaults to 0 seconds.
:rtype: iter[tuple[str,str]]
"""
for term in self.iter_terms(ontology=ontology, size=size, sleep=sleep):
try:
hierarchy_children_link = term['_links'][HIERARCHICAL_CHILDREN]['href']
except KeyError: # there's no children for this one
continue
response = requests.get(hierarchy_children_link).json()
for child_term in response['_embedded']['terms']:
yield term['label'], child_term['label']
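# Usage sketch (not from the repository): collect the parent/child label pairs into an
# adjacency mapping. Note that every term with hierarchical children costs one extra
# request on top of the paged term listing.
from collections import defaultdict
from ols_client import OlsClient

client = OlsClient()
children = defaultdict(list)
for parent_label, child_label in client.iter_hierarchy('go', size=500, sleep=1):
    children[parent_label].append(child_label)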
|
[
"Iterates",
"over",
"parent",
"-",
"child",
"relations"
] |
cthoyt/ols-client
|
python
|
https://github.com/cthoyt/ols-client/blob/8c6bb54888675652d25324184967392d00d128fc/src/ols_client/client.py#L208-L225
|
[
"def",
"iter_hierarchy",
"(",
"self",
",",
"ontology",
",",
"size",
"=",
"None",
",",
"sleep",
"=",
"None",
")",
":",
"for",
"term",
"in",
"self",
".",
"iter_terms",
"(",
"ontology",
"=",
"ontology",
",",
"size",
"=",
"size",
",",
"sleep",
"=",
"sleep",
")",
":",
"try",
":",
"hierarchy_children_link",
"=",
"term",
"[",
"'_links'",
"]",
"[",
"HIERARCHICAL_CHILDREN",
"]",
"[",
"'href'",
"]",
"except",
"KeyError",
":",
"# there's no children for this one",
"continue",
"response",
"=",
"requests",
".",
"get",
"(",
"hierarchy_children_link",
")",
".",
"json",
"(",
")",
"for",
"child_term",
"in",
"response",
"[",
"'_embedded'",
"]",
"[",
"'terms'",
"]",
":",
"yield",
"term",
"[",
"'label'",
"]",
",",
"child_term",
"[",
"'label'",
"]"
] |
8c6bb54888675652d25324184967392d00d128fc
|
test
|
run_fastqc
|
Run Fastqc on the input reads
:param JobFunctionWrappingJob job: passed automatically by Toil
:param str r1_id: FileStoreID of fastq read 1
:param str r2_id: FileStoreID of fastq read 2
:return: FileStoreID of fastQC output (tarball)
:rtype: str
|
src/toil_lib/tools/QC.py
|
def run_fastqc(job, r1_id, r2_id):
"""
Run Fastqc on the input reads
:param JobFunctionWrappingJob job: passed automatically by Toil
:param str r1_id: FileStoreID of fastq read 1
:param str r2_id: FileStoreID of fastq read 2
:return: FileStoreID of fastQC output (tarball)
:rtype: str
"""
work_dir = job.fileStore.getLocalTempDir()
job.fileStore.readGlobalFile(r1_id, os.path.join(work_dir, 'R1.fastq'))
parameters = ['/data/R1.fastq']
output_names = ['R1_fastqc.html', 'R1_fastqc.zip']
if r2_id:
job.fileStore.readGlobalFile(r2_id, os.path.join(work_dir, 'R2.fastq'))
parameters.extend(['-t', '2', '/data/R2.fastq'])
output_names.extend(['R2_fastqc.html', 'R2_fastqc.zip'])
dockerCall(job=job, tool='quay.io/ucsc_cgl/fastqc:0.11.5--be13567d00cd4c586edf8ae47d991815c8c72a49',
workDir=work_dir, parameters=parameters)
output_files = [os.path.join(work_dir, x) for x in output_names]
tarball_files(tar_name='fastqc.tar.gz', file_paths=output_files, output_dir=work_dir)
return job.fileStore.writeGlobalFile(os.path.join(work_dir, 'fastqc.tar.gz'))
|
def run_fastqc(job, r1_id, r2_id):
"""
Run Fastqc on the input reads
:param JobFunctionWrappingJob job: passed automatically by Toil
:param str r1_id: FileStoreID of fastq read 1
:param str r2_id: FileStoreID of fastq read 2
:return: FileStoreID of fastQC output (tarball)
:rtype: str
"""
work_dir = job.fileStore.getLocalTempDir()
job.fileStore.readGlobalFile(r1_id, os.path.join(work_dir, 'R1.fastq'))
parameters = ['/data/R1.fastq']
output_names = ['R1_fastqc.html', 'R1_fastqc.zip']
if r2_id:
job.fileStore.readGlobalFile(r2_id, os.path.join(work_dir, 'R2.fastq'))
parameters.extend(['-t', '2', '/data/R2.fastq'])
output_names.extend(['R2_fastqc.html', 'R2_fastqc.zip'])
dockerCall(job=job, tool='quay.io/ucsc_cgl/fastqc:0.11.5--be13567d00cd4c586edf8ae47d991815c8c72a49',
workDir=work_dir, parameters=parameters)
output_files = [os.path.join(work_dir, x) for x in output_names]
tarball_files(tar_name='fastqc.tar.gz', file_paths=output_files, output_dir=work_dir)
return job.fileStore.writeGlobalFile(os.path.join(work_dir, 'fastqc.tar.gz'))
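# Usage sketch (not from the repository): wrap the QC step in a parent Toil job. The read
# FileStoreIDs are assumed to have been imported upstream; r2_id may be None for
# single-ended data, in which case only R1 is checked.
from toil_lib.tools.QC import run_fastqc

def qc_sample(job, r1_id, r2_id=None):
    fastqc_job = job.addChildJobFn(run_fastqc, r1_id, r2_id,
                                   cores=2, memory='4G', disk='10G')
    return fastqc_job.rv()  # FileStoreID of fastqc.tar.gz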
|
[
"Run",
"Fastqc",
"on",
"the",
"input",
"reads"
] |
BD2KGenomics/toil-lib
|
python
|
https://github.com/BD2KGenomics/toil-lib/blob/022a615fc3dc98fc1aaa7bfd232409962ca44fbd/src/toil_lib/tools/QC.py#L8-L30
|
[
"def",
"run_fastqc",
"(",
"job",
",",
"r1_id",
",",
"r2_id",
")",
":",
"work_dir",
"=",
"job",
".",
"fileStore",
".",
"getLocalTempDir",
"(",
")",
"job",
".",
"fileStore",
".",
"readGlobalFile",
"(",
"r1_id",
",",
"os",
".",
"path",
".",
"join",
"(",
"work_dir",
",",
"'R1.fastq'",
")",
")",
"parameters",
"=",
"[",
"'/data/R1.fastq'",
"]",
"output_names",
"=",
"[",
"'R1_fastqc.html'",
",",
"'R1_fastqc.zip'",
"]",
"if",
"r2_id",
":",
"job",
".",
"fileStore",
".",
"readGlobalFile",
"(",
"r2_id",
",",
"os",
".",
"path",
".",
"join",
"(",
"work_dir",
",",
"'R2.fastq'",
")",
")",
"parameters",
".",
"extend",
"(",
"[",
"'-t'",
",",
"'2'",
",",
"'/data/R2.fastq'",
"]",
")",
"output_names",
".",
"extend",
"(",
"[",
"'R2_fastqc.html'",
",",
"'R2_fastqc.zip'",
"]",
")",
"dockerCall",
"(",
"job",
"=",
"job",
",",
"tool",
"=",
"'quay.io/ucsc_cgl/fastqc:0.11.5--be13567d00cd4c586edf8ae47d991815c8c72a49'",
",",
"workDir",
"=",
"work_dir",
",",
"parameters",
"=",
"parameters",
")",
"output_files",
"=",
"[",
"os",
".",
"path",
".",
"join",
"(",
"work_dir",
",",
"x",
")",
"for",
"x",
"in",
"output_names",
"]",
"tarball_files",
"(",
"tar_name",
"=",
"'fastqc.tar.gz'",
",",
"file_paths",
"=",
"output_files",
",",
"output_dir",
"=",
"work_dir",
")",
"return",
"job",
".",
"fileStore",
".",
"writeGlobalFile",
"(",
"os",
".",
"path",
".",
"join",
"(",
"work_dir",
",",
"'fastqc.tar.gz'",
")",
")"
] |
022a615fc3dc98fc1aaa7bfd232409962ca44fbd
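A hedged sketch of wiring the run_fastqc job above into a Toil workflow. The FileStoreIDs are stand-ins for values an upstream import job would normally provide, and the resource hints are illustrative only.

from toil.job import Job
from toil_lib.tools.QC import run_fastqc

r1_id, r2_id = 'placeholder-r1-id', 'placeholder-r2-id'  # hypothetical FileStoreIDs
# Pass r2_id=None for single-ended reads; the promise resolves to the
# FileStoreID of the fastqc.tar.gz tarball written by the job.
fastqc = Job.wrapJobFn(run_fastqc, r1_id, r2_id, disk='10G', cores=2)
tarball_promise = fastqc.rv()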
|
test
|
Merge.addStream
|
Adds the given stream to the query construction. The function supports both stream
names and Stream objects.
|
connectordb/query/merge.py
|
def addStream(self, stream, t1=None, t2=None, limit=None, i1=None, i2=None, transform=None):
"""Adds the given stream to the query construction. The function supports both stream
names and Stream objects."""
params = query_maker(t1, t2, limit, i1, i2, transform)
params["stream"] = get_stream(self.cdb, stream)
# Now add the stream to the query parameters
self.query.append(params)
|
def addStream(self, stream, t1=None, t2=None, limit=None, i1=None, i2=None, transform=None):
"""Adds the given stream to the query construction. The function supports both stream
names and Stream objects."""
params = query_maker(t1, t2, limit, i1, i2, transform)
params["stream"] = get_stream(self.cdb, stream)
# Now add the stream to the query parameters
self.query.append(params)
|
[
"Adds",
"the",
"given",
"stream",
"to",
"the",
"query",
"construction",
".",
"The",
"function",
"supports",
"both",
"stream",
"names",
"and",
"Stream",
"objects",
"."
] |
connectordb/connectordb-python
|
python
|
https://github.com/connectordb/connectordb-python/blob/2092b0cb30898139a247176bcf433d5a4abde7cb/connectordb/query/merge.py#L32-L40
|
[
"def",
"addStream",
"(",
"self",
",",
"stream",
",",
"t1",
"=",
"None",
",",
"t2",
"=",
"None",
",",
"limit",
"=",
"None",
",",
"i1",
"=",
"None",
",",
"i2",
"=",
"None",
",",
"transform",
"=",
"None",
")",
":",
"params",
"=",
"query_maker",
"(",
"t1",
",",
"t2",
",",
"limit",
",",
"i1",
",",
"i2",
",",
"transform",
")",
"params",
"[",
"\"stream\"",
"]",
"=",
"get_stream",
"(",
"self",
".",
"cdb",
",",
"stream",
")",
"# Now add the stream to the query parameters\r",
"self",
".",
"query",
".",
"append",
"(",
"params",
")"
] |
2092b0cb30898139a247176bcf433d5a4abde7cb
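A short sketch of how addStream might be used when building a merge query. It assumes cdb is an already-authenticated connectordb.ConnectorDB instance, and the stream paths are placeholders.

import time
from connectordb.query.merge import Merge

m = Merge(cdb)  # cdb: logged-in ConnectorDB connection (assumed)
# Each call appends one {"stream": ..., "t1": ...} entry to m.query,
# here asking for the last hour of two hypothetical streams.
m.addStream("myuser/mydevice/temperature", t1=time.time() - 3600)
m.addStream("myuser/mydevice/humidity", t1=time.time() - 3600)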
|
test
|
create_app
|
This needs some tidying up. To avoid circular imports we import
everything here but it makes this method a bit more gross.
|
home/__init__.py
|
def create_app(config=None):
""" This needs some tidying up. To avoid circular imports we import
everything here but it makes this method a bit more gross.
"""
# Initialise the app
from home.config import TEMPLATE_FOLDER, STATIC_FOLDER
app = Flask(__name__, static_folder=STATIC_FOLDER,
template_folder=TEMPLATE_FOLDER)
app.config['SECRET_KEY'] = 'ssh, its a secret.'
# Load the default config, the specified config file and then any
# overwrites that are manually passed in.
app.config.from_object('home.config')
if 'HOME_SETTINGS' in environ:
app.config.from_envvar('HOME_SETTINGS')
app.config.from_object(config)
# Register the web front end and the API.
from home.dash.web import web
from home.dash.api import api
app.register_blueprint(web)
app.register_blueprint(api, url_prefix='/api')
login_manager.init_app(app)
login_manager.login_view = 'Dashboard Web.login'
from home.dash.models import User
@login_manager.user_loader
def load_user(user_id):
return User.query.get(int(user_id))
# Initialise the migrations app, we want to store all migrations within
# the project directory for easier packaging.
Migrate(app, db, directory=app.config['MIGRATE_DIRECTORY'])
admin = Admin(app)
from home.dash.admin import setup_admin
setup_admin(admin)
# Wire up the database to the app so it gets the config.
db.init_app(app)
return app
|
def create_app(config=None):
""" This needs some tidying up. To avoid circular imports we import
everything here but it makes this method a bit more gross.
"""
# Initialise the app
from home.config import TEMPLATE_FOLDER, STATIC_FOLDER
app = Flask(__name__, static_folder=STATIC_FOLDER,
template_folder=TEMPLATE_FOLDER)
app.config['SECRET_KEY'] = 'ssh, its a secret.'
# Load the default config, the specified config file and then any
# overwrites that are manually passed in.
app.config.from_object('home.config')
if 'HOME_SETTINGS' in environ:
app.config.from_envvar('HOME_SETTINGS')
app.config.from_object(config)
# Register the web front end and the API.
from home.dash.web import web
from home.dash.api import api
app.register_blueprint(web)
app.register_blueprint(api, url_prefix='/api')
login_manager.init_app(app)
login_manager.login_view = 'Dashboard Web.login'
from home.dash.models import User
@login_manager.user_loader
def load_user(user_id):
return User.query.get(int(user_id))
# Initialise the migrations app, we want to store all migrations within
# the project directory for easier packaging.
Migrate(app, db, directory=app.config['MIGRATE_DIRECTORY'])
admin = Admin(app)
from home.dash.admin import setup_admin
setup_admin(admin)
# Wire up the database to the app so it gets the config.
db.init_app(app)
return app
|
[
"This",
"needs",
"some",
"tidying",
"up",
".",
"To",
"avoid",
"circular",
"imports",
"we",
"import",
"everything",
"here",
"but",
"it",
"makes",
"this",
"method",
"a",
"bit",
"more",
"gross",
"."
] |
d0ugal/home
|
python
|
https://github.com/d0ugal/home/blob/e984716ae6c74dc8e40346584668ac5cfeaaf520/home/__init__.py#L26-L74
|
[
"def",
"create_app",
"(",
"config",
"=",
"None",
")",
":",
"# Initialise the app",
"from",
"home",
".",
"config",
"import",
"TEMPLATE_FOLDER",
",",
"STATIC_FOLDER",
"app",
"=",
"Flask",
"(",
"__name__",
",",
"static_folder",
"=",
"STATIC_FOLDER",
",",
"template_folder",
"=",
"TEMPLATE_FOLDER",
")",
"app",
".",
"config",
"[",
"'SECRET_KEY'",
"]",
"=",
"'ssh, its a secret.'",
"# Load the default config, the specified config file and then any",
"# overwrites that are manually passed in.",
"app",
".",
"config",
".",
"from_object",
"(",
"'home.config'",
")",
"if",
"'HOME_SETTINGS'",
"in",
"environ",
":",
"app",
".",
"config",
".",
"from_envvar",
"(",
"'HOME_SETTINGS'",
")",
"app",
".",
"config",
".",
"from_object",
"(",
"config",
")",
"# Register the web front end and the API.",
"from",
"home",
".",
"dash",
".",
"web",
"import",
"web",
"from",
"home",
".",
"dash",
".",
"api",
"import",
"api",
"app",
".",
"register_blueprint",
"(",
"web",
")",
"app",
".",
"register_blueprint",
"(",
"api",
",",
"url_prefix",
"=",
"'/api'",
")",
"login_manager",
".",
"init_app",
"(",
"app",
")",
"login_manager",
".",
"login_view",
"=",
"'Dashboard Web.login'",
"from",
"home",
".",
"dash",
".",
"models",
"import",
"User",
"@",
"login_manager",
".",
"user_loader",
"def",
"load_user",
"(",
"user_id",
")",
":",
"return",
"User",
".",
"query",
".",
"get",
"(",
"int",
"(",
"user_id",
")",
")",
"# Initialise the migrations app, we want to store all migrations within",
"# the project directory for easier packaging.",
"Migrate",
"(",
"app",
",",
"db",
",",
"directory",
"=",
"app",
".",
"config",
"[",
"'MIGRATE_DIRECTORY'",
"]",
")",
"admin",
"=",
"Admin",
"(",
"app",
")",
"from",
"home",
".",
"dash",
".",
"admin",
"import",
"setup_admin",
"setup_admin",
"(",
"admin",
")",
"# Wire up the database to the app so it gets the config.",
"db",
".",
"init_app",
"(",
"app",
")",
"return",
"app"
] |
e984716ae6c74dc8e40346584668ac5cfeaaf520
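A small sketch of running the factory above during local development; the host, port, and debug flag are ordinary Flask options, and the optional config override argument is omitted.

from home import create_app

# Build the app with the default config (plus HOME_SETTINGS, if set) and serve it.
app = create_app()
app.run(host='127.0.0.1', port=5000, debug=True)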
|
test
|
spawn_spark_cluster
|
:param numWorkers: The number of worker nodes to have in the cluster. \
Must be greater than or equal to 1.
:param cores: Optional parameter to set the number of cores per node. \
If not provided, we use the number of cores on the node that launches \
the service.
:param memory: Optional parameter to set the memory requested per node.
:param disk: Optional parameter to set the disk requested per node.
:type leaderMemory: int or string convertable by bd2k.util.humanize.human2bytes to an int
:type numWorkers: int
:type cores: int
:type memory: int or string convertable by bd2k.util.humanize.human2bytes to an int
:type disk: int or string convertable by bd2k.util.humanize.human2bytes to an int
|
src/toil_lib/spark.py
|
def spawn_spark_cluster(job,
numWorkers,
cores=None,
memory=None,
disk=None,
overrideLeaderIP=None):
'''
:param numWorkers: The number of worker nodes to have in the cluster. \
Must be greater than or equal to 1.
:param cores: Optional parameter to set the number of cores per node. \
If not provided, we use the number of cores on the node that launches \
the service.
:param memory: Optional parameter to set the memory requested per node.
:param disk: Optional parameter to set the disk requested per node.
:type leaderMemory: int or string convertable by bd2k.util.humanize.human2bytes to an int
:type numWorkers: int
:type cores: int
:type memory: int or string convertable by bd2k.util.humanize.human2bytes to an int
:type disk: int or string convertable by bd2k.util.humanize.human2bytes to an int
'''
if numWorkers < 1:
raise ValueError("Must have more than one worker. %d given." % numWorkers)
leaderService = SparkService(cores=cores,
memory=memory,
disk=disk,
overrideLeaderIP=overrideLeaderIP)
leaderIP = job.addService(leaderService)
for i in range(numWorkers):
job.addService(WorkerService(leaderIP,
cores=cores,
disk=disk,
memory=memory),
parentService=leaderService)
return leaderIP
|
def spawn_spark_cluster(job,
numWorkers,
cores=None,
memory=None,
disk=None,
overrideLeaderIP=None):
'''
:param numWorkers: The number of worker nodes to have in the cluster. \
Must be greater than or equal to 1.
:param cores: Optional parameter to set the number of cores per node. \
If not provided, we use the number of cores on the node that launches \
the service.
:param memory: Optional parameter to set the memory requested per node.
:param disk: Optional parameter to set the disk requested per node.
:type leaderMemory: int or string convertable by bd2k.util.humanize.human2bytes to an int
:type numWorkers: int
:type cores: int
:type memory: int or string convertable by bd2k.util.humanize.human2bytes to an int
:type disk: int or string convertable by bd2k.util.humanize.human2bytes to an int
'''
if numWorkers < 1:
raise ValueError("Must have more than one worker. %d given." % numWorkers)
leaderService = SparkService(cores=cores,
memory=memory,
disk=disk,
overrideLeaderIP=overrideLeaderIP)
leaderIP = job.addService(leaderService)
for i in range(numWorkers):
job.addService(WorkerService(leaderIP,
cores=cores,
disk=disk,
memory=memory),
parentService=leaderService)
return leaderIP
|
[
":",
"param",
"numWorkers",
":",
"The",
"number",
"of",
"worker",
"nodes",
"to",
"have",
"in",
"the",
"cluster",
".",
"\\",
"Must",
"be",
"greater",
"than",
"or",
"equal",
"to",
"1",
".",
":",
"param",
"cores",
":",
"Optional",
"parameter",
"to",
"set",
"the",
"number",
"of",
"cores",
"per",
"node",
".",
"\\",
"If",
"not",
"provided",
"we",
"use",
"the",
"number",
"of",
"cores",
"on",
"the",
"node",
"that",
"launches",
"\\",
"the",
"service",
".",
":",
"param",
"memory",
":",
"Optional",
"parameter",
"to",
"set",
"the",
"memory",
"requested",
"per",
"node",
".",
":",
"param",
"disk",
":",
"Optional",
"parameter",
"to",
"set",
"the",
"disk",
"requested",
"per",
"node",
".",
":",
"type",
"leaderMemory",
":",
"int",
"or",
"string",
"convertable",
"by",
"bd2k",
".",
"util",
".",
"humanize",
".",
"human2bytes",
"to",
"an",
"int",
":",
"type",
"numWorkers",
":",
"int",
":",
"type",
"cores",
":",
"int",
":",
"type",
"memory",
":",
"int",
"or",
"string",
"convertable",
"by",
"bd2k",
".",
"util",
".",
"humanize",
".",
"human2bytes",
"to",
"an",
"int",
":",
"type",
"disk",
":",
"int",
"or",
"string",
"convertable",
"by",
"bd2k",
".",
"util",
".",
"humanize",
".",
"human2bytes",
"to",
"an",
"int"
] |
BD2KGenomics/toil-lib
|
python
|
https://github.com/BD2KGenomics/toil-lib/blob/022a615fc3dc98fc1aaa7bfd232409962ca44fbd/src/toil_lib/spark.py#L28-L64
|
[
"def",
"spawn_spark_cluster",
"(",
"job",
",",
"numWorkers",
",",
"cores",
"=",
"None",
",",
"memory",
"=",
"None",
",",
"disk",
"=",
"None",
",",
"overrideLeaderIP",
"=",
"None",
")",
":",
"if",
"numWorkers",
"<",
"1",
":",
"raise",
"ValueError",
"(",
"\"Must have more than one worker. %d given.\"",
"%",
"numWorkers",
")",
"leaderService",
"=",
"SparkService",
"(",
"cores",
"=",
"cores",
",",
"memory",
"=",
"memory",
",",
"disk",
"=",
"disk",
",",
"overrideLeaderIP",
"=",
"overrideLeaderIP",
")",
"leaderIP",
"=",
"job",
".",
"addService",
"(",
"leaderService",
")",
"for",
"i",
"in",
"range",
"(",
"numWorkers",
")",
":",
"job",
".",
"addService",
"(",
"WorkerService",
"(",
"leaderIP",
",",
"cores",
"=",
"cores",
",",
"disk",
"=",
"disk",
",",
"memory",
"=",
"memory",
")",
",",
"parentService",
"=",
"leaderService",
")",
"return",
"leaderIP"
] |
022a615fc3dc98fc1aaa7bfd232409962ca44fbd
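A hedged sketch of calling spawn_spark_cluster from inside a Toil job function. The worker count and resource sizes are examples only.

from toil_lib.spark import spawn_spark_cluster

def setup_spark(job):
    # One leader service plus three worker services; sizes are illustrative.
    leader_ip = spawn_spark_cluster(job, numWorkers=3,
                                    cores=4, memory='8G', disk='20G')
    return leader_ip  # downstream Spark jobs would be added as children here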
|
test
|
SparkService.start
|
Start spark and hdfs master containers
:param job: The underlying job.
|
src/toil_lib/spark.py
|
def start(self, job):
"""
Start spark and hdfs master containers
:param job: The underlying job.
"""
if self.hostname is None:
self.hostname = subprocess.check_output(["hostname", "-f",])[:-1]
_log.info("Started Spark master container.")
self.sparkContainerID = dockerCheckOutput(job=job,
defer=STOP,
workDir=os.getcwd(),
tool="quay.io/ucsc_cgl/apache-spark-master:1.5.2",
dockerParameters=["--net=host",
"-d",
"-v", "/mnt/ephemeral/:/ephemeral/:rw",
"-e", "SPARK_MASTER_IP=" + self.hostname,
"-e", "SPARK_LOCAL_DIRS=/ephemeral/spark/local",
"-e", "SPARK_WORKER_DIR=/ephemeral/spark/work"],
parameters=[self.hostname])[:-1]
_log.info("Started HDFS Datanode.")
self.hdfsContainerID = dockerCheckOutput(job=job,
defer=STOP,
workDir=os.getcwd(),
tool="quay.io/ucsc_cgl/apache-hadoop-master:2.6.2",
dockerParameters=["--net=host",
"-d"],
parameters=[self.hostname])[:-1]
return self.hostname
|
def start(self, job):
"""
Start spark and hdfs master containers
:param job: The underlying job.
"""
if self.hostname is None:
self.hostname = subprocess.check_output(["hostname", "-f",])[:-1]
_log.info("Started Spark master container.")
self.sparkContainerID = dockerCheckOutput(job=job,
defer=STOP,
workDir=os.getcwd(),
tool="quay.io/ucsc_cgl/apache-spark-master:1.5.2",
dockerParameters=["--net=host",
"-d",
"-v", "/mnt/ephemeral/:/ephemeral/:rw",
"-e", "SPARK_MASTER_IP=" + self.hostname,
"-e", "SPARK_LOCAL_DIRS=/ephemeral/spark/local",
"-e", "SPARK_WORKER_DIR=/ephemeral/spark/work"],
parameters=[self.hostname])[:-1]
_log.info("Started HDFS Datanode.")
self.hdfsContainerID = dockerCheckOutput(job=job,
defer=STOP,
workDir=os.getcwd(),
tool="quay.io/ucsc_cgl/apache-hadoop-master:2.6.2",
dockerParameters=["--net=host",
"-d"],
parameters=[self.hostname])[:-1]
return self.hostname
|
[
"Start",
"spark",
"and",
"hdfs",
"master",
"containers"
] |
BD2KGenomics/toil-lib
|
python
|
https://github.com/BD2KGenomics/toil-lib/blob/022a615fc3dc98fc1aaa7bfd232409962ca44fbd/src/toil_lib/spark.py#L124-L155
|
[
"def",
"start",
"(",
"self",
",",
"job",
")",
":",
"if",
"self",
".",
"hostname",
"is",
"None",
":",
"self",
".",
"hostname",
"=",
"subprocess",
".",
"check_output",
"(",
"[",
"\"hostname\"",
",",
"\"-f\"",
",",
"]",
")",
"[",
":",
"-",
"1",
"]",
"_log",
".",
"info",
"(",
"\"Started Spark master container.\"",
")",
"self",
".",
"sparkContainerID",
"=",
"dockerCheckOutput",
"(",
"job",
"=",
"job",
",",
"defer",
"=",
"STOP",
",",
"workDir",
"=",
"os",
".",
"getcwd",
"(",
")",
",",
"tool",
"=",
"\"quay.io/ucsc_cgl/apache-spark-master:1.5.2\"",
",",
"dockerParameters",
"=",
"[",
"\"--net=host\"",
",",
"\"-d\"",
",",
"\"-v\"",
",",
"\"/mnt/ephemeral/:/ephemeral/:rw\"",
",",
"\"-e\"",
",",
"\"SPARK_MASTER_IP=\"",
"+",
"self",
".",
"hostname",
",",
"\"-e\"",
",",
"\"SPARK_LOCAL_DIRS=/ephemeral/spark/local\"",
",",
"\"-e\"",
",",
"\"SPARK_WORKER_DIR=/ephemeral/spark/work\"",
"]",
",",
"parameters",
"=",
"[",
"self",
".",
"hostname",
"]",
")",
"[",
":",
"-",
"1",
"]",
"_log",
".",
"info",
"(",
"\"Started HDFS Datanode.\"",
")",
"self",
".",
"hdfsContainerID",
"=",
"dockerCheckOutput",
"(",
"job",
"=",
"job",
",",
"defer",
"=",
"STOP",
",",
"workDir",
"=",
"os",
".",
"getcwd",
"(",
")",
",",
"tool",
"=",
"\"quay.io/ucsc_cgl/apache-hadoop-master:2.6.2\"",
",",
"dockerParameters",
"=",
"[",
"\"--net=host\"",
",",
"\"-d\"",
"]",
",",
"parameters",
"=",
"[",
"self",
".",
"hostname",
"]",
")",
"[",
":",
"-",
"1",
"]",
"return",
"self",
".",
"hostname"
] |
022a615fc3dc98fc1aaa7bfd232409962ca44fbd
|
test
|
WorkerService.start
|
Start spark and hdfs worker containers
:param job: The underlying job.
|
src/toil_lib/spark.py
|
def start(self, job):
"""
Start spark and hdfs worker containers
:param job: The underlying job.
"""
# start spark and our datanode
self.sparkContainerID = dockerCheckOutput(job=job,
defer=STOP,
workDir=os.getcwd(),
tool="quay.io/ucsc_cgl/apache-spark-worker:1.5.2",
dockerParameters=["--net=host",
"-d",
"-v", "/mnt/ephemeral/:/ephemeral/:rw",
"-e",
"\"SPARK_MASTER_IP=" + self.masterIP + ":" + _SPARK_MASTER_PORT + "\"",
"-e", "SPARK_LOCAL_DIRS=/ephemeral/spark/local",
"-e", "SPARK_WORKER_DIR=/ephemeral/spark/work"],
parameters=[self.masterIP + ":" + _SPARK_MASTER_PORT])[:-1]
self.__start_datanode(job)
# fake do/while to check if HDFS is up
hdfs_down = True
retries = 0
while hdfs_down and (retries < 5):
_log.info("Sleeping 30 seconds before checking HDFS startup.")
time.sleep(30)
clusterID = ""
try:
clusterID = subprocess.check_output(["docker",
"exec",
self.hdfsContainerID,
"grep",
"clusterID",
"-R",
"/opt/apache-hadoop/logs"])
except:
# grep returns a non-zero exit code if the pattern is not found
# we expect to not find the pattern, so a non-zero code is OK
pass
if "Incompatible" in clusterID:
_log.warning("Hadoop Datanode failed to start with: %s", clusterID)
_log.warning("Retrying container startup, retry #%d.", retries)
retries += 1
_log.warning("Removing ephemeral hdfs directory.")
subprocess.check_call(["docker",
"exec",
self.hdfsContainerID,
"rm",
"-rf",
"/ephemeral/hdfs"])
_log.warning("Killing container %s.", self.hdfsContainerID)
subprocess.check_call(["docker",
"kill",
self.hdfsContainerID])
# todo: this is copied code. clean up!
_log.info("Restarting datanode.")
self.__start_datanode(job)
else:
_log.info("HDFS datanode started up OK!")
hdfs_down = False
if retries >= 5:
raise RuntimeError("Failed %d times trying to start HDFS datanode." % retries)
return
|
def start(self, job):
"""
Start spark and hdfs worker containers
:param job: The underlying job.
"""
# start spark and our datanode
self.sparkContainerID = dockerCheckOutput(job=job,
defer=STOP,
workDir=os.getcwd(),
tool="quay.io/ucsc_cgl/apache-spark-worker:1.5.2",
dockerParameters=["--net=host",
"-d",
"-v", "/mnt/ephemeral/:/ephemeral/:rw",
"-e",
"\"SPARK_MASTER_IP=" + self.masterIP + ":" + _SPARK_MASTER_PORT + "\"",
"-e", "SPARK_LOCAL_DIRS=/ephemeral/spark/local",
"-e", "SPARK_WORKER_DIR=/ephemeral/spark/work"],
parameters=[self.masterIP + ":" + _SPARK_MASTER_PORT])[:-1]
self.__start_datanode(job)
# fake do/while to check if HDFS is up
hdfs_down = True
retries = 0
while hdfs_down and (retries < 5):
_log.info("Sleeping 30 seconds before checking HDFS startup.")
time.sleep(30)
clusterID = ""
try:
clusterID = subprocess.check_output(["docker",
"exec",
self.hdfsContainerID,
"grep",
"clusterID",
"-R",
"/opt/apache-hadoop/logs"])
except:
# grep returns a non-zero exit code if the pattern is not found
# we expect to not find the pattern, so a non-zero code is OK
pass
if "Incompatible" in clusterID:
_log.warning("Hadoop Datanode failed to start with: %s", clusterID)
_log.warning("Retrying container startup, retry #%d.", retries)
retries += 1
_log.warning("Removing ephemeral hdfs directory.")
subprocess.check_call(["docker",
"exec",
self.hdfsContainerID,
"rm",
"-rf",
"/ephemeral/hdfs"])
_log.warning("Killing container %s.", self.hdfsContainerID)
subprocess.check_call(["docker",
"kill",
self.hdfsContainerID])
# todo: this is copied code. clean up!
_log.info("Restarting datanode.")
self.__start_datanode(job)
else:
_log.info("HDFS datanode started up OK!")
hdfs_down = False
if retries >= 5:
raise RuntimeError("Failed %d times trying to start HDFS datanode." % retries)
return
|
[
"Start",
"spark",
"and",
"hdfs",
"worker",
"containers"
] |
BD2KGenomics/toil-lib
|
python
|
https://github.com/BD2KGenomics/toil-lib/blob/022a615fc3dc98fc1aaa7bfd232409962ca44fbd/src/toil_lib/spark.py#L211-L283
|
[
"def",
"start",
"(",
"self",
",",
"job",
")",
":",
"# start spark and our datanode",
"self",
".",
"sparkContainerID",
"=",
"dockerCheckOutput",
"(",
"job",
"=",
"job",
",",
"defer",
"=",
"STOP",
",",
"workDir",
"=",
"os",
".",
"getcwd",
"(",
")",
",",
"tool",
"=",
"\"quay.io/ucsc_cgl/apache-spark-worker:1.5.2\"",
",",
"dockerParameters",
"=",
"[",
"\"--net=host\"",
",",
"\"-d\"",
",",
"\"-v\"",
",",
"\"/mnt/ephemeral/:/ephemeral/:rw\"",
",",
"\"-e\"",
",",
"\"\\\"SPARK_MASTER_IP=\"",
"+",
"self",
".",
"masterIP",
"+",
"\":\"",
"+",
"_SPARK_MASTER_PORT",
"+",
"\"\\\"\"",
",",
"\"-e\"",
",",
"\"SPARK_LOCAL_DIRS=/ephemeral/spark/local\"",
",",
"\"-e\"",
",",
"\"SPARK_WORKER_DIR=/ephemeral/spark/work\"",
"]",
",",
"parameters",
"=",
"[",
"self",
".",
"masterIP",
"+",
"\":\"",
"+",
"_SPARK_MASTER_PORT",
"]",
")",
"[",
":",
"-",
"1",
"]",
"self",
".",
"__start_datanode",
"(",
"job",
")",
"# fake do/while to check if HDFS is up",
"hdfs_down",
"=",
"True",
"retries",
"=",
"0",
"while",
"hdfs_down",
"and",
"(",
"retries",
"<",
"5",
")",
":",
"_log",
".",
"info",
"(",
"\"Sleeping 30 seconds before checking HDFS startup.\"",
")",
"time",
".",
"sleep",
"(",
"30",
")",
"clusterID",
"=",
"\"\"",
"try",
":",
"clusterID",
"=",
"subprocess",
".",
"check_output",
"(",
"[",
"\"docker\"",
",",
"\"exec\"",
",",
"self",
".",
"hdfsContainerID",
",",
"\"grep\"",
",",
"\"clusterID\"",
",",
"\"-R\"",
",",
"\"/opt/apache-hadoop/logs\"",
"]",
")",
"except",
":",
"# grep returns a non-zero exit code if the pattern is not found",
"# we expect to not find the pattern, so a non-zero code is OK",
"pass",
"if",
"\"Incompatible\"",
"in",
"clusterID",
":",
"_log",
".",
"warning",
"(",
"\"Hadoop Datanode failed to start with: %s\"",
",",
"clusterID",
")",
"_log",
".",
"warning",
"(",
"\"Retrying container startup, retry #%d.\"",
",",
"retries",
")",
"retries",
"+=",
"1",
"_log",
".",
"warning",
"(",
"\"Removing ephemeral hdfs directory.\"",
")",
"subprocess",
".",
"check_call",
"(",
"[",
"\"docker\"",
",",
"\"exec\"",
",",
"self",
".",
"hdfsContainerID",
",",
"\"rm\"",
",",
"\"-rf\"",
",",
"\"/ephemeral/hdfs\"",
"]",
")",
"_log",
".",
"warning",
"(",
"\"Killing container %s.\"",
",",
"self",
".",
"hdfsContainerID",
")",
"subprocess",
".",
"check_call",
"(",
"[",
"\"docker\"",
",",
"\"kill\"",
",",
"self",
".",
"hdfsContainerID",
"]",
")",
"# todo: this is copied code. clean up!",
"_log",
".",
"info",
"(",
"\"Restarting datanode.\"",
")",
"self",
".",
"__start_datanode",
"(",
"job",
")",
"else",
":",
"_log",
".",
"info",
"(",
"\"HDFS datanode started up OK!\"",
")",
"hdfs_down",
"=",
"False",
"if",
"retries",
">=",
"5",
":",
"raise",
"RuntimeError",
"(",
"\"Failed %d times trying to start HDFS datanode.\"",
"%",
"retries",
")",
"return"
] |
022a615fc3dc98fc1aaa7bfd232409962ca44fbd
|
test
|
WorkerService.__start_datanode
|
Launches the Hadoop datanode.
:param job: The underlying job.
|
src/toil_lib/spark.py
|
def __start_datanode(self, job):
"""
Launches the Hadoop datanode.
:param job: The underlying job.
"""
self.hdfsContainerID = dockerCheckOutput(job=job,
defer=STOP,
workDir=os.getcwd(),
tool="quay.io/ucsc_cgl/apache-hadoop-worker:2.6.2",
dockerParameters=["--net=host",
"-d",
"-v", "/mnt/ephemeral/:/ephemeral/:rw"],
parameters=[self.masterIP])[:-1]
|
def __start_datanode(self, job):
"""
Launches the Hadoop datanode.
:param job: The underlying job.
"""
self.hdfsContainerID = dockerCheckOutput(job=job,
defer=STOP,
workDir=os.getcwd(),
tool="quay.io/ucsc_cgl/apache-hadoop-worker:2.6.2",
dockerParameters=["--net=host",
"-d",
"-v", "/mnt/ephemeral/:/ephemeral/:rw"],
parameters=[self.masterIP])[:-1]
|
[
"Launches",
"the",
"Hadoop",
"datanode",
"."
] |
BD2KGenomics/toil-lib
|
python
|
https://github.com/BD2KGenomics/toil-lib/blob/022a615fc3dc98fc1aaa7bfd232409962ca44fbd/src/toil_lib/spark.py#L285-L298
|
[
"def",
"__start_datanode",
"(",
"self",
",",
"job",
")",
":",
"self",
".",
"hdfsContainerID",
"=",
"dockerCheckOutput",
"(",
"job",
"=",
"job",
",",
"defer",
"=",
"STOP",
",",
"workDir",
"=",
"os",
".",
"getcwd",
"(",
")",
",",
"tool",
"=",
"\"quay.io/ucsc_cgl/apache-hadoop-worker:2.6.2\"",
",",
"dockerParameters",
"=",
"[",
"\"--net=host\"",
",",
"\"-d\"",
",",
"\"-v\"",
",",
"\"/mnt/ephemeral/:/ephemeral/:rw\"",
"]",
",",
"parameters",
"=",
"[",
"self",
".",
"masterIP",
"]",
")",
"[",
":",
"-",
"1",
"]"
] |
022a615fc3dc98fc1aaa7bfd232409962ca44fbd
|
test
|
WorkerService.stop
|
Stop spark and hdfs worker containers
:param job: The underlying job.
|
src/toil_lib/spark.py
|
def stop(self, fileStore):
"""
Stop spark and hdfs worker containers
:param job: The underlying job.
"""
subprocess.call(["docker", "exec", self.sparkContainerID, "rm", "-r", "/ephemeral/spark"])
subprocess.call(["docker", "stop", self.sparkContainerID])
subprocess.call(["docker", "rm", self.sparkContainerID])
_log.info("Stopped Spark worker.")
subprocess.call(["docker", "exec", self.hdfsContainerID, "rm", "-r", "/ephemeral/hdfs"])
subprocess.call(["docker", "stop", self.hdfsContainerID])
subprocess.call(["docker", "rm", self.hdfsContainerID])
_log.info("Stopped HDFS datanode.")
return
|
def stop(self, fileStore):
"""
Stop spark and hdfs worker containers
:param job: The underlying job.
"""
subprocess.call(["docker", "exec", self.sparkContainerID, "rm", "-r", "/ephemeral/spark"])
subprocess.call(["docker", "stop", self.sparkContainerID])
subprocess.call(["docker", "rm", self.sparkContainerID])
_log.info("Stopped Spark worker.")
subprocess.call(["docker", "exec", self.hdfsContainerID, "rm", "-r", "/ephemeral/hdfs"])
subprocess.call(["docker", "stop", self.hdfsContainerID])
subprocess.call(["docker", "rm", self.hdfsContainerID])
_log.info("Stopped HDFS datanode.")
return
|
[
"Stop",
"spark",
"and",
"hdfs",
"worker",
"containers"
] |
BD2KGenomics/toil-lib
|
python
|
https://github.com/BD2KGenomics/toil-lib/blob/022a615fc3dc98fc1aaa7bfd232409962ca44fbd/src/toil_lib/spark.py#L300-L317
|
[
"def",
"stop",
"(",
"self",
",",
"fileStore",
")",
":",
"subprocess",
".",
"call",
"(",
"[",
"\"docker\"",
",",
"\"exec\"",
",",
"self",
".",
"sparkContainerID",
",",
"\"rm\"",
",",
"\"-r\"",
",",
"\"/ephemeral/spark\"",
"]",
")",
"subprocess",
".",
"call",
"(",
"[",
"\"docker\"",
",",
"\"stop\"",
",",
"self",
".",
"sparkContainerID",
"]",
")",
"subprocess",
".",
"call",
"(",
"[",
"\"docker\"",
",",
"\"rm\"",
",",
"self",
".",
"sparkContainerID",
"]",
")",
"_log",
".",
"info",
"(",
"\"Stopped Spark worker.\"",
")",
"subprocess",
".",
"call",
"(",
"[",
"\"docker\"",
",",
"\"exec\"",
",",
"self",
".",
"hdfsContainerID",
",",
"\"rm\"",
",",
"\"-r\"",
",",
"\"/ephemeral/hdfs\"",
"]",
")",
"subprocess",
".",
"call",
"(",
"[",
"\"docker\"",
",",
"\"stop\"",
",",
"self",
".",
"hdfsContainerID",
"]",
")",
"subprocess",
".",
"call",
"(",
"[",
"\"docker\"",
",",
"\"rm\"",
",",
"self",
".",
"hdfsContainerID",
"]",
")",
"_log",
".",
"info",
"(",
"\"Stopped HDFS datanode.\"",
")",
"return"
] |
022a615fc3dc98fc1aaa7bfd232409962ca44fbd
|
test
|
WorkerService.check
|
Checks to see if Spark worker and HDFS datanode are still running.
|
src/toil_lib/spark.py
|
def check(self):
"""
Checks to see if Spark worker and HDFS datanode are still running.
"""
status = _checkContainerStatus(self.sparkContainerID,
self.hdfsContainerID,
sparkNoun='worker',
hdfsNoun='datanode')
return status
|
def check(self):
"""
Checks to see if Spark worker and HDFS datanode are still running.
"""
status = _checkContainerStatus(self.sparkContainerID,
self.hdfsContainerID,
sparkNoun='worker',
hdfsNoun='datanode')
return status
|
[
"Checks",
"to",
"see",
"if",
"Spark",
"worker",
"and",
"HDFS",
"datanode",
"are",
"still",
"running",
"."
] |
BD2KGenomics/toil-lib
|
python
|
https://github.com/BD2KGenomics/toil-lib/blob/022a615fc3dc98fc1aaa7bfd232409962ca44fbd/src/toil_lib/spark.py#L320-L330
|
[
"def",
"check",
"(",
"self",
")",
":",
"status",
"=",
"_checkContainerStatus",
"(",
"self",
".",
"sparkContainerID",
",",
"self",
".",
"hdfsContainerID",
",",
"sparkNoun",
"=",
"'worker'",
",",
"hdfsNoun",
"=",
"'datanode'",
")",
"return",
"status"
] |
022a615fc3dc98fc1aaa7bfd232409962ca44fbd
|
test
|
base_tokenizer
|
Tokenizer. Generates tokens stream from text
|
mint.py
|
def base_tokenizer(fp):
'Tokenizer. Generates tokens stream from text'
if isinstance(fp, StringIO):
template_file = fp
size = template_file.len
else:
#empty file check
if os.fstat(fp.fileno()).st_size == 0:
yield TOKEN_EOF, 'EOF', 0, 0
return
template_file = mmap.mmap(fp.fileno(), 0, access=mmap.ACCESS_READ)
size = template_file.size()
lineno = 0
while 1:
lineno += 1
pos = 1
# end of file
if template_file.tell() == size:
yield TOKEN_EOF, 'EOF', lineno, 0
break
        # now we tokenize line by line
line = template_file.readline().decode('utf-8')
line = line.replace('\r\n', '')
line = line.replace('\n', '')
# ignoring non XML comments
if re_comment.match(line):
continue
last_text = deque()
while line:
line_len = len(line)
for token in tokens:
m = token.regex.match(line)
if m:
if last_text:
yield TOKEN_TEXT, ''.join(last_text), lineno, pos
pos += len(last_text)
last_text.clear()
offset, value = m.end(), m.group()
line = line[offset:]
yield token, value, lineno, pos
pos += offset
break
# we did not get right in tokens list, so next char is text
if line_len == len(line):
last_text.append(line[0])
line = line[1:]
if last_text:
yield TOKEN_TEXT, ''.join(last_text), lineno, pos
pos += len(last_text)
last_text.clear()
yield TOKEN_NEWLINE, '\n', lineno, pos
# all work is done
template_file.close()
|
def base_tokenizer(fp):
'Tokenizer. Generates tokens stream from text'
if isinstance(fp, StringIO):
template_file = fp
size = template_file.len
else:
#empty file check
if os.fstat(fp.fileno()).st_size == 0:
yield TOKEN_EOF, 'EOF', 0, 0
return
template_file = mmap.mmap(fp.fileno(), 0, access=mmap.ACCESS_READ)
size = template_file.size()
lineno = 0
while 1:
lineno += 1
pos = 1
# end of file
if template_file.tell() == size:
yield TOKEN_EOF, 'EOF', lineno, 0
break
        # now we tokenize line by line
line = template_file.readline().decode('utf-8')
line = line.replace('\r\n', '')
line = line.replace('\n', '')
# ignoring non XML comments
if re_comment.match(line):
continue
last_text = deque()
while line:
line_len = len(line)
for token in tokens:
m = token.regex.match(line)
if m:
if last_text:
yield TOKEN_TEXT, ''.join(last_text), lineno, pos
pos += len(last_text)
last_text.clear()
offset, value = m.end(), m.group()
line = line[offset:]
yield token, value, lineno, pos
pos += offset
break
# we did not get right in tokens list, so next char is text
if line_len == len(line):
last_text.append(line[0])
line = line[1:]
if last_text:
yield TOKEN_TEXT, ''.join(last_text), lineno, pos
pos += len(last_text)
last_text.clear()
yield TOKEN_NEWLINE, '\n', lineno, pos
# all work is done
template_file.close()
|
[
"Tokenizer",
".",
"Generates",
"tokens",
"stream",
"from",
"text"
] |
riffm/mint
|
python
|
https://github.com/riffm/mint/blob/db00855bbe9156d5ab281e00835af85a7958dd16/mint.py#L138-L196
|
[
"def",
"base_tokenizer",
"(",
"fp",
")",
":",
"if",
"isinstance",
"(",
"fp",
",",
"StringIO",
")",
":",
"template_file",
"=",
"fp",
"size",
"=",
"template_file",
".",
"len",
"else",
":",
"#empty file check",
"if",
"os",
".",
"fstat",
"(",
"fp",
".",
"fileno",
"(",
")",
")",
".",
"st_size",
"==",
"0",
":",
"yield",
"TOKEN_EOF",
",",
"'EOF'",
",",
"0",
",",
"0",
"return",
"template_file",
"=",
"mmap",
".",
"mmap",
"(",
"fp",
".",
"fileno",
"(",
")",
",",
"0",
",",
"access",
"=",
"mmap",
".",
"ACCESS_READ",
")",
"size",
"=",
"template_file",
".",
"size",
"(",
")",
"lineno",
"=",
"0",
"while",
"1",
":",
"lineno",
"+=",
"1",
"pos",
"=",
"1",
"# end of file",
"if",
"template_file",
".",
"tell",
"(",
")",
"==",
"size",
":",
"yield",
"TOKEN_EOF",
",",
"'EOF'",
",",
"lineno",
",",
"0",
"break",
"# now we tokinize line by line",
"line",
"=",
"template_file",
".",
"readline",
"(",
")",
".",
"decode",
"(",
"'utf-8'",
")",
"line",
"=",
"line",
".",
"replace",
"(",
"'\\r\\n'",
",",
"''",
")",
"line",
"=",
"line",
".",
"replace",
"(",
"'\\n'",
",",
"''",
")",
"# ignoring non XML comments",
"if",
"re_comment",
".",
"match",
"(",
"line",
")",
":",
"continue",
"last_text",
"=",
"deque",
"(",
")",
"while",
"line",
":",
"line_len",
"=",
"len",
"(",
"line",
")",
"for",
"token",
"in",
"tokens",
":",
"m",
"=",
"token",
".",
"regex",
".",
"match",
"(",
"line",
")",
"if",
"m",
":",
"if",
"last_text",
":",
"yield",
"TOKEN_TEXT",
",",
"''",
".",
"join",
"(",
"last_text",
")",
",",
"lineno",
",",
"pos",
"pos",
"+=",
"len",
"(",
"last_text",
")",
"last_text",
".",
"clear",
"(",
")",
"offset",
",",
"value",
"=",
"m",
".",
"end",
"(",
")",
",",
"m",
".",
"group",
"(",
")",
"line",
"=",
"line",
"[",
"offset",
":",
"]",
"yield",
"token",
",",
"value",
",",
"lineno",
",",
"pos",
"pos",
"+=",
"offset",
"break",
"# we did not get right in tokens list, so next char is text",
"if",
"line_len",
"==",
"len",
"(",
"line",
")",
":",
"last_text",
".",
"append",
"(",
"line",
"[",
"0",
"]",
")",
"line",
"=",
"line",
"[",
"1",
":",
"]",
"if",
"last_text",
":",
"yield",
"TOKEN_TEXT",
",",
"''",
".",
"join",
"(",
"last_text",
")",
",",
"lineno",
",",
"pos",
"pos",
"+=",
"len",
"(",
"last_text",
")",
"last_text",
".",
"clear",
"(",
")",
"yield",
"TOKEN_NEWLINE",
",",
"'\\n'",
",",
"lineno",
",",
"pos",
"# all work is done",
"template_file",
".",
"close",
"(",
")"
] |
db00855bbe9156d5ab281e00835af85a7958dd16
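A sketch of driving base_tokenizer with an in-memory template. mint is Python 2 code (note the StringIO isinstance check), so the sketch uses the Python 2 StringIO module and print statement; the template text is a placeholder, and the module-level token definitions are assumed to be importable alongside base_tokenizer.

from StringIO import StringIO  # Python 2
from mint import base_tokenizer  # assumes mint.py is on the path

source = StringIO(u'plain text\n')
# Yields (token, value, lineno, pos) tuples, ending with the EOF token.
for token, value, lineno, pos in base_tokenizer(source):
    print token, repr(value), lineno, pos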
|
test
|
get_mint_tree
|
This function is wrapper to normal parsers (tag_parser, block_parser, etc.).
Returns mint tree.
|
mint.py
|
def get_mint_tree(tokens_stream):
'''
This function is wrapper to normal parsers (tag_parser, block_parser, etc.).
Returns mint tree.
'''
smart_stack = RecursiveStack()
block_parser.parse(tokens_stream, smart_stack)
return MintTemplate(body=smart_stack.stack)
|
def get_mint_tree(tokens_stream):
'''
This function is wrapper to normal parsers (tag_parser, block_parser, etc.).
Returns mint tree.
'''
smart_stack = RecursiveStack()
block_parser.parse(tokens_stream, smart_stack)
return MintTemplate(body=smart_stack.stack)
|
[
"This",
"function",
"is",
"wrapper",
"to",
"normal",
"parsers",
"(",
"tag_parser",
"block_parser",
"etc",
".",
")",
".",
"Returns",
"mint",
"tree",
"."
] |
riffm/mint
|
python
|
https://github.com/riffm/mint/blob/db00855bbe9156d5ab281e00835af85a7958dd16/mint.py#L1233-L1240
|
[
"def",
"get_mint_tree",
"(",
"tokens_stream",
")",
":",
"smart_stack",
"=",
"RecursiveStack",
"(",
")",
"block_parser",
".",
"parse",
"(",
"tokens_stream",
",",
"smart_stack",
")",
"return",
"MintTemplate",
"(",
"body",
"=",
"smart_stack",
".",
"stack",
")"
] |
db00855bbe9156d5ab281e00835af85a7958dd16
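A one-line sketch chaining the two mint helpers above; 'page.mint' is a placeholder template filename.

from mint import base_tokenizer, get_mint_tree

# Tokenize a template file and parse the token stream into a mint tree.
with open('page.mint') as fp:
    tree = get_mint_tree(base_tokenizer(fp))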
|
test
|
lookup_zone
|
Look up a zone ID for a zone string.
Args: conn: boto.route53.Route53Connection
zone: string eg. foursquare.com
Returns: zone ID eg. ZE2DYFZDWGSL4.
Raises: ZoneNotFoundError if zone not found.
|
src/r53/r53.py
|
def lookup_zone(conn, zone):
"""Look up a zone ID for a zone string.
Args: conn: boto.route53.Route53Connection
zone: string eg. foursquare.com
Returns: zone ID eg. ZE2DYFZDWGSL4.
Raises: ZoneNotFoundError if zone not found."""
all_zones = conn.get_all_hosted_zones()
for resp in all_zones['ListHostedZonesResponse']['HostedZones']:
if resp['Name'].rstrip('.') == zone.rstrip('.'):
return resp['Id'].replace('/hostedzone/', '')
raise ZoneNotFoundError('zone %s not found in response' % zone)
|
def lookup_zone(conn, zone):
"""Look up a zone ID for a zone string.
Args: conn: boto.route53.Route53Connection
zone: string eg. foursquare.com
Returns: zone ID eg. ZE2DYFZDWGSL4.
Raises: ZoneNotFoundError if zone not found."""
all_zones = conn.get_all_hosted_zones()
for resp in all_zones['ListHostedZonesResponse']['HostedZones']:
if resp['Name'].rstrip('.') == zone.rstrip('.'):
return resp['Id'].replace('/hostedzone/', '')
raise ZoneNotFoundError('zone %s not found in response' % zone)
|
[
"Look",
"up",
"a",
"zone",
"ID",
"for",
"a",
"zone",
"string",
"."
] |
coops/r53
|
python
|
https://github.com/coops/r53/blob/3c4e7242ad65b0e1ad4ba6b4ac893c7d501ceb0a/src/r53/r53.py#L26-L37
|
[
"def",
"lookup_zone",
"(",
"conn",
",",
"zone",
")",
":",
"all_zones",
"=",
"conn",
".",
"get_all_hosted_zones",
"(",
")",
"for",
"resp",
"in",
"all_zones",
"[",
"'ListHostedZonesResponse'",
"]",
"[",
"'HostedZones'",
"]",
":",
"if",
"resp",
"[",
"'Name'",
"]",
".",
"rstrip",
"(",
"'.'",
")",
"==",
"zone",
".",
"rstrip",
"(",
"'.'",
")",
":",
"return",
"resp",
"[",
"'Id'",
"]",
".",
"replace",
"(",
"'/hostedzone/'",
",",
"''",
")",
"raise",
"ZoneNotFoundError",
"(",
"'zone %s not found in response'",
"%",
"zone",
")"
] |
3c4e7242ad65b0e1ad4ba6b4ac893c7d501ceb0a
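A hedged sketch of calling lookup_zone. It assumes boto 2 is installed with AWS credentials available to it; the domain is a placeholder and the returned id is only an example.

import boto.route53

conn = boto.route53.Route53Connection()
zone_id = lookup_zone(conn, 'example.com.')  # e.g. 'ZE2DYFZDWGSL4'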
|
test
|
fetch_config
|
Fetch all pieces of a Route 53 config from Amazon.
Args: zone: string, hosted zone id.
conn: boto.route53.Route53Connection
Returns: list of ElementTrees, one for each piece of config.
|
src/r53/r53.py
|
def fetch_config(zone, conn):
"""Fetch all pieces of a Route 53 config from Amazon.
Args: zone: string, hosted zone id.
conn: boto.route53.Route53Connection
Returns: list of ElementTrees, one for each piece of config."""
more_to_fetch = True
cfg_chunks = []
next_name = None
next_type = None
next_identifier = None
while more_to_fetch == True:
more_to_fetch = False
getstr = '/%s/hostedzone/%s/rrset' % (R53_API_VERSION, zone)
if next_name is not None:
getstr += '?name=%s&type=%s' % (next_name, next_type)
if next_identifier is not None:
getstr += '&identifier=%s' % next_identifier
log.debug('requesting %s' % getstr)
resp = conn.make_request('GET', getstr)
etree = lxml.etree.parse(resp)
cfg_chunks.append(etree)
root = etree.getroot()
truncated = root.find('{%s}IsTruncated' % R53_XMLNS)
if truncated is not None and truncated.text == 'true':
more_to_fetch = True
next_name = root.find('{%s}NextRecordName' % R53_XMLNS).text
next_type = root.find('{%s}NextRecordType' % R53_XMLNS).text
try:
next_identifier = root.find('{%s}NextRecordIdentifier' % R53_XMLNS).text
except AttributeError: # may not have next_identifier
next_identifier = None
return cfg_chunks
|
def fetch_config(zone, conn):
"""Fetch all pieces of a Route 53 config from Amazon.
Args: zone: string, hosted zone id.
conn: boto.route53.Route53Connection
Returns: list of ElementTrees, one for each piece of config."""
more_to_fetch = True
cfg_chunks = []
next_name = None
next_type = None
next_identifier = None
while more_to_fetch == True:
more_to_fetch = False
getstr = '/%s/hostedzone/%s/rrset' % (R53_API_VERSION, zone)
if next_name is not None:
getstr += '?name=%s&type=%s' % (next_name, next_type)
if next_identifier is not None:
getstr += '&identifier=%s' % next_identifier
log.debug('requesting %s' % getstr)
resp = conn.make_request('GET', getstr)
etree = lxml.etree.parse(resp)
cfg_chunks.append(etree)
root = etree.getroot()
truncated = root.find('{%s}IsTruncated' % R53_XMLNS)
if truncated is not None and truncated.text == 'true':
more_to_fetch = True
next_name = root.find('{%s}NextRecordName' % R53_XMLNS).text
next_type = root.find('{%s}NextRecordType' % R53_XMLNS).text
try:
next_identifier = root.find('{%s}NextRecordIdentifier' % R53_XMLNS).text
except AttributeError: # may not have next_identifier
next_identifier = None
return cfg_chunks
|
[
"Fetch",
"all",
"pieces",
"of",
"a",
"Route",
"53",
"config",
"from",
"Amazon",
"."
] |
coops/r53
|
python
|
https://github.com/coops/r53/blob/3c4e7242ad65b0e1ad4ba6b4ac893c7d501ceb0a/src/r53/r53.py#L39-L71
|
[
"def",
"fetch_config",
"(",
"zone",
",",
"conn",
")",
":",
"more_to_fetch",
"=",
"True",
"cfg_chunks",
"=",
"[",
"]",
"next_name",
"=",
"None",
"next_type",
"=",
"None",
"next_identifier",
"=",
"None",
"while",
"more_to_fetch",
"==",
"True",
":",
"more_to_fetch",
"=",
"False",
"getstr",
"=",
"'/%s/hostedzone/%s/rrset'",
"%",
"(",
"R53_API_VERSION",
",",
"zone",
")",
"if",
"next_name",
"is",
"not",
"None",
":",
"getstr",
"+=",
"'?name=%s&type=%s'",
"%",
"(",
"next_name",
",",
"next_type",
")",
"if",
"next_identifier",
"is",
"not",
"None",
":",
"getstr",
"+=",
"'&identifier=%s'",
"%",
"next_identifier",
"log",
".",
"debug",
"(",
"'requesting %s'",
"%",
"getstr",
")",
"resp",
"=",
"conn",
".",
"make_request",
"(",
"'GET'",
",",
"getstr",
")",
"etree",
"=",
"lxml",
".",
"etree",
".",
"parse",
"(",
"resp",
")",
"cfg_chunks",
".",
"append",
"(",
"etree",
")",
"root",
"=",
"etree",
".",
"getroot",
"(",
")",
"truncated",
"=",
"root",
".",
"find",
"(",
"'{%s}IsTruncated'",
"%",
"R53_XMLNS",
")",
"if",
"truncated",
"is",
"not",
"None",
"and",
"truncated",
".",
"text",
"==",
"'true'",
":",
"more_to_fetch",
"=",
"True",
"next_name",
"=",
"root",
".",
"find",
"(",
"'{%s}NextRecordName'",
"%",
"R53_XMLNS",
")",
".",
"text",
"next_type",
"=",
"root",
".",
"find",
"(",
"'{%s}NextRecordType'",
"%",
"R53_XMLNS",
")",
".",
"text",
"try",
":",
"next_identifier",
"=",
"root",
".",
"find",
"(",
"'{%s}NextRecordIdentifier'",
"%",
"R53_XMLNS",
")",
".",
"text",
"except",
"AttributeError",
":",
"# may not have next_identifier",
"next_identifier",
"=",
"None",
"return",
"cfg_chunks"
] |
3c4e7242ad65b0e1ad4ba6b4ac893c7d501ceb0a
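Continuing the lookup_zone sketch: fetch every paged response of record sets for the zone that was just looked up. The r53 code is Python 2, so the print statement follows suit.

# Returns one ElementTree per paged Route 53 response.
cfg_chunks = fetch_config(zone_id, conn)
print 'fetched %d response page(s)' % len(cfg_chunks)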
|
test
|
merge_config
|
Merge a set of fetched Route 53 config Etrees into a canonical form.
Args: cfg_chunks: [ lxml.etree.ETree ]
Returns: lxml.etree.Element
|
src/r53/r53.py
|
def merge_config(cfg_chunks):
"""Merge a set of fetched Route 53 config Etrees into a canonical form.
Args: cfg_chunks: [ lxml.etree.ETree ]
Returns: lxml.etree.Element"""
root = lxml.etree.XML('<ResourceRecordSets xmlns="%s"></ResourceRecordSets>' % R53_XMLNS, parser=XML_PARSER)
for chunk in cfg_chunks:
for rrset in chunk.iterfind('.//{%s}ResourceRecordSet' % R53_XMLNS):
root.append(rrset)
return root
|
def merge_config(cfg_chunks):
"""Merge a set of fetched Route 53 config Etrees into a canonical form.
Args: cfg_chunks: [ lxml.etree.ETree ]
Returns: lxml.etree.Element"""
root = lxml.etree.XML('<ResourceRecordSets xmlns="%s"></ResourceRecordSets>' % R53_XMLNS, parser=XML_PARSER)
for chunk in cfg_chunks:
for rrset in chunk.iterfind('.//{%s}ResourceRecordSet' % R53_XMLNS):
root.append(rrset)
return root
|
[
"Merge",
"a",
"set",
"of",
"fetched",
"Route",
"53",
"config",
"Etrees",
"into",
"a",
"canonical",
"form",
"."
] |
coops/r53
|
python
|
https://github.com/coops/r53/blob/3c4e7242ad65b0e1ad4ba6b4ac893c7d501ceb0a/src/r53/r53.py#L73-L82
|
[
"def",
"merge_config",
"(",
"cfg_chunks",
")",
":",
"root",
"=",
"lxml",
".",
"etree",
".",
"XML",
"(",
"'<ResourceRecordSets xmlns=\"%s\"></ResourceRecordSets>'",
"%",
"R53_XMLNS",
",",
"parser",
"=",
"XML_PARSER",
")",
"for",
"chunk",
"in",
"cfg_chunks",
":",
"for",
"rrset",
"in",
"chunk",
".",
"iterfind",
"(",
"'.//{%s}ResourceRecordSet'",
"%",
"R53_XMLNS",
")",
":",
"root",
".",
"append",
"(",
"rrset",
")",
"return",
"root"
] |
3c4e7242ad65b0e1ad4ba6b4ac893c7d501ceb0a
|
test
|
normalize_rrs
|
Lexically sort the order of every ResourceRecord in a ResourceRecords
element so we don't generate spurious changes: ordering of e.g. NS records
is irrelevant to the DNS line protocol, but XML sees it differently.
Also rewrite any wildcard records to use the ascii hex code: somewhere deep
inside route53 is something that used to look like tinydns, and amazon's
API will always display wildcard records as "\052.example.com".
Args: rrsets: lxml.etree.Element (<ResourceRecordSets>)
|
src/r53/r53.py
|
def normalize_rrs(rrsets):
"""Lexically sort the order of every ResourceRecord in a ResourceRecords
element so we don't generate spurious changes: ordering of e.g. NS records
is irrelevant to the DNS line protocol, but XML sees it differently.
Also rewrite any wildcard records to use the ascii hex code: somewhere deep
inside route53 is something that used to look like tinydns, and amazon's
API will always display wildcard records as "\052.example.com".
    Args: rrsets: lxml.etree.Element (<ResourceRecordSets>) """
for rrset in rrsets:
if rrset.tag == '{%s}ResourceRecordSet' % R53_XMLNS:
for rrs in rrset:
# preformat wildcard records
if rrs.tag == '{%s}Name' % R53_XMLNS:
if rrs.text.startswith('*.'):
old_text = rrs.text
new_text = '\\052.%s' % old_text[2:]
print 'Found wildcard record, rewriting to %s' % new_text
rrs.text = rrs.text.replace(old_text, new_text)
# sort ResourceRecord elements by Value
if rrs.tag == '{%s}ResourceRecords' % R53_XMLNS:
# 0th value of ResourceRecord is always the Value element
sorted_rrs = sorted(rrs, key=lambda x: x[0].text)
rrs[:] = sorted_rrs
return rrsets
|
def normalize_rrs(rrsets):
"""Lexically sort the order of every ResourceRecord in a ResourceRecords
element so we don't generate spurious changes: ordering of e.g. NS records
is irrelevant to the DNS line protocol, but XML sees it differently.
Also rewrite any wildcard records to use the ascii hex code: somewhere deep
inside route53 is something that used to look like tinydns, and amazon's
API will always display wildcard records as "\052.example.com".
    Args: rrsets: lxml.etree.Element (<ResourceRecordSets>) """
for rrset in rrsets:
if rrset.tag == '{%s}ResourceRecordSet' % R53_XMLNS:
for rrs in rrset:
# preformat wildcard records
if rrs.tag == '{%s}Name' % R53_XMLNS:
if rrs.text.startswith('*.'):
old_text = rrs.text
new_text = '\\052.%s' % old_text[2:]
print 'Found wildcard record, rewriting to %s' % new_text
rrs.text = rrs.text.replace(old_text, new_text)
# sort ResourceRecord elements by Value
if rrs.tag == '{%s}ResourceRecords' % R53_XMLNS:
# 0th value of ResourceRecord is always the Value element
sorted_rrs = sorted(rrs, key=lambda x: x[0].text)
rrs[:] = sorted_rrs
return rrsets
|
[
"Lexically",
"sort",
"the",
"order",
"of",
"every",
"ResourceRecord",
"in",
"a",
"ResourceRecords",
"element",
"so",
"we",
"don",
"t",
"generate",
"spurious",
"changes",
":",
"ordering",
"of",
"e",
".",
"g",
".",
"NS",
"records",
"is",
"irrelevant",
"to",
"the",
"DNS",
"line",
"protocol",
"but",
"XML",
"sees",
"it",
"differently",
"."
] |
coops/r53
|
python
|
https://github.com/coops/r53/blob/3c4e7242ad65b0e1ad4ba6b4ac893c7d501ceb0a/src/r53/r53.py#L87-L112
|
[
"def",
"normalize_rrs",
"(",
"rrsets",
")",
":",
"for",
"rrset",
"in",
"rrsets",
":",
"if",
"rrset",
".",
"tag",
"==",
"'{%s}ResourceRecordSet'",
"%",
"R53_XMLNS",
":",
"for",
"rrs",
"in",
"rrset",
":",
"# preformat wildcard records",
"if",
"rrs",
".",
"tag",
"==",
"'{%s}Name'",
"%",
"R53_XMLNS",
":",
"if",
"rrs",
".",
"text",
".",
"startswith",
"(",
"'*.'",
")",
":",
"old_text",
"=",
"rrs",
".",
"text",
"new_text",
"=",
"'\\\\052.%s'",
"%",
"old_text",
"[",
"2",
":",
"]",
"print",
"'Found wildcard record, rewriting to %s'",
"%",
"new_text",
"rrs",
".",
"text",
"=",
"rrs",
".",
"text",
".",
"replace",
"(",
"old_text",
",",
"new_text",
")",
"# sort ResourceRecord elements by Value",
"if",
"rrs",
".",
"tag",
"==",
"'{%s}ResourceRecords'",
"%",
"R53_XMLNS",
":",
"# 0th value of ResourceRecord is always the Value element",
"sorted_rrs",
"=",
"sorted",
"(",
"rrs",
",",
"key",
"=",
"lambda",
"x",
":",
"x",
"[",
"0",
"]",
".",
"text",
")",
"rrs",
"[",
":",
"]",
"=",
"sorted_rrs",
"return",
"rrsets"
] |
3c4e7242ad65b0e1ad4ba6b4ac893c7d501ceb0a
|
test
|
generate_changeset
|
Diff two XML configs and return an object with changes to be written.
Args: old, new: lxml.etree.Element (<ResourceRecordSets>).
Returns: lxml.etree.ETree (<ChangeResourceRecordSetsRequest>) or None
|
src/r53/r53.py
|
def generate_changeset(old, new, comment=None):
"""Diff two XML configs and return an object with changes to be written.
Args: old, new: lxml.etree.Element (<ResourceRecordSets>).
Returns: lxml.etree.ETree (<ChangeResourceRecordSetsRequest>) or None"""
rrsets_tag = '{%s}ResourceRecordSets' % R53_XMLNS
if rrsets_tag not in (old.tag, new.tag):
log.error('both configs must be ResourceRecordSets tags. old: %s, new: %s' % (old.tag, new.tag))
raise InvalidArgumentException()
if comment is None:
comment = 'Generated by %s for %s@%s at %s.' % (
__file__,
os.environ['USER'],
socket.gethostname(),
time.strftime('%Y-%m-%d %H:%M:%S'))
root = lxml.etree.XML("""<ChangeResourceRecordSetsRequest xmlns="%s">
<ChangeBatch>
<Comment>%s</Comment>
<Changes/>
</ChangeBatch>
</ChangeResourceRecordSetsRequest>""" % (
R53_XMLNS, comment), parser=XML_PARSER)
changesroot = root.find('.//{%s}Changes' % R53_XMLNS)
old = normalize_rrs(old)
new = normalize_rrs(new)
oldset = set([lxml.etree.tostring(x).rstrip() for x in old])
newset = set([lxml.etree.tostring(x).rstrip() for x in new])
if oldset == newset:
return None
# look for removed elements
for rrs in old:
rrsst = lxml.etree.tostring(rrs).rstrip()
if rrsst not in newset:
log.debug("REMOVED:")
log.debug(rrsst)
change = lxml.etree.XML('<Change xmlns="%s"><Action>DELETE</Action></Change>' % R53_XMLNS, parser=XML_PARSER)
change.append(rrs)
changesroot.append(change)
# look for added elements
for rrs in new:
rrsst = lxml.etree.tostring(rrs).rstrip()
if rrsst not in oldset:
log.debug("ADDED:")
log.debug(rrsst)
change = lxml.etree.XML('<Change xmlns="%s"><Action>CREATE</Action></Change>' % R53_XMLNS, parser=XML_PARSER)
change.append(rrs)
changesroot.append(change)
return root
|
def generate_changeset(old, new, comment=None):
"""Diff two XML configs and return an object with changes to be written.
Args: old, new: lxml.etree.Element (<ResourceRecordSets>).
Returns: lxml.etree.ETree (<ChangeResourceRecordSetsRequest>) or None"""
rrsets_tag = '{%s}ResourceRecordSets' % R53_XMLNS
if rrsets_tag not in (old.tag, new.tag):
log.error('both configs must be ResourceRecordSets tags. old: %s, new: %s' % (old.tag, new.tag))
raise InvalidArgumentException()
if comment is None:
comment = 'Generated by %s for %s@%s at %s.' % (
__file__,
os.environ['USER'],
socket.gethostname(),
time.strftime('%Y-%m-%d %H:%M:%S'))
root = lxml.etree.XML("""<ChangeResourceRecordSetsRequest xmlns="%s">
<ChangeBatch>
<Comment>%s</Comment>
<Changes/>
</ChangeBatch>
</ChangeResourceRecordSetsRequest>""" % (
R53_XMLNS, comment), parser=XML_PARSER)
changesroot = root.find('.//{%s}Changes' % R53_XMLNS)
old = normalize_rrs(old)
new = normalize_rrs(new)
oldset = set([lxml.etree.tostring(x).rstrip() for x in old])
newset = set([lxml.etree.tostring(x).rstrip() for x in new])
if oldset == newset:
return None
# look for removed elements
for rrs in old:
rrsst = lxml.etree.tostring(rrs).rstrip()
if rrsst not in newset:
log.debug("REMOVED:")
log.debug(rrsst)
change = lxml.etree.XML('<Change xmlns="%s"><Action>DELETE</Action></Change>' % R53_XMLNS, parser=XML_PARSER)
change.append(rrs)
changesroot.append(change)
# look for added elements
for rrs in new:
rrsst = lxml.etree.tostring(rrs).rstrip()
if rrsst not in oldset:
log.debug("ADDED:")
log.debug(rrsst)
change = lxml.etree.XML('<Change xmlns="%s"><Action>CREATE</Action></Change>' % R53_XMLNS, parser=XML_PARSER)
change.append(rrs)
changesroot.append(change)
return root
|
[
"Diff",
"two",
"XML",
"configs",
"and",
"return",
"an",
"object",
"with",
"changes",
"to",
"be",
"written",
"."
] |
coops/r53
|
python
|
https://github.com/coops/r53/blob/3c4e7242ad65b0e1ad4ba6b4ac893c7d501ceb0a/src/r53/r53.py#L114-L161
|
[
"def",
"generate_changeset",
"(",
"old",
",",
"new",
",",
"comment",
"=",
"None",
")",
":",
"rrsets_tag",
"=",
"'{%s}ResourceRecordSets'",
"%",
"R53_XMLNS",
"if",
"rrsets_tag",
"not",
"in",
"(",
"old",
".",
"tag",
",",
"new",
".",
"tag",
")",
":",
"log",
".",
"error",
"(",
"'both configs must be ResourceRecordSets tags. old: %s, new: %s'",
"%",
"(",
"old",
".",
"tag",
",",
"new",
".",
"tag",
")",
")",
"raise",
"InvalidArgumentException",
"(",
")",
"if",
"comment",
"is",
"None",
":",
"comment",
"=",
"'Generated by %s for %s@%s at %s.'",
"%",
"(",
"__file__",
",",
"os",
".",
"environ",
"[",
"'USER'",
"]",
",",
"socket",
".",
"gethostname",
"(",
")",
",",
"time",
".",
"strftime",
"(",
"'%Y-%m-%d %H:%M:%S'",
")",
")",
"root",
"=",
"lxml",
".",
"etree",
".",
"XML",
"(",
"\"\"\"<ChangeResourceRecordSetsRequest xmlns=\"%s\">\n <ChangeBatch>\n <Comment>%s</Comment>\n <Changes/>\n </ChangeBatch>\n </ChangeResourceRecordSetsRequest>\"\"\"",
"%",
"(",
"R53_XMLNS",
",",
"comment",
")",
",",
"parser",
"=",
"XML_PARSER",
")",
"changesroot",
"=",
"root",
".",
"find",
"(",
"'.//{%s}Changes'",
"%",
"R53_XMLNS",
")",
"old",
"=",
"normalize_rrs",
"(",
"old",
")",
"new",
"=",
"normalize_rrs",
"(",
"new",
")",
"oldset",
"=",
"set",
"(",
"[",
"lxml",
".",
"etree",
".",
"tostring",
"(",
"x",
")",
".",
"rstrip",
"(",
")",
"for",
"x",
"in",
"old",
"]",
")",
"newset",
"=",
"set",
"(",
"[",
"lxml",
".",
"etree",
".",
"tostring",
"(",
"x",
")",
".",
"rstrip",
"(",
")",
"for",
"x",
"in",
"new",
"]",
")",
"if",
"oldset",
"==",
"newset",
":",
"return",
"None",
"# look for removed elements",
"for",
"rrs",
"in",
"old",
":",
"rrsst",
"=",
"lxml",
".",
"etree",
".",
"tostring",
"(",
"rrs",
")",
".",
"rstrip",
"(",
")",
"if",
"rrsst",
"not",
"in",
"newset",
":",
"log",
".",
"debug",
"(",
"\"REMOVED:\"",
")",
"log",
".",
"debug",
"(",
"rrsst",
")",
"change",
"=",
"lxml",
".",
"etree",
".",
"XML",
"(",
"'<Change xmlns=\"%s\"><Action>DELETE</Action></Change>'",
"%",
"R53_XMLNS",
",",
"parser",
"=",
"XML_PARSER",
")",
"change",
".",
"append",
"(",
"rrs",
")",
"changesroot",
".",
"append",
"(",
"change",
")",
"# look for added elements",
"for",
"rrs",
"in",
"new",
":",
"rrsst",
"=",
"lxml",
".",
"etree",
".",
"tostring",
"(",
"rrs",
")",
".",
"rstrip",
"(",
")",
"if",
"rrsst",
"not",
"in",
"oldset",
":",
"log",
".",
"debug",
"(",
"\"ADDED:\"",
")",
"log",
".",
"debug",
"(",
"rrsst",
")",
"change",
"=",
"lxml",
".",
"etree",
".",
"XML",
"(",
"'<Change xmlns=\"%s\"><Action>CREATE</Action></Change>'",
"%",
"R53_XMLNS",
",",
"parser",
"=",
"XML_PARSER",
")",
"change",
".",
"append",
"(",
"rrs",
")",
"changesroot",
".",
"append",
"(",
"change",
")",
"return",
"root"
] |
3c4e7242ad65b0e1ad4ba6b4ac893c7d501ceb0a
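A hedged sketch tying the earlier r53 helpers together: merge the fetched pages into one <ResourceRecordSets> element, parse a locally edited copy, and diff the two. It reuses conn and zone_id from the lookup_zone sketch, 'zone.xml' is a placeholder file assumed to contain an edited <ResourceRecordSets> document, and the Python 2 print statements match the record's own code.

import lxml.etree

live = merge_config(fetch_config(zone_id, conn))
desired = lxml.etree.parse('zone.xml').getroot()  # locally edited copy (placeholder)
changeset = generate_changeset(live, desired, comment='sync from local file')
if changeset is None:
    print 'no changes to apply'
else:
    print lxml.etree.tostring(changeset, pretty_print=True)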
|
test
|
validate_changeset
|
Validate a changeset is compatible with Amazon's API spec.
Args: changeset: lxml.etree.Element (<ChangeResourceRecordSetsRequest>)
Returns: [ errors ] list of error strings or [].
|
src/r53/r53.py
|
def validate_changeset(changeset):
"""Validate a changeset is compatible with Amazon's API spec.
Args: changeset: lxml.etree.Element (<ChangeResourceRecordSetsRequest>)
Returns: [ errors ] list of error strings or []."""
errors = []
changes = changeset.findall('.//{%s}Change' % R53_XMLNS)
num_changes = len(changes)
if num_changes == 0:
errors.append('changeset must have at least one <Change> element')
if num_changes > 100:
errors.append('changeset has %d <Change> elements: max is 100' % num_changes)
rrs = changeset.findall('.//{%s}ResourceRecord' % R53_XMLNS)
num_rrs = len(rrs)
if num_rrs > 1000:
errors.append('changeset has %d ResourceRecord elements: max is 1000' % num_rrs)
values = changeset.findall('.//{%s}Value' % R53_XMLNS)
num_chars = 0
for value in values:
num_chars += len(value.text)
if num_chars > 10000:
errors.append('changeset has %d chars in <Value> text: max is 10000' % num_chars)
return errors
|
def validate_changeset(changeset):
"""Validate a changeset is compatible with Amazon's API spec.
Args: changeset: lxml.etree.Element (<ChangeResourceRecordSetsRequest>)
Returns: [ errors ] list of error strings or []."""
errors = []
changes = changeset.findall('.//{%s}Change' % R53_XMLNS)
num_changes = len(changes)
if num_changes == 0:
errors.append('changeset must have at least one <Change> element')
if num_changes > 100:
errors.append('changeset has %d <Change> elements: max is 100' % num_changes)
rrs = changeset.findall('.//{%s}ResourceRecord' % R53_XMLNS)
num_rrs = len(rrs)
if num_rrs > 1000:
errors.append('changeset has %d ResourceRecord elements: max is 1000' % num_rrs)
values = changeset.findall('.//{%s}Value' % R53_XMLNS)
num_chars = 0
for value in values:
num_chars += len(value.text)
if num_chars > 10000:
errors.append('changeset has %d chars in <Value> text: max is 10000' % num_chars)
return errors
|
[
"Validate",
"a",
"changeset",
"is",
"compatible",
"with",
"Amazon",
"s",
"API",
"spec",
"."
] |
coops/r53
|
python
|
https://github.com/coops/r53/blob/3c4e7242ad65b0e1ad4ba6b4ac893c7d501ceb0a/src/r53/r53.py#L163-L185
|
[
"def",
"validate_changeset",
"(",
"changeset",
")",
":",
"errors",
"=",
"[",
"]",
"changes",
"=",
"changeset",
".",
"findall",
"(",
"'.//{%s}Change'",
"%",
"R53_XMLNS",
")",
"num_changes",
"=",
"len",
"(",
"changes",
")",
"if",
"num_changes",
"==",
"0",
":",
"errors",
".",
"append",
"(",
"'changeset must have at least one <Change> element'",
")",
"if",
"num_changes",
">",
"100",
":",
"errors",
".",
"append",
"(",
"'changeset has %d <Change> elements: max is 100'",
"%",
"num_changes",
")",
"rrs",
"=",
"changeset",
".",
"findall",
"(",
"'.//{%s}ResourceRecord'",
"%",
"R53_XMLNS",
")",
"num_rrs",
"=",
"len",
"(",
"rrs",
")",
"if",
"num_rrs",
">",
"1000",
":",
"errors",
".",
"append",
"(",
"'changeset has %d ResourceRecord elements: max is 1000'",
"%",
"num_rrs",
")",
"values",
"=",
"changeset",
".",
"findall",
"(",
"'.//{%s}Value'",
"%",
"R53_XMLNS",
")",
"num_chars",
"=",
"0",
"for",
"value",
"in",
"values",
":",
"num_chars",
"+=",
"len",
"(",
"value",
".",
"text",
")",
"if",
"num_chars",
">",
"10000",
":",
"errors",
".",
"append",
"(",
"'changeset has %d chars in <Value> text: max is 10000'",
"%",
"num_chars",
")",
"return",
"errors"
] |
3c4e7242ad65b0e1ad4ba6b4ac893c7d501ceb0a
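The r53 records above assemble and validate Route 53 change batches. As a rough illustration of the validator's behaviour, the sketch below assumes the module is importable as r53 and that it exposes both validate_changeset and the R53_XMLNS namespace constant referenced in the quoted code; the record contents are invented for the demo.

import lxml.etree
from r53 import validate_changeset, R53_XMLNS  # assumed import path (src/r53/r53.py)

# A well-formed single-change request: one <Change>, one <ResourceRecord>,
# a short <Value> -- all of the limits checked above are satisfied.
ok = lxml.etree.XML(
    '<ChangeResourceRecordSetsRequest xmlns="%s"><ChangeBatch>'
    '<Comment>demo</Comment><Changes><Change><Action>CREATE</Action>'
    '<ResourceRecordSet><Name>example.com.</Name><Type>A</Type><TTL>300</TTL>'
    '<ResourceRecords><ResourceRecord><Value>192.0.2.1</Value></ResourceRecord>'
    '</ResourceRecords></ResourceRecordSet></Change></Changes>'
    '</ChangeBatch></ChangeResourceRecordSetsRequest>' % R53_XMLNS)
print(validate_changeset(ok))      # []

# An empty change batch trips the first check.
empty = lxml.etree.XML(
    '<ChangeResourceRecordSetsRequest xmlns="%s"><ChangeBatch>'
    '<Comment>noop</Comment><Changes/></ChangeBatch>'
    '</ChangeResourceRecordSetsRequest>' % R53_XMLNS)
print(validate_changeset(empty))   # ['changeset must have at least one <Change> element']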
|
test
|
minimize_best_n
|
Orders population members from lowest fitness to highest fitness
Args:
Members (list): list of PyGenetics Member objects
Returns:
list: ordered list of Members, from highest fitness to lowest fitness
|
pygenetics/selection_functions.py
|
def minimize_best_n(Members):
'''
Orders population members from lowest fitness to highest fitness
Args:
Members (list): list of PyGenetics Member objects
Returns:
list: ordered list of Members, from highest fitness to lowest fitness
'''
return(list(reversed(sorted(
Members, key=lambda Member: Member.fitness_score
))))
|
def minimize_best_n(Members):
'''
Orders population members from lowest fitness to highest fitness
Args:
Members (list): list of PyGenetics Member objects
Returns:
list: ordered list of Members, from highest fitness to lowest fitness
'''
return(list(reversed(sorted(
Members, key=lambda Member: Member.fitness_score
))))
|
[
"Orders",
"population",
"members",
"from",
"lowest",
"fitness",
"to",
"highest",
"fitness"
] |
tjkessler/PyGenetics
|
python
|
https://github.com/tjkessler/PyGenetics/blob/b78ee6393605d6e85d2279fb05f3983f5833df40/pygenetics/selection_functions.py#L10-L23
|
[
"def",
"minimize_best_n",
"(",
"Members",
")",
":",
"return",
"(",
"list",
"(",
"reversed",
"(",
"sorted",
"(",
"Members",
",",
"key",
"=",
"lambda",
"Member",
":",
"Member",
".",
"fitness_score",
")",
")",
")",
")"
] |
b78ee6393605d6e85d2279fb05f3983f5833df40
|
test
|
Population.fitness
|
Population fitness == average member fitness score
|
pygenetics/ga_core.py
|
def fitness(self):
'''Population fitness == average member fitness score'''
if len(self.__members) != 0:
if self.__num_processes > 1:
members = [m.get() for m in self.__members]
else:
members = self.__members
return sum(m.fitness_score for m in members) / len(members)
else:
return None
|
def fitness(self):
'''Population fitness == average member fitness score'''
if len(self.__members) != 0:
if self.__num_processes > 1:
members = [m.get() for m in self.__members]
else:
members = self.__members
return sum(m.fitness_score for m in members) / len(members)
else:
return None
|
[
"Population",
"fitness",
"==",
"average",
"member",
"fitness",
"score"
] |
tjkessler/PyGenetics
|
python
|
https://github.com/tjkessler/PyGenetics/blob/b78ee6393605d6e85d2279fb05f3983f5833df40/pygenetics/ga_core.py#L126-L136
|
[
"def",
"fitness",
"(",
"self",
")",
":",
"if",
"len",
"(",
"self",
".",
"__members",
")",
"!=",
"0",
":",
"if",
"self",
".",
"__num_processes",
">",
"1",
":",
"members",
"=",
"[",
"m",
".",
"get",
"(",
")",
"for",
"m",
"in",
"self",
".",
"__members",
"]",
"else",
":",
"members",
"=",
"self",
".",
"__members",
"return",
"sum",
"(",
"m",
".",
"fitness_score",
"for",
"m",
"in",
"members",
")",
"/",
"len",
"(",
"members",
")",
"else",
":",
"return",
"None"
] |
b78ee6393605d6e85d2279fb05f3983f5833df40
|
test
|
Population.ave_cost_fn_val
|
Returns average cost function return value for all members
|
pygenetics/ga_core.py
|
def ave_cost_fn_val(self):
'''Returns average cost function return value for all members'''
if len(self.__members) != 0:
if self.__num_processes > 1:
members = [m.get() for m in self.__members]
else:
members = self.__members
return sum(m.cost_fn_val for m in members) / len(members)
else:
return None
|
def ave_cost_fn_val(self):
'''Returns average cost function return value for all members'''
if len(self.__members) != 0:
if self.__num_processes > 1:
members = [m.get() for m in self.__members]
else:
members = self.__members
return sum(m.cost_fn_val for m in members) / len(members)
else:
return None
|
[
"Returns",
"average",
"cost",
"function",
"return",
"value",
"for",
"all",
"members"
] |
tjkessler/PyGenetics
|
python
|
https://github.com/tjkessler/PyGenetics/blob/b78ee6393605d6e85d2279fb05f3983f5833df40/pygenetics/ga_core.py#L145-L155
|
[
"def",
"ave_cost_fn_val",
"(",
"self",
")",
":",
"if",
"len",
"(",
"self",
".",
"__members",
")",
"!=",
"0",
":",
"if",
"self",
".",
"__num_processes",
">",
"1",
":",
"members",
"=",
"[",
"m",
".",
"get",
"(",
")",
"for",
"m",
"in",
"self",
".",
"__members",
"]",
"else",
":",
"members",
"=",
"self",
".",
"__members",
"return",
"sum",
"(",
"m",
".",
"cost_fn_val",
"for",
"m",
"in",
"members",
")",
"/",
"len",
"(",
"members",
")",
"else",
":",
"return",
"None"
] |
b78ee6393605d6e85d2279fb05f3983f5833df40
|
test
|
Population.med_cost_fn_val
|
Returns median cost function return value for all members
|
pygenetics/ga_core.py
|
def med_cost_fn_val(self):
'''Returns median cost function return value for all members'''
if len(self.__members) != 0:
if self.__num_processes > 1:
members = [m.get() for m in self.__members]
else:
members = self.__members
return median([m.cost_fn_val for m in members])
else:
return None
|
def med_cost_fn_val(self):
'''Returns median cost function return value for all members'''
if len(self.__members) != 0:
if self.__num_processes > 1:
members = [m.get() for m in self.__members]
else:
members = self.__members
return median([m.cost_fn_val for m in members])
else:
return None
|
[
"Returns",
"median",
"cost",
"function",
"return",
"value",
"for",
"all",
"members"
] |
tjkessler/PyGenetics
|
python
|
https://github.com/tjkessler/PyGenetics/blob/b78ee6393605d6e85d2279fb05f3983f5833df40/pygenetics/ga_core.py#L158-L168
|
[
"def",
"med_cost_fn_val",
"(",
"self",
")",
":",
"if",
"len",
"(",
"self",
".",
"__members",
")",
"!=",
"0",
":",
"if",
"self",
".",
"__num_processes",
">",
"1",
":",
"members",
"=",
"[",
"m",
".",
"get",
"(",
")",
"for",
"m",
"in",
"self",
".",
"__members",
"]",
"else",
":",
"members",
"=",
"self",
".",
"__members",
"return",
"median",
"(",
"[",
"m",
".",
"cost_fn_val",
"for",
"m",
"in",
"members",
"]",
")",
"else",
":",
"return",
"None"
] |
b78ee6393605d6e85d2279fb05f3983f5833df40
|
test
|
Population.parameters
|
Population parameter vals == average member parameter vals
|
pygenetics/ga_core.py
|
def parameters(self):
'''Population parameter vals == average member parameter vals'''
if len(self.__members) != 0:
if self.__num_processes > 1:
members = [m.get() for m in self.__members]
else:
members = self.__members
params = {}
for p in self.__parameters:
params[p.name] = sum(
m.parameters[p.name] for m in members
) / len(members)
return params
else:
return None
|
def parameters(self):
'''Population parameter vals == average member parameter vals'''
if len(self.__members) != 0:
if self.__num_processes > 1:
members = [m.get() for m in self.__members]
else:
members = self.__members
params = {}
for p in self.__parameters:
params[p.name] = sum(
m.parameters[p.name] for m in members
) / len(members)
return params
else:
return None
|
[
"Population",
"parameter",
"vals",
"==",
"average",
"member",
"parameter",
"vals"
] |
tjkessler/PyGenetics
|
python
|
https://github.com/tjkessler/PyGenetics/blob/b78ee6393605d6e85d2279fb05f3983f5833df40/pygenetics/ga_core.py#L176-L191
|
[
"def",
"parameters",
"(",
"self",
")",
":",
"if",
"len",
"(",
"self",
".",
"__members",
")",
"!=",
"0",
":",
"if",
"self",
".",
"__num_processes",
">",
"1",
":",
"members",
"=",
"[",
"m",
".",
"get",
"(",
")",
"for",
"m",
"in",
"self",
".",
"__members",
"]",
"else",
":",
"members",
"=",
"self",
".",
"__members",
"params",
"=",
"{",
"}",
"for",
"p",
"in",
"self",
".",
"__parameters",
":",
"params",
"[",
"p",
".",
"name",
"]",
"=",
"sum",
"(",
"m",
".",
"parameters",
"[",
"p",
".",
"name",
"]",
"for",
"m",
"in",
"members",
")",
"/",
"len",
"(",
"members",
")",
"return",
"params",
"else",
":",
"return",
"None"
] |
b78ee6393605d6e85d2279fb05f3983f5833df40
|
test
|
Population.members
|
Returns Member objects of population
|
pygenetics/ga_core.py
|
def members(self):
'''Returns Member objects of population'''
if self.__num_processes > 1:
return [m.get() for m in self.__members]
else:
return self.__members
|
def members(self):
'''Returns Member objects of population'''
if self.__num_processes > 1:
return [m.get() for m in self.__members]
else:
return self.__members
|
[
"Returns",
"Member",
"objects",
"of",
"population"
] |
tjkessler/PyGenetics
|
python
|
https://github.com/tjkessler/PyGenetics/blob/b78ee6393605d6e85d2279fb05f3983f5833df40/pygenetics/ga_core.py#L200-L206
|
[
"def",
"members",
"(",
"self",
")",
":",
"if",
"self",
".",
"__num_processes",
">",
"1",
":",
"return",
"[",
"m",
".",
"get",
"(",
")",
"for",
"m",
"in",
"self",
".",
"__members",
"]",
"else",
":",
"return",
"self",
".",
"__members"
] |
b78ee6393605d6e85d2279fb05f3983f5833df40
|
test
|
Population.add_parameter
|
Adds a parameter to the Population
Args:
name (str): name of the parameter
min_val (int or float): minimum value for the parameter
max_val (int or float): maximum value for the parameter
|
pygenetics/ga_core.py
|
def add_parameter(self, name, min_val, max_val):
'''Adds a parameter to the Population
Args:
name (str): name of the parameter
min_val (int or float): minimum value for the parameter
max_val (int or float): maximum value for the parameter
'''
self.__parameters.append(Parameter(name, min_val, max_val))
|
def add_parameter(self, name, min_val, max_val):
'''Adds a parameter to the Population
Args:
name (str): name of the parameter
min_val (int or float): minimum value for the parameter
max_val (int or float): maximum value for the parameter
'''
self.__parameters.append(Parameter(name, min_val, max_val))
|
[
"Adds",
"a",
"paramber",
"to",
"the",
"Population"
] |
tjkessler/PyGenetics
|
python
|
https://github.com/tjkessler/PyGenetics/blob/b78ee6393605d6e85d2279fb05f3983f5833df40/pygenetics/ga_core.py#L208-L217
|
[
"def",
"add_parameter",
"(",
"self",
",",
"name",
",",
"min_val",
",",
"max_val",
")",
":",
"self",
".",
"__parameters",
".",
"append",
"(",
"Parameter",
"(",
"name",
",",
"min_val",
",",
"max_val",
")",
")"
] |
b78ee6393605d6e85d2279fb05f3983f5833df40
|
test
|
Population.generate_population
|
Generates self.__pop_size Members with randomly initialized values
for each parameter added with add_parameter(), evaluates their fitness
|
pygenetics/ga_core.py
|
def generate_population(self):
'''Generates self.__pop_size Members with randomly initialized values
for each parameter added with add_parameter(), evaluates their fitness
'''
if self.__num_processes > 1:
process_pool = Pool(processes=self.__num_processes)
self.__members = []
for _ in range(self.__pop_size):
feed_dict = {}
for param in self.__parameters:
feed_dict[param.name] = self.__random_param_val(
param.min_val,
param.max_val,
param.dtype
)
if self.__num_processes > 1:
self.__members.append(process_pool.apply_async(
self._start_process,
[self.__cost_fn, feed_dict, self.__cost_fn_args])
)
else:
self.__members.append(
Member(
feed_dict,
self.__cost_fn(feed_dict, self.__cost_fn_args)
)
)
if self.__num_processes > 1:
process_pool.close()
process_pool.join()
self.__determine_best_member()
|
def generate_population(self):
'''Generates self.__pop_size Members with randomly initialized values
for each parameter added with add_parameter(), evaluates their fitness
'''
if self.__num_processes > 1:
process_pool = Pool(processes=self.__num_processes)
self.__members = []
for _ in range(self.__pop_size):
feed_dict = {}
for param in self.__parameters:
feed_dict[param.name] = self.__random_param_val(
param.min_val,
param.max_val,
param.dtype
)
if self.__num_processes > 1:
self.__members.append(process_pool.apply_async(
self._start_process,
[self.__cost_fn, feed_dict, self.__cost_fn_args])
)
else:
self.__members.append(
Member(
feed_dict,
self.__cost_fn(feed_dict, self.__cost_fn_args)
)
)
if self.__num_processes > 1:
process_pool.close()
process_pool.join()
self.__determine_best_member()
|
[
"Generates",
"self",
".",
"__pop_size",
"Members",
"with",
"randomly",
"initialized",
"values",
"for",
"each",
"parameter",
"added",
"with",
"add_parameter",
"()",
"evaluates",
"their",
"fitness"
] |
tjkessler/PyGenetics
|
python
|
https://github.com/tjkessler/PyGenetics/blob/b78ee6393605d6e85d2279fb05f3983f5833df40/pygenetics/ga_core.py#L219-L253
|
[
"def",
"generate_population",
"(",
"self",
")",
":",
"if",
"self",
".",
"__num_processes",
">",
"1",
":",
"process_pool",
"=",
"Pool",
"(",
"processes",
"=",
"self",
".",
"__num_processes",
")",
"self",
".",
"__members",
"=",
"[",
"]",
"for",
"_",
"in",
"range",
"(",
"self",
".",
"__pop_size",
")",
":",
"feed_dict",
"=",
"{",
"}",
"for",
"param",
"in",
"self",
".",
"__parameters",
":",
"feed_dict",
"[",
"param",
".",
"name",
"]",
"=",
"self",
".",
"__random_param_val",
"(",
"param",
".",
"min_val",
",",
"param",
".",
"max_val",
",",
"param",
".",
"dtype",
")",
"if",
"self",
".",
"__num_processes",
">",
"1",
":",
"self",
".",
"__members",
".",
"append",
"(",
"process_pool",
".",
"apply_async",
"(",
"self",
".",
"_start_process",
",",
"[",
"self",
".",
"__cost_fn",
",",
"feed_dict",
",",
"self",
".",
"__cost_fn_args",
"]",
")",
")",
"else",
":",
"self",
".",
"__members",
".",
"append",
"(",
"Member",
"(",
"feed_dict",
",",
"self",
".",
"__cost_fn",
"(",
"feed_dict",
",",
"self",
".",
"__cost_fn_args",
")",
")",
")",
"if",
"self",
".",
"__num_processes",
">",
"1",
":",
"process_pool",
".",
"close",
"(",
")",
"process_pool",
".",
"join",
"(",
")",
"self",
".",
"__determine_best_member",
"(",
")"
] |
b78ee6393605d6e85d2279fb05f3983f5833df40
|
test
|
Population.next_generation
|
Generates the next population from a previously evaluated generation
Args:
mut_rate (float): mutation rate for new members (0.0 - 1.0)
max_mut_amt (float): how much the member is allowed to mutate
(0.0 - 1.0, proportion change of mutated parameter)
log_base (int): the higher this number, the more likely the first
Members (chosen with supplied selection function) are chosen
as parents for the next generation
|
pygenetics/ga_core.py
|
def next_generation(self, mut_rate=0, max_mut_amt=0, log_base=10):
'''Generates the next population from a previously evaluated generation
Args:
mut_rate (float): mutation rate for new members (0.0 - 1.0)
max_mut_amt (float): how much the member is allowed to mutate
(0.0 - 1.0, proportion change of mutated parameter)
log_base (int): the higher this number, the more likely the first
Members (chosen with supplied selection function) are chosen
as parents for the next generation
'''
if self.__num_processes > 1:
process_pool = Pool(processes=self.__num_processes)
members = [m.get() for m in self.__members]
else:
members = self.__members
if len(members) == 0:
raise Exception(
'Generation 0 not found: use generate_population() first'
)
selected_members = self.__select_fn(members)
reproduction_probs = list(reversed(logspace(0.0, 1.0,
num=len(selected_members), base=log_base)))
reproduction_probs = reproduction_probs / sum(reproduction_probs)
self.__members = []
for _ in range(self.__pop_size):
parent_1 = nrandom.choice(selected_members, p=reproduction_probs)
parent_2 = nrandom.choice(selected_members, p=reproduction_probs)
feed_dict = {}
for param in self.__parameters:
which_parent = uniform(0, 1)
if which_parent < 0.5:
feed_dict[param.name] = parent_1.parameters[param.name]
else:
feed_dict[param.name] = parent_2.parameters[param.name]
feed_dict[param.name] = self.__mutate_parameter(
feed_dict[param.name], param, mut_rate, max_mut_amt
)
if self.__num_processes > 1:
self.__members.append(process_pool.apply_async(
self._start_process,
[self.__cost_fn, feed_dict, self.__cost_fn_args])
)
else:
self.__members.append(
Member(
feed_dict,
self.__cost_fn(feed_dict, self.__cost_fn_args)
)
)
if self.__num_processes > 1:
process_pool.close()
process_pool.join()
self.__determine_best_member()
|
def next_generation(self, mut_rate=0, max_mut_amt=0, log_base=10):
'''Generates the next population from a previously evaluated generation
Args:
mut_rate (float): mutation rate for new members (0.0 - 1.0)
max_mut_amt (float): how much the member is allowed to mutate
(0.0 - 1.0, proportion change of mutated parameter)
log_base (int): the higher this number, the more likely the first
Members (chosen with supplied selection function) are chosen
as parents for the next generation
'''
if self.__num_processes > 1:
process_pool = Pool(processes=self.__num_processes)
members = [m.get() for m in self.__members]
else:
members = self.__members
if len(members) == 0:
raise Exception(
'Generation 0 not found: use generate_population() first'
)
selected_members = self.__select_fn(members)
reproduction_probs = list(reversed(logspace(0.0, 1.0,
num=len(selected_members), base=log_base)))
reproduction_probs = reproduction_probs / sum(reproduction_probs)
self.__members = []
for _ in range(self.__pop_size):
parent_1 = nrandom.choice(selected_members, p=reproduction_probs)
parent_2 = nrandom.choice(selected_members, p=reproduction_probs)
feed_dict = {}
for param in self.__parameters:
which_parent = uniform(0, 1)
if which_parent < 0.5:
feed_dict[param.name] = parent_1.parameters[param.name]
else:
feed_dict[param.name] = parent_2.parameters[param.name]
feed_dict[param.name] = self.__mutate_parameter(
feed_dict[param.name], param, mut_rate, max_mut_amt
)
if self.__num_processes > 1:
self.__members.append(process_pool.apply_async(
self._start_process,
[self.__cost_fn, feed_dict, self.__cost_fn_args])
)
else:
self.__members.append(
Member(
feed_dict,
self.__cost_fn(feed_dict, self.__cost_fn_args)
)
)
if self.__num_processes > 1:
process_pool.close()
process_pool.join()
self.__determine_best_member()
|
[
"Generates",
"the",
"next",
"population",
"from",
"a",
"previously",
"evaluated",
"generation"
] |
tjkessler/PyGenetics
|
python
|
https://github.com/tjkessler/PyGenetics/blob/b78ee6393605d6e85d2279fb05f3983f5833df40/pygenetics/ga_core.py#L255-L317
|
[
"def",
"next_generation",
"(",
"self",
",",
"mut_rate",
"=",
"0",
",",
"max_mut_amt",
"=",
"0",
",",
"log_base",
"=",
"10",
")",
":",
"if",
"self",
".",
"__num_processes",
">",
"1",
":",
"process_pool",
"=",
"Pool",
"(",
"processes",
"=",
"self",
".",
"__num_processes",
")",
"members",
"=",
"[",
"m",
".",
"get",
"(",
")",
"for",
"m",
"in",
"self",
".",
"__members",
"]",
"else",
":",
"members",
"=",
"self",
".",
"__members",
"if",
"len",
"(",
"members",
")",
"==",
"0",
":",
"raise",
"Exception",
"(",
"'Generation 0 not found: use generate_population() first'",
")",
"selected_members",
"=",
"self",
".",
"__select_fn",
"(",
"members",
")",
"reproduction_probs",
"=",
"list",
"(",
"reversed",
"(",
"logspace",
"(",
"0.0",
",",
"1.0",
",",
"num",
"=",
"len",
"(",
"selected_members",
")",
",",
"base",
"=",
"log_base",
")",
")",
")",
"reproduction_probs",
"=",
"reproduction_probs",
"/",
"sum",
"(",
"reproduction_probs",
")",
"self",
".",
"__members",
"=",
"[",
"]",
"for",
"_",
"in",
"range",
"(",
"self",
".",
"__pop_size",
")",
":",
"parent_1",
"=",
"nrandom",
".",
"choice",
"(",
"selected_members",
",",
"p",
"=",
"reproduction_probs",
")",
"parent_2",
"=",
"nrandom",
".",
"choice",
"(",
"selected_members",
",",
"p",
"=",
"reproduction_probs",
")",
"feed_dict",
"=",
"{",
"}",
"for",
"param",
"in",
"self",
".",
"__parameters",
":",
"which_parent",
"=",
"uniform",
"(",
"0",
",",
"1",
")",
"if",
"which_parent",
"<",
"0.5",
":",
"feed_dict",
"[",
"param",
".",
"name",
"]",
"=",
"parent_1",
".",
"parameters",
"[",
"param",
".",
"name",
"]",
"else",
":",
"feed_dict",
"[",
"param",
".",
"name",
"]",
"=",
"parent_2",
".",
"parameters",
"[",
"param",
".",
"name",
"]",
"feed_dict",
"[",
"param",
".",
"name",
"]",
"=",
"self",
".",
"__mutate_parameter",
"(",
"feed_dict",
"[",
"param",
".",
"name",
"]",
",",
"param",
",",
"mut_rate",
",",
"max_mut_amt",
")",
"if",
"self",
".",
"__num_processes",
">",
"1",
":",
"self",
".",
"__members",
".",
"append",
"(",
"process_pool",
".",
"apply_async",
"(",
"self",
".",
"_start_process",
",",
"[",
"self",
".",
"__cost_fn",
",",
"feed_dict",
",",
"self",
".",
"__cost_fn_args",
"]",
")",
")",
"else",
":",
"self",
".",
"__members",
".",
"append",
"(",
"Member",
"(",
"feed_dict",
",",
"self",
".",
"__cost_fn",
"(",
"feed_dict",
",",
"self",
".",
"__cost_fn_args",
")",
")",
")",
"if",
"self",
".",
"__num_processes",
">",
"1",
":",
"process_pool",
".",
"close",
"(",
")",
"process_pool",
".",
"join",
"(",
")",
"self",
".",
"__determine_best_member",
"(",
")"
] |
b78ee6393605d6e85d2279fb05f3983f5833df40
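Taken together, the Population records above (add_parameter, generate_population, next_generation and the read-only accessors) imply a small optimisation loop. The constructor itself is not shown in these records, so the call below is only a guess at its signature, inferred from the private attributes the quoted methods use (pop size, cost function, cost-function args, selection function, process count); treat it as a sketch rather than the library's documented API.

from pygenetics.ga_core import Population
from pygenetics.selection_functions import minimize_best_n

def cost_fn(feed_dict, args):
    # Toy objective: drive the single parameter 'x' towards 3. The framework
    # turns this return value into a fitness score internally.
    return (feed_dict['x'] - 3.0) ** 2

pop = Population(10, cost_fn, select_fn=minimize_best_n)   # assumed signature
pop.add_parameter('x', -10.0, 10.0)
pop.generate_population()                                  # generation 0
for _ in range(20):
    pop.next_generation(mut_rate=0.1, max_mut_amt=0.2)     # breed + mutate
# The fitness, parameters and members accessors documented above can then be
# used to read out the population state and the best member found so far.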
|
test
|
Population.__mutate_parameter
|
Private, static method: mutates parameter
Args:
value (int or float): current value for Member's parameter
param (Parameter): parameter object
mut_rate (float): mutation rate of the value
max_mut_amt (float): maximum mutation amount of the value
Returns:
int or float: mutated value
|
pygenetics/ga_core.py
|
def __mutate_parameter(value, param, mut_rate, max_mut_amt):
'''Private, static method: mutates parameter
Args:
value (int or float): current value for Member's parameter
param (Parameter): parameter object
mut_rate (float): mutation rate of the value
max_mut_amt (float): maximum mutation amount of the value
Returns:
int or float: mutated value
'''
if uniform(0, 1) < mut_rate:
mut_amt = uniform(0, max_mut_amt)
op = choice((add, sub))
new_val = op(value, param.dtype(
(param.max_val - param.min_val) * mut_amt
))
if new_val > param.max_val:
return param.max_val
elif new_val < param.min_val:
return param.min_val
else:
return new_val
else:
return value
|
def __mutate_parameter(value, param, mut_rate, max_mut_amt):
'''Private, static method: mutates parameter
Args:
value (int or float): current value for Member's parameter
param (Parameter): parameter object
mut_rate (float): mutation rate of the value
max_mut_amt (float): maximum mutation amount of the value
Returns:
int or float: mutated value
'''
if uniform(0, 1) < mut_rate:
mut_amt = uniform(0, max_mut_amt)
op = choice((add, sub))
new_val = op(value, param.dtype(
(param.max_val - param.min_val) * mut_amt
))
if new_val > param.max_val:
return param.max_val
elif new_val < param.min_val:
return param.min_val
else:
return new_val
else:
return value
|
[
"Private",
"static",
"method",
":",
"mutates",
"parameter"
] |
tjkessler/PyGenetics
|
python
|
https://github.com/tjkessler/PyGenetics/blob/b78ee6393605d6e85d2279fb05f3983f5833df40/pygenetics/ga_core.py#L351-L377
|
[
"def",
"__mutate_parameter",
"(",
"value",
",",
"param",
",",
"mut_rate",
",",
"max_mut_amt",
")",
":",
"if",
"uniform",
"(",
"0",
",",
"1",
")",
"<",
"mut_rate",
":",
"mut_amt",
"=",
"uniform",
"(",
"0",
",",
"max_mut_amt",
")",
"op",
"=",
"choice",
"(",
"(",
"add",
",",
"sub",
")",
")",
"new_val",
"=",
"op",
"(",
"value",
",",
"param",
".",
"dtype",
"(",
"(",
"param",
".",
"max_val",
"-",
"param",
".",
"min_val",
")",
"*",
"mut_amt",
")",
")",
"if",
"new_val",
">",
"param",
".",
"max_val",
":",
"return",
"param",
".",
"max_val",
"elif",
"new_val",
"<",
"param",
".",
"min_val",
":",
"return",
"param",
".",
"min_val",
"else",
":",
"return",
"new_val",
"else",
":",
"return",
"value"
] |
b78ee6393605d6e85d2279fb05f3983f5833df40
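The private __mutate_parameter record above encodes a clamped mutation: with probability mut_rate the value is nudged up or down by at most max_mut_amt of the parameter's range, then pinned back inside [min_val, max_val]. Below is a standalone restatement of that step with plain arguments instead of the Parameter object; the function name and signature are mine, not the library's.

from operator import add, sub
from random import choice, uniform

def mutate(value, min_val, max_val, mut_rate, max_mut_amt, dtype=float):
    # Mutate with probability mut_rate; the step size is a random fraction
    # (up to max_mut_amt) of the parameter's full range.
    if uniform(0, 1) < mut_rate:
        step = dtype((max_val - min_val) * uniform(0, max_mut_amt))
        new_val = choice((add, sub))(value, step)
        return min(max(new_val, min_val), max_val)   # clamp to the bounds
    return value

print(mutate(9.5, 0.0, 10.0, mut_rate=1.0, max_mut_amt=0.2))   # always within [0, 10]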
|
test
|
Population.__determine_best_member
|
Private method: determines if any current population members have a
fitness score better than the current best
|
pygenetics/ga_core.py
|
def __determine_best_member(self):
'''Private method: determines if any current population members have a
fitness score better than the current best
'''
if self.__num_processes > 1:
members = [m.get() for m in self.__members]
else:
members = self.__members
if self.__best_fitness is None:
self.__best_fitness = members[0].fitness_score
self.__best_cost_fn_val = members[0].cost_fn_val
self.__best_parameters = {}
for p in self.__parameters:
self.__best_parameters[p.name] = members[0].parameters[p.name]
for m_id, member in enumerate(members):
if member.fitness_score > self.__best_fitness:
self.__best_fitness = member.fitness_score
self.__best_cost_fn_val = member.cost_fn_val
self.__best_parameters = {}
for p in self.__parameters:
self.__best_parameters[p.name] = member.parameters[p.name]
|
def __determine_best_member(self):
'''Private method: determines if any current population members have a
fitness score better than the current best
'''
if self.__num_processes > 1:
members = [m.get() for m in self.__members]
else:
members = self.__members
if self.__best_fitness is None:
self.__best_fitness = members[0].fitness_score
self.__best_cost_fn_val = members[0].cost_fn_val
self.__best_parameters = {}
for p in self.__parameters:
self.__best_parameters[p.name] = members[0].parameters[p.name]
for m_id, member in enumerate(members):
if member.fitness_score > self.__best_fitness:
self.__best_fitness = member.fitness_score
self.__best_cost_fn_val = member.cost_fn_val
self.__best_parameters = {}
for p in self.__parameters:
self.__best_parameters[p.name] = member.parameters[p.name]
|
[
"Private",
"method",
":",
"determines",
"if",
"any",
"current",
"population",
"members",
"have",
"a",
"fitness",
"score",
"better",
"than",
"the",
"current",
"best"
] |
tjkessler/PyGenetics
|
python
|
https://github.com/tjkessler/PyGenetics/blob/b78ee6393605d6e85d2279fb05f3983f5833df40/pygenetics/ga_core.py#L379-L402
|
[
"def",
"__determine_best_member",
"(",
"self",
")",
":",
"if",
"self",
".",
"__num_processes",
">",
"1",
":",
"members",
"=",
"[",
"m",
".",
"get",
"(",
")",
"for",
"m",
"in",
"self",
".",
"__members",
"]",
"else",
":",
"members",
"=",
"self",
".",
"__members",
"if",
"self",
".",
"__best_fitness",
"is",
"None",
":",
"self",
".",
"__best_fitness",
"=",
"members",
"[",
"0",
"]",
".",
"fitness_score",
"self",
".",
"__best_cost_fn_val",
"=",
"members",
"[",
"0",
"]",
".",
"cost_fn_val",
"self",
".",
"__best_parameters",
"=",
"{",
"}",
"for",
"p",
"in",
"self",
".",
"__parameters",
":",
"self",
".",
"__best_parameters",
"[",
"p",
".",
"name",
"]",
"=",
"members",
"[",
"0",
"]",
".",
"parameters",
"[",
"p",
".",
"name",
"]",
"for",
"m_id",
",",
"member",
"in",
"enumerate",
"(",
"members",
")",
":",
"if",
"member",
".",
"fitness_score",
">",
"self",
".",
"__best_fitness",
":",
"self",
".",
"__best_fitness",
"=",
"member",
".",
"fitness_score",
"self",
".",
"__best_cost_fn_val",
"=",
"member",
".",
"cost_fn_val",
"self",
".",
"__best_parameters",
"=",
"{",
"}",
"for",
"p",
"in",
"self",
".",
"__parameters",
":",
"self",
".",
"__best_parameters",
"[",
"p",
".",
"name",
"]",
"=",
"member",
".",
"parameters",
"[",
"p",
".",
"name",
"]"
] |
b78ee6393605d6e85d2279fb05f3983f5833df40
|
test
|
ConfigOptionParser.update_defaults
|
Updates the given defaults with values from the config files and
the environ. Does a little special handling for certain types of
options (lists).
|
capybara/virtualenv/lib/python2.7/site-packages/pip/baseparser.py
|
def update_defaults(self, defaults):
"""Updates the given defaults with values from the config files and
the environ. Does a little special handling for certain types of
options (lists)."""
# Then go and look for the other sources of configuration:
config = {}
# 1. config files
for section in ('global', self.name):
config.update(
self.normalize_keys(self.get_config_section(section))
)
# 2. environmental variables
if not self.isolated:
config.update(self.normalize_keys(self.get_environ_vars()))
# Then set the options with those values
for key, val in config.items():
option = self.get_option(key)
if option is not None:
# ignore empty values
if not val:
continue
if option.action in ('store_true', 'store_false', 'count'):
val = strtobool(val)
if option.action == 'append':
val = val.split()
val = [self.check_default(option, key, v) for v in val]
else:
val = self.check_default(option, key, val)
defaults[option.dest] = val
return defaults
|
def update_defaults(self, defaults):
"""Updates the given defaults with values from the config files and
the environ. Does a little special handling for certain types of
options (lists)."""
# Then go and look for the other sources of configuration:
config = {}
# 1. config files
for section in ('global', self.name):
config.update(
self.normalize_keys(self.get_config_section(section))
)
# 2. environmental variables
if not self.isolated:
config.update(self.normalize_keys(self.get_environ_vars()))
# Then set the options with those values
for key, val in config.items():
option = self.get_option(key)
if option is not None:
# ignore empty values
if not val:
continue
if option.action in ('store_true', 'store_false', 'count'):
val = strtobool(val)
if option.action == 'append':
val = val.split()
val = [self.check_default(option, key, v) for v in val]
else:
val = self.check_default(option, key, val)
defaults[option.dest] = val
return defaults
|
[
"Updates",
"the",
"given",
"defaults",
"with",
"values",
"from",
"the",
"config",
"files",
"and",
"the",
"environ",
".",
"Does",
"a",
"little",
"special",
"handling",
"for",
"certain",
"types",
"of",
"options",
"(",
"lists",
")",
"."
] |
AkihikoITOH/capybara
|
python
|
https://github.com/AkihikoITOH/capybara/blob/e86c2173ea386654f4ae061148e8fbe3f25e715c/capybara/virtualenv/lib/python2.7/site-packages/pip/baseparser.py#L196-L226
|
[
"def",
"update_defaults",
"(",
"self",
",",
"defaults",
")",
":",
"# Then go and look for the other sources of configuration:",
"config",
"=",
"{",
"}",
"# 1. config files",
"for",
"section",
"in",
"(",
"'global'",
",",
"self",
".",
"name",
")",
":",
"config",
".",
"update",
"(",
"self",
".",
"normalize_keys",
"(",
"self",
".",
"get_config_section",
"(",
"section",
")",
")",
")",
"# 2. environmental variables",
"if",
"not",
"self",
".",
"isolated",
":",
"config",
".",
"update",
"(",
"self",
".",
"normalize_keys",
"(",
"self",
".",
"get_environ_vars",
"(",
")",
")",
")",
"# Then set the options with those values",
"for",
"key",
",",
"val",
"in",
"config",
".",
"items",
"(",
")",
":",
"option",
"=",
"self",
".",
"get_option",
"(",
"key",
")",
"if",
"option",
"is",
"not",
"None",
":",
"# ignore empty values",
"if",
"not",
"val",
":",
"continue",
"if",
"option",
".",
"action",
"in",
"(",
"'store_true'",
",",
"'store_false'",
",",
"'count'",
")",
":",
"val",
"=",
"strtobool",
"(",
"val",
")",
"if",
"option",
".",
"action",
"==",
"'append'",
":",
"val",
"=",
"val",
".",
"split",
"(",
")",
"val",
"=",
"[",
"self",
".",
"check_default",
"(",
"option",
",",
"key",
",",
"v",
")",
"for",
"v",
"in",
"val",
"]",
"else",
":",
"val",
"=",
"self",
".",
"check_default",
"(",
"option",
",",
"key",
",",
"val",
")",
"defaults",
"[",
"option",
".",
"dest",
"]",
"=",
"val",
"return",
"defaults"
] |
e86c2173ea386654f4ae061148e8fbe3f25e715c
|
test
|
ConfigOptionParser.normalize_keys
|
Return a config dictionary with normalized keys regardless of
whether the keys were specified in environment variables or in config
files
|
capybara/virtualenv/lib/python2.7/site-packages/pip/baseparser.py
|
def normalize_keys(self, items):
"""Return a config dictionary with normalized keys regardless of
whether the keys were specified in environment variables or in config
files"""
normalized = {}
for key, val in items:
key = key.replace('_', '-')
if not key.startswith('--'):
key = '--%s' % key # only prefer long opts
normalized[key] = val
return normalized
|
def normalize_keys(self, items):
"""Return a config dictionary with normalized keys regardless of
whether the keys were specified in environment variables or in config
files"""
normalized = {}
for key, val in items:
key = key.replace('_', '-')
if not key.startswith('--'):
key = '--%s' % key # only prefer long opts
normalized[key] = val
return normalized
|
[
"Return",
"a",
"config",
"dictionary",
"with",
"normalized",
"keys",
"regardless",
"of",
"whether",
"the",
"keys",
"were",
"specified",
"in",
"environment",
"variables",
"or",
"in",
"config",
"files"
] |
AkihikoITOH/capybara
|
python
|
https://github.com/AkihikoITOH/capybara/blob/e86c2173ea386654f4ae061148e8fbe3f25e715c/capybara/virtualenv/lib/python2.7/site-packages/pip/baseparser.py#L228-L238
|
[
"def",
"normalize_keys",
"(",
"self",
",",
"items",
")",
":",
"normalized",
"=",
"{",
"}",
"for",
"key",
",",
"val",
"in",
"items",
":",
"key",
"=",
"key",
".",
"replace",
"(",
"'_'",
",",
"'-'",
")",
"if",
"not",
"key",
".",
"startswith",
"(",
"'--'",
")",
":",
"key",
"=",
"'--%s'",
"%",
"key",
"# only prefer long opts",
"normalized",
"[",
"key",
"]",
"=",
"val",
"return",
"normalized"
] |
e86c2173ea386654f4ae061148e8fbe3f25e715c
|
test
|
ConfigOptionParser.get_environ_vars
|
Returns a generator with all environmental vars with prefix PIP_
|
capybara/virtualenv/lib/python2.7/site-packages/pip/baseparser.py
|
def get_environ_vars(self):
"""Returns a generator with all environmental vars with prefix PIP_"""
for key, val in os.environ.items():
if _environ_prefix_re.search(key):
yield (_environ_prefix_re.sub("", key).lower(), val)
|
def get_environ_vars(self):
"""Returns a generator with all environmental vars with prefix PIP_"""
for key, val in os.environ.items():
if _environ_prefix_re.search(key):
yield (_environ_prefix_re.sub("", key).lower(), val)
|
[
"Returns",
"a",
"generator",
"with",
"all",
"environmental",
"vars",
"with",
"prefix",
"PIP_"
] |
AkihikoITOH/capybara
|
python
|
https://github.com/AkihikoITOH/capybara/blob/e86c2173ea386654f4ae061148e8fbe3f25e715c/capybara/virtualenv/lib/python2.7/site-packages/pip/baseparser.py#L246-L250
|
[
"def",
"get_environ_vars",
"(",
"self",
")",
":",
"for",
"key",
",",
"val",
"in",
"os",
".",
"environ",
".",
"items",
"(",
")",
":",
"if",
"_environ_prefix_re",
".",
"search",
"(",
"key",
")",
":",
"yield",
"(",
"_environ_prefix_re",
".",
"sub",
"(",
"\"\"",
",",
"key",
")",
".",
"lower",
"(",
")",
",",
"val",
")"
] |
e86c2173ea386654f4ae061148e8fbe3f25e715c
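The three ConfigOptionParser records above describe how pip folds config-file sections and PIP_-prefixed environment variables into option defaults. The sketch below restates just the key-normalisation step in isolation; the regular expression is re-created locally for the demo (pip defines its own _environ_prefix_re elsewhere in baseparser.py), and the sample environment values are invented.

import re

PIP_PREFIX = re.compile(r'^PIP_', re.IGNORECASE)   # local stand-in for pip's pattern

def normalize_keys(items):
    # Same transformation as the quoted method: underscores become dashes and
    # every key is rewritten as a long option name.
    normalized = {}
    for key, val in items:
        key = key.replace('_', '-')
        if not key.startswith('--'):
            key = '--%s' % key
        normalized[key] = val
    return normalized

env = {'PIP_INDEX_URL': 'https://example.invalid/simple', 'PIP_TIMEOUT': '60'}
pairs = [(PIP_PREFIX.sub('', k).lower(), v) for k, v in env.items()]
print(normalize_keys(pairs))
# {'--index-url': 'https://example.invalid/simple', '--timeout': '60'}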
|
test
|
throws_exception
|
Return True if the callable throws the specified exception
>>> throws_exception(lambda: int('3'))
False
>>> throws_exception(lambda: int('a'))
True
>>> throws_exception(lambda: int('a'), KeyError)
False
|
jaraco/util/exceptions.py
|
def throws_exception(callable, *exceptions):
"""
Return True if the callable throws the specified exception
>>> throws_exception(lambda: int('3'))
False
>>> throws_exception(lambda: int('a'))
True
>>> throws_exception(lambda: int('a'), KeyError)
False
"""
with context.ExceptionTrap():
with context.ExceptionTrap(*exceptions) as exc:
callable()
return bool(exc)
|
def throws_exception(callable, *exceptions):
"""
Return True if the callable throws the specified exception
>>> throws_exception(lambda: int('3'))
False
>>> throws_exception(lambda: int('a'))
True
>>> throws_exception(lambda: int('a'), KeyError)
False
"""
with context.ExceptionTrap():
with context.ExceptionTrap(*exceptions) as exc:
callable()
return bool(exc)
|
[
"Return",
"True",
"if",
"the",
"callable",
"throws",
"the",
"specified",
"exception"
] |
jaraco/jaraco.util
|
python
|
https://github.com/jaraco/jaraco.util/blob/f21071c64f165a5cf844db15e39356e1a47f4b02/jaraco/util/exceptions.py#L6-L20
|
[
"def",
"throws_exception",
"(",
"callable",
",",
"*",
"exceptions",
")",
":",
"with",
"context",
".",
"ExceptionTrap",
"(",
")",
":",
"with",
"context",
".",
"ExceptionTrap",
"(",
"*",
"exceptions",
")",
"as",
"exc",
":",
"callable",
"(",
")",
"return",
"bool",
"(",
"exc",
")"
] |
f21071c64f165a5cf844db15e39356e1a47f4b02
|
test
|
transform_hits
|
The list from pypi is really a list of versions. We want a list of
packages with the list of versions stored inline. This converts the
list from pypi into one we can use.
|
capybara/virtualenv/lib/python2.7/site-packages/pip/commands/search.py
|
def transform_hits(hits):
"""
The list from pypi is really a list of versions. We want a list of
packages with the list of versions stored inline. This converts the
list from pypi into one we can use.
"""
packages = {}
for hit in hits:
name = hit['name']
summary = hit['summary']
version = hit['version']
score = hit['_pypi_ordering']
if score is None:
score = 0
if name not in packages.keys():
packages[name] = {
'name': name,
'summary': summary,
'versions': [version],
'score': score,
}
else:
packages[name]['versions'].append(version)
# if this is the highest version, replace summary and score
if version == highest_version(packages[name]['versions']):
packages[name]['summary'] = summary
packages[name]['score'] = score
# each record has a unique name now, so we will convert the dict into a
# list sorted by score
package_list = sorted(
packages.values(),
key=lambda x: x['score'],
reverse=True,
)
return package_list
|
def transform_hits(hits):
"""
The list from pypi is really a list of versions. We want a list of
packages with the list of versions stored inline. This converts the
list from pypi into one we can use.
"""
packages = {}
for hit in hits:
name = hit['name']
summary = hit['summary']
version = hit['version']
score = hit['_pypi_ordering']
if score is None:
score = 0
if name not in packages.keys():
packages[name] = {
'name': name,
'summary': summary,
'versions': [version],
'score': score,
}
else:
packages[name]['versions'].append(version)
# if this is the highest version, replace summary and score
if version == highest_version(packages[name]['versions']):
packages[name]['summary'] = summary
packages[name]['score'] = score
# each record has a unique name now, so we will convert the dict into a
# list sorted by score
package_list = sorted(
packages.values(),
key=lambda x: x['score'],
reverse=True,
)
return package_list
|
[
"The",
"list",
"from",
"pypi",
"is",
"really",
"a",
"list",
"of",
"versions",
".",
"We",
"want",
"a",
"list",
"of",
"packages",
"with",
"the",
"list",
"of",
"versions",
"stored",
"inline",
".",
"This",
"converts",
"the",
"list",
"from",
"pypi",
"into",
"one",
"we",
"can",
"use",
"."
] |
AkihikoITOH/capybara
|
python
|
https://github.com/AkihikoITOH/capybara/blob/e86c2173ea386654f4ae061148e8fbe3f25e715c/capybara/virtualenv/lib/python2.7/site-packages/pip/commands/search.py#L64-L101
|
[
"def",
"transform_hits",
"(",
"hits",
")",
":",
"packages",
"=",
"{",
"}",
"for",
"hit",
"in",
"hits",
":",
"name",
"=",
"hit",
"[",
"'name'",
"]",
"summary",
"=",
"hit",
"[",
"'summary'",
"]",
"version",
"=",
"hit",
"[",
"'version'",
"]",
"score",
"=",
"hit",
"[",
"'_pypi_ordering'",
"]",
"if",
"score",
"is",
"None",
":",
"score",
"=",
"0",
"if",
"name",
"not",
"in",
"packages",
".",
"keys",
"(",
")",
":",
"packages",
"[",
"name",
"]",
"=",
"{",
"'name'",
":",
"name",
",",
"'summary'",
":",
"summary",
",",
"'versions'",
":",
"[",
"version",
"]",
",",
"'score'",
":",
"score",
",",
"}",
"else",
":",
"packages",
"[",
"name",
"]",
"[",
"'versions'",
"]",
".",
"append",
"(",
"version",
")",
"# if this is the highest version, replace summary and score",
"if",
"version",
"==",
"highest_version",
"(",
"packages",
"[",
"name",
"]",
"[",
"'versions'",
"]",
")",
":",
"packages",
"[",
"name",
"]",
"[",
"'summary'",
"]",
"=",
"summary",
"packages",
"[",
"name",
"]",
"[",
"'score'",
"]",
"=",
"score",
"# each record has a unique name now, so we will convert the dict into a",
"# list sorted by score",
"package_list",
"=",
"sorted",
"(",
"packages",
".",
"values",
"(",
")",
",",
"key",
"=",
"lambda",
"x",
":",
"x",
"[",
"'score'",
"]",
",",
"reverse",
"=",
"True",
",",
")",
"return",
"package_list"
] |
e86c2173ea386654f4ae061148e8fbe3f25e715c
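To make the collapsing behaviour of transform_hits concrete, here is a toy input in the XML-RPC hit shape it consumes. The package names, summaries and scores are invented, and the import assumes the old pip layout quoted above (pip.commands.search) with its highest_version helper available in that module.

from pip.commands.search import transform_hits   # assumed old-pip import path

hits = [
    {'name': 'foo', 'summary': 'old summary', 'version': '1.0', '_pypi_ordering': 1},
    {'name': 'foo', 'summary': 'new summary', 'version': '2.0', '_pypi_ordering': 5},
    {'name': 'bar', 'summary': 'bar package', 'version': '0.3', '_pypi_ordering': 2},
]
for pkg in transform_hits(hits):
    print(pkg['name'], pkg['versions'], pkg['summary'], pkg['score'])
# foo ['1.0', '2.0'] new summary 5     <- summary/score follow the highest version
# bar ['0.3'] bar package 2            <- list is sorted by score, descending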
|
test
|
_transform_result
|
Convert the result back into the input type.
|
capybara/virtualenv/lib/python2.7/site-packages/lxml/html/__init__.py
|
def _transform_result(typ, result):
"""Convert the result back into the input type.
"""
if issubclass(typ, bytes):
return tostring(result, encoding='utf-8')
elif issubclass(typ, unicode):
return tostring(result, encoding='unicode')
else:
return result
|
def _transform_result(typ, result):
"""Convert the result back into the input type.
"""
if issubclass(typ, bytes):
return tostring(result, encoding='utf-8')
elif issubclass(typ, unicode):
return tostring(result, encoding='unicode')
else:
return result
|
[
"Convert",
"the",
"result",
"back",
"into",
"the",
"input",
"type",
"."
] |
AkihikoITOH/capybara
|
python
|
https://github.com/AkihikoITOH/capybara/blob/e86c2173ea386654f4ae061148e8fbe3f25e715c/capybara/virtualenv/lib/python2.7/site-packages/lxml/html/__init__.py#L114-L122
|
[
"def",
"_transform_result",
"(",
"typ",
",",
"result",
")",
":",
"if",
"issubclass",
"(",
"typ",
",",
"bytes",
")",
":",
"return",
"tostring",
"(",
"result",
",",
"encoding",
"=",
"'utf-8'",
")",
"elif",
"issubclass",
"(",
"typ",
",",
"unicode",
")",
":",
"return",
"tostring",
"(",
"result",
",",
"encoding",
"=",
"'unicode'",
")",
"else",
":",
"return",
"result"
] |
e86c2173ea386654f4ae061148e8fbe3f25e715c
|
test
|
fragments_fromstring
|
Parses several HTML elements, returning a list of elements.
The first item in the list may be a string (though leading
whitespace is removed). If no_leading_text is true, then it will
be an error if there is leading text, and it will always be a list
of only elements.
base_url will set the document's base_url attribute (and the tree's docinfo.URL)
|
capybara/virtualenv/lib/python2.7/site-packages/lxml/html/__init__.py
|
def fragments_fromstring(html, no_leading_text=False, base_url=None,
parser=None, **kw):
"""
Parses several HTML elements, returning a list of elements.
The first item in the list may be a string (though leading
whitespace is removed). If no_leading_text is true, then it will
be an error if there is leading text, and it will always be a list
of only elements.
base_url will set the document's base_url attribute (and the tree's docinfo.URL)
"""
if parser is None:
parser = html_parser
# FIXME: check what happens when you give html with a body, head, etc.
if isinstance(html, bytes):
if not _looks_like_full_html_bytes(html):
# can't use %-formatting in early Py3 versions
html = ('<html><body>'.encode('ascii') + html +
'</body></html>'.encode('ascii'))
else:
if not _looks_like_full_html_unicode(html):
html = '<html><body>%s</body></html>' % html
doc = document_fromstring(html, parser=parser, base_url=base_url, **kw)
assert _nons(doc.tag) == 'html'
bodies = [e for e in doc if _nons(e.tag) == 'body']
assert len(bodies) == 1, ("too many bodies: %r in %r" % (bodies, html))
body = bodies[0]
elements = []
if no_leading_text and body.text and body.text.strip():
raise etree.ParserError(
"There is leading text: %r" % body.text)
if body.text and body.text.strip():
elements.append(body.text)
elements.extend(body)
# FIXME: removing the reference to the parent artificial document
# would be nice
return elements
|
def fragments_fromstring(html, no_leading_text=False, base_url=None,
parser=None, **kw):
"""
Parses several HTML elements, returning a list of elements.
The first item in the list may be a string (though leading
whitespace is removed). If no_leading_text is true, then it will
be an error if there is leading text, and it will always be a list
of only elements.
base_url will set the document's base_url attribute (and the tree's docinfo.URL)
"""
if parser is None:
parser = html_parser
# FIXME: check what happens when you give html with a body, head, etc.
if isinstance(html, bytes):
if not _looks_like_full_html_bytes(html):
# can't use %-formatting in early Py3 versions
html = ('<html><body>'.encode('ascii') + html +
'</body></html>'.encode('ascii'))
else:
if not _looks_like_full_html_unicode(html):
html = '<html><body>%s</body></html>' % html
doc = document_fromstring(html, parser=parser, base_url=base_url, **kw)
assert _nons(doc.tag) == 'html'
bodies = [e for e in doc if _nons(e.tag) == 'body']
assert len(bodies) == 1, ("too many bodies: %r in %r" % (bodies, html))
body = bodies[0]
elements = []
if no_leading_text and body.text and body.text.strip():
raise etree.ParserError(
"There is leading text: %r" % body.text)
if body.text and body.text.strip():
elements.append(body.text)
elements.extend(body)
# FIXME: removing the reference to the parent artificial document
# would be nice
return elements
|
[
"Parses",
"several",
"HTML",
"elements",
"returning",
"a",
"list",
"of",
"elements",
"."
] |
AkihikoITOH/capybara
|
python
|
https://github.com/AkihikoITOH/capybara/blob/e86c2173ea386654f4ae061148e8fbe3f25e715c/capybara/virtualenv/lib/python2.7/site-packages/lxml/html/__init__.py#L624-L661
|
[
"def",
"fragments_fromstring",
"(",
"html",
",",
"no_leading_text",
"=",
"False",
",",
"base_url",
"=",
"None",
",",
"parser",
"=",
"None",
",",
"*",
"*",
"kw",
")",
":",
"if",
"parser",
"is",
"None",
":",
"parser",
"=",
"html_parser",
"# FIXME: check what happens when you give html with a body, head, etc.",
"if",
"isinstance",
"(",
"html",
",",
"bytes",
")",
":",
"if",
"not",
"_looks_like_full_html_bytes",
"(",
"html",
")",
":",
"# can't use %-formatting in early Py3 versions",
"html",
"=",
"(",
"'<html><body>'",
".",
"encode",
"(",
"'ascii'",
")",
"+",
"html",
"+",
"'</body></html>'",
".",
"encode",
"(",
"'ascii'",
")",
")",
"else",
":",
"if",
"not",
"_looks_like_full_html_unicode",
"(",
"html",
")",
":",
"html",
"=",
"'<html><body>%s</body></html>'",
"%",
"html",
"doc",
"=",
"document_fromstring",
"(",
"html",
",",
"parser",
"=",
"parser",
",",
"base_url",
"=",
"base_url",
",",
"*",
"*",
"kw",
")",
"assert",
"_nons",
"(",
"doc",
".",
"tag",
")",
"==",
"'html'",
"bodies",
"=",
"[",
"e",
"for",
"e",
"in",
"doc",
"if",
"_nons",
"(",
"e",
".",
"tag",
")",
"==",
"'body'",
"]",
"assert",
"len",
"(",
"bodies",
")",
"==",
"1",
",",
"(",
"\"too many bodies: %r in %r\"",
"%",
"(",
"bodies",
",",
"html",
")",
")",
"body",
"=",
"bodies",
"[",
"0",
"]",
"elements",
"=",
"[",
"]",
"if",
"no_leading_text",
"and",
"body",
".",
"text",
"and",
"body",
".",
"text",
".",
"strip",
"(",
")",
":",
"raise",
"etree",
".",
"ParserError",
"(",
"\"There is leading text: %r\"",
"%",
"body",
".",
"text",
")",
"if",
"body",
".",
"text",
"and",
"body",
".",
"text",
".",
"strip",
"(",
")",
":",
"elements",
".",
"append",
"(",
"body",
".",
"text",
")",
"elements",
".",
"extend",
"(",
"body",
")",
"# FIXME: removing the reference to the parent artificial document",
"# would be nice",
"return",
"elements"
] |
e86c2173ea386654f4ae061148e8fbe3f25e715c
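A quick illustration of the leading-text behaviour described in the fragments_fromstring record above, using the public lxml.html API:

from lxml import html

frags = html.fragments_fromstring('hello <b>world</b><p>para</p>')
print([getattr(f, 'tag', f) for f in frags])   # ['hello ', 'b', 'p']

try:
    html.fragments_fromstring('hello <b>world</b>', no_leading_text=True)
except Exception as exc:                        # etree.ParserError in the code above
    print(exc)                                  # There is leading text: 'hello '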
|
test
|
fragment_fromstring
|
Parses a single HTML element; it is an error if there is more than
one element, or if anything but whitespace precedes or follows the
element.
If ``create_parent`` is true (or is a tag name) then a parent node
will be created to encapsulate the HTML in a single element. In this
case, leading or trailing text is also allowed, as are multiple elements
as result of the parsing.
Passing a ``base_url`` will set the document's ``base_url`` attribute
(and the tree's docinfo.URL).
|
capybara/virtualenv/lib/python2.7/site-packages/lxml/html/__init__.py
|
def fragment_fromstring(html, create_parent=False, base_url=None,
parser=None, **kw):
"""
Parses a single HTML element; it is an error if there is more than
one element, or if anything but whitespace precedes or follows the
element.
If ``create_parent`` is true (or is a tag name) then a parent node
will be created to encapsulate the HTML in a single element. In this
case, leading or trailing text is also allowed, as are multiple elements
as result of the parsing.
Passing a ``base_url`` will set the document's ``base_url`` attribute
(and the tree's docinfo.URL).
"""
if parser is None:
parser = html_parser
accept_leading_text = bool(create_parent)
elements = fragments_fromstring(
html, parser=parser, no_leading_text=not accept_leading_text,
base_url=base_url, **kw)
if create_parent:
if not isinstance(create_parent, basestring):
create_parent = 'div'
new_root = Element(create_parent)
if elements:
if isinstance(elements[0], basestring):
new_root.text = elements[0]
del elements[0]
new_root.extend(elements)
return new_root
if not elements:
raise etree.ParserError('No elements found')
if len(elements) > 1:
raise etree.ParserError(
"Multiple elements found (%s)"
% ', '.join([_element_name(e) for e in elements]))
el = elements[0]
if el.tail and el.tail.strip():
raise etree.ParserError(
"Element followed by text: %r" % el.tail)
el.tail = None
return el
|
def fragment_fromstring(html, create_parent=False, base_url=None,
parser=None, **kw):
"""
Parses a single HTML element; it is an error if there is more than
one element, or if anything but whitespace precedes or follows the
element.
If ``create_parent`` is true (or is a tag name) then a parent node
will be created to encapsulate the HTML in a single element. In this
case, leading or trailing text is also allowed, as are multiple elements
as result of the parsing.
Passing a ``base_url`` will set the document's ``base_url`` attribute
(and the tree's docinfo.URL).
"""
if parser is None:
parser = html_parser
accept_leading_text = bool(create_parent)
elements = fragments_fromstring(
html, parser=parser, no_leading_text=not accept_leading_text,
base_url=base_url, **kw)
if create_parent:
if not isinstance(create_parent, basestring):
create_parent = 'div'
new_root = Element(create_parent)
if elements:
if isinstance(elements[0], basestring):
new_root.text = elements[0]
del elements[0]
new_root.extend(elements)
return new_root
if not elements:
raise etree.ParserError('No elements found')
if len(elements) > 1:
raise etree.ParserError(
"Multiple elements found (%s)"
% ', '.join([_element_name(e) for e in elements]))
el = elements[0]
if el.tail and el.tail.strip():
raise etree.ParserError(
"Element followed by text: %r" % el.tail)
el.tail = None
return el
|
[
"Parses",
"a",
"single",
"HTML",
"element",
";",
"it",
"is",
"an",
"error",
"if",
"there",
"is",
"more",
"than",
"one",
"element",
"or",
"if",
"anything",
"but",
"whitespace",
"precedes",
"or",
"follows",
"the",
"element",
"."
] |
AkihikoITOH/capybara
|
python
|
https://github.com/AkihikoITOH/capybara/blob/e86c2173ea386654f4ae061148e8fbe3f25e715c/capybara/virtualenv/lib/python2.7/site-packages/lxml/html/__init__.py#L663-L709
|
[
"def",
"fragment_fromstring",
"(",
"html",
",",
"create_parent",
"=",
"False",
",",
"base_url",
"=",
"None",
",",
"parser",
"=",
"None",
",",
"*",
"*",
"kw",
")",
":",
"if",
"parser",
"is",
"None",
":",
"parser",
"=",
"html_parser",
"accept_leading_text",
"=",
"bool",
"(",
"create_parent",
")",
"elements",
"=",
"fragments_fromstring",
"(",
"html",
",",
"parser",
"=",
"parser",
",",
"no_leading_text",
"=",
"not",
"accept_leading_text",
",",
"base_url",
"=",
"base_url",
",",
"*",
"*",
"kw",
")",
"if",
"create_parent",
":",
"if",
"not",
"isinstance",
"(",
"create_parent",
",",
"basestring",
")",
":",
"create_parent",
"=",
"'div'",
"new_root",
"=",
"Element",
"(",
"create_parent",
")",
"if",
"elements",
":",
"if",
"isinstance",
"(",
"elements",
"[",
"0",
"]",
",",
"basestring",
")",
":",
"new_root",
".",
"text",
"=",
"elements",
"[",
"0",
"]",
"del",
"elements",
"[",
"0",
"]",
"new_root",
".",
"extend",
"(",
"elements",
")",
"return",
"new_root",
"if",
"not",
"elements",
":",
"raise",
"etree",
".",
"ParserError",
"(",
"'No elements found'",
")",
"if",
"len",
"(",
"elements",
")",
">",
"1",
":",
"raise",
"etree",
".",
"ParserError",
"(",
"\"Multiple elements found (%s)\"",
"%",
"', '",
".",
"join",
"(",
"[",
"_element_name",
"(",
"e",
")",
"for",
"e",
"in",
"elements",
"]",
")",
")",
"el",
"=",
"elements",
"[",
"0",
"]",
"if",
"el",
".",
"tail",
"and",
"el",
".",
"tail",
".",
"strip",
"(",
")",
":",
"raise",
"etree",
".",
"ParserError",
"(",
"\"Element followed by text: %r\"",
"%",
"el",
".",
"tail",
")",
"el",
".",
"tail",
"=",
"None",
"return",
"el"
] |
e86c2173ea386654f4ae061148e8fbe3f25e715c
|
test
|
fromstring
|
Parse the html, returning a single element/document.
This tries to minimally parse the chunk of text, without knowing if it
is a fragment or a document.
base_url will set the document's base_url attribute (and the tree's docinfo.URL)
|
capybara/virtualenv/lib/python2.7/site-packages/lxml/html/__init__.py
|
def fromstring(html, base_url=None, parser=None, **kw):
"""
Parse the html, returning a single element/document.
This tries to minimally parse the chunk of text, without knowing if it
is a fragment or a document.
base_url will set the document's base_url attribute (and the tree's docinfo.URL)
"""
if parser is None:
parser = html_parser
if isinstance(html, bytes):
is_full_html = _looks_like_full_html_bytes(html)
else:
is_full_html = _looks_like_full_html_unicode(html)
doc = document_fromstring(html, parser=parser, base_url=base_url, **kw)
if is_full_html:
return doc
# otherwise, lets parse it out...
bodies = doc.findall('body')
if not bodies:
bodies = doc.findall('{%s}body' % XHTML_NAMESPACE)
if bodies:
body = bodies[0]
if len(bodies) > 1:
# Somehow there are multiple bodies, which is bad, but just
# smash them into one body
for other_body in bodies[1:]:
if other_body.text:
if len(body):
body[-1].tail = (body[-1].tail or '') + other_body.text
else:
body.text = (body.text or '') + other_body.text
body.extend(other_body)
# We'll ignore tail
# I guess we are ignoring attributes too
other_body.drop_tree()
else:
body = None
heads = doc.findall('head')
if not heads:
heads = doc.findall('{%s}head' % XHTML_NAMESPACE)
if heads:
# Well, we have some sort of structure, so lets keep it all
head = heads[0]
if len(heads) > 1:
for other_head in heads[1:]:
head.extend(other_head)
# We don't care about text or tail in a head
other_head.drop_tree()
return doc
if body is None:
return doc
if (len(body) == 1 and (not body.text or not body.text.strip())
and (not body[-1].tail or not body[-1].tail.strip())):
# The body has just one element, so it was probably a single
# element passed in
return body[0]
# Now we have a body which represents a bunch of tags which have the
# content that was passed in. We will create a fake container, which
# is the body tag, except <body> implies too much structure.
if _contains_block_level_tag(body):
body.tag = 'div'
else:
body.tag = 'span'
return body
|
def fromstring(html, base_url=None, parser=None, **kw):
"""
Parse the html, returning a single element/document.
This tries to minimally parse the chunk of text, without knowing if it
is a fragment or a document.
base_url will set the document's base_url attribute (and the tree's docinfo.URL)
"""
if parser is None:
parser = html_parser
if isinstance(html, bytes):
is_full_html = _looks_like_full_html_bytes(html)
else:
is_full_html = _looks_like_full_html_unicode(html)
doc = document_fromstring(html, parser=parser, base_url=base_url, **kw)
if is_full_html:
return doc
# otherwise, lets parse it out...
bodies = doc.findall('body')
if not bodies:
bodies = doc.findall('{%s}body' % XHTML_NAMESPACE)
if bodies:
body = bodies[0]
if len(bodies) > 1:
# Somehow there are multiple bodies, which is bad, but just
# smash them into one body
for other_body in bodies[1:]:
if other_body.text:
if len(body):
body[-1].tail = (body[-1].tail or '') + other_body.text
else:
body.text = (body.text or '') + other_body.text
body.extend(other_body)
# We'll ignore tail
# I guess we are ignoring attributes too
other_body.drop_tree()
else:
body = None
heads = doc.findall('head')
if not heads:
heads = doc.findall('{%s}head' % XHTML_NAMESPACE)
if heads:
# Well, we have some sort of structure, so lets keep it all
head = heads[0]
if len(heads) > 1:
for other_head in heads[1:]:
head.extend(other_head)
# We don't care about text or tail in a head
other_head.drop_tree()
return doc
if body is None:
return doc
if (len(body) == 1 and (not body.text or not body.text.strip())
and (not body[-1].tail or not body[-1].tail.strip())):
# The body has just one element, so it was probably a single
# element passed in
return body[0]
# Now we have a body which represents a bunch of tags which have the
# content that was passed in. We will create a fake container, which
# is the body tag, except <body> implies too much structure.
if _contains_block_level_tag(body):
body.tag = 'div'
else:
body.tag = 'span'
return body
|
[
"Parse",
"the",
"html",
"returning",
"a",
"single",
"element",
"/",
"document",
"."
] |
AkihikoITOH/capybara
|
python
|
https://github.com/AkihikoITOH/capybara/blob/e86c2173ea386654f4ae061148e8fbe3f25e715c/capybara/virtualenv/lib/python2.7/site-packages/lxml/html/__init__.py#L711-L776
|
[
"def",
"fromstring",
"(",
"html",
",",
"base_url",
"=",
"None",
",",
"parser",
"=",
"None",
",",
"*",
"*",
"kw",
")",
":",
"if",
"parser",
"is",
"None",
":",
"parser",
"=",
"html_parser",
"if",
"isinstance",
"(",
"html",
",",
"bytes",
")",
":",
"is_full_html",
"=",
"_looks_like_full_html_bytes",
"(",
"html",
")",
"else",
":",
"is_full_html",
"=",
"_looks_like_full_html_unicode",
"(",
"html",
")",
"doc",
"=",
"document_fromstring",
"(",
"html",
",",
"parser",
"=",
"parser",
",",
"base_url",
"=",
"base_url",
",",
"*",
"*",
"kw",
")",
"if",
"is_full_html",
":",
"return",
"doc",
"# otherwise, lets parse it out...",
"bodies",
"=",
"doc",
".",
"findall",
"(",
"'body'",
")",
"if",
"not",
"bodies",
":",
"bodies",
"=",
"doc",
".",
"findall",
"(",
"'{%s}body'",
"%",
"XHTML_NAMESPACE",
")",
"if",
"bodies",
":",
"body",
"=",
"bodies",
"[",
"0",
"]",
"if",
"len",
"(",
"bodies",
")",
">",
"1",
":",
"# Somehow there are multiple bodies, which is bad, but just",
"# smash them into one body",
"for",
"other_body",
"in",
"bodies",
"[",
"1",
":",
"]",
":",
"if",
"other_body",
".",
"text",
":",
"if",
"len",
"(",
"body",
")",
":",
"body",
"[",
"-",
"1",
"]",
".",
"tail",
"=",
"(",
"body",
"[",
"-",
"1",
"]",
".",
"tail",
"or",
"''",
")",
"+",
"other_body",
".",
"text",
"else",
":",
"body",
".",
"text",
"=",
"(",
"body",
".",
"text",
"or",
"''",
")",
"+",
"other_body",
".",
"text",
"body",
".",
"extend",
"(",
"other_body",
")",
"# We'll ignore tail",
"# I guess we are ignoring attributes too",
"other_body",
".",
"drop_tree",
"(",
")",
"else",
":",
"body",
"=",
"None",
"heads",
"=",
"doc",
".",
"findall",
"(",
"'head'",
")",
"if",
"not",
"heads",
":",
"heads",
"=",
"doc",
".",
"findall",
"(",
"'{%s}head'",
"%",
"XHTML_NAMESPACE",
")",
"if",
"heads",
":",
"# Well, we have some sort of structure, so lets keep it all",
"head",
"=",
"heads",
"[",
"0",
"]",
"if",
"len",
"(",
"heads",
")",
">",
"1",
":",
"for",
"other_head",
"in",
"heads",
"[",
"1",
":",
"]",
":",
"head",
".",
"extend",
"(",
"other_head",
")",
"# We don't care about text or tail in a head",
"other_head",
".",
"drop_tree",
"(",
")",
"return",
"doc",
"if",
"body",
"is",
"None",
":",
"return",
"doc",
"if",
"(",
"len",
"(",
"body",
")",
"==",
"1",
"and",
"(",
"not",
"body",
".",
"text",
"or",
"not",
"body",
".",
"text",
".",
"strip",
"(",
")",
")",
"and",
"(",
"not",
"body",
"[",
"-",
"1",
"]",
".",
"tail",
"or",
"not",
"body",
"[",
"-",
"1",
"]",
".",
"tail",
".",
"strip",
"(",
")",
")",
")",
":",
"# The body has just one element, so it was probably a single",
"# element passed in",
"return",
"body",
"[",
"0",
"]",
"# Now we have a body which represents a bunch of tags which have the",
"# content that was passed in. We will create a fake container, which",
"# is the body tag, except <body> implies too much structure.",
"if",
"_contains_block_level_tag",
"(",
"body",
")",
":",
"body",
".",
"tag",
"=",
"'div'",
"else",
":",
"body",
".",
"tag",
"=",
"'span'",
"return",
"body"
] |
e86c2173ea386654f4ae061148e8fbe3f25e715c
|
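A minimal usage sketch for fromstring, assuming only that lxml is installed; the literals are illustrative:

from lxml import html

fragment = html.fromstring('<p>Hello <b>world</b></p>')
print(fragment.tag)   # 'p' -- a single-element fragment comes back as that element

document = html.fromstring('<html><body><p>Hi</p></body></html>')
print(document.tag)   # 'html' -- a full document comes back as its root element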
test
|
parse
|
Parse a filename, URL, or file-like object into an HTML document
tree. Note: this returns a tree, not an element. Use
``parse(...).getroot()`` to get the document root.
You can override the base URL with the ``base_url`` keyword. This
is most useful when parsing from a file-like object.
|
capybara/virtualenv/lib/python2.7/site-packages/lxml/html/__init__.py
|
def parse(filename_or_url, parser=None, base_url=None, **kw):
"""
Parse a filename, URL, or file-like object into an HTML document
tree. Note: this returns a tree, not an element. Use
``parse(...).getroot()`` to get the document root.
You can override the base URL with the ``base_url`` keyword. This
is most useful when parsing from a file-like object.
"""
if parser is None:
parser = html_parser
return etree.parse(filename_or_url, parser, base_url=base_url, **kw)
|
def parse(filename_or_url, parser=None, base_url=None, **kw):
"""
Parse a filename, URL, or file-like object into an HTML document
tree. Note: this returns a tree, not an element. Use
``parse(...).getroot()`` to get the document root.
You can override the base URL with the ``base_url`` keyword. This
is most useful when parsing from a file-like object.
"""
if parser is None:
parser = html_parser
return etree.parse(filename_or_url, parser, base_url=base_url, **kw)
|
[
"Parse",
"a",
"filename",
"URL",
"or",
"file",
"-",
"like",
"object",
"into",
"an",
"HTML",
"document",
"tree",
".",
"Note",
":",
"this",
"returns",
"a",
"tree",
"not",
"an",
"element",
".",
"Use",
"parse",
"(",
"...",
")",
".",
"getroot",
"()",
"to",
"get",
"the",
"document",
"root",
"."
] |
AkihikoITOH/capybara
|
python
|
https://github.com/AkihikoITOH/capybara/blob/e86c2173ea386654f4ae061148e8fbe3f25e715c/capybara/virtualenv/lib/python2.7/site-packages/lxml/html/__init__.py#L778-L789
|
[
"def",
"parse",
"(",
"filename_or_url",
",",
"parser",
"=",
"None",
",",
"base_url",
"=",
"None",
",",
"*",
"*",
"kw",
")",
":",
"if",
"parser",
"is",
"None",
":",
"parser",
"=",
"html_parser",
"return",
"etree",
".",
"parse",
"(",
"filename_or_url",
",",
"parser",
",",
"base_url",
"=",
"base_url",
",",
"*",
"*",
"kw",
")"
] |
e86c2173ea386654f4ae061148e8fbe3f25e715c
|
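A minimal sketch of parse; 'page.html' is a hypothetical local file and the base_url value is illustrative:

import io
from lxml import html

tree = html.parse('page.html')       # an ElementTree, not an element
root = tree.getroot()                # the <html> element

# base_url matters most with file-like objects, which carry no URL of their own
tree2 = html.parse(io.BytesIO(b'<html><body><p>hi</p></body></html>'),
                   base_url='http://example.com/')
print(tree2.docinfo.URL)             # 'http://example.com/'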
test
|
submit_form
|
Helper function to submit a form. Returns a file-like object, as from
``urllib.urlopen()``. This object also has a ``.geturl()`` function,
which shows the URL if there were any redirects.
You can use this like::
form = doc.forms[0]
form.inputs['foo'].value = 'bar' # etc
response = form.submit()
doc = parse(response)
doc.make_links_absolute(response.geturl())
To change the HTTP requester, pass a function as ``open_http`` keyword
argument that opens the URL for you. The function must have the following
signature::
open_http(method, URL, values)
The action is one of 'GET' or 'POST', the URL is the target URL as a
string, and the values are a sequence of ``(name, value)`` tuples with the
form data.
|
capybara/virtualenv/lib/python2.7/site-packages/lxml/html/__init__.py
|
def submit_form(form, extra_values=None, open_http=None):
"""
Helper function to submit a form. Returns a file-like object, as from
``urllib.urlopen()``. This object also has a ``.geturl()`` function,
which shows the URL if there were any redirects.
You can use this like::
form = doc.forms[0]
form.inputs['foo'].value = 'bar' # etc
response = form.submit()
doc = parse(response)
doc.make_links_absolute(response.geturl())
To change the HTTP requester, pass a function as ``open_http`` keyword
argument that opens the URL for you. The function must have the following
signature::
open_http(method, URL, values)
The action is one of 'GET' or 'POST', the URL is the target URL as a
string, and the values are a sequence of ``(name, value)`` tuples with the
form data.
"""
values = form.form_values()
if extra_values:
if hasattr(extra_values, 'items'):
extra_values = extra_values.items()
values.extend(extra_values)
if open_http is None:
open_http = open_http_urllib
if form.action:
url = form.action
else:
url = form.base_url
return open_http(form.method, url, values)
|
def submit_form(form, extra_values=None, open_http=None):
"""
Helper function to submit a form. Returns a file-like object, as from
``urllib.urlopen()``. This object also has a ``.geturl()`` function,
which shows the URL if there were any redirects.
You can use this like::
form = doc.forms[0]
form.inputs['foo'].value = 'bar' # etc
response = form.submit()
doc = parse(response)
doc.make_links_absolute(response.geturl())
To change the HTTP requester, pass a function as ``open_http`` keyword
argument that opens the URL for you. The function must have the following
signature::
open_http(method, URL, values)
The action is one of 'GET' or 'POST', the URL is the target URL as a
string, and the values are a sequence of ``(name, value)`` tuples with the
form data.
"""
values = form.form_values()
if extra_values:
if hasattr(extra_values, 'items'):
extra_values = extra_values.items()
values.extend(extra_values)
if open_http is None:
open_http = open_http_urllib
if form.action:
url = form.action
else:
url = form.base_url
return open_http(form.method, url, values)
|
[
"Helper",
"function",
"to",
"submit",
"a",
"form",
".",
"Returns",
"a",
"file",
"-",
"like",
"object",
"as",
"from",
"urllib",
".",
"urlopen",
"()",
".",
"This",
"object",
"also",
"has",
"a",
".",
"geturl",
"()",
"function",
"which",
"shows",
"the",
"URL",
"if",
"there",
"were",
"any",
"redirects",
"."
] |
AkihikoITOH/capybara
|
python
|
https://github.com/AkihikoITOH/capybara/blob/e86c2173ea386654f4ae061148e8fbe3f25e715c/capybara/virtualenv/lib/python2.7/site-packages/lxml/html/__init__.py#L918-L953
|
[
"def",
"submit_form",
"(",
"form",
",",
"extra_values",
"=",
"None",
",",
"open_http",
"=",
"None",
")",
":",
"values",
"=",
"form",
".",
"form_values",
"(",
")",
"if",
"extra_values",
":",
"if",
"hasattr",
"(",
"extra_values",
",",
"'items'",
")",
":",
"extra_values",
"=",
"extra_values",
".",
"items",
"(",
")",
"values",
".",
"extend",
"(",
"extra_values",
")",
"if",
"open_http",
"is",
"None",
":",
"open_http",
"=",
"open_http_urllib",
"if",
"form",
".",
"action",
":",
"url",
"=",
"form",
".",
"action",
"else",
":",
"url",
"=",
"form",
".",
"base_url",
"return",
"open_http",
"(",
"form",
".",
"method",
",",
"url",
",",
"values",
")"
] |
e86c2173ea386654f4ae061148e8fbe3f25e715c
|
test
|
html_to_xhtml
|
Convert all tags in an HTML tree to XHTML by moving them to the
XHTML namespace.
|
capybara/virtualenv/lib/python2.7/site-packages/lxml/html/__init__.py
|
def html_to_xhtml(html):
"""Convert all tags in an HTML tree to XHTML by moving them to the
XHTML namespace.
"""
try:
html = html.getroot()
except AttributeError:
pass
prefix = "{%s}" % XHTML_NAMESPACE
for el in html.iter(etree.Element):
tag = el.tag
if tag[0] != '{':
el.tag = prefix + tag
|
def html_to_xhtml(html):
"""Convert all tags in an HTML tree to XHTML by moving them to the
XHTML namespace.
"""
try:
html = html.getroot()
except AttributeError:
pass
prefix = "{%s}" % XHTML_NAMESPACE
for el in html.iter(etree.Element):
tag = el.tag
if tag[0] != '{':
el.tag = prefix + tag
|
[
"Convert",
"all",
"tags",
"in",
"an",
"HTML",
"tree",
"to",
"XHTML",
"by",
"moving",
"them",
"to",
"the",
"XHTML",
"namespace",
"."
] |
AkihikoITOH/capybara
|
python
|
https://github.com/AkihikoITOH/capybara/blob/e86c2173ea386654f4ae061148e8fbe3f25e715c/capybara/virtualenv/lib/python2.7/site-packages/lxml/html/__init__.py#L1544-L1556
|
[
"def",
"html_to_xhtml",
"(",
"html",
")",
":",
"try",
":",
"html",
"=",
"html",
".",
"getroot",
"(",
")",
"except",
"AttributeError",
":",
"pass",
"prefix",
"=",
"\"{%s}\"",
"%",
"XHTML_NAMESPACE",
"for",
"el",
"in",
"html",
".",
"iter",
"(",
"etree",
".",
"Element",
")",
":",
"tag",
"=",
"el",
".",
"tag",
"if",
"tag",
"[",
"0",
"]",
"!=",
"'{'",
":",
"el",
".",
"tag",
"=",
"prefix",
"+",
"tag"
] |
e86c2173ea386654f4ae061148e8fbe3f25e715c
|
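A minimal sketch of html_to_xhtml, which rewrites tags in place; the input markup is illustrative:

from lxml import html
from lxml.html import html_to_xhtml

doc = html.fromstring('<html><body><p>hi</p></body></html>')
html_to_xhtml(doc)                   # modifies the tree in place, returns None
print(doc.tag)                       # '{http://www.w3.org/1999/xhtml}html'
print(doc.find('.//{http://www.w3.org/1999/xhtml}p') is not None)   # True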
test
|
xhtml_to_html
|
Convert all tags in an XHTML tree to HTML by removing their
XHTML namespace.
|
capybara/virtualenv/lib/python2.7/site-packages/lxml/html/__init__.py
|
def xhtml_to_html(xhtml):
"""Convert all tags in an XHTML tree to HTML by removing their
XHTML namespace.
"""
try:
xhtml = xhtml.getroot()
except AttributeError:
pass
prefix = "{%s}" % XHTML_NAMESPACE
prefix_len = len(prefix)
for el in xhtml.iter(prefix + "*"):
el.tag = el.tag[prefix_len:]
|
def xhtml_to_html(xhtml):
"""Convert all tags in an XHTML tree to HTML by removing their
XHTML namespace.
"""
try:
xhtml = xhtml.getroot()
except AttributeError:
pass
prefix = "{%s}" % XHTML_NAMESPACE
prefix_len = len(prefix)
for el in xhtml.iter(prefix + "*"):
el.tag = el.tag[prefix_len:]
|
[
"Convert",
"all",
"tags",
"in",
"an",
"XHTML",
"tree",
"to",
"HTML",
"by",
"removing",
"their",
"XHTML",
"namespace",
"."
] |
AkihikoITOH/capybara
|
python
|
https://github.com/AkihikoITOH/capybara/blob/e86c2173ea386654f4ae061148e8fbe3f25e715c/capybara/virtualenv/lib/python2.7/site-packages/lxml/html/__init__.py#L1558-L1569
|
[
"def",
"xhtml_to_html",
"(",
"xhtml",
")",
":",
"try",
":",
"xhtml",
"=",
"xhtml",
".",
"getroot",
"(",
")",
"except",
"AttributeError",
":",
"pass",
"prefix",
"=",
"\"{%s}\"",
"%",
"XHTML_NAMESPACE",
"prefix_len",
"=",
"len",
"(",
"prefix",
")",
"for",
"el",
"in",
"xhtml",
".",
"iter",
"(",
"prefix",
"+",
"\"*\"",
")",
":",
"el",
".",
"tag",
"=",
"el",
".",
"tag",
"[",
"prefix_len",
":",
"]"
] |
e86c2173ea386654f4ae061148e8fbe3f25e715c
|
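A minimal sketch of the reverse direction; the XHTML snippet is illustrative:

from lxml import etree
from lxml.html import xhtml_to_html

root = etree.fromstring(
    '<html xmlns="http://www.w3.org/1999/xhtml"><body><p>hi</p></body></html>')
xhtml_to_html(root)                  # strips the XHTML namespace in place
print(root.tag)                      # 'html'
print(root.find('.//p').text)        # 'hi' -- plain-name lookups work again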
test
|
tostring
|
Return an HTML string representation of the document.
Note: if include_meta_content_type is true this will create a
``<meta http-equiv="Content-Type" ...>`` tag in the head;
regardless of the value of include_meta_content_type any existing
``<meta http-equiv="Content-Type" ...>`` tag will be removed
The ``encoding`` argument controls the output encoding (defaults to
ASCII, with &#...; character references for any characters outside
of ASCII). Note that you can pass the name ``'unicode'`` as
``encoding`` argument to serialise to a Unicode string.
The ``method`` argument defines the output method. It defaults to
'html', but can also be 'xml' for xhtml output, or 'text' to
serialise to plain text without markup.
To leave out the tail text of the top-level element that is being
serialised, pass ``with_tail=False``.
The ``doctype`` option allows passing in a plain string that will
be serialised before the XML tree. Note that passing in non
well-formed content here will make the XML output non well-formed.
Also, an existing doctype in the document tree will not be removed
when serialising an ElementTree instance.
Example::
>>> from lxml import html
>>> root = html.fragment_fromstring('<p>Hello<br>world!</p>')
>>> html.tostring(root)
b'<p>Hello<br>world!</p>'
>>> html.tostring(root, method='html')
b'<p>Hello<br>world!</p>'
>>> html.tostring(root, method='xml')
b'<p>Hello<br/>world!</p>'
>>> html.tostring(root, method='text')
b'Helloworld!'
>>> html.tostring(root, method='text', encoding='unicode')
u'Helloworld!'
>>> root = html.fragment_fromstring('<div><p>Hello<br>world!</p>TAIL</div>')
>>> html.tostring(root[0], method='text', encoding='unicode')
u'Helloworld!TAIL'
>>> html.tostring(root[0], method='text', encoding='unicode', with_tail=False)
u'Helloworld!'
>>> doc = html.document_fromstring('<p>Hello<br>world!</p>')
>>> html.tostring(doc, method='html', encoding='unicode')
u'<html><body><p>Hello<br>world!</p></body></html>'
>>> print(html.tostring(doc, method='html', encoding='unicode',
... doctype='<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN"'
... ' "http://www.w3.org/TR/html4/strict.dtd">'))
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd">
<html><body><p>Hello<br>world!</p></body></html>
|
capybara/virtualenv/lib/python2.7/site-packages/lxml/html/__init__.py
|
def tostring(doc, pretty_print=False, include_meta_content_type=False,
encoding=None, method="html", with_tail=True, doctype=None):
"""Return an HTML string representation of the document.
Note: if include_meta_content_type is true this will create a
``<meta http-equiv="Content-Type" ...>`` tag in the head;
regardless of the value of include_meta_content_type any existing
``<meta http-equiv="Content-Type" ...>`` tag will be removed
The ``encoding`` argument controls the output encoding (defaults to
ASCII, with &#...; character references for any characters outside
of ASCII). Note that you can pass the name ``'unicode'`` as
``encoding`` argument to serialise to a Unicode string.
The ``method`` argument defines the output method. It defaults to
'html', but can also be 'xml' for xhtml output, or 'text' to
serialise to plain text without markup.
To leave out the tail text of the top-level element that is being
serialised, pass ``with_tail=False``.
The ``doctype`` option allows passing in a plain string that will
be serialised before the XML tree. Note that passing in non
well-formed content here will make the XML output non well-formed.
Also, an existing doctype in the document tree will not be removed
when serialising an ElementTree instance.
Example::
>>> from lxml import html
>>> root = html.fragment_fromstring('<p>Hello<br>world!</p>')
>>> html.tostring(root)
b'<p>Hello<br>world!</p>'
>>> html.tostring(root, method='html')
b'<p>Hello<br>world!</p>'
>>> html.tostring(root, method='xml')
b'<p>Hello<br/>world!</p>'
>>> html.tostring(root, method='text')
b'Helloworld!'
>>> html.tostring(root, method='text', encoding='unicode')
u'Helloworld!'
>>> root = html.fragment_fromstring('<div><p>Hello<br>world!</p>TAIL</div>')
>>> html.tostring(root[0], method='text', encoding='unicode')
u'Helloworld!TAIL'
>>> html.tostring(root[0], method='text', encoding='unicode', with_tail=False)
u'Helloworld!'
>>> doc = html.document_fromstring('<p>Hello<br>world!</p>')
>>> html.tostring(doc, method='html', encoding='unicode')
u'<html><body><p>Hello<br>world!</p></body></html>'
>>> print(html.tostring(doc, method='html', encoding='unicode',
... doctype='<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN"'
... ' "http://www.w3.org/TR/html4/strict.dtd">'))
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd">
<html><body><p>Hello<br>world!</p></body></html>
"""
html = etree.tostring(doc, method=method, pretty_print=pretty_print,
encoding=encoding, with_tail=with_tail,
doctype=doctype)
if method == 'html' and not include_meta_content_type:
if isinstance(html, str):
html = __str_replace_meta_content_type('', html)
else:
html = __bytes_replace_meta_content_type(bytes(), html)
return html
|
def tostring(doc, pretty_print=False, include_meta_content_type=False,
encoding=None, method="html", with_tail=True, doctype=None):
"""Return an HTML string representation of the document.
Note: if include_meta_content_type is true this will create a
``<meta http-equiv="Content-Type" ...>`` tag in the head;
regardless of the value of include_meta_content_type any existing
``<meta http-equiv="Content-Type" ...>`` tag will be removed
The ``encoding`` argument controls the output encoding (defaults to
ASCII, with &#...; character references for any characters outside
of ASCII). Note that you can pass the name ``'unicode'`` as
``encoding`` argument to serialise to a Unicode string.
The ``method`` argument defines the output method. It defaults to
'html', but can also be 'xml' for xhtml output, or 'text' to
serialise to plain text without markup.
To leave out the tail text of the top-level element that is being
serialised, pass ``with_tail=False``.
The ``doctype`` option allows passing in a plain string that will
be serialised before the XML tree. Note that passing in non
well-formed content here will make the XML output non well-formed.
Also, an existing doctype in the document tree will not be removed
when serialising an ElementTree instance.
Example::
>>> from lxml import html
>>> root = html.fragment_fromstring('<p>Hello<br>world!</p>')
>>> html.tostring(root)
b'<p>Hello<br>world!</p>'
>>> html.tostring(root, method='html')
b'<p>Hello<br>world!</p>'
>>> html.tostring(root, method='xml')
b'<p>Hello<br/>world!</p>'
>>> html.tostring(root, method='text')
b'Helloworld!'
>>> html.tostring(root, method='text', encoding='unicode')
u'Helloworld!'
>>> root = html.fragment_fromstring('<div><p>Hello<br>world!</p>TAIL</div>')
>>> html.tostring(root[0], method='text', encoding='unicode')
u'Helloworld!TAIL'
>>> html.tostring(root[0], method='text', encoding='unicode', with_tail=False)
u'Helloworld!'
>>> doc = html.document_fromstring('<p>Hello<br>world!</p>')
>>> html.tostring(doc, method='html', encoding='unicode')
u'<html><body><p>Hello<br>world!</p></body></html>'
>>> print(html.tostring(doc, method='html', encoding='unicode',
... doctype='<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN"'
... ' "http://www.w3.org/TR/html4/strict.dtd">'))
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd">
<html><body><p>Hello<br>world!</p></body></html>
"""
html = etree.tostring(doc, method=method, pretty_print=pretty_print,
encoding=encoding, with_tail=with_tail,
doctype=doctype)
if method == 'html' and not include_meta_content_type:
if isinstance(html, str):
html = __str_replace_meta_content_type('', html)
else:
html = __bytes_replace_meta_content_type(bytes(), html)
return html
|
[
"Return",
"an",
"HTML",
"string",
"representation",
"of",
"the",
"document",
"."
] |
AkihikoITOH/capybara
|
python
|
https://github.com/AkihikoITOH/capybara/blob/e86c2173ea386654f4ae061148e8fbe3f25e715c/capybara/virtualenv/lib/python2.7/site-packages/lxml/html/__init__.py#L1578-L1649
|
[
"def",
"tostring",
"(",
"doc",
",",
"pretty_print",
"=",
"False",
",",
"include_meta_content_type",
"=",
"False",
",",
"encoding",
"=",
"None",
",",
"method",
"=",
"\"html\"",
",",
"with_tail",
"=",
"True",
",",
"doctype",
"=",
"None",
")",
":",
"html",
"=",
"etree",
".",
"tostring",
"(",
"doc",
",",
"method",
"=",
"method",
",",
"pretty_print",
"=",
"pretty_print",
",",
"encoding",
"=",
"encoding",
",",
"with_tail",
"=",
"with_tail",
",",
"doctype",
"=",
"doctype",
")",
"if",
"method",
"==",
"'html'",
"and",
"not",
"include_meta_content_type",
":",
"if",
"isinstance",
"(",
"html",
",",
"str",
")",
":",
"html",
"=",
"__str_replace_meta_content_type",
"(",
"''",
",",
"html",
")",
"else",
":",
"html",
"=",
"__bytes_replace_meta_content_type",
"(",
"bytes",
"(",
")",
",",
"html",
")",
"return",
"html"
] |
e86c2173ea386654f4ae061148e8fbe3f25e715c
|
test
|
open_in_browser
|
Open the HTML document in a web browser, saving it to a temporary
file to open it. Note that this does not delete the file after
use. This is mainly meant for debugging.
|
capybara/virtualenv/lib/python2.7/site-packages/lxml/html/__init__.py
|
def open_in_browser(doc, encoding=None):
"""
Open the HTML document in a web browser, saving it to a temporary
file to open it. Note that this does not delete the file after
use. This is mainly meant for debugging.
"""
import os
import webbrowser
import tempfile
if not isinstance(doc, etree._ElementTree):
doc = etree.ElementTree(doc)
handle, fn = tempfile.mkstemp(suffix='.html')
f = os.fdopen(handle, 'wb')
try:
doc.write(f, method="html", encoding=encoding or doc.docinfo.encoding or "UTF-8")
finally:
# we leak the file itself here, but we should at least close it
f.close()
url = 'file://' + fn.replace(os.path.sep, '/')
print(url)
webbrowser.open(url)
|
def open_in_browser(doc, encoding=None):
"""
Open the HTML document in a web browser, saving it to a temporary
file to open it. Note that this does not delete the file after
use. This is mainly meant for debugging.
"""
import os
import webbrowser
import tempfile
if not isinstance(doc, etree._ElementTree):
doc = etree.ElementTree(doc)
handle, fn = tempfile.mkstemp(suffix='.html')
f = os.fdopen(handle, 'wb')
try:
doc.write(f, method="html", encoding=encoding or doc.docinfo.encoding or "UTF-8")
finally:
# we leak the file itself here, but we should at least close it
f.close()
url = 'file://' + fn.replace(os.path.sep, '/')
print(url)
webbrowser.open(url)
|
[
"Open",
"the",
"HTML",
"document",
"in",
"a",
"web",
"browser",
"saving",
"it",
"to",
"a",
"temporary",
"file",
"to",
"open",
"it",
".",
"Note",
"that",
"this",
"does",
"not",
"delete",
"the",
"file",
"after",
"use",
".",
"This",
"is",
"mainly",
"meant",
"for",
"debugging",
"."
] |
AkihikoITOH/capybara
|
python
|
https://github.com/AkihikoITOH/capybara/blob/e86c2173ea386654f4ae061148e8fbe3f25e715c/capybara/virtualenv/lib/python2.7/site-packages/lxml/html/__init__.py#L1653-L1673
|
[
"def",
"open_in_browser",
"(",
"doc",
",",
"encoding",
"=",
"None",
")",
":",
"import",
"os",
"import",
"webbrowser",
"import",
"tempfile",
"if",
"not",
"isinstance",
"(",
"doc",
",",
"etree",
".",
"_ElementTree",
")",
":",
"doc",
"=",
"etree",
".",
"ElementTree",
"(",
"doc",
")",
"handle",
",",
"fn",
"=",
"tempfile",
".",
"mkstemp",
"(",
"suffix",
"=",
"'.html'",
")",
"f",
"=",
"os",
".",
"fdopen",
"(",
"handle",
",",
"'wb'",
")",
"try",
":",
"doc",
".",
"write",
"(",
"f",
",",
"method",
"=",
"\"html\"",
",",
"encoding",
"=",
"encoding",
"or",
"doc",
".",
"docinfo",
".",
"encoding",
"or",
"\"UTF-8\"",
")",
"finally",
":",
"# we leak the file itself here, but we should at least close it",
"f",
".",
"close",
"(",
")",
"url",
"=",
"'file://'",
"+",
"fn",
".",
"replace",
"(",
"os",
".",
"path",
".",
"sep",
",",
"'/'",
")",
"print",
"(",
"url",
")",
"webbrowser",
".",
"open",
"(",
"url",
")"
] |
e86c2173ea386654f4ae061148e8fbe3f25e715c
|
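A minimal sketch of open_in_browser; the call is left commented out because it writes a temp file and launches the default browser:

from lxml import html
from lxml.html import open_in_browser

doc = html.fromstring('<html><body><h1>Scratch page</h1></body></html>')
# open_in_browser(doc)   # prints the file:// URL of the temp file, then opens it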
test
|
HtmlMixin._label__get
|
Get or set any <label> element associated with this element.
|
capybara/virtualenv/lib/python2.7/site-packages/lxml/html/__init__.py
|
def _label__get(self):
"""
Get or set any <label> element associated with this element.
"""
id = self.get('id')
if not id:
return None
result = _label_xpath(self, id=id)
if not result:
return None
else:
return result[0]
|
def _label__get(self):
"""
Get or set any <label> element associated with this element.
"""
id = self.get('id')
if not id:
return None
result = _label_xpath(self, id=id)
if not result:
return None
else:
return result[0]
|
[
"Get",
"or",
"set",
"any",
"<label",
">",
"element",
"associated",
"with",
"this",
"element",
"."
] |
AkihikoITOH/capybara
|
python
|
https://github.com/AkihikoITOH/capybara/blob/e86c2173ea386654f4ae061148e8fbe3f25e715c/capybara/virtualenv/lib/python2.7/site-packages/lxml/html/__init__.py#L165-L176
|
[
"def",
"_label__get",
"(",
"self",
")",
":",
"id",
"=",
"self",
".",
"get",
"(",
"'id'",
")",
"if",
"not",
"id",
":",
"return",
"None",
"result",
"=",
"_label_xpath",
"(",
"self",
",",
"id",
"=",
"id",
")",
"if",
"not",
"result",
":",
"return",
"None",
"else",
":",
"return",
"result",
"[",
"0",
"]"
] |
e86c2173ea386654f4ae061148e8fbe3f25e715c
|
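A minimal sketch of the .label property that this getter backs, assuming a <label for=...> pointing at the element's id; the form markup is illustrative:

from lxml import html

form = html.fromstring(
    '<form><label for="firstname">First name</label>'
    '<input type="text" id="firstname" name="firstname"></form>')
inp = form.find('.//input')
print(inp.label.text)    # 'First name'; elements with no id or no matching <label> give None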
test
|
HtmlMixin.drop_tree
|
Removes this element from the tree, including its children and
text. The tail text is joined to the previous element or
parent.
|
capybara/virtualenv/lib/python2.7/site-packages/lxml/html/__init__.py
|
def drop_tree(self):
"""
Removes this element from the tree, including its children and
text. The tail text is joined to the previous element or
parent.
"""
parent = self.getparent()
assert parent is not None
if self.tail:
previous = self.getprevious()
if previous is None:
parent.text = (parent.text or '') + self.tail
else:
previous.tail = (previous.tail or '') + self.tail
parent.remove(self)
|
def drop_tree(self):
"""
Removes this element from the tree, including its children and
text. The tail text is joined to the previous element or
parent.
"""
parent = self.getparent()
assert parent is not None
if self.tail:
previous = self.getprevious()
if previous is None:
parent.text = (parent.text or '') + self.tail
else:
previous.tail = (previous.tail or '') + self.tail
parent.remove(self)
|
[
"Removes",
"this",
"element",
"from",
"the",
"tree",
"including",
"its",
"children",
"and",
"text",
".",
"The",
"tail",
"text",
"is",
"joined",
"to",
"the",
"previous",
"element",
"or",
"parent",
"."
] |
AkihikoITOH/capybara
|
python
|
https://github.com/AkihikoITOH/capybara/blob/e86c2173ea386654f4ae061148e8fbe3f25e715c/capybara/virtualenv/lib/python2.7/site-packages/lxml/html/__init__.py#L194-L208
|
[
"def",
"drop_tree",
"(",
"self",
")",
":",
"parent",
"=",
"self",
".",
"getparent",
"(",
")",
"assert",
"parent",
"is",
"not",
"None",
"if",
"self",
".",
"tail",
":",
"previous",
"=",
"self",
".",
"getprevious",
"(",
")",
"if",
"previous",
"is",
"None",
":",
"parent",
".",
"text",
"=",
"(",
"parent",
".",
"text",
"or",
"''",
")",
"+",
"self",
".",
"tail",
"else",
":",
"previous",
".",
"tail",
"=",
"(",
"previous",
".",
"tail",
"or",
"''",
")",
"+",
"self",
".",
"tail",
"parent",
".",
"remove",
"(",
"self",
")"
] |
e86c2173ea386654f4ae061148e8fbe3f25e715c
|
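A minimal sketch of drop_tree showing how the tail text survives; the markup is illustrative:

from lxml import html

doc = html.fromstring('<div><p>keep</p><span>drop</span> tail text</div>')
doc.find('.//span').drop_tree()
print(html.tostring(doc))   # b'<div><p>keep</p> tail text</div>'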