Search is not available for this dataset
text stringlengths 75 104k |
|---|
async def parse_prod_staff_results(soup):
    """
    Parse a page of producer or staff search results.

    :param soup: The BS4 class object for the results page
    :return: A list of dictionaries containing a name and nationality.
    """
    # Each <li> is one entry: the <abbr> title attribute carries the
    # nationality and the <a> text carries the display name.
    return [
        {'nationality': entry.abbr.get('title'), 'name': entry.a.string}
        for entry in soup.find_all('li')
    ]
async def parse_character_results(soup):
    """
    Parse a page of character search results.

    :param soup: The BS4 class object for the results page
    :return: A list of dictionaries containing a name, gender and a list of
        name/id dictionaries for the games they appeared in.
    """
    # Skip the header row of the first striped results table.
    rows = list(soup.find_all('table', class_='stripe')[0].children)[1:]
    characters = []
    for row in rows:
        cells = list(row.children)
        games = []
        # The second cell nests the game links; NavigableString children are
        # bare text nodes between tags and carry no game data.
        for game in list(cells[1].children)[1].children:
            if isinstance(game, NavigableString):
                continue
            games.append({'name': game.string,
                          'id': game.get('href').split('/')[1]})
        characters.append({'gender': row.abbr.get('title'),
                           'name': cells[1].a.string,
                           'games': games})
    return characters
async def parse_tag_results(soup):
    """
    Parse a page of tag or trait search results (both share a format).

    :param soup: BS4 Class Object for the results page
    :return: A list of tag name strings; nothing else useful is on the page.
    """
    # Each 'tc3' table cell holds the tag's link; its text is the tag name.
    return [cell.a.string for cell in soup.find_all('td', class_='tc3')]
async def parse_user_results(soup):
    """
    Parse a page of user search results.

    :param soup: Bs4 Class object for the results page
    :return: A list of dictionaries containing a name and join date
    """
    # Drop the header row of the first striped results table.
    rows = list(soup.find_all('table', class_='stripe')[0].children)[1:]
    users = []
    for row in rows:
        cells = list(row.children)
        users.append({'name': cells[0].a.string,
                      'joined': cells[1].string})
    return users
def tarball_files(tar_name, file_paths, output_dir='.', prefix=''):
    """
    Creates a gzipped tarball from a group of files.

    :param str tar_name: Name of tarball
    :param list[str] file_paths: Absolute file paths to include in the tarball
    :param str output_dir: Output destination for tarball
    :param str prefix: Optional prefix for files in tarball
    """
    tar_path = os.path.join(output_dir, tar_name)
    with tarfile.open(tar_path, 'w:gz') as archive:
        for path in file_paths:
            # Reject relative paths so archive members always come from a
            # known absolute location.
            if not path.startswith('/'):
                raise ValueError('Path provided is relative not absolute.')
            archive.add(path, arcname=prefix + os.path.basename(path))
def __forall_files(file_paths, output_dir, op):
    """
    Applies a two-argument file operation to a set of files and an output directory.

    :param list[str] file_paths: Absolute file paths to process
    :param str output_dir: Output directory
    :param op: callable invoked as op(source_path, destination_path)
    """
    for src in file_paths:
        if not src.startswith('/'):
            raise ValueError('Path provided (%s) is relative not absolute.' % src)
        op(src, os.path.join(output_dir, os.path.basename(src)))
def copy_file_job(job, name, file_id, output_dir):
    """
    Job version of move_files for one file.

    :param JobFunctionWrappingJob job: passed automatically by Toil
    :param str name: Name of output file (including extension)
    :param str file_id: FileStoreID of file
    :param str output_dir: Location to place output file
    """
    # Materialise the global file into a scratch dir, then copy it out.
    scratch = job.fileStore.getLocalTempDir()
    local_path = job.fileStore.readGlobalFile(file_id, os.path.join(scratch, name))
    copy_files([local_path], output_dir)
def consolidate_tarballs_job(job, fname_to_id):
    """
    Combine the contents of separate tarballs into one.
    Subdirs within the tarball will be named the keys in **fname_to_id

    :param JobFunctionWrappingJob job: passed automatically by Toil
    :param dict[str,str] fname_to_id: Dictionary of the form: file-name-prefix=FileStoreID
    :return: The file store ID of the generated tarball
    :rtype: str
    """
    work_dir = job.fileStore.getLocalTempDir()
    # Retrieve output file paths to consolidate.
    # BUG FIX: dict.iteritems() is Python 2 only and raises AttributeError on
    # Python 3 (which this file otherwise requires); .items() works on both.
    tar_paths = []
    for fname, file_store_id in fname_to_id.items():
        p = job.fileStore.readGlobalFile(file_store_id, os.path.join(work_dir, fname + '.tar.gz'))
        tar_paths.append((p, fname))
    # I/O
    # output_name is arbitrary as this job function returns a FileStoreId
    output_name = 'foo.tar.gz'
    out_tar = os.path.join(work_dir, output_name)
    # Consolidate separate tarballs into one; each member is re-rooted under
    # <output_name>/<fname>/ so the sources stay distinguishable.
    # (out_tar is already an absolute path; the original re-joined it with
    # work_dir, which was a harmless no-op that is dropped here.)
    with tarfile.open(out_tar, 'w:gz') as f_out:
        for tar, fname in tar_paths:
            with tarfile.open(tar, 'r') as f_in:
                for tarinfo in f_in:
                    with closing(f_in.extractfile(tarinfo)) as f_in_file:
                        tarinfo.name = os.path.join(output_name, fname, os.path.basename(tarinfo.name))
                        f_out.addfile(tarinfo, fileobj=f_in_file)
    return job.fileStore.writeGlobalFile(out_tar)
def _make_parameters(master_ip, default_parameters, memory, arguments, override_parameters):
    """
    Makes a Spark Submit style job submission line.

    :param masterIP: The Spark leader IP address.
    :param default_parameters: Application specific Spark configuration parameters.
    :param memory: The memory to allocate to each Spark driver and executor.
    :param arguments: Arguments to pass to the submitted job.
    :param override_parameters: Parameters passed by the user, that override our defaults.

    :type masterIP: MasterAddress
    :type default_parameters: list of string
    :type arguments: list of string
    :type memory: int or None
    :type override_parameters: list of string or None
    """
    # Exactly one of memory / override_parameters must be supplied; Python
    # has no logical xor operator, hence the two-clause check.
    require((override_parameters is not None or memory is not None) and
            (override_parameters is None or memory is None),
            "Either the memory setting must be defined or you must provide Spark configuration parameters.")
    if memory is not None:
        # No user overrides: build our default Spark configuration.
        parameters = ["--master", "spark://%s:%s" % (master_ip, SPARK_MASTER_PORT),
                      "--conf", "spark.driver.memory=%sg" % memory,
                      "--conf", "spark.executor.memory=%sg" % memory,
                      "--conf", ("spark.hadoop.fs.default.name=hdfs://%s:%s" % (master_ip, HDFS_MASTER_PORT))]
    else:
        parameters = list(override_parameters)
    # Tool-specific spark parameters, then the '--' separator spark-submit
    # expects between spark conf arguments and tool arguments.
    parameters.extend(default_parameters)
    parameters.append('--')
    parameters.extend(arguments)
    return parameters
def call_conductor(job, master_ip, src, dst, memory=None, override_parameters=None):
    """
    Invokes the Conductor container to copy files between S3 and HDFS and vice versa.
    Find Conductor at https://github.com/BD2KGenomics/conductor.

    :param toil.Job.job job: The Toil Job calling this function
    :param masterIP: The Spark leader IP address.
    :param src: URL of file to copy.
    :param dst: URL of location to copy file to.
    :param memory: Gigabytes of memory to provision for Spark driver/worker.
    :param override_parameters: Parameters passed by the user, that override our defaults.

    :type masterIP: MasterAddress
    :type src: string
    :type dst: string
    :type memory: int or None
    :type override_parameters: list of string or None
    """
    # DOC FIX: the original docstring documented ':param src:' twice; the
    # second entry is the destination URL.
    arguments = ["-C", src, dst]
    # NOTE(review): master_ip.docker_parameters([...]) returns a list, so this
    # nests a list inside docker_parameters instead of extending it. It
    # matches call_adam's usage, so it may be intentional — confirm against
    # dockerCall's expected dockerParameters format.
    docker_parameters = ['--log-driver', 'none', master_ip.docker_parameters(["--net=host"])]
    dockerCall(job=job,
               tool="quay.io/ucsc_cgl/conductor",
               parameters=_make_parameters(master_ip,
                                           [],  # no conductor specific spark configuration
                                           memory,
                                           arguments,
                                           override_parameters),
               dockerParameters=docker_parameters)
def call_adam(job, master_ip, arguments,
              memory=None,
              override_parameters=None,
              run_local=False,
              native_adam_path=None):
    """
    Invokes the ADAM container. Find ADAM at https://github.com/bigdatagenomics/adam.

    :param toil.Job.job job: The Toil Job calling this function
    :param masterIP: The Spark leader IP address.
    :param arguments: Arguments to pass to ADAM.
    :param memory: Gigabytes of memory to provision for Spark driver/worker.
    :param override_parameters: Parameters passed by the user, that override our defaults.
    :param native_adam_path: Path to ADAM executable. If not provided, Docker is used.
    :param run_local: If true, runs Spark with the --master local[*] setting, which uses
      all cores on the local machine. The master_ip will be disregarded.

    :type masterIP: MasterAddress
    :type arguments: list of string
    :type memory: int or None
    :type override_parameters: list of string or None
    :type native_adam_path: string or None
    :type run_local: boolean
    """
    # Choose the Spark master: the local machine, or the provided leader
    # address plus the HDFS default filesystem that goes with it.
    if run_local:
        master = ["--master", "local[*]"]
    else:
        master = ["--master",
                  ("spark://%s:%s" % (master_ip, SPARK_MASTER_PORT)),
                  "--conf", ("spark.hadoop.fs.default.name=hdfs://%s:%s" % (master_ip, HDFS_MASTER_PORT)),]
    default_params = (master + [
        # set max result size to unlimited, see #177
        "--conf", "spark.driver.maxResultSize=0",
        # these memory tuning parameters were derived in the course of running the
        # experiments for the ADAM sigmod paper:
        #
        # Nothaft, Frank Austin, et al. "Rethinking data-intensive science using scalable
        # analytics systems." Proceedings of the 2015 ACM SIGMOD International Conference
        # on Management of Data. ACM, 2015.
        #
        # the memory tunings reduce the amount of memory dedicated to caching, which we don't
        # take advantage of, and the network timeout flag reduces the number of job failures
        # caused by heavy gc load
        "--conf", "spark.storage.memoryFraction=0.3",
        "--conf", "spark.storage.unrollFraction=0.1",
        "--conf", "spark.network.timeout=300s"])
    # are we running adam via docker, or do we have a native path?
    if native_adam_path is None:
        docker_parameters = ['--log-driver', 'none', master_ip.docker_parameters(["--net=host"])]
        dockerCall(job=job,
                   tool="quay.io/ucsc_cgl/adam:962-ehf--6e7085f8cac4b9a927dc9fb06b48007957256b80",
                   dockerParameters=docker_parameters,
                   parameters=_make_parameters(master_ip,
                                               default_params,
                                               memory,
                                               arguments,
                                               override_parameters))
    else:
        # Native execution invokes adam-submit directly; note that memory and
        # override_parameters are not applied on this path.
        check_call([os.path.join(native_adam_path, "bin/adam-submit")] +
                   default_params +
                   arguments)
def docker_parameters(self, docker_parameters=None):
    """
    Augment a list of "docker run" arguments with those needed to map the notional Spark master address to the
    real one, if they are different.
    """
    # Only add the host mapping when this (notional) address differs from
    # the actual one.
    if self != self.actual:
        mapping = '--add-host=spark-master:' + self.actual
        if docker_parameters is None:
            docker_parameters = [mapping]
        else:
            docker_parameters.append(mapping)
    return docker_parameters
def refresh(self):
    """Refresh reloads data from the server. It raises an error if it fails to get the object's metadata"""
    # Re-read this object's path and cache the returned metadata locally.
    response = self.db.read(self.path)
    self.metadata = response.json()
def set(self, property_dict):
    """Attempts to set the given properties of the object.

    An example of this is setting the nickname of the object::

        cdb.set({"nickname": "My new nickname"})

    note that there is a convenience property `cdb.nickname` that allows you to get/set the nickname directly.
    """
    # Push the update to the server and cache the metadata it returns.
    updated = self.db.update(self.path, property_dict)
    self.metadata = updated.json()
def run_mutect(job, normal_bam, normal_bai, tumor_bam, tumor_bai, ref, ref_dict, fai, cosmic, dbsnp):
    """
    Calls MuTect to perform variant analysis

    :param JobFunctionWrappingJob job: passed automatically by Toil
    :param str normal_bam: Normal BAM FileStoreID
    :param str normal_bai: Normal BAM index FileStoreID
    :param str tumor_bam: Tumor BAM FileStoreID
    :param str tumor_bai: Tumor BAM Index FileStoreID
    :param str ref: Reference genome FileStoreID
    :param str ref_dict: Reference dictionary FileStoreID
    :param str fai: Reference index FileStoreID
    :param str cosmic: Cosmic VCF FileStoreID
    :param str dbsnp: DBSNP VCF FileStoreID
    :return: MuTect output (tarball) FileStoreID
    :rtype: str
    """
    work_dir = job.fileStore.getLocalTempDir()
    # Materialise every input into the scratch dir under a fixed name so the
    # container can address them by path.
    file_ids = [normal_bam, normal_bai, tumor_bam, tumor_bai, ref, fai, ref_dict, cosmic, dbsnp]
    file_names = ['normal.bam', 'normal.bai', 'tumor.bam', 'tumor.bai', 'ref.fasta',
                  'ref.fasta.fai', 'ref.dict', 'cosmic.vcf', 'dbsnp.vcf']
    for file_store_id, name in zip(file_ids, file_names):
        job.fileStore.readGlobalFile(file_store_id, os.path.join(work_dir, name))
    # Call: MuTect
    # NOTE(review): '--reference_sequence' uses a bare 'ref.fasta' while the
    # other inputs use '/data/...' — presumably the container's working
    # directory is /data so both resolve; confirm against dockerCall.
    parameters = ['--analysis_type', 'MuTect',
                  '--reference_sequence', 'ref.fasta',
                  '--cosmic', '/data/cosmic.vcf',
                  '--dbsnp', '/data/dbsnp.vcf',
                  '--input_file:normal', '/data/normal.bam',
                  '--input_file:tumor', '/data/tumor.bam',
                  '--tumor_lod', str(10),  # Taken from MC3 pipeline
                  '--initial_tumor_lod', str(4.0),  # Taken from MC3 pipeline
                  '--out', 'mutect.out',
                  '--coverage_file', 'mutect.cov',
                  '--vcf', 'mutect.vcf']
    dockerCall(job=job, workDir=work_dir, parameters=parameters,
               tool='quay.io/ucsc_cgl/mutect:1.1.7--e8bf09459cf0aecb9f55ee689c2b2d194754cbd3')
    # Bundle the three MuTect outputs into one tarball and write it to the
    # file store.
    output_file_names = ['mutect.vcf', 'mutect.cov', 'mutect.out']
    output_file_paths = [os.path.join(work_dir, x) for x in output_file_names]
    tarball_files('mutect.tar.gz', file_paths=output_file_paths, output_dir=work_dir)
    return job.fileStore.writeGlobalFile(os.path.join(work_dir, 'mutect.tar.gz'))
def run_pindel(job, normal_bam, normal_bai, tumor_bam, tumor_bai, ref, fai):
    """
    Calls Pindel to compute indels / deletions

    :param JobFunctionWrappingJob job: Passed automatically by Toil
    :param str normal_bam: Normal BAM FileStoreID
    :param str normal_bai: Normal BAM index FileStoreID
    :param str tumor_bam: Tumor BAM FileStoreID
    :param str tumor_bai: Tumor BAM Index FileStoreID
    :param str ref: Reference genome FileStoreID
    :param str fai: Reference index FileStoreID
    :return: Pindel output (tarball) FileStoreID
    :rtype: str
    """
    work_dir = job.fileStore.getLocalTempDir()
    # Materialise every input into the scratch dir under a fixed name so the
    # container can address them by path.
    file_ids = [normal_bam, normal_bai, tumor_bam, tumor_bai, ref, fai]
    file_names = ['normal.bam', 'normal.bai', 'tumor.bam', 'tumor.bai', 'ref.fasta', 'ref.fasta.fai']
    for file_store_id, name in zip(file_ids, file_names):
        job.fileStore.readGlobalFile(file_store_id, os.path.join(work_dir, name))
    # Create Pindel config: one line per BAM with its mean insert size and a
    # sample label, as Pindel's config format requires.
    with open(os.path.join(work_dir, 'pindel-config.txt'), 'w') as f:
        for bam in ['normal', 'tumor']:
            f.write('/data/{} {} {}\n'.format(bam + '.bam', get_mean_insert_size(work_dir, bam + '.bam'), bam))
    # Call: Pindel
    parameters = ['-f', '/data/ref.fasta',
                  '-i', '/data/pindel-config.txt',
                  '--number_of_threads', str(job.cores),
                  '--minimum_support_for_event', '3',
                  '--report_long_insertions', 'true',
                  '--report_breakpoints', 'true',
                  '-o', 'pindel']
    dockerCall(job=job, tool='quay.io/ucsc_cgl/pindel:0.2.5b6--4e8d1b31d4028f464b3409c6558fb9dfcad73f88',
               workDir=work_dir, parameters=parameters)
    # Collect all pindel* output files, tar them up, and write to file store.
    output_files = glob(os.path.join(work_dir, 'pindel*'))
    tarball_files('pindel.tar.gz', file_paths=output_files, output_dir=work_dir)
    return job.fileStore.writeGlobalFile(os.path.join(work_dir, 'pindel.tar.gz'))
def create(self, public=False, **kwargs):
    """Creates the device. Attempts to create private devices by default,
    but if public is set to true, creates public devices.

    You can also set other default properties by passing in the relevant
    information, e.g. a nickname and description::

        dev.create(nickname="mydevice", description="This is an example")

    Furthermore, ConnectorDB supports creation of a device's streams
    immediately, which can considerably speed up device setup::

        dev.create(streams={
            "stream1": {"schema": '{\"type\":\"number\"}'}
        })

    Note that the schema must be encoded as a string when creating in this format.
    """
    # Fold the public flag into the property payload and create server-side.
    properties = dict(kwargs)
    properties["public"] = public
    self.metadata = self.db.create(self.path, properties).json()
def streams(self):
    """Returns the list of streams that belong to the device"""
    # 'ls' query lists the device's streams; treat a missing/empty response
    # as "no streams".
    result = self.db.read(self.path, {"q": "ls"})
    if result is None or result.json() is None:
        return []
    found = []
    for entry in result.json():
        stream = self[entry["name"]]
        stream.metadata = entry
        found.append(stream)
    return found
def export(self, directory):
    """Exports the device to the given directory. The directory can't exist.
    You can later import this device by running import_device on a user.
    """
    if os.path.exists(directory):
        raise FileExistsError(
            "The device export directory already exists")
    os.mkdir(directory)
    # Dump the device metadata, then each stream into its own subdirectory.
    with open(os.path.join(directory, "device.json"), "w") as f:
        json.dump(self.data, f)
    for stream in self.streams():
        stream.export(os.path.join(directory, stream.name))
def import_stream(self, directory):
    """Imports a stream from the given directory. You export the Stream
    by using stream.export()"""
    # read the stream's info
    with open(os.path.join(directory, "stream.json"), "r") as f:
        sdata = json.load(f)
    s = self[sdata["name"]]
    if s.exists():
        raise ValueError("The stream " + s.name + " already exists")
    # Create the stream empty first, so we can insert all the data without
    # worrying about schema violations or downlinks
    s.create()
    # Now, in order to insert data into this stream, we must be logged in as
    # the owning device
    ddb = DatabaseConnection(self.apikey, url=self.db.baseurl)
    d = Device(ddb, self.path)
    # Set up the owning device
    sown = d[s.name]
    # Replay the exported datapoints as the owning device.
    sown.insert_array(DatapointArray().loadExport(directory))
    # Now we MIGHT be able to recover the downlink data,
    # only if we are not logged in as the device that the stream is being inserted into
    # So we check. When downlink is true, data is inserted into the
    # downlink stream
    if (sdata["downlink"] and self.db.path != self.path):
        s.downlink = True
        with open(os.path.join(directory, "downlink.json"), "r") as f:
            s.insert_array(json.load(f))
    # And finally, update the stream's remaining properties ('name' is fixed
    # at creation, so it is removed from the payload).
    del sdata["name"]
    s.set(sdata)
async def search_vndb(self, stype, term):
    """
    Search vndb.org for a term and return matching results from type.

    :param stype: type to search for.
        Type should be one of:
            v - Visual Novels
            r - Releases
            p - Producers
            s - Staff
            c - Characters
            g - Tags
            i - traits
            u - Users
    :param term: string to search for
    :return: Results. Result format depends on what you searched for. See the Parsing.py module for more specific documentation.

    Exceptions:
        aiohttp.HttpBadRequest - On 404s
        VNDBOneResult - When you search for something but it instead redirects us to a direct content page
        VNDBNoResults - When nothing was found for that search
        VNDBBadStype - Raised when an incorrect search type is passed
    """
    fstype = ""
    if stype not in ['v', 'r', 'p', 's', 'c', 'g', 'i', 'u']:
        raise VNDBBadStype(stype)
    else:
        # Map the search type to the site's URL layout: most types live under
        # /{type}/all, tags and traits under /{type}/list, releases under /r.
        if stype in ['v', 'p', 's', 'c', 'u']:
            fstype = '/{}/all'.format(stype)
        elif stype in ['g', 'i']:
            fstype = '/{}/list'.format(stype)
        elif stype == 'r':
            fstype = '/r'
    async with self.session.get(self.base_url + "{}".format(fstype), params={"q": term}, headers=self.headers) as response:
        if response.status == 404:
            raise aiohttp.HttpBadRequest("VN Not Found")
        # If the query redirected straight to a content page, the final URL
        # has no 'q=' parameter; surface the single hit's id instead.
        # NOTE(review): this treats response.url as a string — newer aiohttp
        # returns a URL object; confirm the pinned aiohttp version.
        elif 'q=' not in response.url:
            raise VNDBOneResult(term, response.url.rsplit('/', 1)[1])
        text = await response.text()
        if 'No Results' in text:
            raise VNDBNoResults(term)
        soup = BeautifulSoup(text, 'lxml')
        resp = await self.parse_search(stype, soup)
        if resp == []:
            raise VNDBNoResults(term)
        return resp
async def get_novel(self, term, hide_nsfw=False):
    """
    If term is an ID will return that specific ID. If it's a string, it will return the details of the first search result for that term.

    Returned Dictionary Has the following structure:
    Please note, if it says list or dict, it means the python types.
    Indentation indicates level. So English is ['Titles']['English']

    'Titles' - Contains all the titles found for the anime
        'English' - English title of the novel
        'Alt' - Alternative title (Usually the Japanese one, but other languages exist)
        'Aliases' - A list of str that define the aliases as given in VNDB.
    'Img' - Link to the Image shown on VNDB for that Visual Novel
    'Length' - Length given by VNDB
    'Developers' - A list containing the Developers of the VN.
    'Publishers' - A list containing the Publishers of the VN.
    'Tags' - Contains 3 lists of different tag categories
        'Content' - List of tags that have to do with the story's content as defined by VNDB. Ex: Edo Era
        'Technology' - List of tags that have to do with the VN's technology. Ex: Protagonist with a Face
        'Erotic' - List of tags that have to do with the VN's sexual content. Ex: Tentacles
    'Releases' - A list of dictionaries. They have the following format.
        'Date' - Date VNDB lists for release
        'Ages' - Age group appropriate for as determined on VNDB
        'Platform' - Release Platform
        'Name' - The name for this particular Release
        'ID' - The id for this release, also doubles as the link if you append https://vndb.org/ to it
    'Description' - Contains novel description text if there is any.
    'ID' - The id for this novel, also doubles as the link if you append https://vndb.org/ to it

    :param term: id or name to get details of.
    :param hide_nsfw: bool if 'Img' should filter links flagged as NSFW or not. (no reason to be kwargs...yet)
    :return dict: Dictionary with the parsed results of a novel
    """
    # Resolve a free-text term to a vn id via search; a direct redirect to a
    # content page raises VNDBOneResult carrying the id.
    if not term.isdigit() and not term.startswith('v'):
        try:
            vnid = await self.search_vndb('v', term)
            vnid = vnid[0]['id']
        except VNDBOneResult as e:
            vnid = e.vnid
    else:
        vnid = str(term)
    if not vnid.startswith('v'):
        vnid = 'v' + vnid
    async with self.session.get(self.base_url + "/{}".format(vnid), headers=self.headers) as response:
        if response.status == 404:
            raise aiohttp.HttpBadRequest("VNDB reported that there is no data for ID {}".format(vnid))
        text = await response.text()
        soup = BeautifulSoup(text, 'lxml')
        data = {'titles': {'english': [], 'alt': [], 'aliases': []}, 'img': None, 'length': None, 'developers': [], 'publishers': [], 'tags': {}, 'releases': {}, 'id': vnid}
        # Page header: main title plus an optional alternate-language title.
        data['titles']['english'] = soup.find_all('div', class_='mainbox')[0].h1.string
        try:
            data['titles']['alt'] = soup.find_all('h2', class_='alttitle')[0].string
        except IndexError:
            data['titles']['alt'] = None
        # Cover image; the presence of a class on the containing <p> marks it
        # NSFW, which hide_nsfw filters out.
        try:
            imgdiv = soup.find_all('div', class_='vnimg')[0]
            if not (hide_nsfw and 'class' in imgdiv.p.attrs):
                data['img'] = 'https:' + imgdiv.img.get('src')
        except AttributeError:
            pass
        # Walk the info-table rows; classed rows and single-cell rows are
        # section headers/spacers, not data.
        for item in soup.find_all('tr'):
            if 'class' in item.attrs or len(list(item.children)) == 1:
                continue
            if item.td.string == 'Aliases':
                tlist = []
                for alias in list(item.children)[1:]:
                    tlist.append(alias.string)
                data['titles']['aliases'] = tlist
            elif item.td.string == 'Length':
                data['length'] = list(item.children)[1].string
            elif item.td.string == 'Developer':
                tl = []
                # NOTE: the inner loop reuses the name 'item', ending the
                # outer row's use of it for this branch.
                for item in list(list(item.children)[1].children):
                    if isinstance(item, NavigableString):
                        continue
                    if 'href' in item.attrs:
                        tl.append(item.string)
                data['developers'] = tl
                del tl
            elif item.td.string == 'Publishers':
                tl = []
                for item in list(list(item.children)[1].children):
                    if isinstance(item, NavigableString):
                        continue
                    if 'href' in item.attrs:
                        tl.append(item.string)
                data['publishers'] = tl
        # Tags are bucketed by the 'cont'/'tech'/'ero' markers in their CSS
        # classes; empty buckets become None.
        conttags = []
        techtags = []
        erotags = []
        test = soup.find('div', attrs={'id': 'vntags'})
        if test:
            for item in list(test.children):
                if isinstance(item, NavigableString):
                    continue
                if 'class' not in item.attrs:
                    continue
                if 'cont' in " ".join(item.get('class')):
                    conttags.append(item.a.string)
                if 'tech' in " ".join(item.get('class')):
                    techtags.append(item.a.string)
                if 'ero' in " ".join(item.get('class')):
                    erotags.append(item.a.string)
        data['tags']['content'] = conttags if len(conttags) else None
        data['tags']['technology'] = techtags if len(techtags) else None
        data['tags']['erotic'] = erotags if len(erotags) else None
        del conttags
        del techtags
        del erotags
        # Releases are grouped by language: a classed row starts a new
        # language section, unclassed rows are the releases under it.
        releases = []
        cur_lang = None
        for item in list(soup.find('div', class_='mainbox releases').table.children):
            if isinstance(item, NavigableString):
                continue
            if 'class' in item.attrs:
                if cur_lang is None:
                    cur_lang = item.td.abbr.get('title')
                else:
                    data['releases'][cur_lang] = releases
                    releases = []
                    cur_lang = item.td.abbr.get('title')
            else:
                temp_rel = {'date': 0, 'ages': 0, 'platform': 0, 'name': 0, 'id': 0}
                children = list(item.children)
                temp_rel['date'] = children[0].string
                temp_rel['ages'] = children[1].string
                temp_rel['platform'] = children[2].abbr.get('title')
                temp_rel['name'] = children[3].a.string
                temp_rel['id'] = children[3].a.get('href')[1:]
                del children
                releases.append(temp_rel)
                del temp_rel
        # Flush the final language section.
        if len(releases) > 0 and cur_lang is not None:
            data['releases'][cur_lang] = releases
        del releases
        del cur_lang
        # Description: keep only plain text nodes, dropping spoiler/source
        # markers wrapped in square brackets.
        desc = ""
        for item in list(soup.find_all('td', class_='vndesc')[0].children)[1].contents:
            if not isinstance(item, NavigableString):
                continue
            if item.startswith('['):
                continue
            if item.endswith(']'):
                continue
            desc += item.string + "\n"
        data['description'] = desc
        return data
async def parse_search(self, stype, soup):
    """
    This is our parsing dispatcher.

    :param stype: Search type category
    :param soup: The beautifulsoup object that contains the parsed html
    """
    # Map each search type to its parser coroutine; producers and staff share
    # one format, as do tags and traits.
    dispatch = {
        'v': parse_vn_results,
        'r': parse_release_results,
        'p': parse_prod_staff_results,
        's': parse_prod_staff_results,
        'c': parse_character_results,
        'g': parse_tag_results,
        'i': parse_tag_results,
        'u': parse_user_results,
    }
    parser = dispatch.get(stype)
    if parser is None:
        # Unknown type: mirror the original's implicit None return.
        return None
    return await parser(soup)
def addStream(self, stream, interpolator="closest", t1=None, t2=None, dt=None, limit=None, i1=None, i2=None, transform=None, colname=None):
    """Adds the given stream to the query construction. Additionally, you can choose the interpolator to use for this stream, as well as a special name
    for the column in the returned dataset. If no column name is given, the full stream path will be used.

    addStream also supports Merge queries. You can insert a merge query instead of a stream, but be sure to name the column::

        d = Dataset(cdb, t1=time.time()-1000,t2=time.time(),dt=10.)
        d.addStream("temperature","average")
        d.addStream("steps","sum")

        m = Merge(cdb)
        m.addStream("mystream")
        m.addStream("mystream2")
        d.addStream(m,colname="mycolumn")

        result = d.run()
    """
    streamquery = query_maker(t1, t2, limit, i1, i2, transform)
    param_stream(self.cdb, streamquery, stream)
    streamquery["interpolator"] = interpolator
    if colname is None:
        # No explicit column name: derive it from the stream itself.
        if isinstance(stream, six.string_types):
            colname = stream
        elif isinstance(stream, Stream):
            colname = stream.path
        else:
            raise Exception(
                "Could not find a name for the column! use the 'colname' parameter.")
    # BUG FIX: the original used `colname is "x"`, an identity comparison
    # against a string literal whose result depends on interning; use
    # equality to reliably reserve the 'x' column.
    if colname in self.query["dataset"] or colname == "x":
        raise Exception(
            "The column name either exists, or is labeled 'x'. Use the colname parameter to change the column name.")
    self.query["dataset"][colname] = streamquery
def reset_apikey(self):
    """invalidates the device's current api key, and generates a new one. Resets current auth to use the new apikey,
    since the change would have future queries fail if they use the old api key."""
    # Rotate the key server-side, then switch our own auth to the new key so
    # subsequent requests keep working.
    new_key = Device.reset_apikey(self)
    self.db.setauth(new_key)
    return new_key
def info(self):
    """returns a dictionary of information about the database, including the database version, the transforms
    and the interpolators supported::

        >>>cdb = connectordb.ConnectorDB(apikey)
        >>>cdb.info()
        {
            "version": "0.3.0",
            "transforms": {
                "sum": {"description": "Returns the sum of all the datapoints that go through the transform"}
                ...
            },
            "interpolators": {
                "closest": {"description": "Uses the datapoint closest to the interpolation timestamp"}
                ...
            }
        }
    """
    # Three lightweight meta endpoints; version is plain text, the other two
    # are JSON maps.
    version = self.db.get("meta/version").text
    transforms = self.db.get("meta/transforms").json()
    interpolators = self.db.get("meta/interpolators").json()
    return {"version": version,
            "transforms": transforms,
            "interpolators": interpolators}
def users(self):
    """Returns the list of users in the database"""
    # 'ls' at the root lists all users; a missing/empty response means none.
    result = self.db.read("", {"q": "ls"})
    if result is None or result.json() is None:
        return []
    found = []
    for entry in result.json():
        user = self(entry["name"])
        user.metadata = entry
        found.append(user)
    return found
def import_users(self, directory):
    """Imports version 1 of ConnectorDB export. These exports can be generated
    by running user.export(dir), possibly on multiple users.
    """
    # The export root carries a version marker; only format version 1 is
    # understood here.
    exportInfoFile = os.path.join(directory, "connectordb.json")
    with open(exportInfoFile) as f:
        exportInfo = json.load(f)
    if exportInfo["Version"] != 1:
        raise ValueError("Not able to read this import version")
    # Now we list all the user directories
    for name in os.listdir(directory):
        udir = os.path.join(directory, name)
        if os.path.isdir(udir):
            # Let's read in the user
            with open(os.path.join(udir, "user.json")) as f:
                usrdata = json.load(f)
            u = self(usrdata["name"])
            if u.exists():
                raise ValueError("The user " + name + " already exists")
            del usrdata["name"]
            # NOTE(review): the imported user's password is set to the
            # directory name — confirm this is the intended convention for
            # imported accounts.
            u.create(password=name, **usrdata)
            # Now read all of the user's devices
            for dname in os.listdir(udir):
                ddir = os.path.join(udir, dname)
                if os.path.isdir(ddir):
                    u.import_device(ddir)
def run_bwa_index(job, ref_id):
    """
    Use BWA to create reference index files.

    :param JobFunctionWrappingJob job: passed automatically by Toil
    :param str ref_id: FileStoreID for the reference genome
    :return: FileStoreIDs for BWA index files
    :rtype: tuple(str, str, str, str, str)
    """
    job.fileStore.logToMaster('Created BWA index files')
    work_dir = job.fileStore.getLocalTempDir()
    job.fileStore.readGlobalFile(ref_id, os.path.join(work_dir, 'ref.fa'))
    dockerCall(job=job, workDir=work_dir, parameters=['index', '/data/ref.fa'],
               tool='quay.io/ucsc_cgl/bwa:0.7.12--256539928ea162949d8a65ca5c79a72ef557ce7c')
    # Collect the five index files bwa produces, keyed by file extension.
    suffixes = ('amb', 'ann', 'bwt', 'pac', 'sa')
    ids = {suffix: job.fileStore.writeGlobalFile(os.path.join(work_dir, 'ref.fa.' + suffix))
           for suffix in suffixes}
    return ids['amb'], ids['ann'], ids['bwt'], ids['pac'], ids['sa']
def connectordb(self):
    """Returns the ConnectorDB object that the logger uses. Raises an error if Logger isn't able to connect"""
    # Lazily open the connection on first access and cache it.
    if self.__cdb is None:
        logging.debug("Logger: Connecting to " + self.serverurl)
        self.__cdb = ConnectorDB(self.apikey, url=self.serverurl)
    return self.__cdb
def addStream(self, streamname, schema=None, **kwargs):
    """Adds the given stream to the logger. Requires an active connection to the ConnectorDB database.

    If a schema is not specified, loads the stream from the database. If a schema is specified, and the stream
    does not exist, creates the stream. You can also add stream properties such as description or nickname to be added
    during creation."""
    stream = self.connectordb[streamname]
    if not stream.exists():
        # A missing stream can only be created when a schema was supplied.
        if schema is None:
            raise Exception(
                "The stream '%s' was not found" % (streamname, ))
        stream.create(schema, **kwargs)
    self.addStream_force(streamname, stream.schema)
def addStream_force(self, streamname, schema=None):
    """This function adds the given stream to the logger, but does not check with a ConnectorDB database
    to make sure that the stream exists. Use at your own risk."""
    # Persist the stream/schema pair, then mirror it in the in-memory map.
    cursor = self.database.cursor()
    cursor.execute("INSERT OR REPLACE INTO streams VALUES (?,?);",
                   (streamname, json.dumps(schema)))
    self.streams[streamname] = schema
def insert(self, streamname, value):
    """Insert the datapoint into the logger for the given stream name. The logger caches the datapoint
    and eventually synchronizes it with ConnectorDB"""
    if streamname not in self.streams:
        raise Exception("The stream '%s' was not found" % (streamname, ))
    # Validate against the stream's schema before caching anything.
    validate(value, self.streams[streamname])
    # The datapoint fits the schema - cache it with the current timestamp.
    encoded = json.dumps(value)
    logging.debug("Logger: %s <= %s" % (streamname, encoded))
    cursor = self.database.cursor()
    cursor.execute("INSERT INTO cache VALUES (?,?,?);",
                   (streamname, time.time(), encoded))
def insert_many(self, data_dict):
    """ Inserts data into the cache, if the data is a dict of the form {streamname: [{"t": timestamp,"d":data,...]}"""
    c = self.database.cursor()
    # All streams' datapoints go in as a single transaction: either the whole
    # batch is cached or none of it is.
    c.execute("BEGIN TRANSACTION;")
    try:
        for streamname in data_dict:
            if streamname not in self.streams:
                raise Exception(
                    "The stream '%s' was not found" % (streamname, ))
            for dp in data_dict[streamname]:
                validate(dp["d"], self.streams[streamname])
                # NOTE(review): unlike insert(), the datapoint is stored
                # without json.dumps — confirm whether non-scalar data should
                # be encoded here as well.
                c.execute("INSERT INTO cache VALUES (?,?,?);",
                          (streamname, dp["t"], dp["d"]))
    except:
        # Any failure (including KeyboardInterrupt) aborts the whole batch.
        c.execute("ROLLBACK;")
        raise
    # BUG FIX: this was `c.exectute("COMMIT;")` — a typo that raised
    # AttributeError after all inserts and left the transaction uncommitted.
    c.execute("COMMIT;")
def sync(self):
    """Attempt to sync with the ConnectorDB server.

    Cached datapoints are uploaded per stream (in insertion-size-limited
    chunks) and removed from the local cache once written. If the sync
    fails, the optional ``onsyncfail`` callback decides whether the
    exception is re-raised; otherwise ``self.syncraise`` decides.
    """
    logging.debug("Logger: Syncing...")
    failed = False
    try:
        # Get the connectordb object
        cdb = self.connectordb
        # Ping the database - most connection errors will happen here
        cdb.ping()
        with self.synclock:
            c = self.database.cursor()
            for stream in self.streams:
                s = cdb[stream]
                c.execute(
                    "SELECT * FROM cache WHERE stream=? ORDER BY timestamp ASC;",
                    (stream, ))
                datapointArray = []
                for dp in c.fetchall():
                    datapointArray.append(
                        {"t": dp[1],
                         "d": json.loads(dp[2])})
                # First, check if the data already inserted has newer timestamps,
                # and in that case, assume that there was an error, and remove the datapoints
                # with an older timestamp, so that we don't have an error when syncing
                if len(s) > 0:
                    newtime = s[-1]["t"]
                    while (len(datapointArray) > 0 and datapointArray[0]["t"] < newtime):
                        logging.debug("Datapoint exists with older timestamp. Removing the datapoint.")
                        datapointArray = datapointArray[1:]
                if len(datapointArray) > 0:
                    logging.debug("%s: syncing %i datapoints" %
                                  (stream, len(datapointArray)))
                    while (len(datapointArray) > DATAPOINT_INSERT_LIMIT):
                        # We insert datapoints in chunks of a couple
                        # thousand so that they fit in the insert size
                        # limit of ConnectorDB
                        s.insert_array(
                            datapointArray[:DATAPOINT_INSERT_LIMIT])
                        # Clear the written datapoints
                        datapointArray = datapointArray[
                            DATAPOINT_INSERT_LIMIT:]
                        # If there was no error inserting, delete the
                        # datapoints from the cache
                        c.execute(
                            "DELETE FROM cache WHERE stream=? AND timestamp <?",
                            (stream, datapointArray[0]["t"]))
                    s.insert_array(datapointArray)
                    # If there was no error inserting, delete the
                    # datapoints from the cache
                    c.execute(
                        "DELETE FROM cache WHERE stream=? AND timestamp <=?",
                        (stream, datapointArray[-1]["t"]))
            self.lastsynctime = time.time()
            if self.onsync is not None:
                self.onsync()
    except Exception as e:
        # Handle the sync failure callback
        # BUG FIX: was misspelled 'falied', so the 'failed' flag
        # initialized above was never actually set on failure.
        failed = True
        reraise = self.syncraise
        if self.onsyncfail is not None:
            reraise = self.onsyncfail(e)
        if reraise:
            raise
def start(self):
    """Start the logger background synchronization service. This allows you to not need to
    worry about syncing with ConnectorDB - you just insert into the Logger, and the Logger
    will be synced every syncperiod."""
    with self.synclock:
        if self.syncthread is not None:
            # logging.warn is a deprecated alias of logging.warning
            logging.warning(
                "Logger: Start called on a syncer that is already running")
            return
    # Must run after releasing synclock: sync() acquires it itself
    self.sync()  # Attempt a sync right away
    self.__setsync()
def stop(self):
    """Cancel the background synchronization thread, if one is running."""
    with self.synclock:
        if self.syncthread is None:
            return
        self.syncthread.cancel()
        self.syncthread = None
def data(self):
    """The data property lets the user save settings/data in the logger's
    own database, so there is no need for separate configuration files.
    Anything JSON-serializable can be stored::
        from connectordb.logger import Logger
        l = Logger("log.db")
        l.data = {"hi": 56}
        # prints the data dictionary
        print l.data
    """
    cursor = self.database.cursor()
    cursor.execute("SELECT userdatajson FROM metadata;")
    row = next(cursor)
    return json.loads(row[0])
def read(*paths):
    """Join *paths* into a single path and return that file's contents,
    decoded as UTF-8."""
    target = os.path.join(*paths)
    with codecs.open(target, mode='r', encoding='utf-8') as fp:
        return fp.read()
def download_url(job, url, work_dir='.', name=None, s3_key_path=None, cghub_key_path=None):
    """
    Downloads a URL. Supported schemes: file://, http://, s3://, ftp://,
    gnos://cghub/analysisID, and gnos:///analysisID.
    If downloading S3 URLs, the S3AM binary must be on the PATH
    :param toil.job.Job job: Toil job that is calling this function
    :param str url: URL to download from
    :param str work_dir: Directory to download file to
    :param str name: Name of output file, if None, basename of URL is used
    :param str s3_key_path: Path to 32-byte encryption key if url points to S3 file that uses SSE-C
    :param str cghub_key_path: Path to cghub key used to download from CGHub.
    :return: Path to the downloaded file
    :rtype: str
    """
    basename = name if name else os.path.basename(url)
    file_path = os.path.join(work_dir, basename)
    scheme = urlparse(url).scheme
    if cghub_key_path:
        _download_with_genetorrent(job, url, file_path, cghub_key_path)
    elif scheme == 's3':
        _s3am_with_retry(job, num_cores=1, file_path=file_path, s3_url=url,
                         mode='download', s3_key_path=s3_key_path)
    elif scheme == 'file':
        shutil.copy(urlparse(url).path, file_path)
    else:
        subprocess.check_call(['curl', '-fs', '--retry', '5', '--create-dir', url, '-o', file_path])
    assert os.path.exists(file_path)
    return file_path
def download_url_job(job, url, name=None, s3_key_path=None, cghub_key_path=None):
    """Job version of `download_url`: downloads into a temporary directory
    and stores the file in the Toil file store, returning its FileStoreID."""
    tmp_dir = job.fileStore.getLocalTempDir()
    local_path = download_url(job=job, url=url, work_dir=tmp_dir, name=name,
                              s3_key_path=s3_key_path, cghub_key_path=cghub_key_path)
    return job.fileStore.writeGlobalFile(local_path)
def s3am_upload(job, fpath, s3_dir, num_cores=1, s3_key_path=None):
    """
    Uploads a file to S3 via S3AM; the S3AM binary must be on the PATH.
    For SSE-C encryption, provide a path to a 32-byte key file.
    :param toil.job.Job job: Toil job that is calling this function
    :param str fpath: Path to file to upload
    :param str s3_dir: Ouptut S3 path. Format: s3://bucket/[directory]
    :param int num_cores: Number of cores to use for up/download with S3AM
    :param str s3_key_path: (OPTIONAL) Path to 32-byte key to be used for SSE-C encryption
    """
    require(s3_dir.startswith('s3://'), 'Format of s3_dir (s3://) is incorrect: %s', s3_dir)
    destination = os.path.join(s3_dir, os.path.basename(fpath))
    _s3am_with_retry(job=job, num_cores=num_cores, file_path=fpath,
                     s3_url=destination, mode='upload', s3_key_path=s3_key_path)
def s3am_upload_job(job, file_id, file_name, s3_dir, s3_key_path=None):
    """Job version of s3am_upload: reads `file_id` out of the file store
    and uploads it to `s3_dir`."""
    tmp_dir = job.fileStore.getLocalTempDir()
    local_path = job.fileStore.readGlobalFile(file_id, os.path.join(tmp_dir, file_name))
    s3am_upload(job=job, fpath=local_path, s3_dir=s3_dir,
                num_cores=job.cores, s3_key_path=s3_key_path)
def _s3am_with_retry(job, num_cores, file_path, s3_url, mode='upload', s3_key_path=None):
    """
    Run s3am (inside a Docker container) with 3 retries.
    :param toil.job.Job job: Toil job that is calling this function
    :param int num_cores: Number of cores to pass to upload/download slots
    :param str file_path: Full path to the file
    :param str s3_url: S3 URL
    :param str mode: Mode to run s3am in. Either "upload" or "download"
    :param str s3_key_path: Path to the SSE-C key if using encryption
    :raises ValueError: if mode is not "upload" or "download"
    :raises RuntimeError: if all retry attempts fail

    NOTE: uses dict.iteritems and xrange, so this function is Python 2 only.
    """
    container_key_file = None
    # try to find suitable credentials
    base_boto = '.boto'
    base_aws = '.aws/credentials'
    docker_home_dir = '/root'
    # map existing credential paths to their mount point within the container
    credentials_to_mount = {os.path.join(os.path.expanduser("~"), path): os.path.join(docker_home_dir, path)
                            for path in [base_aws, base_boto]
                            if os.path.exists(os.path.join(os.path.expanduser("~"), path))}
    require(os.path.isabs(file_path), "'file_path' parameter must be an absolute path")
    dir_path, file_name = file_path.rsplit('/', 1)
    # Mirror user specified paths to simplify debugging
    container_dir_path = '/data' + dir_path
    container_file = os.path.join(container_dir_path, file_name)
    mounts = {dir_path: container_dir_path}
    if s3_key_path:
        require(os.path.isabs(s3_key_path), "'s3_key_path' parameter must be an absolute path")
        key_dir_path, key_name = s3_key_path.rsplit('/', 1)
        container_key_dir_path = '/data' + key_dir_path
        container_key_file = os.path.join(container_key_dir_path, key_name)
        # if the key directory is identical to the file directory this assignment is idempotent
        mounts[key_dir_path] = container_key_dir_path
    # Make host credentials visible to the s3am process in the container
    for k, v in credentials_to_mount.iteritems():
        mounts[k] = v
    arguments = []
    url_arguments = []
    if mode == 'upload':
        arguments.extend(['upload', '--force', '--upload-slots=%s' % num_cores, '--exists=overwrite'])
        url_arguments.extend(['file://' + container_file, s3_url])
    elif mode == 'download':
        arguments.extend(['download', '--file-exists=overwrite', '--download-exists=discard'])
        url_arguments.extend([s3_url, 'file://' + container_file])
    else:
        raise ValueError('Improper mode specified. mode must be equal to "upload" or "download".')
    if s3_key_path:
        arguments.extend(['--sse-key-is-master', '--sse-key-file', container_key_file])
    # Options common to both modes
    arguments.extend(['--part-size=50M', '--download-slots=%s' % num_cores])
    # finally, add the url path arguments after all the tool parameters are set
    arguments.extend(url_arguments)
    # Pass credential-related environment variables into container
    env = {}
    if 'AWS_PROFILE' in os.environ:
        env['AWS_PROFILE'] = os.environ['AWS_PROFILE']
    # Create parameters to pass to Docker
    docker_parameters = ['--rm', '--log-driver', 'none']
    if mounts:
        for k, v in mounts.iteritems():
            docker_parameters.extend(['-v', k + ':' + v])
    if env:
        for e, v in env.iteritems():
            docker_parameters.extend(['-e', '{}={}'.format(e, v)])
    # Run s3am with retries
    retry_count = 3
    for i in xrange(retry_count):
        try:
            dockerCall(job=job, tool='quay.io/ucsc_cgl/s3am:2.0--fed932897e7fd40f4ec878362e5dd6afe15caaf0',
                       parameters=arguments, dockerParameters=docker_parameters)
        except subprocess.CalledProcessError:
            # Failed attempt: log and fall through to the next retry
            _log.debug('S3AM %s failed', mode, exc_info=True)
        else:
            _log.debug('S3AM %s succeeded', mode)
            return
    raise RuntimeError("S3AM failed to %s after %i retries with arguments %s. Enable 'debug' "
                       "level logging to see more information about the failed attempts." %
                       (mode, retry_count, arguments))
def labels(ontology, output, ols_base):
    """Write each term label of the ontology to the given file."""
    for term_label in get_labels(ontology=ontology, ols_base=ols_base):
        click.echo(term_label, file=output)
def tree(ontology, output, ols_base):
    """Write the tab-separated parent-child relations to the given file."""
    for relation in get_hierarchy(ontology=ontology, ols_base=ols_base):
        parent, child = relation
        click.echo('{}\t{}'.format(parent, child), file=output)
def get_mean_insert_size(work_dir, bam_name):
    """Function taken from MC3 Pipeline.

    Computes the mean absolute insert size (TLEN) of a BAM by streaming the
    output of `samtools view -f66` (flag 66: properly paired, first in pair)
    run inside a Docker container. Falls back to 150 when no qualifying
    reads are found.
    :param str work_dir: directory containing the BAM; mounted at /data
    :param str bam_name: name of the BAM file inside work_dir
    :rtype: int
    NOTE: Python 2 only (print statement, long()).
    NOTE(review): the BAM is addressed by its *host* path while work_dir is
    mounted at /data in the container - this only works if the two paths
    coincide; confirm against how callers invoke this.
    """
    cmd = "docker run --log-driver=none --rm -v {}:/data quay.io/ucsc_cgl/samtools " \
          "view -f66 {}".format(work_dir, os.path.join(work_dir, bam_name))
    process = subprocess.Popen(args=cmd, shell=True, stdout=subprocess.PIPE)
    b_sum = 0.0
    b_count = 0.0
    while True:
        line = process.stdout.readline()
        if not line:
            break
        tmp = line.split("\t")
        # SAM column 9 (index 8) is TLEN; ignore outliers of 10kb or more
        if abs(long(tmp[8])) < 10000:
            b_sum += abs(long(tmp[8]))
            b_count += 1
    process.wait()
    try:
        mean = b_sum / b_count
    except ZeroDivisionError:
        # No qualifying reads - use a typical default insert size
        mean = 150
    print "Using insert size: %d" % mean
    return int(mean)
def partitions(l, partition_size):
    """
    Yield successive slices of `l` of length `partition_size` (the final
    slice may be shorter).
    >>> list(partitions([], 10))
    []
    >>> list(partitions([1,2,3,4,5], 1))
    [[1], [2], [3], [4], [5]]
    >>> list(partitions([1,2,3,4,5], 2))
    [[1, 2], [3, 4], [5]]
    >>> list(partitions([1,2,3,4,5], 5))
    [[1, 2, 3, 4, 5]]
    :param list l: List to be partitioned
    :param int partition_size: Size of partitions
    """
    # range (rather than the Python-2-only xrange) behaves identically
    # here and keeps this helper portable to Python 3
    for i in range(0, len(l), partition_size):
        yield l[i:i + partition_size]
def required_length(nmin, nmax):
    """
    For use with argparse's action argument. Allows setting a range for nargs.
    Example: nargs='+', action=required_length(2, 3)
    :param int nmin: Minimum number of arguments
    :param int nmax: Maximum number of arguments
    :return: RequiredLength object
    """
    class RequiredLength(argparse.Action):
        def __call__(self, parser, args, values, option_string=None):
            count = len(values)
            if nmin <= count <= nmax:
                setattr(args, self.dest, values)
            else:
                raise argparse.ArgumentTypeError(
                    'argument "{f}" requires between {nmin} and {nmax} arguments'.format(
                        f=self.dest, nmin=nmin, nmax=nmax))
    return RequiredLength
def current_docker_container_id():
    """
    Returns a string that represents the container ID of the current Docker container. If this
    function is invoked outside of a container a NotInsideContainerError is raised.
    >>> import subprocess
    >>> import sys
    >>> a = subprocess.check_output(['docker', 'run', '-v',
    ...                              sys.modules[__name__].__file__ + ':/foo.py',
    ...                              'python:2.7.12','python', '-c',
    ...                              'from foo import current_docker_container_id;\\
    ...                              print current_docker_container_id()'])
    int call will fail if a is not a valid hex string
    >>> int(a, 16) > 0
    True
    """
    try:
        with open('/proc/1/cgroup', 'r') as readable:
            raw = readable.read()
        # Container IDs appear as long hex strings in the cgroup paths;
        # exactly one distinct ID is expected inside a container
        ids = set(re.compile('[0-9a-f]{12,}').findall(raw))
        assert len(ids) == 1
        return ids.pop()
    # Narrowed from a bare 'except:', which would also swallow
    # SystemExit and KeyboardInterrupt
    except Exception:
        logging.exception('Failed to obtain current container ID')
        raise NotInsideContainerError()
def run_star(job, r1_id, r2_id, star_index_url, wiggle=False, sort=True):
    """
    Performs alignment of fastqs to bam via STAR
    --limitBAMsortRAM step added to deal with memory explosion when sorting certain samples.
    The value was chosen to complement the recommended amount of memory to have when running STAR (60G)
    :param JobFunctionWrappingJob job: passed automatically by Toil
    :param str r1_id: FileStoreID of fastq (pair 1)
    :param str r2_id: FileStoreID of fastq (pair 2 if applicable, else pass None)
    :param str star_index_url: STAR index tarball
    :param bool wiggle: If True, will output a wiggle file and return it
    :param bool sort: If True, sorts the output BAM by coordinate
    :return: FileStoreIDs (transcriptome, aligned, [wiggle,] log, splice-junction)
    :rtype: tuple(str)
    """
    work_dir = job.fileStore.getLocalTempDir()
    download_url(job, url=star_index_url, name='starIndex.tar.gz', work_dir=work_dir)
    subprocess.check_call(['tar', '-xvf', os.path.join(work_dir, 'starIndex.tar.gz'), '-C', work_dir])
    os.remove(os.path.join(work_dir, 'starIndex.tar.gz'))
    # Determine tarball structure - star index contains are either in a subdir or in the tarball itself
    star_index = os.path.join('/data', os.listdir(work_dir)[0]) if len(os.listdir(work_dir)) == 1 else '/data'
    # Parameter handling for paired / single-end data
    parameters = ['--runThreadN', str(job.cores),
                  '--genomeDir', star_index,
                  '--outFileNamePrefix', 'rna',
                  '--outSAMunmapped', 'Within',
                  '--quantMode', 'TranscriptomeSAM',
                  '--outSAMattributes', 'NH', 'HI', 'AS', 'NM', 'MD',
                  '--outFilterType', 'BySJout',
                  '--outFilterMultimapNmax', '20',
                  '--outFilterMismatchNmax', '999',
                  '--outFilterMismatchNoverReadLmax', '0.04',
                  '--alignIntronMin', '20',
                  '--alignIntronMax', '1000000',
                  '--alignMatesGapMax', '1000000',
                  '--alignSJoverhangMin', '8',
                  '--alignSJDBoverhangMin', '1',
                  '--sjdbScore', '1',
                  '--limitBAMsortRAM', '49268954168']
    # Modify paramaters based on function arguments
    if sort:
        parameters.extend(['--outSAMtype', 'BAM', 'SortedByCoordinate'])
        aligned_bam = 'rnaAligned.sortedByCoord.out.bam'
    else:
        parameters.extend(['--outSAMtype', 'BAM', 'Unsorted'])
        aligned_bam = 'rnaAligned.out.bam'
    if wiggle:
        parameters.extend(['--outWigType', 'bedGraph',
                           '--outWigStrand', 'Unstranded',
                           '--outWigReferencesPrefix', 'chr'])
    if r1_id and r2_id:
        job.fileStore.readGlobalFile(r1_id, os.path.join(work_dir, 'R1.fastq'))
        job.fileStore.readGlobalFile(r2_id, os.path.join(work_dir, 'R2.fastq'))
        parameters.extend(['--readFilesIn', '/data/R1.fastq', '/data/R2.fastq'])
    else:
        job.fileStore.readGlobalFile(r1_id, os.path.join(work_dir, 'R1.fastq'))
        parameters.extend(['--readFilesIn', '/data/R1.fastq'])
    # Call: STAR Mapping
    dockerCall(job=job, tool='quay.io/ucsc_cgl/star:2.4.2a--bcbd5122b69ff6ac4ef61958e47bde94001cfe80',
               workDir=work_dir, parameters=parameters)
    # Check output bam isnt size zero if sorted
    aligned_bam_path = os.path.join(work_dir, aligned_bam)
    if sort:
        # BUG FIX: was assert(expr, 'msg') - asserting a non-empty tuple,
        # which is always truthy, so the size check could never fire
        assert os.stat(aligned_bam_path).st_size > 0, \
            'Aligned bam failed to sort. Ensure sufficient memory is free.'
    # Write to fileStore
    aligned_id = job.fileStore.writeGlobalFile(aligned_bam_path)
    transcriptome_id = job.fileStore.writeGlobalFile(os.path.join(work_dir, 'rnaAligned.toTranscriptome.out.bam'))
    log_id = job.fileStore.writeGlobalFile(os.path.join(work_dir, 'rnaLog.final.out'))
    sj_id = job.fileStore.writeGlobalFile(os.path.join(work_dir, 'rnaSJ.out.tab'))
    if wiggle:
        wiggle_id = job.fileStore.writeGlobalFile(os.path.join(work_dir, 'rnaSignal.UniqueMultiple.str1.out.bg'))
        return transcriptome_id, aligned_id, wiggle_id, log_id, sj_id
    else:
        return transcriptome_id, aligned_id, log_id, sj_id
def run_bwakit(job, config, sort=True, trim=False, mark_secondary=False):
    """
    Runs BWA-Kit to align single or paired-end fastq files or realign SAM/BAM files.
    :param JobFunctionWrappingJob job: Passed by Toil automatically
    :param Namespace config: A configuration object that holds strings as attributes.
        The attributes must be accessible via the dot operator.
        The config must have:
        config.r1               FileStoreID for FASTQ file, or None if realigning SAM/BAM
        config.r2               FileStoreID for paired FASTQ file, or None if single-ended
        config.bam              FileStoreID for BAM file to be realigned, or None if aligning fastq
        config.sam              FileStoreID for SAM file to be realigned, or None if aligning fastq
        config.ref              FileStoreID for the reference genome
        config.fai              FileStoreID for the reference index file
        config.amb              FileStoreID for the reference amb file
        config.ann              FileStoreID for the reference ann file
        config.bwt              FileStoreID for the reference bwt file
        config.pac              FileStoreID for the reference pac file
        config.sa               FileStoreID for the reference sa file
        config.alt              FileStoreID for the reference alt (or None)
        config.rg_line          The read group value to use (or None -- see below)
        config.library          Read group attribute: library
        config.platform         Read group attribute: platform
        config.program_unit     Read group attribute: program unit
        config.uuid             Read group attribute: sample ID
        If specifying config.rg_line, use the following format:
        BAM read group header line (@RG), as defined on page 3 of the SAM spec.
        Tabs should be escaped, e.g., @RG\\tID:foo\\tLB:bar...
        for the read group "foo" from sequencing library "bar".
        Multiple @RG lines can be defined, but should be split by an escaped newline \\n,
        e.g., @RG\\tID:foo\\t:LB:bar\\n@RG\\tID:santa\\tLB:cruz.
    :param bool sort: If True, sorts the BAM
    :param bool trim: If True, performs adapter trimming
    :param bool mark_secondary: If True, mark shorter split reads as secondary
    :return: FileStoreID of BAM
    :rtype: str
    NOTE: Python 2 only (dict.iteritems).
    """
    work_dir = job.fileStore.getLocalTempDir()
    rg = None
    # Reference files are staged under fixed names so bwakit can find them
    inputs = {'ref.fa': config.ref,
              'ref.fa.fai': config.fai,
              'ref.fa.amb': config.amb,
              'ref.fa.ann': config.ann,
              'ref.fa.bwt': config.bwt,
              'ref.fa.pac': config.pac,
              'ref.fa.sa': config.sa}
    samples = []
    realignment = False
    # If a fastq pair was provided
    if getattr(config, 'r1', None):
        inputs['input.1.fq.gz'] = config.r1
        samples.append('input.1.fq.gz')
    if getattr(config, 'r2', None):
        inputs['input.2.fq.gz'] = config.r2
        samples.append('input.2.fq.gz')
    # BAM/SAM inputs trigger realignment mode
    if getattr(config, 'bam', None):
        inputs['input.bam'] = config.bam
        samples.append('input.bam')
        realignment = True
    if getattr(config, 'sam', None):
        inputs['input.sam'] = config.sam
        samples.append('input.sam')
        realignment = True
    # If an alt file was provided
    if getattr(config, 'alt', None):
        inputs['ref.fa.alt'] = config.alt
    # Stage all inputs from the file store into the work dir
    for name, fileStoreID in inputs.iteritems():
        job.fileStore.readGlobalFile(fileStoreID, os.path.join(work_dir, name))
    # If a read group line was provided
    if getattr(config, 'rg_line', None):
        rg = config.rg_line
    # Otherwise, generate a read group line to place in the BAM.
    elif all(getattr(config, elem, None) for elem in ['library', 'platform', 'program_unit', 'uuid']):
        rg = "@RG\\tID:{0}".format(config.uuid)  # '\' character is escaped so bwakit gets passed '\t' properly
        rg_attributes = [config.library, config.platform, config.program_unit, config.uuid]
        for tag, info in zip(['LB', 'PL', 'PU', 'SM'], rg_attributes):
            rg += '\\t{0}:{1}'.format(tag, info)
    # If realigning, then bwakit can use pre-existing read group data
    elif realignment:
        rg = None
    # BWA Options
    opt_args = []
    if sort:
        opt_args.append('-s')
    if trim:
        opt_args.append('-a')
    if mark_secondary:
        opt_args.append('-M')
    # Call: bwakit
    parameters = ['-t', str(job.cores)] + opt_args + ['-o', '/data/aligned', '/data/ref.fa']
    if rg is not None:
        parameters = ['-R', rg] + parameters
    for sample in samples:
        parameters.append('/data/{}'.format(sample))
    dockerCall(job=job, tool='quay.io/ucsc_cgl/bwakit:0.7.12--c85ccff267d5021b75bb1c9ccf5f4b79f91835cc',
               parameters=parameters, workDir=work_dir)
    # Either write file to local output directory or upload to S3 cloud storage
    job.fileStore.logToMaster('Aligned sample: {}'.format(config.uuid))
    return job.fileStore.writeGlobalFile(os.path.join(work_dir, 'aligned.aln.bam'))
def query_maker(t1=None, t2=None, limit=None, i1=None, i2=None, transform=None, downlink=False):
    """Construct a json query dict for a stream's datapoints from the given
    optional arguments::
        #{"t1": 5, "transform": "if $ > 5"}
        print query_maker(t1=5,transform="if $ > 5")
    Index-based (i1/i2) and timestamp-based (t1/t2/limit) access are
    mutually exclusive; with no range at all, the whole stream is queried.
    """
    query = {}
    for key, val in (("t1", t1), ("t2", t2), ("limit", limit)):
        if val is not None:
            query[key] = val
    if i1 is not None or i2 is not None:
        if query:
            raise AssertionError(
                "Stream cannot be accessed both by index and by timestamp at the same time.")
        if i1 is not None:
            query["i1"] = i1
        if i2 is not None:
            query["i2"] = i2
    if not query:
        # No range was given - query the whole stream
        query["i1"] = 0
        query["i2"] = 0
    if transform is not None:
        query["transform"] = transform
    if downlink:
        query["downlink"] = True
    return query
def create(self, schema="{}", **kwargs):
    """Creates a stream given an optional JSON schema, which may be either
    a JSON string or a python dict. Other stream properties (icon,
    datatype, description, ...) can be passed as keyword arguments."""
    if isinstance(schema, basestring):
        decoded = json.loads(schema)
        strschema = schema
    else:
        decoded = schema
        strschema = json.dumps(schema)
    # Reject malformed schemas before contacting the server
    Draft4Validator.check_schema(decoded)
    kwargs["schema"] = strschema
    self.metadata = self.db.create(self.path, kwargs).json()
def insert_array(self, datapoint_array, restamp=False):
    """Insert an array of full datapoints into the stream. Unlike insert(),
    which only takes the data portion and fills in the rest, this requires
    complete datapoints::
        s = cdb["mystream"]
        s.create({"type": "number"})
        s.insert_array([{"d": 4, "t": time.time()},{"d": 5, "t": time.time()}], restamp=False)
    With restamp=False, a datapoint whose timestamp is older than one that
    already exists in the database causes the insert to fail. With
    restamp=True, such datapoints have their timestamps overwritten to the
    timestamp of the most recent datapoint already in the database, and the
    insert succeeds.
    """
    # restamp chooses update (PUT) vs create (POST) semantics on the server
    writer = self.db.update if restamp else self.db.create
    endpoint = self.path + "/data"
    # Write in chunks so each request stays within ConnectorDB's
    # insert size limit
    while len(datapoint_array) > DATAPOINT_INSERT_LIMIT:
        writer(endpoint, datapoint_array[:DATAPOINT_INSERT_LIMIT])
        datapoint_array = datapoint_array[DATAPOINT_INSERT_LIMIT:]
    writer(endpoint, datapoint_array)
def insert(self, data):
    """Append a single datapoint holding `data` to the end of the stream::
        s = cdb["mystream"]
        s.create({"type": "string"})
        s.insert("Hello World!")
    """
    datapoint = {"d": data, "t": time.time()}
    self.insert_array([datapoint], restamp=True)
def subscribe(self, callback, transform="", downlink=False):
    """Subscribe to the stream, running the callback each time datapoints
    are inserted. An optional transform is applied server-side::
        s = cdb["mystream"]
        def subscription_callback(stream,data):
            print stream, data
        s.subscribe(subscription_callback)
    With downlink=True, the subscription is to the downlink substream,
    before writes are acknowledged. This is useful for devices such as
    lights: make the light a boolean downlink stream and have the light
    itself subscribe to the downlink so other devices can switch it::
        def light_control(stream,data):
            light_boolean = data[0]["d"]
            print "Setting light to", light_boolean
            set_light(light_boolean)
            #Acknowledge the write
            return True
        # We don't care about intermediate values, we only want the most recent setting
        # of the light, meaning we want the "if last" transform
        s.subscribe(light_control, downlink=True, transform="if last")
    """
    target = self.path + "/downlink" if downlink else self.path
    return self.db.subscribe(target, callback, transform)
def unsubscribe(self, transform="", downlink=False):
    """Undo a previous subscribe. The same transform and downlink values
    used when subscribing must be passed here::
        s.subscribe(callback,transform="if last")
        s.unsubscribe(transform="if last")
    """
    target = self.path + "/downlink" if downlink else self.path
    return self.db.unsubscribe(target, transform)
def export(self, directory):
    """Export the stream into the given directory, which must not already
    exist. The export can later be read back with import_stream on a device.
    """
    if os.path.exists(directory):
        raise FileExistsError(
            "The stream export directory already exists")
    os.mkdir(directory)
    # Dump the stream's metadata
    with open(os.path.join(directory, "stream.json"), "w") as f:
        json.dump(self.data, f)
    # Dump the stream's data, sorted first: older versions of ConnectorDB
    # had a bug where datapoints could be returned out of order.
    self[:].sort().writeJSON(os.path.join(directory, "data.json"))
    # For downlink streams, also dump the downlink data
    if self.downlink:
        self(i1=0, i2=0, downlink=True).sort().writeJSON(
            os.path.join(directory, "downlink.json"))
def schema(self, schema):
    """Set the stream's schema ("{}" is the empty schema). Schemas pin the
    stream to a specific data type. Accepts either a python dict or a
    JSON-encoded string."""
    if isinstance(schema, basestring):
        decoded = json.loads(schema)
        encoded = schema
    else:
        decoded = schema
        encoded = json.dumps(schema)
    # Reject malformed schemas before sending the update
    Draft4Validator.check_schema(decoded)
    self.set({"schema": encoded})
def device(self):
    """returns the device which owns the given stream"""
    # The stream path is "user/device/stream"; keep the first two parts
    user, dev = self.path.split("/")[:2]
    return Device(self.db, user + "/" + dev)
def get_labels(ontology, ols_base=None):
    """Iterates over the labels of terms in the ontology
    :param str ontology: The name of the ontology
    :param str ols_base: An optional, custom OLS base url
    :rtype: iter[str]
    """
    return OlsClient(ols_base=ols_base).iter_labels(ontology)
def get_metadata(ontology, ols_base=None):
    """Gets the metadata for a given ontology
    :param str ontology: The name of the ontology
    :param str ols_base: An optional, custom OLS base url
    :return: The dictionary representing the JSON from the OLS
    :rtype: dict
    """
    return OlsClient(ols_base=ols_base).get_ontology(ontology)
def get_hierarchy(ontology, ols_base=None):
    """Iterates over the parent-child relationships in an ontology
    :param str ontology: The name of the ontology
    :param str ols_base: An optional, custom OLS base url
    :rtype: iter[tuple[str,str]]
    """
    return OlsClient(ols_base=ols_base).iter_hierarchy(ontology)
def run(cls, name, desc):
    """
    Prepares and runs the pipeline. Note this method must be invoked both from inside a
    Docker container and while the docker daemon is reachable.
    :param str name: The name of the command to start the workflow.
    :param str desc: The description of the workflow.
    """
    wrapper = cls(name, desc)
    mount_path = wrapper._get_mount_path()
    # prepare parser
    arg_parser = wrapper._create_argument_parser()
    wrapper._extend_argument_parser(arg_parser)
    # prepare config file
    # NOTE(review): ruamel.yaml.load without an explicit Loader is
    # deprecated in newer ruamel.yaml releases - confirm pinned version.
    empty_config = wrapper.__get_empty_config()
    config_yaml = ruamel.yaml.load(empty_config)
    wrapper.__populate_parser_from_config(arg_parser, config_yaml)
    args = arg_parser.parse_args()
    # Copy parsed CLI values back into the config; argparse stores dests
    # with underscores while the yaml keys use dashes
    for k,v in vars(args).items():
        k = k.replace('_', '-')
        if k in config_yaml:
            config_yaml[k] = v
    config_path = wrapper._get_config_path()
    with open(config_path, 'w') as writable:
        ruamel.yaml.dump(config_yaml, stream=writable)
    # prepare workdir
    workdir_path = os.path.join(mount_path, 'Toil-' + wrapper._name)
    if os.path.exists(workdir_path):
        if args.restart:
            log.info('Reusing temporary directory: %s', workdir_path)
        else:
            raise UserError('Temporary directory {} already exists. Run with --restart '
                            'option or remove directory.'.format(workdir_path))
    else:
        os.makedirs(workdir_path)
        log.info('Temporary directory created: %s', workdir_path)
    command = wrapper._create_pipeline_command(args, workdir_path, config_path)
    wrapper._extend_pipeline_command(command, args)
    # run command
    try:
        subprocess.check_call(command)
    except subprocess.CalledProcessError as e:
        print(e, file=sys.stderr)
    finally:
        # The pipeline runs as root inside the container; hand the output
        # files back to the uid/gid that owns the mount point
        stat = os.stat(mount_path)
        log.info('Pipeline terminated, changing ownership of output files in %s from root to '
                 'uid %s and gid %s.', mount_path, stat.st_uid, stat.st_gid)
        chown_command = ['chown', '-R', '%s:%s' % (stat.st_uid, stat.st_gid), mount_path]
        subprocess.check_call(chown_command)
        if args.no_clean:
            log.info('Flag "--no-clean" was used, therefore %s was not deleted.', workdir_path)
        else:
            log.info('Cleaning up temporary directory: %s', workdir_path)
            shutil.rmtree(workdir_path)
def __populate_parser_from_config(self, arg_parser, config_data, prefix=''):
    """
    Populates an ArgumentParser object with arguments where each argument is a key from the
    given config_data dictionary.
    :param str prefix: Prepends the key with this prefix delimited by a single '.' character.
    :param argparse.ArgumentParser arg_parser:
    :param dict config_data: The parsed yaml data from the config.
    >>> pw = AbstractPipelineWrapper('test', 'this is a test')
    >>> parser = argparse.ArgumentParser()
    >>> pw._PipelineWrapperBuilder__populate_parser_from_config(parser, {'a':None, 'b':2})
    >>> vars(parser.parse_args(['--a', '1']))
    {'a': '1', 'b': 2}
    >>> vars(parser.parse_args(['--b', '3']))
    {'a': None, 'b': '3'}
    >>> parser = argparse.ArgumentParser()
    >>> pw._PipelineWrapperBuilder__populate_parser_from_config(parser, {})
    >>> vars(parser.parse_args([]))
    {}
    >>> parser = argparse.ArgumentParser()
    >>> pw._PipelineWrapperBuilder__populate_parser_from_config(parser,
    ...                                                         dict(a={'a':'b', 'c':{'d':'e'}},
    ...                                                              f='g', h={}))
    >>> vars(parser.parse_args([]))
    {'f': 'g', 'a.a': 'b', 'a.c.d': 'e'}
    """
    for key, value in config_data.items():
        dotted_key = prefix + '.' + key if prefix else key
        if isinstance(value, dict):
            # Recurse into nested sections, extending the dotted prefix
            self.__populate_parser_from_config(arg_parser, value, prefix=dotted_key)
        else:
            self._add_option(arg_parser, name=dotted_key, default=value)
def __get_empty_config(self):
    """
    Generate the pipeline's config file, return its contents as a string,
    and delete the file again.
    """
    self._generate_config()
    config_path = self._get_config_path()
    with open(config_path, 'r') as fp:
        contents = fp.read()
    os.remove(config_path)
    return contents
def _get_mount_path(self):
    """
    Returns the path of the mount point of the current container. If this method is invoked
    outside of a Docker container a NotInsideContainerError is raised. Likewise if the docker
    daemon is unreachable from inside the container a UserError is raised. This method is
    idempotent (the result is cached on first call).
    """
    if self._mount_path is None:
        name = current_docker_container_id()
        if dockerd_is_reachable():
            # Get name of mounted volume
            blob = json.loads(subprocess.check_output(['docker', 'inspect', name]))
            mounts = blob[0]['Mounts']
            # Ensure docker.sock is mounted correctly
            sock_mnt = [x['Source'] == x['Destination']
                        for x in mounts if 'docker.sock' in x['Source']]
            require(len(sock_mnt) == 1,
                    'Missing socket mount. Requires the following: '
                    'docker run -v /var/run/docker.sock:/var/run/docker.sock')
            # Ensure formatting of command for 2 mount points
            if len(mounts) == 2:
                # Exactly two mounts: the socket plus one work mount,
                # which must be a mirror mount (Source == Destination)
                require(all(x['Source'] == x['Destination'] for x in mounts),
                        'Docker Src/Dst mount points, invoked with the -v argument, '
                        'must be the same if only using one mount point aside from the docker '
                        'socket.')
                work_mount = [x['Source'] for x in mounts if 'docker.sock' not in x['Source']]
            else:
                # Ensure only one mirror mount exists aside from docker.sock
                mirror_mounts = [x['Source'] for x in mounts if x['Source'] == x['Destination']]
                work_mount = [x for x in mirror_mounts if 'docker.sock' not in x]
                require(len(work_mount) == 1, 'Wrong number of mirror mounts provided, see '
                                              'documentation.')
            self._mount_path = work_mount[0]
            log.info('The work mount is: %s', self._mount_path)
        else:
            raise UserError('Docker daemon is not reachable, ensure Docker is being run with: '
                            '"-v /var/run/docker.sock:/var/run/docker.sock" as an argument.')
    return self._mount_path
def _add_option(self, arg_parser, name, *args, **kwargs):
    """Register a ``--<name>`` option on *arg_parser*.

    Extra positional and keyword arguments are forwarded verbatim to
    ``ArgumentParser.add_argument``.

    :param argparse.ArgumentParser arg_parser: parser to extend
    :param str name: option name, without the leading dashes
    """
    flag = '--{}'.format(name)
    arg_parser.add_argument(flag, *args, **kwargs)
def _create_argument_parser(self):
    """Build an ArgumentParser pre-populated with the shared pipeline flags.

    The parser exposes '--no-clean', '--restart' and '--cores' and uses the
    runner's description verbatim (raw-text formatted).
    """
    arg_parser = argparse.ArgumentParser(
        description=self._desc,
        formatter_class=argparse.RawTextHelpFormatter)
    shared_flags = (
        ('--no-clean', dict(action='store_true',
                            help='If this flag is used, temporary work directory is not cleaned.')),
        ('--restart', dict(action='store_true',
                           help='If this flag is used, a previously uncleaned workflow in the same'
                                ' directory will be resumed')),
        ('--cores', dict(type=int, default=None,
                         help='Will set a cap on number of cores to use, default is all '
                              'available cores.')),
    )
    for flag, options in shared_flags:
        arg_parser.add_argument(flag, **options)
    return arg_parser
def _create_pipeline_command(self, args, workdir_path, config_path):
    """Assemble and return the argv list used to launch a pipeline run."""
    command = [self._name, 'run', os.path.join(workdir_path, 'jobStore'),
               '--config', config_path,
               '--workDir', workdir_path, '--retryCount', '1']
    if args.restart:
        command.append('--restart')
    return command
def setauth(self, user_or_apikey=None, user_password=None):
    """Set the authentication header used by the session.

    Called when the apikey or credentials change so the connection keeps
    working seamlessly. With no arguments, authentication is cleared.
    The websocket is updated with the same credentials.
    """
    if user_or_apikey is None:
        auth = None
    else:
        # ConnectorDB allows login using both basic auth or an apikey url param.
        # The python client uses basic auth for all logins; an apikey login
        # is basic auth with an empty user and the apikey as password.
        if user_password is None:
            user_password = user_or_apikey
            user_or_apikey = ""
        auth = HTTPBasicAuth(user_or_apikey, user_password)
    self.r.auth = auth
    self.ws.setauth(auth)
def handleresult(self, r):
    """Handles HTTP error codes for the given request
    Raises:
        AuthenticationError on the appropriate 4** errors
        ServerError if the response is not an ok (2**)
    Arguments:
        r -- The request result
    Returns:
        r unchanged when the status code indicates success
    """
    if r.status_code >= 400 and r.status_code < 500:
        msg = r.json()
        raise AuthenticationError(str(msg["code"]) + ": " + msg["msg"] +
                                  " (" + msg["ref"] + ")")
    elif r.status_code > 300:
        err = None
        try:
            msg = r.json()
            err = ServerError(str(msg["code"]) + ": " + msg["msg"] + " (" +
                              msg["ref"] + ")")
        except Exception:
            # Was a bare `except:`, which also swallows KeyboardInterrupt and
            # SystemExit; only body/JSON parsing failures should fall back to
            # the generic message.
            raise ServerError(
                "Server returned error, but did not give a valid error message")
        raise err
    return r
def ping(self):
    """Ping the server with the current credentials.

    Returns the path of the currently authenticated device as text.
    """
    response = self.r.get(self.url, params={"q": "this"})
    return self.handleresult(response).text
def query(self, query_type, query=None):
    """POST *query* (JSON-encoded) to /query/<query_type> and return parsed JSON."""
    target = urljoin(self.url + "query/", query_type)
    response = self.r.post(target, data=json.dumps(query))
    return self.handleresult(response).json()
def create(self, path, data=None):
    """POST *data* (JSON-encoded) to the given path of the CRUD API."""
    target = urljoin(self.url + CRUD_PATH, path)
    return self.handleresult(self.r.post(target, data=json.dumps(data)))
def read(self, path, params=None):
    """GET the resource at the given CRUD API path, with optional URL params."""
    target = urljoin(self.url + CRUD_PATH, path)
    return self.handleresult(self.r.get(target, params=params))
def update(self, path, data=None):
    """PUT the JSON-encoded *data* to the given path of the CRUD API."""
    target = urljoin(self.url + CRUD_PATH, path)
    return self.handleresult(self.r.put(target, data=json.dumps(data)))
def delete(self, path):
    """DELETE the object at the given path of the CRUD API (best effort)."""
    target = urljoin(self.url + CRUD_PATH, path)
    return self.handleresult(self.r.delete(target))
def subscribe(self, stream, callback, transform=""):
    """Start a websocket subscription to *stream*, invoking *callback* on data."""
    websocket = self.ws
    return websocket.subscribe(stream, callback, transform)
def create(self, email, password, role="user", public=True, **kwargs):
    """Create this user on the server with the given email and password.

    Additional default properties can be supplied as keyword arguments::

        usr.create("my@email", "mypass", description="I like trains.")

    ConnectorDB also permits initializing an entire user tree in one call by
    passing a "devices" dict (and a "streams" dict to add streams to the user
    device), so all relevant devices and streams are created in one go. The
    user and meta devices are created by default.
    """
    properties = dict(kwargs)
    properties["email"] = email
    properties["password"] = password
    properties["role"] = role
    properties["public"] = public
    self.metadata = self.db.create(self.path, properties).json()
def devices(self):
    """Return the list of devices that belong to this user.

    Each returned device object has its ``metadata`` attribute populated
    from the server listing. An empty list is returned when the server
    gives no listing.
    """
    listing = self.db.read(self.path, {"q": "ls"})
    if listing is None or listing.json() is None:
        return []
    result = []
    for info in listing.json():
        device = self[info["name"]]
        device.metadata = info
        result.append(device)
    return result
def streams(self, public=False, downlink=False, visible=True):
    """Return the list of streams that belong to this user.

    The listing can be filtered three ways:
    - public: when True, returns only streams belonging to public devices
    - downlink: if True, returns only downlink streams
    - visible: if True (default), returns only streams of visible devices
    """
    filters = {"q": "streams",
               "public": str(public).lower(),
               "downlink": str(downlink).lower(),
               "visible": str(visible).lower()}
    listing = self.db.read(self.path, filters)
    if listing is None or listing.json() is None:
        return []
    result = []
    for info in listing.json():
        stream = self[info["device"]][info["name"]]
        stream.metadata = info
        result.append(stream)
    return result
def export(self, directory):
    """Export this ConnectorDB user into *directory*.

    The resulting export can be imported with cdb.import(directory). The
    REST API does not expose password hashes, so the imported user's
    password defaults to the username. Unlike device/stream exports, this
    writes a manifest directly compatible with connectordb's importer
    (connectordb import <database> <directory>), so several users may be
    exported into the same directory.
    """
    manifest_path = os.path.join(directory, "connectordb.json")
    if not os.path.exists(directory):
        # Fresh export: create the directory and write the version manifest
        os.mkdir(directory)
        with open(manifest_path, "w") as f:
            json.dump(
                {"Version": 1, "ConnectorDB": self.db.get("meta/version").text}, f)
    else:
        # Directory exists: it must already contain a compatible v1 export
        if not os.path.exists(manifest_path):
            raise FileExistsError(
                "The export directory already exsits, and is not a ConnectorDB export.")
        with open(manifest_path) as f:
            manifest = json.load(f)
        if manifest["Version"] != 1:
            raise ValueError(
                "Could not export to directory: incompatible export versions.")
    # Create this user's subdirectory and write its info
    user_dir = os.path.join(directory, self.name)
    os.mkdir(user_dir)
    with open(os.path.join(user_dir, "user.json"), "w") as f:
        json.dump(self.data, f)
    # Export the devices one by one
    for device in self.devices():
        device.export(os.path.join(user_dir, device.name))
def import_device(self, directory):
    """Import a device previously exported with device.export() from *directory*.

    Two special cases exist: a device named "meta" is skipped entirely, and
    a device named "user" overwrites the user device even if it already
    exists. Any other pre-existing device raises ValueError.
    """
    # Read the exported device info
    with open(os.path.join(directory, "device.json"), "r") as f:
        device_info = json.load(f)
    device_name = device_info["name"]
    device = self[device_name]
    del device_info["name"]
    if device_name == "meta":
        return
    if device_name == "user":
        device.set(device_info)
    elif device.exists():
        raise ValueError("The device " + device.name + " already exists")
    else:
        device.create(**device_info)
    # Import every stream subdirectory of the export
    for entry in os.listdir(directory):
        stream_dir = os.path.join(directory, entry)
        if os.path.isdir(stream_dir):
            device.import_stream(stream_dir)
def run_cutadapt(job, r1_id, r2_id, fwd_3pr_adapter, rev_3pr_adapter):
    """Adapter trimming for RNA-seq data with CutAdapt.

    :param JobFunctionWrappingJob job: passed automatically by Toil
    :param str r1_id: FileStoreID of fastq read 1
    :param str r2_id: FileStoreID of fastq read 2 (if paired data)
    :param str fwd_3pr_adapter: Adapter sequence for the forward 3' adapter
    :param str rev_3pr_adapter: Adapter sequence for the reverse 3' adapter (second fastq pair)
    :return: R1 and R2 FileStoreIDs
    :rtype: tuple
    """
    work_dir = job.fileStore.getLocalTempDir()
    if r2_id:
        require(rev_3pr_adapter, "Paired end data requires a reverse 3' adapter sequence.")
    parameters = ['-a', fwd_3pr_adapter, '-m', '35']
    paired = bool(r1_id and r2_id)
    # Localize the fastq inputs and build the CutAdapt argument list
    job.fileStore.readGlobalFile(r1_id, os.path.join(work_dir, 'R1.fastq'))
    if paired:
        job.fileStore.readGlobalFile(r2_id, os.path.join(work_dir, 'R2.fastq'))
        parameters += ['-A', rev_3pr_adapter,
                       '-o', '/data/R1_cutadapt.fastq',
                       '-p', '/data/R2_cutadapt.fastq',
                       '/data/R1.fastq', '/data/R2.fastq']
    else:
        parameters += ['-o', '/data/R1_cutadapt.fastq', '/data/R1.fastq']
    # Call: CutAdapt
    dockerCall(job=job, tool='quay.io/ucsc_cgl/cutadapt:1.9--6bd44edd2b8f8f17e25c5a268fedaab65fa851d2',
               workDir=work_dir, parameters=parameters)
    # Write trimmed fastqs back to the fileStore
    r1_cut_id = job.fileStore.writeGlobalFile(os.path.join(work_dir, 'R1_cutadapt.fastq'))
    r2_cut_id = job.fileStore.writeGlobalFile(os.path.join(work_dir, 'R2_cutadapt.fastq')) if paired else None
    return r1_cut_id, r2_cut_id
def run_samtools_faidx(job, ref_id):
    """Create a fasta index (.fai) for the reference genome with SAMtools.

    :param JobFunctionWrappingJob job: passed automatically by Toil
    :param str ref_id: FileStoreID for the reference genome
    :return: FileStoreID for reference index
    :rtype: str
    """
    job.fileStore.logToMaster('Created reference index')
    work_dir = job.fileStore.getLocalTempDir()
    job.fileStore.readGlobalFile(ref_id, os.path.join(work_dir, 'ref.fasta'))
    dockerCall(job=job, workDir=work_dir, parameters=['faidx', 'ref.fasta'],
               tool='quay.io/ucsc_cgl/samtools:0.1.19--dd5ac549b95eb3e5d166a5e310417ef13651994e')
    return job.fileStore.writeGlobalFile(os.path.join(work_dir, 'ref.fasta.fai'))
def run_samtools_index(job, bam):
    """Create a BAM index file with SAMtools index.

    :param JobFunctionWrappingJob job: passed automatically by Toil
    :param str bam: FileStoreID of the BAM file
    :return: FileStoreID for BAM index file
    :rtype: str
    """
    work_dir = job.fileStore.getLocalTempDir()
    job.fileStore.readGlobalFile(bam, os.path.join(work_dir, 'sample.bam'))
    # Index the bam inside the container, then persist the .bai
    dockerCall(job=job, workDir=work_dir, parameters=['index', '/data/sample.bam'],
               tool='quay.io/ucsc_cgl/samtools:0.1.19--dd5ac549b95eb3e5d166a5e310417ef13651994e')
    return job.fileStore.writeGlobalFile(os.path.join(work_dir, 'sample.bam.bai'))
def run_sambamba_markdup(job, bam):
    """Mark PCR-duplicate reads with Sambamba.

    :param JobFunctionWrappingJob job: passed automatically by Toil
    :param str bam: FileStoreID for BAM file
    :return: FileStoreID for sorted BAM file
    :rtype: str
    """
    work_dir = job.fileStore.getLocalTempDir()
    job.fileStore.readGlobalFile(bam, os.path.join(work_dir, 'input.bam'))
    markdup_cmd = ['/usr/local/bin/sambamba',
                   'markdup',
                   '-t', str(int(job.cores)),
                   '/data/input.bam',
                   '/data/output.bam']
    began = time.time()
    dockerCall(job=job, workDir=work_dir,
               parameters=markdup_cmd,
               tool='quay.io/biocontainers/sambamba:0.6.6--0')
    _log_runtime(job, began, time.time(), "sambamba mkdup")
    return job.fileStore.writeGlobalFile(os.path.join(work_dir, 'output.bam'))
def run_samblaster(job, sam):
    """Mark PCR-duplicate reads in a SAM file with SAMBLASTER.

    :param JobFunctionWrappingJob job: passed automatically by Toil
    :param str sam: FileStoreID for SAM file
    :return: FileStoreID for deduped SAM file
    :rtype: str
    """
    work_dir = job.fileStore.getLocalTempDir()
    job.fileStore.readGlobalFile(sam, os.path.join(work_dir, 'input.sam'))
    samblaster_cmd = ['/usr/local/bin/samblaster',
                      '-i', '/data/input.sam',
                      '-o', '/data/output.sam',
                      '--ignoreUnmated']
    began = time.time()
    dockerCall(job=job, workDir=work_dir,
               parameters=samblaster_cmd,
               tool='quay.io/biocontainers/samblaster:0.1.24--0')
    _log_runtime(job, began, time.time(), "SAMBLASTER")
    return job.fileStore.writeGlobalFile(os.path.join(work_dir, 'output.sam'))
def picard_mark_duplicates(job, bam, bai, validation_stringency='LENIENT'):
    """Run Picard MarkDuplicates on a coordinate-sorted BAM file.

    :param JobFunctionWrappingJob job: passed automatically by Toil
    :param str bam: FileStoreID for BAM file
    :param str bai: FileStoreID for BAM index file
    :param str validation_stringency: BAM file validation stringency, default is LENIENT
    :return: FileStoreIDs for BAM and BAI files
    :rtype: tuple
    """
    work_dir = job.fileStore.getLocalTempDir()
    # Localize inputs
    job.fileStore.readGlobalFile(bam, os.path.join(work_dir, 'sorted.bam'))
    job.fileStore.readGlobalFile(bai, os.path.join(work_dir, 'sorted.bai'))
    picard_args = ['MarkDuplicates',
                   'INPUT=sorted.bam',
                   'OUTPUT=mkdups.bam',
                   'METRICS_FILE=metrics.txt',
                   'ASSUME_SORTED=true',
                   'CREATE_INDEX=true',
                   'VALIDATION_STRINGENCY=%s' % validation_stringency.upper()]
    # picard-tools container doesn't have JAVA_OPTS variable
    # Set TMPDIR to /data to prevent writing temporary files to /tmp
    docker_args = ['--rm',
                   '--log-driver', 'none',
                   '-e', 'JAVA_OPTIONS=-Djava.io.tmpdir=/data/ -Xmx{}'.format(job.memory),
                   '-v', '{}:/data'.format(work_dir)]
    began = time.time()
    dockerCall(job=job, workDir=work_dir,
               parameters=picard_args,
               tool='quay.io/ucsc_cgl/picardtools:1.95--dd5ac549b95eb3e5d166a5e310417ef13651994e',
               dockerParameters=docker_args)
    _log_runtime(job, began, time.time(), "Picard MarkDuplicates")
    out_bam = job.fileStore.writeGlobalFile(os.path.join(work_dir, 'mkdups.bam'))
    out_bai = job.fileStore.writeGlobalFile(os.path.join(work_dir, 'mkdups.bai'))
    return out_bam, out_bai
def run_picard_sort(job, bam, sort_by_name=False):
    """Sort a BAM file with Picard SortSam.

    :param JobFunctionWrappingJob job: passed automatically by Toil
    :param str bam: FileStoreID for BAM file
    :param boolean sort_by_name: If true, sorts by read name instead of coordinate.
    :return: FileStoreID for sorted BAM file
    :rtype: str
    """
    work_dir = job.fileStore.getLocalTempDir()
    job.fileStore.readGlobalFile(bam, os.path.join(work_dir, 'input.bam'))
    sort_order = 'SO=queryname' if sort_by_name else 'SO=coordinate'
    picard_args = ['SortSam',
                   'O=/data/output.bam',
                   'I=/data/input.bam',
                   sort_order]
    # picard-tools container doesn't have JAVA_OPTS variable
    # Set TMPDIR to /data to prevent writing temporary files to /tmp
    docker_args = ['--rm',
                   '--log-driver', 'none',
                   '-e', 'JAVA_OPTIONS=-Djava.io.tmpdir=/data/ -Xmx{}'.format(job.memory),
                   '-v', '{}:/data'.format(work_dir)]
    began = time.time()
    dockerCall(job=job, workDir=work_dir,
               parameters=picard_args,
               tool='quay.io/ucsc_cgl/picardtools:1.95--dd5ac549b95eb3e5d166a5e310417ef13651994e',
               dockerParameters=docker_args)
    _log_runtime(job, began, time.time(), "Picard SortSam")
    return job.fileStore.writeGlobalFile(os.path.join(work_dir, 'output.bam'))
def run_gatk_preprocessing(job, bam, bai, ref, ref_dict, fai, g1k, mills, dbsnp, realign=False, unsafe=False):
    """
    GATK Preprocessing Pipeline
    0: Mark duplicates
    1: Create INDEL realignment intervals
    2: Realign INDELs
    3: Recalibrate base quality scores
    4: Apply base score recalibration
    This function only builds and wires the Toil job graph (disk sizes are
    computed lazily via PromisedRequirement); the child jobs do the work.
    :param JobFunctionWrappingJob job: passed automatically by Toil
    :param str bam: FileStoreID for BAM file
    :param str bai: FileStoreID for BAM index file
    :param str ref: FileStoreID for reference genome fasta file
    :param str ref_dict: FileStoreID for reference sequence dictionary file
    :param str fai: FileStoreID for reference fasta index file
    :param str g1k: FileStoreID for 1000 Genomes VCF file
    :param str mills: FileStoreID for Mills VCF file
    :param str dbsnp: FileStoreID for dbSNP VCF file
    :param bool realign: If True, then runs GATK INDEL realignment
    :param bool unsafe: If True, runs GATK tools in UNSAFE mode: "-U ALLOW_SEQ_DICT_INCOMPATIBILITY"
    :return: FileStoreIDs for BAM and BAI files
    :rtype: tuple(str, str)
    """
    # The MarkDuplicates disk requirement depends on the input BAM and BAI files and the output
    # BAM and BAI files. The output BAM file is approximately the same size as the input BAM file.
    mdups_disk = PromisedRequirement(lambda bam_, bai_: 2 * (bam_.size + bai_.size), bam, bai)
    mdups = job.wrapJobFn(picard_mark_duplicates,
                          bam,
                          bai,
                          cores=job.cores,
                          disk=mdups_disk,
                          memory=job.memory)
    # Store input for BQSR
    bqsr_input_bam = mdups.rv(0)
    bqsr_input_bai = mdups.rv(1)
    # Get genome reference file sizes for calculating disk requirements
    # NOTE(review): ref/ref_dict/fai/g1k/mills/dbsnp are accessed via .size here,
    # so callers appear to pass file objects (e.g. Toil FileID) rather than plain
    # ID strings — confirm against call sites.
    genome_ref_size = ref.size + ref_dict.size + fai.size
    if realign:
        # Get INDEL resource file sizes and genome reference file sizes
        indel_ref_size = mills.size + g1k.size + genome_ref_size
        # The RealignerTargetCreator disk requirement depends on the input BAM/BAI files, the genome reference files,
        # and the output intervals file. The intervals file size is less than the reference file size, so estimate the
        # interval file size as the reference file size.
        realigner_target_disk = PromisedRequirement(lambda bam_, bai_, ref_size:
                                                    bam_.size + bai_.size + 2 * ref_size,
                                                    mdups.rv(0),
                                                    mdups.rv(1),
                                                    indel_ref_size)
        realigner_target = job.wrapJobFn(run_realigner_target_creator,
                                         mdups.rv(0),
                                         mdups.rv(1),
                                         ref, ref_dict, fai,
                                         g1k, mills,
                                         unsafe=unsafe,
                                         cores=1, # RealignerTargetCreator is single threaded
                                         disk=realigner_target_disk,
                                         memory=job.memory)
        # The INDEL realignment disk requirement depends on the input BAM and BAI files, the intervals
        # file, the variant resource files, and the output BAM and BAI files. Here, we assume the
        # output BAM and BAI files are approximately the same size as the input BAM and BAI files.
        indel_realign_disk = PromisedRequirement(lambda bam_, bai_, intervals, ref_size:
                                                 2 * (bam_.size + bai_.size) + intervals.size + ref_size,
                                                 mdups.rv(0),
                                                 mdups.rv(1),
                                                 realigner_target.rv(),
                                                 indel_ref_size)
        indel_realign = job.wrapJobFn(run_indel_realignment,
                                      realigner_target.rv(),
                                      mdups.rv(0),
                                      mdups.rv(1),
                                      ref, ref_dict, fai,
                                      g1k, mills,
                                      unsafe=unsafe,
                                      cores=1, # IndelRealigner is single threaded
                                      disk=indel_realign_disk,
                                      memory=job.memory)
        mdups.addChild(realigner_target)
        realigner_target.addChild(indel_realign)
        # Update input for BQSR using the realigned BAM files
        bqsr_input_bam = indel_realign.rv(0)
        bqsr_input_bai = indel_realign.rv(1)
    # Get size of BQSR databases and genome reference files
    bqsr_ref_size = dbsnp.size + mills.size + genome_ref_size
    # The BQSR disk requirement depends on the input BAM and BAI files, the reference files, and the output
    # recalibration table file. The recalibration table file size is less than the reference file sizes, so use
    # the reference file sizes to estimate the recalibration table file size.
    base_recal_disk = PromisedRequirement(lambda bam_, bai_, ref_size:
                                          bam_.size + bai_.size + 2 * ref_size,
                                          bqsr_input_bam,
                                          bqsr_input_bai,
                                          bqsr_ref_size)
    base_recal = job.wrapJobFn(run_base_recalibration,
                               bqsr_input_bam,
                               bqsr_input_bai,
                               ref, ref_dict, fai,
                               dbsnp, mills,
                               unsafe=unsafe,
                               cores=job.cores,
                               disk=base_recal_disk,
                               memory=job.memory)
    # The PrintReads disk requirement depends on the input BAM and BAI files, the recalibration table file, the
    # genome reference files, and the output BAM and BAI files. The output BAM and BAI files are approximately the
    # same size as the input BAM and BAI files.
    recalibrate_reads_disk = PromisedRequirement(lambda bam_, bai_, recal, ref_size:
                                                 2 * (bam_.size + bai_.size) + recal.size + ref_size,
                                                 bqsr_input_bam,
                                                 bqsr_input_bai,
                                                 base_recal.rv(),
                                                 genome_ref_size)
    recalibrate_reads = job.wrapJobFn(apply_bqsr_recalibration,
                                      base_recal.rv(),
                                      bqsr_input_bam,
                                      bqsr_input_bai,
                                      ref, ref_dict, fai,
                                      unsafe=unsafe,
                                      cores=job.cores,
                                      disk=recalibrate_reads_disk,
                                      memory=job.memory)
    # Wire the graph: mdups (and any realignment jobs hanging off it) run first,
    # then BQSR, then read recalibration.
    job.addChild(mdups)
    mdups.addFollowOn(base_recal)
    base_recal.addChild(recalibrate_reads)
    return recalibrate_reads.rv(0), recalibrate_reads.rv(1)
def run_base_recalibration(job, bam, bai, ref, ref_dict, fai, dbsnp, mills, unsafe=False):
    """
    Creates recalibration table for Base Quality Score Recalibration
    :param JobFunctionWrappingJob job: passed automatically by Toil
    :param str bam: FileStoreID for BAM file
    :param str bai: FileStoreID for BAM index file
    :param str ref: FileStoreID for reference genome fasta file
    :param str ref_dict: FileStoreID for reference genome sequence dictionary file
    :param str fai: FileStoreID for reference genome fasta index file
    :param str dbsnp: FileStoreID for dbSNP VCF file
    :param str mills: FileStoreID for Mills VCF file
    :param bool unsafe: If True, runs GATK in UNSAFE mode: "-U ALLOW_SEQ_DICT_INCOMPATIBILITY"
    :return: FileStoreID for the recalibration table file
    :rtype: str
    """
    inputs = {'ref.fasta': ref,
              'ref.fasta.fai': fai,
              'ref.dict': ref_dict,
              'input.bam': bam,
              'input.bai': bai,
              'dbsnp.vcf': dbsnp,
              'mills.vcf': mills}
    work_dir = job.fileStore.getLocalTempDir()
    # dict.items() works on both Python 2 and 3; the previous iteritems()
    # call was Python-2-only and breaks under Python 3.
    for name, file_store_id in inputs.items():
        job.fileStore.readGlobalFile(file_store_id, os.path.join(work_dir, name))
    # Call: GATK -- BaseRecalibrator
    parameters = ['-T', 'BaseRecalibrator',
                  '-nct', str(int(job.cores)),
                  '-R', '/data/ref.fasta',
                  '-I', '/data/input.bam',
                  # Recommended known sites:
                  # https://software.broadinstitute.org/gatk/guide/article?id=1247
                  '-knownSites', '/data/dbsnp.vcf',
                  '-knownSites', '/data/mills.vcf',
                  '-o', '/data/recal_data.table']
    if unsafe:
        parameters.extend(['-U', 'ALLOW_SEQ_DICT_INCOMPATIBILITY'])
    # Set TMPDIR to /data to prevent writing temporary files to /tmp
    docker_parameters = ['--rm',
                         '--log-driver', 'none',
                         '-e', 'JAVA_OPTS=-Djava.io.tmpdir=/data/ -Xmx{}'.format(job.memory),
                         '-v', '{}:/data'.format(work_dir)]
    start_time = time.time()
    dockerCall(job=job, tool='quay.io/ucsc_cgl/gatk:3.5--dba6dae49156168a909c43330350c6161dc7ecc2',
               workDir=work_dir,
               parameters=parameters,
               dockerParameters=docker_parameters)
    end_time = time.time()
    _log_runtime(job, start_time, end_time, "GATK3 BaseRecalibrator")
    return job.fileStore.writeGlobalFile(os.path.join(work_dir, 'recal_data.table'))
def run_kallisto(job, r1_id, r2_id, kallisto_index_url):
    """Quantify RNA expression with Kallisto.

    :param JobFunctionWrappingJob job: passed automatically by Toil
    :param str r1_id: FileStoreID of fastq (pair 1)
    :param str r2_id: FileStoreID of fastq (pair 2 if applicable, otherwise pass None for single-end)
    :param str kallisto_index_url: FileStoreID for Kallisto index file
    :return: FileStoreID from Kallisto output
    :rtype: str
    """
    work_dir = job.fileStore.getLocalTempDir()
    download_url(job, url=kallisto_index_url, name='kallisto_hg38.idx', work_dir=work_dir)
    parameters = ['quant',
                  '-i', '/data/kallisto_hg38.idx',
                  '-t', str(job.cores),
                  '-o', '/data/',
                  '-b', '100',
                  '--fusion']
    # Localize fastq inputs and finish the argument list
    job.fileStore.readGlobalFile(r1_id, os.path.join(work_dir, 'R1.fastq'))
    if r1_id and r2_id:
        job.fileStore.readGlobalFile(r2_id, os.path.join(work_dir, 'R2.fastq'))
        parameters += ['/data/R1.fastq', '/data/R2.fastq']
    else:
        parameters += ['--single', '-l', '200', '-s', '15', '/data/R1.fastq']
    # Call: Kallisto
    dockerCall(job=job, tool='quay.io/ucsc_cgl/kallisto:0.42.4--35ac87df5b21a8e8e8d159f26864ac1e1db8cf86',
               workDir=work_dir, parameters=parameters)
    # Tar output files together and store in fileStore
    outputs = [os.path.join(work_dir, name)
               for name in ('run_info.json', 'abundance.tsv', 'abundance.h5', 'fusion.txt')]
    tarball_files(tar_name='kallisto.tar.gz', file_paths=outputs, output_dir=work_dir)
    return job.fileStore.writeGlobalFile(os.path.join(work_dir, 'kallisto.tar.gz'))
def run_rsem(job, bam_id, rsem_ref_url, paired=True):
    """Quantify RNA expression with RSEM.

    :param JobFunctionWrappingJob job: Passed automatically by Toil
    :param str bam_id: FileStoreID of transcriptome bam for quantification
    :param str rsem_ref_url: URL of RSEM reference (tarball)
    :param bool paired: If True, uses parameters for paired end data
    :return: FileStoreIDs for RSEM's gene and isoform output
    :rtype: str
    """
    work_dir = job.fileStore.getLocalTempDir()
    download_url(job, url=rsem_ref_url, name='rsem_ref.tar.gz', work_dir=work_dir)
    tarball = os.path.join(work_dir, 'rsem_ref.tar.gz')
    subprocess.check_call(['tar', '-xvf', tarball, '-C', work_dir])
    os.remove(tarball)
    # Determine tarball structure - based on it, ascertain folder name and rsem reference prefix
    rsem_files = []
    for root, _, files in os.walk(work_dir):
        rsem_files += [os.path.join(root, name) for name in files]
    # "grp" is a required RSEM extension that should exist in the RSEM reference
    ref_prefix = [os.path.basename(os.path.splitext(f)[0]) for f in rsem_files if 'grp' in f][0]
    contents = os.listdir(work_dir)
    ref_folder = os.path.join('/data', contents[0]) if len(contents) == 1 else '/data'
    # I/O
    job.fileStore.readGlobalFile(bam_id, os.path.join(work_dir, 'transcriptome.bam'))
    output_prefix = 'rsem'
    # Call: RSEM
    parameters = ['--quiet',
                  '--no-qualities',
                  '-p', str(job.cores),
                  '--forward-prob', '0.5',
                  '--seed-length', '25',
                  '--fragment-length-mean', '-1.0',
                  '--bam', '/data/transcriptome.bam',
                  os.path.join(ref_folder, ref_prefix),
                  output_prefix]
    if paired:
        parameters.insert(0, '--paired-end')
    dockerCall(job=job, tool='quay.io/ucsc_cgl/rsem:1.2.25--d4275175cc8df36967db460b06337a14f40d2f21',
               parameters=parameters, workDir=work_dir)
    # Write to FileStore
    gene_id = job.fileStore.writeGlobalFile(os.path.join(work_dir, output_prefix + '.genes.results'))
    isoform_id = job.fileStore.writeGlobalFile(os.path.join(work_dir, output_prefix + '.isoforms.results'))
    return gene_id, isoform_id
def run_rsem_postprocess(job, rsem_gene_id, rsem_isoform_id):
    """
    Parses RSEMs output to produce the separate .tab files (TPM, FPKM, counts) for both gene and isoform.
    These are two-column files: Genes and Quantifications.
    HUGO files are also provided that have been mapped from Gencode/ENSEMBLE names.
    :param JobFunctionWrappingJob job: passed automatically by Toil
    :param str rsem_gene_id: FileStoreID of rsem_gene_ids
    :param str rsem_isoform_id: FileStoreID of rsem_isoform_ids
    :return: FileStoreID from RSEM post process tarball
    :rytpe: str
    """
    work_dir = job.fileStore.getLocalTempDir()
    # I/O: readGlobalFile returns the absolute local path of each file
    genes = job.fileStore.readGlobalFile(rsem_gene_id, os.path.join(work_dir, 'rsem_genes.results'))
    iso = job.fileStore.readGlobalFile(rsem_isoform_id, os.path.join(work_dir, 'rsem_isoforms.results'))
    # Perform HUGO gene / isoform name mapping
    command = ['-g', 'rsem_genes.results', '-i', 'rsem_isoforms.results']
    dockerCall(job=job, tool='quay.io/ucsc_cgl/gencode_hugo_mapping:1.0--cb4865d02f9199462e66410f515c4dabbd061e4d',
               parameters=command, workDir=work_dir)
    hugo_files = [os.path.join(work_dir, x) for x in ['rsem_genes.hugo.results', 'rsem_isoforms.hugo.results']]
    # Create tarballs for outputs. genes/iso and hugo_files are already full
    # paths, so use them directly; the previous os.path.join(work_dir, <abs
    # path>) calls were no-ops that obscured this.
    tarball_files('rsem.tar.gz', file_paths=[genes, iso], output_dir=work_dir)
    tarball_files('rsem_hugo.tar.gz', file_paths=hugo_files, output_dir=work_dir)
    rsem_id = job.fileStore.writeGlobalFile(os.path.join(work_dir, 'rsem.tar.gz'))
    hugo_id = job.fileStore.writeGlobalFile(os.path.join(work_dir, 'rsem_hugo.tar.gz'))
    return rsem_id, hugo_id
def switch(request, url):
    """
    Set/clear boolean field value for a model object.

    ``url`` has the form ``app_label/model_name/object_id/field``. The named
    field's current value is toggled, the object saved, and either a JSON
    payload (AJAX) or a redirect with a success message is returned.
    Raises PermissionDenied when the user lacks the model's change
    permission; get_object_or_404 raises Http404 for a missing object.
    """
    app_label, model_name, object_id, field = url.split('/')
    try:
        # django >= 1.7
        from django.apps import apps
        model = apps.get_model(app_label, model_name)
    except ImportError:
        # django < 1.7
        from django.db.models import get_model
        model = get_model(app_label, model_name)
    # Renamed from 'object' to avoid shadowing the builtin
    obj = get_object_or_404(model, pk=object_id)
    perm_str = '%s.change_%s' % (app_label, model.__name__)
    # check only model-level (not object-level) permission
    if not request.user.has_perm(perm_str.lower()):
        raise PermissionDenied
    # Comparing with 0 flips both booleans and 0/1 integer flags
    setattr(obj, field, getattr(obj, field) == 0)
    obj.save()
    if request.is_ajax():
        return JsonResponse({'object_id': obj.pk, 'field': field, 'value': getattr(obj, field)})
    else:
        msg = _(u'flag %(field)s was changed for %(object)s') % {'field': field, 'object': obj}
        messages.success(request, msg)
        return HttpResponseRedirect(request.META.get('HTTP_REFERER', '/'))
def fit(
        self,
        df,
        similarity_type="jaccard",
        time_decay_coefficient=30,
        time_now=None,
        timedecay_formula=False,
        threshold=1,
    ):
        """Main fit method for SAR.

        Expects the dataframes to have row_id, col_id columns which are indexes,
        i.e. contain the sequential integer index of the original alphanumeric user and item IDs.
        Dataframe also contains rating and timestamp as floats; timestamp is in seconds since Epoch by default.

        Arguments:
            df (pySpark.DataFrame): input dataframe which contains the index of users and items.
            similarity_type (str): similarity metric to compute — compared against the
                SIM_JACCARD / SIM_LIFT / SIM_COOCCUR constants; anything else raises ValueError.
            time_decay_coefficient (float): half-life T of the exponential time decay, in days.
            time_now: currently only forwarded into the query formatting, not used in the SQL.
            timedecay_formula (bool): if True, aggregate each user/item pair's ratings with
                exponential time decay; otherwise keep only the latest rating per pair.
            threshold (int): item co-occurrence pairs with a count below this are dropped.
        """
        # threshold - items below this number get set to zero in coocurrence counts
        assert threshold > 0
        df.createOrReplaceTempView("{prefix}df_train_input".format(**self.header))
        if timedecay_formula:
            # WARNING: previously we would take the last value in training dataframe and set it
            # as a matrix U element
            # for each user-item pair. Now with time decay, we compute a sum over ratings given
            # by a user in the case
            # when T=np.inf, so user gets a cumulative sum of ratings for a particular item and
            # not the last rating.
            # Time Decay
            # do a group by on user item pairs and apply the formula for time decay there
            # Time T parameter is in days and input time is in seconds
            # so we do dt/60/(T*24*60)=dt/(T*24*3600)
            # the following is the query which we want to run
            query = self.f(
                """
            SELECT
            {col_user}, {col_item},
            SUM({col_rating} * EXP(-log(2) * (latest_timestamp - CAST({col_timestamp} AS long)) / ({time_decay_coefficient} * 3600 * 24))) as {col_rating}
            FROM {prefix}df_train_input,
            (SELECT CAST(MAX({col_timestamp}) AS long) latest_timestamp FROM {prefix}df_train_input)
            GROUP BY {col_user}, {col_item}
            CLUSTER BY {col_user}
            """,
                time_now=time_now,
                time_decay_coefficient=time_decay_coefficient,
            )
            # replace with timedecayed version
            df = self.spark.sql(query)
        else:
            # since SQL is case insensitive, this check needs to be performed similar
            if self.header['col_timestamp'].lower() in [s.name.lower() for s in df.schema]:
                # we need to de-duplicate items by using the latest item
                query = self.f(
                    """
                SELECT {col_user}, {col_item}, {col_rating}
                FROM
                (
                    SELECT
                        {col_user}, {col_item}, {col_rating},
                        ROW_NUMBER() OVER (PARTITION BY {col_user}, {col_item} ORDER BY {col_timestamp} DESC) latest
                    FROM {prefix}df_train_input
                )
                WHERE latest = 1
                """
                )
                df = self.spark.sql(query)
        df.createOrReplaceTempView(self.f("{prefix}df_train"))
        log.info("sarplus.fit 1/2: compute item cooccurences...")
        # compute cooccurrence above minimum threshold
        query = self.f(
            """
        SELECT A.{col_item} i1, B.{col_item} i2, COUNT(*) value
        FROM {prefix}df_train A INNER JOIN {prefix}df_train B
        ON A.{col_user} = B.{col_user} AND A.{col_item} <= b.{col_item}
        GROUP BY A.{col_item}, B.{col_item}
        HAVING COUNT(*) >= {threshold}
        CLUSTER BY i1, i2
        """,
            threshold=threshold,
        )
        item_cooccurrence = self.spark.sql(query)
        item_cooccurrence.write.mode("overwrite").saveAsTable(
            self.f("{prefix}item_cooccurrence")
        )
        # compute the diagonal used later for Jaccard and Lift
        if similarity_type == SIM_LIFT or similarity_type == SIM_JACCARD:
            item_marginal = self.spark.sql(
                self.f(
                    "SELECT i1 i, value AS margin FROM {prefix}item_cooccurrence WHERE i1 = i2"
                )
            )
            item_marginal.createOrReplaceTempView(self.f("{prefix}item_marginal"))
        if similarity_type == SIM_COOCCUR:
            self.item_similarity = item_cooccurrence
        elif similarity_type == SIM_JACCARD:
            query = self.f(
                """
            SELECT i1, i2, value / (M1.margin + M2.margin - value) AS value
            FROM {prefix}item_cooccurrence A
            INNER JOIN {prefix}item_marginal M1 ON A.i1 = M1.i
            INNER JOIN {prefix}item_marginal M2 ON A.i2 = M2.i
            CLUSTER BY i1, i2
            """
            )
            self.item_similarity = self.spark.sql(query)
        elif similarity_type == SIM_LIFT:
            query = self.f(
                """
            SELECT i1, i2, value / (M1.margin * M2.margin) AS value
            FROM {prefix}item_cooccurrence A
            INNER JOIN {prefix}item_marginal M1 ON A.i1 = M1.i
            INNER JOIN {prefix}item_marginal M2 ON A.i2 = M2.i
            CLUSTER BY i1, i2
            """
            )
            self.item_similarity = self.spark.sql(query)
        else:
            raise ValueError("Unknown similarity type: {0}".format(similarity_type))
        # store upper triangular
        log.info("sarplus.fit 2/2: compute similiarity metric %s..." % similarity_type)
        self.item_similarity.write.mode("overwrite").saveAsTable(
            self.f("{prefix}item_similarity_upper")
        )
        # expand upper triangular to full matrix
        query = self.f(
            """
        SELECT i1, i2, value
        FROM
        (
            (SELECT i1, i2, value FROM {prefix}item_similarity_upper)
            UNION ALL
            (SELECT i2 i1, i1 i2, value FROM {prefix}item_similarity_upper WHERE i1 <> i2)
        )
        CLUSTER BY i1
        """
        )
        self.item_similarity = self.spark.sql(query)
        self.item_similarity.write.mode("overwrite").saveAsTable(
            self.f("{prefix}item_similarity")
        )
        # free space
        self.spark.sql(self.f("DROP TABLE {prefix}item_cooccurrence"))
        self.spark.sql(self.f("DROP TABLE {prefix}item_similarity_upper"))
        self.item_similarity = self.spark.table(self.f("{prefix}item_similarity"))
def get_user_affinity(self, test):
        """Prepare test set for C++ SAR prediction code.
        Find all items the test users have seen in the past.
        Arguments:
            test (pySpark.DataFrame): input dataframe which contains test users.
        """
        test.createOrReplaceTempView(self.f("{prefix}df_test"))
        # Materialize the distinct set of test users into a table so the join
        # below can reuse it.
        query = self.f(
            "SELECT DISTINCT {col_user} FROM {prefix}df_test CLUSTER BY {col_user}"
        )
        df_test_users = self.spark.sql(query)
        df_test_users.write.mode("overwrite").saveAsTable(
            self.f("{prefix}df_test_users")
        )
        # Restrict the training interactions to those test users; rows are
        # distributed and sorted by user for the downstream consumer.
        query = self.f(
            """
        SELECT a.{col_user}, a.{col_item}, CAST(a.{col_rating} AS double) {col_rating}
        FROM {prefix}df_train a INNER JOIN {prefix}df_test_users b ON a.{col_user} = b.{col_user}
        DISTRIBUTE BY {col_user}
        SORT BY {col_user}, {col_item}
        """
        )
        return self.spark.sql(query)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.