source_code: string (45 to 7.24k characters)
function_name: string (1 to 111 characters)
messages: list (system / user / assistant chat messages)
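Each row below pairs a flattened source_code string and its function_name label with a messages list that presents the same example as a system/user/assistant conversation. As a minimal sketch (not part of the dataset itself), one way to read a row of this shape, assuming rows are available as JSON objects with these three keys; the file name below is hypothetical:

    import json

    # Load a single row; "example_row.json" is a hypothetical path.
    with open("example_row.json") as fh:
        row = json.load(fh)

    # The target label the assistant turn contains, e.g. "handle_events".
    print(row["function_name"])

    # The same example in chat format: a list of {"role", "content"} dicts.
    for message in row["messages"]:
        print(message["role"], "->", message["content"][:60])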
def x(self): for event in sys.stdin: if event.startswith('['): continue name = json.loads(event.lstrip(','))['name'] for obj in self.loader.objects: if obj.output_options['name'] == name: obj.on_click(json.loads(event.lstrip(',')))
handle_events
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(self):\n for event in sys.stdin:\n if event.startswith('['):\n continue\n name = json.loads(event.lstrip(','))['name']\n for obj in self.loader.objects:\n if obj.output_options['name'] == name:\n obj.on_click(json.loads(event.lstrip(',')))\n", "role": "user" }, { "content": "handle_events", "role": "assistant" } ]
def x(self, model): if self.field_filter: return dict([(f.name, f) for f in model._meta.fields if self. field_filter(f)]) else: return dict([(f.name, f) for f in model._meta.fields if not f.rel and not f.primary_key and not f.unique and not isinstance(f, ( models.AutoField, models.TextField))])
field_dict
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(self, model):\n if self.field_filter:\n return dict([(f.name, f) for f in model._meta.fields if self.\n field_filter(f)])\n else:\n return dict([(f.name, f) for f in model._meta.fields if not f.rel and\n not f.primary_key and not f.unique and not isinstance(f, (\n models.AutoField, models.TextField))])\n", "role": "user" }, { "content": "field_dict", "role": "assistant" } ]
def x(self): self.running = True while self.running: ret = self.func() self.output_dict[ret['name']] = ret time.sleep(self.interval) return
run
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(self):\n self.running = True\n while self.running:\n ret = self.func()\n self.output_dict[ret['name']] = ret\n time.sleep(self.interval)\n return\n", "role": "user" }, { "content": "run", "role": "assistant" } ]
def x(self, func, interval): t = Thread(func, interval, self.output_dict) t.start() self._thread_pool.append(t)
add_thread
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(self, func, interval):\n t = Thread(func, interval, self.output_dict)\n t.start()\n self._thread_pool.append(t)\n", "role": "user" }, { "content": "add_thread", "role": "assistant" } ]
def x(self): for f in glob.glob(os.path.join(self.dir_path, '*.py')): if not os.path.isfile(os.path.join(self.dir_path, f + 'c')): compileall.compile_dir(self.dir_path, quiet=True) logging.debug('Compiled plugins as a new plugin has been added.') return elif os.path.getmtime(os.path.join(self.dir_path, f) ) > os.path.getmtime(os.path.join(self.dir_path, f + 'c')): compileall.compile_dir(self.dir_path, quiet=True) logging.debug('Compiled plugins as a plugin has been changed.') return
compile_files
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(self):\n for f in glob.glob(os.path.join(self.dir_path, '*.py')):\n if not os.path.isfile(os.path.join(self.dir_path, f + 'c')):\n compileall.compile_dir(self.dir_path, quiet=True)\n logging.debug('Compiled plugins as a new plugin has been added.')\n return\n elif os.path.getmtime(os.path.join(self.dir_path, f)\n ) > os.path.getmtime(os.path.join(self.dir_path, f + 'c')):\n compileall.compile_dir(self.dir_path, quiet=True)\n logging.debug('Compiled plugins as a plugin has been changed.')\n return\n", "role": "user" }, { "content": "compile_files", "role": "assistant" } ]
def x(self, file_path): name = os.path.splitext(os.path.split(file_path)[-1])[0] plugin_directory = os.sep.join(os.path.split(file_path)[0:-1]) compiled_directory = os.path.join(plugin_directory, '__pycache__') compiled_file = glob.glob(os.path.join(compiled_directory, name + '.*'))[0] plugin = imp.load_compiled(name, compiled_file) return plugin
load_compiled
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(self, file_path):\n name = os.path.splitext(os.path.split(file_path)[-1])[0]\n plugin_directory = os.sep.join(os.path.split(file_path)[0:-1])\n compiled_directory = os.path.join(plugin_directory, '__pycache__')\n compiled_file = glob.glob(os.path.join(compiled_directory, name + '.*'))[0]\n plugin = imp.load_compiled(name, compiled_file)\n return plugin\n", "role": "user" }, { "content": "load_compiled", "role": "assistant" } ]
def x(self): objects = [] for settings in self._config: if settings['plugin'] in self.plugins: module = self.plugins[settings['plugin']] plugin_class = getattr(module, module.__all__) objects.append(plugin_class(settings)) logging.debug('Loaded a plugin object based upon {0}'.format( settings['plugin'])) else: logging.critical('Missing plugin {0} was not found in {1}'. format(settings['plugin'], self.dir_path)) raise MissingPlugin('The plugin {0} was not found in {1}'. format(settings['plugin'], self.dir_path)) return objects
load_objects
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(self):\n objects = []\n for settings in self._config:\n if settings['plugin'] in self.plugins:\n module = self.plugins[settings['plugin']]\n plugin_class = getattr(module, module.__all__)\n objects.append(plugin_class(settings))\n logging.debug('Loaded a plugin object based upon {0}'.format(\n settings['plugin']))\n else:\n logging.critical('Missing plugin {0} was not found in {1}'.\n format(settings['plugin'], self.dir_path))\n raise MissingPlugin('The plugin {0} was not found in {1}'.\n format(settings['plugin'], self.dir_path))\n return objects\n", "role": "user" }, { "content": "load_objects", "role": "assistant" } ]
def x(self): plugins = {} _plugin_files = glob.glob(os.path.join(self.dir_path, '[!_]*.pyc')) for f in glob.glob(os.path.join(self.dir_path, '[!_]*.py')): if not any(os.path.splitext(f)[0] == os.path.splitext(x)[0] for x in _plugin_files): logging.debug('Adding plugin {0}'.format(f)) _plugin_files.append(f) for f in _plugin_files: plugin = self._load_compiled(f) plugins[plugin.__name__] = plugin logging.debug('Loaded module object for plugin: {0}'.format(f)) return plugins
refresh_files
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(self):\n plugins = {}\n _plugin_files = glob.glob(os.path.join(self.dir_path, '[!_]*.pyc'))\n for f in glob.glob(os.path.join(self.dir_path, '[!_]*.py')):\n if not any(os.path.splitext(f)[0] == os.path.splitext(x)[0] for x in\n _plugin_files):\n logging.debug('Adding plugin {0}'.format(f))\n _plugin_files.append(f)\n for f in _plugin_files:\n plugin = self._load_compiled(f)\n plugins[plugin.__name__] = plugin\n logging.debug('Loaded module object for plugin: {0}'.format(f))\n return plugins\n", "role": "user" }, { "content": "refresh_files", "role": "assistant" } ]
def x(self, request, url): if url is None: return self.main_view(request) try: plugin_name, rest_of_url = url.split('/', 1) except ValueError: plugin_name, rest_of_url = url, None try: plugin = self.plugins[plugin_name] except KeyError: raise http.Http404('A plugin with the requested name does not exist.') return plugin.model_view(request, self, rest_of_url)
root
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(self, request, url):\n if url is None:\n return self.main_view(request)\n try:\n plugin_name, rest_of_url = url.split('/', 1)\n except ValueError:\n plugin_name, rest_of_url = url, None\n try:\n plugin = self.plugins[plugin_name]\n except KeyError:\n raise http.Http404('A plugin with the requested name does not exist.')\n return plugin.model_view(request, self, rest_of_url)\n", "role": "user" }, { "content": "root", "role": "assistant" } ]
def x(self, *model_list, **options): databrowse_class = options.pop('databrowse_class', DefaultModelDatabrowse) for model in model_list: if model in self.registry: raise AlreadyRegistered('The model %s is already registered' % model.__name__) self.registry[model] = databrowse_class
register
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(self, *model_list, **options):\n databrowse_class = options.pop('databrowse_class', DefaultModelDatabrowse)\n for model in model_list:\n if model in self.registry:\n raise AlreadyRegistered('The model %s is already registered' %\n model.__name__)\n self.registry[model] = databrowse_class\n", "role": "user" }, { "content": "register", "role": "assistant" } ]
def x(self, *model_list): for model in model_list: if model not in self.registry: raise NotRegistered('The model %s is not registered' % model. __name__) del self.registry[model]
unregister
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(self, *model_list):\n for model in model_list:\n if model not in self.registry:\n raise NotRegistered('The model %s is not registered' % model.\n __name__)\n del self.registry[model]\n", "role": "user" }, { "content": "unregister", "role": "assistant" } ]
def x(self, request, url): self.root_url = request.path[:len(request.path) - len(url)] url = url.rstrip('/') if url == '': return self.index(request) elif '/' in url: return self.model_page(request, *url.split('/', 2)) raise http.Http404('The requested databrowse page does not exist.')
root
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(self, request, url):\n self.root_url = request.path[:len(request.path) - len(url)]\n url = url.rstrip('/')\n if url == '':\n return self.index(request)\n elif '/' in url:\n return self.model_page(request, *url.split('/', 2))\n raise http.Http404('The requested databrowse page does not exist.')\n", "role": "user" }, { "content": "root", "role": "assistant" } ]
def x(self, request, app_label, model_name, rest_of_url=None): try: model = get_model(app_label, model_name) except LookupError: model = None if model is None: raise http.Http404('App %r, model %r, not found.' % (app_label, model_name)) try: databrowse_class = self.registry[model] except KeyError: raise http.Http404( 'This model exists but has not been registered with databrowse.') return databrowse_class(model, self).root(request, rest_of_url)
model_page
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(self, request, app_label, model_name, rest_of_url=None):\n try:\n model = get_model(app_label, model_name)\n except LookupError:\n model = None\n if model is None:\n raise http.Http404('App %r, model %r, not found.' % (app_label,\n model_name))\n try:\n databrowse_class = self.registry[model]\n except KeyError:\n raise http.Http404(\n 'This model exists but has not been registered with databrowse.')\n return databrowse_class(model, self).root(request, rest_of_url)\n", "role": "user" }, { "content": "model_page", "role": "assistant" } ]
def x(self): if self.field.rel: if isinstance(self.field.rel, models.ManyToOneRel): objs = getattr(self.instance.instance, self.field.name) elif isinstance(self.field.rel, models.ManyToManyRel): return list(getattr(self.instance.instance, self.field.name).all()) elif self.field.choices: objs = dict(self.field.choices).get(self.raw_value, EMPTY_VALUE) elif isinstance(self.field, models.DateField) or isinstance(self.field, models.TimeField): if self.raw_value: if isinstance(self.field, models.DateTimeField): objs = capfirst(formats.date_format(self.raw_value, 'DATETIME_FORMAT')) elif isinstance(self.field, models.TimeField): objs = capfirst(formats.time_format(self.raw_value, 'TIME_FORMAT')) else: objs = capfirst(formats.date_format(self.raw_value, 'DATE_FORMAT')) else: objs = EMPTY_VALUE elif isinstance(self.field, models.BooleanField) or isinstance(self. field, models.NullBooleanField): objs = {(True): 'Yes', (False): 'No', None: 'Unknown'}[self.raw_value] else: objs = self.raw_value return [objs]
values
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(self):\n if self.field.rel:\n if isinstance(self.field.rel, models.ManyToOneRel):\n objs = getattr(self.instance.instance, self.field.name)\n elif isinstance(self.field.rel, models.ManyToManyRel):\n return list(getattr(self.instance.instance, self.field.name).all())\n elif self.field.choices:\n objs = dict(self.field.choices).get(self.raw_value, EMPTY_VALUE)\n elif isinstance(self.field, models.DateField) or isinstance(self.field,\n models.TimeField):\n if self.raw_value:\n if isinstance(self.field, models.DateTimeField):\n objs = capfirst(formats.date_format(self.raw_value,\n 'DATETIME_FORMAT'))\n elif isinstance(self.field, models.TimeField):\n objs = capfirst(formats.time_format(self.raw_value,\n 'TIME_FORMAT'))\n else:\n objs = capfirst(formats.date_format(self.raw_value,\n 'DATE_FORMAT'))\n else:\n objs = EMPTY_VALUE\n elif isinstance(self.field, models.BooleanField) or isinstance(self.\n field, models.NullBooleanField):\n objs = {(True): 'Yes', (False): 'No', None: 'Unknown'}[self.raw_value]\n else:\n objs = self.raw_value\n return [objs]\n", "role": "user" }, { "content": "values", "role": "assistant" } ]
def x(self): plugin_urls = [] for plugin_name, plugin in self.model.model_databrowse().plugins.items(): urls = plugin.urls(plugin_name, self) if urls is not None: values = self.values() return zip(self.values(), urls) if self.field.rel: m = EasyModel(self.model.site, self.field.rel.to) if self.field.rel.to in self.model.model_list: lst = [] for value in self.values(): if value is None: continue url = mark_safe('%s%s/%s/objects/%s/' % (self.model.site. root_url, m.model._meta.app_label, m.model._meta. model_name, iri_to_uri(value._get_pk_val()))) lst.append((smart_text(value), url)) else: lst = [(value, None) for value in self.values()] elif self.field.choices: lst = [] for value in self.values(): url = mark_safe('%s%s/%s/fields/%s/%s/' % (self.model.site. root_url, self.model.model._meta.app_label, self.model. model._meta.model_name, self.field.name, iri_to_uri(self. raw_value))) lst.append((value, url)) elif isinstance(self.field, models.URLField): val = self.values()[0] lst = [(val, iri_to_uri(val))] else: lst = [(self.values()[0], None)] return lst
urls
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(self):\n plugin_urls = []\n for plugin_name, plugin in self.model.model_databrowse().plugins.items():\n urls = plugin.urls(plugin_name, self)\n if urls is not None:\n values = self.values()\n return zip(self.values(), urls)\n if self.field.rel:\n m = EasyModel(self.model.site, self.field.rel.to)\n if self.field.rel.to in self.model.model_list:\n lst = []\n for value in self.values():\n if value is None:\n continue\n url = mark_safe('%s%s/%s/objects/%s/' % (self.model.site.\n root_url, m.model._meta.app_label, m.model._meta.\n model_name, iri_to_uri(value._get_pk_val())))\n lst.append((smart_text(value), url))\n else:\n lst = [(value, None) for value in self.values()]\n elif self.field.choices:\n lst = []\n for value in self.values():\n url = mark_safe('%s%s/%s/fields/%s/%s/' % (self.model.site.\n root_url, self.model.model._meta.app_label, self.model.\n model._meta.model_name, self.field.name, iri_to_uri(self.\n raw_value)))\n lst.append((value, url))\n elif isinstance(self.field, models.URLField):\n val = self.values()[0]\n lst = [(val, iri_to_uri(val))]\n else:\n lst = [(self.values()[0], None)]\n return lst\n", "role": "user" }, { "content": "urls", "role": "assistant" } ]
def x(self, model): if self.field_names is None: return dict([(f.name, f) for f in model._meta.fields if isinstance( f, models.DateField)]) else: return dict([(f.name, f) for f in model._meta.fields if isinstance( f, models.DateField) and f.name in self.field_names])
field_dict
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(self, model):\n if self.field_names is None:\n return dict([(f.name, f) for f in model._meta.fields if isinstance(\n f, models.DateField)])\n else:\n return dict([(f.name, f) for f in model._meta.fields if isinstance(\n f, models.DateField) and f.name in self.field_names])\n", "role": "user" }, { "content": "field_dict", "role": "assistant" } ]
def x(job, input_file, output_path, kmer_length, spark_conf, workers, cores, memory, sudo): require(spark_conf is not None and workers is None or workers is not None and cores is not None and memory is not None and spark_conf is not None, 'Either worker count (--workers) must be defined or user must pass in Spark configuration (--spark-conf).' ) if spark_conf is None: master_hostname = spawn_spark_cluster(job, sudo, workers, cores) else: spark_conf = shlex.split(spark_conf) job.addChildJobFn(download_count_upload, masterHostname, input_file, output_file, kmer_length, spark_conf, memory, sudo)
kmer_dag
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(job, input_file, output_path, kmer_length, spark_conf, workers, cores,\n memory, sudo):\n require(spark_conf is not None and workers is None or workers is not\n None and cores is not None and memory is not None and spark_conf is not\n None,\n 'Either worker count (--workers) must be defined or user must pass in Spark configuration (--spark-conf).'\n )\n if spark_conf is None:\n master_hostname = spawn_spark_cluster(job, sudo, workers, cores)\n else:\n spark_conf = shlex.split(spark_conf)\n job.addChildJobFn(download_count_upload, masterHostname, input_file,\n output_file, kmer_length, spark_conf, memory, sudo)\n", "role": "user" }, { "content": "kmer_dag", "role": "assistant" } ]
def x(job, master_ip, input_file, output_file, kmer_length, spark_conf, memory, sudo): if master_ip is not None: hdfs_dir = 'hdfs://{0}:{1}/'.format(master_ip, HDFS_MASTER_PORT) else: _log.warn( 'Master IP is not set. If default filesystem is not set, jobs may fail.' ) hdfs_dir = '' hdfs_input_file = hdfs_dir if input_file.startswith('s3://'): hdfs_input_file += input_file.split('/')[-1] _log.info('Downloading input file %s to %s.', input_file, hdfs_input_file) call_conductor(job, master_ip, input_file, hdfs_input_file, memory= memory, override_parameters=spark_conf) elif not input_file.startswith('hdfs://'): _log.warn('If not in S3, input file (%s) expected to be in HDFS (%s).', input_file, hdfs_dir) run_upload = True hdfs_output_file = hdfs_dir + 'kmer_output.txt' if output_file.startswith(hdfs_dir): run_upload = False hdfs_output_file = output_file if hdfs_input_file.endswith('.bam') or hdfs_input_file.endswith('.sam' ) or hdfs_input_file.endswith('.fq') or hdfs_input_file.endswith( '.fastq'): hdfs_tmp_file = hdfs_input_file hdfs_input_file = '.'.join(hdfs_input_file.split('.')[:-1].append( 'adam')) _log.info('Converting %s into ADAM format at %s.', hdfs_tmp_file, hdfs_input_file) call_adam(job, master_ip, ['transform', hdfs_tmp_file, hdfs_input_file], memory=memory, override_parameters=spark_conf) _log.info('Counting %d-mers in %s, and saving to %s.', kmer_length, hdfs_input_file, hdfs_output_file) call_adam(job, master_ip, ['count_kmers', hdfs_input_file, hdfs_output_file, str(kmer_length)], memory=memory, override_parameters=spark_conf) if run_upload: _log.info('Uploading output file %s to %s.', hdfs_output_file, output_file) call_conductor(job, master_ip, hdfs_output_file, output_file, memory=memory, override_parameters=spark_conf)
download_count_upload
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(job, master_ip, input_file, output_file, kmer_length, spark_conf,\n memory, sudo):\n if master_ip is not None:\n hdfs_dir = 'hdfs://{0}:{1}/'.format(master_ip, HDFS_MASTER_PORT)\n else:\n _log.warn(\n 'Master IP is not set. If default filesystem is not set, jobs may fail.'\n )\n hdfs_dir = ''\n hdfs_input_file = hdfs_dir\n if input_file.startswith('s3://'):\n hdfs_input_file += input_file.split('/')[-1]\n _log.info('Downloading input file %s to %s.', input_file,\n hdfs_input_file)\n call_conductor(job, master_ip, input_file, hdfs_input_file, memory=\n memory, override_parameters=spark_conf)\n elif not input_file.startswith('hdfs://'):\n _log.warn('If not in S3, input file (%s) expected to be in HDFS (%s).',\n input_file, hdfs_dir)\n run_upload = True\n hdfs_output_file = hdfs_dir + 'kmer_output.txt'\n if output_file.startswith(hdfs_dir):\n run_upload = False\n hdfs_output_file = output_file\n if hdfs_input_file.endswith('.bam') or hdfs_input_file.endswith('.sam'\n ) or hdfs_input_file.endswith('.fq') or hdfs_input_file.endswith(\n '.fastq'):\n hdfs_tmp_file = hdfs_input_file\n hdfs_input_file = '.'.join(hdfs_input_file.split('.')[:-1].append(\n 'adam'))\n _log.info('Converting %s into ADAM format at %s.', hdfs_tmp_file,\n hdfs_input_file)\n call_adam(job, master_ip, ['transform', hdfs_tmp_file,\n hdfs_input_file], memory=memory, override_parameters=spark_conf)\n _log.info('Counting %d-mers in %s, and saving to %s.', kmer_length,\n hdfs_input_file, hdfs_output_file)\n call_adam(job, master_ip, ['count_kmers', hdfs_input_file,\n hdfs_output_file, str(kmer_length)], memory=memory,\n override_parameters=spark_conf)\n if run_upload:\n _log.info('Uploading output file %s to %s.', hdfs_output_file,\n output_file)\n call_conductor(job, master_ip, hdfs_output_file, output_file,\n memory=memory, override_parameters=spark_conf)\n", "role": "user" }, { "content": "download_count_upload", "role": "assistant" } ]
def x(): parser = argparse.ArgumentParser() parser.add_argument('--input_path', help= 'The full path to the input SAM/BAM/ADAM/FASTQ file.') parser.add_argument('--output-path', help= 'full path where final results will be output.') parser.add_argument('--kmer-length', help= 'Length to use for k-mer counting. Defaults to 20.', default=20, type=int) parser.add_argument('--spark-conf', help= 'Optional configuration to pass to Spark commands. Either this or --workers must be specified.' , default=None) parser.add_argument('--memory', help= 'Optional memory configuration for Spark workers/driver. This must be specified if --workers is specified.' , default=None, type=int) parser.add_argument('--cores', help= 'Optional core configuration for Spark workers/driver. This must be specified if --workers is specified.' , default=None, type=int) parser.add_argument('--workers', help= 'Number of workers to spin up in Toil. Either this or --spark-conf must be specified. If this is specified, --memory and --cores must be specified.' , default=None, type=int) parser.add_argument('--sudo', help= 'Run docker containers with sudo. Defaults to False.', default= False, action='store_true') Job.Runner.addToilOptions(parser) args = parser.parse_args() Job.Runner.startToil(Job.wrapJobFn(kmer_dag, args.kmer_length, args. input_path, args.output_path, args.spark_conf, args.workers, args. cores, args.memory, args.sudo, checkpoint=True), args)
main
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x():\n parser = argparse.ArgumentParser()\n parser.add_argument('--input_path', help=\n 'The full path to the input SAM/BAM/ADAM/FASTQ file.')\n parser.add_argument('--output-path', help=\n 'full path where final results will be output.')\n parser.add_argument('--kmer-length', help=\n 'Length to use for k-mer counting. Defaults to 20.', default=20,\n type=int)\n parser.add_argument('--spark-conf', help=\n 'Optional configuration to pass to Spark commands. Either this or --workers must be specified.'\n , default=None)\n parser.add_argument('--memory', help=\n 'Optional memory configuration for Spark workers/driver. This must be specified if --workers is specified.'\n , default=None, type=int)\n parser.add_argument('--cores', help=\n 'Optional core configuration for Spark workers/driver. This must be specified if --workers is specified.'\n , default=None, type=int)\n parser.add_argument('--workers', help=\n 'Number of workers to spin up in Toil. Either this or --spark-conf must be specified. If this is specified, --memory and --cores must be specified.'\n , default=None, type=int)\n parser.add_argument('--sudo', help=\n 'Run docker containers with sudo. Defaults to False.', default=\n False, action='store_true')\n Job.Runner.addToilOptions(parser)\n args = parser.parse_args()\n Job.Runner.startToil(Job.wrapJobFn(kmer_dag, args.kmer_length, args.\n input_path, args.output_path, args.spark_conf, args.workers, args.\n cores, args.memory, args.sudo, checkpoint=True), args)\n", "role": "user" }, { "content": "main", "role": "assistant" } ]
def x(job, samples, config): work_dir = job.fileStore.getLocalTempDir() st = os.statvfs(work_dir) config.available_disk = st.f_bavail * st.f_frsize num_samples = len(samples) if config.joint_genotype and not 30 < num_samples < 200: job.fileStore.logToMaster( 'WARNING: GATK recommends batches of 30 to 200 samples for joint genotyping. The current cohort has %d samples.' % num_samples) shared_files = Job.wrapJobFn(download_shared_files, config).encapsulate() job.addChild(shared_files) if config.preprocess_only: for sample in samples: shared_files.addChildJobFn(prepare_bam, sample.uuid, sample.url, shared_files.rv(), paired_url=sample.paired_url, rg_line= sample.rg_line) else: run_pipeline = Job.wrapJobFn(gatk_germline_pipeline, samples, shared_files.rv()).encapsulate() shared_files.addChild(run_pipeline) if config.run_oncotator: annotate = Job.wrapJobFn(annotate_vcfs, run_pipeline.rv(), shared_files.rv()) run_pipeline.addChild(annotate)
run_gatk_germline_pipeline
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(job, samples, config):\n work_dir = job.fileStore.getLocalTempDir()\n st = os.statvfs(work_dir)\n config.available_disk = st.f_bavail * st.f_frsize\n num_samples = len(samples)\n if config.joint_genotype and not 30 < num_samples < 200:\n job.fileStore.logToMaster(\n 'WARNING: GATK recommends batches of 30 to 200 samples for joint genotyping. The current cohort has %d samples.'\n % num_samples)\n shared_files = Job.wrapJobFn(download_shared_files, config).encapsulate()\n job.addChild(shared_files)\n if config.preprocess_only:\n for sample in samples:\n shared_files.addChildJobFn(prepare_bam, sample.uuid, sample.url,\n shared_files.rv(), paired_url=sample.paired_url, rg_line=\n sample.rg_line)\n else:\n run_pipeline = Job.wrapJobFn(gatk_germline_pipeline, samples,\n shared_files.rv()).encapsulate()\n shared_files.addChild(run_pipeline)\n if config.run_oncotator:\n annotate = Job.wrapJobFn(annotate_vcfs, run_pipeline.rv(),\n shared_files.rv())\n run_pipeline.addChild(annotate)\n", "role": "user" }, { "content": "run_gatk_germline_pipeline", "role": "assistant" } ]
def x(job, samples, config): require(len(samples) > 0, 'No samples were provided!') genome_ref_size = (config.genome_fasta.size + config.genome_fai.size + config.genome_dict.size) group_bam_jobs = Job() gvcfs = {} for sample in samples: get_bam = group_bam_jobs.addChildJobFn(prepare_bam, sample.uuid, sample.url, config, paired_url=sample.paired_url, rg_line= sample.rg_line) hc_disk = PromisedRequirement(lambda bam, bai, ref_size: 2 * bam. size + bai.size + ref_size, get_bam.rv(0), get_bam.rv(1), genome_ref_size) get_gvcf = get_bam.addFollowOnJobFn(gatk_haplotype_caller, get_bam. rv(0), get_bam.rv(1), config.genome_fasta, config.genome_fai, config.genome_dict, annotations=config.annotations, cores= config.cores, disk=hc_disk, memory=config.xmx, hc_output=config .hc_output) gvcfs[sample.uuid] = get_gvcf.rv() vqsr_name = '{}{}.g.vcf'.format(sample.uuid, config.suffix) get_gvcf.addChildJobFn(output_file_job, vqsr_name, get_gvcf.rv(), os.path.join(config.output_dir, sample.uuid), s3_key_path= config.ssec, disk=PromisedRequirement(lambda x: x.size, get_gvcf.rv())) filtered_vcfs = {} if config.joint_genotype: filtered_vcfs = group_bam_jobs.addFollowOnJobFn( joint_genotype_and_filter, gvcfs, config).rv() else: for uuid, gvcf_id in gvcfs.iteritems(): filtered_vcfs[uuid] = group_bam_jobs.addFollowOnJobFn( genotype_and_filter, {uuid: gvcf_id}, config).rv() job.addChild(group_bam_jobs) return filtered_vcfs
gatk_germline_pipeline
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(job, samples, config):\n require(len(samples) > 0, 'No samples were provided!')\n genome_ref_size = (config.genome_fasta.size + config.genome_fai.size +\n config.genome_dict.size)\n group_bam_jobs = Job()\n gvcfs = {}\n for sample in samples:\n get_bam = group_bam_jobs.addChildJobFn(prepare_bam, sample.uuid,\n sample.url, config, paired_url=sample.paired_url, rg_line=\n sample.rg_line)\n hc_disk = PromisedRequirement(lambda bam, bai, ref_size: 2 * bam.\n size + bai.size + ref_size, get_bam.rv(0), get_bam.rv(1),\n genome_ref_size)\n get_gvcf = get_bam.addFollowOnJobFn(gatk_haplotype_caller, get_bam.\n rv(0), get_bam.rv(1), config.genome_fasta, config.genome_fai,\n config.genome_dict, annotations=config.annotations, cores=\n config.cores, disk=hc_disk, memory=config.xmx, hc_output=config\n .hc_output)\n gvcfs[sample.uuid] = get_gvcf.rv()\n vqsr_name = '{}{}.g.vcf'.format(sample.uuid, config.suffix)\n get_gvcf.addChildJobFn(output_file_job, vqsr_name, get_gvcf.rv(),\n os.path.join(config.output_dir, sample.uuid), s3_key_path=\n config.ssec, disk=PromisedRequirement(lambda x: x.size,\n get_gvcf.rv()))\n filtered_vcfs = {}\n if config.joint_genotype:\n filtered_vcfs = group_bam_jobs.addFollowOnJobFn(\n joint_genotype_and_filter, gvcfs, config).rv()\n else:\n for uuid, gvcf_id in gvcfs.iteritems():\n filtered_vcfs[uuid] = group_bam_jobs.addFollowOnJobFn(\n genotype_and_filter, {uuid: gvcf_id}, config).rv()\n job.addChild(group_bam_jobs)\n return filtered_vcfs\n", "role": "user" }, { "content": "gatk_germline_pipeline", "role": "assistant" } ]
def x(job, gvcfs, config): genome_ref_size = (config.genome_fasta.size + config.genome_fai.size + config.genome_dict.size) cohort_size = sum(gvcf.size for gvcf in gvcfs.values()) require(int(2.5 * cohort_size + genome_ref_size) < config. available_disk, """There is not enough disk space to joint genotype samples: {}""". format('\n'.join(gvcfs.keys()))) job.fileStore.logToMaster('Merging cohort into a single GVCF file') return job.addChildJobFn(genotype_and_filter, gvcfs, config).rv()
joint_genotype_and_filter
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(job, gvcfs, config):\n genome_ref_size = (config.genome_fasta.size + config.genome_fai.size +\n config.genome_dict.size)\n cohort_size = sum(gvcf.size for gvcf in gvcfs.values())\n require(int(2.5 * cohort_size + genome_ref_size) < config.\n available_disk,\n \"\"\"There is not enough disk space to joint genotype samples:\n{}\"\"\".\n format('\\n'.join(gvcfs.keys())))\n job.fileStore.logToMaster('Merging cohort into a single GVCF file')\n return job.addChildJobFn(genotype_and_filter, gvcfs, config).rv()\n", "role": "user" }, { "content": "joint_genotype_and_filter", "role": "assistant" } ]
def x(job, gvcfs, config): genome_ref_size = (config.genome_fasta.size + config.genome_fai.size + config.genome_dict.size) genotype_gvcf_disk = PromisedRequirement(lambda gvcf_ids, ref_size: 2 * sum(gvcf_.size for gvcf_ in gvcf_ids) + ref_size, gvcfs.values(), genome_ref_size) genotype_gvcf = job.addChildJobFn(gatk_genotype_gvcfs, gvcfs, config. genome_fasta, config.genome_fai, config.genome_dict, annotations= config.annotations, unsafe_mode=config.unsafe_mode, cores=config. cores, disk=genotype_gvcf_disk, memory=config.xmx) if len(gvcfs) == 1: uuid = gvcfs.keys()[0] else: uuid = 'joint_genotyped' genotyped_filename = '%s.genotyped%s.vcf' % (uuid, config.suffix) genotype_gvcf.addChildJobFn(output_file_job, genotyped_filename, genotype_gvcf.rv(), os.path.join(config.output_dir, uuid), s3_key_path=config.ssec, disk=PromisedRequirement(lambda x: x.size, genotype_gvcf.rv())) if config.run_vqsr: if not config.joint_genotype: job.fileStore.logToMaster( 'WARNING: Running VQSR without joint genotyping.') joint_genotype_vcf = genotype_gvcf.addFollowOnJobFn(vqsr_pipeline, uuid, genotype_gvcf.rv(), config) else: joint_genotype_vcf = genotype_gvcf.addFollowOnJobFn( hard_filter_pipeline, uuid, genotype_gvcf.rv(), config) return joint_genotype_vcf.rv()
genotype_and_filter
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(job, gvcfs, config):\n genome_ref_size = (config.genome_fasta.size + config.genome_fai.size +\n config.genome_dict.size)\n genotype_gvcf_disk = PromisedRequirement(lambda gvcf_ids, ref_size: 2 *\n sum(gvcf_.size for gvcf_ in gvcf_ids) + ref_size, gvcfs.values(),\n genome_ref_size)\n genotype_gvcf = job.addChildJobFn(gatk_genotype_gvcfs, gvcfs, config.\n genome_fasta, config.genome_fai, config.genome_dict, annotations=\n config.annotations, unsafe_mode=config.unsafe_mode, cores=config.\n cores, disk=genotype_gvcf_disk, memory=config.xmx)\n if len(gvcfs) == 1:\n uuid = gvcfs.keys()[0]\n else:\n uuid = 'joint_genotyped'\n genotyped_filename = '%s.genotyped%s.vcf' % (uuid, config.suffix)\n genotype_gvcf.addChildJobFn(output_file_job, genotyped_filename,\n genotype_gvcf.rv(), os.path.join(config.output_dir, uuid),\n s3_key_path=config.ssec, disk=PromisedRequirement(lambda x: x.size,\n genotype_gvcf.rv()))\n if config.run_vqsr:\n if not config.joint_genotype:\n job.fileStore.logToMaster(\n 'WARNING: Running VQSR without joint genotyping.')\n joint_genotype_vcf = genotype_gvcf.addFollowOnJobFn(vqsr_pipeline,\n uuid, genotype_gvcf.rv(), config)\n else:\n joint_genotype_vcf = genotype_gvcf.addFollowOnJobFn(\n hard_filter_pipeline, uuid, genotype_gvcf.rv(), config)\n return joint_genotype_vcf.rv()\n", "role": "user" }, { "content": "genotype_and_filter", "role": "assistant" } ]
def x(job, vcfs, config): job.fileStore.logToMaster( 'Running Oncotator on the following samples:\n%s' % '\n'.join(vcfs. keys())) for uuid, vcf_id in vcfs.iteritems(): onco_disk = PromisedRequirement(lambda vcf, db: 3 * vcf.size + db. size, vcf_id, config.oncotator_db) annotated_vcf = job.addChildJobFn(run_oncotator, vcf_id, config. oncotator_db, disk=onco_disk, cores=config.cores, memory=config.xmx ) output_dir = os.path.join(config.output_dir, uuid) filename = '{}.oncotator{}.vcf'.format(uuid, config.suffix) annotated_vcf.addChildJobFn(output_file_job, filename, annotated_vcf.rv(), output_dir, s3_key_path=config.ssec, disk= PromisedRequirement(lambda x: x.size, annotated_vcf.rv()))
annotate_vcfs
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(job, vcfs, config):\n job.fileStore.logToMaster(\n 'Running Oncotator on the following samples:\\n%s' % '\\n'.join(vcfs.\n keys()))\n for uuid, vcf_id in vcfs.iteritems():\n onco_disk = PromisedRequirement(lambda vcf, db: 3 * vcf.size + db.\n size, vcf_id, config.oncotator_db)\n annotated_vcf = job.addChildJobFn(run_oncotator, vcf_id, config.\n oncotator_db, disk=onco_disk, cores=config.cores, memory=config.xmx\n )\n output_dir = os.path.join(config.output_dir, uuid)\n filename = '{}.oncotator{}.vcf'.format(uuid, config.suffix)\n annotated_vcf.addChildJobFn(output_file_job, filename,\n annotated_vcf.rv(), output_dir, s3_key_path=config.ssec, disk=\n PromisedRequirement(lambda x: x.size, annotated_vcf.rv()))\n", "role": "user" }, { "content": "annotate_vcfs", "role": "assistant" } ]
def x(path_to_manifest): bam_re = '^(?P<uuid>\\S+)\\s(?P<url>\\S+[bsc][r]?am)' fq_re = ( '^(?P<uuid>\\S+)\\s(?P<url>\\S+)\\s(?P<paired_url>\\S+)?\\s?(?P<rg_line>@RG\\S+)' ) samples = [] with open(path_to_manifest, 'r') as f: for line in f.readlines(): line = line.strip() if line.startswith('#'): continue bam_match = re.match(bam_re, line) fastq_match = re.match(fq_re, line) if bam_match: uuid = bam_match.group('uuid') url = bam_match.group('url') paired_url = None rg_line = None require('.bam' in url.lower(), 'Expected .bam extension:\n{}:\t{}'.format(uuid, url)) elif fastq_match: uuid = fastq_match.group('uuid') url = fastq_match.group('url') paired_url = fastq_match.group('paired_url') rg_line = fastq_match.group('rg_line') require('.fq' in url.lower() or '.fastq' in url.lower(), """Expected .fq extension: {}: {}""".format(uuid, url)) else: raise ValueError( 'Could not parse entry in manifest: %s\n%s' % (f.name, line)) require(urlparse(url).scheme, 'Invalid URL passed for {}'. format(url)) samples.append(GermlineSample(uuid, url, paired_url, rg_line)) return samples
parse_manifest
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(path_to_manifest):\n bam_re = '^(?P<uuid>\\\\S+)\\\\s(?P<url>\\\\S+[bsc][r]?am)'\n fq_re = (\n '^(?P<uuid>\\\\S+)\\\\s(?P<url>\\\\S+)\\\\s(?P<paired_url>\\\\S+)?\\\\s?(?P<rg_line>@RG\\\\S+)'\n )\n samples = []\n with open(path_to_manifest, 'r') as f:\n for line in f.readlines():\n line = line.strip()\n if line.startswith('#'):\n continue\n bam_match = re.match(bam_re, line)\n fastq_match = re.match(fq_re, line)\n if bam_match:\n uuid = bam_match.group('uuid')\n url = bam_match.group('url')\n paired_url = None\n rg_line = None\n require('.bam' in url.lower(),\n 'Expected .bam extension:\\n{}:\\t{}'.format(uuid, url))\n elif fastq_match:\n uuid = fastq_match.group('uuid')\n url = fastq_match.group('url')\n paired_url = fastq_match.group('paired_url')\n rg_line = fastq_match.group('rg_line')\n require('.fq' in url.lower() or '.fastq' in url.lower(),\n \"\"\"Expected .fq extension:\n{}:\t{}\"\"\".format(uuid, url))\n else:\n raise ValueError(\n 'Could not parse entry in manifest: %s\\n%s' % (f.name,\n line))\n require(urlparse(url).scheme, 'Invalid URL passed for {}'.\n format(url))\n samples.append(GermlineSample(uuid, url, paired_url, rg_line))\n return samples\n", "role": "user" }, { "content": "parse_manifest", "role": "assistant" } ]
def x(job, config): job.fileStore.logToMaster('Downloading shared reference files') shared_files = {'genome_fasta', 'genome_fai', 'genome_dict'} nonessential_files = {'genome_fai', 'genome_dict'} if config.run_bwa: shared_files |= {'amb', 'ann', 'bwt', 'pac', 'sa', 'alt'} nonessential_files.add('alt') if config.preprocess: shared_files |= {'g1k_indel', 'mills', 'dbsnp'} if config.run_vqsr: shared_files |= {'g1k_snp', 'mills', 'dbsnp', 'hapmap', 'omni'} if config.run_oncotator: shared_files.add('oncotator_db') for name in shared_files: try: url = getattr(config, name, None) if url is None: continue setattr(config, name, job.addChildJobFn(download_url_job, url, name=name, s3_key_path=config.ssec, disk='15G').rv()) finally: if getattr(config, name, None ) is None and name not in nonessential_files: raise ValueError( 'Necessary configuration parameter is missing:\n{}'. format(name)) return job.addFollowOnJobFn(reference_preprocessing, config).rv()
download_shared_files
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(job, config):\n job.fileStore.logToMaster('Downloading shared reference files')\n shared_files = {'genome_fasta', 'genome_fai', 'genome_dict'}\n nonessential_files = {'genome_fai', 'genome_dict'}\n if config.run_bwa:\n shared_files |= {'amb', 'ann', 'bwt', 'pac', 'sa', 'alt'}\n nonessential_files.add('alt')\n if config.preprocess:\n shared_files |= {'g1k_indel', 'mills', 'dbsnp'}\n if config.run_vqsr:\n shared_files |= {'g1k_snp', 'mills', 'dbsnp', 'hapmap', 'omni'}\n if config.run_oncotator:\n shared_files.add('oncotator_db')\n for name in shared_files:\n try:\n url = getattr(config, name, None)\n if url is None:\n continue\n setattr(config, name, job.addChildJobFn(download_url_job, url,\n name=name, s3_key_path=config.ssec, disk='15G').rv())\n finally:\n if getattr(config, name, None\n ) is None and name not in nonessential_files:\n raise ValueError(\n 'Necessary configuration parameter is missing:\\n{}'.\n format(name))\n return job.addFollowOnJobFn(reference_preprocessing, config).rv()\n", "role": "user" }, { "content": "download_shared_files", "role": "assistant" } ]
def x(job, config): job.fileStore.logToMaster('Preparing Reference Files') genome_id = config.genome_fasta if getattr(config, 'genome_fai', None) is None: config.genome_fai = job.addChildJobFn(run_samtools_faidx, genome_id, cores=config.cores).rv() if getattr(config, 'genome_dict', None) is None: config.genome_dict = job.addChildJobFn( run_picard_create_sequence_dictionary, genome_id, cores=config. cores, memory=config.xmx).rv() return config
reference_preprocessing
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(job, config):\n job.fileStore.logToMaster('Preparing Reference Files')\n genome_id = config.genome_fasta\n if getattr(config, 'genome_fai', None) is None:\n config.genome_fai = job.addChildJobFn(run_samtools_faidx, genome_id,\n cores=config.cores).rv()\n if getattr(config, 'genome_dict', None) is None:\n config.genome_dict = job.addChildJobFn(\n run_picard_create_sequence_dictionary, genome_id, cores=config.\n cores, memory=config.xmx).rv()\n return config\n", "role": "user" }, { "content": "reference_preprocessing", "role": "assistant" } ]
def x(job, uuid, url, config, paired_url=None, rg_line=None): if config.run_bwa: get_bam = job.wrapJobFn(setup_and_run_bwakit, uuid, url, rg_line, config, paired_url=paired_url).encapsulate() elif '.bam' in url.lower(): job.fileStore.logToMaster('Downloading BAM: %s' % uuid) get_bam = job.wrapJobFn(download_url_job, url, name='toil.bam', s3_key_path=config.ssec, disk=config.file_size).encapsulate() else: raise ValueError( """Could not generate BAM file for %s Provide a FASTQ URL and set run-bwa or provide a BAM URL that includes .bam extension.""" % uuid) if config.sorted and not config.run_bwa: sorted_bam = get_bam else: sorted_bam_disk = PromisedRequirement(lambda bam: 3 * bam.size, get_bam.rv()) sorted_bam = get_bam.addChildJobFn(run_samtools_sort, get_bam.rv(), cores=config.cores, disk=sorted_bam_disk) index_bam_disk = PromisedRequirement(lambda bam: bam.size, sorted_bam.rv()) index_bam = job.wrapJobFn(run_samtools_index, sorted_bam.rv(), disk= index_bam_disk) job.addChild(get_bam) sorted_bam.addChild(index_bam) if config.preprocess: preprocess = job.wrapJobFn(run_gatk_preprocessing, sorted_bam.rv(), index_bam.rv(), config.genome_fasta, config.genome_dict, config .genome_fai, config.g1k_indel, config.mills, config.dbsnp, memory=config.xmx, cores=config.cores).encapsulate() sorted_bam.addChild(preprocess) index_bam.addChild(preprocess) output_bam_promise = preprocess.rv(0) output_bai_promise = preprocess.rv(1) output_dir = os.path.join(config.output_dir, uuid) filename = '{}.preprocessed{}.bam'.format(uuid, config.suffix) output_bam = job.wrapJobFn(output_file_job, filename, preprocess.rv (0), output_dir, s3_key_path=config.ssec) preprocess.addChild(output_bam) else: output_bam_promise = sorted_bam.rv() output_bai_promise = index_bam.rv() return output_bam_promise, output_bai_promise
prepare_bam
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(job, uuid, url, config, paired_url=None, rg_line=None):\n if config.run_bwa:\n get_bam = job.wrapJobFn(setup_and_run_bwakit, uuid, url, rg_line,\n config, paired_url=paired_url).encapsulate()\n elif '.bam' in url.lower():\n job.fileStore.logToMaster('Downloading BAM: %s' % uuid)\n get_bam = job.wrapJobFn(download_url_job, url, name='toil.bam',\n s3_key_path=config.ssec, disk=config.file_size).encapsulate()\n else:\n raise ValueError(\n \"\"\"Could not generate BAM file for %s\nProvide a FASTQ URL and set run-bwa or provide a BAM URL that includes .bam extension.\"\"\"\n % uuid)\n if config.sorted and not config.run_bwa:\n sorted_bam = get_bam\n else:\n sorted_bam_disk = PromisedRequirement(lambda bam: 3 * bam.size,\n get_bam.rv())\n sorted_bam = get_bam.addChildJobFn(run_samtools_sort, get_bam.rv(),\n cores=config.cores, disk=sorted_bam_disk)\n index_bam_disk = PromisedRequirement(lambda bam: bam.size, sorted_bam.rv())\n index_bam = job.wrapJobFn(run_samtools_index, sorted_bam.rv(), disk=\n index_bam_disk)\n job.addChild(get_bam)\n sorted_bam.addChild(index_bam)\n if config.preprocess:\n preprocess = job.wrapJobFn(run_gatk_preprocessing, sorted_bam.rv(),\n index_bam.rv(), config.genome_fasta, config.genome_dict, config\n .genome_fai, config.g1k_indel, config.mills, config.dbsnp,\n memory=config.xmx, cores=config.cores).encapsulate()\n sorted_bam.addChild(preprocess)\n index_bam.addChild(preprocess)\n output_bam_promise = preprocess.rv(0)\n output_bai_promise = preprocess.rv(1)\n output_dir = os.path.join(config.output_dir, uuid)\n filename = '{}.preprocessed{}.bam'.format(uuid, config.suffix)\n output_bam = job.wrapJobFn(output_file_job, filename, preprocess.rv\n (0), output_dir, s3_key_path=config.ssec)\n preprocess.addChild(output_bam)\n else:\n output_bam_promise = sorted_bam.rv()\n output_bai_promise = index_bam.rv()\n return output_bam_promise, output_bai_promise\n", "role": "user" }, { "content": "prepare_bam", "role": "assistant" } ]
def x(job, uuid, url, rg_line, config, paired_url=None): bwa_config = deepcopy(config) bwa_config.uuid = uuid bwa_config.rg_line = rg_line bwa_config.ref = config.genome_fasta bwa_config.fai = config.genome_fai basename, ext = os.path.splitext(url) ext = ext.lower() if ext == '.gz': _, ext = os.path.splitext(basename) ext = ext.lower() require(ext in ['.fq', '.fastq', '.bam'], 'Please use .fq or .bam file extensions:\n%s' % url) samples = [] input1 = job.addChildJobFn(download_url_job, url, name='file1', s3_key_path=config.ssec, disk=config.file_size) samples.append(input1.rv()) if ext == '.bam': bwa_config.bam = input1.rv() else: bwa_config.r1 = input1.rv() if paired_url: input2 = job.addChildJobFn(download_url_job, paired_url, name= 'file2', s3_key_path=config.ssec, disk=config.file_size) samples.append(input2.rv()) bwa_config.r2 = input2.rv() bwa_index_size = sum([getattr(config, index_file).size for index_file in ['amb', 'ann', 'bwt', 'pac', 'sa', 'alt'] if getattr(config, index_file, None) is not None]) bwakit_disk = PromisedRequirement(lambda lst, index_size: int(4 * sum(x .size for x in lst) + index_size), samples, bwa_index_size) return job.addFollowOnJobFn(run_bwakit, bwa_config, sort=False, trim= config.trim, cores=config.cores, disk=bwakit_disk).rv()
setup_and_run_bwakit
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(job, uuid, url, rg_line, config, paired_url=None):\n bwa_config = deepcopy(config)\n bwa_config.uuid = uuid\n bwa_config.rg_line = rg_line\n bwa_config.ref = config.genome_fasta\n bwa_config.fai = config.genome_fai\n basename, ext = os.path.splitext(url)\n ext = ext.lower()\n if ext == '.gz':\n _, ext = os.path.splitext(basename)\n ext = ext.lower()\n require(ext in ['.fq', '.fastq', '.bam'], \n 'Please use .fq or .bam file extensions:\\n%s' % url)\n samples = []\n input1 = job.addChildJobFn(download_url_job, url, name='file1',\n s3_key_path=config.ssec, disk=config.file_size)\n samples.append(input1.rv())\n if ext == '.bam':\n bwa_config.bam = input1.rv()\n else:\n bwa_config.r1 = input1.rv()\n if paired_url:\n input2 = job.addChildJobFn(download_url_job, paired_url, name=\n 'file2', s3_key_path=config.ssec, disk=config.file_size)\n samples.append(input2.rv())\n bwa_config.r2 = input2.rv()\n bwa_index_size = sum([getattr(config, index_file).size for index_file in\n ['amb', 'ann', 'bwt', 'pac', 'sa', 'alt'] if getattr(config,\n index_file, None) is not None])\n bwakit_disk = PromisedRequirement(lambda lst, index_size: int(4 * sum(x\n .size for x in lst) + index_size), samples, bwa_index_size)\n return job.addFollowOnJobFn(run_bwakit, bwa_config, sort=False, trim=\n config.trim, cores=config.cores, disk=bwakit_disk).rv()\n", "role": "user" }, { "content": "setup_and_run_bwakit", "role": "assistant" } ]
def x(job, bam, bai, ref, fai, ref_dict, annotations=None, emit_threshold= 10.0, call_threshold=30.0, unsafe_mode=False, hc_output=None): job.fileStore.logToMaster('Running GATK HaplotypeCaller') inputs = {'genome.fa': ref, 'genome.fa.fai': fai, 'genome.dict': ref_dict, 'input.bam': bam, 'input.bam.bai': bai} work_dir = job.fileStore.getLocalTempDir() for name, file_store_id in inputs.iteritems(): job.fileStore.readGlobalFile(file_store_id, os.path.join(work_dir, name)) command = ['-T', 'HaplotypeCaller', '-nct', str(job.cores), '-R', 'genome.fa', '-I', 'input.bam', '-o', 'output.g.vcf', '-stand_call_conf', str(call_threshold), '-stand_emit_conf', str( emit_threshold), '-variant_index_type', 'LINEAR', '-variant_index_parameter', '128000', '--genotyping_mode', 'Discovery', '--emitRefConfidence', 'GVCF'] if unsafe_mode: command = ['-U', 'ALLOW_SEQ_DICT_INCOMPATIBILITY'] + command if annotations: for annotation in annotations: command.extend(['-A', annotation]) outputs = {'output.g.vcf': hc_output} docker_call(job=job, work_dir=work_dir, env={'JAVA_OPTS': '-Djava.io.tmpdir=/data/ -Xmx{}'.format(job.memory)}, parameters= command, tool= 'quay.io/ucsc_cgl/gatk:3.5--dba6dae49156168a909c43330350c6161dc7ecc2', inputs=inputs.keys(), outputs=outputs, mock=True if outputs[ 'output.g.vcf'] else False) return job.fileStore.writeGlobalFile(os.path.join(work_dir, 'output.g.vcf') )
gatk_haplotype_caller
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(job, bam, bai, ref, fai, ref_dict, annotations=None, emit_threshold=\n 10.0, call_threshold=30.0, unsafe_mode=False, hc_output=None):\n job.fileStore.logToMaster('Running GATK HaplotypeCaller')\n inputs = {'genome.fa': ref, 'genome.fa.fai': fai, 'genome.dict':\n ref_dict, 'input.bam': bam, 'input.bam.bai': bai}\n work_dir = job.fileStore.getLocalTempDir()\n for name, file_store_id in inputs.iteritems():\n job.fileStore.readGlobalFile(file_store_id, os.path.join(work_dir,\n name))\n command = ['-T', 'HaplotypeCaller', '-nct', str(job.cores), '-R',\n 'genome.fa', '-I', 'input.bam', '-o', 'output.g.vcf',\n '-stand_call_conf', str(call_threshold), '-stand_emit_conf', str(\n emit_threshold), '-variant_index_type', 'LINEAR',\n '-variant_index_parameter', '128000', '--genotyping_mode',\n 'Discovery', '--emitRefConfidence', 'GVCF']\n if unsafe_mode:\n command = ['-U', 'ALLOW_SEQ_DICT_INCOMPATIBILITY'] + command\n if annotations:\n for annotation in annotations:\n command.extend(['-A', annotation])\n outputs = {'output.g.vcf': hc_output}\n docker_call(job=job, work_dir=work_dir, env={'JAVA_OPTS':\n '-Djava.io.tmpdir=/data/ -Xmx{}'.format(job.memory)}, parameters=\n command, tool=\n 'quay.io/ucsc_cgl/gatk:3.5--dba6dae49156168a909c43330350c6161dc7ecc2',\n inputs=inputs.keys(), outputs=outputs, mock=True if outputs[\n 'output.g.vcf'] else False)\n return job.fileStore.writeGlobalFile(os.path.join(work_dir, 'output.g.vcf')\n )\n", "role": "user" }, { "content": "gatk_haplotype_caller", "role": "assistant" } ]
def x(): parser = argparse.ArgumentParser(description=__doc__, formatter_class= argparse.RawTextHelpFormatter) subparsers = parser.add_subparsers(dest='command') subparsers.add_parser('generate-config', help= 'Generates an editable config in the current working directory.') subparsers.add_parser('generate-manifest', help= 'Generates an editable manifest in the current working directory.') subparsers.add_parser('generate', help= 'Generates a config and manifest in the current working directory.') parser_run = subparsers.add_parser('run', help= 'Runs the GATK germline pipeline') parser_run.add_argument('--config', required=True, type=str, help= 'Path to the (filled in) config file, generated with "generate-config".' ) parser_run.add_argument('--manifest', type=str, help= """Path to the (filled in) manifest file, generated with "generate-manifest". Default value: "%(default)s".""" ) parser_run.add_argument('--sample', default=None, nargs=2, type=str, help='Input sample identifier and BAM file URL or local path') parser_run.add_argument('--output-dir', default=None, help= 'Path/URL to output directory') parser_run.add_argument('-s', '--suffix', default=None, help= 'Additional suffix to add to the names of the output files') parser_run.add_argument('--preprocess-only', action='store_true', help= 'Only runs preprocessing steps') Job.Runner.addToilOptions(parser_run) options = parser.parse_args() cwd = os.getcwd() if options.command == 'generate-config' or options.command == 'generate': generate_file(os.path.join(cwd, 'config-toil-germline.yaml'), generate_config) if options.command == 'generate-manifest' or options.command == 'generate': generate_file(os.path.join(cwd, 'manifest-toil-germline.tsv'), generate_manifest) elif options.command == 'run': for program in ['curl', 'docker']: require(next(which(program)), program + ' must be installed on every node.'.format(program)) require(os.path.exists(options.config), '{} not found. \nPlease run "generate-config"'.format(options.config) ) samples = [] if options.manifest: samples.extend(parse_manifest(options.manifest)) if options.sample: uuid, url = options.sample samples.append(GermlineSample(uuid, url, None, None)) require(len(samples) > 0, 'No samples were detected in the manifest or on the command line') inputs = {x.replace('-', '_'): y for x, y in yaml.load(open(options .config).read()).iteritems()} required_fields = {'genome_fasta', 'output_dir', 'run_bwa', 'sorted', 'snp_filter_annotations', 'indel_filter_annotations', 'preprocess', 'preprocess_only', 'run_vqsr', 'joint_genotype', 'run_oncotator', 'cores', 'file_size', 'xmx', 'suffix'} input_fields = set(inputs.keys()) require(input_fields > required_fields, 'Missing config parameters:\n{}'.format(', '.join( required_fields - input_fields))) if inputs['output_dir'] is None: inputs['output_dir'] = options.output_dir require(inputs['output_dir'] is not None, 'Missing output directory PATH/URL') if inputs['suffix'] is None: inputs['suffix'] = options.suffix if options.suffix else '' if inputs['preprocess_only'] is None: inputs['preprocess_only'] = options.preprocess_only if inputs['run_vqsr']: vqsr_fields = {'g1k_snp', 'mills', 'dbsnp', 'hapmap', 'omni'} require(input_fields > vqsr_fields, 'Missing parameters for VQSR:\n{}'.format(', '.join( vqsr_fields - input_fields))) elif not inputs['preprocess_only']: hard_filter_fields = {'snp_filter_name', 'snp_filter_expression', 'indel_filter_name', 'indel_filter_expression'} require(input_fields > hard_filter_fields, 'Missing parameters for hard filtering:\n{}'.format(', '. join(hard_filter_fields - input_fields))) for hard_filter_field in hard_filter_fields: require(inputs[hard_filter_field], 'Missing %s value for hard filtering, got %s.' % ( hard_filter_field, inputs[hard_filter_field])) inputs['xmx'] = human2bytes(inputs['xmx']) inputs['file_size'] = human2bytes(inputs['file_size']) inputs['cores'] = int(inputs['cores']) inputs['annotations'] = set(inputs['snp_filter_annotations'] + inputs['indel_filter_annotations']) inputs['hc_output'] = inputs.get('hc_output', None) config = argparse.Namespace(**inputs) root = Job.wrapJobFn(run_gatk_germline_pipeline, samples, config) Job.Runner.startToil(root, options)
main
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x():\n    parser = argparse.ArgumentParser(description=__doc__, formatter_class=\n        argparse.RawTextHelpFormatter)\n    subparsers = parser.add_subparsers(dest='command')\n    subparsers.add_parser('generate-config', help=\n        'Generates an editable config in the current working directory.')\n    subparsers.add_parser('generate-manifest', help=\n        'Generates an editable manifest in the current working directory.')\n    subparsers.add_parser('generate', help=\n        'Generates a config and manifest in the current working directory.')\n    parser_run = subparsers.add_parser('run', help=\n        'Runs the GATK germline pipeline')\n    parser_run.add_argument('--config', required=True, type=str, help=\n        'Path to the (filled in) config file, generated with \"generate-config\".'\n        )\n    parser_run.add_argument('--manifest', type=str, help=\n        \"\"\"Path to the (filled in) manifest file, generated with \"generate-manifest\".\nDefault value: \"%(default)s\".\"\"\"\n        )\n    parser_run.add_argument('--sample', default=None, nargs=2, type=str,\n        help='Input sample identifier and BAM file URL or local path')\n    parser_run.add_argument('--output-dir', default=None, help=\n        'Path/URL to output directory')\n    parser_run.add_argument('-s', '--suffix', default=None, help=\n        'Additional suffix to add to the names of the output files')\n    parser_run.add_argument('--preprocess-only', action='store_true', help=\n        'Only runs preprocessing steps')\n    Job.Runner.addToilOptions(parser_run)\n    options = parser.parse_args()\n    cwd = os.getcwd()\n    if options.command == 'generate-config' or options.command == 'generate':\n        generate_file(os.path.join(cwd, 'config-toil-germline.yaml'),\n            generate_config)\n    if options.command == 'generate-manifest' or options.command == 'generate':\n        generate_file(os.path.join(cwd, 'manifest-toil-germline.tsv'),\n            generate_manifest)\n    elif options.command == 'run':\n        for program in ['curl', 'docker']:\n            require(next(which(program)), program +\n                ' must be installed on every node.'.format(program))\n        require(os.path.exists(options.config),\n            '{} not found. \\nPlease run \"generate-config\"'.format(options.config)\n            )\n        samples = []\n        if options.manifest:\n            samples.extend(parse_manifest(options.manifest))\n        if options.sample:\n            uuid, url = options.sample\n            samples.append(GermlineSample(uuid, url, None, None))\n        require(len(samples) > 0,\n            'No samples were detected in the manifest or on the command line')\n        inputs = {x.replace('-', '_'): y for x, y in yaml.load(open(options\n            .config).read()).iteritems()}\n        required_fields = {'genome_fasta', 'output_dir', 'run_bwa',\n            'sorted', 'snp_filter_annotations', 'indel_filter_annotations',\n            'preprocess', 'preprocess_only', 'run_vqsr', 'joint_genotype',\n            'run_oncotator', 'cores', 'file_size', 'xmx', 'suffix'}\n        input_fields = set(inputs.keys())\n        require(input_fields > required_fields,\n            'Missing config parameters:\\n{}'.format(', '.join(\n            required_fields - input_fields)))\n        if inputs['output_dir'] is None:\n            inputs['output_dir'] = options.output_dir\n        require(inputs['output_dir'] is not None,\n            'Missing output directory PATH/URL')\n        if inputs['suffix'] is None:\n            inputs['suffix'] = options.suffix if options.suffix else ''\n        if inputs['preprocess_only'] is None:\n            inputs['preprocess_only'] = options.preprocess_only\n        if inputs['run_vqsr']:\n            vqsr_fields = {'g1k_snp', 'mills', 'dbsnp', 'hapmap', 'omni'}\n            require(input_fields > vqsr_fields,\n                'Missing parameters for VQSR:\\n{}'.format(', '.join(\n                vqsr_fields - input_fields)))\n        elif not inputs['preprocess_only']:\n            hard_filter_fields = {'snp_filter_name',\n                'snp_filter_expression', 'indel_filter_name',\n                'indel_filter_expression'}\n            require(input_fields > hard_filter_fields,\n                'Missing parameters for hard filtering:\\n{}'.format(', '.\n                join(hard_filter_fields - input_fields)))\n            for hard_filter_field in hard_filter_fields:\n                require(inputs[hard_filter_field], \n                    'Missing %s value for hard filtering, got %s.' % (\n                    hard_filter_field, inputs[hard_filter_field]))\n        inputs['xmx'] = human2bytes(inputs['xmx'])\n        inputs['file_size'] = human2bytes(inputs['file_size'])\n        inputs['cores'] = int(inputs['cores'])\n        inputs['annotations'] = set(inputs['snp_filter_annotations'] +\n            inputs['indel_filter_annotations'])\n        inputs['hc_output'] = inputs.get('hc_output', None)\n        config = argparse.Namespace(**inputs)\n        root = Job.wrapJobFn(run_gatk_germline_pipeline, samples, config)\n        Job.Runner.startToil(root, options)\n", "role": "user" }, { "content": "main", "role": "assistant" } ]
def x(job, uuid_list, inputs):
    for uuid_rg in uuid_list:
        uuid_items = uuid_rg.split(',')
        uuid = uuid_items[0]
        rg_line = None
        if len(uuid_items) > 1:
            rg_line = uuid_items[1]
        job.addChildJobFn(static_dag, uuid, rg_line, inputs)
sample_loop
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(job, uuid_list, inputs):\n for uuid_rg in uuid_list:\n uuid_items = uuid_rg.split(',')\n uuid = uuid_items[0]\n rg_line = None\n if len(uuid_items) > 1:\n rg_line = uuid_items[1]\n job.addChildJobFn(static_dag, uuid, rg_line, inputs)\n", "role": "user" }, { "content": "sample_loop", "role": "assistant" } ]
def x(job, uuid, rg_line, inputs):
    work_dir = job.fileStore.getLocalTempDir()
    inputs.cpu_count = cpu_count()
    inputs.maxCores = sys.maxint
    args = {'uuid': uuid, 's3_bucket': inputs.s3_bucket,
            'sequence_dir': inputs.sequence_dir, 'dir_suffix': inputs.dir_suffix}
    inputs.rg_line = rg_line
    inputs.output_dir = 's3://{s3_bucket}/alignment{dir_suffix}'.format(**args)
    bwa = job.wrapJobFn(download_reference_files, inputs,
                        [[uuid, ['s3://{s3_bucket}/{sequence_dir}/{uuid}_1.fastq.gz'.format(**args),
                                 's3://{s3_bucket}/{sequence_dir}/{uuid}_2.fastq.gz'.format(**args)]]]).encapsulate()
    adam_preprocess = job.wrapJobFn(static_adam_preprocessing_dag, inputs,
                                    's3://{s3_bucket}/alignment{dir_suffix}/{uuid}.bam'.format(**args),
                                    's3://{s3_bucket}/analysis{dir_suffix}/{uuid}'.format(**args),
                                    suffix='.adam').encapsulate()
    gatk_preprocessing_inputs = copy.deepcopy(inputs)
    gatk_preprocessing_inputs.suffix = '.gatk'
    gatk_preprocessing_inputs.preprocess = True
    gatk_preprocessing_inputs.preprocess_only = True
    gatk_preprocessing_inputs.output_dir = ('s3://{s3_bucket}/analysis{dir_suffix}'.format(**args))
    gatk_preprocess = job.wrapJobFn(run_gatk_germline_pipeline,
                                    GermlineSample(uuid,
                                                   's3://{s3_bucket}/alignment{dir_suffix}/{uuid}.bam'.format(**args),
                                                   None, None),
                                    gatk_preprocessing_inputs).encapsulate()
    adam_call_inputs = inputs
    adam_call_inputs.suffix = '.adam'
    adam_call_inputs.sorted = True
    adam_call_inputs.preprocess = False
    adam_call_inputs.run_vqsr = False
    adam_call_inputs.joint_genotype = False
    adam_call_inputs.output_dir = ('s3://{s3_bucket}/analysis{dir_suffix}'.format(**args))
    gatk_adam_call = job.wrapJobFn(run_gatk_germline_pipeline,
                                   GermlineSample(uuid,
                                                  's3://{s3_bucket}/analysis{dir_suffix}/{uuid}/{uuid}.adam.bam'.format(**args),
                                                  None, None),
                                   adam_call_inputs).encapsulate()
    gatk_call_inputs = copy.deepcopy(inputs)
    gatk_call_inputs.sorted = True
    gatk_call_inputs.preprocess = False
    gatk_call_inputs.run_vqsr = False
    gatk_call_inputs.joint_genotype = False
    gatk_call_inputs.output_dir = ('s3://{s3_bucket}/analysis{dir_suffix}'.format(**args))
    gatk_gatk_call = job.wrapJobFn(run_gatk_germline_pipeline,
                                   GermlineSample(uuid,
                                                  'S3://{s3_bucket}/analysis{dir_suffix}/{uuid}/{uuid}.gatk.bam'.format(**args),
                                                  None, None),
                                   gatk_call_inputs).encapsulate()
    if not inputs.skip_alignment:
        job.addChild(bwa)
    if inputs.pipeline_to_run == 'adam' or inputs.pipeline_to_run == 'both':
        if inputs.skip_preprocessing:
            job.addChild(gatk_adam_call)
        else:
            if inputs.skip_alignment:
                job.addChild(adam_preprocess)
            else:
                bwa.addChild(adam_preprocess)
            adam_preprocess.addChild(gatk_adam_call)
    if inputs.pipeline_to_run == 'gatk' or inputs.pipeline_to_run == 'both':
        if inputs.skip_preprocessing:
            job.addChild(gatk_gatk_call)
        else:
            if inputs.skip_alignment:
                job.addChild(gatk_preprocess)
            else:
                bwa.addChild(gatk_preprocess)
            gatk_preprocess.addChild(gatk_gatk_call)
static_dag
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(job, uuid, rg_line, inputs):\n work_dir = job.fileStore.getLocalTempDir()\n inputs.cpu_count = cpu_count()\n inputs.maxCores = sys.maxint\n args = {'uuid': uuid, 's3_bucket': inputs.s3_bucket, 'sequence_dir':\n inputs.sequence_dir, 'dir_suffix': inputs.dir_suffix}\n inputs.rg_line = rg_line\n inputs.output_dir = 's3://{s3_bucket}/alignment{dir_suffix}'.format(**args)\n bwa = job.wrapJobFn(download_reference_files, inputs, [[uuid, [\n 's3://{s3_bucket}/{sequence_dir}/{uuid}_1.fastq.gz'.format(**args),\n 's3://{s3_bucket}/{sequence_dir}/{uuid}_2.fastq.gz'.format(**args)]]]\n ).encapsulate()\n adam_preprocess = job.wrapJobFn(static_adam_preprocessing_dag, inputs,\n 's3://{s3_bucket}/alignment{dir_suffix}/{uuid}.bam'.format(**args),\n 's3://{s3_bucket}/analysis{dir_suffix}/{uuid}'.format(**args),\n suffix='.adam').encapsulate()\n gatk_preprocessing_inputs = copy.deepcopy(inputs)\n gatk_preprocessing_inputs.suffix = '.gatk'\n gatk_preprocessing_inputs.preprocess = True\n gatk_preprocessing_inputs.preprocess_only = True\n gatk_preprocessing_inputs.output_dir = (\n 's3://{s3_bucket}/analysis{dir_suffix}'.format(**args))\n gatk_preprocess = job.wrapJobFn(run_gatk_germline_pipeline,\n GermlineSample(uuid,\n 's3://{s3_bucket}/alignment{dir_suffix}/{uuid}.bam'.format(**args),\n None, None), gatk_preprocessing_inputs).encapsulate()\n adam_call_inputs = inputs\n adam_call_inputs.suffix = '.adam'\n adam_call_inputs.sorted = True\n adam_call_inputs.preprocess = False\n adam_call_inputs.run_vqsr = False\n adam_call_inputs.joint_genotype = False\n adam_call_inputs.output_dir = ('s3://{s3_bucket}/analysis{dir_suffix}'.\n format(**args))\n gatk_adam_call = job.wrapJobFn(run_gatk_germline_pipeline,\n GermlineSample(uuid,\n 's3://{s3_bucket}/analysis{dir_suffix}/{uuid}/{uuid}.adam.bam'.\n format(**args), None, None), adam_call_inputs).encapsulate()\n gatk_call_inputs = copy.deepcopy(inputs)\n gatk_call_inputs.sorted = True\n gatk_call_inputs.preprocess = False\n gatk_call_inputs.run_vqsr = False\n gatk_call_inputs.joint_genotype = False\n gatk_call_inputs.output_dir = ('s3://{s3_bucket}/analysis{dir_suffix}'.\n format(**args))\n gatk_gatk_call = job.wrapJobFn(run_gatk_germline_pipeline,\n GermlineSample(uuid,\n 'S3://{s3_bucket}/analysis{dir_suffix}/{uuid}/{uuid}.gatk.bam'.\n format(**args), None, None), gatk_call_inputs).encapsulate()\n if not inputs.skip_alignment:\n job.addChild(bwa)\n if inputs.pipeline_to_run == 'adam' or inputs.pipeline_to_run == 'both':\n if inputs.skip_preprocessing:\n job.addChild(gatk_adam_call)\n else:\n if inputs.skip_alignment:\n job.addChild(adam_preprocess)\n else:\n bwa.addChild(adam_preprocess)\n adam_preprocess.addChild(gatk_adam_call)\n if inputs.pipeline_to_run == 'gatk' or inputs.pipeline_to_run == 'both':\n if inputs.skip_preprocessing:\n job.addChild(gatk_gatk_call)\n else:\n if inputs.skip_alignment:\n job.addChild(gatk_preprocess)\n else:\n bwa.addChild(gatk_preprocess)\n gatk_preprocess.addChild(gatk_gatk_call)\n", "role": "user" }, { "content": "static_dag", "role": "assistant" } ]
def x():
    if mock_mode():
        usage_msg = ('You have the TOIL_SCRIPTS_MOCK_MODE environment variable set, so this pipeline will run in mock mode. To disable mock mode, set TOIL_SCRIPTS_MOCK_MODE=0')
    else:
        usage_msg = None
    parser = argparse.ArgumentParser(usage=usage_msg)
    subparsers = parser.add_subparsers(dest='command')
    subparsers.add_parser('generate-config',
                          help='Generates an editable config in the current working directory.')
    subparsers.add_parser('generate-manifest',
                          help='Generates an editable manifest in the current working directory.')
    subparsers.add_parser('generate',
                          help='Generates a config and manifest in the current working directory.')
    parser_run = subparsers.add_parser('run', help='Runs the ADAM/GATK pipeline')
    default_config = 'adam-gatk-mock.config' if mock_mode() else 'adam-gatk.config'
    default_manifest = 'adam-gatk-mock-manifest.csv' if mock_mode() else 'adam-gatk-manifest.csv'
    parser_run.add_argument('--config', default=default_config, type=str,
                            help='Path to the (filled in) config file, generated with "generate-config".')
    parser_run.add_argument('--manifest', default=default_manifest, type=str,
                            help="""Path to the (filled in) manifest file, generated with "generate-manifest". 
Default value: "%(default)s".""")
    Job.Runner.addToilOptions(parser_run)
    args = parser.parse_args()
    cwd = os.getcwd()
    if args.command == 'generate-config' or args.command == 'generate':
        generate_file(os.path.join(cwd, default_config), generate_config)
    if args.command == 'generate-manifest' or args.command == 'generate':
        generate_file(os.path.join(cwd, default_manifest), generate_manifest)
    elif args.command == 'run':
        require(os.path.exists(args.config),
                '{} not found. Please run generate-config'.format(args.config))
        if not hasattr(args, 'sample'):
            require(os.path.exists(args.manifest),
                    '{} not found and no samples provided. Please run "generate-manifest"'.format(args.manifest))
        parsed_config = {x.replace('-', '_'): y
                         for x, y in yaml.load(open(args.config).read()).iteritems()}
        inputs = argparse.Namespace(**parsed_config)
        uuid_list = []
        with open(args.manifest) as f_manifest:
            for line in f_manifest:
                if not line.isspace() and not line.startswith('#'):
                    uuid_list.append(line.strip())
        inputs.sort = False
        if not inputs.dir_suffix:
            inputs.dir_suffix = ''
        if not inputs.s3_bucket:
            inputs.s3_bucket = ''
        if inputs.master_ip and inputs.num_nodes:
            raise ValueError('Exactly one of master_ip (%s) and num_nodes (%d) must be provided.'
                             % (inputs.master_ip, inputs.num_nodes))
        if not hasattr(inputs, 'master_ip') and inputs.num_nodes <= 1:
            raise ValueError('num_nodes allocates one Spark/HDFS master and n-1 workers, and thus must be greater than 1. %d was passed.'
                             % inputs.num_nodes)
        if (inputs.pipeline_to_run != 'adam' and inputs.pipeline_to_run != 'gatk' and inputs.pipeline_to_run != 'both'):
            raise ValueError("pipeline_to_run must be either 'adam', 'gatk', or 'both'. %s was passed."
                             % inputs.pipeline_to_run)
        Job.Runner.startToil(Job.wrapJobFn(sample_loop, uuid_list, inputs), args)
main
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x():\n if mock_mode():\n usage_msg = (\n 'You have the TOIL_SCRIPTS_MOCK_MODE environment variable set, so this pipeline will run in mock mode. To disable mock mode, set TOIL_SCRIPTS_MOCK_MODE=0'\n )\n else:\n usage_msg = None\n parser = argparse.ArgumentParser(usage=usage_msg)\n subparsers = parser.add_subparsers(dest='command')\n subparsers.add_parser('generate-config', help=\n 'Generates an editable config in the current working directory.')\n subparsers.add_parser('generate-manifest', help=\n 'Generates an editable manifest in the current working directory.')\n subparsers.add_parser('generate', help=\n 'Generates a config and manifest in the current working directory.')\n parser_run = subparsers.add_parser('run', help=\n 'Runs the ADAM/GATK pipeline')\n default_config = 'adam-gatk-mock.config' if mock_mode(\n ) else 'adam-gatk.config'\n default_manifest = 'adam-gatk-mock-manifest.csv' if mock_mode(\n ) else 'adam-gatk-manifest.csv'\n parser_run.add_argument('--config', default=default_config, type=str,\n help=\n 'Path to the (filled in) config file, generated with \"generate-config\".'\n )\n parser_run.add_argument('--manifest', default=default_manifest, type=\n str, help=\n \"\"\"Path to the (filled in) manifest file, generated with \"generate-manifest\". \nDefault value: \"%(default)s\".\"\"\"\n )\n Job.Runner.addToilOptions(parser_run)\n args = parser.parse_args()\n cwd = os.getcwd()\n if args.command == 'generate-config' or args.command == 'generate':\n generate_file(os.path.join(cwd, default_config), generate_config)\n if args.command == 'generate-manifest' or args.command == 'generate':\n generate_file(os.path.join(cwd, default_manifest), generate_manifest)\n elif args.command == 'run':\n require(os.path.exists(args.config),\n '{} not found. Please run generate-config'.format(args.config))\n if not hasattr(args, 'sample'):\n require(os.path.exists(args.manifest),\n '{} not found and no samples provided. Please run \"generate-manifest\"'\n .format(args.manifest))\n parsed_config = {x.replace('-', '_'): y for x, y in yaml.load(open(\n args.config).read()).iteritems()}\n inputs = argparse.Namespace(**parsed_config)\n uuid_list = []\n with open(args.manifest) as f_manifest:\n for line in f_manifest:\n if not line.isspace() and not line.startswith('#'):\n uuid_list.append(line.strip())\n inputs.sort = False\n if not inputs.dir_suffix:\n inputs.dir_suffix = ''\n if not inputs.s3_bucket:\n inputs.s3_bucket = ''\n if inputs.master_ip and inputs.num_nodes:\n raise ValueError(\n 'Exactly one of master_ip (%s) and num_nodes (%d) must be provided.'\n % (inputs.master_ip, inputs.num_nodes))\n if not hasattr(inputs, 'master_ip') and inputs.num_nodes <= 1:\n raise ValueError(\n 'num_nodes allocates one Spark/HDFS master and n-1 workers, and thus must be greater than 1. %d was passed.'\n % inputs.num_nodes)\n if (inputs.pipeline_to_run != 'adam' and inputs.pipeline_to_run !=\n 'gatk' and inputs.pipeline_to_run != 'both'):\n raise ValueError(\n \"pipeline_to_run must be either 'adam', 'gatk', or 'both'. %s was passed.\"\n % inputs.pipeline_to_run)\n Job.Runner.startToil(Job.wrapJobFn(sample_loop, uuid_list, inputs),\n args)\n", "role": "user" }, { "content": "main", "role": "assistant" } ]
def x(master_key_path, url):
    with open(master_key_path, 'r') as f:
        master_key = f.read()
    assert len(master_key) == 32, 'Invalid Key! Must be 32 characters. Key: {}, Length: {}'.format(master_key, len(master_key))
    new_key = hashlib.sha256(master_key + url).digest()
    assert len(new_key) == 32, 'New key is invalid and is not 32 characters: {}'.format(new_key)
    return new_key
generate_unique_key
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(master_key_path, url):\n with open(master_key_path, 'r') as f:\n master_key = f.read()\n assert len(master_key\n ) == 32, 'Invalid Key! Must be 32 characters. Key: {}, Length: {}'.format(\n master_key, len(master_key))\n new_key = hashlib.sha256(master_key + url).digest()\n assert len(new_key\n ) == 32, 'New key is invalid and is not 32 characters: {}'.format(\n new_key)\n return new_key\n", "role": "user" }, { "content": "generate_unique_key", "role": "assistant" } ]
def x(work_dir, url, key_path, name):
    file_path = os.path.join(work_dir, name)
    key = generate_unique_key(key_path, url)
    encoded_key = base64.b64encode(key)
    encoded_key_md5 = base64.b64encode(hashlib.md5(key).digest())
    h1 = 'x-amz-server-side-encryption-customer-algorithm:AES256'
    h2 = 'x-amz-server-side-encryption-customer-key:{}'.format(encoded_key)
    h3 = 'x-amz-server-side-encryption-customer-key-md5:{}'.format(encoded_key_md5)
    try:
        subprocess.check_call(['curl', '-fs', '--retry', '5', '-H', h1, '-H', h2, '-H', h3, url, '-o', file_path])
    except OSError:
        raise RuntimeError('Failed to find "curl". Install via "apt-get install curl"')
    assert os.path.exists(file_path)
download_encrypted_file
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(work_dir, url, key_path, name):\n file_path = os.path.join(work_dir, name)\n key = generate_unique_key(key_path, url)\n encoded_key = base64.b64encode(key)\n encoded_key_md5 = base64.b64encode(hashlib.md5(key).digest())\n h1 = 'x-amz-server-side-encryption-customer-algorithm:AES256'\n h2 = 'x-amz-server-side-encryption-customer-key:{}'.format(encoded_key)\n h3 = 'x-amz-server-side-encryption-customer-key-md5:{}'.format(\n encoded_key_md5)\n try:\n subprocess.check_call(['curl', '-fs', '--retry', '5', '-H', h1,\n '-H', h2, '-H', h3, url, '-o', file_path])\n except OSError:\n raise RuntimeError(\n 'Failed to find \"curl\". Install via \"apt-get install curl\"')\n assert os.path.exists(file_path)\n", "role": "user" }, { "content": "download_encrypted_file", "role": "assistant" } ]
def x(job, work_dir, ids, *args):
    paths = OrderedDict()
    for name in args:
        if not os.path.exists(os.path.join(work_dir, name)):
            file_path = job.fileStore.readGlobalFile(ids[name], os.path.join(work_dir, name))
        else:
            file_path = os.path.join(work_dir, name)
        paths[name] = file_path
        if len(args) == 1:
            return file_path
    return paths.values()
return_input_paths
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(job, work_dir, ids, *args):\n paths = OrderedDict()\n for name in args:\n if not os.path.exists(os.path.join(work_dir, name)):\n file_path = job.fileStore.readGlobalFile(ids[name], os.path.\n join(work_dir, name))\n else:\n file_path = os.path.join(work_dir, name)\n paths[name] = file_path\n if len(args) == 1:\n return file_path\n return paths.values()\n", "role": "user" }, { "content": "return_input_paths", "role": "assistant" } ]
def x(work_dir, output_dir, uuid=None, files=list()):
    for fname in files:
        if uuid is None:
            shutil.move(os.path.join(work_dir, fname), os.path.join(output_dir, fname))
        else:
            shutil.move(os.path.join(work_dir, fname), os.path.join(output_dir, '{}.{}'.format(uuid, fname)))
move_to_output_dir
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(work_dir, output_dir, uuid=None, files=list()):\n for fname in files:\n if uuid is None:\n shutil.move(os.path.join(work_dir, fname), os.path.join(\n output_dir, fname))\n else:\n shutil.move(os.path.join(work_dir, fname), os.path.join(\n output_dir, '{}.{}'.format(uuid, fname)))\n", "role": "user" }, { "content": "move_to_output_dir", "role": "assistant" } ]
def x(job, input_args):
    shared_files = ['ref.fa', 'ref.fa.amb', 'ref.fa.ann', 'ref.fa.bwt',
                    'ref.fa.pac', 'ref.fa.sa', 'ref.fa.fai']
    shared_ids = {}
    for fname in shared_files:
        url = input_args[fname]
        shared_ids[fname] = job.addChildJobFn(download_from_url, url, fname).rv()
    job.addFollowOnJobFn(spawn_batch_jobs, shared_ids, input_args)
batch_start
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(job, input_args):\n shared_files = ['ref.fa', 'ref.fa.amb', 'ref.fa.ann', 'ref.fa.bwt',\n 'ref.fa.pac', 'ref.fa.sa', 'ref.fa.fai']\n shared_ids = {}\n for fname in shared_files:\n url = input_args[fname]\n shared_ids[fname] = job.addChildJobFn(download_from_url, url, fname\n ).rv()\n job.addFollowOnJobFn(spawn_batch_jobs, shared_ids, input_args)\n", "role": "user" }, { "content": "batch_start", "role": "assistant" } ]
def x(job, shared_ids, input_args):
    samples = []
    config = input_args['config']
    with open(config, 'r') as f_in:
        for line in f_in:
            line = line.strip().split(',')
            uuid = line[0]
            urls = line[1:]
            samples.append((uuid, urls))
    for sample in samples:
        job.addChildJobFn(alignment, shared_ids, input_args, sample, cores=32, memory='20 G', disk='100 G')
spawn_batch_jobs
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(job, shared_ids, input_args):\n samples = []\n config = input_args['config']\n with open(config, 'r') as f_in:\n for line in f_in:\n line = line.strip().split(',')\n uuid = line[0]\n urls = line[1:]\n samples.append((uuid, urls))\n for sample in samples:\n job.addChildJobFn(alignment, shared_ids, input_args, sample, cores=\n 32, memory='20 G', disk='100 G')\n", "role": "user" }, { "content": "spawn_batch_jobs", "role": "assistant" } ]
def x(job, ids, input_args, sample):
    uuid, urls = sample
    work_dir = job.fileStore.getLocalTempDir()
    output_dir = input_args['output_dir']
    key_path = input_args['ssec']
    cores = multiprocessing.cpu_count()
    return_input_paths(job, work_dir, ids, 'ref.fa', 'ref.fa.amb', 'ref.fa.ann',
                       'ref.fa.bwt', 'ref.fa.pac', 'ref.fa.sa', 'ref.fa.fai')
    for url in urls:
        download_encrypted_file(work_dir, url, key_path, os.path.basename(url))
    docker_cmd = ['docker', 'run', '--rm', '-v', '{}:/data'.format(work_dir)]
    bwa_command = ['jvivian/bwa', 'mem', '-R',
                   '@RG\tID:{0}\tPL:Illumina\tSM:{0}\tLB:KapaHyper'.format(uuid),
                   '-T', str(0), '-t', str(cores),
                   '/data/ref.fa'] + [os.path.join('/data/', os.path.basename(x)) for x in urls]
    bamsort_command = ['jeltje/biobambam', '/usr/local/bin/bamsort',
                       'inputformat=sam', 'level=1',
                       'inputthreads={}'.format(cores), 'outputthreads={}'.format(cores),
                       'calmdnm=1', 'calmdnmrecompindetonly=1',
                       'calmdnmreference=/data/ref.fa', 'I=/data/{}'.format(uuid + '.sam')]
    with open(os.path.join(work_dir, uuid + '.sam'), 'w') as f_out:
        subprocess.check_call(docker_cmd + bwa_command, stdout=f_out)
    with open(os.path.join(work_dir, uuid + '.bam'), 'w') as f_out:
        subprocess.check_call(docker_cmd + bamsort_command, stdout=f_out)
    ids['bam'] = job.fileStore.writeGlobalFile(os.path.join(work_dir, uuid + '.bam'))
    if input_args['s3_dir']:
        job.addChildJobFn(upload_bam_to_s3, ids, input_args, sample, cores=32, memory='20 G', disk='30 G')
    if input_args['output_dir']:
        move_to_output_dir(work_dir, output_dir, uuid=None, files=[uuid + '.bam'])
alignment
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(job, ids, input_args, sample):\n uuid, urls = sample\n work_dir = job.fileStore.getLocalTempDir()\n output_dir = input_args['output_dir']\n key_path = input_args['ssec']\n cores = multiprocessing.cpu_count()\n return_input_paths(job, work_dir, ids, 'ref.fa', 'ref.fa.amb',\n 'ref.fa.ann', 'ref.fa.bwt', 'ref.fa.pac', 'ref.fa.sa', 'ref.fa.fai')\n for url in urls:\n download_encrypted_file(work_dir, url, key_path, os.path.basename(url))\n docker_cmd = ['docker', 'run', '--rm', '-v', '{}:/data'.format(work_dir)]\n bwa_command = ['jvivian/bwa', 'mem', '-R',\n '@RG\\tID:{0}\\tPL:Illumina\\tSM:{0}\\tLB:KapaHyper'.format(uuid), '-T',\n str(0), '-t', str(cores), '/data/ref.fa'] + [os.path.join('/data/',\n os.path.basename(x)) for x in urls]\n bamsort_command = ['jeltje/biobambam', '/usr/local/bin/bamsort',\n 'inputformat=sam', 'level=1', 'inputthreads={}'.format(cores),\n 'outputthreads={}'.format(cores), 'calmdnm=1',\n 'calmdnmrecompindetonly=1', 'calmdnmreference=/data/ref.fa',\n 'I=/data/{}'.format(uuid + '.sam')]\n with open(os.path.join(work_dir, uuid + '.sam'), 'w') as f_out:\n subprocess.check_call(docker_cmd + bwa_command, stdout=f_out)\n with open(os.path.join(work_dir, uuid + '.bam'), 'w') as f_out:\n subprocess.check_call(docker_cmd + bamsort_command, stdout=f_out)\n ids['bam'] = job.fileStore.writeGlobalFile(os.path.join(work_dir, uuid +\n '.bam'))\n if input_args['s3_dir']:\n job.addChildJobFn(upload_bam_to_s3, ids, input_args, sample, cores=\n 32, memory='20 G', disk='30 G')\n if input_args['output_dir']:\n move_to_output_dir(work_dir, output_dir, uuid=None, files=[uuid +\n '.bam'])\n", "role": "user" }, { "content": "alignment", "role": "assistant" } ]
def x(job, ids, input_args, sample):
    uuid, urls = sample
    key_path = input_args['ssec']
    work_dir = job.fileStore.getLocalTempDir()
    s3_dir = input_args['s3_dir']
    bucket_name = s3_dir.lstrip('/').split('/')[0]
    bucket_dir = '/'.join(s3_dir.lstrip('/').split('/')[1:])
    base_url = 'https://s3-us-west-2.amazonaws.com/'
    url = os.path.join(base_url, bucket_name, bucket_dir, uuid + '.bam')
    job.fileStore.readGlobalFile(ids['bam'], os.path.join(work_dir, uuid + '.bam'))
    with open(os.path.join(work_dir, uuid + '.key'), 'wb') as f_out:
        f_out.write(generate_unique_key(key_path, url))
    s3am_command = ['s3am', 'upload',
                    '--sse-key-file', os.path.join(work_dir, uuid + '.key'),
                    'file://{}'.format(os.path.join(work_dir, uuid + '.bam')),
                    bucket_name, os.path.join(bucket_dir, uuid + '.bam')]
    subprocess.check_call(s3am_command)
upload_bam_to_s3
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(job, ids, input_args, sample):\n uuid, urls = sample\n key_path = input_args['ssec']\n work_dir = job.fileStore.getLocalTempDir()\n s3_dir = input_args['s3_dir']\n bucket_name = s3_dir.lstrip('/').split('/')[0]\n bucket_dir = '/'.join(s3_dir.lstrip('/').split('/')[1:])\n base_url = 'https://s3-us-west-2.amazonaws.com/'\n url = os.path.join(base_url, bucket_name, bucket_dir, uuid + '.bam')\n job.fileStore.readGlobalFile(ids['bam'], os.path.join(work_dir, uuid +\n '.bam'))\n with open(os.path.join(work_dir, uuid + '.key'), 'wb') as f_out:\n f_out.write(generate_unique_key(key_path, url))\n s3am_command = ['s3am', 'upload', '--sse-key-file', os.path.join(\n work_dir, uuid + '.key'), 'file://{}'.format(os.path.join(work_dir,\n uuid + '.bam')), bucket_name, os.path.join(bucket_dir, uuid + '.bam')]\n subprocess.check_call(s3am_command)\n", "role": "user" }, { "content": "upload_bam_to_s3", "role": "assistant" } ]
def x(job, uuid, vcf_id, config):
    genome_ref_size = (config.genome_fasta.size + config.genome_fai.size + config.genome_dict.size)
    snp_resources = ['hapmap', 'omni', 'dbsnp', 'g1k_snp']
    snp_resource_size = sum(getattr(config, resource).size for resource in snp_resources)
    snp_recal_disk = PromisedRequirement(
        lambda in_vcf, ref_size, resource_size: 2 * in_vcf.size + ref_size + resource_size,
        vcf_id, genome_ref_size, snp_resource_size)
    snp_recal = job.wrapJobFn(gatk_variant_recalibrator, 'SNP', vcf_id,
                              config.genome_fasta, config.genome_fai, config.genome_dict,
                              get_short_annotations(config.snp_filter_annotations),
                              hapmap=config.hapmap, omni=config.omni, phase=config.g1k_snp,
                              dbsnp=config.dbsnp, unsafe_mode=config.unsafe_mode,
                              disk=snp_recal_disk, cores=config.cores, memory=config.xmx)
    indel_resource_size = config.mills.size + config.dbsnp.size
    indel_recal_disk = PromisedRequirement(
        lambda in_vcf, ref_size, resource_size: 2 * in_vcf.size + ref_size + resource_size,
        vcf_id, genome_ref_size, indel_resource_size)
    indel_recal = job.wrapJobFn(gatk_variant_recalibrator, 'INDEL', vcf_id,
                                config.genome_fasta, config.genome_fai, config.genome_dict,
                                get_short_annotations(config.indel_filter_annotations),
                                dbsnp=config.dbsnp, mills=config.mills,
                                unsafe_mode=config.unsafe_mode, disk=indel_recal_disk,
                                cores=config.cores, memory=config.xmx)
    apply_snp_recal_disk = PromisedRequirement(
        lambda in_vcf, recal, tranche, ref_size: int(2.1 * in_vcf.size + recal.size + tranche.size + ref_size),
        vcf_id, snp_recal.rv(0), snp_recal.rv(1), genome_ref_size)
    apply_snp_recal = job.wrapJobFn(gatk_apply_variant_recalibration, 'SNP', vcf_id,
                                    snp_recal.rv(0), snp_recal.rv(1),
                                    config.genome_fasta, config.genome_fai, config.genome_dict,
                                    unsafe_mode=config.unsafe_mode, disk=apply_snp_recal_disk,
                                    cores=config.cores, memory=config.xmx)
    apply_indel_recal_disk = PromisedRequirement(
        lambda in_vcf, recal, tranche, ref_size: int(2.1 * in_vcf.size + recal.size + tranche.size + ref_size),
        vcf_id, indel_recal.rv(0), indel_recal.rv(1), genome_ref_size)
    apply_indel_recal = job.wrapJobFn(gatk_apply_variant_recalibration, 'INDEL',
                                      apply_snp_recal.rv(), indel_recal.rv(0), indel_recal.rv(1),
                                      config.genome_fasta, config.genome_fai, config.genome_dict,
                                      unsafe_mode=config.unsafe_mode, disk=apply_indel_recal_disk,
                                      cores=config.cores, memory=config.xmx)
    job.addChild(snp_recal)
    job.addChild(indel_recal)
    snp_recal.addChild(apply_snp_recal)
    indel_recal.addChild(apply_indel_recal)
    apply_snp_recal.addChild(apply_indel_recal)
    output_dir = config.output_dir
    output_dir = os.path.join(output_dir, uuid)
    vqsr_name = '%s.vqsr%s.vcf' % (uuid, config.suffix)
    output_vqsr = job.wrapJobFn(output_file_job, vqsr_name, apply_indel_recal.rv(), output_dir,
                                s3_key_path=config.ssec,
                                disk=PromisedRequirement(lambda x: x.size, apply_indel_recal.rv()))
    apply_indel_recal.addChild(output_vqsr)
    return apply_indel_recal.rv()
vqsr_pipeline
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(job, uuid, vcf_id, config):\n genome_ref_size = (config.genome_fasta.size + config.genome_fai.size +\n config.genome_dict.size)\n snp_resources = ['hapmap', 'omni', 'dbsnp', 'g1k_snp']\n snp_resource_size = sum(getattr(config, resource).size for resource in\n snp_resources)\n snp_recal_disk = PromisedRequirement(lambda in_vcf, ref_size,\n resource_size: 2 * in_vcf.size + ref_size + resource_size, vcf_id,\n genome_ref_size, snp_resource_size)\n snp_recal = job.wrapJobFn(gatk_variant_recalibrator, 'SNP', vcf_id,\n config.genome_fasta, config.genome_fai, config.genome_dict,\n get_short_annotations(config.snp_filter_annotations), hapmap=config\n .hapmap, omni=config.omni, phase=config.g1k_snp, dbsnp=config.dbsnp,\n unsafe_mode=config.unsafe_mode, disk=snp_recal_disk, cores=config.\n cores, memory=config.xmx)\n indel_resource_size = config.mills.size + config.dbsnp.size\n indel_recal_disk = PromisedRequirement(lambda in_vcf, ref_size,\n resource_size: 2 * in_vcf.size + ref_size + resource_size, vcf_id,\n genome_ref_size, indel_resource_size)\n indel_recal = job.wrapJobFn(gatk_variant_recalibrator, 'INDEL', vcf_id,\n config.genome_fasta, config.genome_fai, config.genome_dict,\n get_short_annotations(config.indel_filter_annotations), dbsnp=\n config.dbsnp, mills=config.mills, unsafe_mode=config.unsafe_mode,\n disk=indel_recal_disk, cores=config.cores, memory=config.xmx)\n apply_snp_recal_disk = PromisedRequirement(lambda in_vcf, recal,\n tranche, ref_size: int(2.1 * in_vcf.size + recal.size + tranche.\n size + ref_size), vcf_id, snp_recal.rv(0), snp_recal.rv(1),\n genome_ref_size)\n apply_snp_recal = job.wrapJobFn(gatk_apply_variant_recalibration, 'SNP',\n vcf_id, snp_recal.rv(0), snp_recal.rv(1), config.genome_fasta,\n config.genome_fai, config.genome_dict, unsafe_mode=config.\n unsafe_mode, disk=apply_snp_recal_disk, cores=config.cores, memory=\n config.xmx)\n apply_indel_recal_disk = PromisedRequirement(lambda in_vcf, recal,\n tranche, ref_size: int(2.1 * in_vcf.size + recal.size + tranche.\n size + ref_size), vcf_id, indel_recal.rv(0), indel_recal.rv(1),\n genome_ref_size)\n apply_indel_recal = job.wrapJobFn(gatk_apply_variant_recalibration,\n 'INDEL', apply_snp_recal.rv(), indel_recal.rv(0), indel_recal.rv(1),\n config.genome_fasta, config.genome_fai, config.genome_dict,\n unsafe_mode=config.unsafe_mode, disk=apply_indel_recal_disk, cores=\n config.cores, memory=config.xmx)\n job.addChild(snp_recal)\n job.addChild(indel_recal)\n snp_recal.addChild(apply_snp_recal)\n indel_recal.addChild(apply_indel_recal)\n apply_snp_recal.addChild(apply_indel_recal)\n output_dir = config.output_dir\n output_dir = os.path.join(output_dir, uuid)\n vqsr_name = '%s.vqsr%s.vcf' % (uuid, config.suffix)\n output_vqsr = job.wrapJobFn(output_file_job, vqsr_name,\n apply_indel_recal.rv(), output_dir, s3_key_path=config.ssec, disk=\n PromisedRequirement(lambda x: x.size, apply_indel_recal.rv()))\n apply_indel_recal.addChild(output_vqsr)\n return apply_indel_recal.rv()\n", "role": "user" }, { "content": "vqsr_pipeline", "role": "assistant" } ]
def x(annotations):
    short_name = {'QualByDepth': 'QD', 'FisherStrand': 'FS', 'StrandOddsRatio': 'SOR',
                  'ReadPosRankSumTest': 'ReadPosRankSum',
                  'MappingQualityRankSumTest': 'MQRankSum',
                  'RMSMappingQuality': 'MQ', 'InbreedingCoeff': 'ID'}
    short_annotations = []
    for annotation in annotations:
        if annotation in short_name:
            annotation = short_name[annotation]
        short_annotations.append(annotation)
    return short_annotations
get_short_annotations
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(annotations):\n short_name = {'QualByDepth': 'QD', 'FisherStrand': 'FS',\n 'StrandOddsRatio': 'SOR', 'ReadPosRankSumTest': 'ReadPosRankSum',\n 'MappingQualityRankSumTest': 'MQRankSum', 'RMSMappingQuality': 'MQ',\n 'InbreedingCoeff': 'ID'}\n short_annotations = []\n for annotation in annotations:\n if annotation in short_name:\n annotation = short_name[annotation]\n short_annotations.append(annotation)\n return short_annotations\n", "role": "user" }, { "content": "get_short_annotations", "role": "assistant" } ]
def x(path_to_config):
    samples = []
    with open(path_to_config, 'r') as f:
        for line in f.readlines():
            if not line.isspace():
                samples.append(line.strip())
    return samples
parse_sra
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(path_to_config):\n samples = []\n with open(path_to_config, 'r') as f:\n for line in f.readlines():\n if not line.isspace():\n samples.append(line.strip())\n return samples\n", "role": "user" }, { "content": "parse_sra", "role": "assistant" } ]
def x(work_dir, tar_name, uuid=None, files=None):
    with tarfile.open(os.path.join(work_dir, tar_name), 'w:gz') as f_out:
        for fname in files:
            if uuid:
                f_out.add(os.path.join(work_dir, fname), arcname=uuid + '.' + fname)
            else:
                f_out.add(os.path.join(work_dir, fname), arcname=fname)
tarball_files
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(work_dir, tar_name, uuid=None, files=None):\n with tarfile.open(os.path.join(work_dir, tar_name), 'w:gz') as f_out:\n for fname in files:\n if uuid:\n f_out.add(os.path.join(work_dir, fname), arcname=uuid + '.' +\n fname)\n else:\n f_out.add(os.path.join(work_dir, fname), arcname=fname)\n", "role": "user" }, { "content": "tarball_files", "role": "assistant" } ]
def x(job, input_args):
    samples = parse_sra(input_args['sra'])
    job.addChildJobFn(download_and_transfer_sample, input_args, samples, cores=1, disk='30')
start_batch
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(job, input_args):\n samples = parse_sra(input_args['sra'])\n job.addChildJobFn(download_and_transfer_sample, input_args, samples,\n cores=1, disk='30')\n", "role": "user" }, { "content": "start_batch", "role": "assistant" } ]
def x(job, input_args, samples):
    if len(samples) > 1:
        a = samples[len(samples) / 2:]
        b = samples[:len(samples) / 2]
        job.addChildJobFn(download_and_transfer_sample, input_args, a, disk='30G')
        job.addChildJobFn(download_and_transfer_sample, input_args, b, disk='30G')
    else:
        analysis_id = samples[0]
        work_dir = job.fileStore.getLocalTempDir()
        sudo = input_args['sudo']
        shutil.copy(input_args['dbgap_key'], os.path.join(work_dir, 'dbgap.ngc'))
        if input_args['single_end']:
            parameters = [analysis_id]
        else:
            parameters = ['--split-files', analysis_id]
        docker_call(tool='quay.io/ucsc_cgl/fastq-dump:2.5.7--4577a6c1a3c94adaa0c25dd6c03518ee610433d1',
                    work_dir=work_dir, tool_parameters=parameters, sudo=sudo)
        shutil.rmtree(os.path.join(work_dir, 'sra'))
        sample_name = analysis_id + '.tar.gz'
        if input_args['single_end']:
            r = [os.path.basename(x) for x in glob.glob(os.path.join(work_dir, '*.f*'))]
            tarball_files(work_dir, tar_name=sample_name, files=r)
        else:
            r1 = [os.path.basename(x) for x in glob.glob(os.path.join(work_dir, '*_1*'))]
            r2 = [os.path.basename(x) for x in glob.glob(os.path.join(work_dir, '*_2*'))]
            tarball_files(work_dir, tar_name=sample_name, files=r1 + r2)
        key_path = input_args['ssec']
        s3_dir = input_args['s3_dir']
        bucket_name = s3_dir.lstrip('/').split('/')[0]
        base_url = 'https://s3-us-west-2.amazonaws.com/'
        url = os.path.join(base_url, bucket_name, sample_name)
        with open(os.path.join(work_dir, 'temp.key'), 'wb') as f_out:
            f_out.write(generate_unique_key(key_path, url))
        s3am_command = ['s3am', 'upload',
                        '--sse-key-file', os.path.join(work_dir, 'temp.key'),
                        'file://{}'.format(os.path.join(work_dir, sample_name)),
                        's3://' + bucket_name + '/']
        subprocess.check_call(s3am_command)
download_and_transfer_sample
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(job, input_args, samples):\n if len(samples) > 1:\n a = samples[len(samples) / 2:]\n b = samples[:len(samples) / 2]\n job.addChildJobFn(download_and_transfer_sample, input_args, a, disk\n ='30G')\n job.addChildJobFn(download_and_transfer_sample, input_args, b, disk\n ='30G')\n else:\n analysis_id = samples[0]\n work_dir = job.fileStore.getLocalTempDir()\n sudo = input_args['sudo']\n shutil.copy(input_args['dbgap_key'], os.path.join(work_dir,\n 'dbgap.ngc'))\n if input_args['single_end']:\n parameters = [analysis_id]\n else:\n parameters = ['--split-files', analysis_id]\n docker_call(tool=\n 'quay.io/ucsc_cgl/fastq-dump:2.5.7--4577a6c1a3c94adaa0c25dd6c03518ee610433d1'\n , work_dir=work_dir, tool_parameters=parameters, sudo=sudo)\n shutil.rmtree(os.path.join(work_dir, 'sra'))\n sample_name = analysis_id + '.tar.gz'\n if input_args['single_end']:\n r = [os.path.basename(x) for x in glob.glob(os.path.join(\n work_dir, '*.f*'))]\n tarball_files(work_dir, tar_name=sample_name, files=r)\n else:\n r1 = [os.path.basename(x) for x in glob.glob(os.path.join(\n work_dir, '*_1*'))]\n r2 = [os.path.basename(x) for x in glob.glob(os.path.join(\n work_dir, '*_2*'))]\n tarball_files(work_dir, tar_name=sample_name, files=r1 + r2)\n key_path = input_args['ssec']\n s3_dir = input_args['s3_dir']\n bucket_name = s3_dir.lstrip('/').split('/')[0]\n base_url = 'https://s3-us-west-2.amazonaws.com/'\n url = os.path.join(base_url, bucket_name, sample_name)\n with open(os.path.join(work_dir, 'temp.key'), 'wb') as f_out:\n f_out.write(generate_unique_key(key_path, url))\n s3am_command = ['s3am', 'upload', '--sse-key-file', os.path.join(\n work_dir, 'temp.key'), 'file://{}'.format(os.path.join(work_dir,\n sample_name)), 's3://' + bucket_name + '/']\n subprocess.check_call(s3am_command)\n", "role": "user" }, { "content": "download_and_transfer_sample", "role": "assistant" } ]
def x():
    parser = build_parser()
    Job.Runner.addToilOptions(parser)
    args = parser.parse_args()
    inputs = {'sra': args.sra, 'dbgap_key': args.dbgap_key, 'ssec': args.ssec,
              's3_dir': args.s3_dir, 'single_end': args.single_end, 'sudo': args.sudo}
    if args.ssec:
        assert os.path.isfile(args.ssec)
    if args.sra:
        assert os.path.isfile(args.sra)
    if args.dbgap_key:
        assert os.path.isfile(args.dbgap_key)
    Job.Runner.startToil(Job.wrapJobFn(start_batch, inputs), args)
main
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x():\n parser = build_parser()\n Job.Runner.addToilOptions(parser)\n args = parser.parse_args()\n inputs = {'sra': args.sra, 'dbgap_key': args.dbgap_key, 'ssec': args.\n ssec, 's3_dir': args.s3_dir, 'single_end': args.single_end, 'sudo':\n args.sudo}\n if args.ssec:\n assert os.path.isfile(args.ssec)\n if args.sra:\n assert os.path.isfile(args.sra)\n if args.dbgap_key:\n assert os.path.isfile(args.dbgap_key)\n Job.Runner.startToil(Job.wrapJobFn(start_batch, inputs), args)\n", "role": "user" }, { "content": "main", "role": "assistant" } ]
def x(job, filename, file_id, output_dir, s3_key_path=None):
    job.fileStore.logToMaster('Writing {} to {}'.format(filename, output_dir))
    work_dir = job.fileStore.getLocalTempDir()
    filepath = job.fileStore.readGlobalFile(file_id, os.path.join(work_dir, filename))
    if urlparse(output_dir).scheme == 's3':
        s3am_upload(job=job, fpath=os.path.join(work_dir, filepath),
                    s3_dir=output_dir, s3_key_path=s3_key_path)
    elif os.path.exists(os.path.join(output_dir, filename)):
        job.fileStore.logToMaster('File already exists: {}'.format(filename))
    else:
        mkdir_p(output_dir)
        copy_files([filepath], output_dir)
output_file_job
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(job, filename, file_id, output_dir, s3_key_path=None):\n job.fileStore.logToMaster('Writing {} to {}'.format(filename, output_dir))\n work_dir = job.fileStore.getLocalTempDir()\n filepath = job.fileStore.readGlobalFile(file_id, os.path.join(work_dir,\n filename))\n if urlparse(output_dir).scheme == 's3':\n s3am_upload(job=job, fpath=os.path.join(work_dir, filepath), s3_dir\n =output_dir, s3_key_path=s3_key_path)\n elif os.path.exists(os.path.join(output_dir, filename)):\n job.fileStore.logToMaster('File already exists: {}'.format(filename))\n else:\n mkdir_p(output_dir)\n copy_files([filepath], output_dir)\n", "role": "user" }, { "content": "output_file_job", "role": "assistant" } ]
def x(job, input_args, name):
    work_dir = job.fileStore.getLocalTempDir()
    key_path = input_args['ssec']
    file_path = os.path.join(work_dir, name)
    url = input_args[name]
    with open(key_path, 'r') as f:
        key = f.read()
    if len(key) != 32:
        raise RuntimeError('Invalid Key! Must be 32 bytes: {}'.format(key))
    key = generate_unique_key(key_path, url)
    encoded_key = base64.b64encode(key)
    encoded_key_md5 = base64.b64encode(hashlib.md5(key).digest())
    h1 = 'x-amz-server-side-encryption-customer-algorithm:AES256'
    h2 = 'x-amz-server-side-encryption-customer-key:{}'.format(encoded_key)
    h3 = 'x-amz-server-side-encryption-customer-key-md5:{}'.format(encoded_key_md5)
    try:
        subprocess.check_call(['curl', '-fs', '--retry', '5', '-H', h1, '-H', h2, '-H', h3, url, '-o', file_path])
    except OSError:
        raise RuntimeError('Failed to find "curl". Install via "apt-get install curl"')
    assert os.path.exists(file_path)
    return job.fileStore.writeGlobalFile(file_path)
download_encrypted_file
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(job, input_args, name):\n work_dir = job.fileStore.getLocalTempDir()\n key_path = input_args['ssec']\n file_path = os.path.join(work_dir, name)\n url = input_args[name]\n with open(key_path, 'r') as f:\n key = f.read()\n if len(key) != 32:\n raise RuntimeError('Invalid Key! Must be 32 bytes: {}'.format(key))\n key = generate_unique_key(key_path, url)\n encoded_key = base64.b64encode(key)\n encoded_key_md5 = base64.b64encode(hashlib.md5(key).digest())\n h1 = 'x-amz-server-side-encryption-customer-algorithm:AES256'\n h2 = 'x-amz-server-side-encryption-customer-key:{}'.format(encoded_key)\n h3 = 'x-amz-server-side-encryption-customer-key-md5:{}'.format(\n encoded_key_md5)\n try:\n subprocess.check_call(['curl', '-fs', '--retry', '5', '-H', h1,\n '-H', h2, '-H', h3, url, '-o', file_path])\n except OSError:\n raise RuntimeError(\n 'Failed to find \"curl\". Install via \"apt-get install curl\"')\n assert os.path.exists(file_path)\n return job.fileStore.writeGlobalFile(file_path)\n", "role": "user" }, { "content": "download_encrypted_file", "role": "assistant" } ]
def x(job, url):
    work_dir = job.fileStore.getLocalTempDir()
    file_path = os.path.join(work_dir, os.path.basename(url))
    if not os.path.exists(file_path):
        if url.startswith('s3:'):
            download_from_s3_url(file_path, url)
        else:
            try:
                subprocess.check_call(['curl', '-fs', '--retry', '5', '--create-dir', url, '-o', file_path])
            except OSError:
                raise RuntimeError('Failed to find "curl". Install via "apt-get install curl"')
    assert os.path.exists(file_path)
    return job.fileStore.writeGlobalFile(file_path)
download_from_url
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(job, url):\n work_dir = job.fileStore.getLocalTempDir()\n file_path = os.path.join(work_dir, os.path.basename(url))\n if not os.path.exists(file_path):\n if url.startswith('s3:'):\n download_from_s3_url(file_path, url)\n else:\n try:\n subprocess.check_call(['curl', '-fs', '--retry', '5',\n '--create-dir', url, '-o', file_path])\n except OSError:\n raise RuntimeError(\n 'Failed to find \"curl\". Install via \"apt-get install curl\"'\n )\n assert os.path.exists(file_path)\n return job.fileStore.writeGlobalFile(file_path)\n", "role": "user" }, { "content": "download_from_url", "role": "assistant" } ]
def x(work_dir, tool_parameters, tool, java_opts=None, outfile=None, sudo=False):
    base_docker_call = 'docker run --log-driver=none --rm -v {}:/data'.format(work_dir).split()
    if sudo:
        base_docker_call = ['sudo'] + base_docker_call
    if java_opts:
        base_docker_call = base_docker_call + ['-e', 'JAVA_OPTS={}'.format(java_opts)]
    try:
        if outfile:
            subprocess.check_call(base_docker_call + [tool] + tool_parameters, stdout=outfile)
        else:
            subprocess.check_call(base_docker_call + [tool] + tool_parameters)
    except subprocess.CalledProcessError:
        raise RuntimeError('docker command returned a non-zero exit status. Check error logs.')
    except OSError:
        raise RuntimeError('docker not found on system. Install on all nodes.')
docker_call
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(work_dir, tool_parameters, tool, java_opts=None, outfile=None, sudo=False\n ):\n base_docker_call = 'docker run --log-driver=none --rm -v {}:/data'.format(\n work_dir).split()\n if sudo:\n base_docker_call = ['sudo'] + base_docker_call\n if java_opts:\n base_docker_call = base_docker_call + ['-e', 'JAVA_OPTS={}'.format(\n java_opts)]\n try:\n if outfile:\n subprocess.check_call(base_docker_call + [tool] +\n tool_parameters, stdout=outfile)\n else:\n subprocess.check_call(base_docker_call + [tool] + tool_parameters)\n except subprocess.CalledProcessError:\n raise RuntimeError(\n 'docker command returned a non-zero exit status. Check error logs.'\n )\n except OSError:\n raise RuntimeError('docker not found on system. Install on all nodes.')\n", "role": "user" }, { "content": "docker_call", "role": "assistant" } ]
def x(work_dir, output_dir, uuid=None, files=list()):
    for fname in files:
        if uuid is None:
            shutil.copy(os.path.join(work_dir, fname), os.path.join(output_dir, fname))
        else:
            shutil.copy(os.path.join(work_dir, fname), os.path.join(output_dir, '{}.{}'.format(uuid, fname)))
copy_to_output_dir
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(work_dir, output_dir, uuid=None, files=list()):\n for fname in files:\n if uuid is None:\n shutil.copy(os.path.join(work_dir, fname), os.path.join(\n output_dir, fname))\n else:\n shutil.copy(os.path.join(work_dir, fname), os.path.join(\n output_dir, '{}.{}'.format(uuid, fname)))\n", "role": "user" }, { "content": "copy_to_output_dir", "role": "assistant" } ]
def x(job, input_args):
    for program in ['curl', 'docker', 'unzip', 'samtools']:
        assert which(program), 'Program "{}" must be installed on every node.'.format(program)
    job.addChildJobFn(download_shared_files, input_args)
program_checks
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(job, input_args):\n for program in ['curl', 'docker', 'unzip', 'samtools']:\n assert which(program\n ), 'Program \"{}\" must be installed on every node.'.format(program)\n job.addChildJobFn(download_shared_files, input_args)\n", "role": "user" }, { "content": "program_checks", "role": "assistant" } ]
def x(job, input_args):
    shared_files = ['unc.bed', 'hg19.transcripts.fa', 'composite_exons.bed',
                    'normalize.pl', 'rsem_ref.zip', 'ebwt.zip', 'chromosomes.zip']
    shared_ids = {}
    for f in shared_files:
        shared_ids[f] = job.addChildJobFn(download_from_url, input_args[f]).rv()
    if input_args['config'] or input_args['config_fastq']:
        job.addFollowOnJobFn(parse_config_file, shared_ids, input_args)
    else:
        sample_path = input_args['input']
        uuid = os.path.splitext(os.path.basename(sample_path))[0]
        sample = uuid, sample_path
        job.addFollowOnJobFn(download_sample, shared_ids, input_args, sample)
download_shared_files
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(job, input_args):\n shared_files = ['unc.bed', 'hg19.transcripts.fa', 'composite_exons.bed',\n 'normalize.pl', 'rsem_ref.zip', 'ebwt.zip', 'chromosomes.zip']\n shared_ids = {}\n for f in shared_files:\n shared_ids[f] = job.addChildJobFn(download_from_url, input_args[f]).rv(\n )\n if input_args['config'] or input_args['config_fastq']:\n job.addFollowOnJobFn(parse_config_file, shared_ids, input_args)\n else:\n sample_path = input_args['input']\n uuid = os.path.splitext(os.path.basename(sample_path))[0]\n sample = uuid, sample_path\n job.addFollowOnJobFn(download_sample, shared_ids, input_args, sample)\n", "role": "user" }, { "content": "download_shared_files", "role": "assistant" } ]
def x(job, ids, input_args):
    samples = []
    config = input_args['config']
    with open(config, 'r') as f:
        for line in f.readlines():
            if not line.isspace():
                sample = line.strip().split(',')
                samples.append(sample)
    for sample in samples:
        job.addChildJobFn(download_sample, ids, input_args, sample)
parse_config_file
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(job, ids, input_args):\n samples = []\n config = input_args['config']\n with open(config, 'r') as f:\n for line in f.readlines():\n if not line.isspace():\n sample = line.strip().split(',')\n samples.append(sample)\n for sample in samples:\n job.addChildJobFn(download_sample, ids, input_args, sample)\n", "role": "user" }, { "content": "parse_config_file", "role": "assistant" } ]
def x(job, ids, input_args, sample):
    if len(sample) == 2:
        uuid, sample_location = sample
        url1, url2 = None, None
    else:
        uuid, url1, url2 = sample
        sample_location = None
    sample_input = dict(input_args)
    sample_input['uuid'] = uuid
    sample_input['sample.tar'] = sample_location
    if sample_input['output_dir']:
        sample_input['output_dir'] = os.path.join(input_args['output_dir'], uuid)
    sample_input['cpu_count'] = multiprocessing.cpu_count()
    job_vars = sample_input, ids
    if sample_input['input']:
        ids['sample.tar'] = job.fileStore.writeGlobalFile(os.path.abspath(sample_location))
    elif sample_input['config_fastq']:
        ids['R1.fastq'] = job.fileStore.writeGlobalFile(urlparse(url1).path)
        ids['R2.fastq'] = job.fileStore.writeGlobalFile(urlparse(url2).path)
    elif sample_input['ssec']:
        ids['sample.tar'] = job.addChildJobFn(download_encrypted_file, sample_input, 'sample.tar', disk='25G').rv()
    else:
        ids['sample.tar'] = job.addChildJobFn(download_from_url, sample_input['sample.tar'], disk='25G').rv()
    job.addFollowOnJobFn(static_dag_launchpoint, job_vars)
download_sample
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(job, ids, input_args, sample):\n if len(sample) == 2:\n uuid, sample_location = sample\n url1, url2 = None, None\n else:\n uuid, url1, url2 = sample\n sample_location = None\n sample_input = dict(input_args)\n sample_input['uuid'] = uuid\n sample_input['sample.tar'] = sample_location\n if sample_input['output_dir']:\n sample_input['output_dir'] = os.path.join(input_args['output_dir'],\n uuid)\n sample_input['cpu_count'] = multiprocessing.cpu_count()\n job_vars = sample_input, ids\n if sample_input['input']:\n ids['sample.tar'] = job.fileStore.writeGlobalFile(os.path.abspath(\n sample_location))\n elif sample_input['config_fastq']:\n ids['R1.fastq'] = job.fileStore.writeGlobalFile(urlparse(url1).path)\n ids['R2.fastq'] = job.fileStore.writeGlobalFile(urlparse(url2).path)\n elif sample_input['ssec']:\n ids['sample.tar'] = job.addChildJobFn(download_encrypted_file,\n sample_input, 'sample.tar', disk='25G').rv()\n else:\n ids['sample.tar'] = job.addChildJobFn(download_from_url,\n sample_input['sample.tar'], disk='25G').rv()\n job.addFollowOnJobFn(static_dag_launchpoint, job_vars)\n", "role": "user" }, { "content": "download_sample", "role": "assistant" } ]
def x(job, job_vars):
    input_args, ids = job_vars
    if input_args['config_fastq']:
        cores = input_args['cpu_count']
        a = job.wrapJobFn(mapsplice, job_vars, cores=cores, disk='130G').encapsulate()
    else:
        a = job.wrapJobFn(merge_fastqs, job_vars, disk='70 G').encapsulate()
    b = job.wrapJobFn(consolidate_output, job_vars, a.rv())
    job.addChild(a)
    a.addChild(b)
static_dag_launchpoint
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(job, job_vars):\n input_args, ids = job_vars\n if input_args['config_fastq']:\n cores = input_args['cpu_count']\n a = job.wrapJobFn(mapsplice, job_vars, cores=cores, disk='130G'\n ).encapsulate()\n else:\n a = job.wrapJobFn(merge_fastqs, job_vars, disk='70 G').encapsulate()\n b = job.wrapJobFn(consolidate_output, job_vars, a.rv())\n job.addChild(a)\n a.addChild(b)\n", "role": "user" }, { "content": "static_dag_launchpoint", "role": "assistant" } ]
def x(job, job_vars):
    input_args, ids = job_vars
    work_dir = job.fileStore.getLocalTempDir()
    cores = input_args['cpu_count']
    single_end_reads = input_args['single_end_reads']
    sample = return_input_paths(job, work_dir, ids, 'sample.tar')
    subprocess.check_call(['tar', '-xvf', sample, '-C', work_dir])
    os.remove(os.path.join(work_dir, 'sample.tar'))
    if single_end_reads:
        files = sorted(glob.glob(os.path.join(work_dir, '*')))
        with open(os.path.join(work_dir, 'R1.fastq'), 'w') as f1:
            subprocess.check_call(['zcat'] + files, stdout=f1)
        ids['R1.fastq'] = job.fileStore.writeGlobalFile(os.path.join(work_dir, 'R1.fastq'))
    else:
        r1_files = sorted(glob.glob(os.path.join(work_dir, '*R1*')))
        r2_files = sorted(glob.glob(os.path.join(work_dir, '*R2*')))
        with open(os.path.join(work_dir, 'R1.fastq'), 'w') as f1:
            p1 = subprocess.Popen(['zcat'] + r1_files, stdout=f1)
        with open(os.path.join(work_dir, 'R2.fastq'), 'w') as f2:
            p2 = subprocess.Popen(['zcat'] + r2_files, stdout=f2)
        p1.wait()
        p2.wait()
        ids['R1.fastq'] = job.fileStore.writeGlobalFile(os.path.join(work_dir, 'R1.fastq'))
        ids['R2.fastq'] = job.fileStore.writeGlobalFile(os.path.join(work_dir, 'R2.fastq'))
    job.fileStore.deleteGlobalFile(ids['sample.tar'])
    return job.addChildJobFn(mapsplice, job_vars, cores=cores, disk='130 G').rv()
merge_fastqs
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(job, job_vars):\n input_args, ids = job_vars\n work_dir = job.fileStore.getLocalTempDir()\n cores = input_args['cpu_count']\n single_end_reads = input_args['single_end_reads']\n sample = return_input_paths(job, work_dir, ids, 'sample.tar')\n subprocess.check_call(['tar', '-xvf', sample, '-C', work_dir])\n os.remove(os.path.join(work_dir, 'sample.tar'))\n if single_end_reads:\n files = sorted(glob.glob(os.path.join(work_dir, '*')))\n with open(os.path.join(work_dir, 'R1.fastq'), 'w') as f1:\n subprocess.check_call(['zcat'] + files, stdout=f1)\n ids['R1.fastq'] = job.fileStore.writeGlobalFile(os.path.join(\n work_dir, 'R1.fastq'))\n else:\n r1_files = sorted(glob.glob(os.path.join(work_dir, '*R1*')))\n r2_files = sorted(glob.glob(os.path.join(work_dir, '*R2*')))\n with open(os.path.join(work_dir, 'R1.fastq'), 'w') as f1:\n p1 = subprocess.Popen(['zcat'] + r1_files, stdout=f1)\n with open(os.path.join(work_dir, 'R2.fastq'), 'w') as f2:\n p2 = subprocess.Popen(['zcat'] + r2_files, stdout=f2)\n p1.wait()\n p2.wait()\n ids['R1.fastq'] = job.fileStore.writeGlobalFile(os.path.join(\n work_dir, 'R1.fastq'))\n ids['R2.fastq'] = job.fileStore.writeGlobalFile(os.path.join(\n work_dir, 'R2.fastq'))\n job.fileStore.deleteGlobalFile(ids['sample.tar'])\n return job.addChildJobFn(mapsplice, job_vars, cores=cores, disk='130 G'\n ).rv()\n", "role": "user" }, { "content": "merge_fastqs", "role": "assistant" } ]
def x(job, job_vars): input_args, ids = job_vars work_dir = job.fileStore.getLocalTempDir() cores = input_args['cpu_count'] sudo = input_args['sudo'] single_end_reads = input_args['single_end_reads'] files_to_delete = ['R1.fastq'] return_input_paths(job, work_dir, ids, 'ebwt.zip', 'chromosomes.zip') if single_end_reads: return_input_paths(job, work_dir, ids, 'R1.fastq') else: return_input_paths(job, work_dir, ids, 'R1.fastq', 'R2.fastq') files_to_delete.extend(['R2.fastq']) for fname in ['chromosomes.zip', 'ebwt.zip']: subprocess.check_call(['unzip', '-o', os.path.join(work_dir, fname), '-d', work_dir]) parameters = ['-p', str(cores), '-s', '25', '--bam', '--min-map-len', '50', '-x', '/data/ebwt', '-c', '/data/chromosomes', '-1', '/data/R1.fastq', '-o', '/data'] if not single_end_reads: parameters.extend(['-2', '/data/R2.fastq']) docker_call(tool= 'quay.io/ucsc_cgl/mapsplice:2.1.8--dd5ac549b95eb3e5d166a5e310417ef13651994e' , tool_parameters=parameters, work_dir=work_dir, sudo=sudo) for fname in ['alignments.bam', 'stats.txt']: ids[fname] = job.fileStore.writeGlobalFile(os.path.join(work_dir, fname)) for fname in files_to_delete: job.fileStore.deleteGlobalFile(ids[fname]) if input_args['upload_bam_to_s3'] and input_args['s3_dir']: job.addChildJobFn(upload_bam_to_s3, job_vars) output_ids = job.addChildJobFn(add_read_groups, job_vars, disk='30 G').rv() return output_ids
mapsplice
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(job, job_vars):\n input_args, ids = job_vars\n work_dir = job.fileStore.getLocalTempDir()\n cores = input_args['cpu_count']\n sudo = input_args['sudo']\n single_end_reads = input_args['single_end_reads']\n files_to_delete = ['R1.fastq']\n return_input_paths(job, work_dir, ids, 'ebwt.zip', 'chromosomes.zip')\n if single_end_reads:\n return_input_paths(job, work_dir, ids, 'R1.fastq')\n else:\n return_input_paths(job, work_dir, ids, 'R1.fastq', 'R2.fastq')\n files_to_delete.extend(['R2.fastq'])\n for fname in ['chromosomes.zip', 'ebwt.zip']:\n subprocess.check_call(['unzip', '-o', os.path.join(work_dir, fname),\n '-d', work_dir])\n parameters = ['-p', str(cores), '-s', '25', '--bam', '--min-map-len',\n '50', '-x', '/data/ebwt', '-c', '/data/chromosomes', '-1',\n '/data/R1.fastq', '-o', '/data']\n if not single_end_reads:\n parameters.extend(['-2', '/data/R2.fastq'])\n docker_call(tool=\n 'quay.io/ucsc_cgl/mapsplice:2.1.8--dd5ac549b95eb3e5d166a5e310417ef13651994e'\n , tool_parameters=parameters, work_dir=work_dir, sudo=sudo)\n for fname in ['alignments.bam', 'stats.txt']:\n ids[fname] = job.fileStore.writeGlobalFile(os.path.join(work_dir,\n fname))\n for fname in files_to_delete:\n job.fileStore.deleteGlobalFile(ids[fname])\n if input_args['upload_bam_to_s3'] and input_args['s3_dir']:\n job.addChildJobFn(upload_bam_to_s3, job_vars)\n output_ids = job.addChildJobFn(add_read_groups, job_vars, disk='30 G').rv()\n return output_ids\n", "role": "user" }, { "content": "mapsplice", "role": "assistant" } ]
def x(job, job_vars): input_args, ids = job_vars work_dir = job.fileStore.getLocalTempDir() sudo = input_args['sudo'] alignments = return_input_paths(job, work_dir, ids, 'alignments.bam') output = os.path.join(work_dir, 'rg_alignments.bam') parameter = ['AddOrReplaceReadGroups', 'INPUT={}'.format(docker_path( alignments)), 'OUTPUT={}'.format(docker_path(output)), 'RGSM={}'. format(input_args['uuid']), 'RGID={}'.format(input_args['uuid']), 'RGLB=TruSeq', 'RGPL=illumina', 'RGPU=barcode', 'VALIDATION_STRINGENCY=SILENT'] docker_call(tool= 'quay.io/ucsc_cgl/picardtools:1.95--dd5ac549b95eb3e5d166a5e310417ef13651994e' , tool_parameters=parameter, work_dir=work_dir, sudo=sudo) ids['rg_alignments.bam'] = job.fileStore.writeGlobalFile(output) return job.addChildJobFn(bamsort_and_index, job_vars, disk='30 G').rv()
add_read_groups
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(job, job_vars):\n input_args, ids = job_vars\n work_dir = job.fileStore.getLocalTempDir()\n sudo = input_args['sudo']\n alignments = return_input_paths(job, work_dir, ids, 'alignments.bam')\n output = os.path.join(work_dir, 'rg_alignments.bam')\n parameter = ['AddOrReplaceReadGroups', 'INPUT={}'.format(docker_path(\n alignments)), 'OUTPUT={}'.format(docker_path(output)), 'RGSM={}'.\n format(input_args['uuid']), 'RGID={}'.format(input_args['uuid']),\n 'RGLB=TruSeq', 'RGPL=illumina', 'RGPU=barcode',\n 'VALIDATION_STRINGENCY=SILENT']\n docker_call(tool=\n 'quay.io/ucsc_cgl/picardtools:1.95--dd5ac549b95eb3e5d166a5e310417ef13651994e'\n , tool_parameters=parameter, work_dir=work_dir, sudo=sudo)\n ids['rg_alignments.bam'] = job.fileStore.writeGlobalFile(output)\n return job.addChildJobFn(bamsort_and_index, job_vars, disk='30 G').rv()\n", "role": "user" }, { "content": "add_read_groups", "role": "assistant" } ]
def x(job, job_vars): input_args, ids = job_vars work_dir = job.fileStore.getLocalTempDir() sudo = input_args['sudo'] rg_alignments = return_input_paths(job, work_dir, ids, 'rg_alignments.bam') output = os.path.join(work_dir, 'sorted.bam') cmd1 = ['sort', docker_path(rg_alignments), docker_path('sorted')] cmd2 = ['index', docker_path(output)] docker_call(tool= 'quay.io/ucsc_cgl/samtools:0.1.19--dd5ac549b95eb3e5d166a5e310417ef13651994e' , tool_parameters=cmd1, work_dir=work_dir, sudo=sudo) docker_call(tool= 'quay.io/ucsc_cgl/samtools:0.1.19--dd5ac549b95eb3e5d166a5e310417ef13651994e' , tool_parameters=cmd2, work_dir=work_dir, sudo=sudo) ids['sorted.bam'] = job.fileStore.writeGlobalFile(output) ids['sorted.bam.bai'] = job.fileStore.writeGlobalFile(os.path.join( work_dir, 'sorted.bam.bai')) output_ids = job.addChildJobFn(sort_bam_by_reference, job_vars, disk='50 G' ).rv() rseq_id = job.addChildJobFn(rseq_qc, job_vars, disk='20 G').rv() return rseq_id, output_ids
bamsort_and_index
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(job, job_vars):\n input_args, ids = job_vars\n work_dir = job.fileStore.getLocalTempDir()\n sudo = input_args['sudo']\n rg_alignments = return_input_paths(job, work_dir, ids, 'rg_alignments.bam')\n output = os.path.join(work_dir, 'sorted.bam')\n cmd1 = ['sort', docker_path(rg_alignments), docker_path('sorted')]\n cmd2 = ['index', docker_path(output)]\n docker_call(tool=\n 'quay.io/ucsc_cgl/samtools:0.1.19--dd5ac549b95eb3e5d166a5e310417ef13651994e'\n , tool_parameters=cmd1, work_dir=work_dir, sudo=sudo)\n docker_call(tool=\n 'quay.io/ucsc_cgl/samtools:0.1.19--dd5ac549b95eb3e5d166a5e310417ef13651994e'\n , tool_parameters=cmd2, work_dir=work_dir, sudo=sudo)\n ids['sorted.bam'] = job.fileStore.writeGlobalFile(output)\n ids['sorted.bam.bai'] = job.fileStore.writeGlobalFile(os.path.join(\n work_dir, 'sorted.bam.bai'))\n output_ids = job.addChildJobFn(sort_bam_by_reference, job_vars, disk='50 G'\n ).rv()\n rseq_id = job.addChildJobFn(rseq_qc, job_vars, disk='20 G').rv()\n return rseq_id, output_ids\n", "role": "user" }, { "content": "bamsort_and_index", "role": "assistant" } ]
def x(job, job_vars): input_args, ids = job_vars work_dir = job.fileStore.getLocalTempDir() uuid = input_args['uuid'] sudo = input_args['sudo'] return_input_paths(job, work_dir, ids, 'sorted.bam', 'sorted.bam.bai') docker_call(tool='jvivian/qc', tool_parameters=[ '/opt/cgl-docker-lib/RseqQC_v2.sh', '/data/sorted.bam', uuid], work_dir=work_dir, sudo=sudo) output_files = [f for f in glob.glob(os.path.join(work_dir, '*')) if 'sorted.bam' not in f] tarball_files(work_dir, tar_name='qc.tar.gz', uuid=None, files=output_files ) return job.fileStore.writeGlobalFile(os.path.join(work_dir, 'qc.tar.gz'))
rseq_qc
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(job, job_vars):\n input_args, ids = job_vars\n work_dir = job.fileStore.getLocalTempDir()\n uuid = input_args['uuid']\n sudo = input_args['sudo']\n return_input_paths(job, work_dir, ids, 'sorted.bam', 'sorted.bam.bai')\n docker_call(tool='jvivian/qc', tool_parameters=[\n '/opt/cgl-docker-lib/RseqQC_v2.sh', '/data/sorted.bam', uuid],\n work_dir=work_dir, sudo=sudo)\n output_files = [f for f in glob.glob(os.path.join(work_dir, '*')) if \n 'sorted.bam' not in f]\n tarball_files(work_dir, tar_name='qc.tar.gz', uuid=None, files=output_files\n )\n return job.fileStore.writeGlobalFile(os.path.join(work_dir, 'qc.tar.gz'))\n", "role": "user" }, { "content": "rseq_qc", "role": "assistant" } ]
def x(job, job_vars): input_args, ids = job_vars work_dir = job.fileStore.getLocalTempDir() sorted_bam, sorted_bai = return_input_paths(job, work_dir, ids, 'sorted.bam', 'sorted.bam.bai') output = os.path.join(work_dir, 'sort_by_ref.bam') ref_seqs = [] handle = subprocess.Popen(['samtools', 'view', '-H', sorted_bam], stdout=subprocess.PIPE).stdout for line in handle: if line.startswith('@SQ'): tmp = line.split('\t') chrom = tmp[1].split(':')[1] ref_seqs.append(chrom) handle.close() for chrom in ref_seqs: cmd_view = ['samtools', 'view', '-b', sorted_bam, chrom] cmd_sort = ['samtools', 'sort', '-m', '3000000000', '-n', '-', os. path.join(work_dir, chrom)] p1 = subprocess.Popen(cmd_view, stdout=subprocess.PIPE) subprocess.check_call(cmd_sort, stdin=p1.stdout) sorted_files = [(os.path.join(work_dir, chrom) + '.bam') for chrom in ref_seqs] cmd = ['samtools', 'cat', '-o', output] + sorted_files subprocess.check_call(cmd) ids['sort_by_ref.bam'] = job.fileStore.writeGlobalFile(output) rsem_id = job.addChildJobFn(transcriptome, job_vars, disk='30 G', memory='30 G').rv() exon_id = job.addChildJobFn(exon_count, job_vars, disk='30 G').rv() return exon_id, rsem_id
sort_bam_by_reference
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(job, job_vars):\n input_args, ids = job_vars\n work_dir = job.fileStore.getLocalTempDir()\n sorted_bam, sorted_bai = return_input_paths(job, work_dir, ids,\n 'sorted.bam', 'sorted.bam.bai')\n output = os.path.join(work_dir, 'sort_by_ref.bam')\n ref_seqs = []\n handle = subprocess.Popen(['samtools', 'view', '-H', sorted_bam],\n stdout=subprocess.PIPE).stdout\n for line in handle:\n if line.startswith('@SQ'):\n tmp = line.split('\\t')\n chrom = tmp[1].split(':')[1]\n ref_seqs.append(chrom)\n handle.close()\n for chrom in ref_seqs:\n cmd_view = ['samtools', 'view', '-b', sorted_bam, chrom]\n cmd_sort = ['samtools', 'sort', '-m', '3000000000', '-n', '-', os.\n path.join(work_dir, chrom)]\n p1 = subprocess.Popen(cmd_view, stdout=subprocess.PIPE)\n subprocess.check_call(cmd_sort, stdin=p1.stdout)\n sorted_files = [(os.path.join(work_dir, chrom) + '.bam') for chrom in\n ref_seqs]\n cmd = ['samtools', 'cat', '-o', output] + sorted_files\n subprocess.check_call(cmd)\n ids['sort_by_ref.bam'] = job.fileStore.writeGlobalFile(output)\n rsem_id = job.addChildJobFn(transcriptome, job_vars, disk='30 G',\n memory='30 G').rv()\n exon_id = job.addChildJobFn(exon_count, job_vars, disk='30 G').rv()\n return exon_id, rsem_id\n", "role": "user" }, { "content": "sort_bam_by_reference", "role": "assistant" } ]
def x(job, job_vars): input_args, ids = job_vars work_dir = job.fileStore.getLocalTempDir() uuid = input_args['uuid'] sudo = input_args['sudo'] sort_by_ref, normalize_pl, composite_bed = return_input_paths(job, work_dir, ids, 'sort_by_ref.bam', 'normalize.pl', 'composite_exons.bed' ) tool = 'jvivian/bedtools' cmd_1 = ['coverage', '-split', '-abam', docker_path(sort_by_ref), '-b', docker_path(composite_bed)] cmd_2 = ['perl', os.path.join(work_dir, 'normalize.pl'), sort_by_ref, composite_bed] popen_docker = ['docker', 'run', '-v', '{}:/data'.format(work_dir), tool] if sudo: popen_docker = ['sudo'] + popen_docker p = subprocess.Popen(popen_docker + cmd_1, stdout=subprocess.PIPE) with open(os.path.join(work_dir, 'exon_quant'), 'w') as f: subprocess.check_call(cmd_2, stdin=p.stdout, stdout=f) p1 = subprocess.Popen(['cat', os.path.join(work_dir, 'exon_quant')], stdout=subprocess.PIPE) p2 = subprocess.Popen(['tr', '":"', '"\t"'], stdin=p1.stdout, stdout= subprocess.PIPE) p3 = subprocess.Popen(['tr', '"-"', '"\t"'], stdin=p2.stdout, stdout= subprocess.PIPE) with open(os.path.join(work_dir, 'exon_quant.bed'), 'w') as f: subprocess.check_call(['cut', '-f1-4'], stdin=p3.stdout, stdout=f) output_files = ['exon_quant.bed', 'exon_quant'] tarball_files(work_dir, tar_name='exon.tar.gz', uuid=uuid, files= output_files) return job.fileStore.writeGlobalFile(os.path.join(work_dir, 'exon.tar.gz'))
exon_count
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(job, job_vars):\n input_args, ids = job_vars\n work_dir = job.fileStore.getLocalTempDir()\n uuid = input_args['uuid']\n sudo = input_args['sudo']\n sort_by_ref, normalize_pl, composite_bed = return_input_paths(job,\n work_dir, ids, 'sort_by_ref.bam', 'normalize.pl', 'composite_exons.bed'\n )\n tool = 'jvivian/bedtools'\n cmd_1 = ['coverage', '-split', '-abam', docker_path(sort_by_ref), '-b',\n docker_path(composite_bed)]\n cmd_2 = ['perl', os.path.join(work_dir, 'normalize.pl'), sort_by_ref,\n composite_bed]\n popen_docker = ['docker', 'run', '-v', '{}:/data'.format(work_dir), tool]\n if sudo:\n popen_docker = ['sudo'] + popen_docker\n p = subprocess.Popen(popen_docker + cmd_1, stdout=subprocess.PIPE)\n with open(os.path.join(work_dir, 'exon_quant'), 'w') as f:\n subprocess.check_call(cmd_2, stdin=p.stdout, stdout=f)\n p1 = subprocess.Popen(['cat', os.path.join(work_dir, 'exon_quant')],\n stdout=subprocess.PIPE)\n p2 = subprocess.Popen(['tr', '\":\"', '\"\\t\"'], stdin=p1.stdout, stdout=\n subprocess.PIPE)\n p3 = subprocess.Popen(['tr', '\"-\"', '\"\\t\"'], stdin=p2.stdout, stdout=\n subprocess.PIPE)\n with open(os.path.join(work_dir, 'exon_quant.bed'), 'w') as f:\n subprocess.check_call(['cut', '-f1-4'], stdin=p3.stdout, stdout=f)\n output_files = ['exon_quant.bed', 'exon_quant']\n tarball_files(work_dir, tar_name='exon.tar.gz', uuid=uuid, files=\n output_files)\n return job.fileStore.writeGlobalFile(os.path.join(work_dir, 'exon.tar.gz'))\n", "role": "user" }, { "content": "exon_count", "role": "assistant" } ]
def x(job, job_vars): input_args, ids = job_vars work_dir = job.fileStore.getLocalTempDir() sudo = input_args['sudo'] sort_by_ref, bed, hg19_fa = return_input_paths(job, work_dir, ids, 'sort_by_ref.bam', 'unc.bed', 'hg19.transcripts.fa') output = os.path.join(work_dir, 'transcriptome.bam') parameters = ['sam-xlate', '--bed', docker_path(bed), '--in', docker_path(sort_by_ref), '--order', docker_path(hg19_fa), '--out', docker_path(output), '--xgtag', '--reverse'] docker_call(tool= 'quay.io/ucsc_cgl/ubu:1.2--02806964cdf74bf5c39411b236b4c4e36d026843', tool_parameters=parameters, work_dir=work_dir, java_opts='-Xmx30g', sudo=sudo) ids['transcriptome.bam'] = job.fileStore.writeGlobalFile(output) return job.addChildJobFn(filter_bam, job_vars, memory='30G', disk='30G' ).rv()
transcriptome
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(job, job_vars):\n input_args, ids = job_vars\n work_dir = job.fileStore.getLocalTempDir()\n sudo = input_args['sudo']\n sort_by_ref, bed, hg19_fa = return_input_paths(job, work_dir, ids,\n 'sort_by_ref.bam', 'unc.bed', 'hg19.transcripts.fa')\n output = os.path.join(work_dir, 'transcriptome.bam')\n parameters = ['sam-xlate', '--bed', docker_path(bed), '--in',\n docker_path(sort_by_ref), '--order', docker_path(hg19_fa), '--out',\n docker_path(output), '--xgtag', '--reverse']\n docker_call(tool=\n 'quay.io/ucsc_cgl/ubu:1.2--02806964cdf74bf5c39411b236b4c4e36d026843',\n tool_parameters=parameters, work_dir=work_dir, java_opts='-Xmx30g',\n sudo=sudo)\n ids['transcriptome.bam'] = job.fileStore.writeGlobalFile(output)\n return job.addChildJobFn(filter_bam, job_vars, memory='30G', disk='30G'\n ).rv()\n", "role": "user" }, { "content": "transcriptome", "role": "assistant" } ]
def x(job, job_vars): input_args, ids = job_vars work_dir = job.fileStore.getLocalTempDir() cores = input_args['cpu_count'] sudo = input_args['sudo'] transcriptome_bam = return_input_paths(job, work_dir, ids, 'transcriptome.bam') output = os.path.join(work_dir, 'filtered.bam') parameters = ['sam-filter', '--strip-indels', '--max-insert', '1000', '--mapq', '1', '--in', docker_path(transcriptome_bam), '--out', docker_path(output)] docker_call(tool= 'quay.io/ucsc_cgl/ubu:1.2--02806964cdf74bf5c39411b236b4c4e36d026843', tool_parameters=parameters, work_dir=os.path.dirname(output), java_opts='-Xmx30g', sudo=sudo) ids['filtered.bam'] = job.fileStore.writeGlobalFile(output) return job.addChildJobFn(rsem, job_vars, cores=cores, disk='30 G').rv()
filter_bam
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(job, job_vars):\n input_args, ids = job_vars\n work_dir = job.fileStore.getLocalTempDir()\n cores = input_args['cpu_count']\n sudo = input_args['sudo']\n transcriptome_bam = return_input_paths(job, work_dir, ids,\n 'transcriptome.bam')\n output = os.path.join(work_dir, 'filtered.bam')\n parameters = ['sam-filter', '--strip-indels', '--max-insert', '1000',\n '--mapq', '1', '--in', docker_path(transcriptome_bam), '--out',\n docker_path(output)]\n docker_call(tool=\n 'quay.io/ucsc_cgl/ubu:1.2--02806964cdf74bf5c39411b236b4c4e36d026843',\n tool_parameters=parameters, work_dir=os.path.dirname(output),\n java_opts='-Xmx30g', sudo=sudo)\n ids['filtered.bam'] = job.fileStore.writeGlobalFile(output)\n return job.addChildJobFn(rsem, job_vars, cores=cores, disk='30 G').rv()\n", "role": "user" }, { "content": "filter_bam", "role": "assistant" } ]
def x(job, job_vars): input_args, ids = job_vars work_dir = job.fileStore.getLocalTempDir() cpus = input_args['cpu_count'] sudo = input_args['sudo'] single_end_reads = input_args['single_end_reads'] filtered_bam, rsem_ref = return_input_paths(job, work_dir, ids, 'filtered.bam', 'rsem_ref.zip') subprocess.check_call(['unzip', '-o', os.path.join(work_dir, 'rsem_ref.zip'), '-d', work_dir]) output_prefix = 'rsem' parameters = ['--quiet', '--no-qualities', '-p', str(cpus), '--forward-prob', '0.5', '--seed-length', '25', '--fragment-length-mean', '-1.0', '--bam', docker_path(filtered_bam)] if not single_end_reads: parameters.extend(['--paired-end']) parameters.extend(['/data/rsem_ref/hg19_M_rCRS_ref', output_prefix]) docker_call(tool= 'quay.io/ucsc_cgl/rsem:1.2.25--4e8d1b31d4028f464b3409c6558fb9dfcad73f88' , tool_parameters=parameters, work_dir=work_dir, sudo=sudo) os.rename(os.path.join(work_dir, output_prefix + '.genes.results'), os. path.join(work_dir, 'rsem_gene.tab')) os.rename(os.path.join(work_dir, output_prefix + '.isoforms.results'), os.path.join(work_dir, 'rsem_isoform.tab')) ids['rsem_gene.tab'] = job.fileStore.writeGlobalFile(os.path.join( work_dir, 'rsem_gene.tab')) ids['rsem_isoform.tab'] = job.fileStore.writeGlobalFile(os.path.join( work_dir, 'rsem_isoform.tab')) return job.addChildJobFn(rsem_postprocess, job_vars).rv()
rsem
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(job, job_vars):\n input_args, ids = job_vars\n work_dir = job.fileStore.getLocalTempDir()\n cpus = input_args['cpu_count']\n sudo = input_args['sudo']\n single_end_reads = input_args['single_end_reads']\n filtered_bam, rsem_ref = return_input_paths(job, work_dir, ids,\n 'filtered.bam', 'rsem_ref.zip')\n subprocess.check_call(['unzip', '-o', os.path.join(work_dir,\n 'rsem_ref.zip'), '-d', work_dir])\n output_prefix = 'rsem'\n parameters = ['--quiet', '--no-qualities', '-p', str(cpus),\n '--forward-prob', '0.5', '--seed-length', '25',\n '--fragment-length-mean', '-1.0', '--bam', docker_path(filtered_bam)]\n if not single_end_reads:\n parameters.extend(['--paired-end'])\n parameters.extend(['/data/rsem_ref/hg19_M_rCRS_ref', output_prefix])\n docker_call(tool=\n 'quay.io/ucsc_cgl/rsem:1.2.25--4e8d1b31d4028f464b3409c6558fb9dfcad73f88'\n , tool_parameters=parameters, work_dir=work_dir, sudo=sudo)\n os.rename(os.path.join(work_dir, output_prefix + '.genes.results'), os.\n path.join(work_dir, 'rsem_gene.tab'))\n os.rename(os.path.join(work_dir, output_prefix + '.isoforms.results'),\n os.path.join(work_dir, 'rsem_isoform.tab'))\n ids['rsem_gene.tab'] = job.fileStore.writeGlobalFile(os.path.join(\n work_dir, 'rsem_gene.tab'))\n ids['rsem_isoform.tab'] = job.fileStore.writeGlobalFile(os.path.join(\n work_dir, 'rsem_isoform.tab'))\n return job.addChildJobFn(rsem_postprocess, job_vars).rv()\n", "role": "user" }, { "content": "rsem", "role": "assistant" } ]
def x(job, job_vars, output_ids): input_args, ids = job_vars work_dir = job.fileStore.getLocalTempDir() uuid = input_args['uuid'] rseq_id, exon_id, rsem_id = flatten(output_ids) qc_tar = job.fileStore.readGlobalFile(rseq_id, os.path.join(work_dir, 'qc.tar.gz')) exon_tar = job.fileStore.readGlobalFile(exon_id, os.path.join(work_dir, 'exon.tar.gz')) rsem_tar = job.fileStore.readGlobalFile(rsem_id, os.path.join(work_dir, 'rsem.tar.gz')) out_tar = os.path.join(work_dir, uuid + '.tar.gz') with tarfile.open(os.path.join(work_dir, out_tar), 'w:gz') as f_out: for tar in [rsem_tar, exon_tar, qc_tar]: with tarfile.open(tar, 'r') as f_in: for tarinfo in f_in: with closing(f_in.extractfile(tarinfo)) as f_in_file: if tar == qc_tar: tarinfo.name = os.path.join(uuid, 'rseq_qc', os .path.basename(tarinfo.name)) else: tarinfo.name = os.path.join(uuid, os.path. basename(tarinfo.name)) f_out.addfile(tarinfo, fileobj=f_in_file) if input_args['output_dir']: output_dir = input_args['output_dir'] mkdir_p(output_dir) copy_to_output_dir(work_dir, output_dir, uuid=None, files=[uuid + '.tar.gz']) ids['uuid.tar.gz'] = job.fileStore.writeGlobalFile(out_tar) if input_args['s3_dir']: job.addChildJobFn(upload_output_to_s3, job_vars)
consolidate_output
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(job, job_vars, output_ids):\n input_args, ids = job_vars\n work_dir = job.fileStore.getLocalTempDir()\n uuid = input_args['uuid']\n rseq_id, exon_id, rsem_id = flatten(output_ids)\n qc_tar = job.fileStore.readGlobalFile(rseq_id, os.path.join(work_dir,\n 'qc.tar.gz'))\n exon_tar = job.fileStore.readGlobalFile(exon_id, os.path.join(work_dir,\n 'exon.tar.gz'))\n rsem_tar = job.fileStore.readGlobalFile(rsem_id, os.path.join(work_dir,\n 'rsem.tar.gz'))\n out_tar = os.path.join(work_dir, uuid + '.tar.gz')\n with tarfile.open(os.path.join(work_dir, out_tar), 'w:gz') as f_out:\n for tar in [rsem_tar, exon_tar, qc_tar]:\n with tarfile.open(tar, 'r') as f_in:\n for tarinfo in f_in:\n with closing(f_in.extractfile(tarinfo)) as f_in_file:\n if tar == qc_tar:\n tarinfo.name = os.path.join(uuid, 'rseq_qc', os\n .path.basename(tarinfo.name))\n else:\n tarinfo.name = os.path.join(uuid, os.path.\n basename(tarinfo.name))\n f_out.addfile(tarinfo, fileobj=f_in_file)\n if input_args['output_dir']:\n output_dir = input_args['output_dir']\n mkdir_p(output_dir)\n copy_to_output_dir(work_dir, output_dir, uuid=None, files=[uuid +\n '.tar.gz'])\n ids['uuid.tar.gz'] = job.fileStore.writeGlobalFile(out_tar)\n if input_args['s3_dir']:\n job.addChildJobFn(upload_output_to_s3, job_vars)\n", "role": "user" }, { "content": "consolidate_output", "role": "assistant" } ]
def x(job, job_vars): import boto from boto.s3.key import Key input_args, ids = job_vars work_dir = job.fileStore.getLocalTempDir() uuid = input_args['uuid'] s3_dir = input_args['s3_dir'] bucket_name = s3_dir.split('/')[0] bucket_dir = '/'.join(s3_dir.split('/')[1:]) uuid_tar = return_input_paths(job, work_dir, ids, 'uuid.tar.gz') conn = boto.connect_s3() bucket = conn.get_bucket(bucket_name) k = Key(bucket) k.key = os.path.join(bucket_dir, uuid + '.tar.gz') k.set_contents_from_filename(uuid_tar)
upload_output_to_s3
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(job, job_vars):\n import boto\n from boto.s3.key import Key\n input_args, ids = job_vars\n work_dir = job.fileStore.getLocalTempDir()\n uuid = input_args['uuid']\n s3_dir = input_args['s3_dir']\n bucket_name = s3_dir.split('/')[0]\n bucket_dir = '/'.join(s3_dir.split('/')[1:])\n uuid_tar = return_input_paths(job, work_dir, ids, 'uuid.tar.gz')\n conn = boto.connect_s3()\n bucket = conn.get_bucket(bucket_name)\n k = Key(bucket)\n k.key = os.path.join(bucket_dir, uuid + '.tar.gz')\n k.set_contents_from_filename(uuid_tar)\n", "role": "user" }, { "content": "upload_output_to_s3", "role": "assistant" } ]
def x(job, job_vars): input_args, ids = job_vars work_dir = job.fileStore.getLocalTempDir() uuid = input_args['uuid'] job.fileStore.readGlobalFile(ids['alignments.bam'], os.path.join( work_dir, 'alignments.bam')) bam_path = os.path.join(work_dir, 'alignments.bam') sample_name = uuid + '.bam' s3_dir = input_args['s3_dir'] bucket_name = s3_dir.split('/')[0] bucket_dir = os.path.join('/'.join(s3_dir.split('/')[1:]), 'bam_files') s3am_command = ['s3am', 'upload', 'file://{}'.format(bam_path), os.path .join('s3://', bucket_name, bucket_dir, sample_name)] subprocess.check_call(s3am_command)
upload_bam_to_s3
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(job, job_vars):\n input_args, ids = job_vars\n work_dir = job.fileStore.getLocalTempDir()\n uuid = input_args['uuid']\n job.fileStore.readGlobalFile(ids['alignments.bam'], os.path.join(\n work_dir, 'alignments.bam'))\n bam_path = os.path.join(work_dir, 'alignments.bam')\n sample_name = uuid + '.bam'\n s3_dir = input_args['s3_dir']\n bucket_name = s3_dir.split('/')[0]\n bucket_dir = os.path.join('/'.join(s3_dir.split('/')[1:]), 'bam_files')\n s3am_command = ['s3am', 'upload', 'file://{}'.format(bam_path), os.path\n .join('s3://', bucket_name, bucket_dir, sample_name)]\n subprocess.check_call(s3am_command)\n", "role": "user" }, { "content": "upload_bam_to_s3", "role": "assistant" } ]
def x(): parser = build_parser() Job.Runner.addToilOptions(parser) args = parser.parse_args() inputs = {'config': args.config, 'config_fastq': args.config_fastq, 'input': args.input, 'unc.bed': args.unc, 'hg19.transcripts.fa': args.fasta, 'composite_exons.bed': args.composite_exons, 'normalize.pl': args.normalize, 'output_dir': args.output_dir, 'rsem_ref.zip': args.rsem_ref, 'chromosomes.zip': args.chromosomes, 'ebwt.zip': args.ebwt, 'ssec': args.ssec, 's3_dir': args.s3_dir, 'sudo': args.sudo, 'single_end_reads': args.single_end_reads, 'upload_bam_to_s3': args.upload_bam_to_s3, 'uuid': None, 'sample.tar': None, 'cpu_count': None} Job.Runner.startToil(Job.wrapJobFn(download_shared_files, inputs), args)
main
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x():\n parser = build_parser()\n Job.Runner.addToilOptions(parser)\n args = parser.parse_args()\n inputs = {'config': args.config, 'config_fastq': args.config_fastq,\n 'input': args.input, 'unc.bed': args.unc, 'hg19.transcripts.fa':\n args.fasta, 'composite_exons.bed': args.composite_exons,\n 'normalize.pl': args.normalize, 'output_dir': args.output_dir,\n 'rsem_ref.zip': args.rsem_ref, 'chromosomes.zip': args.chromosomes,\n 'ebwt.zip': args.ebwt, 'ssec': args.ssec, 's3_dir': args.s3_dir,\n 'sudo': args.sudo, 'single_end_reads': args.single_end_reads,\n 'upload_bam_to_s3': args.upload_bam_to_s3, 'uuid': None,\n 'sample.tar': None, 'cpu_count': None}\n Job.Runner.startToil(Job.wrapJobFn(download_shared_files, inputs), args)\n", "role": "user" }, { "content": "main", "role": "assistant" } ]
def x(master_ip, filename, spark_on_toil): master_ip = master_ip.actual ssh_call = ['ssh', '-o', 'StrictHostKeyChecking=no', master_ip] if spark_on_toil: output = check_output(ssh_call + ['docker', 'ps']) container_id = next(line.split()[0] for line in output.splitlines() if 'apache-hadoop-master' in line) ssh_call += ['docker', 'exec', container_id] try: check_call(ssh_call + ['hdfs', 'dfs', '-rm', '-r', '/' + filename]) except: pass
remove_file
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(master_ip, filename, spark_on_toil):\n master_ip = master_ip.actual\n ssh_call = ['ssh', '-o', 'StrictHostKeyChecking=no', master_ip]\n if spark_on_toil:\n output = check_output(ssh_call + ['docker', 'ps'])\n container_id = next(line.split()[0] for line in output.splitlines() if\n 'apache-hadoop-master' in line)\n ssh_call += ['docker', 'exec', container_id]\n try:\n check_call(ssh_call + ['hdfs', 'dfs', '-rm', '-r', '/' + filename])\n except:\n pass\n", "role": "user" }, { "content": "remove_file", "role": "assistant" } ]
def x(job, master_ip, inputs, known_snps, bam, hdfs_snps, hdfs_bam): log.info('Downloading known sites file %s to %s.', known_snps, hdfs_snps) call_conductor(job, master_ip, known_snps, hdfs_snps, memory=inputs.memory) log.info('Downloading input BAM %s to %s.', bam, hdfs_bam) call_conductor(job, master_ip, bam, hdfs_bam, memory=inputs.memory)
download_data
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(job, master_ip, inputs, known_snps, bam, hdfs_snps, hdfs_bam):\n log.info('Downloading known sites file %s to %s.', known_snps, hdfs_snps)\n call_conductor(job, master_ip, known_snps, hdfs_snps, memory=inputs.memory)\n log.info('Downloading input BAM %s to %s.', bam, hdfs_bam)\n call_conductor(job, master_ip, bam, hdfs_bam, memory=inputs.memory)\n", "role": "user" }, { "content": "download_data", "role": "assistant" } ]
def x(job, master_ip, inputs, in_file, in_snps, adam_file, adam_snps, spark_on_toil): log.info('Converting input BAM to ADAM.') call_adam(job, master_ip, ['transform', in_file, adam_file], memory= inputs.memory, run_local=inputs.run_local, native_adam_path=inputs. native_adam_path) in_file_name = in_file.split('/')[-1] remove_file(master_ip, in_file_name, spark_on_toil) log.info('Converting known sites VCF to ADAM.') call_adam(job, master_ip, ['vcf2adam', '-only_variants', in_snps, adam_snps], memory=inputs.memory, run_local=inputs.run_local, native_adam_path=inputs.native_adam_path) in_snps_name = in_snps.split('/')[-1] remove_file(master_ip, in_snps_name, spark_on_toil)
adam_convert
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(job, master_ip, inputs, in_file, in_snps, adam_file, adam_snps,\n spark_on_toil):\n log.info('Converting input BAM to ADAM.')\n call_adam(job, master_ip, ['transform', in_file, adam_file], memory=\n inputs.memory, run_local=inputs.run_local, native_adam_path=inputs.\n native_adam_path)\n in_file_name = in_file.split('/')[-1]\n remove_file(master_ip, in_file_name, spark_on_toil)\n log.info('Converting known sites VCF to ADAM.')\n call_adam(job, master_ip, ['vcf2adam', '-only_variants', in_snps,\n adam_snps], memory=inputs.memory, run_local=inputs.run_local,\n native_adam_path=inputs.native_adam_path)\n in_snps_name = in_snps.split('/')[-1]\n remove_file(master_ip, in_snps_name, spark_on_toil)\n", "role": "user" }, { "content": "adam_convert", "role": "assistant" } ]
def x(job, master_ip, inputs, in_file, snp_file, hdfs_dir, out_file, spark_on_toil): log.info('Marking duplicate reads.') call_adam(job, master_ip, ['transform', in_file, hdfs_dir + '/mkdups.adam', '-aligned_read_predicate', '-limit_projection', '-mark_duplicate_reads'], memory=inputs.memory, run_local=inputs. run_local, native_adam_path=inputs.native_adam_path) in_file_name = in_file.split('/')[-1] remove_file(master_ip, in_file_name + '*', spark_on_toil) log.info('Realigning INDELs.') call_adam(job, master_ip, ['transform', hdfs_dir + '/mkdups.adam', hdfs_dir + '/ri.adam', '-realign_indels'], memory=inputs.memory, run_local=inputs.run_local, native_adam_path=inputs.native_adam_path) remove_file(master_ip, hdfs_dir + '/mkdups.adam*', spark_on_toil) log.info('Recalibrating base quality scores.') call_adam(job, master_ip, ['transform', hdfs_dir + '/ri.adam', hdfs_dir + '/bqsr.adam', '-recalibrate_base_qualities', '-known_snps', snp_file], memory=inputs.memory, run_local=inputs.run_local, native_adam_path=inputs.native_adam_path) remove_file(master_ip, 'ri.adam*', spark_on_toil) log.info('Sorting reads and saving a single BAM file.') call_adam(job, master_ip, ['transform', hdfs_dir + '/bqsr.adam', out_file, '-sort_reads', '-single'], memory=inputs.memory, run_local=inputs.run_local, native_adam_path=inputs.native_adam_path) remove_file(master_ip, 'bqsr.adam*', spark_on_toil) return out_file
adam_transform
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(job, master_ip, inputs, in_file, snp_file, hdfs_dir, out_file,\n spark_on_toil):\n log.info('Marking duplicate reads.')\n call_adam(job, master_ip, ['transform', in_file, hdfs_dir +\n '/mkdups.adam', '-aligned_read_predicate', '-limit_projection',\n '-mark_duplicate_reads'], memory=inputs.memory, run_local=inputs.\n run_local, native_adam_path=inputs.native_adam_path)\n in_file_name = in_file.split('/')[-1]\n remove_file(master_ip, in_file_name + '*', spark_on_toil)\n log.info('Realigning INDELs.')\n call_adam(job, master_ip, ['transform', hdfs_dir + '/mkdups.adam', \n hdfs_dir + '/ri.adam', '-realign_indels'], memory=inputs.memory,\n run_local=inputs.run_local, native_adam_path=inputs.native_adam_path)\n remove_file(master_ip, hdfs_dir + '/mkdups.adam*', spark_on_toil)\n log.info('Recalibrating base quality scores.')\n call_adam(job, master_ip, ['transform', hdfs_dir + '/ri.adam', hdfs_dir +\n '/bqsr.adam', '-recalibrate_base_qualities', '-known_snps',\n snp_file], memory=inputs.memory, run_local=inputs.run_local,\n native_adam_path=inputs.native_adam_path)\n remove_file(master_ip, 'ri.adam*', spark_on_toil)\n log.info('Sorting reads and saving a single BAM file.')\n call_adam(job, master_ip, ['transform', hdfs_dir + '/bqsr.adam',\n out_file, '-sort_reads', '-single'], memory=inputs.memory,\n run_local=inputs.run_local, native_adam_path=inputs.native_adam_path)\n remove_file(master_ip, 'bqsr.adam*', spark_on_toil)\n return out_file\n", "role": "user" }, { "content": "adam_transform", "role": "assistant" } ]
def x(job, master_ip, inputs, hdfs_name, upload_name, spark_on_toil): if mock_mode(): truncate_file(master_ip, hdfs_name, spark_on_toil) log.info('Uploading output BAM %s to %s.', hdfs_name, upload_name) call_conductor(job, master_ip, hdfs_name, upload_name, memory=inputs.memory ) remove_file(master_ip, hdfs_name, spark_on_toil)
upload_data
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(job, master_ip, inputs, hdfs_name, upload_name, spark_on_toil):\n if mock_mode():\n truncate_file(master_ip, hdfs_name, spark_on_toil)\n log.info('Uploading output BAM %s to %s.', hdfs_name, upload_name)\n call_conductor(job, master_ip, hdfs_name, upload_name, memory=inputs.memory\n )\n remove_file(master_ip, hdfs_name, spark_on_toil)\n", "role": "user" }, { "content": "upload_data", "role": "assistant" } ]
def x(job, master_ip, inputs, spark_on_toil): master_ip = MasterAddress(master_ip) bam_name = inputs.sample.split('://')[-1].split('/')[-1] sample_name = '.'.join(os.path.splitext(bam_name)[:-1]) hdfs_subdir = sample_name + '-dir' if inputs.run_local: inputs.local_dir = job.fileStore.getLocalTempDir() if inputs.native_adam_path is None: hdfs_dir = '/data/' else: hdfs_dir = inputs.local_dir else: inputs.local_dir = None hdfs_dir = 'hdfs://{0}:{1}/{2}'.format(master_ip, HDFS_MASTER_PORT, hdfs_subdir) try: hdfs_prefix = hdfs_dir + '/' + sample_name hdfs_bam = hdfs_dir + '/' + bam_name hdfs_snps = hdfs_dir + '/' + inputs.dbsnp.split('://')[-1].split('/')[ -1] if not inputs.run_local: download_data(job, master_ip, inputs, inputs.dbsnp, inputs. sample, hdfs_snps, hdfs_bam) else: copy_files([inputs.sample, inputs.dbsnp], inputs.local_dir) adam_input = hdfs_prefix + '.adam' adam_snps = hdfs_dir + '/snps.var.adam' adam_convert(job, master_ip, inputs, hdfs_bam, hdfs_snps, adam_input, adam_snps, spark_on_toil) adam_output = hdfs_prefix + '.processed.bam' adam_transform(job, master_ip, inputs, adam_input, adam_snps, hdfs_dir, adam_output, spark_on_toil) out_file = (inputs.output_dir + '/' + sample_name + inputs.suffix + '.bam') if not inputs.run_local: upload_data(job, master_ip, inputs, adam_output, out_file, spark_on_toil) else: local_adam_output = '%s/%s.processed.bam' % (inputs.local_dir, sample_name) move_files([local_adam_output], inputs.output_dir) remove_file(master_ip, hdfs_subdir, spark_on_toil) except: remove_file(master_ip, hdfs_subdir, spark_on_toil) raise
download_run_and_upload
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(job, master_ip, inputs, spark_on_toil):\n master_ip = MasterAddress(master_ip)\n bam_name = inputs.sample.split('://')[-1].split('/')[-1]\n sample_name = '.'.join(os.path.splitext(bam_name)[:-1])\n hdfs_subdir = sample_name + '-dir'\n if inputs.run_local:\n inputs.local_dir = job.fileStore.getLocalTempDir()\n if inputs.native_adam_path is None:\n hdfs_dir = '/data/'\n else:\n hdfs_dir = inputs.local_dir\n else:\n inputs.local_dir = None\n hdfs_dir = 'hdfs://{0}:{1}/{2}'.format(master_ip, HDFS_MASTER_PORT,\n hdfs_subdir)\n try:\n hdfs_prefix = hdfs_dir + '/' + sample_name\n hdfs_bam = hdfs_dir + '/' + bam_name\n hdfs_snps = hdfs_dir + '/' + inputs.dbsnp.split('://')[-1].split('/')[\n -1]\n if not inputs.run_local:\n download_data(job, master_ip, inputs, inputs.dbsnp, inputs.\n sample, hdfs_snps, hdfs_bam)\n else:\n copy_files([inputs.sample, inputs.dbsnp], inputs.local_dir)\n adam_input = hdfs_prefix + '.adam'\n adam_snps = hdfs_dir + '/snps.var.adam'\n adam_convert(job, master_ip, inputs, hdfs_bam, hdfs_snps,\n adam_input, adam_snps, spark_on_toil)\n adam_output = hdfs_prefix + '.processed.bam'\n adam_transform(job, master_ip, inputs, adam_input, adam_snps,\n hdfs_dir, adam_output, spark_on_toil)\n out_file = (inputs.output_dir + '/' + sample_name + inputs.suffix +\n '.bam')\n if not inputs.run_local:\n upload_data(job, master_ip, inputs, adam_output, out_file,\n spark_on_toil)\n else:\n local_adam_output = '%s/%s.processed.bam' % (inputs.local_dir,\n sample_name)\n move_files([local_adam_output], inputs.output_dir)\n remove_file(master_ip, hdfs_subdir, spark_on_toil)\n except:\n remove_file(master_ip, hdfs_subdir, spark_on_toil)\n raise\n", "role": "user" }, { "content": "download_run_and_upload", "role": "assistant" } ]
def x(job, inputs, sample, output_dir, suffix=''): inputs.sample = sample inputs.output_dir = output_dir inputs.suffix = suffix if inputs.master_ip is not None or inputs.run_local: if not inputs.run_local and inputs.master_ip == 'auto': spark_on_toil = False scale_up = job.wrapJobFn(scale_external_spark_cluster, 1) job.addChild(scale_up) spark_work = job.wrapJobFn(download_run_and_upload, inputs. master_ip, inputs, spark_on_toil) scale_up.addChild(spark_work) scale_down = job.wrapJobFn(scale_external_spark_cluster, -1) spark_work.addChild(scale_down) else: spark_on_toil = False spark_work = job.wrapJobFn(download_run_and_upload, inputs. master_ip, inputs, spark_on_toil) job.addChild(spark_work) else: spark_on_toil = True cores = multiprocessing.cpu_count() master_ip = spawn_spark_cluster(job, False, inputs.num_nodes - 1, cores=cores, memory=inputs.memory) spark_work = job.wrapJobFn(download_run_and_upload, master_ip, inputs, spark_on_toil) job.addChild(spark_work)
static_adam_preprocessing_dag
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(job, inputs, sample, output_dir, suffix=''):\n inputs.sample = sample\n inputs.output_dir = output_dir\n inputs.suffix = suffix\n if inputs.master_ip is not None or inputs.run_local:\n if not inputs.run_local and inputs.master_ip == 'auto':\n spark_on_toil = False\n scale_up = job.wrapJobFn(scale_external_spark_cluster, 1)\n job.addChild(scale_up)\n spark_work = job.wrapJobFn(download_run_and_upload, inputs.\n master_ip, inputs, spark_on_toil)\n scale_up.addChild(spark_work)\n scale_down = job.wrapJobFn(scale_external_spark_cluster, -1)\n spark_work.addChild(scale_down)\n else:\n spark_on_toil = False\n spark_work = job.wrapJobFn(download_run_and_upload, inputs.\n master_ip, inputs, spark_on_toil)\n job.addChild(spark_work)\n else:\n spark_on_toil = True\n cores = multiprocessing.cpu_count()\n master_ip = spawn_spark_cluster(job, False, inputs.num_nodes - 1,\n cores=cores, memory=inputs.memory)\n spark_work = job.wrapJobFn(download_run_and_upload, master_ip,\n inputs, spark_on_toil)\n job.addChild(spark_work)\n", "role": "user" }, { "content": "static_adam_preprocessing_dag", "role": "assistant" } ]
def x(job, uuid, vcf_id, config): job.fileStore.logToMaster('Running Hard Filter on {}'.format(uuid)) genome_ref_size = (config.genome_fasta.size + config.genome_fai.size + config.genome_dict.size) select_variants_disk = PromisedRequirement(lambda vcf, ref_size: 2 * vcf.size + ref_size, vcf_id, genome_ref_size) select_snps = job.wrapJobFn(gatk_select_variants, 'SNP', vcf_id, config .genome_fasta, config.genome_fai, config.genome_dict, memory=config .xmx, disk=select_variants_disk) snp_filter_disk = PromisedRequirement(lambda vcf, ref_size: 2 * vcf. size + ref_size, select_snps.rv(), genome_ref_size) snp_filter = job.wrapJobFn(gatk_variant_filtration, select_snps.rv(), config.snp_filter_name, config.snp_filter_expression, config. genome_fasta, config.genome_fai, config.genome_dict, memory=config. xmx, disk=snp_filter_disk) select_indels = job.wrapJobFn(gatk_select_variants, 'INDEL', vcf_id, config.genome_fasta, config.genome_fai, config.genome_dict, memory= config.xmx, disk=select_variants_disk) indel_filter_disk = PromisedRequirement(lambda vcf, ref_size: 2 * vcf. size + ref_size, select_indels.rv(), genome_ref_size) indel_filter = job.wrapJobFn(gatk_variant_filtration, select_indels.rv( ), config.indel_filter_name, config.indel_filter_expression, config .genome_fasta, config.genome_fai, config.genome_dict, memory=config .xmx, disk=indel_filter_disk) combine_vcfs_disk = PromisedRequirement(lambda vcf1, vcf2, ref_size: 2 * (vcf1.size + vcf2.size) + ref_size, indel_filter.rv(), snp_filter. rv(), genome_ref_size) combine_vcfs = job.wrapJobFn(gatk_combine_variants, {'SNPs': snp_filter .rv(), 'INDELs': indel_filter.rv()}, config.genome_fasta, config. genome_fai, config.genome_dict, merge_option='UNSORTED', memory= config.xmx, disk=combine_vcfs_disk) job.addChild(select_snps) job.addChild(select_indels) select_snps.addChild(snp_filter) snp_filter.addChild(combine_vcfs) select_indels.addChild(indel_filter) indel_filter.addChild(combine_vcfs) output_dir = os.path.join(config.output_dir, uuid) output_filename = '%s.hard_filter%s.vcf' % (uuid, config.suffix) output_vcf = job.wrapJobFn(output_file_job, output_filename, combine_vcfs.rv(), output_dir, s3_key_path=config.ssec, disk= PromisedRequirement(lambda x: x.size, combine_vcfs.rv())) combine_vcfs.addChild(output_vcf) return combine_vcfs.rv()
hard_filter_pipeline
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(job, uuid, vcf_id, config):\n job.fileStore.logToMaster('Running Hard Filter on {}'.format(uuid))\n genome_ref_size = (config.genome_fasta.size + config.genome_fai.size +\n config.genome_dict.size)\n select_variants_disk = PromisedRequirement(lambda vcf, ref_size: 2 *\n vcf.size + ref_size, vcf_id, genome_ref_size)\n select_snps = job.wrapJobFn(gatk_select_variants, 'SNP', vcf_id, config\n .genome_fasta, config.genome_fai, config.genome_dict, memory=config\n .xmx, disk=select_variants_disk)\n snp_filter_disk = PromisedRequirement(lambda vcf, ref_size: 2 * vcf.\n size + ref_size, select_snps.rv(), genome_ref_size)\n snp_filter = job.wrapJobFn(gatk_variant_filtration, select_snps.rv(),\n config.snp_filter_name, config.snp_filter_expression, config.\n genome_fasta, config.genome_fai, config.genome_dict, memory=config.\n xmx, disk=snp_filter_disk)\n select_indels = job.wrapJobFn(gatk_select_variants, 'INDEL', vcf_id,\n config.genome_fasta, config.genome_fai, config.genome_dict, memory=\n config.xmx, disk=select_variants_disk)\n indel_filter_disk = PromisedRequirement(lambda vcf, ref_size: 2 * vcf.\n size + ref_size, select_indels.rv(), genome_ref_size)\n indel_filter = job.wrapJobFn(gatk_variant_filtration, select_indels.rv(\n ), config.indel_filter_name, config.indel_filter_expression, config\n .genome_fasta, config.genome_fai, config.genome_dict, memory=config\n .xmx, disk=indel_filter_disk)\n combine_vcfs_disk = PromisedRequirement(lambda vcf1, vcf2, ref_size: 2 *\n (vcf1.size + vcf2.size) + ref_size, indel_filter.rv(), snp_filter.\n rv(), genome_ref_size)\n combine_vcfs = job.wrapJobFn(gatk_combine_variants, {'SNPs': snp_filter\n .rv(), 'INDELs': indel_filter.rv()}, config.genome_fasta, config.\n genome_fai, config.genome_dict, merge_option='UNSORTED', memory=\n config.xmx, disk=combine_vcfs_disk)\n job.addChild(select_snps)\n job.addChild(select_indels)\n select_snps.addChild(snp_filter)\n snp_filter.addChild(combine_vcfs)\n select_indels.addChild(indel_filter)\n indel_filter.addChild(combine_vcfs)\n output_dir = os.path.join(config.output_dir, uuid)\n output_filename = '%s.hard_filter%s.vcf' % (uuid, config.suffix)\n output_vcf = job.wrapJobFn(output_file_job, output_filename,\n combine_vcfs.rv(), output_dir, s3_key_path=config.ssec, disk=\n PromisedRequirement(lambda x: x.size, combine_vcfs.rv()))\n combine_vcfs.addChild(output_vcf)\n return combine_vcfs.rv()\n", "role": "user" }, { "content": "hard_filter_pipeline", "role": "assistant" } ]
def x(): parser = build_parser() Job.Runner.addToilOptions(parser) args = parser.parse_args() inputs = {'genetorrent': args.genetorrent, 'genetorrent_key': args. genetorrent_key, 'ssec': args.ssec, 's3_dir': args.s3_dir} if args.ssec: assert os.path.isfile(args.ssec) if args.genetorrent: assert os.path.isfile(args.genetorrent) if args.genetorrent_key: assert os.path.isfile(args.genetorrent_key) samples = parse_genetorrent(args.genetorrent) Job.Runner.startToil(Job.wrapJobFn(map_job, download_and_transfer_sample, samples, inputs), args)
main
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x():\n parser = build_parser()\n Job.Runner.addToilOptions(parser)\n args = parser.parse_args()\n inputs = {'genetorrent': args.genetorrent, 'genetorrent_key': args.\n genetorrent_key, 'ssec': args.ssec, 's3_dir': args.s3_dir}\n if args.ssec:\n assert os.path.isfile(args.ssec)\n if args.genetorrent:\n assert os.path.isfile(args.genetorrent)\n if args.genetorrent_key:\n assert os.path.isfile(args.genetorrent_key)\n samples = parse_genetorrent(args.genetorrent)\n Job.Runner.startToil(Job.wrapJobFn(map_job,\n download_and_transfer_sample, samples, inputs), args)\n", "role": "user" }, { "content": "main", "role": "assistant" } ]
def x(s): if _HEX_RE.match(s): return len(s.split('::')) <= 2 if _DOTTED_QUAD_RE.match(s): halves = s.split('::') if len(halves) > 2: return False hextets = s.split(':') quads = hextets[-1].split('.') for q in quads: if int(q) > 255: return False return True return False
validate_ip
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(s):\n if _HEX_RE.match(s):\n return len(s.split('::')) <= 2\n if _DOTTED_QUAD_RE.match(s):\n halves = s.split('::')\n if len(halves) > 2:\n return False\n hextets = s.split(':')\n quads = hextets[-1].split('.')\n for q in quads:\n if int(q) > 255:\n return False\n return True\n return False\n", "role": "user" }, { "content": "validate_ip", "role": "assistant" } ]
def x(ip): if not validate_ip(ip): return None if '.' in ip: chunks = ip.split(':') v4_int = ipv4.ip2long(chunks.pop()) if v4_int is None: return None chunks.append('%x' % (v4_int >> 16 & 65535)) chunks.append('%x' % (v4_int & 65535)) ip = ':'.join(chunks) halves = ip.split('::') hextets = halves[0].split(':') if len(halves) == 2: h2 = halves[1].split(':') for z in range(8 - (len(hextets) + len(h2))): hextets.append('0') for h in h2: hextets.append(h) lngip = 0 for h in hextets: if '' == h: h = '0' lngip = lngip << 16 | int(h, 16) return lngip
ip2long
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(ip):\n if not validate_ip(ip):\n return None\n if '.' in ip:\n chunks = ip.split(':')\n v4_int = ipv4.ip2long(chunks.pop())\n if v4_int is None:\n return None\n chunks.append('%x' % (v4_int >> 16 & 65535))\n chunks.append('%x' % (v4_int & 65535))\n ip = ':'.join(chunks)\n halves = ip.split('::')\n hextets = halves[0].split(':')\n if len(halves) == 2:\n h2 = halves[1].split(':')\n for z in range(8 - (len(hextets) + len(h2))):\n hextets.append('0')\n for h in h2:\n hextets.append(h)\n lngip = 0\n for h in hextets:\n if '' == h:\n h = '0'\n lngip = lngip << 16 | int(h, 16)\n return lngip\n", "role": "user" }, { "content": "ip2long", "role": "assistant" } ]
def x(l, rfc1924=False): if MAX_IP < l or l < MIN_IP: raise TypeError('expected int between %d and %d inclusive' % ( MIN_IP, MAX_IP)) if rfc1924: return long2rfc1924(l) hex_str = '%032x' % l hextets = [('%x' % int(hex_str[x:x + 4], 16)) for x in range(0, 32, 4)] dc_start, dc_len = -1, 0 run_start, run_len = -1, 0 for idx, hextet in enumerate(hextets): if '0' == hextet: run_len += 1 if -1 == run_start: run_start = idx if run_len > dc_len: dc_len, dc_start = run_len, run_start else: run_len, run_start = 0, -1 if dc_len > 1: dc_end = dc_start + dc_len if dc_end == len(hextets): hextets += [''] hextets[dc_start:dc_end] = [''] if dc_start == 0: hextets = [''] + hextets return ':'.join(hextets)
long2ip
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(l, rfc1924=False):\n if MAX_IP < l or l < MIN_IP:\n raise TypeError('expected int between %d and %d inclusive' % (\n MIN_IP, MAX_IP))\n if rfc1924:\n return long2rfc1924(l)\n hex_str = '%032x' % l\n hextets = [('%x' % int(hex_str[x:x + 4], 16)) for x in range(0, 32, 4)]\n dc_start, dc_len = -1, 0\n run_start, run_len = -1, 0\n for idx, hextet in enumerate(hextets):\n if '0' == hextet:\n run_len += 1\n if -1 == run_start:\n run_start = idx\n if run_len > dc_len:\n dc_len, dc_start = run_len, run_start\n else:\n run_len, run_start = 0, -1\n if dc_len > 1:\n dc_end = dc_start + dc_len\n if dc_end == len(hextets):\n hextets += ['']\n hextets[dc_start:dc_end] = ['']\n if dc_start == 0:\n hextets = [''] + hextets\n return ':'.join(hextets)\n", "role": "user" }, { "content": "long2ip", "role": "assistant" } ]
def x(l): if MAX_IP < l or l < MIN_IP: raise TypeError('expected int between %d and %d inclusive' % ( MIN_IP, MAX_IP)) o = [] r = l while r > 85: o.append(_RFC1924_ALPHABET[r % 85]) r = r // 85 o.append(_RFC1924_ALPHABET[r]) return ''.join(reversed(o)).zfill(20)
long2rfc1924
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(l):\n if MAX_IP < l or l < MIN_IP:\n raise TypeError('expected int between %d and %d inclusive' % (\n MIN_IP, MAX_IP))\n o = []\n r = l\n while r > 85:\n o.append(_RFC1924_ALPHABET[r % 85])\n r = r // 85\n o.append(_RFC1924_ALPHABET[r])\n return ''.join(reversed(o)).zfill(20)\n", "role": "user" }, { "content": "long2rfc1924", "role": "assistant" } ]
def x(s): global _RFC1924_REV if not _RFC1924_RE.match(s): return None if _RFC1924_REV is None: _RFC1924_REV = {v: k for k, v in enumerate(_RFC1924_ALPHABET)} x = 0 for c in s: x = x * 85 + _RFC1924_REV[c] if x > MAX_IP: return None return x
rfc19242long
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(s):\n global _RFC1924_REV\n if not _RFC1924_RE.match(s):\n return None\n if _RFC1924_REV is None:\n _RFC1924_REV = {v: k for k, v in enumerate(_RFC1924_ALPHABET)}\n x = 0\n for c in s:\n x = x * 85 + _RFC1924_REV[c]\n if x > MAX_IP:\n return None\n return x\n", "role": "user" }, { "content": "rfc19242long", "role": "assistant" } ]
def x(s): if _CIDR_RE.match(s): ip, mask = s.split('/') if validate_ip(ip): if int(mask) > 128: return False else: return False return True return False
validate_cidr
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(s):\n if _CIDR_RE.match(s):\n ip, mask = s.split('/')\n if validate_ip(ip):\n if int(mask) > 128:\n return False\n else:\n return False\n return True\n return False\n", "role": "user" }, { "content": "validate_cidr", "role": "assistant" } ]
def x(cidr): if not validate_cidr(cidr): return None ip, prefix = cidr.split('/') prefix = int(prefix) ip = ip2long(ip) shift = 128 - prefix block_start = ip >> shift << shift mask = (1 << shift) - 1 block_end = block_start | mask return long2ip(block_start), long2ip(block_end)
cidr2block
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(cidr):\n if not validate_cidr(cidr):\n return None\n ip, prefix = cidr.split('/')\n prefix = int(prefix)\n ip = ip2long(ip)\n shift = 128 - prefix\n block_start = ip >> shift << shift\n mask = (1 << shift) - 1\n block_end = block_start | mask\n return long2ip(block_start), long2ip(block_end)\n", "role": "user" }, { "content": "cidr2block", "role": "assistant" } ]
def x(job, inputs): job.fileStore.logToMaster('Parsing input samples and batching jobs') samples = [] if inputs.config: with open(inputs.config, 'r') as f: for line in f.readlines(): if not line.isspace(): sample = line.strip().split(',') assert len(sample ) == 2, 'Error: Config file is inappropriately formatted.' samples.append(sample) job.addChildJobFn(map_job, download_sample, samples, inputs)
parse_input_samples
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(job, inputs):\n job.fileStore.logToMaster('Parsing input samples and batching jobs')\n samples = []\n if inputs.config:\n with open(inputs.config, 'r') as f:\n for line in f.readlines():\n if not line.isspace():\n sample = line.strip().split(',')\n assert len(sample\n ) == 2, 'Error: Config file is inappropriately formatted.'\n samples.append(sample)\n job.addChildJobFn(map_job, download_sample, samples, inputs)\n", "role": "user" }, { "content": "parse_input_samples", "role": "assistant" } ]
def x(job, sample, inputs): uuid, url = sample job.fileStore.logToMaster('Downloading sample: {}'.format(uuid)) tar_id = job.addChildJobFn(download_url_job, url, s3_key_path=inputs. ssec, disk='30G').rv() sample_inputs = argparse.Namespace(**vars(inputs)) sample_inputs.uuid = uuid sample_inputs.cores = multiprocessing.cpu_count() job.addFollowOnJobFn(process_sample, sample_inputs, tar_id, cores=2, disk='60G')
download_sample
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(job, sample, inputs):\n uuid, url = sample\n job.fileStore.logToMaster('Downloading sample: {}'.format(uuid))\n tar_id = job.addChildJobFn(download_url_job, url, s3_key_path=inputs.\n ssec, disk='30G').rv()\n sample_inputs = argparse.Namespace(**vars(inputs))\n sample_inputs.uuid = uuid\n sample_inputs.cores = multiprocessing.cpu_count()\n job.addFollowOnJobFn(process_sample, sample_inputs, tar_id, cores=2,\n disk='60G')\n", "role": "user" }, { "content": "download_sample", "role": "assistant" } ]
def x(job, inputs, tar_id): job.fileStore.logToMaster('Processing sample into read pairs: {}'. format(inputs.uuid)) work_dir = job.fileStore.getLocalTempDir() tar_path = job.fileStore.readGlobalFile(tar_id, os.path.join(work_dir, 'sample.tar')) subprocess.check_call(['tar', '-xvf', tar_path, '-C', work_dir]) os.remove(os.path.join(work_dir, 'sample.tar')) fastqs = [] for root, subdir, files in os.walk(work_dir): fastqs.extend([os.path.join(root, x) for x in files]) r1 = sorted([x for x in fastqs if 'R1' in x]) r2 = sorted([x for x in fastqs if 'R2' in x]) if not r1 or not r2: r1 = sorted([x for x in fastqs if '_1' in x]) r2 = sorted([x for x in fastqs if '_2' in x]) if len(r1) > len(r2): r1 = [x for x in r1 if x not in r2] elif len(r2) > len(r1): r2 = [x for x in r2 if x not in r1] assert r1 and r2, 'This pipeline does not support single-ended data. R1: {}\nR2:{}'.format( r1, r2) command = 'zcat' if r1[0].endswith('gz') and r2[0].endswith('gz' ) else 'cat' with open(os.path.join(work_dir, 'R1.fastq'), 'w') as f1: p1 = subprocess.Popen([command] + r1, stdout=f1) with open(os.path.join(work_dir, 'R2.fastq'), 'w') as f2: p2 = subprocess.Popen([command] + r2, stdout=f2) p1.wait() p2.wait() r1_id = job.fileStore.writeGlobalFile(os.path.join(work_dir, 'R1.fastq')) r2_id = job.fileStore.writeGlobalFile(os.path.join(work_dir, 'R2.fastq')) job.fileStore.deleteGlobalFile(tar_id) job.addChildJobFn(cutadapt, inputs, r1_id, r2_id, disk='60G').rv()
process_sample
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(job, inputs, tar_id):\n job.fileStore.logToMaster('Processing sample into read pairs: {}'.\n format(inputs.uuid))\n work_dir = job.fileStore.getLocalTempDir()\n tar_path = job.fileStore.readGlobalFile(tar_id, os.path.join(work_dir,\n 'sample.tar'))\n subprocess.check_call(['tar', '-xvf', tar_path, '-C', work_dir])\n os.remove(os.path.join(work_dir, 'sample.tar'))\n fastqs = []\n for root, subdir, files in os.walk(work_dir):\n fastqs.extend([os.path.join(root, x) for x in files])\n r1 = sorted([x for x in fastqs if 'R1' in x])\n r2 = sorted([x for x in fastqs if 'R2' in x])\n if not r1 or not r2:\n r1 = sorted([x for x in fastqs if '_1' in x])\n r2 = sorted([x for x in fastqs if '_2' in x])\n if len(r1) > len(r2):\n r1 = [x for x in r1 if x not in r2]\n elif len(r2) > len(r1):\n r2 = [x for x in r2 if x not in r1]\n assert r1 and r2, 'This pipeline does not support single-ended data. R1: {}\\nR2:{}'.format(\n r1, r2)\n command = 'zcat' if r1[0].endswith('gz') and r2[0].endswith('gz'\n ) else 'cat'\n with open(os.path.join(work_dir, 'R1.fastq'), 'w') as f1:\n p1 = subprocess.Popen([command] + r1, stdout=f1)\n with open(os.path.join(work_dir, 'R2.fastq'), 'w') as f2:\n p2 = subprocess.Popen([command] + r2, stdout=f2)\n p1.wait()\n p2.wait()\n r1_id = job.fileStore.writeGlobalFile(os.path.join(work_dir, 'R1.fastq'))\n r2_id = job.fileStore.writeGlobalFile(os.path.join(work_dir, 'R2.fastq'))\n job.fileStore.deleteGlobalFile(tar_id)\n job.addChildJobFn(cutadapt, inputs, r1_id, r2_id, disk='60G').rv()\n", "role": "user" }, { "content": "process_sample", "role": "assistant" } ]
def x(job, inputs, r1_id, r2_id):
    job.fileStore.logToMaster('Running CutAdapt: {}'.format(inputs.uuid))
    work_dir = job.fileStore.getLocalTempDir()
    inputs.improper_pair = None
    job.fileStore.readGlobalFile(r1_id, os.path.join(work_dir, 'R1.fastq'))
    job.fileStore.readGlobalFile(r2_id, os.path.join(work_dir, 'R2.fastq'))
    parameters = ['-a', inputs.fwd_3pr_adapter, '-m', '35',
                  '-A', inputs.rev_3pr_adapter,
                  '-o', '/data/R1_cutadapt.fastq',
                  '-p', '/data/R2_cutadapt.fastq',
                  '/data/R1.fastq', '/data/R2.fastq']
    base_docker_call = 'docker run --log-driver=none --rm -v {}:/data'.format(work_dir).split()
    if inputs.sudo:
        base_docker_call = ['sudo'] + base_docker_call
    tool = 'quay.io/ucsc_cgl/cutadapt:1.9--6bd44edd2b8f8f17e25c5a268fedaab65fa851d2'
    p = subprocess.Popen(base_docker_call + [tool] + parameters, stderr=subprocess.PIPE, stdout=subprocess.PIPE)
    stdout, stderr = p.communicate()
    if p.returncode != 0:
        if 'improperly paired' in stderr:
            inputs.improper_pair = True
            shutil.move(os.path.join(work_dir, 'R1.fastq'), os.path.join(work_dir, 'R1_cutadapt.fastq'))
            shutil.move(os.path.join(work_dir, 'R2.fastq'), os.path.join(work_dir, 'R2_cutadapt.fastq'))
    if inputs.improper_pair:
        r1_cutadapt = r1_id
        r2_cutadapt = r2_id
    else:
        r1_cutadapt = job.fileStore.writeGlobalFile(os.path.join(work_dir, 'R1_cutadapt.fastq'))
        r2_cutadapt = job.fileStore.writeGlobalFile(os.path.join(work_dir, 'R2_cutadapt.fastq'))
        job.fileStore.deleteGlobalFile(r1_id)
        job.fileStore.deleteGlobalFile(r2_id)
    cores = min(inputs.cores, 16)
    job.addChildJobFn(star, inputs, r1_cutadapt, r2_cutadapt, cores=cores, disk='100G', memory='40G').rv()
cutadapt
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(job, inputs, r1_id, r2_id):\n job.fileStore.logToMaster('Running CutAdapt: {}'.format(inputs.uuid))\n work_dir = job.fileStore.getLocalTempDir()\n inputs.improper_pair = None\n job.fileStore.readGlobalFile(r1_id, os.path.join(work_dir, 'R1.fastq'))\n job.fileStore.readGlobalFile(r2_id, os.path.join(work_dir, 'R2.fastq'))\n parameters = ['-a', inputs.fwd_3pr_adapter, '-m', '35', '-A', inputs.\n rev_3pr_adapter, '-o', '/data/R1_cutadapt.fastq', '-p',\n '/data/R2_cutadapt.fastq', '/data/R1.fastq', '/data/R2.fastq']\n base_docker_call = 'docker run --log-driver=none --rm -v {}:/data'.format(\n work_dir).split()\n if inputs.sudo:\n base_docker_call = ['sudo'] + base_docker_call\n tool = (\n 'quay.io/ucsc_cgl/cutadapt:1.9--6bd44edd2b8f8f17e25c5a268fedaab65fa851d2'\n )\n p = subprocess.Popen(base_docker_call + [tool] + parameters, stderr=\n subprocess.PIPE, stdout=subprocess.PIPE)\n stdout, stderr = p.communicate()\n if p.returncode != 0:\n if 'improperly paired' in stderr:\n inputs.improper_pair = True\n shutil.move(os.path.join(work_dir, 'R1.fastq'), os.path.join(\n work_dir, 'R1_cutadapt.fastq'))\n shutil.move(os.path.join(work_dir, 'R2.fastq'), os.path.join(\n work_dir, 'R2_cutadapt.fastq'))\n if inputs.improper_pair:\n r1_cutadapt = r1_id\n r2_cutadapt = r2_id\n else:\n r1_cutadapt = job.fileStore.writeGlobalFile(os.path.join(work_dir,\n 'R1_cutadapt.fastq'))\n r2_cutadapt = job.fileStore.writeGlobalFile(os.path.join(work_dir,\n 'R2_cutadapt.fastq'))\n job.fileStore.deleteGlobalFile(r1_id)\n job.fileStore.deleteGlobalFile(r2_id)\n cores = min(inputs.cores, 16)\n job.addChildJobFn(star, inputs, r1_cutadapt, r2_cutadapt, cores=cores,\n disk='100G', memory='40G').rv()\n", "role": "user" }, { "content": "cutadapt", "role": "assistant" } ]
def x(job, inputs, r1_cutadapt, r2_cutadapt):
    job.fileStore.logToMaster('Aligning with STAR: {}'.format(inputs.uuid))
    work_dir = job.fileStore.getLocalTempDir()
    cores = min(inputs.cores, 16)
    job.fileStore.readGlobalFile(r1_cutadapt, os.path.join(work_dir, 'R1_cutadapt.fastq'))
    job.fileStore.readGlobalFile(r2_cutadapt, os.path.join(work_dir, 'R2_cutadapt.fastq'))
    download_url(job=job, url=inputs.star_index, work_dir=work_dir, name='starIndex.tar.gz')
    subprocess.check_call(['tar', '-xvf', os.path.join(work_dir, 'starIndex.tar.gz'), '-C', work_dir])
    parameters = ['--runThreadN', str(cores),
                  '--genomeDir', '/data/starIndex',
                  '--outFileNamePrefix', 'rna',
                  '--outSAMtype', 'BAM', 'SortedByCoordinate',
                  '--outSAMunmapped', 'Within',
                  '--quantMode', 'TranscriptomeSAM',
                  '--outSAMattributes', 'NH', 'HI', 'AS', 'NM', 'MD',
                  '--outFilterType', 'BySJout',
                  '--outFilterMultimapNmax', '20',
                  '--outFilterMismatchNmax', '999',
                  '--outFilterMismatchNoverReadLmax', '0.04',
                  '--alignIntronMin', '20',
                  '--alignIntronMax', '1000000',
                  '--alignMatesGapMax', '1000000',
                  '--alignSJoverhangMin', '8',
                  '--alignSJDBoverhangMin', '1',
                  '--sjdbScore', '1',
                  '--readFilesIn', '/data/R1_cutadapt.fastq', '/data/R2_cutadapt.fastq']
    docker_call(job=job, tool='quay.io/ucsc_cgl/star:2.4.2a--bcbd5122b69ff6ac4ef61958e47bde94001cfe80',
                work_dir=work_dir, parameters=parameters)
    index_command = ['index', '/data/rnaAligned.sortedByCoord.out.bam']
    docker_call(job=job, work_dir=work_dir, parameters=index_command,
                tool='quay.io/ucsc_cgl/samtools:1.3--256539928ea162949d8a65ca5c79a72ef557ce7c')
    bam_id = job.fileStore.writeGlobalFile(os.path.join(work_dir, 'rnaAligned.sortedByCoord.out.bam'))
    bai_id = job.fileStore.writeGlobalFile(os.path.join(work_dir, 'rnaAligned.sortedByCoord.out.bam.bai'))
    job.fileStore.deleteGlobalFile(r1_cutadapt)
    job.fileStore.deleteGlobalFile(r2_cutadapt)
    vcqc_id = job.addChildJobFn(variant_calling_and_qc, inputs, bam_id, bai_id, cores=2, disk='30G').rv()
    spladder_id = job.addChildJobFn(spladder, inputs, bam_id, bai_id, disk='30G').rv()
    job.addFollowOnJobFn(consolidate_output_tarballs, inputs, vcqc_id, spladder_id, disk='30G')
star
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(job, inputs, r1_cutadapt, r2_cutadapt):\n job.fileStore.logToMaster('Aligning with STAR: {}'.format(inputs.uuid))\n work_dir = job.fileStore.getLocalTempDir()\n cores = min(inputs.cores, 16)\n job.fileStore.readGlobalFile(r1_cutadapt, os.path.join(work_dir,\n 'R1_cutadapt.fastq'))\n job.fileStore.readGlobalFile(r2_cutadapt, os.path.join(work_dir,\n 'R2_cutadapt.fastq'))\n download_url(job=job, url=inputs.star_index, work_dir=work_dir, name=\n 'starIndex.tar.gz')\n subprocess.check_call(['tar', '-xvf', os.path.join(work_dir,\n 'starIndex.tar.gz'), '-C', work_dir])\n parameters = ['--runThreadN', str(cores), '--genomeDir',\n '/data/starIndex', '--outFileNamePrefix', 'rna', '--outSAMtype',\n 'BAM', 'SortedByCoordinate', '--outSAMunmapped', 'Within',\n '--quantMode', 'TranscriptomeSAM', '--outSAMattributes', 'NH', 'HI',\n 'AS', 'NM', 'MD', '--outFilterType', 'BySJout',\n '--outFilterMultimapNmax', '20', '--outFilterMismatchNmax', '999',\n '--outFilterMismatchNoverReadLmax', '0.04', '--alignIntronMin',\n '20', '--alignIntronMax', '1000000', '--alignMatesGapMax',\n '1000000', '--alignSJoverhangMin', '8', '--alignSJDBoverhangMin',\n '1', '--sjdbScore', '1', '--readFilesIn', '/data/R1_cutadapt.fastq',\n '/data/R2_cutadapt.fastq']\n docker_call(job=job, tool=\n 'quay.io/ucsc_cgl/star:2.4.2a--bcbd5122b69ff6ac4ef61958e47bde94001cfe80'\n , work_dir=work_dir, parameters=parameters)\n index_command = ['index', '/data/rnaAligned.sortedByCoord.out.bam']\n docker_call(job=job, work_dir=work_dir, parameters=index_command, tool=\n 'quay.io/ucsc_cgl/samtools:1.3--256539928ea162949d8a65ca5c79a72ef557ce7c'\n )\n bam_id = job.fileStore.writeGlobalFile(os.path.join(work_dir,\n 'rnaAligned.sortedByCoord.out.bam'))\n bai_id = job.fileStore.writeGlobalFile(os.path.join(work_dir,\n 'rnaAligned.sortedByCoord.out.bam.bai'))\n job.fileStore.deleteGlobalFile(r1_cutadapt)\n job.fileStore.deleteGlobalFile(r2_cutadapt)\n vcqc_id = job.addChildJobFn(variant_calling_and_qc, inputs, bam_id,\n bai_id, cores=2, disk='30G').rv()\n spladder_id = job.addChildJobFn(spladder, inputs, bam_id, bai_id, disk=\n '30G').rv()\n job.addFollowOnJobFn(consolidate_output_tarballs, inputs, vcqc_id,\n spladder_id, disk='30G')\n", "role": "user" }, { "content": "star", "role": "assistant" } ]
def x(job, inputs, bam_id, bai_id):
    job.fileStore.logToMaster('Variant calling and QC: {}'.format(inputs.uuid))
    work_dir = job.fileStore.getLocalTempDir()
    job.fileStore.readGlobalFile(bam_id, os.path.join(work_dir, 'alignment.bam'))
    job.fileStore.readGlobalFile(bai_id, os.path.join(work_dir, 'alignment.bam.bai'))
    input_info = [(inputs.genome, 'genome.fa'),
                  (inputs.positions, 'positions.tsv'),
                  (inputs.genome_index, 'genome.fa.fai'),
                  (inputs.gtf, 'annotation.gtf'),
                  (inputs.gtf_m53, 'annotation.m53')]
    for url, fname in input_info:
        download_url(job=job, url=url, work_dir=work_dir, name=fname)
    variant_command = ['mpileup', '-f', 'genome.fa', '-l', 'positions.tsv', '-v', 'alignment.bam',
                       '-t', 'DP,SP,INFO/AD,INFO/ADF,INFO/ADR,INFO/DPR,SP',
                       '-o', '/data/output.vcf.gz']
    docker_call(job=job, work_dir=work_dir, parameters=variant_command,
                tool='quay.io/ucsc_cgl/samtools:1.3--256539928ea162949d8a65ca5c79a72ef557ce7c')
    qc_command = ['-o', 'qc', '-n', 'alignment.bam', '-a', 'annotation.gtf', '-m', 'annotation.m53']
    docker_call(job=job, work_dir=work_dir, parameters=qc_command,
                tool='jvivian/checkbias:612f129--b08a1fb6526a620bbb0304b08356f2ae7c3c0ec3')
    output_tsv = glob(os.path.join(work_dir, '*counts.tsv*'))[0]
    output_vcf = os.path.join(work_dir, 'output.vcf.gz')
    tarball_files('vcqc.tar.gz', file_paths=[output_tsv, output_vcf], output_dir=work_dir)
    return job.fileStore.writeGlobalFile(os.path.join(work_dir, 'vcqc.tar.gz'))
variant_calling_and_qc
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(job, inputs, bam_id, bai_id):\n job.fileStore.logToMaster('Variant calling and QC: {}'.format(inputs.uuid))\n work_dir = job.fileStore.getLocalTempDir()\n job.fileStore.readGlobalFile(bam_id, os.path.join(work_dir,\n 'alignment.bam'))\n job.fileStore.readGlobalFile(bai_id, os.path.join(work_dir,\n 'alignment.bam.bai'))\n input_info = [(inputs.genome, 'genome.fa'), (inputs.positions,\n 'positions.tsv'), (inputs.genome_index, 'genome.fa.fai'), (inputs.\n gtf, 'annotation.gtf'), (inputs.gtf_m53, 'annotation.m53')]\n for url, fname in input_info:\n download_url(job=job, url=url, work_dir=work_dir, name=fname)\n variant_command = ['mpileup', '-f', 'genome.fa', '-l', 'positions.tsv',\n '-v', 'alignment.bam', '-t',\n 'DP,SP,INFO/AD,INFO/ADF,INFO/ADR,INFO/DPR,SP', '-o',\n '/data/output.vcf.gz']\n docker_call(job=job, work_dir=work_dir, parameters=variant_command,\n tool=\n 'quay.io/ucsc_cgl/samtools:1.3--256539928ea162949d8a65ca5c79a72ef557ce7c'\n )\n qc_command = ['-o', 'qc', '-n', 'alignment.bam', '-a', 'annotation.gtf',\n '-m', 'annotation.m53']\n docker_call(job=job, work_dir=work_dir, parameters=qc_command, tool=\n 'jvivian/checkbias:612f129--b08a1fb6526a620bbb0304b08356f2ae7c3c0ec3')\n output_tsv = glob(os.path.join(work_dir, '*counts.tsv*'))[0]\n output_vcf = os.path.join(work_dir, 'output.vcf.gz')\n tarball_files('vcqc.tar.gz', file_paths=[output_tsv, output_vcf],\n output_dir=work_dir)\n return job.fileStore.writeGlobalFile(os.path.join(work_dir, 'vcqc.tar.gz'))\n", "role": "user" }, { "content": "variant_calling_and_qc", "role": "assistant" } ]
def x(job, inputs, vcqc_id, spladder_id):
    job.fileStore.logToMaster('Consolidating files and uploading: {}'.format(inputs.uuid))
    work_dir = job.fileStore.getLocalTempDir()
    uuid = inputs.uuid
    vcqc_tar = job.fileStore.readGlobalFile(vcqc_id, os.path.join(work_dir, 'vcqc.tar.gz'))
    spladder_tar = job.fileStore.readGlobalFile(spladder_id, os.path.join(work_dir, 'spladder.tar.gz'))
    fname = uuid + '.tar.gz' if not inputs.improper_pair else 'IMPROPER_PAIR' + uuid + '.tar.gz'
    out_tar = os.path.join(work_dir, fname)
    with tarfile.open(os.path.join(work_dir, out_tar), 'w:gz') as f_out:
        for tar in [vcqc_tar, spladder_tar]:
            with tarfile.open(tar, 'r') as f_in:
                for tarinfo in f_in:
                    with closing(f_in.extractfile(tarinfo)) as f_in_file:
                        if tar == vcqc_tar:
                            tarinfo.name = os.path.join(uuid, 'variants_and_qc', os.path.basename(tarinfo.name))
                        else:
                            tarinfo.name = os.path.join(uuid, 'spladder', os.path.basename(tarinfo.name))
                        f_out.addfile(tarinfo, fileobj=f_in_file)
    if inputs.output_dir:
        mkdir_p(inputs.output_dir)
        shutil.copy(out_tar, os.path.join(inputs.output_dir, os.path.basename(out_tar)))
    if inputs.output_s3_dir:
        out_id = job.fileStore.writeGlobalFile(out_tar)
        job.addChildJobFn(s3am_upload_job, file_id=out_id, s3_dir=inputs.output_s3_dir,
                          file_name=fname, key_path=inputs.ssec, cores=inputs.cores)
consolidate_output_tarballs
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(job, inputs, vcqc_id, spladder_id):\n job.fileStore.logToMaster('Consolidating files and uploading: {}'.\n format(inputs.uuid))\n work_dir = job.fileStore.getLocalTempDir()\n uuid = inputs.uuid\n vcqc_tar = job.fileStore.readGlobalFile(vcqc_id, os.path.join(work_dir,\n 'vcqc.tar.gz'))\n spladder_tar = job.fileStore.readGlobalFile(spladder_id, os.path.join(\n work_dir, 'spladder.tar.gz'))\n fname = (uuid + '.tar.gz' if not inputs.improper_pair else \n 'IMPROPER_PAIR' + uuid + '.tar.gz')\n out_tar = os.path.join(work_dir, fname)\n with tarfile.open(os.path.join(work_dir, out_tar), 'w:gz') as f_out:\n for tar in [vcqc_tar, spladder_tar]:\n with tarfile.open(tar, 'r') as f_in:\n for tarinfo in f_in:\n with closing(f_in.extractfile(tarinfo)) as f_in_file:\n if tar == vcqc_tar:\n tarinfo.name = os.path.join(uuid,\n 'variants_and_qc', os.path.basename(tarinfo\n .name))\n else:\n tarinfo.name = os.path.join(uuid, 'spladder',\n os.path.basename(tarinfo.name))\n f_out.addfile(tarinfo, fileobj=f_in_file)\n if inputs.output_dir:\n mkdir_p(inputs.output_dir)\n shutil.copy(out_tar, os.path.join(inputs.output_dir, os.path.\n basename(out_tar)))\n if inputs.output_s3_dir:\n out_id = job.fileStore.writeGlobalFile(out_tar)\n job.addChildJobFn(s3am_upload_job, file_id=out_id, s3_dir=inputs.\n output_s3_dir, file_name=fname, key_path=inputs.ssec, cores=\n inputs.cores)\n", "role": "user" }, { "content": "consolidate_output_tarballs", "role": "assistant" } ]
def x():
    url_prefix = 'https://s3-us-west-2.amazonaws.com/cgl-pipeline-inputs/'
    parser = argparse.ArgumentParser(description=main.__doc__, formatter_class=argparse.RawTextHelpFormatter)
    parser.add_argument('--config', required=True, help='Path to configuration file for samples, one per line. UUID,URL_to_bamfile. The URL may be a standard "http://", a "file://<abs_path>", or "s3://<bucket>/<key>"')
    parser.add_argument('--gtf', help='URL to annotation GTF file', default=url_prefix + 'rnaseq_cgl/gencode.v23.annotation.gtf')
    parser.add_argument('--gtf-pickle', help='Pickled GTF file', default=url_prefix + 'spladder/gencode.v23.annotation.gtf.pickle')
    parser.add_argument('--gtf-m53', help='M53 preprocessing annotation table', default=url_prefix + 'spladder/gencode.v23.annotation.gtf.m53')
    parser.add_argument('--positions', help='URL to SNP positions over genes file (TSV)', default=url_prefix + 'spladder/positions_fixed.tsv')
    parser.add_argument('--genome', help='URL to Genome fasta', default=url_prefix + 'rnaseq_cgl/hg38_no_alt.fa')
    parser.add_argument('--genome-index', help='Index file (fai) of genome', default=url_prefix + 'spladder/hg38_no_alt.fa.fai')
    parser.add_argument('--ssec', default=None, help='Path to master key used for downloading encrypted files.')
    parser.add_argument('--output-s3-dir', default=None, help='S3 Directory of the form: s3://bucket/directory')
    parser.add_argument('--output-dir', default=None, help='full path where final results will be output')
    parser.add_argument('--sudo', action='store_true', default=False, help='Set flag if sudo is required to run Docker.')
    parser.add_argument('--star-index', help='URL to download STAR Index built from HG38/gencodev23 annotation.', default=url_prefix + 'rnaseq_cgl/starIndex_hg38_no_alt.tar.gz')
    parser.add_argument('--fwd-3pr-adapter', help="Sequence for the FWD 3' Read Adapter.", default='AGATCGGAAGAG')
    parser.add_argument('--rev-3pr-adapter', help="Sequence for the REV 3' Read Adapter.", default='AGATCGGAAGAG')
    Job.Runner.addToilOptions(parser)
    args = parser.parse_args()
    if args.config:
        assert os.path.isfile(args.config), 'Config not found at: {}'.format(args.config)
    if args.ssec:
        assert os.path.isfile(args.ssec), 'Encryption key not found at: {}'.format(args.config)
    if args.output_s3_dir:
        assert args.output_s3_dir.startswith('s3://'), 'Wrong format for output s3 directory'
    for program in ['curl', 'docker']:
        assert which(program), 'Program "{}" must be installed on every node.'.format(program)
    Job.Runner.startToil(Job.wrapJobFn(parse_input_samples, args), args)
main
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x():\n url_prefix = 'https://s3-us-west-2.amazonaws.com/cgl-pipeline-inputs/'\n parser = argparse.ArgumentParser(description=main.__doc__,\n formatter_class=argparse.RawTextHelpFormatter)\n parser.add_argument('--config', required=True, help=\n 'Path to configuration file for samples, one per line. UUID,URL_to_bamfile. The URL may be a standard \"http://\", a \"file://<abs_path>\", or \"s3://<bucket>/<key>\"'\n )\n parser.add_argument('--gtf', help='URL to annotation GTF file', default\n =url_prefix + 'rnaseq_cgl/gencode.v23.annotation.gtf')\n parser.add_argument('--gtf-pickle', help='Pickled GTF file', default=\n url_prefix + 'spladder/gencode.v23.annotation.gtf.pickle')\n parser.add_argument('--gtf-m53', help=\n 'M53 preprocessing annotation table', default=url_prefix +\n 'spladder/gencode.v23.annotation.gtf.m53')\n parser.add_argument('--positions', help=\n 'URL to SNP positions over genes file (TSV)', default=url_prefix +\n 'spladder/positions_fixed.tsv')\n parser.add_argument('--genome', help='URL to Genome fasta', default=\n url_prefix + 'rnaseq_cgl/hg38_no_alt.fa')\n parser.add_argument('--genome-index', help='Index file (fai) of genome',\n default=url_prefix + 'spladder/hg38_no_alt.fa.fai')\n parser.add_argument('--ssec', default=None, help=\n 'Path to master key used for downloading encrypted files.')\n parser.add_argument('--output-s3-dir', default=None, help=\n 'S3 Directory of the form: s3://bucket/directory')\n parser.add_argument('--output-dir', default=None, help=\n 'full path where final results will be output')\n parser.add_argument('--sudo', action='store_true', default=False, help=\n 'Set flag if sudo is required to run Docker.')\n parser.add_argument('--star-index', help=\n 'URL to download STAR Index built from HG38/gencodev23 annotation.',\n default=url_prefix + 'rnaseq_cgl/starIndex_hg38_no_alt.tar.gz')\n parser.add_argument('--fwd-3pr-adapter', help=\n \"Sequence for the FWD 3' Read Adapter.\", default='AGATCGGAAGAG')\n parser.add_argument('--rev-3pr-adapter', help=\n \"Sequence for the REV 3' Read Adapter.\", default='AGATCGGAAGAG')\n Job.Runner.addToilOptions(parser)\n args = parser.parse_args()\n if args.config:\n assert os.path.isfile(args.config), 'Config not found at: {}'.format(\n args.config)\n if args.ssec:\n assert os.path.isfile(args.ssec\n ), 'Encryption key not found at: {}'.format(args.config)\n if args.output_s3_dir:\n assert args.output_s3_dir.startswith('s3://'\n ), 'Wrong format for output s3 directory'\n for program in ['curl', 'docker']:\n assert which(program\n ), 'Program \"{}\" must be installed on every node.'.format(program)\n Job.Runner.startToil(Job.wrapJobFn(parse_input_samples, args), args)\n", "role": "user" }, { "content": "main", "role": "assistant" } ]
def x(s):
    if _DOTTED_QUAD_RE.match(s):
        quads = s.split('.')
        for q in quads:
            if int(q) > 255:
                return False
        return True
    return False
validate_ip
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(s):\n if _DOTTED_QUAD_RE.match(s):\n quads = s.split('.')\n for q in quads:\n if int(q) > 255:\n return False\n return True\n return False\n", "role": "user" }, { "content": "validate_ip", "role": "assistant" } ]
def x(s):
    if validate_ip(s):
        mask = bin(ip2network(s))[2:].zfill(32)
        seen0 = False
        for c in mask:
            if '1' == c:
                if seen0:
                    return False
            else:
                seen0 = True
        return True
    else:
        return False
validate_netmask
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(s):\n if validate_ip(s):\n mask = bin(ip2network(s))[2:].zfill(32)\n seen0 = False\n for c in mask:\n if '1' == c:\n if seen0:\n return False\n else:\n seen0 = True\n return True\n else:\n return False\n", "role": "user" }, { "content": "validate_netmask", "role": "assistant" } ]
def x(s):
    if isinstance(s, basestring):
        if '/' in s:
            start, mask = s.split('/', 2)
            return validate_ip(start) and validate_netmask(mask)
        else:
            return False
    raise TypeError('expected string or unicode')
validate_subnet
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(s):\n if isinstance(s, basestring):\n if '/' in s:\n start, mask = s.split('/', 2)\n return validate_ip(start) and validate_netmask(mask)\n else:\n return False\n raise TypeError('expected string or unicode')\n", "role": "user" }, { "content": "validate_subnet", "role": "assistant" } ]